pax_global_header00006660000000000000000000000064126701017440014514gustar00rootroot0000000000000052 comment=20f81dde9bd97c86b2d0e33bbbf1388018611929 docker-1.10.3/000077500000000000000000000000001267010174400130455ustar00rootroot00000000000000docker-1.10.3/.dockerignore000066400000000000000000000000331267010174400155150ustar00rootroot00000000000000bundles .gopath vendor/pkg docker-1.10.3/.gitignore000066400000000000000000000011171267010174400150350ustar00rootroot00000000000000# Docker project generated files to ignore # if you want to ignore files created by your editor/tools, # please consider a global .gitignore https://help.github.com/articles/ignoring-files *.exe *.exe~ *.orig *.test .*.swp .DS_Store .bashrc .dotcloud .flymake* .git/ .gopath/ .hg/ .vagrant* Vagrantfile a.out autogen/ bin build_src bundles/ docker/docker dockerversion/version_autogen.go docs/AWS_S3_BUCKET docs/GITCOMMIT docs/GIT_BRANCH docs/VERSION docs/_build docs/_static docs/_templates docs/changed-files # generated by man/md2man-all.sh man/man1 man/man5 man/man8 pyenv vendor/pkg/ docker-1.10.3/.mailmap000066400000000000000000000211571267010174400144740ustar00rootroot00000000000000# Generate AUTHORS: hack/generate-authors.sh # Tip for finding duplicates (besides scanning the output of AUTHORS for name # duplicates that aren't also email duplicates): scan the output of: # git log --format='%aE - %aN' | sort -uf # # For explanation on this file format: man git-shortlog Patrick Stapleton Shishir Mahajan Erwin van der Koogh Ahmed Kamal Tejesh Mehta Cristian Staretu Cristian Staretu Cristian Staretu Marcus Linke Aleksandrs Fadins Christopher Latham Hu Keping Wayne Chang Chen Chao Daehyeok Mun Guillaume J. 
Charmes Thatcher Peskens Thatcher Peskens Thatcher Peskens dhrp Jérôme Petazzoni jpetazzo Jérôme Petazzoni Joffrey F Joffrey F Joffrey F Tim Terhorst Andy Smith Walter Stanish Roberto Hashioka Konstantin Pelykh David Sissitka Nolan Darilek Benoit Chesneau Jordan Arentsen Daniel Garcia Miguel Angel Fernández Bhiraj Butala Faiz Khan Victor Lyuboslavsky Jean-Baptiste Barth Matthew Mueller Shih-Yuan Lee Daniel Mizyrycki root Jean-Baptiste Dalido Sven Dowideit Sven Dowideit Sven Dowideit Sven Dowideit <¨SvenDowideit@home.org.au¨> Sven Dowideit Sven Dowideit Alexandr Morozov O.S. Tezer Roberto G. Hashioka Sridhar Ratnakumar Sridhar Ratnakumar Liang-Chi Hsieh Aleksa Sarai Will Weaver Timothy Hobbs Nathan LeClaire Nathan LeClaire Matthew Heon Francisco Carriedo Brian Goff Hollie Teal Jessica Frazelle Jessie Frazelle Thomas LEVEIL Thomas LÉVEIL Antonio Murdaca Antonio Murdaca Antonio Murdaca Darren Shepherd Deshi Xiao Deshi Xiao Doug Davis Jacob Atzen Jeff Nickoloff John Howard (VM) John Howard Madhu Venugopal Mary Anthony Mary Anthony moxiegirl Mary Anthony mattyw resouer AJ Bowen soulshake AJ Bowen soulshake Tibor Vass Tibor Vass Vincent Bernat Yestin Sun bin liu John Howard (VM) jhowardmsft Ankush Agarwal Tangi COLIN tangicolin docker-1.10.3/AUTHORS000066400000000000000000001130331267010174400141160ustar00rootroot00000000000000# This file lists all individuals having contributed content to the repository. # For how it is generated, see `hack/generate-authors.sh`. 
Aanand Prasad Aaron Davidson Aaron Feng Aaron Huslage Aaron Welch Abel Muiño Abhinav Ajgaonkar Abhishek Chanda Abin Shahab Adam Miller Adam Singer Aditya Adria Casas Adrian Mouat Adrien Folie Ahmed Kamal Ahmet Alp Balkan Aidan Hobson Sayers AJ Bowen Al Tobey alambike Alan Thompson Albert Callarisa Albert Zhang Aleksa Sarai Aleksandrs Fadins Alena Prokharchyk Alessandro Boch Alessio Biancalana Alex Gaynor Alex Warhawk Alexander Boyd Alexander Larsson Alexander Morozov Alexander Shopov Alexandr Morozov Alexey Guskov Alexey Kotlyarov Alexey Shamrin Alexis THOMAS Allen Madsen almoehi Alvin Richards amangoel Amit Bakshi Amy Lindburg Anand Patil AnandkumarPatel Anchal Agrawal Anders Janmyr Andre Dublin <81dublin@gmail.com> Andrea Luzzardi Andrea Turli Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrew C. Bodine Andrew Clay Shafer Andrew Duckworth Andrew France Andrew Kuklewicz Andrew Macgregor Andrew Martin Andrew Munsell Andrew Weiss Andrew Williams Andrews Medina Andrey Petrov Andrey Stolbovsky André Martins Andy Chambers andy diller Andy Goldstein Andy Kipp Andy Rothfusz Andy Smith Andy Wilson Anes Hasicic Ankush Agarwal Anthony Baire Anthony Bishopric Anton Löfgren Anton Nikitin Anton Tiurin Antonio Murdaca Antony Messerli apocas ArikaChen Arnaud Porterie Arthur Barr Arthur Gautier Asbjørn Enge averagehuman Avi Das Avi Miller Barnaby Gray Barry Allard Bartłomiej Piotrowski bdevloed Ben Firshman Ben Sargent Ben Severson Ben Toews Ben Wiklund Benjamin Atkin Benoit Chesneau Bernerd Schaefer Bert Goethals Bharath Thiruveedula Bhiraj Butala bin liu Blake Geno bobby abbott boucher Bouke Haarsma Boyd Hemphill Bradley Cicenas Bradley Wright Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon Brent Salisbury Brett Kochendorfer Brian (bex) Exelbierd Brian DeHamer Brian Dorsey Brian Flad Brian Goff Brian McCallister Brian Olsen Brian Shumate Brice Jaglin Briehan Lombaard Bruno Bigras Bruno Binet Bruno Gazzera Bruno Renié Bryan Bess Bryan Boreham Bryan Matsuo Bryan 
Murphy buddhamagnet Burke Libbey Byung Kang Caleb Spare Calen Pennington Cameron Boehmer Carl X. Su Cary Casey Bisson Charles Hooper Charles Lindsay Charles Merriam Charlie Lewis Chen Chao Chen Hanxiao cheney90 Chewey Chia-liang Kao chli Chris Alfonso Chris Armstrong Chris Khoo Chris Snow Chris St. Pierre Chris Stivers Chris Wahl chrismckinnel Christian Berendt Christian Simon Christian Stefanescu ChristoperBiscardi Christophe Troestler Christopher Currie Christopher Latham Christopher Rigor Christy Perez Chun Chen Ciro S. Costa Clayton Coleman Coenraad Loubser Colin Dunklau Colin Rice Colin Walters Colm Hally Cory Forsyth cressie176 Cristian Staretu Cruceru Calin-Cristian Cyril F Daan van Berkel Daehyeok Mun Dafydd Crosby dalanlan Damjan Georgievski Dan Anolik Dan Buch Dan Cotora Dan Griffin Dan Hirsch Dan Keder Dan McPherson Dan Stine Dan Walsh Dan Williams Daniel Antlinger Daniel Exner Daniel Farrell Daniel Garcia Daniel Gasienica Daniel Menet Daniel Mizyrycki Daniel Nephin Daniel Norberg Daniel Nordberg Daniel Robinson Daniel S Daniel Von Fange Daniel YC Lin Daniel Zhang Daniel, Dao Quang Minh Danny Berger Danny Yates Darren Coxall Darren Shepherd Dave Henderson David Anderson David Calavera David Corking David Davis David Gageot David Gebler David Mackey David Mat David Mcanulty David Pelaez David R. Jenni David Röthlisberger David Sissitka David Xia David Young Davide Ceretti Dawn Chen decadent Deng Guangxing Deni Bertovic Derek Derek Derek McGowan Deric Crago Deshi Xiao Dinesh Subhraveti DiuDiugirl Djibril Koné dkumor Dmitry Demeshchuk Dmitry Gusev Dmitry V. 
Krivenok Dolph Mathews Dominik Finkbeiner Dominik Honnef Don Kirkby Don Kjer Don Spaulding Doug Davis Doug MacEachern doug tangren Dr Nic Williams dragon788 Dražen Lučanin Dustin Sallings Ed Costello Edmund Wagner Eiichi Tsukata Eike Herzbach Eivind Uggedal Elias Probst Elijah Zupancic eluck Emil Hernvall Emily Maier Emily Rose Emir Ozer Enguerran Eohyung Lee Eric Hanchrow Eric Lee Eric Myhre Eric Paris Eric Rafaloff Eric Windisch Eric-Olivier Lamey Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen Erno Hopearuoho Erwin van der Koogh Euan Eugene Yakubovich eugenkrizo Evan Carmi Evan Hazlett Evan Krall Evan Phoenix Evan Wies Evgeny Vereshchagin Eystein Måløy Stenberg ezbercih Fabiano Rosas Fabio Falci Fabio Rehm Fabrizio Regini Faiz Khan falmp Fareed Dudhia Felix Rabe Felix Schindler Ferenc Szabo Fernando Filipe Brandenburger Flavio Castelli FLGMwt Florian Weingarten Francisco Carriedo Francisco Souza Frank Herrmann Frank Macreery Frank Rosquin Fred Lifton Frederick F. Kautz IV Frederik Loeffert Freek Kalter Félix Baylac-Jacqué Gabe Rosenhouse Gabor Nagy Gabriel Monroy Galen Sampson Gareth Rushgrove Gaurav gautam, prasanna GennadySpb Geoffrey Bachelet George MacRorie George Xie Gereon Frey German DZ Gert van Valkenhoef Gianluca Borello Giuseppe Mazzotta Gleb Fotengauer-Malinovskiy Gleb M Borisov Glyn Normington Goffert van Gool golubbe Gosuke Miyashita Graydon Hoare Greg Fausak Greg Thornton grossws grunny Guilherme Salgado Guillaume Dufour Guillaume J. Charmes guoxiuyan Gurjeet Singh Guruprasad Günter Zöchbauer Hans Rødtang Harald Albers Harley Laue Harry Zhang He Simei Hector Castro Henning Sprang Hobofan Hollie Teal Hong Xu Hu Keping Hu Tao Huayi Zhang Hugo Duncan Hunter Blanks Huu Nguyen hyeongkyu.lee hyp3rdino Ian Babrou Ian Bishop Ian Bull Ian Calvert Ian Main Ian Truslove Iavael Igor Dolzhikov ILYA Khlopotov imre Fitos inglesp Isaac Dupree Isabel Jimenez Isao Jonas Ivan Fraixedes J Bruni J. 
Nunn Jack Danger Canty Jacob Atzen Jacob Edelman Jake Champlin Jake Moshenko jakedt James Allen James Carr James DeFelice James Harrison Fisher James Kyle James Lal James Mills James Turnbull Jamie Hannaford Jamshid Afshar Jan Keromnes Jan Koprowski Jan Pazdziora Jan Toebes Jan-Jaap Driessen Jana Radhakrishnan Jared Biel Jaroslaw Zabiello jaseg Jason Divock Jason Giedymin Jason Hall Jason Livesay Jason McVetta Jason Plum Jason Shepherd Jason Smith Jason Sommer Jason Stangroome Jay Jean-Baptiste Barth Jean-Baptiste Dalido Jean-Paul Calderone Jean-Tiare Le Bigot Jeff Anderson Jeff Lindsay Jeff Nickoloff Jeff Welch Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jeremy Grosser Jesse Dearing Jesse Dubay Jessica Frazelle Jezeniel Zapanta jianbosun Jilles Oldenbeuving Jim Alateras Jim Perrin Jimmy Cuadra Jimmy Puckett jimmyxian Jinsoo Park Jiri Popelka Jiří Župka jjy jmzwcn Joe Beda Joe Ferguson Joe Gordon Joe Shaw Joe Van Dyk Joel Friedly Joel Handwell Joey Gibson Joffrey F Johan Euphrosine Johan Rydberg Johannes 'fish' Ziemke John Costa John Feminella John Gardiner Myers John Gossman John Howard (VM) John OBrien III John Tims John Warwick John Willis Jon Wedaman Jonas Pfenniger Jonathan A. Sternberg Jonathan Boulle Jonathan Camp Jonathan Dowland Jonathan McCrohan Jonathan Mueller Jonathan Pares Jonathan Rudenberg Joost Cassee Jordan Arentsen Jordan Sissel Joseph Anthony Pasquale Holsten Joseph Hager Joseph Kern Josh Josh Hawn Josh Poimboeuf Josiah Kiehl José Tomás Albornoz JP Julian Taylor Julien Barbier Julien Bordellier Julien Dubois Jun-Ru Chang Justin Force Justin Plock Justin Simonelis Jyrki Puttonen Jérôme Petazzoni Jörg Thalheim Kamil Domanski Karan Lyons kargakis Karl Grzeszczak Katie McLaughlin Kato Kazuyoshi Katrina Owen Kawsar Saiyeed Keli Hu Ken Cochrane Ken ICHIKAWA Kent Johnson Kevin "qwazerty" Houdebert Kevin Clark Kevin J. 
Lynagh Kevin Menard Kevin Wallace Kevin Yap Keyvan Fatehi kies Kim BKC Carlbacker Kimbro Staken Kiran Gangadharan Kirill SIbirev knappe Kohei Tsuruta Konrad Kleine Konstantin Pelykh Krasimir Georgiev krrg Kyle Conroy kyu Lachlan Coote Lajos Papp Lakshan Perera lalyos Lance Chen Lance Kinley Lars Kellogg-Stedman Lars R. Damerow Laurie Voss leeplay Lei Jitang Len Weincier Leszek Kowalski Levi Gross Lewis Marshall Lewis Peckover Liana Lo Liang-Chi Hsieh limsy Liu Hua Lloyd Dewolf Lokesh Mandvekar Lorenz Leutgeb Lorenzo Fontana Louis Opter Luis Martínez de Bartolomé Izquierdo lukaspustina lukemarsden Lénaïc Huard Ma Shimiao Mabin Madhu Venugopal Mahesh Tiyyagura malnick Malte Janduda Manfred Touron Manfred Zabarauskas Manuel Meurer Manuel Woelker Marc Abramowitz Marc Kuo Marc Tamsky Marco Hennings Marcus Farkas Marcus Linke Marcus Ramberg Marek Goldmann Marian Marinov Marianna Marius Voila Mark Allen Mark McGranaghan Mark West Marko Mikulicic Marko Tibold Markus Fix Martijn Dwars Martijn van Oosterhout Martin Honermeyer Martin Redmond Mary Anthony Masahito Zembutsu Mason Malone Mateusz Sulima Mathias Monnerville Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann Matt Bentley Matt Haggard Matt McCormick Matthew Heon Matthew Mayer Matthew Mueller Matthew Riley Matthias Klumpp Matthias Kühnle mattymo mattyw mauriyouth Max Shytikov Maxim Kulkin Maxim Treskin Maxime Petazzoni Meaglith Ma meejah Megan Kostick Mehul Kar Mengdi Gao Mert Yazıcıoğlu Michael A. 
Smith Michael Brown Michael Chiang Michael Crosby Michael Gorsuch Michael Hudson-Doyle Michael Neale Michael Prokop Michael Scharf Michael Stapelberg Michael Steinert Michael Thies Michael West Michal Fojtik Michal Jemala Michal Minar Michaël Pailloncy Michiel@unhosted Miguel Angel Fernández Mihai Borobocea Mike Chelen Mike Dillon Mike Gaffney Mike Leone Mike MacCana Mike Naberezny Mike Snitzer Mikhail Sobolev Mingzhen Feng Mitch Capper Mohit Soni Morgante Pell Morten Siebuhr Moysés Borges Mrunal Patel mschurenko Mustafa Akın Médi-Rémi Hashim Nan Monnand Deng Naoki Orii Natalie Parker Nate Eagleson Nate Jones Nathan Hsieh Nathan Kleyn Nathan LeClaire Neal McBurnett Nelson Chen Nghia Tran Niall O'Higgins Nicholas E. Rabenau Nick Irvine Nick Parker Nick Payne Nick Stenning Nick Stinemates Nicolas De loof Nicolas Dudebout Nicolas Goy Nicolas Kaiser NikolaMandic nikolas noducks Nolan Darilek nponeccop Nuutti Kotivuori nzwsch O.S. Tezer OddBloke odk- Oguz Bilgic Oh Jinkyun Ole Reifschneider Olivier Gambier pandrew panticz Pascal Borreli Pascal Hartig Patrick Devine Patrick Hemmer Patrick Stapleton pattichen Paul paul Paul Annesley Paul Bellamy Paul Bowsher Paul Hammond Paul Jimenez Paul Lietar Paul Morie Paul Nasrat Paul Weaver Pavel Lobashov Pavel Tikhomirov Pavlos Ratis Peggy Li Peter Bourgon Peter Braden Peter Choi Peter Dave Hello Peter Ericson Peter Esbensen Peter Salvatore Peter Volpe Peter Waller Phil Phil Estes Phil Spitler Philipp Weissensteiner Phillip Alexander Piergiuliano Bossi Pierre Pierre Wacrenier Pierre-Alain RIVIERE Piotr Bogdan pixelistik Porjo Pradeep Chhetri Prasanna Gautam Przemek Hejman pysqz Qiang Huang Quentin Brossard r0n22 Rafal Jeczalik Rafe Colton Raghuram Devarakonda Rajat Pandit Rajdeep Dua Ralph Bean Ramkumar Ramachandra Ramon van Alteren Recursive Madman Remi Rampin Renato Riccieri Santos Zannon resouer rgstephens Rhys Hiltner Rich Seymour Richard Richard Burnison Richard Harvey Richard Metzler Richo Healey Rick Bradley Rick van de Loo 
Rick Wieman Rik Nijessen Robert Bachmann Robert Bittle Robert Obryk Roberto G. Hashioka Robin Speekenbrink robpc Rodrigo Vaz Roel Van Nyen Roger Peppe Rohit Jnagal Roland Huß Roland Moriz Ron Smits root Rovanion Luckey Rudolph Gottesheim Ryan Anderson Ryan Aslett Ryan Detzel Ryan Fowler Ryan O'Donnell Ryan Seto Ryan Thomas Rémy Greinhofer s. rannou s00318865 Sabin Basyal Sachin Joshi Sam Abed Sam Alba Sam Bailey Sam J Sharpe Sam Reis Sam Rijs Sami Wagiaalla Samuel Andaya Samuel PHAN Sankar சங்கர் Sanket Saurav sapphiredev Satnam Singh satoru Satoshi Amemiya Scott Bessler Scott Collier Scott Johnston Scott Stamp Scott Walls sdreyesg Sean Cronin Sean P. Kane Sebastiaan van Steenis Sebastiaan van Stijn Senthil Kumar Selvaraj SeongJae Park Seongyeol Lim Sergey Alekseev Sergey Evstifeev Shane Canon shaunol Shawn Landden Shawn Siefkas Shih-Yuan Lee Shijiang Wei Shishir Mahajan shuai-z sidharthamani Silas Sewell Simei He Simon Eskildsen Simon Leinen Simon Taranto Sindhu S Sjoerd Langkemper Solomon Hykes Song Gao Soulou Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu Srini Brahmaroutu Steeve Morin Stefan Praszalowicz Stephen Crosby Stephen J Day Steve Francia Steve Koch Steven Burgess Steven Merrill Steven Richards Steven Taylor Sven Dowideit Swapnil Daingade Sylvain Baubeau Sylvain Bellemare Sébastien Sébastien Luttringer Sébastien Stormacq tang0th Tangi COLIN Tatsuki Sugiura Tatsushi Inagaki Ted M. 
Young Tehmasp Chaudhri Tejesh Mehta Thatcher Peskens theadactyl Thell 'Bo' Fowler Thermionix Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Hansen Thomas LEVEIL Thomas Orozco Thomas Schroeter Thomas Sjögren Thomas Texier Tianon Gravi Tibor Vass Tiffany Low Tim Bosse Tim Hockin Tim Ruffles Tim Smith Tim Terhorst Timothy Hobbs tjwebb123 tobe Tobias Bieniek Tobias Gesellchen Tobias Schmidt Tobias Schwab Todd Lunter Todd Whiteman Tom Fotherby Tom Hulihan Tom Maaswinkel Tomas Tomecek Tomasz Lipinski Tomasz Nurkiewicz Tommaso Visconti Tomáš Hrčka Tonis Tiigi Tonny Xu Tony Daws Tony Miller Torstein Husebø tpng Travis Cline Travis Thieman Trent Ogren Tristan Carel Tyler Brock Tzu-Jung Lee Ulysse Carion unknown vagrant Vaidas Jablonskis vgeta Victor Coisne Victor Lyuboslavsky Victor Marmol Victor Vieux Viktor Vojnovski Vincent Batts Vincent Bernat Vincent Bernat Vincent Demeester Vincent Giersch Vincent Mayers Vincent Woo Vinod Kulkarni Vishal Doshi Vishnu Kannan Vitor Monteiro Vivek Agarwal Vivek Dasgupta Vivek Goyal Vladimir Bulyga Vladimir Kirillov Vladimir Rutsky VladimirAus Vojtech Vitek (V-Teq) waitingkuo Walter Leibbrandt Walter Stanish Ward Vandewege WarheadsSE Wayne Chang Wei-Ting Kuo Wes Morgan Will Dietz Will Rouesnel Will Weaver willhf William Delanoue William Henry William Riancho William Thurston WiseTrem wlan0 Wolfgang Powisch wonderflow xamyzhao XiaoBing Jiang Xinzi Zhou Xiuming Chen xuzhaokui y00277921 Yahya YAMADA Tsuyoshi Yan Feng Yang Bai Yasunori Mahata Yestin Sun Yihang Ho Yohei Ueda Yongzhi Pan Yuan Sun Yurii Rashkovskii Zac Dover Zach Borboa Zain Memon Zaiste! Zane DeGraffenried Zefan Li Zen Lin(Zhinan Lin) Zhang Wei Zhang Wentao Zilin Du zimbatm Zoltan Tombol zqh Álex González Álvaro Lázaro 尹吉峰 docker-1.10.3/CHANGELOG.md000066400000000000000000002725111267010174400146660ustar00rootroot00000000000000# Changelog Items starting with `DEPRECATE` are important deprecation notices. 
For more information on the list of deprecated flags and APIs please have a look at https://docs.docker.com/misc/deprecated/ where target removal dates can also be found. ## 1.10.3 (2016-03-10) ### Runtime - Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706) - Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647) ### Distribution - Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831) - Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030) ### Plugin system - Fix issue preventing volume plugins to start when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834) - Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682) - Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680) ### Security - Fix linux32 emulation to fail during docker build [#20672](https://github.com/docker/docker/pull/20672) It was due to the `personality` syscall being blocked by the default seccomp profile. - Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981) It was due to the `ipc` syscall being blocked by the default seccomp profile. 
- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685) - Fix issue preventing daemon to start if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725) ## 1.10.2 (2016-02-22) ### Runtime - Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518) - Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333) - Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446) - Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471) - Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522) ### Distribution - Keep layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513) - Handle gracefully a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372) - Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367) - Fix tar-split files corruption during migration that later cause docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458) ### Networking - Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510) ### Volumes - Fix issue with multiple volume references with same name [#20381](https://github.com/docker/docker/pull/20381) ### Security - Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523) ## 1.10.1 (2016-02-11) ### Runtime * Do not stop daemon on migration hard failure [#20156](https://github.com/docker/docker/pull/20156) - Fix various issues with migration to 
content-addressable images [#20058](https://github.com/docker/docker/pull/20058) - Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045) - Do not leak /dev/mqueue from the host to all containers, keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133) - Fix `docker ps --filter before=...` to not show stopped containers without providing `-a` flag [#20135](https://github.com/docker/docker/pull/20135) ### Security - Fix issue preventing docker events to work properly with authorization plugin [#20002](https://github.com/docker/docker/pull/20002) ### Distribution * Add additional verifications and prevent from uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164) - Fix regression preventing uppercase characters in image reference hostname [#20175](https://github.com/docker/docker/pull/20175) ### Networking - Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060) - Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235) - Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181) - Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181) ### Logging - Fix bug where tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109) ### Volumes - Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983) ### Misc - Remove TasksMax from Docker systemd service [#20167](https://github.com/docker/docker/pull/20167) ## 1.10.0 (2016-02-04) **IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers. 
A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present. Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime. Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/ ### Runtime + New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078) + Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587) + Add `--format` flag to `docker images` command [#17692](https://github.com/docker/docker/pull/17692) + Allow to set daemon configuration in a file and hot-reload it with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587) + Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888) This change is backward compatible in the API, but not on the CLI. 
+ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959) + Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466) + Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879) + Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277) + Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666) + Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168) + Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249) + Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478) + Add `--cgroup-parent` flag on `daemon` to set cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062) + Add `-L` flag to docker cp to follow symlinks [#16613](https://github.com/docker/docker/pull/16613) + New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908) * Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012) * Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495) * Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742) * Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704) * Emit a "tag" event when tagging an image with `build -t` 
[#17115](https://github.com/docker/docker/pull/17115) * Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208) * Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780) * Permit `OPTIONS` request against any url, thus fixing issue with CORS [#19569](https://github.com/docker/docker/pull/19569) - Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428) - Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326) - Fix race condition causing autorestart turning off on restart [#17629](https://github.com/docker/docker/pull/17629) - Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216) - Fix obscure bug preventing to start containers [#19751](https://github.com/docker/docker/pull/19751) - Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722) - devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123) ### Security + Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187) + Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989) + Add default seccomp profile [#18780](https://github.com/docker/docker/pull/18780) + Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365) + Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887) This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled. 
Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled. * Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452) ### Distribution * Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. Images no longer depend on the parent chain but contain a list of layer references. `docker load`/`docker save` tarballs now also contain content-addressable image configurations. For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration * Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785) * Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109), [#18353](https://github.com/docker/docker/pull/18353) * Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590) - Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743) ### Networking + Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198) + Support for network-scoped alias using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242) + Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a 
container in a network [#19001](https://github.com/docker/docker/pull/19001) + Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316) + Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276) + Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167) + Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204) + Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481) + Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229) + Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775) + Add `--force` to `network disconnect` to force container to be disconnected from network [#19317](https://github.com/docker/docker/pull/19317) * Support for multi-host networking using built-in overlay driver for all engine supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775) * `--link` is now supported on `docker run` for containers in user-defined network [#19229](https://github.com/docker/docker/pull/19229) * Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489) * Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615) * Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316) * Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782) * Add support for network connect/disconnect to stopped containers 
[#18906](https://github.com/docker/docker/pull/18906) * Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323) - Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108) - Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106) - Fix issue sometimes preventing docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338) - Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573) ### Logging + New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488) + Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998) * Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495) * Enhance AWS logs to auto-detect region [#16640](https://github.com/docker/docker/pull/16640) ### Volumes + Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034) * Add `ls` and `inspect` endpoints to volume plugin API [#16534](https://github.com/docker/docker/pull/16534) Existing plugins need to make use of these new APIs to satisfy users' expectations. For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549) - Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175) - Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500) - Fix `docker volumes ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671) - Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568) - Allow external volume drivers to host anonymous 
volumes [#19190](https://github.com/docker/docker/pull/19190) ### Builder + Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090) - Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055) - Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283) ### Client + Add support for overriding the API version to use via a `DOCKER_API_VERSION` environment-variable [#15964](https://github.com/docker/docker/pull/15964) - Fix a bug preventing Windows clients from logging in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891) ### Misc * systemd: Set TasksMax in addition to LimitNPROC in systemd service file [#19391](https://github.com/docker/docker/pull/19391) ### Deprecations * Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700) * Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700) * Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724) * Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799) * Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809) * Deprecate `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350) ## 1.9.1 (2015-11-21) ### Runtime - Do not prevent daemon from booting if images could not be restored (#17695) - Force IPC mount to unmount on daemon shutdown/init (#17539) - Turn IPC unmount errors into warnings (#17554) - Fix `docker stats` performance regression (#17638) - Clarify cryptic error message upon `docker logs` if 
`--log-driver=none` (#17767) - Fix seldom panics (#17639, #17634, #17703) - Fix opq whiteouts problems for files with dot prefix (#17819) - devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918) - devicemapper: fix displayed fs in docker info (#17974) - selinux: only relabel if user requested so with the `z` option (#17450, #17834) - Do not make network calls when normalizing names (#18014) ### Client - Fix `docker login` on windows (#17738) - Fix bug with `docker inspect` output when not connected to daemon (#17715) - Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680) ### Builder - Fix regression with symlink behavior in ADD/COPY (#17710) ### Networking - Allow passing a network ID as an argument for `--net` (#17558) - Fix connect to host and prevent disconnect from host for `host` network (#17476) - Fix `--fixed-cidr` issue when gateway ip falls in ip-range and ip-range is not the first block in the network (#17853) - Restore deterministic `IPv6` generation from `MAC` address on default `bridge` network (#17890) - Allow port-mapping only for endpoints created on docker run (#17858) - Fixed an endpoint delete issue with a possible stale sbox (#18102) ### Distribution - Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047) ## 1.9.0 (2015-11-03) ### Runtime + `docker stats` now returns block IO metrics (#15005) + `docker stats` now details network stats per interface (#15786) + Add `ancestor=` filter to `docker ps --filter` flag to filter containers based on their ancestor images (#14570) + Add `label=` filter to `docker ps --filter` to filter containers based on label (#16530) + Add `--kernel-memory` flag to `docker run` (#14006) + Add `--message` flag to `docker import` allowing to specify an optional message (#15711) + Add `--privileged` flag to `docker exec` (#14113) + Add `--stop-signal` flag to `docker run` allowing to replace the container process stopping signal 
(#15307) + Add a new `unless-stopped` restart policy (#15348) + Inspecting an image now returns tags (#13185) + Add container size information to `docker inspect` (#15796) + Add `RepoTags` and `RepoDigests` field to `/images/{name:.*}/json` (#17275) - Remove the deprecated `/container/ps` endpoint from the API (#15972) - Send and document correct HTTP codes for `/exec/<id>/start` (#16250) - Share shm and mqueue between containers sharing IPC namespace (#15862) - Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) - Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted with `ro` option (#14965) - Improve `rmi` performance (#16890) - Do not update /etc/hosts for the default bridge network, except for links (#17325) - Fix conflict with duplicate container names (#17389) - Fix an issue with incorrect template execution in `docker inspect` (#17284) - DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271) ### Client + Allow `docker import` to import from local files (#11907) ### Builder + Add a `STOPSIGNAL` Dockerfile instruction allowing to set a different stop-signal for the container process (#15307) + Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` that allows to add build-time environment variables (#15182) - Improve cache miss performance (#16890) ### Storage - devicemapper: Implement deferred deletion capability (#16381) ### Networking + `docker network` exits experimental and is part of standard release (#16645) + New network top-level concept, with associated subcommands and API (#16645) WARNING: the API is different from the experimental API + Support for multiple isolated/micro-segmented networks (#16645) + Built-in multihost networking using VXLAN based overlay driver (#14071) + Support for third-party network plugins (#13424) + Ability to dynamically connect containers to multiple networks (#16645) + Support for user-defined IP address management via pluggable IPAM 
drivers (#16910) + Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in nodes discovery (#16229) + Add `--cluster-store-opt` for setting up TLS settings (#16644) + Add `--dns-opt` to the daemon (#16031) - DEPRECATE following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect the networking settings of a container per network. ### Volumes + New top-level `volume` subcommand and API (#14242) - Move API volume driver settings to host-specific config (#15798) - Print an error message if volume name is not unique (#16009) - Ensure volumes created from Dockerfiles always use the local volume driver (#15507) - DEPRECATE auto-creating missing host paths for bind mounts (#16349) ### Logging + Add `awslogs` logging driver for Amazon CloudWatch (#15495) + Add generic `tag` log option to allow customizing container/image information passed to driver (e.g. show container names) (#15384) - Implement the `docker logs` endpoint for the journald driver (#13707) - DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) 
(#15384) ### Distribution + `docker search` now works with partial names (#16509) - Push optimization: avoid buffering to file (#15493) - The daemon will display progress for images that were already being pulled by another client (#15489) - Only permissions required for the current action being performed are requested + Renaming trust keys (and respective environment variables) from `offline` to `root` and `tagging` to `repository` (#16894) - DEPRECATE trust key environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) ### Security + Add SELinux profiles to the rpm package (#15832) - Fix various issues with AppArmor profiles provided in the deb package (#14609) - Add AppArmor policy that prevents writing to /proc (#15571) ## 1.8.3 (2015-10-12) ### Distribution - Fix layer IDs lead to local graph poisoning (CVE-2014-8178) - Fix manifest validation and parsing logic errors allow pull-by-digest validation bypass (CVE-2014-8179) + Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry ## 1.8.2 (2015-09-10) ### Distribution - Fixes rare edge case of handling GNU LongLink and LongName entries. - Fix ^C on docker pull. - Fix docker pull issues on client disconnection. - Fix issue that caused the daemon to panic when loggers weren't configured properly. - Fix goroutine leak pulling images from registry V2. ### Runtime - Fix a bug mounting cgroups for docker daemons running inside docker containers. - Initialize log configuration properly. ### Client - Handle `-q` flag in `docker ps` properly when there is a default format. ### Networking - Fix several corner cases with netlink. ### Contrib - Fix several issues with bash completion. 
## 1.8.1 (2015-08-12) ### Distribution * Fix a bug where pushing multiple tags would result in invalid images ## 1.8.0 (2015-08-11) ### Distribution + Trusted pull, push and build, disabled by default * Make tar layers deterministic between registries * Don't allow deleting the image of running containers * Check if a tag name to load is a valid digest * Allow one character repository names * Add a more accurate error description for invalid tag name * Make build cache ignore mtime ### Cli + Add support for DOCKER_CONFIG/--config to specify config file dir + Add --type flag for docker inspect command + Add formatting options to `docker ps` with `--format` + Replace `docker -d` with new subcommand `docker daemon` * Zsh completion updates and improvements * Add some missing events to bash completion * Support daemon urls with base paths in `docker -H` * Validate status= filter to docker ps * Display when a container is in --net=host in docker ps * Extend docker inspect to export image metadata related to graph driver * Restore --default-gateway{,-v6} daemon options * Add missing unpublished ports in docker ps * Allow duration strings in `docker events` as --since/--until * Expose more mounts information in `docker inspect` ### Runtime + Add new Fluentd logging driver + Allow `docker import` to load from local files + Add logging driver for GELF via UDP + Allow to copy files from host to containers with `docker cp` + Promote volume drivers from experimental to master + Add rollover options to json-file log driver, and --log-driver-opts flag + Add memory swappiness tuning options * Remove cgroup read-only flag when privileged * Make /proc, /sys, & /dev readonly for readonly containers * Add cgroup bind mount by default * Overlay: Export metadata for container and image in `docker inspect` * Devicemapper: external device activation * Devicemapper: Compare uuid of base device on startup * Remove RC4 from the list of registry cipher suites * Add syslog-facility option * 
LXC execdriver compatibility with recent LXC versions * Mark LXC execdriver as deprecated (to be removed with the migration to runc) ### Plugins * Separate plugin sockets and specs locations * Allow TLS connections to plugins ### Bug fixes - Add missing 'Names' field to /containers/json API output - Make `docker rmi` of dangling images safe while pulling - Devicemapper: Change default basesize to 100G - Go Scheduler issue with sync.Mutex and gcc - Fix issue where Search API endpoint would panic due to empty AuthConfig - Set image canonical names correctly - Check dockerinit only if lxc driver is used - Fix ulimit usage of nproc - Always attach STDIN if -i,--interactive is specified - Show error messages when saving container state fails - Fixed incorrect assumption on --bridge=none treated as disable network - Check for invalid port specifications in host configuration - Fix endpoint leave failure for --net=host mode - Fix goroutine leak in the stats API if the container is not running - Check for apparmor file before reading it - Fix DOCKER_TLS_VERIFY being ignored - Set umask to the default on startup - Correct the message of pause and unpause a non-running container - Adjust disallowed CpuShares in container creation - ZFS: correctly apply selinux context - Display empty string instead of `<nil>` when IP opt is nil - `docker kill` returns error when container is not running - Fix COPY/ADD quoted/json form - Fix goroutine leak on logs -f with no output - Remove panic in nat package on invalid hostport - Fix container linking in Fedora 22 - Fix error caused using default gateways outside of the allocated range - Format times in inspect command with a template as RFC3339Nano - Make registry client to accept 2xx and 3xx http status responses as successful - Fix race issue that caused the daemon to crash with certain layer downloads failed in a specific order. - Fix error when the docker ps format was not valid. - Remove redundant ip forward check. 
- Fix issue trying to push images to repository mirrors. - Fix error cleaning up network entrypoints when there is an initialization issue. ## 1.7.1 (2015-07-14) #### Runtime - Fix default user spawning exec process with `docker exec` - Make `--bridge=none` not to configure the network bridge - Publish networking stats properly - Fix implicit devicemapper selection with static binaries - Fix socket connections that hung intermittently - Fix bridge interface creation on CentOS/RHEL 6.6 - Fix local dns lookups added to resolv.conf - Fix copy command mounting volumes - Fix read/write privileges in volumes mounted with --volumes-from #### Remote API - Fix unmarshalling of Command and Entrypoint - Set limit for minimum client version supported - Validate port specification - Return proper errors when attach/reattach fail #### Distribution - Fix pulling private images - Fix fallback between registry V2 and V1 ## 1.7.0 (2015-06-16) #### Runtime + Experimental feature: support for out-of-process volume plugins * The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag * The `exec` command supports the `-u|--user` flag to specify the new process owner + Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags + The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota` + Container block IO can be controlled in `docker run` using`--blkio-weight` + ZFS support + The `docker logs` command supports a `--since` argument + UTS namespace can be shared with the host with `docker run --uts=host` #### Quality * Networking stack was entirely rewritten as part of the libnetwork effort * Engine internals refactoring * Volumes code was entirely rewritten to support the plugins effort + Sending SIGUSR1 to a daemon will dump all goroutines stacks without exiting #### Build + Support ${variable:-value} and ${variable:+value} syntax for 
environment variables + Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems` + git context changes with branches and directories * The .dockerignore file support exclusion rules #### Distribution + Client support for v2 mirroring support for the official registry #### Bugfixes * Firewalld is now supported and will automatically be used when available * mounting --device recursively ## 1.6.2 (2015-05-13) #### Runtime - Revert change prohibiting mounting into /sys ## 1.6.1 (2015-05-07) #### Security - Fix read/write /proc paths (CVE-2015-3630) - Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631) - Fix opening of file-descriptor 1 (CVE-2015-3627) - Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629) - Prohibit mount of /sys #### Runtime - Update AppArmor policy to not allow mounts ## 1.6.0 (2015-04-07) #### Builder + Building images from an image ID + Build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...` + `commit --change` to apply specified Dockerfile instructions while committing the image + `import --change` to apply specified Dockerfile instructions while importing the image + Builds no longer continue in the background when canceled with CTRL-C #### Client + Windows Support #### Runtime + Container and image Labels + `--cgroup-parent` for specifying a parent cgroup to place container cgroup within + Logging drivers, `json-file`, `syslog`, or `none` + Pulling images by ID + `--ulimit` to set the ulimit on a container + `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run) ## 1.5.0 (2015-02-10) #### Builder + Dockerfile to use for a given `docker build` can be specified with the `-f` flag * Dockerfile and .dockerignore files can be themselves excluded as part of the .dockerignore file, thus preventing modifications to these files invalidating ADD or 
COPY instructions cache * ADD and COPY instructions accept relative paths * Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier * Improve performance when exposing a large number of ports #### Hack + Allow client-side only integration tests for Windows * Include docker-py integration tests against Docker daemon as part of our test suites #### Packaging + Support for the new version of the registry HTTP API * Speed up `docker push` for images with a majority of already existing layers - Fixed contacting a private registry through a proxy #### Remote API + A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command + Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command * Container `inspect` endpoint show the ID of `exec` commands running in this container * Container `inspect` endpoint show the number of times Docker auto-restarted the container * New types of event can be streamed by the `events` endpoint: ‘OOM’ (container died with out of memory), ‘exec_create’, and ‘exec_start' - Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes #### Runtime + Docker daemon has full IPv6 support + The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools + The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted + Container total memory usage can be limited for `docker run` using the `--memory-swap` flag * Major stability improvements for devicemapper storage driver * Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted * 
Better integration with host system: per-container iptable rules are moved to the DOCKER chain - Fixed container exiting on out of memory to return an invalid exit code #### Other * The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon ## 1.4.1 (2014-12-15) #### Runtime - Fix issue with volumes-from and bind mounts not being honored after create ## 1.4.0 (2014-12-11) #### Notable Features since 1.3.0 + Set key=value labels to the daemon (displayed in `docker info`), applied with new `-label` daemon flag + Add support for `ENV` in Dockerfile of the form: `ENV name=value name2=value2...` + New Overlayfs Storage Driver + `docker info` now returns an `ID` and `Name` field + Filter events by event name, container, or image + `docker cp` now supports copying from container volumes - Fixed `docker tag`, so it honors `--force` when overriding a tag for existing image. ## 1.3.3 (2014-12-11) #### Security - Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356) - Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357) - Validate image IDs (CVE-2014-9358) #### Runtime - Fix an issue when image archives are being read slowly #### Client - Fix a regression related to stdin redirection - Fix a regression with `docker cp` when destination is the current directory ## 1.3.2 (2014-11-20) #### Security - Fix tar breakout vulnerability * Extractions are now sandboxed chroot - Security options are no longer committed to images #### Runtime - Fix deadlock in `docker ps -f exited=1` - Fix a bug when `--volumes-from` references a container that failed to start #### Registry + `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 * Private registries whose IPs fall in the 127.0.0.0/8 range do no need the `--insecure-registry` flag - Skip the experimental registry v2 API when mirroring is enabled ## 1.3.1 (2014-10-28) 
#### Security * Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry + Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified #### Runtime - Fix issue where volumes would not be shared #### Client - Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` - Fix docker run output to non-TTY stdout #### Builder - Fix escaping `$` for environment variables - Fix issue with lowercase `onbuild` Dockerfile instruction - Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` ## 1.3.0 (2014-10-14) #### Notable features since 1.2.0 + Docker `exec` allows you to run additional processes inside existing containers + Docker `create` gives you the ability to create a container via the CLI without executing a process + `--security-opts` options to allow user to customize container labels and apparmor profiles + Docker `ps` filters - Wildcard support to COPY/ADD + Move production URLs to get.docker.com from get.docker.io + Allocate IP address on the bridge inside a valid CIDR + Use drone.io for PR and CI testing + Ability to setup an official registry mirror + Ability to save multiple images with docker `save` ## 1.2.0 (2014-08-20) #### Runtime + Make /etc/hosts /etc/resolv.conf and /etc/hostname editable at runtime + Auto-restart containers using policies + Use /var/lib/docker/tmp for large temporary files + `--cap-add` and `--cap-drop` to tweak what linux capability you want + `--device` to use devices in containers #### Client + `docker search` on private registries + Add `exited` filter to `docker ps --filter` * `docker rm -f` now kills instead of stop + Support for IPv6 addresses in `--dns` flag #### Proxy + Proxy instances in separate processes * Small bug fix on UDP proxy ## 1.1.2 (2014-07-23) #### Runtime + Fix port allocation for existing containers + Fix containers restart on daemon restart #### 
Packaging + Fix /etc/init.d/docker issue on Debian ## 1.1.1 (2014-07-09) #### Builder * Fix issue with ADD ## 1.1.0 (2014-07-03) #### Notable features since 1.0.1 + Add `.dockerignore` support + Pause containers during `docker commit` + Add `--tail` to `docker logs` #### Builder + Allow a tar file as context for `docker build` * Fix issue with white-spaces and multi-lines in `Dockerfiles` #### Runtime * Overall performance improvements * Allow `/` as source of `docker run -v` * Fix port allocation * Fix bug in `docker save` * Add links information to `docker inspect` #### Client * Improve command line parsing for `docker commit` #### Remote API * Improve status code for the `start` and `stop` endpoints ## 1.0.1 (2014-06-19) #### Notable features since 1.0.0 * Enhance security for the LXC driver #### Builder * Fix `ONBUILD` instruction passed to grandchildren #### Runtime * Fix events subscription * Fix /etc/hostname file with host networking * Allow `-h` and `--net=none` * Fix issue with hotplug devices in `--privileged` #### Client * Fix artifacts with events * Fix a panic with empty flags * Fix `docker cp` on Mac OS X #### Miscellaneous * Fix compilation on Mac OS X * Fix several races ## 1.0.0 (2014-06-09) #### Notable features since 0.12.0 * Production support ## 0.12.0 (2014-06-05) #### Notable features since 0.11.0 * 40+ various improvements to stability, performance and usability * New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file * Inherit file permissions from the host on `ADD` * New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer * The `images` command has a `-f`/`--filter` option to filter the list of images * Add `--force-rm` to clean up after a failed build * Standardize JSON keys in Remote API to CamelCase * Pull from a docker run now assumes `latest` tag if not specified * Enhance security on Linux 
capabilities and device nodes ## 0.11.1 (2014-05-07) #### Registry - Fix push and pull to private registry ## 0.11.0 (2014-05-07) #### Notable features since 0.10.0 * SELinux support for mount and process labels * Linked containers can be accessed by hostname * Use the net `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces * Add a ping endpoint to the Remote API to do healthchecks of your docker daemon * Logs can now be returned with an optional timestamp * Docker now works with registries that support SHA-512 * Multiple registry endpoints are supported to allow registry mirrors ## 0.10.0 (2014-04-08) #### Builder - Fix printing multiple messages on a single line. Fixes broken output during builds. - Follow symlinks inside container's root for ADD build instructions. - Fix EXPOSE caching. #### Documentation - Add the new options of `docker ps` to the documentation. - Add the options of `docker restart` to the documentation. - Update daemon docs and help messages for --iptables and --ip-forward. - Updated apt-cacher-ng docs example. - Remove duplicate description of --mtu from docs. - Add missing -t and -v for `docker images` to the docs. - Add fixes to the cli docs. - Update libcontainer docs. - Update images in docs to remove references to AUFS and LXC. - Update the nodejs_web_app in the docs to use the new epel RPM address. - Fix external link on security of containers. - Update remote API docs. - Add image size to history docs. - Be explicit about binding to all interfaces in redis example. - Document DisableNetwork flag in the 1.10 remote api. - Document that `--lxc-conf` is lxc only. - Add chef usage documentation. - Add example for an image with multiple for `docker load`. - Explain what `docker run -a` does in the docs. #### Contrib - Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. 
- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. - Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. - Add check-config script to contrib. - Fix fish shell completion. #### Hack * Clean up "go test" output from "make test" to be much more readable/scannable. * Exclude more "definitely not unit tested Go source code" directories from hack/make/test. + Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. - Include contributed completions in Ubuntu PPA. + Add cli integration tests. * Add tweaks to the hack scripts to make them simpler. #### Remote API + Add TLS auth support for API. * Move git clone from daemon to client. - Fix content-type detection in docker cp. * Split API into 2 go packages. #### Runtime * Support hairpin NAT without going through Docker server. - devicemapper: succeed immediately when removing non-existing devices. - devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). - devicemapper: increase timeout in waitClose to 10 seconds. - devicemapper: ensure we shut down thin pool cleanly. - devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. - devicemapper: avoid AB-BA deadlock. - devicemapper: make shutdown better/faster. - improve alpha sorting in mflag. - Remove manual http cookie management because the cookiejar is being used. - Use BSD raw mode on Darwin. Fixes nano, tmux and others. - Add FreeBSD support for the client. - Merge auth package into registry. - Add deprecation warning for -t on `docker pull`. - Remove goroutine leak on error. - Update parseLxcInfo to comply with new lxc1.0 format. - Fix attach exit on darwin. - Improve deprecation message. - Retry to retrieve the layer metadata up to 5 times for `docker pull`. - Only unshare the mount namespace for execin. 
- Merge existing config when committing. - Disable daemon startup timeout. - Fix issue #4681: add loopback interface when networking is disabled. - Add failing test case for issue #4681. - Send SIGTERM to child, instead of SIGKILL. - Show the driver and the kernel version in `docker info` even when not in debug mode. - Always symlink /dev/ptmx for libcontainer. This fixes console related problems. - Fix issue caused by the absence of /etc/apparmor.d. - Don't leave empty cidFile behind when failing to create the container. - Mount cgroups automatically if they're not mounted already. - Use mock for search tests. - Update to double-dash everywhere. - Move .dockerenv parsing to lxc driver. - Move all bind-mounts in the container inside the namespace. - Don't use separate bind mount for container. - Always symlink /dev/ptmx for libcontainer. - Don't kill by pid for other drivers. - Add initial logging to libcontainer. * Sort by port in `docker ps`. - Move networking drivers into runtime top level package. + Add --no-prune to `docker rmi`. + Add time since exit in `docker ps`. - graphdriver: add build tags. - Prevent allocation of previously allocated ports & prevent improve port allocation. * Add support for --since/--before in `docker ps`. - Clean up container stop. + Add support for configurable dns search domains. - Add support for relative WORKDIR instructions. - Add --output flag for docker save. - Remove duplication of DNS entries in config merging. - Add cpuset.cpus to cgroups and native driver options. - Remove docker-ci. - Promote btrfs. btrfs is no longer considered experimental. - Add --input flag to `docker load`. - Return error when existing bridge doesn't match IP address. - Strip comments before parsing line continuations to avoid interpreting instructions as comments. - Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. - Add systemd implementation of cgroups and make containers show up as systemd units. 
- Fix commit and import when no repository is specified. - Remount /var/lib/docker as --private to fix scaling issue. - Use the environment's proxy when pinging the remote registry. - Reduce error level from harmless errors. * Allow --volumes-from to be individual files. - Fix expanding buffer in StdCopy. - Set error regardless of attach or stdin. This fixes #3364. - Add support for --env-file to load environment variables from files. - Symlink /etc/mtab and /proc/mounts. - Allow pushing a single tag. - Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. - Don't throw error when starting an already running container. - Fix dynamic port allocation limit. - remove setupDev from libcontainer. - Add API version to `docker version`. - Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. - Fix --volumes-from mount failure. - Allow non-privileged containers to create device nodes. - Skip login tests because of external dependency on a hosted service. - Deprecate `docker images --tree` and `docker images --viz`. - Deprecate `docker insert`. - Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. - Add specific error message when hitting 401 over HTTP on push. - Fix absolute volume check. - Remove volumes-from from the config. - Move DNS options to hostconfig. - Update the apparmor profile for libcontainer. - Add deprecation notice for `docker commit -run`. ## 0.9.1 (2014-03-24) #### Builder - Fix printing multiple messages on a single line. Fixes broken output during builds. #### Documentation - Fix external link on security of containers. #### Contrib - Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. - Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. 
#### Hack - Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. #### Remote API - Fix content-type detection in `docker cp`. #### Runtime - Use BSD raw mode on Darwin. Fixes nano, tmux and others. - Only unshare the mount namespace for execin. - Retry to retrieve the layer metadata up to 5 times for `docker pull`. - Merge existing config when committing. - Fix panic in monitor. - Disable daemon startup timeout. - Fix issue #4681: add loopback interface when networking is disabled. - Add failing test case for issue #4681. - Send SIGTERM to child, instead of SIGKILL. - Show the driver and the kernel version in `docker info` even when not in debug mode. - Always symlink /dev/ptmx for libcontainer. This fixes console related problems. - Fix issue caused by the absence of /etc/apparmor.d. - Don't leave empty cidFile behind when failing to create the container. - Improve deprecation message. - Fix attach exit on darwin. - devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). - devicemapper: succeed immediately when removing non-existing devices. - devicemapper: increase timeout in waitClose to 10 seconds. - Remove goroutine leak on error. - Update parseLxcInfo to comply with new lxc1.0 format. ## 0.9.0 (2014-03-10) #### Builder - Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. - Add error to docker build --rm. This adds missing error handling. - Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. - Make `--rm` the default for `docker build`. #### Documentation - Download the docker client binary for Mac over https. - Update the titles of the install instructions & descriptions. * Add instructions for upgrading boot2docker. * Add port forwarding example in OS X install docs. - Attempt to disentangle repository and registry. - Update docs to explain more about `docker ps`. 
- Update sshd example to use a Dockerfile. - Rework some examples, including the Python examples. - Update docs to include instructions for a container's lifecycle. - Update docs documentation to discuss the docs branch. - Don't skip cert check for an example & use HTTPS. - Bring back the memory and swap accounting section which was lost when the kernel page was removed. - Explain DNS warnings and how to fix them on systems running and using a local nameserver. #### Contrib - Add Tanglu support for mkimage-debootstrap. - Add SteamOS support for mkimage-debootstrap. #### Hack - Get package coverage when running integration tests. - Remove the Vagrantfile. This is being replaced with boot2docker. - Fix tests on systems where aufs isn't available. - Update packaging instructions and remove the dependency on lxc. #### Remote API * Move code specific to the API to the api package. - Fix header content type for the API. Makes all endpoints use proper content type. - Fix registry auth & remove ping calls from CmdPush and CmdPull. - Add newlines to the JSON stream functions. #### Runtime * Do not ping the registry from the CLI. All requests to registries flow through the daemon. - Check for nil information return in the lxc driver. This fixes panics with older lxc versions. - Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. - Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. * Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. - Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. - Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`. - Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. - Fix `--run` in `docker commit`. This makes `docker commit --run` work again. 
- Fix custom bridge related options. This makes custom bridges work again. + Mount-bind the PTY as container console. This allows tmux/screen to run. + Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. + Add native exec driver which uses libcontainer and make it the default exec driver. - Add support for handling extended attributes in archives. * Set the container MTU to be the same as the host MTU. + Add simple sha256 checksums for layers to speed up `docker push`. * Improve kernel version parsing. * Allow flag grouping (`docker run -it`). - Remove chroot exec driver. - Fix divide by zero to fix panic. - Rewrite `docker rmi`. - Fix docker info with lxc 1.0.0. - Fix fedora tty with apparmor. * Don't always append env vars, replace defaults with vars from config. * Fix a goroutine leak. * Switch to Go 1.2.1. - Fix unique constraint error checks. * Handle symlinks for Docker's data directory and for TMPDIR. - Add deprecation warnings for flags (-flag is deprecated in favor of --flag) - Add apparmor profile for the native execution driver. * Move system specific code from archive to pkg/system. - Fix duplicate signal for `docker run -i -t` (issue #3336). - Return correct process pid for lxc. - Add a -G option to specify the group which unix sockets belong to. + Add `-f` flag to `docker rm` to force removal of running containers. + Kill ghost containers and restart all ghost containers when the docker daemon restarts. + Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. ## 0.8.1 (2014-02-18) #### Builder - Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper - Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system - Add error to `docker build --rm`. 
This adds a missing error check to ensure failures to remove containers are detected and reported #### Documentation * Update issue filing instructions * Warn against the use of symlinks for Docker's storage folder * Replace the Firefox example with an IceWeasel example * Rewrite the PostgreSQL example using a Dockerfile and add more details to it * Improve the OS X documentation #### Remote API - Fix broken images API for version less than 1.7 - Use the right encoding for all API endpoints which return JSON - Move remote api client to api/ - Queue calls to the API using generic socket wait #### Runtime - Fix the use of custom settings for bridges and custom bridges - Refactor the devicemapper code to avoid many mount/unmount race conditions and failures - Remove two panics which could make Docker crash in some situations - Don't ping registry from the CLI client - Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks - Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration - Remove directory when removing devicemapper device. This cleans up leftover mount directories - Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration - Ensure `docker cp` stream is closed properly - Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper - Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port + Mount-bind the PTY as container console. This allows tmux and screen to run in a container - Clean up archive closing. This fixes and improves archive handling - Fix engine tests on systems where temp directories are symlinked - Add test methods for save and load - Avoid temporarily unmounting the container when restarting it. 
This fixes a race for devicemapper during restart - Support submodules when building from a GitHub repository - Quote volume path to allow spaces - Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs ## 0.8.0 (2014-02-04) #### Notable features since 0.7.0 * Images and containers can be removed much faster * Building an image from source with docker build is now much faster * The Docker daemon starts and stops much faster * The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations * Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations * All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar * Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers. With the ongoing changes to the networking and execution subsystems of docker, testing these areas have been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages * Many components have been separated into smaller sub-packages, each with a dedicated test suite. 
As a result the code is better-tested, more readable and easier to change * The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed * The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build * Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write * Docker is officially supported on Mac OS X * The Docker daemon supports systemd socket activation ## 0.7.6 (2014-01-14) #### Builder * Do not follow symlink outside of build context #### Runtime - Remount bind mounts when ro is specified * Use https for fetching docker version #### Other * Inline the test.docker.io fingerprint * Add ca-certificates to packaging documentation ## 0.7.5 (2014-01-09) #### Builder * Disable compression for build. More space usage but a much faster upload - Fix ADD caching for certain paths - Do not compress archive from git build #### Documentation - Fix error in GROUP add example * Make sure the GPG fingerprint is inline in the documentation * Give more specific advice on setting up signing of commits for DCO #### Runtime - Fix misspelled container names - Do not add hostname when networking is disabled * Return most recent image from the cache by date - Return all errors from docker wait * Add Content-Type Header "application/json" to GET /version and /info responses #### Other * Update DCO to version 1.1 + Update Makefile to use "docker:GIT_BRANCH" as the generated image name * Update Travis to check for new 1.1 DCO version ## 0.7.4 (2014-01-07) #### Builder - Fix ADD caching issue with . 
prefixed path - Fix docker build on devicemapper by reverting sparse file tar option - Fix issue with file caching and prevent wrong cache hit * Use same error handling while unmarshalling CMD and ENTRYPOINT #### Documentation * Simplify and streamline Amazon Quickstart * Install instructions use unprefixed Fedora image * Update instructions for mtu flag for Docker on GCE + Add Ubuntu Saucy to installation - Fix for wrong version warning on master instead of latest #### Runtime - Only get the image's rootfs when we need to calculate the image size - Correctly handle unmapping UDP ports * Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build - Fix login message to say pull instead of push - Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN * Make blank -H option default to the same as no -H was sent * Extract cgroups utilities to own submodule #### Other + Add Travis CI configuration to validate DCO and gofmt requirements + Add Developer Certificate of Origin Text * Upgrade VBox Guest Additions * Check standalone header when pinging a registry server ## 0.7.3 (2014-01-02) #### Builder + Update ADD to use the image cache, based on a hash of the added content * Add error message for empty Dockerfile #### Documentation - Fix outdated link to the "Introduction" on www.docker.io + Update the docs to get wider when the screen does - Add information about needing to install LXC when using raw binaries * Update Fedora documentation to disentangle the docker and docker.io conflict * Add a note about using the new `-mtu` flag in several GCE zones + Add FrugalWare installation instructions + Add a more complete example of `docker run` - Fix API documentation for creating and starting Privileged containers - Add missing "name" parameter documentation on "/containers/create" * Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration - Update the 1.8 API documentation with some additions 
that were added to the docs for 1.7 #### Hack - Add missing libdevmapper dependency to the packagers documentation * Update minimum Go requirement to a hard line at Go 1.2+ * Many minor improvements to the Vagrantfile + Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) + Add coverprofile generation reporting - Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually * Update Dockerfile to be more canonical and have less spurious warnings during build - Fix some miscellaneous `docker pull` progress bar display issues * Migrate more miscellaneous packages under the "pkg" folder * Update TextMate highlighting to automatically be enabled for files named "Dockerfile" * Reorganize syntax highlighting files under a common "contrib/syntax" directory * Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation * Add support for container names in bash completion #### Packaging + Add an official Docker client binary for Darwin (Mac OS X) * Remove empty "Vendor" string and added "License" on deb package + Add a stubbed version of "/etc/default/docker" in the deb package #### Runtime * Update layer application to extract tars in place, avoiding file churn while handling whiteouts - Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) * Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) + Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions - Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files * Update container name validation to include '.' 
- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected * Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler * Update to use proper box-drawing characters everywhere in `docker images -tree` * Move MTU setting from LXC configuration to directly use netlink * Add `-S` option to external tar invocation for more efficient sparse file handling + Add arch/os info to User-Agent string, especially for registry requests + Add `-mtu` option to Docker daemon for configuring MTU - Fix `docker build` to exit with a non-zero exit code on error + Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation ## 0.7.2 (2013-12-16) #### Runtime + Validate container names on creation with standard regex * Increase maximum image depth to 127 from 42 * Continue to move api endpoints to the job api + Add -bip flag to allow specification of dynamic bridge IP via CIDR - Allow bridge creation when ipv6 is not enabled on certain systems * Set hostname and IP address from within dockerinit * Drop capabilities from within dockerinit - Fix volumes on host when symlink is present in the image - Prevent deletion of image if ANY container is depending on it even if the container is not running * Update docker push to use new progress display * Use os.Lstat to allow mounting unix sockets when inspecting volumes - Adjust handling of inactive user login - Add missing defines in devicemapper for older kernels - Allow untag operations with no container validation - Add auth config to docker build #### Documentation * Add more information about Docker logging + Add RHEL documentation * Add a direct example for changing the CMD that is run in a container * Update Arch installation documentation + Add section on Trusted Builds + Add Network documentation page #### Other + Add new cover bundle for providing code coverage 
reporting * Separate integration tests in bundles * Make Tianon the hack maintainer * Update mkimage-debootstrap with more tweaks for keeping images small * Use https to get the install script * Remove vendored dotcloud/tar now that Go 1.2 has been released ## 0.7.1 (2013-12-05) #### Documentation + Add @SvenDowideit as documentation maintainer + Add links example + Add documentation regarding ambassador pattern + Add Google Cloud Platform docs + Add dockerfile best practices * Update doc for RHEL * Update doc for registry * Update Postgres examples * Update doc for Ubuntu install * Improve remote api doc #### Runtime + Add hostconfig to docker inspect + Implement `docker log -f` to stream logs + Add env variable to disable kernel version warning + Add -format to `docker inspect` + Support bind-mount for files - Fix bridge creation on RHEL - Fix image size calculation - Make sure iptables are called even if the bridge already exists - Fix issue with stderr only attach - Remove init layer when destroying a container - Fix same port binding on different interfaces - `docker build` now returns the correct exit code - Fix `docker port` to display correct port - `docker build` now check that the dockerfile exists client side - `docker attach` now returns the correct exit code - Remove the name entry when the container does not exist #### Registry * Improve progress bars, add ETA for downloads * Simultaneous pulls now waits for the first to finish instead of failing - Tag only the top-layer image when pushing to registry - Fix issue with offline image transfer - Fix issue preventing using ':' in password for registry #### Other + Add pprof handler for debug + Create a Makefile * Use stdlib tar that now includes fix * Improve make.sh test script * Handle SIGQUIT on the daemon * Disable verbose during tests * Upgrade to go1.2 for official build * Improve unit tests * The test suite now runs all tests even if one fails * Refactor C in Go (Devmapper) - Fix OS X compilation 
## 0.7.0 (2013-11-25) #### Notable features since 0.6.0 * Storage drivers: choose from aufs, device-mapper, or vfs. * Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. * Links: compose complex software stacks by connecting containers to each other. * Container naming: organize your containers by giving them memorable names. * Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. * Offline transfer: push and pull images to the filesystem without losing information. * Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. ## 0.6.7 (2013-11-21) #### Runtime * Improve stability, fixes some race conditions * Skip the volumes mounted when deleting the volumes of container. * Fix layer size computation: handle hard links correctly * Use the work Path for docker cp CONTAINER:PATH * Fix tmp dir never cleanup * Speedup docker ps * More informative error message on name collisions * Fix nameserver regex * Always return long id's * Fix container restart race condition * Keep published ports on docker stop;docker start * Fix container networking on Fedora * Correctly express "any address" to iptables * Fix network setup when reconnecting to ghost container * Prevent deletion if image is used by a running container * Lock around read operations in graph #### RemoteAPI * Return full ID on docker rmi #### Client + Add -tree option to images + Offline image transfer * Exit with status 2 on usage error and display usage on stderr * Do not forward SIGCHLD to container * Use string timestamp for docker events -since #### Other * Update to go 1.2rc5 + Add /etc/default/docker support to upstart ## 0.6.6 (2013-11-06) #### Runtime * Ensure container name on register * Fix regression in /etc/hosts + Add lock around write operations in graph * Check if port is valid * Fix restart runtime error with ghost container networking + Add some more colors and animals 
to increase the pool of generated names * Fix issues in docker inspect + Escape apparmor confinement + Set environment variables using a file. * Prevent docker insert to erase something + Prevent DNS server conflicts in CreateBridgeIface + Validate bind mounts on the server side + Use parent image config in docker build * Fix regression in /etc/hosts #### Client + Add -P flag to publish all exposed ports + Add -notrunc and -q flags to docker history * Fix docker commit, tag and import usage + Add stars, trusted builds and library flags in docker search * Fix docker logs with tty #### RemoteAPI * Make /events API send headers immediately * Do not split last column docker top + Add size to history #### Other + Contrib: Desktop integration. Firefox usecase. + Dockerfile: bump to go1.2rc3 ## 0.6.5 (2013-10-29) #### Runtime + Containers can now be named + Containers can now be linked together for service discovery + 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors + Automatically start crashed containers after a reboot + Expose IP, port, and proto as separate environment vars for container links * Allow ports to be published to specific ips * Prohibit inter-container communication by default - Ignore ErrClosedPipe for stdin in Container.Attach - Remove unused field kernelVersion * Fix issue when mounting subdirectories of /mnt in container - Fix untag during removal of images * Check return value of syscall.Chdir when changing working directory inside dockerinit #### Client - Only pass stdin to hijack when needed to avoid closed pipe errors * Use less reflection in command-line method invocation - Monitor the tty size after starting the container, not prior - Remove useless os.Exit() calls after log.Fatal #### Hack + Add initial init scripts library and a safer Ubuntu packaging script that works for Debian * Add -p option to invoke debootstrap with http_proxy - Update install.sh with $sh_c to get 
sudo/su for modprobe * Update all the mkimage scripts to use --numeric-owner as a tar argument * Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues #### Other * Documentation: Fix the flags for nc in example * Testing: Remove warnings and prevent mount issues - Testing: Change logic for tty resize to avoid warning in tests - Builder: Fix race condition in docker build with verbose output - Registry: Fix content-type for PushImageJSONIndex method * Contrib: Improve helper tools to generate debian and Arch linux server images ## 0.6.4 (2013-10-16) #### Runtime - Add cleanup of container when Start() fails * Add better comments to utils/stdcopy.go * Add utils.Errorf for error logging + Add -rm to docker run for removing a container on exit - Remove error messages which are not actually errors - Fix `docker rm` with volumes - Fix some error cases where a HTTP body might not be closed - Fix panic with wrong dockercfg file - Fix the attach behavior with -i * Record termination time in state. 
- Use empty string so TempDir uses the OS's temp dir automatically - Make sure to close the network allocators + Autorestart containers by default * Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` * lxc: Allow set_file_cap capability in container - Move run -rm to the cli only * Split stdout stderr * Always create a new session for the container #### Testing - Add aggregated docker-ci email report - Add cleanup to remove leftover containers * Add nightly release to docker-ci * Add more tests around auth.ResolveAuthConfig - Remove a few errors in tests - Catch errClosing error when TCP and UDP proxies are terminated * Only run certain tests with TESTFLAGS='-run TestName' make.sh * Prevent docker-ci to test closing PRs * Replace panic by log.Fatal in tests - Increase TestRunDetach timeout #### Documentation * Add initial draft of the Docker infrastructure doc * Add devenvironment link to CONTRIBUTING.md * Add `apt-get install curl` to Ubuntu docs * Add explanation for export restrictions * Add .dockercfg doc * Remove Gentoo install notes about #1422 workaround * Fix help text for -v option * Fix Ping endpoint documentation - Fix parameter names in docs for ADD command - Fix ironic typo in changelog * Various command fixes in postgres example * Document how to edit and release docs - Minor updates to `postgresql_service.rst` * Clarify LGTM process to contributors - Corrected error in the package name * Document what `vagrant up` is actually doing + improve doc search results * Cleanup whitespace in API 1.5 docs * use angle brackets in MAINTAINER example email * Update archlinux.rst + Changes to a new style for the docs. Includes version switcher. 
* Formatting, add information about multiline json * Improve registry and index REST API documentation - Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 * Update Gentoo installation documentation now that we're in the portage tree proper * Cleanup and reorganize docs and tooling for contributors and maintainers - Minor spelling correction of protocoll -> protocol #### Contrib * Add vim syntax highlighting for Dockerfiles from @honza * Add mkimage-arch.sh * Reorganize contributed completion scripts to add zsh completion #### Hack * Add vagrant user to the docker group * Add proper bash completion for "docker push" * Add xz utils as a runtime dep * Add cleanup/refactor portion of #2010 for hack and Dockerfile updates + Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link * Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly + Add @tianon to hack/MAINTAINERS * Improve network performance for VirtualBox * Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) - Fix contrib/mkimage-debian.sh apt caching prevention + Add Dockerfile.tmLanguage to contrib * Configured FPM to make /etc/init/docker.conf a config file * Enable SSH Agent forwarding in Vagrant VM * Several small tweaks/fixes for contrib/mkimage-debian.sh #### Other - Builder: Abort build if mergeConfig returns an error and fix duplicate error message - Packaging: Remove deprecated packaging directory - Registry: Use correct auth config when logging in. 
- Registry: Fix the error message so it is the same as the regex ## 0.6.3 (2013-09-23) #### Packaging * Add 'docker' group on install for ubuntu package * Update tar vendor dependency * Download apt key over HTTPS #### Runtime - Only copy and change permissions on non-bindmount volumes * Allow multiple volumes-from - Fix HTTP imports from STDIN #### Documentation * Update section on extracting the docker binary after build * Update development environment docs for new build process * Remove 'base' image from documentation #### Other - Client: Fix detach issue - Registry: Update regular expression to match index ## 0.6.2 (2013-09-17) #### Runtime + Add domainname support + Implement image filtering with path.Match * Remove unnecessary warnings * Remove os/user dependency * Only mount the hostname file when the config exists * Handle signals within the `docker login` command - UID and GID are now also applied to volumes - `docker start` set error code upon error - `docker run` set the same error code as the process started #### Builder + Add -rm option in order to remove intermediate containers * Allow multiline for the RUN instruction #### Registry * Implement login with private registry - Fix push issues #### Other + Hack: Vendor all dependencies * Remote API: Bump to v1.5 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. 
* Documentation: General improvements ## 0.6.1 (2013-08-23) #### Registry * Pass "meta" headers in API calls to the registry #### Packaging - Use correct upstart script with new build tool - Use libffi-dev, don't build it from sources - Remove duplicate mercurial install command ## 0.6.0 (2013-08-22) #### Runtime + Add lxc-conf flag to allow custom lxc options + Add an option to set the working directory * Add Image name to LogEvent tests + Add -privileged flag and relevant tests, docs, and examples * Add websocket support to /container//attach/ws * Add warning when net.ipv4.ip_forwarding = 0 * Add hostname to environment * Add last stable version in `docker version` - Fix race conditions in parallel pull - Fix Graph ByParent() to generate list of child images per parent image. - Fix typo: fmt.Sprint -> fmt.Sprintf - Fix small \n error in docker build * Fix to "Inject dockerinit at /.dockerinit" * Fix #910. print user name to docker info output * Use Go 1.1.2 for dockerbuilder * Use ranged for loop on channels - Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete - Improve CMD, ENTRYPOINT, and attach docs. - Improve connect message with socket error - Load authConfig only when needed and fix useless WARNING - Show tag used when image is missing * Apply volumes-from before creating volumes - Make docker run handle SIGINT/SIGTERM - Prevent crash when .dockercfg not readable - Install script should be fetched over https, not http. * API, issue 1471: Use groups for socket permissions - Correctly detect IPv4 forwarding * Mount /dev/shm as a tmpfs - Switch from http to https for get.docker.io * Let userland proxy handle container-bound traffic * Update the Docker CLI to specify a value for the "Host" header. 
- Change network range to avoid conflict with EC2 DNS - Reduce connect and read timeout when pinging the registry * Parallel pull - Handle ip route showing mask-less IP addresses * Allow ENTRYPOINT without CMD - Always consider localhost as a domain name when parsing the FQN repos name * Refactor checksum #### Documentation * Add MongoDB image example * Add instructions for creating and using the docker group * Add sudo to examples and installation to documentation * Add ufw doc * Add a reference to ps -a * Add information about Docker`s high level tools over LXC. * Fix typo in docs for docker run -dns * Fix a typo in the ubuntu installation guide * Fix to docs regarding adding docker groups * Update default -H docs * Update readme with dependencies for building * Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 * PostgreSQL service example in documentation * Suggest installing linux-headers by default. * Change the twitter handle * Clarify Amazon EC2 installation * 'Base' image is deprecated and should no longer be referenced in the docs. * Move note about officially supported kernel - Solved the logo being squished in Safari #### Builder + Add USER instruction do Dockerfile + Add workdir support for the Buildfile * Add no cache for docker build - Fix docker build and docker events output - Only count known instructions as build steps - Make sure ENV instruction within build perform a commit each time - Forbid certain paths within docker build ADD - Repository name (and optionally a tag) in build usage - Make sure ADD will create everything in 0755 #### Remote API * Sort Images by most recent creation date. 
* Reworking opaque requests in registry module * Add image name in /events * Use mime pkg to parse Content-Type * 650 http utils and user agent field #### Hack + Bash Completion: Limit commands to containers of a relevant state * Add docker dependencies coverage testing into docker-ci #### Packaging + Docker-brew 0.5.2 support and memory footprint reduction * Add new docker dependencies into docker-ci - Revert "docker.upstart: avoid spawning a `sh` process" + Docker-brew and Docker standard library + Release docker with docker * Fix the upstart script generated by get.docker.io * Enabled the docs to generate manpages. * Revert Bind daemon to 0.0.0.0 in Vagrant. #### Register * Improve auth push * Registry unit tests + mock registry #### Tests * Improve TestKillDifferentUser to prevent timeout on buildbot - Fix typo in TestBindMounts (runContainer called without image) * Improve TestGetContainersTop so it does not rely on sleep * Relax the lo interface test to allow iface index != 1 * Add registry functional test to docker-ci * Add some tests in server and utils #### Other * Contrib: bash completion script * Client: Add docker cp command and copy api endpoint to copy container files/folders to the host * Don`t read from stdout when only attached to stdin ## 0.5.3 (2013-08-13) #### Runtime * Use docker group for socket permissions - Spawn shell within upstart script - Handle ip route showing mask-less IP addresses - Add hostname to environment #### Builder - Make sure ENV instruction within build perform a commit each time ## 0.5.2 (2013-08-08) * Builder: Forbid certain paths within docker build ADD - Runtime: Change network range to avoid conflict with EC2 DNS * API: Change daemon to listen on unix socket by default ## 0.5.1 (2013-07-30) #### Runtime + Add `ps` args to `docker top` + Add support for container ID files (pidfile like) + Add container=lxc in default env + Support networkless containers with `docker run -n` and `docker -d -b=none` * Stdout/stderr logs 
are now stored in the same file as JSON * Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. * Change .dockercfg format to json and support multiple auth remote - Do not override volumes from config - Fix issue with EXPOSE override #### API + Docker client now sets useragent (RFC 2616) + Add /events endpoint #### Builder + ADD command now understands URLs + CmdAdd and CmdEnv now respect Dockerfile-set ENV variables - Create directories with 755 instead of 700 within ADD instruction #### Hack * Simplify unit tests with helpers * Improve docker.upstart event * Add coverage testing into docker-ci ## 0.5.0 (2013-07-17) #### Runtime + List all processes running inside a container with 'docker top' + Host directories can be mounted as volumes with 'docker run -v' + Containers can expose public UDP ports (eg, '-p 123/udp') + Optionally specify an exact public port (eg. '-p 80:4500') * 'docker login' supports additional options - Dont save a container`s hostname when committing an image. #### Registry + New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries - Fix issues when uploading images to a private registry #### Builder + ENTRYPOINT instruction sets a default binary entry point to a container + VOLUME instruction marks a part of the container as persistent data * 'docker build' displays the full output of a build by default ## 0.4.8 (2013-07-01) + Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID. 
- Tests: Fix issues in the test suite ## 0.4.7 (2013-06-28) #### Remote API * The progress bar updates faster when downloading and uploading large files - Fix a bug in the optional unix socket transport #### Runtime * Improve detection of kernel version + Host directories can be mounted as volumes with 'docker run -b' - fix an issue when only attaching to stdin * Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts #### Hack * Improve test suite and dev environment * Remove dependency on unit tests on 'os/user' #### Other * Registry: easier push/pull to a custom registry + Documentation: add terminology section ## 0.4.6 (2013-06-22) - Runtime: fix a bug which caused creation of empty images (and volumes) to crash. ## 0.4.5 (2013-06-21) + Builder: 'docker build git://URL' fetches and builds a remote git repository * Runtime: 'docker ps -s' optionally prints container size * Tests: improved and simplified - Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. - Builder: fix a regression when using ADD with single regular file. ## 0.4.4 (2013-06-19) - Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. ## 0.4.3 (2013-06-19) #### Builder + ADD of a local file will detect tar archives and unpack them * ADD improvements: use tar for copy + automatically unpack local archives * ADD uses tar/untar for copies instead of calling 'cp -ar' * Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. - Fix a bug which caused builds to fail if ADD was the first command * Nicer output for 'docker build' #### Runtime * Remove bsdtar dependency * Add unix socket and multiple -H support * Prevent rm of running containers * Use go1.1 cookiejar - Fix issue detaching from running TTY container - Forbid parallel push/pull for a single image/repo. Fixes #311 - Fix race condition within Run command when attaching. 
#### Client * HumanReadable ProgressBar sizes in pull * Fix docker version`s git commit output #### API * Send all tags on History API call * Add tag lookup to history command. Fixes #882 #### Documentation - Fix missing command in irc bouncer example ## 0.4.2 (2013-06-17) - Packaging: Bumped version to work around an Ubuntu bug ## 0.4.1 (2013-06-17) #### Remote Api + Add flag to enable cross domain requests + Add images and containers sizes in docker ps and docker images #### Runtime + Configure dns configuration host-wide with 'docker -d -dns' + Detect faulty DNS configuration and replace it with a public default + Allow docker run : + You can now specify public port (ex: -p 80:4500) * Improve image removal to garbage-collect unreferenced parents #### Client * Allow multiple params in inspect * Print the container id before the hijack in `docker run` #### Registry * Add regexp check on repo`s name * Move auth to the client - Remove login check on pull #### Other * Vagrantfile: Add the rest api port to vagrantfile`s port_forward * Upgrade to Go 1.1 - Builder: don`t ignore last line in Dockerfile when it doesn`t end with \n ## 0.4.0 (2013-06-03) #### Builder + Introducing Builder + 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile #### Remote API + Introducing Remote API + control Docker programmatically using a simple HTTP/json API #### Runtime * Various reliability and usability improvements ## 0.3.4 (2013-05-30) #### Builder + 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile + 'docker build -t FOO' applies the tag FOO to the newly built container. #### Runtime + Interactive TTYs correctly handle window resize * Fix how configuration is merged between layers #### Remote API + Split stdout and stderr on 'docker run' + Optionally listen on a different IP and port (use at your own risk) #### Documentation * Improve install instructions. 
## 0.3.3 (2013-05-23) - Registry: Fix push regression - Various bugfixes ## 0.3.2 (2013-05-09) #### Registry * Improve the checksum process * Use the size to have a good progress bar while pushing * Use the actual archive if it exists in order to speed up the push - Fix error 400 on push #### Runtime * Store the actual archive on commit ## 0.3.1 (2013-05-08) #### Builder + Implement the autorun capability within docker builder + Add caching to docker builder + Add support for docker builder with native API as top level command + Implement ENV within docker builder - Check the command existence prior create and add Unit tests for the case * use any whitespaces instead of tabs #### Runtime + Add go version to debug infos * Kernel version - don`t show the dash if flavor is empty #### Registry + Add docker search top level command in order to search a repository - Fix pull for official images with specific tag - Fix issue when login in with a different user and trying to push * Improve checksum - async calculation #### Images + Output graph of images to dot (graphviz) - Fix ByParent function #### Documentation + New introduction and high-level overview + Add the documentation for docker builder - CSS fix for docker documentation to make REST API docs look better. - Fix CouchDB example page header mistake - Fix README formatting * Update www.docker.io website. 
#### Other + Website: new high-level overview - Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc * Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker ## 0.3.0 (2013-05-06) #### Runtime - Fix the command existence check - strings.Split may return an empty string on no match - Fix an index out of range crash if cgroup memory is not #### Documentation * Various improvements + New example: sharing data between 2 couchdb databases #### Other * Vagrant: Use only one deb line in /etc/apt + Registry: Implement the new registry ## 0.2.2 (2013-05-03) + Support for data volumes ('docker run -v=PATH') + Share data volumes between containers ('docker run -volumes-from') + Improve documentation * Upgrade to Go 1.0.3 * Various upgrades to the dev environment for contributors ## 0.2.1 (2013-05-01) + 'docker commit -run' bundles a layer with default runtime options: command, ports etc. * Improve install process on Vagrant + New Dockerfile operation: "maintainer" + New Dockerfile operation: "expose" + New Dockerfile operation: "cmd" + Contrib script to build a Debian base layer + 'docker -d -r': restart crashed containers at daemon startup * Runtime: improve test coverage ## 0.2.0 (2013-04-23) - Runtime: ghost containers can be killed and waited for * Documentation: update install instructions - Packaging: fix Vagrantfile - Development: automate releasing binaries and ubuntu packages + Add a changelog - Various bugfixes ## 0.1.8 (2013-04-22) - Dynamically detect cgroup capabilities - Issue stability warning on kernels <3.8 - 'docker push' buffers on disk instead of memory - Fix 'docker diff' for removed files - Fix 'docker stop' for ghost containers - Fix handling of pidfile - Various bugfixes and stability improvements ## 0.1.7 (2013-04-18) - Container ports are available on localhost - 'docker ps' shows allocated TCP ports - Contributors can run 'make hack' to start a continuous integration VM - Streamline ubuntu 
packaging & uploading - Various bugfixes and stability improvements ## 0.1.6 (2013-04-17) - Record the author an image with 'docker commit -author' ## 0.1.5 (2013-04-17) - Disable standalone mode - Use a custom DNS resolver with 'docker -d -dns' - Detect ghost containers - Improve diagnosis of missing system capabilities - Allow disabling memory limits at compile time - Add debian packaging - Documentation: installing on Arch Linux - Documentation: running Redis on docker - Fix lxc 0.9 compatibility - Automatically load aufs module - Various bugfixes and stability improvements ## 0.1.4 (2013-04-09) - Full support for TTY emulation - Detach from a TTY session with the escape sequence `C-p C-q` - Various bugfixes and stability improvements - Minor UI improvements - Automatically create our own bridge interface 'docker0' ## 0.1.3 (2013-04-04) - Choose TCP frontend port with '-p :PORT' - Layer format is versioned - Major reliability improvements to the process manager - Various bugfixes and stability improvements ## 0.1.2 (2013-04-03) - Set container hostname with 'docker run -h' - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' - Various bugfixes and stability improvements - UI polish - Progress bar on push/pull - Use XZ compression by default - Make IP allocator lazy ## 0.1.1 (2013-03-31) - Display shorthand IDs for convenience - Stabilize process management - Layers can include a commit message - Simplified 'docker attach' - Fix support for re-attaching - Various bugfixes and stability improvements - Auto-download at run - Auto-login on push - Beefed up documentation ## 0.1.0 (2013-03-23) Initial public release - Implement registry in order to push/pull images - TCP port allocation - Fix termcaps on Linux - Add documentation - Add Vagrant support with Vagrantfile - Add unit tests - Add repository/tags to ease image management - Improve the layer implementation 
docker-1.10.3/CONTRIBUTING.md000066400000000000000000000431241267010174400153020ustar00rootroot00000000000000# Contributing to Docker Want to hack on Docker? Awesome! We have a contributor's guide that explains [setting up a Docker development environment and the contribution process](https://docs.docker.com/opensource/project/who-written-for/). ![Contributors guide](docs/static_files/contributors.png) This page contains information about reporting issues as well as some tips and guidelines useful to experienced open source contributors. Finally, make sure you read our [community guidelines](#docker-community-guidelines) before you start participating. ## Topics * [Reporting Security Issues](#reporting-security-issues) * [Design and Cleanup Proposals](#design-and-cleanup-proposals) * [Reporting Issues](#reporting-other-issues) * [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) * [Community Guidelines](#docker-community-guidelines) ## Reporting security issues The Docker maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! Please **DO NOT** file a public issue, instead send your report privately to [security@docker.com](mailto:security@docker.com). Security reports are greatly appreciated and we will publicly thank you for it. We also like to send gifts—if you're into Docker schwag, make sure to let us know. We currently do not offer a paid security bounty program, but are not ruling it out in the future. ## Reporting other issues A great way to contribute to the project is to send a detailed report when you encounter an issue. We always appreciate a well-written, thorough bug report, and will thank you for it! Check that [our issue database](https://github.com/docker/docker/issues) doesn't already include that problem or suggestion before submitting an issue. If you find a match, you can use the "subscribe" button to get notified on updates. 
Do *not* leave random "+1" or "I have this too" comments, as they only clutter the discussion, and don't help resolving it. However, if you have ways to reproduce the issue or have additional information that may help resolving the issue, please leave a comment. When reporting issues, always include: * The output of `docker version`. * The output of `docker info`. Also include the steps required to reproduce the problem if possible and applicable. This information will help us review and fix your issue faster. When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). Don't forget to remove sensitive data from your logfiles before posting (you can replace those parts with "REDACTED"). **Issue Report Template**: ``` Description of problem: `docker version`: `docker info`: `uname -a`: Environment details (AWS, VirtualBox, physical, etc.): How reproducible: Steps to Reproduce: 1. 2. 3. Actual Results: Expected Results: Additional info: ``` ##Quick contribution tips and guidelines This section gives the experienced contributor some tips and guidelines. ###Pull requests are always welcome Not sure if that typo is worth a pull request? Found a bug and know how to fix it? Do it! We will appreciate it. Any significant improvement should be documented as [a GitHub issue](https://github.com/docker/docker/issues) before anybody starts working on it. We are always thrilled to receive pull requests. We do our best to process them quickly. If your pull request is not accepted on the first try, don't get discouraged! Our contributor's guide explains [the review process we use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). ### Design and cleanup proposals You can propose new designs for existing Docker features. You can also design entirely new features. We really appreciate contributors who want to refactor or otherwise cleanup our project. 
For information on making these types of contributions, see [the advanced contribution section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in the contributors guide. We try hard to keep Docker lean and focused. Docker can't do everything for everybody. This means that we might decide against incorporating a new feature. However, there might be a way to implement that feature *on top of* Docker. ### Talking to other Docker users and contributors
Internet Relay Chat (IRC)

IRC a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev group on irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives.

Read our IRC quickstart guide for an easy way to get started.
Google Groups There are two groups. Docker-user is for people using Docker containers. The docker-dev group is for contributors and other people contributing to the Docker project.
Twitter You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories.
Stack Overflow Stack Overflow has over 17000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users.
### Conventions Fork the repository and make changes on your fork in a feature branch: - If it's a bug fix branch, name it XXXX-something where XXXX is the number of the issue. - If it's a feature branch, create an enhancement issue to announce your intentions, and name it XXXX-something where XXXX is the number of the issue. Submit unit tests for your changes. Go has a great test framework built in; use it! Take a look at existing tests for inspiration. [Run the full test suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before submitting a pull request. Update the documentation when creating or modifying features. Test your documentation changes for clarity, concision, and correctness, as well as a clean documentation build. See our contributors guide for [our style guide](https://docs.docker.com/opensource/doc-style) and instructions on [building the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). Write clean code. Universally formatted code promotes ease of writing, reading, and maintenance. Always run `gofmt -s -w file.go` on each changed file before committing your changes. Most editors have plug-ins that do this automatically. Pull request descriptions should be as clear as possible and include a reference to all the issues that they address. Commit messages must start with a capitalized and short summary (max. 50 chars) written in the imperative, followed by an optional, more detailed explanatory text which is separated from the summary by an empty line. Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Post a comment after pushing. New commits show up in the pull request automatically, but the reviewers are notified only when you comment. Pull requests must be cleanly rebased on top of master without multiple branches mixed into the PR. 
**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your feature branch to update your pull request rather than `merge master`. Before you make a pull request, squash your commits into logical units of work using `git rebase -i` and `git push -f`. A logical unit of work is a consistent set of patches that should be reviewed together: for example, upgrading the version of a vendored dependency and taking advantage of its now available new feature constitute two separate units of work. Implementing a new function and calling it in another file constitute a single logical unit of work. The very high majority of submissions should have a single commit, so if in doubt: squash down to one. After every commit, [make sure the test suite passes] (https://docs.docker.com/opensource/project/test-and-docs/). Include documentation changes in the same pull request so that a revert would remove all traces of the feature or fix. Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that close an issue. Including references automatically closes the issue on a merge. Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly from the Git history. Please see the [Coding Style](#coding-style) for further guidelines. ### Merge approval Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to indicate acceptance. A change requires LGTMs from an absolute majority of the maintainers of each component affected. For example, if a change affects `docs/` and `registry/`, it needs an absolute majority from the maintainers of `docs/` AND, separately, an absolute majority of the maintainers of `registry/`. For more details, see the [MAINTAINERS](MAINTAINERS) page. ### Sign your work The sign-off is a simple line at the end of the explanation for the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. 
The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` Then you just add a line to every git commit message: Signed-off-by: Joe Smith Use your real name (sorry, no pseudonyms or anonymous contributions.) If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`. Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still accepted, so there is no need to update outstanding pull requests to the new format right away, but please do adjust your processes for future contributions. 
### How can I become a maintainer? The procedures for adding new maintainers are explained in the global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS) file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/) repository. Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. You don't have to be a maintainer to make a difference on the project! ## Docker community guidelines We want to keep the Docker community awesome, growing and collaborative. We need your help to keep it that way. To help with this we've come up with some general guidelines for the community as a whole: * Be nice: Be courteous, respectful and polite to fellow community members: no regional, racial, gender, or other abuse will be tolerated. We like nice people way better than mean ones! * Encourage diversity and participation: Make everyone in our community feel welcome, regardless of their background and the extent of their contributions, and do everything possible to encourage participation in our community. * Keep it legal: Basically, don't get us in trouble. Share only content that you own, do not share private or sensitive information, and don't break the law. * Stay on topic: Make sure that you are posting to the correct channel and avoid off-topic discussions. Remember when you update an issue or respond to an email you are potentially sending to a large number of people. Please consider this before you update. Also remember that nobody likes spam. * Don't send email to the maintainers: There's no need to send email to the maintainers to ask them to investigate an issue or to take a look at a pull request. Instead of sending an email, GitHub mentions should be used to ping maintainers to review a pull request, a proposal or an issue. 
### Guideline violations — 3 strikes method The point of this section is not to find opportunities to punish people, but we do need a fair way to deal with people who are making our community suck. 1. First occurrence: We'll give you a friendly, but public reminder that the behavior is inappropriate according to our guidelines. 2. Second occurrence: We will send you a private message with a warning that any additional violations will result in removal from the community. 3. Third occurrence: Depending on the violation, we may need to delete or ban your account. **Notes:** * Obvious spammers are banned on first occurrence. If we don't do this, we'll have spam all over the place. * Violations are forgiven after 6 months of good behavior, and we won't hold a grudge. * People who commit minor infractions will get some education, rather than hammering them in the 3 strikes process. * The rules apply equally to everyone in the community, no matter how much you've contributed. * Extreme violations of a threatening, abusive, destructive or illegal nature will be addressed immediately and are not subject to 3 strikes or forgiveness. * Contact abuse@docker.com to report abuse or appeal violations. In the case of appeals, we know that mistakes happen, and we'll work with you to come up with a fair solution if there has been a misunderstanding. ## Coding Style Unless explicitly stated, we follow all coding guidelines from the Go community. While some of these standards may seem arbitrary, they somehow seem to result in a solid, consistent codebase. It is possible that the code base does not currently comply with these guidelines. We are not looking for a massive PR that fixes this, since that goes against the spirit of the guidelines. All new contributions should make a best effort to clean up and make the code base better than they left it. Obviously, apply your best judgement. Remember, the goal here is to make the code base easier for humans to navigate and understand. 
Always keep that in mind when nudging others to comply. The rules: 1. All code should be formatted with `gofmt -s`. 2. All code should pass the default levels of [`golint`](https://github.com/golang/lint). 3. All code should follow the guidelines covered in [Effective Go](http://golang.org/doc/effective_go.html) and [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments). 4. Comment the code. Tell us the why, the history and the context. 5. Document _all_ declarations and methods, even private ones. Declare expectations, caveats and anything else that may be important. If a type gets exported, having the comments already there will ensure it's ready. 6. Variable name length should be proportional to it's context and no longer. `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. In practice, short methods will have short variable names and globals will have longer names. 7. No underscores in package names. If you need a compound name, step back, and re-examine why you need a compound name. If you still think you need a compound name, lose the underscore. 8. No utils or helpers packages. If a function is not general enough to warrant it's own package, it has not been written generally enough to be a part of a util package. Just leave it unexported and well-documented. 9. All tests should run with `go test` and outside tooling should not be required. No, we don't need another unit testing framework. Assertion packages are acceptable if they provide _real_ incremental value. 10. Even though we call these "rules" above, they are actually just guidelines. Since you've read all the rules, you now know that. If you are having trouble getting into the mood of idiomatic Go, we recommend reading through [Effective Go](http://golang.org/doc/effective_go.html). The [Go Blog](http://blog.golang.org/) is also a great resource. Drinking the kool-aid is a lot easier than going thirsty. 
docker-1.10.3/Dockerfile000066400000000000000000000234321267010174400150430ustar00rootroot00000000000000# This file describes the standard way to build Docker, using docker # # Usage: # # # Assemble the full dev environment. This is slow the first time. # docker build -t docker . # # # Mount your source in an interactive container for quick testing: # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: # docker run --privileged docker hack/make.sh test # # # Publish a release: # docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ # -e GPG_PASSPHRASE=gloubiboulga \ # docker hack/release.sh # # Note: AppArmor used to mess with privileged mode, but this is no longer # the case. Therefore, you don't have to disable it anymore. # FROM ubuntu:trusty # add zfs ppa RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list # add llvm repo RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 6084F3CF814B57C1CF12EFD515CF4D18AF4F7421 RUN echo deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty main > /etc/apt/sources.list.d/llvm.list # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ aufs-tools \ automake \ bash-completion \ btrfs-tools \ build-essential \ clang-3.8 \ createrepo \ curl \ dpkg-sig \ gcc-mingw-w64 \ git \ iptables \ jq \ libapparmor-dev \ libcap-dev \ libltdl-dev \ libsqlite3-dev \ libsystemd-journal-dev \ libtool \ mercurial \ pkg-config \ python-dev \ python-mock \ python-pip \ python-websocket \ s3cmd=1.1.0* \ ubuntu-zfs \ xfsprogs \ libzfs-dev \ tar \ --no-install-recommends \ && ln -snf /usr/bin/clang-3.8 /usr/local/bin/clang \ && ln -snf /usr/bin/clang++-3.8 /usr/local/bin/clang++ # Get lvm2 source for compiling statically ENV 
LVM2_VERSION 2.02.103 RUN mkdir -p /usr/local/lvm2 \ && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ | tar -xzC /usr/local/lvm2 --strip-components=1 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # Compile and install lvm2 RUN cd /usr/local/lvm2 \ && ./configure \ --build="$(gcc -print-multiarch)" \ --enable-static_link \ && make device-mapper \ && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines # will need updating, to avoid errors. Ping #docker-maintainers on IRC # with a heads-up. ENV GO_VERSION 1.5.3 RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \ | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/docker/vendor # Compile Go for cross compilation ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 \ freebsd/amd64 freebsd/386 freebsd/arm \ windows/amd64 windows/386 # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 # This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
# ENV GOFMT_VERSION 1.3.3 # RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 # Grab Go's cover tool for dead-simple code coverage testing # Grab Go's vet tool for examining go code to find suspicious constructs # and help prevent errors that the compiler might not catch RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ && go install -v golang.org/x/tools/cmd/cover \ && go install -v golang.org/x/tools/cmd/vet # Grab Go's lint tool ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ && go install -v github.com/golang/lint/golint # Configure the container for OSX cross compilation ENV OSX_SDK MacOSX10.11.sdk RUN set -x \ && export OSXCROSS_PATH="/osxcross" \ && git clone --depth 1 https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ && curl -sSL https://s3.dockerproject.org/darwin/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh ENV PATH /osxcross/target/bin:$PATH # install seccomp # this can be changed to the ubuntu package libseccomp-dev if dockerinit is removed, # we need libseccomp.a (which the package does not provide) for dockerinit ENV SECCOMP_VERSION 2.2.3 RUN set -x \ && export SECCOMP_PATH="$(mktemp -d)" \ && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ && ( \ cd "$SECCOMP_PATH" \ && ./configure --prefix=/usr/local \ && make \ && make install \ && ldconfig \ ) \ && rm -rf "$SECCOMP_PATH" # Install 
two versions of the registry. The first is an older version that # only supports schema1 manifests. The second is a newer version that supports # both. This allows integration-cli tests to cover push/pull with both schema1 # and schema2 manifests. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ && rm -rf "$GOPATH" # Install notary server ENV NOTARY_VERSION docker-v1.10.2-1 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ && pip install -r test-requirements.txt # 
Setup s3cmd config RUN { \ echo '[default]'; \ echo 'access_key=$AWS_ACCESS_KEY'; \ echo 'secret_key=$AWS_SECRET_KEY'; \ } > ~/.s3cfg # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' # Add an unprivileged user to be used for tests which need it RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor seccomp selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc # Register Docker's bash completion. RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \ debian:jessie@sha256:24a900d1671b269d6640b4224e7b63801880d8e3cb2bcbfaa10a5dddcf4469ed \ hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) # Download man page generator RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ && go get -v -d github.com/cpuguy83/go-md2man \ && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ && rm -rf "$GOPATH" # Download toml validator ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ && (cd 
"$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ && rm -rf "$GOPATH" # Build/install the tool for embedding resources in Windows binaries ENV RSRC_VERSION v2 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone --depth 1 -b "$RSRC_VERSION" https://github.com/akavel/rsrc.git "$GOPATH/src/github.com/akavel/rsrc" \ && go build -v -o /usr/local/bin/rsrc github.com/akavel/rsrc \ && rm -rf "$GOPATH" # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source COPY . /go/src/github.com/docker/docker docker-1.10.3/Dockerfile.armhf000066400000000000000000000214641267010174400161420ustar00rootroot00000000000000# This file describes the standard way to build Docker on ARMv7, using docker # # Usage: # # # Assemble the full dev environment. This is slow the first time. # docker build -t docker -f Dockerfile.armhf . # # # Mount your source in an interactive container for quick testing: # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: # docker run --privileged docker hack/make.sh test # # # Publish a release: # docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ # -e GPG_PASSPHRASE=gloubiboulga \ # docker hack/release.sh # # Note: AppArmor used to mess with privileged mode, but this is no longer # the case. Therefore, you don't have to disable it anymore. 
# FROM armhf/ubuntu:trusty # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ aufs-tools \ automake \ bash-completion \ btrfs-tools \ build-essential \ createrepo \ curl \ dpkg-sig \ git \ iptables \ jq \ net-tools \ libapparmor-dev \ libcap-dev \ libltdl-dev \ libsqlite3-dev \ libsystemd-journal-dev \ libtool \ mercurial \ pkg-config \ python-dev \ python-mock \ python-pip \ python-websocket \ xfsprogs \ tar \ --no-install-recommends # Get lvm2 source for compiling statically ENV LVM2_VERSION 2.02.103 RUN mkdir -p /usr/local/lvm2 \ && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ | tar -xzC /usr/local/lvm2 --strip-components=1 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # Compile and install lvm2 RUN cd /usr/local/lvm2 \ && ./configure \ --build="$(gcc -print-multiarch)" \ --enable-static_link \ && make device-mapper \ && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go #ENV GO_VERSION 1.5.3 # TODO update GO_TOOLS_COMMIT below when this updates to 1.5+ ENV GO_VERSION 1.4.3 RUN curl -fsSL "https://github.com/hypriot/golang-armbuilds/releases/download/v${GO_VERSION}/go${GO_VERSION}.linux-armv7.tar.gz" \ | tar -xzC /usr/local # temporarily using Hypriot's tarballs while we wait for official 1.6+ #RUN curl -fsSL https://golang.org/dl/go${GO_VERSION}.linux-arm6.tar.gz \ # | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/docker/vendor # we're building for armhf, which is ARMv7, so let's be explicit about that ENV GOARCH arm ENV GOARM 7 # This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
# ENV GOFMT_VERSION 1.3.3 # RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt #ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 # TODO update this sha when we upgrade to Go 1.5+ ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9 # Grab Go's cover tool for dead-simple code coverage testing # Grab Go's vet tool for examining go code to find suspicious constructs # and help prevent errors that the compiler might not catch RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ && go install -v golang.org/x/tools/cmd/cover \ && go install -v golang.org/x/tools/cmd/vet # Grab Go's lint tool #ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 # TODO update this sha when we upgrade to Go 1.5+ ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ && go install -v github.com/golang/lint/golint # install seccomp # this can be changed to the ubuntu package libseccomp-dev if dockerinit is removed, # we need libseccomp.a (which the package does not provide) for dockerinit ENV SECCOMP_VERSION 2.2.3 RUN set -x \ && export SECCOMP_PATH="$(mktemp -d)" \ && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ && ( \ cd "$SECCOMP_PATH" \ && ./configure --prefix=/usr/local \ && make \ && make install \ && ldconfig \ ) \ && rm -rf "$SECCOMP_PATH" # Install two versions of the registry. The first is an older version that # only supports schema1 manifests. The second is a newer version that supports # both. 
This allows integration-cli tests to cover push/pull with both schema1 # and schema2 manifests. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT a7ae88da459b98b481a245e5b1750134724ac67d RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ && rm -rf "$GOPATH" # Install notary server ENV NOTARY_VERSION docker-v1.10.2-1 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ && pip install -r test-requirements.txt # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' # Add an 
unprivileged user to be used for tests which need it RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor seccomp selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc # Register Docker's bash completion. RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \ armhf/debian:jessie@sha256:094687129906d2a43cb4e5946ea379b5619c9ca8e4e27b3ba28b40f237a4150c \ armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) # Download man page generator RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ && go get -v -d github.com/cpuguy83/go-md2man \ && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ && rm -rf "$GOPATH" # Download toml validator ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ && rm -rf "$GOPATH" # Build/install the tool for embedding resources in Windows binaries ENV RSRC_VERSION 
v2 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone --depth 1 -b "$RSRC_VERSION" https://github.com/akavel/rsrc.git "$GOPATH/src/github.com/akavel/rsrc" \ && go build -v -o /usr/local/bin/rsrc github.com/akavel/rsrc \ && rm -rf "$GOPATH" # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source COPY . /go/src/github.com/docker/docker docker-1.10.3/Dockerfile.gccgo000066400000000000000000000043361267010174400161260ustar00rootroot00000000000000# This file describes the standard way to build Docker, using docker # # Usage: # # # Assemble the full dev environment. This is slow the first time. # docker build -t docker -f Dockerfile.gccgo . # FROM gcc:5.3 # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ aufs-tools \ btrfs-tools \ build-essential \ curl \ git \ iptables \ jq \ net-tools \ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ mercurial \ parallel \ python-dev \ python-mock \ python-pip \ python-websocket \ --no-install-recommends # Get lvm2 source for compiling statically RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # Compile and install lvm2 RUN cd /usr/local/lvm2 \ && ./configure --enable-static_link \ && make device-mapper \ && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # install seccomp # this can be changed to the ubuntu package libseccomp-dev if dockerinit is removed, # we need libseccomp.a (which the package does not provide) for dockerinit ENV SECCOMP_VERSION v2.2.3 RUN set -x \ && export SECCOMP_PATH=$(mktemp -d) \ && git clone https://github.com/seccomp/libseccomp.git "$SECCOMP_PATH" \ && ( \ cd "$SECCOMP_PATH" \ && git checkout "$SECCOMP_VERSION" \ && ./autogen.sh \ && ./configure --prefix=/usr \ && make \ && make install \ ) \ && rm -rf "$SECCOMP_PATH" ENV GOPATH 
/go:/go/src/github.com/docker/docker/vendor # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT # Add an unprivileged user to be used for tests which need it RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor seccomp selinux # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source COPY . /go/src/github.com/docker/docker docker-1.10.3/Dockerfile.ppc64le000066400000000000000000000163551267010174400163250ustar00rootroot00000000000000# This file describes the standard way to build Docker on ppc64le, using docker # # Usage: # # # Assemble the full dev environment. This is slow the first time. # docker build -t docker -f Dockerfile.ppc64le . # # # Mount your source in an interactive container for quick testing: # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: # docker run --privileged docker hack/make.sh test # # # Publish a release: # docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ # -e GPG_PASSPHRASE=gloubiboulga \ # docker hack/release.sh # # Note: AppArmor used to mess with privileged mode, but this is no longer # the case. Therefore, you don't have to disable it anymore. 
# FROM ppc64le/gcc:5.3 # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ aufs-tools \ automake \ bash-completion \ btrfs-tools \ build-essential \ createrepo \ curl \ dpkg-sig \ git \ iptables \ jq \ net-tools \ libapparmor-dev \ libcap-dev \ libltdl-dev \ libsqlite3-dev \ libsystemd-journal-dev \ libtool \ mercurial \ pkg-config \ python-dev \ python-mock \ python-pip \ python-websocket \ xfsprogs \ tar \ --no-install-recommends # Get lvm2 source for compiling statically ENV LVM2_VERSION 2.02.103 RUN mkdir -p /usr/local/lvm2 \ && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ | tar -xzC /usr/local/lvm2 --strip-components=1 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # fix platform enablement in lvm2 to support ppc64le properly RUN set -e \ && for f in config.guess config.sub; do \ curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ done # "arch.c:78:2: error: #error the arch code needs to know about your machine type" # Compile and install lvm2 RUN cd /usr/local/lvm2 \ && ./configure \ --build="$(gcc -print-multiarch)" \ --enable-static_link \ && make device-mapper \ && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # TODO install Go, using gccgo as GOROOT_BOOTSTRAP (Go 1.5+ supports ppc64le properly) # possibly a ppc64le/golang image? ENV PATH /go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/docker/vendor # This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
# ENV GOFMT_VERSION 1.3.3 # RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt # TODO update this sha when we upgrade to Go 1.5+ ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9 # Grab Go's cover tool for dead-simple code coverage testing # Grab Go's vet tool for examining go code to find suspicious constructs # and help prevent errors that the compiler might not catch RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ && go install -v golang.org/x/tools/cmd/cover \ && go install -v golang.org/x/tools/cmd/vet # Grab Go's lint tool ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ && go install -v github.com/golang/lint/golint # Install registry ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ && rm -rf "$GOPATH" # Install notary server #ENV NOTARY_VERSION docker-v1.10.2-1 #RUN set -x \ # && export GOPATH="$(mktemp -d)" \ # && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ # && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ # && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ # go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ 
# && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ && pip install -r test-requirements.txt # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' # Add an unprivileged user to be used for tests which need it RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc # Register Docker's bash completion. RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \ ppc64le/debian:jessie@sha256:74e06e6506b23cf8abd00250782838b2d19910824d8e7eab3d14dc1845ea10c6 \ ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) # Download man page generator RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ && go get -v -d github.com/cpuguy83/go-md2man \ && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ && rm -rf "$GOPATH" # Download toml validator ENV 
TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ && rm -rf "$GOPATH" # Build/install the tool for embedding resources in Windows binaries ENV RSRC_VERSION v2 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone --depth 1 -b "$RSRC_VERSION" https://github.com/akavel/rsrc.git "$GOPATH/src/github.com/akavel/rsrc" \ && go build -v -o /usr/local/bin/rsrc github.com/akavel/rsrc \ && rm -rf "$GOPATH" # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source COPY . /go/src/github.com/docker/docker docker-1.10.3/Dockerfile.s390x000066400000000000000000000163631267010174400157350ustar00rootroot00000000000000# This file describes the standard way to build Docker on s390x, using docker # # Usage: # # # Assemble the full dev environment. This is slow the first time. # docker build -t docker -f Dockerfile.s390x . # # # Mount your source in an interactive container for quick testing: # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: # docker run --privileged docker hack/make.sh test # # # Publish a release: # docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ # -e GPG_PASSPHRASE=gloubiboulga \ # docker hack/release.sh # # Note: AppArmor used to mess with privileged mode, but this is no longer # the case. Therefore, you don't have to disable it anymore. 
# FROM s390x/gcc:5.3 # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ aufs-tools \ automake \ bash-completion \ btrfs-tools \ build-essential \ createrepo \ curl \ dpkg-sig \ git \ iptables \ jq \ net-tools \ libapparmor-dev \ libcap-dev \ libltdl-dev \ libsqlite3-dev \ libsystemd-journal-dev \ libtool \ mercurial \ pkg-config \ python-dev \ python-mock \ python-pip \ python-websocket \ xfsprogs \ tar \ --no-install-recommends # Get lvm2 source for compiling statically ENV LVM2_VERSION 2.02.103 RUN mkdir -p /usr/local/lvm2 \ && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ | tar -xzC /usr/local/lvm2 --strip-components=1 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # fix platform enablement in lvm2 to support s390x properly RUN set -e \ && for f in config.guess config.sub; do \ curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ done # "arch.c:78:2: error: #error the arch code needs to know about your machine type" # Compile and install lvm2 RUN cd /usr/local/lvm2 \ && ./configure \ --build="$(gcc -print-multiarch)" \ --enable-static_link \ && make device-mapper \ && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Note: Go comes from the base image (gccgo, specifically) # We can't compile Go proper because s390x isn't an officially supported architecture yet. ENV PATH /go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/docker/vendor # This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
# ENV GOFMT_VERSION 1.3.3 # RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt # TODO update this sha when we upgrade to Go 1.5+ ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9 # Grab Go's cover tool for dead-simple code coverage testing # Grab Go's vet tool for examining go code to find suspicious constructs # and help prevent errors that the compiler might not catch RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ && go install -v golang.org/x/tools/cmd/cover \ && go install -v golang.org/x/tools/cmd/vet # Grab Go's lint tool ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ && go install -v github.com/golang/lint/golint # Install registry ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ && rm -rf "$GOPATH" # Install notary server ENV NOTARY_VERSION docker-v1.10.2-1 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ && rm -rf 
"$GOPATH" # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ && pip install -r test-requirements.txt # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' # Add an unprivileged user to be used for tests which need it RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor selinux # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc # Register Docker's bash completion. RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \ s390x/debian:jessie@sha256:3c478e199f60c877c00306356267798d32727dc3cd38512cdb4b060659ea9d20 \ s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) # Download man page generator RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ && go get -v -d github.com/cpuguy83/go-md2man \ && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ && rm -rf "$GOPATH" # Download toml validator ENV TOMLV_COMMIT 
9baf8a8a9f2ed20a8e54160840c492f937eeaf9a RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ && rm -rf "$GOPATH" # Build/install the tool for embedding resources in Windows binaries ENV RSRC_VERSION v2 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone --depth 1 -b "$RSRC_VERSION" https://github.com/akavel/rsrc.git "$GOPATH/src/github.com/akavel/rsrc" \ && go build -v -o /usr/local/bin/rsrc github.com/akavel/rsrc \ && rm -rf "$GOPATH" # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source COPY . /go/src/github.com/docker/docker docker-1.10.3/Dockerfile.simple000066400000000000000000000017051267010174400163320ustar00rootroot00000000000000# docker build -t docker:simple -f Dockerfile.simple . # docker run --rm docker:simple hack/make.sh dynbinary # docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit # docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli # This represents the bare minimum required to build and test Docker. FROM debian:jessie # compile and runtime deps # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ btrfs-tools \ curl \ gcc \ git \ golang \ libdevmapper-dev \ libsqlite3-dev \ \ ca-certificates \ e2fsprogs \ iptables \ procps \ xfsprogs \ xz-utils \ \ aufs-tools \ && rm -rf /var/lib/apt/lists/* ENV AUTO_GOPATH 1 WORKDIR /usr/src/docker COPY . 
/usr/src/docker docker-1.10.3/LICENSE000066400000000000000000000250151267010174400140550ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2013-2016 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-1.10.3/MAINTAINERS000066400000000000000000000142741267010174400145520ustar00rootroot00000000000000# Docker maintainers file # # This file describes who runs the docker/docker project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant # parser. # # This file is compiled into the MAINTAINERS file in docker/opensource. # [Org] [Org."Core maintainers"] # The Core maintainers are the ghostbusters of the project: when there's a problem others # can't solve, they show up and fix it with bizarre devices and weaponry. # They have final say on technical implementation and coding style. # They are ultimately responsible for quality in all its forms: usability polish, # bugfixes, performance, stability, etc. When ownership can cleanly be passed to # a subsystem, they are responsible for doing so and holding the # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. # For each release (including minor releases), a "release captain" is assigned from the # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure # the release process is clear and up-to-date. 
people = [ "calavera", "coolljt0725", "cpuguy83", "crosbymichael", "duglin", "estesp", "icecrime", "jfrazelle", "lk4d4", "mhbauer", "runcom", "tianon", "tibor", "tonistiigi", "unclejack", "vbatts", "vdemeester" ] [Org."Docs maintainers"] # TODO Describe the docs maintainers role. people = [ "jamtur01", "moxiegirl", "sven", "thajeztah" ] [Org.Curators] # The curators help ensure that incoming issues and pull requests are properly triaged and # that our various contribution and reviewing processes are respected. With their knowledge of # the repository activity, they can also guide contributors to relevant material or # discussions. # # They are neither code nor docs reviewers, so they are never expected to merge. They can # however: # - close an issue or pull request when it's an exact duplicate # - close an issue or pull request when it's inappropriate or off-topic people = [ "thajeztah" ] [Org.Alumni] # This list contains maintainers that are no longer active on the project. # It is thanks to these people that the project has become what it is today. # Thank you! people = [ # As a maintainer, Erik was responsible for the "builder", and # started the first designs for the new networking model in # Docker. Erik is now working on all kinds of plugins for Docker # (https://github.com/contiv) and various open source projects # in his own repository https://github.com/erikh. You may # still stumble into him in our issue tracker, or on IRC. "erikh", # Victor is one of the earliest contributors to Docker, having worked on the # project when it was still "dotCloud" in April 2013. He's been responsible # for multiple releases (https://github.com/docker/docker/pulls?q=is%3Apr+bump+in%3Atitle+author%3Avieux), # and up until today (2015), our number 2 contributor. 
Although he's no longer # a maintainer for the Docker "Engine", he's still actively involved in other # Docker projects, and most likely can be found in the Docker Swarm repository, # for which he's a core maintainer. "vieux", # Vishnu became a maintainer to help out on the daemon codebase and # libcontainer integration. He's currently involved in the # Open Containers Initiative, working on the specifications, # besides his work on cAdvisor and Kubernetes for Google. "vishh" ] [people] # A reference list of all people associated with the project. # All other sections should refer to people by their canonical key # in the people section. # ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.calavera] Name = "David Calavera" Email = "david.calavera@gmail.com" GitHub = "calavera" [people.coolljt0725] Name = "Lei Jitang" Email = "leijitang@huawei.com" GitHub = "coolljt0725" [people.cpuguy83] Name = "Brian Goff" Email = "cpuguy83@gmail.com" Github = "cpuguy83" [people.crosbymichael] Name = "Michael Crosby" Email = "crosbymichael@gmail.com" GitHub = "crosbymichael" [people.duglin] Name = "Doug Davis" Email = "dug@us.ibm.com" GitHub = "duglin" [people.erikh] Name = "Erik Hollensbe" Email = "erik@docker.com" GitHub = "erikh" [people.estesp] Name = "Phil Estes" Email = "estesp@linux.vnet.ibm.com" GitHub = "estesp" [people.icecrime] Name = "Arnaud Porterie" Email = "arnaud@docker.com" GitHub = "icecrime" [people.jamtur01] Name = "James Turnbull" Email = "james@lovedthanlost.net" GitHub = "jamtur01" [people.jfrazelle] Name = "Jessie Frazelle" Email = "j@docker.com" GitHub = "jfrazelle" [people.lk4d4] Name = "Alexander Morozov" Email = "lk4d4@docker.com" GitHub = "lk4d4" [people.mhbauer] Name = "Morgan Bauer" Email = "mbauer@us.ibm.com" GitHub = "mhbauer" [people.moxiegirl] Name = "Mary Anthony" Email = "mary.anthony@docker.com" GitHub = "moxiegirl" [people.runcom] Name = "Antonio Murdaca" Email = "runcom@redhat.com" GitHub = "runcom" [people.shykes] Name = "Solomon Hykes" Email 
= "solomon@docker.com" GitHub = "shykes" [people.sven] Name = "Sven Dowideit" Email = "SvenDowideit@home.org.au" GitHub = "SvenDowideit" [people.thajeztah] Name = "Sebastiaan van Stijn" Email = "github@gone.nl" GitHub = "thaJeztah" [people.theadactyl] Name = "Thea Lamkin" Email = "thea@docker.com" GitHub = "theadactyl" [people.tianon] Name = "Tianon Gravi" Email = "admwiggin@gmail.com" GitHub = "tianon" [people.tibor] Name = "Tibor Vass" Email = "tibor@docker.com" GitHub = "tiborvass" [people.tonistiigi] Name = "Tõnis Tiigi" Email = "tonis@docker.com" GitHub = "tonistiigi" [people.unclejack] Name = "Cristian Staretu" Email = "cristian.staretu@gmail.com" GitHub = "unclejack" [people.vbatts] Name = "Vincent Batts" Email = "vbatts@redhat.com" GitHub = "vbatts" [people.vdemeester] Name = "Vincent Demeester" Email = "vincent@sbr.pm" GitHub = "vdemeester" [people.vieux] Name = "Victor Vieux" Email = "vieux@docker.com" GitHub = "vieux" [people.vishh] Name = "Vishnu Kannan" Email = "vishnuk@google.com" GitHub = "vishh" docker-1.10.3/Makefile000066400000000000000000000066071267010174400145160ustar00rootroot00000000000000.PHONY: all binary build cross default docs docs-build docs-shell shell test test-docker-py test-integration-cli test-unit validate # get OS/Arch of docker engine DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:+$$DOCKER_CLIENT_OSARCH}') # default for linux/amd64 and others DOCKERFILE := Dockerfile # switch to different Dockerfile for linux/arm ifeq ($(DOCKER_OSARCH), linux/arm) DOCKERFILE := Dockerfile.armhf else ifeq ($(DOCKER_OSARCH), linux/arm64) # TODO .arm64 DOCKERFILE := Dockerfile.armhf else ifeq ($(DOCKER_OSARCH), linux/ppc64le) DOCKERFILE := Dockerfile.ppc64le else ifeq ($(DOCKER_OSARCH), linux/s390x) DOCKERFILE := Dockerfile.s390x endif endif endif endif export DOCKERFILE # env vars passed through directly to Docker's build scripts # to allow things like `make DOCKER_CLIENTONLY=1 binary` 
easily # `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these DOCKER_ENVS := \ -e BUILDFLAGS \ -e DOCKER_CLIENTONLY \ -e DOCKER_DEBUG \ -e DOCKER_EXPERIMENTAL \ -e DOCKERFILE \ -e DOCKER_GRAPHDRIVER \ -e DOCKER_REMAP_ROOT \ -e DOCKER_STORAGE_OPTS \ -e DOCKER_USERLANDPROXY \ -e TESTDIRS \ -e TESTFLAGS \ -e TIMEOUT # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds # to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` # (default to no bind mount if DOCKER_HOST is set) # note: BINDDIR is supported for backwards-compatibility here BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) # if this session isn't interactive, then we don't want to allocate a # TTY, which would fail, but if it is interactive, we do want to attach # so that the user can send e.g. ^C through. INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) ifeq ($(INTERACTIVE), 1) DOCKER_FLAGS += -t endif DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" default: binary all: build $(DOCKER_RUN_DOCKER) hack/make.sh binary: build $(DOCKER_RUN_DOCKER) hack/make.sh binary build: bundles docker build -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . 
bundles: mkdir bundles cross: build $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross deb: build $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb docs: $(MAKE) -C docs docs rpm: build $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm shell: build $(DOCKER_RUN_DOCKER) bash test: build $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py test-docker-py: build $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py test-integration-cli: build $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-cli test-unit: build $(DOCKER_RUN_DOCKER) hack/make.sh test-unit validate: build $(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet validate-vendor docker-1.10.3/NOTICE000066400000000000000000000011761267010174400137560ustar00rootroot00000000000000Docker Copyright 2012-2016 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: Use and transfer of Docker may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see https://www.bis.doc.gov See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. docker-1.10.3/README.md000066400000000000000000000322451267010174400143320ustar00rootroot00000000000000Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) ============================ Docker is an open source project to pack, ship and run any application as a lightweight container. Docker containers are both *hardware-agnostic* and *platform-agnostic*. 
This means they can run anywhere, from your laptop to the largest cloud compute instance and everything in between - and they don't require you to use a particular language, framework or packaging system. That makes them great building blocks for deploying and scaling web apps, databases, and backend services without depending on a particular stack or provider. Docker began as an open-source implementation of the deployment engine which powers [dotCloud](https://www.dotcloud.com), a popular Platform-as-a-Service. It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands of applications and databases. ![](docs/static_files/docker-logo-compressed.png "Docker") ## Security Disclosure Security is very important to us. If you have any issue regarding security, please disclose the information responsibly by sending an email to security@docker.com and not by creating a github issue. ## Better than VMs A common method for distributing applications and sandboxing their execution is to use virtual machines, or VMs. Typical VM formats are VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory these formats should allow every developer to automatically package their application into a "machine" for easy distribution and deployment. In practice, that almost never happens, for a few reasons: * *Size*: VMs are very large which makes them impractical to store and transfer. * *Performance*: running VMs consumes significant CPU and memory, which makes them impractical in many scenarios, for example local development of multi-tier applications, and large-scale deployment of cpu and memory-intensive applications on large numbers of machines. * *Portability*: competing VM environments don't play well with each other. Although conversion tools do exist, they are limited and add even more overhead. * *Hardware-centric*: VMs were designed with machine operators in mind, not software developers. 
As a result, they offer very limited tooling for what developers need most: building, testing and running their software. For example, VMs offer no facilities for application versioning, monitoring, configuration, logging or service discovery. By contrast, Docker relies on a different sandboxing method known as *containerization*. Unlike traditional virtualization, containerization takes place at the kernel level. Most modern operating system kernels now support the primitives necessary for containerization, including Linux with [openvz](https://openvz.org), [vserver](http://linux-vserver.org) and more recently [lxc](https://linuxcontainers.org/), Solaris with [zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), and FreeBSD with [Jails](https://www.freebsd.org/doc/handbook/jails.html). Docker builds on top of these low-level primitives to offer developers a portable format and runtime environment that solves all four problems. Docker containers are small (and their transfer can be optimized with layers), they have basically zero memory and cpu overhead, they are completely portable, and are designed from the ground up with an application-centric design. Perhaps best of all, because Docker operates at the OS level, it can still be run inside a VM! ## Plays well with others Docker does not require you to buy into a particular programming language, framework, packaging system, or configuration language. Is your application a Unix process? Does it use files, tcp connections, environment variables, standard Unix streams and command-line arguments as inputs and outputs? Then Docker can run it. Can your application's build be expressed as a sequence of such commands? Then Docker can build it. ## Escape dependency hell A common problem for developers is the difficulty of managing all their application's dependencies in a simple and automated way. This is usually difficult for several reasons: * *Cross-platform dependencies*. 
Modern applications often depend on a combination of system libraries and binaries, language-specific packages, framework-specific modules, internal components developed for another project, etc. These dependencies live in different "worlds" and require different tools - these tools typically don't work well with each other, requiring awkward custom integrations. * *Conflicting dependencies*. Different applications may depend on different versions of the same dependency. Packaging tools handle these situations with various degrees of ease - but they all handle them in different and incompatible ways, which again forces the developer to do extra work. * *Custom dependencies*. A developer may need to prepare a custom version of their application's dependency. Some packaging systems can handle custom versions of a dependency, others can't - and all of them handle it differently. Docker solves the problem of dependency hell by giving the developer a simple way to express *all* their application's dependencies in one place, while streamlining the process of assembling them. If this makes you think of [XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't *replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers. Docker defines a build as running a sequence of Unix commands, one after the other, in the same container. Build commands modify the contents of the container (usually by installing new files on the filesystem), the next command modifies it some more, etc. Since each build command inherits the result of the previous commands, the *order* in which the commands are executed expresses *dependencies*. 
Here's a typical Docker build process: ```bash FROM ubuntu:12.04 RUN apt-get update && apt-get install -y python python-pip curl RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv RUN cd helloflask-master && pip install -r requirements.txt ``` Note that Docker doesn't care *how* dependencies are built - as long as they can be built by running a Unix command in a container. Getting started =============== Docker can be installed either on your computer for building applications or on servers for running them. To get started, [check out the installation instructions in the documentation](https://docs.docker.com/engine/installation/). We also offer an [interactive tutorial](https://www.docker.com/tryit/) for quickly learning the basics of using Docker. Usage examples ============== Docker can be used to run short-lived commands, long-running daemons (app servers, databases, etc.), interactive shell sessions, etc. You can find a [list of real-world examples](https://docs.docker.com/engine/examples/) in the documentation. 
Under the hood -------------- Under the hood, Docker is built on the following components: * The [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) and [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) capabilities of the Linux kernel * The [Go](https://golang.org) programming language * The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) * The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) ====================== | **Master** (Linux) | **Experimental** (linux) | **Windows** | **FreeBSD** | |------------------|----------------------|---------|---------| | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | Want to hack on Docker? Awesome! We have [instructions to help you get started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/). These instructions are probably not perfect, please let us know if anything feels wrong or incomplete. Better yet, submit a PR and improve them yourself. Getting the development builds ============================== Want to run Docker from a master build? 
You can download master builds at [master.dockerproject.org](https://master.dockerproject.org). They are updated with each commit merged into the master branch. Don't know how to use that super cool new feature in the master build? Check out the master docs at [docs.master.dockerproject.org](http://docs.master.dockerproject.org). How the project is run ====================== Docker is a very, very active project. If you want to learn more about how it is run, or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). We are always open to suggestions on process improvements, and are always looking for more maintainers. ### Talking to other Docker users and contributors
Internet Relay Chat (IRC)

IRC is a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev groups on irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives.

Read our IRC quickstart guide for an easy way to get started.
Google Groups There are two groups. The docker-user group is for people using Docker containers. The docker-dev group is for contributors and other people interested in contributing to the Docker project.
Twitter You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories.
Stack Overflow Stack Overflow has over 7000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users.
### Legal *Brought to you courtesy of our legal counsel. For more context, please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.* Use and transfer of Docker may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see https://www.bis.doc.gov Licensing ========= Docker is licensed under the Apache License, Version 2.0. See [LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full license text. Other Docker Related Projects ============================= There are a number of projects under development that are based on Docker's core technology. These projects expand the tooling built around the Docker platform to broaden its application and utility. * [Docker Registry](https://github.com/docker/distribution): Registry server for Docker (hosting/delivery of repositories and images) * [Docker Machine](https://github.com/docker/machine): Machine management for a container-centric world * [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering system * [Docker Compose](https://github.com/docker/compose) (formerly Fig): Define and run multi-container apps * [Kitematic](https://github.com/docker/kitematic): The easiest way to use Docker on Mac and Windows If you know of another project underway that should be listed here, please help us keep this list up-to-date by submitting a PR. Awesome-Docker ============== You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. docker-1.10.3/ROADMAP.md000066400000000000000000000222231267010174400144530ustar00rootroot00000000000000Docker Engine Roadmap ===================== ### How should I use this document? This document provides description of items that the project decided to prioritize. 
This should serve as a reference point for Docker contributors to understand where the project is going, and help determine if a contribution could be conflicting with some longer terms plans. The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be refused (except for those mentioned as "frozen features" below)! We are always happy to receive patches for new cool features we haven't thought about, or didn't judge priority. Please however understand that such patches might take longer for us to review. ### How can I help? Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our goal is to split down the workload in such way that anybody can jump in and help. Please comment on issues if you want to take it to avoid duplicating effort! Similarly, if a maintainer is already assigned on an issue you'd like to participate in, pinging him on IRC or GitHub to offer your help is the best way to go. ### How can I add something to the roadmap? The roadmap process is new to the Docker Engine: we are only beginning to structure and document the project objectives. Our immediate goal is to be more transparent, and work with our community to focus our efforts on fewer prioritized topics. We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we won't be accepting pull requests adding or removing items from this file. # 1. Features and refactoring ## 1.1 Security Security is a top objective for the Docker Engine. 
The most notable items we intend to provide in the near future are: - Trusted distribution of images: the effort is driven by the [distribution](https://github.com/docker/distribution) group but will have significant impact on the Engine - [User namespaces](https://github.com/docker/docker/pull/12648) - [Seccomp support](https://github.com/docker/libcontainer/pull/613) ## 1.2 Plumbing project We define a plumbing tool as a standalone piece of software usable and meaningful on its own. In the current state of the Docker Engine, most subsystems provide independent functionalities (such the builder, pushing and pulling images, running applications in a containerized environment, etc) but all are coupled in a single binary. We want to offer the users to flexibility to use only the pieces they need, and we will also gain in maintainability by splitting the project among multiple repositories. As it currently stands, the rough design outlines is to have: - Low level plumbing tools, each dealing with one responsibility (e.g., [runC](https://runc.io)) - Docker subsystems services, each exposing an elementary concept over an API, and relying on one or multiple lower level plumbing tools for their implementation (e.g., network management) - Docker Engine to expose higher level actions (e.g., create a container with volume `V` and network `N`), while still providing pass-through access to the individual subsystems. The architectural details are still being worked on, but one thing we know for sure is that we need to technically decouple the pieces. ### 1.2.1 Runtime A Runtime tool already exists today in the form of [runC](https://github.com/opencontainers/runc). We intend to modify the Engine to directly call out to a binary implementing the Open Containers Specification such as runC rather than relying on libcontainer to set the container runtime up. 
This plan will deprecate the existing [`execdriver`](https://github.com/docker/docker/tree/master/daemon/execdriver) as different runtime backends will be implemented as separated binaries instead of being compiled into the Engine. ### 1.2.2 Builder The Builder (i.e., the ability to build an image from a Dockerfile) is already nicely decoupled, but would benefit from being entirely separated from the Engine, and rely on the standard Engine API for its operations. ### 1.2.3 Distribution Distribution already has a [dedicated repository](https://github.com/docker/distribution) which holds the implementation for Registry v2 and client libraries. We could imagine going further by having the Engine call out to a binary providing image distribution related functionalities. There are two short term goals related to image distribution. The first is stabilize and simplify the push/pull code. Following that is the conversion to the more secure Registry V2 protocol. ### 1.2.4 Networking Most of networking related code was already decoupled today in [libnetwork](https://github.com/docker/libnetwork). As with other ingredients, we might want to take it a step further and make it a meaningful utility that the Engine would call out to instead of a library. ## 1.3 Plugins An initiative around plugins started with Docker 1.7.0, with the goal of allowing for out of process extensibility of some Docker functionalities, starting with volumes and networking. The approach is to provide specific extension points rather than generic hooking facilities. We also deliberately keep the extensions API the simplest possible, expanding as we discover valid use cases that cannot be implemented. At the time of writing: - Plugin support is merged as an experimental feature: real world use cases and user feedback will help us refine the UX to make the feature more user friendly. - There are no immediate plans to expand on the number of pluggable subsystems. 
- Golang 1.5 might add language support for [plugins](https://docs.google.com/document/d/1nr-TQHw_er6GOQRsF6T43GGhFDelrAP0NqSS_00RgZQ) which we consider supporting as an alternative to JSON/HTTP. ## 1.4 Volume management Volumes are not a first class citizen in the Engine today: we would like better volume management, similar to the way network are managed in the new [CNM](https://github.com/docker/docker/issues/9983). ## 1.5 Better API implementation The current Engine API is insufficiently typed, versioned, and ultimately hard to maintain. We also suffer from the lack of a common implementation with [Swarm](https://github.com/docker/swarm). ## 1.6 Checkpoint/restore Support for checkpoint/restore was [merged](https://github.com/docker/libcontainer/pull/479) in [libcontainer](https://github.com/docker/libcontainer) and made available through [runC](https://runc.io): we intend to take advantage of it in the Engine. # 2 Frozen features ## 2.1 Docker exec We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a *debugging* feature, as well as being strongly dependent on the Runtime ingredient effort. ## 2.2 Dockerfile syntax The Dockerfile syntax as we know it is simple, and has proven successful in supporting all our [official images](https://github.com/docker-library/official-images). Although this is *not* a definitive move, we temporarily won't accept more patches to the Dockerfile syntax for several reasons: - Long term impact of syntax changes is a sensitive matter that require an amount of attention the volume of Engine codebase and activity today doesn't allow us to provide. - Allowing the Builder to be implemented as a separate utility consuming the Engine's API will open the door for many possibilities, such as offering alternate syntaxes or DSL for existing languages without cluttering the Engine's codebase. 
- A standalone Builder will also offer the opportunity for a better dedicated group of maintainers to own the Dockerfile syntax and decide collectively on the direction to give it. - Our experience with official images tend to show that no new instruction or syntax expansion is *strictly* necessary for the majority of use cases, and although we are aware many things are still lacking for many, we cannot make it a priority yet for the above reasons. Again, this is not about saying that the Dockerfile syntax is done, it's about making choices about what we want to do first! ## 2.3 Remote Registry Operations A large amount of work is ongoing in the area of image distribution and provenance. This includes moving to the V2 Registry API and heavily refactoring the code that powers these features. The desired result is more secure, reliable and easier to use image distribution. Part of the problem with this part of the code base is the lack of a stable and flexible interface. If new features are added that access the registry without solidifying these interfaces, achieving feature parity will continue to be elusive. While we get a handle on this situation, we are imposing a moratorium on new code that accesses the Registry API in commands that don't already make remote calls. Currently, only the following commands cause interaction with a remote registry: - push - pull - run - build - search - login In the interest of stabilizing the registry access model during this ongoing work, we are not accepting additions to other commands that will cause remote interaction with the Registry API. This moratorium will lift when the goals of the distribution project have been met. docker-1.10.3/VENDORING.md000066400000000000000000000036411267010174400147260ustar00rootroot00000000000000# Vendoring policies This document outlines recommended Vendoring policies for Docker repositories. (Example, libnetwork is a Docker repo and logrus is not.) 
## Vendoring using tags Commit ID based vendoring provides little/no information about the updates vendored. To fix this, vendors will now require that repositories use annotated tags along with commit ids to snapshot commits. Annotated tags by themselves are not sufficient, since the same tag can be force updated to reference different commits. Each tag should: - Follow Semantic Versioning rules (refer to section on "Semantic Versioning") - Have a corresponding entry in the change tracking document. Each repo should: - Have a change tracking document between tags/releases. Ex: CHANGELOG.md, github releases file. The goal here is for consuming repos to be able to use the tag version and changelog updates to determine whether the vendoring will cause any breaking or backward incompatible changes. This also means that repos can specify having dependency on a package of a specific version or greater up to the next major release, without encountering breaking changes. ## Semantic Versioning Annotated version tags should follow Schema Versioning policies. According to http://semver.org: "Given a version number MAJOR.MINOR.PATCH, increment the: MAJOR version when you make incompatible API changes, MINOR version when you add functionality in a backwards-compatible manner, and PATCH version when you make backwards-compatible bug fixes. Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format." ## Vendoring cadence In order to avoid huge vendoring changes, it is recommended to have a regular cadence for vendoring updates. eg. monthly. ## Pre-merge vendoring tests All related repos will be vendored into docker/docker. CI on docker/docker should catch any breaking changes involving multiple repos. 
docker-1.10.3/VERSION000066400000000000000000000000071267010174400141120ustar00rootroot000000000000001.10.3 docker-1.10.3/api/000077500000000000000000000000001267010174400136165ustar00rootroot00000000000000docker-1.10.3/api/README.md000066400000000000000000000003151267010174400150740ustar00rootroot00000000000000This directory contains code pertaining to the Docker API: - Used by the docker client when communicating with the docker daemon - Used by third party tools wishing to interface with the docker daemon docker-1.10.3/api/client/000077500000000000000000000000001267010174400150745ustar00rootroot00000000000000docker-1.10.3/api/client/attach.go000066400000000000000000000044651267010174400167000ustar00rootroot00000000000000package client import ( "fmt" "io" "github.com/Sirupsen/logrus" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/signal" "github.com/docker/engine-api/types" ) // CmdAttach attaches to a running container. // // Usage: docker attach [OPTIONS] CONTAINER func (cli *DockerCli) CmdAttach(args ...string) error { cmd := Cli.Subcmd("attach", []string{"CONTAINER"}, Cli.DockerCommands["attach"].Description, true) noStdin := cmd.Bool([]string{"-no-stdin"}, false, "Do not attach STDIN") proxy := cmd.Bool([]string{"-sig-proxy"}, true, "Proxy all received signals to the process") detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) c, err := cli.client.ContainerInspect(cmd.Arg(0)) if err != nil { return err } if !c.State.Running { return fmt.Errorf("You cannot attach to a stopped container, start it first") } if c.State.Paused { return fmt.Errorf("You cannot attach to a paused container, unpause it first") } if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil { return err } if c.Config.Tty && cli.isTerminalOut { if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { 
logrus.Debugf("Error monitoring TTY size: %s", err) } } if *detachKeys != "" { cli.configFile.DetachKeys = *detachKeys } options := types.ContainerAttachOptions{ ContainerID: cmd.Arg(0), Stream: true, Stdin: !*noStdin && c.Config.OpenStdin, Stdout: true, Stderr: true, DetachKeys: cli.configFile.DetachKeys, } var in io.ReadCloser if options.Stdin { in = cli.in } if *proxy && !c.Config.Tty { sigc := cli.forwardAllSignals(options.ContainerID) defer signal.StopCatch(sigc) } resp, err := cli.client.ContainerAttach(options) if err != nil { return err } defer resp.Close() if in != nil && c.Config.Tty { if err := cli.setRawTerminal(); err != nil { return err } defer cli.restoreTerminal(in) } if err := cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp); err != nil { return err } _, status, err := getExitCode(cli, options.ContainerID) if err != nil { return err } if status != 0 { return Cli.StatusError{StatusCode: status} } return nil } docker-1.10.3/api/client/build.go000066400000000000000000000540271267010174400165320ustar00rootroot00000000000000package client import ( "archive/tar" "bufio" "bytes" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "runtime" "strings" "github.com/docker/docker/api" "github.com/docker/docker/builder/dockerignore" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/gitutils" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonmessage" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/reference" runconfigopts "github.com/docker/docker/runconfig/opts" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "github.com/docker/go-units" ) type translatorFunc 
func(reference.NamedTagged) (reference.Canonical, error) // CmdBuild builds a new image from the source code at a given path. // // If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN. // // Usage: docker build [OPTIONS] PATH | URL | - func (cli *DockerCli) CmdBuild(args ...string) error { cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true) flTags := opts.NewListOpts(validateTag) cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format") suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the build output and print image ID on success") noCache := cmd.Bool([]string{"-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"-rm"}, true, "Remove intermediate containers after a successful build") forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers") pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") flShmSize := cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB") flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period") flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to 
allow execution (0-3, 0,1)") flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") flBuildArg := opts.NewListOpts(runconfigopts.ValidateEnv) cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables") isolation := cmd.String([]string{"-isolation"}, "", "Container isolation level") ulimits := make(map[string]*units.Ulimit) flUlimits := runconfigopts.NewUlimitOpt(&ulimits) cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") cmd.Require(flag.Exact, 1) // For trusted pull on "FROM " instruction. addTrustedFlags(cmd, true) cmd.ParseFlags(args, true) var ( context io.ReadCloser isRemote bool err error ) specifiedContext := cmd.Arg(0) var ( contextDir string tempDir string relDockerfile string progBuff io.Writer buildBuff io.Writer ) progBuff = cli.out buildBuff = cli.out if *suppressOutput { progBuff = bytes.NewBuffer(nil) buildBuff = bytes.NewBuffer(nil) } switch { case specifiedContext == "-": context, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName) case urlutil.IsGitURL(specifiedContext): tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName) case urlutil.IsURL(specifiedContext): context, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName) default: contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName) } if err != nil { if *suppressOutput && urlutil.IsURL(specifiedContext) { fmt.Fprintln(cli.err, progBuff) } return fmt.Errorf("unable to prepare context: %s", err) } if tempDir != "" { defer os.RemoveAll(tempDir) contextDir = tempDir } if context == nil { // And canonicalize dockerfile name to a platform-independent one relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) if err != nil { return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) } f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) if err != nil && !os.IsNotExist(err) { 
return err } var excludes []string if err == nil { excludes, err = dockerignore.ReadAll(f) if err != nil { return err } } if err := validateContextDirectory(contextDir, excludes); err != nil { return fmt.Errorf("Error checking context: '%s'.", err) } // If .dockerignore mentions .dockerignore or the Dockerfile // then make sure we send both files over to the daemon // because Dockerfile is, obviously, needed no matter what, and // .dockerignore is needed to know if either one needs to be // removed. The daemon will remove them for us, if needed, after it // parses the Dockerfile. Ignore errors here, as they will have been // caught by validateContextDirectory above. var includes = []string{"."} keepThem1, _ := fileutils.Matches(".dockerignore", excludes) keepThem2, _ := fileutils.Matches(relDockerfile, excludes) if keepThem1 || keepThem2 { includes = append(includes, ".dockerignore", relDockerfile) } context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ Compression: archive.Uncompressed, ExcludePatterns: excludes, IncludeFiles: includes, }) if err != nil { return err } } var resolvedTags []*resolvedTag if isTrusted() { // Wrap the tar archive to replace the Dockerfile entry with the rewritten // Dockerfile which uses trusted pulls. 
context = replaceDockerfileTarWrapper(context, relDockerfile, cli.trustedReference, &resolvedTags) } // Setup an upload progress bar progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) var body io.Reader = progress.NewProgressReader(context, progressOutput, 0, "", "Sending build context to Docker daemon") var memory int64 if *flMemoryString != "" { parsedMemory, err := units.RAMInBytes(*flMemoryString) if err != nil { return err } memory = parsedMemory } var memorySwap int64 if *flMemorySwap != "" { if *flMemorySwap == "-1" { memorySwap = -1 } else { parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) if err != nil { return err } memorySwap = parsedMemorySwap } } var shmSize int64 if *flShmSize != "" { shmSize, err = units.RAMInBytes(*flShmSize) if err != nil { return err } } var remoteContext string if isRemote { remoteContext = cmd.Arg(0) } options := types.ImageBuildOptions{ Context: body, Memory: memory, MemorySwap: memorySwap, Tags: flTags.GetAll(), SuppressOutput: *suppressOutput, RemoteContext: remoteContext, NoCache: *noCache, Remove: *rm, ForceRemove: *forceRm, PullParent: *pull, IsolationLevel: container.IsolationLevel(*isolation), CPUSetCPUs: *flCPUSetCpus, CPUSetMems: *flCPUSetMems, CPUShares: *flCPUShares, CPUQuota: *flCPUQuota, CPUPeriod: *flCPUPeriod, CgroupParent: *flCgroupParent, Dockerfile: relDockerfile, ShmSize: shmSize, Ulimits: flUlimits.GetList(), BuildArgs: runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()), AuthConfigs: cli.configFile.AuthConfigs, } response, err := cli.client.ImageBuild(options) if err != nil { return err } err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil) if err != nil { if jerr, ok := err.(*jsonmessage.JSONError); ok { // If no error code is set, default to 1 if jerr.Code == 0 { jerr.Code = 1 } if *suppressOutput { fmt.Fprintf(cli.err, "%s%s", progBuff, buildBuff) } return Cli.StatusError{Status: jerr.Message, 
StatusCode: jerr.Code} } } // Windows: show error message about modified file permissions if the // daemon isn't running Windows. if response.OSType != "windows" && runtime.GOOS == "windows" { fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) } // Everything worked so if -q was provided the output from the daemon // should be just the image ID and we'll print that to stdout. if *suppressOutput { fmt.Fprintf(cli.out, "%s", buildBuff) } if isTrusted() { // Since the build was successful, now we must tag any of the resolved // images from the above Dockerfile rewrite. for _, resolved := range resolvedTags { if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil { return err } } } return nil } // validateContextDirectory checks if all the contents of the directory // can be read and returns an error if some files can't be read // symlinks which point to non-existing files don't trigger an error func validateContextDirectory(srcPath string, excludes []string) error { contextRoot, err := getContextRoot(srcPath) if err != nil { return err } return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { // skip this directory/file if it's not in the path, it won't get added to the context if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { return err } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { return err } else if skip { if f.IsDir() { return filepath.SkipDir } return nil } if err != nil { if os.IsPermission(err) { return fmt.Errorf("can't stat '%s'", filePath) } if os.IsNotExist(err) { return nil } return err } // skip checking if symlinks point to non-existing files, such symlinks can be useful // also skip named pipes, 
because they hanging on open if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { return nil } if !f.IsDir() { currentFile, err := os.Open(filePath) if err != nil && os.IsPermission(err) { return fmt.Errorf("no permission to read from '%s'", filePath) } currentFile.Close() } return nil }) } // validateTag checks if the given image name can be resolved. func validateTag(rawRepo string) (string, error) { _, err := reference.ParseNamed(rawRepo) if err != nil { return "", err } return rawRepo, nil } // isUNC returns true if the path is UNC (one starting \\). It always returns // false on Linux. func isUNC(path string) bool { return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) } // getDockerfileRelPath uses the given context directory for a `docker build` // and returns the absolute path to the context directory, the relative path of // the dockerfile in that context directory, and a non-nil error on success. func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { if absContextDir, err = filepath.Abs(givenContextDir); err != nil { return "", "", fmt.Errorf("unable to get absolute context directory: %v", err) } // The context dir might be a symbolic link, so follow it to the actual // target directory. // // FIXME. We use isUNC (always false on non-Windows platforms) to workaround // an issue in golang. On Windows, EvalSymLinks does not work on UNC file // paths (those starting with \\). This hack means that when using links // on UNC paths, they will not be followed. 
if !isUNC(absContextDir) { absContextDir, err = filepath.EvalSymlinks(absContextDir) if err != nil { return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) } } stat, err := os.Lstat(absContextDir) if err != nil { return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) } if !stat.IsDir() { return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) } absDockerfile := givenDockerfile if absDockerfile == "" { // No -f/--file was specified so use the default relative to the // context directory. absDockerfile = filepath.Join(absContextDir, api.DefaultDockerfileName) // Just to be nice ;-) look for 'dockerfile' too but only // use it if we found it, otherwise ignore this check if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { altPath := filepath.Join(absContextDir, strings.ToLower(api.DefaultDockerfileName)) if _, err = os.Lstat(altPath); err == nil { absDockerfile = altPath } } } // If not already an absolute path, the Dockerfile path should be joined to // the base directory. if !filepath.IsAbs(absDockerfile) { absDockerfile = filepath.Join(absContextDir, absDockerfile) } // Evaluate symlinks in the path to the Dockerfile too. // // FIXME. We use isUNC (always false on non-Windows platforms) to workaround // an issue in golang. On Windows, EvalSymLinks does not work on UNC file // paths (those starting with \\). This hack means that when using links // on UNC paths, they will not be followed. 
if !isUNC(absDockerfile) { absDockerfile, err = filepath.EvalSymlinks(absDockerfile) if err != nil { return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) } } if _, err := os.Lstat(absDockerfile); err != nil { if os.IsNotExist(err) { return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) } return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) } if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) } if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) } return absContextDir, relDockerfile, nil } // writeToFile copies from the given reader and writes it to a file with the // given filename. func writeToFile(r io.Reader, filename string) error { file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) if err != nil { return fmt.Errorf("unable to create file: %v", err) } defer file.Close() if _, err := io.Copy(file, r); err != nil { return fmt.Errorf("unable to write file: %v", err) } return nil } // getContextFromReader will read the contents of the given reader as either a // Dockerfile or tar archive. Returns a tar archive used as a context and a // path to the Dockerfile inside the tar. func getContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { buf := bufio.NewReader(r) magic, err := buf.Peek(archive.HeaderSize) if err != nil && err != io.EOF { return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) } if archive.IsArchive(magic) { return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil } // Input should be read as a Dockerfile. 
tmpDir, err := ioutil.TempDir("", "docker-build-context-") if err != nil { return nil, "", fmt.Errorf("unbale to create temporary context directory: %v", err) } f, err := os.Create(filepath.Join(tmpDir, api.DefaultDockerfileName)) if err != nil { return nil, "", err } _, err = io.Copy(f, buf) if err != nil { f.Close() return nil, "", err } if err := f.Close(); err != nil { return nil, "", err } if err := r.Close(); err != nil { return nil, "", err } tar, err := archive.Tar(tmpDir, archive.Uncompressed) if err != nil { return nil, "", err } return ioutils.NewReadCloserWrapper(tar, func() error { err := tar.Close() os.RemoveAll(tmpDir) return err }), api.DefaultDockerfileName, nil } // getContextFromGitURL uses a Git URL as context for a `docker build`. The // git repo is cloned into a temporary directory used as the context directory. // Returns the absolute path to the temporary context directory, the relative // path of the dockerfile in that context directory, and a non-nil error on // success. func getContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { if _, err := exec.LookPath("git"); err != nil { return "", "", fmt.Errorf("unable to find 'git': %v", err) } if absContextDir, err = gitutils.Clone(gitURL); err != nil { return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) } return getDockerfileRelPath(absContextDir, dockerfileName) } // getContextFromURL uses a remote URL as context for a `docker build`. The // remote resource is downloaded as either a Dockerfile or a tar archive. // Returns the tar archive used for the context and a path of the // dockerfile inside the tar. 
func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { response, err := httputils.Download(remoteURL) if err != nil { return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) } progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) // Pass the response body through a progress reader. progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) return getContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) } // getContextFromLocalDir uses the given local directory as context for a // `docker build`. Returns the absolute path to the local context directory, // the relative path of the dockerfile in that context directory, and a non-nil // error on success. func getContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { // When using a local context directory, when the Dockerfile is specified // with the `-f/--file` option then it is considered relative to the // current directory and not the context directory. if dockerfileName != "" { if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) } } return getDockerfileRelPath(localDir, dockerfileName) } var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) // resolvedTag records the repository, tag, and resolved digest reference // from a Dockerfile rewrite. type resolvedTag struct { digestRef reference.Canonical tagRef reference.NamedTagged } // rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in // "FROM " instructions to a digest reference. 
// `translator` is a function that takes a repository name and tag reference
// and returns a trusted digest reference.
func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
	scanner := bufio.NewScanner(dockerfile)
	buf := bytes.NewBuffer(nil)

	// Scan the lines of the Dockerfile, looking for a "FROM" line.
	for scanner.Scan() {
		line := scanner.Text()

		matches := dockerfileFromLinePattern.FindStringSubmatch(line)
		// NoBaseImageSpecifier ("scratch") has no image to resolve.
		if matches != nil && matches[1] != api.NoBaseImageSpecifier {
			// Replace the line with a resolved "FROM repo@digest"
			ref, err := reference.ParseNamed(matches[1])
			if err != nil {
				return nil, nil, err
			}
			ref = reference.WithDefaultTag(ref)
			// Only tagged references are resolved, and only when content
			// trust is enabled.
			if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() {
				trustedRef, err := translator(ref)
				if err != nil {
					return nil, nil, err
				}

				line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String()))
				resolvedTags = append(resolvedTags, &resolvedTag{
					digestRef: trustedRef,
					tagRef:    ref,
				})
			}
		}

		_, err := fmt.Fprintln(buf, line)
		if err != nil {
			return nil, nil, err
		}
	}

	return buf.Bytes(), resolvedTags, scanner.Err()
}

// replaceDockerfileTarWrapper wraps the given input tar archive stream and
// replaces the entry with the given Dockerfile name with the contents of the
// new Dockerfile. Returns a new tar archive stream with the replaced
// Dockerfile.
func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		tarReader := tar.NewReader(inputTarStream)
		tarWriter := tar.NewWriter(pipeWriter)

		defer inputTarStream.Close()

		for {
			hdr, err := tarReader.Next()
			if err == io.EOF {
				// Signals end of archive.
				tarWriter.Close()
				pipeWriter.Close()
				return
			}
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			var content io.Reader = tarReader
			if hdr.Name == dockerfileName {
				// This entry is the Dockerfile. Since the tar archive was
				// generated from a directory on the local filesystem, the
				// Dockerfile will only appear once in the archive.
				var newDockerfile []byte
				newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(content, translator)
				if err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				// The rewritten contents change the entry's size, so the
				// header must be updated before it is re-emitted.
				hdr.Size = int64(len(newDockerfile))
				content = bytes.NewBuffer(newDockerfile)
			}

			if err := tarWriter.WriteHeader(hdr); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			if _, err := io.Copy(tarWriter, content); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}
	}()

	return pipeReader
}

package client

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"runtime"

	"github.com/docker/docker/api"
	"github.com/docker/docker/cli"
	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/engine-api/client"
	"github.com/docker/go-connections/tlsconfig"
)

// DockerCli represents the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
	// initializing closure
	init func() error
	// configFile has the client configuration file
	configFile *cliconfig.ConfigFile
	// in holds the input stream and closer (io.ReadCloser) for the client.
	in io.ReadCloser
	// out holds the output stream (io.Writer) for the client.
	out io.Writer
	// err holds the error stream (io.Writer) for the client.
	err io.Writer
	// keyFile holds the key file as a string.
	keyFile string
	// inFd holds the file descriptor of the client's STDIN (if valid).
	inFd uintptr
	// outFd holds file descriptor of the client's STDOUT (if valid).
	outFd uintptr
	// isTerminalIn indicates whether the client's STDIN is a TTY
	isTerminalIn bool
	// isTerminalOut indicates whether the client's STDOUT is a TTY
	isTerminalOut bool
	// client is the http client that performs all API operations
	client client.APIClient
	// state holds the terminal state
	state *term.State
}

// Initialize calls the init function that will setup the configuration for the client
// such as the TLS, tcp and other parameters used to run the client.
func (cli *DockerCli) Initialize() error {
	if cli.init == nil {
		return nil
	}
	return cli.init()
}

// CheckTtyInput checks if we are trying to attach to a container tty
// from a non-tty client input stream, and if so, returns an error.
func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error {
	// In order to attach to a container tty, input stream for the client must
	// be a tty itself: redirecting or piping the client standard input is
	// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
	if ttyMode && attachStdin && !cli.isTerminalIn {
		return errors.New("cannot enable tty mode on non tty input")
	}
	return nil
}

// PsFormat returns the format string specified in the configuration.
// String contains columns and format specification, for example {{ID}}\t{{Name}}.
func (cli *DockerCli) PsFormat() string {
	return cli.configFile.PsFormat
}

// ImagesFormat returns the format string specified in the configuration.
// String contains columns and format specification, for example {{ID}}\t{{Name}}.
func (cli *DockerCli) ImagesFormat() string {
	return cli.configFile.ImagesFormat
}

// setRawTerminal puts the client's STDIN into raw mode, saving the previous
// terminal state so restoreTerminal can undo it. The NORAW environment
// variable disables raw mode entirely.
func (cli *DockerCli) setRawTerminal() error {
	if cli.isTerminalIn && os.Getenv("NORAW") == "" {
		state, err := term.SetRawTerminal(cli.inFd)
		if err != nil {
			return err
		}
		cli.state = state
	}
	return nil
}

// restoreTerminal restores the terminal state saved by setRawTerminal and
// closes the given input stream (except on darwin, see below).
func (cli *DockerCli) restoreTerminal(in io.Closer) error {
	if cli.state != nil {
		term.RestoreTerminal(cli.inFd, cli.state)
	}
	// WARNING: DO NOT REMOVE THE OS CHECK !!!
	// For some reason this Close call blocks on darwin..
	// As the client exists right after, simply discard the close
	// until we find a better solution.
	if in != nil && runtime.GOOS != "darwin" {
		return in.Close()
	}
	return nil
}

// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config
// is set the client scheme will be set to https.
// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035).
func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli {
	cli := &DockerCli{
		in:      in,
		out:     out,
		err:     err,
		keyFile: clientFlags.Common.TrustKey,
	}

	// All configuration (config file, host resolution, API client) is
	// deferred to this closure so it runs after flag parsing, via Initialize.
	cli.init = func() error {
		clientFlags.PostParse()
		configFile, e := cliconfig.Load(cliconfig.ConfigDir())
		if e != nil {
			fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e)
		}
		cli.configFile = configFile

		host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions)
		if err != nil {
			return err
		}

		customHeaders := cli.configFile.HTTPHeaders
		if customHeaders == nil {
			customHeaders = map[string]string{}
		}
		customHeaders["User-Agent"] = "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")"

		// DOCKER_API_VERSION overrides the compiled-in default API version.
		verStr := api.DefaultVersion.String()
		if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
			verStr = tmpStr
		}

		clientTransport, err := newClientTransport(clientFlags.Common.TLSOptions)
		if err != nil {
			return err
		}

		client, err := client.NewClient(host, verStr, clientTransport, customHeaders)
		if err != nil {
			return err
		}
		cli.client = client

		if cli.in != nil {
			cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in)
		}
		if cli.out != nil {
			cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out)
		}

		return nil
	}

	return cli
}

// getServerHost picks the daemon host from the -H flags (at most one) or the
// DOCKER_HOST environment variable, then normalizes it against the default
// TCP (or TLS) host.
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) {
	switch len(hosts) {
	case 0:
		host = os.Getenv("DOCKER_HOST")
	case 1:
		host = hosts[0]
	default:
		return "", errors.New("Please specify only one -H")
	}

	defaultHost := opts.DefaultTCPHost
	if tlsOptions != nil {
		defaultHost = opts.DefaultTLSHost
	}

	host, err = opts.ParseHost(defaultHost, host)
	return
}

// newClientTransport builds the HTTP transport for the API client, with TLS
// configured when tlsOptions is non-nil.
func newClientTransport(tlsOptions *tlsconfig.Options) (*http.Transport, error) {
	if tlsOptions == nil {
		return &http.Transport{}, nil
	}

	config, err := tlsconfig.Client(*tlsOptions)
	if err != nil {
		return nil, err
	}
	return &http.Transport{
		TLSClientConfig: config,
	}, nil
}

// Package client provides a command-line interface for Docker.
//
// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand.
// See https://docs.docker.com/installation/ for instructions on installing Docker.
package client

package client

import (
	"encoding/json"
	"errors"
	"fmt"

	Cli "github.com/docker/docker/cli"
	"github.com/docker/docker/opts"
	flag "github.com/docker/docker/pkg/mflag"
	"github.com/docker/docker/reference"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/container"
)

// CmdCommit creates a new image from a container's changes.
// // Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] func (cli *DockerCli) CmdCommit(args ...string) error { cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, Cli.DockerCommands["commit"].Description, true) flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") flAuthor := cmd.String([]string{"a", "-author"}, "", "Author (e.g., \"John Hannibal Smith \")") flChanges := opts.NewListOpts(nil) cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. flConfig := cmd.String([]string{"#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") cmd.Require(flag.Max, 2) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var ( name = cmd.Arg(0) repositoryAndTag = cmd.Arg(1) repositoryName string tag string ) //Check if the given image name can be resolved if repositoryAndTag != "" { ref, err := reference.ParseNamed(repositoryAndTag) if err != nil { return err } repositoryName = ref.Name() switch x := ref.(type) { case reference.Canonical: return errors.New("cannot commit to digest reference") case reference.NamedTagged: tag = x.Tag() } } var config *container.Config if *flConfig != "" { config = &container.Config{} if err := json.Unmarshal([]byte(*flConfig), config); err != nil { return err } } options := types.ContainerCommitOptions{ ContainerID: name, RepositoryName: repositoryName, Tag: tag, Comment: *flComment, Author: *flAuthor, Changes: flChanges.GetAll(), Pause: *flPause, Config: config, } response, err := cli.client.ContainerCommit(options) if err != nil { return err } fmt.Fprintln(cli.out, response.ID) return nil } docker-1.10.3/api/client/cp.go000066400000000000000000000216751267010174400160400ustar00rootroot00000000000000package client 
import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	Cli "github.com/docker/docker/cli"
	"github.com/docker/docker/pkg/archive"
	flag "github.com/docker/docker/pkg/mflag"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/engine-api/types"
)

// copyDirection encodes whether a cp operation reads from a container,
// writes to a container, or (illegally) both.
type copyDirection int

const (
	fromContainer copyDirection = (1 << iota)
	toContainer
	acrossContainers = fromContainer | toContainer
)

// cpConfig holds the cp command's option values.
type cpConfig struct {
	// followLink mirrors the -L/--follow-link flag.
	followLink bool
}

// CmdCp copies files/folders to or from a path in a container.
//
// When copying from a container, if DEST_PATH is '-' the data is written as a
// tar archive file to STDOUT.
//
// When copying to a container, if SRC_PATH is '-' the data is read as a tar
// archive file from STDIN, and the destination CONTAINER:DEST_PATH, must specify
// a directory.
//
// Usage:
// 	docker cp CONTAINER:SRC_PATH DEST_PATH|-
// 	docker cp SRC_PATH|- CONTAINER:DEST_PATH
func (cli *DockerCli) CmdCp(args ...string) error {
	cmd := Cli.Subcmd(
		"cp",
		[]string{"CONTAINER:SRC_PATH DEST_PATH|-", "SRC_PATH|- CONTAINER:DEST_PATH"},
		strings.Join([]string{
			Cli.DockerCommands["cp"].Description,
			"\nUse '-' as the source to read a tar archive from stdin\n",
			"and extract it to a directory destination in a container.\n",
			"Use '-' as the destination to stream a tar archive of a\n",
			"container source to stdout.",
		}, ""),
		true,
	)

	followLink := cmd.Bool([]string{"L", "-follow-link"}, false, "Always follow symbol link in SRC_PATH")

	cmd.Require(flag.Exact, 2)
	cmd.ParseFlags(args, true)

	if cmd.Arg(0) == "" {
		return fmt.Errorf("source can not be empty")
	}
	if cmd.Arg(1) == "" {
		return fmt.Errorf("destination can not be empty")
	}

	srcContainer, srcPath := splitCpArg(cmd.Arg(0))
	dstContainer, dstPath := splitCpArg(cmd.Arg(1))

	// The direction is derived from which side(s) name a container.
	var direction copyDirection
	if srcContainer != "" {
		direction |= fromContainer
	}
	if dstContainer != "" {
		direction |= toContainer
	}

	cpParam := &cpConfig{
		followLink: *followLink,
	}

	switch direction {
	case fromContainer:
		return cli.copyFromContainer(srcContainer, srcPath, dstPath, cpParam)
	case toContainer:
		return cli.copyToContainer(srcPath, dstContainer, dstPath, cpParam)
	case acrossContainers:
		// Copying between containers isn't supported.
		return fmt.Errorf("copying between containers is not supported")
	default:
		// User didn't specify any container.
		return fmt.Errorf("must specify at least one container source")
	}
}

// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be
// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by
// requiring a LOCALPATH with a `:` to be made explicit with a relative or
// absolute path:
// 	`/path/to/file:name.txt` or `./file:name.txt`
//
// This is apparently how `scp` handles this as well:
// 	http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/
//
// We can't simply check for a filepath separator because container names may
// have a separator, e.g., "host0/cname1" if container is in a Docker cluster,
// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows
// client, a `:` could be part of an absolute Windows path, in which case it
// is immediately proceeded by a backslash.
func splitCpArg(arg string) (container, path string) {
	if system.IsAbs(arg) {
		// Explicit local absolute path, e.g., `C:\foo` or `/foo`.
		return "", arg
	}

	parts := strings.SplitN(arg, ":", 2)

	if len(parts) == 1 || strings.HasPrefix(parts[0], ".") {
		// Either there's no `:` in the arg
		// OR it's an explicit local relative path like `./file:name.txt`.
		return "", arg
	}

	return parts[0], parts[1]
}

// statContainerPath asks the daemon for file metadata at path inside the
// named container.
func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) {
	return cli.client.ContainerStatPath(containerName, path)
}

// resolveLocalPath makes localPath absolute while preserving a trailing
// "." or path separator, which is significant to copy semantics.
func resolveLocalPath(localPath string) (absPath string, err error) {
	if absPath, err = filepath.Abs(localPath); err != nil {
		return
	}

	return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil
}

// copyFromContainer streams srcPath out of srcContainer to dstPath on the
// local filesystem, or to STDOUT as a tar stream when dstPath is "-".
func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) {
	if dstPath != "-" {
		// Get an absolute destination path.
		dstPath, err = resolveLocalPath(dstPath)
		if err != nil {
			return err
		}
	}

	// if client requests to follow symbol link, then must decide target file to be copied
	var rebaseName string
	if cpParam.followLink {
		srcStat, err := cli.statContainerPath(srcContainer, srcPath)

		// If the destination is a symbolic link, we should follow it.
		if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
			linkTarget := srcStat.LinkTarget
			if !system.IsAbs(linkTarget) {
				// Join with the parent directory.
				srcParent, _ := archive.SplitPathDirEntry(srcPath)
				linkTarget = filepath.Join(srcParent, linkTarget)
			}

			linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget)
			srcPath = linkTarget
		}
	}

	content, stat, err := cli.client.CopyFromContainer(srcContainer, srcPath)
	if err != nil {
		return err
	}
	defer content.Close()

	if dstPath == "-" {
		// Send the response to STDOUT.
		_, err = io.Copy(os.Stdout, content)

		return err
	}

	// Prepare source copy info.
	srcInfo := archive.CopyInfo{
		Path:       srcPath,
		Exists:     true,
		IsDir:      stat.Mode.IsDir(),
		RebaseName: rebaseName,
	}

	preArchive := content
	if len(srcInfo.RebaseName) != 0 {
		_, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
		preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName)
	}
	// See comments in the implementation of `archive.CopyTo` for exactly what
	// goes into deciding how and whether the source archive needs to be
	// altered for the correct copy behavior.
	return archive.CopyTo(preArchive, srcInfo, dstPath)
}

// copyToContainer streams srcPath from the local filesystem (or STDIN when
// srcPath is "-") into dstPath inside dstContainer.
func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) {
	if srcPath != "-" {
		// Get an absolute source path.
		srcPath, err = resolveLocalPath(srcPath)
		if err != nil {
			return err
		}
	}

	// In order to get the copy behavior right, we need to know information
	// about both the source and destination. The API is a simple tar
	// archive/extract API but we can use the stat info header about the
	// destination to be more informed about exactly what the destination is.

	// Prepare destination copy info by stat-ing the container path.
	dstInfo := archive.CopyInfo{Path: dstPath}
	dstStat, err := cli.statContainerPath(dstContainer, dstPath)

	// If the destination is a symbolic link, we should evaluate it.
	if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
		linkTarget := dstStat.LinkTarget
		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := archive.SplitPathDirEntry(dstPath)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		dstInfo.Path = linkTarget
		dstStat, err = cli.statContainerPath(dstContainer, linkTarget)
	}

	// Ignore any error and assume that the parent directory of the destination
	// path exists, in which case the copy may still succeed. If there is any
	// type of conflict (e.g., non-directory overwriting an existing directory
	// or vice versa) the extraction will fail. If the destination simply did
	// not exist, but the parent directory does, the extraction will still
	// succeed.
	if err == nil {
		dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
	}

	var (
		content         io.Reader
		resolvedDstPath string
	)

	if srcPath == "-" {
		// Use STDIN.
		content = os.Stdin
		resolvedDstPath = dstInfo.Path
		if !dstInfo.IsDir {
			return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
		}
	} else {
		// Prepare source copy info.
		srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink)
		if err != nil {
			return err
		}

		srcArchive, err := archive.TarResource(srcInfo)
		if err != nil {
			return err
		}
		defer srcArchive.Close()

		// With the stat info about the local source as well as the
		// destination, we have enough information to know whether we need to
		// alter the archive that we upload so that when the server extracts
		// it to the specified directory in the container we get the desired
		// copy behavior.

		// See comments in the implementation of `archive.PrepareArchiveCopy`
		// for exactly what goes into deciding how and whether the source
		// archive needs to be altered for the correct copy behavior when it is
		// extracted. This function also infers from the source and destination
		// info which directory to extract to, which may be the parent of the
		// destination that the user specified.
		dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
		if err != nil {
			return err
		}
		defer preparedArchive.Close()

		resolvedDstPath = dstDir
		content = preparedArchive
	}

	options := types.CopyToContainerOptions{
		ContainerID:               dstContainer,
		Path:                      resolvedDstPath,
		Content:                   content,
		AllowOverwriteDirWithFile: false,
	}

	return cli.client.CopyToContainer(options)
}

package client

import (
	"fmt"
	"io"
	"os"

	Cli "github.com/docker/docker/cli"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	runconfigopts "github.com/docker/docker/runconfig/opts"
	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/container"
	networktypes "github.com/docker/engine-api/types/network"
)

// pullImage pulls the named image, streaming progress to the client's
// standard output stream.
func (cli *DockerCli) pullImage(image string) error {
	return cli.pullImageCustomOut(image, cli.out)
}

// pullImageCustomOut pulls the named image, streaming progress to the given
// writer (used to keep pull output off STDOUT when only the container ID may
// be printed there).
func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
	ref, err := reference.ParseNamed(image)
	if err != nil {
		return err
	}

	// The "tag" sent to the daemon is either a digest or a literal tag.
	var tag string
	switch x := reference.WithDefaultTag(ref).(type) {
	case reference.Canonical:
		tag = x.Digest().String()
	case reference.NamedTagged:
		tag = x.Tag()
	}

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ParseRepositoryInfo(ref)
	if err != nil {
		return err
	}

	authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index)
	encodedAuth, err := encodeAuthToBase64(authConfig)
	if err != nil {
		return err
	}

	options := types.ImageCreateOptions{
		Parent:       ref.Name(),
		Tag:          tag,
		RegistryAuth: encodedAuth,
	}

	responseBody, err := cli.client.ImageCreate(options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	return jsonmessage.DisplayJSONMessagesStream(responseBody, out, cli.outFd, cli.isTerminalOut, nil)
}

// cidFile manages the container-ID file created for --cidfile.
type cidFile struct {
	path string
	file *os.File
	written bool
}

// newCIDFile creates the container-ID file at path, refusing to overwrite an
// existing file (a stale one likely belongs to another container).
func newCIDFile(path string) (*cidFile, error) {
	if _, err := os.Stat(path); err == nil {
		return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path)
	}

	f, err := os.Create(path)
	if err != nil {
		return nil, fmt.Errorf("Failed to create the container ID file: %s", err)
	}

	return &cidFile{path: path, file: f}, nil
}

// createContainer creates (but does not start) a container. If the image is
// missing locally it pulls it (to stderr, so stdout carries only the
// container ID) and retries the create once.
func (cli *DockerCli) createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) {
	var containerIDFile *cidFile
	if cidfile != "" {
		var err error
		if containerIDFile, err = newCIDFile(cidfile); err != nil {
			return nil, err
		}
		defer containerIDFile.Close()
	}

	ref, err := reference.ParseNamed(config.Image)
	if err != nil {
		return nil, err
	}
	ref = reference.WithDefaultTag(ref)

	var trustedRef reference.Canonical

	// With content trust enabled, resolve the tag to a trusted digest
	// reference before creating.
	if ref, ok := ref.(reference.NamedTagged); ok && isTrusted() {
		var err error
		trustedRef, err = cli.trustedReference(ref)
		if err != nil {
			return nil, err
		}
		config.Image = trustedRef.String()
	}

	//create the container
	response, err := cli.client.ContainerCreate(config, hostConfig, networkingConfig, name)

	//if image not found try to pull it
	if err != nil {
		if client.IsErrImageNotFound(err) {
			fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String())

			// we don't want to write to stdout anything apart from container.ID
			if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
				return nil, err
			}
			if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil {
				if err := cli.tagTrusted(trustedRef, ref); err != nil {
					return nil, err
				}
			}
			// Retry
			var retryErr error
			response, retryErr = cli.client.ContainerCreate(config, hostConfig, networkingConfig, name)
			if retryErr != nil {
				return nil, retryErr
			}
		} else {
			return nil, err
		}
	}

	for _, warning := range response.Warnings {
		fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
	}
	if containerIDFile != nil {
		if err = containerIDFile.Write(response.ID); err != nil {
			return nil, err
		}
	}
	return &response, nil
}

// CmdCreate creates a new container from a given image.
//
// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...]
func (cli *DockerCli) CmdCreate(args ...string) error {
	cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["create"].Description, true)
	addTrustedFlags(cmd, true)

	// These are flags not stored in Config/HostConfig
	var (
		flName = cmd.String([]string{"-name"}, "", "Assign a name to the container")
	)

	config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args)

	if err != nil {
		cmd.ReportError(err.Error(), true)
		os.Exit(1)
	}
	if config.Image == "" {
		cmd.Usage()
		return nil
	}
	response, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName)
	if err != nil {
		return err
	}
	fmt.Fprintf(cli.out, "%s\n", response.ID)
	return nil
}

package client

import (
	"fmt"

	Cli "github.com/docker/docker/cli"
	"github.com/docker/docker/pkg/archive"
	flag "github.com/docker/docker/pkg/mflag"
)

// CmdDiff shows changes on a container's filesystem.
//
// Each changed file is printed on a separate line, prefixed with a single
// character that indicates the status of the file: C (modified), A (added),
// or D (deleted).
// // Usage: docker diff CONTAINER func (cli *DockerCli) CmdDiff(args ...string) error { cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, Cli.DockerCommands["diff"].Description, true) cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) if cmd.Arg(0) == "" { return fmt.Errorf("Container name cannot be empty") } changes, err := cli.client.ContainerDiff(cmd.Arg(0)) if err != nil { return err } for _, change := range changes { var kind string switch change.Kind { case archive.ChangeModify: kind = "C" case archive.ChangeAdd: kind = "A" case archive.ChangeDelete: kind = "D" } fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path) } return nil } docker-1.10.3/api/client/events.go000066400000000000000000000057121267010174400167340ustar00rootroot00000000000000package client import ( "encoding/json" "fmt" "io" "strings" "time" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/jsonlog" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/types" eventtypes "github.com/docker/engine-api/types/events" "github.com/docker/engine-api/types/filters" ) // CmdEvents prints a live stream of real time events from the server. // // Usage: docker events [OPTIONS] func (cli *DockerCli) CmdEvents(args ...string) error { cmd := Cli.Subcmd("events", nil, Cli.DockerCommands["events"].Description, true) since := cmd.String([]string{"-since"}, "", "Show all events created since timestamp") until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, true) eventFilterArgs := filters.NewArgs() // Consolidate all filter flags, and sanity check them early. // They'll get process in the daemon/server. 
for _, f := range flFilter.GetAll() { var err error eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) if err != nil { return err } } options := types.EventsOptions{ Since: *since, Until: *until, Filters: eventFilterArgs, } responseBody, err := cli.client.Events(options) if err != nil { return err } defer responseBody.Close() return streamEvents(responseBody, cli.out) } // streamEvents decodes prints the incoming events in the provided output. func streamEvents(input io.Reader, output io.Writer) error { return decodeEvents(input, func(event eventtypes.Message, err error) error { if err != nil { return err } printOutput(event, output) return nil }) } type eventProcessor func(event eventtypes.Message, err error) error func decodeEvents(input io.Reader, ep eventProcessor) error { dec := json.NewDecoder(input) for { var event eventtypes.Message err := dec.Decode(&event) if err != nil && err == io.EOF { break } if procErr := ep(event, err); procErr != nil { return procErr } } return nil } // printOutput prints all types of event information. // Each output includes the event type, actor id, name and action. // Actor attributes are printed at the end if the actor has any. 
func printOutput(event eventtypes.Message, output io.Writer) { if event.TimeNano != 0 { fmt.Fprintf(output, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) } else if event.Time != 0 { fmt.Fprintf(output, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) } fmt.Fprintf(output, "%s %s %s", event.Type, event.Action, event.Actor.ID) if len(event.Actor.Attributes) > 0 { var attrs []string for k, v := range event.Actor.Attributes { attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) } fmt.Fprintf(output, " (%s)", strings.Join(attrs, ", ")) } fmt.Fprint(output, "\n") } docker-1.10.3/api/client/exec.go000066400000000000000000000102561267010174400163530ustar00rootroot00000000000000package client import ( "fmt" "io" "github.com/Sirupsen/logrus" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/promise" "github.com/docker/engine-api/types" ) // CmdExec runs a command in a running container. // // Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] 
func (cli *DockerCli) CmdExec(args ...string) error { cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") execConfig, err := ParseExec(cmd, args) // just in case the ParseExec does not exit if execConfig.Container == "" || err != nil { return Cli.StatusError{StatusCode: 1} } if *detachKeys != "" { cli.configFile.DetachKeys = *detachKeys } // Send client escape keys execConfig.DetachKeys = cli.configFile.DetachKeys response, err := cli.client.ContainerExecCreate(*execConfig) if err != nil { return err } execID := response.ID if execID == "" { fmt.Fprintf(cli.out, "exec ID empty") return nil } //Temp struct for execStart so that we don't need to transfer all the execConfig if !execConfig.Detach { if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { return err } } else { execStartCheck := types.ExecStartCheck{ Detach: execConfig.Detach, Tty: execConfig.Tty, } if err := cli.client.ContainerExecStart(execID, execStartCheck); err != nil { return err } // For now don't print this - wait for when we support exec wait() // fmt.Fprintf(cli.out, "%s\n", execID) return nil } // Interactive exec requested. 
var ( out, stderr io.Writer in io.ReadCloser errCh chan error ) if execConfig.AttachStdin { in = cli.in } if execConfig.AttachStdout { out = cli.out } if execConfig.AttachStderr { if execConfig.Tty { stderr = cli.out } else { stderr = cli.err } } resp, err := cli.client.ContainerExecAttach(execID, *execConfig) if err != nil { return err } defer resp.Close() if in != nil && execConfig.Tty { if err := cli.setRawTerminal(); err != nil { return err } defer cli.restoreTerminal(in) } errCh = promise.Go(func() error { return cli.holdHijackedConnection(execConfig.Tty, in, out, stderr, resp) }) if execConfig.Tty && cli.isTerminalIn { if err := cli.monitorTtySize(execID, true); err != nil { fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) } } if err := <-errCh; err != nil { logrus.Debugf("Error hijack: %s", err) return err } var status int if _, status, err = getExecExitCode(cli, execID); err != nil { return err } if status != 0 { return Cli.StatusError{StatusCode: status} } return nil } // ParseExec parses the specified args for the specified command and generates // an ExecConfig from it. // If the minimal number of specified args is not right or if specified args are // not valid, it will return an error. 
func ParseExec(cmd *flag.FlagSet, args []string) (*types.ExecConfig, error) { var ( flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") execCmd []string container string ) cmd.Require(flag.Min, 2) if err := cmd.ParseFlags(args, true); err != nil { return nil, err } container = cmd.Arg(0) parsedArgs := cmd.Args() execCmd = parsedArgs[1:] execConfig := &types.ExecConfig{ User: *flUser, Privileged: *flPrivileged, Tty: *flTty, Cmd: execCmd, Container: container, Detach: *flDetach, } // If -d is not set, attach to everything by default if !*flDetach { execConfig.AttachStdout = true execConfig.AttachStderr = true if *flStdin { execConfig.AttachStdin = true } } return execConfig, nil } docker-1.10.3/api/client/exec_test.go000066400000000000000000000062201267010174400174060ustar00rootroot00000000000000package client import ( "fmt" "io/ioutil" "testing" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/types" ) type arguments struct { args []string } func TestParseExec(t *testing.T) { invalids := map[*arguments]error{ &arguments{[]string{"-unknown"}}: fmt.Errorf("flag provided but not defined: -unknown"), &arguments{[]string{"-u"}}: fmt.Errorf("flag needs an argument: -u"), &arguments{[]string{"--user"}}: fmt.Errorf("flag needs an argument: --user"), } valids := map[*arguments]*types.ExecConfig{ &arguments{ []string{"container", "command"}, }: { Container: "container", Cmd: []string{"command"}, AttachStdout: true, AttachStderr: true, }, &arguments{ []string{"container", "command1", "command2"}, }: { Container: "container", Cmd: []string{"command1", "command2"}, 
AttachStdout: true, AttachStderr: true, }, &arguments{ []string{"-i", "-t", "-u", "uid", "container", "command"}, }: { User: "uid", AttachStdin: true, AttachStdout: true, AttachStderr: true, Tty: true, Container: "container", Cmd: []string{"command"}, }, &arguments{ []string{"-d", "container", "command"}, }: { AttachStdin: false, AttachStdout: false, AttachStderr: false, Detach: true, Container: "container", Cmd: []string{"command"}, }, &arguments{ []string{"-t", "-i", "-d", "container", "command"}, }: { AttachStdin: false, AttachStdout: false, AttachStderr: false, Detach: true, Tty: true, Container: "container", Cmd: []string{"command"}, }, } for invalid, expectedError := range invalids { cmd := flag.NewFlagSet("exec", flag.ContinueOnError) cmd.ShortUsage = func() {} cmd.SetOutput(ioutil.Discard) _, err := ParseExec(cmd, invalid.args) if err == nil || err.Error() != expectedError.Error() { t.Fatalf("Expected an error [%v] for %v, got %v", expectedError, invalid, err) } } for valid, expectedExecConfig := range valids { cmd := flag.NewFlagSet("exec", flag.ContinueOnError) cmd.ShortUsage = func() {} cmd.SetOutput(ioutil.Discard) execConfig, err := ParseExec(cmd, valid.args) if err != nil { t.Fatal(err) } if !compareExecConfig(expectedExecConfig, execConfig) { t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) } } } func compareExecConfig(config1 *types.ExecConfig, config2 *types.ExecConfig) bool { if config1.AttachStderr != config2.AttachStderr { return false } if config1.AttachStdin != config2.AttachStdin { return false } if config1.AttachStdout != config2.AttachStdout { return false } if config1.Container != config2.Container { return false } if config1.Detach != config2.Detach { return false } if config1.Privileged != config2.Privileged { return false } if config1.Tty != config2.Tty { return false } if config1.User != config2.User { return false } if len(config1.Cmd) != len(config2.Cmd) { return false } for index, value := range 
config1.Cmd { if value != config2.Cmd[index] { return false } } return true } docker-1.10.3/api/client/export.go000066400000000000000000000020351267010174400167440ustar00rootroot00000000000000package client import ( "errors" "io" "os" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdExport exports a filesystem as a tar archive. // // The tar archive is streamed to STDOUT by default or written to a file. // // Usage: docker export [OPTIONS] CONTAINER func (cli *DockerCli) CmdExport(args ...string) error { cmd := Cli.Subcmd("export", []string{"CONTAINER"}, Cli.DockerCommands["export"].Description, true) outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) var ( output = cli.out err error ) if *outfile != "" { output, err = os.Create(*outfile) if err != nil { return err } } else if cli.isTerminalOut { return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") } responseBody, err := cli.client.ContainerExport(cmd.Arg(0)) if err != nil { return err } defer responseBody.Close() _, err = io.Copy(output, responseBody) return err } docker-1.10.3/api/client/formatter/000077500000000000000000000000001267010174400170775ustar00rootroot00000000000000docker-1.10.3/api/client/formatter/custom.go000066400000000000000000000106311267010174400207410ustar00rootroot00000000000000package formatter import ( "fmt" "strconv" "strings" "time" "github.com/docker/docker/api" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/stringutils" "github.com/docker/engine-api/types" "github.com/docker/go-units" ) const ( tableKey = "table" containerIDHeader = "CONTAINER ID" imageHeader = "IMAGE" namesHeader = "NAMES" commandHeader = "COMMAND" createdSinceHeader = "CREATED" createdAtHeader = "CREATED AT" runningForHeader = "CREATED" statusHeader = "STATUS" portsHeader = "PORTS" sizeHeader = "SIZE" labelsHeader = "LABELS" 
imageIDHeader = "IMAGE ID" repositoryHeader = "REPOSITORY" tagHeader = "TAG" digestHeader = "DIGEST" ) type containerContext struct { baseSubContext trunc bool c types.Container } func (c *containerContext) ID() string { c.addHeader(containerIDHeader) if c.trunc { return stringid.TruncateID(c.c.ID) } return c.c.ID } func (c *containerContext) Names() string { c.addHeader(namesHeader) names := stripNamePrefix(c.c.Names) if c.trunc { for _, name := range names { if len(strings.Split(name, "/")) == 1 { names = []string{name} break } } } return strings.Join(names, ",") } func (c *containerContext) Image() string { c.addHeader(imageHeader) if c.c.Image == "" { return "" } if c.trunc { if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { return trunc } } return c.c.Image } func (c *containerContext) Command() string { c.addHeader(commandHeader) command := c.c.Command if c.trunc { command = stringutils.Truncate(command, 20) } return strconv.Quote(command) } func (c *containerContext) CreatedAt() string { c.addHeader(createdAtHeader) return time.Unix(int64(c.c.Created), 0).String() } func (c *containerContext) RunningFor() string { c.addHeader(runningForHeader) createdAt := time.Unix(int64(c.c.Created), 0) return units.HumanDuration(time.Now().UTC().Sub(createdAt)) } func (c *containerContext) Ports() string { c.addHeader(portsHeader) return api.DisplayablePorts(c.c.Ports) } func (c *containerContext) Status() string { c.addHeader(statusHeader) return c.c.Status } func (c *containerContext) Size() string { c.addHeader(sizeHeader) srw := units.HumanSize(float64(c.c.SizeRw)) sv := units.HumanSize(float64(c.c.SizeRootFs)) sf := srw if c.c.SizeRootFs > 0 { sf = fmt.Sprintf("%s (virtual %s)", srw, sv) } return sf } func (c *containerContext) Labels() string { c.addHeader(labelsHeader) if c.c.Labels == nil { return "" } var joinLabels []string for k, v := range c.c.Labels { joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) } return 
strings.Join(joinLabels, ",") } func (c *containerContext) Label(name string) string { n := strings.Split(name, ".") r := strings.NewReplacer("-", " ", "_", " ") h := r.Replace(n[len(n)-1]) c.addHeader(h) if c.c.Labels == nil { return "" } return c.c.Labels[name] } type imageContext struct { baseSubContext trunc bool i types.Image repo string tag string digest string } func (c *imageContext) ID() string { c.addHeader(imageIDHeader) if c.trunc { return stringid.TruncateID(c.i.ID) } return c.i.ID } func (c *imageContext) Repository() string { c.addHeader(repositoryHeader) return c.repo } func (c *imageContext) Tag() string { c.addHeader(tagHeader) return c.tag } func (c *imageContext) Digest() string { c.addHeader(digestHeader) return c.digest } func (c *imageContext) CreatedSince() string { c.addHeader(createdSinceHeader) createdAt := time.Unix(int64(c.i.Created), 0) return units.HumanDuration(time.Now().UTC().Sub(createdAt)) } func (c *imageContext) CreatedAt() string { c.addHeader(createdAtHeader) return time.Unix(int64(c.i.Created), 0).String() } func (c *imageContext) Size() string { c.addHeader(sizeHeader) return units.HumanSize(float64(c.i.Size)) } type subContext interface { fullHeader() string addHeader(header string) } type baseSubContext struct { header []string } func (c *baseSubContext) fullHeader() string { if c.header == nil { return "" } return strings.Join(c.header, "\t") } func (c *baseSubContext) addHeader(header string) { if c.header == nil { c.header = []string{} } c.header = append(c.header, strings.ToUpper(header)) } func stripNamePrefix(ss []string) []string { for i, s := range ss { ss[i] = s[1:] } return ss } docker-1.10.3/api/client/formatter/custom_test.go000066400000000000000000000137401267010174400220040ustar00rootroot00000000000000package formatter import ( "reflect" "strings" "testing" "time" "github.com/docker/docker/pkg/stringid" "github.com/docker/engine-api/types" ) func TestContainerPsContext(t *testing.T) { containerID := 
stringid.GenerateRandomID() unix := time.Now().Unix() var ctx containerContext cases := []struct { container types.Container trunc bool expValue string expHeader string call func() string }{ {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID}, {types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID}, {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, {types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image}, {types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image}, {types.Container{ Image: "a5a665ff33eced1e0803148700880edab4", ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", }, true, "a5a665ff33ec", imageHeader, ctx.Image, }, {types.Container{ Image: "a5a665ff33eced1e0803148700880edab4", ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", }, false, "a5a665ff33eced1e0803148700880edab4", imageHeader, ctx.Image, }, {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, {types.Container{}, true, "", labelsHeader, ctx.Labels}, {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, 
{types.Container{Created: unix}, true, "Less than a second", runningForHeader, ctx.RunningFor}, } for _, c := range cases { ctx = containerContext{c: c.container, trunc: c.trunc} v := c.call() if strings.Contains(v, ",") { compareMultipleValues(t, v, c.expValue) } else if v != c.expValue { t.Fatalf("Expected %s, was %s\n", c.expValue, v) } h := ctx.fullHeader() if h != c.expHeader { t.Fatalf("Expected %s, was %s\n", c.expHeader, h) } } c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} ctx = containerContext{c: c1, trunc: true} sid := ctx.Label("com.docker.swarm.swarm-id") node := ctx.Label("com.docker.swarm.node_name") if sid != "33" { t.Fatalf("Expected 33, was %s\n", sid) } if node != "ubuntu" { t.Fatalf("Expected ubuntu, was %s\n", node) } h := ctx.fullHeader() if h != "SWARM ID\tNODE NAME" { t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) } c2 := types.Container{} ctx = containerContext{c: c2, trunc: true} label := ctx.Label("anything.really") if label != "" { t.Fatalf("Expected an empty string, was %s", label) } ctx = containerContext{c: c2, trunc: true} fullHeader := ctx.fullHeader() if fullHeader != "" { t.Fatalf("Expected fullHeader to be empty, was %s", fullHeader) } } func TestImagesContext(t *testing.T) { imageID := stringid.GenerateRandomID() unix := time.Now().Unix() var ctx imageContext cases := []struct { imageCtx imageContext expValue string expHeader string call func() string }{ {imageContext{ i: types.Image{ID: imageID}, trunc: true, }, stringid.TruncateID(imageID), imageIDHeader, ctx.ID}, {imageContext{ i: types.Image{ID: imageID}, trunc: false, }, imageID, imageIDHeader, ctx.ID}, {imageContext{ i: types.Image{Size: 10}, trunc: true, }, "10 B", sizeHeader, ctx.Size}, {imageContext{ i: types.Image{Created: unix}, trunc: true, }, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, // FIXME // {imageContext{ // i: types.Image{Created: unix}, // trunc: 
true, // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, {imageContext{ i: types.Image{}, repo: "busybox", }, "busybox", repositoryHeader, ctx.Repository}, {imageContext{ i: types.Image{}, tag: "latest", }, "latest", tagHeader, ctx.Tag}, {imageContext{ i: types.Image{}, digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", digestHeader, ctx.Digest}, } for _, c := range cases { ctx = c.imageCtx v := c.call() if strings.Contains(v, ",") { compareMultipleValues(t, v, c.expValue) } else if v != c.expValue { t.Fatalf("Expected %s, was %s\n", c.expValue, v) } h := ctx.fullHeader() if h != c.expHeader { t.Fatalf("Expected %s, was %s\n", c.expHeader, h) } } } func compareMultipleValues(t *testing.T, value, expected string) { // comma-separated values means probably a map input, which won't // be guaranteed to have the same order as our expected value // We'll create maps and use reflect.DeepEquals to check instead: entriesMap := make(map[string]string) expMap := make(map[string]string) entries := strings.Split(value, ",") expectedEntries := strings.Split(expected, ",") for _, entry := range entries { keyval := strings.Split(entry, "=") entriesMap[keyval[0]] = keyval[1] } for _, expected := range expectedEntries { keyval := strings.Split(expected, "=") expMap[keyval[0]] = keyval[1] } if !reflect.DeepEqual(expMap, entriesMap) { t.Fatalf("Expected entries: %v, got: %v", expected, value) } } docker-1.10.3/api/client/formatter/formatter.go000066400000000000000000000141001267010174400214250ustar00rootroot00000000000000package formatter import ( "bytes" "fmt" "io" "strings" "text/tabwriter" "text/template" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" ) const ( tableFormatKey = "table" rawFormatKey = "raw" defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} 
ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" defaultQuietFormat = "{{.ID}}" ) // Context contains information required by the formatter to print the output as desired. type Context struct { // Output is the output stream to which the formatted string is written. Output io.Writer // Format is used to choose raw, table or custom format for the output. Format string // Quiet when set to true will simply print minimal information. Quiet bool // Trunc when set to true will truncate the output of certain fields such as Container ID. Trunc bool // internal element table bool finalFormat string header string buffer *bytes.Buffer } func (c *Context) preformat() { c.finalFormat = c.Format if strings.HasPrefix(c.Format, tableKey) { c.table = true c.finalFormat = c.finalFormat[len(tableKey):] } c.finalFormat = strings.Trim(c.finalFormat, " ") r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") c.finalFormat = r.Replace(c.finalFormat) } func (c *Context) parseFormat() (*template.Template, error) { tmpl, err := template.New("").Parse(c.finalFormat) if err != nil { c.buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) c.buffer.WriteTo(c.Output) } return tmpl, err } func (c *Context) postformat(tmpl *template.Template, subContext subContext) { if c.table { if len(c.header) == 0 { // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template tmpl.Execute(bytes.NewBufferString(""), subContext) c.header = subContext.fullHeader() } t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) t.Write([]byte(c.header)) t.Write([]byte("\n")) c.buffer.WriteTo(t) t.Flush() } else { c.buffer.WriteTo(c.Output) } } func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) 
error { if err := tmpl.Execute(c.buffer, subContext); err != nil { c.buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) c.buffer.WriteTo(c.Output) return err } if c.table && len(c.header) == 0 { c.header = subContext.fullHeader() } c.buffer.WriteString("\n") return nil } // ContainerContext contains container specific information required by the formater, encapsulate a Context struct. type ContainerContext struct { Context // Size when set to true will display the size of the output. Size bool // Containers Containers []types.Container } // ImageContext contains image specific information required by the formater, encapsulate a Context struct. type ImageContext struct { Context Digest bool // Images Images []types.Image } func (ctx ContainerContext) Write() { switch ctx.Format { case tableFormatKey: ctx.Format = defaultContainerTableFormat if ctx.Quiet { ctx.Format = defaultQuietFormat } case rawFormatKey: if ctx.Quiet { ctx.Format = `container_id: {{.ID}}` } else { ctx.Format = `container_id: {{.ID}} image: {{.Image}} command: {{.Command}} created_at: {{.CreatedAt}} status: {{.Status}} names: {{.Names}} labels: {{.Labels}} ports: {{.Ports}} ` if ctx.Size { ctx.Format += `size: {{.Size}} ` } } } ctx.buffer = bytes.NewBufferString("") ctx.preformat() if ctx.table && ctx.Size { ctx.finalFormat += "\t{{.Size}}" } tmpl, err := ctx.parseFormat() if err != nil { return } for _, container := range ctx.Containers { containerCtx := &containerContext{ trunc: ctx.Trunc, c: container, } err = ctx.contextFormat(tmpl, containerCtx) if err != nil { return } } ctx.postformat(tmpl, &containerContext{}) } func (ctx ImageContext) Write() { switch ctx.Format { case tableFormatKey: ctx.Format = defaultImageTableFormat if ctx.Digest { ctx.Format = defaultImageTableFormatWithDigest } if ctx.Quiet { ctx.Format = defaultQuietFormat } case rawFormatKey: if ctx.Quiet { ctx.Format = `image_id: {{.ID}}` } else { if ctx.Digest { ctx.Format = `repository: {{ 
.Repository }} tag: {{.Tag}} digest: {{.Digest}} image_id: {{.ID}} created_at: {{.CreatedAt}} virtual_size: {{.Size}} ` } else { ctx.Format = `repository: {{ .Repository }} tag: {{.Tag}} image_id: {{.ID}} created_at: {{.CreatedAt}} virtual_size: {{.Size}} ` } } } ctx.buffer = bytes.NewBufferString("") ctx.preformat() if ctx.table && ctx.Digest && !strings.Contains(ctx.Format, "{{.Digest}}") { ctx.finalFormat += "\t{{.Digest}}" } tmpl, err := ctx.parseFormat() if err != nil { return } for _, image := range ctx.Images { repoTags := image.RepoTags repoDigests := image.RepoDigests if len(repoTags) == 1 && repoTags[0] == ":" && len(repoDigests) == 1 && repoDigests[0] == "@" { // dangling image - clear out either repoTags or repoDigests so we only show it once below repoDigests = []string{} } // combine the tags and digests lists tagsAndDigests := append(repoTags, repoDigests...) for _, repoAndRef := range tagsAndDigests { repo := "" tag := "" digest := "" if !strings.HasPrefix(repoAndRef, "") { ref, err := reference.ParseNamed(repoAndRef) if err != nil { continue } repo = ref.Name() switch x := ref.(type) { case reference.Canonical: digest = x.Digest().String() case reference.NamedTagged: tag = x.Tag() } } imageCtx := &imageContext{ trunc: ctx.Trunc, i: image, repo: repo, tag: tag, digest: digest, } err = ctx.contextFormat(tmpl, imageCtx) if err != nil { return } } } ctx.postformat(tmpl, &imageContext{}) } docker-1.10.3/api/client/formatter/formatter_test.go000066400000000000000000000252761267010174400225040ustar00rootroot00000000000000package formatter import ( "bytes" "fmt" "testing" "time" "github.com/docker/engine-api/types" ) func TestContainerContextWrite(t *testing.T) { unixTime := time.Now().AddDate(0, 0, -1).Unix() expectedTime := time.Unix(unixTime, 0).String() contexts := []struct { context ContainerContext expected string }{ // Errors { ContainerContext{ Context: Context{ Format: "{{InvalidFunction}}", }, }, `Template parsing error: template: :1: function 
"InvalidFunction" not defined `, }, { ContainerContext{ Context: Context{ Format: "{{nil}}", }, }, `Template parsing error: template: :1:2: executing "" at : nil is not a command `, }, // Table Format { ContainerContext{ Context: Context{ Format: "table", }, }, `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES containerID1 ubuntu "" 24 hours ago foobar_baz containerID2 ubuntu "" 24 hours ago foobar_bar `, }, { ContainerContext{ Context: Context{ Format: "table {{.Image}}", }, }, "IMAGE\nubuntu\nubuntu\n", }, { ContainerContext{ Context: Context{ Format: "table {{.Image}}", }, Size: true, }, "IMAGE SIZE\nubuntu 0 B\nubuntu 0 B\n", }, { ContainerContext{ Context: Context{ Format: "table {{.Image}}", Quiet: true, }, }, "IMAGE\nubuntu\nubuntu\n", }, { ContainerContext{ Context: Context{ Format: "table", Quiet: true, }, }, "containerID1\ncontainerID2\n", }, // Raw Format { ContainerContext{ Context: Context{ Format: "raw", }, }, fmt.Sprintf(`container_id: containerID1 image: ubuntu command: "" created_at: %s status: names: foobar_baz labels: ports: container_id: containerID2 image: ubuntu command: "" created_at: %s status: names: foobar_bar labels: ports: `, expectedTime, expectedTime), }, { ContainerContext{ Context: Context{ Format: "raw", }, Size: true, }, fmt.Sprintf(`container_id: containerID1 image: ubuntu command: "" created_at: %s status: names: foobar_baz labels: ports: size: 0 B container_id: containerID2 image: ubuntu command: "" created_at: %s status: names: foobar_bar labels: ports: size: 0 B `, expectedTime, expectedTime), }, { ContainerContext{ Context: Context{ Format: "raw", Quiet: true, }, }, "container_id: containerID1\ncontainer_id: containerID2\n", }, // Custom Format { ContainerContext{ Context: Context{ Format: "{{.Image}}", }, }, "ubuntu\nubuntu\n", }, { ContainerContext{ Context: Context{ Format: "{{.Image}}", }, Size: true, }, "ubuntu\nubuntu\n", }, } for _, context := range contexts { containers := []types.Container{ {ID: "containerID1", 
Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, } out := bytes.NewBufferString("") context.context.Output = out context.context.Containers = containers context.context.Write() actual := out.String() if actual != context.expected { t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) } // Clean buffer out.Reset() } } func TestContainerContextWriteWithNoContainers(t *testing.T) { out := bytes.NewBufferString("") containers := []types.Container{} contexts := []struct { context ContainerContext expected string }{ { ContainerContext{ Context: Context{ Format: "{{.Image}}", Output: out, }, }, "", }, { ContainerContext{ Context: Context{ Format: "table {{.Image}}", Output: out, }, }, "IMAGE\n", }, { ContainerContext{ Context: Context{ Format: "{{.Image}}", Output: out, }, Size: true, }, "", }, { ContainerContext{ Context: Context{ Format: "table {{.Image}}", Output: out, }, Size: true, }, "IMAGE SIZE\n", }, } for _, context := range contexts { context.context.Containers = containers context.context.Write() actual := out.String() if actual != context.expected { t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) } // Clean buffer out.Reset() } } func TestImageContextWrite(t *testing.T) { unixTime := time.Now().AddDate(0, 0, -1).Unix() expectedTime := time.Unix(unixTime, 0).String() contexts := []struct { context ImageContext expected string }{ // Errors { ImageContext{ Context: Context{ Format: "{{InvalidFunction}}", }, }, `Template parsing error: template: :1: function "InvalidFunction" not defined `, }, { ImageContext{ Context: Context{ Format: "{{nil}}", }, }, `Template parsing error: template: :1:2: executing "" at : nil is not a command `, }, // Table Format { ImageContext{ Context: Context{ Format: "table", }, }, `REPOSITORY TAG IMAGE ID CREATED SIZE image tag1 imageID1 24 hours ago 0 B image imageID1 24 hours ago 0 B image tag2 imageID2 
24 hours ago 0 B imageID3 24 hours ago 0 B `, }, { ImageContext{ Context: Context{ Format: "table {{.Repository}}", }, }, "REPOSITORY\nimage\nimage\nimage\n\n", }, { ImageContext{ Context: Context{ Format: "table {{.Repository}}", }, Digest: true, }, `REPOSITORY DIGEST image image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf image `, }, { ImageContext{ Context: Context{ Format: "table {{.Repository}}", Quiet: true, }, }, "REPOSITORY\nimage\nimage\nimage\n\n", }, { ImageContext{ Context: Context{ Format: "table", Quiet: true, }, }, "imageID1\nimageID1\nimageID2\nimageID3\n", }, { ImageContext{ Context: Context{ Format: "table", Quiet: false, }, Digest: true, }, `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE image tag1 imageID1 24 hours ago 0 B image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0 B image tag2 imageID2 24 hours ago 0 B imageID3 24 hours ago 0 B `, }, { ImageContext{ Context: Context{ Format: "table", Quiet: true, }, Digest: true, }, "imageID1\nimageID1\nimageID2\nimageID3\n", }, // Raw Format { ImageContext{ Context: Context{ Format: "raw", }, }, fmt.Sprintf(`repository: image tag: tag1 image_id: imageID1 created_at: %s virtual_size: 0 B repository: image tag: image_id: imageID1 created_at: %s virtual_size: 0 B repository: image tag: tag2 image_id: imageID2 created_at: %s virtual_size: 0 B repository: tag: image_id: imageID3 created_at: %s virtual_size: 0 B `, expectedTime, expectedTime, expectedTime, expectedTime), }, { ImageContext{ Context: Context{ Format: "raw", }, Digest: true, }, fmt.Sprintf(`repository: image tag: tag1 digest: image_id: imageID1 created_at: %s virtual_size: 0 B repository: image tag: digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf image_id: imageID1 created_at: %s virtual_size: 0 B repository: image tag: tag2 digest: image_id: imageID2 created_at: %s virtual_size: 0 B repository: tag: digest: image_id: imageID3 created_at: 
%s virtual_size: 0 B `, expectedTime, expectedTime, expectedTime, expectedTime), }, { ImageContext{ Context: Context{ Format: "raw", Quiet: true, }, }, `image_id: imageID1 image_id: imageID1 image_id: imageID2 image_id: imageID3 `, }, // Custom Format { ImageContext{ Context: Context{ Format: "{{.Repository}}", }, }, "image\nimage\nimage\n\n", }, { ImageContext{ Context: Context{ Format: "{{.Repository}}", }, Digest: true, }, "image\nimage\nimage\n\n", }, } for _, context := range contexts { images := []types.Image{ {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, } out := bytes.NewBufferString("") context.context.Output = out context.context.Images = images context.context.Write() actual := out.String() if actual != context.expected { t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) } // Clean buffer out.Reset() } } func TestImageContextWriteWithNoImage(t *testing.T) { out := bytes.NewBufferString("") images := []types.Image{} contexts := []struct { context ImageContext expected string }{ { ImageContext{ Context: Context{ Format: "{{.Repository}}", Output: out, }, }, "", }, { ImageContext{ Context: Context{ Format: "table {{.Repository}}", Output: out, }, }, "REPOSITORY\n", }, { ImageContext{ Context: Context{ Format: "{{.Repository}}", Output: out, }, Digest: true, }, "", }, { ImageContext{ Context: Context{ Format: "table {{.Repository}}", Output: out, }, Digest: true, }, "REPOSITORY DIGEST\n", }, } for _, context := range contexts { context.context.Images = images context.context.Write() actual := out.String() if actual != context.expected { t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) } // Clean buffer out.Reset() } } 
docker-1.10.3/api/client/hijack.go000066400000000000000000000024261267010174400166600ustar00rootroot00000000000000package client import ( "io" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/engine-api/types" ) func (cli *DockerCli) holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { var err error receiveStdout := make(chan error, 1) if outputStream != nil || errorStream != nil { go func() { // When TTY is ON, use regular copy if tty && outputStream != nil { _, err = io.Copy(outputStream, resp.Reader) } else { _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) } logrus.Debugf("[hijack] End of stdout") receiveStdout <- err }() } stdinDone := make(chan struct{}) go func() { if inputStream != nil { io.Copy(resp.Conn, inputStream) logrus.Debugf("[hijack] End of stdin") } if err := resp.CloseWrite(); err != nil { logrus.Debugf("Couldn't send EOF: %s", err) } close(stdinDone) }() select { case err := <-receiveStdout: if err != nil { logrus.Debugf("Error receiveStdout: %s", err) return err } case <-stdinDone: if outputStream != nil || errorStream != nil { if err := <-receiveStdout; err != nil { logrus.Debugf("Error receiveStdout: %s", err) return err } } } return nil } docker-1.10.3/api/client/history.go000066400000000000000000000036471267010174400171360ustar00rootroot00000000000000package client import ( "fmt" "strconv" "strings" "text/tabwriter" "time" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/stringutils" "github.com/docker/go-units" ) // CmdHistory shows the history of an image. 
// // Usage: docker history [OPTIONS] IMAGE func (cli *DockerCli) CmdHistory(args ...string) error { cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true) human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) history, err := cli.client.ImageHistory(cmd.Arg(0)) if err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if *quiet { for _, entry := range history { if *noTrunc { fmt.Fprintf(w, "%s\n", entry.ID) } else { fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) } } w.Flush() return nil } var imageID string var createdBy string var created string var size string fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") for _, entry := range history { imageID = entry.ID createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) if *noTrunc == false { createdBy = stringutils.Truncate(createdBy, 45) imageID = stringid.TruncateID(entry.ID) } if *human { created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" size = units.HumanSize(float64(entry.Size)) } else { created = time.Unix(entry.Created, 0).Format(time.RFC3339) size = strconv.FormatInt(entry.Size, 10) } fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) } w.Flush() return nil } docker-1.10.3/api/client/images.go000066400000000000000000000041271267010174400166740ustar00rootroot00000000000000package client import ( "github.com/docker/docker/api/client/formatter" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" ) // CmdImages lists the images in a specified repository, or all top-level 
images if no repository is specified. // // Usage: docker images [OPTIONS] [REPOSITORY] func (cli *DockerCli) CmdImages(args ...string) error { cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true) quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") format := cmd.String([]string{"-format"}, "", "Pretty-print images using a Go template") flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") cmd.Require(flag.Max, 1) cmd.ParseFlags(args, true) // Consolidate all filter flags, and sanity check them early. // They'll get process in the daemon/server. imageFilterArgs := filters.NewArgs() for _, f := range flFilter.GetAll() { var err error imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) if err != nil { return err } } var matchName string if cmd.NArg() == 1 { matchName = cmd.Arg(0) } options := types.ImageListOptions{ MatchName: matchName, All: *all, Filters: imageFilterArgs, } images, err := cli.client.ImageList(options) if err != nil { return err } f := *format if len(f) == 0 { if len(cli.ImagesFormat()) > 0 && !*quiet { f = cli.ImagesFormat() } else { f = "table" } } imagesCtx := formatter.ImageContext{ Context: formatter.Context{ Output: cli.out, Format: f, Quiet: *quiet, Trunc: !*noTrunc, }, Digest: *showDigests, Images: images, } imagesCtx.Write() return nil } docker-1.10.3/api/client/import.go000066400000000000000000000043721267010174400167430ustar00rootroot00000000000000package client import ( "fmt" "io" "os" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/jsonmessage" flag "github.com/docker/docker/pkg/mflag" 
"github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" ) // CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image. // // The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to local file relative to docker client. If the URL is '-', then the tar file is read from STDIN. // // Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] func (cli *DockerCli) CmdImport(args ...string) error { cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true) flChanges := opts.NewListOpts(nil) cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var ( in io.Reader tag string src = cmd.Arg(0) srcName = src repository = cmd.Arg(1) changes = flChanges.GetAll() ) if cmd.NArg() == 3 { fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. 
Please use file|URL|- [REPOSITORY[:TAG]]\n") tag = cmd.Arg(2) } if repository != "" { //Check if the given image name can be resolved if _, err := reference.ParseNamed(repository); err != nil { return err } } if src == "-" { in = cli.in } else if !urlutil.IsURL(src) { srcName = "-" file, err := os.Open(src) if err != nil { return err } defer file.Close() in = file } options := types.ImageImportOptions{ Source: in, SourceName: srcName, RepositoryName: repository, Message: *message, Tag: tag, Changes: changes, } responseBody, err := cli.client.ImageImport(options) if err != nil { return err } defer responseBody.Close() return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) } docker-1.10.3/api/client/info.go000066400000000000000000000122471267010174400163640ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" "github.com/docker/docker/pkg/ioutils" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/go-units" ) // CmdInfo displays system-wide information. 
// // Usage: docker info func (cli *DockerCli) CmdInfo(args ...string) error { cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true) cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, true) info, err := cli.client.Info() if err != nil { return err } fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) fmt.Fprintf(cli.out, " Running: %d\n", info.ContainersRunning) fmt.Fprintf(cli.out, " Paused: %d\n", info.ContainersPaused) fmt.Fprintf(cli.out, " Stopped: %d\n", info.ContainersStopped) fmt.Fprintf(cli.out, "Images: %d\n", info.Images) ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion) ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) if info.DriverStatus != nil { for _, pair := range info.DriverStatus { fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) // print a warning if devicemapper is using a loopback file if pair[0] == "Data loop file" { fmt.Fprintln(cli.err, " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Either use `--storage-opt dm.thinpooldev` or use `--storage-opt dm.no_warn_on_loop_devices=true` to suppress this warning.") } } } if info.SystemStatus != nil { for _, pair := range info.SystemStatus { fmt.Fprintf(cli.out, "%s: %s\n", pair[0], pair[1]) } } ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) fmt.Fprintf(cli.out, "Plugins: \n") fmt.Fprintf(cli.out, " Volume:") fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Volume, " ")) fmt.Fprintf(cli.out, "\n") fmt.Fprintf(cli.out, " Network:") fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Network, " ")) fmt.Fprintf(cli.out, "\n") if len(info.Plugins.Authorization) != 0 { fmt.Fprintf(cli.out, " Authorization:") fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Authorization, " ")) fmt.Fprintf(cli.out, "\n") } ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType) ioutils.FprintfIfNotEmpty(cli.out, "Architecture: %s\n", info.Architecture) fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) if info.Debug { fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd) fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines) fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime) fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener) fmt.Fprintf(cli.out, " Init SHA1: %s\n", info.InitSha1) fmt.Fprintf(cli.out, " Init Path: %s\n", info.InitPath) fmt.Fprintf(cli.out, " Docker Root Dir: %s\n", info.DockerRootDir) } ioutils.FprintfIfNotEmpty(cli.out, "Http 
Proxy: %s\n", info.HTTPProxy) ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy) ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) if info.IndexServerAddress != "" { u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username if len(u) > 0 { fmt.Fprintf(cli.out, "Username: %v\n", u) fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) } } // Only output these warnings if the server does not support these features if info.OSType != "windows" { if !info.MemoryLimit { fmt.Fprintln(cli.err, "WARNING: No memory limit support") } if !info.SwapLimit { fmt.Fprintln(cli.err, "WARNING: No swap limit support") } if !info.OomKillDisable { fmt.Fprintln(cli.err, "WARNING: No oom kill disable support") } if !info.CPUCfsQuota { fmt.Fprintln(cli.err, "WARNING: No cpu cfs quota support") } if !info.CPUCfsPeriod { fmt.Fprintln(cli.err, "WARNING: No cpu cfs period support") } if !info.CPUShares { fmt.Fprintln(cli.err, "WARNING: No cpu shares support") } if !info.CPUSet { fmt.Fprintln(cli.err, "WARNING: No cpuset support") } if !info.IPv4Forwarding { fmt.Fprintln(cli.err, "WARNING: IPv4 forwarding is disabled") } if !info.BridgeNfIptables { fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-iptables is disabled") } if !info.BridgeNfIP6tables { fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled") } } if info.Labels != nil { fmt.Fprintln(cli.out, "Labels:") for _, attribute := range info.Labels { fmt.Fprintf(cli.out, " %s\n", attribute) } } ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild) if info.ClusterStore != "" { fmt.Fprintf(cli.out, "Cluster store: %s\n", info.ClusterStore) } if info.ClusterAdvertise != "" { fmt.Fprintf(cli.out, "Cluster advertise: %s\n", info.ClusterAdvertise) } return nil } docker-1.10.3/api/client/inspect.go000066400000000000000000000075571267010174400171060ustar00rootroot00000000000000package client import ( "encoding/json" "fmt" "text/template" 
"github.com/docker/docker/api/client/inspect" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/client" ) var funcMap = template.FuncMap{ "json": func(v interface{}) string { a, _ := json.Marshal(v) return string(a) }, } // CmdInspect displays low-level information on one or more containers or images. // // Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] func (cli *DockerCli) CmdInspect(args ...string) error { cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true) tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) if *inspectType != "" && *inspectType != "container" && *inspectType != "image" { return fmt.Errorf("%q is not a valid value for --type", *inspectType) } var elementSearcher inspectSearcher switch *inspectType { case "container": elementSearcher = cli.inspectContainers(*size) case "image": elementSearcher = cli.inspectImages(*size) default: elementSearcher = cli.inspectAll(*size) } return cli.inspectElements(*tmplStr, cmd.Args(), elementSearcher) } func (cli *DockerCli) inspectContainers(getSize bool) inspectSearcher { return func(ref string) (interface{}, []byte, error) { return cli.client.ContainerInspectWithRaw(ref, getSize) } } func (cli *DockerCli) inspectImages(getSize bool) inspectSearcher { return func(ref string) (interface{}, []byte, error) { return cli.client.ImageInspectWithRaw(ref, getSize) } } func (cli *DockerCli) inspectAll(getSize bool) inspectSearcher { return func(ref string) (interface{}, []byte, error) { c, rawContainer, err := cli.client.ContainerInspectWithRaw(ref, getSize) if 
err != nil { // Search for image with that id if a container doesn't exist. if client.IsErrContainerNotFound(err) { i, rawImage, err := cli.client.ImageInspectWithRaw(ref, getSize) if err != nil { if client.IsErrImageNotFound(err) { return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref) } return nil, nil, err } return i, rawImage, err } return nil, nil, err } return c, rawContainer, err } } type inspectSearcher func(ref string) (interface{}, []byte, error) func (cli *DockerCli) inspectElements(tmplStr string, references []string, searchByReference inspectSearcher) error { elementInspector, err := cli.newInspectorWithTemplate(tmplStr) if err != nil { return Cli.StatusError{StatusCode: 64, Status: err.Error()} } var inspectErr error for _, ref := range references { element, raw, err := searchByReference(ref) if err != nil { inspectErr = err break } if err := elementInspector.Inspect(element, raw); err != nil { inspectErr = err break } } if err := elementInspector.Flush(); err != nil { cli.inspectErrorStatus(err) } if status := cli.inspectErrorStatus(inspectErr); status != 0 { return Cli.StatusError{StatusCode: status} } return nil } func (cli *DockerCli) inspectErrorStatus(err error) (status int) { if err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 } return } func (cli *DockerCli) newInspectorWithTemplate(tmplStr string) (inspect.Inspector, error) { elementInspector := inspect.NewIndentedInspector(cli.out) if tmplStr != "" { tmpl, err := template.New("").Funcs(funcMap).Parse(tmplStr) if err != nil { return nil, fmt.Errorf("Template parsing error: %s", err) } elementInspector = inspect.NewTemplateInspector(cli.out, tmpl) } return elementInspector, nil } docker-1.10.3/api/client/inspect/000077500000000000000000000000001267010174400165415ustar00rootroot00000000000000docker-1.10.3/api/client/inspect/inspector.go000066400000000000000000000062121267010174400210770ustar00rootroot00000000000000package inspect import ( "bytes" "encoding/json" 
"fmt" "io" "text/template" ) // Inspector defines an interface to implement to process elements type Inspector interface { Inspect(typedElement interface{}, rawElement []byte) error Flush() error } // TemplateInspector uses a text template to inspect elements. type TemplateInspector struct { outputStream io.Writer buffer *bytes.Buffer tmpl *template.Template } // NewTemplateInspector creates a new inspector with a template. func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { return &TemplateInspector{ outputStream: outputStream, buffer: new(bytes.Buffer), tmpl: tmpl, } } // Inspect executes the inspect template. // It decodes the raw element into a map if the initial execution fails. // This allows docker cli to parse inspect structs injected with Swarm fields. func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { buffer := new(bytes.Buffer) if err := i.tmpl.Execute(buffer, typedElement); err != nil { if rawElement == nil { return fmt.Errorf("Template parsing error: %v", err) } return i.tryRawInspectFallback(rawElement, err) } i.buffer.Write(buffer.Bytes()) i.buffer.WriteByte('\n') return nil } // Flush write the result of inspecting all elements into the output stream. func (i *TemplateInspector) Flush() error { if i.buffer.Len() == 0 { _, err := io.WriteString(i.outputStream, "\n") return err } _, err := io.Copy(i.outputStream, i.buffer) return err } // IndentedInspector uses a buffer to stop the indented representation of an element. type IndentedInspector struct { outputStream io.Writer elements []interface{} rawElements [][]byte } // NewIndentedInspector generates a new IndentedInspector. func NewIndentedInspector(outputStream io.Writer) Inspector { return &IndentedInspector{ outputStream: outputStream, } } // Inspect writes the raw element with an indented json format. 
func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { if rawElement != nil { i.rawElements = append(i.rawElements, rawElement) } else { i.elements = append(i.elements, typedElement) } return nil } // Flush write the result of inspecting all elements into the output stream. func (i *IndentedInspector) Flush() error { if len(i.elements) == 0 && len(i.rawElements) == 0 { _, err := io.WriteString(i.outputStream, "[]\n") return err } var buffer io.Reader if len(i.rawElements) > 0 { bytesBuffer := new(bytes.Buffer) bytesBuffer.WriteString("[") for idx, r := range i.rawElements { bytesBuffer.Write(r) if idx < len(i.rawElements)-1 { bytesBuffer.WriteString(",") } } bytesBuffer.WriteString("]") indented := new(bytes.Buffer) if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { return err } buffer = indented } else { b, err := json.MarshalIndent(i.elements, "", " ") if err != nil { return err } buffer = bytes.NewReader(b) } if _, err := io.Copy(i.outputStream, buffer); err != nil { return err } _, err := io.WriteString(i.outputStream, "\n") return err } docker-1.10.3/api/client/inspect/inspector_go14.go000066400000000000000000000024361267010174400217350ustar00rootroot00000000000000// +build !go1.5 package inspect import ( "bytes" "encoding/json" "fmt" "strings" ) // tryeRawInspectFallback executes the inspect template with a raw interface. // This allows docker cli to parse inspect structs injected with Swarm fields. // Unfortunately, go 1.4 doesn't fail executing invalid templates when the input is an interface. // It doesn't allow to modify this behavior either, sending messages to the output. // We assume that the template is invalid when there is a , if the template was valid // we'd get or "" values. In that case we fail with the original error raised executing the // template with the typed input. 
func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, originalErr error) error { var raw interface{} buffer := new(bytes.Buffer) rdr := bytes.NewReader(rawElement) dec := json.NewDecoder(rdr) if rawErr := dec.Decode(&raw); rawErr != nil { return fmt.Errorf("unable to read inspect data: %v", rawErr) } if rawErr := i.tmpl.Execute(buffer, raw); rawErr != nil { return fmt.Errorf("Template parsing error: %v", rawErr) } if strings.Contains(buffer.String(), "") { return fmt.Errorf("Template parsing error: %v", originalErr) } i.buffer.Write(buffer.Bytes()) i.buffer.WriteByte('\n') return nil } docker-1.10.3/api/client/inspect/inspector_go15.go000066400000000000000000000012121267010174400217250ustar00rootroot00000000000000// +build go1.5 package inspect import ( "bytes" "encoding/json" "fmt" ) func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, _ error) error { var raw interface{} buffer := new(bytes.Buffer) rdr := bytes.NewReader(rawElement) dec := json.NewDecoder(rdr) if rawErr := dec.Decode(&raw); rawErr != nil { return fmt.Errorf("unable to read inspect data: %v", rawErr) } tmplMissingKey := i.tmpl.Option("missingkey=error") if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { return fmt.Errorf("Template parsing error: %v", rawErr) } i.buffer.Write(buffer.Bytes()) i.buffer.WriteByte('\n') return nil } docker-1.10.3/api/client/inspect/inspector_test.go000066400000000000000000000107511267010174400221410ustar00rootroot00000000000000package inspect import ( "bytes" "strings" "testing" "text/template" ) type testElement struct { DNS string `json:"Dns"` } func TestTemplateInspectorDefault(t *testing.T) { b := new(bytes.Buffer) tmpl, err := template.New("test").Parse("{{.DNS}}") if err != nil { t.Fatal(err) } i := NewTemplateInspector(b, tmpl) if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { t.Fatal(err) } if err := i.Flush(); err != nil { t.Fatal(err) } if b.String() != "0.0.0.0\n" { t.Fatalf("Expected `0.0.0.0\\n`, 
got `%s`", b.String()) } } func TestTemplateInspectorEmpty(t *testing.T) { b := new(bytes.Buffer) tmpl, err := template.New("test").Parse("{{.DNS}}") if err != nil { t.Fatal(err) } i := NewTemplateInspector(b, tmpl) if err := i.Flush(); err != nil { t.Fatal(err) } if b.String() != "\n" { t.Fatalf("Expected `\\n`, got `%s`", b.String()) } } func TestTemplateInspectorTemplateError(t *testing.T) { b := new(bytes.Buffer) tmpl, err := template.New("test").Parse("{{.Foo}}") if err != nil { t.Fatal(err) } i := NewTemplateInspector(b, tmpl) err = i.Inspect(testElement{"0.0.0.0"}, nil) if err == nil { t.Fatal("Expected error got nil") } if !strings.HasPrefix(err.Error(), "Template parsing error") { t.Fatalf("Expected template error, got %v", err) } } func TestTemplateInspectorRawFallback(t *testing.T) { b := new(bytes.Buffer) tmpl, err := template.New("test").Parse("{{.Dns}}") if err != nil { t.Fatal(err) } i := NewTemplateInspector(b, tmpl) if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { t.Fatal(err) } if err := i.Flush(); err != nil { t.Fatal(err) } if b.String() != "0.0.0.0\n" { t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) } } func TestTemplateInspectorRawFallbackError(t *testing.T) { b := new(bytes.Buffer) tmpl, err := template.New("test").Parse("{{.Dns}}") if err != nil { t.Fatal(err) } i := NewTemplateInspector(b, tmpl) err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) if err == nil { t.Fatal("Expected error got nil") } if !strings.HasPrefix(err.Error(), "Template parsing error") { t.Fatalf("Expected template error, got %v", err) } } func TestTemplateInspectorMultiple(t *testing.T) { b := new(bytes.Buffer) tmpl, err := template.New("test").Parse("{{.DNS}}") if err != nil { t.Fatal(err) } i := NewTemplateInspector(b, tmpl) if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { t.Fatal(err) } if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { t.Fatal(err) } if err := i.Flush(); 
err != nil { t.Fatal(err) } if b.String() != "0.0.0.0\n1.1.1.1\n" { t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got `%s`", b.String()) } } func TestIndentedInspectorDefault(t *testing.T) { b := new(bytes.Buffer) i := NewIndentedInspector(b) if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { t.Fatal(err) } if err := i.Flush(); err != nil { t.Fatal(err) } expected := `[ { "Dns": "0.0.0.0" } ] ` if b.String() != expected { t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) } } func TestIndentedInspectorMultiple(t *testing.T) { b := new(bytes.Buffer) i := NewIndentedInspector(b) if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { t.Fatal(err) } if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { t.Fatal(err) } if err := i.Flush(); err != nil { t.Fatal(err) } expected := `[ { "Dns": "0.0.0.0" }, { "Dns": "1.1.1.1" } ] ` if b.String() != expected { t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) } } func TestIndentedInspectorEmpty(t *testing.T) { b := new(bytes.Buffer) i := NewIndentedInspector(b) if err := i.Flush(); err != nil { t.Fatal(err) } expected := "[]\n" if b.String() != expected { t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) } } func TestIndentedInspectorRawElements(t *testing.T) { b := new(bytes.Buffer) i := NewIndentedInspector(b) if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { t.Fatal(err) } if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { t.Fatal(err) } if err := i.Flush(); err != nil { t.Fatal(err) } expected := `[ { "Dns": "0.0.0.0", "Node": "0" }, { "Dns": "1.1.1.1", "Node": "1" } ] ` if b.String() != expected { t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) } } docker-1.10.3/api/client/kill.go000066400000000000000000000016431267010174400163620ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag 
"github.com/docker/docker/pkg/mflag" ) // CmdKill kills one or more running container using SIGKILL or a specified signal. // // Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] func (cli *DockerCli) CmdKill(args ...string) error { cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["kill"].Description, true) signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var errs []string for _, name := range cmd.Args() { if err := cli.client.ContainerKill(name, *signal); err != nil { errs = append(errs, fmt.Sprintf("Failed to kill container (%s): %s", name, err)) } else { fmt.Fprintf(cli.out, "%s\n", name) } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/client/load.go000066400000000000000000000020551267010174400163440ustar00rootroot00000000000000package client import ( "io" "os" Cli "github.com/docker/docker/cli" "github.com/docker/docker/pkg/jsonmessage" flag "github.com/docker/docker/pkg/mflag" ) // CmdLoad loads an image from a tar archive. // // The tar archive is read from STDIN by default, or from a tar archive file. 
// // Usage: docker load [OPTIONS] func (cli *DockerCli) CmdLoad(args ...string) error { cmd := Cli.Subcmd("load", nil, Cli.DockerCommands["load"].Description, true) infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, true) var input io.Reader = cli.in if *infile != "" { file, err := os.Open(*infile) if err != nil { return err } defer file.Close() input = file } response, err := cli.client.ImageLoad(input) if err != nil { return err } defer response.Body.Close() if response.JSON { return jsonmessage.DisplayJSONMessagesStream(response.Body, cli.out, cli.outFd, cli.isTerminalOut, nil) } _, err = io.Copy(cli.out, response.Body) return err } docker-1.10.3/api/client/login.go000066400000000000000000000077161267010174400165460ustar00rootroot00000000000000package client import ( "bufio" "fmt" "io" "os" "runtime" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/term" "github.com/docker/engine-api/client" "github.com/docker/engine-api/types" ) // CmdLogin logs in or registers a user to a Docker registry service. // // If no server is specified, the user will be logged into or registered to the registry's index server. // // Usage: docker login SERVER func (cli *DockerCli) CmdLogin(args ...string) error { cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) cmd.Require(flag.Max, 1) flUser := cmd.String([]string{"u", "-username"}, "", "Username") flPassword := cmd.String([]string{"p", "-password"}, "", "Password") flEmail := cmd.String([]string{"e", "-email"}, "", "Email") cmd.ParseFlags(args, true) // On Windows, force the use of the regular OS stdin stream. 
Fixes #14336/#14210 if runtime.GOOS == "windows" { cli.in = os.Stdin } var serverAddress string if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) } else { serverAddress = cli.electAuthServer() } authConfig, err := cli.configureAuth(*flUser, *flPassword, *flEmail, serverAddress) if err != nil { return err } response, err := cli.client.RegistryLogin(authConfig) if err != nil { if client.IsErrUnauthorized(err) { delete(cli.configFile.AuthConfigs, serverAddress) if err2 := cli.configFile.Save(); err2 != nil { fmt.Fprintf(cli.out, "WARNING: could not save config file: %v\n", err2) } } return err } if err := cli.configFile.Save(); err != nil { return fmt.Errorf("Error saving config file: %v", err) } fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s\n", cli.configFile.Filename()) if response.Status != "" { fmt.Fprintf(cli.out, "%s\n", response.Status) } return nil } func (cli *DockerCli) promptWithDefault(prompt string, configDefault string) { if configDefault == "" { fmt.Fprintf(cli.out, "%s: ", prompt) } else { fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) } } func (cli *DockerCli) configureAuth(flUser, flPassword, flEmail, serverAddress string) (types.AuthConfig, error) { authconfig, ok := cli.configFile.AuthConfigs[serverAddress] if !ok { authconfig = types.AuthConfig{} } if flUser == "" { cli.promptWithDefault("Username", authconfig.Username) flUser = readInput(cli.in, cli.out) flUser = strings.TrimSpace(flUser) if flUser == "" { flUser = authconfig.Username } } if flPassword == "" { oldState, err := term.SaveState(cli.inFd) if err != nil { return authconfig, err } fmt.Fprintf(cli.out, "Password: ") term.DisableEcho(cli.inFd, oldState) flPassword = readInput(cli.in, cli.out) fmt.Fprint(cli.out, "\n") term.RestoreTerminal(cli.inFd, oldState) if flPassword == "" { return authconfig, fmt.Errorf("Error : Password Required") } } // Assume that a different username means they may not want to use // the email from the config file, so prompt it if 
flUser != authconfig.Username { if flEmail == "" { cli.promptWithDefault("Email", authconfig.Email) flEmail = readInput(cli.in, cli.out) if flEmail == "" { flEmail = authconfig.Email } } } else { // However, if they don't override the username use the // email from the cmd line if specified. IOW, allow // then to change/override them. And if not specified, just // use what's in the config file if flEmail == "" { flEmail = authconfig.Email } } authconfig.Username = flUser authconfig.Password = flPassword authconfig.Email = flEmail authconfig.ServerAddress = serverAddress cli.configFile.AuthConfigs[serverAddress] = authconfig return authconfig, nil } func readInput(in io.Reader, out io.Writer) string { reader := bufio.NewReader(in) line, _, err := reader.ReadLine() if err != nil { fmt.Fprintln(out, err.Error()) os.Exit(1) } return string(line) } docker-1.10.3/api/client/logout.go000066400000000000000000000021241267010174400167330ustar00rootroot00000000000000package client import ( "fmt" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdLogout logs a user out from a Docker registry. // // If no server is specified, the user will be logged out from the registry's index server. 
// // Usage: docker logout [SERVER] func (cli *DockerCli) CmdLogout(args ...string) error { cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) cmd.Require(flag.Max, 1) cmd.ParseFlags(args, true) var serverAddress string if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) } else { serverAddress = cli.electAuthServer() } if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) return nil } fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) delete(cli.configFile.AuthConfigs, serverAddress) if err := cli.configFile.Save(); err != nil { return fmt.Errorf("Failed to save docker config: %v", err) } return nil } docker-1.10.3/api/client/logs.go000066400000000000000000000031731267010174400163730ustar00rootroot00000000000000package client import ( "fmt" "io" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/engine-api/types" ) var validDrivers = map[string]bool{ "json-file": true, "journald": true, } // CmdLogs fetches the logs of a given container. 
// // docker logs [OPTIONS] CONTAINER func (cli *DockerCli) CmdLogs(args ...string) error { cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, Cli.DockerCommands["logs"].Description, true) follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") since := cmd.String([]string{"-since"}, "", "Show logs since timestamp") times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) name := cmd.Arg(0) c, err := cli.client.ContainerInspect(name) if err != nil { return err } if !validDrivers[c.HostConfig.LogConfig.Type] { return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) } options := types.ContainerLogsOptions{ ContainerID: name, ShowStdout: true, ShowStderr: true, Since: *since, Timestamps: *times, Follow: *follow, Tail: *tail, } responseBody, err := cli.client.ContainerLogs(options) if err != nil { return err } defer responseBody.Close() if c.Config.Tty { _, err = io.Copy(cli.out, responseBody) } else { _, err = stdcopy.StdCopy(cli.out, cli.err, responseBody) } return err } docker-1.10.3/api/client/network.go000066400000000000000000000261411267010174400171200ustar00rootroot00000000000000package client import ( "fmt" "net" "strings" "text/tabwriter" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/stringid" runconfigopts "github.com/docker/docker/runconfig/opts" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" "github.com/docker/engine-api/types/network" ) // CmdNetwork is the parent subcommand for all network commands // // Usage: docker network [OPTIONS] func (cli *DockerCli) CmdNetwork(args ...string) error { cmd := Cli.Subcmd("network", []string{"COMMAND [OPTIONS]"}, 
networkUsage(), false) cmd.Require(flag.Min, 1) err := cmd.ParseFlags(args, true) cmd.Usage() return err } // CmdNetworkCreate creates a new network with a given name // // Usage: docker network create [OPTIONS] func (cli *DockerCli) CmdNetworkCreate(args ...string) error { cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false) flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network") flOpts := opts.NewMapOpts(nil, nil) flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver") flIpamSubnet := opts.NewListOpts(nil) flIpamIPRange := opts.NewListOpts(nil) flIpamGateway := opts.NewListOpts(nil) flIpamAux := opts.NewMapOpts(nil, nil) flIpamOpt := opts.NewMapOpts(nil, nil) cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment") cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range") cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet") cmd.Var(flIpamAux, []string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver") cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options") cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options") flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network") cmd.Require(flag.Exact, 1) err := cmd.ParseFlags(args, true) if err != nil { return err } // Set the default driver to "" if the user didn't set the value. // That way we can know whether it was user input or not. 
driver := *flDriver if !cmd.IsSet("-driver") && !cmd.IsSet("d") { driver = "" } ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll()) if err != nil { return err } // Construct network create request body nc := types.NetworkCreate{ Name: cmd.Arg(0), Driver: driver, IPAM: network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()}, Options: flOpts.GetAll(), CheckDuplicate: true, Internal: *flInternal, } resp, err := cli.client.NetworkCreate(nc) if err != nil { return err } fmt.Fprintf(cli.out, "%s\n", resp.ID) return nil } // CmdNetworkRm deletes one or more networks // // Usage: docker network rm NETWORK-NAME|NETWORK-ID [NETWORK-NAME|NETWORK-ID...] func (cli *DockerCli) CmdNetworkRm(args ...string) error { cmd := Cli.Subcmd("network rm", []string{"NETWORK [NETWORK...]"}, "Deletes one or more networks", false) cmd.Require(flag.Min, 1) if err := cmd.ParseFlags(args, true); err != nil { return err } status := 0 for _, net := range cmd.Args() { if err := cli.client.NetworkRemove(net); err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 continue } } if status != 0 { return Cli.StatusError{StatusCode: status} } return nil } // CmdNetworkConnect connects a container to a network // // Usage: docker network connect [OPTIONS] func (cli *DockerCli) CmdNetworkConnect(args ...string) error { cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false) flIPAddress := cmd.String([]string{"-ip"}, "", "IP Address") flIPv6Address := cmd.String([]string{"-ip6"}, "", "IPv6 Address") flLinks := opts.NewListOpts(runconfigopts.ValidateLink) cmd.Var(&flLinks, []string{"-link"}, "Add link to another container") flAliases := opts.NewListOpts(nil) cmd.Var(&flAliases, []string{"-alias"}, "Add network-scoped alias for the container") cmd.Require(flag.Min, 2) if err := cmd.ParseFlags(args, true); err != nil { return err } epConfig := 
&network.EndpointSettings{ IPAMConfig: &network.EndpointIPAMConfig{ IPv4Address: *flIPAddress, IPv6Address: *flIPv6Address, }, Links: flLinks.GetAll(), Aliases: flAliases.GetAll(), } return cli.client.NetworkConnect(cmd.Arg(0), cmd.Arg(1), epConfig) } // CmdNetworkDisconnect disconnects a container from a network // // Usage: docker network disconnect func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error { cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, "Disconnects container from a network", false) force := cmd.Bool([]string{"f", "-force"}, false, "Force the container to disconnect from a network") cmd.Require(flag.Exact, 2) if err := cmd.ParseFlags(args, true); err != nil { return err } return cli.client.NetworkDisconnect(cmd.Arg(0), cmd.Arg(1), *force) } // CmdNetworkLs lists all the networks managed by docker daemon // // Usage: docker network ls [OPTIONS] func (cli *DockerCli) CmdNetworkLs(args ...string) error { cmd := Cli.Subcmd("network ls", nil, "Lists networks", true) quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output") flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") cmd.Require(flag.Exact, 0) err := cmd.ParseFlags(args, true) if err != nil { return err } // Consolidate all filter flags, and sanity check them early. // They'll get process after get response from server. 
netFilterArgs := filters.NewArgs() for _, f := range flFilter.GetAll() { if netFilterArgs, err = filters.ParseFlag(f, netFilterArgs); err != nil { return err } } options := types.NetworkListOptions{ Filters: netFilterArgs, } networkResources, err := cli.client.NetworkList(options) if err != nil { return err } wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) // unless quiet (-q) is specified, print field titles if !*quiet { fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER") } for _, networkResource := range networkResources { ID := networkResource.ID netName := networkResource.Name if !*noTrunc { ID = stringid.TruncateID(ID) } if *quiet { fmt.Fprintln(wr, ID) continue } driver := networkResource.Driver fmt.Fprintf(wr, "%s\t%s\t%s\t", ID, netName, driver) fmt.Fprint(wr, "\n") } wr.Flush() return nil } // CmdNetworkInspect inspects the network object for more details // // Usage: docker network inspect [OPTIONS] [NETWORK...] func (cli *DockerCli) CmdNetworkInspect(args ...string) error { cmd := Cli.Subcmd("network inspect", []string{"NETWORK [NETWORK...]"}, "Displays detailed information on one or more networks", false) tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") cmd.Require(flag.Min, 1) if err := cmd.ParseFlags(args, true); err != nil { return err } inspectSearcher := func(name string) (interface{}, []byte, error) { i, err := cli.client.NetworkInspect(name) return i, nil, err } return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) } // Consolidates the ipam configuration as a group from different related configurations // user can configure network with multiple non-overlapping subnets and hence it is // possible to correlate the various related parameters and consolidate them. // consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into // structured ipam data. 
func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { if len(subnets) < len(ranges) || len(subnets) < len(gateways) { return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") } iData := map[string]*network.IPAMConfig{} // Populate non-overlapping subnets into consolidation map for _, s := range subnets { for k := range iData { ok1, err := subnetMatches(s, k) if err != nil { return nil, err } ok2, err := subnetMatches(k, s) if err != nil { return nil, err } if ok1 || ok2 { return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") } } iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} } // Validate and add valid ip ranges for _, r := range ranges { match := false for _, s := range subnets { ok, err := subnetMatches(s, r) if err != nil { return nil, err } if !ok { continue } if iData[s].IPRange != "" { return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) } d := iData[s] d.IPRange = r match = true } if !match { return nil, fmt.Errorf("no matching subnet for range %s", r) } } // Validate and add valid gateways for _, g := range gateways { match := false for _, s := range subnets { ok, err := subnetMatches(s, g) if err != nil { return nil, err } if !ok { continue } if iData[s].Gateway != "" { return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) } d := iData[s] d.Gateway = g match = true } if !match { return nil, fmt.Errorf("no matching subnet for gateway %s", g) } } // Validate and add aux-addresses for key, aa := range auxaddrs { match := false for _, s := range subnets { ok, err := subnetMatches(s, aa) if err != nil { return nil, err } if !ok { continue } iData[s].AuxAddress[key] = aa match = true } if !match { return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) } } idl := 
[]network.IPAMConfig{} for _, v := range iData { idl = append(idl, *v) } return idl, nil } func subnetMatches(subnet, data string) (bool, error) { var ( ip net.IP ) _, s, err := net.ParseCIDR(subnet) if err != nil { return false, fmt.Errorf("Invalid subnet %s : %v", s, err) } if strings.Contains(data, "/") { ip, _, err = net.ParseCIDR(data) if err != nil { return false, fmt.Errorf("Invalid cidr %s : %v", data, err) } } else { ip = net.ParseIP(data) } return s.Contains(ip), nil } func networkUsage() string { networkCommands := map[string]string{ "create": "Create a network", "connect": "Connect container to a network", "disconnect": "Disconnect container from a network", "inspect": "Display detailed network information", "ls": "List all networks", "rm": "Remove a network", } help := "Commands:\n" for cmd, description := range networkCommands { help += fmt.Sprintf(" %-25.25s%s\n", cmd, description) } help += fmt.Sprintf("\nRun 'docker network COMMAND --help' for more information on a command.") return help } docker-1.10.3/api/client/pause.go000066400000000000000000000014471267010174400165460ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdPause pauses all processes within one or more containers. // // Usage: docker pause CONTAINER [CONTAINER...] 
func (cli *DockerCli) CmdPause(args ...string) error { cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["pause"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var errs []string for _, name := range cmd.Args() { if err := cli.client.ContainerPause(name); err != nil { errs = append(errs, fmt.Sprintf("Failed to pause container (%s): %s", name, err)) } else { fmt.Fprintf(cli.out, "%s\n", name) } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/client/port.go000066400000000000000000000027541267010174400164170ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/go-connections/nat" ) // CmdPort lists port mappings for a container. // If a private port is specified, it also shows the public-facing port that is NATed to the private port. // // Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] func (cli *DockerCli) CmdPort(args ...string) error { cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, Cli.DockerCommands["port"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) c, err := cli.client.ContainerInspect(cmd.Arg(0)) if err != nil { return err } if cmd.NArg() == 2 { var ( port = cmd.Arg(1) proto = "tcp" parts = strings.SplitN(port, "/", 2) ) if len(parts) == 2 && len(parts[1]) != 0 { port = parts[0] proto = parts[1] } natPort := port + "/" + proto newP, err := nat.NewPort(proto, port) if err != nil { return err } if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { for _, frontend := range frontends { fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort) } return nil } return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) } for from, frontends := range c.NetworkSettings.Ports { for _, frontend := range frontends { 
fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) } } return nil } docker-1.10.3/api/client/ps.go000066400000000000000000000046601267010174400160530ustar00rootroot00000000000000package client import ( "github.com/docker/docker/api/client/formatter" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" ) // CmdPs outputs a list of Docker containers. // // Usage: docker ps [OPTIONS] func (cli *DockerCli) CmdPs(args ...string) error { var ( err error psFilterArgs = filters.NewArgs() cmd = Cli.Subcmd("ps", nil, Cli.DockerCommands["ps"].Description, true) quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") noTrunc = cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container (includes all states)") since = cmd.String([]string{"#-since"}, "", "Show containers created since Id or Name (includes all states)") before = cmd.String([]string{"#-before"}, "", "Only show containers created before Id or Name") last = cmd.Int([]string{"n"}, -1, "Show n last created containers (includes all states)") format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template") flFilter = opts.NewListOpts(nil) ) cmd.Require(flag.Exact, 0) cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") cmd.ParseFlags(args, true) if *last == -1 && *nLatest { *last = 1 } // Consolidate all filter flags, and sanity check them. // They'll get processed in the daemon/server. 
for _, f := range flFilter.GetAll() { if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { return err } } options := types.ContainerListOptions{ All: *all, Limit: *last, Since: *since, Before: *before, Size: *size, Filter: psFilterArgs, } containers, err := cli.client.ContainerList(options) if err != nil { return err } f := *format if len(f) == 0 { if len(cli.PsFormat()) > 0 && !*quiet { f = cli.PsFormat() } else { f = "table" } } psCtx := formatter.ContainerContext{ Context: formatter.Context{ Output: cli.out, Format: f, Quiet: *quiet, Trunc: !*noTrunc, }, Size: *size, Containers: containers, } psCtx.Write() return nil } docker-1.10.3/api/client/pull.go000066400000000000000000000047741267010174400164130ustar00rootroot00000000000000package client import ( "errors" "fmt" Cli "github.com/docker/docker/cli" "github.com/docker/docker/pkg/jsonmessage" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/engine-api/client" "github.com/docker/engine-api/types" ) // CmdPull pulls an image or a repository from the registry. 
// // Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] func (cli *DockerCli) CmdPull(args ...string) error { cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, Cli.DockerCommands["pull"].Description, true) allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") addTrustedFlags(cmd, true) cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) remote := cmd.Arg(0) distributionRef, err := reference.ParseNamed(remote) if err != nil { return err } if *allTags && !reference.IsNameOnly(distributionRef) { return errors.New("tag can't be used with --all-tags/-a") } if !*allTags && reference.IsNameOnly(distributionRef) { distributionRef = reference.WithDefaultTag(distributionRef) fmt.Fprintf(cli.out, "Using default tag: %s\n", reference.DefaultTag) } var tag string switch x := distributionRef.(type) { case reference.Canonical: tag = x.Digest().String() case reference.NamedTagged: tag = x.Tag() } ref := registry.ParseReference(tag) // Resolve the Repository name from fqn to RepositoryInfo repoInfo, err := registry.ParseRepositoryInfo(distributionRef) if err != nil { return err } authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") if isTrusted() && !ref.HasDigest() { // Check if tag is digest return cli.trustedPull(repoInfo, ref, authConfig, requestPrivilege) } return cli.imagePullPrivileged(authConfig, distributionRef.String(), "", requestPrivilege) } func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) error { encodedAuth, err := encodeAuthToBase64(authConfig) if err != nil { return err } options := types.ImagePullOptions{ ImageID: imageID, Tag: tag, RegistryAuth: encodedAuth, } responseBody, err := cli.client.ImagePull(options, requestPrivilege) if err != nil { return err } defer responseBody.Close() return 
jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) } docker-1.10.3/api/client/push.go000066400000000000000000000040021267010174400163760ustar00rootroot00000000000000package client import ( "errors" "io" Cli "github.com/docker/docker/cli" "github.com/docker/docker/pkg/jsonmessage" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/engine-api/client" "github.com/docker/engine-api/types" ) // CmdPush pushes an image or repository to the registry. // // Usage: docker push NAME[:TAG] func (cli *DockerCli) CmdPush(args ...string) error { cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, Cli.DockerCommands["push"].Description, true) addTrustedFlags(cmd, false) cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) ref, err := reference.ParseNamed(cmd.Arg(0)) if err != nil { return err } var tag string switch x := ref.(type) { case reference.Canonical: return errors.New("cannot push a digest reference") case reference.NamedTagged: tag = x.Tag() } // Resolve the Repository name from fqn to RepositoryInfo repoInfo, err := registry.ParseRepositoryInfo(ref) if err != nil { return err } // Resolve the Auth config relevant for this server authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push") if isTrusted() { return cli.trustedPush(repoInfo, tag, authConfig, requestPrivilege) } responseBody, err := cli.imagePushPrivileged(authConfig, ref.Name(), tag, requestPrivilege) if err != nil { return err } defer responseBody.Close() return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) } func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) (io.ReadCloser, error) { encodedAuth, err := encodeAuthToBase64(authConfig) if err != 
nil { return nil, err } options := types.ImagePushOptions{ ImageID: imageID, Tag: tag, RegistryAuth: encodedAuth, } return cli.client.ImagePush(options, requestPrivilege) } docker-1.10.3/api/client/rename.go000066400000000000000000000014641267010174400166770ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdRename renames a container. // // Usage: docker rename OLD_NAME NEW_NAME func (cli *DockerCli) CmdRename(args ...string) error { cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, Cli.DockerCommands["rename"].Description, true) cmd.Require(flag.Exact, 2) cmd.ParseFlags(args, true) oldName := strings.TrimSpace(cmd.Arg(0)) newName := strings.TrimSpace(cmd.Arg(1)) if oldName == "" || newName == "" { return fmt.Errorf("Error: Neither old nor new names may be empty") } if err := cli.client.ContainerRename(oldName, newName); err != nil { fmt.Fprintf(cli.err, "%s\n", err) return fmt.Errorf("Error: failed to rename container named %s", oldName) } return nil } docker-1.10.3/api/client/restart.go000066400000000000000000000016361267010174400171150ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdRestart restarts one or more containers. // // Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] 
func (cli *DockerCli) CmdRestart(args ...string) error { cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["restart"].Description, true) nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var errs []string for _, name := range cmd.Args() { if err := cli.client.ContainerRestart(name, *nSeconds); err != nil { errs = append(errs, fmt.Sprintf("Failed to kill container (%s): %s", name, err)) } else { fmt.Fprintf(cli.out, "%s\n", name) } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/client/rm.go000066400000000000000000000025321267010174400160430ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/types" ) // CmdRm removes one or more containers. // // Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] 
func (cli *DockerCli) CmdRm(args ...string) error { cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["rm"].Description, true) v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") link := cmd.Bool([]string{"l", "-link"}, false, "Remove the specified link") force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var errs []string for _, name := range cmd.Args() { if name == "" { return fmt.Errorf("Container name cannot be empty") } name = strings.Trim(name, "/") options := types.ContainerRemoveOptions{ ContainerID: name, RemoveVolumes: *v, RemoveLinks: *link, Force: *force, } if err := cli.client.ContainerRemove(options); err != nil { errs = append(errs, fmt.Sprintf("Failed to remove container (%s): %s", name, err)) } else { fmt.Fprintf(cli.out, "%s\n", name) } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/client/rmi.go000066400000000000000000000025511267010174400162150ustar00rootroot00000000000000package client import ( "fmt" "net/url" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/types" ) // CmdRmi removes all images with the specified name(s). // // Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] 
func (cli *DockerCli) CmdRmi(args ...string) error { cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["rmi"].Description, true) force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) v := url.Values{} if *force { v.Set("force", "1") } if *noprune { v.Set("noprune", "1") } var errs []string for _, name := range cmd.Args() { options := types.ImageRemoveOptions{ ImageID: name, Force: *force, PruneChildren: !*noprune, } dels, err := cli.client.ImageRemove(options) if err != nil { errs = append(errs, fmt.Sprintf("Failed to remove image (%s): %s", name, err)) } else { for _, del := range dels { if del.Deleted != "" { fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted) } else { fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged) } } } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/client/run.go000066400000000000000000000205431267010174400162330ustar00rootroot00000000000000package client import ( "fmt" "io" "os" "runtime" "strings" "github.com/Sirupsen/logrus" Cli "github.com/docker/docker/cli" derr "github.com/docker/docker/errors" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/signal" runconfigopts "github.com/docker/docker/runconfig/opts" "github.com/docker/engine-api/types" "github.com/docker/libnetwork/resolvconf/dns" ) func (cid *cidFile) Close() error { cid.file.Close() if !cid.written { if err := os.Remove(cid.path); err != nil { return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) } } return nil } func (cid *cidFile) Write(id string) error { if _, err := cid.file.Write([]byte(id)); err != nil { return fmt.Errorf("Failed to write the container ID to the file: %s", err) } cid.written = true return nil } // if container start fails with 'command not 
found' error, return 127 // if container start fails with 'command cannot be invoked' error, return 126 // return 125 for generic docker daemon failures func runStartContainerErr(err error) error { trimmedErr := strings.Trim(err.Error(), "Error response from daemon: ") statusError := Cli.StatusError{} derrCmdNotFound := derr.ErrorCodeCmdNotFound.Message() derrCouldNotInvoke := derr.ErrorCodeCmdCouldNotBeInvoked.Message() derrNoSuchImage := derr.ErrorCodeNoSuchImageHash.Message() derrNoSuchImageTag := derr.ErrorCodeNoSuchImageTag.Message() switch trimmedErr { case derrCmdNotFound: statusError = Cli.StatusError{StatusCode: 127} case derrCouldNotInvoke: statusError = Cli.StatusError{StatusCode: 126} case derrNoSuchImage, derrNoSuchImageTag: statusError = Cli.StatusError{StatusCode: 125} default: statusError = Cli.StatusError{StatusCode: 125} } return statusError } // CmdRun runs a command in a new container. // // Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] func (cli *DockerCli) CmdRun(args ...string) error { cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true) addTrustedFlags(cmd, true) // These are flags not stored in Config/HostConfig var ( flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") flDetachKeys = cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") flAttach *opts.ListOpts ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") ) config, hostConfig, 
networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) // just in case the Parse does not exit if err != nil { cmd.ReportError(err.Error(), true) os.Exit(125) } if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { fmt.Fprintf(cli.err, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") } if len(hostConfig.DNS) > 0 { // check the DNS settings passed via --dns against // localhost regexp to warn if they are trying to // set a DNS to a localhost address for _, dnsIP := range hostConfig.DNS { if dns.IsLocalhost(dnsIP) { fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) break } } } if config.Image == "" { cmd.Usage() return nil } config.ArgsEscaped = false if !*flDetach { if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { return err } } else { if fl := cmd.Lookup("-attach"); fl != nil { flAttach = fl.Value.(*opts.ListOpts) if flAttach.Len() != 0 { return ErrConflictAttachDetach } } if *flAutoRemove { return ErrConflictDetachAutoRemove } config.AttachStdin = false config.AttachStdout = false config.AttachStderr = false config.StdinOnce = false } // Disable flSigProxy when in TTY mode sigProxy := *flSigProxy if config.Tty { sigProxy = false } // Telling the Windows daemon the initial size of the tty during start makes // a far better user experience rather than relying on subsequent resizes // to cause things to catch up. 
if runtime.GOOS == "windows" { hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() } createResponse, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) if err != nil { cmd.ReportError(err.Error(), true) return runStartContainerErr(err) } if sigProxy { sigc := cli.forwardAllSignals(createResponse.ID) defer signal.StopCatch(sigc) } var ( waitDisplayID chan struct{} errCh chan error ) if !config.AttachStdout && !config.AttachStderr { // Make this asynchronous to allow the client to write to stdin before having to read the ID waitDisplayID = make(chan struct{}) go func() { defer close(waitDisplayID) fmt.Fprintf(cli.out, "%s\n", createResponse.ID) }() } if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { return ErrConflictRestartPolicyAndAutoRemove } if config.AttachStdin || config.AttachStdout || config.AttachStderr { var ( out, stderr io.Writer in io.ReadCloser ) if config.AttachStdin { in = cli.in } if config.AttachStdout { out = cli.out } if config.AttachStderr { if config.Tty { stderr = cli.out } else { stderr = cli.err } } if *flDetachKeys != "" { cli.configFile.DetachKeys = *flDetachKeys } options := types.ContainerAttachOptions{ ContainerID: createResponse.ID, Stream: true, Stdin: config.AttachStdin, Stdout: config.AttachStdout, Stderr: config.AttachStderr, DetachKeys: cli.configFile.DetachKeys, } resp, err := cli.client.ContainerAttach(options) if err != nil { return err } if in != nil && config.Tty { if err := cli.setRawTerminal(); err != nil { return err } defer cli.restoreTerminal(in) } errCh = promise.Go(func() error { return cli.holdHijackedConnection(config.Tty, in, out, stderr, resp) }) } defer func() { if *flAutoRemove { options := types.ContainerRemoveOptions{ ContainerID: createResponse.ID, RemoveVolumes: true, } if err := cli.client.ContainerRemove(options); err != nil { fmt.Fprintf(cli.err, "Error deleting container: %s\n", err) } 
} }() //start the container if err := cli.client.ContainerStart(createResponse.ID); err != nil { cmd.ReportError(err.Error(), false) return runStartContainerErr(err) } if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { if err := cli.monitorTtySize(createResponse.ID, false); err != nil { fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) } } if errCh != nil { if err := <-errCh; err != nil { logrus.Debugf("Error hijack: %s", err) return err } } // Detached mode: wait for the id to be displayed and return. if !config.AttachStdout && !config.AttachStderr { // Detached mode <-waitDisplayID return nil } var status int // Attached mode if *flAutoRemove { // Autoremove: wait for the container to finish, retrieve // the exit code and remove the container if status, err = cli.client.ContainerWait(createResponse.ID); err != nil { return runStartContainerErr(err) } if _, status, err = getExitCode(cli, createResponse.ID); err != nil { return err } } else { // No Autoremove: Simply retrieve the exit code if !config.Tty { // In non-TTY mode, we can't detach, so we must wait for container exit if status, err = cli.client.ContainerWait(createResponse.ID); err != nil { return err } } else { // In TTY mode, there is a race: if the process dies too slowly, the state could // be updated after the getExitCode call and result in the wrong exit code being reported if _, status, err = getExitCode(cli, createResponse.ID); err != nil { return err } } } if status != 0 { return Cli.StatusError{StatusCode: status} } return nil } docker-1.10.3/api/client/save.go000066400000000000000000000021161267010174400163610ustar00rootroot00000000000000package client import ( "errors" "io" "os" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdSave saves one or more images to a tar archive. // // The tar archive is written to STDOUT by default, or written to a file. 
// // Usage: docker save [OPTIONS] IMAGE [IMAGE...] func (cli *DockerCli) CmdSave(args ...string) error { cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["save"].Description+" (streamed to STDOUT by default)", true) outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var ( output = cli.out err error ) if *outfile == "" && cli.isTerminalOut { return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") } if *outfile != "" { if output, err = os.Create(*outfile); err != nil { return err } } responseBody, err := cli.client.ImageSave(cmd.Args()) if err != nil { return err } defer responseBody.Close() _, err = io.Copy(output, responseBody) return err } docker-1.10.3/api/client/search.go000066400000000000000000000051061267010174400166720ustar00rootroot00000000000000package client import ( "fmt" "net/url" "sort" "strings" "text/tabwriter" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/registry" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" ) // CmdSearch searches the Docker Hub for images. 
// // Usage: docker search [OPTIONS] TERM func (cli *DockerCli) CmdSearch(args ...string) error { cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true) noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") stars := cmd.Uint([]string{"s", "-stars"}, 0, "Only displays with at least x stars") cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) name := cmd.Arg(0) v := url.Values{} v.Set("term", name) indexInfo, err := registry.ParseSearchIndexInfo(name) if err != nil { return err } authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, indexInfo) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search") encodedAuth, err := encodeAuthToBase64(authConfig) if err != nil { return err } options := types.ImageSearchOptions{ Term: name, RegistryAuth: encodedAuth, } unorderedResults, err := cli.client.ImageSearch(options, requestPrivilege) if err != nil { return err } results := searchResultsByStars(unorderedResults) sort.Sort(results) w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") for _, res := range results { if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) { continue } desc := strings.Replace(res.Description, "\n", " ", -1) desc = strings.Replace(desc, "\r", " ", -1) if !*noTrunc && len(desc) > 45 { desc = stringutils.Truncate(desc, 42) + "..." } fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) if res.IsOfficial { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\t") if res.IsAutomated || res.IsTrusted { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\n") } w.Flush() return nil } // SearchResultsByStars sorts search results in descending order by number of stars. 
type searchResultsByStars []registrytypes.SearchResult func (r searchResultsByStars) Len() int { return len(r) } func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } docker-1.10.3/api/client/start.go000066400000000000000000000076071267010174400165720ustar00rootroot00000000000000package client import ( "fmt" "io" "os" "strings" "github.com/Sirupsen/logrus" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/signal" "github.com/docker/engine-api/types" ) func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { sigc := make(chan os.Signal, 128) signal.CatchAll(sigc) go func() { for s := range sigc { if s == signal.SIGCHLD { continue } var sig string for sigStr, sigN := range signal.SignalMap { if sigN == s { sig = sigStr break } } if sig == "" { fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) continue } if err := cli.client.ContainerKill(cid, sig); err != nil { logrus.Debugf("Error sending signal: %s", err) } } }() return sigc } // CmdStart starts one or more containers. // // Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] func (cli *DockerCli) CmdStart(args ...string) error { cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["start"].Description, true) attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) if *attach || *openStdin { // We're going to attach to a container. // 1. Ensure we only have one container. 
if cmd.NArg() > 1 { return fmt.Errorf("You cannot start and attach multiple containers at once.") } // 2. Attach to the container. containerID := cmd.Arg(0) c, err := cli.client.ContainerInspect(containerID) if err != nil { return err } if !c.Config.Tty { sigc := cli.forwardAllSignals(containerID) defer signal.StopCatch(sigc) } if *detachKeys != "" { cli.configFile.DetachKeys = *detachKeys } options := types.ContainerAttachOptions{ ContainerID: containerID, Stream: true, Stdin: *openStdin && c.Config.OpenStdin, Stdout: true, Stderr: true, DetachKeys: cli.configFile.DetachKeys, } var in io.ReadCloser if options.Stdin { in = cli.in } resp, err := cli.client.ContainerAttach(options) if err != nil { return err } defer resp.Close() if in != nil && c.Config.Tty { if err := cli.setRawTerminal(); err != nil { return err } defer cli.restoreTerminal(in) } cErr := promise.Go(func() error { return cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp) }) // 3. Start the container. if err := cli.client.ContainerStart(containerID); err != nil { return err } // 4. Wait for attachment to break. if c.Config.Tty && cli.isTerminalOut { if err := cli.monitorTtySize(containerID, false); err != nil { fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) } } if attchErr := <-cErr; attchErr != nil { return attchErr } _, status, err := getExitCode(cli, containerID) if err != nil { return err } if status != 0 { return Cli.StatusError{StatusCode: status} } } else { // We're not going to attach to anything. // Start as many containers as we want. 
return cli.startContainersWithoutAttachments(cmd.Args()) } return nil } func (cli *DockerCli) startContainersWithoutAttachments(containerIDs []string) error { var failedContainers []string for _, containerID := range containerIDs { if err := cli.client.ContainerStart(containerID); err != nil { fmt.Fprintf(cli.err, "%s\n", err) failedContainers = append(failedContainers, containerID) } else { fmt.Fprintf(cli.out, "%s\n", containerID) } } if len(failedContainers) > 0 { return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) } return nil } docker-1.10.3/api/client/stats.go000066400000000000000000000203251267010174400165630ustar00rootroot00000000000000package client import ( "encoding/json" "fmt" "io" "sort" "strings" "sync" "text/tabwriter" "time" Cli "github.com/docker/docker/cli" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/events" "github.com/docker/engine-api/types/filters" "github.com/docker/go-units" ) type containerStats struct { Name string CPUPercentage float64 Memory float64 MemoryLimit float64 MemoryPercentage float64 NetworkRx float64 NetworkTx float64 BlockRead float64 BlockWrite float64 mu sync.RWMutex err error } type stats struct { mu sync.Mutex cs []*containerStats } func (s *containerStats) Collect(cli *DockerCli, streamStats bool) { responseBody, err := cli.client.ContainerStats(s.Name, streamStats) if err != nil { s.mu.Lock() s.err = err s.mu.Unlock() return } defer responseBody.Close() var ( previousCPU uint64 previousSystem uint64 dec = json.NewDecoder(responseBody) u = make(chan error, 1) ) go func() { for { var v *types.StatsJSON if err := dec.Decode(&v); err != nil { u <- err return } var memPercent = 0.0 var cpuPercent = 0.0 // MemoryStats.Limit will never be 0 unless the container is not running and we haven't // got any data from cgroup if v.MemoryStats.Limit != 0 { memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 } previousCPU = 
v.PreCPUStats.CPUUsage.TotalUsage previousSystem = v.PreCPUStats.SystemUsage cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) blkRead, blkWrite := calculateBlockIO(v.BlkioStats) s.mu.Lock() s.CPUPercentage = cpuPercent s.Memory = float64(v.MemoryStats.Usage) s.MemoryLimit = float64(v.MemoryStats.Limit) s.MemoryPercentage = memPercent s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) s.BlockRead = float64(blkRead) s.BlockWrite = float64(blkWrite) s.mu.Unlock() u <- nil if !streamStats { return } } }() for { select { case <-time.After(2 * time.Second): // zero out the values if we have not received an update within // the specified duration. s.mu.Lock() s.CPUPercentage = 0 s.Memory = 0 s.MemoryPercentage = 0 s.MemoryLimit = 0 s.NetworkRx = 0 s.NetworkTx = 0 s.BlockRead = 0 s.BlockWrite = 0 s.mu.Unlock() case err := <-u: if err != nil { s.mu.Lock() s.err = err s.mu.Unlock() return } } if !streamStats { return } } } func (s *containerStats) Display(w io.Writer) error { s.mu.RLock() defer s.mu.RUnlock() if s.err != nil { return s.err } fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\n", s.Name, s.CPUPercentage, units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), s.MemoryPercentage, units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite)) return nil } // CmdStats displays a live stream of resource usage statistics for one or more containers. // // This shows real-time information on CPU usage, memory usage, and network I/O. // // Usage: docker stats [OPTIONS] [CONTAINER...] 
func (cli *DockerCli) CmdStats(args ...string) error { cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true) all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") cmd.ParseFlags(args, true) names := cmd.Args() showAll := len(names) == 0 if showAll { options := types.ContainerListOptions{ All: *all, } cs, err := cli.client.ContainerList(options) if err != nil { return err } for _, c := range cs { names = append(names, c.ID[:12]) } } if len(names) == 0 && !showAll { return fmt.Errorf("No containers found") } sort.Strings(names) var ( cStats = stats{} w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) ) printHeader := func() { if !*noStream { fmt.Fprint(cli.out, "\033[2J") fmt.Fprint(cli.out, "\033[H") } io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\n") } for _, n := range names { s := &containerStats{Name: n} // no need to lock here since only the main goroutine is running here cStats.cs = append(cStats.cs, s) go s.Collect(cli, !*noStream) } closeChan := make(chan error) if showAll { type watch struct { cid string event string err error } getNewContainers := func(c chan<- watch) { f := filters.NewArgs() f.Add("type", "container") options := types.EventsOptions{ Filters: f, } resBody, err := cli.client.Events(options) if err != nil { c <- watch{err: err} return } defer resBody.Close() decodeEvents(resBody, func(event events.Message, err error) error { if err != nil { c <- watch{err: err} return nil } c <- watch{event.ID[:12], event.Action, nil} return nil }) } go func(stopChan chan<- error) { cChan := make(chan watch) go getNewContainers(cChan) for { c := <-cChan if c.err != nil { stopChan <- c.err return } switch c.event { case "create": s := &containerStats{Name: c.cid} cStats.mu.Lock() cStats.cs = append(cStats.cs, s) 
cStats.mu.Unlock() go s.Collect(cli, !*noStream) case "stop": case "die": if !*all { var remove int // cStats cannot be O(1) with a map cause ranging over it would cause // containers in stats to move up and down in the list...:( cStats.mu.Lock() for i, s := range cStats.cs { if s.Name == c.cid { remove = i break } } cStats.cs = append(cStats.cs[:remove], cStats.cs[remove+1:]...) cStats.mu.Unlock() } } } }(closeChan) } else { close(closeChan) } // do a quick pause so that any failed connections for containers that do not exist are able to be // evicted before we display the initial or default values. time.Sleep(1500 * time.Millisecond) var errs []string cStats.mu.Lock() for _, c := range cStats.cs { c.mu.Lock() if c.err != nil { errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) } c.mu.Unlock() } cStats.mu.Unlock() if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, ", ")) } for range time.Tick(500 * time.Millisecond) { printHeader() toRemove := []int{} cStats.mu.Lock() for i, s := range cStats.cs { if err := s.Display(w); err != nil && !*noStream { toRemove = append(toRemove, i) } } for j := len(toRemove) - 1; j >= 0; j-- { i := toRemove[j] cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...) 
} if len(cStats.cs) == 0 && !showAll { return nil } cStats.mu.Unlock() w.Flush() if *noStream { break } select { case err, ok := <-closeChan: if ok { if err != nil { // this is suppressing "unexpected EOF" in the cli when the // daemon restarts so it shutdowns cleanly if err == io.ErrUnexpectedEOF { return nil } return err } } default: // just skip } } return nil } func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { var ( cpuPercent = 0.0 // calculate the change for the cpu usage of the container in between readings cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) // calculate the change for the entire system between readings systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) ) if systemDelta > 0.0 && cpuDelta > 0.0 { cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 } return cpuPercent } func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { for _, bioEntry := range blkio.IoServiceBytesRecursive { switch strings.ToLower(bioEntry.Op) { case "read": blkRead = blkRead + bioEntry.Value case "write": blkWrite = blkWrite + bioEntry.Value } } return } func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { var rx, tx float64 for _, v := range network { rx += float64(v.RxBytes) tx += float64(v.TxBytes) } return rx, tx } docker-1.10.3/api/client/stats_unit_test.go000066400000000000000000000023111267010174400206540ustar00rootroot00000000000000package client import ( "bytes" "sync" "testing" "github.com/docker/engine-api/types" ) func TestDisplay(t *testing.T) { c := &containerStats{ Name: "app", CPUPercentage: 30.0, Memory: 100 * 1024 * 1024.0, MemoryLimit: 2048 * 1024 * 1024.0, MemoryPercentage: 100.0 / 2048.0 * 100.0, NetworkRx: 100 * 1024 * 1024, NetworkTx: 800 * 1024 * 1024, BlockRead: 100 * 1024 * 1024, BlockWrite: 800 * 1024 * 1024, mu: sync.RWMutex{}, } var b bytes.Buffer if err := 
c.Display(&b); err != nil { t.Fatalf("c.Display() gave error: %s", err) } got := b.String() want := "app\t30.00%\t104.9 MB / 2.147 GB\t4.88%\t104.9 MB / 838.9 MB\t104.9 MB / 838.9 MB\n" if got != want { t.Fatalf("c.Display() = %q, want %q", got, want) } } func TestCalculBlockIO(t *testing.T) { blkio := types.BlkioStats{ IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, } blkRead, blkWrite := calculateBlockIO(blkio) if blkRead != 5801 { t.Fatalf("blkRead = %d, want 5801", blkRead) } if blkWrite != 579 { t.Fatalf("blkWrite = %d, want 579", blkWrite) } } docker-1.10.3/api/client/stop.go000066400000000000000000000021321267010174400164060ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdStop stops one or more containers. // // A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). // // Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
func (cli *DockerCli) CmdStop(args ...string) error { cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["stop"].Description+".\nSending SIGTERM and then SIGKILL after a grace period", true) nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var errs []string for _, name := range cmd.Args() { if err := cli.client.ContainerStop(name, *nSeconds); err != nil { errs = append(errs, fmt.Sprintf("Failed to stop container (%s): %s", name, err)) } else { fmt.Fprintf(cli.out, "%s\n", name) } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/client/tag.go000066400000000000000000000022301267010174400161730ustar00rootroot00000000000000package client import ( "errors" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" ) // CmdTag tags an image into a repository. 
// // Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] func (cli *DockerCli) CmdTag(args ...string) error { cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true) force := cmd.Bool([]string{"#f", "#-force"}, false, "Force the tagging even if there's a conflict") cmd.Require(flag.Exact, 2) cmd.ParseFlags(args, true) ref, err := reference.ParseNamed(cmd.Arg(1)) if err != nil { return err } if _, isCanonical := ref.(reference.Canonical); isCanonical { return errors.New("refusing to create a tag with a digest reference") } var tag string if tagged, isTagged := ref.(reference.NamedTagged); isTagged { tag = tagged.Tag() } options := types.ImageTagOptions{ ImageID: cmd.Arg(0), RepositoryName: ref.Name(), Tag: tag, Force: *force, } return cli.client.ImageTag(options) } docker-1.10.3/api/client/top.go000066400000000000000000000015231267010174400162260ustar00rootroot00000000000000package client import ( "fmt" "strings" "text/tabwriter" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdTop displays the running processes of a container. 
// // Usage: docker top CONTAINER func (cli *DockerCli) CmdTop(args ...string) error { cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, Cli.DockerCommands["top"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var arguments []string if cmd.NArg() > 1 { arguments = cmd.Args()[1:] } procList, err := cli.client.ContainerTop(cmd.Arg(0), arguments) if err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) for _, proc := range procList.Processes { fmt.Fprintln(w, strings.Join(proc, "\t")) } w.Flush() return nil } docker-1.10.3/api/client/trust.go000066400000000000000000000340501267010174400166060ustar00rootroot00000000000000package client import ( "encoding/hex" "encoding/json" "errors" "fmt" "net" "net/http" "net/url" "os" "path" "path/filepath" "sort" "strconv" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/cliconfig" "github.com/docker/docker/distribution" "github.com/docker/docker/pkg/jsonmessage" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/reference" "github.com/docker/docker/registry" apiclient "github.com/docker/engine-api/client" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" "github.com/docker/go-connections/tlsconfig" "github.com/docker/notary/client" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/store" ) var ( releasesRole = path.Join(data.CanonicalTargetsRole, "releases") untrusted bool ) func addTrustedFlags(fs *flag.FlagSet, verify bool) { var trusted bool if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { if t, err := strconv.ParseBool(e); t || err != nil { // treat any 
other value as true trusted = true } } message := "Skip image signing" if verify { message = "Skip image verification" } fs.BoolVar(&untrusted, []string{"-disable-content-trust"}, !trusted, message) } func isTrusted() bool { return !untrusted } type target struct { reference registry.Reference digest digest.Digest size int64 } func (cli *DockerCli) trustDirectory() string { return filepath.Join(cliconfig.ConfigDir(), "trust") } // certificateDirectory returns the directory containing // TLS certificates for the given server. An error is // returned if there was an error parsing the server string. func (cli *DockerCli) certificateDirectory(server string) (string, error) { u, err := url.Parse(server) if err != nil { return "", err } return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil } func trustServer(index *registrytypes.IndexInfo) (string, error) { if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { urlObj, err := url.Parse(s) if err != nil || urlObj.Scheme != "https" { return "", fmt.Errorf("valid https URL required for trust server, got %s", s) } return s, nil } if index.Official { return registry.NotaryServer, nil } return "https://" + index.Name, nil } type simpleCredentialStore struct { auth types.AuthConfig } func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { return scs.auth.Username, scs.auth.Password } func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig) (*client.NotaryRepository, error) { server, err := trustServer(repoInfo.Index) if err != nil { return nil, err } var cfg = tlsconfig.ClientDefault cfg.InsecureSkipVerify = !repoInfo.Index.Secure // Get certificate base directory certDir, err := cli.certificateDirectory(server) if err != nil { return nil, err } logrus.Debugf("reading certificate directory: %s", certDir) if err := registry.ReadCertsDirectory(&cfg, certDir); err != nil { return nil, err } base := &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: 
(&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, }).Dial, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: &cfg, DisableKeepAlives: true, } // Skip configuration headers since request is not going to Docker daemon modifiers := registry.DockerHeaders(http.Header{}) authTransport := transport.NewTransport(base, modifiers...) pingClient := &http.Client{ Transport: authTransport, Timeout: 5 * time.Second, } endpointStr := server + "/v2/" req, err := http.NewRequest("GET", endpointStr, nil) if err != nil { return nil, err } challengeManager := auth.NewSimpleChallengeManager() resp, err := pingClient.Do(req) if err != nil { // Ignore error on ping to operate in offline mode logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) } else { defer resp.Body.Close() // Add response to the challenge manager to parse out // authentication header and register authentication method if err := challengeManager.AddResponse(resp); err != nil { return nil, err } } creds := simpleCredentialStore{auth: authConfig} tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.FullName(), "push", "pull") basicHandler := auth.NewBasicHandler(creds) modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) tr := transport.NewTransport(base, modifiers...) 
return client.NewNotaryRepository(cli.trustDirectory(), repoInfo.FullName(), server, tr, cli.getPassphraseRetriever()) } func convertTarget(t client.Target) (target, error) { h, ok := t.Hashes["sha256"] if !ok { return target{}, errors.New("no valid hash, expecting sha256") } return target{ reference: registry.ParseReference(t.Name), digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), size: t.Length, }, nil } func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { aliasMap := map[string]string{ "root": "root", "snapshot": "repository", "targets": "repository", "targets/releases": "repository", } baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap) env := map[string]string{ "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), "targets/releases": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), } // Backwards compatibility with old env names. We should remove this in 1.10 if env["root"] == "" { if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"); passphrase != "" { env["root"] = passphrase fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE\n") } } if env["snapshot"] == "" || env["targets"] == "" || env["targets/releases"] == "" { if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"); passphrase != "" { env["snapshot"] = passphrase env["targets"] = passphrase env["targets/releases"] = passphrase fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE has been deprecated and will be removed in v1.10. 
Please use DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE\n") } } return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { if v := env[alias]; v != "" { return v, numAttempts > 1, nil } return baseRetriever(keyName, alias, createNew, numAttempts) } } func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Canonical, error) { repoInfo, err := registry.ParseRepositoryInfo(ref) if err != nil { return nil, err } // Resolve the Auth config relevant for this server authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) if err != nil { fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) return nil, err } t, err := notaryRepo.GetTargetByName(ref.Tag(), releasesRole, data.CanonicalTargetsRole) if err != nil { return nil, err } r, err := convertTarget(t.Target) if err != nil { return nil, err } return reference.WithDigest(ref, r.digest) } func (cli *DockerCli) tagTrusted(trustedRef reference.Canonical, ref reference.NamedTagged) error { fmt.Fprintf(cli.out, "Tagging %s as %s\n", trustedRef.String(), ref.String()) options := types.ImageTagOptions{ ImageID: trustedRef.String(), RepositoryName: trustedRef.Name(), Tag: ref.Tag(), Force: true, } return cli.client.ImageTag(options) } func notaryError(repoName string, err error) error { switch err.(type) { case *json.SyntaxError: logrus.Debugf("Notary syntax error: %s", err) return fmt.Errorf("Error: no trust data available for remote repository %s. 
Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) case signed.ErrExpired: return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) case trustmanager.ErrKeyNotFound: return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) case *net.OpError: return fmt.Errorf("Error: error contacting notary server: %v", err) case store.ErrMetaNotFound: return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) case signed.ErrInvalidKeyType: return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) case signed.ErrNoKeys: return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) case signed.ErrLowVersion: return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) case signed.ErrRoleThreshold: return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) case client.ErrRepositoryNotExist: return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) case signed.ErrInsufficientSignatures: return fmt.Errorf("Error: could not produce valid signature for %s. 
If Yubikey was used, was touch input provided?: %v", repoName, err) } return err } func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege apiclient.RequestPrivilegeFunc) error { var refs []target notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) if err != nil { fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) return err } if ref.String() == "" { // List all targets targets, err := notaryRepo.ListTargets(releasesRole, data.CanonicalTargetsRole) if err != nil { return notaryError(repoInfo.FullName(), err) } for _, tgt := range targets { t, err := convertTarget(tgt.Target) if err != nil { fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.Name()) continue } refs = append(refs, t) } } else { t, err := notaryRepo.GetTargetByName(ref.String(), releasesRole, data.CanonicalTargetsRole) if err != nil { return notaryError(repoInfo.FullName(), err) } r, err := convertTarget(t.Target) if err != nil { return err } refs = append(refs, r) } for i, r := range refs { displayTag := r.reference.String() if displayTag != "" { displayTag = ":" + displayTag } fmt.Fprintf(cli.out, "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) if err := cli.imagePullPrivileged(authConfig, repoInfo.Name(), r.digest.String(), requestPrivilege); err != nil { return err } // If reference is not trusted, tag by trusted reference if !r.reference.HasDigest() { tagged, err := reference.WithTag(repoInfo, r.reference.String()) if err != nil { return err } trustedRef, err := reference.WithDigest(repoInfo, r.digest) if err != nil { return err } if err := cli.tagTrusted(trustedRef, tagged); err != nil { return err } } } return nil } func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string, authConfig types.AuthConfig, requestPrivilege apiclient.RequestPrivilegeFunc) error { responseBody, err := 
cli.imagePushPrivileged(authConfig, repoInfo.Name(), tag, requestPrivilege) if err != nil { return err } defer responseBody.Close() targets := []target{} handleTarget := func(aux *json.RawMessage) { var pushResult distribution.PushResult err := json.Unmarshal(*aux, &pushResult) if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { targets = append(targets, target{ reference: registry.ParseReference(pushResult.Tag), digest: pushResult.Digest, size: int64(pushResult.Size), }) } } err = jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, handleTarget) if err != nil { return err } if tag == "" { fmt.Fprintf(cli.out, "No tag specified, skipping trust metadata push\n") return nil } if len(targets) == 0 { fmt.Fprintf(cli.out, "No targets found, skipping trust metadata push\n") return nil } fmt.Fprintf(cli.out, "Signing and pushing trust metadata\n") repo, err := cli.getNotaryRepository(repoInfo, authConfig) if err != nil { fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err) return err } for _, target := range targets { h, err := hex.DecodeString(target.digest.Hex()) if err != nil { return err } t := &client.Target{ Name: target.reference.String(), Hashes: data.Hashes{ string(target.digest.Algorithm()): h, }, Length: int64(target.size), } if err := repo.AddTarget(t, releasesRole); err != nil { return err } } err = repo.Publish() if _, ok := err.(client.ErrRepoNotInitialized); !ok { return notaryError(repoInfo.FullName(), err) } keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) var rootKeyID string // always select the first root key if len(keys) > 0 { sort.Strings(keys) rootKeyID = keys[0] } else { rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, data.ECDSAKey) if err != nil { return err } rootKeyID = rootPublicKey.ID() } if err := repo.Initialize(rootKeyID); err != nil { return notaryError(repoInfo.FullName(), err) } fmt.Fprintf(cli.out, 
"Finished initializing %q\n", repoInfo.FullName()) return notaryError(repoInfo.FullName(), repo.Publish()) } docker-1.10.3/api/client/trust_test.go000066400000000000000000000032311267010174400176420ustar00rootroot00000000000000package client import ( "os" "testing" "github.com/docker/docker/registry" registrytypes "github.com/docker/engine-api/types/registry" ) func unsetENV() { os.Unsetenv("DOCKER_CONTENT_TRUST") os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") } func TestENVTrustServer(t *testing.T) { defer unsetENV() indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { t.Fatal("Failed to set ENV variable") } output, err := trustServer(indexInfo) expectedStr := "https://notary-test.com:5000" if err != nil || output != expectedStr { t.Fatalf("Expected server to be %s, got %s", expectedStr, output) } } func TestHTTPENVTrustServer(t *testing.T) { defer unsetENV() indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { t.Fatal("Failed to set ENV variable") } _, err := trustServer(indexInfo) if err == nil { t.Fatal("Expected error with invalid scheme") } } func TestOfficialTrustServer(t *testing.T) { indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: true} output, err := trustServer(indexInfo) if err != nil || output != registry.NotaryServer { t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) } } func TestNonOfficialTrustServer(t *testing.T) { indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: false} output, err := trustServer(indexInfo) expectedStr := "https://" + indexInfo.Name if err != nil || output != expectedStr { t.Fatalf("Expected server to be %s, got %s", expectedStr, output) } } docker-1.10.3/api/client/unpause.go000066400000000000000000000015101267010174400171000ustar00rootroot00000000000000package client import ( "fmt" 
"strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdUnpause unpauses all processes within a container, for one or more containers. // // Usage: docker unpause CONTAINER [CONTAINER...] func (cli *DockerCli) CmdUnpause(args ...string) error { cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["unpause"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var errs []string for _, name := range cmd.Args() { if err := cli.client.ContainerUnpause(name); err != nil { errs = append(errs, fmt.Sprintf("Failed to unpause container (%s): %s", name, err)) } else { fmt.Fprintf(cli.out, "%s\n", name) } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/client/update.go000066400000000000000000000061521267010174400167110ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/types/container" "github.com/docker/go-units" ) // CmdUpdate updates resources of one or more containers. // // Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] 
func (cli *DockerCli) CmdUpdate(args ...string) error { cmd := Cli.Subcmd("update", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["update"].Description, true) flBlkioWeight := cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") flCpusetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") flCpusetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) if cmd.NFlag() == 0 { return fmt.Errorf("You must provide one or more flags when using this command.") } var err error var flMemory int64 if *flMemoryString != "" { flMemory, err = units.RAMInBytes(*flMemoryString) if err != nil { return err } } var memoryReservation int64 if *flMemoryReservation != "" { memoryReservation, err = units.RAMInBytes(*flMemoryReservation) if err != nil { return err } } var memorySwap int64 if *flMemorySwap != "" { if *flMemorySwap == "-1" { memorySwap = -1 } else { memorySwap, err = units.RAMInBytes(*flMemorySwap) if err != nil { return err } } } var kernelMemory int64 if *flKernelMemory != "" { kernelMemory, err = units.RAMInBytes(*flKernelMemory) if err != nil { return err } } resources := container.Resources{ BlkioWeight: *flBlkioWeight, 
CpusetCpus: *flCpusetCpus, CpusetMems: *flCpusetMems, CPUShares: *flCPUShares, Memory: flMemory, MemoryReservation: memoryReservation, MemorySwap: memorySwap, KernelMemory: kernelMemory, CPUPeriod: *flCPUPeriod, CPUQuota: *flCPUQuota, } updateConfig := container.UpdateConfig{ Resources: resources, } names := cmd.Args() var errs []string for _, name := range names { if err := cli.client.ContainerUpdate(name, updateConfig); err != nil { errs = append(errs, fmt.Sprintf("Failed to update container (%s): %s", name, err)) } else { fmt.Fprintf(cli.out, "%s\n", name) } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/client/utils.go000066400000000000000000000122421267010174400165640ustar00rootroot00000000000000package client import ( "encoding/base64" "encoding/json" "fmt" "os" gosignal "os/signal" "runtime" "strings" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" "github.com/docker/engine-api/client" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" ) func (cli *DockerCli) electAuthServer() string { // The daemon `/info` endpoint informs us of the default registry being // used. This is essential in cross-platforms environment, where for // example a Linux client might be interacting with a Windows daemon, hence // the default registry URL might be Windows specific. serverAddress := registry.IndexServer if info, err := cli.client.Info(); err != nil { fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). 
Using system default: %s\n", err, serverAddress) } else { serverAddress = info.IndexServerAddress } return serverAddress } // encodeAuthToBase64 serializes the auth configuration as JSON base64 payload func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { buf, err := json.Marshal(authConfig) if err != nil { return "", err } return base64.URLEncoding.EncodeToString(buf), nil } func (cli *DockerCli) encodeRegistryAuth(index *registrytypes.IndexInfo) (string, error) { authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, index) return encodeAuthToBase64(authConfig) } func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) client.RequestPrivilegeFunc { return func() (string, error) { fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) indexServer := registry.GetAuthConfigKey(index) authConfig, err := cli.configureAuth("", "", "", indexServer) if err != nil { return "", err } return encodeAuthToBase64(authConfig) } } func (cli *DockerCli) resizeTty(id string, isExec bool) { height, width := cli.getTtySize() if height == 0 && width == 0 { return } options := types.ResizeOptions{ ID: id, Height: height, Width: width, } var err error if isExec { err = cli.client.ContainerExecResize(options) } else { err = cli.client.ContainerResize(options) } if err != nil { logrus.Debugf("Error resize: %s", err) } } // getExitCode perform an inspect on the container. It returns // the running state and the exit code. func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { c, err := cli.client.ContainerInspect(containerID) if err != nil { // If we can't connect, then the daemon probably died. if err != client.ErrConnectionFailed { return false, -1, err } return false, -1, nil } return c.State.Running, c.State.ExitCode, nil } // getExecExitCode perform an inspect on the exec command. It returns // the running state and the exit code. 
func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { resp, err := cli.client.ContainerExecInspect(execID) if err != nil { // If we can't connect, then the daemon probably died. if err != client.ErrConnectionFailed { return false, -1, err } return false, -1, nil } return resp.Running, resp.ExitCode, nil } func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { cli.resizeTty(id, isExec) if runtime.GOOS == "windows" { go func() { prevH, prevW := cli.getTtySize() for { time.Sleep(time.Millisecond * 250) h, w := cli.getTtySize() if prevW != w || prevH != h { cli.resizeTty(id, isExec) } prevH = h prevW = w } }() } else { sigchan := make(chan os.Signal, 1) gosignal.Notify(sigchan, signal.SIGWINCH) go func() { for range sigchan { cli.resizeTty(id, isExec) } }() } return nil } func (cli *DockerCli) getTtySize() (int, int) { if !cli.isTerminalOut { return 0, 0 } ws, err := term.GetWinsize(cli.outFd) if err != nil { logrus.Debugf("Error getting size: %s", err) if ws == nil { return 0, 0 } } return int(ws.Height), int(ws.Width) } // resolveAuthConfig is like registry.ResolveAuthConfig, but if using the // default index, it uses the default index name for the daemon's platform, // not the client's platform. 
func (cli *DockerCli) resolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { configKey := index.Name if index.Official { configKey = cli.electAuthServer() } // First try the happy case if c, found := authConfigs[configKey]; found || index.Official { return c } convertToHostname := func(url string) string { stripped := url if strings.HasPrefix(url, "http://") { stripped = strings.Replace(url, "http://", "", 1) } else if strings.HasPrefix(url, "https://") { stripped = strings.Replace(url, "https://", "", 1) } nameParts := strings.SplitN(stripped, "/", 2) return nameParts[0] } // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing for registry, ac := range authConfigs { if configKey == convertToHostname(registry) { return ac } } // When all else fails, return an empty auth config return types.AuthConfig{} } docker-1.10.3/api/client/utils_unix.go000066400000000000000000000002421267010174400176240ustar00rootroot00000000000000// +build !windows package client import ( "path/filepath" ) func getContextRoot(srcPath string) (string, error) { return filepath.Join(srcPath, "."), nil } docker-1.10.3/api/client/utils_windows.go000066400000000000000000000004151267010174400203350ustar00rootroot00000000000000// +build windows package client import ( "path/filepath" "github.com/docker/docker/pkg/longpath" ) func getContextRoot(srcPath string) (string, error) { cr, err := filepath.Abs(srcPath) if err != nil { return "", err } return longpath.AddPrefix(cr), nil } docker-1.10.3/api/client/version.go000066400000000000000000000054161267010174400171160ustar00rootroot00000000000000package client import ( "runtime" "text/template" "time" Cli "github.com/docker/docker/cli" "github.com/docker/docker/dockerversion" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/utils" "github.com/docker/engine-api/types" ) var versionTemplate = `Client: Version: 
{{.Client.Version}} API version: {{.Client.APIVersion}} Go version: {{.Client.GoVersion}} Git commit: {{.Client.GitCommit}} Built: {{.Client.BuildTime}} OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} Server: Version: {{.Server.Version}} API version: {{.Server.APIVersion}} Go version: {{.Server.GoVersion}} Git commit: {{.Server.GitCommit}} Built: {{.Server.BuildTime}} OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} Experimental: {{.Server.Experimental}}{{end}}{{end}}` // CmdVersion shows Docker version information. // // Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch. // // Usage: docker version func (cli *DockerCli) CmdVersion(args ...string) (err error) { cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true) tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, true) templateFormat := versionTemplate if *tmplStr != "" { templateFormat = *tmplStr } var tmpl *template.Template if tmpl, err = template.New("").Funcs(funcMap).Parse(templateFormat); err != nil { return Cli.StatusError{StatusCode: 64, Status: "Template parsing error: " + err.Error()} } vd := types.VersionResponse{ Client: &types.Version{ Version: dockerversion.Version, APIVersion: cli.client.ClientVersion(), GoVersion: runtime.Version(), GitCommit: dockerversion.GitCommit, BuildTime: dockerversion.BuildTime, Os: runtime.GOOS, Arch: runtime.GOARCH, Experimental: utils.ExperimentalBuild(), }, } serverVersion, err := cli.client.ServerVersion() if err == nil { vd.Server = &serverVersion } // first we need to make BuildTime more human friendly t, errTime := time.Parse(time.RFC3339Nano, 
vd.Client.BuildTime) if errTime == nil { vd.Client.BuildTime = t.Format(time.ANSIC) } if vd.ServerOK() { t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) if errTime == nil { vd.Server.BuildTime = t.Format(time.ANSIC) } } if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil { err = err2 } cli.out.Write([]byte{'\n'}) return err } docker-1.10.3/api/client/volume.go000066400000000000000000000103201267010174400167260ustar00rootroot00000000000000package client import ( "fmt" "text/tabwriter" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" ) // CmdVolume is the parent subcommand for all volume commands // // Usage: docker volume func (cli *DockerCli) CmdVolume(args ...string) error { description := Cli.DockerCommands["volume"].Description + "\n\nCommands:\n" commands := [][]string{ {"create", "Create a volume"}, {"inspect", "Return low-level information on a volume"}, {"ls", "List volumes"}, {"rm", "Remove a volume"}, } for _, cmd := range commands { description += fmt.Sprintf(" %-25.25s%s\n", cmd[0], cmd[1]) } description += "\nRun 'docker volume COMMAND --help' for more information on a command" cmd := Cli.Subcmd("volume", []string{"[COMMAND]"}, description, false) cmd.Require(flag.Exact, 0) err := cmd.ParseFlags(args, true) cmd.Usage() return err } // CmdVolumeLs outputs a list of Docker volumes. // // Usage: docker volume ls [OPTIONS] func (cli *DockerCli) CmdVolumeLs(args ...string) error { cmd := Cli.Subcmd("volume ls", nil, "List volumes", true) quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names") flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 
'dangling=true')") cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, true) volFilterArgs := filters.NewArgs() for _, f := range flFilter.GetAll() { var err error volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) if err != nil { return err } } volumes, err := cli.client.VolumeList(volFilterArgs) if err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { for _, warn := range volumes.Warnings { fmt.Fprintln(cli.err, warn) } fmt.Fprintf(w, "DRIVER \tVOLUME NAME") fmt.Fprintf(w, "\n") } for _, vol := range volumes.Volumes { if *quiet { fmt.Fprintln(w, vol.Name) continue } fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) } w.Flush() return nil } // CmdVolumeInspect displays low-level information on one or more volumes. // // Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] func (cli *DockerCli) CmdVolumeInspect(args ...string) error { cmd := Cli.Subcmd("volume inspect", []string{"VOLUME [VOLUME...]"}, "Return low-level information on a volume", true) tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) if err := cmd.Parse(args); err != nil { return nil } inspectSearcher := func(name string) (interface{}, []byte, error) { i, err := cli.client.VolumeInspect(name) return i, nil, err } return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) } // CmdVolumeCreate creates a new volume. 
// // Usage: docker volume create [OPTIONS] func (cli *DockerCli) CmdVolumeCreate(args ...string) error { cmd := Cli.Subcmd("volume create", nil, "Create a volume", true) flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name") flName := cmd.String([]string{"-name"}, "", "Specify volume name") flDriverOpts := opts.NewMapOpts(nil, nil) cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options") cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, true) volReq := types.VolumeCreateRequest{ Driver: *flDriver, DriverOpts: flDriverOpts.GetAll(), Name: *flName, } vol, err := cli.client.VolumeCreate(volReq) if err != nil { return err } fmt.Fprintf(cli.out, "%s\n", vol.Name) return nil } // CmdVolumeRm removes one or more volumes. // // Usage: docker volume rm VOLUME [VOLUME...] func (cli *DockerCli) CmdVolumeRm(args ...string) error { cmd := Cli.Subcmd("volume rm", []string{"VOLUME [VOLUME...]"}, "Remove a volume", true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var status = 0 for _, name := range cmd.Args() { if err := cli.client.VolumeRemove(name); err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 continue } fmt.Fprintf(cli.out, "%s\n", name) } if status != 0 { return Cli.StatusError{StatusCode: status} } return nil } docker-1.10.3/api/client/wait.go000066400000000000000000000016211267010174400163670ustar00rootroot00000000000000package client import ( "fmt" "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) // CmdWait blocks until a container stops, then prints its exit code. // // If more than one container is specified, this will wait synchronously on each container. // // Usage: docker wait CONTAINER [CONTAINER...] 
func (cli *DockerCli) CmdWait(args ...string) error { cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["wait"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) var errs []string for _, name := range cmd.Args() { status, err := cli.client.ContainerWait(name) if err != nil { errs = append(errs, fmt.Sprintf("Failed to wait container (%s): %s", name, err)) } else { fmt.Fprintf(cli.out, "%d\n", status) } } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, "\n")) } return nil } docker-1.10.3/api/common.go000066400000000000000000000102141267010174400154330ustar00rootroot00000000000000package api import ( "fmt" "mime" "path/filepath" "sort" "strconv" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/version" "github.com/docker/engine-api/types" "github.com/docker/libtrust" ) // Common constants for daemon and client. const ( // Version of Current REST API DefaultVersion version.Version = "1.22" // MinVersion represents Minimum REST API version supported MinVersion version.Version = "1.12" // DefaultDockerfileName is the Default filename with Docker commands, read by docker build DefaultDockerfileName string = "Dockerfile" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. 
NoBaseImageSpecifier string = "scratch" ) // byPortInfo is a temporary type used to sort types.Port by its fields type byPortInfo []types.Port func (r byPortInfo) Len() int { return len(r) } func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r byPortInfo) Less(i, j int) bool { if r[i].PrivatePort != r[j].PrivatePort { return r[i].PrivatePort < r[j].PrivatePort } if r[i].IP != r[j].IP { return r[i].IP < r[j].IP } if r[i].PublicPort != r[j].PublicPort { return r[i].PublicPort < r[j].PublicPort } return r[i].Type < r[j].Type } // DisplayablePorts returns formatted string representing open ports of container // e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" // it's used by command 'docker ps' func DisplayablePorts(ports []types.Port) string { type portGroup struct { first int last int } groupMap := make(map[string]*portGroup) var result []string var hostMappings []string var groupMapKeys []string sort.Sort(byPortInfo(ports)) for _, port := range ports { current := port.PrivatePort portKey := port.Type if port.IP != "" { if port.PublicPort != current { hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) continue } portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) } group := groupMap[portKey] if group == nil { groupMap[portKey] = &portGroup{first: current, last: current} // record order that groupMap keys are created groupMapKeys = append(groupMapKeys, portKey) continue } if current == (group.last + 1) { group.last = current continue } result = append(result, formGroup(portKey, group.first, group.last)) groupMap[portKey] = &portGroup{first: current, last: current} } for _, portKey := range groupMapKeys { g := groupMap[portKey] result = append(result, formGroup(portKey, g.first, g.last)) } result = append(result, hostMappings...) 
return strings.Join(result, ", ") } func formGroup(key string, start, last int) string { parts := strings.Split(key, "/") groupType := parts[0] var ip string if len(parts) > 1 { ip = parts[0] groupType = parts[1] } group := strconv.Itoa(start) if start != last { group = fmt.Sprintf("%s-%d", group, last) } if ip != "" { group = fmt.Sprintf("%s:%s->%s", ip, group, group) } return fmt.Sprintf("%s/%s", group, groupType) } // MatchesContentType validates the content type against the expected one func MatchesContentType(contentType, expectedType string) bool { mimetype, _, err := mime.ParseMediaType(contentType) if err != nil { logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) } return err == nil && mimetype == expectedType } // LoadOrCreateTrustKey attempts to load the libtrust key at the given path, // otherwise generates a new one func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) if err != nil { return nil, err } trustKey, err := libtrust.LoadKeyFile(trustKeyPath) if err == libtrust.ErrKeyFileDoesNotExist { trustKey, err = libtrust.GenerateECP256PrivateKey() if err != nil { return nil, fmt.Errorf("Error generating key: %s", err) } if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { return nil, fmt.Errorf("Error saving key file: %s", err) } } else if err != nil { return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) } return trustKey, nil } docker-1.10.3/api/common_test.go000066400000000000000000000156301267010174400165010ustar00rootroot00000000000000package api import ( "io/ioutil" "path/filepath" "testing" "os" "github.com/docker/engine-api/types" ) type ports struct { ports []types.Port expected string } // DisplayablePorts func TestDisplayablePorts(t *testing.T) { cases := []ports{ { []types.Port{ { PrivatePort: 9988, Type: "tcp", }, }, "9988/tcp"}, { []types.Port{ { PrivatePort: 9988, Type: "udp", }, }, "9988/udp", }, { 
[]types.Port{ { IP: "0.0.0.0", PrivatePort: 9988, Type: "tcp", }, }, "0.0.0.0:0->9988/tcp", }, { []types.Port{ { PrivatePort: 9988, PublicPort: 8899, Type: "tcp", }, }, "9988/tcp", }, { []types.Port{ { IP: "4.3.2.1", PrivatePort: 9988, PublicPort: 8899, Type: "tcp", }, }, "4.3.2.1:8899->9988/tcp", }, { []types.Port{ { IP: "4.3.2.1", PrivatePort: 9988, PublicPort: 9988, Type: "tcp", }, }, "4.3.2.1:9988->9988/tcp", }, { []types.Port{ { PrivatePort: 9988, Type: "udp", }, { PrivatePort: 9988, Type: "udp", }, }, "9988/udp, 9988/udp", }, { []types.Port{ { IP: "1.2.3.4", PublicPort: 9998, PrivatePort: 9998, Type: "udp", }, { IP: "1.2.3.4", PublicPort: 9999, PrivatePort: 9999, Type: "udp", }, }, "1.2.3.4:9998-9999->9998-9999/udp", }, { []types.Port{ { IP: "1.2.3.4", PublicPort: 8887, PrivatePort: 9998, Type: "udp", }, { IP: "1.2.3.4", PublicPort: 8888, PrivatePort: 9999, Type: "udp", }, }, "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", }, { []types.Port{ { PrivatePort: 9998, Type: "udp", }, { PrivatePort: 9999, Type: "udp", }, }, "9998-9999/udp", }, { []types.Port{ { IP: "1.2.3.4", PrivatePort: 6677, PublicPort: 7766, Type: "tcp", }, { PrivatePort: 9988, PublicPort: 8899, Type: "udp", }, }, "9988/udp, 1.2.3.4:7766->6677/tcp", }, { []types.Port{ { IP: "1.2.3.4", PrivatePort: 9988, PublicPort: 8899, Type: "udp", }, { IP: "1.2.3.4", PrivatePort: 9988, PublicPort: 8899, Type: "tcp", }, { IP: "4.3.2.1", PrivatePort: 2233, PublicPort: 3322, Type: "tcp", }, }, "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", }, { []types.Port{ { PrivatePort: 9988, PublicPort: 8899, Type: "udp", }, { IP: "1.2.3.4", PrivatePort: 6677, PublicPort: 7766, Type: "tcp", }, { IP: "4.3.2.1", PrivatePort: 2233, PublicPort: 3322, Type: "tcp", }, }, "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", }, { []types.Port{ { PrivatePort: 80, Type: "tcp", }, { PrivatePort: 1024, Type: "tcp", }, { PrivatePort: 80, Type: "udp", }, { PrivatePort: 1024, Type: "udp", }, { IP: 
"1.1.1.1", PublicPort: 80, PrivatePort: 1024, Type: "tcp", }, { IP: "1.1.1.1", PublicPort: 80, PrivatePort: 1024, Type: "udp", }, { IP: "1.1.1.1", PublicPort: 1024, PrivatePort: 80, Type: "tcp", }, { IP: "1.1.1.1", PublicPort: 1024, PrivatePort: 80, Type: "udp", }, { IP: "2.1.1.1", PublicPort: 80, PrivatePort: 1024, Type: "tcp", }, { IP: "2.1.1.1", PublicPort: 80, PrivatePort: 1024, Type: "udp", }, { IP: "2.1.1.1", PublicPort: 1024, PrivatePort: 80, Type: "tcp", }, { IP: "2.1.1.1", PublicPort: 1024, PrivatePort: 80, Type: "udp", }, }, "80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", }, } for _, port := range cases { actual := DisplayablePorts(port.ports) if port.expected != actual { t.Fatalf("Expected %s, got %s.", port.expected, actual) } } } // MatchesContentType func TestJsonContentType(t *testing.T) { if !MatchesContentType("application/json", "application/json") { t.Fail() } if !MatchesContentType("application/json; charset=utf-8", "application/json") { t.Fail() } if MatchesContentType("dockerapplication/json", "application/json") { t.Fail() } } // LoadOrCreateTrustKey func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpKeyFolderPath) tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") if err != nil { t.Fatal(err) } if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { t.Fatalf("expected an error, got nothing.") } } func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpKeyFolderPath) // Without the need to create the folder hierarchy tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") if key, err := 
LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { t.Fatalf("expected a new key file, got : %v and %v", err, key) } if _, err := os.Stat(tmpKeyFile); err != nil { t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) } // With the need to create the folder hierarchy as tmpKeyFie is in a path // where some folder do not exists. tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { t.Fatalf("expected a new key file, got : %v and %v", err, key) } if _, err := os.Stat(tmpKeyFile); err != nil { t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) } // With no path at all defer os.Remove("keyfile") if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { t.Fatalf("expected a new key file, got : %v and %v", err, key) } if _, err := os.Stat("keyfile"); err != nil { t.Fatalf("Expected to find a file keyfile, got %v", err) } } func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { tmpKeyFile := filepath.Join("fixtures", "keyfile") if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { t.Fatalf("expected a key file, got : %v and %v", err, key) } } docker-1.10.3/api/fixtures/000077500000000000000000000000001267010174400154675ustar00rootroot00000000000000docker-1.10.3/api/fixtures/keyfile000066400000000000000000000004471267010174400170470ustar00rootroot00000000000000-----BEGIN EC PRIVATE KEY----- keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== -----END EC PRIVATE KEY----- 
docker-1.10.3/api/server/000077500000000000000000000000001267010174400151245ustar00rootroot00000000000000docker-1.10.3/api/server/httputils/000077500000000000000000000000001267010174400171645ustar00rootroot00000000000000docker-1.10.3/api/server/httputils/form.go000066400000000000000000000037641267010174400204700ustar00rootroot00000000000000package httputils import ( "fmt" "net/http" "path/filepath" "strconv" "strings" ) // BoolValue transforms a form value in different formats into a boolean type. func BoolValue(r *http.Request, k string) bool { s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") } // BoolValueOrDefault returns the default bool passed if the query param is // missing, otherwise it's just a proxy to boolValue above func BoolValueOrDefault(r *http.Request, k string, d bool) bool { if _, ok := r.Form[k]; !ok { return d } return BoolValue(r, k) } // Int64ValueOrZero parses a form value into an int64 type. // It returns 0 if the parsing fails. func Int64ValueOrZero(r *http.Request, k string) int64 { val, err := Int64ValueOrDefault(r, k, 0) if err != nil { return 0 } return val } // Int64ValueOrDefault parses a form value into an int64 type. If there is an // error, returns the error. If there is no value returns the default value. func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { if r.Form.Get(field) != "" { value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) if err != nil { return value, err } return value, nil } return def, nil } // ArchiveOptions stores archive information for different operations. type ArchiveOptions struct { Name string Path string } // ArchiveFormValues parses form values and turns them into ArchiveOptions. // It fails if the archive name and path are not in the request. 
func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { if err := ParseForm(r); err != nil { return ArchiveOptions{}, err } name := vars["name"] path := filepath.FromSlash(r.Form.Get("path")) switch { case name == "": return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") case path == "": return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") } return ArchiveOptions{name, path}, nil } docker-1.10.3/api/server/httputils/form_test.go000066400000000000000000000037021267010174400215170ustar00rootroot00000000000000package httputils import ( "net/http" "net/url" "testing" ) func TestBoolValue(t *testing.T) { cases := map[string]bool{ "": false, "0": false, "no": false, "false": false, "none": false, "1": true, "yes": true, "true": true, "one": true, "100": true, } for c, e := range cases { v := url.Values{} v.Set("test", c) r, _ := http.NewRequest("POST", "", nil) r.Form = v a := BoolValue(r, "test") if a != e { t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) } } } func TestBoolValueOrDefault(t *testing.T) { r, _ := http.NewRequest("GET", "", nil) if !BoolValueOrDefault(r, "queryparam", true) { t.Fatal("Expected to get true default value, got false") } v := url.Values{} v.Set("param", "") r, _ = http.NewRequest("GET", "", nil) r.Form = v if BoolValueOrDefault(r, "param", true) { t.Fatal("Expected not to get true") } } func TestInt64ValueOrZero(t *testing.T) { cases := map[string]int64{ "": 0, "asdf": 0, "0": 0, "1": 1, } for c, e := range cases { v := url.Values{} v.Set("test", c) r, _ := http.NewRequest("POST", "", nil) r.Form = v a := Int64ValueOrZero(r, "test") if a != e { t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) } } } func TestInt64ValueOrDefault(t *testing.T) { cases := map[string]int64{ "": -1, "-1": -1, "42": 42, } for c, e := range cases { v := url.Values{} v.Set("test", c) r, _ := http.NewRequest("POST", "", nil) r.Form = v a, err := Int64ValueOrDefault(r, 
"test", -1) if a != e { t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) } if err != nil { t.Fatalf("Error should be nil, but received: %s", err) } } } func TestInt64ValueOrDefaultWithError(t *testing.T) { v := url.Values{} v.Set("test", "invalid") r, _ := http.NewRequest("POST", "", nil) r.Form = v _, err := Int64ValueOrDefault(r, "test", -1) if err == nil { t.Fatalf("Expected an error.") } } docker-1.10.3/api/server/httputils/httputils.go000066400000000000000000000131531267010174400215560ustar00rootroot00000000000000package httputils import ( "encoding/json" "fmt" "io" "net/http" "strings" "golang.org/x/net/context" "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/api" "github.com/docker/docker/pkg/version" ) // APIVersionKey is the client's requested API version. const APIVersionKey = "api-version" // APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. // Any function that has the appropriate signature can be register as a API endpoint (e.g. getVersion). type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error // HijackConnection interrupts the http response writer to get the // underlying connection and operate with it. func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { conn, _, err := w.(http.Hijacker).Hijack() if err != nil { return nil, nil, err } // Flush the options to make sure the client sets the raw mode conn.Write([]byte{}) return conn, conn, nil } // CloseStreams ensures that a list for http streams are properly closed. func CloseStreams(streams ...interface{}) { for _, stream := range streams { if tcpc, ok := stream.(interface { CloseWrite() error }); ok { tcpc.CloseWrite() } else if closer, ok := stream.(io.Closer); ok { closer.Close() } } } // CheckForJSON makes sure that the request's Content-Type is application/json. 
func CheckForJSON(r *http.Request) error { ct := r.Header.Get("Content-Type") // No Content-Type header is ok as long as there's no Body if ct == "" { if r.Body == nil || r.ContentLength == 0 { return nil } } // Otherwise it better be json if api.MatchesContentType(ct, "application/json") { return nil } return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) } // ParseForm ensures the request form is parsed even with invalid content types. // If we don't do this, POST method without Content-type (even with empty body) will fail. func ParseForm(r *http.Request) error { if r == nil { return nil } if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { return err } return nil } // ParseMultipartForm ensure the request form is parsed, even with invalid content types. func ParseMultipartForm(r *http.Request) error { if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { return err } return nil } // WriteError decodes a specific docker error and sends it in the response. func WriteError(w http.ResponseWriter, err error) { if err == nil || w == nil { logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") return } statusCode := http.StatusInternalServerError errMsg := err.Error() // Based on the type of error we get we need to process things // slightly differently to extract the error message. // In the 'errcode.*' cases there are two different type of // error that could be returned. errocode.ErrorCode is the base // type of error object - it is just an 'int' that can then be // used as the look-up key to find the message. errorcode.Error // extends errorcode.Error by adding error-instance specific // data, like 'details' or variable strings to be inserted into // the message. // // Ideally, we should just be able to call err.Error() for all // cases but the errcode package doesn't support that yet. 
// // Additionally, in both errcode cases, there might be an http // status code associated with it, and if so use it. switch err.(type) { case errcode.ErrorCode: daError, _ := err.(errcode.ErrorCode) statusCode = daError.Descriptor().HTTPStatusCode errMsg = daError.Message() case errcode.Error: // For reference, if you're looking for a particular error // then you can do something like : // import ( derr "github.com/docker/docker/errors" ) // if daError.ErrorCode() == derr.ErrorCodeNoSuchContainer { ... } daError, _ := err.(errcode.Error) statusCode = daError.ErrorCode().Descriptor().HTTPStatusCode errMsg = daError.Message default: // This part of will be removed once we've // converted everything over to use the errcode package // FIXME: this is brittle and should not be necessary. // If we need to differentiate between different possible error types, // we should create appropriate error types with clearly defined meaning errStr := strings.ToLower(err.Error()) for keyword, status := range map[string]int{ "not found": http.StatusNotFound, "no such": http.StatusNotFound, "bad parameter": http.StatusBadRequest, "conflict": http.StatusConflict, "impossible": http.StatusNotAcceptable, "wrong login/password": http.StatusUnauthorized, "hasn't been activated": http.StatusForbidden, } { if strings.Contains(errStr, keyword) { statusCode = status break } } } if statusCode == 0 { statusCode = http.StatusInternalServerError } http.Error(w, errMsg, statusCode) } // WriteJSON writes the value v to the http response stream as json with standard json encoding. func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) return json.NewEncoder(w).Encode(v) } // VersionFromContext returns an API version from the context using APIVersionKey. // It panics if the context value does not have version.Version type. 
func VersionFromContext(ctx context.Context) (ver version.Version) { if ctx == nil { return } val := ctx.Value(APIVersionKey) if val == nil { return } return val.(version.Version) } docker-1.10.3/api/server/middleware.go000066400000000000000000000147101267010174400175730ustar00rootroot00000000000000package server import ( "bufio" "encoding/json" "io" "net/http" "runtime" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/dockerversion" "github.com/docker/docker/errors" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/version" "golang.org/x/net/context" ) // middleware is an adapter to allow the use of ordinary functions as Docker API filters. // Any function that has the appropriate signature can be register as a middleware. type middleware func(handler httputils.APIFunc) httputils.APIFunc // debugRequestMiddleware dumps the request to logger func debugRequestMiddleware(handler httputils.APIFunc) httputils.APIFunc { return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { logrus.Debugf("%s %s", r.Method, r.RequestURI) if r.Method != "POST" { return handler(ctx, w, r, vars) } if err := httputils.CheckForJSON(r); err != nil { return handler(ctx, w, r, vars) } maxBodySize := 4096 // 4KB if r.ContentLength > int64(maxBodySize) { return handler(ctx, w, r, vars) } body := r.Body bufReader := bufio.NewReaderSize(body, maxBodySize) r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) b, err := bufReader.Peek(maxBodySize) if err != io.EOF { // either there was an error reading, or the buffer is full (in which case the request is too large) return handler(ctx, w, r, vars) } var postForm map[string]interface{} if err := json.Unmarshal(b, &postForm); err == nil { if _, exists := postForm["password"]; exists { postForm["password"] = "*****" } 
formStr, errMarshal := json.Marshal(postForm) if errMarshal == nil { logrus.Debugf("form data: %s", string(formStr)) } else { logrus.Debugf("form data: %q", postForm) } } return handler(ctx, w, r, vars) } } // authorizationMiddleware perform authorization on the request. func (s *Server) authorizationMiddleware(handler httputils.APIFunc) httputils.APIFunc { return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { // FIXME: fill when authN gets in // User and UserAuthNMethod are taken from AuthN plugins // Currently tracked in https://github.com/docker/docker/pull/13994 user := "" userAuthNMethod := "" authCtx := authorization.NewCtx(s.authZPlugins, user, userAuthNMethod, r.Method, r.RequestURI) if err := authCtx.AuthZRequest(w, r); err != nil { logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) return err } rw := authorization.NewResponseModifier(w) if err := handler(ctx, rw, r, vars); err != nil { logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err) return err } if err := authCtx.AuthZResponse(rw, r); err != nil { logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) return err } return nil } } // userAgentMiddleware checks the User-Agent header looking for a valid docker client spec. 
func (s *Server) userAgentMiddleware(handler httputils.APIFunc) httputils.APIFunc { return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { dockerVersion := version.Version(s.cfg.Version) userAgent := strings.Split(r.Header.Get("User-Agent"), "/") // v1.20 onwards includes the GOOS of the client after the version // such as Docker/1.7.0 (linux) if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { userAgent[1] = strings.Split(userAgent[1], " ")[0] } if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) } } return handler(ctx, w, r, vars) } } // corsMiddleware sets the CORS header expectations in the server. func (s *Server) corsMiddleware(handler httputils.APIFunc) httputils.APIFunc { return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" // otherwise, all head values will be passed to HTTP handler corsHeaders := s.cfg.CorsHeaders if corsHeaders == "" && s.cfg.EnableCors { corsHeaders = "*" } if corsHeaders != "" { writeCorsHeaders(w, r, corsHeaders) } return handler(ctx, w, r, vars) } } // versionMiddleware checks the api version requirements before passing the request to the server handler. 
func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc { return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { apiVersion := version.Version(vars["version"]) if apiVersion == "" { apiVersion = api.DefaultVersion } if apiVersion.GreaterThan(api.DefaultVersion) { return errors.ErrorCodeNewerClientVersion.WithArgs(apiVersion, api.DefaultVersion) } if apiVersion.LessThan(api.MinVersion) { return errors.ErrorCodeOldClientVersion.WithArgs(apiVersion, api.MinVersion) } w.Header().Set("Server", "Docker/"+dockerversion.Version+" ("+runtime.GOOS+")") ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion) return handler(ctx, w, r, vars) } } // handleWithGlobalMiddlwares wraps the handler function for a request with // the server's global middlewares. The order of the middlewares is backwards, // meaning that the first in the list will be evaluated last. // // Example: handleWithGlobalMiddlewares(s.getContainersName) // // s.loggingMiddleware( // s.userAgentMiddleware( // s.corsMiddleware( // versionMiddleware(s.getContainersName) // ) // ) // ) // ) func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { middlewares := []middleware{ versionMiddleware, s.corsMiddleware, s.userAgentMiddleware, } // Only want this on debug level if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { middlewares = append(middlewares, debugRequestMiddleware) } if len(s.cfg.AuthorizationPluginNames) > 0 { s.authZPlugins = authorization.NewPlugins(s.cfg.AuthorizationPluginNames) middlewares = append(middlewares, s.authorizationMiddleware) } h := handler for _, m := range middlewares { h = m(h) } return h } docker-1.10.3/api/server/middleware_test.go000066400000000000000000000031711267010174400206310ustar00rootroot00000000000000package server import ( "net/http" "net/http/httptest" "testing" "github.com/docker/distribution/registry/api/errcode" 
"github.com/docker/docker/api/server/httputils" "github.com/docker/docker/errors" "golang.org/x/net/context" ) func TestVersionMiddleware(t *testing.T) { handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if httputils.VersionFromContext(ctx) == "" { t.Fatalf("Expected version, got empty string") } return nil } h := versionMiddleware(handler) req, _ := http.NewRequest("GET", "/containers/json", nil) resp := httptest.NewRecorder() ctx := context.Background() if err := h(ctx, resp, req, map[string]string{}); err != nil { t.Fatal(err) } } func TestVersionMiddlewareWithErrors(t *testing.T) { handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if httputils.VersionFromContext(ctx) == "" { t.Fatalf("Expected version, got empty string") } return nil } h := versionMiddleware(handler) req, _ := http.NewRequest("GET", "/containers/json", nil) resp := httptest.NewRecorder() ctx := context.Background() vars := map[string]string{"version": "0.1"} err := h(ctx, resp, req, vars) if derr, ok := err.(errcode.Error); !ok || derr.ErrorCode() != errors.ErrorCodeOldClientVersion { t.Fatalf("Expected ErrorCodeOldClientVersion, got %v", err) } vars["version"] = "100000" err = h(ctx, resp, req, vars) if derr, ok := err.(errcode.Error); !ok || derr.ErrorCode() != errors.ErrorCodeNewerClientVersion { t.Fatalf("Expected ErrorCodeNewerClientVersion, got %v", err) } } docker-1.10.3/api/server/profiler.go000066400000000000000000000020621267010174400172750ustar00rootroot00000000000000package server import ( "expvar" "fmt" "net/http" "net/http/pprof" "github.com/gorilla/mux" ) func profilerSetup(mainRouter *mux.Router, path string) { var r = mainRouter.PathPrefix(path).Subrouter() r.HandleFunc("/vars", expVars) r.HandleFunc("/pprof/", pprof.Index) r.HandleFunc("/pprof/cmdline", pprof.Cmdline) r.HandleFunc("/pprof/profile", pprof.Profile) r.HandleFunc("/pprof/symbol", pprof.Symbol) 
r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) } // Replicated from expvar.go as not public. func expVars(w http.ResponseWriter, r *http.Request) { first := true w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "{\n") expvar.Do(func(kv expvar.KeyValue) { if !first { fmt.Fprintf(w, ",\n") } first = false fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) }) fmt.Fprintf(w, "\n}\n") } docker-1.10.3/api/server/router/000077500000000000000000000000001267010174400164445ustar00rootroot00000000000000docker-1.10.3/api/server/router/build/000077500000000000000000000000001267010174400175435ustar00rootroot00000000000000docker-1.10.3/api/server/router/build/backend.go000066400000000000000000000006271267010174400214660ustar00rootroot00000000000000package build // Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. type Backend interface { // Build builds a Docker image referenced by an imageID string. // // Note: Tagging an image should not be done by a Builder, it should instead be done // by the caller. 
// // TODO: make this return a reference instead of string Build() (imageID string) } docker-1.10.3/api/server/router/build/build.go000066400000000000000000000013001267010174400211630ustar00rootroot00000000000000package build import ( "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/local" "github.com/docker/docker/daemon" ) // buildRouter is a router to talk with the build controller type buildRouter struct { backend *daemon.Daemon routes []router.Route } // NewRouter initializes a new build router func NewRouter(b *daemon.Daemon) router.Router { r := &buildRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routers to the build controller func (r *buildRouter) Routes() []router.Route { return r.routes } func (r *buildRouter) initRoutes() { r.routes = []router.Route{ local.NewPostRoute("/build", r.postBuild), } } docker-1.10.3/api/server/router/build/build_routes.go000066400000000000000000000177411267010174400226040ustar00rootroot00000000000000package build import ( "bytes" "encoding/base64" "encoding/json" "errors" "fmt" "io" "net/http" "strconv" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/builder" "github.com/docker/docker/builder/dockerfile" "github.com/docker/docker/daemon/daemonbuilder" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/reference" "github.com/docker/docker/utils" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "github.com/docker/go-units" "golang.org/x/net/context" ) // sanitizeRepoAndTags parses the raw "t" parameter received from the client // to a slice of repoAndTag. // It also validates each repoName and tag. 
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { var ( repoAndTags []reference.Named // This map is used for deduplicating the "-t" parameter. uniqNames = make(map[string]struct{}) ) for _, repo := range names { if repo == "" { continue } ref, err := reference.ParseNamed(repo) if err != nil { return nil, err } ref = reference.WithDefaultTag(ref) if _, isCanonical := ref.(reference.Canonical); isCanonical { return nil, errors.New("build tag cannot contain a digest") } if _, isTagged := ref.(reference.NamedTagged); !isTagged { ref, err = reference.WithTag(ref, reference.DefaultTag) } nameWithTag := ref.String() if _, exists := uniqNames[nameWithTag]; !exists { uniqNames[nameWithTag] = struct{}{} repoAndTags = append(repoAndTags, ref) } } return repoAndTags, nil } func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { version := httputils.VersionFromContext(ctx) options := &types.ImageBuildOptions{} if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { options.Remove = true } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { options.Remove = true } else { options.Remove = httputils.BoolValue(r, "rm") } if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { options.PullParent = true } options.Dockerfile = r.FormValue("dockerfile") options.SuppressOutput = httputils.BoolValue(r, "q") options.NoCache = httputils.BoolValue(r, "nocache") options.ForceRemove = httputils.BoolValue(r, "forcerm") options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") options.Memory = httputils.Int64ValueOrZero(r, "memory") options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") options.CPUSetCPUs = r.FormValue("cpusetcpus") options.CPUSetMems = r.FormValue("cpusetmems") options.CgroupParent = r.FormValue("cgroupparent") if 
r.Form.Get("shmsize") != "" { shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) if err != nil { return nil, err } options.ShmSize = shmSize } if i := container.IsolationLevel(r.FormValue("isolation")); i != "" { if !container.IsolationLevel.IsValid(i) { return nil, fmt.Errorf("Unsupported isolation: %q", i) } options.IsolationLevel = i } var buildUlimits = []*units.Ulimit{} ulimitsJSON := r.FormValue("ulimits") if ulimitsJSON != "" { if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { return nil, err } options.Ulimits = buildUlimits } var buildArgs = map[string]string{} buildArgsJSON := r.FormValue("buildargs") if buildArgsJSON != "" { if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { return nil, err } options.BuildArgs = buildArgs } return options, nil } func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var ( authConfigs = map[string]types.AuthConfig{} authConfigsEncoded = r.Header.Get("X-Registry-Config") notVerboseBuffer = bytes.NewBuffer(nil) ) if authConfigsEncoded != "" { authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting // to be empty. } } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() sf := streamformatter.NewJSONStreamFormatter() errf := func(err error) error { if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { output.Write(notVerboseBuffer.Bytes()) } // Do not write the error in the http output if it's still empty. // This prevents from writing a 200(OK) when there is an internal error. 
if !output.Flushed() { return err } _, err = w.Write(sf.FormatError(errors.New(utils.GetErrorMessage(err)))) if err != nil { logrus.Warnf("could not write error response: %v", err) } return nil } buildOptions, err := newImageBuildOptions(ctx, r) if err != nil { return errf(err) } repoAndTags, err := sanitizeRepoAndTags(r.Form["t"]) if err != nil { return errf(err) } remoteURL := r.FormValue("remote") // Currently, only used if context is from a remote url. // Look at code in DetectContextFromRemoteURL for more information. createProgressReader := func(in io.ReadCloser) io.ReadCloser { progressOutput := sf.NewProgressOutput(output, true) if buildOptions.SuppressOutput { progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) } return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) } var ( context builder.ModifiableContext dockerfileName string ) context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader) if err != nil { return errf(err) } defer func() { if err := context.Close(); err != nil { logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) } }() if len(dockerfileName) > 0 { buildOptions.Dockerfile = dockerfileName } uidMaps, gidMaps := br.backend.GetUIDGIDMaps() defaultArchiver := &archive.Archiver{ Untar: chrootarchive.Untar, UIDMaps: uidMaps, GIDMaps: gidMaps, } docker := &daemonbuilder.Docker{ Daemon: br.backend, OutOld: output, AuthConfigs: authConfigs, Archiver: defaultArchiver, } if buildOptions.SuppressOutput { docker.OutOld = notVerboseBuffer } b, err := dockerfile.NewBuilder( buildOptions, // result of newBuildConfig docker, builder.DockerIgnoreContext{ModifiableContext: context}, nil) if err != nil { return errf(err) } b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf} if buildOptions.SuppressOutput { b.Stdout = 
&streamformatter.StdoutFormatter{Writer: notVerboseBuffer, StreamFormatter: sf} b.Stderr = &streamformatter.StderrFormatter{Writer: notVerboseBuffer, StreamFormatter: sf} } if closeNotifier, ok := w.(http.CloseNotifier); ok { finished := make(chan struct{}) defer close(finished) clientGone := closeNotifier.CloseNotify() go func() { select { case <-finished: case <-clientGone: logrus.Infof("Client disconnected, cancelling job: build") b.Cancel() } }() } imgID, err := b.Build() if err != nil { return errf(err) } for _, rt := range repoAndTags { if err := br.backend.TagImage(rt, imgID); err != nil { return errf(err) } } // Everything worked so if -q was provided the output from the daemon // should be just the image ID and we'll print that to stdout. if buildOptions.SuppressOutput { stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} fmt.Fprintf(stdout, "%s\n", string(imgID)) } return nil } docker-1.10.3/api/server/router/container/000077500000000000000000000000001267010174400204265ustar00rootroot00000000000000docker-1.10.3/api/server/router/container/backend.go000066400000000000000000000061231267010174400223460ustar00rootroot00000000000000package container import ( "io" "time" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/exec" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/version" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" ) // execBackend includes functions to implement to provide exec functionality. type execBackend interface { ContainerExecCreate(config *types.ExecConfig) (string, error) ContainerExecInspect(id string) (*exec.Config, error) ContainerExecResize(name string, height, width int) error ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error ExecExists(name string) (bool, error) } // copyBackend includes functions to implement to provide container copy functionality. 
type copyBackend interface { ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) ContainerCopy(name string, res string) (io.ReadCloser, error) ContainerExport(name string, out io.Writer) error ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) } // stateBackend includes functions to implement to provide container state lifecycle functionality. type stateBackend interface { ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) ContainerKill(name string, sig uint64) error ContainerPause(name string) error ContainerRename(oldName, newName string) error ContainerResize(name string, height, width int) error ContainerRestart(name string, seconds int) error ContainerRm(name string, config *types.ContainerRmConfig) error ContainerStart(name string, hostConfig *container.HostConfig) error ContainerStop(name string, seconds int) error ContainerUnpause(name string) error ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) ContainerWait(name string, timeout time.Duration) (int, error) Exists(id string) bool } // monitorBackend includes functions to implement to provide containers monitoring functionality. type monitorBackend interface { ContainerChanges(name string) ([]archive.Change, error) ContainerInspect(name string, size bool, version version.Version) (interface{}, error) ContainerLogs(name string, config *daemon.ContainerLogsConfig) error ContainerStats(name string, config *daemon.ContainerStatsConfig) error ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) Containers(config *daemon.ContainersConfig) ([]*types.Container, error) } // attachBackend includes function to implement to provide container attaching functionality. 
type attachBackend interface { ContainerAttachWithLogs(name string, c *daemon.ContainerAttachWithLogsConfig) error ContainerWsAttachWithLogs(name string, c *daemon.ContainerWsAttachWithLogsConfig) error } // Backend is all the methods that need to be implemented to provide container specific functionality. type Backend interface { execBackend copyBackend stateBackend monitorBackend attachBackend } docker-1.10.3/api/server/router/container/container.go000066400000000000000000000055411267010174400227440ustar00rootroot00000000000000package container import ( "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/local" ) // containerRouter is a router to talk with the container controller type containerRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new container router func NewRouter(b Backend) router.Router { r := &containerRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routers to the container controller func (r *containerRouter) Routes() []router.Route { return r.routes } // initRoutes initializes the routes in container router func (r *containerRouter) initRoutes() { r.routes = []router.Route{ // HEAD local.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), // GET local.NewGetRoute("/containers/json", r.getContainersJSON), local.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), local.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), local.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), local.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), local.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs), local.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats), local.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), local.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), local.NewGetRoute("/containers/{name:.*}/archive", 
r.getContainersArchive), // POST local.NewPostRoute("/containers/create", r.postContainersCreate), local.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), local.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), local.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), local.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), local.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), local.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), local.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), local.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), local.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), local.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), local.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), local.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), local.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), local.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), local.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), // PUT local.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), // DELETE local.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), } } docker-1.10.3/api/server/router/container/container_routes.go000066400000000000000000000337141267010174400243500ustar00rootroot00000000000000package container import ( "encoding/json" "fmt" "io" "net/http" "strconv" "strings" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/daemon" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/term" "github.com/docker/docker/runconfig" 
"github.com/docker/docker/utils" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" timetypes "github.com/docker/engine-api/types/time" "golang.org/x/net/context" "golang.org/x/net/websocket" ) func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } config := &daemon.ContainersConfig{ All: httputils.BoolValue(r, "all"), Size: httputils.BoolValue(r, "size"), Since: r.Form.Get("since"), Before: r.Form.Get("before"), Filters: r.Form.Get("filters"), } if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { limit, err := strconv.Atoi(tmpLimit) if err != nil { return err } config.Limit = limit } containers, err := s.backend.Containers(config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, containers) } func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } stream := httputils.BoolValueOrDefault(r, "stream", true) var out io.Writer if !stream { w.Header().Set("Content-Type", "application/json") out = w } else { wf := ioutils.NewWriteFlusher(w) out = wf defer wf.Close() } var closeNotifier <-chan bool if notifier, ok := w.(http.CloseNotifier); ok { closeNotifier = notifier.CloseNotify() } config := &daemon.ContainerStatsConfig{ Stream: stream, OutStream: out, Stop: closeNotifier, Version: httputils.VersionFromContext(ctx), } return s.backend.ContainerStats(vars["name"], config) } func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } // Args are validated before the stream starts because when it starts we're // sending HTTP 200 by writing an empty chunk of data to tell the client that // daemon is 
going to stream. By sending this initial HTTP 200 we can't report // any error after the stream starts (i.e. container not found, wrong parameters) // with the appropriate status code. stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") if !(stdout || stderr) { return fmt.Errorf("Bad parameters: you must choose at least one stream") } var since time.Time if r.Form.Get("since") != "" { s, n, err := timetypes.ParseTimestamps(r.Form.Get("since"), 0) if err != nil { return err } since = time.Unix(s, n) } var closeNotifier <-chan bool if notifier, ok := w.(http.CloseNotifier); ok { closeNotifier = notifier.CloseNotify() } containerName := vars["name"] if !s.backend.Exists(containerName) { return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) } // write an empty chunk of data (this is to ensure that the // HTTP Response is sent immediately, even if the container has // not yet produced any data) w.WriteHeader(http.StatusOK) if flusher, ok := w.(http.Flusher); ok { flusher.Flush() } output := ioutils.NewWriteFlusher(w) defer output.Close() logsConfig := &daemon.ContainerLogsConfig{ Follow: httputils.BoolValue(r, "follow"), Timestamps: httputils.BoolValue(r, "timestamps"), Since: since, Tail: r.Form.Get("tail"), UseStdout: stdout, UseStderr: stderr, OutStream: output, Stop: closeNotifier, } if err := s.backend.ContainerLogs(containerName, logsConfig); err != nil { // The client may be expecting all of the data we're sending to // be multiplexed, so send it through OutStream, which will // have been set up to handle that if needed. 
fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %s\n", utils.GetErrorMessage(err)) } return nil } func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { return s.backend.ContainerExport(vars["name"], w) } func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { // If contentLength is -1, we can assumed chunked encoding // or more technically that the length is unknown // https://golang.org/src/pkg/net/http/request.go#L139 // net/http otherwise seems to swallow any headers related to chunked encoding // including r.TransferEncoding // allow a nil body for backwards compatibility var hostConfig *container.HostConfig if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { if err := httputils.CheckForJSON(r); err != nil { return err } c, err := runconfig.DecodeHostConfig(r.Body) if err != nil { return err } hostConfig = c } if err := s.backend.ContainerStart(vars["name"], hostConfig); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } seconds, _ := strconv.Atoi(r.Form.Get("t")) if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } var sig syscall.Signal name := vars["name"] // If we have a signal, look at it. 
Otherwise, do nothing if sigStr := r.Form.Get("signal"); sigStr != "" { var err error if sig, err = signal.ParseSignal(sigStr); err != nil { return err } } if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { theErr, isDerr := err.(errcode.ErrorCoder) isStopped := isDerr && theErr.ErrorCode() == derr.ErrorCodeNotRunning // Return error that's not caused because the container is stopped. // Return error if the container is not running and the api is >= 1.20 // to keep backwards compatibility. version := httputils.VersionFromContext(ctx) if version.GreaterThanOrEqualTo("1.20") || !isStopped { return fmt.Errorf("Cannot kill container %s: %v", name, utils.GetErrorMessage(err)) } } w.WriteHeader(http.StatusNoContent) return nil } func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } timeout, _ := strconv.Atoi(r.Form.Get("t")) if err := s.backend.ContainerRestart(vars["name"], timeout); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } if err := s.backend.ContainerPause(vars["name"]); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } if err := s.backend.ContainerUnpause(vars["name"]); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) if err != 
nil { return err } return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ StatusCode: status, }) } func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { changes, err := s.backend.ContainerChanges(vars["name"]) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, changes) } func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, procList) } func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } name := vars["name"] newName := r.Form.Get("name") if err := s.backend.ContainerRename(name, newName); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } if err := httputils.CheckForJSON(r); err != nil { return err } var updateConfig container.UpdateConfig decoder := json.NewDecoder(r.Body) if err := decoder.Decode(&updateConfig); err != nil { return err } hostConfig := &container.HostConfig{ Resources: updateConfig.Resources, } name := vars["name"] warnings, err := s.backend.ContainerUpdate(name, hostConfig) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &types.ContainerUpdateResponse{ Warnings: warnings, }) } func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := 
httputils.ParseForm(r); err != nil { return err } if err := httputils.CheckForJSON(r); err != nil { return err } name := r.Form.Get("name") config, hostConfig, networkingConfig, err := runconfig.DecodeContainerConfig(r.Body) if err != nil { return err } version := httputils.VersionFromContext(ctx) adjustCPUShares := version.LessThan("1.19") ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ Name: name, Config: config, HostConfig: hostConfig, NetworkingConfig: networkingConfig, AdjustCPUShares: adjustCPUShares, }) if err != nil { return err } return httputils.WriteJSON(w, http.StatusCreated, ccr) } func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } name := vars["name"] config := &types.ContainerRmConfig{ ForceRemove: httputils.BoolValue(r, "force"), RemoveVolume: httputils.BoolValue(r, "v"), RemoveLink: httputils.BoolValue(r, "link"), } if err := s.backend.ContainerRm(name, config); err != nil { // Force a 404 for the empty string if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { return fmt.Errorf("no such container: \"\"") } return err } w.WriteHeader(http.StatusNoContent) return nil } func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } height, err := strconv.Atoi(r.Form.Get("h")) if err != nil { return err } width, err := strconv.Atoi(r.Form.Get("w")) if err != nil { return err } return s.backend.ContainerResize(vars["name"], height, width) } func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { err := httputils.ParseForm(r) if err != nil { return err } containerName := vars["name"] _, upgrade := r.Header["Upgrade"] keys := []byte{} detachKeys := 
r.FormValue("detachKeys") if detachKeys != "" { keys, err = term.ToBytes(detachKeys) if err != nil { logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) } } attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{ Hijacker: w.(http.Hijacker), Upgrade: upgrade, UseStdin: httputils.BoolValue(r, "stdin"), UseStdout: httputils.BoolValue(r, "stdout"), UseStderr: httputils.BoolValue(r, "stderr"), Logs: httputils.BoolValue(r, "logs"), Stream: httputils.BoolValue(r, "stream"), DetachKeys: keys, } return s.backend.ContainerAttachWithLogs(containerName, attachWithLogsConfig) } func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } containerName := vars["name"] if !s.backend.Exists(containerName) { return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) } var keys []byte var err error detachKeys := r.FormValue("detachKeys") if detachKeys != "" { keys, err = term.ToBytes(detachKeys) if err != nil { logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) } } h := websocket.Handler(func(ws *websocket.Conn) { defer ws.Close() wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{ InStream: ws, OutStream: ws, ErrStream: ws, Logs: httputils.BoolValue(r, "logs"), Stream: httputils.BoolValue(r, "stream"), DetachKeys: keys, } if err := s.backend.ContainerWsAttachWithLogs(containerName, wsAttachWithLogsConfig); err != nil { logrus.Errorf("Error attaching websocket: %s", utils.GetErrorMessage(err)) } }) ws := websocket.Server{Handler: h, Handshake: nil} ws.ServeHTTP(w, r) return nil } docker-1.10.3/api/server/router/container/copy.go000066400000000000000000000054131267010174400217320ustar00rootroot00000000000000package container import ( "encoding/base64" "encoding/json" "fmt" "io" "net/http" "os" "strings" 
"github.com/docker/docker/api/server/httputils" "github.com/docker/engine-api/types" "golang.org/x/net/context" ) // postContainersCopy is deprecated in favor of getContainersArchive. func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.CheckForJSON(r); err != nil { return err } cfg := types.CopyConfig{} if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { return err } if cfg.Resource == "" { return fmt.Errorf("Path cannot be empty") } data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) if err != nil { if strings.Contains(strings.ToLower(err.Error()), "no such container") { w.WriteHeader(http.StatusNotFound) return nil } if os.IsNotExist(err) { return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) } return err } defer data.Close() w.Header().Set("Content-Type", "application/x-tar") if _, err := io.Copy(w, data); err != nil { return err } return nil } // // Encode the stat to JSON, base64 encode, and place in a header. 
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { statJSON, err := json.Marshal(stat) if err != nil { return err } header.Set( "X-Docker-Container-Path-Stat", base64.StdEncoding.EncodeToString(statJSON), ) return nil } func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { v, err := httputils.ArchiveFormValues(r, vars) if err != nil { return err } stat, err := s.backend.ContainerStatPath(v.Name, v.Path) if err != nil { return err } return setContainerPathStatHeader(stat, w.Header()) } func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { v, err := httputils.ArchiveFormValues(r, vars) if err != nil { return err } tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) if err != nil { return err } defer tarArchive.Close() if err := setContainerPathStatHeader(stat, w.Header()); err != nil { return err } w.Header().Set("Content-Type", "application/x-tar") _, err = io.Copy(w, tarArchive) return err } func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { v, err := httputils.ArchiveFormValues(r, vars) if err != nil { return err } noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) } docker-1.10.3/api/server/router/container/exec.go000066400000000000000000000072541267010174400217110ustar00rootroot00000000000000package container import ( "encoding/json" "fmt" "io" "net/http" "strconv" "github.com/Sirupsen/logrus" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/utils" "github.com/docker/engine-api/types" "golang.org/x/net/context" ) func (s *containerRouter) getExecByID(ctx context.Context, w 
http.ResponseWriter, r *http.Request, vars map[string]string) error { eConfig, err := s.backend.ContainerExecInspect(vars["id"]) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, eConfig) } func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } if err := httputils.CheckForJSON(r); err != nil { return err } name := vars["name"] execConfig := &types.ExecConfig{} if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { return err } execConfig.Container = name if len(execConfig.Cmd) == 0 { return fmt.Errorf("No exec command specified") } // Register an instance of Exec in container. id, err := s.backend.ContainerExecCreate(execConfig) if err != nil { logrus.Errorf("Error setting up exec command in container %s: %s", name, utils.GetErrorMessage(err)) return err } return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ ID: id, }) } // TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) if version.GreaterThan("1.21") { if err := httputils.CheckForJSON(r); err != nil { return err } } var ( execName = vars["name"] stdin, inStream io.ReadCloser stdout, stderr, outStream io.Writer ) execStartCheck := &types.ExecStartCheck{} if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { return err } if exists, err := s.backend.ExecExists(execName); !exists { return err } if !execStartCheck.Detach { var err error // Setting up the streaming http interface. 
inStream, outStream, err = httputils.HijackConnection(w) if err != nil { return err } defer httputils.CloseStreams(inStream, outStream) if _, ok := r.Header["Upgrade"]; ok { fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") } else { fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") } stdin = inStream stdout = outStream if !execStartCheck.Tty { stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } } else { outStream = w } // Now run the user process in container. if err := s.backend.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { if execStartCheck.Detach { return err } logrus.Errorf("Error running exec in container: %v\n", utils.GetErrorMessage(err)) } return nil } func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } height, err := strconv.Atoi(r.Form.Get("h")) if err != nil { return err } width, err := strconv.Atoi(r.Form.Get("w")) if err != nil { return err } return s.backend.ContainerExecResize(vars["name"], height, width) } docker-1.10.3/api/server/router/container/inspect.go000066400000000000000000000011331267010174400224200ustar00rootroot00000000000000package container import ( "net/http" "github.com/docker/docker/api/server/httputils" "golang.org/x/net/context" ) // getContainersByName inspects containers configuration and serializes it as json. 
func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { displaySize := httputils.BoolValue(r, "size") version := httputils.VersionFromContext(ctx) json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, json) } docker-1.10.3/api/server/router/local/000077500000000000000000000000001267010174400175365ustar00rootroot00000000000000docker-1.10.3/api/server/router/local/image.go000066400000000000000000000250121267010174400211470ustar00rootroot00000000000000package local import ( "encoding/base64" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "strings" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/builder/dockerfile" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/reference" "github.com/docker/docker/runconfig" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "golang.org/x/net/context" ) func (s *router) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } if err := httputils.CheckForJSON(r); err != nil { return err } cname := r.Form.Get("container") pause := httputils.BoolValue(r, "pause") version := httputils.VersionFromContext(ctx) if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { pause = true } c, _, _, err := runconfig.DecodeContainerConfig(r.Body) if err != nil && err != io.EOF { //Do not fail if body is empty. 
return err } if c == nil { c = &container.Config{} } if !s.daemon.Exists(cname) { return derr.ErrorCodeNoSuchContainer.WithArgs(cname) } newConfig, err := dockerfile.BuildFromConfig(c, r.Form["changes"]) if err != nil { return err } commitCfg := &types.ContainerCommitConfig{ Pause: pause, Repo: r.Form.Get("repo"), Tag: r.Form.Get("tag"), Author: r.Form.Get("author"), Comment: r.Form.Get("comment"), Config: newConfig, MergeConfigs: true, } imgID, err := s.daemon.Commit(cname, commitCfg) if err != nil { return err } return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ ID: string(imgID), }) } // Creates an image from Pull or from Import func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } var ( image = r.Form.Get("fromImage") repo = r.Form.Get("repo") tag = r.Form.Get("tag") message = r.Form.Get("message") ) authEncoded := r.Header.Get("X-Registry-Auth") authConfig := &types.AuthConfig{} if authEncoded != "" { authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty authConfig = &types.AuthConfig{} } } var ( err error output = ioutils.NewWriteFlusher(w) ) defer output.Close() w.Header().Set("Content-Type", "application/json") if image != "" { //pull // Special case: "pull -a" may send an image name with a // trailing :. This is ugly, but let's not break API // compatibility. image = strings.TrimSuffix(image, ":") var ref reference.Named ref, err = reference.ParseNamed(image) if err == nil { if tag != "" { // The "tag" could actually be a digest. 
var dgst digest.Digest dgst, err = digest.ParseDigest(tag) if err == nil { ref, err = reference.WithDigest(ref, dgst) } else { ref, err = reference.WithTag(ref, tag) } } if err == nil { metaHeaders := map[string][]string{} for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } err = s.daemon.PullImage(ref, metaHeaders, authConfig, output) } } // Check the error from pulling an image to make sure the request // was authorized. Modify the status if the request was // unauthorized to respond with 401 rather than 500. if err != nil && isAuthorizedError(err) { err = errcode.ErrorCodeUnauthorized.WithMessage(fmt.Sprintf("Authentication is required: %s", err)) } } else { //import var newRef reference.Named if repo != "" { var err error newRef, err = reference.ParseNamed(repo) if err != nil { return err } if _, isCanonical := newRef.(reference.Canonical); isCanonical { return errors.New("cannot import digest reference") } if tag != "" { newRef, err = reference.WithTag(newRef, tag) if err != nil { return err } } } src := r.Form.Get("fromSrc") // 'err' MUST NOT be defined within this block, we need any error // generated from the download to be available to the output // stream processing below var newConfig *container.Config newConfig, err = dockerfile.BuildFromConfig(&container.Config{}, r.Form["changes"]) if err != nil { return err } err = s.daemon.ImportImage(src, newRef, message, r.Body, output, newConfig) } if err != nil { if !output.Flushed() { return err } sf := streamformatter.NewJSONStreamFormatter() output.Write(sf.FormatError(err)) } return nil } func (s *router) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { metaHeaders := map[string][]string{} for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } if err := httputils.ParseForm(r); err != nil { return err } authConfig := &types.AuthConfig{} authEncoded := 
r.Header.Get("X-Registry-Auth") if authEncoded != "" { // the new format is to handle the authConfig as a header authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { // to increase compatibility to existing api it is defaulting to be empty authConfig = &types.AuthConfig{} } } else { // the old format is supported for compatibility if there was no authConfig header if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) } } ref, err := reference.ParseNamed(vars["name"]) if err != nil { return err } tag := r.Form.Get("tag") if tag != "" { // Push by digest is not supported, so only tags are supported. ref, err = reference.WithTag(ref, tag) if err != nil { return err } } output := ioutils.NewWriteFlusher(w) defer output.Close() w.Header().Set("Content-Type", "application/json") if err := s.daemon.PushImage(ref, metaHeaders, authConfig, output); err != nil { if !output.Flushed() { return err } sf := streamformatter.NewJSONStreamFormatter() output.Write(sf.FormatError(err)) } return nil } func (s *router) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } w.Header().Set("Content-Type", "application/x-tar") output := ioutils.NewWriteFlusher(w) defer output.Close() var names []string if name, ok := vars["name"]; ok { names = []string{name} } else { names = r.Form["names"] } if err := s.daemon.ExportImage(names, output); err != nil { if !output.Flushed() { return err } sf := streamformatter.NewJSONStreamFormatter() output.Write(sf.FormatError(err)) } return nil } func (s *router) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { return s.daemon.LoadImage(r.Body, w) } func (s *router) deleteImages(ctx context.Context, w 
http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } name := vars["name"] if strings.TrimSpace(name) == "" { return fmt.Errorf("image name cannot be blank") } force := httputils.BoolValue(r, "force") prune := !httputils.BoolValue(r, "noprune") list, err := s.daemon.ImageDelete(name, force, prune) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, list) } func (s *router) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { imageInspect, err := s.daemon.LookupImage(vars["name"]) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, imageInspect) } func (s *router) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } // FIXME: The filter parameter could just be a match filter images, err := s.daemon.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, images) } func (s *router) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { name := vars["name"] history, err := s.daemon.ImageHistory(name) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, history) } func (s *router) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } repo := r.Form.Get("repo") tag := r.Form.Get("tag") newTag, err := reference.WithName(repo) if err != nil { return err } if tag != "" { if newTag, err = reference.WithTag(newTag, tag); err != nil { return err } } if err := s.daemon.TagImage(newTag, vars["name"]); err != nil { return err } w.WriteHeader(http.StatusCreated) return nil } func (s *router) getImagesSearch(ctx 
context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } var ( config *types.AuthConfig authEncoded = r.Header.Get("X-Registry-Auth") headers = map[string][]string{} ) if authEncoded != "" { authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJSON).Decode(&config); err != nil { // for a search it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty config = &types.AuthConfig{} } } for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { headers[k] = v } } query, err := s.daemon.SearchRegistryForImages(r.Form.Get("term"), config, headers) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, query.Results) } func isAuthorizedError(err error) bool { if urlError, ok := err.(*url.Error); ok { err = urlError.Err } if dError, ok := err.(errcode.Error); ok { if dError.ErrorCode() == errcode.ErrorCodeUnauthorized { return true } } return false } docker-1.10.3/api/server/router/local/local.go000066400000000000000000000063441267010174400211660ustar00rootroot00000000000000package local import ( "github.com/docker/docker/api/server/httputils" dkrouter "github.com/docker/docker/api/server/router" "github.com/docker/docker/daemon" ) // router is a docker router that talks with the local docker daemon. type router struct { daemon *daemon.Daemon routes []dkrouter.Route } // localRoute defines an individual API route to connect with the docker daemon. // It implements router.Route. type localRoute struct { method string path string handler httputils.APIFunc } // Handler returns the APIFunc to let the server wrap it in middlewares func (l localRoute) Handler() httputils.APIFunc { return l.handler } // Method returns the http method that the route responds to. 
func (l localRoute) Method() string { return l.method } // Path returns the subpath where the route responds to. func (l localRoute) Path() string { return l.path } // NewRoute initializes a new local router for the reouter func NewRoute(method, path string, handler httputils.APIFunc) dkrouter.Route { return localRoute{method, path, handler} } // NewGetRoute initializes a new route with the http method GET. func NewGetRoute(path string, handler httputils.APIFunc) dkrouter.Route { return NewRoute("GET", path, handler) } // NewPostRoute initializes a new route with the http method POST. func NewPostRoute(path string, handler httputils.APIFunc) dkrouter.Route { return NewRoute("POST", path, handler) } // NewPutRoute initializes a new route with the http method PUT. func NewPutRoute(path string, handler httputils.APIFunc) dkrouter.Route { return NewRoute("PUT", path, handler) } // NewDeleteRoute initializes a new route with the http method DELETE. func NewDeleteRoute(path string, handler httputils.APIFunc) dkrouter.Route { return NewRoute("DELETE", path, handler) } // NewOptionsRoute initializes a new route with the http method OPTIONS func NewOptionsRoute(path string, handler httputils.APIFunc) dkrouter.Route { return NewRoute("OPTIONS", path, handler) } // NewHeadRoute initializes a new route with the http method HEAD. func NewHeadRoute(path string, handler httputils.APIFunc) dkrouter.Route { return NewRoute("HEAD", path, handler) } // NewRouter initializes a local router with a new daemon. func NewRouter(daemon *daemon.Daemon) dkrouter.Router { r := &router{ daemon: daemon, } r.initRoutes() return r } // Routes returns the list of routes registered in the router. 
func (r *router) Routes() []dkrouter.Route { return r.routes } // initRoutes initializes the routes in this router func (r *router) initRoutes() { r.routes = []dkrouter.Route{ // OPTIONS // GET NewGetRoute("/images/json", r.getImagesJSON), NewGetRoute("/images/search", r.getImagesSearch), NewGetRoute("/images/get", r.getImagesGet), NewGetRoute("/images/{name:.*}/get", r.getImagesGet), NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), NewGetRoute("/images/{name:.*}/json", r.getImagesByName), // POST NewPostRoute("/commit", r.postCommit), NewPostRoute("/images/create", r.postImagesCreate), NewPostRoute("/images/load", r.postImagesLoad), NewPostRoute("/images/{name:.*}/push", r.postImagesPush), NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), // DELETE NewDeleteRoute("/images/{name:.*}", r.deleteImages), } } docker-1.10.3/api/server/router/network/000077500000000000000000000000001267010174400201355ustar00rootroot00000000000000docker-1.10.3/api/server/router/network/backend.go000066400000000000000000000015311267010174400220530ustar00rootroot00000000000000package network import ( "github.com/docker/engine-api/types/network" "github.com/docker/libnetwork" ) // Backend is all the methods that need to be implemented to provide // network specific functionality type Backend interface { FindNetwork(idName string) (libnetwork.Network, error) GetNetwork(idName string, by int) (libnetwork.Network, error) GetNetworksByID(partialID string) []libnetwork.Network GetAllNetworks() []libnetwork.Network CreateNetwork(name, driver string, ipam network.IPAM, options map[string]string, internal bool) (libnetwork.Network, error) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error NetworkControllerEnabled() bool DeleteNetwork(name string) error } 
docker-1.10.3/api/server/router/network/filter.go000066400000000000000000000051551267010174400217570ustar00rootroot00000000000000package network import ( "fmt" "regexp" "strings" "github.com/docker/docker/runconfig" "github.com/docker/engine-api/types/filters" "github.com/docker/libnetwork" ) type filterHandler func([]libnetwork.Network, string) ([]libnetwork.Network, error) var ( // supportedFilters predefined some supported filter handler function supportedFilters = map[string]filterHandler{ "type": filterNetworkByType, "name": filterNetworkByName, "id": filterNetworkByID, } // acceptFilters is an acceptable filter flag list // generated for validation. e.g. // acceptedFilters = map[string]bool{ // "type": true, // "name": true, // "id": true, // } acceptedFilters = func() map[string]bool { ret := make(map[string]bool) for k := range supportedFilters { ret[k] = true } return ret }() ) func filterNetworkByType(nws []libnetwork.Network, netType string) (retNws []libnetwork.Network, err error) { switch netType { case "builtin": for _, nw := range nws { if runconfig.IsPreDefinedNetwork(nw.Name()) { retNws = append(retNws, nw) } } case "custom": for _, nw := range nws { if !runconfig.IsPreDefinedNetwork(nw.Name()) { retNws = append(retNws, nw) } } default: return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) } return retNws, nil } func filterNetworkByName(nws []libnetwork.Network, name string) (retNws []libnetwork.Network, err error) { for _, nw := range nws { // exact match (fast path) if nw.Name() == name { retNws = append(retNws, nw) continue } // regexp match (slow path) match, err := regexp.MatchString(name, nw.Name()) if err != nil || !match { continue } else { retNws = append(retNws, nw) } } return retNws, nil } func filterNetworkByID(nws []libnetwork.Network, id string) (retNws []libnetwork.Network, err error) { for _, nw := range nws { if strings.HasPrefix(nw.ID(), id) { retNws = append(retNws, nw) } } return retNws, nil } // filterAllNetworks 
filter network list according to user specified filter // and return user chosen networks func filterNetworks(nws []libnetwork.Network, filter filters.Args) ([]libnetwork.Network, error) { // if filter is empty, return original network list if filter.Len() == 0 { return nws, nil } var displayNet []libnetwork.Network for fkey, fhandler := range supportedFilters { errFilter := filter.WalkValues(fkey, func(fval string) error { passList, err := fhandler(nws, fval) if err != nil { return err } displayNet = append(displayNet, passList...) return nil }) if errFilter != nil { return nil, errFilter } } return displayNet, nil } docker-1.10.3/api/server/router/network/network.go000066400000000000000000000033141267010174400221560ustar00rootroot00000000000000package network import ( "net/http" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/local" "github.com/docker/docker/errors" "golang.org/x/net/context" ) // networkRouter is a router to talk with the network controller type networkRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new network router func NewRouter(b Backend) router.Router { r := &networkRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routes to the network controller func (r *networkRouter) Routes() []router.Route { return r.routes } func (r *networkRouter) initRoutes() { r.routes = []router.Route{ // GET local.NewGetRoute("/networks", r.controllerEnabledMiddleware(r.getNetworksList)), local.NewGetRoute("/networks/{id:.*}", r.controllerEnabledMiddleware(r.getNetwork)), // POST local.NewPostRoute("/networks/create", r.controllerEnabledMiddleware(r.postNetworkCreate)), local.NewPostRoute("/networks/{id:.*}/connect", r.controllerEnabledMiddleware(r.postNetworkConnect)), local.NewPostRoute("/networks/{id:.*}/disconnect", r.controllerEnabledMiddleware(r.postNetworkDisconnect)), // DELETE 
local.NewDeleteRoute("/networks/{id:.*}", r.controllerEnabledMiddleware(r.deleteNetwork)), } } func (r *networkRouter) controllerEnabledMiddleware(handler httputils.APIFunc) httputils.APIFunc { if r.backend.NetworkControllerEnabled() { return handler } return networkControllerDisabled } func networkControllerDisabled(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { return errors.ErrorNetworkControllerNotEnabled.WithArgs() } docker-1.10.3/api/server/router/network/network_routes.go000066400000000000000000000146041267010174400235630ustar00rootroot00000000000000package network import ( "encoding/json" "fmt" "net/http" "golang.org/x/net/context" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/daemon" "github.com/docker/docker/runconfig" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" "github.com/docker/engine-api/types/network" "github.com/docker/libnetwork" ) func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } filter := r.Form.Get("filters") netFilters, err := filters.FromParam(filter) if err != nil { return err } if netFilters.Len() != 0 { if err := netFilters.Validate(acceptedFilters); err != nil { return err } } list := []*types.NetworkResource{} nwList := n.backend.GetAllNetworks() displayable, err := filterNetworks(nwList, netFilters) if err != nil { return err } for _, nw := range displayable { list = append(list, buildNetworkResource(nw)) } return httputils.WriteJSON(w, http.StatusOK, list) } func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } nw, err := n.backend.FindNetwork(vars["id"]) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, buildNetworkResource(nw)) } 
func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var create types.NetworkCreate var warning string if err := httputils.ParseForm(r); err != nil { return err } if err := httputils.CheckForJSON(r); err != nil { return err } if err := json.NewDecoder(r.Body).Decode(&create); err != nil { return err } if runconfig.IsPreDefinedNetwork(create.Name) { return httputils.WriteJSON(w, http.StatusForbidden, fmt.Sprintf("%s is a pre-defined network and cannot be created", create.Name)) } nw, err := n.backend.GetNetwork(create.Name, daemon.NetworkByName) if _, ok := err.(libnetwork.ErrNoSuchNetwork); err != nil && !ok { return err } if nw != nil { if create.CheckDuplicate { return libnetwork.NetworkNameError(create.Name) } warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) } nw, err = n.backend.CreateNetwork(create.Name, create.Driver, create.IPAM, create.Options, create.Internal) if err != nil { return err } return httputils.WriteJSON(w, http.StatusCreated, &types.NetworkCreateResponse{ ID: nw.ID(), Warning: warning, }) } func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var connect types.NetworkConnect if err := httputils.ParseForm(r); err != nil { return err } if err := httputils.CheckForJSON(r); err != nil { return err } if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { return err } nw, err := n.backend.FindNetwork(vars["id"]) if err != nil { return err } return n.backend.ConnectContainerToNetwork(connect.Container, nw.Name(), connect.EndpointConfig) } func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var disconnect types.NetworkDisconnect if err := httputils.ParseForm(r); err != nil { return err } if err := httputils.CheckForJSON(r); err != nil { return err } if 
err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { return err } nw, err := n.backend.FindNetwork(vars["id"]) if err != nil { return err } return n.backend.DisconnectContainerFromNetwork(disconnect.Container, nw, disconnect.Force) } func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { return n.backend.DeleteNetwork(vars["id"]) } func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { r := &types.NetworkResource{} if nw == nil { return r } r.Name = nw.Name() r.ID = nw.ID() r.Scope = nw.Info().Scope() r.Driver = nw.Type() r.Options = nw.Info().DriverOptions() r.Containers = make(map[string]types.EndpointResource) buildIpamResources(r, nw) epl := nw.Endpoints() for _, e := range epl { ei := e.Info() if ei == nil { continue } sb := ei.Sandbox() if sb == nil { continue } r.Containers[sb.ContainerID()] = buildEndpointResource(e) } return r } func buildIpamResources(r *types.NetworkResource, nw libnetwork.Network) { id, opts, ipv4conf, ipv6conf := nw.Info().IpamConfig() ipv4Info, ipv6Info := nw.Info().IpamInfo() r.IPAM.Driver = id r.IPAM.Options = opts r.IPAM.Config = []network.IPAMConfig{} for _, ip4 := range ipv4conf { if ip4.PreferredPool == "" { continue } iData := network.IPAMConfig{} iData.Subnet = ip4.PreferredPool iData.IPRange = ip4.SubPool iData.Gateway = ip4.Gateway iData.AuxAddress = ip4.AuxAddresses r.IPAM.Config = append(r.IPAM.Config, iData) } if len(r.IPAM.Config) == 0 { for _, ip4Info := range ipv4Info { iData := network.IPAMConfig{} iData.Subnet = ip4Info.IPAMData.Pool.String() iData.Gateway = ip4Info.IPAMData.Gateway.String() r.IPAM.Config = append(r.IPAM.Config, iData) } } hasIpv6Conf := false for _, ip6 := range ipv6conf { if ip6.PreferredPool == "" { continue } hasIpv6Conf = true iData := network.IPAMConfig{} iData.Subnet = ip6.PreferredPool iData.IPRange = ip6.SubPool iData.Gateway = ip6.Gateway iData.AuxAddress = ip6.AuxAddresses 
r.IPAM.Config = append(r.IPAM.Config, iData) } if !hasIpv6Conf { for _, ip6Info := range ipv6Info { iData := network.IPAMConfig{} iData.Subnet = ip6Info.IPAMData.Pool.String() iData.Gateway = ip6Info.IPAMData.Gateway.String() r.IPAM.Config = append(r.IPAM.Config, iData) } } } func buildEndpointResource(e libnetwork.Endpoint) types.EndpointResource { er := types.EndpointResource{} if e == nil { return er } er.EndpointID = e.ID() er.Name = e.Name() ei := e.Info() if ei == nil { return er } if iface := ei.Iface(); iface != nil { if mac := iface.MacAddress(); mac != nil { er.MacAddress = mac.String() } if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { er.IPv4Address = ip.String() } if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { er.IPv6Address = ipv6.String() } } return er } docker-1.10.3/api/server/router/router.go000066400000000000000000000010361267010174400203130ustar00rootroot00000000000000package router import "github.com/docker/docker/api/server/httputils" // Router defines an interface to specify a group of routes to add the the docker server. type Router interface { Routes() []Route } // Route defines an individual API route in the docker server. type Route interface { // Handler returns the raw function to create the http handler. Handler() httputils.APIFunc // Method returns the http method that the route responds to. Method() string // Path returns the subpath where the route responds to. Path() string } docker-1.10.3/api/server/router/system/000077500000000000000000000000001267010174400177705ustar00rootroot00000000000000docker-1.10.3/api/server/router/system/backend.go000066400000000000000000000010551267010174400217070ustar00rootroot00000000000000package system import ( "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/events" "github.com/docker/engine-api/types/filters" ) // Backend is the methods that need to be implemented to provide // system specific functionality. 
type Backend interface { SystemInfo() (*types.Info, error) SystemVersion() types.Version SubscribeToEvents(since, sinceNano int64, ef filters.Args) ([]events.Message, chan interface{}) UnsubscribeFromEvents(chan interface{}) AuthenticateToRegistry(authConfig *types.AuthConfig) (string, error) } docker-1.10.3/api/server/router/system/system.go000066400000000000000000000016531267010174400216500ustar00rootroot00000000000000package system import ( "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/local" ) // systemRouter is a Router that provides information about // the Docker system overall. It gathers information about // host, daemon and container events. type systemRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new systemRouter func NewRouter(b Backend) router.Router { r := &systemRouter{ backend: b, } r.routes = []router.Route{ local.NewOptionsRoute("/{anyroute:.*}", optionsHandler), local.NewGetRoute("/_ping", pingHandler), local.NewGetRoute("/events", r.getEvents), local.NewGetRoute("/info", r.getInfo), local.NewGetRoute("/version", r.getVersion), local.NewPostRoute("/auth", r.postAuth), } return r } // Routes return all the API routes dedicated to the docker system. 
func (s *systemRouter) Routes() []router.Route { return s.routes } docker-1.10.3/api/server/router/system/system_routes.go000066400000000000000000000065031267010174400232500ustar00rootroot00000000000000package system import ( "encoding/json" "net/http" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/events" "github.com/docker/engine-api/types/filters" timetypes "github.com/docker/engine-api/types/time" "golang.org/x/net/context" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info, err := s.backend.SystemInfo() if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() info.APIVersion = api.DefaultVersion.String() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, sinceNano, err := timetypes.ParseTimestamps(r.Form.Get("since"), -1) if err != nil { return err } until, untilNano, err := timetypes.ParseTimestamps(r.Form.Get("until"), -1) if err != nil { return err } timer := time.NewTimer(0) timer.Stop() if until > 0 || untilNano > 0 { dur := time.Unix(until, untilNano).Sub(time.Now()) timer = time.NewTimer(dur) } ef, err := 
filters.FromParam(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") // This is to ensure that the HTTP status code is sent immediately, // so that it will not block the receiver. w.WriteHeader(http.StatusOK) if flusher, ok := w.(http.Flusher); ok { flusher.Flush() } output := ioutils.NewWriteFlusher(w) defer output.Close() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, sinceNano, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } var closeNotify <-chan bool if closeNotifier, ok := w.(http.CloseNotifier); ok { closeNotify = closeNotifier.CloseNotify() } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timer.C: return nil case <-closeNotify: logrus.Debug("Client disconnected, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, err := s.backend.AuthenticateToRegistry(config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ Status: status, }) } docker-1.10.3/api/server/router/volume/000077500000000000000000000000001267010174400177535ustar00rootroot00000000000000docker-1.10.3/api/server/router/volume/backend.go000066400000000000000000000007271267010174400216770ustar00rootroot00000000000000package volume import ( // TODO return types need to be refactored into pkg "github.com/docker/engine-api/types" ) // Backend is the methods that need to be implemented to provide // volume specific functionality type Backend interface { Volumes(filter string) ([]*types.Volume, []string, error) 
VolumeInspect(name string) (*types.Volume, error) VolumeCreate(name, driverName string, opts map[string]string) (*types.Volume, error) VolumeRm(name string) error } docker-1.10.3/api/server/router/volume/volume.go000066400000000000000000000015701267010174400216140ustar00rootroot00000000000000package volume import ( "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/local" ) // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new volumeRouter func NewRouter(b Backend) router.Router { r := &volumeRouter{ backend: b, } r.initRoutes() return r } //Routes returns the available routers to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET local.NewGetRoute("/volumes", r.getVolumesList), local.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST local.NewPostRoute("/volumes/create", r.postVolumesCreate), // DELETE local.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } } docker-1.10.3/api/server/router/volume/volume_routes.go000066400000000000000000000034061267010174400232150ustar00rootroot00000000000000package volume import ( "encoding/json" "net/http" "github.com/docker/docker/api/server/httputils" "github.com/docker/engine-api/types" "golang.org/x/net/context" ) func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes, Warnings: warnings}) } func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := 
httputils.ParseForm(r); err != nil { return err } volume, err := v.backend.VolumeInspect(vars["name"]) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, volume) } func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } if err := httputils.CheckForJSON(r); err != nil { return err } var req types.VolumeCreateRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return err } volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts) if err != nil { return err } return httputils.WriteJSON(w, http.StatusCreated, volume) } func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } if err := v.backend.VolumeRm(vars["name"]); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } docker-1.10.3/api/server/router_swapper.go000066400000000000000000000011441267010174400205340ustar00rootroot00000000000000package server import ( "net/http" "sync" "github.com/gorilla/mux" ) // routerSwapper is an http.Handler that allow you to swap // mux routers. type routerSwapper struct { mu sync.Mutex router *mux.Router } // Swap changes the old router with the new one. func (rs *routerSwapper) Swap(newRouter *mux.Router) { rs.mu.Lock() rs.router = newRouter rs.mu.Unlock() } // ServeHTTP makes the routerSwapper to implement the http.Handler interface. 
func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { rs.mu.Lock() router := rs.router rs.mu.Unlock() router.ServeHTTP(w, r) } docker-1.10.3/api/server/server.go000066400000000000000000000162151267010174400167660ustar00rootroot00000000000000package server import ( "crypto/tls" "net" "net/http" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/server/router/container" "github.com/docker/docker/api/server/router/local" "github.com/docker/docker/api/server/router/network" "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" "github.com/docker/docker/daemon" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/utils" "github.com/docker/go-connections/sockets" "github.com/gorilla/mux" "golang.org/x/net/context" ) // versionMatcher defines a variable matcher to be parsed by the router // when a request is about to be served. const versionMatcher = "/v{version:[0-9.]+}" // Config provides the configuration for the API server type Config struct { Logging bool EnableCors bool CorsHeaders string AuthorizationPluginNames []string Version string SocketGroup string TLSConfig *tls.Config Addrs []Addr } // Server contains instance details for the server type Server struct { cfg *Config servers []*HTTPServer routers []router.Router authZPlugins []authorization.Plugin routerSwapper *routerSwapper } // Addr contains string representation of address and its protocol (tcp, unix...). type Addr struct { Proto string Addr string } // New returns a new instance of the server based on the specified configuration. // It allocates resources which will be needed for ServeAPI(ports, unix-sockets). 
func New(cfg *Config) (*Server, error) { s := &Server{ cfg: cfg, } for _, addr := range cfg.Addrs { srv, err := s.newServer(addr.Proto, addr.Addr) if err != nil { return nil, err } logrus.Debugf("Server created for HTTP on %s (%s)", addr.Proto, addr.Addr) s.servers = append(s.servers, srv...) } return s, nil } // Close closes servers and thus stop receiving requests func (s *Server) Close() { for _, srv := range s.servers { if err := srv.Close(); err != nil { logrus.Error(err) } } } // serveAPI loops through all initialized servers and spawns goroutine // with Server method for each. It sets createMux() as Handler also. func (s *Server) serveAPI() error { s.initRouterSwapper() var chErrors = make(chan error, len(s.servers)) for _, srv := range s.servers { srv.srv.Handler = s.routerSwapper go func(srv *HTTPServer) { var err error logrus.Infof("API listen on %s", srv.l.Addr()) if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { err = nil } chErrors <- err }(srv) } for i := 0; i < len(s.servers); i++ { err := <-chErrors if err != nil { return err } } return nil } // HTTPServer contains an instance of http server and the listener. // srv *http.Server, contains configuration to create a http server and a mux router with all api end points. // l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. type HTTPServer struct { srv *http.Server l net.Listener } // Serve starts listening for inbound requests. func (s *HTTPServer) Serve() error { return s.srv.Serve(s.l) } // Close closes the HTTPServer from listening for the inbound requests. 
func (s *HTTPServer) Close() error { return s.l.Close() } func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) w.Header().Add("Access-Control-Allow-Origin", corsHeaders) w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") } func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) { if s.cfg.TLSConfig == nil || s.cfg.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert { logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") } if l, err = sockets.NewTCPSocket(addr, s.cfg.TLSConfig); err != nil { return nil, err } if err := allocateDaemonPort(addr); err != nil { return nil, err } return } func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { // log the handler call logrus.Debugf("Calling %s %s", r.Method, r.URL.Path) // Define the context that we'll pass around to share info // like the docker-request-id. // // The 'context' will be used for global data that should // apply to all requests. Data that is specific to the // immediate function being called should still be passed // as 'args' on the function call. ctx := context.Background() handlerFunc := s.handleWithGlobalMiddlewares(handler) vars := mux.Vars(r) if vars == nil { vars = make(map[string]string) } if err := handlerFunc(ctx, w, r, vars); err != nil { logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.URL.Path, utils.GetErrorMessage(err)) httputils.WriteError(w, err) } } } // InitRouters initializes a list of routers for the server. 
func (s *Server) InitRouters(d *daemon.Daemon) { s.addRouter(container.NewRouter(d)) s.addRouter(local.NewRouter(d)) s.addRouter(network.NewRouter(d)) s.addRouter(system.NewRouter(d)) s.addRouter(volume.NewRouter(d)) s.addRouter(build.NewRouter(d)) } // addRouter adds a new router to the server. func (s *Server) addRouter(r router.Router) { s.routers = append(s.routers, r) } // createMux initializes the main router the server uses. // we keep enableCors just for legacy usage, need to be removed in the future func (s *Server) createMux() *mux.Router { m := mux.NewRouter() if utils.IsDebugEnabled() { profilerSetup(m, "/debug/") } logrus.Debugf("Registering routers") for _, apiRouter := range s.routers { for _, r := range apiRouter.Routes() { f := s.makeHTTPHandler(r.Handler()) logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) m.Path(r.Path()).Methods(r.Method()).Handler(f) } } return m } // Wait blocks the server goroutine until it exits. // It sends an error message if there is any error during // the API execution. func (s *Server) Wait(waitChan chan error) { if err := s.serveAPI(); err != nil { logrus.Errorf("ServeAPI error: %v", err) waitChan <- err return } waitChan <- nil } func (s *Server) initRouterSwapper() { s.routerSwapper = &routerSwapper{ router: s.createMux(), } } // Reload reads configuration changes and modifies the // server according to those changes. // Currently, only the --debug configuration is taken into account. 
func (s *Server) Reload(config *daemon.Config) { debugEnabled := utils.IsDebugEnabled() switch { case debugEnabled && !config.Debug: // disable debug utils.DisableDebug() s.routerSwapper.Swap(s.createMux()) case config.Debug && !debugEnabled: // enable debug utils.EnableDebug() s.routerSwapper.Swap(s.createMux()) } } docker-1.10.3/api/server/server_test.go000066400000000000000000000013551267010174400200240ustar00rootroot00000000000000package server import ( "net/http" "net/http/httptest" "testing" "github.com/docker/docker/api/server/httputils" "golang.org/x/net/context" ) func TestMiddlewares(t *testing.T) { cfg := &Config{} srv := &Server{ cfg: cfg, } req, _ := http.NewRequest("GET", "/containers/json", nil) resp := httptest.NewRecorder() ctx := context.Background() localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if httputils.VersionFromContext(ctx) == "" { t.Fatalf("Expected version, got empty string") } return nil } handlerFunc := srv.handleWithGlobalMiddlewares(localHandler) if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { t.Fatal(err) } } docker-1.10.3/api/server/server_unix.go000066400000000000000000000062431267010174400200310ustar00rootroot00000000000000// +build freebsd linux package server import ( "crypto/tls" "fmt" "net" "net/http" "strconv" "github.com/Sirupsen/logrus" "github.com/docker/go-connections/sockets" "github.com/docker/libnetwork/portallocator" systemdActivation "github.com/coreos/go-systemd/activation" ) // newServer sets up the required HTTPServers and does protocol specific checking. 
// newServer does not set any muxers, you should set it later to Handler field func (s *Server) newServer(proto, addr string) ([]*HTTPServer, error) { var ( err error ls []net.Listener ) switch proto { case "fd": ls, err = listenFD(addr, s.cfg.TLSConfig) if err != nil { return nil, err } case "tcp": l, err := s.initTCPSocket(addr) if err != nil { return nil, err } ls = append(ls, l) case "unix": l, err := sockets.NewUnixSocket(addr, s.cfg.SocketGroup) if err != nil { return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) } ls = append(ls, l) default: return nil, fmt.Errorf("Invalid protocol format: %q", proto) } var res []*HTTPServer for _, l := range ls { res = append(res, &HTTPServer{ &http.Server{ Addr: addr, }, l, }) } return res, nil } func allocateDaemonPort(addr string) error { host, port, err := net.SplitHostPort(addr) if err != nil { return err } intPort, err := strconv.Atoi(port) if err != nil { return err } var hostIPs []net.IP if parsedIP := net.ParseIP(host); parsedIP != nil { hostIPs = append(hostIPs, parsedIP) } else if hostIPs, err = net.LookupIP(host); err != nil { return fmt.Errorf("failed to lookup %s address in host specification", host) } pa := portallocator.Get() for _, hostIP := range hostIPs { if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) } } return nil } // listenFD returns the specified socket activated files as a slice of // net.Listeners or all of the activated files if "*" is given. 
func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { var ( err error listeners []net.Listener ) // socket activation if tlsConfig != nil { listeners, err = systemdActivation.TLSListeners(false, tlsConfig) } else { listeners, err = systemdActivation.Listeners(false) } if err != nil { return nil, err } if len(listeners) == 0 { return nil, fmt.Errorf("No sockets found") } // default to all fds just like unix:// and tcp:// if addr == "" || addr == "*" { return listeners, nil } fdNum, err := strconv.Atoi(addr) if err != nil { return nil, fmt.Errorf("failed to parse systemd address, should be number: %v", err) } fdOffset := fdNum - 3 if len(listeners) < int(fdOffset)+1 { return nil, fmt.Errorf("Too few socket activated files passed in") } if listeners[fdOffset] == nil { return nil, fmt.Errorf("failed to listen on systemd activated file at fd %d", fdOffset+3) } for i, ls := range listeners { if i == fdOffset || ls == nil { continue } if err := ls.Close(); err != nil { logrus.Errorf("Failed to close systemd activated file at fd %d: %v", fdOffset+3, err) } } return []net.Listener{listeners[fdOffset]}, nil } docker-1.10.3/api/server/server_windows.go000066400000000000000000000012611267010174400205330ustar00rootroot00000000000000// +build windows package server import ( "errors" "net" "net/http" ) // NewServer sets up the required Server and does protocol specific checking. func (s *Server) newServer(proto, addr string) ([]*HTTPServer, error) { var ( ls []net.Listener ) switch proto { case "tcp": l, err := s.initTCPSocket(addr) if err != nil { return nil, err } ls = append(ls, l) default: return nil, errors.New("Invalid protocol format. 
Windows only supports tcp.") } var res []*HTTPServer for _, l := range ls { res = append(res, &HTTPServer{ &http.Server{ Addr: addr, }, l, }) } return res, nil } func allocateDaemonPort(addr string) error { return nil } docker-1.10.3/builder/000077500000000000000000000000001267010174400144735ustar00rootroot00000000000000docker-1.10.3/builder/builder.go000066400000000000000000000120271267010174400164520ustar00rootroot00000000000000// Package builder defines interfaces for any Docker builder to implement. // // Historically, only server-side Dockerfile interpreters existed. // This package allows for other implementations of Docker builders. package builder import ( "io" "os" "time" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" ) // Context represents a file system tree. type Context interface { // Close allows to signal that the filesystem tree won't be used anymore. // For Context implementations using a temporary directory, it is recommended to // delete the temporary directory in Close(). Close() error // Stat returns an entry corresponding to path if any. // It is recommended to return an error if path was not found. // If path is a symlink it also returns the path to the target file. Stat(path string) (string, FileInfo, error) // Open opens path from the context and returns a readable stream of it. Open(path string) (io.ReadCloser, error) // Walk walks the tree of the context with the function passed to it. Walk(root string, walkFn WalkFunc) error } // WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). type WalkFunc func(path string, fi FileInfo, err error) error // ModifiableContext represents a modifiable Context. // TODO: remove this interface once we can get rid of Remove() type ModifiableContext interface { Context // Remove deletes the entry specified by `path`. // It is usual for directory entries to delete all its subentries. 
Remove(path string) error } // FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. // TODO: remove this interface once pkg/archive exposes a walk function that Context can use. type FileInfo interface { os.FileInfo Path() string } // PathFileInfo is a convenience struct that implements the FileInfo interface. type PathFileInfo struct { os.FileInfo // FilePath holds the absolute path to the file. FilePath string // Name holds the basename for the file. FileName string } // Path returns the absolute path to the file. func (fi PathFileInfo) Path() string { return fi.FilePath } // Name returns the basename of the file. func (fi PathFileInfo) Name() string { if fi.FileName != "" { return fi.FileName } return fi.FileInfo.Name() } // Hashed defines an extra method intended for implementations of os.FileInfo. type Hashed interface { // Hash returns the hash of a file. Hash() string SetHash(string) } // HashedFileInfo is a convenient struct that augments FileInfo with a field. type HashedFileInfo struct { FileInfo // FileHash represents the hash of a file. FileHash string } // Hash returns the hash of a file. func (fi HashedFileInfo) Hash() string { return fi.FileHash } // SetHash sets the hash of a file. func (fi *HashedFileInfo) SetHash(h string) { fi.FileHash = h } // Backend abstracts calls to a Docker Daemon. type Backend interface { // TODO: use digest reference instead of name // GetImage looks up a Docker image referenced by `name`. GetImage(name string) (Image, error) // Pull tells Docker to pull image referenced by `name`. Pull(name string) (Image, error) // ContainerAttach attaches to container. ContainerAttach(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error // ContainerCreate creates a new Docker container and returns potential warnings ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) // ContainerRm removes a container specified by `id`. 
ContainerRm(name string, config *types.ContainerRmConfig) error // Commit creates a new Docker image from an existing Docker container. Commit(string, *types.ContainerCommitConfig) (string, error) // Kill stops the container execution abruptly. ContainerKill(containerID string, sig uint64) error // Start starts a new container ContainerStart(containerID string, hostConfig *container.HostConfig) error // ContainerWait stops processing until the given container is stopped. ContainerWait(containerID string, timeout time.Duration) (int, error) // ContainerUpdateCmd updates container.Path and container.Args ContainerUpdateCmd(containerID string, cmd []string) error // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container // specified by a container object. // TODO: make an Extract method instead of passing `decompress` // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used // with Context.Walk //ContainerCopy(name string, res string) (io.ReadCloser, error) // TODO: use copyBackend api BuilderCopy(containerID string, destPath string, src FileInfo, decompress bool) error } // ImageCache abstracts an image cache store. // (parent image, child runconfig) -> child image type ImageCache interface { // GetCachedImage returns a reference to a cached image whose parent equals `parent` // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. 
GetCachedImage(parentID string, cfg *container.Config) (imageID string, err error) } docker-1.10.3/builder/dockerfile/000077500000000000000000000000001267010174400166025ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/bflag.go000066400000000000000000000077001267010174400202100ustar00rootroot00000000000000package dockerfile import ( "fmt" "strings" ) // FlagType is the type of the build flag type FlagType int const ( boolType FlagType = iota stringType ) // BFlags contains all flags information for the builder type BFlags struct { Args []string // actual flags/args from cmd line flags map[string]*Flag used map[string]*Flag Err error } // Flag contains all information for a flag type Flag struct { bf *BFlags name string flagType FlagType Value string } // NewBFlags return the new BFlags struct func NewBFlags() *BFlags { return &BFlags{ flags: make(map[string]*Flag), used: make(map[string]*Flag), } } // AddBool adds a bool flag to BFlags // Note, any error will be generated when Parse() is called (see Parse). func (bf *BFlags) AddBool(name string, def bool) *Flag { flag := bf.addFlag(name, boolType) if flag == nil { return nil } if def { flag.Value = "true" } else { flag.Value = "false" } return flag } // AddString adds a string flag to BFlags // Note, any error will be generated when Parse() is called (see Parse). func (bf *BFlags) AddString(name string, def string) *Flag { flag := bf.addFlag(name, stringType) if flag == nil { return nil } flag.Value = def return flag } // addFlag is a generic func used by the other AddXXX() func // to add a new flag to the BFlags struct. // Note, any error will be generated when Parse() is called (see Parse). 
func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { if _, ok := bf.flags[name]; ok { bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) return nil } newFlag := &Flag{ bf: bf, name: name, flagType: flagType, } bf.flags[name] = newFlag return newFlag } // IsUsed checks if the flag is used func (fl *Flag) IsUsed() bool { if _, ok := fl.bf.used[fl.name]; ok { return true } return false } // IsTrue checks if a bool flag is true func (fl *Flag) IsTrue() bool { if fl.flagType != boolType { // Should never get here panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) } return fl.Value == "true" } // Parse parses and checks if the BFlags is valid. // Any error noticed during the AddXXX() funcs will be generated/returned // here. We do this because an error during AddXXX() is more like a // compile time error so it doesn't matter too much when we stop our // processing as long as we do stop it, so this allows the code // around AddXXX() to be just: // defFlag := AddString("description", "") // w/o needing to add an if-statement around each one. 
func (bf *BFlags) Parse() error { // If there was an error while defining the possible flags // go ahead and bubble it back up here since we didn't do it // earlier in the processing if bf.Err != nil { return fmt.Errorf("Error setting up flags: %s", bf.Err) } for _, arg := range bf.Args { if !strings.HasPrefix(arg, "--") { return fmt.Errorf("Arg should start with -- : %s", arg) } if arg == "--" { return nil } arg = arg[2:] value := "" index := strings.Index(arg, "=") if index >= 0 { value = arg[index+1:] arg = arg[:index] } flag, ok := bf.flags[arg] if !ok { return fmt.Errorf("Unknown flag: %s", arg) } if _, ok = bf.used[arg]; ok { return fmt.Errorf("Duplicate flag specified: %s", arg) } bf.used[arg] = flag switch flag.flagType { case boolType: // value == "" is only ok if no "=" was specified if index >= 0 && value == "" { return fmt.Errorf("Missing a value on flag: %s", arg) } lower := strings.ToLower(value) if lower == "" { flag.Value = "true" } else if lower == "true" || lower == "false" { flag.Value = lower } else { return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) } case stringType: if index < 0 { return fmt.Errorf("Missing a value on flag: %s", arg) } flag.Value = value default: panic(fmt.Errorf("No idea what kind of flag we have! 
Should never get here!")) } } return nil } docker-1.10.3/builder/dockerfile/bflag_test.go000066400000000000000000000073051267010174400212500ustar00rootroot00000000000000package dockerfile import ( "testing" ) func TestBuilderFlags(t *testing.T) { var expected string var err error // --- bf := NewBFlags() bf.Args = []string{} if err := bf.Parse(); err != nil { t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) } // --- bf = NewBFlags() bf.Args = []string{"--"} if err := bf.Parse(); err != nil { t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) } // --- bf = NewBFlags() flStr1 := bf.AddString("str1", "") flBool1 := bf.AddBool("bool1", false) bf.Args = []string{} if err = bf.Parse(); err != nil { t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) } if flStr1.IsUsed() == true { t.Fatalf("Test3 - str1 was not used!") } if flBool1.IsUsed() == true { t.Fatalf("Test3 - bool1 was not used!") } // --- bf = NewBFlags() flStr1 = bf.AddString("str1", "HI") flBool1 = bf.AddBool("bool1", false) bf.Args = []string{} if err = bf.Parse(); err != nil { t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) } if flStr1.Value != "HI" { t.Fatalf("Str1 was supposed to default to: HI") } if flBool1.IsTrue() { t.Fatalf("Bool1 was supposed to default to: false") } if flStr1.IsUsed() == true { t.Fatalf("Str1 was not used!") } if flBool1.IsUsed() == true { t.Fatalf("Bool1 was not used!") } // --- bf = NewBFlags() flStr1 = bf.AddString("str1", "HI") bf.Args = []string{"--str1"} if err = bf.Parse(); err == nil { t.Fatalf("Test %q was supposed to fail", bf.Args) } // --- bf = NewBFlags() flStr1 = bf.AddString("str1", "HI") bf.Args = []string{"--str1="} if err = bf.Parse(); err != nil { t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) } expected = "" if flStr1.Value != expected { t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) } // --- bf = NewBFlags() flStr1 = bf.AddString("str1", "HI") bf.Args = []string{"--str1=BYE"} if err = 
bf.Parse(); err != nil { t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) } expected = "BYE" if flStr1.Value != expected { t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) } // --- bf = NewBFlags() flBool1 = bf.AddBool("bool1", false) bf.Args = []string{"--bool1"} if err = bf.Parse(); err != nil { t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) } if !flBool1.IsTrue() { t.Fatalf("Test-b1 Bool1 was supposed to be true") } // --- bf = NewBFlags() flBool1 = bf.AddBool("bool1", false) bf.Args = []string{"--bool1=true"} if err = bf.Parse(); err != nil { t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) } if !flBool1.IsTrue() { t.Fatalf("Test-b2 Bool1 was supposed to be true") } // --- bf = NewBFlags() flBool1 = bf.AddBool("bool1", false) bf.Args = []string{"--bool1=false"} if err = bf.Parse(); err != nil { t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) } if flBool1.IsTrue() { t.Fatalf("Test-b3 Bool1 was supposed to be false") } // --- bf = NewBFlags() flBool1 = bf.AddBool("bool1", false) bf.Args = []string{"--bool1=false1"} if err = bf.Parse(); err == nil { t.Fatalf("Test %q was supposed to fail", bf.Args) } // --- bf = NewBFlags() flBool1 = bf.AddBool("bool1", false) bf.Args = []string{"--bool2"} if err = bf.Parse(); err == nil { t.Fatalf("Test %q was supposed to fail", bf.Args) } // --- bf = NewBFlags() flStr1 = bf.AddString("str1", "HI") flBool1 = bf.AddBool("bool1", false) bf.Args = []string{"--bool1", "--str1=BYE"} if err = bf.Parse(); err != nil { t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) } if flStr1.Value != "BYE" { t.Fatalf("Teset %s, str1 should be BYE", bf.Args) } if !flBool1.IsTrue() { t.Fatalf("Teset %s, bool1 should be true", bf.Args) } } docker-1.10.3/builder/dockerfile/builder.go000066400000000000000000000131311267010174400205560ustar00rootroot00000000000000package dockerfile import ( "bytes" "fmt" "io" "io/ioutil" "os" "strings" "sync" "github.com/Sirupsen/logrus" 
"github.com/docker/docker/builder" "github.com/docker/docker/builder/dockerfile/parser" "github.com/docker/docker/pkg/stringid" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" ) var validCommitCommands = map[string]bool{ "cmd": true, "entrypoint": true, "env": true, "expose": true, "label": true, "onbuild": true, "user": true, "volume": true, "workdir": true, } // BuiltinAllowedBuildArgs is list of built-in allowed build args var BuiltinAllowedBuildArgs = map[string]bool{ "HTTP_PROXY": true, "http_proxy": true, "HTTPS_PROXY": true, "https_proxy": true, "FTP_PROXY": true, "ftp_proxy": true, "NO_PROXY": true, "no_proxy": true, } // Builder is a Dockerfile builder // It implements the builder.Backend interface. type Builder struct { options *types.ImageBuildOptions Stdout io.Writer Stderr io.Writer docker builder.Backend context builder.Context dockerfile *parser.Node runConfig *container.Config // runconfig for cmd, run, entrypoint etc. flags *BFlags tmpContainers map[string]struct{} image string // imageID noBaseImage bool maintainer string cmdSet bool disableCommit bool cacheBusted bool cancelled chan struct{} cancelOnce sync.Once allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. // TODO: remove once docker.Commit can receive a tag id string } // NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. // If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, // will be read from the Context passed to Build(). 
func NewBuilder(config *types.ImageBuildOptions, backend builder.Backend, context builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { if config == nil { config = new(types.ImageBuildOptions) } if config.BuildArgs == nil { config.BuildArgs = make(map[string]string) } b = &Builder{ options: config, Stdout: os.Stdout, Stderr: os.Stderr, docker: backend, context: context, runConfig: new(container.Config), tmpContainers: map[string]struct{}{}, cancelled: make(chan struct{}), id: stringid.GenerateNonCryptoID(), allowedBuildArgs: make(map[string]bool), } if dockerfile != nil { b.dockerfile, err = parser.Parse(dockerfile) if err != nil { return nil, err } } return b, nil } // Build runs the Dockerfile builder from a context and a docker object that allows to make calls // to Docker. // // This will (barring errors): // // * read the dockerfile from context // * parse the dockerfile if not already parsed // * walk the AST and execute it by dispatching to handlers. If Remove // or ForceRemove is set, additional cleanup around containers happens after // processing. // * Print a happy message and return the image ID. // * NOT tag the image, that is responsibility of the caller. // func (b *Builder) Build() (string, error) { // If Dockerfile was not parsed yet, extract it from the Context if b.dockerfile == nil { if err := b.readDockerfile(); err != nil { return "", err } } var shortImgID string for i, n := range b.dockerfile.Children { select { case <-b.cancelled: logrus.Debug("Builder: build cancelled!") fmt.Fprintf(b.Stdout, "Build cancelled") return "", fmt.Errorf("Build cancelled") default: // Not cancelled yet, keep going... } if err := b.dispatch(i, n); err != nil { if b.options.ForceRemove { b.clearTmp() } return "", err } shortImgID = stringid.TruncateID(b.image) fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) if b.options.Remove { b.clearTmp() } } // check if there are any leftover build-args that were passed but not // consumed during build. 
Return an error, if there are any. leftoverArgs := []string{} for arg := range b.options.BuildArgs { if !b.isBuildArgAllowed(arg) { leftoverArgs = append(leftoverArgs, arg) } } if len(leftoverArgs) > 0 { return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs) } if b.image == "" { return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") } fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) return b.image, nil } // Cancel cancels an ongoing Dockerfile build. func (b *Builder) Cancel() { b.cancelOnce.Do(func() { close(b.cancelled) }) } // BuildFromConfig will do build directly from parameter 'changes', which comes // from Dockerfile entries, it will: // - call parse.Parse() to get AST root from Dockerfile entries // - do build by calling builder.dispatch() to call all entries' handling routines // TODO: remove? func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) if err != nil { return nil, err } // ensure that the commands are valid for _, n := range ast.Children { if !validCommitCommands[n.Value] { return nil, fmt.Errorf("%s is not a valid change command", n.Value) } } b, err := NewBuilder(nil, nil, nil, nil) if err != nil { return nil, err } b.runConfig = config b.Stdout = ioutil.Discard b.Stderr = ioutil.Discard b.disableCommit = true for i, n := range ast.Children { if err := b.dispatch(i, n); err != nil { return nil, err } } return b.runConfig, nil } docker-1.10.3/builder/dockerfile/command/000077500000000000000000000000001267010174400202205ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/command/command.go000066400000000000000000000015211267010174400221640ustar00rootroot00000000000000// Package command contains the set of Dockerfile commands. 
package command // Define constants for the command strings const ( Env = "env" Label = "label" Maintainer = "maintainer" Add = "add" Copy = "copy" From = "from" Onbuild = "onbuild" Workdir = "workdir" Run = "run" Cmd = "cmd" Entrypoint = "entrypoint" Expose = "expose" Volume = "volume" User = "user" StopSignal = "stopsignal" Arg = "arg" ) // Commands is list of all Dockerfile commands var Commands = map[string]struct{}{ Env: {}, Label: {}, Maintainer: {}, Add: {}, Copy: {}, From: {}, Onbuild: {}, Workdir: {}, Run: {}, Cmd: {}, Entrypoint: {}, Expose: {}, Volume: {}, User: {}, StopSignal: {}, Arg: {}, } docker-1.10.3/builder/dockerfile/dispatchers.go000066400000000000000000000426051267010174400214510ustar00rootroot00000000000000package dockerfile // This file contains the dispatchers for each command. Note that // `nullDispatch` is not actually a command, but support for commands we parse // but do nothing with. // // See evaluator.go for a higher level discussion of the whole evaluator // package. import ( "fmt" "os" "path/filepath" "regexp" "runtime" "sort" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/builder" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/system" runconfigopts "github.com/docker/docker/runconfig/opts" "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/strslice" "github.com/docker/go-connections/nat" ) // dispatch with no layer / parsing. This is effectively not a command. func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error { return nil } // ENV foo bar // // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. 
// func env(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) == 0 { return derr.ErrorCodeAtLeastOneArg.WithArgs("ENV") } if len(args)%2 != 0 { // should never get here, but just in case return derr.ErrorCodeTooManyArgs.WithArgs("ENV") } if err := b.flags.Parse(); err != nil { return err } // TODO/FIXME/NOT USED // Just here to show how to use the builder flags stuff within the // context of a builder command. Will remove once we actually add // a builder command to something! /* flBool1 := b.flags.AddBool("bool1", false) flStr1 := b.flags.AddString("str1", "HI") if err := b.flags.Parse(); err != nil { return err } fmt.Printf("Bool1:%v\n", flBool1) fmt.Printf("Str1:%v\n", flStr1) */ commitStr := "ENV" for j := 0; j < len(args); j++ { // name ==> args[j] // value ==> args[j+1] newVar := args[j] + "=" + args[j+1] + "" commitStr += " " + newVar gotOne := false for i, envVar := range b.runConfig.Env { envParts := strings.SplitN(envVar, "=", 2) if envParts[0] == args[j] { b.runConfig.Env[i] = newVar gotOne = true break } } if !gotOne { b.runConfig.Env = append(b.runConfig.Env, newVar) } j++ } return b.commit("", b.runConfig.Cmd, commitStr) } // MAINTAINER some text // // Sets the maintainer metadata. 
func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return derr.ErrorCodeExactlyOneArg.WithArgs("MAINTAINER") } if err := b.flags.Parse(); err != nil { return err } b.maintainer = args[0] return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) } // LABEL some json data describing the image // // Sets the Label variable foo to bar, // func label(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) == 0 { return derr.ErrorCodeAtLeastOneArg.WithArgs("LABEL") } if len(args)%2 != 0 { // should never get here, but just in case return derr.ErrorCodeTooManyArgs.WithArgs("LABEL") } if err := b.flags.Parse(); err != nil { return err } commitStr := "LABEL" if b.runConfig.Labels == nil { b.runConfig.Labels = map[string]string{} } for j := 0; j < len(args); j++ { // name ==> args[j] // value ==> args[j+1] newVar := args[j] + "=" + args[j+1] + "" commitStr += " " + newVar b.runConfig.Labels[args[j]] = args[j+1] j++ } return b.commit("", b.runConfig.Cmd, commitStr) } // ADD foo /path // // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling // exist here. If you do not wish to have this automatic handling, use COPY. // func add(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) < 2 { return derr.ErrorCodeAtLeastTwoArgs.WithArgs("ADD") } if err := b.flags.Parse(); err != nil { return err } return b.runContextCommand(args, true, true, "ADD") } // COPY foo /path // // Same as 'ADD' but without the tar and remote url handling. // func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) < 2 { return derr.ErrorCodeAtLeastTwoArgs.WithArgs("COPY") } if err := b.flags.Parse(); err != nil { return err } return b.runContextCommand(args, false, false, "COPY") } // FROM imagename // // This sets the image the dockerfile will build on top of. 
// func from(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return derr.ErrorCodeExactlyOneArg.WithArgs("FROM") } if err := b.flags.Parse(); err != nil { return err } name := args[0] var ( image builder.Image err error ) // Windows cannot support a container with no base image. if name == api.NoBaseImageSpecifier { if runtime.GOOS == "windows" { return fmt.Errorf("Windows does not support FROM scratch") } b.image = "" b.noBaseImage = true } else { // TODO: don't use `name`, instead resolve it to a digest if !b.options.PullParent { image, err = b.docker.GetImage(name) // TODO: shouldn't we error out if error is different from "not found" ? } if image == nil { image, err = b.docker.Pull(name) if err != nil { return err } } } return b.processImageFrom(image) } // ONBUILD RUN echo yo // // ONBUILD triggers run when the image is used in a FROM statement. // // ONBUILD handling has a lot of special-case functionality, the heading in // evaluator.go and comments around dispatch() in the same file explain the // special cases. search for 'OnBuild' in internals.go for additional special // cases. // func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) == 0 { return derr.ErrorCodeAtLeastOneArg.WithArgs("ONBUILD") } if err := b.flags.Parse(); err != nil { return err } triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) switch triggerInstruction { case "ONBUILD": return derr.ErrorCodeChainOnBuild case "MAINTAINER", "FROM": return derr.ErrorCodeBadOnBuildCmd.WithArgs(triggerInstruction) } original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) } // WORKDIR /tmp // // Set the working directory for future RUN/CMD/etc statements. 
// func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR") } if err := b.flags.Parse(); err != nil { return err } // This is from the Dockerfile and will not necessarily be in platform // specific semantics, hence ensure it is converted. workdir := filepath.FromSlash(args[0]) if !system.IsAbs(workdir) { current := filepath.FromSlash(b.runConfig.WorkingDir) workdir = filepath.Join(string(os.PathSeparator), current, workdir) } b.runConfig.WorkingDir = workdir return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) } // RUN some command yo // // run a command and commit the image. Args are automatically prepended with // 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is // only one argument. The difference in processing: // // RUN echo hi # sh -c echo hi (Linux) // RUN echo hi # cmd /S /C echo hi (Windows) // RUN [ "echo", "hi" ] # echo hi // func run(b *Builder, args []string, attributes map[string]bool, original string) error { if b.image == "" && !b.noBaseImage { return derr.ErrorCodeMissingFrom } if err := b.flags.Parse(); err != nil { return err } args = handleJSONArgs(args, attributes) if !attributes["json"] { if runtime.GOOS != "windows" { args = append([]string{"/bin/sh", "-c"}, args...) } else { args = append([]string{"cmd", "/S", "/C"}, args...) } } config := &container.Config{ Cmd: strslice.New(args...), Image: b.image, } // stash the cmd cmd := b.runConfig.Cmd if b.runConfig.Entrypoint.Len() == 0 && b.runConfig.Cmd.Len() == 0 { b.runConfig.Cmd = config.Cmd } // stash the config environment env := b.runConfig.Env defer func(cmd *strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) defer func(env []string) { b.runConfig.Env = env }(env) // derive the net build-time environment for this run. We let config // environment override the build time environment. 
// This means that we take the b.buildArgs list of env vars and remove // any of those variables that are defined as part of the container. In other // words, anything in b.Config.Env. What's left is the list of build-time env // vars that we need to add to each RUN command - note the list could be empty. // // We don't persist the build time environment with container's config // environment, but just sort and prepend it to the command string at time // of commit. // This helps with tracing back the image's actual environment at the time // of RUN, without leaking it to the final image. It also aids cache // lookup for same image built with same build time environment. cmdBuildEnv := []string{} configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) for key, val := range b.options.BuildArgs { if !b.isBuildArgAllowed(key) { // skip build-args that are not in allowed list, meaning they have // not been defined by an "ARG" Dockerfile command yet. // This is an error condition but only if there is no "ARG" in the entire // Dockerfile, so we'll generate any necessary errors after we parsed // the entire file (see 'leftoverArgs' processing in evaluator.go ) continue } if _, ok := configEnv[key]; !ok { cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val)) } } // derive the command to use for probeCache() and to commit in this container. // Note that we only do this if there are any build-time env vars. Also, we // use the special argument "|#" at the start of the args array. This will // avoid conflicts with any RUN command since commands can not // start with | (vertical bar). The "#" (number of build envs) is there to // help ensure proper cache matches. We don't want a RUN command // that starts with "foo=abc" to be considered part of a build-time env var. saveCmd := config.Cmd if len(cmdBuildEnv) > 0 { sort.Strings(cmdBuildEnv) tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) 
saveCmd = strslice.New(append(tmpEnv, saveCmd.Slice()...)...) } b.runConfig.Cmd = saveCmd hit, err := b.probeCache() if err != nil { return err } if hit { return nil } // set Cmd manually, this is special case only for Dockerfiles b.runConfig.Cmd = config.Cmd // set build-time environment for 'run'. b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) // set config as already being escaped, this prevents double escaping on windows b.runConfig.ArgsEscaped = true logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) cID, err := b.create() if err != nil { return err } if err := b.run(cID); err != nil { return err } // revert to original config environment and set the command string to // have the build-time env vars in it (if any) so that future cache look-ups // properly match it. b.runConfig.Env = env b.runConfig.Cmd = saveCmd return b.commit(cID, cmd, "run") } // CMD foo // // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. // func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { if err := b.flags.Parse(); err != nil { return err } cmdSlice := handleJSONArgs(args, attributes) if !attributes["json"] { if runtime.GOOS != "windows" { cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...) } else { cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...) } } b.runConfig.Cmd = strslice.New(cmdSlice...) if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { return err } if len(args) != 0 { b.cmdSet = true } return nil } // ENTRYPOINT /usr/sbin/nginx // // Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to // /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx. // // Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint // is initialized at NewBuilder time instead of through argument parsing. 
// func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { if err := b.flags.Parse(); err != nil { return err } parsed := handleJSONArgs(args, attributes) switch { case attributes["json"]: // ENTRYPOINT ["echo", "hi"] b.runConfig.Entrypoint = strslice.New(parsed...) case len(parsed) == 0: // ENTRYPOINT [] b.runConfig.Entrypoint = nil default: // ENTRYPOINT echo hi if runtime.GOOS != "windows" { b.runConfig.Entrypoint = strslice.New("/bin/sh", "-c", parsed[0]) } else { b.runConfig.Entrypoint = strslice.New("cmd", "/S", "/C", parsed[0]) } } // when setting the entrypoint if a CMD was not explicitly set then // set the command to nil if !b.cmdSet { b.runConfig.Cmd = nil } if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { return err } return nil } // EXPOSE 6667/tcp 7000/tcp // // Expose ports for links and port mappings. This all ends up in // b.runConfig.ExposedPorts for runconfig. // func expose(b *Builder, args []string, attributes map[string]bool, original string) error { portsTab := args if len(args) == 0 { return derr.ErrorCodeAtLeastOneArg.WithArgs("EXPOSE") } if err := b.flags.Parse(); err != nil { return err } if b.runConfig.ExposedPorts == nil { b.runConfig.ExposedPorts = make(nat.PortSet) } ports, _, err := nat.ParsePortSpecs(portsTab) if err != nil { return err } // instead of using ports directly, we build a list of ports and sort it so // the order is consistent. 
This prevents cache burst where map ordering // changes between builds portList := make([]string, len(ports)) var i int for port := range ports { if _, exists := b.runConfig.ExposedPorts[port]; !exists { b.runConfig.ExposedPorts[port] = struct{}{} } portList[i] = string(port) i++ } sort.Strings(portList) return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) } // USER foo // // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. // func user(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return derr.ErrorCodeExactlyOneArg.WithArgs("USER") } if err := b.flags.Parse(); err != nil { return err } b.runConfig.User = args[0] return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) } // VOLUME /foo // // Expose the volume /foo for use. Will also accept the JSON array form. // func volume(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) == 0 { return derr.ErrorCodeAtLeastOneArg.WithArgs("VOLUME") } if err := b.flags.Parse(); err != nil { return err } if b.runConfig.Volumes == nil { b.runConfig.Volumes = map[string]struct{}{} } for _, v := range args { v = strings.TrimSpace(v) if v == "" { return derr.ErrorCodeVolumeEmpty } b.runConfig.Volumes[v] = struct{}{} } if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { return err } return nil } // STOPSIGNAL signal // // Set the signal that will be used to kill the container. 
func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return fmt.Errorf("STOPSIGNAL requires exactly one argument") } sig := args[0] _, err := signal.ParseSignal(sig) if err != nil { return err } b.runConfig.StopSignal = sig return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) } // ARG name[=value] // // Adds the variable foo to the trusted list of variables that can be passed // to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. // Dockerfile author may optionally set a default value of this variable. func arg(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return fmt.Errorf("ARG requires exactly one argument definition") } var ( name string value string hasDefault bool ) arg := args[0] // 'arg' can just be a name or name-value pair. Note that this is different // from 'env' that handles the split of name and value at the parser level. // The reason for doing it differently for 'arg' is that we support just // defining an arg and not assign it a value (while 'env' always expects a // name-value pair). If possible, it will be good to harmonize the two. if strings.Contains(arg, "=") { parts := strings.SplitN(arg, "=", 2) name = parts[0] value = parts[1] hasDefault = true } else { name = arg hasDefault = false } // add the arg to allowed list of build-time args from this step on. b.allowedBuildArgs[name] = true // If there is a default value associated with this arg then add it to the // b.buildArgs if one is not already passed to the builder. The args passed // to builder override the default value of 'arg'. 
if _, ok := b.options.BuildArgs[name]; !ok && hasDefault { b.options.BuildArgs[name] = value } return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) } docker-1.10.3/builder/dockerfile/envVarTest000066400000000000000000000112721267010174400206310ustar00rootroot00000000000000hello | hello he'll'o | hello he'llo | hello he\'llo | he'llo he\\'llo | he\llo abc\tdef | abctdef "abc\tdef" | abc\tdef 'abc\tdef' | abc\tdef hello\ | hello hello\\ | hello\ "hello | hello "hello\" | hello" "hel'lo" | hel'lo 'hello | hello 'hello\' | hello\ "''" | '' $. | $. $1 | he$1x | hex he$.x | he$.x he$pwd. | he. he$PWD | he/home he\$PWD | he$PWD he\\$PWD | he\/home he\${} | he${} he\${}xx | he${}xx he${} | he he${}xx | hexx he${hi} | he he${hi}xx | hexx he${PWD} | he/home he${.} | error he${XXX:-000}xx | he000xx he${PWD:-000}xx | he/homexx he${XXX:-$PWD}xx | he/homexx he${XXX:-${PWD:-yyy}}xx | he/homexx he${XXX:-${YYY:-yyy}}xx | heyyyxx he${XXX:YYY} | error he${XXX:+${PWD}}xx | hexx he${PWD:+${XXX}}xx | hexx he${PWD:+${SHELL}}xx | hebashxx he${XXX:+000}xx | hexx he${PWD:+000}xx | he000xx 'he${XX}' | he${XX} "he${PWD}" | he/home "he'$PWD'" | he'/home' "$PWD" | /home '$PWD' | $PWD '\$PWD' | \$PWD '"hello"' | "hello" he\$PWD | he$PWD "he\$PWD" | he$PWD 'he\$PWD' | he\$PWD he${PWD | error he${PWD:=000}xx | error he${PWD:+${PWD}:}xx | he/home:xx he${XXX:-\$PWD:}xx | he$PWD:xx he${XXX:-\${PWD}z}xx | he${PWDz}xx 안녕하세요 | 안녕하세요 안'녕'하세요 | 안녕하세요 안'녕하세요 | 안녕하세요 안녕\'하세요 | 안녕'하세요 안\\'녕하세요 | 안\녕하세요 안녕\t하세요 | 안녕t하세요 "안녕\t하세요" | 안녕\t하세요 '안녕\t하세요 | 안녕\t하세요 안녕하세요\ | 안녕하세요 안녕하세요\\ | 안녕하세요\ "안녕하세요 | 안녕하세요 "안녕하세요\" | 안녕하세요" "안녕'하세요" | 안녕'하세요 '안녕하세요 | 안녕하세요 '안녕하세요\' | 안녕하세요\ 안녕$1x | 안녕x 안녕$.x | 안녕$.x 안녕$pwd. | 안녕. 
안녕$PWD | 안녕/home 안녕\$PWD | 안녕$PWD 안녕\\$PWD | 안녕\/home 안녕\${} | 안녕${} 안녕\${}xx | 안녕${}xx 안녕${} | 안녕 안녕${}xx | 안녕xx 안녕${hi} | 안녕 안녕${hi}xx | 안녕xx 안녕${PWD} | 안녕/home 안녕${.} | error 안녕${XXX:-000}xx | 안녕000xx 안녕${PWD:-000}xx | 안녕/homexx 안녕${XXX:-$PWD}xx | 안녕/homexx 안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx 안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx 안녕${XXX:YYY} | error 안녕${XXX:+${PWD}}xx | 안녕xx 안녕${PWD:+${XXX}}xx | 안녕xx 안녕${PWD:+${SHELL}}xx | 안녕bashxx 안녕${XXX:+000}xx | 안녕xx 안녕${PWD:+000}xx | 안녕000xx '안녕${XX}' | 안녕${XX} "안녕${PWD}" | 안녕/home "안녕'$PWD'" | 안녕'/home' '"안녕"' | "안녕" 안녕\$PWD | 안녕$PWD "안녕\$PWD" | 안녕$PWD '안녕\$PWD' | 안녕\$PWD 안녕${PWD | error 안녕${PWD:=000}xx | error 안녕${PWD:+${PWD}:}xx | 안녕/home:xx 안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx 안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx $KOREAN | 한국어 안녕$KOREAN | 안녕한국어 docker-1.10.3/builder/dockerfile/evaluator.go000066400000000000000000000161271267010174400211420ustar00rootroot00000000000000// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. // // It incorporates a dispatch table based on the parser.Node values (see the // parser package for more information) that are yielded from the parser itself. // Calling NewBuilder with the BuildOpts struct can be used to customize the // experience for execution purposes only. Parsing is controlled in the parser // package, and this division of responsibility should be respected. // // Please see the jump table targets for the actual invocations, most of which // will call out to the functions in internals.go to deal with their tasks. // // ONBUILD is a special case, which is covered in the onbuild() func in // dispatchers.go. // // The evaluator uses the concept of "steps", which are usually each processable // line in the Dockerfile. Each step is numbered and certain actions are taken // before and after each step, such as creating an image ID and removing temporary // containers and images. 
Note that ONBUILD creates a kinda-sorta "sub run" which // includes its own set of steps (usually only one of them). package dockerfile import ( "fmt" "runtime" "strings" "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/builder/dockerfile/parser" ) // Environment variable interpolation will happen on these statements only. var replaceEnvAllowed = map[string]bool{ command.Env: true, command.Label: true, command.Add: true, command.Copy: true, command.Workdir: true, command.Expose: true, command.Volume: true, command.User: true, command.StopSignal: true, command.Arg: true, } // Certain commands are allowed to have their args split into more // words after env var replacements. Meaning: // ENV foo="123 456" // EXPOSE $foo // should result in the same thing as: // EXPOSE 123 456 // and not treat "123 456" as a single word. // Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. // Quotes will cause it to still be treated as single word. var allowWordExpansion = map[string]bool{ command.Expose: true, } var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error func init() { evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ command.Env: env, command.Label: label, command.Maintainer: maintainer, command.Add: add, command.Copy: dispatchCopy, // copy() is a go builtin command.From: from, command.Onbuild: onbuild, command.Workdir: workdir, command.Run: run, command.Cmd: cmd, command.Entrypoint: entrypoint, command.Expose: expose, command.Volume: volume, command.User: user, command.StopSignal: stopSignal, command.Arg: arg, } } // This method is the entrypoint to all statement handling routines. // // Almost all nodes will have this structure: // Child[Node, Node, Node] where Child is from parser.Node.Children and each // node comes from parser.Node.Next. 
This forms a "line" with a statement and // arguments and we process them in this normalized form by hitting // evaluateTable with the leaf nodes of the command and the Builder object. // // ONBUILD is a special case; in this case the parser will emit: // Child[Node, Child[Node, Node...]] where the first node is the literal // "onbuild" and the child entrypoint is the command of the ONBUILD statement, // such as `RUN` in ONBUILD RUN foo. There is special case logic in here to // deal with that, at least until it becomes more of a general concern with new // features. func (b *Builder) dispatch(stepN int, ast *parser.Node) error { cmd := ast.Value upperCasedCmd := strings.ToUpper(cmd) // To ensure the user is given a decent error message if the platform // on which the daemon is running does not support a builder command. if err := platformSupports(strings.ToLower(cmd)); err != nil { return err } attrs := ast.Attributes original := ast.Original flags := ast.Flags strList := []string{} msg := fmt.Sprintf("Step %d : %s", stepN+1, upperCasedCmd) if len(ast.Flags) > 0 { msg += " " + strings.Join(ast.Flags, " ") } if cmd == "onbuild" { if ast.Next == nil { return fmt.Errorf("ONBUILD requires at least one argument") } ast = ast.Next.Children[0] strList = append(strList, ast.Value) msg += " " + ast.Value if len(ast.Flags) > 0 { msg += " " + strings.Join(ast.Flags, " ") } } // count the number of nodes that we are going to traverse first // so we can pre-create the argument and message array. This speeds up the // allocation of those list a lot when they have a lot of arguments cursor := ast var n int for cursor.Next != nil { cursor = cursor.Next n++ } msgList := make([]string, n) var i int // Append the build-time args to config-environment. // This allows builder config to override the variables, making the behavior similar to // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build // context. 
But `ENV foo $foo` will use the value from build context if one // isn't already been defined by a previous ENV primitive. // Note, we get this behavior because we know that ProcessWord() will // stop on the first occurrence of a variable name and not notice // a subsequent one. So, putting the buildArgs list after the Config.Env // list, in 'envs', is safe. envs := b.runConfig.Env for key, val := range b.options.BuildArgs { if !b.isBuildArgAllowed(key) { // skip build-args that are not in allowed list, meaning they have // not been defined by an "ARG" Dockerfile command yet. // This is an error condition but only if there is no "ARG" in the entire // Dockerfile, so we'll generate any necessary errors after we parsed // the entire file (see 'leftoverArgs' processing in evaluator.go ) continue } envs = append(envs, fmt.Sprintf("%s=%s", key, val)) } for ast.Next != nil { ast = ast.Next var str string str = ast.Value if replaceEnvAllowed[cmd] { var err error var words []string if allowWordExpansion[cmd] { words, err = ProcessWords(str, envs) if err != nil { return err } strList = append(strList, words...) } else { str, err = ProcessWord(str, envs) if err != nil { return err } strList = append(strList, str) } } else { strList = append(strList, str) } msgList[i] = ast.Value i++ } msg += " " + strings.Join(msgList, " ") fmt.Fprintln(b.Stdout, msg) // XXX yes, we skip any cmds that are not valid; the parser should have // picked these out already. if f, ok := evaluateTable[cmd]; ok { b.flags = NewBFlags() b.flags.Args = flags return f(b, strList, attrs, original) } return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) } // platformSupports is a short-term function to give users a quality error // message if a Dockerfile uses a command not supported on the platform. 
func platformSupports(command string) error { if runtime.GOOS != "windows" { return nil } switch command { case "expose", "user", "stopsignal", "arg": return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) } return nil } docker-1.10.3/builder/dockerfile/internals.go000066400000000000000000000435261267010174400211420ustar00rootroot00000000000000package dockerfile // internals for handling commands. Covers many areas and a lot of // non-contiguous functionality. Please read the comments. import ( "crypto/sha256" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "runtime" "sort" "strings" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/builder" "github.com/docker/docker/builder/dockerfile/parser" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/runconfig/opts" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/strslice" ) func (b *Builder) commit(id string, autoCmd *strslice.StrSlice, comment string) error { if b.disableCommit { return nil } if b.image == "" && !b.noBaseImage { return fmt.Errorf("Please provide a source image with `from` prior to commit") } b.runConfig.Image = b.image if id == "" { cmd := b.runConfig.Cmd if runtime.GOOS != "windows" { b.runConfig.Cmd = strslice.New("/bin/sh", "-c", "#(nop) "+comment) } else { b.runConfig.Cmd = strslice.New("cmd", "/S /C", "REM (nop) "+comment) } defer func(cmd *strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) hit, err := b.probeCache() if err != nil { return err } 
else if hit { return nil } id, err = b.create() if err != nil { return err } } // Note: Actually copy the struct autoConfig := *b.runConfig autoConfig.Cmd = autoCmd commitCfg := &types.ContainerCommitConfig{ Author: b.maintainer, Pause: true, Config: &autoConfig, } // Commit the container imageID, err := b.docker.Commit(id, commitCfg) if err != nil { return err } b.image = imageID return nil } type copyInfo struct { builder.FileInfo decompress bool } func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { if b.context == nil { return fmt.Errorf("No context given. Impossible to use %s", cmdName) } if len(args) < 2 { return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) } // Work in daemon-specific filepath semantics dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest b.runConfig.Image = b.image var infos []copyInfo // Loop through each src file and calculate the info we need to // do the copy (e.g. hash value if cached). Don't actually do // the copy until we've looked at all src files var err error for _, orig := range args[0 : len(args)-1] { var fi builder.FileInfo decompress := allowLocalDecompression if urlutil.IsURL(orig) { if !allowRemote { return fmt.Errorf("Source can't be a URL for %s", cmdName) } fi, err = b.download(orig) if err != nil { return err } defer os.RemoveAll(filepath.Dir(fi.Path())) decompress = false infos = append(infos, copyInfo{fi, decompress}) continue } // not a URL subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) if err != nil { return err } infos = append(infos, subInfos...) 
} if len(infos) == 0 { return fmt.Errorf("No source files were specified") } if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) } // For backwards compat, if there's just one info then use it as the // cache look-up string, otherwise hash 'em all into one var srcHash string var origPaths string if len(infos) == 1 { fi := infos[0].FileInfo origPaths = fi.Name() if hfi, ok := fi.(builder.Hashed); ok { srcHash = hfi.Hash() } } else { var hashs []string var origs []string for _, info := range infos { fi := info.FileInfo origs = append(origs, fi.Name()) if hfi, ok := fi.(builder.Hashed); ok { hashs = append(hashs, hfi.Hash()) } } hasher := sha256.New() hasher.Write([]byte(strings.Join(hashs, ","))) srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) origPaths = strings.Join(origs, " ") } cmd := b.runConfig.Cmd if runtime.GOOS != "windows" { b.runConfig.Cmd = strslice.New("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)) } else { b.runConfig.Cmd = strslice.New("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest)) } defer func(cmd *strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) if hit, err := b.probeCache(); err != nil { return err } else if hit { return nil } container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) if err != nil { return err } b.tmpContainers[container.ID] = struct{}{} comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) // Twiddle the destination when its a relative path - meaning, make it // relative to the WORKINGDIR if !system.IsAbs(dest) { hasSlash := strings.HasSuffix(dest, string(os.PathSeparator)) dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest) // Make sure we preserve any trailing slash if hasSlash { dest += string(os.PathSeparator) } } for 
_, info := range infos { if err := b.docker.BuilderCopy(container.ID, dest, info.FileInfo, info.decompress); err != nil { return err } } return b.commit(container.ID, cmd, comment) } func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { // get filename from URL u, err := url.Parse(srcURL) if err != nil { return } path := filepath.FromSlash(u.Path) // Ensure in platform semantics if strings.HasSuffix(path, string(os.PathSeparator)) { path = path[:len(path)-1] } parts := strings.Split(path, string(os.PathSeparator)) filename := parts[len(parts)-1] if filename == "" { err = fmt.Errorf("cannot determine filename from url: %s", u) return } // Initiate the download resp, err := httputils.Download(srcURL) if err != nil { return } // Prepare file in a tmp dir tmpDir, err := ioutils.TempDir("", "docker-remote") if err != nil { return } defer func() { if err != nil { os.RemoveAll(tmpDir) } }() tmpFileName := filepath.Join(tmpDir, filename) tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return } stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") // Download and dump result to tmp file if _, err = io.Copy(tmpFile, progressReader); err != nil { tmpFile.Close() return } fmt.Fprintln(b.Stdout) // ignoring error because the file was already opened successfully tmpFileSt, err := tmpFile.Stat() if err != nil { return } tmpFile.Close() // Set the mtime to the Last-Modified header value if present // Otherwise just remove atime and mtime mTime := time.Time{} lastMod := resp.Header.Get("Last-Modified") if lastMod != "" { // If we can't parse it then just let it default to 'zero' // otherwise use the parsed time value if parsedMTime, err := http.ParseTime(lastMod); err == nil { mTime = parsedMTime } 
} if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { return } // Calc the checksum, even if we're using the cache r, err := archive.Tar(tmpFileName, archive.Uncompressed) if err != nil { return } tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) if err != nil { return } if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { return } hash := tarSum.Sum(nil) r.Close() return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil } func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { // Work in daemon-specific OS filepath semantics origPath = filepath.FromSlash(origPath) if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { origPath = origPath[1:] } origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) // Deal with wildcards if allowWildcards && containsWildcards(origPath) { var copyInfos []copyInfo if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { if err != nil { return err } if info.Name() == "" { // Why are we doing this check? return nil } if match, _ := filepath.Match(origPath, path); !match { return nil } // Note we set allowWildcards to false in case the name has // a * in it subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) if err != nil { return err } copyInfos = append(copyInfos, subInfos...) 
return nil }); err != nil { return nil, err } return copyInfos, nil } // Must be a dir or a file statPath, fi, err := b.context.Stat(origPath) if err != nil { return nil, err } copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} hfi, handleHash := fi.(builder.Hashed) if !handleHash { return copyInfos, nil } // Deal with the single file case if !fi.IsDir() { hfi.SetHash("file:" + hfi.Hash()) return copyInfos, nil } // Must be a dir var subfiles []string err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { if err != nil { return err } // we already checked handleHash above subfiles = append(subfiles, info.(builder.Hashed).Hash()) return nil }) if err != nil { return nil, err } sort.Strings(subfiles) hasher := sha256.New() hasher.Write([]byte(strings.Join(subfiles, ","))) hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) return copyInfos, nil } func containsWildcards(name string) bool { for i := 0; i < len(name); i++ { ch := name[i] if ch == '\\' { i++ } else if ch == '*' || ch == '?' || ch == '[' { return true } } return false } func (b *Builder) processImageFrom(img builder.Image) error { if img != nil { b.image = img.ID() if img.Config() != nil { b.runConfig = img.Config() } } // Check to see if we have a default PATH, note that windows won't // have one as its set by HCS if system.DefaultPathEnv != "" { // Convert the slice of strings that represent the current list // of env vars into a map so we can see if PATH is already set. 
// If its not set then go ahead and give it our default value configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) if _, ok := configEnv["PATH"]; !ok { b.runConfig.Env = append(b.runConfig.Env, "PATH="+system.DefaultPathEnv) } } if img == nil { // Typically this means they used "FROM scratch" return nil } // Process ONBUILD triggers if they exist if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { word := "trigger" if nTriggers > 1 { word = "triggers" } fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) } // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. onBuildTriggers := b.runConfig.OnBuild b.runConfig.OnBuild = []string{} // parse the ONBUILD triggers by invoking the parser for _, step := range onBuildTriggers { ast, err := parser.Parse(strings.NewReader(step)) if err != nil { return err } for i, n := range ast.Children { switch strings.ToUpper(n.Value) { case "ONBUILD": return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") case "MAINTAINER", "FROM": return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) } if err := b.dispatch(i, n); err != nil { return err } } } return nil } // probeCache checks if `b.docker` implements builder.ImageCache and image-caching // is enabled (`b.UseCache`). // If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`. // If an image is found, probeCache returns `(true, nil)`. // If no image is found, it returns `(false, nil)`. // If there is any error, it returns `(false, err)`. 
func (b *Builder) probeCache() (bool, error) { c, ok := b.docker.(builder.ImageCache) if !ok || b.options.NoCache || b.cacheBusted { return false, nil } cache, err := c.GetCachedImage(b.image, b.runConfig) if err != nil { return false, err } if len(cache) == 0 { logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) b.cacheBusted = true return false, nil } fmt.Fprintf(b.Stdout, " ---> Using cache\n") logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) b.image = string(cache) return true, nil } func (b *Builder) create() (string, error) { if b.image == "" && !b.noBaseImage { return "", fmt.Errorf("Please provide a source image with `from` prior to run") } b.runConfig.Image = b.image resources := container.Resources{ CgroupParent: b.options.CgroupParent, CPUShares: b.options.CPUShares, CPUPeriod: b.options.CPUPeriod, CPUQuota: b.options.CPUQuota, CpusetCpus: b.options.CPUSetCPUs, CpusetMems: b.options.CPUSetMems, Memory: b.options.Memory, MemorySwap: b.options.MemorySwap, Ulimits: b.options.Ulimits, } // TODO: why not embed a hostconfig in builder? 
hostConfig := &container.HostConfig{ Isolation: b.options.IsolationLevel, ShmSize: b.options.ShmSize, Resources: resources, } config := *b.runConfig // Create the container c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ Config: b.runConfig, HostConfig: hostConfig, }) if err != nil { return "", err } for _, warning := range c.Warnings { fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) } b.tmpContainers[c.ID] = struct{}{} fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) if config.Cmd.Len() > 0 { // override the entry point that may have been picked up from the base image if err := b.docker.ContainerUpdateCmd(c.ID, config.Cmd.Slice()); err != nil { return "", err } } return c.ID, nil } func (b *Builder) run(cID string) (err error) { errCh := make(chan error) go func() { errCh <- b.docker.ContainerAttach(cID, nil, b.Stdout, b.Stderr, true) }() finished := make(chan struct{}) defer close(finished) go func() { select { case <-b.cancelled: logrus.Debugln("Build cancelled, killing and removing container:", cID) b.docker.ContainerKill(cID, 0) b.removeContainer(cID) case <-finished: } }() if err := b.docker.ContainerStart(cID, nil); err != nil { return err } // Block on reading output from container, stop on err or chan closed if err := <-errCh; err != nil { return err } if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { // TODO: change error type, because jsonmessage.JSONError assumes HTTP return &jsonmessage.JSONError{ Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.runConfig.Cmd.ToString(), ret), Code: ret, } } return nil } func (b *Builder) removeContainer(c string) error { rmConfig := &types.ContainerRmConfig{ ForceRemove: true, RemoveVolume: true, } if err := b.docker.ContainerRm(c, rmConfig); err != nil { fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) return err } return nil } func (b *Builder) clearTmp() { for c := range b.tmpContainers 
{ if err := b.removeContainer(c); err != nil { return } delete(b.tmpContainers, c) fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) } } // readDockerfile reads a Dockerfile from the current context. func (b *Builder) readDockerfile() error { // If no -f was specified then look for 'Dockerfile'. If we can't find // that then look for 'dockerfile'. If neither are found then default // back to 'Dockerfile' and use that in the error message. if b.options.Dockerfile == "" { b.options.Dockerfile = api.DefaultDockerfileName if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { lowercase := strings.ToLower(b.options.Dockerfile) if _, _, err := b.context.Stat(lowercase); err == nil { b.options.Dockerfile = lowercase } } } f, err := b.context.Open(b.options.Dockerfile) if err != nil { if os.IsNotExist(err) { return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) } return err } if f, ok := f.(*os.File); ok { // ignoring error because Open already succeeded fi, err := f.Stat() if err != nil { return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) } if fi.Size() == 0 { return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) } } b.dockerfile, err = parser.Parse(f) f.Close() if err != nil { return err } // After the Dockerfile has been parsed, we need to check the .dockerignore // file for either "Dockerfile" or ".dockerignore", and if either are // present then erase them from the build context. These files should never // have been sent from the client but we did send them to make sure that // we had the Dockerfile to actually parse, and then we also need the // .dockerignore file to know whether either file should be removed. // Note that this assumes the Dockerfile has been read into memory and // is now safe to be removed. 
if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { dockerIgnore.Process([]string{b.options.Dockerfile}) } return nil } // determine if build arg is part of built-in args or user // defined args in Dockerfile at any point in time. func (b *Builder) isBuildArgAllowed(arg string) bool { if _, ok := BuiltinAllowedBuildArgs[arg]; ok { return true } if _, ok := b.allowedBuildArgs[arg]; ok { return true } return false } docker-1.10.3/builder/dockerfile/internals_unix.go000066400000000000000000000023751267010174400222020ustar00rootroot00000000000000// +build !windows package dockerfile import ( "os" "path/filepath" ) func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { // If the destination didn't already exist, or the destination isn't a // directory, then we should Lchown the destination. Otherwise, we shouldn't // Lchown the destination. destStat, err := os.Stat(destination) if err != nil { // This should *never* be reached, because the destination must've already // been created while untar-ing the context. return err } doChownDestination := !destExisted || !destStat.IsDir() // We Walk on the source rather than on the destination because we don't // want to change permissions on things we haven't created or modified. return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { // Do not alter the walk root iff. it existed before, as it doesn't fall under // the domain of "things we should chown". if !doChownDestination && (source == fullpath) { return nil } // Path is prefixed by source: substitute with destination instead. 
cleaned, err := filepath.Rel(source, fullpath) if err != nil { return err } fullpath = filepath.Join(destination, cleaned) return os.Lchown(fullpath, uid, gid) }) } docker-1.10.3/builder/dockerfile/internals_windows.go000066400000000000000000000002631267010174400227030ustar00rootroot00000000000000// +build windows package dockerfile func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { // chown is not supported on Windows return nil } docker-1.10.3/builder/dockerfile/parser/000077500000000000000000000000001267010174400200765ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/dumper/000077500000000000000000000000001267010174400213725ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/dumper/main.go000066400000000000000000000006521267010174400226500ustar00rootroot00000000000000package main import ( "fmt" "os" "github.com/docker/docker/builder/dockerfile/parser" ) func main() { var f *os.File var err error if len(os.Args) < 2 { fmt.Println("please supply filename(s)") os.Exit(1) } for _, fn := range os.Args[1:] { f, err = os.Open(fn) if err != nil { panic(err) } ast, err := parser.Parse(f) if err != nil { panic(err) } else { fmt.Println(ast.Dump()) } } } docker-1.10.3/builder/dockerfile/parser/json_test.go000066400000000000000000000030221267010174400224320ustar00rootroot00000000000000package parser import ( "testing" ) var invalidJSONArraysOfStrings = []string{ `["a",42,"b"]`, `["a",123.456,"b"]`, `["a",{},"b"]`, `["a",{"c": "d"},"b"]`, `["a",["c"],"b"]`, `["a",true,"b"]`, `["a",false,"b"]`, `["a",null,"b"]`, } var validJSONArraysOfStrings = map[string][]string{ `[]`: {}, `[""]`: {""}, `["a"]`: {"a"}, `["a","b"]`: {"a", "b"}, `[ "a", "b" ]`: {"a", "b"}, `[ "a", "b" ]`: {"a", "b"}, ` [ "a", "b" ] `: {"a", "b"}, `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, } func TestJSONArraysOfStrings(t *testing.T) { for json, expected := range 
validJSONArraysOfStrings { if node, _, err := parseJSON(json); err != nil { t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) } else { i := 0 for node != nil { if i >= len(expected) { t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) } if node.Value != expected[i] { t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) } node = node.Next i++ } if i != len(expected) { t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) } } } for _, json := range invalidJSONArraysOfStrings { if _, _, err := parseJSON(json); err != errDockerfileNotStringArray { t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) } } } docker-1.10.3/builder/dockerfile/parser/line_parsers.go000066400000000000000000000177011267010174400231210ustar00rootroot00000000000000package parser // line parsers are dispatch calls that parse a single unit of text into a // Node object which contains the whole statement. Dockerfiles have varied // (but not usually unique, see ONBUILD for a unique example) parsing rules // per-command, and these unify the processing in a way that makes it // manageable. import ( "encoding/json" "errors" "fmt" "strings" "unicode" ) var ( errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") ) // ignore the current argument. This will still leave a command parsed, but // will not incorporate the arguments into the ast. func parseIgnore(rest string) (*Node, map[string]bool, error) { return &Node{}, nil, nil } // used for onbuild. Could potentially be used for anything that represents a // statement with sub-statements. 
// // ONBUILD RUN foo bar -> (onbuild (run foo bar)) // func parseSubCommand(rest string) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil } _, child, err := parseLine(rest) if err != nil { return nil, nil, err } return &Node{Children: []*Node{child}}, nil, nil } // helper to parse words (i.e space delimited or quoted strings) in a statement. // The quotes are preserved as part of this function and they are stripped later // as part of processWords(). func parseWords(rest string) []string { const ( inSpaces = iota // looking for start of a word inWord inQuote ) words := []string{} phase := inSpaces word := "" quote := '\000' blankOK := false var ch rune for pos := 0; pos <= len(rest); pos++ { if pos != len(rest) { ch = rune(rest[pos]) } if phase == inSpaces { // Looking for start of word if pos == len(rest) { // end of input break } if unicode.IsSpace(ch) { // skip spaces continue } phase = inWord // found it, fall thru } if (phase == inWord || phase == inQuote) && (pos == len(rest)) { if blankOK || len(word) > 0 { words = append(words, word) } break } if phase == inWord { if unicode.IsSpace(ch) { phase = inSpaces if blankOK || len(word) > 0 { words = append(words, word) } word = "" blankOK = false continue } if ch == '\'' || ch == '"' { quote = ch blankOK = true phase = inQuote } if ch == '\\' { if pos+1 == len(rest) { continue // just skip \ at end } // If we're not quoted and we see a \, then always just // add \ plus the char to the word, even if the char // is a quote. word += string(ch) pos++ ch = rune(rest[pos]) } word += string(ch) continue } if phase == inQuote { if ch == quote { phase = inWord } // \ is special except for ' quotes - can't escape anything for ' if ch == '\\' && quote != '\'' { if pos+1 == len(rest) { phase = inWord continue // just skip \ at end } pos++ nextCh := rune(rest[pos]) word += string(ch) ch = nextCh } word += string(ch) } } return words } // parse environment like statements. 
Note that this does *not* handle // variable interpolation, which will be handled in the evaluator. func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { // This is kind of tricky because we need to support the old // variant: KEY name value // as well as the new one: KEY name=value ... // The trigger to know which one is being used will be whether we hit // a space or = first. space ==> old, "=" ==> new words := parseWords(rest) if len(words) == 0 { return nil, nil, nil } var rootnode *Node // Old format (KEY name value) if !strings.Contains(words[0], "=") { node := &Node{} rootnode = node strs := tokenWhitespace.Split(rest, 2) if len(strs) < 2 { return nil, nil, fmt.Errorf(key + " must have two arguments") } node.Value = strs[0] node.Next = &Node{} node.Next.Value = strs[1] } else { var prevNode *Node for i, word := range words { if !strings.Contains(word, "=") { return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) } parts := strings.SplitN(word, "=", 2) name := &Node{} value := &Node{} name.Next = value name.Value = parts[0] value.Value = parts[1] if i == 0 { rootnode = name } else { prevNode.Next = name } prevNode = value } } return rootnode, nil, nil } func parseEnv(rest string) (*Node, map[string]bool, error) { return parseNameVal(rest, "ENV") } func parseLabel(rest string) (*Node, map[string]bool, error) { return parseNameVal(rest, "LABEL") } // parses a statement containing one or more keyword definition(s) and/or // value assignments, like `name1 name2= name3="" name4=value`. // Note that this is a stricter format than the old format of assignment, // allowed by parseNameVal(), in a way that this only allows assignment of the // form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. // In addition, a keyword definition alone is of the form `keyword` like `name1` // above. 
And the assignments `name2=` and `name3=""` are equivalent and // assign an empty value to the respective keywords. func parseNameOrNameVal(rest string) (*Node, map[string]bool, error) { words := parseWords(rest) if len(words) == 0 { return nil, nil, nil } var ( rootnode *Node prevNode *Node ) for i, word := range words { node := &Node{} node.Value = word if i == 0 { rootnode = node } else { prevNode.Next = node } prevNode = node } return rootnode, nil, nil } // parses a whitespace-delimited set of arguments. The result is effectively a // linked list of string arguments. func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil } node := &Node{} rootnode := node prevnode := node for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp prevnode = node node.Value = str node.Next = &Node{} node = node.Next } // XXX to get around regexp.Split *always* providing an empty string at the // end due to how our loop is constructed, nil out the last node in the // chain. prevnode.Next = nil return rootnode, nil, nil } // parsestring just wraps the string in quotes and returns a working node. func parseString(rest string) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil } n := &Node{} n.Value = rest return n, nil, nil } // parseJSON converts JSON arrays to an AST. 
func parseJSON(rest string) (*Node, map[string]bool, error) { rest = strings.TrimLeftFunc(rest, unicode.IsSpace) if !strings.HasPrefix(rest, "[") { return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) } var myJSON []interface{} if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { return nil, nil, err } var top, prev *Node for _, str := range myJSON { s, ok := str.(string) if !ok { return nil, nil, errDockerfileNotStringArray } node := &Node{Value: s} if prev == nil { top = node } else { prev.Next = node } prev = node } return top, map[string]bool{"json": true}, nil } // parseMaybeJSON determines if the argument appears to be a JSON array. If // so, passes to parseJSON; if not, quotes the result and returns a single // node. func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil } node, attrs, err := parseJSON(rest) if err == nil { return node, attrs, nil } if err == errDockerfileNotStringArray { return nil, nil, err } node = &Node{} node.Value = rest return node, nil, nil } // parseMaybeJSONToList determines if the argument appears to be a JSON array. If // so, passes to parseJSON; if not, attempts to parse it as a whitespace // delimited string. func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) { node, attrs, err := parseJSON(rest) if err == nil { return node, attrs, nil } if err == errDockerfileNotStringArray { return nil, nil, err } return parseStringsWhitespaceDelimited(rest) } docker-1.10.3/builder/dockerfile/parser/parser.go000066400000000000000000000112421267010174400217210ustar00rootroot00000000000000// Package parser implements a parser and parse tree dumper for Dockerfiles. package parser import ( "bufio" "io" "regexp" "strings" "unicode" "github.com/docker/docker/builder/dockerfile/command" ) // Node is a structure used to represent a parse tree. // // In the node there are three fields, Value, Next, and Children. 
Value is the // current token's string value. Next is always the next non-child token, and // children contains all the children. Here's an example: // // (value next (child child-next child-next-next) next-next) // // This data structure is frankly pretty lousy for handling complex languages, // but lucky for us the Dockerfile isn't very complicated. This structure // works a little more effectively than a "proper" parse tree for our needs. // type Node struct { Value string // actual content Next *Node // the next item in the current sexp Children []*Node // the children of this sexp Attributes map[string]bool // special attributes for this node Original string // original line used before parsing Flags []string // only top Node should have this set StartLine int // the line in the original dockerfile where the node begins EndLine int // the line in the original dockerfile where the node ends } var ( dispatch map[string]func(string) (*Node, map[string]bool, error) tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) tokenLineContinuation = regexp.MustCompile(`\\[ \t]*$`) tokenComment = regexp.MustCompile(`^#.*$`) ) func init() { // Dispatch Table. see line_parsers.go for the parse functions. // The command is parsed and mapped to the line parser. The line parser // receives the arguments but not the command, and returns an AST after // reformulating the arguments according to the rules in the parser // functions. Errors are propagated up by Parse() and the resulting AST can // be incorporated directly into the existing AST as a next. 
dispatch = map[string]func(string) (*Node, map[string]bool, error){ command.User: parseString, command.Onbuild: parseSubCommand, command.Workdir: parseString, command.Env: parseEnv, command.Label: parseLabel, command.Maintainer: parseString, command.From: parseString, command.Add: parseMaybeJSONToList, command.Copy: parseMaybeJSONToList, command.Run: parseMaybeJSON, command.Cmd: parseMaybeJSON, command.Entrypoint: parseMaybeJSON, command.Expose: parseStringsWhitespaceDelimited, command.Volume: parseMaybeJSONToList, command.StopSignal: parseString, command.Arg: parseNameOrNameVal, } } // parse a line and return the remainder. func parseLine(line string) (string, *Node, error) { if line = stripComments(line); line == "" { return "", nil, nil } if tokenLineContinuation.MatchString(line) { line = tokenLineContinuation.ReplaceAllString(line, "") return line, nil, nil } cmd, flags, args, err := splitCommand(line) if err != nil { return "", nil, err } node := &Node{} node.Value = cmd sexp, attrs, err := fullDispatch(cmd, args) if err != nil { return "", nil, err } node.Next = sexp node.Attributes = attrs node.Original = line node.Flags = flags return "", node, nil } // Parse is the main parse routine. // It handles an io.ReadWriteCloser and returns the root of the AST. 
func Parse(rwc io.Reader) (*Node, error) { currentLine := 0 root := &Node{} root.StartLine = -1 scanner := bufio.NewScanner(rwc) for scanner.Scan() { scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) currentLine++ line, child, err := parseLine(scannedLine) if err != nil { return nil, err } startLine := currentLine if line != "" && child == nil { for scanner.Scan() { newline := scanner.Text() currentLine++ if stripComments(strings.TrimSpace(newline)) == "" { continue } line, child, err = parseLine(line + newline) if err != nil { return nil, err } if child != nil { break } } if child == nil && line != "" { line, child, err = parseLine(line) if err != nil { return nil, err } } } if child != nil { // Update the line information for the current child. child.StartLine = startLine child.EndLine = currentLine // Update the line information for the root. The starting line of the root is always the // starting line of the first child and the ending line is the ending line of the last child. 
if root.StartLine < 0 { root.StartLine = currentLine } root.EndLine = currentLine root.Children = append(root.Children, child) } } return root, nil } docker-1.10.3/builder/dockerfile/parser/parser_test.go000066400000000000000000000072671267010174400227740ustar00rootroot00000000000000package parser import ( "bytes" "fmt" "io/ioutil" "os" "path/filepath" "runtime" "testing" ) const testDir = "testfiles" const negativeTestDir = "testfiles-negative" const testFileLineInfo = "testfile-line/Dockerfile" func getDirs(t *testing.T, dir string) []string { f, err := os.Open(dir) if err != nil { t.Fatal(err) } defer f.Close() dirs, err := f.Readdirnames(0) if err != nil { t.Fatal(err) } return dirs } func TestTestNegative(t *testing.T) { for _, dir := range getDirs(t, negativeTestDir) { dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") df, err := os.Open(dockerfile) if err != nil { t.Fatalf("Dockerfile missing for %s: %v", dir, err) } _, err = Parse(df) if err == nil { t.Fatalf("No error parsing broken dockerfile for %s", dir) } df.Close() } } func TestTestData(t *testing.T) { for _, dir := range getDirs(t, testDir) { dockerfile := filepath.Join(testDir, dir, "Dockerfile") resultfile := filepath.Join(testDir, dir, "result") df, err := os.Open(dockerfile) if err != nil { t.Fatalf("Dockerfile missing for %s: %v", dir, err) } defer df.Close() ast, err := Parse(df) if err != nil { t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) } content, err := ioutil.ReadFile(resultfile) if err != nil { t.Fatalf("Error reading %s's result file: %v", dir, err) } if runtime.GOOS == "windows" { // CRLF --> CR to match Unix behavior content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) } if ast.Dump()+"\n" != string(content) { fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) t.Fatalf("%s: AST dump of dockerfile does not match result", dir) } } } func TestParseWords(t *testing.T) { tests := 
[]map[string][]string{ { "input": {"foo"}, "expect": {"foo"}, }, { "input": {"foo bar"}, "expect": {"foo", "bar"}, }, { "input": {"foo=bar"}, "expect": {"foo=bar"}, }, { "input": {"foo bar 'abc xyz'"}, "expect": {"foo", "bar", "'abc xyz'"}, }, { "input": {`foo bar "abc xyz"`}, "expect": {"foo", "bar", `"abc xyz"`}, }, } for _, test := range tests { words := parseWords(test["input"][0]) if len(words) != len(test["expect"]) { t.Fatalf("length check failed. input: %v, expect: %v, output: %v", test["input"][0], test["expect"], words) } for i, word := range words { if word != test["expect"][i] { t.Fatalf("word check failed for word: %q. input: %v, expect: %v, output: %v", word, test["input"][0], test["expect"], words) } } } } func TestLineInformation(t *testing.T) { df, err := os.Open(testFileLineInfo) if err != nil { t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err) } defer df.Close() ast, err := Parse(df) if err != nil { t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err) } if ast.StartLine != 4 || ast.EndLine != 30 { fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 4, 30, ast.StartLine, ast.EndLine) t.Fatalf("Root line information doesn't match result.") } if len(ast.Children) != 3 { fmt.Fprintf(os.Stderr, "Wrong number of child: expected(%d), actual(%d)\n", 3, len(ast.Children)) t.Fatalf("Root line information doesn't match result.") } expected := [][]int{ {4, 4}, {10, 11}, {16, 30}, } for i, child := range ast.Children { if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] { fmt.Fprintf(os.Stderr, "Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", i, expected[i][0], expected[i][1], child.StartLine, child.EndLine) t.Fatalf("Root line information doesn't match result.") } } } 
docker-1.10.3/builder/dockerfile/parser/testfile-line/000077500000000000000000000000001267010174400226425ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfile-line/Dockerfile000066400000000000000000000012641267010174400246370ustar00rootroot00000000000000 FROM brimstone/ubuntu:14.04 # TORUN -v /var/run/docker.sock:/var/run/docker.sock ENV GOPATH \ /go # Install the packages we need, clean up after them and us RUN apt-get update \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ && apt-get install -y --no-install-recommends git golang ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists \ && go get -v github.com/brimstone/consuldock \ && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ && rm /tmp/dpkg.* \ && rm -rf $GOPATH docker-1.10.3/builder/dockerfile/parser/testfiles-negative/000077500000000000000000000000001267010174400237005ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles-negative/env_no_value/000077500000000000000000000000001267010174400263605ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile000066400000000000000000000000271267010174400303510ustar00rootroot00000000000000FROM busybox ENV PATH docker-1.10.3/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/000077500000000000000000000000001267010174400274355ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile000066400000000000000000000000421267010174400314230ustar00rootroot00000000000000CMD [ "echo", [ "nested json" ] ] 
docker-1.10.3/builder/dockerfile/parser/testfiles/000077500000000000000000000000001267010174400221005ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/000077500000000000000000000000001267010174400247605ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile000066400000000000000000000004131267010174400267500ustar00rootroot00000000000000FROM ubuntu:14.04 MAINTAINER Seongyeol Lim COPY . /go/src/github.com/docker/docker ADD . / ADD null / COPY nullfile /tmp ADD [ "vimrc", "/tmp" ] COPY [ "bashrc", "/tmp" ] COPY [ "test file", "/tmp" ] ADD [ "test file", "/tmp/test file" ] docker-1.10.3/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result000066400000000000000000000004321267010174400262200ustar00rootroot00000000000000(from "ubuntu:14.04") (maintainer "Seongyeol Lim ") (copy "." "/go/src/github.com/docker/docker") (add "." "/") (add "null" "/") (copy "nullfile" "/tmp") (add "vimrc" "/tmp") (copy "bashrc" "/tmp") (copy "test file" "/tmp") (add "test file" "/tmp/test file") docker-1.10.3/builder/dockerfile/parser/testfiles/brimstone-consuldock/000077500000000000000000000000001267010174400262445ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile000066400000000000000000000014041267010174400302350ustar00rootroot00000000000000FROM brimstone/ubuntu:14.04 MAINTAINER brimstone@the.narro.ws # TORUN -v /var/run/docker.sock:/var/run/docker.sock ENV GOPATH /go # Set our command ENTRYPOINT ["/usr/local/bin/consuldock"] # Install the packages we need, clean up after them and us RUN apt-get update \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ && apt-get install -y --no-install-recommends git golang ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists \ && go get -v github.com/brimstone/consuldock \ && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ && dpkg -l | awk '/^ii/ {print $2}' > 
/tmp/dpkg.dirty \ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ && rm /tmp/dpkg.* \ && rm -rf $GOPATH docker-1.10.3/builder/dockerfile/parser/testfiles/brimstone-consuldock/result000066400000000000000000000011771267010174400275130ustar00rootroot00000000000000(from "brimstone/ubuntu:14.04") (maintainer "brimstone@the.narro.ws") (env "GOPATH" "/go") (entrypoint "/usr/local/bin/consuldock") (run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") docker-1.10.3/builder/dockerfile/parser/testfiles/brimstone-docker-consul/000077500000000000000000000000001267010174400266505ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile000066400000000000000000000030271267010174400306440ustar00rootroot00000000000000FROM brimstone/ubuntu:14.04 CMD [] ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] EXPOSE 8500 8600 8400 8301 8302 RUN apt-get update \ && apt-get install -y unzip wget \ && apt-get clean \ && rm -rf /var/lib/apt/lists RUN cd /tmp \ && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ -O web_ui.zip \ && unzip web_ui.zip \ && mv dist /webui \ && rm web_ui.zip RUN apt-get update \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ && apt-get install -y --no-install-recommends unzip wget \ && apt-get clean \ && rm -rf /var/lib/apt/lists \ && cd /tmp \ && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ -O web_ui.zip \ && unzip web_ui.zip \ && mv dist 
/webui \ && rm web_ui.zip \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ && rm /tmp/dpkg.* ENV GOPATH /go RUN apt-get update \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ && apt-get clean \ && rm -rf /var/lib/apt/lists \ && go get -v github.com/hashicorp/consul \ && mv $GOPATH/bin/consul /usr/bin/consul \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ && rm /tmp/dpkg.* \ && rm -rf $GOPATH docker-1.10.3/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result000066400000000000000000000027721267010174400301210ustar00rootroot00000000000000(from "brimstone/ubuntu:14.04") (cmd) (entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") (expose "8500" "8600" "8400" "8301" "8302") (run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") (run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") (run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") (env "GOPATH" "/go") (run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && 
apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") docker-1.10.3/builder/dockerfile/parser/testfiles/continueIndent/000077500000000000000000000000001267010174400250665ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile000066400000000000000000000006231267010174400270610ustar00rootroot00000000000000FROM ubuntu:14.04 RUN echo hello\ world\ goodnight \ moon\ light\ ning RUN echo hello \ world RUN echo hello \ world RUN echo hello \ goodbye\ frog RUN echo hello \ world RUN echo hi \ \ world \ \ good\ \ night RUN echo goodbye\ frog RUN echo good\ bye\ frog RUN echo hello \ # this is a comment # this is a comment with a blank line surrounding it this is some more useful stuff docker-1.10.3/builder/dockerfile/parser/testfiles/continueIndent/result000066400000000000000000000005041267010174400263260ustar00rootroot00000000000000(from "ubuntu:14.04") (run "echo hello world goodnight moon lightning") (run "echo hello world") (run "echo hello world") (run "echo hello goodbyefrog") (run "echo hello world") (run "echo hi world goodnight") (run "echo goodbyefrog") (run "echo goodbyefrog") (run "echo hello this is some more useful stuff") docker-1.10.3/builder/dockerfile/parser/testfiles/cpuguy83-nagios/000077500000000000000000000000001267010174400250455ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile000066400000000000000000000064221267010174400270430ustar00rootroot00000000000000FROM cpuguy83/ubuntu ENV NAGIOS_HOME /opt/nagios ENV NAGIOS_USER nagios ENV NAGIOS_GROUP nagios ENV NAGIOS_CMDUSER nagios ENV NAGIOS_CMDGROUP nagios ENV NAGIOSADMIN_USER nagiosadmin ENV NAGIOSADMIN_PASS nagios ENV APACHE_RUN_USER 
nagios ENV APACHE_RUN_GROUP nagios ENV NAGIOS_TIMEZONE UTC RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" 
>> /etc/apache2/conf.d/nagios.conf RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg RUN cp /etc/services /var/spool/postfix/etc/ RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix ADD nagios.init /etc/sv/nagios/run ADD apache.init /etc/sv/apache/run ADD postfix.init /etc/sv/postfix/run ADD postfix.stop /etc/sv/postfix/finish ADD start.sh /usr/local/bin/start_nagios ENV APACHE_LOCK_DIR /var/run ENV APACHE_LOG_DIR /var/log/apache2 EXPOSE 80 VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] CMD ["/usr/local/bin/start_nagios"] docker-1.10.3/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result000066400000000000000000000067251267010174400263200ustar00rootroot00000000000000(from "cpuguy83/ubuntu") (env "NAGIOS_HOME" "/opt/nagios") (env "NAGIOS_USER" "nagios") (env "NAGIOS_GROUP" "nagios") (env "NAGIOS_CMDUSER" "nagios") (env "NAGIOS_CMDGROUP" "nagios") (env "NAGIOSADMIN_USER" "nagiosadmin") (env "NAGIOSADMIN_PASS" "nagios") (env "APACHE_RUN_USER" "nagios") (env "APACHE_RUN_GROUP" "nagios") (env "NAGIOS_TIMEZONE" "UTC") (run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") (run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") (run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd 
$NAGIOS_CMDGROUP )") (run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") (add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") (run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") (add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") (run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") (run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") (run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") (run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") (run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") (run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") (run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") (run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") (run "download-mibs && echo 
\"mibs +ALL\" > /etc/snmp/snmp.conf") (run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") (run "cp /etc/services /var/spool/postfix/etc/") (run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") (add "nagios.init" "/etc/sv/nagios/run") (add "apache.init" "/etc/sv/apache/run") (add "postfix.init" "/etc/sv/postfix/run") (add "postfix.stop" "/etc/sv/postfix/finish") (add "start.sh" "/usr/local/bin/start_nagios") (env "APACHE_LOCK_DIR" "/var/run") (env "APACHE_LOG_DIR" "/var/log/apache2") (expose "80") (volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") (cmd "/usr/local/bin/start_nagios") docker-1.10.3/builder/dockerfile/parser/testfiles/docker/000077500000000000000000000000001267010174400233475ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/docker/Dockerfile000066400000000000000000000067161267010174400253530ustar00rootroot00000000000000# This file describes the standard way to build Docker, using docker # # Usage: # # # Assemble the full dev environment. This is slow the first time. # docker build -t docker . # # # Mount your source in an interactive container for quick testing: # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: # docker run --privileged docker hack/make.sh test # # # Publish a release: # docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ # -e GPG_PASSPHRASE=gloubiboulga \ # docker hack/release.sh # # Note: AppArmor used to mess with privileged mode, but this is no longer # the case. Therefore, you don't have to disable it anymore. 
# FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) # Packaged dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ apt-utils \ aufs-tools \ automake \ btrfs-tools \ build-essential \ curl \ dpkg-sig \ git \ iptables \ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ mercurial \ pandoc \ parallel \ reprepro \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.0* \ --no-install-recommends # Get lvm2 source for compiling statically RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly # Compile and install lvm2 RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz ENV PATH /usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/docker/vendor RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 darwin/386 \ freebsd/amd64 freebsd/386 freebsd/arm # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' # Grab Go's cover tool for dead-simple code coverage testing RUN go get golang.org/x/tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 # Get the "busybox" image source so we can build locally instead of pulling RUN git clone -b 
buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox # Setup s3cmd config RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' # Add an unprivileged user to be used for tests which need it RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor selinux # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source COPY . /go/src/github.com/docker/docker docker-1.10.3/builder/dockerfile/parser/testfiles/docker/result000066400000000000000000000035121267010174400246110ustar00rootroot00000000000000(from "ubuntu:14.04") (maintainer "Tianon Gravi (@tianon)") (run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") (run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") (run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") (run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") (env "PATH" "/usr/local/go/bin:$PATH") (env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") (run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") (env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") (env "GOARM" "5") (run "cd /usr/local/go/src && bash -xc 'for platform in 
$DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") (run "go get golang.org/x/tools/cmd/cover") (run "gem install --no-rdoc --no-ri fpm --version 1.0.2") (run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") (run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") (run "git config --global user.email 'docker-dummy@example.com'") (run "groupadd -r docker") (run "useradd --create-home --gid docker unprivilegeduser") (volume "/var/lib/docker") (workdir "/go/src/github.com/docker/docker") (env "DOCKER_BUILDTAGS" "apparmor selinux") (entrypoint "hack/dind") (copy "." "/go/src/github.com/docker/docker") docker-1.10.3/builder/dockerfile/parser/testfiles/env/000077500000000000000000000000001267010174400226705ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/env/Dockerfile000066400000000000000000000011071267010174400246610ustar00rootroot00000000000000FROM ubuntu ENV name value ENV name=value ENV name=value name2=value2 ENV name="value value1" ENV name=value\ value2 ENV name="value'quote space'value2" ENV name='value"double quote"value2' ENV name=value\ value2 name2=value2\ value3 ENV name="a\"b" ENV name="a\'b" ENV name='a\'b' ENV name='a\'b'' ENV name='a\"b' ENV name="''" # don't put anything after the next line - it must be the last line of the # Dockerfile and it must end with \ ENV name=value \ name1=value1 \ name2="value2a \ value2b" \ name3="value3a\n\"value3b\"" \ name4="value4a\\nvalue4b" \ docker-1.10.3/builder/dockerfile/parser/testfiles/env/result000066400000000000000000000011301267010174400241240ustar00rootroot00000000000000(from "ubuntu") (env "name" "value") (env "name" "value") (env "name" "value" "name2" "value2") (env "name" "\"value value1\"") (env "name" "value\\ value2") (env "name" "\"value'quote space'value2\"") (env "name" "'value\"double quote\"value2'") (env "name" "value\\ 
value2" "name2" "value2\\ value3") (env "name" "\"a\\\"b\"") (env "name" "\"a\\'b\"") (env "name" "'a\\'b'") (env "name" "'a\\'b''") (env "name" "'a\\\"b'") (env "name" "\"''\"") (env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") docker-1.10.3/builder/dockerfile/parser/testfiles/escapes/000077500000000000000000000000001267010174400235235ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/escapes/Dockerfile000066400000000000000000000003151267010174400255140ustar00rootroot00000000000000FROM ubuntu:14.04 MAINTAINER Erik \\Hollensbe \" RUN apt-get \update && \ apt-get \"install znc -y ADD \conf\\" /.znc RUN foo \ bar \ baz CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] docker-1.10.3/builder/dockerfile/parser/testfiles/escapes/result000066400000000000000000000003361267010174400247660ustar00rootroot00000000000000(from "ubuntu:14.04") (maintainer "Erik \\\\Hollensbe \\\"") (run "apt-get \\update && apt-get \\\"install znc -y") (add "\\conf\\\\\"" "/.znc") (run "foo bar baz") (cmd "/usr\\\"/bin/znc" "-f" "-r") docker-1.10.3/builder/dockerfile/parser/testfiles/flags/000077500000000000000000000000001267010174400231745ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/flags/Dockerfile000066400000000000000000000003571267010174400251730ustar00rootroot00000000000000FROM scratch COPY foo /tmp/ COPY --user=me foo /tmp/ COPY --doit=true foo /tmp/ COPY --user=me --doit=true foo /tmp/ COPY --doit=true -- foo /tmp/ COPY -- foo /tmp/ CMD --doit [ "a", "b" ] CMD --doit=true -- [ "a", "b" ] CMD --doit -- [ ] docker-1.10.3/builder/dockerfile/parser/testfiles/flags/result000066400000000000000000000004411267010174400244340ustar00rootroot00000000000000(from "scratch") (copy "foo" "/tmp/") (copy ["--user=me"] "foo" "/tmp/") (copy ["--doit=true"] "foo" "/tmp/") (copy ["--user=me" "--doit=true"] "foo" "/tmp/") (copy ["--doit=true"] "foo" "/tmp/") (copy "foo" 
"/tmp/") (cmd ["--doit"] "a" "b") (cmd ["--doit=true"] "a" "b") (cmd ["--doit"]) docker-1.10.3/builder/dockerfile/parser/testfiles/influxdb/000077500000000000000000000000001267010174400237135ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/influxdb/Dockerfile000066400000000000000000000005701267010174400257070ustar00rootroot00000000000000FROM ubuntu:14.04 RUN apt-get update && apt-get install wget -y RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb RUN dpkg -i influxdb_latest_amd64.deb RUN rm -r /opt/influxdb/shared VOLUME /opt/influxdb/shared CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml EXPOSE 8083 EXPOSE 8086 EXPOSE 8090 EXPOSE 8099 docker-1.10.3/builder/dockerfile/parser/testfiles/influxdb/result000066400000000000000000000006401267010174400251540ustar00rootroot00000000000000(from "ubuntu:14.04") (run "apt-get update && apt-get install wget -y") (run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") (run "dpkg -i influxdb_latest_amd64.deb") (run "rm -r /opt/influxdb/shared") (volume "/opt/influxdb/shared") (cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") (expose "8083") (expose "8086") (expose "8090") (expose "8099") docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/000077500000000000000000000000001267010174400327545ustar00rootroot00000000000000Dockerfile000066400000000000000000000001121267010174400346610ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-doubleCMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" result000066400000000000000000000001301267010174400341300ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double 
quotes\\\"]\"") docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/000077500000000000000000000000001267010174400315045ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile000066400000000000000000000000661267010174400335000ustar00rootroot00000000000000CMD '["echo", "Well, JSON in a string is JSON too?"]' docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result000066400000000000000000000000761267010174400327500ustar00rootroot00000000000000(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/000077500000000000000000000000001267010174400305555ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile000066400000000000000000000000561267010174400325500ustar00rootroot00000000000000CMD ['echo','single quotes are invalid JSON'] docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result000066400000000000000000000000621267010174400320140ustar00rootroot00000000000000(cmd "['echo','single quotes are invalid JSON']") docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/000077500000000000000000000000001267010174400320665ustar00rootroot00000000000000Dockerfile000066400000000000000000000000731267010174400340010ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracketCMD ["echo", "Please, close the brackets when you're done" docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result000066400000000000000000000001031267010174400333210ustar00rootroot00000000000000(cmd "[\"echo\", \"Please, close the brackets when you're done\"") 
docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/000077500000000000000000000000001267010174400317615ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile000066400000000000000000000000421267010174400337470ustar00rootroot00000000000000CMD ["echo", "look ma, no quote!] docker-1.10.3/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result000066400000000000000000000000511267010174400332160ustar00rootroot00000000000000(cmd "[\"echo\", \"look ma, no quote!]") docker-1.10.3/builder/dockerfile/parser/testfiles/json/000077500000000000000000000000001267010174400230515ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/json/Dockerfile000066400000000000000000000002341267010174400250420ustar00rootroot00000000000000CMD [] CMD [""] CMD ["a"] CMD ["a","b"] CMD [ "a", "b" ] CMD [ "a", "b" ] CMD [ "a", "b" ] CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] docker-1.10.3/builder/dockerfile/parser/testfiles/json/result000066400000000000000000000002131267010174400243060ustar00rootroot00000000000000(cmd) (cmd "") (cmd "a") (cmd "a" "b") (cmd "a" "b") (cmd "a" "b") (cmd "a" "b") (cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") docker-1.10.3/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/000077500000000000000000000000001267010174400273775ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile000066400000000000000000000003221267010174400313660ustar00rootroot00000000000000FROM ubuntu:14.04 MAINTAINER James Turnbull "james@example.com" ENV REFRESHED_AT 2014-06-01 RUN apt-get update RUN apt-get -y install redis-server redis-tools EXPOSE 6379 ENTRYPOINT [ "/usr/bin/redis-server" ] 
docker-1.10.3/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result000066400000000000000000000003541267010174400306420ustar00rootroot00000000000000(from "ubuntu:14.04") (maintainer "James Turnbull \"james@example.com\"") (env "REFRESHED_AT" "2014-06-01") (run "apt-get update") (run "apt-get -y install redis-server redis-tools") (expose "6379") (entrypoint "/usr/bin/redis-server") docker-1.10.3/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/000077500000000000000000000000001267010174400273775ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile000066400000000000000000000013201267010174400313650ustar00rootroot00000000000000FROM busybox:buildroot-2014.02 MAINTAINER docker ONBUILD RUN ["echo", "test"] ONBUILD RUN echo test ONBUILD COPY . / # RUN Commands \ # linebreak in comment \ RUN ["ls", "-la"] RUN ["echo", "'1234'"] RUN echo "1234" RUN echo 1234 RUN echo '1234' && \ echo "456" && \ echo 789 RUN sh -c 'echo root:testpass \ > /tmp/passwd' RUN mkdir -p /test /test2 /test3/test # ENV \ ENV SCUBA 1 DUBA 3 ENV SCUBA "1 DUBA 3" # CMD \ CMD ["echo", "test"] CMD echo test CMD echo "test" CMD echo 'test' CMD echo 'test' | wc - #EXPOSE\ EXPOSE 3000 EXPOSE 9000 5000 6000 USER docker USER docker:root VOLUME ["/test"] VOLUME ["/test", "/test2"] VOLUME /test3 WORKDIR /test ADD . / COPY . copy docker-1.10.3/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result000066400000000000000000000013541267010174400306430ustar00rootroot00000000000000(from "busybox:buildroot-2014.02") (maintainer "docker ") (onbuild (run "echo" "test")) (onbuild (run "echo test")) (onbuild (copy "." 
"/")) (run "ls" "-la") (run "echo" "'1234'") (run "echo \"1234\"") (run "echo 1234") (run "echo '1234' && echo \"456\" && echo 789") (run "sh -c 'echo root:testpass > /tmp/passwd'") (run "mkdir -p /test /test2 /test3/test") (env "SCUBA" "1 DUBA 3") (env "SCUBA" "\"1 DUBA 3\"") (cmd "echo" "test") (cmd "echo test") (cmd "echo \"test\"") (cmd "echo 'test'") (cmd "echo 'test' | wc -") (expose "3000") (expose "9000" "5000" "6000") (user "docker") (user "docker:root") (volume "/test") (volume "/test" "/test2") (volume "/test3") (workdir "/test") (add "." "/") (copy "." "copy") docker-1.10.3/builder/dockerfile/parser/testfiles/mail/000077500000000000000000000000001267010174400230225ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/mail/Dockerfile000066400000000000000000000006041267010174400250140ustar00rootroot00000000000000FROM ubuntu:14.04 RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y ADD .muttrc / ADD .offlineimaprc / ADD .tmux.conf / ADD mutt /.mutt ADD vim /.vim ADD vimrc /.vimrc ADD crontab /etc/crontab RUN chmod 644 /etc/crontab RUN mkdir /Mail RUN mkdir /.offlineimap RUN echo "export TERM=screen-256color" >/.zshenv CMD setsid cron; tmux -2 docker-1.10.3/builder/dockerfile/parser/testfiles/mail/result000066400000000000000000000007121267010174400242630ustar00rootroot00000000000000(from "ubuntu:14.04") (run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") (add ".muttrc" "/") (add ".offlineimaprc" "/") (add ".tmux.conf" "/") (add "mutt" "/.mutt") (add "vim" "/.vim") (add "vimrc" "/.vimrc") (add "crontab" "/etc/crontab") (run "chmod 644 /etc/crontab") (run "mkdir /Mail") (run "mkdir /.offlineimap") (run "echo \"export TERM=screen-256color\" >/.zshenv") (cmd "setsid cron; tmux -2") 
docker-1.10.3/builder/dockerfile/parser/testfiles/multiple-volumes/000077500000000000000000000000001267010174400254235ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile000066400000000000000000000001531267010174400274140ustar00rootroot00000000000000FROM foo VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs docker-1.10.3/builder/dockerfile/parser/testfiles/multiple-volumes/result000066400000000000000000000001721267010174400266640ustar00rootroot00000000000000(from "foo") (volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") docker-1.10.3/builder/dockerfile/parser/testfiles/mumble/000077500000000000000000000000001267010174400233615ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/mumble/Dockerfile000066400000000000000000000002351267010174400253530ustar00rootroot00000000000000FROM ubuntu:14.04 RUN apt-get update && apt-get install libcap2-bin mumble-server -y ADD ./mumble-server.ini /etc/mumble-server.ini CMD /usr/sbin/murmurd docker-1.10.3/builder/dockerfile/parser/testfiles/mumble/result000066400000000000000000000002541267010174400246230ustar00rootroot00000000000000(from "ubuntu:14.04") (run "apt-get update && apt-get install libcap2-bin mumble-server -y") (add "./mumble-server.ini" "/etc/mumble-server.ini") (cmd "/usr/sbin/murmurd") docker-1.10.3/builder/dockerfile/parser/testfiles/nginx/000077500000000000000000000000001267010174400232235ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/nginx/Dockerfile000066400000000000000000000004301267010174400252120ustar00rootroot00000000000000FROM ubuntu:14.04 MAINTAINER Erik Hollensbe RUN apt-get update && apt-get install nginx-full -y RUN rm -rf /etc/nginx ADD etc /etc/nginx RUN chown -R root:root /etc/nginx RUN /usr/sbin/nginx -qt RUN mkdir /www CMD ["/usr/sbin/nginx"] VOLUME /www EXPOSE 80 
docker-1.10.3/builder/dockerfile/parser/testfiles/nginx/result000066400000000000000000000004771267010174400244740ustar00rootroot00000000000000(from "ubuntu:14.04") (maintainer "Erik Hollensbe ") (run "apt-get update && apt-get install nginx-full -y") (run "rm -rf /etc/nginx") (add "etc" "/etc/nginx") (run "chown -R root:root /etc/nginx") (run "/usr/sbin/nginx -qt") (run "mkdir /www") (cmd "/usr/sbin/nginx") (volume "/www") (expose "80") docker-1.10.3/builder/dockerfile/parser/testfiles/tf2/000077500000000000000000000000001267010174400225735ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/tf2/Dockerfile000066400000000000000000000021731267010174400245700ustar00rootroot00000000000000FROM ubuntu:12.04 EXPOSE 27015 EXPOSE 27005 EXPOSE 26901 EXPOSE 27020 RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y RUN mkdir -p /steam RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam ADD ./script /steam/script RUN /steam/steamcmd.sh +runscript /steam/script RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg RUN rm -r /steam/tf2/tf/addons/sourcemod/configs ADD ./configs /steam/tf2/tf/addons/sourcemod/configs RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill 
docker-1.10.3/builder/dockerfile/parser/testfiles/tf2/result000066400000000000000000000023221267010174400240330ustar00rootroot00000000000000(from "ubuntu:12.04") (expose "27015") (expose "27005") (expose "26901") (expose "27020") (run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") (run "mkdir -p /steam") (run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") (add "./script" "/steam/script") (run "/steam/steamcmd.sh +runscript /steam/script") (run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") (run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") (add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") (add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") (add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") (run "rm -r /steam/tf2/tf/addons/sourcemod/configs") (add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") (run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") (run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") (cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") docker-1.10.3/builder/dockerfile/parser/testfiles/weechat/000077500000000000000000000000001267010174400235205ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/weechat/Dockerfile000066400000000000000000000003061267010174400255110ustar00rootroot00000000000000FROM ubuntu:14.04 RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y ADD .weechat /.weechat ADD .tmux.conf / RUN echo "export TERM=screen-256color" >/.zshenv CMD zsh -c weechat 
docker-1.10.3/builder/dockerfile/parser/testfiles/weechat/result000066400000000000000000000003411267010174400247570ustar00rootroot00000000000000(from "ubuntu:14.04") (run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") (add ".weechat" "/.weechat") (add ".tmux.conf" "/") (run "echo \"export TERM=screen-256color\" >/.zshenv") (cmd "zsh -c weechat") docker-1.10.3/builder/dockerfile/parser/testfiles/znc/000077500000000000000000000000001267010174400226725ustar00rootroot00000000000000docker-1.10.3/builder/dockerfile/parser/testfiles/znc/Dockerfile000066400000000000000000000002421267010174400246620ustar00rootroot00000000000000FROM ubuntu:14.04 MAINTAINER Erik Hollensbe RUN apt-get update && apt-get install znc -y ADD conf /.znc CMD [ "/usr/bin/znc", "-f", "-r" ] docker-1.10.3/builder/dockerfile/parser/testfiles/znc/result000066400000000000000000000002561267010174400241360ustar00rootroot00000000000000(from "ubuntu:14.04") (maintainer "Erik Hollensbe ") (run "apt-get update && apt-get install znc -y") (add "conf" "/.znc") (cmd "/usr/bin/znc" "-f" "-r") docker-1.10.3/builder/dockerfile/parser/utils.go000066400000000000000000000072271267010174400215750ustar00rootroot00000000000000package parser import ( "fmt" "strconv" "strings" "unicode" ) // Dump dumps the AST defined by `node` as a list of sexps. // Returns a string suitable for printing. func (node *Node) Dump() string { str := "" str += node.Value if len(node.Flags) > 0 { str += fmt.Sprintf(" %q", node.Flags) } for _, n := range node.Children { str += "(" + n.Dump() + ")\n" } if node.Next != nil { for n := node.Next; n != nil; n = n.Next { if len(n.Children) > 0 { str += " " + n.Dump() } else { str += " " + strconv.Quote(n.Value) } } } return strings.TrimSpace(str) } // performs the dispatch based on the two primal strings, cmd and args. Please // look at the dispatch table in parser.go to see how these dispatchers work. 
func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { fn := dispatch[cmd] // Ignore invalid Dockerfile instructions if fn == nil { fn = parseIgnore } sexp, attrs, err := fn(args) if err != nil { return nil, nil, err } return sexp, attrs, nil } // splitCommand takes a single line of text and parses out the cmd and args, // which are used for dispatching to more exact parsing functions. func splitCommand(line string) (string, []string, string, error) { var args string var flags []string // Make sure we get the same results irrespective of leading/trailing spaces cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) cmd := strings.ToLower(cmdline[0]) if len(cmdline) == 2 { var err error args, flags, err = extractBuilderFlags(cmdline[1]) if err != nil { return "", nil, "", err } } return cmd, flags, strings.TrimSpace(args), nil } // covers comments and empty lines. Lines should be trimmed before passing to // this function. func stripComments(line string) string { // string is already trimmed at this point if tokenComment.MatchString(line) { return tokenComment.ReplaceAllString(line, "") } return line } func extractBuilderFlags(line string) (string, []string, error) { // Parses the BuilderFlags and returns the remaining part of the line const ( inSpaces = iota // looking for start of a word inWord inQuote ) words := []string{} phase := inSpaces word := "" quote := '\000' blankOK := false var ch rune for pos := 0; pos <= len(line); pos++ { if pos != len(line) { ch = rune(line[pos]) } if phase == inSpaces { // Looking for start of word if pos == len(line) { // end of input break } if unicode.IsSpace(ch) { // skip spaces continue } // Only keep going if the next word starts with -- if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { return line[pos:], words, nil } phase = inWord // found someting with "--", fall thru } if (phase == inWord || phase == inQuote) && (pos == len(line)) { if word != "--" && (blankOK || len(word) > 0) { words 
= append(words, word) } break } if phase == inWord { if unicode.IsSpace(ch) { phase = inSpaces if word == "--" { return line[pos:], words, nil } if blankOK || len(word) > 0 { words = append(words, word) } word = "" blankOK = false continue } if ch == '\'' || ch == '"' { quote = ch blankOK = true phase = inQuote continue } if ch == '\\' { if pos+1 == len(line) { continue // just skip \ at end } pos++ ch = rune(line[pos]) } word += string(ch) continue } if phase == inQuote { if ch == quote { phase = inWord continue } if ch == '\\' { if pos+1 == len(line) { phase = inWord continue // just skip \ at end } pos++ ch = rune(line[pos]) } word += string(ch) } } return "", words, nil } docker-1.10.3/builder/dockerfile/shell_parser.go000066400000000000000000000152311267010174400216160ustar00rootroot00000000000000package dockerfile // This will take a single word and an array of env variables and // process all quotes (" and ') as well as $xxx and ${xxx} env variable // tokens. Tries to mimic bash shell process. // It doesn't support all flavors of ${xx:...} formats but new ones can // be added by adding code to the "special ${} format processing" section import ( "fmt" "strings" "text/scanner" "unicode" ) type shellWord struct { word string scanner scanner.Scanner envs []string pos int } // ProcessWord will use the 'env' list of environment variables, // and replace any env var references in 'word'. func ProcessWord(word string, env []string) (string, error) { sw := &shellWord{ word: word, envs: env, pos: 0, } sw.scanner.Init(strings.NewReader(word)) word, _, err := sw.process() return word, err } // ProcessWords will use the 'env' list of environment variables, // and replace any env var references in 'word' then it will also // return a slice of strings which represents the 'word' // split up based on spaces - taking into account quotes. Note that // this splitting is done **after** the env var substitutions are done. 
// Note, each one is trimmed to remove leading and trailing spaces (unless // they are quoted", but ProcessWord retains spaces between words. func ProcessWords(word string, env []string) ([]string, error) { sw := &shellWord{ word: word, envs: env, pos: 0, } sw.scanner.Init(strings.NewReader(word)) _, words, err := sw.process() return words, err } func (sw *shellWord) process() (string, []string, error) { return sw.processStopOn(scanner.EOF) } type wordsStruct struct { word string words []string inWord bool } func (w *wordsStruct) addChar(ch rune) { if unicode.IsSpace(ch) && w.inWord { if len(w.word) != 0 { w.words = append(w.words, w.word) w.word = "" w.inWord = false } } else if !unicode.IsSpace(ch) { w.addRawChar(ch) } } func (w *wordsStruct) addRawChar(ch rune) { w.word += string(ch) w.inWord = true } func (w *wordsStruct) addString(str string) { var scan scanner.Scanner scan.Init(strings.NewReader(str)) for scan.Peek() != scanner.EOF { w.addChar(scan.Next()) } } func (w *wordsStruct) addRawString(str string) { w.word += str w.inWord = true } func (w *wordsStruct) getWords() []string { if len(w.word) > 0 { w.words = append(w.words, w.word) // Just in case we're called again by mistake w.word = "" w.inWord = false } return w.words } // Process the word, starting at 'pos', and stop when we get to the // end of the word or the 'stopChar' character func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { var result string var words wordsStruct var charFuncMapping = map[rune]func() (string, error){ '\'': sw.processSingleQuote, '"': sw.processDoubleQuote, '$': sw.processDollar, } for sw.scanner.Peek() != scanner.EOF { ch := sw.scanner.Peek() if stopChar != scanner.EOF && ch == stopChar { sw.scanner.Next() break } if fn, ok := charFuncMapping[ch]; ok { // Call special processing func for certain chars tmp, err := fn() if err != nil { return "", []string{}, err } result += tmp if ch == rune('$') { words.addString(tmp) } else { 
words.addRawString(tmp) } } else { // Not special, just add it to the result ch = sw.scanner.Next() if ch == '\\' { // '\' escapes, except end of line ch = sw.scanner.Next() if ch == scanner.EOF { break } words.addRawChar(ch) } else { words.addChar(ch) } result += string(ch) } } return result, words.getWords(), nil } func (sw *shellWord) processSingleQuote() (string, error) { // All chars between single quotes are taken as-is // Note, you can't escape ' var result string sw.scanner.Next() for { ch := sw.scanner.Next() if ch == '\'' || ch == scanner.EOF { break } result += string(ch) } return result, nil } func (sw *shellWord) processDoubleQuote() (string, error) { // All chars up to the next " are taken as-is, even ', except any $ chars // But you can escape " with a \ var result string sw.scanner.Next() for sw.scanner.Peek() != scanner.EOF { ch := sw.scanner.Peek() if ch == '"' { sw.scanner.Next() break } if ch == '$' { tmp, err := sw.processDollar() if err != nil { return "", err } result += tmp } else { ch = sw.scanner.Next() if ch == '\\' { chNext := sw.scanner.Peek() if chNext == scanner.EOF { // Ignore \ at end of word continue } if chNext == '"' || chNext == '$' { // \" and \$ can be escaped, all other \'s are left as-is ch = sw.scanner.Next() } } result += string(ch) } } return result, nil } func (sw *shellWord) processDollar() (string, error) { sw.scanner.Next() ch := sw.scanner.Peek() if ch == '{' { sw.scanner.Next() name := sw.processName() ch = sw.scanner.Peek() if ch == '}' { // Normal ${xx} case sw.scanner.Next() return sw.getEnv(name), nil } if ch == ':' { // Special ${xx:...} format processing // Yes it allows for recursive $'s in the ... 
spot sw.scanner.Next() // skip over : modifier := sw.scanner.Next() word, _, err := sw.processStopOn('}') if err != nil { return "", err } // Grab the current value of the variable in question so we // can use to to determine what to do based on the modifier newValue := sw.getEnv(name) switch modifier { case '+': if newValue != "" { newValue = word } return newValue, nil case '-': if newValue == "" { newValue = word } return newValue, nil default: return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) } } return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) } // $xxx case name := sw.processName() if name == "" { return "$", nil } return sw.getEnv(name), nil } func (sw *shellWord) processName() string { // Read in a name (alphanumeric or _) // If it starts with a numeric then just return $# var name string for sw.scanner.Peek() != scanner.EOF { ch := sw.scanner.Peek() if len(name) == 0 && unicode.IsDigit(ch) { ch = sw.scanner.Next() return string(ch) } if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { break } ch = sw.scanner.Next() name += string(ch) } return name } func (sw *shellWord) getEnv(name string) string { for _, env := range sw.envs { i := strings.Index(env, "=") if i < 0 { if name == env { // Should probably never get here, but just in case treat // it like "var" and "var=" are the same return "" } continue } if name != env[:i] { continue } return env[i+1:] } return "" } docker-1.10.3/builder/dockerfile/shell_parser_test.go000066400000000000000000000056421267010174400226620ustar00rootroot00000000000000package dockerfile import ( "bufio" "os" "strings" "testing" ) func TestShellParser4EnvVars(t *testing.T) { fn := "envVarTest" file, err := os.Open(fn) if err != nil { t.Fatalf("Can't open '%s': %s", err, fn) } defer file.Close() scanner := bufio.NewScanner(file) envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} for scanner.Scan() { line := scanner.Text() // Trim comments and blank lines i 
:= strings.Index(line, "#") if i >= 0 { line = line[:i] } line = strings.TrimSpace(line) if line == "" { continue } words := strings.Split(line, "|") if len(words) != 2 { t.Fatalf("Error in '%s' - should be exactly one | in:%q", fn, line) } words[0] = strings.TrimSpace(words[0]) words[1] = strings.TrimSpace(words[1]) newWord, err := ProcessWord(words[0], envs) if err != nil { newWord = "error" } if newWord != words[1] { t.Fatalf("Error. Src: %s Calc: %s Expected: %s", words[0], newWord, words[1]) } } } func TestShellParser4Words(t *testing.T) { fn := "wordsTest" file, err := os.Open(fn) if err != nil { t.Fatalf("Can't open '%s': %s", err, fn) } defer file.Close() envs := []string{} scanner := bufio.NewScanner(file) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, "#") { continue } if strings.HasPrefix(line, "ENV ") { line = strings.TrimLeft(line[3:], " ") envs = append(envs, line) continue } words := strings.Split(line, "|") if len(words) != 2 { t.Fatalf("Error in '%s' - should be exactly one | in: %q", fn, line) } test := strings.TrimSpace(words[0]) expected := strings.Split(strings.TrimLeft(words[1], " "), ",") result, err := ProcessWords(test, envs) if err != nil { result = []string{"error"} } if len(result) != len(expected) { t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) } for i, w := range expected { if w != result[i] { t.Fatalf("Error. 
%q was suppose to result in %q, but got %q instead", test, expected, result) } } } } func TestGetEnv(t *testing.T) { sw := &shellWord{ word: "", envs: nil, pos: 0, } sw.envs = []string{} if sw.getEnv("foo") != "" { t.Fatalf("2 - 'foo' should map to ''") } sw.envs = []string{"foo"} if sw.getEnv("foo") != "" { t.Fatalf("3 - 'foo' should map to ''") } sw.envs = []string{"foo="} if sw.getEnv("foo") != "" { t.Fatalf("4 - 'foo' should map to ''") } sw.envs = []string{"foo=bar"} if sw.getEnv("foo") != "bar" { t.Fatalf("5 - 'foo' should map to 'bar'") } sw.envs = []string{"foo=bar", "car=hat"} if sw.getEnv("foo") != "bar" { t.Fatalf("6 - 'foo' should map to 'bar'") } if sw.getEnv("car") != "hat" { t.Fatalf("7 - 'car' should map to 'hat'") } // Make sure we grab the first 'car' in the list sw.envs = []string{"foo=bar", "car=hat", "car=bike"} if sw.getEnv("car") != "hat" { t.Fatalf("8 - 'car' should map to 'hat'") } } docker-1.10.3/builder/dockerfile/support.go000066400000000000000000000004671267010174400206540ustar00rootroot00000000000000package dockerfile import "strings" func handleJSONArgs(args []string, attributes map[string]bool) []string { if len(args) == 0 { return []string{} } if attributes != nil && attributes["json"] { return args } // literal string command, not an exec array return []string{strings.Join(args, " ")} } docker-1.10.3/builder/dockerfile/wordsTest000066400000000000000000000014121267010174400205210ustar00rootroot00000000000000hello | hello hello${hi}bye | hellobye ENV hi=hi hello${hi}bye | hellohibye ENV space=abc def hello${space}bye | helloabc,defbye hello"${space}"bye | helloabc defbye hello "${space}"bye | hello,abc defbye ENV leading= ab c hello${leading}def | hello,ab,cdef hello"${leading}" def | hello ab c,def hello"${leading}" | hello ab c hello${leading} | hello,ab,c # next line MUST have 3 trailing spaces, don't erase them! 
ENV trailing=ab c hello${trailing} | helloab,c hello${trailing}d | helloab,c,d hello"${trailing}"d | helloab c d # next line MUST have 3 trailing spaces, don't erase them! hel"lo${trailing}" | helloab c hello" there " | hello there hello there | hello,there hello\ there | hello there hello" there | hello there hello\" there | hello",there docker-1.10.3/builder/dockerignore.go000066400000000000000000000030761267010174400175030ustar00rootroot00000000000000package builder import ( "os" "github.com/docker/docker/builder/dockerignore" "github.com/docker/docker/pkg/fileutils" ) // DockerIgnoreContext wraps a ModifiableContext to add a method // for handling the .dockerignore file at the root of the context. type DockerIgnoreContext struct { ModifiableContext } // Process reads the .dockerignore file at the root of the embedded context. // If .dockerignore does not exist in the context, then nil is returned. // // It can take a list of files to be removed after .dockerignore is removed. // This is used for server-side implementations of builders that need to send // the .dockerignore file as well as the special files specified in filesToRemove, // but expect them to be excluded from the context after they were processed. // // For example, server-side Dockerfile builders are expected to pass in the name // of the Dockerfile to be removed after it was parsed. // // TODO: Don't require a ModifiableContext (use Context instead) and don't remove // files, instead handle a list of files to be excluded from the context. func (c DockerIgnoreContext) Process(filesToRemove []string) error { f, err := c.Open(".dockerignore") // Note that a missing .dockerignore file isn't treated as an error if err != nil { if os.IsNotExist(err) { return nil } return err } excludes, _ := dockerignore.ReadAll(f) filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
for _, fileToRemove := range filesToRemove { rm, _ := fileutils.Matches(fileToRemove, excludes) if rm { c.Remove(fileToRemove) } } return nil } docker-1.10.3/builder/dockerignore/000077500000000000000000000000001267010174400171465ustar00rootroot00000000000000docker-1.10.3/builder/dockerignore/dockerignore.go000066400000000000000000000014321267010174400221500ustar00rootroot00000000000000package dockerignore import ( "bufio" "fmt" "io" "path/filepath" "strings" ) // ReadAll reads a .dockerignore file and returns the list of file patterns // to ignore. Note this will trim whitespace from each line as well // as use GO's "clean" func to get the shortest/cleanest path for each. func ReadAll(reader io.ReadCloser) ([]string, error) { if reader == nil { return nil, nil } defer reader.Close() scanner := bufio.NewScanner(reader) var excludes []string for scanner.Scan() { pattern := strings.TrimSpace(scanner.Text()) if pattern == "" { continue } pattern = filepath.Clean(pattern) excludes = append(excludes, pattern) } if err := scanner.Err(); err != nil { return nil, fmt.Errorf("Error reading .dockerignore: %v", err) } return excludes, nil } docker-1.10.3/builder/dockerignore/dockerignore_test.go000066400000000000000000000020661267010174400232130ustar00rootroot00000000000000package dockerignore import ( "fmt" "io/ioutil" "os" "path/filepath" "testing" ) func TestReadAll(t *testing.T) { tmpDir, err := ioutil.TempDir("", "dockerignore-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) di, err := ReadAll(nil) if err != nil { t.Fatalf("Expected not to have error, got %v", err) } if diLen := len(di); diLen != 0 { t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) } diName := filepath.Join(tmpDir, ".dockerignore") content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") err = ioutil.WriteFile(diName, []byte(content), 0777) if err != nil { t.Fatal(err) } diFd, err := os.Open(diName) if err != nil { t.Fatal(err) } di, err = ReadAll(diFd) if err 
!= nil { t.Fatal(err) } if di[0] != "test1" { t.Fatalf("First element is not test1") } if di[1] != "/test2" { t.Fatalf("Second element is not /test2") } if di[2] != "/a/file/here" { t.Fatalf("Third element is not /a/file/here") } if di[3] != "lastfile" { t.Fatalf("Fourth element is not lastfile") } } docker-1.10.3/builder/git.go000066400000000000000000000010421267010174400156020ustar00rootroot00000000000000package builder import ( "os" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/gitutils" ) // MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. func MakeGitContext(gitURL string) (ModifiableContext, error) { root, err := gitutils.Clone(gitURL) if err != nil { return nil, err } c, err := archive.Tar(root, archive.Uncompressed) if err != nil { return nil, err } defer func() { // TODO: print errors? c.Close() os.RemoveAll(root) }() return MakeTarSumContext(c) } docker-1.10.3/builder/image.go000066400000000000000000000003021267010174400160770ustar00rootroot00000000000000package builder import "github.com/docker/engine-api/types/container" // Image represents a Docker image used by the builder. type Image interface { ID() string Config() *container.Config } docker-1.10.3/builder/remote.go000066400000000000000000000077621267010174400163310ustar00rootroot00000000000000package builder import ( "bytes" "errors" "fmt" "io" "io/ioutil" "regexp" "github.com/docker/docker/pkg/httputils" ) // When downloading remote contexts, limit the amount (in bytes) // to be read from the response body in order to detect its Content-Type const maxPreambleLength = 100 const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` var mimeRe = regexp.MustCompile(acceptableRemoteMIME) // MakeRemoteContext downloads a context from remoteURL and returns it. 
// // If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of // maxPreambleLength bytes from the body to help detecting the MIME type. // Look at acceptableRemoteMIME for more details. // // If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected // to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). // In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { f, err := httputils.Download(remoteURL) if err != nil { return nil, fmt.Errorf("Error downloading remote context %s: %v", remoteURL, err) } defer f.Body.Close() var contextReader io.ReadCloser if contentTypeHandlers != nil { contentType := f.Header.Get("Content-Type") clen := f.ContentLength contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) if err != nil { return nil, fmt.Errorf("Error detecting content type for remote %s: %v", remoteURL, err) } defer contextReader.Close() // This loop tries to find a content-type handler for the detected content-type. // If it could not find one from the caller-supplied map, it tries the empty content-type `""` // which is interpreted as a fallback handler (usually used for raw tar contexts). for _, ct := range []string{contentType, ""} { if fn, ok := contentTypeHandlers[ct]; ok { defer contextReader.Close() if contextReader, err = fn(contextReader); err != nil { return nil, err } break } } } // Pass through - this is a pre-packaged context, presumably // with a Dockerfile with the right name inside it. return MakeTarSumContext(contextReader) } // inspectResponse looks into the http response data at r to determine whether its // content-type is on the list of acceptable content types for remote build contexts. 
// This function returns: // - a string representation of the detected content-type // - an io.Reader for the response body // - an error value which will be non-nil either when something goes wrong while // reading bytes from r or when the detected content-type is not acceptable. func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { plen := clen if plen <= 0 || plen > maxPreambleLength { plen = maxPreambleLength } preamble := make([]byte, plen, plen) rlen, err := r.Read(preamble) if rlen == 0 { return ct, r, errors.New("Empty response") } if err != nil && err != io.EOF { return ct, r, err } preambleR := bytes.NewReader(preamble) bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) // Some web servers will use application/octet-stream as the default // content type for files without an extension (e.g. 'Dockerfile') // so if we receive this value we better check for text content contentType := ct if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { contentType, _, err = httputils.DetectContentType(preamble) if err != nil { return contentType, bodyReader, err } } contentType = selectAcceptableMIME(contentType) var cterr error if len(contentType) == 0 { cterr = fmt.Errorf("unsupported Content-Type %q", ct) contentType = ct } return contentType, bodyReader, cterr } func selectAcceptableMIME(ct string) string { return mimeRe.FindString(ct) } docker-1.10.3/builder/remote_test.go000066400000000000000000000074161267010174400173640ustar00rootroot00000000000000package builder import ( "bytes" "io/ioutil" "testing" ) var textPlainDockerfile = "FROM busybox" var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic func TestSelectAcceptableMIME(t *testing.T) { validMimeStrings := []string{ "application/x-bzip2", "application/bzip2", "application/gzip", "application/x-gzip", "application/x-xz", "application/xz", "application/tar", "application/x-tar", "application/octet-stream", "text/plain", } 
invalidMimeStrings := []string{ "", "application/octet", "application/json", } for _, m := range invalidMimeStrings { if len(selectAcceptableMIME(m)) > 0 { t.Fatalf("Should not have accepted %q", m) } } for _, m := range validMimeStrings { if str := selectAcceptableMIME(m); str == "" { t.Fatalf("Should have accepted %q", m) } } } func TestInspectEmptyResponse(t *testing.T) { ct := "application/octet-stream" br := ioutil.NopCloser(bytes.NewReader([]byte(""))) contentType, bReader, err := inspectResponse(ct, br, 0) if err == nil { t.Fatalf("Should have generated an error for an empty response") } if contentType != "application/octet-stream" { t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) } body, err := ioutil.ReadAll(bReader) if err != nil { t.Fatal(err) } if len(body) != 0 { t.Fatal("response body should remain empty") } } func TestInspectResponseBinary(t *testing.T) { ct := "application/octet-stream" br := ioutil.NopCloser(bytes.NewReader(binaryContext)) contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) if err != nil { t.Fatal(err) } if contentType != "application/octet-stream" { t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) } body, err := ioutil.ReadAll(bReader) if err != nil { t.Fatal(err) } if len(body) != len(binaryContext) { t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) } for i := range body { if body[i] != binaryContext[i] { t.Fatalf("Corrupted response body at byte index %d", i) } } } func TestResponseUnsupportedContentType(t *testing.T) { content := []byte(textPlainDockerfile) ct := "application/json" br := ioutil.NopCloser(bytes.NewReader(content)) contentType, bReader, err := inspectResponse(ct, br, int64(len(textPlainDockerfile))) if err == nil { t.Fatal("Should have returned an error on content-type 'application/json'") } if contentType != ct { t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", 
ct, contentType) } body, err := ioutil.ReadAll(bReader) if err != nil { t.Fatal(err) } if string(body) != textPlainDockerfile { t.Fatalf("Corrupted response body %s", body) } } func TestInspectResponseTextSimple(t *testing.T) { content := []byte(textPlainDockerfile) ct := "text/plain" br := ioutil.NopCloser(bytes.NewReader(content)) contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) if err != nil { t.Fatal(err) } if contentType != "text/plain" { t.Fatalf("Content type should be 'text/plain' but is %q", contentType) } body, err := ioutil.ReadAll(bReader) if err != nil { t.Fatal(err) } if string(body) != textPlainDockerfile { t.Fatalf("Corrupted response body %s", body) } } func TestInspectResponseEmptyContentType(t *testing.T) { content := []byte(textPlainDockerfile) br := ioutil.NopCloser(bytes.NewReader(content)) contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) if err != nil { t.Fatal(err) } if contentType != "text/plain" { t.Fatalf("Content type should be 'text/plain' but is %q", contentType) } body, err := ioutil.ReadAll(bodyReader) if err != nil { t.Fatal(err) } if string(body) != textPlainDockerfile { t.Fatalf("Corrupted response body %s", body) } } docker-1.10.3/builder/tarsum.go000066400000000000000000000076151267010174400163460ustar00rootroot00000000000000package builder import ( "fmt" "io" "os" "path/filepath" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/tarsum" ) type tarSumContext struct { root string sums tarsum.FileInfoSums } func (c *tarSumContext) Close() error { return os.RemoveAll(c.root) } func convertPathError(err error, cleanpath string) error { if err, ok := err.(*os.PathError); ok { err.Path = cleanpath return err } return err } func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { cleanpath, fullpath, err := c.normalize(path) if err != 
nil { return nil, err } r, err := os.Open(fullpath) if err != nil { return nil, convertPathError(err, cleanpath) } return r, nil } func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { cleanpath, fullpath, err := c.normalize(path) if err != nil { return "", nil, err } st, err := os.Lstat(fullpath) if err != nil { return "", nil, convertPathError(err, cleanpath) } rel, err := filepath.Rel(c.root, fullpath) if err != nil { return "", nil, convertPathError(err, cleanpath) } // We set sum to path by default for the case where GetFile returns nil. // The usual case is if relative path is empty. sum := path // Use the checksum of the followed path(not the possible symlink) because // this is the file that is actually copied. if tsInfo := c.sums.GetFile(rel); tsInfo != nil { sum = tsInfo.Sum() } fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} return rel, fi, nil } // MakeTarSumContext returns a build Context from a tar stream. // // It extracts the tar stream to a temporary folder that is deleted as soon as // the Context is closed. // As the extraction happens, a tarsum is calculated for every file, and the set of // all those sums then becomes the source of truth for all operations on this Context. // // Closing tarStream has to be done by the caller. func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { root, err := ioutils.TempDir("", "docker-builder") if err != nil { return nil, err } tsc := &tarSumContext{root: root} // Make sure we clean-up upon error. 
In the happy case the caller // is expected to manage the clean-up defer func() { if err != nil { tsc.Close() } }() decompressedStream, err := archive.DecompressStream(tarStream) if err != nil { return nil, err } sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) if err != nil { return nil, err } if err := chrootarchive.Untar(sum, root, nil); err != nil { return nil, err } tsc.sums = sum.GetSums() return tsc, nil } func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) if err != nil { return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) } _, err = os.Lstat(fullpath) if err != nil { return "", "", convertPathError(err, path) } return } func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { rel, err := filepath.Rel(c.root, fullpath) if err != nil { return err } if rel == "." { return nil } sum := rel if tsInfo := c.sums.GetFile(rel); tsInfo != nil { sum = tsInfo.Sum() } fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} if err := walkFn(rel, fi, nil); err != nil { return err } return nil }) } func (c *tarSumContext) Remove(path string) error { _, fullpath, err := c.normalize(path) if err != nil { return err } return os.RemoveAll(fullpath) } docker-1.10.3/cli/000077500000000000000000000000001267010174400136145ustar00rootroot00000000000000docker-1.10.3/cli/cli.go000066400000000000000000000107431267010174400147170ustar00rootroot00000000000000package cli import ( "errors" "fmt" "io" "os" "reflect" "strings" flag "github.com/docker/docker/pkg/mflag" ) // Cli represents a command line interface. 
type Cli struct { Stderr io.Writer handlers []Handler Usage func() } // Handler holds the different commands Cli will call // It should have methods with names starting with `Cmd` like: // func (h myHandler) CmdFoo(args ...string) error type Handler interface{} // Initializer can be optionally implemented by a Handler to // initialize before each call to one of its commands. type Initializer interface { Initialize() error } // New instantiates a ready-to-use Cli. func New(handlers ...Handler) *Cli { // make the generic Cli object the first cli handler // in order to handle `docker help` appropriately cli := new(Cli) cli.handlers = append([]Handler{cli}, handlers...) return cli } // initErr is an error returned upon initialization of a handler implementing Initializer. type initErr struct{ error } func (err initErr) Error() string { return err.Error() } func (cli *Cli) command(args ...string) (func(...string) error, error) { for _, c := range cli.handlers { if c == nil { continue } camelArgs := make([]string, len(args)) for i, s := range args { if len(s) == 0 { return nil, errors.New("empty command") } camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) } methodName := "Cmd" + strings.Join(camelArgs, "") method := reflect.ValueOf(c).MethodByName(methodName) if method.IsValid() { if c, ok := c.(Initializer); ok { if err := c.Initialize(); err != nil { return nil, initErr{err} } } return method.Interface().(func(...string) error), nil } } return nil, errors.New("command not found") } // Run executes the specified command. func (cli *Cli) Run(args ...string) error { if len(args) > 1 { command, err := cli.command(args[:2]...) switch err := err.(type) { case nil: return command(args[2:]...) case initErr: return err.error } } if len(args) > 0 { command, err := cli.command(args[0]) switch err := err.(type) { case nil: return command(args[1:]...) 
case initErr: return err.error } cli.noSuchCommand(args[0]) } return cli.CmdHelp() } func (cli *Cli) noSuchCommand(command string) { if cli.Stderr == nil { cli.Stderr = os.Stderr } fmt.Fprintf(cli.Stderr, "docker: '%s' is not a docker command.\nSee 'docker --help'.\n", command) os.Exit(1) } // CmdHelp displays information on a Docker command. // // If more than one command is specified, information is only shown for the first command. // // Usage: docker help COMMAND or docker COMMAND --help func (cli *Cli) CmdHelp(args ...string) error { if len(args) > 1 { command, err := cli.command(args[:2]...) switch err := err.(type) { case nil: command("--help") return nil case initErr: return err.error } } if len(args) > 0 { command, err := cli.command(args[0]) switch err := err.(type) { case nil: command("--help") return nil case initErr: return err.error } cli.noSuchCommand(args[0]) } if cli.Usage == nil { flag.Usage() } else { cli.Usage() } return nil } // Subcmd is a subcommand of the main "docker" command. // A subcommand represents an action that can be performed // from the Docker command line client. // // To see all available subcommands, run "docker --help". func Subcmd(name string, synopses []string, description string, exitOnError bool) *flag.FlagSet { var errorHandling flag.ErrorHandling if exitOnError { errorHandling = flag.ExitOnError } else { errorHandling = flag.ContinueOnError } flags := flag.NewFlagSet(name, errorHandling) flags.Usage = func() { flags.ShortUsage() flags.PrintDefaults() } flags.ShortUsage = func() { options := "" if flags.FlagCountUndeprecated() > 0 { options = " [OPTIONS]" } if len(synopses) == 0 { synopses = []string{""} } // Allow for multiple command usage synopses. for i, synopsis := range synopses { lead := "\t" if i == 0 { // First line needs the word 'Usage'. 
lead = "Usage:\t" } if synopsis != "" { synopsis = " " + synopsis } fmt.Fprintf(flags.Out(), "\n%sdocker %s%s%s", lead, name, options, synopsis) } fmt.Fprintf(flags.Out(), "\n\n%s\n", description) } return flags } // An StatusError reports an unsuccessful exit by a command. type StatusError struct { Status string StatusCode int } func (e StatusError) Error() string { return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) } docker-1.10.3/cli/client.go000066400000000000000000000003501267010174400154170ustar00rootroot00000000000000package cli import flag "github.com/docker/docker/pkg/mflag" // ClientFlags represents flags for the docker client. type ClientFlags struct { FlagSet *flag.FlagSet Common *CommonFlags PostParse func() ConfigDir string } docker-1.10.3/cli/common.go000066400000000000000000000055571267010174400154470ustar00rootroot00000000000000package cli import ( flag "github.com/docker/docker/pkg/mflag" "github.com/docker/go-connections/tlsconfig" ) // CommonFlags represents flags that are common to both the client and the daemon. 
type CommonFlags struct { FlagSet *flag.FlagSet PostParse func() Debug bool Hosts []string LogLevel string TLS bool TLSVerify bool TLSOptions *tlsconfig.Options TrustKey string } // Command is the struct contains command name and description type Command struct { Name string Description string } var dockerCommands = []Command{ {"attach", "Attach to a running container"}, {"build", "Build an image from a Dockerfile"}, {"commit", "Create a new image from a container's changes"}, {"cp", "Copy files/folders between a container and the local filesystem"}, {"create", "Create a new container"}, {"diff", "Inspect changes on a container's filesystem"}, {"events", "Get real time events from the server"}, {"exec", "Run a command in a running container"}, {"export", "Export a container's filesystem as a tar archive"}, {"history", "Show the history of an image"}, {"images", "List images"}, {"import", "Import the contents from a tarball to create a filesystem image"}, {"info", "Display system-wide information"}, {"inspect", "Return low-level information on a container or image"}, {"kill", "Kill a running container"}, {"load", "Load an image from a tar archive or STDIN"}, {"login", "Register or log in to a Docker registry"}, {"logout", "Log out from a Docker registry"}, {"logs", "Fetch the logs of a container"}, {"network", "Manage Docker networks"}, {"pause", "Pause all processes within a container"}, {"port", "List port mappings or a specific mapping for the CONTAINER"}, {"ps", "List containers"}, {"pull", "Pull an image or a repository from a registry"}, {"push", "Push an image or a repository to a registry"}, {"rename", "Rename a container"}, {"restart", "Restart a container"}, {"rm", "Remove one or more containers"}, {"rmi", "Remove one or more images"}, {"run", "Run a command in a new container"}, {"save", "Save an image(s) to a tar archive"}, {"search", "Search the Docker Hub for images"}, {"start", "Start one or more stopped containers"}, {"stats", "Display a live stream 
of container(s) resource usage statistics"}, {"stop", "Stop a running container"}, {"tag", "Tag an image into a repository"}, {"top", "Display the running processes of a container"}, {"unpause", "Unpause all processes within a container"}, {"update", "Update resources of one or more containers"}, {"version", "Show the Docker version information"}, {"volume", "Manage Docker volumes"}, {"wait", "Block until a container stops, then print its exit code"}, } // DockerCommands stores all the docker command var DockerCommands = make(map[string]Command) func init() { for _, cmd := range dockerCommands { DockerCommands[cmd.Name] = cmd } } docker-1.10.3/cliconfig/000077500000000000000000000000001267010174400150025ustar00rootroot00000000000000docker-1.10.3/cliconfig/config.go000066400000000000000000000203031267010174400165740ustar00rootroot00000000000000package cliconfig import ( "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/docker/docker/pkg/homedir" "github.com/docker/engine-api/types" ) const ( // ConfigFileName is the name of config file ConfigFileName = "config.json" oldConfigfile = ".dockercfg" // This constant is only used for really old config files when the // URL wasn't saved as part of the config file and it was just // assumed to be this value. 
defaultIndexserver = "https://index.docker.io/v1/" ) var ( configDir = os.Getenv("DOCKER_CONFIG") ) func init() { if configDir == "" { configDir = filepath.Join(homedir.Get(), ".docker") } } // ConfigDir returns the directory the configuration file is stored in func ConfigDir() string { return configDir } // SetConfigDir sets the directory the configuration file is stored in func SetConfigDir(dir string) { configDir = dir } // ConfigFile ~/.docker/config.json file info type ConfigFile struct { AuthConfigs map[string]types.AuthConfig `json:"auths"` HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` PsFormat string `json:"psFormat,omitempty"` ImagesFormat string `json:"imagesFormat,omitempty"` DetachKeys string `json:"detachKeys,omitempty"` filename string // Note: not serialized - for internal use only } // NewConfigFile initializes an empty configuration file for the given filename 'fn' func NewConfigFile(fn string) *ConfigFile { return &ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), HTTPHeaders: make(map[string]string), filename: fn, } } // LegacyLoadFromReader reads the non-nested configuration data given and sets up the // auth config information with given directory and populates the receiver object func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { b, err := ioutil.ReadAll(configData) if err != nil { return err } if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { arr := strings.Split(string(b), "\n") if len(arr) < 2 { return fmt.Errorf("The Auth config file is empty") } authConfig := types.AuthConfig{} origAuth := strings.Split(arr[0], " = ") if len(origAuth) != 2 { return fmt.Errorf("Invalid Auth config file") } authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) if err != nil { return err } origEmail := strings.Split(arr[1], " = ") if len(origEmail) != 2 { return fmt.Errorf("Invalid Auth config file") } authConfig.Email = origEmail[1] authConfig.ServerAddress = 
defaultIndexserver configFile.AuthConfigs[defaultIndexserver] = authConfig } else { for k, authConfig := range configFile.AuthConfigs { authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) if err != nil { return err } authConfig.Auth = "" authConfig.ServerAddress = k configFile.AuthConfigs[k] = authConfig } } return nil } // LoadFromReader reads the configuration data given and sets up the auth config // information with given directory and populates the receiver object func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { if err := json.NewDecoder(configData).Decode(&configFile); err != nil { return err } var err error for addr, ac := range configFile.AuthConfigs { ac.Username, ac.Password, err = decodeAuth(ac.Auth) if err != nil { return err } ac.Auth = "" ac.ServerAddress = addr configFile.AuthConfigs[addr] = ac } return nil } // LegacyLoadFromReader is a convenience function that creates a ConfigFile object from // a non-nested reader func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) { configFile := ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), } err := configFile.LegacyLoadFromReader(configData) return &configFile, err } // LoadFromReader is a convenience function that creates a ConfigFile object from // a reader func LoadFromReader(configData io.Reader) (*ConfigFile, error) { configFile := ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), } err := configFile.LoadFromReader(configData) return &configFile, err } // Load reads the configuration files in the given directory, and sets up // the auth config information and return values. 
// FIXME: use the internal golang config parser func Load(configDir string) (*ConfigFile, error) { if configDir == "" { configDir = ConfigDir() } configFile := ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), filename: filepath.Join(configDir, ConfigFileName), } // Try happy path first - latest config file if _, err := os.Stat(configFile.filename); err == nil { file, err := os.Open(configFile.filename) if err != nil { return &configFile, fmt.Errorf("%s - %v", configFile.filename, err) } defer file.Close() err = configFile.LoadFromReader(file) if err != nil { err = fmt.Errorf("%s - %v", configFile.filename, err) } return &configFile, err } else if !os.IsNotExist(err) { // if file is there but we can't stat it for any reason other // than it doesn't exist then stop return &configFile, fmt.Errorf("%s - %v", configFile.filename, err) } // Can't find latest config file so check for the old one confFile := filepath.Join(homedir.Get(), oldConfigfile) if _, err := os.Stat(confFile); err != nil { return &configFile, nil //missing file is not an error } file, err := os.Open(confFile) if err != nil { return &configFile, fmt.Errorf("%s - %v", confFile, err) } defer file.Close() err = configFile.LegacyLoadFromReader(file) if err != nil { return &configFile, fmt.Errorf("%s - %v", confFile, err) } if configFile.HTTPHeaders == nil { configFile.HTTPHeaders = map[string]string{} } return &configFile, nil } // SaveToWriter encodes and writes out all the authorization information to // the given writer func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { // Encode sensitive data into a new/temp struct tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) for k, authConfig := range configFile.AuthConfigs { authCopy := authConfig // encode and save the authstring, while blanking out the original fields authCopy.Auth = encodeAuth(&authCopy) authCopy.Username = "" authCopy.Password = "" authCopy.ServerAddress = "" tmpAuthConfigs[k] = 
authCopy } saveAuthConfigs := configFile.AuthConfigs configFile.AuthConfigs = tmpAuthConfigs defer func() { configFile.AuthConfigs = saveAuthConfigs }() data, err := json.MarshalIndent(configFile, "", "\t") if err != nil { return err } _, err = writer.Write(data) return err } // Save encodes and writes out all the authorization information func (configFile *ConfigFile) Save() error { if configFile.Filename() == "" { return fmt.Errorf("Can't save config with empty filename") } if err := os.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil { return err } f, err := os.OpenFile(configFile.filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return err } defer f.Close() return configFile.SaveToWriter(f) } // Filename returns the name of the configuration file func (configFile *ConfigFile) Filename() string { return configFile.filename } // encodeAuth creates a base64 encoded string to containing authorization information func encodeAuth(authConfig *types.AuthConfig) string { authStr := authConfig.Username + ":" + authConfig.Password msg := []byte(authStr) encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) base64.StdEncoding.Encode(encoded, msg) return string(encoded) } // decodeAuth decodes a base64 encoded string and returns username and password func decodeAuth(authStr string) (string, string, error) { decLen := base64.StdEncoding.DecodedLen(len(authStr)) decoded := make([]byte, decLen) authByte := []byte(authStr) n, err := base64.StdEncoding.Decode(decoded, authByte) if err != nil { return "", "", err } if n > decLen { return "", "", fmt.Errorf("Something went wrong decoding auth config") } arr := strings.SplitN(string(decoded), ":", 2) if len(arr) != 2 { return "", "", fmt.Errorf("Invalid auth configuration file") } password := strings.Trim(arr[1], "\x00") return arr[0], password, nil } docker-1.10.3/cliconfig/config_test.go000066400000000000000000000322231267010174400176370ustar00rootroot00000000000000package cliconfig 
import ( "io/ioutil" "os" "path/filepath" "strings" "testing" "github.com/docker/docker/pkg/homedir" "github.com/docker/engine-api/types" ) func TestEmptyConfigDir(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) SetConfigDir(tmpHome) config, err := Load("") if err != nil { t.Fatalf("Failed loading on empty config dir: %q", err) } expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) if config.Filename() != expectedConfigFilename { t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename()) } // Now save it and make sure it shows up in new form saveConfigAndValidateNewFormat(t, config, tmpHome) } func TestMissingFile(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) config, err := Load(tmpHome) if err != nil { t.Fatalf("Failed loading on missing file: %q", err) } // Now save it and make sure it shows up in new form saveConfigAndValidateNewFormat(t, config, tmpHome) } func TestSaveFileToDirs(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) tmpHome += "/.docker" config, err := Load(tmpHome) if err != nil { t.Fatalf("Failed loading on missing file: %q", err) } // Now save it and make sure it shows up in new form saveConfigAndValidateNewFormat(t, config, tmpHome) } func TestEmptyFile(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) fn := filepath.Join(tmpHome, ConfigFileName) if err := ioutil.WriteFile(fn, []byte(""), 0600); err != nil { t.Fatal(err) } _, err = Load(tmpHome) if err == nil { t.Fatalf("Was supposed to fail") } } func TestEmptyJson(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) fn := filepath.Join(tmpHome, ConfigFileName) if err := 
ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { t.Fatal(err) } config, err := Load(tmpHome) if err != nil { t.Fatalf("Failed loading on empty json file: %q", err) } // Now save it and make sure it shows up in new form saveConfigAndValidateNewFormat(t, config, tmpHome) } func TestOldInvalidsAuth(t *testing.T) { invalids := map[string]string{ `username = test`: "The Auth config file is empty", `username password email`: "Invalid Auth config file", `username = test email`: "Invalid auth configuration file", `username = am9lam9lOmhlbGxv email`: "Invalid Auth config file", } tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) homeKey := homedir.Key() homeVal := homedir.Get() defer func() { os.Setenv(homeKey, homeVal) }() os.Setenv(homeKey, tmpHome) for content, expectedError := range invalids { fn := filepath.Join(tmpHome, oldConfigfile) if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { t.Fatal(err) } config, err := Load(tmpHome) // Use Contains instead of == since the file name will change each time if err == nil || !strings.Contains(err.Error(), expectedError) { t.Fatalf("Should have failed\nConfig: %v\nGot: %v\nExpected: %v", config, err, expectedError) } } } func TestOldValidAuth(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) homeKey := homedir.Key() homeVal := homedir.Get() defer func() { os.Setenv(homeKey, homeVal) }() os.Setenv(homeKey, tmpHome) fn := filepath.Join(tmpHome, oldConfigfile) js := `username = am9lam9lOmhlbGxv email = user@example.com` if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { t.Fatal(err) } config, err := Load(tmpHome) if err != nil { t.Fatal(err) } // defaultIndexserver is https://index.docker.io/v1/ ac := config.AuthConfigs["https://index.docker.io/v1/"] if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" 
{ t.Fatalf("Missing data from parsing:\n%q", config) } // Now save it and make sure it shows up in new form configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) if !strings.Contains(configStr, "user@example.com") { t.Fatalf("Should have save in new form: %s", configStr) } } func TestOldJsonInvalid(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) homeKey := homedir.Key() homeVal := homedir.Get() defer func() { os.Setenv(homeKey, homeVal) }() os.Setenv(homeKey, tmpHome) fn := filepath.Join(tmpHome, oldConfigfile) js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { t.Fatal(err) } config, err := Load(tmpHome) // Use Contains instead of == since the file name will change each time if err == nil || !strings.Contains(err.Error(), "Invalid auth configuration file") { t.Fatalf("Expected an error got : %v, %v", config, err) } } func TestOldJson(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) homeKey := homedir.Key() homeVal := homedir.Get() defer func() { os.Setenv(homeKey, homeVal) }() os.Setenv(homeKey, tmpHome) fn := filepath.Join(tmpHome, oldConfigfile) js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { t.Fatal(err) } config, err := Load(tmpHome) if err != nil { t.Fatalf("Failed loading on empty json file: %q", err) } ac := config.AuthConfigs["https://index.docker.io/v1/"] if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { t.Fatalf("Missing data from parsing:\n%q", config) } // Now save it and make sure it shows up in new form configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) if !strings.Contains(configStr, "user@example.com") { t.Fatalf("Should have save in new 
form: %s", configStr) } } func TestNewJson(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) fn := filepath.Join(tmpHome, ConfigFileName) js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { t.Fatal(err) } config, err := Load(tmpHome) if err != nil { t.Fatalf("Failed loading on empty json file: %q", err) } ac := config.AuthConfigs["https://index.docker.io/v1/"] if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { t.Fatalf("Missing data from parsing:\n%q", config) } // Now save it and make sure it shows up in new form configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) if !strings.Contains(configStr, "user@example.com") { t.Fatalf("Should have save in new form: %s", configStr) } } func TestJsonWithPsFormat(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) fn := filepath.Join(tmpHome, ConfigFileName) js := `{ "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" }` if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { t.Fatal(err) } config, err := Load(tmpHome) if err != nil { t.Fatalf("Failed loading on empty json file: %q", err) } if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { t.Fatalf("Unknown ps format: %s\n", config.PsFormat) } // Now save it and make sure it shows up in new form configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) if !strings.Contains(configStr, `"psFormat":`) || !strings.Contains(configStr, "{{.ID}}") { t.Fatalf("Should have save in new form: %s", configStr) } } // Save it and make sure it shows up in new form func saveConfigAndValidateNewFormat(t *testing.T, 
config *ConfigFile, homeFolder string) string { err := config.Save() if err != nil { t.Fatalf("Failed to save: %q", err) } buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) if !strings.Contains(string(buf), `"auths":`) { t.Fatalf("Should have save in new form: %s", string(buf)) } return string(buf) } func TestConfigDir(t *testing.T) { tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpHome) if ConfigDir() == tmpHome { t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) } // Update configDir SetConfigDir(tmpHome) if ConfigDir() != tmpHome { t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) } } func TestConfigFile(t *testing.T) { configFilename := "configFilename" configFile := NewConfigFile(configFilename) if configFile.Filename() != configFilename { t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename()) } } func TestJsonReaderNoFile(t *testing.T) { js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` config, err := LoadFromReader(strings.NewReader(js)) if err != nil { t.Fatalf("Failed loading on empty json file: %q", err) } ac := config.AuthConfigs["https://index.docker.io/v1/"] if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { t.Fatalf("Missing data from parsing:\n%q", config) } } func TestOldJsonReaderNoFile(t *testing.T) { js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` config, err := LegacyLoadFromReader(strings.NewReader(js)) if err != nil { t.Fatalf("Failed loading on empty json file: %q", err) } ac := config.AuthConfigs["https://index.docker.io/v1/"] if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { t.Fatalf("Missing data from parsing:\n%q", config) } } func TestJsonWithPsFormatNoFile(t *testing.T) { js := `{ "auths": { 
"https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" }` config, err := LoadFromReader(strings.NewReader(js)) if err != nil { t.Fatalf("Failed loading on empty json file: %q", err) } if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { t.Fatalf("Unknown ps format: %s\n", config.PsFormat) } } func TestJsonSaveWithNoFile(t *testing.T) { js := `{ "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" }` config, err := LoadFromReader(strings.NewReader(js)) err = config.Save() if err == nil { t.Fatalf("Expected error. File should not have been able to save with no file name.") } tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatalf("Failed to create a temp dir: %q", err) } defer os.RemoveAll(tmpHome) fn := filepath.Join(tmpHome, ConfigFileName) f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) err = config.SaveToWriter(f) if err != nil { t.Fatalf("Failed saving to file: %q", err) } buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) if !strings.Contains(string(buf), `"auths":`) || !strings.Contains(string(buf), "user@example.com") { t.Fatalf("Should have save in new form: %s", string(buf)) } } func TestLegacyJsonSaveWithNoFile(t *testing.T) { js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` config, err := LegacyLoadFromReader(strings.NewReader(js)) err = config.Save() if err == nil { t.Fatalf("Expected error. 
File should not have been able to save with no file name.") } tmpHome, err := ioutil.TempDir("", "config-test") if err != nil { t.Fatalf("Failed to create a temp dir: %q", err) } defer os.RemoveAll(tmpHome) fn := filepath.Join(tmpHome, ConfigFileName) f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) err = config.SaveToWriter(f) if err != nil { t.Fatalf("Failed saving to file: %q", err) } buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) if !strings.Contains(string(buf), `"auths":`) || !strings.Contains(string(buf), "user@example.com") { t.Fatalf("Should have save in new form: %s", string(buf)) } } func TestEncodeAuth(t *testing.T) { newAuthConfig := &types.AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} authStr := encodeAuth(newAuthConfig) decAuthConfig := &types.AuthConfig{} var err error decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) if err != nil { t.Fatal(err) } if newAuthConfig.Username != decAuthConfig.Username { t.Fatal("Encode Username doesn't match decoded Username") } if newAuthConfig.Password != decAuthConfig.Password { t.Fatal("Encode Password doesn't match decoded Password") } if authStr != "a2VuOnRlc3Q=" { t.Fatal("AuthString encoding isn't correct.") } } docker-1.10.3/container/000077500000000000000000000000001267010174400150275ustar00rootroot00000000000000docker-1.10.3/container/archive.go000066400000000000000000000044001267010174400167750ustar00rootroot00000000000000package container import ( "os" "path/filepath" "github.com/docker/docker/pkg/archive" "github.com/docker/engine-api/types" ) // ResolvePath resolves the given path in the container to a resource on the // host. Returns a resolved path (absolute path to the resource on the host), // the absolute path to the resource relative to the container's rootfs, and // a error if the path points to outside the container's rootfs. 
func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { // Consider the given path as an absolute path in the container. absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) // Split the absPath into its Directory and Base components. We will // resolve the dir in the scope of the container then append the base. dirPath, basePath := filepath.Split(absPath) resolvedDirPath, err := container.GetResourcePath(dirPath) if err != nil { return "", "", err } // resolvedDirPath will have been cleaned (no trailing path separators) so // we can manually join it with the base path element. resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath return resolvedPath, absPath, nil } // StatPath is the unexported version of StatPath. Locks and mounts should // be acquired before calling this method and the given path should be fully // resolved to a path on the host corresponding to the given absolute path // inside the container. func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { lstat, err := os.Lstat(resolvedPath) if err != nil { return nil, err } var linkTarget string if lstat.Mode()&os.ModeSymlink != 0 { // Fully evaluate the symlink in the scope of the container rootfs. hostPath, err := container.GetResourcePath(absPath) if err != nil { return nil, err } linkTarget, err = filepath.Rel(container.BaseFS, hostPath) if err != nil { return nil, err } // Make it an absolute path. 
linkTarget = filepath.Join(string(filepath.Separator), linkTarget) } return &types.ContainerPathStat{ Name: filepath.Base(absPath), Size: lstat.Size(), Mode: lstat.Mode(), Mtime: lstat.ModTime(), LinkTarget: linkTarget, }, nil } docker-1.10.3/container/container.go000066400000000000000000000411631267010174400173450ustar00rootroot00000000000000package container import ( "encoding/json" "fmt" "io" "os" "path/filepath" "sync" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/exec" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog" "github.com/docker/docker/daemon/network" derr "github.com/docker/docker/errors" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/runconfig" "github.com/docker/docker/volume" containertypes "github.com/docker/engine-api/types/container" "github.com/docker/go-connections/nat" "github.com/opencontainers/runc/libcontainer/label" ) const configFileName = "config.v2.json" // CommonContainer holds the fields for a container which are // applicable across all platforms supported by the daemon. type CommonContainer struct { *runconfig.StreamConfig // embed for Container to support states directly. *State `json:"State"` // Needed for remote api version <= 1.11 Root string `json:"-"` // Path to the "home" of the container, including metadata. 
BaseFS string `json:"-"` // Path to the graphdriver mountpoint RWLayer layer.RWLayer `json:"-"` ID string Created time.Time Path string Args []string Config *containertypes.Config ImageID image.ID `json:"Image"` NetworkSettings *network.Settings LogPath string Name string Driver string // MountLabel contains the options for the 'mount' command MountLabel string ProcessLabel string RestartCount int HasBeenStartedBefore bool HasBeenManuallyStopped bool // used for unless-stopped restart policy MountPoints map[string]*volume.MountPoint HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable Command *execdriver.Command `json:"-"` monitor *containerMonitor ExecCommands *exec.Store `json:"-"` // logDriver for closing LogDriver logger.Logger `json:"-"` LogCopier *logger.Copier `json:"-"` } // NewBaseContainer creates a new container with its // basic configuration. func NewBaseContainer(id, root string) *Container { return &Container{ CommonContainer: CommonContainer{ ID: id, State: NewState(), ExecCommands: exec.NewStore(), Root: root, MountPoints: make(map[string]*volume.MountPoint), StreamConfig: runconfig.NewStreamConfig(), }, } } // FromDisk loads the container configuration stored in the host. func (container *Container) FromDisk() error { pth, err := container.ConfigPath() if err != nil { return err } jsonSource, err := os.Open(pth) if err != nil { return err } defer jsonSource.Close() dec := json.NewDecoder(jsonSource) // Load container settings if err := dec.Decode(container); err != nil { return err } if err := label.ReserveLabel(container.ProcessLabel); err != nil { return err } return container.readHostConfig() } // ToDisk saves the container configuration on disk. 
func (container *Container) ToDisk() error { pth, err := container.ConfigPath() if err != nil { return err } jsonSource, err := os.Create(pth) if err != nil { return err } defer jsonSource.Close() enc := json.NewEncoder(jsonSource) // Save container settings if err := enc.Encode(container); err != nil { return err } return container.WriteHostConfig() } // ToDiskLocking saves the container configuration on disk in a thread safe way. func (container *Container) ToDiskLocking() error { container.Lock() err := container.ToDisk() container.Unlock() return err } // readHostConfig reads the host configuration from disk for the container. func (container *Container) readHostConfig() error { container.HostConfig = &containertypes.HostConfig{} // If the hostconfig file does not exist, do not read it. // (We still have to initialize container.HostConfig, // but that's OK, since we just did that above.) pth, err := container.HostConfigPath() if err != nil { return err } f, err := os.Open(pth) if err != nil { if os.IsNotExist(err) { return nil } return err } defer f.Close() if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { return err } container.InitDNSHostConfig() return nil } // WriteHostConfig saves the host configuration on disk for the container. func (container *Container) WriteHostConfig() error { pth, err := container.HostConfigPath() if err != nil { return err } f, err := os.Create(pth) if err != nil { return err } defer f.Close() return json.NewEncoder(f).Encode(&container.HostConfig) } // GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path // sanitisation. Symlinks are all scoped to the BaseFS of the container, as // though the container's BaseFS was `/`. // // The BaseFS of a container is the host-facing path which is bind-mounted as // `/` inside the container. This method is essentially used to access a // particular path inside the container as though you were a process in that // container. 
// // NOTE: The returned path is *only* safely scoped inside the container's BaseFS // if no component of the returned path changes (such as a component // symlinking to a different path) between using this method and using the // path. See symlink.FollowSymlinkInScope for more details. func (container *Container) GetResourcePath(path string) (string, error) { // IMPORTANT - These are paths on the OS where the daemon is running, hence // any filepath operations must be done in an OS agnostic way. cleanPath := filepath.Join(string(os.PathSeparator), path) r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) return r, e } // GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path // sanitisation. Symlinks are all scoped to the root of the container, as // though the container's root was `/`. // // The root of a container is the host-facing configuration metadata directory. // Only use this method to safely access the container's `container.json` or // other metadata files. If in doubt, use container.GetResourcePath. // // NOTE: The returned path is *only* safely scoped inside the container's root // if no component of the returned path changes (such as a component // symlinking to a different path) between using this method and using the // path. See symlink.FollowSymlinkInScope for more details. func (container *Container) GetRootResourcePath(path string) (string, error) { // IMPORTANT - These are paths on the OS where the daemon is running, hence // any filepath operations must be done in an OS agnostic way. cleanPath := filepath.Join(string(os.PathSeparator), path) return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) } // ExitOnNext signals to the monitor that it should not restart the container // after we send the kill signal. 
func (container *Container) ExitOnNext() { container.monitor.ExitOnNext() } // Resize changes the TTY of the process running inside the container // to the given height and width. The container must be running. func (container *Container) Resize(h, w int) error { if container.Command.ProcessConfig.Terminal == nil { return fmt.Errorf("Container %s does not have a terminal ready", container.ID) } if err := container.Command.ProcessConfig.Terminal.Resize(h, w); err != nil { return err } return nil } // HostConfigPath returns the path to the container's JSON hostconfig func (container *Container) HostConfigPath() (string, error) { return container.GetRootResourcePath("hostconfig.json") } // ConfigPath returns the path to the container's JSON config func (container *Container) ConfigPath() (string, error) { return container.GetRootResourcePath(configFileName) } func validateID(id string) error { if id == "" { return derr.ErrorCodeEmptyID } return nil } // Returns true if the container exposes a certain port func (container *Container) exposes(p nat.Port) bool { _, exists := container.Config.ExposedPorts[p] return exists } // GetLogConfig returns the log configuration for the container. func (container *Container) GetLogConfig(defaultConfig containertypes.LogConfig) containertypes.LogConfig { cfg := container.HostConfig.LogConfig if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured if cfg.Type == "" { cfg.Type = jsonfilelog.Name } return cfg } // Use daemon's default log config for containers return defaultConfig } // StartLogger starts a new logger driver for the container. 
func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { c, err := logger.GetLogDriver(cfg.Type) if err != nil { return nil, derr.ErrorCodeLoggingFactory.WithArgs(err) } ctx := logger.Context{ Config: cfg.Config, ContainerID: container.ID, ContainerName: container.Name, ContainerEntrypoint: container.Path, ContainerArgs: container.Args, ContainerImageID: container.ImageID.String(), ContainerImageName: container.Config.Image, ContainerCreated: container.Created, ContainerEnv: container.Config.Env, ContainerLabels: container.Config.Labels, } // Set logging file for "json-logger" if cfg.Type == jsonfilelog.Name { ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) if err != nil { return nil, err } } return c(ctx) } // GetProcessLabel returns the process label for the container. func (container *Container) GetProcessLabel() string { // even if we have a process label return "" if we are running // in privileged mode if container.HostConfig.Privileged { return "" } return container.ProcessLabel } // GetMountLabel returns the mounting label for the container. // This label is empty if the container is privileged. func (container *Container) GetMountLabel() string { if container.HostConfig.Privileged { return "" } return container.MountLabel } // GetExecIDs returns the list of exec commands running on the container. func (container *Container) GetExecIDs() []string { return container.ExecCommands.List() } // Attach connects to the container's TTY, delegating to standard // streams or websockets depending on the configuration. func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { return AttachStreams(container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) } // AttachStreams connects streams to a TTY. // Used by exec too. Should this move somewhere else? 
func AttachStreams(streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { var ( cStdout, cStderr io.ReadCloser cStdin io.WriteCloser wg sync.WaitGroup errors = make(chan error, 3) ) if stdin != nil && openStdin { cStdin = streamConfig.StdinPipe() wg.Add(1) } if stdout != nil { cStdout = streamConfig.StdoutPipe() wg.Add(1) } if stderr != nil { cStderr = streamConfig.StderrPipe() wg.Add(1) } // Connect stdin of container to the http conn. go func() { if stdin == nil || !openStdin { return } logrus.Debugf("attach: stdin: begin") defer func() { if stdinOnce && !tty { cStdin.Close() } else { // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr if cStdout != nil { cStdout.Close() } if cStderr != nil { cStderr.Close() } } wg.Done() logrus.Debugf("attach: stdin: end") }() var err error if tty { _, err = copyEscapable(cStdin, stdin, keys) } else { _, err = io.Copy(cStdin, stdin) } if err == io.ErrClosedPipe { err = nil } if err != nil { logrus.Errorf("attach: stdin: %s", err) errors <- err return } }() attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { if stream == nil { return } defer func() { // Make sure stdin gets closed if stdin != nil { stdin.Close() } streamPipe.Close() wg.Done() logrus.Debugf("attach: %s: end", name) }() logrus.Debugf("attach: %s: begin", name) _, err := io.Copy(stream, streamPipe) if err == io.ErrClosedPipe { err = nil } if err != nil { logrus.Errorf("attach: %s: %v", name, err) errors <- err } } go attachStream("stdout", stdout, cStdout) go attachStream("stderr", stderr, cStderr) return promise.Go(func() error { wg.Wait() close(errors) for err := range errors { if err != nil { return err } } return nil }) } // Code c/c from io.Copy() modified to handle escape sequence func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { if len(keys) == 0 { // Default 
keys : ctrl-p ctrl-q keys = []byte{16, 17} } buf := make([]byte, 32*1024) for { nr, er := src.Read(buf) if nr > 0 { // ---- Docker addition for i, key := range keys { if nr != 1 || buf[0] != key { break } if i == len(keys)-1 { if err := src.Close(); err != nil { return 0, err } return 0, nil } nr, er = src.Read(buf) } // ---- End of docker nw, ew := dst.Write(buf[0:nr]) if nw > 0 { written += int64(nw) } if ew != nil { err = ew break } if nr != nw { err = io.ErrShortWrite break } } if er == io.EOF { break } if er != nil { err = er break } } return written, err } // ShouldRestart decides whether the daemon should restart the container or not. // This is based on the container's restart policy. func (container *Container) ShouldRestart() bool { return container.HostConfig.RestartPolicy.Name == "always" || (container.HostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) || (container.HostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) } // AddBindMountPoint adds a new bind mount point configuration to the container. func (container *Container) AddBindMountPoint(name, source, destination string, rw bool) { container.MountPoints[destination] = &volume.MountPoint{ Name: name, Source: source, Destination: destination, RW: rw, } } // AddLocalMountPoint adds a new local mount point configuration to the container. func (container *Container) AddLocalMountPoint(name, destination string, rw bool) { container.MountPoints[destination] = &volume.MountPoint{ Name: name, Driver: volume.DefaultDriverName, Destination: destination, RW: rw, } } // AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { container.MountPoints[destination] = &volume.MountPoint{ Name: vol.Name(), Driver: vol.DriverName(), Destination: destination, RW: rw, Volume: vol, } } // IsDestinationMounted checks whether a path is mounted on the container or not. func (container *Container) IsDestinationMounted(destination string) bool { return container.MountPoints[destination] != nil } // StopSignal returns the signal used to stop the container. func (container *Container) StopSignal() int { var stopSignal syscall.Signal if container.Config.StopSignal != "" { stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) } if int(stopSignal) == 0 { stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) } return int(stopSignal) } // InitDNSHostConfig ensures that the dns fields are never nil. // New containers don't ever have those fields nil, // but pre created containers can still have those nil values. // The non-recommended host configuration in the start api can // make these fields nil again, this corrects that issue until // we remove that behavior for good. // See https://github.com/docker/docker/pull/17779 // for a more detailed explanation on why we don't want that. 
func (container *Container) InitDNSHostConfig() { container.Lock() defer container.Unlock() if container.HostConfig.DNS == nil { container.HostConfig.DNS = make([]string, 0) } if container.HostConfig.DNSSearch == nil { container.HostConfig.DNSSearch = make([]string, 0) } if container.HostConfig.DNSOptions == nil { container.HostConfig.DNSOptions = make([]string, 0) } } docker-1.10.3/container/container_unit_test.go000066400000000000000000000011771267010174400214440ustar00rootroot00000000000000package container import ( "testing" "github.com/docker/docker/pkg/signal" "github.com/docker/engine-api/types/container" ) func TestContainerStopSignal(t *testing.T) { c := &Container{ CommonContainer: CommonContainer{ Config: &container.Config{}, }, } def, err := signal.ParseSignal(signal.DefaultStopSignal) if err != nil { t.Fatal(err) } s := c.StopSignal() if s != int(def) { t.Fatalf("Expected %v, got %v", def, s) } c = &Container{ CommonContainer: CommonContainer{ Config: &container.Config{StopSignal: "SIGKILL"}, }, } s = c.StopSignal() if s != 9 { t.Fatalf("Expected 9, got %v", s) } } docker-1.10.3/container/container_unix.go000066400000000000000000000542221267010174400204100ustar00rootroot00000000000000// +build linux freebsd package container import ( "fmt" "io/ioutil" "net" "os" "path/filepath" "strconv" "strings" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" runconfigopts "github.com/docker/docker/runconfig/opts" "github.com/docker/docker/utils" "github.com/docker/docker/volume" containertypes "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/network" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork" "github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/options" "github.com/docker/libnetwork/types" 
"github.com/opencontainers/runc/libcontainer/label" ) // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container const DefaultSHMSize int64 = 67108864 // Container holds the fields specific to unixen implementations. // See CommonContainer for standard fields common to all containers. type Container struct { CommonContainer // Fields below here are platform specific. AppArmorProfile string HostnamePath string HostsPath string ShmPath string ResolvConfPath string SeccompProfile string } // CreateDaemonEnvironment returns the list of all environment variables given the list of // environment variables related to links. // Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. // The defaults set here do not override the values in container.Config.Env func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { // if a domain name was specified, append it to the hostname (see #7851) fullHostname := container.Config.Hostname if container.Config.Domainname != "" { fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) } // Setup environment env := []string{ "PATH=" + system.DefaultPathEnv, "HOSTNAME=" + fullHostname, // Note: we don't set HOME here because it'll get autoset intelligently // based on the value of USER inside dockerinit, but only if it isn't // set already (ie, that can be overridden by setting HOME via -e or ENV // in a Dockerfile). } if container.Config.Tty { env = append(env, "TERM=xterm") } env = append(env, linkedEnv...) // because the env on the container can override certain default values // we need to replace the 'env' keys where they match and append anything // else. 
env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) return env } // TrySetNetworkMount attempts to set the network mounts given a provided destination and // the path to use for it; return true if the given destination was a network mount file func (container *Container) TrySetNetworkMount(destination string, path string) bool { if destination == "/etc/resolv.conf" { container.ResolvConfPath = path return true } if destination == "/etc/hostname" { container.HostnamePath = path return true } if destination == "/etc/hosts" { container.HostsPath = path return true } return false } // BuildHostnameFile writes the container's hostname file. func (container *Container) BuildHostnameFile() error { hostnamePath, err := container.GetRootResourcePath("hostname") if err != nil { return err } container.HostnamePath = hostnamePath if container.Config.Domainname != "" { return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) } return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) } // GetEndpointInNetwork returns the container's endpoint to the provided network. 
func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { endpointName := strings.TrimPrefix(container.Name, "/") return n.EndpointByName(endpointName) } func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { if ep == nil { return derr.ErrorCodeEmptyEndpoint } networkSettings := container.NetworkSettings if networkSettings == nil { return derr.ErrorCodeEmptyNetwork } if len(networkSettings.Ports) == 0 { pm, err := getEndpointPortMapInfo(ep) if err != nil { return err } networkSettings.Ports = pm } return nil } func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { pm := nat.PortMap{} driverInfo, err := ep.DriverInfo() if err != nil { return pm, err } if driverInfo == nil { // It is not an error for epInfo to be nil return pm, nil } if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { if exposedPorts, ok := expData.([]types.TransportPort); ok { for _, tp := range exposedPorts { natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) if err != nil { return pm, derr.ErrorCodeParsingPort.WithArgs(tp.Port, err) } pm[natPort] = nil } } } mapData, ok := driverInfo[netlabel.PortMap] if !ok { return pm, nil } if portMapping, ok := mapData.([]types.PortBinding); ok { for _, pp := range portMapping { natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) if err != nil { return pm, err } natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} pm[natPort] = append(pm[natPort], natBndg) } } return pm, nil } func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { pm := nat.PortMap{} if sb == nil { return pm } for _, ep := range sb.Endpoints() { pm, _ = getEndpointPortMapInfo(ep) if len(pm) > 0 { break } } return pm } // BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. 
func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { if ep == nil { return derr.ErrorCodeEmptyEndpoint } networkSettings := container.NetworkSettings if networkSettings == nil { return derr.ErrorCodeEmptyNetwork } epInfo := ep.Info() if epInfo == nil { // It is not an error to get an empty endpoint info return nil } if _, ok := networkSettings.Networks[n.Name()]; !ok { networkSettings.Networks[n.Name()] = new(network.EndpointSettings) } networkSettings.Networks[n.Name()].NetworkID = n.ID() networkSettings.Networks[n.Name()].EndpointID = ep.ID() iface := epInfo.Iface() if iface == nil { return nil } if iface.MacAddress() != nil { networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() } if iface.Address() != nil { ones, _ := iface.Address().Mask.Size() networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() networkSettings.Networks[n.Name()].IPPrefixLen = ones } if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { onesv6, _ := iface.AddressIPv6().Mask.Size() networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 } return nil } // UpdateJoinInfo updates network settings when container joins network n with endpoint ep. func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { if err := container.buildPortMapInfo(ep); err != nil { return err } epInfo := ep.Info() if epInfo == nil { // It is not an error to get an empty endpoint info return nil } if epInfo.Gateway() != nil { container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() } if epInfo.GatewayIPv6().To16() != nil { container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() } return nil } // UpdateSandboxNetworkSettings updates the sandbox ID and Key. 
func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { container.NetworkSettings.SandboxID = sb.ID() container.NetworkSettings.SandboxKey = sb.Key() return nil } // BuildJoinOptions builds endpoint Join options from a given network. func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { var joinOptions []libnetwork.EndpointOption if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { for _, str := range epConfig.Links { name, alias, err := runconfigopts.ParseLink(str) if err != nil { return nil, err } joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) } } return joinOptions, nil } // BuildCreateEndpointOptions builds endpoint options from a given network. func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *network.EndpointSettings, sb libnetwork.Sandbox) ([]libnetwork.EndpointOption, error) { var ( portSpecs = make(nat.PortSet) bindings = make(nat.PortMap) pbList []types.PortBinding exposeList []types.TransportPort createOptions []libnetwork.EndpointOption ) if n.Name() == "bridge" || container.NetworkSettings.IsAnonymousEndpoint { createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) } if epConfig != nil { ipam := epConfig.IPAMConfig if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "") { createOptions = append(createOptions, libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), nil)) } for _, alias := range epConfig.Aliases { createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) } } if !containertypes.NetworkMode(n.Name()).IsUserDefined() { createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) } // configs that are applicable only for the endpoint in the network // to which container was connected to on docker run. 
// Ideally all these network-specific endpoint configurations must be moved under // container.NetworkSettings.Networks[n.Name()] if n.Name() == container.HostConfig.NetworkMode.NetworkName() || (n.Name() == "bridge" && container.HostConfig.NetworkMode.IsDefault()) { if container.Config.MacAddress != "" { mac, err := net.ParseMAC(container.Config.MacAddress) if err != nil { return nil, err } genericOption := options.Generic{ netlabel.MacAddress: mac, } createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) } } // Port-mapping rules belong to the container & applicable only to non-internal networks portmaps := getSandboxPortMapInfo(sb) if n.Info().Internal() || len(portmaps) > 0 { return createOptions, nil } if container.Config.ExposedPorts != nil { portSpecs = container.Config.ExposedPorts } if container.HostConfig.PortBindings != nil { for p, b := range container.HostConfig.PortBindings { bindings[p] = []nat.PortBinding{} for _, bb := range b { bindings[p] = append(bindings[p], nat.PortBinding{ HostIP: bb.HostIP, HostPort: bb.HostPort, }) } } } ports := make([]nat.Port, len(portSpecs)) var i int for p := range portSpecs { ports[i] = p i++ } nat.SortPortMap(ports, bindings) for _, port := range ports { expose := types.TransportPort{} expose.Proto = types.ParseProtocol(port.Proto()) expose.Port = uint16(port.Int()) exposeList = append(exposeList, expose) pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} binding := bindings[port] for i := 0; i < len(binding); i++ { pbCopy := pb.GetCopy() newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) var portStart, portEnd int if err == nil { portStart, portEnd, err = newP.Range() } if err != nil { return nil, derr.ErrorCodeHostPort.WithArgs(binding[i].HostPort, err) } pbCopy.HostPort = uint16(portStart) pbCopy.HostPortEnd = uint16(portEnd) pbCopy.HostIP = net.ParseIP(binding[i].HostIP) pbList = append(pbList, pbCopy) } if container.HostConfig.PublishAllPorts && 
len(binding) == 0 { pbList = append(pbList, pb) } } createOptions = append(createOptions, libnetwork.CreateOptionPortMapping(pbList), libnetwork.CreateOptionExposedPorts(exposeList)) return createOptions, nil } // SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir func (container *Container) SetupWorkingDirectory() error { if container.Config.WorkingDir == "" { return nil } container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) pth, err := container.GetResourcePath(container.Config.WorkingDir) if err != nil { return err } pthInfo, err := os.Stat(pth) if err != nil { if !os.IsNotExist(err) { return err } if err := system.MkdirAll(pth, 0755); err != nil { return err } } if pthInfo != nil && !pthInfo.IsDir() { return derr.ErrorCodeNotADir.WithArgs(container.Config.WorkingDir) } return nil } // appendNetworkMounts appends any network mounts to the array of mount points passed in func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { for _, mnt := range container.NetworkMounts() { dest, err := container.GetResourcePath(mnt.Destination) if err != nil { return nil, err } volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest}) } return volumeMounts, nil } // NetworkMounts returns the list of network mounts. 
func (container *Container) NetworkMounts() []execdriver.Mount { var mounts []execdriver.Mount shared := container.HostConfig.NetworkMode.IsContainer() if container.ResolvConfPath != "" { if _, err := os.Stat(container.ResolvConfPath); err != nil { logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) } else { label.Relabel(container.ResolvConfPath, container.MountLabel, shared) writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { writable = m.RW } mounts = append(mounts, execdriver.Mount{ Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: writable, Propagation: volume.DefaultPropagationMode, }) } } if container.HostnamePath != "" { if _, err := os.Stat(container.HostnamePath); err != nil { logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) } else { label.Relabel(container.HostnamePath, container.MountLabel, shared) writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/hostname"]; exists { writable = m.RW } mounts = append(mounts, execdriver.Mount{ Source: container.HostnamePath, Destination: "/etc/hostname", Writable: writable, Propagation: volume.DefaultPropagationMode, }) } } if container.HostsPath != "" { if _, err := os.Stat(container.HostsPath); err != nil { logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) } else { label.Relabel(container.HostsPath, container.MountLabel, shared) writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/hosts"]; exists { writable = m.RW } mounts = append(mounts, execdriver.Mount{ Source: container.HostsPath, Destination: "/etc/hosts", Writable: writable, Propagation: volume.DefaultPropagationMode, }) } } return mounts } // CopyImagePathContent copies files in 
destination to the volume. func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) if err != nil { return err } if _, err = ioutil.ReadDir(rootfs); err != nil { if os.IsNotExist(err) { return nil } return err } path, err := v.Mount() if err != nil { return err } if err := copyExistingContents(rootfs, path); err != nil { return err } return v.Unmount() } // ShmResourcePath returns path to shm func (container *Container) ShmResourcePath() (string, error) { return container.GetRootResourcePath("shm") } // MqueueResourcePath returns path to mqueue func (container *Container) MqueueResourcePath() (string, error) { return container.GetRootResourcePath("mqueue") } // HasMountFor checks if path is a mountpoint func (container *Container) HasMountFor(path string) bool { _, exists := container.MountPoints[path] return exists } // UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { return } var warnings []string if !container.HasMountFor("/dev/shm") { shmPath, err := container.ShmResourcePath() if err != nil { logrus.Error(err) warnings = append(warnings, err.Error()) } else if shmPath != "" { if err := unmount(shmPath); err != nil { warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) } } } if len(warnings) > 0 { logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) } } // IpcMounts returns the list of IPC mounts func (container *Container) IpcMounts() []execdriver.Mount { var mounts []execdriver.Mount if !container.HasMountFor("/dev/shm") { label.SetFileLabel(container.ShmPath, container.MountLabel) mounts = append(mounts, execdriver.Mount{ Source: 
container.ShmPath, Destination: "/dev/shm", Writable: true, Propagation: volume.DefaultPropagationMode, }) } return mounts } func updateCommand(c *execdriver.Command, resources containertypes.Resources) { c.Resources.BlkioWeight = resources.BlkioWeight c.Resources.CPUShares = resources.CPUShares c.Resources.CPUPeriod = resources.CPUPeriod c.Resources.CPUQuota = resources.CPUQuota c.Resources.CpusetCpus = resources.CpusetCpus c.Resources.CpusetMems = resources.CpusetMems c.Resources.Memory = resources.Memory c.Resources.MemorySwap = resources.MemorySwap c.Resources.MemoryReservation = resources.MemoryReservation c.Resources.KernelMemory = resources.KernelMemory } // UpdateContainer updates resources of a container. func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { container.Lock() resources := hostConfig.Resources cResources := &container.HostConfig.Resources if resources.BlkioWeight != 0 { cResources.BlkioWeight = resources.BlkioWeight } if resources.CPUShares != 0 { cResources.CPUShares = resources.CPUShares } if resources.CPUPeriod != 0 { cResources.CPUPeriod = resources.CPUPeriod } if resources.CPUQuota != 0 { cResources.CPUQuota = resources.CPUQuota } if resources.CpusetCpus != "" { cResources.CpusetCpus = resources.CpusetCpus } if resources.CpusetMems != "" { cResources.CpusetMems = resources.CpusetMems } if resources.Memory != 0 { cResources.Memory = resources.Memory } if resources.MemorySwap != 0 { cResources.MemorySwap = resources.MemorySwap } if resources.MemoryReservation != 0 { cResources.MemoryReservation = resources.MemoryReservation } if resources.KernelMemory != 0 { cResources.KernelMemory = resources.KernelMemory } container.Unlock() // If container is not running, update hostConfig struct is enough, // resources will be updated when the container is started again. // If container is running (including paused), we need to update // the command so we can update configs to the real world. 
if container.IsRunning() { container.Lock() updateCommand(container.Command, *cResources) container.Unlock() } if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving updated container: %v", err) return err } return nil } func detachMounted(path string) error { return syscall.Unmount(path, syscall.MNT_DETACH) } // UnmountVolumes unmounts all volumes func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { var ( volumeMounts []volume.MountPoint err error ) for _, mntPoint := range container.MountPoints { dest, err := container.GetResourcePath(mntPoint.Destination) if err != nil { return err } volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume}) } // Append any network mounts to the list (this is a no-op on Windows) if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil { return err } for _, volumeMount := range volumeMounts { if forceSyscall { if err := detachMounted(volumeMount.Destination); err != nil { logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err) } } if volumeMount.Volume != nil { if err := volumeMount.Volume.Unmount(); err != nil { return err } attributes := map[string]string{ "driver": volumeMount.Volume.DriverName(), "container": container.ID, } volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) } } return nil } // copyExistingContents copies from the source to the destination and // ensures the ownership is appropriately set. 
func copyExistingContents(source, destination string) error { volList, err := ioutil.ReadDir(source) if err != nil { return err } if len(volList) > 0 { srcList, err := ioutil.ReadDir(destination) if err != nil { return err } if len(srcList) == 0 { // If the source volume is empty, copies files from the root into the volume if err := chrootarchive.CopyWithTar(source, destination); err != nil { return err } } } return copyOwnership(source, destination) } // copyOwnership copies the permissions and uid:gid of the source file // to the destination file func copyOwnership(source, destination string) error { stat, err := system.Stat(source) if err != nil { return err } if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { return err } return os.Chmod(destination, os.FileMode(stat.Mode())) } // TmpfsMounts returns the list of tmpfs mounts func (container *Container) TmpfsMounts() []execdriver.Mount { var mounts []execdriver.Mount for dest, data := range container.HostConfig.Tmpfs { mounts = append(mounts, execdriver.Mount{ Source: "tmpfs", Destination: dest, Data: data, }) } return mounts } docker-1.10.3/container/container_windows.go000066400000000000000000000037031267010174400211150ustar00rootroot00000000000000// +build windows package container import ( "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/volume" "github.com/docker/engine-api/types/container" ) // Container holds fields specific to the Windows implementation. See // CommonContainer for standard fields common to all containers. type Container struct { CommonContainer // Fields below here are platform specific. } // CreateDaemonEnvironment creates a new environment variable slice for this container. func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { // On Windows, nothing to link. Just return the container environment. return container.Config.Env } // SetupWorkingDirectory initializes the container working directory. 
// This is a NOOP In windows. func (container *Container) SetupWorkingDirectory() error { return nil } // UnmountIpcMounts unmount Ipc related mounts. // This is a NOOP on windows. func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { } // IpcMounts returns the list of Ipc related mounts. func (container *Container) IpcMounts() []execdriver.Mount { return nil } // UnmountVolumes explicitly unmounts volumes from the container. func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { return nil } // TmpfsMounts returns the list of tmpfs mounts func (container *Container) TmpfsMounts() []execdriver.Mount { return nil } // UpdateContainer updates resources of a container func (container *Container) UpdateContainer(hostConfig *container.HostConfig) error { return nil } // appendNetworkMounts appends any network mounts to the array of mount points passed in. // Windows does not support network mounts (not to be confused with SMB network mounts), so // this is a no-op. func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { return volumeMounts, nil } docker-1.10.3/container/history.go000066400000000000000000000017171267010174400170650ustar00rootroot00000000000000package container import "sort" // History is a convenience type for storing a list of containers, // sorted by creation date in descendant order. type History []*Container // Len returns the number of containers in the history. func (history *History) Len() int { return len(*history) } // Less compares two containers and returns true if the second one // was created before the first one. func (history *History) Less(i, j int) bool { containers := *history return containers[j].Created.Before(containers[i].Created) } // Swap switches containers i and j positions in the history. 
func (history *History) Swap(i, j int) { containers := *history containers[i], containers[j] = containers[j], containers[i] } // Add the given container to history. func (history *History) Add(container *Container) { *history = append(*history, container) } // sort orders the history by creation date in descendant order. func (history *History) sort() { sort.Sort(history) } docker-1.10.3/container/memory_store.go000066400000000000000000000034771267010174400201150ustar00rootroot00000000000000package container import "sync" // memoryStore implements a Store in memory. type memoryStore struct { s map[string]*Container sync.Mutex } // NewMemoryStore initializes a new memory store. func NewMemoryStore() Store { return &memoryStore{ s: make(map[string]*Container), } } // Add appends a new container to the memory store. // It overrides the id if it existed before. func (c *memoryStore) Add(id string, cont *Container) { c.Lock() c.s[id] = cont c.Unlock() } // Get returns a container from the store by id. func (c *memoryStore) Get(id string) *Container { c.Lock() res := c.s[id] c.Unlock() return res } // Delete removes a container from the store by id. func (c *memoryStore) Delete(id string) { c.Lock() delete(c.s, id) c.Unlock() } // List returns a sorted list of containers from the store. // The containers are ordered by creation date. func (c *memoryStore) List() []*Container { containers := new(History) c.Lock() for _, cont := range c.s { containers.Add(cont) } c.Unlock() containers.sort() return *containers } // Size returns the number of containers in the store. func (c *memoryStore) Size() int { c.Lock() defer c.Unlock() return len(c.s) } // First returns the first container found in the store by a given filter. func (c *memoryStore) First(filter StoreFilter) *Container { c.Lock() defer c.Unlock() for _, cont := range c.s { if filter(cont) { return cont } } return nil } // ApplyAll calls the reducer function with every container in the store. 
// This operation is asyncronous in the memory store. func (c *memoryStore) ApplyAll(apply StoreReducer) { c.Lock() defer c.Unlock() wg := new(sync.WaitGroup) for _, cont := range c.s { wg.Add(1) go func(container *Container) { apply(container) wg.Done() }(cont) } wg.Wait() } var _ Store = &memoryStore{} docker-1.10.3/container/memory_store_test.go000066400000000000000000000042451267010174400211460ustar00rootroot00000000000000package container import ( "testing" "time" ) func TestNewMemoryStore(t *testing.T) { s := NewMemoryStore() m, ok := s.(*memoryStore) if !ok { t.Fatalf("store is not a memory store %v", s) } if m.s == nil { t.Fatal("expected store map to not be nil") } } func TestAddContainers(t *testing.T) { s := NewMemoryStore() s.Add("id", NewBaseContainer("id", "root")) if s.Size() != 1 { t.Fatalf("expected store size 1, got %v", s.Size()) } } func TestGetContainer(t *testing.T) { s := NewMemoryStore() s.Add("id", NewBaseContainer("id", "root")) c := s.Get("id") if c == nil { t.Fatal("expected container to not be nil") } } func TestDeleteContainer(t *testing.T) { s := NewMemoryStore() s.Add("id", NewBaseContainer("id", "root")) s.Delete("id") if c := s.Get("id"); c != nil { t.Fatalf("expected container to be nil after removal, got %v", c) } if s.Size() != 0 { t.Fatalf("expected store size to be 0, got %v", s.Size()) } } func TestListContainers(t *testing.T) { s := NewMemoryStore() cont := NewBaseContainer("id", "root") cont.Created = time.Now() cont2 := NewBaseContainer("id2", "root") cont2.Created = time.Now().Add(24 * time.Hour) s.Add("id", cont) s.Add("id2", cont2) list := s.List() if len(list) != 2 { t.Fatalf("expected list size 2, got %v", len(list)) } if list[0].ID != "id2" { t.Fatalf("expected older container to be first, got %v", list[0].ID) } } func TestFirstContainer(t *testing.T) { s := NewMemoryStore() s.Add("id", NewBaseContainer("id", "root")) s.Add("id2", NewBaseContainer("id2", "root")) first := s.First(func(cont *Container) bool { return 
cont.ID == "id2" }) if first == nil { t.Fatal("expected container to not be nil") } if first.ID != "id2" { t.Fatalf("expected id2, got %v", first) } } func TestApplyAllContainer(t *testing.T) { s := NewMemoryStore() s.Add("id", NewBaseContainer("id", "root")) s.Add("id2", NewBaseContainer("id2", "root")) s.ApplyAll(func(cont *Container) { if cont.ID == "id2" { cont.ID = "newID" } }) cont := s.Get("id2") if cont == nil { t.Fatal("expected container to not be nil") } if cont.ID != "newID" { t.Fatalf("expected newID, got %v", cont) } } docker-1.10.3/container/monitor.go000066400000000000000000000273431267010174400170560ustar00rootroot00000000000000package container import ( "io" "os/exec" "strings" "sync" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/utils" "github.com/docker/engine-api/types/container" ) const ( defaultTimeIncrement = 100 loggerCloseTimeout = 10 * time.Second ) // supervisor defines the interface that a supervisor must implement type supervisor interface { // LogContainerEvent generates events related to a given container LogContainerEvent(*Container, string) // Cleanup ensures that the container is properly unmounted Cleanup(*Container) // StartLogging starts the logging driver for the container StartLogging(*Container) error // Run starts a container Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) // IsShuttingDown tells whether the supervisor is shutting down or not IsShuttingDown() bool } // containerMonitor monitors the execution of a container's main process. // If a restart policy is specified for the container the monitor will ensure that the // process is restarted based on the rules of the policy. 
When the container is finally stopped // the monitor will reset and cleanup any of the container resources such as networking allocations // and the rootfs type containerMonitor struct { mux sync.Mutex // supervisor keeps track of the container and the events it generates supervisor supervisor // container is the container being monitored container *Container // restartPolicy is the current policy being applied to the container monitor restartPolicy container.RestartPolicy // failureCount is the number of times the container has failed to // start in a row failureCount int // shouldStop signals the monitor that the next time the container exits it is // either because docker or the user asked for the container to be stopped shouldStop bool // startSignal is a channel that is closes after the container initially starts startSignal chan struct{} // stopChan is used to signal to the monitor whenever there is a wait for the // next restart so that the timeIncrement is not honored and the user is not // left waiting for nothing to happen during this time stopChan chan struct{} // timeIncrement is the amount of time to wait between restarts // this is in milliseconds timeIncrement int // lastStartTime is the time which the monitor last exec'd the container's process lastStartTime time.Time } // StartMonitor initializes a containerMonitor for this container with the provided supervisor and restart policy // and starts the container's process. 
func (container *Container) StartMonitor(s supervisor, policy container.RestartPolicy) error { container.monitor = &containerMonitor{ supervisor: s, container: container, restartPolicy: policy, timeIncrement: defaultTimeIncrement, stopChan: make(chan struct{}), startSignal: make(chan struct{}), } return container.monitor.wait() } // wait starts the container and wait until // we either receive an error from the initial start of the container's // process or until the process is running in the container func (m *containerMonitor) wait() error { select { case <-m.startSignal: case err := <-promise.Go(m.start): return err } return nil } // Stop signals to the container monitor that it should stop monitoring the container // for exits the next time the process dies func (m *containerMonitor) ExitOnNext() { m.mux.Lock() // we need to protect having a double close of the channel when stop is called // twice or else we will get a panic if !m.shouldStop { m.shouldStop = true close(m.stopChan) } m.mux.Unlock() } // Close closes the container's resources such as networking allocations and // unmounts the container's root filesystem func (m *containerMonitor) Close() error { // Cleanup networking and mounts m.supervisor.Cleanup(m.container) // FIXME: here is race condition between two RUN instructions in Dockerfile // because they share same runconfig and change image. 
Must be fixed // in builder/builder.go if err := m.container.ToDisk(); err != nil { logrus.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err) return err } return nil } // Start starts the containers process and monitors it according to the restart policy func (m *containerMonitor) start() error { var ( err error exitStatus execdriver.ExitStatus // this variable indicates where we in execution flow: // before Run or after afterRun bool ) // ensure that when the monitor finally exits we release the networking and unmount the rootfs defer func() { if afterRun { m.container.Lock() defer m.container.Unlock() m.container.SetStopped(&exitStatus) } m.Close() }() // reset stopped flag if m.container.HasBeenManuallyStopped { m.container.HasBeenManuallyStopped = false } // reset the restart count m.container.RestartCount = -1 for { m.container.RestartCount++ if err := m.supervisor.StartLogging(m.container); err != nil { m.resetContainer(false) return err } pipes := execdriver.NewPipes(m.container.Stdin(), m.container.Stdout(), m.container.Stderr(), m.container.Config.OpenStdin) m.logEvent("start") m.lastStartTime = time.Now() if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil { // if we receive an internal error from the initial start of a container then lets // return it instead of entering the restart loop // set to 127 for container cmd not found/does not exist) if strings.Contains(err.Error(), "executable file not found") || strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "system cannot find the file specified") { if m.container.RestartCount == 0 { m.container.ExitCode = 127 m.resetContainer(false) return derr.ErrorCodeCmdNotFound } } // set to 126 for container cmd can't be invoked errors if strings.Contains(err.Error(), syscall.EACCES.Error()) { if m.container.RestartCount == 0 { m.container.ExitCode = 126 m.resetContainer(false) return derr.ErrorCodeCmdCouldNotBeInvoked 
} } if m.container.RestartCount == 0 { m.container.ExitCode = -1 m.resetContainer(false) return derr.ErrorCodeCantStart.WithArgs(m.container.ID, utils.GetErrorMessage(err)) } logrus.Errorf("Error running container: %s", err) } // here container.Lock is already lost afterRun = true m.resetMonitor(err == nil && exitStatus.ExitCode == 0) if m.shouldRestart(exitStatus.ExitCode) { m.container.SetRestarting(&exitStatus) m.logEvent("die") m.resetContainer(true) // sleep with a small time increment between each restart to help avoid issues cased by quickly // restarting the container because of some types of errors ( networking cut out, etc... ) m.waitForNextRestart() // we need to check this before reentering the loop because the waitForNextRestart could have // been terminated by a request from a user if m.shouldStop { return err } continue } m.logEvent("die") m.resetContainer(true) return err } } // resetMonitor resets the stateful fields on the containerMonitor based on the // previous runs success or failure. Regardless of success, if the container had // an execution time of more than 10s then reset the timer back to the default func (m *containerMonitor) resetMonitor(successful bool) { executionTime := time.Now().Sub(m.lastStartTime).Seconds() if executionTime > 10 { m.timeIncrement = defaultTimeIncrement } else { // otherwise we need to increment the amount of time we wait before restarting // the process. 
We will build up by multiplying the increment by 2 m.timeIncrement *= 2 } // the container exited successfully so we need to reset the failure counter if successful { m.failureCount = 0 } else { m.failureCount++ } } // waitForNextRestart waits with the default time increment to restart the container unless // a user or docker asks for the container to be stopped func (m *containerMonitor) waitForNextRestart() { select { case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond): case <-m.stopChan: } } // shouldRestart checks the restart policy and applies the rules to determine if // the container's process should be restarted func (m *containerMonitor) shouldRestart(exitCode int) bool { m.mux.Lock() defer m.mux.Unlock() // do not restart if the user or docker has requested that this container be stopped if m.shouldStop { m.container.HasBeenManuallyStopped = !m.supervisor.IsShuttingDown() return false } switch { case m.restartPolicy.IsAlways(), m.restartPolicy.IsUnlessStopped(): return true case m.restartPolicy.IsOnFailure(): // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max { logrus.Debugf("stopping restart of container %s because maximum failure could of %d has been reached", stringid.TruncateID(m.container.ID), max) return false } return exitCode != 0 } return false } // callback ensures that the container's state is properly updated after we // received ack from the execution drivers func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error { go func() { for range chOOM { m.logEvent("oom") } }() if processConfig.Tty { // The callback is called after the process start() // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave // which we close here. 
if c, ok := processConfig.Stdout.(io.Closer); ok { c.Close() } } m.container.SetRunning(pid) // signal that the process has started // close channel only if not closed select { case <-m.startSignal: default: close(m.startSignal) } if err := m.container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving container to disk: %v", err) } return nil } // resetContainer resets the container's IO and ensures that the command is able to be executed again // by copying the data into a new struct // if lock is true, then container locked during reset func (m *containerMonitor) resetContainer(lock bool) { container := m.container if lock { container.Lock() defer container.Unlock() } if err := container.CloseStreams(); err != nil { logrus.Errorf("%s: %s", container.ID, err) } if container.Command != nil && container.Command.ProcessConfig.Terminal != nil { if err := container.Command.ProcessConfig.Terminal.Close(); err != nil { logrus.Errorf("%s: Error closing terminal: %s", container.ID, err) } } // Re-create a brand new stdin pipe once the container exited if container.Config.OpenStdin { container.NewInputPipes() } if container.LogDriver != nil { if container.LogCopier != nil { exit := make(chan struct{}) go func() { container.LogCopier.Wait() close(exit) }() select { case <-time.After(loggerCloseTimeout): logrus.Warnf("Logger didn't exit in time: logs may be truncated") container.LogCopier.Close() // always waits for the LogCopier to finished before closing <-exit case <-exit: } } container.LogDriver.Close() container.LogCopier = nil container.LogDriver = nil } c := container.Command.ProcessConfig.Cmd container.Command.ProcessConfig.Cmd = exec.Cmd{ Stdin: c.Stdin, Stdout: c.Stdout, Stderr: c.Stderr, Path: c.Path, Env: c.Env, ExtraFiles: c.ExtraFiles, Args: c.Args, Dir: c.Dir, SysProcAttr: c.SysProcAttr, } } func (m *containerMonitor) logEvent(action string) { m.supervisor.LogContainerEvent(m.container, action) } 
docker-1.10.3/container/state.go000066400000000000000000000152051267010174400165010ustar00rootroot00000000000000package container import ( "fmt" "sync" "time" "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" "github.com/docker/go-units" ) // State holds the current container state, and has methods to get and // set the state. Container has an embed, which allows all of the // functions defined against State to run against Container. type State struct { sync.Mutex // FIXME: Why do we have both paused and running if a // container cannot be paused and running at the same time? Running bool Paused bool Restarting bool OOMKilled bool RemovalInProgress bool // Not need for this to be persistent on disk. Dead bool Pid int ExitCode int Error string // contains last known error when starting the container StartedAt time.Time FinishedAt time.Time waitChan chan struct{} } // NewState creates a default state object with a fresh channel for state changes. func NewState() *State { return &State{ waitChan: make(chan struct{}), } } // String returns a human-readable description of the state func (s *State) String() string { if s.Running { if s.Paused { return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if s.Restarting { return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if s.RemovalInProgress { return "Removal In Progress" } if s.Dead { return "Dead" } if s.StartedAt.IsZero() { return "Created" } if s.FinishedAt.IsZero() { return "" } return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } // StateString returns a single string to describe state func (s *State) StateString() string { if s.Running { if s.Paused { return "paused" } if s.Restarting { return "restarting" } return "running" } if s.Dead { 
return "dead" } if s.StartedAt.IsZero() { return "created" } return "exited" } // IsValidStateString checks if the provided string is a valid container state or not. func IsValidStateString(s string) bool { if s != "paused" && s != "restarting" && s != "running" && s != "dead" && s != "created" && s != "exited" { return false } return true } func wait(waitChan <-chan struct{}, timeout time.Duration) error { if timeout < 0 { <-waitChan return nil } select { case <-time.After(timeout): return derr.ErrorCodeTimedOut.WithArgs(timeout) case <-waitChan: return nil } } // waitRunning waits until state is running. If state is already // running it returns immediately. If you want wait forever you must // supply negative timeout. Returns pid, that was passed to // SetRunning. func (s *State) waitRunning(timeout time.Duration) (int, error) { s.Lock() if s.Running { pid := s.Pid s.Unlock() return pid, nil } waitChan := s.waitChan s.Unlock() if err := wait(waitChan, timeout); err != nil { return -1, err } return s.GetPID(), nil } // WaitStop waits until state is stopped. If state already stopped it returns // immediately. If you want wait forever you must supply negative timeout. // Returns exit code, that was passed to SetStoppedLocking func (s *State) WaitStop(timeout time.Duration) (int, error) { s.Lock() if !s.Running { exitCode := s.ExitCode s.Unlock() return exitCode, nil } waitChan := s.waitChan s.Unlock() if err := wait(waitChan, timeout); err != nil { return -1, err } return s.getExitCode(), nil } // IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. func (s *State) IsRunning() bool { s.Lock() res := s.Running s.Unlock() return res } // GetPID holds the process id of a container. func (s *State) GetPID() int { s.Lock() res := s.Pid s.Unlock() return res } func (s *State) getExitCode() int { s.Lock() res := s.ExitCode s.Unlock() return res } // SetRunning sets the state of the container to "running". 
func (s *State) SetRunning(pid int) { s.Error = "" s.Running = true s.Paused = false s.Restarting = false s.ExitCode = 0 s.Pid = pid s.StartedAt = time.Now().UTC() close(s.waitChan) // fire waiters for start s.waitChan = make(chan struct{}) } // SetStoppedLocking locks the container state is sets it to "stopped". func (s *State) SetStoppedLocking(exitStatus *execdriver.ExitStatus) { s.Lock() s.SetStopped(exitStatus) s.Unlock() } // SetStopped sets the container state to "stopped" without locking. func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) { s.Running = false s.Restarting = false s.Pid = 0 s.FinishedAt = time.Now().UTC() s.setFromExitStatus(exitStatus) close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) } // SetRestartingLocking is when docker handles the auto restart of containers when they are // in the middle of a stop and being restarted again func (s *State) SetRestartingLocking(exitStatus *execdriver.ExitStatus) { s.Lock() s.SetRestarting(exitStatus) s.Unlock() } // SetRestarting sets the container state to "restarting". // It also sets the container PID to 0. func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) { // we should consider the container running when it is restarting because of // all the checks in docker around rm/stop/etc s.Running = true s.Restarting = true s.Pid = 0 s.FinishedAt = time.Now().UTC() s.setFromExitStatus(exitStatus) close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) } // SetError sets the container's error state. This is useful when we want to // know the error that occurred when container transits to another state // when inspecting it func (s *State) SetError(err error) { s.Error = err.Error() } // IsPaused returns whether the container is paused or not. func (s *State) IsPaused() bool { s.Lock() res := s.Paused s.Unlock() return res } // IsRestarting returns whether the container is restarting or not. 
func (s *State) IsRestarting() bool { s.Lock() res := s.Restarting s.Unlock() return res } // SetRemovalInProgress sets the container state as being removed. func (s *State) SetRemovalInProgress() error { s.Lock() defer s.Unlock() if s.RemovalInProgress { return derr.ErrorCodeAlreadyRemoving } s.RemovalInProgress = true return nil } // ResetRemovalInProgress make the RemovalInProgress state to false. func (s *State) ResetRemovalInProgress() { s.Lock() s.RemovalInProgress = false s.Unlock() } // SetDead sets the container state to "dead" func (s *State) SetDead() { s.Lock() s.Dead = true s.Unlock() } docker-1.10.3/container/state_test.go000066400000000000000000000052401267010174400175360ustar00rootroot00000000000000package container import ( "sync/atomic" "testing" "time" "github.com/docker/docker/daemon/execdriver" ) func TestStateRunStop(t *testing.T) { s := NewState() for i := 1; i < 3; i++ { // full lifecycle two times started := make(chan struct{}) var pid int64 go func() { runPid, _ := s.waitRunning(-1 * time.Second) atomic.StoreInt64(&pid, int64(runPid)) close(started) }() s.Lock() s.SetRunning(i + 100) s.Unlock() if !s.IsRunning() { t.Fatal("State not running") } if s.Pid != i+100 { t.Fatalf("Pid %v, expected %v", s.Pid, i+100) } if s.ExitCode != 0 { t.Fatalf("ExitCode %v, expected 0", s.ExitCode) } select { case <-time.After(100 * time.Millisecond): t.Fatal("Start callback doesn't fire in 100 milliseconds") case <-started: t.Log("Start callback fired") } runPid := int(atomic.LoadInt64(&pid)) if runPid != i+100 { t.Fatalf("Pid %v, expected %v", runPid, i+100) } if pid, err := s.waitRunning(-1 * time.Second); err != nil || pid != i+100 { t.Fatalf("waitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) } stopped := make(chan struct{}) var exit int64 go func() { exitCode, _ := s.WaitStop(-1 * time.Second) atomic.StoreInt64(&exit, int64(exitCode)) close(stopped) }() s.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: i}) if 
s.IsRunning() { t.Fatal("State is running") } if s.ExitCode != i { t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i) } if s.Pid != 0 { t.Fatalf("Pid %v, expected 0", s.Pid) } select { case <-time.After(100 * time.Millisecond): t.Fatal("Stop callback doesn't fire in 100 milliseconds") case <-stopped: t.Log("Stop callback fired") } exitCode := int(atomic.LoadInt64(&exit)) if exitCode != i { t.Fatalf("ExitCode %v, expected %v", exitCode, i) } if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) } } } func TestStateTimeoutWait(t *testing.T) { s := NewState() started := make(chan struct{}) go func() { s.waitRunning(100 * time.Millisecond) close(started) }() select { case <-time.After(200 * time.Millisecond): t.Fatal("Start callback doesn't fire in 100 milliseconds") case <-started: t.Log("Start callback fired") } s.Lock() s.SetRunning(49) s.Unlock() stopped := make(chan struct{}) go func() { s.waitRunning(100 * time.Millisecond) close(stopped) }() select { case <-time.After(200 * time.Millisecond): t.Fatal("Start callback doesn't fire in 100 milliseconds") case <-stopped: t.Log("Start callback fired") } } docker-1.10.3/container/state_unix.go000066400000000000000000000005431267010174400175430ustar00rootroot00000000000000// +build linux freebsd package container import "github.com/docker/docker/daemon/execdriver" // setFromExitStatus is a platform specific helper function to set the state // based on the ExitStatus structure. 
func (s *State) setFromExitStatus(exitStatus *execdriver.ExitStatus) { s.ExitCode = exitStatus.ExitCode s.OOMKilled = exitStatus.OOMKilled } docker-1.10.3/container/state_windows.go000066400000000000000000000004461267010174400202540ustar00rootroot00000000000000package container import "github.com/docker/docker/daemon/execdriver" // setFromExitStatus is a platform specific helper function to set the state // based on the ExitStatus structure. func (s *State) setFromExitStatus(exitStatus *execdriver.ExitStatus) { s.ExitCode = exitStatus.ExitCode } docker-1.10.3/container/store.go000066400000000000000000000017061267010174400165160ustar00rootroot00000000000000package container // StoreFilter defines a function to filter // container in the store. type StoreFilter func(*Container) bool // StoreReducer defines a function to // manipulate containers in the store type StoreReducer func(*Container) // Store defines an interface that // any container store must implement. type Store interface { // Add appends a new container to the store. Add(string, *Container) // Get returns a container from the store by the identifier it was stored with. Get(string) *Container // Delete removes a container from the store by the identifier it was stored with. Delete(string) // List returns a list of containers from the store. List() []*Container // Size returns the number of containers in the store. Size() int // First returns the first container found in the store by a given filter. First(StoreFilter) *Container // ApplyAll calls the reducer function with every container in the store. ApplyAll(StoreReducer) } docker-1.10.3/contrib/000077500000000000000000000000001267010174400145055ustar00rootroot00000000000000docker-1.10.3/contrib/README000066400000000000000000000003671267010174400153730ustar00rootroot00000000000000The `contrib` directory contains scripts, images, and other helpful things which are not part of the core docker distribution. 
Please note that they could be out of date, since they do not receive the same attention as the rest of the repository. docker-1.10.3/contrib/REVIEWERS000066400000000000000000000000551267010174400160030ustar00rootroot00000000000000Tianon Gravi (@tianon) docker-1.10.3/contrib/apparmor/000077500000000000000000000000001267010174400163265ustar00rootroot00000000000000docker-1.10.3/contrib/apparmor/main.go000066400000000000000000000022731267010174400176050ustar00rootroot00000000000000package main import ( "fmt" "log" "os" "path" "text/template" "github.com/docker/docker/pkg/aaparser" ) type profileData struct { MajorVersion int MinorVersion int } func main() { if len(os.Args) < 2 { log.Fatal("pass a filename to save the profile in.") } // parse the arg apparmorProfilePath := os.Args[1] majorVersion, minorVersion, err := aaparser.GetVersion() if err != nil { log.Fatal(err) } data := profileData{ MajorVersion: majorVersion, MinorVersion: minorVersion, } fmt.Printf("apparmor_parser is of version %+v\n", data) // parse the template compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate) if err != nil { log.Fatalf("parsing template failed: %v", err) } // make sure /etc/apparmor.d exists if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { log.Fatal(err) } f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { log.Fatal(err) } defer f.Close() if err := compiled.Execute(f, data); err != nil { log.Fatalf("executing template failed: %v", err) } fmt.Printf("created apparmor profile for version %+v at %q\n", data, apparmorProfilePath) } docker-1.10.3/contrib/apparmor/template.go000066400000000000000000000132761267010174400205010ustar00rootroot00000000000000package main const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker profile /usr/bin/docker (attach_disconnected, complain) { # Prevent following links to these files during container setup. 
deny /etc/** mkl, deny /dev/** kl, deny /sys/** mkl, deny /proc/** mkl, mount -> @{DOCKER_GRAPH_PATH}/**, mount -> /, mount -> /proc/**, mount -> /sys/**, mount -> /run/docker/netns/**, mount -> /.pivot_root[0-9]*/, / r, umount, pivot_root, {{if ge .MajorVersion 2}}{{if ge .MinorVersion 9}} signal (receive) peer=@{profile_name}, signal (receive) peer=unconfined, signal (send), {{end}}{{end}} network, capability, owner /** rw, @{DOCKER_GRAPH_PATH}/** rwl, @{DOCKER_GRAPH_PATH}/linkgraph.db k, @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, # For non-root client use: /dev/urandom r, /dev/null rw, /dev/pts/[0-9]* rw, /run/docker.sock rw, /proc/** r, /proc/[0-9]*/attr/exec w, /sys/kernel/mm/hugepages/ r, /etc/localtime r, /etc/ld.so.cache r, /etc/passwd r, {{if ge .MajorVersion 2}}{{if ge .MinorVersion 9}} ptrace peer=@{profile_name}, ptrace (read) peer=docker-default, deny ptrace (trace) peer=docker-default, deny ptrace peer=/usr/bin/docker///bin/ps, {{end}}{{end}} /usr/lib/** rm, /lib/** rm, /usr/bin/docker pix, /sbin/xtables-multi rCx, /sbin/iptables rCx, /sbin/modprobe rCx, /sbin/auplink rCx, /sbin/mke2fs rCx, /sbin/tune2fs rCx, /sbin/blkid rCx, /bin/kmod rCx, /usr/bin/xz rCx, /bin/ps rCx, /bin/tar rCx, /bin/cat rCx, /sbin/zfs rCx, /sbin/apparmor_parser rCx, {{if ge .MajorVersion 2}}{{if ge .MinorVersion 9}} # Transitions change_profile -> docker-*, change_profile -> unconfined, {{end}}{{end}} profile /bin/cat (complain) { /etc/ld.so.cache r, /lib/** rm, /dev/null rw, /proc r, /bin/cat mr, # For reading in 'docker stats': /proc/[0-9]*/net/dev r, } profile /bin/ps (complain) { /etc/ld.so.cache r, /etc/localtime r, /etc/passwd r, /etc/nsswitch.conf r, /lib/** rm, /proc/[0-9]*/** r, /dev/null rw, /bin/ps mr, {{if ge .MajorVersion 2}}{{if ge .MinorVersion 9}} # We don't need ptrace so we'll deny and ignore the error. 
deny ptrace (read, trace), {{end}}{{end}} # Quiet dac_override denials deny capability dac_override, deny capability dac_read_search, deny capability sys_ptrace, /dev/tty r, /proc/stat r, /proc/cpuinfo r, /proc/meminfo r, /proc/uptime r, /sys/devices/system/cpu/online r, /proc/sys/kernel/pid_max r, /proc/ r, /proc/tty/drivers r, } profile /sbin/iptables (complain) { {{if ge .MajorVersion 2}}{{if ge .MinorVersion 9}} signal (receive) peer=/usr/bin/docker, {{end}}{{end}} capability net_admin, } profile /sbin/auplink flags=(attach_disconnected, complain) { {{if ge .MajorVersion 2}}{{if ge .MinorVersion 9}} signal (receive) peer=/usr/bin/docker, {{end}}{{end}} capability sys_admin, capability dac_override, @{DOCKER_GRAPH_PATH}/aufs/** rw, @{DOCKER_GRAPH_PATH}/tmp/** rw, # For user namespaces: @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, /sys/fs/aufs/** r, /lib/** rm, /apparmor/.null r, /dev/null rw, /etc/ld.so.cache r, /sbin/auplink rm, /proc/fs/aufs/** rw, /proc/[0-9]*/mounts rw, } profile /sbin/modprobe /bin/kmod (complain) { {{if ge .MajorVersion 2}}{{if ge .MinorVersion 9}} signal (receive) peer=/usr/bin/docker, {{end}}{{end}} capability sys_module, /etc/ld.so.cache r, /lib/** rm, /dev/null rw, /apparmor/.null rw, /sbin/modprobe rm, /bin/kmod rm, /proc/cmdline r, /sys/module/** r, /etc/modprobe.d{/,/**} r, } # xz works via pipes, so we do not need access to the filesystem. 
profile /usr/bin/xz (complain) { {{if ge .MajorVersion 2}}{{if ge .MinorVersion 9}} signal (receive) peer=/usr/bin/docker, {{end}}{{end}} /etc/ld.so.cache r, /lib/** rm, /usr/bin/xz rm, deny /proc/** rw, deny /sys/** rw, } profile /sbin/xtables-multi (attach_disconnected, complain) { /etc/ld.so.cache r, /lib/** rm, /sbin/xtables-multi rm, /apparmor/.null w, /dev/null rw, /proc r, capability net_raw, capability net_admin, network raw, } profile /sbin/zfs (attach_disconnected, complain) { file, capability, } profile /sbin/mke2fs (complain) { /sbin/mke2fs rm, /lib/** rm, /apparmor/.null w, /etc/ld.so.cache r, /etc/mke2fs.conf r, /etc/mtab r, /dev/dm-* rw, /dev/urandom r, /dev/null rw, /proc/swaps r, /proc/[0-9]*/mounts r, } profile /sbin/tune2fs (complain) { /sbin/tune2fs rm, /lib/** rm, /apparmor/.null w, /etc/blkid.conf r, /etc/mtab r, /etc/ld.so.cache r, /dev/null rw, /dev/.blkid.tab r, /dev/dm-* rw, /proc/swaps r, /proc/[0-9]*/mounts r, } profile /sbin/blkid (complain) { /sbin/blkid rm, /lib/** rm, /apparmor/.null w, /etc/ld.so.cache r, /etc/blkid.conf r, /dev/null rw, /dev/.blkid.tab rl, /dev/.blkid.tab* rwl, /dev/dm-* r, /sys/devices/virtual/block/** r, capability mknod, mount -> @{DOCKER_GRAPH_PATH}/**, } profile /sbin/apparmor_parser (complain) { /sbin/apparmor_parser rm, /lib/** rm, /etc/ld.so.cache r, /etc/apparmor/** r, /etc/apparmor.d/** r, /etc/apparmor.d/cache/** w, /dev/null rw, /sys/kernel/security/apparmor/** r, /sys/kernel/security/apparmor/.replace w, /proc/[0-9]*/mounts r, /proc/sys/kernel/osrelease r, /proc r, capability mac_admin, } }` docker-1.10.3/contrib/builder/000077500000000000000000000000001267010174400161335ustar00rootroot00000000000000docker-1.10.3/contrib/builder/deb/000077500000000000000000000000001267010174400166655ustar00rootroot00000000000000docker-1.10.3/contrib/builder/deb/README.md000066400000000000000000000006241267010174400201460ustar00rootroot00000000000000# `dockercore/builder-deb` This image's tags contain the dependencies 
for building Docker `.deb`s for each of the Debian-based platforms Docker targets. To add new tags, see [`contrib/builder/deb` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. docker-1.10.3/contrib/builder/deb/build.sh000077500000000000000000000002571267010174400203270ustar00rootroot00000000000000#!/bin/bash set -e cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" set -x ./generate.sh for d in */; do docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" done docker-1.10.3/contrib/builder/deb/debian-jessie/000077500000000000000000000000001267010174400213675ustar00rootroot00000000000000docker-1.10.3/contrib/builder/deb/debian-jessie/Dockerfile000066400000000000000000000011531267010174400233610ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! # FROM debian:jessie RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS apparmor selinux docker-1.10.3/contrib/builder/deb/debian-stretch/000077500000000000000000000000001267010174400215615ustar00rootroot00000000000000docker-1.10.3/contrib/builder/deb/debian-stretch/Dockerfile000066400000000000000000000011721267010174400235540ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! 
# FROM debian:stretch RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS apparmor seccomp selinux docker-1.10.3/contrib/builder/deb/debian-wheezy/000077500000000000000000000000001267010174400214205ustar00rootroot00000000000000docker-1.10.3/contrib/builder/deb/debian-wheezy/Dockerfile000066400000000000000000000013241267010174400234120ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! # FROM debian:wheezy-backports RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* RUN apt-get update && apt-get install -y apparmor bash-completion build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS apparmor selinux docker-1.10.3/contrib/builder/deb/generate.sh000077500000000000000000000077471267010174400210350ustar00rootroot00000000000000#!/bin/bash set -e # usage: ./generate.sh [versions] # ie: ./generate.sh # to update all Dockerfiles in this directory # or: ./generate.sh debian-jessie # to only update debian-jessie/Dockerfile # or: ./generate.sh debian-newversion # to create a new folder and a Dockerfile within it cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" 
versions=( "$@" ) if [ ${#versions[@]} -eq 0 ]; then versions=( */ ) fi versions=( "${versions[@]%/}" ) for version in "${versions[@]}"; do distro="${version%-*}" suite="${version##*-}" from="${distro}:${suite}" case "$from" in debian:wheezy) # add -backports, like our users have to from+='-backports' ;; esac mkdir -p "$version" echo "$version -> FROM $from" cat > "$version/Dockerfile" <<-EOF # # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! # FROM $from EOF echo >> "$version/Dockerfile" extraBuildTags= # this list is sorted alphabetically; please keep it that way packages=( apparmor # for apparmor_parser for testing the profile bash-completion # for bash-completion debhelper integration btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) build-essential # "essential for building Debian packages" curl ca-certificates # for downloading Go debhelper # for easy ".deb" building dh-apparmor # for apparmor debhelper dh-systemd # for systemd debhelper integration git # for "git commit" info in "docker -v" libapparmor-dev # for "sys/apparmor.h" libdevmapper-dev # for "libdevmapper.h" libltdl-dev # for pkcs11 "ltdl.h" libseccomp-dev # for "seccomp.h" & "libseccomp.so" libsqlite3-dev # for "sqlite3.h" pkg-config # for detecting things like libsystemd-journal dynamically ) # packaging for "sd-journal.h" and libraries varies case "$suite" in precise|wheezy) ;; sid|stretch|wily) packages+=( libsystemd-dev );; *) packages+=( libsystemd-journal-dev );; esac # debian wheezy & ubuntu precise do not have the right libseccomp libs # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( case "$suite" in precise|wheezy|jessie|trusty) packages=( "${packages[@]/libseccomp-dev}" ) ;; *) extraBuildTags+=' seccomp' ;; esac if [ "$suite" = 'precise' ]; then # precise has a few package issues # - dh-systemd doesn't exist at all packages=( "${packages[@]/dh-systemd}" ) # - libdevmapper-dev is missing critical structs (too old) packages=( 
"${packages[@]/libdevmapper-dev}" ) extraBuildTags+=' exclude_graphdriver_devicemapper' # - btrfs-tools is missing "ioctl.h" (too old), so it's useless # (since kernels on precise are old too, just skip btrfs entirely) packages=( "${packages[@]/btrfs-tools}" ) extraBuildTags+=' exclude_graphdriver_btrfs' fi if [ "$suite" = 'wheezy' ]; then # pull a couple packages from backports explicitly # (build failures otherwise) backportsPackages=( btrfs-tools libsystemd-journal-dev ) for pkg in "${backportsPackages[@]}"; do packages=( "${packages[@]/$pkg}" ) done echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" fi echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" echo >> "$version/Dockerfile" awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile" echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" echo >> "$version/Dockerfile" echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" echo >> "$version/Dockerfile" # print build tags in alphabetical order buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" done docker-1.10.3/contrib/builder/deb/ubuntu-precise/000077500000000000000000000000001267010174400216375ustar00rootroot00000000000000docker-1.10.3/contrib/builder/deb/ubuntu-precise/Dockerfile000066400000000000000000000011531267010174400236310ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! 
# FROM ubuntu:precise RUN apt-get update && apt-get install -y apparmor bash-completion build-essential curl ca-certificates debhelper dh-apparmor git libapparmor-dev libltdl-dev libsqlite3-dev pkg-config --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS apparmor exclude_graphdriver_btrfs exclude_graphdriver_devicemapper selinux docker-1.10.3/contrib/builder/deb/ubuntu-trusty/000077500000000000000000000000001267010174400215575ustar00rootroot00000000000000docker-1.10.3/contrib/builder/deb/ubuntu-trusty/Dockerfile000066400000000000000000000011531267010174400235510ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! # FROM ubuntu:trusty RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS apparmor selinux docker-1.10.3/contrib/builder/deb/ubuntu-wily/000077500000000000000000000000001267010174400211715ustar00rootroot00000000000000docker-1.10.3/contrib/builder/deb/ubuntu-wily/Dockerfile000066400000000000000000000011671267010174400231700ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! 
# FROM ubuntu:wily RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS apparmor seccomp selinux docker-1.10.3/contrib/builder/rpm/000077500000000000000000000000001267010174400167315ustar00rootroot00000000000000docker-1.10.3/contrib/builder/rpm/README.md000066400000000000000000000006211267010174400202070ustar00rootroot00000000000000# `dockercore/builder-rpm` This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. To add new tags, see [`contrib/builder/rpm` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. docker-1.10.3/contrib/builder/rpm/build.sh000077500000000000000000000002571267010174400203730ustar00rootroot00000000000000#!/bin/bash set -e cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" set -x ./generate.sh for d in */; do docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" done docker-1.10.3/contrib/builder/rpm/centos-7/000077500000000000000000000000001267010174400203705ustar00rootroot00000000000000docker-1.10.3/contrib/builder/rpm/centos-7/Dockerfile000066400000000000000000000011671267010174400223670ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"! 
# FROM centos:7 RUN yum groupinstall -y "Development Tools" RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS selinux docker-1.10.3/contrib/builder/rpm/fedora-22/000077500000000000000000000000001267010174400204125ustar00rootroot00000000000000docker-1.10.3/contrib/builder/rpm/fedora-22/Dockerfile000066400000000000000000000021131267010174400224010ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"! # FROM fedora:22 RUN dnf install -y @development-tools fedora-packager RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar ENV SECCOMP_VERSION v2.2.3 RUN buildDeps=' \ automake \ libtool \ ' \ && set -x \ && yum install -y $buildDeps \ && export SECCOMP_PATH=$(mktemp -d) \ && git clone -b "$SECCOMP_VERSION" --depth 1 https://github.com/seccomp/libseccomp.git "$SECCOMP_PATH" \ && ( \ cd "$SECCOMP_PATH" \ && ./autogen.sh \ && ./configure --prefix=/usr \ && make \ && install -c src/.libs/libseccomp.a /usr/lib/libseccomp.a \ && chmod 644 /usr/lib/libseccomp.a \ && ranlib /usr/lib/libseccomp.a \ && ldconfig -n /usr/lib \ ) \ && rm -rf "$SECCOMP_PATH" ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS seccomp selinux 
docker-1.10.3/contrib/builder/rpm/fedora-23/000077500000000000000000000000001267010174400204135ustar00rootroot00000000000000docker-1.10.3/contrib/builder/rpm/fedora-23/Dockerfile000066400000000000000000000021131267010174400224020ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"! # FROM fedora:23 RUN dnf install -y @development-tools fedora-packager RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar ENV SECCOMP_VERSION v2.2.3 RUN buildDeps=' \ automake \ libtool \ ' \ && set -x \ && yum install -y $buildDeps \ && export SECCOMP_PATH=$(mktemp -d) \ && git clone -b "$SECCOMP_VERSION" --depth 1 https://github.com/seccomp/libseccomp.git "$SECCOMP_PATH" \ && ( \ cd "$SECCOMP_PATH" \ && ./autogen.sh \ && ./configure --prefix=/usr \ && make \ && install -c src/.libs/libseccomp.a /usr/lib/libseccomp.a \ && chmod 644 /usr/lib/libseccomp.a \ && ranlib /usr/lib/libseccomp.a \ && ldconfig -n /usr/lib \ ) \ && rm -rf "$SECCOMP_PATH" ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS seccomp selinux docker-1.10.3/contrib/builder/rpm/generate.sh000077500000000000000000000154001267010174400210620ustar00rootroot00000000000000#!/bin/bash set -e # usage: ./generate.sh [versions] # ie: ./generate.sh # to update all Dockerfiles in this directory # or: ./generate.sh # to only update fedora-23/Dockerfile # or: ./generate.sh fedora-newversion # to create a new folder and a Dockerfile within it cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" versions=( "$@" ) if [ ${#versions[@]} -eq 0 ]; then versions=( */ ) fi versions=( "${versions[@]%/}" ) for version in "${versions[@]}"; do distro="${version%-*}" suite="${version##*-}" 
from="${distro}:${suite}" installer=yum if [[ "$distro" == "fedora" ]]; then installer=dnf fi mkdir -p "$version" echo "$version -> FROM $from" cat > "$version/Dockerfile" <<-EOF # # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"! # FROM $from EOF echo >> "$version/Dockerfile" extraBuildTags= case "$from" in centos:*) # get "Development Tools" packages dependencies echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" if [[ "$version" == "centos-7" ]]; then echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" fi ;; oraclelinux:*) # get "Development Tools" packages and dependencies # we also need yum-utils for yum-config-manager to pull the latest repo file echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" ;; opensuse:*) # get rpm-build and curl packages and dependencies echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" ;; *) echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" ;; esac # this list is sorted alphabetically; please keep it that way packages=( btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) device-mapper-devel # for "libdevmapper.h" glibc-static libseccomp-devel # for "seccomp.h" & "libseccomp.so" libselinux-devel # for "libselinux.so" libtool-ltdl-devel # for pkcs11 "ltdl.h" pkgconfig # for the pkg-config command selinux-policy selinux-policy-devel sqlite-devel # for "sqlite3.h" systemd-devel # for "sd-journal.h" and libraries tar # older versions of dev-tools do not have tar ) case "$from" in oraclelinux:7) # Enable the optional repository packages=( --enablerepo=ol7_optional_latest "${packages[*]}" ) ;; esac case "$from" in oraclelinux:6) # doesn't use systemd, doesn't have a devel package for it packages=( "${packages[@]/systemd-devel}" ) ;; esac # opensuse & oraclelinx:6 do not have the 
right libseccomp libs # centos:7 and oraclelinux:7 have a libseccomp < 2.2.1 :( case "$from" in opensuse:*|oraclelinux:*|centos:7) packages=( "${packages[@]/libseccomp-devel}" ) ;; *) extraBuildTags+=' seccomp' ;; esac case "$from" in opensuse:*) packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) packages=( "${packages[@]/pkgconfig/pkg-config}" ) if [[ "$from" == "opensuse:13."* ]]; then packages+=( systemd-rpm-macros ) fi # use zypper echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" ;; *) echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" ;; esac echo >> "$version/Dockerfile" # fedora does not have a libseccomp.a for compiling static dockerinit # ONLY install libseccomp.a from source, this can be removed once dockerinit is removed # TODO remove this manual seccomp compilation once dockerinit is gone or no longer needs to be statically compiled case "$from" in fedora:*) awk '$1 == "ENV" && $2 == "SECCOMP_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile" cat <<-'EOF' >> "$version/Dockerfile" RUN buildDeps=' \ automake \ libtool \ ' \ && set -x \ && yum install -y $buildDeps \ && export SECCOMP_PATH=$(mktemp -d) \ && git clone -b "$SECCOMP_VERSION" --depth 1 https://github.com/seccomp/libseccomp.git "$SECCOMP_PATH" \ && ( \ cd "$SECCOMP_PATH" \ && ./autogen.sh \ && ./configure --prefix=/usr \ && make \ && install -c src/.libs/libseccomp.a /usr/lib/libseccomp.a \ && chmod 644 /usr/lib/libseccomp.a \ && ranlib /usr/lib/libseccomp.a \ && ldconfig -n /usr/lib \ ) \ && rm -rf "$SECCOMP_PATH" EOF echo >> "$version/Dockerfile" ;; *) ;; esac case "$from" in oraclelinux:6) # We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version # This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo 
&& yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile" echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile" echo >> "$version/Dockerfile" ;; *) ;; esac awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile" echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" echo >> "$version/Dockerfile" echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" echo >> "$version/Dockerfile" # print build tags in alphabetical order buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" echo >> "$version/Dockerfile" case "$from" in oraclelinux:6) # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. # The ordering is very important and should not be changed. 
echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" echo >> "$version/Dockerfile" ;; *) ;; esac done docker-1.10.3/contrib/builder/rpm/opensuse-13.2/000077500000000000000000000000001267010174400211535ustar00rootroot00000000000000docker-1.10.3/contrib/builder/rpm/opensuse-13.2/Dockerfile000066400000000000000000000011321267010174400231420ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"! # FROM opensuse:13.2 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar systemd-rpm-macros ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS selinux docker-1.10.3/contrib/builder/rpm/oraclelinux-6/000077500000000000000000000000001267010174400214215ustar00rootroot00000000000000docker-1.10.3/contrib/builder/rpm/oraclelinux-6/Dockerfile000066400000000000000000000022541267010174400234160ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
# FROM oraclelinux:6 RUN yum groupinstall -y "Development Tools" RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS selinux ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include docker-1.10.3/contrib/builder/rpm/oraclelinux-7/000077500000000000000000000000001267010174400214225ustar00rootroot00000000000000docker-1.10.3/contrib/builder/rpm/oraclelinux-7/Dockerfile000066400000000000000000000010721267010174400234140ustar00rootroot00000000000000# # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"! 
# FROM oraclelinux:7 RUN yum groupinstall -y "Development Tools" RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS selinux docker-1.10.3/contrib/check-config.sh000077500000000000000000000146331267010174400173730ustar00rootroot00000000000000#!/usr/bin/env bash set -e # bits of this were adapted from lxc-checkconfig # see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in possibleConfigs=( '/proc/config.gz' "/boot/config-$(uname -r)" "/usr/src/linux-$(uname -r)/.config" '/usr/src/linux/.config' ) if [ $# -gt 0 ]; then CONFIG="$1" else : ${CONFIG:="${possibleConfigs[0]}"} fi if ! command -v zgrep &> /dev/null; then zgrep() { zcat "$2" | grep "$1" } fi kernelVersion="$(uname -r)" kernelMajor="${kernelVersion%%.*}" kernelMinor="${kernelVersion#$kernelMajor.}" kernelMinor="${kernelMinor%%.*}" is_set() { zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null } is_set_in_kernel() { zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null } is_set_as_module() { zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null } color() { local codes=() if [ "$1" = 'bold' ]; then codes=( "${codes[@]}" '1' ) shift fi if [ "$#" -gt 0 ]; then local code= case "$1" in # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors black) code=30 ;; red) code=31 ;; green) code=32 ;; yellow) code=33 ;; blue) code=34 ;; magenta) code=35 ;; cyan) code=36 ;; white) code=37 ;; esac if [ "$code" ]; then codes=( "${codes[@]}" "$code" ) fi fi local IFS=';' echo -en '\033['"${codes[*]}"'m' } wrap_color() { text="$1" shift color "$@" echo -n "$text" color reset echo } wrap_good() { echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" } wrap_bad() { 
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" } wrap_warning() { wrap_color >&2 "$*" red } check_flag() { if is_set_in_kernel "$1"; then wrap_good "CONFIG_$1" 'enabled' elif is_set_as_module "$1"; then wrap_good "CONFIG_$1" 'enabled (as module)' else wrap_bad "CONFIG_$1" 'missing' fi } check_flags() { for flag in "$@"; do echo "- $(check_flag "$flag")" done } check_command() { if command -v "$1" >/dev/null 2>&1; then wrap_good "$1 command" 'available' else wrap_bad "$1 command" 'missing' fi } check_device() { if [ -c "$1" ]; then wrap_good "$1" 'present' else wrap_bad "$1" 'missing' fi } if [ ! -e "$CONFIG" ]; then wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." for tryConfig in "${possibleConfigs[@]}"; do if [ -e "$tryConfig" ]; then CONFIG="$tryConfig" break fi done if [ ! -e "$CONFIG" ]; then wrap_warning "error: cannot find kernel config" wrap_warning " try running this script again, specifying the kernel config:" wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" exit 1 fi fi wrap_color "info: reading kernel config from $CONFIG ..." 
white echo echo 'Generally Necessary:' echo -n '- ' cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" cgroupDir="$(dirname "$cgroupSubsystemDir")" if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" else if [ "$cgroupSubsystemDir" ]; then echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" else echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" fi echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" fi if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then echo -n '- ' if command -v apparmor_parser &> /dev/null; then echo "$(wrap_good 'apparmor' 'enabled and tools installed')" else echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" echo -n ' ' if command -v apt-get &> /dev/null; then echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" elif command -v yum &> /dev/null; then echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" else echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" fi fi fi flags=( NAMESPACES {NET,PID,IPC,UTS}_NS DEVPTS_MULTIPLE_INSTANCES CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG MACVLAN VETH BRIDGE BRIDGE_NETFILTER NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} NF_NAT NF_NAT_NEEDED # required for bind-mounting /dev/mqueue into containers POSIX_MQUEUE ) check_flags "${flags[@]}" echo echo 'Optional Features:' { check_flags USER_NS } { check_flags SECCOMP } { check_flags MEMCG_KMEM MEMCG_SWAP MEMCG_SWAP_ENABLED if is_set MEMCG_SWAP && ! 
is_set MEMCG_SWAP_ENABLED; then echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)" fi } if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then check_flags RESOURCE_COUNTERS fi if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then netprio=NETPRIO_CGROUP else netprio=CGROUP_NET_PRIO fi flags=( BLK_CGROUP IOSCHED_CFQ BLK_DEV_THROTTLING CGROUP_PERF CGROUP_HUGETLB NET_CLS_CGROUP $netprio CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED ) check_flags "${flags[@]}" check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)" fi check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)" fi echo '- Storage Drivers:' { echo '- "'$(wrap_color 'aufs' blue)'":' check_flags AUFS_FS | sed 's/^/ /' if ! 
is_set AUFS_FS && grep -q aufs /proc/filesystems; then echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" fi echo '- "'$(wrap_color 'btrfs' blue)'":' check_flags BTRFS_FS | sed 's/^/ /' echo '- "'$(wrap_color 'devicemapper' blue)'":' check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /' echo '- "'$(wrap_color 'overlay' blue)'":' check_flags OVERLAY_FS | sed 's/^/ /' echo '- "'$(wrap_color 'zfs' blue)'":' echo " - $(check_device /dev/zfs)" echo " - $(check_command zfs)" echo " - $(check_command zpool)" } | sed 's/^/ /' echo docker-1.10.3/contrib/completion/000077500000000000000000000000001267010174400166565ustar00rootroot00000000000000docker-1.10.3/contrib/completion/REVIEWERS000066400000000000000000000001341267010174400201520ustar00rootroot00000000000000Tianon Gravi (@tianon) Jessie Frazelle (@jfrazelle) docker-1.10.3/contrib/completion/bash/000077500000000000000000000000001267010174400175735ustar00rootroot00000000000000docker-1.10.3/contrib/completion/bash/docker000066400000000000000000001237301267010174400207730ustar00rootroot00000000000000#!/bin/bash # # bash completion file for core docker commands # # This script provides completion of: # - commands and their options # - container ids and names # - image repos and tags # - filepaths # # To enable the completions either: # - place this file in /etc/bash_completion.d # or # - copy this file to e.g. ~/.docker-completion.sh and add the line # below to your .bashrc after bash completion features are loaded # . ~/.docker-completion.sh # # Configuration: # # For several commands, the amount of completions can be configured by # setting environment variables. 
# # DOCKER_COMPLETION_SHOW_NETWORK_IDS # "no" - Show names only (default) # "yes" - Show names and ids # # You can tailor completion for the "events", "history", "inspect", "run", # "rmi" and "save" commands by settings the following environment # variables: # # DOCKER_COMPLETION_SHOW_IMAGE_IDS # "none" - Show names only (default) # "non-intermediate" - Show names and ids, but omit intermediate image IDs # "all" - Show names and ids, including intermediate image IDs # # DOCKER_COMPLETION_SHOW_TAGS # "yes" - include tags in completion options (default) # "no" - don't include tags in completion options # # Note: # Currently, the completions will not work if the docker daemon is not # bound to the default communication port/socket # If the docker daemon is using a unix socket for communication your user # must have access to the socket for the completions to function correctly # # Note for developers: # Please arrange options sorted alphabetically by long name with the short # options immediately following their corresponding long form. # This order should be applied to lists, alternatives and code blocks. 
__docker_previous_extglob_setting=$(shopt -p extglob) shopt -s extglob __docker_q() { docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@" } __docker_complete_containers_all() { local IFS=$'\n' local containers=( $(__docker_q ps -aq --no-trunc) ) if [ "$1" ]; then containers=( $(__docker_q inspect --format "{{if $1}}{{.Id}}{{end}}" "${containers[@]}") ) fi local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) names=( "${names[@]#/}" ) # trim off the leading "/" from the container names unset IFS COMPREPLY=( $(compgen -W "${names[*]} ${containers[*]}" -- "$cur") ) } __docker_complete_containers_running() { __docker_complete_containers_all '.State.Running' } __docker_complete_containers_stopped() { __docker_complete_containers_all 'not .State.Running' } __docker_complete_containers_pauseable() { __docker_complete_containers_all 'and .State.Running (not .State.Paused)' } __docker_complete_containers_unpauseable() { __docker_complete_containers_all '.State.Paused' } __docker_complete_container_names() { local containers=( $(__docker_q ps -aq --no-trunc) ) local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) names=( "${names[@]#/}" ) # trim off the leading "/" from the container names COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) } __docker_complete_container_ids() { local containers=( $(__docker_q ps -aq) ) COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") ) } __docker_complete_images() { local images_args="" case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in all) images_args="--no-trunc -a" ;; non-intermediate) images_args="--no-trunc" ;; esac local repo_print_command if [ "${DOCKER_COMPLETION_SHOW_TAGS:-yes}" = "yes" ]; then repo_print_command='print $1; print $1":"$2' else repo_print_command='print $1' fi local awk_script case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in all|non-intermediate) awk_script='NR>1 { print $3; if ($1 != "") { '"$repo_print_command"' } }' ;; none|*) awk_script='NR>1 && $1 
!= "" { '"$repo_print_command"' }' ;; esac local images=$(__docker_q images $images_args | awk "$awk_script") COMPREPLY=( $(compgen -W "$images" -- "$cur") ) __ltrim_colon_completions "$cur" } __docker_complete_image_repos() { local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) } __docker_complete_image_repos_and_tags() { local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) __ltrim_colon_completions "$cur" } __docker_complete_containers_and_images() { __docker_complete_containers_all local containers=( "${COMPREPLY[@]}" ) __docker_complete_images COMPREPLY+=( "${containers[@]}" ) } __docker_networks() { # By default, only network names are completed. # Set DOCKER_COMPLETION_SHOW_NETWORK_IDS=yes to also complete network IDs. local fields='$2' [ "${DOCKER_COMPLETION_SHOW_NETWORK_IDS}" = yes ] && fields='$1,$2' __docker_q network ls --no-trunc | awk "NR>1 {print $fields}" } __docker_complete_networks() { COMPREPLY=( $(compgen -W "$(__docker_networks)" -- "$cur") ) } __docker_complete_network_ids() { COMPREPLY=( $(compgen -W "$(__docker_q network ls -q --no-trunc)" -- "$cur") ) } __docker_complete_network_names() { COMPREPLY=( $(compgen -W "$(__docker_q network ls | awk 'NR>1 {print $2}')" -- "$cur") ) } __docker_complete_containers_in_network() { local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1") COMPREPLY=( $(compgen -W "$containers" -- "$cur") ) } __docker_complete_volumes() { COMPREPLY=( $(compgen -W "$(__docker_q volume ls -q)" -- "$cur") ) } __docker_plugins() { __docker_q info | sed -n "/^Plugins/,/^[^ ]/s/ $1: //p" } __docker_complete_plugins() { COMPREPLY=( $(compgen -W "$(__docker_plugins $1)" -- "$cur") ) } # Finds the position of the first word that is neither option nor an option's argument. 
# If there are options that require arguments, you should pass a glob describing those # options, e.g. "--option1|-o|--option2" # Use this function to restrict completions to exact positions after the argument list. __docker_pos_first_nonflag() { local argument_flags=$1 local counter=$((${subcommand_pos:-${command_pos}} + 1)) while [ $counter -le $cword ]; do if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then (( counter++ )) # eat "=" in case of --option=arg syntax [ "${words[$counter]}" = "=" ] && (( counter++ )) else case "${words[$counter]}" in -*) ;; *) break ;; esac fi # Bash splits words at "=", retaining "=" as a word, examples: # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words while [ "${words[$counter + 1]}" = "=" ] ; do counter=$(( counter + 2)) done (( counter++ )) done echo $counter } # If we are currently completing the value of a map option (key=value) # which matches the extglob given as an argument, returns key. # This function is needed for key-specific completions. # TODO use this in all "${words[$cword-2]}$prev=" occurrences __docker_map_key_of_current_option() { local glob="$1" local key glob_pos if [ "$cur" = "=" ] ; then # key= case key="$prev" glob_pos=$((cword - 2)) elif [[ $cur == *=* ]] ; then # key=value case (OSX) key=${cur%=*} glob_pos=$((cword - 1)) elif [ "$prev" = "=" ] ; then key=${words[$cword - 2]} # key=value case glob_pos=$((cword - 3)) else return fi [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" } # Returns the value of the first option matching option_glob. # Valid values for option_glob are option names like '--log-level' and # globs like '--log-level|-l' # Only positions between the command and the current word are considered. 
__docker_value_of_option() { local option_extglob=$(__docker_to_extglob "$1") local counter=$((command_pos + 1)) while [ $counter -lt $cword ]; do case ${words[$counter]} in $option_extglob ) echo ${words[$counter + 1]} break ;; esac (( counter++ )) done } # Transforms a multiline list of strings into a single line string # with the words separated by "|". # This is used to prepare arguments to __docker_pos_first_nonflag(). __docker_to_alternatives() { local parts=( $1 ) local IFS='|' echo "${parts[*]}" } # Transforms a multiline list of options into an extglob pattern # suitable for use in case statements. __docker_to_extglob() { local extglob=$( __docker_to_alternatives "$1" ) echo "@($extglob)" } # Subcommand processing. # Locates the first occurrence of any of the subcommands contained in the # first argument. In case of a match, calls the corresponding completion # function and returns 0. # If no match is found, 1 is returned. The calling function can then # continue processing its completion. # # TODO if the preceding command has options that accept arguments and an # argument is equal ot one of the subcommands, this is falsely detected as # a match. 
__docker_subcommands() { local subcommands="$1" local counter=$(($command_pos + 1)) while [ $counter -lt $cword ]; do case "${words[$counter]}" in $(__docker_to_extglob "$subcommands") ) subcommand_pos=$counter local subcommand=${words[$counter]} local completions_func=_docker_${command}_${subcommand} declare -F $completions_func >/dev/null && $completions_func return 0 ;; esac (( counter++ )) done return 1 } # suppress trailing whitespace __docker_nospace() { # compopt is not available in ancient bash versions type compopt &>/dev/null && compopt -o nospace } __docker_complete_resolved_hostname() { command -v host >/dev/null 2>&1 || return COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) } __docker_complete_capabilities() { # The list of capabilities is defined in types.go, ALL was added manually. COMPREPLY=( $( compgen -W " ALL AUDIT_CONTROL AUDIT_WRITE AUDIT_READ BLOCK_SUSPEND CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER FSETID IPC_LOCK IPC_OWNER KILL LEASE LINUX_IMMUTABLE MAC_ADMIN MAC_OVERRIDE MKNOD NET_ADMIN NET_BIND_SERVICE NET_BROADCAST NET_RAW SETFCAP SETGID SETPCAP SETUID SYS_ADMIN SYS_BOOT SYS_CHROOT SYSLOG SYS_MODULE SYS_NICE SYS_PACCT SYS_PTRACE SYS_RAWIO SYS_RESOURCE SYS_TIME SYS_TTY_CONFIG WAKE_ALARM " -- "$cur" ) ) } __docker_complete_detach-keys() { case "$prev" in --detach-keys) case "$cur" in *,) COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) ) ;; esac __docker_nospace return ;; esac return 1 } __docker_complete_isolation() { COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) ) } __docker_complete_log_drivers() { COMPREPLY=( $( compgen -W " awslogs fluentd gelf journald json-file none splunk syslog " -- "$cur" ) ) } __docker_complete_log_options() { # see docs/reference/logging/index.md local awslogs_options="awslogs-region awslogs-group awslogs-stream" local fluentd_options="env fluentd-address labels tag" local gelf_options="env gelf-address labels 
tag" local journald_options="env labels" local json_file_options="env labels max-file max-size" local syslog_options="syslog-address syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify syslog-facility tag" local splunk_options="env labels splunk-caname splunk-capath splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url tag" local all_options="$fluentd_options $gelf_options $journald_options $json_file_options $syslog_options $splunk_options" case $(__docker_value_of_option --log-driver) in '') COMPREPLY=( $( compgen -W "$all_options" -S = -- "$cur" ) ) ;; awslogs) COMPREPLY=( $( compgen -W "$awslogs_options" -S = -- "$cur" ) ) ;; fluentd) COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) ) ;; gelf) COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) ) ;; journald) COMPREPLY=( $( compgen -W "$journald_options" -S = -- "$cur" ) ) ;; json-file) COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) ) ;; syslog) COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) ) ;; splunk) COMPREPLY=( $( compgen -W "$splunk_options" -S = -- "$cur" ) ) ;; *) return ;; esac __docker_nospace } __docker_complete_log_driver_options() { # "=" gets parsed to a word and assigned to either $cur or $prev depending on whether # it is the last character or not. So we search for "xxx=" in the the last two words. 
case "${words[$cword-2]}$prev=" in *gelf-address=*) COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur#=}" ) ) __docker_nospace return ;; *syslog-address=*) COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- "${cur#=}" ) ) __docker_nospace __ltrim_colon_completions "${cur}" return ;; *syslog-facility=*) COMPREPLY=( $( compgen -W " auth authpriv cron daemon ftp kern local0 local1 local2 local3 local4 local5 local6 local7 lpr mail news syslog user uucp " -- "${cur#=}" ) ) return ;; *syslog-tls-@(ca-cert|cert|key)=*) _filedir return ;; *syslog-tls-skip-verify=*) COMPREPLY=( $( compgen -W "true" -- "${cur#=}" ) ) return ;; *splunk-url=*) COMPREPLY=( $( compgen -W "http:// https://" -- "${cur#=}" ) ) __docker_nospace __ltrim_colon_completions "${cur}" return ;; *splunk-insecureskipverify=*) COMPREPLY=( $( compgen -W "true false" -- "${cur#=}" ) ) __docker_nospace return ;; esac return 1 } __docker_complete_log_levels() { COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) } # a selection of the available signals that is most likely of interest in the # context of docker containers. 
__docker_complete_signals() { local signals=( SIGCONT SIGHUP SIGINT SIGKILL SIGQUIT SIGSTOP SIGTERM SIGUSR1 SIGUSR2 ) COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) } # global options that may appear after the docker command _docker_docker() { local boolean_options=" $global_boolean_options --help --version -v " case "$prev" in --config) _filedir -d return ;; --log-level|-l) __docker_complete_log_levels return ;; $(__docker_to_extglob "$global_options_with_args") ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) ;; *) local counter=$( __docker_pos_first_nonflag $(__docker_to_extglob "$global_options_with_args") ) if [ $cword -eq $counter ]; then COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) fi ;; esac } _docker_attach() { __docker_complete_detach-keys && return case "$cur" in -*) COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '--detach-keys') if [ $cword -eq $counter ]; then __docker_complete_containers_running fi ;; esac } _docker_build() { local options_with_args=" --build-arg --cgroup-parent --cpuset-cpus --cpuset-mems --cpu-shares --cpu-period --cpu-quota --file -f --isolation --memory -m --memory-swap --shm-size --tag -t --ulimit " local boolean_options=" --disable-content-trust=false --force-rm --help --no-cache --pull --quiet -q --rm " local all_options="$options_with_args $boolean_options" case "$prev" in --build-arg) COMPREPLY=( $( compgen -e -- "$cur" ) ) __docker_nospace return ;; --file|-f) _filedir return ;; --isolation) __docker_complete_isolation return ;; --tag|-t) __docker_complete_image_repos_and_tags return ;; $(__docker_to_extglob "$options_with_args") ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) ;; *) local counter=$( __docker_pos_first_nonflag $( 
__docker_to_alternatives "$options_with_args" ) ) if [ $cword -eq $counter ]; then _filedir -d fi ;; esac } _docker_commit() { case "$prev" in --author|-a|--change|-c|--message|-m) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause -p" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') if [ $cword -eq $counter ]; then __docker_complete_containers_all return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_complete_image_repos_and_tags return fi ;; esac } _docker_cp() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then case "$cur" in *:) return ;; *) # combined container and filename completion _filedir local files=( ${COMPREPLY[@]} ) __docker_complete_containers_all COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) local containers=( ${COMPREPLY[@]} ) COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) ) if [[ "$COMPREPLY" == *: ]]; then __docker_nospace fi return ;; esac fi (( counter++ )) if [ $cword -eq $counter ]; then if [ -e "$prev" ]; then __docker_complete_containers_all COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) __docker_nospace else _filedir fi return fi ;; esac } _docker_create() { _docker_run } _docker_daemon() { local boolean_options=" $global_boolean_options --disable-legacy-registry --help --icc=false --ip-forward=false --ip-masq=false --iptables=false --ipv6 --selinux-enabled --userland-proxy=false " local options_with_args=" $global_options_with_args --api-cors-header --authorization-plugin --bip --bridge -b --cgroup-parent --cluster-advertise --cluster-store --cluster-store-opt --default-gateway --default-gateway-v6 --default-ulimit --dns --dns-search --dns-opt --exec-opt --exec-root --fixed-cidr --fixed-cidr-v6 --graph -g --group -G --insecure-registry --ip --label 
--log-driver --log-opt --mtu --pidfile -p --registry-mirror --storage-driver -s --storage-opt --userns-remap " case "$prev" in --authorization-plugin) __docker_complete_plugins Authorization return ;; --cluster-store) COMPREPLY=( $( compgen -W "consul etcd zk" -S "://" -- "$cur" ) ) __docker_nospace return ;; --cluster-store-opt) COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) __docker_nospace return ;; --exec-root|--graph|-g) _filedir -d return ;; --log-driver) __docker_complete_log_drivers return ;; --pidfile|-p|--tlscacert|--tlscert|--tlskey) _filedir return ;; --storage-driver|-s) COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) return ;; --storage-opt) local devicemapper_options=" dm.basesize dm.blkdiscard dm.blocksize dm.fs dm.loopdatasize dm.loopmetadatasize dm.mkfsarg dm.mountopt dm.override_udev_sync_check dm.thinpooldev dm.use_deferred_deletion dm.use_deferred_removal " local zfs_options="zfs.fsname" case $(__docker_value_of_option '--storage-driver|-s') in '') COMPREPLY=( $( compgen -W "$devicemapper_options $zfs_options" -S = -- "$cur" ) ) ;; devicemapper) COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) ;; zfs) COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) ;; *) return ;; esac __docker_nospace return ;; --log-level|-l) __docker_complete_log_levels return ;; --log-opt) __docker_complete_log_options return ;; --userns-remap) if [[ $cur == *:* ]] ; then COMPREPLY=( $(compgen -g -- "${cur#*:}") ) else COMPREPLY=( $(compgen -u -S : -- "$cur") ) __docker_nospace fi return ;; $(__docker_to_extglob "$options_with_args") ) return ;; esac __docker_complete_log_driver_options && return case "${words[$cword-2]}$prev=" in # completions for --storage-opt *dm.@(blkdiscard|override_udev_sync_check|use_deferred_@(removal|deletion))=*) COMPREPLY=( $( compgen -W "false true" -- "${cur#=}" ) ) 
return ;; *dm.fs=*) COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur#=}" ) ) return ;; *dm.thinpooldev=*) _filedir return ;; # completions for --cluster-store-opt *kv.*file=*) _filedir return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) ;; esac } _docker_diff() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_containers_all fi ;; esac } _docker_events() { local filter=$(__docker_map_key_of_current_option '-f|--filter') case "$filter" in container) cur="${cur##*=}" __docker_complete_containers_all return ;; event) COMPREPLY=( $( compgen -W " attach commit connect copy create delete destroy die disconnect exec_create exec_start export import kill mount oom pause pull push rename resize restart start stop tag top unmount unpause untag update " -- "${cur##*=}" ) ) return ;; image) cur="${cur##*=}" __docker_complete_images return ;; network) cur="${cur##*=}" __docker_complete_networks return ;; type) COMPREPLY=( $( compgen -W "container image network volume" -- "${cur##*=}" ) ) return ;; volume) cur="${cur##*=}" __docker_complete_volumes return ;; esac case "$prev" in --filter|-f) COMPREPLY=( $( compgen -S = -W "container event image label network type volume" -- "$cur" ) ) __docker_nospace return ;; --since|--until) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--filter -f --help --since --until" -- "$cur" ) ) ;; esac } _docker_exec() { __docker_complete_detach-keys && return case "$prev" in --user|-u) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--detach -d --detach-keys --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) ) ;; *) __docker_complete_containers_running ;; esac } _docker_export() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then 
__docker_complete_containers_all fi ;; esac } _docker_help() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) fi } _docker_history() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --no-trunc --quiet -q" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_images fi ;; esac } _docker_images() { case "$prev" in --filter|-f) COMPREPLY=( $( compgen -S = -W "dangling label" -- "$cur" ) ) __docker_nospace return ;; --format) return ;; esac case "${words[$cword-2]}$prev=" in *dangling=*) COMPREPLY=( $( compgen -W "true false" -- "${cur#=}" ) ) return ;; *label=*) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) ;; =) return ;; *) __docker_complete_image_repos ;; esac } _docker_import() { case "$prev" in --change|-c|--message|-m) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m') if [ $cword -eq $counter ]; then return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_complete_image_repos_and_tags return fi ;; esac } _docker_info() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; esac } _docker_inspect() { case "$prev" in --format|-f) return ;; --type) COMPREPLY=( $( compgen -W "image container" -- "$cur" ) ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--format -f --help --size -s --type" -- "$cur" ) ) ;; *) case $(__docker_value_of_option --type) in '') __docker_complete_containers_and_images ;; container) __docker_complete_containers_all ;; image) __docker_complete_images ;; esac esac } _docker_kill() { case "$prev" in --signal|-s) __docker_complete_signals return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --signal -s" -- 
"$cur" ) ) ;; *) __docker_complete_containers_running ;; esac } _docker_load() { case "$prev" in --input|-i) _filedir return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --input -i" -- "$cur" ) ) ;; esac } _docker_login() { case "$prev" in --email|-e|--password|-p|--username|-u) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--email -e --help --password -p --username -u" -- "$cur" ) ) ;; esac } _docker_logout() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; esac } _docker_logs() { case "$prev" in --since|--tail) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '--tail') if [ $cword -eq $counter ]; then __docker_complete_containers_all fi ;; esac } _docker_network_connect() { local options_with_args=" --alias --ip --ip6 --link " local boolean_options=" --help " case "$prev" in --link) case "$cur" in *:*) ;; *) __docker_complete_containers_running COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) __docker_nospace ;; esac return ;; $(__docker_to_extglob "$options_with_args") ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) ;; *) local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) if [ $cword -eq $counter ]; then __docker_complete_networks elif [ $cword -eq $(($counter + 1)) ]; then __docker_complete_containers_all fi ;; esac } _docker_network_create() { case "$prev" in --aux-address|--gateway|--ip-range|--ipam-opt|--opt|-o|--subnet) return ;; --ipam-driver) COMPREPLY=( $( compgen -W "default" -- "$cur" ) ) return ;; --driver|-d) local plugins=" $(__docker_plugins Network) " # remove drivers that allow one instance only plugins=${plugins/ host / } plugins=${plugins/ null / } COMPREPLY=( $(compgen -W "$plugins" -- "$cur") ) return ;; esac case "$cur" in -*) COMPREPLY=( $( 
compgen -W "--aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --opt -o --subnet" -- "$cur" ) ) ;; esac } _docker_network_disconnect() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_networks elif [ $cword -eq $(($counter + 1)) ]; then __docker_complete_containers_in_network "$prev" fi ;; esac } _docker_network_inspect() { case "$prev" in --format|-f) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) ;; *) __docker_complete_networks esac } _docker_network_ls() { case "$prev" in --filter|-f) COMPREPLY=( $( compgen -S = -W "id name type" -- "$cur" ) ) __docker_nospace return ;; esac case "${words[$cword-2]}$prev=" in *id=*) cur="${cur#=}" __docker_complete_network_ids return ;; *name=*) cur="${cur#=}" __docker_complete_network_names return ;; *type=*) COMPREPLY=( $( compgen -W "builtin custom" -- "${cur#=}" ) ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--filter -f --help --no-trunc --quiet -q" -- "$cur" ) ) ;; esac } _docker_network_rm() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_complete_networks esac } _docker_network() { local subcommands=" connect create disconnect inspect ls rm " __docker_subcommands "$subcommands" && return case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) ;; esac } _docker_pause() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_containers_pauseable fi ;; esac } _docker_port() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_containers_all fi ;; esac } 
_docker_ps() { case "$prev" in --before|--since) __docker_complete_containers_all ;; --filter|-f) COMPREPLY=( $( compgen -S = -W "ancestor exited id label name status" -- "$cur" ) ) __docker_nospace return ;; --format|-n) return ;; esac case "${words[$cword-2]}$prev=" in *ancestor=*) cur="${cur#=}" __docker_complete_images return ;; *id=*) cur="${cur#=}" __docker_complete_container_ids return ;; *name=*) cur="${cur#=}" __docker_complete_container_names return ;; *status=*) COMPREPLY=( $( compgen -W "created dead exited paused restarting running" -- "${cur#=}" ) ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--all -a --before --filter -f --format --help --latest -l -n --no-trunc --quiet -q --size -s --since" -- "$cur" ) ) ;; esac } _docker_pull() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then for arg in "${COMP_WORDS[@]}"; do case "$arg" in --all-tags|-a) __docker_complete_image_repos return ;; esac done __docker_complete_image_repos_and_tags fi ;; esac } _docker_push() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_image_repos_and_tags fi ;; esac } _docker_rename() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_containers_all fi ;; esac } _docker_restart() { case "$prev" in --time|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) ;; *) __docker_complete_containers_all ;; esac } _docker_rm() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) ;; *) for arg in "${COMP_WORDS[@]}"; do case "$arg" in --force|-f) 
__docker_complete_containers_all return ;; esac done __docker_complete_containers_stopped ;; esac } _docker_rmi() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) ;; *) __docker_complete_images ;; esac } _docker_run() { local options_with_args=" --add-host --attach -a --blkio-weight --blkio-weight-device --cap-add --cap-drop --cgroup-parent --cidfile --cpu-period --cpu-quota --cpuset-cpus --cpuset-mems --cpu-shares --device --device-read-bps --device-read-iops --device-write-bps --device-write-iops --dns --dns-opt --dns-search --entrypoint --env -e --env-file --expose --group-add --hostname -h --ip --ip6 --ipc --isolation --kernel-memory --label-file --label -l --link --log-driver --log-opt --mac-address --memory -m --memory-swap --memory-swappiness --memory-reservation --name --net --net-alias --oom-score-adj --pid --publish -p --restart --security-opt --shm-size --stop-signal --tmpfs --ulimit --user -u --uts --volume-driver --volumes-from --volume -v --workdir -w " local boolean_options=" --disable-content-trust=false --help --interactive -i --oom-kill-disable --privileged --publish-all -P --read-only --tty -t " if [ "$command" = "run" ] ; then options_with_args="$options_with_args --detach-keys " boolean_options="$boolean_options --detach -d --rm --sig-proxy=false " __docker_complete_detach-keys && return fi local all_options="$options_with_args $boolean_options" case "$prev" in --add-host) case "$cur" in *:) __docker_complete_resolved_hostname return ;; esac ;; --attach|-a) COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) return ;; --cap-add|--cap-drop) __docker_complete_capabilities return ;; --cidfile|--env-file|--label-file) _filedir return ;; --device|--tmpfs|--volume|-v) case "$cur" in *:*) # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) ;; '') COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) __docker_nospace ;; /*) _filedir 
__docker_nospace ;; esac return ;; --env|-e) COMPREPLY=( $( compgen -e -- "$cur" ) ) __docker_nospace return ;; --ipc) case "$cur" in *:*) cur="${cur#*:}" __docker_complete_containers_running ;; *) COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) if [ "$COMPREPLY" = "container:" ]; then __docker_nospace fi ;; esac return ;; --isolation) __docker_complete_isolation return ;; --link) case "$cur" in *:*) ;; *) __docker_complete_containers_running COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) __docker_nospace ;; esac return ;; --log-driver) __docker_complete_log_drivers return ;; --log-opt) __docker_complete_log_options return ;; --net) case "$cur" in container:*) local cur=${cur#*:} __docker_complete_containers_all ;; *) COMPREPLY=( $( compgen -W "$(__docker_plugins Network) $(__docker_networks) container:" -- "$cur") ) if [ "${COMPREPLY[*]}" = "container:" ] ; then __docker_nospace fi ;; esac return ;; --restart) case "$cur" in on-failure:*) ;; *) COMPREPLY=( $( compgen -W "always no on-failure on-failure: unless-stopped" -- "$cur") ) ;; esac return ;; --security-opt) case "$cur" in label:*:*) ;; label:*) local cur=${cur##*:} COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") ) if [ "${COMPREPLY[*]}" != "disable" ] ; then __docker_nospace fi ;; seccomp:*) local cur=${cur##*:} _filedir COMPREPLY+=( $( compgen -W "unconfined" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "label apparmor seccomp" -S ":" -- "$cur") ) __docker_nospace ;; esac return ;; --volume-driver) __docker_complete_plugins Volume return ;; --volumes-from) __docker_complete_containers_all return ;; $(__docker_to_extglob "$options_with_args") ) return ;; esac __docker_complete_log_driver_options && return case "$cur" in -*) COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) ;; *) local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) if [ $cword -eq $counter ]; then __docker_complete_images fi ;; esac } _docker_save() 
{ case "$prev" in --output|-o) _filedir return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) ;; *) __docker_complete_images ;; esac } _docker_search() { case "$prev" in --stars|-s) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--automated --help --no-trunc --stars -s" -- "$cur" ) ) ;; esac } _docker_start() { __docker_complete_detach-keys && return case "$cur" in -*) COMPREPLY=( $( compgen -W "--attach -a --detach-keys --help --interactive -i" -- "$cur" ) ) ;; *) __docker_complete_containers_stopped ;; esac } _docker_stats() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--all -a --help --no-stream" -- "$cur" ) ) ;; *) __docker_complete_containers_running ;; esac } _docker_stop() { case "$prev" in --time|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) ;; *) __docker_complete_containers_running ;; esac } _docker_tag() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_image_repos_and_tags return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_complete_image_repos_and_tags return fi ;; esac } _docker_unpause() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_containers_unpauseable fi ;; esac } _docker_update() { local options_with_args=" --blkio-weight --cpu-period --cpu-quota --cpuset-cpus --cpuset-mems --cpu-shares --kernel-memory --memory -m --memory-reservation --memory-swap " local boolean_options=" --help " local all_options="$options_with_args $boolean_options" case "$prev" in $(__docker_to_extglob "$options_with_args") ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) ;; *) __docker_complete_containers_all ;; esac } _docker_top() { case "$cur" in -*) 
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_complete_containers_running fi ;; esac } _docker_version() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; esac } _docker_volume_create() { case "$prev" in --driver|-d) __docker_complete_plugins Volume return ;; --name|--opt|-o) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--driver -d --help --name --opt -o" -- "$cur" ) ) ;; esac } _docker_volume_inspect() { case "$prev" in --format|-f) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) ;; *) __docker_complete_volumes ;; esac } _docker_volume_ls() { case "$prev" in --filter|-f) COMPREPLY=( $( compgen -S = -W "dangling" -- "$cur" ) ) __docker_nospace return ;; esac case "${words[$cword-2]}$prev=" in *dangling=*) COMPREPLY=( $( compgen -W "true false" -- "${cur#=}" ) ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) ;; esac } _docker_volume_rm() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_complete_volumes ;; esac } _docker_volume() { local subcommands=" create inspect ls rm " __docker_subcommands "$subcommands" && return case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) ;; esac } _docker_wait() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_complete_containers_all ;; esac } _docker() { local previous_extglob_setting=$(shopt -p extglob) shopt -s extglob local commands=( attach build commit cp create daemon diff events exec export history images import info inspect kill load login logout logs network pause port ps pull push rename restart rm rmi run save search start stats stop tag top unpause update version volume wait ) # These options are valid as global options for all client 
commands # and valid as command options for `docker daemon` local global_boolean_options=" --debug -D --tls --tlsverify " local global_options_with_args=" --config --host -H --log-level -l --tlscacert --tlscert --tlskey " local host config COMPREPLY=() local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword local command='docker' command_pos=0 subcommand_pos local counter=1 while [ $counter -lt $cword ]; do case "${words[$counter]}" in # save host so that completion can use custom daemon --host|-H) (( counter++ )) host="${words[$counter]}" ;; # save config so that completion can use custom configuration directories --config) (( counter++ )) config="${words[$counter]}" ;; $(__docker_to_extglob "$global_options_with_args") ) (( counter++ )) ;; -*) ;; =) (( counter++ )) ;; *) command="${words[$counter]}" command_pos=$counter break ;; esac (( counter++ )) done local completions_func=_docker_${command} declare -F $completions_func >/dev/null && $completions_func eval "$previous_extglob_setting" return 0 } eval "$__docker_previous_extglob_setting" unset __docker_previous_extglob_setting complete -F _docker docker docker-1.10.3/contrib/completion/fish/000077500000000000000000000000001267010174400176075ustar00rootroot00000000000000docker-1.10.3/contrib/completion/fish/docker.fish000066400000000000000000001050721267010174400217360ustar00rootroot00000000000000# docker.fish - docker completions for fish shell # # This file is generated by gen_docker_fish_completions.py from: # https://github.com/barnybug/docker-fish-completion # # To install the completions: # mkdir -p ~/.config/fish/completions # cp docker.fish ~/.config/fish/completions # # Completion supported: # - parameters # - commands # - containers # - images # - repositories function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' for i in (commandline -opc) if contains -- $i attach build commit cp create diff events exec export history images import 
info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats return 1 end end return 0 end function __fish_print_docker_containers --description 'Print a list of docker containers' -a select switch $select case running docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF)}' | tr ',' '\n' case stopped docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF)}' | tr ',' '\n' case all docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF)}' | tr ',' '\n' end end function __fish_print_docker_images --description 'Print a list of docker images' docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1":"$2}' end function __fish_print_docker_repositories --description 'Print a list of docker repositories' docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | command sort | command uniq end # common options complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the remote API. 
Default is cors disabled" complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge' complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set exec driver options' complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)' complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' 
complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level (debug, info, warn, error, fatal)' complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. 
SELinux does not presently support the BTRFS storage driver' complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' # subcommands # attach complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" # build complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile(Default is 'Dockerfile' at context root)" complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' # commit complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith ")' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" # cp 
complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem" complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' # create complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of :alias' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 
92:d0:c6:0a:29:33)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory 
inside the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" # diff complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" # events complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' # exec complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" # export complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a 
'(__fish_print_docker_containers all)' -d "Container" # history complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" # images complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" # import complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' # info complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' # inspect complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" # kill complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" # load complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' # login complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or log in to a Docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' # logout complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' # logs complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l 
follow -d 'Follow log output' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" # port complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" # pause complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container" # ps complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' # pull complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" # push complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" # rename complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' # restart complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container' complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 
'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.' complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" # rm complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" # rmi complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" # run complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of :alias' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 
92:d0:c6:0a:29:33)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to kill a container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" # save complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to an file, instead of STDOUT' complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" # search complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least x stars' # start complete -c docker -f -n '__fish_docker_no_subcommand' -a 
start -d 'Start a container' complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" # stats complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result' complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" # stop complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container' complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" # tag complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' # top complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" # unpause complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" # version complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' # wait complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" docker-1.10.3/contrib/completion/zsh/000077500000000000000000000000001267010174400174625ustar00rootroot00000000000000docker-1.10.3/contrib/completion/zsh/REVIEWERS000066400000000000000000000001341267010174400207560ustar00rootroot00000000000000Tianon Gravi (@tianon) Jessie Frazelle (@jfrazelle) docker-1.10.3/contrib/completion/zsh/_docker000066400000000000000000001376111267010174400210240ustar00rootroot00000000000000#compdef docker # # zsh completion for docker (http://docker.com) # # version: 0.3.0 # github: https://github.com/felixr/docker-zsh-completion # # 
contributors: # - Felix Riedel # - Steve Durrheimer # - Vincent Bernat # # license: # # Copyright (c) 2013, Felix Riedel # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # Short-option stacking can be enabled with: # zstyle ':completion:*:*:docker:*' option-stacking yes # zstyle ':completion:*:*:docker-*:*' option-stacking yes __docker_arguments() { if zstyle -t ":completion:${curcontext}:" option-stacking; then print -- -s fi } __docker_get_containers() { [[ $PREFIX = -* ]] && return 1 integer ret=1 local kind declare -a running stopped lines args kind=$1 shift [[ $kind = (stopped|all) ]] && args=($args -a) lines=(${(f)"$(_call_program commands docker $docker_options ps --no-trunc $args)"}) # Parse header line to find columns local i=1 j=1 k header=${lines[1]} declare -A begin end while (( j < ${#header} - 1 )); do i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) begin[${header[$i,$((j-1))]}]=$i end[${header[$i,$((j-1))]}]=$k done end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line lines=(${lines[2,-1]}) # Container ID local line local s for line in $lines; do s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}" s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then stopped=($stopped $s) else running=($running $s) fi done # Names: we only display the one without slash. All other names # are generated and may clutter the completion. However, with # Swarm, all names may be prefixed by the swarm node name. local -a names for line in $lines; do names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}}) # First step: find a common prefix and strip it (swarm node case) (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/} # Second step: only keep the first name without a / s=${${names:#*/*}[1]} # If no name, well give up. 
(( $#s != 0 )) || continue s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then stopped=($stopped $s) else running=($running $s) fi done [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 return ret } __docker_stoppedcontainers() { [[ $PREFIX = -* ]] && return 1 __docker_get_containers stopped "$@" } __docker_runningcontainers() { [[ $PREFIX = -* ]] && return 1 __docker_get_containers running "$@" } __docker_containers() { [[ $PREFIX = -* ]] && return 1 __docker_get_containers all "$@" } __docker_images() { [[ $PREFIX = -* ]] && return 1 integer ret=1 declare -a images images=(${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) _describe -t docker-images "images" images && ret=0 __docker_repositories_with_tags && ret=0 return ret } __docker_repositories() { [[ $PREFIX = -* ]] && return 1 declare -a repos repos=(${${${(f)"$(_call_program commands docker $docker_options images)"}%% *}[2,-1]}) repos=(${repos#}) _describe -t docker-repos "repositories" repos } __docker_repositories_with_tags() { [[ $PREFIX = -* ]] && return 1 integer ret=1 declare -a repos onlyrepos matched declare m repos=(${${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/ ##/:::}%% *}) repos=(${${repos%:::}#}) # Check if we have a prefix-match for the current prefix. 
onlyrepos=(${repos%::*}) for m in $onlyrepos; do [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { # Yes, complete with tags repos=(${${repos/:::/:}/:/\\:}) _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 return ret } done # No, only complete repositories onlyrepos=(${${repos%:::*}/:/\\:}) _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 return ret } __docker_search() { [[ $PREFIX = -* ]] && return 1 local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi local searchterm cachename searchterm="${words[$CURRENT]%/}" cachename=_docker-search-$searchterm local expl local -a result if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ && ! _retrieve_cache ${cachename#_}; then _message "Searching for ${searchterm}..." result=(${${${(f)"$(_call_program commands docker $docker_options search $searchterm)"}%% *}[2,-1]}) _store_cache ${cachename#_} result fi _wanted dockersearch expl 'available images' compadd -a result } __docker_get_log_options() { [[ $PREFIX = -* ]] && return 1 integer ret=1 local log_driver=${opt_args[--log-driver]:-"all"} local -a awslogs_options fluentd_options gelf_options journald_options json_file_options syslog_options splunk_options awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream") fluentd_options=("env" "fluentd-address" "labels" "tag") gelf_options=("env" "gelf-address" "labels" "tag") journald_options=("env" "labels") json_file_options=("env" "labels" "max-file" "max-size") syslog_options=("syslog-address" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "syslog-facility" "tag") splunk_options=("env" "labels" "splunk-caname" "splunk-capath" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "tag") [[ $log_driver = (awslogs|all) ]] && _describe -t 
awslogs-options "awslogs options" awslogs_options "$@" && ret=0 [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0 [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0 [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0 [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0 [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0 [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0 return ret } __docker_log_options() { [[ $PREFIX = -* ]] && return 1 integer ret=1 if compset -P '*='; then _message 'value' && ret=0 else __docker_get_log_options -qS "=" && ret=0 fi return ret } __docker_complete_detach_keys() { [[ $PREFIX = -* ]] && return 1 integer ret=1 compset -P "*," keys=(${:-{a-z}}) ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 } __docker_networks() { [[ $PREFIX = -* ]] && return 1 integer ret=1 declare -a lines networks lines=(${(f)"$(_call_program commands docker $docker_options network ls)"}) # Parse header line to find columns local i=1 j=1 k header=${lines[1]} declare -A begin end while (( j < ${#header} - 1 )); do i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) begin[${header[$i,$((j-1))]}]=$i end[${header[$i,$((j-1))]}]=$k done end[${header[$i,$((j-1))]}]=-1 lines=(${lines[2,-1]}) # Network ID local line s for line in $lines; do s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}" s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" 
networks=($networks $s) done # Names for line in $lines; do s="${line[${begin[NAME]},${end[NAME]}]%% ##}" s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" networks=($networks $s) done _describe -t networks-list "networks" networks && ret=0 return ret } __docker_network_commands() { local -a _docker_network_subcommands _docker_network_subcommands=( "connect:onnects a container to a network" "create:Creates a new network with a name specified by the user" "disconnect:Disconnects a container from a network" "inspect:Displays detailed information on a network" "ls:Lists all the networks created by the user" "rm:Deletes one or more networks" ) _describe -t docker-network-commands "docker network command" _docker_network_subcommands } __docker_network_subcommand() { local -a _command_args opts_help local expl help="--help" integer ret=1 opts_help=("(: -)--help[Print usage]") case "$words[1]" in (connect) _arguments $(__docker_arguments) \ $opts_help \ "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ "($help)--ip=[Container IPv4 address]:IPv4: " \ "($help)--ip6=[Container IPv6 address]:IPv6: " \ "($help)*--link=[Add a link to another container]:link:->link" \ "($help -)1:network:__docker_networks" \ "($help -)2:containers:__docker_containers" && ret=0 case $state in (link) if compset -P "*:"; then _wanted alias expl "Alias" compadd -E "" && ret=0 else __docker_runningcontainers -qS ":" && ret=0 fi ;; esac ;; (create) _arguments $(__docker_arguments) -A '-*' \ $opts_help \ "($help)*--aux-address[Auxiliary ipv4 or ipv6 addresses used by network driver]:key=IP: " \ "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ "($help)*--gateway=[ipv4 or ipv6 Gateway for the master subnet]:IP: " \ "($help)--internal[Restricts external access to the network]" \ "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ "($help)--ipam-driver=[IP Address Management 
Driver]:driver:(default)" \ "($help)*--ipam-opt=[Set custom IPAM plugin options]:opt=value: " \ "($help)*"{-o=,--opt=}"[Set driver specific options]:opt=value: " \ "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ "($help -)1:Network Name: " && ret=0 ;; (disconnect) _arguments $(__docker_arguments) \ $opts_help \ "($help -)1:network:__docker_networks" \ "($help -)2:containers:__docker_containers" && ret=0 ;; (inspect) _arguments $(__docker_arguments) \ $opts_help \ "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ "($help -)*:network:__docker_networks" && ret=0 ;; (ls) _arguments $(__docker_arguments) \ $opts_help \ "($help)--no-trunc[Do not truncate the output]" \ "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0 ;; (rm) _arguments $(__docker_arguments) \ $opts_help \ "($help -)*:network:__docker_networks" && ret=0 ;; (help) _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 ;; esac return ret } __docker_volumes() { [[ $PREFIX = -* ]] && return 1 integer ret=1 declare -a lines volumes lines=(${(f)"$(_call_program commands docker $docker_options volume ls)"}) # Parse header line to find columns local i=1 j=1 k header=${lines[1]} declare -A begin end while (( j < ${#header} - 1 )); do i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) begin[${header[$i,$((j-1))]}]=$i end[${header[$i,$((j-1))]}]=$k done end[${header[$i,$((j-1))]}]=-1 lines=(${lines[2,-1]}) # Names local line s for line in $lines; do s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}" s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" volumes=($volumes $s) done _describe -t volumes-list "volumes" volumes && ret=0 return ret } __docker_volume_commands() { local -a _docker_volume_subcommands _docker_volume_subcommands=( "create:Create a volume" 
"inspect:Return low-level information on a volume" "ls:List volumes" "rm:Remove a volume" ) _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands } __docker_volume_subcommand() { local -a _command_args opts_help local expl help="--help" integer ret=1 opts_help=("(: -)--help[Print usage]") case "$words[1]" in (create) _arguments $(__docker_arguments) \ $opts_help \ "($help -d --driver)"{-d=,--driver=}"[Specify volume driver name]:Driver name:(local)" \ "($help)--name=[Specify volume name]" \ "($help)*"{-o=,--opt=}"[Set driver specific options]:Driver option: " && ret=0 ;; (inspect) _arguments $(__docker_arguments) \ $opts_help \ "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ "($help -)1:volume:__docker_volumes" && ret=0 ;; (ls) _arguments $(__docker_arguments) \ $opts_help \ "($help)*"{-f=,--filter=}"[Provide filter values (i.e. 'dangling=true')]:filter: " \ "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0 ;; (rm) _arguments $(__docker_arguments) \ $opts_help \ "($help -):volume:__docker_volumes" && ret=0 ;; (help) _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 ;; esac return ret } __docker_caching_policy() { oldp=( "$1"(Nmh+1) ) # 1 hour (( $#oldp )) } __docker_commands() { local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ && ! 
_retrieve_cache docker_subcommands; then local -a lines lines=(${(f)"$(_call_program commands docker 2>&1)"}) _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:}) _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command') (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands fi _describe -t docker-commands "docker command" _docker_subcommands } __docker_subcommand() { local -a _command_args opts_help opts_build_create_run opts_build_create_run_update opts_create_run opts_create_run_update local expl help="--help" integer ret=1 opts_help=("(: -)--help[Print usage]") opts_build_create_run=( "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " "($help)--isolation=[]:isolation:(default hyperv process)" "($help)*--shm-size=[Size of '/dev/shm'. The format is ''. Default is '64m'.]:shm size: " "($help)*--ulimit=[ulimit options]:ulimit: " ) opts_build_create_run_update=( "($help)--cpu-shares=[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " ) opts_create_run=( "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: " "($help)*--cap-add=[Add Linux capabilities]:capability: " "($help)*--cap-drop=[Drop Linux capabilities]:capability: " "($help)--cidfile=[Write the container ID to 
the file]:CID file:_files" "($help)*--device=[Add a host device to the container]:device:_files" "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: " "($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: " "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: " "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: " "($help)*--dns=[Set custom DNS servers]:DNS server: " "($help)*--dns-opt=[Set custom DNS options]:DNS option: " "($help)*--dns-search=[Set custom DNS search domains]:DNS domains: " "($help)*"{-e=,--env=}"[Set environment variables]:environment variable: " "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: " "($help)*--env-file=[Read environment variables from a file]:environment file:_files" "($help)*--expose=[Expose a port from the container without publishing it]: " "($help)*--group-add=[Add additional groups to run as]:group:_groups" "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" "($help)--ip=[Container IPv4 address]:IPv4: " "($help)--ip6=[Container IPv6 address]:IPv6: " "($help)--ipc=[IPC namespace to use]:IPC namespace: " "($help)*--link=[Add link to another container]:link:->link" "($help)*"{-l=,--label=}"[Set meta data on a container]:label: " "($help)--log-driver=[Default driver for container logs]:Logging driver:(json-file syslog journald gelf fluentd awslogs splunk none)" "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_log_options" "($help)--mac-address=[Container MAC address]:MAC address: " "($help)--name=[Container name]:name: " "($help)--net=[Connect a container to a network]:network mode:(bridge none container host)" "($help)*--net-alias=[Add network-scoped alias for the container]:alias: " 
"($help)--oom-kill-disable[Disable OOM Killer]" "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports" "($help)--pid=[PID namespace to use]:PID: " "($help)--privileged[Give extended privileges to this container]" "($help)--read-only[Mount the container's root filesystem as read only]" "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)" "($help)*--security-opt=[Security options]:security option: " "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" "($help)--tmpfs[mount tmpfs]" "($help)*-v[Bind mount a volume]:volume: " "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)" "($help)*--volumes-from=[Mount volumes from the specified container]:volume: " "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" ) opts_create_run_update=( "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" "($help)--kernel-memory=[Kernel memory limit in bytes.]:Memory limit: " "($help)--memory-reservation=[Memory soft limit]:Memory limit: " ) opts_attach_exec_run_start=( "($help)--detach-keys=[Specify the escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys" ) case "$words[1]" in (attach) _arguments $(__docker_arguments) \ $opts_help \ $opts_attach_exec_run_start \ "($help)--no-stdin[Do not attach stdin]" \ "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ "($help -):containers:__docker_runningcontainers" && ret=0 ;; (build) _arguments $(__docker_arguments) \ $opts_help \ $opts_build_create_run \ $opts_build_create_run_update \ "($help)*--build-arg[Set build-time variables]:=: " \ "($help 
-f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \ "($help)--force-rm[Always remove intermediate containers]" \ "($help)--no-cache[Do not use cache when building the image]" \ "($help)--pull[Attempt to pull a newer version of the image]" \ "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ "($help)--rm[Remove intermediate containers after a successful build]" \ "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_repositories_with_tags" \ "($help -):path or URL:_directories" && ret=0 ;; (commit) _arguments $(__docker_arguments) \ $opts_help \ "($help -a --author)"{-a=,--author=}"[Author]:author: " \ "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \ "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ "($help -):container:__docker_containers" \ "($help -): :__docker_repositories_with_tags" && ret=0 ;; (cp) _arguments $(__docker_arguments) \ $opts_help \ "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol link in SRC_PATH]" \ "($help -)1:container:->container" \ "($help -)2:hostpath:_files" && ret=0 case $state in (container) if compset -P "*:"; then _files && ret=0 else __docker_containers -qS ":" && ret=0 fi ;; esac ;; (create) _arguments $(__docker_arguments) \ $opts_help \ $opts_build_create_run \ $opts_build_create_run_update \ $opts_create_run \ $opts_create_run_update \ "($help -): :__docker_images" \ "($help -):command: _command_names -e" \ "($help -)*::arguments: _normal" && ret=0 case $state in (link) if compset -P "*:"; then _wanted alias expl "Alias" compadd -E "" && ret=0 else __docker_runningcontainers -qS ":" && ret=0 fi ;; esac ;; (daemon) _arguments $(__docker_arguments) \ $opts_help \ "($help)--api-cors-header=[Set CORS headers in the remote API]:CORS headers: " \ "($help)*--authorization-plugin=[Set authorization plugins to load]" \ 
"($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ "($help)--bip=[Specify network bridge IP]" \ "($help)--cgroup-parent=[Set parent cgroup for all containers]:cgroup: " \ "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ "($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \ "($help)--cluster-advertise=[Address of the daemon instance to advertise]:Instance to advertise (host\:port): " \ "($help)*--cluster-store-opt=[Set cluster options]:Cluster options:->cluster-store-options" \ "($help)*--dns=[DNS server to use]:DNS: " \ "($help)*--dns-search=[DNS search domains to use]:DNS search: " \ "($help)*--dns-opt=[DNS options to use]:DNS option: " \ "($help)*--default-ulimit=[Set default ulimit settings for containers]:ulimit: " \ "($help)--disable-legacy-registry[Do not contact legacy registries]" \ "($help)*--exec-opt=[Set exec driver options]:exec driver options: " \ "($help)--exec-root=[Root of the Docker execdriver]:path:_directories" \ "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \ "($help -g --graph)"{-g=,--graph=}"[Root of the Docker runtime]:path:_directories" \ "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ "($help)--icc[Enable inter-container communication]" \ "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \ "($help)--ip=[Default IP when binding container ports]" \ "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ "($help)--ip-masq[Enable IP masquerading]" \ "($help)--iptables[Enable addition of iptables rules]" \ "($help)--ipv6[Enable IPv6 networking]" \ 
"($help -l --log-level)"{-l=,--log-level=}"[Set the logging level]:level:(debug info warn error fatal)" \ "($help)*--label=[Set key=value labels to the daemon]:label: " \ "($help)--log-driver=[Default driver for container logs]:Logging driver:(json-file syslog journald gelf fluentd awslogs splunk none)" \ "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_log_options" \ "($help)--mtu=[Set the containers network MTU]:mtu:(0 576 1420 1500 9000)" \ "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \ "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \ "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs devicemapper btrfs zfs overlay)" \ "($help)--selinux-enabled[Enable selinux support]" \ "($help)*--storage-opt=[Set storage driver options]:storage driver options: " \ "($help)--tls[Use TLS]" \ "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \ "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \ "($help)--tlskey=[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \ "($help)--tlsverify[Use TLS and verify the remote]" \ "($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \ "($help)--userland-proxy[Use userland proxy for loopback traffic]" && ret=0 case $state in (cluster-store) if compset -P '*://'; then _message 'host:port' && ret=0 else store=('consul' 'etcd' 'zk') _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0 fi ;; (cluster-store-options) if compset -P '*='; then _files && ret=0 else opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 fi ;; (users-groups) if compset -P '*:'; then _groups && ret=0 else _describe -t userns-default "default Docker user management" '(default)' && 
ret=0 _users && ret=0 fi ;; esac ;; (diff) _arguments $(__docker_arguments) \ $opts_help \ "($help -)*:containers:__docker_containers" && ret=0 ;; (events) _arguments $(__docker_arguments) \ $opts_help \ "($help)*"{-f=,--filter=}"[Filter values]:filter: " \ "($help)--since=[Events created since this timestamp]:timestamp: " \ "($help)--until=[Events created until this timestamp]:timestamp: " && ret=0 ;; (exec) local state _arguments $(__docker_arguments) \ $opts_help \ $opts_attach_exec_run_start \ "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ "($help)--privileged[Give extended Linux capabilities to the command]" \ "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \ "($help -):containers:__docker_runningcontainers" \ "($help -)*::command:->anycommand" && ret=0 case $state in (anycommand) shift 1 words (( CURRENT-- )) _normal && ret=0 ;; esac ;; (export) _arguments $(__docker_arguments) \ $opts_help \ "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \ "($help -)*:containers:__docker_containers" && ret=0 ;; (history) _arguments $(__docker_arguments) \ $opts_help \ "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ "($help)--no-trunc[Do not truncate output]" \ "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ "($help -)*: :__docker_images" && ret=0 ;; (images) _arguments $(__docker_arguments) \ $opts_help \ "($help -a --all)"{-a,--all}"[Show all images]" \ "($help)--digests[Show digests]" \ "($help)*"{-f=,--filter=}"[Filter values]:filter: " \ "($help)--format[Pretty-print containers using a Go template]:format: " \ "($help)--no-trunc[Do not truncate output]" \ "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ "($help -): :__docker_repositories" && ret=0 ;; 
(import) _arguments $(__docker_arguments) \ $opts_help \ "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ "($help -m --message)"{-m=,--message=}"[Set commit message for imported image]:message: " \ "($help -):URL:(- http:// file://)" \ "($help -): :__docker_repositories_with_tags" && ret=0 ;; (info|version) _arguments $(__docker_arguments) \ $opts_help && ret=0 ;; (inspect) local state _arguments $(__docker_arguments) \ $opts_help \ "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \ "($help)--type=[Return JSON for specified type]:type:(image container)" \ "($help -)*: :->values" && ret=0 case $state in (values) if [[ ${words[(r)--type=container]} == --type=container ]]; then __docker_containers && ret=0 elif [[ ${words[(r)--type=image]} == --type=image ]]; then __docker_images && ret=0 else __docker_images && __docker_containers && ret=0 fi ;; esac ;; (kill) _arguments $(__docker_arguments) \ $opts_help \ "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \ "($help -)*:containers:__docker_runningcontainers" && ret=0 ;; (load) _arguments $(__docker_arguments) \ $opts_help \ "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g "*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)"" && ret=0 ;; (login) _arguments $(__docker_arguments) \ $opts_help \ "($help -e --email)"{-e=,--email=}"[Email]:email: " \ "($help -p --password)"{-p=,--password=}"[Password]:password: " \ "($help -u --user)"{-u=,--user=}"[Username]:username: " \ "($help -)1:server: " && ret=0 ;; (logout) _arguments $(__docker_arguments) \ $opts_help \ "($help -)1:server: " && ret=0 ;; (logs) _arguments $(__docker_arguments) \ $opts_help \ "($help -f --follow)"{-f,--follow}"[Follow log output]" \ "($help -s --since)"{-s=,--since=}"[Show logs since this 
timestamp]:timestamp: " \ "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \ "($help -)*:containers:__docker_containers" && ret=0 ;; (network) local curcontext="$curcontext" state _arguments $(__docker_arguments) \ $opts_help \ "($help -): :->command" \ "($help -)*:: :->option-or-argument" && ret=0 case $state in (command) __docker_network_commands && ret=0 ;; (option-or-argument) curcontext=${curcontext%:*:*}:docker-${words[-1]}: __docker_network_subcommand && ret=0 ;; esac ;; (pause|unpause) _arguments $(__docker_arguments) \ $opts_help \ "($help -)*:containers:__docker_runningcontainers" && ret=0 ;; (port) _arguments $(__docker_arguments) \ $opts_help \ "($help -)1:containers:__docker_runningcontainers" \ "($help -)2:port:_ports" && ret=0 ;; (ps) _arguments $(__docker_arguments) \ $opts_help \ "($help -a --all)"{-a,--all}"[Show all containers]" \ "($help)--before=[Show only container created before...]:containers:__docker_containers" \ "($help)*"{-f=,--filter=}"[Filter values]:filter: " \ "($help)--format[Pretty-print containers using a Go template]:format: " \ "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ "($help)-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)" \ "($help)--no-trunc[Do not truncate output]" \ "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ "($help -s --size)"{-s,--size}"[Display total file sizes]" \ "($help)--since=[Show only containers created since...]:containers:__docker_containers" && ret=0 ;; (pull) _arguments $(__docker_arguments) \ $opts_help \ "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ "($help -):name:__docker_search" && ret=0 ;; (push) _arguments $(__docker_arguments) \ $opts_help \ "($help -): :__docker_images" && ret=0 ;; (rename) _arguments $(__docker_arguments) \ $opts_help \ "($help -):old name:__docker_containers" \ "($help -):new name: " && ret=0 ;; 
(restart|stop) _arguments $(__docker_arguments) \ $opts_help \ "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ "($help -)*:containers:__docker_runningcontainers" && ret=0 ;; (rm) _arguments $(__docker_arguments) \ $opts_help \ "($help -f --force)"{-f,--force}"[Force removal]" \ "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ "($help -)*:containers:__docker_stoppedcontainers" && ret=0 ;; (rmi) _arguments $(__docker_arguments) \ $opts_help \ "($help -f --force)"{-f,--force}"[Force removal]" \ "($help)--no-prune[Do not delete untagged parents]" \ "($help -)*: :__docker_images" && ret=0 ;; (run) _arguments $(__docker_arguments) \ $opts_help \ $opts_build_create_run \ $opts_build_create_run_update \ $opts_create_run \ $opts_create_run_update \ $opts_attach_exec_run_start \ "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ "($help)--rm[Remove intermediate containers when it exits]" \ "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ "($help)--stop-signal=[Signal to kill a container]:signal:_signals" \ "($help -): :__docker_images" \ "($help -):command: _command_names -e" \ "($help -)*::arguments: _normal" && ret=0 case $state in (link) if compset -P "*:"; then _wanted alias expl "Alias" compadd -E "" && ret=0 else __docker_runningcontainers -qS ":" && ret=0 fi ;; esac ;; (save) _arguments $(__docker_arguments) \ $opts_help \ "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \ "($help -)*: :__docker_images" && ret=0 ;; (search) _arguments $(__docker_arguments) \ $opts_help \ "($help)--automated[Only show automated builds]" \ "($help)--no-trunc[Do not truncate output]" \ "($help -s --stars)"{-s=,--stars=}"[Only display with at least X 
stars]:stars:(0 10 100 1000)" \ "($help -):term: " && ret=0 ;; (start) _arguments $(__docker_arguments) \ $opts_help \ $opts_attach_exec_run_start \ "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \ "($help -)*:containers:__docker_stoppedcontainers" && ret=0 ;; (stats) _arguments $(__docker_arguments) \ $opts_help \ "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \ "($help)--no-stream[Disable streaming stats and only pull the first result]" \ "($help -)*:containers:__docker_runningcontainers" && ret=0 ;; (tag) _arguments $(__docker_arguments) \ $opts_help \ "($help -):source:__docker_images"\ "($help -):destination:__docker_repositories_with_tags" && ret=0 ;; (top) _arguments $(__docker_arguments) \ $opts_help \ "($help -)1:containers:__docker_runningcontainers" \ "($help -)*:: :->ps-arguments" && ret=0 case $state in (ps-arguments) _ps && ret=0 ;; esac ;; (update) _arguments $(__docker_arguments) \ $opts_help \ $opts_create_run_update \ $opts_build_create_run_update \ "($help -)*: :->values" && ret=0 case $state in (values) if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then __docker_stoppedcontainers && ret=0 else __docker_containers && ret=0 fi ;; esac ;; (volume) local curcontext="$curcontext" state _arguments $(__docker_arguments) \ $opts_help \ "($help -): :->command" \ "($help -)*:: :->option-or-argument" && ret=0 case $state in (command) __docker_volume_commands && ret=0 ;; (option-or-argument) curcontext=${curcontext%:*:*}:docker-${words[-1]}: __docker_volume_subcommand && ret=0 ;; esac ;; (wait) _arguments $(__docker_arguments) \ $opts_help \ "($help -)*:containers:__docker_runningcontainers" && ret=0 ;; (help) _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0 ;; esac return ret } _docker() { # Support for subservices, which allows for `compdef _docker 
docker-shell=_docker_containers`. # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. if [[ $service != docker ]]; then _call_function - _$service return fi local curcontext="$curcontext" state line help="-h --help" integer ret=1 typeset -A opt_args _arguments $(__docker_arguments) -C \ "(: -)"{-h,--help}"[Print usage]" \ "($help)--config[Location of client config files]:path:_directories" \ "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ "($help -l --log-level)"{-l=,--log-level=}"[Set the logging level]:level:(debug info warn error fatal)" \ "($help)--tls[Use TLS]" \ "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \ "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \ "($help)--tlskey=[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \ "($help)--tlsverify[Use TLS and verify the remote]" \ "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ "($help -v --version)"{-v,--version}"[Print version information and quit]" \ "($help -): :->command" \ "($help -)*:: :->option-or-argument" && ret=0 local host=${opt_args[-H]}${opt_args[--host]} local config=${opt_args[--config]} local docker_options="${host:+--host $host} ${config:+--config $config}" case $state in (command) __docker_commands && ret=0 ;; (option-or-argument) curcontext=${curcontext%:*:*}:docker-$words[1]: __docker_subcommand && ret=0 ;; esac return ret } _docker "$@" # Local Variables: # mode: Shell-Script # sh-indentation: 4 # indent-tabs-mode: nil # sh-basic-offset: 4 # End: # vim: ft=zsh sw=4 ts=4 et docker-1.10.3/contrib/desktop-integration/000077500000000000000000000000001267010174400204775ustar00rootroot00000000000000docker-1.10.3/contrib/desktop-integration/README.md000066400000000000000000000005051267010174400217560ustar00rootroot00000000000000Desktop Integration =================== The 
./contrib/desktop-integration contains examples of typical dockerized desktop applications. Examples ======== * Chromium: ./chromium/Dockerfile shows a way to dockerize a common application * Gparted: ./gparted/Dockerfile shows a way to dockerize a common application w devices docker-1.10.3/contrib/desktop-integration/chromium/000077500000000000000000000000001267010174400223225ustar00rootroot00000000000000docker-1.10.3/contrib/desktop-integration/chromium/Dockerfile000066400000000000000000000023241267010174400243150ustar00rootroot00000000000000# VERSION: 0.1 # DESCRIPTION: Create chromium container with its dependencies # AUTHOR: Jessica Frazelle # COMMENTS: # This file describes how to build a Chromium container with all # dependencies installed. It uses native X11 unix socket. # Tested on Debian Jessie # USAGE: # # Download Chromium Dockerfile # wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile # # # Build chromium image # docker build -t chromium . # # # Run stateful data-on-host chromium. 
For ephemeral, remove -v /data/chromium:/data # docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ # -e DISPLAY=unix$DISPLAY chromium # # To run stateful dockerized data containers # docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ # -e DISPLAY=unix$DISPLAY chromium # Base docker image FROM debian:jessie MAINTAINER Jessica Frazelle # Install Chromium RUN apt-get update && apt-get install -y \ chromium \ chromium-l10n \ libcanberra-gtk-module \ libexif-dev \ --no-install-recommends # Autorun chromium CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] docker-1.10.3/contrib/desktop-integration/gparted/000077500000000000000000000000001267010174400221255ustar00rootroot00000000000000docker-1.10.3/contrib/desktop-integration/gparted/Dockerfile000066400000000000000000000016411267010174400241210ustar00rootroot00000000000000# VERSION: 0.1 # DESCRIPTION: Create gparted container with its dependencies # AUTHOR: Jessica Frazelle # COMMENTS: # This file describes how to build a gparted container with all # dependencies installed. It uses native X11 unix socket. # Tested on Debian Jessie # USAGE: # # Download gparted Dockerfile # wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile # # # Build gparted image # docker build -t gparted . 
# # docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ # --device=/dev/sda:/dev/sda \ # -e DISPLAY=unix$DISPLAY gparted # # Base docker image FROM debian:jessie MAINTAINER Jessica Frazelle # Install Gparted and its dependencies RUN apt-get update && apt-get install -y \ gparted \ libcanberra-gtk-module \ --no-install-recommends # Autorun gparted CMD ["/usr/sbin/gparted"] docker-1.10.3/contrib/docker-device-tool/000077500000000000000000000000001267010174400201645ustar00rootroot00000000000000docker-1.10.3/contrib/docker-device-tool/device_tool.go000066400000000000000000000074301267010174400230130ustar00rootroot00000000000000// +build !windows package main import ( "flag" "fmt" "os" "path" "sort" "strconv" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver/devmapper" "github.com/docker/docker/pkg/devicemapper" ) func usage() { fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) flag.PrintDefaults() os.Exit(1) } func byteSizeFromString(arg string) (int64, error) { digits := "" rest := "" last := strings.LastIndexAny(arg, "0123456789") if last >= 0 { digits = arg[:last+1] rest = arg[last+1:] } val, err := strconv.ParseInt(digits, 10, 64) if err != nil { return val, err } rest = strings.ToLower(strings.TrimSpace(rest)) var multiplier int64 = 1 switch rest { case "": multiplier = 1 case "k", "kb": multiplier = 1024 case "m", "mb": multiplier = 1024 * 1024 case "g", "gb": multiplier = 1024 * 1024 * 1024 case "t", "tb": multiplier = 1024 * 1024 * 1024 * 1024 default: return 0, fmt.Errorf("Unknown size unit: %s", rest) } return val * multiplier, nil } func main() { root := flag.String("r", "/var/lib/docker", "Docker root dir") flDebug := flag.Bool("D", false, "Debug mode") flag.Parse() if *flDebug { os.Setenv("DEBUG", "1") logrus.SetLevel(logrus.DebugLevel) } if flag.NArg() < 1 { usage() } args := flag.Args() home := 
path.Join(*root, "devicemapper") devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil) if err != nil { fmt.Println("Can't initialize device mapper: ", err) os.Exit(1) } switch args[0] { case "status": status := devices.Status() fmt.Printf("Pool name: %s\n", status.PoolName) fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) fmt.Printf("Sector size: %d\n", status.SectorSize) fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) break case "list": ids := devices.List() sort.Strings(ids) for _, id := range ids { fmt.Println(id) } break case "device": if flag.NArg() < 2 { usage() } status, err := devices.GetDeviceStatus(args[1]) if err != nil { fmt.Println("Can't get device info: ", err) os.Exit(1) } fmt.Printf("Id: %d\n", status.DeviceID) fmt.Printf("Size: %d\n", status.Size) fmt.Printf("Transaction Id: %d\n", status.TransactionID) fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) break case "resize": if flag.NArg() < 2 { usage() } size, err := byteSizeFromString(args[1]) if err != nil { fmt.Println("Invalid size: ", err) os.Exit(1) } err = devices.ResizePool(size) if err != nil { fmt.Println("Error resizing pool: ", err) os.Exit(1) } break case "snap": if flag.NArg() < 3 { usage() } err := devices.AddDevice(args[1], args[2]) if err != nil { fmt.Println("Can't create snap device: ", err) os.Exit(1) } break case "remove": if flag.NArg() < 2 { usage() } err := devicemapper.RemoveDevice(args[1]) if err != nil { fmt.Println("Can't remove device: ", err) os.Exit(1) } break case "mount": if flag.NArg() < 3 { 
usage() } err := devices.MountDevice(args[1], args[2], "") if err != nil { fmt.Println("Can't create snap device: ", err) os.Exit(1) } break default: fmt.Printf("Unknown command %s\n", args[0]) usage() os.Exit(1) } return } docker-1.10.3/contrib/docker-device-tool/device_tool_windows.go000066400000000000000000000000361267010174400245600ustar00rootroot00000000000000package main func main() { } docker-1.10.3/contrib/docker-engine-selinux/000077500000000000000000000000001267010174400207045ustar00rootroot00000000000000docker-1.10.3/contrib/docker-engine-selinux/LICENSE000066400000000000000000000431311267010174400217130ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. 
To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. 
The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. 
b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. 
docker-1.10.3/contrib/docker-engine-selinux/Makefile000066400000000000000000000004141267010174400223430ustar00rootroot00000000000000TARGETS?=docker MODULES?=${TARGETS:=.pp.bz2} SHAREDIR?=/usr/share all: ${TARGETS:=.pp.bz2} %.pp.bz2: %.pp @echo Compressing $^ -\> $@ bzip2 -9 $^ %.pp: %.te make -f ${SHAREDIR}/selinux/devel/Makefile $@ clean: rm -f *~ *.tc *.pp *.pp.bz2 rm -rf tmp *.tar.gz docker-1.10.3/contrib/docker-engine-selinux/docker.fc000066400000000000000000000021121267010174400224610ustar00rootroot00000000000000/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) /usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) /usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) /etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) /var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) /var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) /var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) /var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) /var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) /var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) /var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) /var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) /var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) /var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) docker-1.10.3/contrib/docker-engine-selinux/docker.if000066400000000000000000000225771267010174400225100ustar00rootroot00000000000000 ## The open-source application container engine. ######################################## ## ## Execute docker in the docker domain. ## ## ## ## Domain allowed to transition. 
## ## # interface(`docker_domtrans',` gen_require(` type docker_t, docker_exec_t; ') corecmd_search_bin($1) domtrans_pattern($1, docker_exec_t, docker_t) ') ######################################## ## ## Execute docker in the caller domain. ## ## ## ## Domain allowed to transition. ## ## # interface(`docker_exec',` gen_require(` type docker_exec_t; ') corecmd_search_bin($1) can_exec($1, docker_exec_t) ') ######################################## ## ## Search docker lib directories. ## ## ## ## Domain allowed access. ## ## # interface(`docker_search_lib',` gen_require(` type docker_var_lib_t; ') allow $1 docker_var_lib_t:dir search_dir_perms; files_search_var_lib($1) ') ######################################## ## ## Execute docker lib directories. ## ## ## ## Domain allowed access. ## ## # interface(`docker_exec_lib',` gen_require(` type docker_var_lib_t; ') allow $1 docker_var_lib_t:dir search_dir_perms; can_exec($1, docker_var_lib_t) ') ######################################## ## ## Read docker lib files. ## ## ## ## Domain allowed access. ## ## # interface(`docker_read_lib_files',` gen_require(` type docker_var_lib_t; ') files_search_var_lib($1) read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) ') ######################################## ## ## Read docker share files. ## ## ## ## Domain allowed access. ## ## # interface(`docker_read_share_files',` gen_require(` type docker_share_t; ') files_search_var_lib($1) read_files_pattern($1, docker_share_t, docker_share_t) ') ######################################## ## ## Manage docker lib files. ## ## ## ## Domain allowed access. ## ## # interface(`docker_manage_lib_files',` gen_require(` type docker_var_lib_t; ') files_search_var_lib($1) manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) ') ######################################## ## ## Manage docker lib directories. ## ## ## ## Domain allowed access. 
## ## # interface(`docker_manage_lib_dirs',` gen_require(` type docker_var_lib_t; ') files_search_var_lib($1) manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) ') ######################################## ## ## Create objects in a docker var lib directory ## with an automatic type transition to ## a specified private type. ## ## ## ## Domain allowed access. ## ## ## ## ## The type of the object to create. ## ## ## ## ## The class of the object to be created. ## ## ## ## ## The name of the object being created. ## ## # interface(`docker_lib_filetrans',` gen_require(` type docker_var_lib_t; ') filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) ') ######################################## ## ## Read docker PID files. ## ## ## ## Domain allowed access. ## ## # interface(`docker_read_pid_files',` gen_require(` type docker_var_run_t; ') files_search_pids($1) read_files_pattern($1, docker_var_run_t, docker_var_run_t) ') ######################################## ## ## Execute docker server in the docker domain. ## ## ## ## Domain allowed to transition. ## ## # interface(`docker_systemctl',` gen_require(` type docker_t; type docker_unit_file_t; ') systemd_exec_systemctl($1) init_reload_services($1) systemd_read_fifo_file_passwd_run($1) allow $1 docker_unit_file_t:file read_file_perms; allow $1 docker_unit_file_t:service manage_service_perms; ps_process_pattern($1, docker_t) ') ######################################## ## ## Read and write docker shared memory. ## ## ## ## Domain allowed access. ## ## # interface(`docker_rw_sem',` gen_require(` type docker_t; ') allow $1 docker_t:sem rw_sem_perms; ') ####################################### ## ## Read and write the docker pty type. ## ## ## ## Domain allowed access. ## ## # interface(`docker_use_ptys',` gen_require(` type docker_devpts_t; ') allow $1 docker_devpts_t:chr_file rw_term_perms; ') ####################################### ## ## Allow domain to create docker content ## ## ## ## Domain allowed access. 
## ## # interface(`docker_filetrans_named_content',` gen_require(` type docker_var_lib_t; type docker_share_t; type docker_log_t; type docker_var_run_t; type docker_home_t; ') files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") ') ######################################## ## ## Connect to docker over a unix stream socket. ## ## ## ## Domain allowed access. ## ## # interface(`docker_stream_connect',` gen_require(` type docker_t, docker_var_run_t; ') files_search_pids($1) stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) ') ######################################## ## ## Connect to SPC containers over a unix stream socket. ## ## ## ## Domain allowed access. ## ## # interface(`docker_spc_stream_connect',` gen_require(` type spc_t, spc_var_run_t; ') files_search_pids($1) files_write_all_pid_sockets($1) allow $1 spc_t:unix_stream_socket connectto; ') ######################################## ## ## All of the rules required to administrate ## an docker environment ## ## ## ## Domain allowed access. 
## ## # interface(`docker_admin',` gen_require(` type docker_t; type docker_var_lib_t, docker_var_run_t; type docker_unit_file_t; type docker_lock_t; type docker_log_t; type docker_config_t; ') allow $1 docker_t:process { ptrace signal_perms }; ps_process_pattern($1, docker_t) admin_pattern($1, docker_config_t) files_search_var_lib($1) admin_pattern($1, docker_var_lib_t) files_search_pids($1) admin_pattern($1, docker_var_run_t) files_search_locks($1) admin_pattern($1, docker_lock_t) logging_search_logs($1) admin_pattern($1, docker_log_t) docker_systemctl($1) admin_pattern($1, docker_unit_file_t) allow $1 docker_unit_file_t:service all_service_perms; optional_policy(` systemd_passwd_agent_exec($1) systemd_read_fifo_file_passwd_run($1) ') ') interface(`domain_stub_named_filetrans_domain',` gen_require(` attribute named_filetrans_domain; ') ') interface(`lvm_stub',` gen_require(` type lvm_t; ') ') interface(`staff_stub',` gen_require(` type staff_t; ') ') interface(`virt_stub_svirt_sandbox_domain',` gen_require(` attribute svirt_sandbox_domain; ') ') interface(`virt_stub_svirt_sandbox_file',` gen_require(` type svirt_sandbox_file_t; ') ') interface(`fs_dontaudit_remount_tmpfs',` gen_require(` type tmpfs_t; ') dontaudit $1 tmpfs_t:filesystem remount; ') interface(`dev_dontaudit_list_all_dev_nodes',` gen_require(` type device_t; ') dontaudit $1 device_t:dir list_dir_perms; ') interface(`kernel_unlabeled_entry_type',` gen_require(` type unlabeled_t; ') domain_entry_file($1, unlabeled_t) ') interface(`kernel_unlabeled_domtrans',` gen_require(` type unlabeled_t; ') read_lnk_files_pattern($1, unlabeled_t, unlabeled_t) domain_transition_pattern($1, unlabeled_t, $2) type_transition $1 unlabeled_t:process $2; ') interface(`files_write_all_pid_sockets',` gen_require(` attribute pidfile; ') allow $1 pidfile:sock_file write_sock_file_perms; ') interface(`dev_dontaudit_mounton_sysfs',` gen_require(` type sysfs_t; ') dontaudit $1 sysfs_t:dir mounton; ') 
docker-1.10.3/contrib/docker-engine-selinux/docker.te000066400000000000000000000273121267010174400225120ustar00rootroot00000000000000policy_module(docker, 1.0.0) ######################################## # # Declarations # ## ##

## Allow sandbox containers manage fuse files ##

##
gen_tunable(virt_sandbox_use_fusefs, false) ## ##

## Determine whether docker can ## connect to all TCP ports. ##

##
gen_tunable(docker_connect_any, false) type docker_t; type docker_exec_t; init_daemon_domain(docker_t, docker_exec_t) domain_subj_id_change_exemption(docker_t) domain_role_change_exemption(docker_t) type spc_t; domain_type(spc_t) role system_r types spc_t; type spc_var_run_t; files_pid_file(spc_var_run_t) type docker_var_lib_t; files_type(docker_var_lib_t) type docker_home_t; userdom_user_home_content(docker_home_t) type docker_config_t; files_config_file(docker_config_t) type docker_lock_t; files_lock_file(docker_lock_t) type docker_log_t; logging_log_file(docker_log_t) type docker_tmp_t; files_tmp_file(docker_tmp_t) type docker_tmpfs_t; files_tmpfs_file(docker_tmpfs_t) type docker_var_run_t; files_pid_file(docker_var_run_t) type docker_unit_file_t; systemd_unit_file(docker_unit_file_t) type docker_devpts_t; term_pty(docker_devpts_t) type docker_share_t; files_type(docker_share_t) ######################################## # # docker local policy # allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; allow docker_t self:tun_socket relabelto; allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; allow docker_t self:fifo_file rw_fifo_file_perms; allow docker_t self:unix_stream_socket create_stream_socket_perms; allow docker_t self:tcp_socket create_stream_socket_perms; allow docker_t self:udp_socket create_socket_perms; allow docker_t self:capability2 block_suspend; manage_files_pattern(docker_t, docker_home_t, docker_home_t) manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) manage_files_pattern(docker_t, docker_config_t, docker_config_t) files_etc_filetrans(docker_t, docker_config_t, dir, "docker") manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) 
manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) manage_files_pattern(docker_t, docker_log_t, docker_log_t) manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) allow docker_t docker_tmpfs_t:dir relabelfrom; can_exec(docker_t, docker_tmpfs_t) fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) allow docker_t docker_tmpfs_t:chr_file mounton; manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) manage_files_pattern(docker_t, docker_share_t, docker_share_t) manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; can_exec(docker_t, docker_share_t) #docker_filetrans_named_content(docker_t) manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; 
files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; term_create_pty(docker_t, docker_devpts_t) kernel_read_system_state(docker_t) kernel_read_network_state(docker_t) kernel_read_all_sysctls(docker_t) kernel_rw_net_sysctls(docker_t) kernel_setsched(docker_t) kernel_read_all_proc(docker_t) domain_use_interactive_fds(docker_t) domain_dontaudit_read_all_domains_state(docker_t) corecmd_exec_bin(docker_t) corecmd_exec_shell(docker_t) corenet_tcp_bind_generic_node(docker_t) corenet_tcp_sendrecv_generic_if(docker_t) corenet_tcp_sendrecv_generic_node(docker_t) corenet_tcp_sendrecv_generic_port(docker_t) corenet_tcp_bind_all_ports(docker_t) corenet_tcp_connect_http_port(docker_t) corenet_tcp_connect_commplex_main_port(docker_t) corenet_udp_sendrecv_generic_if(docker_t) corenet_udp_sendrecv_generic_node(docker_t) corenet_udp_sendrecv_all_ports(docker_t) corenet_udp_bind_generic_node(docker_t) corenet_udp_bind_all_ports(docker_t) files_read_config_files(docker_t) files_dontaudit_getattr_all_dirs(docker_t) files_dontaudit_getattr_all_files(docker_t) fs_read_cgroup_files(docker_t) fs_read_tmpfs_symlinks(docker_t) fs_search_all(docker_t) fs_getattr_all_fs(docker_t) storage_raw_rw_fixed_disk(docker_t) auth_use_nsswitch(docker_t) auth_dontaudit_getattr_shadow(docker_t) init_read_state(docker_t) init_status(docker_t) logging_send_audit_msgs(docker_t) logging_send_syslog_msg(docker_t) miscfiles_read_localization(docker_t) mount_domtrans(docker_t) seutil_read_default_contexts(docker_t) seutil_read_config(docker_t) 
sysnet_dns_name_resolve(docker_t) sysnet_exec_ifconfig(docker_t) optional_policy(` rpm_exec(docker_t) rpm_read_db(docker_t) rpm_exec(docker_t) ') optional_policy(` fstools_domtrans(docker_t) ') optional_policy(` iptables_domtrans(docker_t) ') optional_policy(` openvswitch_stream_connect(docker_t) ') allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; allow docker_t self:netlink_audit_socket create_netlink_socket_perms; allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; allow docker_t docker_var_lib_t:dir mounton; allow docker_t docker_var_lib_t:chr_file mounton; can_exec(docker_t, docker_var_lib_t) kernel_dontaudit_setsched(docker_t) kernel_get_sysvipc_info(docker_t) kernel_request_load_module(docker_t) kernel_mounton_messages(docker_t) kernel_mounton_all_proc(docker_t) kernel_mounton_all_sysctls(docker_t) kernel_unlabeled_entry_type(spc_t) kernel_unlabeled_domtrans(docker_t, spc_t) dev_getattr_all(docker_t) dev_getattr_sysfs_fs(docker_t) dev_read_urand(docker_t) dev_read_lvm_control(docker_t) dev_rw_sysfs(docker_t) dev_rw_loop_control(docker_t) dev_rw_lvm_control(docker_t) files_getattr_isid_type_dirs(docker_t) files_manage_isid_type_dirs(docker_t) files_manage_isid_type_files(docker_t) files_manage_isid_type_symlinks(docker_t) files_manage_isid_type_chr_files(docker_t) files_manage_isid_type_blk_files(docker_t) files_exec_isid_files(docker_t) files_mounton_isid(docker_t) files_mounton_non_security(docker_t) files_mounton_isid_type_chr_file(docker_t) fs_mount_all_fs(docker_t) fs_unmount_all_fs(docker_t) fs_remount_all_fs(docker_t) files_mounton_isid(docker_t) fs_manage_cgroup_dirs(docker_t) fs_manage_cgroup_files(docker_t) fs_relabelfrom_xattr_fs(docker_t) 
fs_relabelfrom_tmpfs(docker_t) fs_read_tmpfs_symlinks(docker_t) fs_list_hugetlbfs(docker_t) term_use_generic_ptys(docker_t) term_use_ptmx(docker_t) term_getattr_pty_fs(docker_t) term_relabel_pty_fs(docker_t) term_mounton_unallocated_ttys(docker_t) modutils_domtrans_insmod(docker_t) systemd_status_all_unit_files(docker_t) systemd_start_systemd_services(docker_t) userdom_stream_connect(docker_t) userdom_search_user_home_content(docker_t) userdom_read_all_users_state(docker_t) userdom_relabel_user_home_files(docker_t) userdom_relabel_user_tmp_files(docker_t) userdom_relabel_user_tmp_dirs(docker_t) optional_policy(` gpm_getattr_gpmctl(docker_t) ') optional_policy(` dbus_system_bus_client(docker_t) init_dbus_chat(docker_t) init_start_transient_unit(docker_t) optional_policy(` systemd_dbus_chat_logind(docker_t) ') optional_policy(` firewalld_dbus_chat(docker_t) ') ') optional_policy(` udev_read_db(docker_t) ') optional_policy(` virt_read_config(docker_t) virt_exec(docker_t) virt_stream_connect(docker_t) virt_stream_connect_sandbox(docker_t) virt_exec_sandbox_files(docker_t) virt_manage_sandbox_files(docker_t) virt_relabel_sandbox_filesystem(docker_t) virt_transition_svirt_sandbox(docker_t, system_r) virt_mounton_sandbox_file(docker_t) # virt_attach_sandbox_tun_iface(docker_t) allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; ') tunable_policy(`docker_connect_any',` corenet_tcp_connect_all_ports(docker_t) corenet_sendrecv_all_packets(docker_t) corenet_tcp_sendrecv_all_ports(docker_t) ') ######################################## # # spc local policy # domain_entry_file(spc_t, docker_share_t) domain_entry_file(spc_t, docker_var_lib_t) role system_r types spc_t; domain_entry_file(spc_t, docker_share_t) domain_entry_file(spc_t, docker_var_lib_t) domtrans_pattern(docker_t, docker_share_t, spc_t) domtrans_pattern(docker_t, docker_var_lib_t, spc_t) allow docker_t spc_t:process { setsched signal_perms }; ps_process_pattern(docker_t, spc_t) allow docker_t 
spc_t:socket_class_set { relabelto relabelfrom }; optional_policy(` dbus_chat_system_bus(spc_t) ') optional_policy(` unconfined_domain_noaudit(spc_t) ') optional_policy(` unconfined_domain(docker_t) ') optional_policy(` virt_transition_svirt_sandbox(spc_t, system_r) ') ######################################## # # docker upstream policy # optional_policy(` # domain_stub_named_filetrans_domain() gen_require(` attribute named_filetrans_domain; ') docker_filetrans_named_content(named_filetrans_domain) ') optional_policy(` lvm_stub() docker_rw_sem(lvm_t) ') optional_policy(` staff_stub() docker_stream_connect(staff_t) docker_exec(staff_t) ') optional_policy(` virt_stub_svirt_sandbox_domain() virt_stub_svirt_sandbox_file() allow svirt_sandbox_domain self:netlink_kobject_uevent_socket create_socket_perms; docker_read_share_files(svirt_sandbox_domain) docker_lib_filetrans(svirt_sandbox_domain,svirt_sandbox_file_t, sock_file) docker_use_ptys(svirt_sandbox_domain) docker_spc_stream_connect(svirt_sandbox_domain) fs_list_tmpfs(svirt_sandbox_domain) fs_rw_hugetlbfs_files(svirt_sandbox_domain) fs_dontaudit_remount_tmpfs(svirt_sandbox_domain) dev_dontaudit_mounton_sysfs(svirt_sandbox_domain) tunable_policy(`virt_sandbox_use_fusefs',` fs_manage_fusefs_dirs(svirt_sandbox_domain) fs_manage_fusefs_files(svirt_sandbox_domain) fs_manage_fusefs_symlinks(svirt_sandbox_domain) ') gen_require(` attribute domain; ') dontaudit svirt_sandbox_domain domain:key {search link}; ') docker-1.10.3/contrib/docker-engine-selinux/docker_selinux.8.gz000066400000000000000000000054371267010174400244430ustar00rootroot00000000000000Udocker_selinux.8Z[۶~>_kAήH>khZmb%R%)8?->t3o.Ό4Z}"d䁩-,,|/ß=?@XN?# YȌ'G'2OLQLr?$K KIKF*bv̋$ Ӛi'f-V (ϙCMƞ:c$"F D d6U3{цGnv !"P&KPAKߐ<=C&U)[>x%o>~H9¬៸WI3Lޏ`8D"ch ٟdXQ"g|umq;GX-֌0arϔpqYNDZsQE'C 3*7bJ#f,p/NArfv2G,UGEo9ֶnUčS+l. &hMp 9 ic4IQ>8742"YLTgD56l)sE֥{L~&of;9P`6<Ȗ aȝyR):jMQ-Qm6*99u4Db9˚ކ|(YK lhQbk=2ΆN, UΰW̥G ̠Ed"&?)K, "S"r0 1yzZ#r5/4ʨ 4t͐ ]RE6;KUG. 
m֏^<{NP2'YJ Gـu 匞4Lµ ؀d~ ׫9%,)f;lGsr\qZEr։g3aOz.S҃qǞ;߁KK n"&)관ԧzcO=E 9gX3Cӱ  a^9|ٳR?2=Q4>D[ۭ=@&as: iT\ }]Eڂ+e:Q"|bEx^Rء%H&iX`#C^w#R]Bӆ3:T0x۟pސR!I m&4~խ5bUBb[(@lY3%A >릯0鉈^Ԯhϖg rk6_oNZ }]pj8Q@(xTE' yv2_ޏ ycu pn釖If÷u&;3G4QV@_)9/Z&yjqVc촺1?O"頄Fq;PМuq9308((hkCtsAUڢ{LX˧dSOmNq:tre[7O3ahJ;U"1&;v.-&^ /dev/null; then echo >&2 'error: "qemu-nbd" not found!' exit 1 fi usage() { echo "Convert disk image to docker image" echo "" echo "usage: $0 image-name disk-image-file [ base-image ]" echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" } if [ "$#" -lt 2 ]; then usage exit 1 fi CURDIR=$(pwd) image_name="${1%:*}" image_tag="${1#*:}" if [ "$image_tag" == "$1" ]; then image_tag="latest" fi disk_image_file="$2" docker_base_image="$3" block_device=/dev/nbd0 builddir=$(mktemp -d) cleanup() { umount "$builddir/disk_image" || true umount "$builddir/workdir" || true qemu-nbd -d $block_device &> /dev/null || true rm -rf $builddir } trap cleanup EXIT # Mount disk image modprobe nbd max_part=63 qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" mkdir "$builddir/disk_image" mount -o ro ${block_device} "$builddir/disk_image" mkdir "$builddir/workdir" mkdir "$builddir/diff" base_image_mounts="" # Unpack base image if [ -n "$docker_base_image" ]; then mkdir -p "$builddir/base" docker pull "$docker_base_image" docker save "$docker_base_image" | tar -xC "$builddir/base" image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") while [ -n "$image_id" ]; do mkdir -p "$builddir/base/$image_id/layer" tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" image_id=$(docker inspect -f "{{.Parent}}" "$image_id") done fi # Mount work directory mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" # Update files cd $builddir LC_ALL=C diff -rq disk_image 
workdir \ | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ | while read action entry; do case "$action" in ADD|UPDATE) cp -a "disk_image$entry" "workdir$entry" ;; DEL) rm -rf "workdir$entry" ;; *) echo "Error: unknown diff line: $action $entry" >&2 ;; esac done # Pack new image new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" mkdir -p $builddir/result/$new_image_id cd diff tar -cf $builddir/result/$new_image_id/layer.tar * echo "1.0" > $builddir/result/$new_image_id/VERSION cat > $builddir/result/$new_image_id/json <<-EOS { "docker_version": "1.4.1" , "id": "$new_image_id" , "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" EOS if [ -n "$docker_base_image" ]; then image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json fi echo "}" >> $builddir/result/$new_image_id/json echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories cd $builddir/result # mkdir -p $CURDIR/$image_name # cp -r * $CURDIR/$image_name tar -c * | docker load docker-1.10.3/contrib/download-frozen-image-v1.sh000077500000000000000000000074321267010174400215660ustar00rootroot00000000000000#!/bin/bash set -e # hello-world latest ef872312fe1b 3 months ago 910 B # hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B # debian latest f6fab3b798be 10 weeks ago 85.1 MB # debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB if ! command -v curl &> /dev/null; then echo >&2 'error: "curl" not found!' exit 1 fi usage() { echo "usage: $0 dir image[:tag][@image-id] ..." 
echo " ie: $0 /tmp/hello-world hello-world" echo " $0 /tmp/debian-jessie debian:jessie" echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" [ -z "$1" ] || exit "$1" } dir="$1" # dir for building tar in shift || usage 1 >&2 [ $# -gt 0 -a "$dir" ] || usage 2 >&2 mkdir -p "$dir" # hacky workarounds for Bash 3 support (no associative arrays) images=() rm -f "$dir"/tags-*.tmp # repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' while [ $# -gt 0 ]; do imageTag="$1" shift image="${imageTag%%[:@]*}" tag="${imageTag#*:}" imageId="${tag##*@}" [ "$imageId" != "$tag" ] || imageId= [ "$tag" != "$imageTag" ] || tag='latest' tag="${tag%@*}" imageFile="${image//\//_}" # "/" can't be in filenames :) token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" if [ -z "$imageId" ]; then imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" imageId="${imageId//\"/}" fi ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" if [ "${ancestryJson:0:1}" != '[' ]; then echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" echo >&2 " $ancestryJson" exit 1 fi IFS=',' ancestry=( ${ancestryJson//[\[\] \"]/} ) unset IFS if [ -s "$dir/tags-$imageFile.tmp" ]; then echo -n ', ' >> "$dir/tags-$imageFile.tmp" else images=( "${images[@]}" "$image" ) fi echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." 
for imageId in "${ancestry[@]}"; do mkdir -p "$dir/$imageId" echo '1.0' > "$dir/$imageId/VERSION" curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" # TODO figure out why "-C -" doesn't work here # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." # "HTTP/1.1 416 Requested Range Not Satisfiable" if [ -f "$dir/$imageId/layer.tar" ]; then # TODO hackpatch for no -C support :'( echo "skipping existing ${imageId:0:12}" continue fi curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - done echo done echo -n '{' > "$dir/repositories" firstImage=1 for image in "${images[@]}"; do imageFile="${image//\//_}" # "/" can't be in filenames :) [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" firstImage= echo -n $'\n\t' >> "$dir/repositories" echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" done echo -n $'\n}\n' >> "$dir/repositories" rm -f "$dir"/tags-*.tmp echo "Download of images into '$dir' complete." echo "Use something like the following to load the result into a Docker daemon:" echo " tar -cC '$dir' . | docker load" docker-1.10.3/contrib/download-frozen-image-v2.sh000077500000000000000000000076311267010174400215700ustar00rootroot00000000000000#!/bin/bash set -e # hello-world latest ef872312fe1b 3 months ago 910 B # hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B # debian latest f6fab3b798be 10 weeks ago 85.1 MB # debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB if ! command -v curl &> /dev/null; then echo >&2 'error: "curl" not found!' exit 1 fi usage() { echo "usage: $0 dir image[:tag][@digest] ..." 
echo " $0 /tmp/old-hello-world hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7" [ -z "$1" ] || exit "$1" } dir="$1" # dir for building tar in shift || usage 1 >&2 [ $# -gt 0 -a "$dir" ] || usage 2 >&2 mkdir -p "$dir" # hacky workarounds for Bash 3 support (no associative arrays) images=() rm -f "$dir"/tags-*.tmp # repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' while [ $# -gt 0 ]; do imageTag="$1" shift image="${imageTag%%[:@]*}" imageTag="${imageTag#*:}" digest="${imageTag##*@}" tag="${imageTag%%@*}" # add prefix library if passed official image if [[ "$image" != *"/"* ]]; then image="library/$image" fi imageFile="${image//\//_}" # "/" can't be in filenames :) token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" manifestJson="$(curl -sSL -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/manifests/$digest")" if [ "${manifestJson:0:1}" != '{' ]; then echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" echo >&2 " $manifestJson" exit 1 fi layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum') IFS=$'\n' # bash v4 on Windows CI requires CRLF separator if [ "$(go env GOHOSTOS)" = 'windows' ]; then major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1) if [ "$major" -ge 4 ]; then IFS=$'\r\n' fi fi layers=( ${layersFs} ) unset IFS history=$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]') imageId=$(echo "$history" | jq --raw-output .[0] | jq --raw-output .id) if [ -s "$dir/tags-$imageFile.tmp" ]; then echo -n ', ' >> "$dir/tags-$imageFile.tmp" else images=( "${images[@]}" "$image" ) fi echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" echo "Downloading '${image}:${tag}@${digest}' (${#layers[@]} layers)..." 
for i in "${!layers[@]}"; do imageJson=$(echo "$history" | jq --raw-output .[${i}]) imageId=$(echo "$imageJson" | jq --raw-output .id) imageLayer=${layers[$i]} mkdir -p "$dir/$imageId" echo '1.0' > "$dir/$imageId/VERSION" echo "$imageJson" > "$dir/$imageId/json" # TODO figure out why "-C -" doesn't work here # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." # "HTTP/1.1 416 Requested Range Not Satisfiable" if [ -f "$dir/$imageId/layer.tar" ]; then # TODO hackpatch for no -C support :'( echo "skipping existing ${imageId:0:12}" continue fi curl -SL --progress -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/blobs/$imageLayer" -o "$dir/$imageId/layer.tar" # -C - done echo done echo -n '{' > "$dir/repositories" firstImage=1 for image in "${images[@]}"; do imageFile="${image//\//_}" # "/" can't be in filenames :) image="${image#library\/}" [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" firstImage= echo -n $'\n\t' >> "$dir/repositories" echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" done echo -n $'\n}\n' >> "$dir/repositories" rm -f "$dir"/tags-*.tmp echo "Download of images into '$dir' complete." echo "Use something like the following to load the result into a Docker daemon:" echo " tar -cC '$dir' . | docker load" docker-1.10.3/contrib/httpserver/000077500000000000000000000000001267010174400167135ustar00rootroot00000000000000docker-1.10.3/contrib/httpserver/Dockerfile000066400000000000000000000001021267010174400206760ustar00rootroot00000000000000FROM busybox EXPOSE 80/tcp COPY httpserver . 
CMD ["./httpserver"] docker-1.10.3/contrib/httpserver/server.go000066400000000000000000000002531267010174400205500ustar00rootroot00000000000000package main import ( "log" "net/http" ) func main() { fs := http.FileServer(http.Dir("/static")) http.Handle("/", fs) log.Panic(http.ListenAndServe(":80", nil)) } docker-1.10.3/contrib/init/000077500000000000000000000000001267010174400154505ustar00rootroot00000000000000docker-1.10.3/contrib/init/openrc/000077500000000000000000000000001267010174400167365ustar00rootroot00000000000000docker-1.10.3/contrib/init/openrc/docker.confd000066400000000000000000000005441267010174400212230ustar00rootroot00000000000000# /etc/conf.d/docker: config file for /etc/init.d/docker # where the docker daemon output gets piped #DOCKER_LOGFILE="/var/log/docker.log" # where docker's pid get stored #DOCKER_PIDFILE="/run/docker.pid" # where the docker daemon itself is run from #DOCKER_BINARY="/usr/bin/docker" # any other random options you want to pass to docker DOCKER_OPTS="" docker-1.10.3/contrib/init/openrc/docker.initd000066400000000000000000000010661267010174400212410ustar00rootroot00000000000000#!/sbin/openrc-run # Copyright 1999-2013 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 command="${DOCKER_BINARY:-/usr/bin/docker}" pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" command_args="daemon -p \"${pidfile}\" ${DOCKER_OPTS}" DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" start_stop_daemon_args="--background \ --stderr \"${DOCKER_LOGFILE}\" --stdout \"${DOCKER_LOGFILE}\"" start_pre() { checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" ulimit -n 1048576 ulimit -u 1048576 return 0 } docker-1.10.3/contrib/init/systemd/000077500000000000000000000000001267010174400171405ustar00rootroot00000000000000docker-1.10.3/contrib/init/systemd/REVIEWERS000066400000000000000000000002311267010174400204320ustar00rootroot00000000000000Lokesh Mandvekar (@lsm5) Brandon Philips (@philips) Jessie Frazelle 
(@jfrazelle) docker-1.10.3/contrib/init/systemd/docker.service000066400000000000000000000005331267010174400217720ustar00rootroot00000000000000[Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com After=network.target docker.socket Requires=docker.socket [Service] Type=notify ExecStart=/usr/bin/docker daemon -H fd:// MountFlags=slave LimitNOFILE=1048576 LimitNPROC=1048576 LimitCORE=infinity TimeoutStartSec=0 [Install] WantedBy=multi-user.target docker-1.10.3/contrib/init/systemd/docker.socket000066400000000000000000000003051267010174400216170ustar00rootroot00000000000000[Unit] Description=Docker Socket for the API PartOf=docker.service [Socket] ListenStream=/var/run/docker.sock SocketMode=0660 SocketUser=root SocketGroup=docker [Install] WantedBy=sockets.target docker-1.10.3/contrib/init/sysvinit-debian/000077500000000000000000000000001267010174400205605ustar00rootroot00000000000000docker-1.10.3/contrib/init/sysvinit-debian/docker000077500000000000000000000070011267010174400217530ustar00rootroot00000000000000#!/bin/sh set -e ### BEGIN INIT INFO # Provides: docker # Required-Start: $syslog $remote_fs # Required-Stop: $syslog $remote_fs # Should-Start: cgroupfs-mount cgroup-lite # Should-Stop: cgroupfs-mount cgroup-lite # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Create lightweight, portable, self-sufficient containers. # Description: # Docker is an open-source project to easily create lightweight, portable, # self-sufficient containers from any application. The same container that a # developer builds and tests on a laptop can run at scale, in production, on # VMs, bare metal, OpenStack clusters, public clouds and more. 
### END INIT INFO export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin BASE=docker # modify these in /etc/default/$BASE (/etc/default/docker) DOCKER=/usr/bin/$BASE # This is the pid file managed by docker itself DOCKER_PIDFILE=/var/run/$BASE.pid # This is the pid file created/managed by start-stop-daemon DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid DOCKER_LOGFILE=/var/log/$BASE.log DOCKER_OPTS= DOCKER_DESC="Docker" # Get lsb functions . /lib/lsb/init-functions if [ -f /etc/default/$BASE ]; then . /etc/default/$BASE fi # Check docker is present if [ ! -x $DOCKER ]; then log_failure_msg "$DOCKER not present or not executable" exit 1 fi check_init() { # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" exit 1 fi } fail_unless_root() { if [ "$(id -u)" != '0' ]; then log_failure_msg "$DOCKER_DESC must be run as root" exit 1 fi } cgroupfs_mount() { # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount if grep -v '^#' /etc/fstab | grep -q cgroup \ || [ ! -e /proc/cgroups ] \ || [ ! -d /sys/fs/cgroup ]; then return fi if ! mountpoint -q /sys/fs/cgroup; then mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup fi ( cd /sys/fs/cgroup for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do mkdir -p $sys if ! mountpoint -q $sys; then if ! 
mount -n -t cgroup -o $sys cgroup $sys; then rmdir $sys || true fi fi done ) } case "$1" in start) check_init fail_unless_root cgroupfs_mount touch "$DOCKER_LOGFILE" chgrp docker "$DOCKER_LOGFILE" ulimit -n 1048576 if [ "$BASH" ]; then ulimit -u 1048576 else ulimit -p 1048576 fi log_begin_msg "Starting $DOCKER_DESC: $BASE" start-stop-daemon --start --background \ --no-close \ --exec "$DOCKER" \ --pidfile "$DOCKER_SSD_PIDFILE" \ --make-pidfile \ -- \ daemon -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS \ >> "$DOCKER_LOGFILE" 2>&1 log_end_msg $? ;; stop) check_init fail_unless_root log_begin_msg "Stopping $DOCKER_DESC: $BASE" start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 log_end_msg $? ;; restart) check_init fail_unless_root docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` [ -n "$docker_pid" ] \ && ps -p $docker_pid > /dev/null 2>&1 \ && $0 stop $0 start ;; force-reload) check_init fail_unless_root $0 restart ;; status) check_init status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC" ;; *) echo "Usage: service docker {start|stop|restart|status}" exit 1 ;; esac docker-1.10.3/contrib/init/sysvinit-debian/docker.default000066400000000000000000000012101267010174400233670ustar00rootroot00000000000000# Docker Upstart and SysVinit configuration file # # THIS FILE DOES NOT APPLY TO SYSTEMD # # Please see the documentation for "systemd drop-ins": # https://docs.docker.com/engine/articles/systemd/ # # Customize location of Docker binary (especially for development testing). #DOCKER="/usr/local/bin/docker" # Use DOCKER_OPTS to modify the daemon startup options. #DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" # If you need Docker to use an HTTP proxy, it can also be specified here. #export http_proxy="http://127.0.0.1:3128/" # This is also a handy place to tweak where Docker's temporary files go. 
#export TMPDIR="/mnt/bigdrive/docker-tmp" docker-1.10.3/contrib/init/sysvinit-redhat/000077500000000000000000000000001267010174400206055ustar00rootroot00000000000000docker-1.10.3/contrib/init/sysvinit-redhat/docker000077500000000000000000000054321267010174400220060ustar00rootroot00000000000000#!/bin/sh # # /etc/rc.d/init.d/docker # # Daemon for docker.com # # chkconfig: 2345 95 95 # description: Daemon for docker.com ### BEGIN INIT INFO # Provides: docker # Required-Start: $network cgconfig # Required-Stop: # Should-Start: # Should-Stop: # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: start and stop docker # Description: Daemon for docker.com ### END INIT INFO # Source function library. . /etc/rc.d/init.d/functions prog="docker" unshare=/usr/bin/unshare exec="/usr/bin/$prog" pidfile="/var/run/$prog.pid" lockfile="/var/lock/subsys/$prog" logfile="/var/log/$prog" [ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog prestart() { service cgconfig status > /dev/null if [[ $? != 0 ]]; then service cgconfig start fi } start() { if [ ! -x $exec ]; then if [ ! -e $exec ]; then echo "Docker executable $exec not found" else echo "You do not have permission to execute the Docker executable $exec" fi exit 5 fi check_for_cleanup if ! [ -f $pidfile ]; then prestart printf "Starting $prog:\t" echo "\n$(date)\n" >> $logfile "$unshare" -m -- $exec daemon $other_args >> $logfile 2>&1 & pid=$! touch $lockfile # wait up to 10 seconds for the pidfile to exist. see # https://github.com/docker/docker/issues/5359 tries=0 while [ ! -f $pidfile -a $tries -lt 10 ]; do sleep 1 tries=$((tries + 1)) echo -n '.' done if [ ! -f $pidfile ]; then failure echo exit 1 fi success echo else failure echo printf "$pidfile still exists...\n" exit 7 fi } stop() { echo -n $"Stopping $prog: " killproc -p $pidfile -d 300 $prog retval=$? 
echo [ $retval -eq 0 ] && rm -f $lockfile return $retval } restart() { stop start } reload() { restart } force_reload() { restart } rh_status() { status -p $pidfile $prog } rh_status_q() { rh_status >/dev/null 2>&1 } check_for_cleanup() { if [ -f ${pidfile} ]; then /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} fi } case "$1" in start) rh_status_q && exit 0 $1 ;; stop) rh_status_q || exit 0 $1 ;; restart) $1 ;; reload) rh_status_q || exit 7 $1 ;; force-reload) force_reload ;; status) rh_status ;; condrestart|try-restart) rh_status_q || exit 0 restart ;; *) echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" exit 2 esac exit $? docker-1.10.3/contrib/init/sysvinit-redhat/docker.sysconfig000066400000000000000000000003131267010174400237770ustar00rootroot00000000000000# /etc/sysconfig/docker # # Other arguments to pass to the docker daemon process # These will be parsed by the sysv initscript and appended # to the arguments list passed to docker daemon other_args="" docker-1.10.3/contrib/init/upstart/000077500000000000000000000000001267010174400171525ustar00rootroot00000000000000docker-1.10.3/contrib/init/upstart/REVIEWERS000066400000000000000000000001341267010174400204460ustar00rootroot00000000000000Tianon Gravi (@tianon) Jessie Frazelle (@jfrazelle) docker-1.10.3/contrib/init/upstart/docker.conf000066400000000000000000000027661267010174400213030ustar00rootroot00000000000000description "Docker daemon" start on (filesystem and net-device-up IFACE!=lo) stop on runlevel [!2345] limit nofile 524288 1048576 limit nproc 524288 1048576 respawn kill timeout 20 pre-start script # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount if grep -v '^#' /etc/fstab | grep -q cgroup \ || [ ! -e /proc/cgroups ] \ || [ ! -d /sys/fs/cgroup ]; then exit 0 fi if ! 
mountpoint -q /sys/fs/cgroup; then mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup fi ( cd /sys/fs/cgroup for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do mkdir -p $sys if ! mountpoint -q $sys; then if ! mount -n -t cgroup -o $sys cgroup $sys; then rmdir $sys || true fi fi done ) end script script # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) DOCKER=/usr/bin/$UPSTART_JOB DOCKER_OPTS= if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi exec "$DOCKER" daemon $DOCKER_OPTS end script # Don't emit "started" event until docker.sock is ready. # See https://github.com/docker/docker/issues/6647 post-start script DOCKER_OPTS= if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then while ! [ -e /var/run/docker.sock ]; do initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 echo "Waiting for /var/run/docker.sock" sleep 0.1 done echo "/var/run/docker.sock is up" fi end script docker-1.10.3/contrib/mkimage-alpine.sh000077500000000000000000000030751267010174400177310ustar00rootroot00000000000000#!/bin/sh set -e [ $(id -u) -eq 0 ] || { printf >&2 '%s requires root\n' "$0" exit 1 } usage() { printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository]\n' "$0" exit 1 } tmp() { TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) trap "rm -rf $TMP $ROOTFS" EXIT TERM INT } apkv() { curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 } getapk() { curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk | tar -xz -C $TMP sbin/apk.static } mkbase() { $TMP/sbin/apk.static --repository $MAINREPO --update-cache --allow-untrusted \ --root $ROOTFS --initdb add alpine-base } conf() { printf '%s\n' $MAINREPO > $ROOTFS/etc/apk/repositories printf '%s\n' $ADDITIONALREPO >> 
$ROOTFS/etc/apk/repositories } pack() { local id id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) docker tag $id alpine:latest docker run -i -t --rm alpine printf 'alpine:%s with id=%s created!\n' $REL $id } save() { [ $SAVE -eq 1 ] || return tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz } while getopts "hr:m:s" opt; do case $opt in r) REL=$OPTARG ;; m) MIRROR=$OPTARG ;; s) SAVE=1 ;; c) ADDITIONALREPO=community ;; *) usage ;; esac done REL=${REL:-edge} MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} SAVE=${SAVE:-0} MAINREPO=$MIRROR/$REL/main ADDITIONALREPO=$MIRROR/$REL/community ARCH=${ARCH:-$(uname -m)} tmp getapk mkbase conf pack save docker-1.10.3/contrib/mkimage-arch-pacman.conf000066400000000000000000000052161267010174400211420ustar00rootroot00000000000000# # /etc/pacman.conf # # See the pacman.conf(5) manpage for option and repository directives # # GENERAL OPTIONS # [options] # The following paths are commented out with their default values listed. # If you wish to use different paths, uncomment and update the paths. #RootDir = / #DBPath = /var/lib/pacman/ #CacheDir = /var/cache/pacman/pkg/ #LogFile = /var/log/pacman.log #GPGDir = /etc/pacman.d/gnupg/ HoldPkg = pacman glibc #XferCommand = /usr/bin/curl -C - -f %u > %o #XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u #CleanMethod = KeepInstalled #UseDelta = 0.7 Architecture = auto # Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup #IgnorePkg = #IgnoreGroup = #NoUpgrade = #NoExtract = # Misc options #UseSyslog #Color #TotalDownload # We cannot check disk space from within a chroot environment #CheckSpace #VerbosePkgLists # By default, pacman accepts packages signed by keys that its local keyring # trusts (see pacman-key and its man page), as well as unsigned packages. 
SigLevel = Required DatabaseOptional LocalFileSigLevel = Optional #RemoteFileSigLevel = Required # NOTE: You must run `pacman-key --init` before first using pacman; the local # keyring can then be populated with the keys of all official Arch Linux # packagers with `pacman-key --populate archlinux`. # # REPOSITORIES # - can be defined here or included from another file # - pacman will search repositories in the order defined here # - local/custom mirrors can be added here or in separate files # - repositories listed first will take precedence when packages # have identical names, regardless of version number # - URLs will have $repo replaced by the name of the current repo # - URLs will have $arch replaced by the name of the architecture # # Repository entries are of the format: # [repo-name] # Server = ServerName # Include = IncludePath # # The header [repo-name] is crucial - it must be present and # uncommented to enable the repo. # # The testing repositories are disabled by default. To enable, uncomment the # repo name header and Include lines. You can add preferred servers immediately # after the header, and they will be used before the default mirrors. #[testing] #Include = /etc/pacman.d/mirrorlist [core] Include = /etc/pacman.d/mirrorlist [extra] Include = /etc/pacman.d/mirrorlist #[community-testing] #Include = /etc/pacman.d/mirrorlist [community] Include = /etc/pacman.d/mirrorlist # An example of a custom package repository. See the pacman manpage for # tips on creating your own repositories. #[custom] #SigLevel = Optional TrustAll #Server = file:///home/custompkgs docker-1.10.3/contrib/mkimage-arch.sh000077500000000000000000000061031267010174400173710ustar00rootroot00000000000000#!/usr/bin/env bash # Generate a minimal filesystem for archlinux and load it into the local # docker as "archlinux" # requires root set -e hash pacstrap &>/dev/null || { echo "Could not find pacstrap. 
Run pacman -S arch-install-scripts" exit 1 } hash expect &>/dev/null || { echo "Could not find expect. Run pacman -S expect" exit 1 } export LANG="C.UTF-8" ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) chmod 755 $ROOTFS # packages to ignore for space savings PKGIGNORE=( cryptsetup device-mapper dhcpcd iproute2 jfsutils linux lvm2 man-db man-pages mdadm nano netctl openresolv pciutils pcmciautils reiserfsprogs s-nail systemd-sysvcompat usbutils vi xfsprogs ) IFS=',' PKGIGNORE="${PKGIGNORE[*]}" unset IFS case "$(uname -m)" in armv*) if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then pacman-key --init pacman-key --populate archlinuxarm else echo "Could not find archlinuxarm-keyring. Please, install it and run pacman-key --populate archlinuxarm" exit 1 fi PACMAN_CONF='./mkimage-archarm-pacman.conf' PACMAN_MIRRORLIST='Server = http://mirror.archlinuxarm.org/$arch/$repo' PACMAN_EXTRA_PKGS='archlinuxarm-keyring' EXPECT_TIMEOUT=120 ARCH_KEYRING=archlinuxarm DOCKER_IMAGE_NAME=archlinuxarm ;; *) PACMAN_CONF='./mkimage-arch-pacman.conf' PACMAN_MIRRORLIST='Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch' PACMAN_EXTRA_PKGS='' EXPECT_TIMEOUT=60 ARCH_KEYRING=archlinux DOCKER_IMAGE_NAME=archlinux ;; esac export PACMAN_MIRRORLIST expect < $ROOTFS/etc/locale.gen arch-chroot $ROOTFS locale-gen arch-chroot $ROOTFS /bin/sh -c 'echo $PACMAN_MIRRORLIST > /etc/pacman.d/mirrorlist' # udev doesn't work in containers, rebuild /dev DEV=$ROOTFS/dev rm -rf $DEV mkdir -p $DEV mknod -m 666 $DEV/null c 1 3 mknod -m 666 $DEV/zero c 1 5 mknod -m 666 $DEV/random c 1 8 mknod -m 666 $DEV/urandom c 1 9 mkdir -m 755 $DEV/pts mkdir -m 1777 $DEV/shm mknod -m 666 $DEV/tty c 5 0 mknod -m 600 $DEV/console c 5 1 mknod -m 666 $DEV/tty0 c 4 0 mknod -m 666 $DEV/full c 1 7 mknod -m 600 $DEV/initctl p mknod -m 666 $DEV/ptmx c 5 2 ln -sf /proc/self/fd $DEV/fd tar --numeric-owner --xattrs --acls -C $ROOTFS -c . 
| docker import - $DOCKER_IMAGE_NAME docker run --rm -t $DOCKER_IMAGE_NAME echo Success. rm -rf $ROOTFS docker-1.10.3/contrib/mkimage-archarm-pacman.conf000066400000000000000000000053461267010174400216460ustar00rootroot00000000000000# # /etc/pacman.conf # # See the pacman.conf(5) manpage for option and repository directives # # GENERAL OPTIONS # [options] # The following paths are commented out with their default values listed. # If you wish to use different paths, uncomment and update the paths. #RootDir = / #DBPath = /var/lib/pacman/ #CacheDir = /var/cache/pacman/pkg/ #LogFile = /var/log/pacman.log #GPGDir = /etc/pacman.d/gnupg/ HoldPkg = pacman glibc #XferCommand = /usr/bin/curl -C - -f %u > %o #XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u #CleanMethod = KeepInstalled #UseDelta = 0.7 Architecture = armv7h # Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup #IgnorePkg = #IgnoreGroup = #NoUpgrade = #NoExtract = # Misc options #UseSyslog #Color #TotalDownload # We cannot check disk space from within a chroot environment #CheckSpace #VerbosePkgLists # By default, pacman accepts packages signed by keys that its local keyring # trusts (see pacman-key and its man page), as well as unsigned packages. SigLevel = Required DatabaseOptional LocalFileSigLevel = Optional #RemoteFileSigLevel = Required # NOTE: You must run `pacman-key --init` before first using pacman; the local # keyring can then be populated with the keys of all official Arch Linux # packagers with `pacman-key --populate archlinux`. 
# # REPOSITORIES # - can be defined here or included from another file # - pacman will search repositories in the order defined here # - local/custom mirrors can be added here or in separate files # - repositories listed first will take precedence when packages # have identical names, regardless of version number # - URLs will have $repo replaced by the name of the current repo # - URLs will have $arch replaced by the name of the architecture # # Repository entries are of the format: # [repo-name] # Server = ServerName # Include = IncludePath # # The header [repo-name] is crucial - it must be present and # uncommented to enable the repo. # # The testing repositories are disabled by default. To enable, uncomment the # repo name header and Include lines. You can add preferred servers immediately # after the header, and they will be used before the default mirrors. #[testing] #Include = /etc/pacman.d/mirrorlist [core] Include = /etc/pacman.d/mirrorlist [extra] Include = /etc/pacman.d/mirrorlist #[community-testing] #Include = /etc/pacman.d/mirrorlist [community] Include = /etc/pacman.d/mirrorlist [alarm] Include = /etc/pacman.d/mirrorlist [aur] Include = /etc/pacman.d/mirrorlist # An example of a custom package repository. See the pacman manpage for # tips on creating your own repositories. #[custom] #SigLevel = Optional TrustAll #Server = file:///home/custompkgs docker-1.10.3/contrib/mkimage-busybox.sh000077500000000000000000000021511267010174400201460ustar00rootroot00000000000000#!/usr/bin/env bash # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "busybox". echo >&2 echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' echo >&2 BUSYBOX=$(which busybox) [ "$BUSYBOX" ] || { echo "Sorry, I could not locate busybox." echo "Try 'apt-get install busybox-static'?" 
exit 1 } set -e ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM mkdir $ROOTFS cd $ROOTFS mkdir bin etc dev dev/pts lib proc sys tmp touch etc/resolv.conf cp /etc/nsswitch.conf etc/nsswitch.conf echo root:x:0:0:root:/:/bin/sh > etc/passwd echo root:x:0: > etc/group ln -s lib lib64 ln -s bin sbin cp $BUSYBOX bin for X in $(busybox --list) do ln -s busybox bin/$X done rm bin/init ln bin/busybox bin/init cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib for X in console null ptmx random stdin stdout stderr tty urandom zero do cp -a /dev/$X dev done tar --numeric-owner -cf- . | docker import - busybox docker run -i -u root busybox /bin/echo Success. docker-1.10.3/contrib/mkimage-crux.sh000077500000000000000000000035671267010174400174500ustar00rootroot00000000000000#!/usr/bin/env bash # Generate a minimal filesystem for CRUX/Linux and load it into the local # docker as "cruxlinux" # requires root and the crux iso (http://crux.nu) set -e die () { echo >&2 "$@" exit 1 } [ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" ISO=${1} ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') # Mount the ISO mount -o ro,loop $ISO $CRUX # Extract pkgutils tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz # Put pkgadd in the $PATH export PATH="$TMP/usr/bin:$PATH" # Install core packages mkdir -p $ROOTFS/var/lib/pkg touch $ROOTFS/var/lib/pkg/db for pkg in $CRUX/crux/core/*; do pkgadd -r $ROOTFS $pkg done # Remove agetty and inittab config if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then echo "Removing agetty from /etc/inittab ..." 
chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab fi # Remove kernel source rm -rf $ROOTFS/usr/src/* # udev doesn't work in containers, rebuild /dev DEV=$ROOTFS/dev rm -rf $DEV mkdir -p $DEV mknod -m 666 $DEV/null c 1 3 mknod -m 666 $DEV/zero c 1 5 mknod -m 666 $DEV/random c 1 8 mknod -m 666 $DEV/urandom c 1 9 mkdir -m 755 $DEV/pts mkdir -m 1777 $DEV/shm mknod -m 666 $DEV/tty c 5 0 mknod -m 600 $DEV/console c 5 1 mknod -m 666 $DEV/tty0 c 4 0 mknod -m 666 $DEV/full c 1 7 mknod -m 600 $DEV/initctl p mknod -m 666 $DEV/ptmx c 5 2 IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) docker tag $IMAGE_ID crux:latest docker run -i -t crux echo Success. # Cleanup umount $CRUX rm -rf $ROOTFS rm -rf $CRUX rm -rf $TMP docker-1.10.3/contrib/mkimage-debootstrap.sh000077500000000000000000000217441267010174400210120ustar00rootroot00000000000000#!/usr/bin/env bash set -e echo >&2 echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' echo >&2 variant='minbase' include='iproute,iputils-ping' arch='amd64' # intentionally undocumented for now skipDetection= strictDebootstrap= justTar= usage() { echo >&2 echo >&2 "usage: $0 [options] repo suite [mirror]" echo >&2 echo >&2 'options: (not recommended)' echo >&2 " -p set an http_proxy for debootstrap" echo >&2 " -v $variant # change default debootstrap variant" echo >&2 " -i $include # change default package includes" echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" echo >&2 echo >&2 " ie: $0 username/debian squeeze" echo >&2 " $0 username/debian squeeze 
http://ftp.uk.debian.org/debian/" echo >&2 echo >&2 " ie: $0 username/ubuntu precise" echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" echo >&2 echo >&2 " ie: $0 -t precise.tar.bz2 precise" echo >&2 " $0 -t wheezy.tgz wheezy" echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" echo >&2 } # these should match the names found at http://www.debian.org/releases/ debianStable=wheezy debianUnstable=sid # this should match the name found at http://releases.ubuntu.com/ ubuntuLatestLTS=trusty # this should match the name found at http://releases.tanglu.org/ tangluLatest=aequorea while getopts v:i:a:p:dst name; do case "$name" in p) http_proxy="$OPTARG" ;; v) variant="$OPTARG" ;; i) include="$OPTARG" ;; a) arch="$OPTARG" ;; d) strictDebootstrap=1 ;; s) skipDetection=1 ;; t) justTar=1 ;; ?) usage exit 0 ;; esac done shift $(($OPTIND - 1)) repo="$1" suite="$2" mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided if [ ! "$repo" ] || [ ! "$suite" ]; then usage exit 1 fi # some rudimentary detection for whether we need to "sudo" our docker calls docker='' if docker version > /dev/null 2>&1; then docker='docker' elif sudo docker version > /dev/null 2>&1; then docker='sudo docker' elif command -v docker > /dev/null 2>&1; then docker='docker' else echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" echo >&2 " this script is not likely to work as expected" sleep 3 docker='docker' # give us a command-not-found later fi # make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory if [ "$justTar" ]; then if [ ! 
-d "$(dirname "$repo")" ]; then echo >&2 "error: $(dirname "$repo") does not exist" exit 1 fi repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" fi # will be filled in later, if [ -z "$skipDetection" ] lsbDist='' target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" if [ "$suite" = 'lucid' ]; then # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails include+=',gpgv' fi set -x # bootstrap mkdir -p "$target" sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" cd "$target" if [ -z "$strictDebootstrap" ]; then # prevent init scripts from running during install/update # policy-rc.d (for most scripts) echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null sudo chmod +x usr/sbin/policy-rc.d # initctl (for some pesky upstart scripts) sudo chroot . dpkg-divert --local --rename --add /sbin/initctl sudo ln -sf /bin/true sbin/initctl # see https://github.com/docker/docker/issues/446#issuecomment-16953173 # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) sudo chroot . 
apt-get clean if strings usr/bin/dpkg | grep -q unsafe-io; then # while we're at it, apt is unnecessarily slow inside containers # this forces dpkg not to call sync() after package extraction and speeds up install # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null # we have this wrapped up in an "if" because the "force-unsafe-io" # option was added in dpkg 1.15.8.6 # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), # and ubuntu lucid/10.04 only has 1.15.5.6 fi # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) { aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' echo "DPkg::Post-Invoke { ${aptGetClean} };" echo "APT::Update::Post-Invoke { ${aptGetClean} };" echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null # and remove the translations, too echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): # rm /usr/sbin/policy-rc.d # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup # rm /etc/apt/apt.conf.d/no-cache # rm /etc/apt/apt.conf.d/no-languages if [ -z "$skipDetection" ]; then # see also rudimentary platform detection in hack/install.sh lsbDist='' if [ -r etc/lsb-release ]; then lsbDist="$(. 
etc/lsb-release && echo "$DISTRIB_ID")" fi if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then lsbDist='Debian' fi case "$lsbDist" in Debian) # add the updates and security repositories if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then # ${suite}-updates only applies to non-unstable sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list # same for security updates echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null fi ;; Ubuntu) # add the universe, updates, and security repositories sudo sed -i " s/ $suite main$/ $suite main universe/; p; s/ $suite main/ ${suite}-updates main/; p; s/ $suite-updates main/ ${suite}-security main/ " etc/apt/sources.list ;; Tanglu) # add the updates repository if [ "$suite" = "$tangluLatest" ]; then # ${suite}-updates only applies to stable Tanglu versions sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list fi ;; SteamOS) # add contrib and non-free sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list ;; esac fi # make sure our packages lists are as up to date as we can get them sudo chroot . apt-get update sudo chroot . apt-get dist-upgrade -y fi if [ "$justTar" ]; then # create the tarball file so it has the right permissions (ie, not root) touch "$repo" # fill the tarball sudo tar --numeric-owner -caf "$repo" . else # create the image (and tag $repo:$suite) sudo tar --numeric-owner -c . 
| $docker import - $repo:$suite # test the image $docker run -i -t $repo:$suite echo success if [ -z "$skipDetection" ]; then case "$lsbDist" in Debian) if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then # tag latest $docker tag $repo:$suite $repo:latest if [ -r etc/debian_version ]; then # tag the specific debian release version (which is only reasonable to tag on debian stable) ver=$(cat etc/debian_version) $docker tag $repo:$suite $repo:$ver fi fi ;; Ubuntu) if [ "$suite" = "$ubuntuLatestLTS" ]; then # tag latest $docker tag $repo:$suite $repo:latest fi if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific Ubuntu version number, if available (12.04, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; Tanglu) if [ "$suite" = "$tangluLatest" ]; then # tag latest $docker tag $repo:$suite $repo:latest fi if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific Tanglu version number, if available (1.0, 2.0, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; SteamOS) if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific SteamOS version number, if available (1.0, 2.0, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; esac fi fi # cleanup cd "$returnTo" sudo rm -rf "$target" docker-1.10.3/contrib/mkimage-rinse.sh000077500000000000000000000066371267010174400176100ustar00rootroot00000000000000#!/usr/bin/env bash # # Create a base CentOS Docker image. # This script is useful on systems with rinse available (e.g., # building a CentOS image on Debian). See contrib/mkimage-yum.sh for # a way to build CentOS images on systems with yum installed. 
set -e echo >&2 echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' echo >&2 repo="$1" distro="$2" mirror="$3" if [ ! "$repo" ] || [ ! "$distro" ]; then self="$(basename $0)" echo >&2 "usage: $self repo distro [mirror]" echo >&2 echo >&2 " ie: $self username/centos centos-5" echo >&2 " $self username/centos centos-6" echo >&2 echo >&2 " ie: $self username/slc slc-5" echo >&2 " $self username/slc slc-6" echo >&2 echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" echo >&2 echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' echo >&2 ' expected values of "mirror".' echo >&2 echo >&2 'This script is tested to work with the original upstream version of rinse,' echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' 
echo >&2 exit 1 fi target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) if [ "$mirror" ]; then rinseArgs+=( --mirror "$mirror" ) fi set -x mkdir -p "$target" sudo rinse "${rinseArgs[@]}" cd "$target" # rinse fails a little at setting up /dev, so we'll just wipe it out and create our own sudo rm -rf dev sudo mkdir -m 755 dev ( cd dev sudo ln -sf /proc/self/fd ./ sudo mkdir -m 755 pts sudo mkdir -m 1777 shm sudo mknod -m 600 console c 5 1 sudo mknod -m 600 initctl p sudo mknod -m 666 full c 1 7 sudo mknod -m 666 null c 1 3 sudo mknod -m 666 ptmx c 5 2 sudo mknod -m 666 random c 1 8 sudo mknod -m 666 tty c 5 0 sudo mknod -m 666 tty0 c 4 0 sudo mknod -m 666 urandom c 1 9 sudo mknod -m 666 zero c 1 5 ) # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" # locales sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} # docs and man pages sudo rm -rf usr/share/{man,doc,info,gnome/help} # cracklib sudo rm -rf usr/share/cracklib # i18n sudo rm -rf usr/share/i18n # yum cache sudo rm -rf var/cache/yum sudo mkdir -p --mode=0755 var/cache/yum # sln sudo rm -rf sbin/sln # ldconfig #sudo rm -rf sbin/ldconfig sudo rm -rf etc/ld.so.cache var/cache/ldconfig sudo mkdir -p --mode=0755 var/cache/ldconfig # allow networking init scripts inside the container to work without extra steps echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null # to restore locales later: # yum reinstall glibc-common version= if [ -r etc/redhat-release ]; then version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" elif [ -r etc/SuSE-release ]; then version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" fi if [ -z "$version" ]; then echo >&2 "warning: cannot autodetect OS version, using $distro as tag" sleep 20 version="$distro" fi sudo tar 
--numeric-owner -c . | docker import - $repo:$version docker run -i -t $repo:$version echo success cd "$returnTo" sudo rm -rf "$target" docker-1.10.3/contrib/mkimage-yum.sh000077500000000000000000000070551267010174400172750ustar00rootroot00000000000000#!/usr/bin/env bash # # Create a base CentOS Docker image. # # This script is useful on systems with yum installed (e.g., building # a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way # to build CentOS images on other systems. usage() { cat < OPTIONS: -p "" The list of packages to install in the container. The default is blank. -g "" The groups of packages to install in the container. The default is "Core". -y The path to the yum config to install packages from. The default is /etc/yum.conf for Centos/RHEL and /etc/dnf/dnf.conf for Fedora EOOPTS exit 1 } # option defaults yum_config=/etc/yum.conf if [ -f /etc/dnf/dnf.conf ] && command -v dnf &> /dev/null; then yum_config=/etc/dnf/dnf.conf alias yum=dnf fi install_groups="Core" while getopts ":y:p:g:h" opt; do case $opt in y) yum_config=$OPTARG ;; h) usage ;; p) install_packages="$OPTARG" ;; g) install_groups="$OPTARG" ;; \?) 
echo "Invalid option: -$OPTARG" usage ;; esac done shift $((OPTIND - 1)) name=$1 if [[ -z $name ]]; then usage fi target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) set -x mkdir -m 755 "$target"/dev mknod -m 600 "$target"/dev/console c 5 1 mknod -m 600 "$target"/dev/initctl p mknod -m 666 "$target"/dev/full c 1 7 mknod -m 666 "$target"/dev/null c 1 3 mknod -m 666 "$target"/dev/ptmx c 5 2 mknod -m 666 "$target"/dev/random c 1 8 mknod -m 666 "$target"/dev/tty c 5 0 mknod -m 666 "$target"/dev/tty0 c 4 0 mknod -m 666 "$target"/dev/urandom c 1 9 mknod -m 666 "$target"/dev/zero c 1 5 # amazon linux yum will fail without vars set if [ -d /etc/yum/vars ]; then mkdir -p -m 755 "$target"/etc/yum cp -a /etc/yum/vars "$target"/etc/yum/ fi if [[ -n "$install_groups" ]]; then yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ --setopt=group_package_types=mandatory -y groupinstall $install_groups fi if [[ -n "$install_packages" ]]; then yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ --setopt=group_package_types=mandatory -y install $install_packages fi yum -c "$yum_config" --installroot="$target" -y clean all cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" version=$name fi tar --numeric-owner -c -C "$target" . 
| docker import - $name:$version docker run -i -t --rm $name:$version /bin/bash -c 'echo success' rm -rf "$target" docker-1.10.3/contrib/mkimage.sh000077500000000000000000000062621267010174400164640ustar00rootroot00000000000000#!/usr/bin/env bash set -e mkimg="$(basename "$0")" usage() { echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]" echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" echo >&2 " $mkimg -t someuser/busybox busybox-static" echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" exit 1 } scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") eval set -- "$optTemp" unset optTemp dir= tag= compression="auto" while true; do case "$1" in -d|--dir) dir="$2" ; shift 2 ;; -t|--tag) tag="$2" ; shift 2 ;; --compression) compression="$2" ; shift 2 ;; --no-compression) compression="none" ; shift 1 ;; -h|--help) usage ;; --) shift ; break ;; esac done script="$1" [ "$script" ] || usage shift if [ "$compression" == 'auto' ] || [ -z "$compression" ] then compression='xz' fi [ "$compression" == 'none' ] && compression='' if [ ! 
-x "$scriptDir/$script" ]; then echo >&2 "error: $script does not exist or is not executable" echo >&2 " see $scriptDir for possible scripts" exit 1 fi # don't mistake common scripts like .febootstrap-minimize as image-creators if [[ "$script" == .* ]]; then echo >&2 "error: $script is a script helper, not a script" echo >&2 " see $scriptDir for possible scripts" exit 1 fi delDir= if [ -z "$dir" ]; then dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" delDir=1 fi rootfsDir="$dir/rootfs" ( set -x; mkdir -p "$rootfsDir" ) # pass all remaining arguments to $script "$scriptDir/$script" "$rootfsDir" "$@" # Docker mounts tmpfs at /dev and procfs at /proc so we can remove them rm -rf "$rootfsDir/dev" "$rootfsDir/proc" mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" # make sure /etc/resolv.conf has something useful in it mkdir -p "$rootfsDir/etc" cat > "$rootfsDir/etc/resolv.conf" <<'EOF' nameserver 8.8.8.8 nameserver 8.8.4.4 EOF tarFile="$dir/rootfs.tar${compression:+.$compression}" touch "$tarFile" ( set -x tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . 
) echo >&2 "+ cat > '$dir/Dockerfile'" cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) break fi done ( set -x; rm -rf "$rootfsDir" ) if [ "$tag" ]; then ( set -x; docker build -t "$tag" "$dir" ) elif [ "$delDir" ]; then # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ ( set -x; docker build "$dir" ) fi if [ "$delDir" ]; then ( set -x; rm -rf "$dir" ) fi docker-1.10.3/contrib/mkimage/000077500000000000000000000000001267010174400161175ustar00rootroot00000000000000docker-1.10.3/contrib/mkimage/.febootstrap-minimize000077500000000000000000000011571267010174400222760ustar00rootroot00000000000000#!/usr/bin/env bash set -e rootfsDir="$1" shift ( cd "$rootfsDir" # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" # locales rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} # docs and man pages rm -rf usr/share/{man,doc,info,gnome/help} # cracklib rm -rf usr/share/cracklib # i18n rm -rf usr/share/i18n # yum cache rm -rf var/cache/yum mkdir -p --mode=0755 var/cache/yum # sln rm -rf sbin/sln # ldconfig #rm -rf sbin/ldconfig rm -rf etc/ld.so.cache var/cache/ldconfig mkdir -p --mode=0755 var/cache/ldconfig ) docker-1.10.3/contrib/mkimage/busybox-static000077500000000000000000000014161267010174400210270ustar00rootroot00000000000000#!/usr/bin/env bash set -e rootfsDir="$1" shift busybox="$(which busybox 2>/dev/null || true)" if [ -z "$busybox" ]; then echo >&2 'error: busybox: not found' echo >&2 ' install it with your distribution "busybox-static" package' exit 1 fi if ! 
ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then echo >&2 "error: '$busybox' appears to be a dynamic executable" echo >&2 ' you should install your distribution "busybox-static" package instead' exit 1 fi mkdir -p "$rootfsDir/bin" rm -f "$rootfsDir/bin/busybox" # just in case cp "$busybox" "$rootfsDir/bin/busybox" ( cd "$rootfsDir" IFS=$'\n' modules=( $(bin/busybox --list-modules) ) unset IFS for module in "${modules[@]}"; do mkdir -p "$(dirname "$module")" ln -sf /bin/busybox "$module" done ) docker-1.10.3/contrib/mkimage/debootstrap000077500000000000000000000207271267010174400204030ustar00rootroot00000000000000#!/usr/bin/env bash set -e rootfsDir="$1" shift # we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap before=() while [ $# -gt 0 ] && [[ "$1" == -* ]]; do before+=( "$1" ) shift done suite="$1" shift # get path to "chroot" in our current PATH chrootPath="$(type -P chroot)" rootfs_chroot() { # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! # set PATH and chroot away! PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ "$chrootPath" "$rootfsDir" "$@" } # allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... : ${DEBOOTSTRAP:=debootstrap} ( set -x $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" ) # now for some Docker-specific tweaks # prevent init scripts from running during install/update echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' #!/bin/sh # For most Docker users, "apt-get install" only happens during "docker build", # where starting services doesn't work and often fails in humorous ways. This # prevents those failures by stopping the services from attempting to start. 
exit 101 EOF chmod +x "$rootfsDir/usr/sbin/policy-rc.d" # prevent upstart scripts from running during install/update ( set -x rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" ) # shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) ( set -x; rootfs_chroot apt-get clean ) # this file is one APT creates to make sure we don't "autoremove" our currently # in-use kernel, which doesn't really apply to debootstraps/Docker images that # don't even have kernels installed rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" # Ubuntu 10.04 sucks... :) if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then # force dpkg not to call sync() after package extraction (speeding up installs) echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' # For most Docker users, package installs happen during "docker build", which # doesn't survive power loss and gets restarted clean afterwards anyhow, so # this minor tweak gives us a nice speedup (much nicer on spinning disks, # obviously). force-unsafe-io EOF fi if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then # _keep_ us lean by effectively running "apt-get clean" after every install aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF # Since for most Docker users, package installs happen in "docker build" steps, # they essentially become individual layers due to the way Docker handles # layering, especially using CoW filesystems. 
What this means for us is that # the caches that APT keeps end up just wasting space in those layers, making # our layers unnecessarily large (especially since we'll normally never use # these caches again and will instead just "docker build" again and make a brand # new image). # Ideally, these would just be invoking "apt-get clean", but in our testing, # that ended up being cyclic and we got stuck on APT's lock, so we get this fun # creation that's essentially just "apt-get clean". DPkg::Post-Invoke { ${aptGetClean} }; APT::Update::Post-Invoke { ${aptGetClean} }; Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache ""; # Note that we do realize this isn't the ideal way to do this, and are always # open to better suggestions (https://github.com/docker/docker/issues). EOF # remove apt-cache translations for fast "apt-get update" echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' # In Docker, we don't often need the "Translations" files, so we're just wasting # time and space by downloading them, and this inhibits that. For users that do # need them, it's a simple matter to delete this file and "apt-get update". :) Acquire::Languages "none"; EOF echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' # Since Docker users using "RUN apt-get update && apt-get install -y ..." in # their Dockerfiles don't go delete the lists files afterwards, we want them to # be as small as possible on-disk, so we explicitly request "gz" versions and # tell Apt to keep them gzipped on-disk. # For comparison, an "apt-get update" layer without this on a pristine # "debian:wheezy" base image was "29.88 MB", where with this it was only # "8.273 MB". 
Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz"; EOF # update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'" cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF' # Since Docker users are looking for the smallest possible final images, the # following emerges as a very common pattern: # RUN apt-get update \ # && apt-get install -y \ # && \ # && apt-get purge -y --auto-remove # By default, APT will actually _keep_ packages installed via Recommends or # Depends if another package Suggests them, even and including if the package # that originally caused them to be installed is removed. Setting this to # "false" ensures that APT is appropriately aggressive about removing the # packages it added. # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant Apt::AutoRemove::SuggestsImportant "false"; EOF fi if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then # tweak sources.list, where appropriate lsbDist= if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" fi if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" fi if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then lsbDist='Debian' fi # normalize to lowercase for easier matching lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" case "$lsbDist" in debian) # updates and security! 
if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then ( set -x sed -i " p; s/ $suite / ${suite}-updates / " "$rootfsDir/etc/apt/sources.list" echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" # squeeze-lts if [ -f "$rootfsDir/etc/debian_version" ]; then ltsSuite= case "$(cat "$rootfsDir/etc/debian_version")" in 6.*) ltsSuite='squeeze-lts' ;; #7.*) ltsSuite='wheezy-lts' ;; #8.*) ltsSuite='jessie-lts' ;; esac if [ "$ltsSuite" ]; then head -1 "$rootfsDir/etc/apt/sources.list" \ | sed "s/ $suite / $ltsSuite /" \ >> "$rootfsDir/etc/apt/sources.list" fi fi ) fi ;; ubuntu) # add the updates and security repositories ( set -x sed -i " p; s/ $suite / ${suite}-updates /; p; s/ $suite-updates / ${suite}-security / " "$rootfsDir/etc/apt/sources.list" ) ;; tanglu) # add the updates repository if [ "$suite" != 'devel' ]; then ( set -x sed -i " p; s/ $suite / ${suite}-updates / " "$rootfsDir/etc/apt/sources.list" ) fi ;; steamos) # add contrib and non-free if "main" is the only component ( set -x sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" ) ;; esac fi ( set -x # make sure we're fully up-to-date rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' # delete all the apt list files since they're big and get stale quickly rm -rf "$rootfsDir/var/lib/apt/lists"/* # this forces "apt-get update" in dependent images, which is also good mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." ) docker-1.10.3/contrib/mkimage/mageia-urpmi000077500000000000000000000027611267010174400204300ustar00rootroot00000000000000#!/usr/bin/env bash # # Needs to be run from Mageia 4 or greater for kernel support for docker. # # Mageia 4 does not have docker available in official repos, so please # install and run the docker binary manually. # # Tested working versions are for Mageia 2 onwards (inc. cauldron). 
# set -e rootfsDir="$1" shift optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") eval set -- "$optTemp" unset optTemp installversion= mirror= while true; do case "$1" in -v|--version) installversion="$2" ; shift 2 ;; -m|--mirror) mirror="$2" ; shift 2 ;; --) shift ; break ;; esac done if [ -z $installversion ]; then # Attempt to match host version if [ -r /etc/mageia-release ]; then installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" else echo "Error: no version supplied and unable to detect host mageia version" exit 1 fi fi if [ -z $mirror ]; then # No mirror provided, default to mirrorlist mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" fi ( set -x urpmi.addmedia --distrib \ $mirror \ --urpmi-root "$rootfsDir" urpmi basesystem-minimal urpmi \ --auto \ --no-suggests \ --urpmi-root "$rootfsDir" \ --root "$rootfsDir" ) "$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" if [ -d "$rootfsDir/etc/sysconfig" ]; then # allow networking init scripts inside the container to work without extra steps echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" fi docker-1.10.3/contrib/mkimage/rinse000077500000000000000000000010421267010174400171620ustar00rootroot00000000000000#!/usr/bin/env bash set -e rootfsDir="$1" shift # specifying --arch below is safe because "$@" can override it and the "latest" one wins :) ( set -x rinse --directory "$rootfsDir" --arch amd64 "$@" ) "$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" if [ -d "$rootfsDir/etc/sysconfig" ]; then # allow networking init scripts inside the container to work without extra steps echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" fi # make sure we're fully up-to-date, too ( set -x chroot "$rootfsDir" yum update -y ) docker-1.10.3/contrib/nuke-graph-directory.sh000077500000000000000000000027401267010174400211120ustar00rootroot00000000000000#!/bin/sh set -e dir="$1" if [ -z 
"$dir" ]; then { echo 'This script is for destroying old /var/lib/docker directories more safely than' echo ' "rm -rf", which can cause data loss or other serious issues.' echo echo "usage: $0 directory" echo " ie: $0 /var/lib/docker" } >&2 exit 1 fi if [ "$(id -u)" != 0 ]; then echo >&2 "error: $0 must be run as root" exit 1 fi if [ ! -d "$dir" ]; then echo >&2 "error: $dir is not a directory" exit 1 fi dir="$(readlink -f "$dir")" echo echo "Nuking $dir ..." echo ' (if this is wrong, press Ctrl+C NOW!)' echo ( set -x; sleep 10 ) echo dir_in_dir() { inner="$1" outer="$2" [ "${inner#$outer}" != "$inner" ] } # let's start by unmounting any submounts in $dir # (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do mount="$(readlink -f "$mount" || true)" if dir_in_dir "$mount" "$dir"; then ( set -x; umount -f "$mount" ) fi done # now, let's go destroy individual btrfs subvolumes, if any exist if command -v btrfs > /dev/null 2>&1; then root="$(df "$dir" | awk 'NR>1 { print $NF }')" root="${root%/}" # if root is "/", we want it to become "" for subvol in $(btrfs subvolume list -o "$root/" 2>/dev/null | awk -F' path ' '{ print $2 }' | sort -r); do subvolDir="$root/$subvol" if dir_in_dir "$subvolDir" "$dir"; then ( set -x; btrfs subvolume delete "$subvolDir" ) fi done fi # finally, DESTROY ALL THINGS ( set -x; rm -rf "$dir" ) docker-1.10.3/contrib/project-stats.sh000077500000000000000000000007351267010174400176530ustar00rootroot00000000000000#!/usr/bin/env bash ## Run this script from the root of the docker repository ## to query project stats useful to the maintainers. 
## You will need to install `pulls` and `issues` from ## https://github.com/crosbymichael/pulls set -e echo -n "Open pulls: " PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 echo $PULLS echo -n "Pulls alru: " pulls alru echo -n "Open issues: " ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 echo $ISSUES echo -n "Issues alru: " issues alru docker-1.10.3/contrib/report-issue.sh000066400000000000000000000037471267010174400175150ustar00rootroot00000000000000#!/bin/sh # This is a convenience script for reporting issues that include a base # template of information. See https://github.com/docker/docker/pull/8845 set -e DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"} DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "} DOCKER=${DOCKER:-"docker"} DOCKER_COMMAND="${DOCKER}" export DOCKER_COMMAND # pulled from https://gist.github.com/cdown/1163649 function urlencode() { # urlencode local length="${#1}" for (( i = 0; i < length; i++ )); do local c="${1:i:1}" case $c in [a-zA-Z0-9.~_-]) printf "$c" ;; *) printf '%%%02X' "'$c" esac done } function template() { # this should always match the template from CONTRIBUTING.md cat <<- EOM Description of problem: \`docker version\`: `${DOCKER_COMMAND} -D version` \`docker info\`: `${DOCKER_COMMAND} -D info` \`uname -a\`: `uname -a` Environment details (AWS, VirtualBox, physical, etc.): How reproducible: Steps to Reproduce: 1. 2. 3. Actual Results: Expected Results: Additional info: EOM } function format_issue_url() { if [ ${#@} -ne 2 ] ; then return 1 fi local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}") local issue_body=$(urlencode "${2}") echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}" } echo -ne "Do you use \`sudo\` to call docker? 
[y|N]: " read -r -n 1 use_sudo echo "" if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then export DOCKER_COMMAND="sudo ${DOCKER}" fi echo -ne "Title of new issue?: " read -r issue_title echo "" issue_url=$(format_issue_url "${issue_title}" "$(template)") if which xdg-open 2>/dev/null >/dev/null ; then echo -ne "Would like to launch this report in your browser? [Y|n]: " read -r -n 1 launch_now echo "" if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then xdg-open "${issue_url}" fi fi echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}" docker-1.10.3/contrib/reprepro/000077500000000000000000000000001267010174400163435ustar00rootroot00000000000000docker-1.10.3/contrib/reprepro/suites.sh000077500000000000000000000010201267010174400202070ustar00rootroot00000000000000#!/bin/bash set -e cd "$(dirname "$BASH_SOURCE")/../.." targets_from() { git fetch -q https://github.com/docker/docker.git "$1" git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|-debootstrap|/Dockerfile$!!g' } release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1) { targets_from master; targets_from "$release_branch"; } | sort -u docker-1.10.3/contrib/syntax/000077500000000000000000000000001267010174400160335ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/kate/000077500000000000000000000000001267010174400167575ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/kate/Dockerfile.xml000066400000000000000000000050301267010174400215460ustar00rootroot00000000000000 FROM MAINTAINER ENV RUN ONBUILD COPY ADD VOLUME EXPOSE ENTRYPOINT CMD WORKDIR USER LABEL STOPSIGNAL 
docker-1.10.3/contrib/syntax/nano/000077500000000000000000000000001267010174400167665ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/nano/Dockerfile.nanorc000066400000000000000000000014771267010174400222500ustar00rootroot00000000000000## Syntax highlighting for Dockerfiles syntax "Dockerfile" "Dockerfile[^/]*$" ## Keywords icolor red "^(FROM|MAINTAINER|RUN|CMD|LABEL|EXPOSE|ENV|ADD|COPY|ENTRYPOINT|VOLUME|USER|WORKDIR|ONBUILD)[[:space:]]" ## Brackets & parenthesis color brightgreen "(\(|\)|\[|\])" ## Double ampersand color brightmagenta "&&" ## Comments icolor cyan "^[[:space:]]*#.*$" ## Blank space at EOL color ,green "[[:space:]]+$" ## Strings, single-quoted color brightwhite "'([^']|(\\'))*'" "%[qw]\{[^}]*\}" "%[qw]\([^)]*\)" "%[qw]<[^>]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!" ## Strings, double-quoted color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!" ## Single and double quotes color brightyellow "('|\")" docker-1.10.3/contrib/syntax/nano/README.md000066400000000000000000000015021267010174400202430ustar00rootroot00000000000000Dockerfile.nanorc ================= Dockerfile syntax highlighting for nano Single User Installation ------------------------ 1. Create a nano syntax directory in your home directory: * `mkdir -p ~/.nano/syntax` 2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/` * `cp Dockerfile.nanorc ~/.nano/syntax/` 3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file ``` ## Dockerfile files include "~/.nano/syntax/Dockerfile.nanorc" ``` System Wide Installation ------------------------ 1. Create a nano syntax directory: * `mkdir /usr/local/share/nano` 2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano` * `cp Dockerfile.nanorc /usr/local/share/nano/` 3. 
Add the following to your `/etc/nanorc`: ``` ## Dockerfile files include "/usr/local/share/nano/Dockerfile.nanorc" ``` docker-1.10.3/contrib/syntax/textmate/000077500000000000000000000000001267010174400176665ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/textmate/Docker.tmbundle/000077500000000000000000000000001267010174400227065ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/textmate/Docker.tmbundle/Preferences/000077500000000000000000000000001267010174400251475ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences000066400000000000000000000011021267010174400317340ustar00rootroot00000000000000 name Comments scope source.dockerfile settings shellVariables name TM_COMMENT_START value # uuid 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 docker-1.10.3/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/000077500000000000000000000000001267010174400245245ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage000066400000000000000000000063501267010174400306050ustar00rootroot00000000000000 fileTypes Dockerfile name Dockerfile patterns captures 1 name keyword.control.dockerfile 2 name keyword.other.special-method.dockerfile match ^\s*(?:(ONBUILD)\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR|COPY|LABEL|STOPSIGNAL|ARG)\s captures 1 name keyword.operator.dockerfile 2 name keyword.other.special-method.dockerfile match ^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s begin " beginCaptures 1 name punctuation.definition.string.begin.dockerfile end " endCaptures 1 name punctuation.definition.string.end.dockerfile name string.quoted.double.dockerfile patterns match \\. name constant.character.escaped.dockerfile begin ' beginCaptures 1 name punctuation.definition.string.begin.dockerfile end ' endCaptures 1 name punctuation.definition.string.end.dockerfile name string.quoted.single.dockerfile patterns match \\. 
name constant.character.escaped.dockerfile captures 1 name punctuation.whitespace.comment.leading.dockerfile 2 name comment.line.number-sign.dockerfile 3 name punctuation.definition.comment.dockerfile comment comment.line match ^(\s*)((#).*$\n?) scopeName source.dockerfile uuid a39d8795-59d2-49af-aa00-fe74ee29576e docker-1.10.3/contrib/syntax/textmate/Docker.tmbundle/info.plist000066400000000000000000000007511267010174400247210ustar00rootroot00000000000000 contactEmailRot13 germ@andz.com.ar contactName GermanDZ description Helpers for Docker. name Docker uuid 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 docker-1.10.3/contrib/syntax/textmate/README.md000066400000000000000000000007171267010174400211520ustar00rootroot00000000000000# Docker.tmbundle Dockerfile syntax highlighting for TextMate and Sublime Text. ## Install ### Sublime Text Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). Search for *Dockerfile Syntax Highlighting* ### TextMate 2 You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be automatically updated for you. enjoy. docker-1.10.3/contrib/syntax/textmate/REVIEWERS000066400000000000000000000000651267010174400211650ustar00rootroot00000000000000Asbjorn Enge (@asbjornenge) docker-1.10.3/contrib/syntax/vim/000077500000000000000000000000001267010174400166265ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/vim/LICENSE000066400000000000000000000024221267010174400176330ustar00rootroot00000000000000Copyright (c) 2013 Honza Pokorny All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-1.10.3/contrib/syntax/vim/README.md000066400000000000000000000006561267010174400201140ustar00rootroot00000000000000dockerfile.vim ============== Syntax highlighting for Dockerfiles Installation ------------ With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... With [Vundle](https://github.com/gmarik/Vundle.vim) Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} Features -------- The syntax highlighting includes: * The directives (e.g. `FROM`) * Strings * Comments License ------- BSD, short and sweet docker-1.10.3/contrib/syntax/vim/doc/000077500000000000000000000000001267010174400173735ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/vim/doc/dockerfile.txt000066400000000000000000000006651267010174400222520ustar00rootroot00000000000000*dockerfile.txt* Syntax highlighting for Dockerfiles Author: Honza Pokorny License: BSD INSTALLATION *installation* Drop it on your Pathogen path and you're all set. FEATURES *features* The syntax highlighting includes: * The directives (e.g. 
FROM) * Strings * Comments vim:tw=78:et:ft=help:norl: docker-1.10.3/contrib/syntax/vim/ftdetect/000077500000000000000000000000001267010174400204305ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/vim/ftdetect/dockerfile.vim000066400000000000000000000001111267010174400232450ustar00rootroot00000000000000au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile docker-1.10.3/contrib/syntax/vim/syntax/000077500000000000000000000000001267010174400201545ustar00rootroot00000000000000docker-1.10.3/contrib/syntax/vim/syntax/dockerfile.vim000066400000000000000000000023531267010174400230030ustar00rootroot00000000000000" dockerfile.vim - Syntax highlighting for Dockerfiles " Maintainer: Honza Pokorny " Version: 0.5 if exists("b:current_syntax") finish endif let b:current_syntax = "dockerfile" syntax case ignore syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|LABEL|VOLUME|WORKDIR|COPY|STOPSIGNAL|ARG)\s/ highlight link dockerfileKeyword Keyword syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ highlight link dockerfileString String syntax match dockerfileComment "\v^\s*#.*$" highlight link dockerfileComment Comment set commentstring=#\ %s " match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell let s:current_syntax = b:current_syntax unlet b:current_syntax syntax include @SH syntax/sh.vim let b:current_syntax = s:current_syntax syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH " since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line continuation... 
not sure how to fix that just yet (TODO) docker-1.10.3/contrib/syscall-test/000077500000000000000000000000001267010174400171345ustar00rootroot00000000000000docker-1.10.3/contrib/syscall-test/Dockerfile000066400000000000000000000005321267010174400211260ustar00rootroot00000000000000FROM debian:jessie RUN apt-get update && apt-get install -y \ gcc \ libc6-dev \ --no-install-recommends \ && rm -rf /var/lib/apt/lists/* COPY . /usr/src/ WORKDIR /usr/src/ RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \ && gcc -g -Wall -static ns.c -o /usr/bin/ns-test \ && gcc -g -Wall -static acct.c -o /usr/bin/acct-test docker-1.10.3/contrib/syscall-test/acct.c000066400000000000000000000004571267010174400202200ustar00rootroot00000000000000#define _GNU_SOURCE #include #include #include #include #include int main(int argc, char **argv) { int err = acct("/tmp/t"); if (err == -1) { fprintf(stderr, "acct failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } exit(EXIT_SUCCESS); } docker-1.10.3/contrib/syscall-test/ns.c000066400000000000000000000032211267010174400177160ustar00rootroot00000000000000#define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */ struct clone_args { char **argv; }; // child_exec is the func that will be executed as the result of clone static int child_exec(void *stuff) { struct clone_args *args = (struct clone_args *)stuff; if (execvp(args->argv[0], args->argv) != 0) { fprintf(stderr, "failed to execvp argments %s\n", strerror(errno)); exit(-1); } // we should never reach here! 
exit(EXIT_FAILURE); } int main(int argc, char **argv) { struct clone_args args; args.argv = &argv[1]; int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD; // allocate stack for child char *stack; /* Start of stack buffer */ char *child_stack; /* End of stack buffer */ stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); if (stack == MAP_FAILED) { fprintf(stderr, "mmap failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ // the result of this call is that our child_exec will be run in another // process returning it's pid pid_t pid = clone(child_exec, child_stack, clone_flags, &args); if (pid < 0) { fprintf(stderr, "clone failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } // lets wait on our child process here before we, the parent, exits if (waitpid(pid, NULL, 0) == -1) { fprintf(stderr, "failed to wait pid %d\n", pid); exit(EXIT_FAILURE); } exit(EXIT_SUCCESS); } docker-1.10.3/contrib/syscall-test/userns.c000066400000000000000000000032041267010174400206160ustar00rootroot00000000000000#define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */ struct clone_args { char **argv; }; // child_exec is the func that will be executed as the result of clone static int child_exec(void *stuff) { struct clone_args *args = (struct clone_args *)stuff; if (execvp(args->argv[0], args->argv) != 0) { fprintf(stderr, "failed to execvp argments %s\n", strerror(errno)); exit(-1); } // we should never reach here! 
exit(EXIT_FAILURE); } int main(int argc, char **argv) { struct clone_args args; args.argv = &argv[1]; int clone_flags = CLONE_NEWUSER | SIGCHLD; // allocate stack for child char *stack; /* Start of stack buffer */ char *child_stack; /* End of stack buffer */ stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); if (stack == MAP_FAILED) { fprintf(stderr, "mmap failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ // the result of this call is that our child_exec will be run in another // process returning it's pid pid_t pid = clone(child_exec, child_stack, clone_flags, &args); if (pid < 0) { fprintf(stderr, "clone failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } // lets wait on our child process here before we, the parent, exits if (waitpid(pid, NULL, 0) == -1) { fprintf(stderr, "failed to wait pid %d\n", pid); exit(EXIT_FAILURE); } exit(EXIT_SUCCESS); } docker-1.10.3/contrib/udev/000077500000000000000000000000001267010174400154505ustar00rootroot00000000000000docker-1.10.3/contrib/udev/80-docker.rules000066400000000000000000000005271267010174400202240ustar00rootroot00000000000000# hide docker's loopback devices from udisks, and thus from user desktops SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" docker-1.10.3/contrib/vagrant-docker/000077500000000000000000000000001267010174400174145ustar00rootroot00000000000000docker-1.10.3/contrib/vagrant-docker/README.md000066400000000000000000000040001267010174400206650ustar00rootroot00000000000000# Vagrant integration Currently there are at least 4 different projects that we are aware of that deals with integration with [Vagrant](http://vagrantup.com/) at different levels. 
One approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html) which means you can create containers and pull base images on VMs using Docker's CLI and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html), meaning you can use Vagrant to control Docker containers. ### Provisioners * [Vocker](https://github.com/fgrehm/vocker) * [Ventriloquist](https://github.com/fgrehm/ventriloquist) ### Providers * [docker-provider](https://github.com/fgrehm/docker-provider) * [vagrant-shell](https://github.com/destructuring/vagrant-shell) ## Setting up Vagrant-docker with the Remote API The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this: ``` description "Docker daemon" start on filesystem stop on runlevel [!2345] respawn script /usr/bin/docker daemon -H=tcp://0.0.0.0:2375 end script ``` Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: ``` ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost ``` (The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) 
Note that because the port has been changed, to run docker commands from within the command line you must run them like this: ``` sudo docker -H 0.0.0.0:2375 < commands for docker > ``` docker-1.10.3/daemon/000077500000000000000000000000001267010174400143105ustar00rootroot00000000000000docker-1.10.3/daemon/README.md000066400000000000000000000002741267010174400155720ustar00rootroot00000000000000This directory contains code pertaining to running containers and storing images Code pertaining to running containers: - execdriver Code pertaining to storing images: - graphdriver docker-1.10.3/daemon/archive.go000066400000000000000000000240121267010174400162570ustar00rootroot00000000000000package daemon import ( "errors" "io" "os" "path/filepath" "strings" "github.com/docker/docker/container" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/ioutils" "github.com/docker/engine-api/types" ) // ErrExtractPointNotDirectory is used to convey that the operation to extract // a tar archive to a directory in a container has failed because the specified // path does not refer to a directory. var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") // ContainerCopy performs a deprecated operation of archiving the resource at // the specified path in the container identified by the given name. func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { container, err := daemon.GetContainer(name) if err != nil { return nil, err } if res[0] == '/' || res[0] == '\\' { res = res[1:] } return daemon.containerCopy(container, res) } // ContainerStatPath stats the filesystem resource at the specified path in the // container identified by the given name. 
func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { container, err := daemon.GetContainer(name) if err != nil { return nil, err } return daemon.containerStatPath(container, path) } // ContainerArchivePath creates an archive of the filesystem resource at the // specified path in the container identified by the given name. Returns a // tar archive of the resource and whether it was a directory or a single file. func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { container, err := daemon.GetContainer(name) if err != nil { return nil, nil, err } return daemon.containerArchivePath(container, path) } // ContainerExtractToDir extracts the given archive to the specified location // in the filesystem of the container identified by the given name. The given // path must be of a directory in the container. If it is not, the error will // be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will // be an error if unpacking the given content would cause an existing directory // to be replaced with a non-directory and vice versa. func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { container, err := daemon.GetContainer(name) if err != nil { return err } return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) } // containerStatPath stats the filesystem resource at the specified path in this // container. Returns stat info about the resource. 
func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { container.Lock() defer container.Unlock() if err = daemon.Mount(container); err != nil { return nil, err } defer daemon.Unmount(container) err = daemon.mountVolumes(container) defer container.UnmountVolumes(true, daemon.LogVolumeEvent) if err != nil { return nil, err } resolvedPath, absPath, err := container.ResolvePath(path) if err != nil { return nil, err } return container.StatPath(resolvedPath, absPath) } // containerArchivePath creates an archive of the filesystem resource at the specified // path in this container. Returns a tar archive of the resource and stat info // about the resource. func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { container.Lock() defer func() { if err != nil { // Wait to unlock the container until the archive is fully read // (see the ReadCloseWrapper func below) or if there is an error // before that occurs. container.Unlock() } }() if err = daemon.Mount(container); err != nil { return nil, nil, err } defer func() { if err != nil { // unmount any volumes container.UnmountVolumes(true, daemon.LogVolumeEvent) // unmount the container's rootfs daemon.Unmount(container) } }() if err = daemon.mountVolumes(container); err != nil { return nil, nil, err } resolvedPath, absPath, err := container.ResolvePath(path) if err != nil { return nil, nil, err } stat, err = container.StatPath(resolvedPath, absPath) if err != nil { return nil, nil, err } // We need to rebase the archive entries if the last element of the // resolved path was a symlink that was evaluated and is now different // than the requested path. For example, if the given path was "/foo/bar/", // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want // to ensure that the archive entries start with "bar" and not "baz". 
This // also catches the case when the root directory of the container is // requested: we want the archive entries to start with "/" and not the // container ID. data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) if err != nil { return nil, nil, err } content = ioutils.NewReadCloserWrapper(data, func() error { err := data.Close() container.UnmountVolumes(true, daemon.LogVolumeEvent) daemon.Unmount(container) container.Unlock() return err }) daemon.LogContainerEvent(container, "archive-path") return content, stat, nil } // containerExtractToDir extracts the given tar archive to the specified location in the // filesystem of this container. The given path must be of a directory in the // container. If it is not, the error will be ErrExtractPointNotDirectory. If // noOverwriteDirNonDir is true then it will be an error if unpacking the // given content would cause an existing directory to be replaced with a non- // directory and vice versa. func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { container.Lock() defer container.Unlock() if err = daemon.Mount(container); err != nil { return err } defer daemon.Unmount(container) err = daemon.mountVolumes(container) defer container.UnmountVolumes(true, daemon.LogVolumeEvent) if err != nil { return err } // The destination path needs to be resolved to a host path, with all // symbolic links followed in the scope of the container's rootfs. Note // that we do not use `container.ResolvePath(path)` here because we need // to also evaluate the last path element if it is a symlink. This is so // that you can extract an archive to a symlink that points to a directory. // Consider the given path as an absolute path in the container. absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) // This will evaluate the last path element if it is a symlink. 
resolvedPath, err := container.GetResourcePath(absPath) if err != nil { return err } stat, err := os.Lstat(resolvedPath) if err != nil { return err } if !stat.IsDir() { return ErrExtractPointNotDirectory } // Need to check if the path is in a volume. If it is, it cannot be in a // read-only volume. If it is not in a volume, the container cannot be // configured with a read-only rootfs. // Use the resolved path relative to the container rootfs as the new // absPath. This way we fully follow any symlinks in a volume that may // lead back outside the volume. // // The Windows implementation of filepath.Rel in golang 1.4 does not // support volume style file path semantics. On Windows when using the // filter driver, we are guaranteed that the path will always be // a volume file path. var baseRel string if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { if strings.HasPrefix(resolvedPath, container.BaseFS) { baseRel = resolvedPath[len(container.BaseFS):] if baseRel[:1] == `\` { baseRel = baseRel[1:] } } } else { baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) } if err != nil { return err } // Make it an absolute path. absPath = filepath.Join(string(filepath.Separator), baseRel) toVolume, err := checkIfPathIsInAVolume(container, absPath) if err != nil { return err } if !toVolume && container.HostConfig.ReadonlyRootfs { return ErrRootFSReadOnly } uid, gid := daemon.GetRemappedUIDGID() options := &archive.TarOptions{ NoOverwriteDirNonDir: noOverwriteDirNonDir, ChownOpts: &archive.TarChownOptions{ UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? 
}, } if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { return err } daemon.LogContainerEvent(container, "extract-to-dir") return nil } func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { container.Lock() defer func() { if err != nil { // Wait to unlock the container until the archive is fully read // (see the ReadCloseWrapper func below) or if there is an error // before that occurs. container.Unlock() } }() if err := daemon.Mount(container); err != nil { return nil, err } defer func() { if err != nil { // unmount any volumes container.UnmountVolumes(true, daemon.LogVolumeEvent) // unmount the container's rootfs daemon.Unmount(container) } }() if err := daemon.mountVolumes(container); err != nil { return nil, err } basePath, err := container.GetResourcePath(resource) if err != nil { return nil, err } stat, err := os.Stat(basePath) if err != nil { return nil, err } var filter []string if !stat.IsDir() { d, f := filepath.Split(basePath) basePath = d filter = []string{f} } else { filter = []string{filepath.Base(basePath)} basePath = filepath.Dir(basePath) } archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ Compression: archive.Uncompressed, IncludeFiles: filter, }) if err != nil { return nil, err } reader := ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() container.UnmountVolumes(true, daemon.LogVolumeEvent) daemon.Unmount(container) container.Unlock() return err }) daemon.LogContainerEvent(container, "copy") return reader, nil } docker-1.10.3/daemon/archive_unix.go000066400000000000000000000011121267010174400173160ustar00rootroot00000000000000// +build !windows package daemon import "github.com/docker/docker/container" // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it // cannot be in a read-only volume. If it is not in a volume, the container // cannot be configured with a read-only rootfs. 
func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { var toVolume bool for _, mnt := range container.MountPoints { if toVolume = mnt.HasResource(absPath); toVolume { if mnt.RW { break } return false, ErrVolumeReadonly } } return toVolume, nil } docker-1.10.3/daemon/archive_windows.go000066400000000000000000000010211267010174400200240ustar00rootroot00000000000000package daemon import "github.com/docker/docker/container" // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it // cannot be in a read-only volume. If it is not in a volume, the container // cannot be configured with a read-only rootfs. // // This is a no-op on Windows which does not support read-only volumes, or // extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP4 func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { return false, nil } docker-1.10.3/daemon/attach.go000066400000000000000000000101361267010174400161040ustar00rootroot00000000000000package daemon import ( "fmt" "io" "net/http" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/daemon/logger" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/stdcopy" ) // ContainerAttachWithLogsConfig holds the streams to use when connecting to a container to view logs. type ContainerAttachWithLogsConfig struct { Hijacker http.Hijacker Upgrade bool UseStdin bool UseStdout bool UseStderr bool Logs bool Stream bool DetachKeys []byte } // ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig. 
func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerAttachWithLogsConfig) error { if c.Hijacker == nil { return derr.ErrorCodeNoHijackConnection.WithArgs(prefixOrName) } container, err := daemon.GetContainer(prefixOrName) if err != nil { return derr.ErrorCodeNoSuchContainer.WithArgs(prefixOrName) } if container.IsPaused() { return derr.ErrorCodePausedContainer.WithArgs(prefixOrName) } conn, _, err := c.Hijacker.Hijack() if err != nil { return err } defer conn.Close() // Flush the options to make sure the client sets the raw mode conn.Write([]byte{}) inStream := conn.(io.ReadCloser) outStream := conn.(io.Writer) if c.Upgrade { fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") } else { fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") } var errStream io.Writer if !container.Config.Tty { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } var stdin io.ReadCloser var stdout, stderr io.Writer if c.UseStdin { stdin = inStream } if c.UseStdout { stdout = outStream } if c.UseStderr { stderr = errStream } if err := daemon.attachWithLogs(container, stdin, stdout, stderr, c.Logs, c.Stream, c.DetachKeys); err != nil { fmt.Fprintf(outStream, "Error attaching: %s\n", err) } return nil } // ContainerWsAttachWithLogsConfig attach with websockets, since all // stream data is delegated to the websocket to handle there. 
type ContainerWsAttachWithLogsConfig struct { InStream io.ReadCloser OutStream, ErrStream io.Writer Logs, Stream bool DetachKeys []byte } // ContainerWsAttachWithLogs websocket connection func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *ContainerWsAttachWithLogsConfig) error { container, err := daemon.GetContainer(prefixOrName) if err != nil { return err } return daemon.attachWithLogs(container, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream, c.DetachKeys) } func (daemon *Daemon) attachWithLogs(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { if logs { logDriver, err := daemon.getLogger(container) if err != nil { return err } cLog, ok := logDriver.(logger.LogReader) if !ok { return logger.ErrReadLogsNotSupported } logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) LogLoop: for { select { case msg, ok := <-logs.Msg: if !ok { break LogLoop } if msg.Source == "stdout" && stdout != nil { stdout.Write(msg.Line) } if msg.Source == "stderr" && stderr != nil { stderr.Write(msg.Line) } case err := <-logs.Err: logrus.Errorf("Error streaming logs: %v", err) break LogLoop } } } daemon.LogContainerEvent(container, "attach") //stream if stream { var stdinPipe io.ReadCloser if stdin != nil { r, w := io.Pipe() go func() { defer w.Close() defer logrus.Debugf("Closing buffered stdin pipe") io.Copy(w, stdin) }() stdinPipe = r } <-container.Attach(stdinPipe, stdout, stderr, keys) // If we are in stdinonce mode, wait for the process to end // otherwise, simply return if container.Config.StdinOnce && !container.Config.Tty { container.WaitStop(-1 * time.Second) } } return nil } docker-1.10.3/daemon/changes.go000066400000000000000000000005561267010174400162550ustar00rootroot00000000000000package daemon import "github.com/docker/docker/pkg/archive" // ContainerChanges returns a list of container fs changes func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { 
container, err := daemon.GetContainer(name) if err != nil { return nil, err } container.Lock() defer container.Unlock() return daemon.changes(container) } docker-1.10.3/daemon/commit.go000066400000000000000000000125401267010174400161310ustar00rootroot00000000000000package daemon import ( "encoding/json" "fmt" "runtime" "strings" "time" "github.com/docker/docker/container" "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" containertypes "github.com/docker/engine-api/types/container" "github.com/docker/go-connections/nat" ) // merge merges two Config, the image container configuration (defaults values), // and the user container configuration, either passed by the API or generated // by the cli. // It will mutate the specified user configuration (userConf) with the image // configuration where the user configuration is incomplete. 
func merge(userConf, imageConf *containertypes.Config) error { if userConf.User == "" { userConf.User = imageConf.User } if len(userConf.ExposedPorts) == 0 { userConf.ExposedPorts = imageConf.ExposedPorts } else if imageConf.ExposedPorts != nil { if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } for port := range imageConf.ExposedPorts { if _, exists := userConf.ExposedPorts[port]; !exists { userConf.ExposedPorts[port] = struct{}{} } } } if len(userConf.Env) == 0 { userConf.Env = imageConf.Env } else { for _, imageEnv := range imageConf.Env { found := false imageEnvKey := strings.Split(imageEnv, "=")[0] for _, userEnv := range userConf.Env { userEnvKey := strings.Split(userEnv, "=")[0] if imageEnvKey == userEnvKey { found = true break } } if !found { userConf.Env = append(userConf.Env, imageEnv) } } } if userConf.Labels == nil { userConf.Labels = map[string]string{} } if imageConf.Labels != nil { for l := range userConf.Labels { imageConf.Labels[l] = userConf.Labels[l] } userConf.Labels = imageConf.Labels } if userConf.Entrypoint.Len() == 0 { if userConf.Cmd.Len() == 0 { userConf.Cmd = imageConf.Cmd } if userConf.Entrypoint == nil { userConf.Entrypoint = imageConf.Entrypoint } } if userConf.WorkingDir == "" { userConf.WorkingDir = imageConf.WorkingDir } if len(userConf.Volumes) == 0 { userConf.Volumes = imageConf.Volumes } else { for k, v := range imageConf.Volumes { userConf.Volumes[k] = v } } return nil } // Commit creates a new filesystem image from the current state of a container. // The image can optionally be tagged into a repository. 
func (daemon *Daemon) Commit(name string, c *types.ContainerCommitConfig) (string, error) { container, err := daemon.GetContainer(name) if err != nil { return "", err } // It is not possible to commit a running container on Windows if runtime.GOOS == "windows" && container.IsRunning() { return "", fmt.Errorf("Windows does not support commit of a running container") } if c.Pause && !container.IsPaused() { daemon.containerPause(container) defer daemon.containerUnpause(container) } if c.MergeConfigs { if err := merge(c.Config, container.Config); err != nil { return "", err } } rwTar, err := daemon.exportContainerRw(container) if err != nil { return "", err } defer func() { if rwTar != nil { rwTar.Close() } }() var history []image.History rootFS := image.NewRootFS() if container.ImageID != "" { img, err := daemon.imageStore.Get(container.ImageID) if err != nil { return "", err } history = img.History rootFS = img.RootFS } l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) if err != nil { return "", err } defer layer.ReleaseAndLog(daemon.layerStore, l) h := image.History{ Author: c.Author, Created: time.Now().UTC(), CreatedBy: strings.Join(container.Config.Cmd.Slice(), " "), Comment: c.Comment, EmptyLayer: true, } if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { h.EmptyLayer = false rootFS.Append(diffID) } history = append(history, h) config, err := json.Marshal(&image.Image{ V1Image: image.V1Image{ DockerVersion: dockerversion.Version, Config: c.Config, Architecture: runtime.GOARCH, OS: runtime.GOOS, Container: container.ID, ContainerConfig: *container.Config, Author: c.Author, Created: h.Created, }, RootFS: rootFS, History: history, }) if err != nil { return "", err } id, err := daemon.imageStore.Create(config) if err != nil { return "", err } if container.ImageID != "" { if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { return "", err } } if c.Repo != "" { newTag, err := reference.WithName(c.Repo) // todo: should 
move this to API layer if err != nil { return "", err } if c.Tag != "" { if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { return "", err } } if err := daemon.TagImage(newTag, id.String()); err != nil { return "", err } } daemon.LogContainerEvent(container, "commit") return id.String(), nil } func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) { if err := daemon.Mount(container); err != nil { return nil, err } archive, err := container.RWLayer.TarStream() if err != nil { return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { archive.Close() return container.RWLayer.Unmount() }), nil } docker-1.10.3/daemon/config.go000066400000000000000000000317351267010174400161150ustar00rootroot00000000000000package daemon import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "strings" "sync" "github.com/Sirupsen/logrus" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/discovery" flag "github.com/docker/docker/pkg/mflag" "github.com/imdario/mergo" ) const ( defaultNetworkMtu = 1500 disableNetworkBridge = "none" ) // flatOptions contains configuration keys // that MUST NOT be parsed as deep structures. // Use this to differentiate these options // with others like the ones in CommonTLSOptions. var flatOptions = map[string]bool{ "cluster-store-opts": true, "log-opts": true, } // LogConfig represents the default log configuration. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line uses. type LogConfig struct { Type string `json:"log-driver,omitempty"` Config map[string]string `json:"log-opts,omitempty"` } // CommonTLSOptions defines TLS configuration for the daemon server. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line uses. 
type CommonTLSOptions struct { CAFile string `json:"tlscacert,omitempty"` CertFile string `json:"tlscert,omitempty"` KeyFile string `json:"tlskey,omitempty"` } // CommonConfig defines the configuration of a docker daemon which are // common across platforms. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line uses. type CommonConfig struct { AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins AutoRestart bool `json:"-"` Context map[string][]string `json:"-"` DisableBridge bool `json:"-"` DNS []string `json:"dns,omitempty"` DNSOptions []string `json:"dns-opts,omitempty"` DNSSearch []string `json:"dns-search,omitempty"` ExecOptions []string `json:"exec-opts,omitempty"` ExecRoot string `json:"exec-root,omitempty"` GraphDriver string `json:"storage-driver,omitempty"` GraphOptions []string `json:"storage-opts,omitempty"` Labels []string `json:"labels,omitempty"` Mtu int `json:"mtu,omitempty"` Pidfile string `json:"pidfile,omitempty"` Root string `json:"graph,omitempty"` TrustKeyPath string `json:"-"` // ClusterStore is the storage backend used for the cluster information. It is used by both // multihost networking (to store networks and endpoints information) and by the node discovery // mechanism. ClusterStore string `json:"cluster-store,omitempty"` // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such // as TLS configuration settings. ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node // discovery. This should be a 'host:port' combination on which that daemon instance is // reachable by other hosts. 
ClusterAdvertise string `json:"cluster-advertise,omitempty"` Debug bool `json:"debug,omitempty"` Hosts []string `json:"hosts,omitempty"` LogLevel string `json:"log-level,omitempty"` TLS bool `json:"tls,omitempty"` TLSVerify bool `json:"tlsverify,omitempty"` // Embedded structs that allow config // deserialization without the full struct. CommonTLSOptions LogConfig bridgeConfig // bridgeConfig holds bridge network specific configuration. reloadLock sync.Mutex valuesSet map[string]interface{} } // InstallCommonFlags adds command-line options to the top-level flag parser for // the current process. // Subsequent calls to `flag.Parse` will populate config with values parsed // from the command-line. func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) { cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options")) cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last")) cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set exec driver options")) cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file")) cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime")) cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, "/var/run/docker", usageFn("Root of the Docker execdriver")) cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run")) cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use")) cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU")) // FIXME: 
why the inconsistency between "hosts" and "sockets"? cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) cmd.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use")) cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) cmd.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon")) cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs")) cmd.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options")) cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise")) cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("Set the cluster store")) cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options")) } // IsValueSet returns true if a configuration value // was explicitly set in the configuration file. func (config *Config) IsValueSet(name string) bool { if config.valuesSet == nil { return false } _, ok := config.valuesSet[name] return ok } func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { if clusterAdvertise == "" { return "", errDiscoveryDisabled } if clusterStore == "" { return "", fmt.Errorf("invalid cluster configuration. 
--cluster-advertise must be accompanied by --cluster-store configuration") } advertise, err := discovery.ParseAdvertise(clusterAdvertise) if err != nil { return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) } return advertise, nil } // ReloadConfiguration reads the configuration in the host and reloads the daemon and server. func ReloadConfiguration(configFile string, flags *flag.FlagSet, reload func(*Config)) error { logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) newConfig, err := getConflictFreeConfiguration(configFile, flags) if err != nil { return err } reload(newConfig) return nil } // boolValue is an interface that boolean value flags implement // to tell the command line how to make -name equivalent to -name=true. type boolValue interface { IsBoolFlag() bool } // MergeDaemonConfigurations reads a configuration file, // loads the file configuration in an isolated structure, // and merges the configuration provided from flags on top // if there are no conflicts. func MergeDaemonConfigurations(flagsConfig *Config, flags *flag.FlagSet, configFile string) (*Config, error) { fileConfig, err := getConflictFreeConfiguration(configFile, flags) if err != nil { return nil, err } // merge flags configuration on top of the file configuration if err := mergo.Merge(fileConfig, flagsConfig); err != nil { return nil, err } return fileConfig, nil } // getConflictFreeConfiguration loads the configuration from a JSON file. // It compares that configuration with the one provided by the flags, // and returns an error if there are conflicts. 
func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Config, error) { b, err := ioutil.ReadFile(configFile) if err != nil { return nil, err } var config Config var reader io.Reader if flags != nil { var jsonConfig map[string]interface{} reader = bytes.NewReader(b) if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { return nil, err } configSet := configValuesSet(jsonConfig) if err := findConfigurationConflicts(configSet, flags); err != nil { return nil, err } // Override flag values to make sure the values set in the config file with nullable values, like `false`, // are not overriden by default truthy values from the flags that were not explicitly set. // See https://github.com/docker/docker/issues/20289 for an example. // // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. namedOptions := make(map[string]interface{}) for key, value := range configSet { f := flags.Lookup("-" + key) if f == nil { // ignore named flags that don't match namedOptions[key] = value continue } if _, ok := f.Value.(boolValue); ok { f.Value.Set(fmt.Sprintf("%v", value)) } } if len(namedOptions) > 0 { // set also default for mergeVal flags that are boolValue at the same time. flags.VisitAll(func(f *flag.Flag) { if opt, named := f.Value.(opts.NamedOption); named { v, set := namedOptions[opt.Name()] _, boolean := f.Value.(boolValue) if set && boolean { f.Value.Set(fmt.Sprintf("%v", v)) } } }) } config.valuesSet = configSet } reader = bytes.NewReader(b) err = json.NewDecoder(reader).Decode(&config) return &config, err } // configValuesSet returns the configuration values explicitly set in the file. 
func configValuesSet(config map[string]interface{}) map[string]interface{} { flatten := make(map[string]interface{}) for k, v := range config { if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { for km, vm := range m { flatten[km] = vm } continue } flatten[k] = v } return flatten } // findConfigurationConflicts iterates over the provided flags searching for // duplicated configurations and unknown keys. It returns an error with all the conflicts if // it finds any. func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error { // 1. Search keys from the file that we don't recognize as flags. unknownKeys := make(map[string]interface{}) for key, value := range config { flagName := "-" + key if flag := flags.Lookup(flagName); flag == nil { unknownKeys[key] = value } } // 2. Discard values that implement NamedOption. // Their configuration name differs from their flag name, like `labels` and `label`. if len(unknownKeys) > 0 { unknownNamedConflicts := func(f *flag.Flag) { if namedOption, ok := f.Value.(opts.NamedOption); ok { if _, valid := unknownKeys[namedOption.Name()]; valid { delete(unknownKeys, namedOption.Name()) } } } flags.VisitAll(unknownNamedConflicts) } if len(unknownKeys) > 0 { var unknown []string for key := range unknownKeys { unknown = append(unknown, key) } return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) } var conflicts []string printConflict := func(name string, flagValue, fileValue interface{}) string { return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) } // 3. Search keys that are present as a flag and as a file option. 
duplicatedConflicts := func(f *flag.Flag) { // search option name in the json configuration payload if the value is a named option if namedOption, ok := f.Value.(opts.NamedOption); ok { if optsValue, ok := config[namedOption.Name()]; ok { conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) } } else { // search flag name in the json configuration payload without trailing dashes for _, name := range f.Names { name = strings.TrimLeft(name, "-") if value, ok := config[name]; ok { conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) break } } } } flags.Visit(duplicatedConflicts) if len(conflicts) > 0 { return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) } return nil } docker-1.10.3/daemon/config_experimental.go000066400000000000000000000002741267010174400206640ustar00rootroot00000000000000// +build experimental package daemon import flag "github.com/docker/docker/pkg/mflag" func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { } docker-1.10.3/daemon/config_stub.go000066400000000000000000000002751267010174400171450ustar00rootroot00000000000000// +build !experimental package daemon import flag "github.com/docker/docker/pkg/mflag" func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { } docker-1.10.3/daemon/config_test.go000066400000000000000000000132011267010174400171400ustar00rootroot00000000000000package daemon import ( "io/ioutil" "os" "strings" "testing" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/mflag" ) func TestDaemonConfigurationMerge(t *testing.T) { f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"debug": true}`)) f.Close() c := &Config{ CommonConfig: CommonConfig{ AutoRestart: true, LogConfig: LogConfig{ Type: "syslog", Config: 
map[string]string{"tag": "test"}, }, }, } cc, err := MergeDaemonConfigurations(c, nil, configFile) if err != nil { t.Fatal(err) } if !cc.Debug { t.Fatalf("expected %v, got %v\n", true, cc.Debug) } if !cc.AutoRestart { t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) } if cc.LogConfig.Type != "syslog" { t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) } } func TestDaemonConfigurationNotFound(t *testing.T) { _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") if err == nil || !os.IsNotExist(err) { t.Fatalf("expected does not exist error, got %v", err) } } func TestDaemonBrokenConfiguration(t *testing.T) { f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"Debug": tru`)) f.Close() _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) if err == nil { t.Fatalf("expected error, got %v", err) } } func TestParseClusterAdvertiseSettings(t *testing.T) { _, err := parseClusterAdvertiseSettings("something", "") if err != errDiscoveryDisabled { t.Fatalf("expected discovery disabled error, got %v\n", err) } _, err = parseClusterAdvertiseSettings("", "something") if err == nil { t.Fatalf("expected discovery store error, got %v\n", err) } _, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") if err != nil { t.Fatal(err) } } func TestFindConfigurationConflicts(t *testing.T) { config := map[string]interface{}{"authorization-plugins": "foobar"} flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.String([]string{"-authorization-plugins"}, "", "") if err := flags.Set("-authorization-plugins", "asdf"); err != nil { t.Fatal(err) } err := findConfigurationConflicts(config, flags) if err == nil { t.Fatal("expected error, got nil") } if !strings.Contains(err.Error(), "authorization-plugins: (from flag: asdf, from file: foobar)") { t.Fatalf("expected authorization-plugins conflict, got %v", err) } } func 
TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { config := map[string]interface{}{"hosts": []string{"qwer"}} flags := mflag.NewFlagSet("test", mflag.ContinueOnError) var hosts []string flags.Var(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") if err := flags.Set("-host", "tcp://127.0.0.1:4444"); err != nil { t.Fatal(err) } if err := flags.Set("H", "unix:///var/run/docker.sock"); err != nil { t.Fatal(err) } err := findConfigurationConflicts(config, flags) if err == nil { t.Fatal("expected error, got nil") } if !strings.Contains(err.Error(), "hosts") { t.Fatalf("expected hosts conflict, got %v", err) } } func TestDaemonConfigurationMergeConflicts(t *testing.T) { f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"debug": true}`)) f.Close() flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.Bool([]string{"debug"}, false, "") flags.Set("debug", "false") _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) if err == nil { t.Fatal("expected error, got nil") } if !strings.Contains(err.Error(), "debug") { t.Fatalf("expected debug conflict, got %v", err) } } func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) f.Close() flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.String([]string{"tlscacert"}, "", "") flags.Set("tlscacert", "~/.docker/ca.pem") _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) if err == nil { t.Fatal("expected error, got nil") } if !strings.Contains(err.Error(), "tlscacert") { t.Fatalf("expected tlscacert conflict, got %v", err) } } func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { config := map[string]interface{}{"tls-verify": "true"} flags := 
mflag.NewFlagSet("test", mflag.ContinueOnError) flags.Bool([]string{"-tlsverify"}, false, "") err := findConfigurationConflicts(config, flags) if err == nil { t.Fatal("expected error, got nil") } if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { t.Fatalf("expected tls-verify conflict, got %v", err) } } func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { var hosts []string config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} base := mflag.NewFlagSet("base", mflag.ContinueOnError) base.Var(opts.NewNamedListOptsRef("hosts", &hosts, nil), []string{"H", "-host"}, "") flags := mflag.NewFlagSet("test", mflag.ContinueOnError) mflag.Merge(flags, base) err := findConfigurationConflicts(config, flags) if err != nil { t.Fatal(err) } flags.Set("-host", "unix:///var/run/docker.sock") err = findConfigurationConflicts(config, flags) if err == nil { t.Fatal("expected error, got nil") } if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { t.Fatalf("expected hosts conflict, got %v", err) } } docker-1.10.3/daemon/config_unix.go000066400000000000000000000121471267010174400171540ustar00rootroot00000000000000// +build linux freebsd package daemon import ( "net" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" runconfigopts "github.com/docker/docker/runconfig/opts" "github.com/docker/go-units" ) var ( defaultPidFile = "/var/run/docker.pid" defaultGraph = "/var/lib/docker" defaultExec = "native" ) // Config defines the configuration of a docker daemon. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line uses. type Config struct { CommonConfig // Fields below here are platform specific. 
CorsHeaders string `json:"api-cors-headers,omitempty"` EnableCors bool `json:"api-enable-cors,omitempty"` EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` RemappedRoot string `json:"userns-remap,omitempty"` SocketGroup string `json:"group,omitempty"` CgroupParent string `json:"cgroup-parent,omitempty"` Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` } // bridgeConfig stores all the bridge driver specific // configuration. type bridgeConfig struct { EnableIPv6 bool `json:"ipv6,omitempty"` EnableIPTables bool `json:"iptables,omitempty"` EnableIPForward bool `json:"ip-forward,omitempty"` EnableIPMasq bool `json:"ip-mask,omitempty"` EnableUserlandProxy bool `json:"userland-proxy,omitempty"` DefaultIP net.IP `json:"ip,omitempty"` Iface string `json:"bridge,omitempty"` IP string `json:"bip,omitempty"` FixedCIDR string `json:"fixed-cidr,omitempty"` FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` InterContainerCommunication bool `json:"icc,omitempty"` } // InstallFlags adds command-line options to the top-level flag parser for // the current process. // Subsequent calls to `flag.Parse` will populate config with values parsed // from the command-line. 
func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { // First handle install flags which are consistent cross-platform config.InstallCommonFlags(cmd, usageFn) // Then platform-specific install flags cmd.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, usageFn("Enable selinux support")) cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", usageFn("Group for the unix socket")) config.Ulimits = make(map[string]*units.Ulimit) cmd.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), []string{"-default-ulimit"}, usageFn("Set default ulimits for containers")) cmd.BoolVar(&config.bridgeConfig.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) cmd.BoolVar(&config.bridgeConfig.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) cmd.BoolVar(&config.bridgeConfig.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) cmd.BoolVar(&config.bridgeConfig.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) cmd.StringVar(&config.bridgeConfig.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) cmd.StringVar(&config.bridgeConfig.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) cmd.BoolVar(&config.bridgeConfig.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable 
inter-container communication")) cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) cmd.BoolVar(&config.bridgeConfig.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers")) cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces")) config.attachExperimentalFlags(cmd, usageFn) } docker-1.10.3/daemon/config_windows.go000066400000000000000000000024611267010174400176610ustar00rootroot00000000000000package daemon import ( "os" flag "github.com/docker/docker/pkg/mflag" ) var ( defaultPidFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker.pid" defaultGraph = os.Getenv("programdata") + string(os.PathSeparator) + "docker" defaultExec = "windows" ) // bridgeConfig stores all the bridge driver specific // configuration. type bridgeConfig struct { VirtualSwitchName string `json:"bridge,omitempty"` } // Config defines the configuration of a docker daemon. // These are the configuration settings that you pass // to the docker daemon when you launch it with say: `docker daemon -e windows` type Config struct { CommonConfig // Fields below here are platform specific. (There are none presently // for the Windows daemon.) } // InstallFlags adds command-line options to the top-level flag parser for // the current process. // Subsequent calls to `flag.Parse` will populate config with values parsed // from the command-line. 
func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { // First handle install flags which are consistent cross-platform config.InstallCommonFlags(cmd, usageFn) // Then platform-specific install flags. cmd.StringVar(&config.bridgeConfig.VirtualSwitchName, []string{"b", "-bridge"}, "", "Attach containers to a virtual switch") } docker-1.10.3/daemon/container_operations.go000066400000000000000000000003031267010174400210600ustar00rootroot00000000000000package daemon import "errors" var ( // ErrRootFSReadOnly is returned when a container // rootfs is marked readonly. ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") ) docker-1.10.3/daemon/container_operations_unix.go000066400000000000000000001013341267010174400221310ustar00rootroot00000000000000// +build linux freebsd package daemon import ( "fmt" "os" "path" "path/filepath" "strconv" "strings" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/links" "github.com/docker/docker/daemon/network" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" containertypes "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" "github.com/docker/go-units" "github.com/docker/libnetwork" "github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/options" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/devices" "github.com/opencontainers/runc/libcontainer/label" ) func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { var env []string children := daemon.children(container) bridgeSettings := container.NetworkSettings.Networks["bridge"] if 
bridgeSettings == nil { return nil, nil } for linkAlias, child := range children { if !child.IsRunning() { return nil, derr.ErrorCodeLinkNotRunning.WithArgs(child.Name, linkAlias) } childBridgeSettings := child.NetworkSettings.Networks["bridge"] if childBridgeSettings == nil { return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) } link := links.NewLink( bridgeSettings.IPAddress, childBridgeSettings.IPAddress, linkAlias, child.Config.Env, child.Config.ExposedPorts, ) for _, envVar := range link.ToEnv() { env = append(env, envVar) } } return env, nil } func (daemon *Daemon) populateCommand(c *container.Container, env []string) error { var en *execdriver.Network if !c.Config.NetworkDisabled { en = &execdriver.Network{} if !daemon.execDriver.SupportsHooks() || c.HostConfig.NetworkMode.IsHost() { en.NamespacePath = c.NetworkSettings.SandboxKey } if c.HostConfig.NetworkMode.IsContainer() { nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) if err != nil { return err } en.ContainerID = nc.ID } } ipc := &execdriver.Ipc{} var err error c.ShmPath, err = c.ShmResourcePath() if err != nil { return err } if c.HostConfig.IpcMode.IsContainer() { ic, err := daemon.getIpcContainer(c) if err != nil { return err } ipc.ContainerID = ic.ID c.ShmPath = ic.ShmPath } else { ipc.HostIpc = c.HostConfig.IpcMode.IsHost() if ipc.HostIpc { if _, err := os.Stat("/dev/shm"); err != nil { return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") } c.ShmPath = "/dev/shm" } } pid := &execdriver.Pid{} pid.HostPid = c.HostConfig.PidMode.IsHost() uts := &execdriver.UTS{ HostUTS: c.HostConfig.UTSMode.IsHost(), } // Build lists of devices allowed and created within the container. var userSpecifiedDevices []*configs.Device for _, deviceMapping := range c.HostConfig.Devices { devs, err := getDevicesFromPath(deviceMapping) if err != nil { return err } userSpecifiedDevices = append(userSpecifiedDevices, devs...) 
} allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices) autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices) var rlimits []*units.Rlimit ulimits := c.HostConfig.Ulimits // Merge ulimits with daemon defaults ulIdx := make(map[string]*units.Ulimit) for _, ul := range ulimits { ulIdx[ul.Name] = ul } for name, ul := range daemon.configStore.Ulimits { if _, exists := ulIdx[name]; !exists { ulimits = append(ulimits, ul) } } weightDevices, err := getBlkioWeightDevices(c.HostConfig) if err != nil { return err } readBpsDevice, err := getBlkioReadBpsDevices(c.HostConfig) if err != nil { return err } writeBpsDevice, err := getBlkioWriteBpsDevices(c.HostConfig) if err != nil { return err } readIOpsDevice, err := getBlkioReadIOpsDevices(c.HostConfig) if err != nil { return err } writeIOpsDevice, err := getBlkioWriteIOpsDevices(c.HostConfig) if err != nil { return err } for _, limit := range ulimits { rl, err := limit.GetRlimit() if err != nil { return err } rlimits = append(rlimits, rl) } resources := &execdriver.Resources{ CommonResources: execdriver.CommonResources{ Memory: c.HostConfig.Memory, MemoryReservation: c.HostConfig.MemoryReservation, CPUShares: c.HostConfig.CPUShares, BlkioWeight: c.HostConfig.BlkioWeight, }, MemorySwap: c.HostConfig.MemorySwap, KernelMemory: c.HostConfig.KernelMemory, CpusetCpus: c.HostConfig.CpusetCpus, CpusetMems: c.HostConfig.CpusetMems, CPUPeriod: c.HostConfig.CPUPeriod, CPUQuota: c.HostConfig.CPUQuota, Rlimits: rlimits, BlkioWeightDevice: weightDevices, BlkioThrottleReadBpsDevice: readBpsDevice, BlkioThrottleWriteBpsDevice: writeBpsDevice, BlkioThrottleReadIOpsDevice: readIOpsDevice, BlkioThrottleWriteIOpsDevice: writeIOpsDevice, MemorySwappiness: -1, } if c.HostConfig.OomKillDisable != nil { resources.OomKillDisable = *c.HostConfig.OomKillDisable } if c.HostConfig.MemorySwappiness != nil { resources.MemorySwappiness = *c.HostConfig.MemorySwappiness } processConfig := 
execdriver.ProcessConfig{ CommonProcessConfig: execdriver.CommonProcessConfig{ Entrypoint: c.Path, Arguments: c.Args, Tty: c.Config.Tty, }, Privileged: c.HostConfig.Privileged, User: c.Config.User, } processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true} processConfig.Env = env remappedRoot := &execdriver.User{} rootUID, rootGID := daemon.GetRemappedUIDGID() if rootUID != 0 { remappedRoot.UID = rootUID remappedRoot.GID = rootGID } uidMap, gidMap := daemon.GetUIDGIDMaps() if !daemon.seccompEnabled { if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") } logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") c.SeccompProfile = "unconfined" } defaultCgroupParent := "/docker" if daemon.configStore.CgroupParent != "" { defaultCgroupParent = daemon.configStore.CgroupParent } else if daemon.usingSystemd() { defaultCgroupParent = "system.slice" } c.Command = &execdriver.Command{ CommonCommand: execdriver.CommonCommand{ ID: c.ID, InitPath: "/.dockerinit", MountLabel: c.GetMountLabel(), Network: en, ProcessConfig: processConfig, ProcessLabel: c.GetProcessLabel(), Rootfs: c.BaseFS, Resources: resources, WorkingDir: c.Config.WorkingDir, }, AllowedDevices: allowedDevices, AppArmorProfile: c.AppArmorProfile, AutoCreatedDevices: autoCreatedDevices, CapAdd: c.HostConfig.CapAdd.Slice(), CapDrop: c.HostConfig.CapDrop.Slice(), CgroupParent: defaultCgroupParent, GIDMapping: gidMap, GroupAdd: c.HostConfig.GroupAdd, Ipc: ipc, OomScoreAdj: c.HostConfig.OomScoreAdj, Pid: pid, ReadonlyRootfs: c.HostConfig.ReadonlyRootfs, RemappedRoot: remappedRoot, SeccompProfile: c.SeccompProfile, UIDMapping: uidMap, UTS: uts, } if c.HostConfig.CgroupParent != "" { c.Command.CgroupParent = c.HostConfig.CgroupParent } return nil } // getSize returns the real size & virtual size of the container. 
func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { var ( sizeRw, sizeRootfs int64 err error ) if err := daemon.Mount(container); err != nil { logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) return sizeRw, sizeRootfs } defer daemon.Unmount(container) sizeRw, err = container.RWLayer.Size() if err != nil { logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.GraphDriverName(), container.ID, err) // FIXME: GetSize should return an error. Not changing it now in case // there is a side-effect. sizeRw = -1 } if parent := container.RWLayer.Parent(); parent != nil { sizeRootfs, err = parent.Size() if err != nil { sizeRootfs = -1 } else if sizeRw != -1 { sizeRootfs += sizeRw } } return sizeRw, sizeRootfs } func (daemon *Daemon) buildSandboxOptions(container *container.Container, n libnetwork.Network) ([]libnetwork.SandboxOption, error) { var ( sboxOptions []libnetwork.SandboxOption err error dns []string dnsSearch []string dnsOptions []string ) sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), libnetwork.OptionDomainname(container.Config.Domainname)) if container.HostConfig.NetworkMode.IsHost() { sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) } else if daemon.execDriver.SupportsHooks() { // OptionUseExternalKey is mandatory for userns support. 
// But optional for non-userns support sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) } container.HostsPath, err = container.GetRootResourcePath("hosts") if err != nil { return nil, err } sboxOptions = append(sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") if err != nil { return nil, err } sboxOptions = append(sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) if len(container.HostConfig.DNS) > 0 { dns = container.HostConfig.DNS } else if len(daemon.configStore.DNS) > 0 { dns = daemon.configStore.DNS } for _, d := range dns { sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) } if len(container.HostConfig.DNSSearch) > 0 { dnsSearch = container.HostConfig.DNSSearch } else if len(daemon.configStore.DNSSearch) > 0 { dnsSearch = daemon.configStore.DNSSearch } for _, ds := range dnsSearch { sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) } if len(container.HostConfig.DNSOptions) > 0 { dnsOptions = container.HostConfig.DNSOptions } else if len(daemon.configStore.DNSOptions) > 0 { dnsOptions = daemon.configStore.DNSOptions } for _, ds := range dnsOptions { sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) } if container.NetworkSettings.SecondaryIPAddresses != nil { name := container.Config.Hostname if container.Config.Domainname != "" { name = name + "." + container.Config.Domainname } for _, a := range container.NetworkSettings.SecondaryIPAddresses { sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) } } for _, extraHost := range container.HostConfig.ExtraHosts { // allow IPv6 addresses in extra hosts; only split on first ":" parts := strings.SplitN(extraHost, ":", 2) sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) } // Link feature is supported only for the default bridge network. 
// return if this call to build join options is not for default bridge network if n.Name() != "bridge" { return sboxOptions, nil } ep, _ := container.GetEndpointInNetwork(n) if ep == nil { return sboxOptions, nil } var childEndpoints, parentEndpoints []string children := daemon.children(container) for linkAlias, child := range children { if !isLinkable(child) { return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) } _, alias := path.Split(linkAlias) // allow access to the linked container via the alias, real name, and container hostname aliasList := alias + " " + child.Config.Hostname // only add the name if alias isn't equal to the name if alias != child.Name[1:] { aliasList = aliasList + " " + child.Name[1:] } sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks["bridge"].IPAddress)) cEndpoint, _ := child.GetEndpointInNetwork(n) if cEndpoint != nil && cEndpoint.ID() != "" { childEndpoints = append(childEndpoints, cEndpoint.ID()) } } bridgeSettings := container.NetworkSettings.Networks["bridge"] for alias, parent := range daemon.parents(container) { if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { continue } _, alias = path.Split(alias) logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( parent.ID, alias, bridgeSettings.IPAddress, )) if ep.ID() != "" { parentEndpoints = append(parentEndpoints, ep.ID()) } } linkOptions := options.Generic{ netlabel.GenericData: options.Generic{ "ParentEndpoints": parentEndpoints, "ChildEndpoints": childEndpoints, }, } sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) return sboxOptions, nil } func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network) error { if container.NetworkSettings == nil { 
container.NetworkSettings = &network.Settings{Networks: make(map[string]*networktypes.EndpointSettings)} } if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { return runconfig.ErrConflictHostNetwork } for s := range container.NetworkSettings.Networks { sn, err := daemon.FindNetwork(s) if err != nil { continue } if sn.Name() == n.Name() { // Avoid duplicate config return nil } if !containertypes.NetworkMode(sn.Type()).IsPrivate() || !containertypes.NetworkMode(n.Type()).IsPrivate() { return runconfig.ErrConflictSharedNetwork } if containertypes.NetworkMode(sn.Name()).IsNone() || containertypes.NetworkMode(n.Name()).IsNone() { return runconfig.ErrConflictNoNetwork } } if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { container.NetworkSettings.Networks[n.Name()] = new(networktypes.EndpointSettings) } return nil } func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { if err := container.BuildEndpointInfo(n, ep); err != nil { return err } if container.HostConfig.NetworkMode == containertypes.NetworkMode("bridge") { container.NetworkSettings.Bridge = daemon.configStore.bridgeConfig.Iface } return nil } // UpdateNetwork is used to update the container's network (e.g. when linked containers // get removed/unlinked). 
func (daemon *Daemon) updateNetwork(container *container.Container) error { ctrl := daemon.netController sid := container.NetworkSettings.SandboxID sb, err := ctrl.SandboxByID(sid) if err != nil { return derr.ErrorCodeNoSandbox.WithArgs(sid, err) } // Find if container is connected to the default bridge network var n libnetwork.Network for name := range container.NetworkSettings.Networks { sn, err := daemon.FindNetwork(name) if err != nil { continue } if sn.Name() == "bridge" { n = sn break } } if n == nil { // Not connected to the default bridge network; Nothing to do return nil } options, err := daemon.buildSandboxOptions(container, n) if err != nil { return derr.ErrorCodeNetworkUpdate.WithArgs(err) } if err := sb.Refresh(options...); err != nil { return derr.ErrorCodeNetworkRefresh.WithArgs(sid, err) } return nil } // updateContainerNetworkSettings update the network settings func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error { var ( n libnetwork.Network err error ) mode := container.HostConfig.NetworkMode if container.Config.NetworkDisabled || mode.IsContainer() { return nil } networkName := mode.NetworkName() if mode.IsDefault() { networkName = daemon.netController.Config().Daemon.DefaultNetwork } if mode.IsUserDefined() { n, err = daemon.FindNetwork(networkName) if err != nil { return err } networkName = n.Name() } if container.NetworkSettings == nil { container.NetworkSettings = &network.Settings{} } if len(endpointsConfig) > 0 { container.NetworkSettings.Networks = endpointsConfig } if container.NetworkSettings.Networks == nil { container.NetworkSettings.Networks = make(map[string]*networktypes.EndpointSettings) container.NetworkSettings.Networks[networkName] = new(networktypes.EndpointSettings) } if !mode.IsUserDefined() { return nil } // Make sure to internally store the per network endpoint config by network name if _, ok := 
container.NetworkSettings.Networks[networkName]; ok { return nil } if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok { container.NetworkSettings.Networks[networkName] = nwConfig delete(container.NetworkSettings.Networks, n.ID()) return nil } return nil } func (daemon *Daemon) allocateNetwork(container *container.Container) error { controller := daemon.netController // Cleanup any stale sandbox left over due to ungraceful daemon shutdown if err := controller.SandboxDestroy(container.ID); err != nil { logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) } updateSettings := false if len(container.NetworkSettings.Networks) == 0 { if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { return nil } err := daemon.updateContainerNetworkSettings(container, nil) if err != nil { return err } updateSettings = true } for n, nConf := range container.NetworkSettings.Networks { if err := daemon.connectToNetwork(container, n, nConf, updateSettings); err != nil { return err } } return container.WriteHostConfig() } func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { var sb libnetwork.Sandbox daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { if s.ContainerID() == container.ID { sb = s return true } return false }) return sb } // hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) } // User specified ip address is acceptable only for networks with user specified subnets. 
func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { if n == nil || epConfig == nil { return nil } if !hasUserDefinedIPAddress(epConfig) { return nil } _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() for _, s := range []struct { ipConfigured bool subnetConfigs []*libnetwork.IpamConf }{ { ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, subnetConfigs: nwIPv4Configs, }, { ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, subnetConfigs: nwIPv6Configs, }, } { if s.ipConfigured { foundSubnet := false for _, cfg := range s.subnetConfigs { if len(cfg.PreferredPool) > 0 { foundSubnet = true break } } if !foundSubnet { return runconfig.ErrUnsupportedNetworkNoSubnetAndIP } } } return nil } // cleanOperationalData resets the operational data from the passed endpoint settings func cleanOperationalData(es *networktypes.EndpointSettings) { es.EndpointID = "" es.Gateway = "" es.IPAddress = "" es.IPPrefixLen = 0 es.IPv6Gateway = "" es.GlobalIPv6Address = "" es.GlobalIPv6PrefixLen = 0 es.MacAddress = "" } func (daemon *Daemon) updateNetworkConfig(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (libnetwork.Network, error) { if container.HostConfig.NetworkMode.IsContainer() { return nil, runconfig.ErrConflictSharedNetwork } if containertypes.NetworkMode(idOrName).IsBridge() && daemon.configStore.DisableBridge { container.Config.NetworkDisabled = true return nil, nil } if !containertypes.NetworkMode(idOrName).IsUserDefined() { if hasUserDefinedIPAddress(endpointConfig) { return nil, runconfig.ErrUnsupportedNetworkAndIP } if endpointConfig != nil && len(endpointConfig.Aliases) > 0 { return nil, runconfig.ErrUnsupportedNetworkAndAlias } } n, err := daemon.FindNetwork(idOrName) if err != nil { return nil, err } if err := validateNetworkingConfig(n, endpointConfig); err != nil { return nil, err } if updateSettings { if err := 
daemon.updateNetworkSettings(container, n); err != nil { return nil, err } } return n, nil } // ConnectToNetwork connects a container to a network func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { if !container.Running { if container.RemovalInProgress || container.Dead { return derr.ErrorCodeRemovalContainer.WithArgs(container.ID) } if _, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, true); err != nil { return err } if endpointConfig != nil { container.NetworkSettings.Networks[idOrName] = endpointConfig } } else { if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { return err } } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } return nil } func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { n, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, updateSettings) if err != nil { return err } if n == nil { return nil } controller := daemon.netController sb := daemon.getNetworkSandbox(container) createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb) if err != nil { return err } endpointName := strings.TrimPrefix(container.Name, "/") ep, err := n.CreateEndpoint(endpointName, createOptions...) 
if err != nil { return err } defer func() { if err != nil { if e := ep.Delete(false); e != nil { logrus.Warnf("Could not rollback container connection to network %s", idOrName) } } }() if endpointConfig != nil { container.NetworkSettings.Networks[n.Name()] = endpointConfig } if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { return err } if sb == nil { options, err := daemon.buildSandboxOptions(container, n) if err != nil { return err } sb, err = controller.NewSandbox(container.ID, options...) if err != nil { return err } container.UpdateSandboxNetworkSettings(sb) } joinOptions, err := container.BuildJoinOptions(n) if err != nil { return err } if err := ep.Join(sb, joinOptions...); err != nil { return err } if err := container.UpdateJoinInfo(n, ep); err != nil { return derr.ErrorCodeJoinInfo.WithArgs(err) } daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) return nil } // ForceEndpointDelete deletes an endpoing from a network forcefully func (daemon *Daemon) ForceEndpointDelete(name string, n libnetwork.Network) error { ep, err := n.EndpointByName(name) if err != nil { return err } return ep.Delete(true) } // DisconnectFromNetwork disconnects container from network n. 
func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { return runconfig.ErrConflictHostNetwork } if !container.Running { if container.RemovalInProgress || container.Dead { return derr.ErrorCodeRemovalContainer.WithArgs(container.ID) } if _, ok := container.NetworkSettings.Networks[n.Name()]; ok { delete(container.NetworkSettings.Networks, n.Name()) } else { return fmt.Errorf("container %s is not connected to the network %s", container.ID, n.Name()) } } else { if err := disconnectFromNetwork(container, n, false); err != nil { return err } } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } attributes := map[string]string{ "container": container.ID, } daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) return nil } func disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { var ( ep libnetwork.Endpoint sbox libnetwork.Sandbox ) s := func(current libnetwork.Endpoint) bool { epInfo := current.Info() if epInfo == nil { return false } if sb := epInfo.Sandbox(); sb != nil { if sb.ContainerID() == container.ID { ep = current sbox = sb return true } } return false } n.WalkEndpoints(s) if ep == nil && force { epName := strings.TrimPrefix(container.Name, "/") ep, err := n.EndpointByName(epName) if err != nil { return err } return ep.Delete(force) } if ep == nil { return fmt.Errorf("container %s is not connected to the network", container.ID) } if err := ep.Leave(sbox); err != nil { return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) } if err := ep.Delete(false); err != nil { return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) } delete(container.NetworkSettings.Networks, n.Name()) return nil } func (daemon 
*Daemon) initializeNetworking(container *container.Container) error { var err error if container.HostConfig.NetworkMode.IsContainer() { // we need to get the hosts files from the container to join nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) if err != nil { return err } container.HostnamePath = nc.HostnamePath container.HostsPath = nc.HostsPath container.ResolvConfPath = nc.ResolvConfPath container.Config.Hostname = nc.Config.Hostname container.Config.Domainname = nc.Config.Domainname return nil } if container.HostConfig.NetworkMode.IsHost() { container.Config.Hostname, err = os.Hostname() if err != nil { return err } parts := strings.SplitN(container.Config.Hostname, ".", 2) if len(parts) > 1 { container.Config.Hostname = parts[0] container.Config.Domainname = parts[1] } } if err := daemon.allocateNetwork(container); err != nil { return err } return container.BuildHostnameFile() } // called from the libcontainer pre-start hook to set the network // namespace configuration linkage to the libnetwork "sandbox" entity func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error { path := fmt.Sprintf("/proc/%d/ns/net", pid) var sandbox libnetwork.Sandbox search := libnetwork.SandboxContainerWalker(&sandbox, containerID) daemon.netController.WalkSandboxes(search) if sandbox == nil { return derr.ErrorCodeNoSandbox.WithArgs(containerID, "no sandbox found") } return sandbox.SetKey(path) } func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { containerID := container.HostConfig.IpcMode.Container() c, err := daemon.GetContainer(containerID) if err != nil { return nil, err } if !c.IsRunning() { return nil, derr.ErrorCodeIPCRunning.WithArgs(containerID) } return c, nil } func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { nc, err := daemon.GetContainer(connectedContainerID) if err != 
nil { return nil, err } if containerID == nc.ID { return nil, derr.ErrorCodeJoinSelf } if !nc.IsRunning() { return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerID) } return nc, nil } func (daemon *Daemon) releaseNetwork(container *container.Container) { if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { return } sid := container.NetworkSettings.SandboxID settings := container.NetworkSettings.Networks container.NetworkSettings.Ports = nil if sid == "" || len(settings) == 0 { return } var networks []libnetwork.Network for n, epSettings := range settings { if nw, err := daemon.FindNetwork(n); err == nil { networks = append(networks, nw) } cleanOperationalData(epSettings) } sb, err := daemon.netController.SandboxByID(sid) if err != nil { logrus.Errorf("error locating sandbox id %s: %v", sid, err) return } if err := sb.Delete(); err != nil { logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) } attributes := map[string]string{ "container": container.ID, } for _, nw := range networks { daemon.LogNetworkEventWithAttributes(nw, "disconnect", attributes) } } func (daemon *Daemon) setupIpcDirs(c *container.Container) error { rootUID, rootGID := daemon.GetRemappedUIDGID() if !c.HasMountFor("/dev/shm") { shmPath, err := c.ShmResourcePath() if err != nil { return err } if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { return err } shmSize := container.DefaultSHMSize if c.HostConfig.ShmSize != 0 { shmSize = c.HostConfig.ShmSize } shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { return fmt.Errorf("mounting shm tmpfs: %s", err) } if err := os.Chown(shmPath, rootUID, rootGID); err != nil { return err } } return nil } func (daemon *Daemon) mountVolumes(container 
*container.Container) error { mounts, err := daemon.setupMounts(container) if err != nil { return err } for _, m := range mounts { dest, err := container.GetResourcePath(m.Destination) if err != nil { return err } var stat os.FileInfo stat, err = os.Stat(m.Source) if err != nil { return err } if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { return err } opts := "rbind,ro" if m.Writable { opts = "rbind,rw" } if err := mount.Mount(m.Source, dest, "bind", opts); err != nil { return err } } return nil } func killProcessDirectly(container *container.Container) error { if _, err := container.WaitStop(10 * time.Second); err != nil { // Ensure that we don't kill ourselves if pid := container.GetPID(); pid != 0 { logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) if err := syscall.Kill(pid, 9); err != nil { if err != syscall.ESRCH { return err } logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid) } } } return nil } func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []*configs.Device, err error) { device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions) // if there was no error, return the device if err == nil { device.Path = deviceMapping.PathInContainer return append(devs, device), nil } // if the device is not a device node // try to see if it's a directory holding many devices if err == devices.ErrNotADevice { // check if it is a directory if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() { // mount the internal devices recursively filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error { childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) if e != nil { // ignore the device return nil } // add the device to userSpecified devices childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, 
deviceMapping.PathInContainer, 1) devs = append(devs, childDevice) return nil }) } } if len(devs) > 0 { return devs, nil } return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err) } func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device { if len(userDevices) == 0 { return defaultDevices } paths := map[string]*configs.Device{} for _, d := range userDevices { paths[d.Path] = d } var devs []*configs.Device for _, d := range defaultDevices { if _, defined := paths[d.Path]; !defined { devs = append(devs, d) } } return append(devs, userDevices...) } func detachMounted(path string) error { return syscall.Unmount(path, syscall.MNT_DETACH) } func isLinkable(child *container.Container) bool { // A container is linkable only if it belongs to the default network _, ok := child.NetworkSettings.Networks["bridge"] return ok } docker-1.10.3/daemon/container_operations_windows.go000066400000000000000000000130201267010174400226320ustar00rootroot00000000000000// +build windows package daemon import ( "strings" "github.com/docker/docker/container" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/windows" derr "github.com/docker/docker/errors" "github.com/docker/docker/layer" networktypes "github.com/docker/engine-api/types/network" "github.com/docker/libnetwork" ) func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { return nil, nil } // updateContainerNetworkSettings update the network settings func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error { return nil } func (daemon *Daemon) initializeNetworking(container *container.Container) error { return nil } // ConnectToNetwork connects a container to the network func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { return 
nil } // ForceEndpointDelete deletes an endpoing from a network forcefully func (daemon *Daemon) ForceEndpointDelete(name string, n libnetwork.Network) error { return nil } // DisconnectFromNetwork disconnects a container from the network. func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { return nil } func (daemon *Daemon) populateCommand(c *container.Container, env []string) error { en := &execdriver.Network{ Interface: nil, } parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) switch parts[0] { case "none": case "default", "": // empty string to support existing containers if !c.Config.NetworkDisabled { en.Interface = &execdriver.NetworkInterface{ MacAddress: c.Config.MacAddress, Bridge: daemon.configStore.bridgeConfig.VirtualSwitchName, PortBindings: c.HostConfig.PortBindings, // TODO Windows. Include IPAddress. There already is a // property IPAddress on execDrive.CommonNetworkInterface, // but there is no CLI option in docker to pass through // an IPAddress on docker run. } } default: return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.HostConfig.NetworkMode) } // TODO Windows. More resource controls to be implemented later. 
resources := &execdriver.Resources{ CommonResources: execdriver.CommonResources{ CPUShares: c.HostConfig.CPUShares, }, } processConfig := execdriver.ProcessConfig{ CommonProcessConfig: execdriver.CommonProcessConfig{ Entrypoint: c.Path, Arguments: c.Args, Tty: c.Config.Tty, }, ConsoleSize: c.HostConfig.ConsoleSize, } processConfig.Env = env var layerPaths []string img, err := daemon.imageStore.Get(c.ImageID) if err != nil { return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err) } if img.RootFS != nil && img.RootFS.Type == "layers+base" { max := len(img.RootFS.DiffIDs) for i := 0; i <= max; i++ { img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) if err != nil { return derr.ErrorCodeGetLayer.WithArgs(err) } // Reverse order, expecting parent most first layerPaths = append([]string{path}, layerPaths...) } } m, err := c.RWLayer.Metadata() if err != nil { return derr.ErrorCodeGetLayerMetadata.WithArgs(err) } layerFolder := m["dir"] var hvPartition bool // Work out the isolation (whether it is a hypervisor partition) if c.HostConfig.Isolation.IsDefault() { // Not specified by caller. 
Take daemon default hvPartition = windows.DefaultIsolation.IsHyperV() } else { // Take value specified by caller hvPartition = c.HostConfig.Isolation.IsHyperV() } c.Command = &execdriver.Command{ CommonCommand: execdriver.CommonCommand{ ID: c.ID, Rootfs: c.BaseFS, InitPath: "/.dockerinit", WorkingDir: c.Config.WorkingDir, Network: en, MountLabel: c.GetMountLabel(), Resources: resources, ProcessConfig: processConfig, ProcessLabel: c.GetProcessLabel(), }, FirstStart: !c.HasBeenStartedBefore, LayerFolder: layerFolder, LayerPaths: layerPaths, Hostname: c.Config.Hostname, Isolation: string(c.HostConfig.Isolation), ArgsEscaped: c.Config.ArgsEscaped, HvPartition: hvPartition, } return nil } // getSize returns real size & virtual size func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { // TODO Windows return 0, 0 } // setNetworkNamespaceKey is a no-op on Windows. func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error { return nil } // allocateNetwork is a no-op on Windows. func (daemon *Daemon) allocateNetwork(container *container.Container) error { return nil } func (daemon *Daemon) updateNetwork(container *container.Container) error { return nil } func (daemon *Daemon) releaseNetwork(container *container.Container) { } func (daemon *Daemon) setupIpcDirs(container *container.Container) error { return nil } // TODO Windows: Fix Post-TP4. This is a hack to allow docker cp to work // against containers which have volumes. You will still be able to cp // to somewhere on the container drive, but not to any mounted volumes // inside the container. Without this fix, docker cp is broken to any // container which has a volume, regardless of where the file is inside the // container. 
func (daemon *Daemon) mountVolumes(container *container.Container) error { return nil } func detachMounted(path string) error { return nil } func killProcessDirectly(container *container.Container) error { return nil } docker-1.10.3/daemon/create.go000066400000000000000000000124041267010174400161030ustar00rootroot00000000000000package daemon import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" volumestore "github.com/docker/docker/volume/store" "github.com/docker/engine-api/types" containertypes "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" "github.com/opencontainers/runc/libcontainer/label" ) // ContainerCreate creates a container. func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) { if params.Config == nil { return types.ContainerCreateResponse{}, derr.ErrorCodeEmptyConfig } warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config) if err != nil { return types.ContainerCreateResponse{Warnings: warnings}, err } err = daemon.verifyNetworkingConfig(params.NetworkingConfig) if err != nil { return types.ContainerCreateResponse{}, err } if params.HostConfig == nil { params.HostConfig = &containertypes.HostConfig{} } err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) if err != nil { return types.ContainerCreateResponse{Warnings: warnings}, err } container, err := daemon.create(params) if err != nil { return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err) } return types.ContainerCreateResponse{ID: container.ID, Warnings: warnings}, nil } // Create creates a new container from the given configuration with a given name. 
func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) { var ( container *container.Container img *image.Image imgID image.ID err error ) if params.Config.Image != "" { img, err = daemon.GetImage(params.Config.Image) if err != nil { return nil, err } imgID = img.ID() } if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { return nil, err } if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil { return nil, err } defer func() { if retErr != nil { if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil { logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err) } } }() if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { return nil, err } // Set RWLayer for container after mount labels have been set if err := daemon.setRWLayer(container); err != nil { return nil, err } if err := daemon.Register(container); err != nil { return nil, err } rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { return nil, err } if err := daemon.setHostConfig(container, params.HostConfig); err != nil { return nil, err } defer func() { if retErr != nil { if err := daemon.removeMountPoints(container, true); err != nil { logrus.Error(err) } } }() if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { return nil, err } var endpointsConfigs map[string]*networktypes.EndpointSettings if params.NetworkingConfig != nil { endpointsConfigs = params.NetworkingConfig.EndpointsConfig } if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil { return nil, err } if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving new container to disk: %v", err) return 
nil, err } daemon.LogContainerEvent(container, "create") return container, nil } func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode) ([]string, error) { if ipcMode.IsHost() || pidMode.IsHost() { return label.DisableSecOpt(), nil } if ipcContainer := ipcMode.Container(); ipcContainer != "" { c, err := daemon.GetContainer(ipcContainer) if err != nil { return nil, err } return label.DupSecOpt(c.ProcessLabel), nil } return nil, nil } func (daemon *Daemon) setRWLayer(container *container.Container) error { var layerID layer.ChainID if container.ImageID != "" { img, err := daemon.imageStore.Get(container.ImageID) if err != nil { return err } layerID = img.RootFS.ChainID() } rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.setupInitLayer) if err != nil { return err } container.RWLayer = rwLayer return nil } // VolumeCreate creates a volume with the specified name, driver, and opts // This is called directly from the remote API func (daemon *Daemon) VolumeCreate(name, driverName string, opts map[string]string) (*types.Volume, error) { if name == "" { name = stringid.GenerateNonCryptoID() } v, err := daemon.volumes.Create(name, driverName, opts) if err != nil { if volumestore.IsNameConflict(err) { return nil, derr.ErrorVolumeNameTaken.WithArgs(name) } return nil, err } daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) return volumeToAPIType(v), nil } docker-1.10.3/daemon/create_unix.go000066400000000000000000000043301267010174400171450ustar00rootroot00000000000000// +build !windows package daemon import ( "os" "path/filepath" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/stringid" containertypes "github.com/docker/engine-api/types/container" "github.com/opencontainers/runc/libcontainer/label" ) // createContainerPlatformSpecificSettings performs 
platform specific container create functionality func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { if err := daemon.Mount(container); err != nil { return err } defer daemon.Unmount(container) if err := container.SetupWorkingDirectory(); err != nil { return err } for spec := range config.Volumes { name := stringid.GenerateNonCryptoID() destination := filepath.Clean(spec) // Skip volumes for which we already have something mounted on that // destination because of a --volume-from. if container.IsDestinationMounted(destination) { continue } path, err := container.GetResourcePath(destination) if err != nil { return err } stat, err := os.Stat(path) if err == nil && !stat.IsDir() { return derr.ErrorCodeMountOverFile.WithArgs(path) } v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil) if err != nil { return err } if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { return err } container.AddMountPointWithVolume(destination, v, true) } return daemon.populateVolumes(container) } // populateVolumes copies data from the container's rootfs into the volume for non-binds. // this is only called when the container is created. 
func (daemon *Daemon) populateVolumes(c *container.Container) error { for _, mnt := range c.MountPoints { // skip binds and volumes referenced by other containers (ie, volumes-from) if mnt.Driver == "" || mnt.Volume == nil || len(daemon.volumes.Refs(mnt.Volume)) > 1 { continue } logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { return err } } return nil } docker-1.10.3/daemon/create_windows.go000066400000000000000000000052551267010174400176630ustar00rootroot00000000000000package daemon import ( "fmt" "github.com/docker/docker/container" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" containertypes "github.com/docker/engine-api/types/container" ) // createContainerPlatformSpecificSettings performs platform specific container create functionality func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { for spec := range config.Volumes { mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver) if err != nil { return fmt.Errorf("Unrecognised volume spec: %v", err) } // If the mountpoint doesn't have a name, generate one. if len(mp.Name) == 0 { mp.Name = stringid.GenerateNonCryptoID() } // Skip volumes for which we already have something mounted on that // destination because of a --volume-from. if container.IsDestinationMounted(mp.Destination) { continue } volumeDriver := hostConfig.VolumeDriver // Create the volume in the volume driver. If it doesn't exist, // a new one will be created. v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil) if err != nil { return err } // FIXME Windows: This code block is present in the Linux version and // allows the contents to be copied to the container FS prior to it // being started. 
However, the function utilizes the FollowSymLinkInScope // path which does not cope with Windows volume-style file paths. There // is a separate effort to resolve this (@swernli), so this processing // is deferred for now. A case where this would be useful is when // a dockerfile includes a VOLUME statement, but something is created // in that directory during the dockerfile processing. What this means // on Windows for TP4 is that in that scenario, the contents will not // copied, but that's (somewhat) OK as HCS will bomb out soon after // at it doesn't support mapped directories which have contents in the // destination path anyway. // // Example for repro later: // FROM windowsservercore // RUN mkdir c:\myvol // RUN copy c:\windows\system32\ntdll.dll c:\myvol // VOLUME "c:\myvol" // // Then // docker build -t vol . // docker run -it --rm vol cmd <-- This is where HCS will error out. // // // never attempt to copy existing content in a container FS to a shared volume // if v.DriverName() == volume.DefaultDriverName { // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { // return err // } // } // Add it to container.MountPoints container.AddMountPointWithVolume(mp.Destination, v, mp.RW) } return nil } docker-1.10.3/daemon/daemon.go000066400000000000000000001417001267010174400161050ustar00rootroot00000000000000// Package daemon exposes the functions that occur on the host server // that the Docker daemon is running. // // In implementing the various functions of the daemon, there is often // a method-specific struct for configuring the runtime behavior. 
package daemon import ( "errors" "fmt" "io" "io/ioutil" "net" "os" "path" "path/filepath" "runtime" "sync" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/api" "github.com/docker/docker/container" "github.com/docker/docker/daemon/events" "github.com/docker/docker/daemon/exec" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/execdrivers" "github.com/docker/engine-api/types" containertypes "github.com/docker/engine-api/types/container" eventtypes "github.com/docker/engine-api/types/events" "github.com/docker/engine-api/types/filters" networktypes "github.com/docker/engine-api/types/network" registrytypes "github.com/docker/engine-api/types/registry" "github.com/docker/engine-api/types/strslice" // register graph drivers _ "github.com/docker/docker/daemon/graphdriver/register" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/network" "github.com/docker/docker/distribution" dmetadata "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" derr "github.com/docker/docker/errors" "github.com/docker/docker/image" "github.com/docker/docker/image/tarexport" "github.com/docker/docker/layer" "github.com/docker/docker/migrate/v1" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/namesgenerator" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" 
"github.com/docker/docker/utils" volumedrivers "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" "github.com/docker/docker/volume/store" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork" lntypes "github.com/docker/libnetwork/types" "github.com/docker/libtrust" "github.com/opencontainers/runc/libcontainer" "golang.org/x/net/context" ) const ( // maxDownloadConcurrency is the maximum number of downloads that // may take place at a time for each pull. maxDownloadConcurrency = 3 // maxUploadConcurrency is the maximum number of uploads that // may take place at a time for each push. maxUploadConcurrency = 5 ) var ( validContainerNameChars = utils.RestrictedNameChars validContainerNamePattern = utils.RestrictedNamePattern errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") ) // ErrImageDoesNotExist is error returned when no image can be found for a reference. type ErrImageDoesNotExist struct { RefOrID string } func (e ErrImageDoesNotExist) Error() string { return fmt.Sprintf("no such id: %s", e.RefOrID) } // Daemon holds information about the Docker daemon. 
type Daemon struct { ID string repository string containers container.Store execCommands *exec.Store referenceStore reference.Store downloadManager *xfer.LayerDownloadManager uploadManager *xfer.LayerUploadManager distributionMetadataStore dmetadata.Store trustKey libtrust.PrivateKey idIndex *truncindex.TruncIndex configStore *Config execDriver execdriver.Driver statsCollector *statsCollector defaultLogConfig containertypes.LogConfig RegistryService *registry.Service EventsService *events.Events netController libnetwork.NetworkController volumes *store.VolumeStore discoveryWatcher discoveryReloader root string seccompEnabled bool shutdown bool uidMaps []idtools.IDMap gidMaps []idtools.IDMap layerStore layer.Store imageStore image.Store nameIndex *registrar.Registrar linkIndex *linkIndex } // GetContainer looks for a container using the provided information, which could be // one of the following inputs from the caller: // - A full container ID, which will exact match a container in daemon's list // - A container name, which will only exact match via the GetByName() function // - A partial container ID prefix (e.g. 
short ID) of any length that is // unique enough to only return a single container object // If none of these searches succeed, an error is returned func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { // prefix is an exact match to a full container ID return containerByID, nil } // GetByName will match only an exact name provided; we ignore errors if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { // prefix is an exact match to a full container Name return containerByName, nil } containerID, indexError := daemon.idIndex.Get(prefixOrName) if indexError != nil { // When truncindex defines an error type, use that instead if indexError == truncindex.ErrNotExist { return nil, derr.ErrorCodeNoSuchContainer.WithArgs(prefixOrName) } return nil, indexError } return daemon.containers.Get(containerID), nil } // Exists returns a true if a container of the specified ID or name exists, // false otherwise. func (daemon *Daemon) Exists(id string) bool { c, _ := daemon.GetContainer(id) return c != nil } // IsPaused returns a bool indicating if the specified container is paused. func (daemon *Daemon) IsPaused(id string) bool { c, _ := daemon.GetContainer(id) return c.State.IsPaused() } func (daemon *Daemon) containerRoot(id string) string { return filepath.Join(daemon.repository, id) } // Load reads the contents of a container from disk // This is typically done at startup. 
func (daemon *Daemon) load(id string) (*container.Container, error) { container := daemon.newBaseContainer(id) if err := container.FromDisk(); err != nil { return nil, err } if container.ID != id { return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) } return container, nil } func (daemon *Daemon) registerName(container *container.Container) error { if daemon.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } if err := validateID(container.ID); err != nil { return err } if container.Name == "" { name, err := daemon.generateNewName(container.ID) if err != nil { return err } container.Name = name if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving container name to disk: %v", err) } } return daemon.nameIndex.Reserve(container.Name, container.ID) } // Register makes a container object usable by the daemon as func (daemon *Daemon) Register(container *container.Container) error { // Attach to stdout and stderr if container.Config.OpenStdin { container.NewInputPipes() } else { container.NewNopInputPipe() } daemon.containers.Add(container.ID, container) daemon.idIndex.Add(container.ID) if container.IsRunning() { logrus.Debugf("killing old running container %s", container.ID) // Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit container.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 137}) // use the current driver and ensure that the container is dead x.x cmd := &execdriver.Command{ CommonCommand: execdriver.CommonCommand{ ID: container.ID, }, } daemon.execDriver.Terminate(cmd) container.UnmountIpcMounts(mount.Unmount) daemon.Unmount(container) if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving stopped state to disk: %v", err) } } return nil } func (daemon *Daemon) restore() error { var ( debug = utils.IsDebugEnabled() currentDriver = daemon.GraphDriverName() containers = make(map[string]*container.Container) ) if !debug { logrus.Info("Loading 
containers: start.") } dir, err := ioutil.ReadDir(daemon.repository) if err != nil { return err } for _, v := range dir { id := v.Name() container, err := daemon.load(id) if !debug && logrus.GetLevel() == logrus.InfoLevel { fmt.Print(".") } if err != nil { logrus.Errorf("Failed to load container %v: %v", id, err) continue } rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) if err != nil { logrus.Errorf("Failed to load container mount %v: %v", id, err) continue } container.RWLayer = rwlayer // Ignore the container if it does not support the current driver being used by the graph if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { logrus.Debugf("Loaded container %v", container.ID) containers[container.ID] = container } else { logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) } } var migrateLegacyLinks bool restartContainers := make(map[*container.Container]chan struct{}) for _, c := range containers { if err := daemon.registerName(c); err != nil { logrus.Errorf("Failed to register container %s: %s", c.ID, err) continue } if err := daemon.Register(c); err != nil { logrus.Errorf("Failed to register container %s: %s", c.ID, err) continue } // get list of containers we need to restart if daemon.configStore.AutoRestart && c.ShouldRestart() { restartContainers[c] = make(chan struct{}) } // if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated if c.HostConfig != nil && c.HostConfig.Links == nil { migrateLegacyLinks = true } } // migrate any legacy links from sqlite linkdbFile := filepath.Join(daemon.root, "linkgraph.db") var legacyLinkDB *graphdb.Database if migrateLegacyLinks { legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile) if err != nil { return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err) } defer legacyLinkDB.Close() } // Now that all the 
containers are registered, register the links for _, c := range containers { if migrateLegacyLinks { if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil { return err } } if err := daemon.registerLinks(c, c.HostConfig); err != nil { logrus.Errorf("failed to register link for container %s: %v", c.ID, err) } } group := sync.WaitGroup{} for c, notifier := range restartContainers { group.Add(1) go func(c *container.Container, chNotify chan struct{}) { defer group.Done() logrus.Debugf("Starting container %s", c.ID) // ignore errors here as this is a best effort to wait for children to be // running before we try to start the container children := daemon.children(c) timeout := time.After(5 * time.Second) for _, child := range children { if notifier, exists := restartContainers[child]; exists { select { case <-notifier: case <-timeout: } } } if err := daemon.containerStart(c); err != nil { logrus.Errorf("Failed to start container %s: %s", c.ID, err) } close(chNotify) }(c, notifier) } group.Wait() // any containers that were started above would already have had this done, // however we need to now prepare the mountpoints for the rest of the containers as well. // This shouldn't cause any issue running on the containers that already had this run. // This must be run after any containers with a restart policy so that containerized plugins // can have a chance to be running before we try to initialize them. 
for _, c := range containers { group.Add(1) go func(c *container.Container) { defer group.Done() if err := daemon.prepareMountPoints(c); err != nil { logrus.Error(err) } }(c) } group.Wait() if !debug { if logrus.GetLevel() == logrus.InfoLevel { fmt.Println() } logrus.Info("Loading containers: done.") } return nil } func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { if img != nil && img.Config != nil { if err := merge(config, img.Config); err != nil { return err } } if config.Entrypoint.Len() == 0 && config.Cmd.Len() == 0 { return fmt.Errorf("No command specified") } return nil } func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { var ( err error id = stringid.GenerateNonCryptoID() ) if name == "" { if name, err = daemon.generateNewName(id); err != nil { return "", "", err } return id, name, nil } if name, err = daemon.reserveName(id, name); err != nil { return "", "", err } return id, name, nil } func (daemon *Daemon) reserveName(id, name string) (string, error) { if !validContainerNamePattern.MatchString(name) { return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) } if name[0] != '/' { name = "/" + name } if err := daemon.nameIndex.Reserve(name, id); err != nil { if err == registrar.ErrNameReserved { id, err := daemon.nameIndex.Get(name) if err != nil { logrus.Errorf("got unexpected error while looking up reserved name: %v", err) return "", err } return "", fmt.Errorf("Conflict. The name %q is already in use by container %s. 
You have to remove (or rename) that container to be able to reuse that name.", name, id) } return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) } return name, nil } func (daemon *Daemon) releaseName(name string) { daemon.nameIndex.Release(name) } func (daemon *Daemon) generateNewName(id string) (string, error) { var name string for i := 0; i < 6; i++ { name = namesgenerator.GetRandomName(i) if name[0] != '/' { name = "/" + name } if err := daemon.nameIndex.Reserve(name, id); err != nil { if err == registrar.ErrNameReserved { continue } return "", err } return name, nil } name = "/" + stringid.TruncateID(id) if err := daemon.nameIndex.Reserve(name, id); err != nil { return "", err } return name, nil } func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { // Generate default hostname if config.Hostname == "" { config.Hostname = id[:12] } } func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *strslice.StrSlice, configCmd *strslice.StrSlice) (string, []string) { cmdSlice := configCmd.Slice() if configEntrypoint.Len() != 0 { eSlice := configEntrypoint.Slice() return eSlice[0], append(eSlice[1:], cmdSlice...) 
} return cmdSlice[0], cmdSlice[1:] } func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID) (*container.Container, error) { var ( id string err error noExplicitName = name == "" ) id, name, err = daemon.generateIDAndName(name) if err != nil { return nil, err } daemon.generateHostname(id, config) entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) base := daemon.newBaseContainer(id) base.Created = time.Now().UTC() base.Path = entrypoint base.Args = args //FIXME: de-duplicate from config base.Config = config base.HostConfig = &containertypes.HostConfig{} base.ImageID = imgID base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} base.Name = name base.Driver = daemon.GraphDriverName() return base, err } // GetByName returns a container given a name. func (daemon *Daemon) GetByName(name string) (*container.Container, error) { fullName := name if name[0] != '/' { fullName = "/" + name } id, err := daemon.nameIndex.Get(fullName) if err != nil { return nil, fmt.Errorf("Could not find entity for %s", name) } e := daemon.containers.Get(id) if e == nil { return nil, fmt.Errorf("Could not find container for entity id %s", id) } return e, nil } // SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. func (daemon *Daemon) SubscribeToEvents(since, sinceNano int64, filter filters.Args) ([]eventtypes.Message, chan interface{}) { ef := events.NewFilter(filter) return daemon.EventsService.SubscribeTopic(since, sinceNano, ef) } // UnsubscribeFromEvents stops the event subscription for a client by closing the // channel where the daemon sends events to. 
func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { daemon.EventsService.Evict(listener) } // GetLabels for a container or image id func (daemon *Daemon) GetLabels(id string) map[string]string { // TODO: TestCase container := daemon.containers.Get(id) if container != nil { return container.Config.Labels } img, err := daemon.GetImage(id) if err == nil { return img.ContainerConfig.Labels } return nil } func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { return daemon.linkIndex.children(c) } // parents returns the names of the parent containers of the container // with the given name. func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { return daemon.linkIndex.parents(c) } func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { fullName := path.Join(parent.Name, alias) if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { if err == registrar.ErrNameReserved { logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) return nil } return err } daemon.linkIndex.link(parent, child, fullName) return nil } // NewDaemon sets up everything for the daemon to be able to service // requests from the webserver. func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) { setDefaultMtu(config) // Ensure we have compatible and valid configuration options if err := verifyDaemonSettings(config); err != nil { return nil, err } // Do we have a disabled network? 
config.DisableBridge = isBridgeNetworkDisabled(config) // Verify the platform is supported as a daemon if !platformSupported { return nil, errSystemNotSupported } // Validate platform-specific requirements if err := checkSystem(); err != nil { return nil, err } // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event // on Windows to dump Go routine stacks setupDumpStackTrap() uidMaps, gidMaps, err := setupRemappedRoot(config) if err != nil { return nil, err } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } // get the canonical path to the Docker root directory var realRoot string if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { realRoot = config.Root } else { realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) if err != nil { return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) } } if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { return nil, err } // set up the tmpDir to use a canonical path tmp, err := tempDir(config.Root, rootUID, rootGID) if err != nil { return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) } realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) if err != nil { return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) } os.Setenv("TMPDIR", realTmp) d := &Daemon{} // Ensure the daemon is properly shutdown if there is a failure during // initialization defer func() { if err != nil { if err := d.Shutdown(); err != nil { logrus.Error(err) } } }() // Verify logging driver type if config.LogConfig.Type != "none" { if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil { return nil, fmt.Errorf("error finding the logging driver: %v", err) } } logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) daemonRepo := filepath.Join(config.Root, "containers") if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, 
rootGID); err != nil && !os.IsExist(err) { return nil, err } driverName := os.Getenv("DOCKER_DRIVER") if driverName == "" { driverName = config.GraphDriver } d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ StorePath: config.Root, MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), GraphDriver: driverName, GraphDriverOptions: config.GraphOptions, UIDMaps: uidMaps, GIDMaps: gidMaps, }) if err != nil { return nil, err } graphDriver := d.layerStore.DriverName() imageRoot := filepath.Join(config.Root, "image", graphDriver) // Configure and validate the kernels security support if err := configureKernelSecuritySupport(config, graphDriver); err != nil { return nil, err } d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, maxDownloadConcurrency) d.uploadManager = xfer.NewLayerUploadManager(maxUploadConcurrency) ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) if err != nil { return nil, err } d.imageStore, err = image.NewImageStore(ifs, d.layerStore) if err != nil { return nil, err } // Configure the volumes driver volStore, err := configureVolumes(config, rootUID, rootGID) if err != nil { return nil, err } trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) if err != nil { return nil, err } trustDir := filepath.Join(config.Root, "trust") if err := system.MkdirAll(trustDir, 0700); err != nil { return nil, err } distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) if err != nil { return nil, err } eventsService := events.New() referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) if err != nil { return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) } if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil { return nil, fmt.Errorf("Couldn't restore custom images: %s", err) } migrationStart := time.Now() if err := v1.Migrate(config.Root, 
graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) } logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) // Discovery is only enabled when the daemon is launched with an address to advertise. When // initialized, the daemon is registered and we can store the discovery backend as its read-only if err := d.initDiscovery(config); err != nil { return nil, err } d.netController, err = d.initNetworkController(config) if err != nil { return nil, fmt.Errorf("Error initializing network controller: %v", err) } sysInfo := sysinfo.New(false) // Check if Devices cgroup is mounted, it is hard requirement for container security, // on Linux/FreeBSD. 
if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled { return nil, fmt.Errorf("Devices cgroup isn't mounted") } ed, err := execdrivers.NewDriver(config.ExecOptions, config.ExecRoot, config.Root, sysInfo) if err != nil { return nil, err } d.ID = trustKey.PublicKey().KeyID() d.repository = daemonRepo d.containers = container.NewMemoryStore() d.execCommands = exec.NewStore() d.referenceStore = referenceStore d.distributionMetadataStore = distributionMetadataStore d.trustKey = trustKey d.idIndex = truncindex.NewTruncIndex([]string{}) d.configStore = config d.execDriver = ed d.statsCollector = d.newStatsCollector(1 * time.Second) d.defaultLogConfig = containertypes.LogConfig{ Type: config.LogConfig.Type, Config: config.LogConfig.Config, } d.RegistryService = registryService d.EventsService = eventsService d.volumes = volStore d.root = config.Root d.uidMaps = uidMaps d.gidMaps = gidMaps d.seccompEnabled = sysInfo.Seccomp d.nameIndex = registrar.NewRegistrar() d.linkIndex = newLinkIndex() if err := d.cleanupMounts(); err != nil { return nil, err } go d.execCommandGC() if err := d.restore(); err != nil { return nil, err } return d, nil } func (daemon *Daemon) shutdownContainer(c *container.Container) error { // TODO(windows): Handle docker restart with paused containers if c.IsPaused() { // To terminate a process in freezer cgroup, we should send // SIGTERM to this process then unfreeze it, and the process will // force to terminate immediately. 
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpause it", c.ID) sig, ok := signal.SignalMap["TERM"] if !ok { return fmt.Errorf("System doesn not support SIGTERM") } if err := daemon.kill(c, int(sig)); err != nil { return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) } if err := daemon.containerUnpause(c); err != nil { return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) } if _, err := c.WaitStop(10 * time.Second); err != nil { logrus.Debugf("container %s failed to exit in 10 second of SIGTERM, sending SIGKILL to force", c.ID) sig, ok := signal.SignalMap["KILL"] if !ok { return fmt.Errorf("System does not support SIGKILL") } if err := daemon.kill(c, int(sig)); err != nil { logrus.Errorf("Failed to SIGKILL container %s", c.ID) } c.WaitStop(-1 * time.Second) return err } } // If container failed to exit in 10 seconds of SIGTERM, then using the force if err := daemon.containerStop(c, 10); err != nil { return fmt.Errorf("Stop container %s with error: %v", c.ID, err) } c.WaitStop(-1 * time.Second) return nil } // Shutdown stops the daemon. func (daemon *Daemon) Shutdown() error { daemon.shutdown = true if daemon.containers != nil { logrus.Debug("starting clean shutdown of all containers...") daemon.containers.ApplyAll(func(c *container.Container) { if !c.IsRunning() { return } logrus.Debugf("stopping %s", c.ID) if err := daemon.shutdownContainer(c); err != nil { logrus.Errorf("Stop container error: %v", err) return } logrus.Debugf("container stopped %s", c.ID) }) } // trigger libnetwork Stop only if it's initialized if daemon.netController != nil { daemon.netController.Stop() } if daemon.layerStore != nil { if err := daemon.layerStore.Cleanup(); err != nil { logrus.Errorf("Error during layer Store.Cleanup(): %v", err) } } if err := daemon.cleanupMounts(); err != nil { return err } return nil } // Mount sets container.BaseFS // (is it not set coming in? why is it unset?) 
func (daemon *Daemon) Mount(container *container.Container) error { dir, err := container.RWLayer.Mount(container.GetMountLabel()) if err != nil { return err } logrus.Debugf("container mounted via layerStore: %v", dir) if container.BaseFS != dir { // The mount path reported by the graph driver should always be trusted on Windows, since the // volume path for a given mounted layer may change over time. This should only be an error // on non-Windows operating systems. if container.BaseFS != "" && runtime.GOOS != "windows" { daemon.Unmount(container) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", daemon.GraphDriverName(), container.ID, container.BaseFS, dir) } } container.BaseFS = dir // TODO: combine these fields return nil } // Unmount unsets the container base filesystem func (daemon *Daemon) Unmount(container *container.Container) { if err := container.RWLayer.Unmount(); err != nil { logrus.Errorf("Error unmounting container %s: %s", container.ID, err) } } // Run uses the execution driver to run a given container func (daemon *Daemon) Run(c *container.Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) { hooks := execdriver.Hooks{ Start: startCallback, } hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error { return daemon.setNetworkNamespaceKey(c.ID, pid) }) return daemon.execDriver.Run(c.Command, pipes, hooks) } func (daemon *Daemon) kill(c *container.Container, sig int) error { return daemon.execDriver.Kill(c.Command, sig) } func (daemon *Daemon) stats(c *container.Container) (*execdriver.ResourceStats, error) { return daemon.execDriver.Stats(c.ID) } func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { return daemon.statsCollector.collect(c) } func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { 
daemon.statsCollector.unsubscribe(c, ch) } func (daemon *Daemon) changes(container *container.Container) ([]archive.Change, error) { return container.RWLayer.Changes() } // TagImage creates a tag in the repository reponame, pointing to the image named // imageName. func (daemon *Daemon) TagImage(newTag reference.Named, imageName string) error { imageID, err := daemon.GetImageID(imageName) if err != nil { return err } if err := daemon.referenceStore.AddTag(newTag, imageID, true); err != nil { return err } daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") return nil } func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) operationCancelled := false for prog := range progressChan { if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { // don't log broken pipe errors as this is the normal case when a client aborts if isBrokenPipe(err) { logrus.Info("Pull session cancelled") } else { logrus.Errorf("error writing progress to client: %v", err) } cancelFunc() operationCancelled = true // Don't return, because we need to continue draining // progressChan until it's closed to avoid a deadlock. } } } func isBrokenPipe(e error) bool { if netErr, ok := e.(*net.OpError); ok { e = netErr.Err if sysErr, ok := netErr.Err.(*os.SyscallError); ok { e = sysErr.Err } } return e == syscall.EPIPE } // PullImage initiates a pull operation. image is the repository name to pull, and // tag may be either empty, or indicate a specific tag to pull. func (daemon *Daemon) PullImage(ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { // Include a buffer so that slow client connections don't affect // transfer performance. 
progressChan := make(chan progress.Progress, 100) writesDone := make(chan struct{}) ctx, cancelFunc := context.WithCancel(context.Background()) go func() { writeDistributionProgress(cancelFunc, outStream, progressChan) close(writesDone) }() imagePullConfig := &distribution.ImagePullConfig{ MetaHeaders: metaHeaders, AuthConfig: authConfig, ProgressOutput: progress.ChanOutput(progressChan), RegistryService: daemon.RegistryService, ImageEventLogger: daemon.LogImageEvent, MetadataStore: daemon.distributionMetadataStore, ImageStore: daemon.imageStore, ReferenceStore: daemon.referenceStore, DownloadManager: daemon.downloadManager, } err := distribution.Pull(ctx, ref, imagePullConfig) close(progressChan) <-writesDone return err } // ExportImage exports a list of images to the given output stream. The // exported images are archived into a tar when written to the output // stream. All images with the given tag and all versions containing // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore) return imageExporter.Save(names, outStream) } // PushImage initiates a push operation on the repository named localName. func (daemon *Daemon) PushImage(ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { // Include a buffer so that slow client connections don't affect // transfer performance. 
progressChan := make(chan progress.Progress, 100) writesDone := make(chan struct{}) ctx, cancelFunc := context.WithCancel(context.Background()) go func() { writeDistributionProgress(cancelFunc, outStream, progressChan) close(writesDone) }() imagePushConfig := &distribution.ImagePushConfig{ MetaHeaders: metaHeaders, AuthConfig: authConfig, ProgressOutput: progress.ChanOutput(progressChan), RegistryService: daemon.RegistryService, ImageEventLogger: daemon.LogImageEvent, MetadataStore: daemon.distributionMetadataStore, LayerStore: daemon.layerStore, ImageStore: daemon.imageStore, ReferenceStore: daemon.referenceStore, TrustKey: daemon.trustKey, UploadManager: daemon.uploadManager, } err := distribution.Push(ctx, ref, imagePushConfig) close(progressChan) <-writesDone return err } // LookupImage looks up an image by name and returns it as an ImageInspect // structure. func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { img, err := daemon.GetImage(name) if err != nil { return nil, fmt.Errorf("No such image: %s", name) } refs := daemon.referenceStore.References(img.ID()) repoTags := []string{} repoDigests := []string{} for _, ref := range refs { switch ref.(type) { case reference.NamedTagged: repoTags = append(repoTags, ref.String()) case reference.Canonical: repoDigests = append(repoDigests, ref.String()) } } var size int64 var layerMetadata map[string]string layerID := img.RootFS.ChainID() if layerID != "" { l, err := daemon.layerStore.Get(layerID) if err != nil { return nil, err } defer layer.ReleaseAndLog(daemon.layerStore, l) size, err = l.Size() if err != nil { return nil, err } layerMetadata, err = l.Metadata() if err != nil { return nil, err } } comment := img.Comment if len(comment) == 0 && len(img.History) > 0 { comment = img.History[len(img.History)-1].Comment } imageInspect := &types.ImageInspect{ ID: img.ID().String(), RepoTags: repoTags, RepoDigests: repoDigests, Parent: img.Parent.String(), Comment: comment, Created: 
img.Created.Format(time.RFC3339Nano), Container: img.Container, ContainerConfig: &img.ContainerConfig, DockerVersion: img.DockerVersion, Author: img.Author, Config: img.Config, Architecture: img.Architecture, Os: img.OS, Size: size, VirtualSize: size, // TODO: field unused, deprecate } imageInspect.GraphDriver.Name = daemon.GraphDriverName() imageInspect.GraphDriver.Data = layerMetadata return imageInspect, nil } // LoadImage uploads a set of images into the repository. This is the // complement of ImageExport. The input stream is an uncompressed tar // ball containing images and metadata. func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer) error { imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore) return imageExporter.Load(inTar, outStream) } // ImageHistory returns a slice of ImageHistory structures for the specified image // name by walking the image lineage. func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { img, err := daemon.GetImage(name) if err != nil { return nil, err } history := []*types.ImageHistory{} layerCounter := 0 rootFS := *img.RootFS rootFS.DiffIDs = nil for _, h := range img.History { var layerSize int64 if !h.EmptyLayer { if len(img.RootFS.DiffIDs) <= layerCounter { return nil, errors.New("too many non-empty layers in History section") } rootFS.Append(img.RootFS.DiffIDs[layerCounter]) l, err := daemon.layerStore.Get(rootFS.ChainID()) if err != nil { return nil, err } layerSize, err = l.DiffSize() layer.ReleaseAndLog(daemon.layerStore, l) if err != nil { return nil, err } layerCounter++ } history = append([]*types.ImageHistory{{ ID: "", Created: h.Created.Unix(), CreatedBy: h.CreatedBy, Comment: h.Comment, Size: layerSize, }}, history...) 
} // Fill in image IDs and tags histImg := img id := img.ID() for _, h := range history { h.ID = id.String() var tags []string for _, r := range daemon.referenceStore.References(id) { if _, ok := r.(reference.NamedTagged); ok { tags = append(tags, r.String()) } } h.Tags = tags id = histImg.Parent if id == "" { break } histImg, err = daemon.GetImage(id.String()) if err != nil { break } } return history, nil } // GetImageID returns an image ID corresponding to the image referred to by // refOrID. func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { // Treat as an ID if id, err := digest.ParseDigest(refOrID); err == nil { if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { return "", ErrImageDoesNotExist{refOrID} } return image.ID(id), nil } // Treat it as a possible tag or digest reference if ref, err := reference.ParseNamed(refOrID); err == nil { if id, err := daemon.referenceStore.Get(ref); err == nil { return id, nil } if tagged, ok := ref.(reference.NamedTagged); ok { if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { for _, namedRef := range daemon.referenceStore.References(id) { if namedRef.Name() == ref.Name() { return id, nil } } } } } // Search based on ID if id, err := daemon.imageStore.Search(refOrID); err == nil { return id, nil } return "", ErrImageDoesNotExist{refOrID} } // GetImage returns an image corresponding to the image referred to by refOrID. func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { imgID, err := daemon.GetImageID(refOrID) if err != nil { return nil, err } return daemon.imageStore.Get(imgID) } // GraphDriverName returns the name of the graph driver used by the layer.Store func (daemon *Daemon) GraphDriverName() string { return daemon.layerStore.DriverName() } // ExecutionDriver returns the currently used driver for creating and // starting execs in a container. 
func (daemon *Daemon) ExecutionDriver() execdriver.Driver { return daemon.execDriver } // GetUIDGIDMaps returns the current daemon's user namespace settings // for the full uid and gid maps which will be applied to containers // started in this instance. func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { return daemon.uidMaps, daemon.gidMaps } // GetRemappedUIDGID returns the current daemon's uid and gid values // if user namespaces are in use for this daemon instance. If not // this function will return "real" root values of 0, 0. func (daemon *Daemon) GetRemappedUIDGID() (int, int) { uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) return uid, gid } // ImageGetCached returns the most recent created image that is a child // of the image with imgID, that had the same config when it was // created. nil is returned if a child cannot be found. An error is // returned if the parent image cannot be found. func (daemon *Daemon) ImageGetCached(imgID image.ID, config *containertypes.Config) (*image.Image, error) { // Loop on the children of the given image and check the config getMatch := func(siblings []image.ID) (*image.Image, error) { var match *image.Image for _, id := range siblings { img, err := daemon.imageStore.Get(id) if err != nil { return nil, fmt.Errorf("unable to find image %q", id) } if runconfig.Compare(&img.ContainerConfig, config) { // check for the most up to date match if match == nil || match.Created.Before(img.Created) { match = img } } } return match, nil } // In this case, this is `FROM scratch`, which isn't an actual image. if imgID == "" { images := daemon.imageStore.Map() var siblings []image.ID for id, img := range images { if img.Parent == imgID { siblings = append(siblings, id) } } return getMatch(siblings) } // find match from child images siblings := daemon.imageStore.Children(imgID) return getMatch(siblings) } // tempDir returns the default directory to use for temporary files. 
func tempDir(rootDir string, rootUID, rootGID int) (string, error) { var tmpDir string if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { tmpDir = filepath.Join(rootDir, "tmp") } return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) } func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { container.Lock() defer container.Unlock() return parseSecurityOpt(container, hostConfig) } func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { // Do not lock while creating volumes since this could be calling out to external plugins // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin if err := daemon.registerMountPoints(container, hostConfig); err != nil { return err } container.Lock() defer container.Unlock() // Register any links from the host config before starting the container if err := daemon.registerLinks(container, hostConfig); err != nil { return err } // make sure links is not nil // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links if hostConfig.Links == nil { hostConfig.Links = []string{} } container.HostConfig = hostConfig return container.ToDisk() } func (daemon *Daemon) setupInitLayer(initPath string) error { rootUID, rootGID := daemon.GetRemappedUIDGID() return setupInitLayer(initPath, rootUID, rootGID) } func setDefaultMtu(config *Config) { // do nothing if the config does not have the default 0 value. if config.Mtu != 0 { return } config.Mtu = defaultNetworkMtu } // verifyContainerSettings performs validation of the hostconfig and config // structures. func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config) ([]string, error) { // First perform verification of settings common across all platforms. 
if config != nil { if config.WorkingDir != "" { config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics if !system.IsAbs(config.WorkingDir) { return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir) } } if len(config.StopSignal) > 0 { _, err := signal.ParseSignal(config.StopSignal) if err != nil { return nil, err } } } if hostConfig == nil { return nil, nil } for port := range hostConfig.PortBindings { _, portStr := nat.SplitProtoPort(string(port)) if _, err := nat.ParsePort(portStr); err != nil { return nil, fmt.Errorf("Invalid port specification: %q", portStr) } for _, pb := range hostConfig.PortBindings[port] { _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) if err != nil { return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort) } } } // Now do platform-specific verification return verifyPlatformContainerSettings(daemon, hostConfig, config) } // Checks if the client set configurations for more than one network while creating a container func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { if nwConfig == nil || len(nwConfig.EndpointsConfig) <= 1 { return nil } l := make([]string, 0, len(nwConfig.EndpointsConfig)) for k := range nwConfig.EndpointsConfig { l = append(l, k) } return derr.ErrorCodeMultipleNetworkConnect.WithArgs(fmt.Sprintf("%v", l)) } func configureVolumes(config *Config, rootUID, rootGID int) (*store.VolumeStore, error) { volumesDriver, err := local.New(config.Root, rootUID, rootGID) if err != nil { return nil, err } volumedrivers.Register(volumesDriver, volumesDriver.Name()) return store.New(), nil } // AuthenticateToRegistry checks the validity of credentials in authConfig func (daemon *Daemon) AuthenticateToRegistry(authConfig *types.AuthConfig) (string, error) { return daemon.RegistryService.Auth(authConfig) } // SearchRegistryForImages queries the registry for images matching // term. 
authConfig is used to login. func (daemon *Daemon) SearchRegistryForImages(term string, authConfig *types.AuthConfig, headers map[string][]string) (*registrytypes.SearchResults, error) { return daemon.RegistryService.Search(term, authConfig, headers) } // IsShuttingDown tells whether the daemon is shutting down or not func (daemon *Daemon) IsShuttingDown() bool { return daemon.shutdown } // GetContainerStats collects all the stats published by a container func (daemon *Daemon) GetContainerStats(container *container.Container) (*execdriver.ResourceStats, error) { stats, err := daemon.stats(container) if err != nil { return nil, err } // Retrieve the nw statistics from libnetwork and inject them in the Stats var nwStats []*libcontainer.NetworkInterface if nwStats, err = daemon.getNetworkStats(container); err != nil { return nil, err } stats.Interfaces = nwStats return stats, nil } func (daemon *Daemon) getNetworkStats(c *container.Container) ([]*libcontainer.NetworkInterface, error) { var list []*libcontainer.NetworkInterface sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) if err != nil { return list, err } stats, err := sb.Statistics() if err != nil { return list, err } // Convert libnetwork nw stats into libcontainer nw stats for ifName, ifStats := range stats { list = append(list, convertLnNetworkStats(ifName, ifStats)) } return list, nil } // newBaseContainer creates a new container with its initial // configuration based on the root storage from the daemon. func (daemon *Daemon) newBaseContainer(id string) *container.Container { return container.NewBaseContainer(id, daemon.containerRoot(id)) } // initDiscovery initializes the discovery watcher for this daemon. 
func (daemon *Daemon) initDiscovery(config *Config) error { advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) if err != nil { if err == errDiscoveryDisabled { return nil } return err } config.ClusterAdvertise = advertise discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) if err != nil { return fmt.Errorf("discovery initialization failed (%v)", err) } daemon.discoveryWatcher = discoveryWatcher return nil } // Reload reads configuration changes and modifies the // daemon according to those changes. // This are the settings that Reload changes: // - Daemon labels. func (daemon *Daemon) Reload(config *Config) error { daemon.configStore.reloadLock.Lock() daemon.configStore.Labels = config.Labels daemon.configStore.reloadLock.Unlock() return nil } func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { newAdvertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) if err != nil && err != errDiscoveryDisabled { return err } // check discovery modifications if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, config.ClusterStore, config.ClusterOpts) { return nil } // enable discovery for the first time if it was not previously enabled if daemon.discoveryWatcher == nil { discoveryWatcher, err := initDiscovery(config.ClusterStore, newAdvertise, config.ClusterOpts) if err != nil { return fmt.Errorf("discovery initialization failed (%v)", err) } daemon.discoveryWatcher = discoveryWatcher } else { if err == errDiscoveryDisabled { // disable discovery if it was previously enabled and it's disabled now daemon.discoveryWatcher.Stop() } else { // reload discovery if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil { return err } } } daemon.configStore.ClusterStore = config.ClusterStore daemon.configStore.ClusterOpts = config.ClusterOpts daemon.configStore.ClusterAdvertise = 
newAdvertise return nil } func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *libcontainer.NetworkInterface { n := &libcontainer.NetworkInterface{Name: name} n.RxBytes = stats.RxBytes n.RxPackets = stats.RxPackets n.RxErrors = stats.RxErrors n.RxDropped = stats.RxDropped n.TxBytes = stats.TxBytes n.TxPackets = stats.TxPackets n.TxErrors = stats.TxErrors n.TxDropped = stats.TxDropped return n } func validateID(id string) error { if id == "" { return derr.ErrorCodeEmptyID } return nil } docker-1.10.3/daemon/daemon_experimental.go000066400000000000000000000003741267010174400206630ustar00rootroot00000000000000// +build experimental package daemon import "github.com/docker/engine-api/types/container" func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { return nil, nil } docker-1.10.3/daemon/daemon_linux.go000066400000000000000000000025541267010174400173270ustar00rootroot00000000000000package daemon import ( "bufio" "fmt" "io" "os" "path/filepath" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/mount" ) // cleanupMounts umounts shm/mqueue mounts for old containers func (daemon *Daemon) cleanupMounts() error { logrus.Debugf("Cleaning up old shm/mqueue mounts: start.") f, err := os.Open("/proc/self/mountinfo") if err != nil { return err } defer f.Close() return daemon.cleanupMountsFromReader(f, mount.Unmount) } func (daemon *Daemon) cleanupMountsFromReader(reader io.Reader, unmount func(target string) error) error { if daemon.repository == "" { return nil } sc := bufio.NewScanner(reader) var errors []string for sc.Scan() { line := sc.Text() fields := strings.Fields(line) if strings.HasPrefix(fields[4], daemon.repository) { logrus.Debugf("Mount base: %v, repository %s", fields[4], daemon.repository) mnt := fields[4] mountBase := filepath.Base(mnt) if mountBase == "mqueue" || mountBase == "shm" { logrus.Debugf("Unmounting %v", mnt) if err := 
unmount(mnt); err != nil { logrus.Error(err) errors = append(errors, err.Error()) } } } } if err := sc.Err(); err != nil { return err } if len(errors) > 0 { return fmt.Errorf("Error cleaningup mounts:\n%v", strings.Join(errors, "\n")) } logrus.Debugf("Cleaning up old shm/mqueue mounts: done.") return nil } docker-1.10.3/daemon/daemon_linux_test.go000066400000000000000000000117601267010174400203650ustar00rootroot00000000000000// +build linux package daemon import ( "strings" "testing" ) func TestCleanupMounts(t *testing.T) { fixture := `230 138 0:60 / / rw,relatime - overlay overlay rw,lowerdir=/var/lib/docker/overlay/0ef9f93d5d365c1385b09d54bbee6afff3d92002c16f22eccb6e1549b2ff97d8/root,upperdir=/var/lib/docker/overlay/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb/upper,workdir=/var/lib/docker/overlay/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb/work 231 230 0:56 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 232 230 0:57 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 233 232 0:58 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k 235 232 0:55 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 236 230 0:61 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw 237 236 0:62 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw 238 237 0:21 /system.slice/docker.service /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 239 237 0:23 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event 240 237 0:24 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset,clone_children 241 237 0:25 
/docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices 242 237 0:26 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer 243 237 0:27 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu,cpuacct 244 237 0:28 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio 245 237 0:29 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,net_cls,net_prio 246 237 0:30 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb 247 237 0:31 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory 248 230 253:1 /var/lib/docker/volumes/510cc41ac68c48bd4eac932e3e09711673876287abf1b185312cfbfe6261a111/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/ba70ea0c-1a8f-4ee4-9687-cb393730e2b5 rw,errors=remount-ro,data=ordered 250 230 253:1 /var/lib/docker/containers/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/ba70ea0c-1a8f-4ee4-9687-cb393730e2b5 rw,errors=remount-ro,data=ordered 251 230 253:1 /var/lib/docker/containers/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/ba70ea0c-1a8f-4ee4-9687-cb393730e2b5 rw,errors=remount-ro,data=ordered 252 232 0:13 /1 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 139 236 0:11 
/ /sys/kernel/security rw,relatime - securityfs none rw 140 230 0:54 / /tmp rw,relatime - tmpfs none rw 145 230 0:3 / /run/docker/netns/default rw - nsfs nsfs rw 130 140 0:45 / /tmp/docker_recursive_mount_test312125472/tmpfs rw,relatime - tmpfs tmpfs rw 131 230 0:3 / /run/docker/netns/47903e2e6701 rw - nsfs nsfs rw 133 230 0:55 / /go/src/github.com/docker/docker/bundles/1.9.0-dev/test-integration-cli/d45526097/graph/containers/47903e2e67014246eba27607809d5f5c2437c3bf84c2986393448f84093cc40b/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw` d := &Daemon{ repository: "/go/src/github.com/docker/docker/bundles/1.9.0-dev/test-integration-cli/d45526097/graph/containers/", } expected := "/go/src/github.com/docker/docker/bundles/1.9.0-dev/test-integration-cli/d45526097/graph/containers/47903e2e67014246eba27607809d5f5c2437c3bf84c2986393448f84093cc40b/mqueue" var unmounted bool unmount := func(target string) error { if target == expected { unmounted = true } return nil } d.cleanupMountsFromReader(strings.NewReader(fixture), unmount) if !unmounted { t.Fatalf("Expected to unmount the mqueue") } } func TestNotCleanupMounts(t *testing.T) { d := &Daemon{ repository: "", } var unmounted bool unmount := func(target string) error { unmounted = true return nil } mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` d.cleanupMountsFromReader(strings.NewReader(mountInfo), unmount) if unmounted { t.Fatalf("Expected not to clean up /dev/shm") } } docker-1.10.3/daemon/daemon_stub.go000066400000000000000000000003751267010174400171440ustar00rootroot00000000000000// +build !experimental package daemon import "github.com/docker/engine-api/types/container" func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { return nil, nil } docker-1.10.3/daemon/daemon_test.go000066400000000000000000000355301267010174400171470ustar00rootroot00000000000000package daemon 
import ( "io/ioutil" "os" "path/filepath" "reflect" "testing" "time" "github.com/docker/docker/container" "github.com/docker/docker/pkg/discovery" _ "github.com/docker/docker/pkg/discovery/memory" "github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/volume" volumedrivers "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" "github.com/docker/docker/volume/store" containertypes "github.com/docker/engine-api/types/container" "github.com/docker/go-connections/nat" ) // // https://github.com/docker/docker/issues/8069 // func TestGetContainer(t *testing.T) { c1 := &container.Container{ CommonContainer: container.CommonContainer{ ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", Name: "tender_bardeen", }, } c2 := &container.Container{ CommonContainer: container.CommonContainer{ ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", Name: "drunk_hawking", }, } c3 := &container.Container{ CommonContainer: container.CommonContainer{ ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", Name: "3cdbd1aa", }, } c4 := &container.Container{ CommonContainer: container.CommonContainer{ ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", }, } c5 := &container.Container{ CommonContainer: container.CommonContainer{ ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", Name: "d22d69a2b896", }, } store := container.NewMemoryStore() store.Add(c1.ID, c1) store.Add(c2.ID, c2) store.Add(c3.ID, c3) store.Add(c4.ID, c4) store.Add(c5.ID, c5) index := truncindex.NewTruncIndex([]string{}) index.Add(c1.ID) index.Add(c2.ID) index.Add(c3.ID) index.Add(c4.ID) index.Add(c5.ID) daemon := &Daemon{ containers: store, idIndex: index, nameIndex: registrar.NewRegistrar(), } daemon.reserveName(c1.ID, c1.Name) daemon.reserveName(c2.ID, c2.Name) 
daemon.reserveName(c3.ID, c3.Name) daemon.reserveName(c4.ID, c4.Name) daemon.reserveName(c5.ID, c5.Name) if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { t.Fatal("Should explicitly match full container IDs") } if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 { t.Fatal("Should match a partial ID") } if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 { t.Fatal("Should match a full name") } // c3.Name is a partial match for both c3.ID and c2.ID if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { t.Fatal("Should match a full name even though it collides with another container's ID") } if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { t.Fatal("Should match a container where the provided prefix is an exact match to the it's name, and is also a prefix for it's ID") } if _, err := daemon.GetContainer("3cdbd1"); err == nil { t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's") } if _, err := daemon.GetContainer("nothing"); err == nil { t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") } } func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { daemon := &Daemon{ repository: tmp, root: tmp, volumes: store.New(), } volumesDriver, err := local.New(tmp, 0, 0) if err != nil { return nil, err } volumedrivers.Register(volumesDriver, volumesDriver.Name()) return daemon, nil } func TestParseSecurityOpt(t *testing.T) { container := &container.Container{} config := &containertypes.HostConfig{} // test apparmor config.SecurityOpt = []string{"apparmor:test_profile"} if err := parseSecurityOpt(container, config); err != nil { t.Fatalf("Unexpected parseSecurityOpt error: %v", err) } if container.AppArmorProfile != "test_profile" { t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) } 
// test seccomp sp := "/path/to/seccomp_test.json" config.SecurityOpt = []string{"seccomp:" + sp} if err := parseSecurityOpt(container, config); err != nil { t.Fatalf("Unexpected parseSecurityOpt error: %v", err) } if container.SeccompProfile != sp { t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, container.SeccompProfile) } // test valid label config.SecurityOpt = []string{"label:user:USER"} if err := parseSecurityOpt(container, config); err != nil { t.Fatalf("Unexpected parseSecurityOpt error: %v", err) } // test invalid label config.SecurityOpt = []string{"label"} if err := parseSecurityOpt(container, config); err == nil { t.Fatal("Expected parseSecurityOpt error, got nil") } // test invalid opt config.SecurityOpt = []string{"test"} if err := parseSecurityOpt(container, config); err == nil { t.Fatal("Expected parseSecurityOpt error, got nil") } } func TestNetworkOptions(t *testing.T) { daemon := &Daemon{} dconfigCorrect := &Config{ CommonConfig: CommonConfig{ ClusterStore: "consul://localhost:8500", ClusterAdvertise: "192.168.0.1:8000", }, } if _, err := daemon.networkOptions(dconfigCorrect); err != nil { t.Fatalf("Expect networkOptions sucess, got error: %v", err) } dconfigWrong := &Config{ CommonConfig: CommonConfig{ ClusterStore: "consul://localhost:8500://test://bbb", }, } if _, err := daemon.networkOptions(dconfigWrong); err == nil { t.Fatalf("Expected networkOptions error, got nil") } } func TestValidContainerNames(t *testing.T) { invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} validNames := []string{"word-word", "word_word", "1weoid"} for _, name := range invalidNames { if validContainerNamePattern.MatchString(name) { t.Fatalf("%q is not a valid container name and was returned as valid.", name) } } for _, name := range validNames { if !validContainerNamePattern.MatchString(name) { t.Fatalf("%q is a valid container name and was returned as invalid.", name) } } } func TestContainerInitDNS(t *testing.T) { tmp, err := ioutil.TempDir("", 
"docker-container-test-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" containerPath := filepath.Join(tmp, containerID) if err := os.MkdirAll(containerPath, 0755); err != nil { t.Fatal(err) } config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, "Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, "ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", "Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", "AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, "StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, "NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", "NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", "LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, "ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", "HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", "HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", "LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", 
"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, "UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` // Container struct only used to retrieve path to config file container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} configPath, err := container.ConfigPath() if err != nil { t.Fatal(err) } if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { t.Fatal(err) } hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", "Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, "Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, "SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` hostConfigPath, err := container.HostConfigPath() if err != nil { t.Fatal(err) } if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { t.Fatal(err) } daemon, err := initDaemonWithVolumeStore(tmp) if err != nil { t.Fatal(err) } defer volumedrivers.Unregister(volume.DefaultDriverName) c, err := daemon.load(containerID) if err != nil { t.Fatal(err) } if c.HostConfig.DNS == nil { t.Fatal("Expected container DNS to not be nil") } if c.HostConfig.DNSSearch == nil { t.Fatal("Expected container DNSSearch to not be nil") } if c.HostConfig.DNSOptions == nil { t.Fatal("Expected container DNSOptions to not be nil") } } func newPortNoError(proto, port string) nat.Port { p, _ := nat.NewPort(proto, port) return p } func TestMerge(t *testing.T) { volumesImage := make(map[string]struct{}) volumesImage["/test1"] = struct{}{} volumesImage["/test2"] = struct{}{} portsImage := make(nat.PortSet) portsImage[newPortNoError("tcp", "1111")] = struct{}{} 
portsImage[newPortNoError("tcp", "2222")] = struct{}{} configImage := &containertypes.Config{ ExposedPorts: portsImage, Env: []string{"VAR1=1", "VAR2=2"}, Volumes: volumesImage, } portsUser := make(nat.PortSet) portsUser[newPortNoError("tcp", "2222")] = struct{}{} portsUser[newPortNoError("tcp", "3333")] = struct{}{} volumesUser := make(map[string]struct{}) volumesUser["/test3"] = struct{}{} configUser := &containertypes.Config{ ExposedPorts: portsUser, Env: []string{"VAR2=3", "VAR3=3"}, Volumes: volumesUser, } if err := merge(configUser, configImage); err != nil { t.Error(err) } if len(configUser.ExposedPorts) != 3 { t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) } for portSpecs := range configUser.ExposedPorts { if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) } } if len(configUser.Env) != 3 { t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) } for _, env := range configUser.Env { if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) } } if len(configUser.Volumes) != 3 { t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) } for v := range configUser.Volumes { if v != "/test1" && v != "/test2" && v != "/test3" { t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) } } ports, _, err := nat.ParsePortSpecs([]string{"0000"}) if err != nil { t.Error(err) } configImage2 := &containertypes.Config{ ExposedPorts: ports, } if err := merge(configUser, configImage2); err != nil { t.Error(err) } if len(configUser.ExposedPorts) != 4 { t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) } for portSpecs := range configUser.ExposedPorts { if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" 
&& portSpecs.Port() != "3333" { t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) } } } func TestDaemonReloadLabels(t *testing.T) { daemon := &Daemon{} daemon.configStore = &Config{ CommonConfig: CommonConfig{ Labels: []string{"foo:bar"}, }, } newConfig := &Config{ CommonConfig: CommonConfig{ Labels: []string{"foo:baz"}, }, } daemon.Reload(newConfig) label := daemon.configStore.Labels[0] if label != "foo:baz" { t.Fatalf("Expected daemon label `foo:baz`, got %s", label) } } func TestDaemonDiscoveryReload(t *testing.T) { daemon := &Daemon{} daemon.configStore = &Config{ CommonConfig: CommonConfig{ ClusterStore: "memory://127.0.0.1", ClusterAdvertise: "127.0.0.1:3333", }, } if err := daemon.initDiscovery(daemon.configStore); err != nil { t.Fatal(err) } expected := discovery.Entries{ &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, } stopCh := make(chan struct{}) defer close(stopCh) ch, errCh := daemon.discoveryWatcher.Watch(stopCh) select { case <-time.After(1 * time.Second): t.Fatal("failed to get discovery advertisements in time") case e := <-ch: if !reflect.DeepEqual(e, expected) { t.Fatalf("expected %v, got %v\n", expected, e) } case e := <-errCh: t.Fatal(e) } newConfig := &Config{ CommonConfig: CommonConfig{ ClusterStore: "memory://127.0.0.1:2222", ClusterAdvertise: "127.0.0.1:5555", }, } expected = discovery.Entries{ &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, } if err := daemon.reloadClusterDiscovery(newConfig); err != nil { t.Fatal(err) } ch, errCh = daemon.discoveryWatcher.Watch(stopCh) select { case <-time.After(1 * time.Second): t.Fatal("failed to get discovery advertisements in time") case e := <-ch: if !reflect.DeepEqual(e, expected) { t.Fatalf("expected %v, got %v\n", expected, e) } case e := <-errCh: t.Fatal(e) } } func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { daemon := &Daemon{} daemon.configStore = &Config{} newConfig := &Config{ CommonConfig: CommonConfig{ ClusterStore: 
"memory://127.0.0.1:2222", ClusterAdvertise: "127.0.0.1:5555", }, } expected := discovery.Entries{ &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, } if err := daemon.reloadClusterDiscovery(newConfig); err != nil { t.Fatal(err) } stopCh := make(chan struct{}) defer close(stopCh) ch, errCh := daemon.discoveryWatcher.Watch(stopCh) select { case <-time.After(1 * time.Second): t.Fatal("failed to get discovery advertisements in time") case e := <-ch: if !reflect.DeepEqual(e, expected) { t.Fatalf("expected %v, got %v\n", expected, e) } case e := <-errCh: t.Fatal(e) } } docker-1.10.3/daemon/daemon_unix.go000066400000000000000000001106061267010174400171510ustar00rootroot00000000000000// +build linux freebsd package daemon import ( "fmt" "net" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/reference" "github.com/docker/docker/runconfig" runconfigopts "github.com/docker/docker/runconfig/opts" pblkiodev "github.com/docker/engine-api/types/blkiodev" containertypes "github.com/docker/engine-api/types/container" "github.com/docker/libnetwork" nwconfig "github.com/docker/libnetwork/config" "github.com/docker/libnetwork/drivers/bridge" "github.com/docker/libnetwork/ipamutils" "github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/options" "github.com/docker/libnetwork/types" blkiodev "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/user" ) const ( // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 
linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 platformSupported = true // It's not kernel limit, we want this 4M limit to supply a reasonable functional container linuxMinMemory = 4194304 // constants for remapped root settings defaultIDSpecifier string = "default" defaultRemappedID string = "dockremap" ) func getBlkioWeightDevices(config *containertypes.HostConfig) ([]*blkiodev.WeightDevice, error) { var stat syscall.Stat_t var blkioWeightDevices []*blkiodev.WeightDevice for _, weightDevice := range config.BlkioWeightDevice { if err := syscall.Stat(weightDevice.Path, &stat); err != nil { return nil, err } weightDevice := blkiodev.NewWeightDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), weightDevice.Weight, 0) blkioWeightDevices = append(blkioWeightDevices, weightDevice) } return blkioWeightDevices, nil } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { con := strings.SplitN(opt, ":", 2) if len(con) == 1 { return fmt.Errorf("Invalid --security-opt: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] default: return fmt.Errorf("Invalid --security-opt: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) { var blkioReadIOpsDevice []*blkiodev.ThrottleDevice var stat syscall.Stat_t for _, iopsDevice := range config.BlkioDeviceReadIOps { if err := syscall.Stat(iopsDevice.Path, &stat); err != nil { return nil, err } readIOpsDevice := blkiodev.NewThrottleDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), iopsDevice.Rate) blkioReadIOpsDevice = append(blkioReadIOpsDevice, readIOpsDevice) } return blkioReadIOpsDevice, nil } func getBlkioWriteIOpsDevices(config 
*containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) { var blkioWriteIOpsDevice []*blkiodev.ThrottleDevice var stat syscall.Stat_t for _, iopsDevice := range config.BlkioDeviceWriteIOps { if err := syscall.Stat(iopsDevice.Path, &stat); err != nil { return nil, err } writeIOpsDevice := blkiodev.NewThrottleDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), iopsDevice.Rate) blkioWriteIOpsDevice = append(blkioWriteIOpsDevice, writeIOpsDevice) } return blkioWriteIOpsDevice, nil } func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) { var blkioReadBpsDevice []*blkiodev.ThrottleDevice var stat syscall.Stat_t for _, bpsDevice := range config.BlkioDeviceReadBps { if err := syscall.Stat(bpsDevice.Path, &stat); err != nil { return nil, err } readBpsDevice := blkiodev.NewThrottleDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), bpsDevice.Rate) blkioReadBpsDevice = append(blkioReadBpsDevice, readBpsDevice) } return blkioReadBpsDevice, nil } func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) { var blkioWriteBpsDevice []*blkiodev.ThrottleDevice var stat syscall.Stat_t for _, bpsDevice := range config.BlkioDeviceWriteBps { if err := syscall.Stat(bpsDevice.Path, &stat); err != nil { return nil, err } writeBpsDevice := blkiodev.NewThrottleDevice(int64(stat.Rdev/256), int64(stat.Rdev%256), bpsDevice.Rate) blkioWriteBpsDevice = append(blkioWriteBpsDevice, writeBpsDevice) } return blkioWriteBpsDevice, nil } func checkKernelVersion(k, major, minor int) bool { if v, err := kernel.GetKernelVersion(); err != nil { logrus.Warnf("%s", err) } else { if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { return false } } return true } func checkKernel() error { // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. 
// Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.10 crashes are clearer. // For details see https://github.com/docker/docker/issues/407 if !checkKernelVersion(3, 10, 0) { v, _ := kernel.GetKernelVersion() if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String()) } } return nil } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if adjustCPUShares && hostConfig.CPUShares > 0 { // Handle unsupported CPUShares if hostConfig.CPUShares < linuxMinCPUShares { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) hostConfig.CPUShares = linuxMinCPUShares } else if hostConfig.CPUShares > linuxMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) hostConfig.CPUShares = linuxMaxCPUShares } } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. 
hostConfig.MemorySwap = hostConfig.Memory * 2 } if hostConfig.ShmSize == 0 { hostConfig.ShmSize = container.DefaultSHMSize } var err error if hostConfig.SecurityOpt == nil { hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) if err != nil { return err } } if hostConfig.MemorySwappiness == nil { defaultSwappiness := int64(-1) hostConfig.MemorySwappiness = &defaultSwappiness } if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable } return nil } func verifyContainerResources(resources *containertypes.Resources) ([]string, error) { warnings := []string{} sysInfo := sysinfo.New(true) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") } if resources.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") logrus.Warnf("Your kernel does not support memory limit capabilities. 
Limitation discarded.") resources.Memory = 0 resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") resources.MemorySwap = -1 } if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") } if resources.Memory == 0 && resources.MemorySwap > 0 { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.") } if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness if swappiness < -1 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100.", swappiness) } } if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") logrus.Warnf("Your kernel does not support memory soft limit capabilities. 
Limitation discarded.") resources.MemoryReservation = 0 } if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") } if resources.KernelMemory > 0 && !sysInfo.KernelMemory { warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") resources.KernelMemory = 0 } if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") } if resources.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) { warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") } if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off if *resources.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable, OomKillDisable discarded.") logrus.Warnf("Your kernel does not support OomKillDisable, OomKillDisable discarded.") } resources.OomKillDisable = nil } // cpu subsystem checks and adjustments if resources.CPUShares > 0 && !sysInfo.CPUShares { warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") logrus.Warnf("Your kernel does not support CPU shares. 
Shares discarded.") resources.CPUShares = 0 } if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") resources.CPUPeriod = 0 } if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") resources.CPUQuota = 0 } // cpuset subsystem checks and adjustments if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") resources.CpusetCpus = "" resources.CpusetMems = "" } cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) if err != nil { return warnings, derr.ErrorCodeInvalidCpusetCpus.WithArgs(resources.CpusetCpus) } if !cpusAvailable { return warnings, derr.ErrorCodeNotAvailableCpusetCpus.WithArgs(resources.CpusetCpus, sysInfo.Cpus) } memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) if err != nil { return warnings, derr.ErrorCodeInvalidCpusetMems.WithArgs(resources.CpusetMems) } if !memsAvailable { return warnings, derr.ErrorCodeNotAvailableCpusetMems.WithArgs(resources.CpusetMems, sysInfo.Mems) } // blkio subsystem checks and adjustments if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") logrus.Warnf("Your kernel does not support Block I/O weight. 
Weight discarded.") resources.BlkioWeight = 0 } if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.") } if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { warnings = append(warnings, "Your kernel does not support Block I/O weight_device.") logrus.Warnf("Your kernel does not support Block I/O weight_device. Weight-device discarded.") resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} } if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { warnings = append(warnings, "Your kernel does not support Block read limit in bytes per second.") logrus.Warnf("Your kernel does not support Block I/O read limit in bytes per second. --device-read-bps discarded.") resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { warnings = append(warnings, "Your kernel does not support Block write limit in bytes per second.") logrus.Warnf("Your kernel does not support Block I/O write limit in bytes per second. --device-write-bps discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support Block read limit in IO per second.") logrus.Warnf("Your kernel does not support Block I/O read limit in IO per second. -device-read-iops discarded.") resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} } if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { warnings = append(warnings, "Your kernel does not support Block write limit in IO per second.") logrus.Warnf("Your kernel does not support Block I/O write limit in IO per second. 
--device-write-iops discarded.") resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} } return warnings, nil } func usingSystemd(config *Config) bool { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { continue } if val == "systemd" { return true } } return false } func (daemon *Daemon) usingSystemd() bool { return usingSystemd(daemon.configStore) } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config) ([]string, error) { warnings := []string{} sysInfo := sysinfo.New(true) warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) if err != nil { return warnings, err } w, err := verifyContainerResources(&hostConfig.Resources) if err != nil { return warnings, err } warnings = append(warnings, w...) if hostConfig.ShmSize < 0 { return warnings, fmt.Errorf("SHM size must be greater then 0") } if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000].", hostConfig.OomScoreAdj) } if sysInfo.IPv4ForwardingDisabled { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") logrus.Warnf("IPv4 forwarding is disabled. 
Networking will not work") } // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" { if hostConfig.Privileged { return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces.") } if hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsContainer() { return warnings, fmt.Errorf("Cannot share the host or a container's network namespace when user namespaces are enabled.") } if hostConfig.PidMode.IsHost() { return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled.") } if hostConfig.IpcMode.IsContainer() { return warnings, fmt.Errorf("Cannot share a container's IPC namespace when user namespaces are enabled.") } if hostConfig.ReadonlyRootfs { return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled.") } } if hostConfig.CgroupParent != "" && daemon.usingSystemd() { // CgroupParent for systemd cgroup should be named as "xxx.slice" if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } return warnings, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *Config) error { // Check for mutually incompatible config options if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.") } if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true.") } if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { config.bridgeConfig.EnableIPMasq = false } if config.CgroupParent != "" && usingSystemd(config) { if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } return nil } // checkSystem validates platform-specific requirements func checkSystem() error { if os.Geteuid() != 0 { return fmt.Errorf("The Docker daemon needs to be run as root") } return checkKernel() } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *Config, driverName string) error { if config.EnableSelinuxSupport { if selinuxEnabled() { // As Docker on overlayFS and SELinux are incompatible at present, error on overlayfs being enabled if driverName == "overlay" { return fmt.Errorf("SELinux is not supported with the %s graph driver", driverName) } logrus.Debug("SELinux enabled successfully") } else { logrus.Warn("Docker could not enable SELinux on the host system") } } else { selinuxSetDisabled() } return nil } func isBridgeNetworkDisabled(config *Config) bool { return config.bridgeConfig.Iface == disableNetworkBridge } func (daemon *Daemon) networkOptions(dconfig *Config) ([]nwconfig.Option, error) { options := []nwconfig.Option{} if dconfig == nil { return options, nil } options = append(options, nwconfig.OptionDataDir(dconfig.Root)) dd := runconfig.DefaultDaemonNetworkMode() dn := runconfig.DefaultDaemonNetworkMode().NetworkName() options = append(options, nwconfig.OptionDefaultDriver(string(dd))) options = append(options, nwconfig.OptionDefaultNetwork(dn)) if strings.TrimSpace(dconfig.ClusterStore) != "" { kv := strings.Split(dconfig.ClusterStore, "://") if len(kv) != 2 { return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") } 
options = append(options, nwconfig.OptionKVProvider(kv[0])) options = append(options, nwconfig.OptionKVProviderURL(kv[1])) } if len(dconfig.ClusterOpts) > 0 { options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) } if daemon.discoveryWatcher != nil { options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) } if dconfig.ClusterAdvertise != "" { options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) } options = append(options, nwconfig.OptionLabels(dconfig.Labels)) options = append(options, driverOptions(dconfig)...) return options, nil } func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } // Initialize default network on "null" if _, err := controller.NewNetwork("null", "none", libnetwork.NetworkOptionPersist(false)); err != nil { return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) } // Initialize default network on "host" if _, err := controller.NewNetwork("host", "host", libnetwork.NetworkOptionPersist(false)); err != nil { return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func driverOptions(config *Config) []nwconfig.Option { bridgeConfig := options.Generic{ "EnableIPForwarding": config.bridgeConfig.EnableIPForward, "EnableIPTables": config.bridgeConfig.EnableIPTables, "EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy} bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} dOptions := []nwconfig.Option{} dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption)) 
return dOptions } func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { if n, err := controller.NetworkByName("bridge"); err == nil { if err = n.Delete(); err != nil { return fmt.Errorf("could not delete the default bridge network: %v", err) } } bridgeName := bridge.DefaultBridgeName if config.bridgeConfig.Iface != "" { bridgeName = config.bridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq), bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), } // --ip processing if config.bridgeConfig.DefaultIP != nil { netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() } var ( ipamV4Conf *libnetwork.IpamConf ipamV6Conf *libnetwork.IpamConf ) ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} nw, nw6List, err := ipamutils.ElectInterfaceAddresses(bridgeName) if err == nil { ipamV4Conf.PreferredPool = types.GetIPNetCanonical(nw).String() hip, _ := types.GetHostPartIP(nw.IP, nw.Mask) if hip.IsGlobalUnicast() { ipamV4Conf.Gateway = nw.IP.String() } } if config.bridgeConfig.IP != "" { ipamV4Conf.PreferredPool = config.bridgeConfig.IP ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) if err != nil { return err } ipamV4Conf.Gateway = ip.String() } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { logrus.Infof("Default bridge (%s) is assigned with an IP address %s. 
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.bridgeConfig.FixedCIDR != "" { _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) if err != nil { return err } ipamV4Conf.SubPool = fCIDR.String() } if config.bridgeConfig.DefaultGatewayIPv4 != nil { ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() } var deferIPv6Alloc bool if config.bridgeConfig.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6) if err != nil { return err } // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has // at least 48 host bits, we need to guarantee the current behavior where the containers' // IPv6 addresses will be constructed based on the containers' interface MAC address. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints // on this network until after the driver has created the endpoint and returned the // constructed address. Libnetwork will then reserve this address with the ipam driver. 
ones, _ := fCIDRv6.Mask.Size() deferIPv6Alloc = ones <= 80 if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.PreferredPool = fCIDRv6.String() // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 // address belongs to the same network, we need to inform libnetwork about it, so // that it can be reserved with IPAM and it will not be given away to somebody else for _, nw6 := range nw6List { if fCIDRv6.Contains(nw6.IP) { ipamV6Conf.Gateway = nw6.IP.String() break } } } if config.bridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} if ipamV6Conf != nil { v6Conf = append(v6Conf, ipamV6Conf) } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, netlabel.EnableIPv6: config.bridgeConfig.EnableIPv6, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) } return nil } // setupInitLayer populates a directory with mountpoints suitable // for bind-mounting dockerinit into the container. The mountpoint is simply an // empty file at /.dockerinit // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. 
func setupInitLayer(initLayer string, rootUID, rootGID int) error { for pth, typ := range map[string]string{ "/dev/pts": "dir", "/dev/shm": "dir", "/proc": "dir", "/sys": "dir", "/.dockerinit": "file", "/.dockerenv": "file", "/etc/resolv.conf": "file", "/etc/hosts": "file", "/etc/hostname": "file", "/dev/console": "file", "/etc/mtab": "/proc/mounts", } { parts := strings.Split(pth, "/") prev := "/" for _, p := range parts[1:] { prev = filepath.Join(prev, p) syscall.Unlink(filepath.Join(initLayer, prev)) } if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { if os.IsNotExist(err) { if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { return err } switch typ { case "dir": if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { return err } case "file": f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) if err != nil { return err } f.Chown(rootUID, rootGID) f.Close() default: if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { return err } } } else { return err } } } // Layer is ready to use, if it wasn't before. 
return nil } // Parse the remapped root (user namespace) option, which can be one of: // username - valid username from /etc/passwd // username:groupname - valid username; valid groupname from /etc/group // uid - 32-bit unsigned int valid Linux UID value // uid:gid - uid value; 32-bit unsigned int Linux GID value // // If no groupname is specified, and a username is specified, an attempt // will be made to lookup a gid for that username as a groupname // // If names are used, they are verified to exist in passwd/group func parseRemappedRoot(usergrp string) (string, string, error) { var ( userID, groupID int username, groupname string ) idparts := strings.Split(usergrp, ":") if len(idparts) > 2 { return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) } if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { // must be a uid; take it as valid userID = int(uid) luser, err := user.LookupUid(userID) if err != nil { return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) } username = luser.Name if len(idparts) == 1 { // if the uid was numeric and no gid was specified, take the uid as the gid groupID = userID lgrp, err := user.LookupGid(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) } groupname = lgrp.Name } } else { lookupName := idparts[0] // special case: if the user specified "default", they want Docker to create or // use (after creation) the "dockremap" user/group for root remapping if lookupName == defaultIDSpecifier { lookupName = defaultRemappedID } luser, err := user.LookupUser(lookupName) if err != nil && idparts[0] != defaultIDSpecifier { // error if the name requested isn't the special "dockremap" ID return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) } else if err != nil { // special case-- if the username == "default", then we have been asked // to create a new entry pair in /etc/{passwd,group} for 
which the /etc/sub{uid,gid} // ranges will be used for the user and group mappings in user namespaced containers _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) if err == nil { return defaultRemappedID, defaultRemappedID, nil } return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) } userID = luser.Uid username = luser.Name if len(idparts) == 1 { // we only have a string username, and no group specified; look up gid from username as group group, err := user.LookupGroup(lookupName) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } groupID = group.Gid groupname = group.Name } } if len(idparts) == 2 { // groupname or gid is separately specified and must be resolved // to a unsigned 32-bit gid if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { // must be a gid, take it as valid groupID = int(gid) lgrp, err := user.LookupGid(groupID) if err != nil { return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) } groupname = lgrp.Name } else { // not a number; attempt a lookup group, err := user.LookupGroup(idparts[1]) if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", idparts[1], err) } groupID = group.Gid groupname = idparts[1] } } return username, groupname, nil } func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values var ( uidMaps, gidMaps []idtools.IDMap ) if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { return nil, nil, err } if username == "root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warnf("User namespaces: root 
cannot be remapped with itself; user namespaces are OFF") return uidMaps, gidMaps, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) if err != nil { return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) } } return uidMaps, gidMaps, nil } func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root // (e.g. mounted layers of a container) can traverse this path. // The user namespace support will create subdirectories for the remapped root host uid:gid // pair owned by that same uid:gid pair for proper write access to those needed metadata and // layer content subtrees. 
if _, err := os.Stat(rootDir); err == nil { // root current exists; verify the access bits are correct by setting them if err = os.Chmod(rootDir, 0701); err != nil { return err } } else if os.IsNotExist(err) { // no root exists yet, create it 0701 with root:root ownership if err := os.MkdirAll(rootDir, 0701); err != nil { return err } } // if user namespaces are enabled we will create a subtree underneath the specified root // with any/all specified remapped root uid/gid options on the daemon creating // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exists if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } } return nil } // registerLinks writes the links to a file. 
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { return nil } for _, l := range hostConfig.Links { name, alias, err := runconfigopts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { //An error from daemon.GetContainer() means this name could not be found return fmt.Errorf("Could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { return fmt.Errorf("Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.registerLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig return container.WriteHostConfig() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) { daemon.Unmount(container) } func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { // Unix has no custom images to register return nil } docker-1.10.3/daemon/daemon_unix_test.go000066400000000000000000000042361267010174400202110ustar00rootroot00000000000000// +build !windows package daemon import ( "io/ioutil" "os" "testing" "github.com/docker/engine-api/types/container" ) func TestAdjustCPUShares(t *testing.T) { tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) daemon := &Daemon{ repository: tmp, root: tmp, } hostConfig := &container.HostConfig{ Resources: container.Resources{CPUShares: linuxMinCPUShares - 1}, } daemon.adaptContainerSettings(hostConfig, true) if hostConfig.CPUShares != linuxMinCPUShares { t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) } hostConfig.CPUShares = linuxMaxCPUShares + 1 daemon.adaptContainerSettings(hostConfig, true) if hostConfig.CPUShares != linuxMaxCPUShares { t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) } hostConfig.CPUShares = 0 daemon.adaptContainerSettings(hostConfig, true) if hostConfig.CPUShares != 0 { t.Error("Expected CPUShares to be unchanged") } hostConfig.CPUShares = 1024 daemon.adaptContainerSettings(hostConfig, true) if hostConfig.CPUShares != 1024 { t.Error("Expected CPUShares to be unchanged") } } func TestAdjustCPUSharesNoAdjustment(t *testing.T) { tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) daemon := &Daemon{ repository: tmp, root: tmp, } hostConfig := &container.HostConfig{ Resources: container.Resources{CPUShares: linuxMinCPUShares - 1}, } daemon.adaptContainerSettings(hostConfig, false) if hostConfig.CPUShares != linuxMinCPUShares-1 { t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) } hostConfig.CPUShares = linuxMaxCPUShares + 1 
daemon.adaptContainerSettings(hostConfig, false) if hostConfig.CPUShares != linuxMaxCPUShares+1 { t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) } hostConfig.CPUShares = 0 daemon.adaptContainerSettings(hostConfig, false) if hostConfig.CPUShares != 0 { t.Error("Expected CPUShares to be unchanged") } hostConfig.CPUShares = 1024 daemon.adaptContainerSettings(hostConfig, false) if hostConfig.CPUShares != 1024 { t.Error("Expected CPUShares to be unchanged") } } docker-1.10.3/daemon/daemon_unsupported.go000066400000000000000000000001241267010174400205470ustar00rootroot00000000000000// +build !linux,!freebsd,!windows package daemon const platformSupported = false docker-1.10.3/daemon/daemon_windows.go000066400000000000000000000155521267010174400176640ustar00rootroot00000000000000package daemon import ( "encoding/json" "errors" "fmt" "os" "path/filepath" "runtime" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/reference" containertypes "github.com/docker/engine-api/types/container" // register the windows graph driver "github.com/docker/docker/daemon/graphdriver/windows" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" "github.com/docker/libnetwork" blkiodev "github.com/opencontainers/runc/libcontainer/configs" ) const ( defaultVirtualSwitch = "Virtual Switch" platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 ) func getBlkioWeightDevices(config *containertypes.HostConfig) ([]*blkiodev.WeightDevice, error) { return nil, nil } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) { return nil, nil } func getBlkioWriteIOpsDevices(config 
*containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) { return nil, nil } func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) { return nil, nil } func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]*blkiodev.ThrottleDevice, error) { return nil, nil } func setupInitLayer(initLayer string, rootUID, rootGID int) error { return nil } func checkKernel() error { return nil } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } if hostConfig.CPUShares < 0 { logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, windowsMinCPUShares) hostConfig.CPUShares = windowsMinCPUShares } else if hostConfig.CPUShares > windowsMaxCPUShares { logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, windowsMaxCPUShares) hostConfig.CPUShares = windowsMaxCPUShares } return nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config) ([]string, error) { return nil, nil } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that docker.exe must be manifested for this // call to return the correct version. 
osv, err := system.GetOSVersion() if err != nil { return err } if osv.MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osv.Build < 10586 { return fmt.Errorf("The Windows daemon requires Windows Server 2016 Technical Preview 4, build 10586 or later") } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *Config, driverName string) error { return nil } func isBridgeNetworkDisabled(config *Config) bool { return false } func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkController, error) { // Set the name of the virtual switch if not specified by -b on daemon start if config.bridgeConfig.VirtualSwitchName == "" { config.bridgeConfig.VirtualSwitchName = defaultVirtualSwitch } return nil, nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { return nil, nil, nil } func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { return err } return nil } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { // We do not mount if a Hyper-V container if !container.HostConfig.Isolation.IsHyperV() { if err := daemon.Mount(container); err != nil { return err } } return nil } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) { // We do not unmount if a Hyper-V container if !container.HostConfig.Isolation.IsHyperV() { daemon.Unmount(container) } } func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { type graphDriverStore interface { GraphDriver() graphdriver.Driver } gds, ok := ls.(graphDriverStore) if !ok { return nil } driver := gds.GraphDriver() wd, ok := driver.(*windows.Driver) if !ok { return nil } imageInfos, err := wd.GetCustomImageInfos() if err != nil { return err } // Convert imageData to valid image configuration for i := range imageInfos { name := strings.ToLower(imageInfos[i].Name) type registrar interface { RegisterDiffID(graphID string, size int64) (layer.Layer, error) } r, ok := ls.(registrar) if !ok { return errors.New("Layerstore doesn't support RegisterDiffID") } if _, err := r.RegisterDiffID(imageInfos[i].ID, imageInfos[i].Size); err != nil { return err } // layer is intentionally not released rootFS := image.NewRootFS() rootFS.BaseLayer = filepath.Base(imageInfos[i].Path) // Create history for base layer config, err := json.Marshal(&image.Image{ V1Image: image.V1Image{ DockerVersion: dockerversion.Version, Architecture: runtime.GOARCH, OS: runtime.GOOS, Created: imageInfos[i].CreatedTime, }, RootFS: rootFS, History: []image.History{}, }) named, err := reference.ParseNamed(name) if err != nil { return err } ref, err := reference.WithTag(named, imageInfos[i].Version) if err != nil { return err } id, err := is.Create(config) if err != nil { return err } if err := rs.AddTag(ref, id, 
true); err != nil { return err } logrus.Debugf("Registered base layer %s as %s", ref, id) } return nil } docker-1.10.3/daemon/daemonbuilder/000077500000000000000000000000001267010174400171225ustar00rootroot00000000000000docker-1.10.3/daemon/daemonbuilder/builder.go000066400000000000000000000162551267010174400211100ustar00rootroot00000000000000package daemonbuilder import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/builder" "github.com/docker/docker/daemon" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" ) // Docker implements builder.Backend for the docker Daemon object. type Docker struct { *daemon.Daemon OutOld io.Writer AuthConfigs map[string]types.AuthConfig Archiver *archive.Archiver } // ensure Docker implements builder.Backend var _ builder.Backend = Docker{} // Pull tells Docker to pull image referenced by `name`. 
func (d Docker) Pull(name string) (builder.Image, error) { ref, err := reference.ParseNamed(name) if err != nil { return nil, err } ref = reference.WithDefaultTag(ref) pullRegistryAuth := &types.AuthConfig{} if len(d.AuthConfigs) > 0 { // The request came with a full auth config file, we prefer to use that repoInfo, err := d.Daemon.RegistryService.ResolveRepository(ref) if err != nil { return nil, err } resolvedConfig := registry.ResolveAuthConfig( d.AuthConfigs, repoInfo.Index, ) pullRegistryAuth = &resolvedConfig } if err := d.Daemon.PullImage(ref, nil, pullRegistryAuth, ioutils.NopWriteCloser(d.OutOld)); err != nil { return nil, err } return d.GetImage(name) } // GetImage looks up a Docker image referenced by `name`. func (d Docker) GetImage(name string) (builder.Image, error) { img, err := d.Daemon.GetImage(name) if err != nil { return nil, err } return imgWrap{img}, nil } // ContainerUpdateCmd updates Path and Args for the container with ID cID. func (d Docker) ContainerUpdateCmd(cID string, cmd []string) error { c, err := d.Daemon.GetContainer(cID) if err != nil { return err } c.Path = cmd[0] c.Args = cmd[1:] return nil } // ContainerAttach attaches streams to the container cID. If stream is true, it streams the output. func (d Docker) ContainerAttach(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { return d.Daemon.ContainerWsAttachWithLogs(cID, &daemon.ContainerWsAttachWithLogsConfig{ InStream: stdin, OutStream: stdout, ErrStream: stderr, Stream: stream, }) } // BuilderCopy copies/extracts a source FileInfo to a destination path inside a container // specified by a container object. // TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). // BuilderCopy should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. 
func (d Docker) BuilderCopy(cID string, destPath string, src builder.FileInfo, decompress bool) error { srcPath := src.Path() destExists := true destDir := false rootUID, rootGID := d.Daemon.GetRemappedUIDGID() // Work in daemon-local OS specific file paths destPath = filepath.FromSlash(destPath) c, err := d.Daemon.GetContainer(cID) if err != nil { return err } err = d.Daemon.Mount(c) if err != nil { return err } defer d.Daemon.Unmount(c) dest, err := c.GetResourcePath(destPath) if err != nil { return err } // Preserve the trailing slash // TODO: why are we appending another path separator if there was already one? if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { destDir = true dest += string(os.PathSeparator) } destPath = dest destStat, err := os.Stat(destPath) if err != nil { if !os.IsNotExist(err) { logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) return err } destExists = false } if src.IsDir() { // copy as directory if err := d.Archiver.CopyWithTar(srcPath, destPath); err != nil { return err } return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) } if decompress && archive.IsArchivePath(srcPath) { // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) // First try to unpack the source as an archive // to support the untar feature we need to clean up the path a little bit // because tar is very forgiving. 
First we need to strip off the archive's // filename from the path but this is only added if it does not end in slash tarDest := destPath if strings.HasSuffix(tarDest, string(os.PathSeparator)) { tarDest = filepath.Dir(destPath) } // try to successfully untar the orig err := d.Archiver.UntarPath(srcPath, tarDest) if err != nil { logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) } return err } // only needed for fixPermissions, but might as well put it before CopyFileWithTar if destDir || (destExists && destStat.IsDir()) { destPath = filepath.Join(destPath, src.Name()) } if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { return err } if err := d.Archiver.CopyFileWithTar(srcPath, destPath); err != nil { return err } return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) } // GetCachedImage returns a reference to a cached image whose parent equals `parent` // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. func (d Docker) GetCachedImage(imgID string, cfg *container.Config) (string, error) { cache, err := d.Daemon.ImageGetCached(image.ID(imgID), cfg) if cache == nil || err != nil { return "", err } return cache.ID().String(), nil } // Following is specific to builder contexts // DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used // irrespective of user input. // progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). 
func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context builder.ModifiableContext, dockerfileName string, err error) { switch { case remoteURL == "": context, err = builder.MakeTarSumContext(r) case urlutil.IsGitURL(remoteURL): context, err = builder.MakeGitContext(remoteURL) case urlutil.IsURL(remoteURL): context, err = builder.MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { dockerfile, err := ioutil.ReadAll(rc) if err != nil { return nil, err } // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. dockerfileName = api.DefaultDockerfileName // TODO: return a context without tarsum return archive.Generate(dockerfileName, string(dockerfile)) }, // fallback handler (tar context) "": func(rc io.ReadCloser) (io.ReadCloser, error) { return createProgressReader(rc), nil }, }) default: err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) } return } docker-1.10.3/daemon/daemonbuilder/builder_unix.go000066400000000000000000000024051267010174400221430ustar00rootroot00000000000000// +build freebsd linux package daemonbuilder import ( "os" "path/filepath" ) func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { // If the destination didn't already exist, or the destination isn't a // directory, then we should Lchown the destination. Otherwise, we shouldn't // Lchown the destination. destStat, err := os.Stat(destination) if err != nil { // This should *never* be reached, because the destination must've already // been created while untar-ing the context. 
return err } doChownDestination := !destExisted || !destStat.IsDir() // We Walk on the source rather than on the destination because we don't // want to change permissions on things we haven't created or modified. return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { // Do not alter the walk root iff. it existed before, as it doesn't fall under // the domain of "things we should chown". if !doChownDestination && (source == fullpath) { return nil } // Path is prefixed by source: substitute with destination instead. cleaned, err := filepath.Rel(source, fullpath) if err != nil { return err } fullpath = filepath.Join(destination, cleaned) return os.Lchown(fullpath, uid, gid) }) } docker-1.10.3/daemon/daemonbuilder/builder_windows.go000066400000000000000000000002661267010174400226550ustar00rootroot00000000000000// +build windows package daemonbuilder func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { // chown is not supported on Windows return nil } docker-1.10.3/daemon/daemonbuilder/image.go000066400000000000000000000004601267010174400205330ustar00rootroot00000000000000package daemonbuilder import ( "github.com/docker/docker/image" "github.com/docker/engine-api/types/container" ) type imgWrap struct { inner *image.Image } func (img imgWrap) ID() string { return string(img.inner.ID()) } func (img imgWrap) Config() *container.Config { return img.inner.Config } docker-1.10.3/daemon/debugtrap_unix.go000066400000000000000000000004331267010174400176570ustar00rootroot00000000000000// +build !windows package daemon import ( "os" "os/signal" "syscall" psignal "github.com/docker/docker/pkg/signal" ) func setupDumpStackTrap() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGUSR1) go func() { for range c { psignal.DumpStacks() } }() } docker-1.10.3/daemon/debugtrap_unsupported.go000066400000000000000000000001421267010174400212610ustar00rootroot00000000000000// +build !linux,!darwin,!freebsd,!windows 
package daemon func setupDumpStackTrap() { return } docker-1.10.3/daemon/debugtrap_windows.go000066400000000000000000000012771267010174400203750ustar00rootroot00000000000000package daemon import ( "fmt" "os" "syscall" "github.com/Sirupsen/logrus" psignal "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/system" ) func setupDumpStackTrap() { // Windows does not support signals like *nix systems. So instead of // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be // signaled. go func() { sa := syscall.SecurityAttributes{ Length: 0, } ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { logrus.Debugf("Stackdump - waiting signal at %s", ev) for { syscall.WaitForSingleObject(h, syscall.INFINITE) psignal.DumpStacks() } } }() } docker-1.10.3/daemon/delete.go000066400000000000000000000113161267010174400161030ustar00rootroot00000000000000package daemon import ( "fmt" "os" "path" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" "github.com/docker/docker/layer" volumestore "github.com/docker/docker/volume/store" "github.com/docker/engine-api/types" ) // ContainerRm removes the container id from the filesystem. An error // is returned if the container is not found, or if the remove // fails. If the remove succeeds, the container name is released, and // network links are removed. func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { container, err := daemon.GetContainer(name) if err != nil { return err } // Container state RemovalInProgress should be used to avoid races. if err = container.SetRemovalInProgress(); err != nil { if err == derr.ErrorCodeAlreadyRemoving { // do not fail when the removal is in progress started by other request. 
return nil } return derr.ErrorCodeRmState.WithArgs(err) } defer container.ResetRemovalInProgress() // check if container wasn't deregistered by previous rm since Get if c := daemon.containers.Get(container.ID); c == nil { return nil } if config.RemoveLink { return daemon.rmLink(container, name) } err = daemon.cleanupContainer(container, config.ForceRemove) if err == nil || config.ForceRemove { if e := daemon.removeMountPoints(container, config.RemoveVolume); e != nil { logrus.Error(e) } } return err } func (daemon *Daemon) rmLink(container *container.Container, name string) error { if name[0] != '/' { name = "/" + name } parent, n := path.Split(name) if parent == "/" { return fmt.Errorf("Conflict, cannot remove the default name of the container") } parent = strings.TrimSuffix(parent, "/") pe, err := daemon.nameIndex.Get(parent) if err != nil { return fmt.Errorf("Cannot get parent %s for name %s", parent, name) } daemon.releaseName(name) parentContainer, _ := daemon.GetContainer(pe) if parentContainer != nil { daemon.linkIndex.unlink(name, container, parentContainer) if err := daemon.updateNetwork(parentContainer); err != nil { logrus.Debugf("Could not update network to remove link %s: %v", n, err) } } return nil } // cleanupContainer unregisters a container from the daemon, stops stats // collection and cleanly removes contents and metadata from the filesystem. func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) { if container.IsRunning() { if !forceRemove { return derr.ErrorCodeRmRunning } if err := daemon.Kill(container); err != nil { return derr.ErrorCodeRmFailed.WithArgs(err) } } // stop collection of stats for the container regardless // if stats are currently getting collected. daemon.statsCollector.stopCollection(container) if err = daemon.containerStop(container, 3); err != nil { return err } // Mark container dead. We don't want anybody to be restarting it. 
container.SetDead() // Save container state to disk. So that if error happens before // container meta file got removed from disk, then a restart of // docker should not make a dead container alive. if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving dying container to disk: %v", err) } // If force removal is required, delete container from various // indexes even if removal failed. defer func() { if err == nil || forceRemove { daemon.nameIndex.Delete(container.ID) daemon.linkIndex.delete(container) selinuxFreeLxcContexts(container.ProcessLabel) daemon.idIndex.Delete(container.ID) daemon.containers.Delete(container.ID) daemon.LogContainerEvent(container, "destroy") } }() if err = os.RemoveAll(container.Root); err != nil { return derr.ErrorCodeRmFS.WithArgs(container.ID, err) } metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) layer.LogReleaseMetadata(metadata) if err != nil && err != layer.ErrMountDoesNotExist { return derr.ErrorCodeRmDriverFS.WithArgs(daemon.GraphDriverName(), container.ID, err) } if err = daemon.execDriver.Clean(container.ID); err != nil { return derr.ErrorCodeRmExecDriver.WithArgs(container.ID, err) } return nil } // VolumeRm removes the volume with the given name. 
// If the volume is referenced by a container it is not removed // This is called directly from the remote API func (daemon *Daemon) VolumeRm(name string) error { v, err := daemon.volumes.Get(name) if err != nil { return err } if err := daemon.volumes.Remove(v); err != nil { if volumestore.IsInUse(err) { return derr.ErrorCodeRmVolumeInUse.WithArgs(err) } return derr.ErrorCodeRmVolume.WithArgs(name, err) } daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) return nil } docker-1.10.3/daemon/delete_test.go000066400000000000000000000020771267010174400171460ustar00rootroot00000000000000package daemon import ( "io/ioutil" "os" "testing" "github.com/docker/docker/container" "github.com/docker/engine-api/types" containertypes "github.com/docker/engine-api/types/container" ) func TestContainerDoubleDelete(t *testing.T) { tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) daemon := &Daemon{ repository: tmp, root: tmp, } daemon.containers = container.NewMemoryStore() container := &container.Container{ CommonContainer: container.CommonContainer{ ID: "test", State: container.NewState(), Config: &containertypes.Config{}, }, } daemon.containers.Add(container.ID, container) // Mark the container as having a delete in progress if err := container.SetRemovalInProgress(); err != nil { t.Fatal(err) } // Try to remove the container when it's start is removalInProgress. // It should ignore the container and not return an error. if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil { t.Fatal(err) } } docker-1.10.3/daemon/discovery.go000066400000000000000000000115361267010174400166540ustar00rootroot00000000000000package daemon import ( "errors" "fmt" "reflect" "strconv" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/discovery" // Register the libkv backends for discovery. 
_ "github.com/docker/docker/pkg/discovery/kv" ) const ( // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. defaultDiscoveryHeartbeat = 20 * time.Second // defaultDiscoveryTTLFactor is the default TTL factor for discovery defaultDiscoveryTTLFactor = 3 ) var errDiscoveryDisabled = errors.New("discovery is disabled") type discoveryReloader interface { discovery.Watcher Stop() Reload(backend, address string, clusterOpts map[string]string) error } type daemonDiscoveryReloader struct { backend discovery.Backend ticker *time.Ticker term chan bool } func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { return d.backend.Watch(stopCh) } func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { var ( heartbeat = defaultDiscoveryHeartbeat ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat ) if hb, ok := clusterOpts["discovery.heartbeat"]; ok { h, err := strconv.Atoi(hb) if err != nil { return time.Duration(0), time.Duration(0), err } heartbeat = time.Duration(h) * time.Second ttl = defaultDiscoveryTTLFactor * heartbeat } if tstr, ok := clusterOpts["discovery.ttl"]; ok { t, err := strconv.Atoi(tstr) if err != nil { return time.Duration(0), time.Duration(0), err } ttl = time.Duration(t) * time.Second if _, ok := clusterOpts["discovery.heartbeat"]; !ok { h := int(t / defaultDiscoveryTTLFactor) heartbeat = time.Duration(h) * time.Second } if ttl <= heartbeat { return time.Duration(0), time.Duration(0), fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") } } return heartbeat, ttl, nil } // initDiscovery initialized the nodes discovery subsystem by connecting to the specified backend // and start a registration loop to advertise the current node under the specified address. 
func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) { heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) if err != nil { return nil, err } reloader := &daemonDiscoveryReloader{ backend: backend, ticker: time.NewTicker(heartbeat), term: make(chan bool), } // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, // but we never actually Watch() for nodes appearing and disappearing for the moment. reloader.advertise(advertiseAddress) return reloader, nil } func (d *daemonDiscoveryReloader) advertise(address string) { d.registerAddr(address) go d.advertiseHeartbeat(address) } func (d *daemonDiscoveryReloader) registerAddr(addr string) { if err := d.backend.Register(addr); err != nil { log.Warnf("Registering as %q in discovery failed: %v", addr, err) } } // advertiseHeartbeat registers the current node against the discovery backend using the specified // address. The function never returns, as registration against the backend comes with a TTL and // requires regular heartbeats. func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) { for { select { case <-d.ticker.C: d.registerAddr(address) case <-d.term: return } } } // Reload makes the watcher to stop advertising and reconfigures it to advertise in a new address. func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error { d.Stop() heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) if err != nil { return err } d.backend = backend d.ticker = time.NewTicker(heartbeat) d.advertise(advertiseAddress) return nil } // Stop terminates the discovery advertising. 
func (d *daemonDiscoveryReloader) Stop() { d.ticker.Stop() d.term <- true } func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { heartbeat, ttl, err := discoveryOpts(clusterOpts) if err != nil { return 0, nil, err } backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) if err != nil { return 0, nil, err } return heartbeat, backend, nil } // modifiedDiscoverySettings returns whether the discovery configuration has been modified or not. func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { return true } if (config.ClusterOpts == nil && clusterOpts == nil) || (config.ClusterOpts == nil && len(clusterOpts) == 0) || (len(config.ClusterOpts) == 0 && clusterOpts == nil) { return false } return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) } docker-1.10.3/daemon/discovery_test.go000066400000000000000000000101661267010174400177110ustar00rootroot00000000000000package daemon import ( "testing" "time" ) func TestDiscoveryOpts(t *testing.T) { clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} heartbeat, ttl, err := discoveryOpts(clusterOpts) if err == nil { t.Fatalf("discovery.ttl < discovery.heartbeat must fail") } clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} heartbeat, ttl, err = discoveryOpts(clusterOpts) if err == nil { t.Fatalf("discovery.ttl == discovery.heartbeat must fail") } clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} heartbeat, ttl, err = discoveryOpts(clusterOpts) if err == nil { t.Fatalf("invalid discovery.heartbeat must fail") } clusterOpts = map[string]string{"discovery.ttl": "invalid"} heartbeat, ttl, err = discoveryOpts(clusterOpts) if err == nil { t.Fatalf("invalid discovery.ttl must fail") } clusterOpts = 
map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} heartbeat, ttl, err = discoveryOpts(clusterOpts) if err != nil { t.Fatal(err) } if heartbeat != 10*time.Second { t.Fatalf("Heatbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) } if ttl != 20*time.Second { t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) } clusterOpts = map[string]string{"discovery.heartbeat": "10"} heartbeat, ttl, err = discoveryOpts(clusterOpts) if err != nil { t.Fatal(err) } if heartbeat != 10*time.Second { t.Fatalf("Heatbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) } expected := 10 * defaultDiscoveryTTLFactor * time.Second if ttl != expected { t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) } clusterOpts = map[string]string{"discovery.ttl": "30"} heartbeat, ttl, err = discoveryOpts(clusterOpts) if err != nil { t.Fatal(err) } if ttl != 30*time.Second { t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) } expected = 30 * time.Second / defaultDiscoveryTTLFactor if heartbeat != expected { t.Fatalf("Heatbeat - Expected : %v, Actual : %v", expected, heartbeat) } clusterOpts = map[string]string{} heartbeat, ttl, err = discoveryOpts(clusterOpts) if err != nil { t.Fatal(err) } if heartbeat != defaultDiscoveryHeartbeat { t.Fatalf("Heatbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) } expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor if ttl != expected { t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) } } func TestModifiedDiscoverySettings(t *testing.T) { cases := []struct { current *Config modified *Config expected bool }{ { current: discoveryConfig("foo", "bar", map[string]string{}), modified: discoveryConfig("foo", "bar", map[string]string{}), expected: false, }, { current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), expected: false, }, { current: 
discoveryConfig("foo", "bar", map[string]string{}), modified: discoveryConfig("foo", "bar", nil), expected: false, }, { current: discoveryConfig("foo", "bar", nil), modified: discoveryConfig("foo", "bar", map[string]string{}), expected: false, }, { current: discoveryConfig("foo", "bar", nil), modified: discoveryConfig("baz", "bar", nil), expected: true, }, { current: discoveryConfig("foo", "bar", nil), modified: discoveryConfig("foo", "baz", nil), expected: true, }, { current: discoveryConfig("foo", "bar", nil), modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), expected: true, }, } for _, c := range cases { got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) if c.expected != got { t.Fatalf("expected %v, got %v: current config %q, new config %q", c.expected, got, c.current, c.modified) } } } func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { return &Config{ CommonConfig: CommonConfig{ ClusterStore: backendAddr, ClusterAdvertise: advertiseAddr, ClusterOpts: opts, }, } } docker-1.10.3/daemon/errors.go000066400000000000000000000012451267010174400161550ustar00rootroot00000000000000package daemon import ( "strings" derr "github.com/docker/docker/errors" "github.com/docker/docker/reference" ) func (d *Daemon) imageNotExistToErrcode(err error) error { if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { if strings.Contains(dne.RefOrID, "@") { return derr.ErrorCodeNoSuchImageHash.WithArgs(dne.RefOrID) } tag := reference.DefaultTag ref, err := reference.ParseNamed(dne.RefOrID) if err != nil { return derr.ErrorCodeNoSuchImageTag.WithArgs(dne.RefOrID, tag) } if tagged, isTagged := ref.(reference.NamedTagged); isTagged { tag = tagged.Tag() } return derr.ErrorCodeNoSuchImageTag.WithArgs(ref.Name(), tag) } return err } docker-1.10.3/daemon/events.go000066400000000000000000000047171267010174400161540ustar00rootroot00000000000000package daemon import 
( "strings" "github.com/docker/docker/container" "github.com/docker/engine-api/types/events" "github.com/docker/libnetwork" ) // LogContainerEvent generates an event related to a container. func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { attributes := copyAttributes(container.Config.Labels) if container.Config.Image != "" { attributes["image"] = container.Config.Image } attributes["name"] = strings.TrimLeft(container.Name, "/") actor := events.Actor{ ID: container.ID, Attributes: attributes, } daemon.EventsService.Log(action, events.ContainerEventType, actor) } // LogImageEvent generates an event related to a container. func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { attributes := map[string]string{} img, err := daemon.GetImage(imageID) if err == nil && img.Config != nil { // image has not been removed yet. // it could be missing if the event is `delete`. attributes = copyAttributes(img.Config.Labels) } if refName != "" { attributes["name"] = refName } actor := events.Actor{ ID: imageID, Attributes: attributes, } daemon.EventsService.Log(action, events.ImageEventType, actor) } // LogVolumeEvent generates an event related to a volume. func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { actor := events.Actor{ ID: volumeID, Attributes: attributes, } daemon.EventsService.Log(action, events.VolumeEventType, actor) } // LogNetworkEvent generates an event related to a network with only the default attributes. func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) } // LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. 
func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { attributes["name"] = nw.Name() attributes["type"] = nw.Type() actor := events.Actor{ ID: nw.ID(), Attributes: attributes, } daemon.EventsService.Log(action, events.NetworkEventType, actor) } // copyAttributes guarantees that labels are not mutated by event triggers. func copyAttributes(labels map[string]string) map[string]string { attributes := map[string]string{} if labels == nil { return attributes } for k, v := range labels { attributes[k] = v } return attributes } docker-1.10.3/daemon/events/000077500000000000000000000000001267010174400156145ustar00rootroot00000000000000docker-1.10.3/daemon/events/events.go000066400000000000000000000060221267010174400174470ustar00rootroot00000000000000package events import ( "sync" "time" "github.com/docker/docker/pkg/pubsub" eventtypes "github.com/docker/engine-api/types/events" ) const ( eventsLimit = 64 bufferSize = 1024 ) // Events is pubsub channel for events generated by the engine. type Events struct { mu sync.Mutex events []eventtypes.Message pub *pubsub.Publisher } // New returns new *Events instance func New() *Events { return &Events{ events: make([]eventtypes.Message, 0, eventsLimit), pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), } } // Subscribe adds new listener to events, returns slice of 64 stored // last events, a channel in which you can expect new events (in form // of interface{}, so you need type assertion), and a function to call // to stop the stream of events. 
func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) {
	e.mu.Lock()
	// Snapshot the buffered events under the lock so the caller gets a
	// consistent copy that later Log() calls cannot mutate.
	current := make([]eventtypes.Message, len(e.events))
	copy(current, e.events)
	l := e.pub.Subscribe()
	e.mu.Unlock()

	cancel := func() {
		e.Evict(l)
	}
	return current, l, cancel
}

// SubscribeTopic adds new listener to events, returns slice of 64 stored
// last events, a channel in which you can expect new events (in form
// of interface{}, so you need type assertion). Buffered events older than
// (since, sinceNano) are excluded; since == -1 disables replay entirely.
func (e *Events) SubscribeTopic(since, sinceNano int64, ef *Filter) ([]eventtypes.Message, chan interface{}) {
	e.mu.Lock()
	defer e.mu.Unlock()

	var buffered []eventtypes.Message
	// topic adapts the Filter to the predicate signature pubsub expects.
	topic := func(m interface{}) bool {
		return ef.Include(m.(eventtypes.Message))
	}

	if since != -1 {
		// Walk the buffer newest-to-oldest and stop at the first event older
		// than the requested timestamp; prepending keeps the result in
		// chronological order.
		for i := len(e.events) - 1; i >= 0; i-- {
			ev := e.events[i]
			if ev.Time < since || ((ev.Time == since) && (ev.TimeNano < sinceNano)) {
				break
			}
			if ef.filter.Len() == 0 || topic(ev) {
				buffered = append([]eventtypes.Message{ev}, buffered...)
			}
		}
	}

	var ch chan interface{}
	if ef.filter.Len() > 0 {
		ch = e.pub.SubscribeTopic(topic)
	} else {
		// Subscribe to all events if there are no filters
		ch = e.pub.Subscribe()
	}

	return buffered, ch
}

// Evict evicts listener from pubsub
func (e *Events) Evict(l chan interface{}) {
	e.pub.Evict(l)
}

// Log broadcasts event to listeners. Each listener has 100 millisecond for
// receiving event or it will be skipped.
func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { now := time.Now().UTC() jm := eventtypes.Message{ Action: action, Type: eventType, Actor: actor, Time: now.Unix(), TimeNano: now.UnixNano(), } // fill deprecated fields for container and images switch eventType { case eventtypes.ContainerEventType: jm.ID = actor.ID jm.Status = action jm.From = actor.Attributes["image"] case eventtypes.ImageEventType: jm.ID = actor.ID jm.Status = action } e.mu.Lock() if len(e.events) == cap(e.events) { // discard oldest event copy(e.events, e.events[1:]) e.events[len(e.events)-1] = jm } else { e.events = append(e.events, jm) } e.mu.Unlock() e.pub.Publish(jm) } // SubscribersCount returns number of event listeners func (e *Events) SubscribersCount() int { return e.pub.Len() } docker-1.10.3/daemon/events/events_test.go000066400000000000000000000070151267010174400205110ustar00rootroot00000000000000package events import ( "fmt" "testing" "time" "github.com/docker/engine-api/types/events" ) func TestEventsLog(t *testing.T) { e := New() _, l1, _ := e.Subscribe() _, l2, _ := e.Subscribe() defer e.Evict(l1) defer e.Evict(l2) count := e.SubscribersCount() if count != 2 { t.Fatalf("Must be 2 subscribers, got %d", count) } actor := events.Actor{ ID: "cont", Attributes: map[string]string{"image": "image"}, } e.Log("test", events.ContainerEventType, actor) select { case msg := <-l1: jmsg, ok := msg.(events.Message) if !ok { t.Fatalf("Unexpected type %T", msg) } if len(e.events) != 1 { t.Fatalf("Must be only one event, got %d", len(e.events)) } if jmsg.Status != "test" { t.Fatalf("Status should be test, got %s", jmsg.Status) } if jmsg.ID != "cont" { t.Fatalf("ID should be cont, got %s", jmsg.ID) } if jmsg.From != "image" { t.Fatalf("From should be image, got %s", jmsg.From) } case <-time.After(1 * time.Second): t.Fatal("Timeout waiting for broadcasted message") } select { case msg := <-l2: jmsg, ok := msg.(events.Message) if !ok { t.Fatalf("Unexpected type %T", msg) } if 
len(e.events) != 1 { t.Fatalf("Must be only one event, got %d", len(e.events)) } if jmsg.Status != "test" { t.Fatalf("Status should be test, got %s", jmsg.Status) } if jmsg.ID != "cont" { t.Fatalf("ID should be cont, got %s", jmsg.ID) } if jmsg.From != "image" { t.Fatalf("From should be image, got %s", jmsg.From) } case <-time.After(1 * time.Second): t.Fatal("Timeout waiting for broadcasted message") } } func TestEventsLogTimeout(t *testing.T) { e := New() _, l, _ := e.Subscribe() defer e.Evict(l) c := make(chan struct{}) go func() { actor := events.Actor{ ID: "image", } e.Log("test", events.ImageEventType, actor) close(c) }() select { case <-c: case <-time.After(time.Second): t.Fatal("Timeout publishing message") } } func TestLogEvents(t *testing.T) { e := New() for i := 0; i < eventsLimit+16; i++ { action := fmt.Sprintf("action_%d", i) id := fmt.Sprintf("cont_%d", i) from := fmt.Sprintf("image_%d", i) actor := events.Actor{ ID: id, Attributes: map[string]string{"image": from}, } e.Log(action, events.ContainerEventType, actor) } time.Sleep(50 * time.Millisecond) current, l, _ := e.Subscribe() for i := 0; i < 10; i++ { num := i + eventsLimit + 16 action := fmt.Sprintf("action_%d", num) id := fmt.Sprintf("cont_%d", num) from := fmt.Sprintf("image_%d", num) actor := events.Actor{ ID: id, Attributes: map[string]string{"image": from}, } e.Log(action, events.ContainerEventType, actor) } if len(e.events) != eventsLimit { t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) } var msgs []events.Message for len(msgs) < 10 { m := <-l jm, ok := (m).(events.Message) if !ok { t.Fatalf("Unexpected type %T", m) } msgs = append(msgs, jm) } if len(current) != eventsLimit { t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) } first := current[0] if first.Status != "action_16" { t.Fatalf("First action is %s, must be action_16", first.Status) } last := current[len(current)-1] if last.Status != "action_79" { t.Fatalf("Last action is %s, must be action_79", 
last.Status) } firstC := msgs[0] if firstC.Status != "action_80" { t.Fatalf("First action is %s, must be action_80", firstC.Status) } lastC := msgs[len(msgs)-1] if lastC.Status != "action_89" { t.Fatalf("Last action is %s, must be action_89", lastC.Status) } } docker-1.10.3/daemon/events/filter.go000066400000000000000000000044171267010174400174360ustar00rootroot00000000000000package events import ( "github.com/docker/docker/reference" "github.com/docker/engine-api/types/events" "github.com/docker/engine-api/types/filters" ) // Filter can filter out docker events from a stream type Filter struct { filter filters.Args } // NewFilter creates a new Filter func NewFilter(filter filters.Args) *Filter { return &Filter{filter: filter} } // Include returns true when the event ev is included by the filters func (ef *Filter) Include(ev events.Message) bool { return ef.filter.ExactMatch("event", ev.Action) && ef.filter.ExactMatch("type", ev.Type) && ef.matchContainer(ev) && ef.matchVolume(ev) && ef.matchNetwork(ev) && ef.matchImage(ev) && ef.matchLabels(ev.Actor.Attributes) } func (ef *Filter) matchLabels(attributes map[string]string) bool { if !ef.filter.Include("label") { return true } return ef.filter.MatchKVList("label", attributes) } func (ef *Filter) matchContainer(ev events.Message) bool { return ef.fuzzyMatchName(ev, events.ContainerEventType) } func (ef *Filter) matchVolume(ev events.Message) bool { return ef.fuzzyMatchName(ev, events.VolumeEventType) } func (ef *Filter) matchNetwork(ev events.Message) bool { return ef.fuzzyMatchName(ev, events.NetworkEventType) } func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) } // matchImage matches against both event.Actor.ID (for image events) // and event.Actor.Attributes["image"] (for container events), so that any container that was created // from an image will be included in the 
image events. Also compare both // against the stripped repo name without any tags. func (ef *Filter) matchImage(ev events.Message) bool { id := ev.Actor.ID nameAttr := "image" var imageName string if ev.Type == events.ImageEventType { nameAttr = "name" } if n, ok := ev.Actor.Attributes[nameAttr]; ok { imageName = n } return ef.filter.ExactMatch("image", id) || ef.filter.ExactMatch("image", imageName) || ef.filter.ExactMatch("image", stripTag(id)) || ef.filter.ExactMatch("image", stripTag(imageName)) } func stripTag(image string) string { ref, err := reference.ParseNamed(image) if err != nil { return image } return ref.Name() } docker-1.10.3/daemon/events_test.go000066400000000000000000000014461267010174400172070ustar00rootroot00000000000000package daemon import ( "testing" "github.com/docker/docker/container" "github.com/docker/docker/daemon/events" containertypes "github.com/docker/engine-api/types/container" ) func TestLogContainerCopyLabels(t *testing.T) { e := events.New() _, l, _ := e.Subscribe() defer e.Evict(l) container := &container.Container{ CommonContainer: container.CommonContainer{ ID: "container_id", Name: "container_name", Config: &containertypes.Config{ Labels: map[string]string{ "node": "1", "os": "alpine", }, }, }, } daemon := &Daemon{ EventsService: e, } daemon.LogContainerEvent(container, "create") if _, mutated := container.Config.Labels["image"]; mutated { t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) } } docker-1.10.3/daemon/exec.go000066400000000000000000000224201267010174400155630ustar00rootroot00000000000000package daemon import ( "io" "strings" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/daemon/exec" "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/term" "github.com/docker/engine-api/types" 
"github.com/docker/engine-api/types/strslice" ) func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. container.ExecCommands.Add(config.ID, config) // Storing execs in daemon for easy access via remote API. d.execCommands.Add(config.ID, config) } // ExecExists looks up the exec instance and returns a bool if it exists or not. // It will also return the error produced by `getConfig` func (d *Daemon) ExecExists(name string) (bool, error) { if _, err := d.getExecConfig(name); err != nil { return false, err } return true, nil } // getExecConfig looks up the exec instance by name. If the container associated // with the exec instance is stopped or paused, it will return an error. func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { ec := d.execCommands.Get(name) // If the exec is found but its container is not in the daemon's list of // containers then it must have been deleted, in which case instead of // saying the container isn't running, we should return a 404 so that // the user sees the same error now that they will after the // 5 minute clean-up loop is run which erases old/dead execs. 
if ec != nil { if container := d.containers.Get(ec.ContainerID); container != nil { if !container.IsRunning() { return nil, derr.ErrorCodeContainerNotRunning.WithArgs(container.ID, container.State.String()) } if container.IsPaused() { return nil, derr.ErrorCodeExecPaused.WithArgs(container.ID) } if container.IsRestarting() { return nil, derr.ErrorCodeExecRestarting.WithArgs(container.ID) } return ec, nil } } return nil, derr.ErrorCodeNoExecID.WithArgs(name) } func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { container.ExecCommands.Delete(execConfig.ID) d.execCommands.Delete(execConfig.ID) } func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { container, err := d.GetContainer(name) if err != nil { return nil, err } if !container.IsRunning() { return nil, derr.ErrorCodeNotRunning.WithArgs(name) } if container.IsPaused() { return nil, derr.ErrorCodeExecPaused.WithArgs(name) } if container.IsRestarting() { return nil, derr.ErrorCodeExecRestarting.WithArgs(name) } return container, nil } // ContainerExecCreate sets up an exec in a running container. func (d *Daemon) ContainerExecCreate(config *types.ExecConfig) (string, error) { container, err := d.getActiveContainer(config.Container) if err != nil { return "", err } cmd := strslice.New(config.Cmd...) 
entrypoint, args := d.getEntrypointAndArgs(strslice.New(), cmd) keys := []byte{} if config.DetachKeys != "" { keys, err = term.ToBytes(config.DetachKeys) if err != nil { logrus.Warnf("Wrong escape keys provided (%s, error: %s) using default : ctrl-p ctrl-q", config.DetachKeys, err.Error()) } } processConfig := &execdriver.ProcessConfig{ CommonProcessConfig: execdriver.CommonProcessConfig{ Tty: config.Tty, Entrypoint: entrypoint, Arguments: args, }, } setPlatformSpecificExecProcessConfig(config, container, processConfig) execConfig := exec.NewConfig() execConfig.OpenStdin = config.AttachStdin execConfig.OpenStdout = config.AttachStdout execConfig.OpenStderr = config.AttachStderr execConfig.ProcessConfig = processConfig execConfig.ContainerID = container.ID execConfig.DetachKeys = keys d.registerExecCommand(container, execConfig) d.LogContainerEvent(container, "exec_create: "+execConfig.ProcessConfig.Entrypoint+" "+strings.Join(execConfig.ProcessConfig.Arguments, " ")) return execConfig.ID, nil } // ContainerExecStart starts a previously set up exec instance. The // std streams are set up. 
// ContainerExecStart starts a previously set up exec instance identified by
// name, wiring the caller-provided std streams to the exec'ed process. It
// blocks until either stream attachment fails or the exec process finishes.
func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	ec, err := d.getExecConfig(name)
	if err != nil {
		return derr.ErrorCodeNoExecID.WithArgs(name)
	}

	// Guard against starting the same exec twice or restarting a finished one.
	ec.Lock()
	if ec.ExitCode != nil {
		ec.Unlock()
		return derr.ErrorCodeExecExited.WithArgs(ec.ID)
	}

	if ec.Running {
		ec.Unlock()
		return derr.ErrorCodeExecRunning.WithArgs(ec.ID)
	}
	ec.Running = true
	ec.Unlock()

	c := d.containers.Get(ec.ContainerID)
	logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID)
	d.LogContainerEvent(c, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " "))

	if ec.OpenStdin && stdin != nil {
		// Pump the caller's stdin through a pipe so it can be closed
		// independently of the caller's ReadCloser.
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debugf("Closing buffered stdin pipe")
			pools.Copy(w, stdin)
		}()
		cStdin = r
	}
	if ec.OpenStdout {
		cStdout = stdout
	}
	if ec.OpenStderr {
		cStderr = stderr
	}

	if ec.OpenStdin {
		ec.NewInputPipes()
	} else {
		ec.NewNopInputPipe()
	}

	attachErr := container.AttachStreams(ec.StreamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr, ec.DetachKeys)

	execErr := make(chan error)

	// Note, the ExecConfig data will be removed when the container
	// itself is deleted. This allows us to query it (for things like
	// the exitStatus) even after the cmd is done running.

	go func() {
		execErr <- d.containerExec(c, ec)
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return derr.ErrorCodeExecAttach.WithArgs(err)
		}
		return nil
	case err := <-execErr:
		// The exec finished first; still drain attachErr so a stream
		// failure is surfaced when the exec itself succeeded.
		if aErr := <-attachErr; aErr != nil && err == nil {
			return derr.ErrorCodeExecAttach.WithArgs(aErr)
		}
		if err == nil {
			return nil
		}

		// Maybe the container stopped while we were trying to exec
		if !c.IsRunning() {
			return derr.ErrorCodeExecContainerStopped
		}
		return derr.ErrorCodeExecCantRun.WithArgs(ec.ID, c.ID, err)
	}
}

// Exec calls the underlying exec driver to run
func (d *Daemon) Exec(c *container.Container, execConfig *exec.Config, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
	hooks := execdriver.Hooks{
		Start: startCallback,
	}
	exitStatus, err := d.execDriver.Exec(c.Command, execConfig.ProcessConfig, pipes, hooks)

	// On err, make sure we don't leave ExitCode at zero
	if err != nil && exitStatus == 0 {
		exitStatus = 128
	}

	execConfig.ExitCode = &exitStatus
	execConfig.Running = false

	return exitStatus, err
}

// execCommandGC runs a ticker to clean up the daemon references
// of exec configs that are no longer part of the container.
func (d *Daemon) execCommandGC() {
	for range time.Tick(5 * time.Minute) {
		var (
			cleaned          int
			liveExecCommands = d.containerExecIds()
		)
		for id, config := range d.execCommands.Commands() {
			if config.CanRemove {
				cleaned++
				d.execCommands.Delete(id)
			} else {
				// First pass marks dead execs; they are deleted on the
				// following tick, once CanRemove is observed set.
				if _, exists := liveExecCommands[id]; !exists {
					config.CanRemove = true
				}
			}
		}
		if cleaned > 0 {
			logrus.Debugf("clean %d unused exec commands", cleaned)
		}
	}
}

// containerExecIds returns a list of all the current exec ids that are in use
// and running inside a container.
func (d *Daemon) containerExecIds() map[string]struct{} {
	ids := map[string]struct{}{}
	for _, c := range d.containers.List() {
		for _, id := range c.ExecCommands.List() {
			ids[id] = struct{}{}
		}
	}
	return ids
}

// containerExec runs the exec described by ec inside container, blocking
// until the process has actually started (or failed to start).
func (d *Daemon) containerExec(container *container.Container, ec *exec.Config) error {
	container.Lock()
	defer container.Unlock()

	callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
		if processConfig.Tty {
			// The callback is called after the process Start()
			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
			// which we close here.
			if c, ok := processConfig.Stdout.(io.Closer); ok {
				c.Close()
			}
		}
		// Signal waiters (ec.Wait) that the exec has started.
		ec.Close()
		return nil
	}

	// We use a callback here instead of a goroutine and an chan for
	// synchronization purposes
	cErr := promise.Go(func() error { return d.monitorExec(container, ec, callback) })
	return ec.Wait(cErr)
}

// monitorExec drives the exec to completion: it runs the process through the
// exec driver, then tears down the streams/terminal and drops the exec from
// the container's (but not the daemon's) store.
func (d *Daemon) monitorExec(container *container.Container, execConfig *exec.Config, callback execdriver.DriverCallback) error {
	pipes := execdriver.NewPipes(execConfig.Stdin(), execConfig.Stdout(), execConfig.Stderr(), execConfig.OpenStdin)
	exitCode, err := d.Exec(container, execConfig, pipes, callback)
	if err != nil {
		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
	}
	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)

	if err := execConfig.CloseStreams(); err != nil {
		logrus.Errorf("%s: %s", container.ID, err)
	}

	if execConfig.ProcessConfig.Terminal != nil {
		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
		}
	}
	// remove the exec command from the container's store only and not the
	// daemon's store so that the exec command can be inspected.
	container.ExecCommands.Delete(execConfig.ID)
	return err
}
docker-1.10.3/daemon/exec/000077500000000000000000000000001267010174400152345ustar00rootroot00000000000000docker-1.10.3/daemon/exec/exec.go000066400000000000000000000053721267010174400165160ustar00rootroot00000000000000package exec

import (
	"sync"
	"time"

	"github.com/docker/docker/daemon/execdriver"
	derr "github.com/docker/docker/errors"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/runconfig"
)

// Config holds the configurations for execs. The Daemon keeps
// track of both running and finished execs so that they can be
// examined both during and after completion.
type Config struct {
	sync.Mutex
	*runconfig.StreamConfig
	ID            string
	Running       bool
	ExitCode      *int
	ProcessConfig *execdriver.ProcessConfig
	OpenStdin     bool
	OpenStderr    bool
	OpenStdout    bool
	CanRemove     bool
	ContainerID   string
	DetachKeys    []byte

	// waitStart will be closed immediately after the exec is really started.
	waitStart chan struct{}
}

// NewConfig initializes the a new exec configuration
func NewConfig() *Config {
	return &Config{
		ID:           stringid.GenerateNonCryptoID(),
		StreamConfig: runconfig.NewStreamConfig(),
		waitStart:    make(chan struct{}),
	}
}

// Store keeps track of the exec configurations.
type Store struct {
	commands map[string]*Config
	sync.RWMutex
}

// NewStore initializes a new exec store.
func NewStore() *Store {
	return &Store{commands: make(map[string]*Config, 0)}
}

// Commands returns the exec configurations in the store.
func (e *Store) Commands() map[string]*Config {
	// Return a shallow copy so callers can iterate without holding the lock.
	e.RLock()
	commands := make(map[string]*Config, len(e.commands))
	for id, config := range e.commands {
		commands[id] = config
	}
	e.RUnlock()
	return commands
}

// Add adds a new exec configuration to the store.
func (e *Store) Add(id string, Config *Config) {
	e.Lock()
	e.commands[id] = Config
	e.Unlock()
}

// Get returns an exec configuration by its id.
func (e *Store) Get(id string) *Config { e.RLock() res := e.commands[id] e.RUnlock() return res } // Delete removes an exec configuration from the store. func (e *Store) Delete(id string) { e.Lock() delete(e.commands, id) e.Unlock() } // List returns the list of exec ids in the store. func (e *Store) List() []string { var IDs []string e.RLock() for id := range e.commands { IDs = append(IDs, id) } e.RUnlock() return IDs } // Wait waits until the exec process finishes or there is an error in the error channel. func (c *Config) Wait(cErr chan error) error { // Exec should not return until the process is actually running select { case <-c.waitStart: case err := <-cErr: return err } return nil } // Close closes the wait channel for the progress. func (c *Config) Close() { close(c.waitStart) } // Resize changes the size of the terminal for the exec process. func (c *Config) Resize(h, w int) error { select { case <-c.waitStart: case <-time.After(time.Second): return derr.ErrorCodeExecResize.WithArgs(c.ID) } return c.ProcessConfig.Terminal.Resize(h, w) } docker-1.10.3/daemon/exec_unix.go000066400000000000000000000010361267010174400166260ustar00rootroot00000000000000// +build linux freebsd package daemon import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/execdriver" "github.com/docker/engine-api/types" ) // setPlatformSpecificExecProcessConfig sets platform-specific fields in the // ProcessConfig structure. 
func setPlatformSpecificExecProcessConfig(config *types.ExecConfig, container *container.Container, pc *execdriver.ProcessConfig) { user := config.User if len(user) == 0 { user = container.Config.User } pc.User = user pc.Privileged = config.Privileged } docker-1.10.3/daemon/exec_windows.go000066400000000000000000000006401267010174400173350ustar00rootroot00000000000000package daemon import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/execdriver" "github.com/docker/engine-api/types" ) // setPlatformSpecificExecProcessConfig sets platform-specific fields in the // ProcessConfig structure. This is a no-op on Windows func setPlatformSpecificExecProcessConfig(config *types.ExecConfig, container *container.Container, pc *execdriver.ProcessConfig) { } docker-1.10.3/daemon/execdriver/000077500000000000000000000000001267010174400164505ustar00rootroot00000000000000docker-1.10.3/daemon/execdriver/driver.go000066400000000000000000000122371267010174400202770ustar00rootroot00000000000000package execdriver import ( "errors" "io" "os/exec" "time" "github.com/opencontainers/runc/libcontainer" ) // Context is a generic key value pair that allows // arbitrary data to be sent type Context map[string]string // Define error messages var ( ErrNotRunning = errors.New("Container is not running") ErrWaitTimeoutReached = errors.New("Wait timeout reached") ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function") ErrDriverNotFound = errors.New("The requested docker init has not been found") ) // DriverCallback defines a callback function which is used in "Run" and "Exec". // This allows work to be done in the parent process when the child is passing // through PreStart, Start and PostStop events. // Callbacks are provided a processConfig pointer and the pid of the child. // The channel will be used to notify the OOM events. 
type DriverCallback func(processConfig *ProcessConfig, pid int, chOOM <-chan struct{}) error // Hooks is a struct containing function pointers to callbacks // used by any execdriver implementation exploiting hooks capabilities type Hooks struct { // PreStart is called before container's CMD/ENTRYPOINT is executed PreStart []DriverCallback // Start is called after the container's process is full started Start DriverCallback // PostStop is called after the container process exits PostStop []DriverCallback } // Info is driver specific information based on // processes registered with the driver type Info interface { IsRunning() bool } // Terminal represents a pseudo TTY, it is for when // using a container interactively. type Terminal interface { io.Closer Resize(height, width int) error } // Driver is an interface for drivers to implement // including all basic functions a driver should have type Driver interface { // Run executes the process, blocks until the process exits and returns // the exit code. It's the last stage on Docker side for running a container. Run(c *Command, pipes *Pipes, hooks Hooks) (ExitStatus, error) // Exec executes the process in an existing container, blocks until the // process exits and returns the exit code. Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, hooks Hooks) (int, error) // Kill sends signals to process in container. Kill(c *Command, sig int) error // Pause pauses a container. Pause(c *Command) error // Unpause unpauses a container. Unpause(c *Command) error // Name returns the name of the driver. Name() string // Info returns the configuration stored in the driver struct, // "temporary" hack (until we move state from core to plugins). Info(id string) Info // GetPidsForContainer returns a list of pid for the processes running in a container. GetPidsForContainer(id string) ([]int, error) // Terminate kills a container by sending signal SIGKILL. 
Terminate(c *Command) error // Clean removes all traces of container exec. Clean(id string) error // Stats returns resource stats for a running container Stats(id string) (*ResourceStats, error) // Update updates resource configs for a container Update(c *Command) error // SupportsHooks refers to the driver capability to exploit pre/post hook functionality SupportsHooks() bool } // CommonResources contains the resource configs for a driver that are // common across platforms. type CommonResources struct { Memory int64 `json:"memory"` MemoryReservation int64 `json:"memory_reservation"` CPUShares int64 `json:"cpu_shares"` BlkioWeight uint16 `json:"blkio_weight"` } // ResourceStats contains information about resource usage by a container. type ResourceStats struct { *libcontainer.Stats Read time.Time `json:"read"` MemoryLimit int64 `json:"memory_limit"` SystemUsage uint64 `json:"system_usage"` } // CommonProcessConfig is the common platform agnostic part of the ProcessConfig // structure that describes a process that will be run inside a container. type CommonProcessConfig struct { exec.Cmd `json:"-"` Tty bool `json:"tty"` Entrypoint string `json:"entrypoint"` Arguments []string `json:"arguments"` Terminal Terminal `json:"-"` // standard or tty terminal } // CommonCommand is the common platform agnostic part of the Command structure // which wraps an os/exec.Cmd to add more metadata type CommonCommand struct { ContainerPid int `json:"container_pid"` // the pid for the process inside a container ID string `json:"id"` InitPath string `json:"initpath"` // dockerinit MountLabel string `json:"mount_label"` // TODO Windows. More involved, but can be factored out Mounts []Mount `json:"mounts"` Network *Network `json:"network"` ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container. ProcessLabel string `json:"process_label"` // TODO Windows. 
More involved, but can be factored out Resources *Resources `json:"resources"` Rootfs string `json:"rootfs"` // root fs of the container WorkingDir string `json:"working_dir"` TmpDir string `json:"tmpdir"` // Directory used to store docker tmpdirs. } docker-1.10.3/daemon/execdriver/driver_unix.go000066400000000000000000000264451267010174400213500ustar00rootroot00000000000000// +build !windows package execdriver import ( "encoding/json" "io/ioutil" "os" "path/filepath" "strconv" "strings" "time" "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/go-units" "github.com/opencontainers/runc/libcontainer" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" blkiodev "github.com/opencontainers/runc/libcontainer/configs" ) // Mount contains information for a mount operation. type Mount struct { Source string `json:"source"` Destination string `json:"destination"` Writable bool `json:"writable"` Data string `json:"data"` Propagation string `json:"mountpropagation"` } // Resources contains all resource configs for a driver. // Currently these are all for cgroup configs. 
type Resources struct { CommonResources // Fields below here are platform specific BlkioWeightDevice []*blkiodev.WeightDevice `json:"blkio_weight_device"` BlkioThrottleReadBpsDevice []*blkiodev.ThrottleDevice `json:"blkio_throttle_read_bps_device"` BlkioThrottleWriteBpsDevice []*blkiodev.ThrottleDevice `json:"blkio_throttle_write_bps_device"` BlkioThrottleReadIOpsDevice []*blkiodev.ThrottleDevice `json:"blkio_throttle_read_iops_device"` BlkioThrottleWriteIOpsDevice []*blkiodev.ThrottleDevice `json:"blkio_throttle_write_iops_device"` MemorySwap int64 `json:"memory_swap"` KernelMemory int64 `json:"kernel_memory"` CPUQuota int64 `json:"cpu_quota"` CpusetCpus string `json:"cpuset_cpus"` CpusetMems string `json:"cpuset_mems"` CPUPeriod int64 `json:"cpu_period"` Rlimits []*units.Rlimit `json:"rlimits"` OomKillDisable bool `json:"oom_kill_disable"` MemorySwappiness int64 `json:"memory_swappiness"` } // ProcessConfig is the platform specific structure that describes a process // that will be run inside a container. type ProcessConfig struct { CommonProcessConfig // Fields below here are platform specific Privileged bool `json:"privileged"` User string `json:"user"` Console string `json:"-"` // dev/console path } // Ipc settings of the container // It is for IPC namespace setting. Usually different containers // have their own IPC namespace, however this specifies to use // an existing IPC namespace. // You can join the host's or a container's IPC namespace. type Ipc struct { ContainerID string `json:"container_id"` // id of the container to join ipc. HostIpc bool `json:"host_ipc"` } // Pid settings of the container // It is for PID namespace setting. Usually different containers // have their own PID namespace, however this specifies to use // an existing PID namespace. // Joining the host's PID namespace is currently the only supported // option. type Pid struct { HostPid bool `json:"host_pid"` } // UTS settings of the container // It is for UTS namespace setting. 
Usually different containers // have their own UTS namespace, however this specifies to use // an existing UTS namespace. // Joining the host's UTS namespace is currently the only supported // option. type UTS struct { HostUTS bool `json:"host_uts"` } // Network settings of the container type Network struct { Mtu int `json:"mtu"` ContainerID string `json:"container_id"` // id of the container to join network. NamespacePath string `json:"namespace_path"` HostNetworking bool `json:"host_networking"` } // Command wraps an os/exec.Cmd to add more metadata type Command struct { CommonCommand // Fields below here are platform specific AllowedDevices []*configs.Device `json:"allowed_devices"` AppArmorProfile string `json:"apparmor_profile"` AutoCreatedDevices []*configs.Device `json:"autocreated_devices"` CapAdd []string `json:"cap_add"` CapDrop []string `json:"cap_drop"` CgroupParent string `json:"cgroup_parent"` // The parent cgroup for this command. GIDMapping []idtools.IDMap `json:"gidmapping"` GroupAdd []string `json:"group_add"` Ipc *Ipc `json:"ipc"` OomScoreAdj int `json:"oom_score_adj"` Pid *Pid `json:"pid"` ReadonlyRootfs bool `json:"readonly_rootfs"` RemappedRoot *User `json:"remap_root"` SeccompProfile string `json:"seccomp_profile"` UIDMapping []idtools.IDMap `json:"uidmapping"` UTS *UTS `json:"uts"` } // SetRootPropagation sets the root mount propagation mode. func SetRootPropagation(config *configs.Config, propagation int) { config.RootPropagation = propagation } // InitContainer is the initialization of a container config. // It returns the initial configs for a container. It's mostly // defined by the default template. 
func InitContainer(c *Command) *configs.Config { container := template.New() container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env) container.Cgroups.Name = c.ID container.Cgroups.Resources.AllowedDevices = c.AllowedDevices container.Devices = filterDevices(c.AutoCreatedDevices, (c.RemappedRoot.UID != 0)) container.Rootfs = c.Rootfs container.Readonlyfs = c.ReadonlyRootfs // This can be overridden later by driver during mount setup based // on volume options SetRootPropagation(container, mount.RPRIVATE) container.Cgroups.Parent = c.CgroupParent // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" return container } func filterDevices(devices []*configs.Device, userNamespacesEnabled bool) []*configs.Device { if !userNamespacesEnabled { return devices } filtered := []*configs.Device{} // if we have user namespaces enabled, these devices will not be created // because of the mknod limitation in the kernel for an unprivileged process. // Rather, they will be bind-mounted, which will only work if they exist; // check for existence and remove non-existent entries from the list for _, device := range devices { if _, err := os.Stat(device.Path); err == nil { filtered = append(filtered, device) } } return filtered } func getEnv(key string, env []string) string { for _, pair := range env { parts := strings.SplitN(pair, "=", 2) if parts[0] == key { return parts[1] } } return "" } // SetupCgroups setups cgroup resources for a container. 
func SetupCgroups(container *configs.Config, c *Command) error { if c.Resources != nil { container.Cgroups.Resources.CpuShares = c.Resources.CPUShares container.Cgroups.Resources.Memory = c.Resources.Memory container.Cgroups.Resources.MemoryReservation = c.Resources.MemoryReservation container.Cgroups.Resources.MemorySwap = c.Resources.MemorySwap container.Cgroups.Resources.KernelMemory = c.Resources.KernelMemory container.Cgroups.Resources.CpusetCpus = c.Resources.CpusetCpus container.Cgroups.Resources.CpusetMems = c.Resources.CpusetMems container.Cgroups.Resources.CpuPeriod = c.Resources.CPUPeriod container.Cgroups.Resources.CpuQuota = c.Resources.CPUQuota container.Cgroups.Resources.BlkioWeight = c.Resources.BlkioWeight container.Cgroups.Resources.BlkioWeightDevice = c.Resources.BlkioWeightDevice container.Cgroups.Resources.BlkioThrottleReadBpsDevice = c.Resources.BlkioThrottleReadBpsDevice container.Cgroups.Resources.BlkioThrottleWriteBpsDevice = c.Resources.BlkioThrottleWriteBpsDevice container.Cgroups.Resources.BlkioThrottleReadIOPSDevice = c.Resources.BlkioThrottleReadIOpsDevice container.Cgroups.Resources.BlkioThrottleWriteIOPSDevice = c.Resources.BlkioThrottleWriteIOpsDevice container.Cgroups.Resources.OomKillDisable = c.Resources.OomKillDisable container.Cgroups.Resources.MemorySwappiness = c.Resources.MemorySwappiness } return nil } // Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) { out := &libcontainer.NetworkInterface{Name: interfaceName} // This can happen if the network runtime information is missing - possible if the // container was created by an old version of libcontainer. if interfaceName == "" { return out, nil } type netStatsPair struct { // Where to write the output. Out *uint64 // The network stats file to read. File string } // Ingress for host veth is from the container. 
Hence tx_bytes stat on the host veth is actually number of bytes received by the container. netStats := []netStatsPair{ {Out: &out.RxBytes, File: "tx_bytes"}, {Out: &out.RxPackets, File: "tx_packets"}, {Out: &out.RxErrors, File: "tx_errors"}, {Out: &out.RxDropped, File: "tx_dropped"}, {Out: &out.TxBytes, File: "rx_bytes"}, {Out: &out.TxPackets, File: "rx_packets"}, {Out: &out.TxErrors, File: "rx_errors"}, {Out: &out.TxDropped, File: "rx_dropped"}, } for _, netStat := range netStats { data, err := readSysfsNetworkStats(interfaceName, netStat.File) if err != nil { return nil, err } *(netStat.Out) = data } return out, nil } // Reads the specified statistics available under /sys/class/net//statistics func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile)) if err != nil { return 0, err } return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) } // Stats collects all the resource usage information from a container. 
func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) { f, err := os.Open(filepath.Join(containerDir, "state.json")) if err != nil { return nil, err } defer f.Close() type network struct { Type string HostInterfaceName string } state := struct { CgroupPaths map[string]string `json:"cgroup_paths"` Networks []network }{} if err := json.NewDecoder(f).Decode(&state); err != nil { return nil, err } now := time.Now() mgr := fs.Manager{Paths: state.CgroupPaths} cstats, err := mgr.GetStats() if err != nil { return nil, err } stats := &libcontainer.Stats{CgroupStats: cstats} // if the container does not have any memory limit specified set the // limit to the machines memory memoryLimit := containerMemoryLimit if memoryLimit == 0 { memoryLimit = machineMemory } for _, iface := range state.Networks { switch iface.Type { case "veth": istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) if err != nil { return nil, err } stats.Interfaces = append(stats.Interfaces, istats) } } return &ResourceStats{ Stats: stats, Read: now, MemoryLimit: memoryLimit, }, nil } // User contains the uid and gid representing a Unix user type User struct { UID int `json:"root_uid"` GID int `json:"root_gid"` } // ExitStatus provides exit reasons for a container. type ExitStatus struct { // The exit code with which the container exited. ExitCode int // Whether the container encountered an OOM. OOMKilled bool } docker-1.10.3/daemon/execdriver/driver_windows.go000066400000000000000000000041261267010174400220470ustar00rootroot00000000000000package execdriver import "github.com/docker/go-connections/nat" // Mount contains information for a mount operation. type Mount struct { Source string `json:"source"` Destination string `json:"destination"` Writable bool `json:"writable"` } // Resources contains all resource configs for a driver. // Currently these are all for cgroup configs. 
type Resources struct { CommonResources // Fields below here are platform specific } // ProcessConfig is the platform specific structure that describes a process // that will be run inside a container. type ProcessConfig struct { CommonProcessConfig // Fields below here are platform specific ConsoleSize [2]int `json:"-"` // h,w of initial console size } // Network settings of the container type Network struct { Interface *NetworkInterface `json:"interface"` ContainerID string `json:"container_id"` // id of the container to join network. } // NetworkInterface contains network configs for a driver type NetworkInterface struct { MacAddress string `json:"mac"` Bridge string `json:"bridge"` IPAddress string `json:"ip"` // PortBindings is the port mapping between the exposed port in the // container and the port on the host. PortBindings nat.PortMap `json:"port_bindings"` } // Command wraps an os/exec.Cmd to add more metadata type Command struct { CommonCommand // Fields below here are platform specific FirstStart bool `json:"first_start"` // Optimisation for first boot of Windows Hostname string `json:"hostname"` // Windows sets the hostname in the execdriver LayerFolder string `json:"layer_folder"` // Layer folder for a command LayerPaths []string `json:"layer_paths"` // Layer paths for a command Isolation string `json:"isolation"` // Isolation level for the container ArgsEscaped bool `json:"args_escaped"` // True if args are already escaped HvPartition bool `json:"hv_partition"` // True if it's an hypervisor partition } // ExitStatus provides exit reasons for a container. type ExitStatus struct { // The exit code with which the container exited. 
ExitCode int } docker-1.10.3/daemon/execdriver/execdrivers/000077500000000000000000000000001267010174400207735ustar00rootroot00000000000000docker-1.10.3/daemon/execdriver/execdrivers/execdrivers_freebsd.go000066400000000000000000000006551267010174400253450ustar00rootroot00000000000000// +build freebsd package execdrivers import ( "fmt" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/sysinfo" ) // NewDriver returns a new execdriver.Driver from the given name configured with the provided options. func NewDriver(options []string, root, libPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { return nil, fmt.Errorf("jail driver not yet supported on FreeBSD") } docker-1.10.3/daemon/execdriver/execdrivers/execdrivers_linux.go000066400000000000000000000007501267010174400250660ustar00rootroot00000000000000// +build linux package execdrivers import ( "path" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/native" "github.com/docker/docker/pkg/sysinfo" ) // NewDriver returns a new execdriver.Driver from the given name configured with the provided options. func NewDriver(options []string, root, libPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { return native.NewDriver(path.Join(root, "execdriver", "native"), options) } docker-1.10.3/daemon/execdriver/execdrivers/execdrivers_windows.go000066400000000000000000000007001267010174400254140ustar00rootroot00000000000000// +build windows package execdrivers import ( "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/windows" "github.com/docker/docker/pkg/sysinfo" ) // NewDriver returns a new execdriver.Driver from the given name configured with the provided options. 
func NewDriver(options []string, root, libPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { return windows.NewDriver(root, options) } docker-1.10.3/daemon/execdriver/native/000077500000000000000000000000001267010174400177365ustar00rootroot00000000000000docker-1.10.3/daemon/execdriver/native/apparmor.go000066400000000000000000000075571267010174400221240ustar00rootroot00000000000000// +build linux package native import ( "bufio" "fmt" "io" "os" "os/exec" "path" "strings" "text/template" "github.com/docker/docker/pkg/aaparser" "github.com/opencontainers/runc/libcontainer/apparmor" ) const ( apparmorProfilePath = "/etc/apparmor.d/docker" ) type data struct { Name string ExecPath string Imports []string InnerImports []string MajorVersion int MinorVersion int } const baseTemplate = ` {{range $value := .Imports}} {{$value}} {{end}} profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { {{range $value := .InnerImports}} {{$value}} {{end}} network, capability, file, umount, deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) # deny write to files not in /proc//** or /proc/sys/** deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ deny @{PROC}/sysrq-trigger rwklx, deny @{PROC}/mem rwklx, deny @{PROC}/kmem rwklx, deny @{PROC}/kcore rwklx, deny mount, deny /sys/[^f]*/** wklx, deny /sys/f[^s]*/** wklx, deny /sys/fs/[^c]*/** wklx, deny /sys/fs/c[^g]*/** wklx, deny /sys/fs/cg[^r]*/** wklx, deny /sys/firmware/efi/efivars/** rwklx, deny /sys/kernel/security/** rwklx, {{if ge .MajorVersion 2}}{{if ge .MinorVersion 8}} # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container ptrace (trace,read) peer=docker-default, {{end}}{{end}} {{if ge .MajorVersion 2}}{{if ge .MinorVersion 
9}} # docker daemon confinement requires explict allow rule for signal signal (receive) set=(kill,term) peer={{.ExecPath}}, {{end}}{{end}} } ` func generateProfile(out io.Writer) error { compiled, err := template.New("apparmor_profile").Parse(baseTemplate) if err != nil { return err } data := &data{ Name: "docker-default", } if tunablesExists() { data.Imports = append(data.Imports, "#include ") } else { data.Imports = append(data.Imports, "@{PROC}=/proc/") } if abstractionsExists() { data.InnerImports = append(data.InnerImports, "#include ") } data.MajorVersion, data.MinorVersion, err = aaparser.GetVersion() if err != nil { return err } data.ExecPath, err = exec.LookPath("docker") if err != nil { return err } if err := compiled.Execute(out, data); err != nil { return err } return nil } // check if the tunables/global exist func tunablesExists() bool { _, err := os.Stat("/etc/apparmor.d/tunables/global") return err == nil } // check if abstractions/base exist func abstractionsExists() bool { _, err := os.Stat("/etc/apparmor.d/abstractions/base") return err == nil } func installAppArmorProfile() error { if !apparmor.IsEnabled() { return nil } // Make sure /etc/apparmor.d exists if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { return err } f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { return err } if err := generateProfile(f); err != nil { f.Close() return err } f.Close() cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker") // to use the parser directly we have to make sure we are in the correct // dir with the profile cmd.Dir = "/etc/apparmor.d" output, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output) } return nil } func hasAppArmorProfileLoaded(profile string) error { file, err := os.Open("/sys/kernel/security/apparmor/profiles") if err != nil { return err } r := bufio.NewReader(file) for { p, err := 
r.ReadString('\n') if err != nil { return err } if strings.HasPrefix(p, profile+" ") { return nil } } } docker-1.10.3/daemon/execdriver/native/create.go000066400000000000000000000320141267010174400215300ustar00rootroot00000000000000// +build linux,cgo package native import ( "fmt" "path/filepath" "strings" "syscall" "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/volume" "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/devices" ) // createContainer populates and configures the container type with the // data provided by the execdriver.Command func (d *Driver) createContainer(c *execdriver.Command, hooks execdriver.Hooks) (container *configs.Config, err error) { container = execdriver.InitContainer(c) if err := d.createIpc(container, c); err != nil { return nil, err } if err := d.createPid(container, c); err != nil { return nil, err } if err := d.createUTS(container, c); err != nil { return nil, err } if err := d.setupRemappedRoot(container, c); err != nil { return nil, err } if err := d.createNetwork(container, c, hooks); err != nil { return nil, err } if c.ProcessConfig.Privileged { if !container.Readonlyfs { // clear readonly for /sys for i := range container.Mounts { if container.Mounts[i].Destination == "/sys" { container.Mounts[i].Flags &= ^syscall.MS_RDONLY } } container.ReadonlyPaths = nil } // clear readonly for cgroup for i := range container.Mounts { if container.Mounts[i].Device == "cgroup" { container.Mounts[i].Flags &= ^syscall.MS_RDONLY } } container.MaskPaths = nil if err := d.setPrivileged(container); err != nil { return nil, err } } else { if err := d.setCapabilities(container, c); err != nil { return nil, err } if c.SeccompProfile == "" { container.Seccomp = getDefaultSeccompProfile() } } // add CAP_ prefix to all caps for new libcontainer update 
to match // the spec format. for i, s := range container.Capabilities { if !strings.HasPrefix(s, "CAP_") { container.Capabilities[i] = fmt.Sprintf("CAP_%s", s) } } container.AdditionalGroups = c.GroupAdd if c.AppArmorProfile != "" { container.AppArmorProfile = c.AppArmorProfile } if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { container.Seccomp, err = loadSeccompProfile(c.SeccompProfile) if err != nil { return nil, err } } if err := execdriver.SetupCgroups(container, c); err != nil { return nil, err } container.OomScoreAdj = c.OomScoreAdj if container.Readonlyfs { for i := range container.Mounts { switch container.Mounts[i].Destination { case "/proc", "/dev", "/dev/pts", "/dev/mqueue": continue } container.Mounts[i].Flags |= syscall.MS_RDONLY } /* These paths must be remounted as r/o */ container.ReadonlyPaths = append(container.ReadonlyPaths, "/dev") } if err := d.setupMounts(container, c); err != nil { return nil, err } d.setupLabels(container, c) d.setupRlimits(container, c) return container, nil } func (d *Driver) createNetwork(container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error { if c.Network == nil { return nil } if c.Network.ContainerID != "" { d.Lock() active := d.activeContainers[c.Network.ContainerID] d.Unlock() if active == nil { return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID) } state, err := active.State() if err != nil { return err } container.Namespaces.Add(configs.NEWNET, state.NamespacePaths[configs.NEWNET]) return nil } if c.Network.NamespacePath != "" { container.Namespaces.Add(configs.NEWNET, c.Network.NamespacePath) return nil } // only set up prestart hook if the namespace path is not set (this should be // all cases *except* for --net=host shared networking) container.Hooks = &configs.Hooks{ Prestart: []configs.Hook{ configs.NewFunctionHook(func(s configs.HookState) error { if len(hooks.PreStart) > 0 { for _, fnHook := range hooks.PreStart { // A closed channel 
for OOM is returned here as it will be // non-blocking and return the correct result when read. chOOM := make(chan struct{}) close(chOOM) if err := fnHook(&c.ProcessConfig, s.Pid, chOOM); err != nil { return err } } } return nil }), }, } return nil } func (d *Driver) createIpc(container *configs.Config, c *execdriver.Command) error { if c.Ipc.HostIpc { container.Namespaces.Remove(configs.NEWIPC) return nil } if c.Ipc.ContainerID != "" { d.Lock() active := d.activeContainers[c.Ipc.ContainerID] d.Unlock() if active == nil { return fmt.Errorf("%s is not a valid running container to join", c.Ipc.ContainerID) } state, err := active.State() if err != nil { return err } container.Namespaces.Add(configs.NEWIPC, state.NamespacePaths[configs.NEWIPC]) } return nil } func (d *Driver) createPid(container *configs.Config, c *execdriver.Command) error { if c.Pid.HostPid { container.Namespaces.Remove(configs.NEWPID) return nil } return nil } func (d *Driver) createUTS(container *configs.Config, c *execdriver.Command) error { if c.UTS.HostUTS { container.Namespaces.Remove(configs.NEWUTS) container.Hostname = "" return nil } return nil } func (d *Driver) setupRemappedRoot(container *configs.Config, c *execdriver.Command) error { if c.RemappedRoot.UID == 0 { container.Namespaces.Remove(configs.NEWUSER) return nil } // convert the Docker daemon id map to the libcontainer variant of the same struct // this keeps us from having to import libcontainer code across Docker client + daemon packages cuidMaps := []configs.IDMap{} cgidMaps := []configs.IDMap{} for _, idMap := range c.UIDMapping { cuidMaps = append(cuidMaps, configs.IDMap(idMap)) } for _, idMap := range c.GIDMapping { cgidMaps = append(cgidMaps, configs.IDMap(idMap)) } container.UidMappings = cuidMaps container.GidMappings = cgidMaps for _, node := range container.Devices { node.Uid = uint32(c.RemappedRoot.UID) node.Gid = uint32(c.RemappedRoot.GID) } // TODO: until a kernel/mount solution exists for handling remount in a user 
namespace, // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) for i := range container.Mounts { if container.Mounts[i].Device == "cgroup" { container.Mounts[i].Flags &= ^syscall.MS_RDONLY } } return nil } func (d *Driver) setPrivileged(container *configs.Config) (err error) { container.Capabilities = execdriver.GetAllCapabilities() container.Cgroups.Resources.AllowAllDevices = true hostDevices, err := devices.HostDevices() if err != nil { return err } container.Devices = hostDevices if apparmor.IsEnabled() { container.AppArmorProfile = "unconfined" } return nil } func (d *Driver) setCapabilities(container *configs.Config, c *execdriver.Command) (err error) { container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop) return err } func (d *Driver) setupRlimits(container *configs.Config, c *execdriver.Command) { if c.Resources == nil { return } for _, rlimit := range c.Resources.Rlimits { container.Rlimits = append(container.Rlimits, configs.Rlimit{ Type: rlimit.Type, Hard: rlimit.Hard, Soft: rlimit.Soft, }) } } // If rootfs mount propagation is RPRIVATE, that means all the volumes are // going to be private anyway. There is no need to apply per volume // propagation on top. This is just an optimzation so that cost of per volume // propagation is paid only if user decides to make some volume non-private // which will force rootfs mount propagation to be non RPRIVATE. func checkResetVolumePropagation(container *configs.Config) { if container.RootPropagation != mount.RPRIVATE { return } for _, m := range container.Mounts { m.PropagationFlags = nil } } func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { for _, m := range mountinfo { if m.Mountpoint == dir { return m } } return nil } // Get the source mount point of directory passed in as argument. Also return // optional fields. func getSourceMount(source string) (string, string, error) { // Ensure any symlinks are resolved. 
sourcePath, err := filepath.EvalSymlinks(source) if err != nil { return "", "", err } mountinfos, err := mount.GetMounts() if err != nil { return "", "", err } mountinfo := getMountInfo(mountinfos, sourcePath) if mountinfo != nil { return sourcePath, mountinfo.Optional, nil } path := sourcePath for { path = filepath.Dir(path) mountinfo = getMountInfo(mountinfos, path) if mountinfo != nil { return path, mountinfo.Optional, nil } if path == "/" { break } } // If we are here, we did not find parent mount. Something is wrong. return "", "", fmt.Errorf("Could not find source mount of %s", source) } // Ensure mount point on which path is mouted, is shared. func ensureShared(path string) error { sharedMount := false sourceMount, optionalOpts, err := getSourceMount(path) if err != nil { return err } // Make sure source mount point is shared. optsSplit := strings.Split(optionalOpts, " ") for _, opt := range optsSplit { if strings.HasPrefix(opt, "shared:") { sharedMount = true break } } if !sharedMount { return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) } return nil } // Ensure mount point on which path is mounted, is either shared or slave. func ensureSharedOrSlave(path string) error { sharedMount := false slaveMount := false sourceMount, optionalOpts, err := getSourceMount(path) if err != nil { return err } // Make sure source mount point is shared. 
optsSplit := strings.Split(optionalOpts, " ") for _, opt := range optsSplit { if strings.HasPrefix(opt, "shared:") { sharedMount = true break } else if strings.HasPrefix(opt, "master:") { slaveMount = true break } } if !sharedMount && !slaveMount { return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) } return nil } func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) error { userMounts := make(map[string]struct{}) for _, m := range c.Mounts { userMounts[m.Destination] = struct{}{} } // Filter out mounts that are overridden by user supplied mounts var defaultMounts []*configs.Mount _, mountDev := userMounts["/dev"] for _, m := range container.Mounts { if _, ok := userMounts[m.Destination]; !ok { if mountDev && strings.HasPrefix(m.Destination, "/dev/") { container.Devices = nil continue } defaultMounts = append(defaultMounts, m) } } container.Mounts = defaultMounts mountPropagationMap := map[string]int{ "private": mount.PRIVATE, "rprivate": mount.RPRIVATE, "shared": mount.SHARED, "rshared": mount.RSHARED, "slave": mount.SLAVE, "rslave": mount.RSLAVE, } for _, m := range c.Mounts { for _, cm := range container.Mounts { if cm.Destination == m.Destination { return derr.ErrorCodeMountDup.WithArgs(m.Destination) } } if m.Source == "tmpfs" { var ( data = "size=65536k" flags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV err error ) if m.Data != "" { flags, data, err = mount.ParseTmpfsOptions(m.Data) if err != nil { return err } } container.Mounts = append(container.Mounts, &configs.Mount{ Source: m.Source, Destination: m.Destination, Data: data, Device: "tmpfs", Flags: flags, PropagationFlags: []int{mountPropagationMap[volume.DefaultPropagationMode]}, }) continue } flags := syscall.MS_BIND | syscall.MS_REC var pFlag int if !m.Writable { flags |= syscall.MS_RDONLY } // Determine property of RootPropagation based on volume // properties. 
If a volume is shared, then keep root propagtion // shared. This should work for slave and private volumes too. // // For slave volumes, it can be either [r]shared/[r]slave. // // For private volumes any root propagation value should work. pFlag = mountPropagationMap[m.Propagation] if pFlag == mount.SHARED || pFlag == mount.RSHARED { if err := ensureShared(m.Source); err != nil { return err } rootpg := container.RootPropagation if rootpg != mount.SHARED && rootpg != mount.RSHARED { execdriver.SetRootPropagation(container, mount.SHARED) } } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { if err := ensureSharedOrSlave(m.Source); err != nil { return err } rootpg := container.RootPropagation if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { execdriver.SetRootPropagation(container, mount.RSLAVE) } } mount := &configs.Mount{ Source: m.Source, Destination: m.Destination, Device: "bind", Flags: flags, } if pFlag != 0 { mount.PropagationFlags = []int{pFlag} } container.Mounts = append(container.Mounts, mount) } checkResetVolumePropagation(container) return nil } func (d *Driver) setupLabels(container *configs.Config, c *execdriver.Command) { container.ProcessLabel = c.ProcessLabel container.MountLabel = c.MountLabel } docker-1.10.3/daemon/execdriver/native/driver.go000066400000000000000000000353131267010174400215650ustar00rootroot00000000000000// +build linux,cgo package native import ( "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "sync" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/reexec" sysinfo "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/term" "github.com/opencontainers/runc/libcontainer" "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runc/libcontainer/cgroups/systemd" 
"github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/utils" ) // Define constants for native driver const ( DriverName = "native" Version = "0.2" ) // Driver contains all information for native driver, // it implements execdriver.Driver. type Driver struct { root string activeContainers map[string]libcontainer.Container machineMemory int64 factory libcontainer.Factory sync.Mutex } // NewDriver returns a new native driver, called from NewDriver of execdriver. func NewDriver(root string, options []string) (*Driver, error) { meminfo, err := sysinfo.ReadMemInfo() if err != nil { return nil, err } if err := sysinfo.MkdirAll(root, 0700); err != nil { return nil, err } if apparmor.IsEnabled() { if err := installAppArmorProfile(); err != nil { apparmorProfiles := []string{"docker-default"} // Allow daemon to run if loading failed, but are active // (possibly through another run, manually, or via system startup) for _, policy := range apparmorProfiles { if err := hasAppArmorProfileLoaded(policy); err != nil { return nil, fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy) } } } } // choose cgroup manager // this makes sure there are no breaking changes to people // who upgrade from versions without native.cgroupdriver opt cgm := libcontainer.Cgroupfs // parse the options for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "native.cgroupdriver": // override the default if they set options switch val { case "systemd": if systemd.UseSystemd() { cgm = libcontainer.SystemdCgroups } else { // warn them that they chose the wrong driver logrus.Warn("You cannot use systemd as native.cgroupdriver, using cgroupfs instead") } case "cgroupfs": cgm = libcontainer.Cgroupfs default: return nil, fmt.Errorf("Unknown native.cgroupdriver given %q. 
try cgroupfs or systemd", val) } default: return nil, fmt.Errorf("Unknown option %s\n", key) } } f, err := libcontainer.New( root, cgm, libcontainer.InitPath(reexec.Self(), DriverName), ) if err != nil { return nil, err } return &Driver{ root: root, activeContainers: make(map[string]libcontainer.Container), machineMemory: meminfo.MemTotal, factory: f, }, nil } type execOutput struct { exitCode int err error } // Run implements the exec driver Driver interface, // it calls libcontainer APIs to run a container. func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) { destroyed := false var err error c.TmpDir, err = ioutil.TempDir("", c.ID) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } defer os.RemoveAll(c.TmpDir) // take the Command and populate the libcontainer.Config from it container, err := d.createContainer(c, hooks) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } p := &libcontainer.Process{ Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...), Env: c.ProcessConfig.Env, Cwd: c.WorkingDir, User: c.ProcessConfig.User, } if err := setupPipes(container, &c.ProcessConfig, p, pipes); err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } cont, err := d.factory.Create(c.ID, container) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } d.Lock() d.activeContainers[c.ID] = cont d.Unlock() defer func() { if !destroyed { cont.Destroy() } d.cleanContainer(c.ID) }() if err := cont.Start(p); err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } // 'oom' is used to emit 'oom' events to the eventstream, 'oomKilled' is used // to set the 'OOMKilled' flag in state oom := notifyOnOOM(cont) oomKilled := notifyOnOOM(cont) if hooks.Start != nil { pid, err := p.Pid() if err != nil { p.Signal(os.Kill) p.Wait() return execdriver.ExitStatus{ExitCode: -1}, err } hooks.Start(&c.ProcessConfig, pid, oom) } waitF := p.Wait if nss 
:= cont.Config().Namespaces; !nss.Contains(configs.NEWPID) { // we need such hack for tracking processes with inherited fds, // because cmd.Wait() waiting for all streams to be copied waitF = waitInPIDHost(p, cont) } ps, err := waitF() if err != nil { execErr, ok := err.(*exec.ExitError) if !ok { return execdriver.ExitStatus{ExitCode: -1}, err } ps = execErr.ProcessState } cont.Destroy() destroyed = true // oomKilled will have an oom event if any process within the container was // OOM killed at any time, not only if the init process OOMed. // // Perhaps we only want the OOMKilled flag to be set if the OOM // resulted in a container death, but there isn't a good way to do this // because the kernel's cgroup oom notification does not provide information // such as the PID. This could be heuristically done by checking that the OOM // happened within some very small time slice for the container dying (and // optionally exit-code 137), but I don't think the cgroup oom notification // can be used to reliably determine this // // Even if there were multiple OOMs, it's sufficient to read one value // because libcontainer's oom notify will discard the channel after the // cgroup is destroyed _, oomKill := <-oomKilled return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil } // notifyOnOOM returns a channel that signals if the container received an OOM notification // for any process. If it is unable to subscribe to OOM notifications then a closed // channel is returned as it will be non-blocking and return the correct result when read. 
func notifyOnOOM(container libcontainer.Container) <-chan struct{} { oom, err := container.NotifyOOM() if err != nil { logrus.Warnf("Your kernel does not support OOM notifications: %s", err) c := make(chan struct{}) close(c) return c } return oom } func killCgroupProcs(c libcontainer.Container) { var procs []*os.Process if err := c.Pause(); err != nil { logrus.Warn(err) } pids, err := c.Processes() if err != nil { // don't care about childs if we can't get them, this is mostly because cgroup already deleted logrus.Warnf("Failed to get processes from container %s: %v", c.ID(), err) } for _, pid := range pids { if p, err := os.FindProcess(pid); err == nil { procs = append(procs, p) if err := p.Kill(); err != nil { logrus.Warn(err) } } } if err := c.Resume(); err != nil { logrus.Warn(err) } for _, p := range procs { if _, err := p.Wait(); err != nil { logrus.Warn(err) } } } func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*os.ProcessState, error) { return func() (*os.ProcessState, error) { pid, err := p.Pid() if err != nil { return nil, err } process, err := os.FindProcess(pid) s, err := process.Wait() if err != nil { execErr, ok := err.(*exec.ExitError) if !ok { return s, err } s = execErr.ProcessState } killCgroupProcs(c) p.Wait() return s, err } } // Kill implements the exec driver Driver interface. func (d *Driver) Kill(c *execdriver.Command, sig int) error { d.Lock() active := d.activeContainers[c.ID] d.Unlock() if active == nil { return fmt.Errorf("active container for %s does not exist", c.ID) } state, err := active.State() if err != nil { return err } return syscall.Kill(state.InitProcessPid, syscall.Signal(sig)) } // Pause implements the exec driver Driver interface, // it calls libcontainer API to pause a container. 
func (d *Driver) Pause(c *execdriver.Command) error { d.Lock() active := d.activeContainers[c.ID] d.Unlock() if active == nil { return fmt.Errorf("active container for %s does not exist", c.ID) } return active.Pause() } // Unpause implements the exec driver Driver interface, // it calls libcontainer API to unpause a container. func (d *Driver) Unpause(c *execdriver.Command) error { d.Lock() active := d.activeContainers[c.ID] d.Unlock() if active == nil { return fmt.Errorf("active container for %s does not exist", c.ID) } return active.Resume() } // Terminate implements the exec driver Driver interface. func (d *Driver) Terminate(c *execdriver.Command) error { defer d.cleanContainer(c.ID) container, err := d.factory.Load(c.ID) if err != nil { return err } defer container.Destroy() state, err := container.State() if err != nil { return err } pid := state.InitProcessPid currentStartTime, err := system.GetProcessStartTime(pid) if err != nil { return err } if state.InitProcessStartTime == currentStartTime { err = syscall.Kill(pid, 9) syscall.Wait4(pid, nil, 0, nil) } return err } // Info implements the exec driver Driver interface. func (d *Driver) Info(id string) execdriver.Info { return &info{ ID: id, driver: d, } } // Name implements the exec driver Driver interface. func (d *Driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, Version) } // GetPidsForContainer implements the exec driver Driver interface. func (d *Driver) GetPidsForContainer(id string) ([]int, error) { d.Lock() active := d.activeContainers[id] d.Unlock() if active == nil { return nil, fmt.Errorf("active container for %s does not exist", id) } return active.Processes() } func (d *Driver) cleanContainer(id string) error { d.Lock() delete(d.activeContainers, id) d.Unlock() return os.RemoveAll(filepath.Join(d.root, id)) } func (d *Driver) createContainerRoot(id string) error { return os.MkdirAll(filepath.Join(d.root, id), 0655) } // Clean implements the exec driver Driver interface. 
func (d *Driver) Clean(id string) error { return os.RemoveAll(filepath.Join(d.root, id)) } // Stats implements the exec driver Driver interface. func (d *Driver) Stats(id string) (*execdriver.ResourceStats, error) { d.Lock() c := d.activeContainers[id] d.Unlock() if c == nil { return nil, execdriver.ErrNotRunning } now := time.Now() stats, err := c.Stats() if err != nil { return nil, err } memoryLimit := c.Config().Cgroups.Resources.Memory // if the container does not have any memory limit specified set the // limit to the machines memory if memoryLimit == 0 { memoryLimit = d.machineMemory } return &execdriver.ResourceStats{ Stats: stats, Read: now, MemoryLimit: memoryLimit, }, nil } // Update updates configs for a container func (d *Driver) Update(c *execdriver.Command) error { d.Lock() cont := d.activeContainers[c.ID] d.Unlock() if cont == nil { return execdriver.ErrNotRunning } config := cont.Config() if err := execdriver.SetupCgroups(&config, c); err != nil { return err } if err := cont.Set(config); err != nil { return err } return nil } // TtyConsole implements the exec driver Terminal interface. type TtyConsole struct { console libcontainer.Console } // NewTtyConsole returns a new TtyConsole struct. 
func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes) (*TtyConsole, error) { tty := &TtyConsole{ console: console, } if err := tty.AttachPipes(pipes); err != nil { tty.Close() return nil, err } return tty, nil } // Resize implements Resize method of Terminal interface func (t *TtyConsole) Resize(h, w int) error { return term.SetWinsize(t.console.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) } // AttachPipes attaches given pipes to TtyConsole func (t *TtyConsole) AttachPipes(pipes *execdriver.Pipes) error { go func() { if wb, ok := pipes.Stdout.(interface { CloseWriters() error }); ok { defer wb.CloseWriters() } pools.Copy(pipes.Stdout, t.console) }() if pipes.Stdin != nil { go func() { pools.Copy(t.console, pipes.Stdin) pipes.Stdin.Close() }() } return nil } // Close implements Close method of Terminal interface func (t *TtyConsole) Close() error { return t.console.Close() } func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes) error { rootuid, err := container.HostUID() if err != nil { return err } if processConfig.Tty { cons, err := p.NewConsole(rootuid) if err != nil { return err } term, err := NewTtyConsole(cons, pipes) if err != nil { return err } processConfig.Terminal = term return nil } // not a tty--set up stdio pipes term := &execdriver.StdConsole{} processConfig.Terminal = term // if we are not in a user namespace, there is no reason to go through // the hassle of setting up os-level pipes with proper (remapped) ownership // so we will do the prior shortcut for non-userns containers if rootuid == 0 { p.Stdout = pipes.Stdout p.Stderr = pipes.Stderr r, w, err := os.Pipe() if err != nil { return err } if pipes.Stdin != nil { go func() { io.Copy(w, pipes.Stdin) w.Close() }() p.Stdin = r } return nil } // if we have user namespaces enabled (rootuid != 0), we will set // up os pipes for stderr, stdout, stdin so we can chown them to // the proper 
ownership to allow for proper access to the underlying // fds var fds []int //setup stdout r, w, err := os.Pipe() if err != nil { return err } fds = append(fds, int(r.Fd()), int(w.Fd())) if pipes.Stdout != nil { go io.Copy(pipes.Stdout, r) } term.Closers = append(term.Closers, r) p.Stdout = w //setup stderr r, w, err = os.Pipe() if err != nil { return err } fds = append(fds, int(r.Fd()), int(w.Fd())) if pipes.Stderr != nil { go io.Copy(pipes.Stderr, r) } term.Closers = append(term.Closers, r) p.Stderr = w //setup stdin r, w, err = os.Pipe() if err != nil { return err } fds = append(fds, int(r.Fd()), int(w.Fd())) if pipes.Stdin != nil { go func() { io.Copy(w, pipes.Stdin) w.Close() }() p.Stdin = r } for _, fd := range fds { if err := syscall.Fchown(fd, rootuid, rootuid); err != nil { return fmt.Errorf("Failed to chown pipes fd: %v", err) } } return nil } // SupportsHooks implements the execdriver Driver interface. // The libcontainer/runC-based native execdriver does exploit the hook mechanism func (d *Driver) SupportsHooks() bool { return true } docker-1.10.3/daemon/execdriver/native/driver_unsupported.go000066400000000000000000000005041267010174400242270ustar00rootroot00000000000000// +build !linux package native import ( "fmt" "github.com/docker/docker/daemon/execdriver" ) // NewDriver returns a new native driver, called from NewDriver of execdriver. func NewDriver(root string, options []string) (execdriver.Driver, error) { return nil, fmt.Errorf("native driver not supported on non-linux") } docker-1.10.3/daemon/execdriver/native/driver_unsupported_nocgo.go000066400000000000000000000005101267010174400254110ustar00rootroot00000000000000// +build linux,!cgo package native import ( "fmt" "github.com/docker/docker/daemon/execdriver" ) // NewDriver returns a new native driver, called from NewDriver of execdriver. 
func NewDriver(root string, options []string) (execdriver.Driver, error) { return nil, fmt.Errorf("native driver not supported on non-linux") } docker-1.10.3/daemon/execdriver/native/exec.go000066400000000000000000000044221267010174400212130ustar00rootroot00000000000000// +build linux package native import ( "fmt" "os" "os/exec" "strings" "syscall" "github.com/docker/docker/daemon/execdriver" "github.com/opencontainers/runc/libcontainer" // Blank import 'nsenter' so that init in that package will call c // function 'nsexec()' to do 'setns' before Go runtime take over, // it's used for join to exist ns like 'docker exec' command. _ "github.com/opencontainers/runc/libcontainer/nsenter" "github.com/opencontainers/runc/libcontainer/utils" ) // Exec implements the exec driver Driver interface, // it calls libcontainer APIs to execute a container. func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) { active := d.activeContainers[c.ID] if active == nil { return -1, fmt.Errorf("No active container exists with ID %s", c.ID) } user := processConfig.User if c.RemappedRoot.UID != 0 && user == "" { //if user namespaces are enabled, set user explicitly so uid/gid is set to 0 //otherwise we end up with the overflow id and no permissions (65534) user = "0" } p := &libcontainer.Process{ Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...), Env: c.ProcessConfig.Env, Cwd: c.WorkingDir, User: user, } if processConfig.Privileged { p.Capabilities = execdriver.GetAllCapabilities() } // add CAP_ prefix to all caps for new libcontainer update to match // the spec format. 
for i, s := range p.Capabilities { if !strings.HasPrefix(s, "CAP_") { p.Capabilities[i] = fmt.Sprintf("CAP_%s", s) } } config := active.Config() if err := setupPipes(&config, processConfig, p, pipes); err != nil { return -1, err } if err := active.Start(p); err != nil { return -1, err } if hooks.Start != nil { pid, err := p.Pid() if err != nil { p.Signal(os.Kill) p.Wait() return -1, err } // A closed channel for OOM is returned here as it will be // non-blocking and return the correct result when read. chOOM := make(chan struct{}) close(chOOM) hooks.Start(&c.ProcessConfig, pid, chOOM) } ps, err := p.Wait() if err != nil { exitErr, ok := err.(*exec.ExitError) if !ok { return -1, err } ps = exitErr.ProcessState } return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil } docker-1.10.3/daemon/execdriver/native/info.go000066400000000000000000000004751267010174400212260ustar00rootroot00000000000000// +build linux,cgo package native type info struct { ID string driver *Driver } // IsRunning is determined by looking for the // pid file for a container. 
If the file exists then the // container is currently running func (i *info) IsRunning() bool { _, ok := i.driver.activeContainers[i.ID] return ok } docker-1.10.3/daemon/execdriver/native/init.go000066400000000000000000000012501267010174400212260ustar00rootroot00000000000000// +build linux package native import ( "fmt" "os" "runtime" "github.com/docker/docker/pkg/reexec" "github.com/opencontainers/runc/libcontainer" ) func init() { reexec.Register(DriverName, initializer) } func fatal(err error) { if lerr, ok := err.(libcontainer.Error); ok { lerr.Detail(os.Stderr) os.Exit(1) } fmt.Fprintln(os.Stderr, err) os.Exit(1) } func initializer() { runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, err := libcontainer.New("") if err != nil { fatal(err) } if err := factory.StartInitialization(); err != nil { fatal(err) } panic("unreachable") } func writeError(err error) { fmt.Fprint(os.Stderr, err) os.Exit(1) } docker-1.10.3/daemon/execdriver/native/seccomp.go000066400000000000000000000044261267010174400217240ustar00rootroot00000000000000// +build linux package native import ( "encoding/json" "fmt" "github.com/docker/engine-api/types" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/seccomp" ) func getDefaultSeccompProfile() *configs.Seccomp { return defaultSeccompProfile } func loadSeccompProfile(body string) (*configs.Seccomp, error) { var config types.Seccomp if err := json.Unmarshal([]byte(body), &config); err != nil { return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) } return setupSeccomp(&config) } func setupSeccomp(config *types.Seccomp) (newConfig *configs.Seccomp, err error) { if config == nil { return nil, nil } // No default action specified, no syscalls listed, assume seccomp disabled if config.DefaultAction == "" && len(config.Syscalls) == 0 { return nil, nil } newConfig = new(configs.Seccomp) newConfig.Syscalls = []*configs.Syscall{} // if config.Architectures == 0 then libseccomp will figure 
out the architecture to use if len(config.Architectures) > 0 { newConfig.Architectures = []string{} for _, arch := range config.Architectures { newArch, err := seccomp.ConvertStringToArch(string(arch)) if err != nil { return nil, err } newConfig.Architectures = append(newConfig.Architectures, newArch) } } // Convert default action from string representation newConfig.DefaultAction, err = seccomp.ConvertStringToAction(string(config.DefaultAction)) if err != nil { return nil, err } // Loop through all syscall blocks and convert them to libcontainer format for _, call := range config.Syscalls { newAction, err := seccomp.ConvertStringToAction(string(call.Action)) if err != nil { return nil, err } newCall := configs.Syscall{ Name: call.Name, Action: newAction, Args: []*configs.Arg{}, } // Loop through all the arguments of the syscall and convert them for _, arg := range call.Args { newOp, err := seccomp.ConvertStringToOperator(string(arg.Op)) if err != nil { return nil, err } newArg := configs.Arg{ Index: arg.Index, Value: arg.Value, ValueTwo: arg.ValueTwo, Op: newOp, } newCall.Args = append(newCall.Args, &newArg) } newConfig.Syscalls = append(newConfig.Syscalls, &newCall) } return newConfig, nil } docker-1.10.3/daemon/execdriver/native/seccomp_default.go000066400000000000000000000703671267010174400234370ustar00rootroot00000000000000// +build linux,seccomp package native import ( "syscall" "github.com/opencontainers/runc/libcontainer/configs" libseccomp "github.com/seccomp/libseccomp-golang" ) func arches() []string { var native, err = libseccomp.GetNativeArch() if err != nil { return []string{} } var a = native.String() switch a { case "amd64": return []string{"amd64", "x86", "x32"} case "arm64": return []string{"arm64", "arm"} case "mips64": return []string{"mips64", "mips64n32", "mips"} case "mips64n32": return []string{"mips64", "mips64n32", "mips"} case "mipsel64": return []string{"mipsel64", "mipsel64n32", "mipsel"} case "mipsel64n32": return []string{"mipsel64", 
"mipsel64n32", "mipsel"} default: return []string{a} } } var defaultSeccompProfile = &configs.Seccomp{ DefaultAction: configs.Errno, Architectures: arches(), Syscalls: []*configs.Syscall{ { Name: "accept", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "accept4", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "access", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "alarm", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "arch_prctl", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "bind", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "brk", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "capget", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "capset", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "chdir", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "chmod", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "chown", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "chown32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "chroot", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "clock_getres", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "clock_gettime", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "clock_nanosleep", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "clone", Action: configs.Allow, Args: []*configs.Arg{ { Index: 0, Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, ValueTwo: 0, Op: configs.MaskEqualTo, }, }, }, { Name: "close", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "connect", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "creat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "dup", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "dup2", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "dup3", Action: 
configs.Allow, Args: []*configs.Arg{}, }, { Name: "epoll_create", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "epoll_create1", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "epoll_ctl", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "epoll_ctl_old", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "epoll_pwait", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "epoll_wait", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "epoll_wait_old", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "eventfd", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "eventfd2", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "execve", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "execveat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "exit", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "exit_group", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "faccessat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fadvise64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fadvise64_64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fallocate", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fanotify_init", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fanotify_mark", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fchdir", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fchmod", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fchmodat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fchown", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fchown32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fchownat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fcntl", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fcntl64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fdatasync", Action: 
configs.Allow, Args: []*configs.Arg{}, }, { Name: "fgetxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "flistxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "flock", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fork", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fremovexattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fsetxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fstat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fstat64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fstatat64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fstatfs", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fstatfs64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "fsync", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "ftruncate", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "ftruncate64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "futex", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "futimesat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getcpu", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getcwd", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getdents", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getdents64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getegid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getegid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "geteuid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "geteuid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getgid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getgid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getgroups", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getgroups32", Action: configs.Allow, Args: []*configs.Arg{}, }, 
{ Name: "getitimer", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getpeername", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getpgid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getpgrp", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getpid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getppid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getpriority", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getrandom", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getresgid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getresgid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getresuid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getresuid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getrlimit", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "get_robust_list", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getrusage", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getsid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getsockname", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getsockopt", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "get_thread_area", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "gettid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "gettimeofday", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getuid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getuid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "getxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "inotify_add_watch", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "inotify_init", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "inotify_init1", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "inotify_rm_watch", Action: configs.Allow, Args: 
[]*configs.Arg{}, }, { Name: "io_cancel", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "ioctl", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "io_destroy", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "io_getevents", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "ioprio_get", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "ioprio_set", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "io_setup", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "io_submit", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "ipc", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "kill", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "lchown", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "lchown32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "lgetxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "link", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "linkat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "listen", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "listxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "llistxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "_llseek", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "lremovexattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "lseek", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "lsetxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "lstat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "lstat64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "madvise", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "memfd_create", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mincore", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mkdir", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mkdirat", Action: 
configs.Allow, Args: []*configs.Arg{}, }, { Name: "mknod", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mknodat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mlock", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mlockall", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mmap", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mmap2", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mprotect", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mq_getsetattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mq_notify", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mq_open", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mq_timedreceive", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mq_timedsend", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mq_unlink", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "mremap", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "msgctl", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "msgget", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "msgrcv", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "msgsnd", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "msync", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "munlock", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "munlockall", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "munmap", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "nanosleep", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "newfstatat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "_newselect", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "open", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "openat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "pause", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: 
"personality", Action: configs.Allow, Args: []*configs.Arg{ { Index: 0, Value: 0x0, Op: configs.EqualTo, }, }, }, { Name: "personality", Action: configs.Allow, Args: []*configs.Arg{ { Index: 0, Value: 0x0008, Op: configs.EqualTo, }, }, }, { Name: "personality", Action: configs.Allow, Args: []*configs.Arg{ { Index: 0, Value: 0xffffffff, Op: configs.EqualTo, }, }, }, { Name: "pipe", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "pipe2", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "poll", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "ppoll", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "prctl", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "pread64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "preadv", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "prlimit64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "pselect6", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "pwrite64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "pwritev", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "read", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "readahead", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "readlink", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "readlinkat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "readv", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "recv", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "recvfrom", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "recvmmsg", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "recvmsg", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "remap_file_pages", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "removexattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rename", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "renameat", Action: 
configs.Allow, Args: []*configs.Arg{}, }, { Name: "renameat2", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rmdir", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rt_sigaction", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rt_sigpending", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rt_sigprocmask", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rt_sigqueueinfo", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rt_sigreturn", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rt_sigsuspend", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rt_sigtimedwait", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "rt_tgsigqueueinfo", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_getaffinity", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_getattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_getparam", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_get_priority_max", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_get_priority_min", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_getscheduler", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_rr_get_interval", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_setaffinity", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_setattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_setparam", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_setscheduler", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sched_yield", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "seccomp", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "select", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "semctl", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "semget", Action: configs.Allow, Args: 
[]*configs.Arg{}, }, { Name: "semop", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "semtimedop", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "send", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sendfile", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sendfile64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sendmmsg", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sendmsg", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sendto", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setdomainname", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setfsgid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setfsgid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setfsuid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setfsuid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setgid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setgid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setgroups", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setgroups32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sethostname", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setitimer", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setpgid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setpriority", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setregid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setregid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setresgid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setresgid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setresuid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setresuid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setreuid", Action: configs.Allow, Args: []*configs.Arg{}, 
}, { Name: "setreuid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setrlimit", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "set_robust_list", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setsid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setsockopt", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "set_thread_area", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "set_tid_address", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setuid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setuid32", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "setxattr", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "shmat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "shmctl", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "shmdt", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "shmget", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "shutdown", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sigaltstack", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "signalfd", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "signalfd4", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sigreturn", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "socket", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "socketpair", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "splice", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "stat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "stat64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "statfs", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "statfs64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "symlink", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "symlinkat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sync", Action: 
configs.Allow, Args: []*configs.Arg{}, }, { Name: "sync_file_range", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "syncfs", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "sysinfo", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "syslog", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "tee", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "tgkill", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "time", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "timer_create", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "timer_delete", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "timerfd_create", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "timerfd_gettime", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "timerfd_settime", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "timer_getoverrun", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "timer_gettime", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "timer_settime", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "times", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "tkill", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "truncate", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "truncate64", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "ugetrlimit", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "umask", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "uname", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "unlink", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "unlinkat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "utime", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "utimensat", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "utimes", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "vfork", Action: configs.Allow, 
Args: []*configs.Arg{}, }, { Name: "vhangup", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "vmsplice", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "wait4", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "waitid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "waitpid", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "write", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "writev", Action: configs.Allow, Args: []*configs.Arg{}, }, // i386 specific syscalls { Name: "modify_ldt", Action: configs.Allow, Args: []*configs.Arg{}, }, // arm specific syscalls { Name: "breakpoint", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "cacheflush", Action: configs.Allow, Args: []*configs.Arg{}, }, { Name: "set_tls", Action: configs.Allow, Args: []*configs.Arg{}, }, }, } docker-1.10.3/daemon/execdriver/native/seccomp_unsupported.go000066400000000000000000000002301267010174400243610ustar00rootroot00000000000000// +build linux,!seccomp package native import "github.com/opencontainers/runc/libcontainer/configs" var ( defaultSeccompProfile *configs.Seccomp ) docker-1.10.3/daemon/execdriver/native/template/000077500000000000000000000000001267010174400215515ustar00rootroot00000000000000docker-1.10.3/daemon/execdriver/native/template/default_template_linux.go000066400000000000000000000043401267010174400266370ustar00rootroot00000000000000package template import ( "syscall" "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runc/libcontainer/configs" ) const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV // New returns the docker default configuration for libcontainer func New() *configs.Config { container := &configs.Config{ Capabilities: []string{ "CHOWN", "DAC_OVERRIDE", "FSETID", "FOWNER", "MKNOD", "NET_RAW", "SETGID", "SETUID", "SETFCAP", "SETPCAP", "NET_BIND_SERVICE", "SYS_CHROOT", "KILL", "AUDIT_WRITE", }, Namespaces: 
configs.Namespaces([]configs.Namespace{ {Type: "NEWNS"}, {Type: "NEWUTS"}, {Type: "NEWIPC"}, {Type: "NEWPID"}, {Type: "NEWNET"}, {Type: "NEWUSER"}, }), Cgroups: &configs.Cgroup{ ScopePrefix: "docker", // systemd only Resources: &configs.Resources{ AllowAllDevices: false, MemorySwappiness: -1, }, }, Mounts: []*configs.Mount{ { Source: "proc", Destination: "/proc", Device: "proc", Flags: defaultMountFlags, }, { Source: "tmpfs", Destination: "/dev", Device: "tmpfs", Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, Data: "mode=755", }, { Source: "devpts", Destination: "/dev/pts", Device: "devpts", Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, Data: "newinstance,ptmxmode=0666,mode=0620,gid=5", }, { Source: "mqueue", Destination: "/dev/mqueue", Device: "mqueue", Flags: defaultMountFlags, }, { Source: "sysfs", Destination: "/sys", Device: "sysfs", Flags: defaultMountFlags | syscall.MS_RDONLY, }, { Source: "cgroup", Destination: "/sys/fs/cgroup", Device: "cgroup", Flags: defaultMountFlags | syscall.MS_RDONLY, }, }, MaskPaths: []string{ "/proc/kcore", "/proc/latency_stats", "/proc/timer_stats", }, ReadonlyPaths: []string{ "/proc/asound", "/proc/bus", "/proc/fs", "/proc/irq", "/proc/sys", "/proc/sysrq-trigger", }, } if apparmor.IsEnabled() { container.AppArmorProfile = "docker-default" } return container } docker-1.10.3/daemon/execdriver/native/template/default_template_unsupported.go000066400000000000000000000000431267010174400300640ustar00rootroot00000000000000// +build !linux package template docker-1.10.3/daemon/execdriver/pipes.go000066400000000000000000000006701267010174400201220ustar00rootroot00000000000000package execdriver import ( "io" ) // Pipes is a wrapper around a container's output for // stdin, stdout, stderr type Pipes struct { Stdin io.ReadCloser Stdout, Stderr io.Writer } // NewPipes returns a wrapper around a container's output func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes { p := &Pipes{ Stdout: stdout, Stderr: 
stderr, } if useStdin { p.Stdin = stdin } return p } docker-1.10.3/daemon/execdriver/termconsole.go000066400000000000000000000022441267010174400213330ustar00rootroot00000000000000package execdriver import ( "io" "os/exec" ) // StdConsole defines standard console operations for execdriver type StdConsole struct { // Closers holds io.Closer references for closing at terminal close time Closers []io.Closer } // NewStdConsole returns a new StdConsole struct func NewStdConsole(processConfig *ProcessConfig, pipes *Pipes) (*StdConsole, error) { std := &StdConsole{} if err := std.AttachPipes(&processConfig.Cmd, pipes); err != nil { return nil, err } return std, nil } // AttachPipes attaches given pipes to exec.Cmd func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { command.Stdout = pipes.Stdout command.Stderr = pipes.Stderr if pipes.Stdin != nil { stdin, err := command.StdinPipe() if err != nil { return err } go func() { defer stdin.Close() io.Copy(stdin, pipes.Stdin) }() } return nil } // Resize implements Resize method of Terminal interface func (s *StdConsole) Resize(h, w int) error { // we do not need to resize a non tty return nil } // Close implements Close method of Terminal interface func (s *StdConsole) Close() error { for _, c := range s.Closers { c.Close() } return nil } docker-1.10.3/daemon/execdriver/utils_unix.go000066400000000000000000000056701267010174400212120ustar00rootroot00000000000000// +build !windows package execdriver import ( "fmt" "strings" "github.com/docker/docker/pkg/stringutils" "github.com/syndtr/gocapability/capability" ) var capabilityList Capabilities func init() { last := capability.CAP_LAST_CAP // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap if last == capability.Cap(63) { last = capability.CAP_BLOCK_SUSPEND } for _, cap := range capability.List() { if cap > last { continue } capabilityList = append(capabilityList, &CapabilityMapping{ Key: strings.ToUpper(cap.String()), Value: cap, }, ) } } type ( // 
CapabilityMapping maps linux capability name to its value of capability.Cap type // Capabilities is one of the security systems in Linux Security Module (LSM) // framework provided by the kernel. // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html CapabilityMapping struct { Key string `json:"key,omitempty"` Value capability.Cap `json:"value,omitempty"` } // Capabilities contains all CapabilityMapping Capabilities []*CapabilityMapping ) // String returns of CapabilityMapping func (c *CapabilityMapping) String() string { return c.Key } // GetCapability returns CapabilityMapping which contains specific key func GetCapability(key string) *CapabilityMapping { for _, capp := range capabilityList { if capp.Key == key { cpy := *capp return &cpy } } return nil } // GetAllCapabilities returns all of the capabilities func GetAllCapabilities() []string { output := make([]string, len(capabilityList)) for i, capability := range capabilityList { output[i] = capability.String() } return output } // TweakCapabilities can tweak capabilities by adding or dropping capabilities // based on the basics capabilities. 
func TweakCapabilities(basics, adds, drops []string) ([]string, error) { var ( newCaps []string allCaps = GetAllCapabilities() ) // look for invalid cap in the drop list for _, cap := range drops { if strings.ToLower(cap) == "all" { continue } if !stringutils.InSlice(allCaps, cap) { return nil, fmt.Errorf("Unknown capability drop: %q", cap) } } // handle --cap-add=all if stringutils.InSlice(adds, "all") { basics = allCaps } if !stringutils.InSlice(drops, "all") { for _, cap := range basics { // skip `all` already handled above if strings.ToLower(cap) == "all" { continue } // if we don't drop `all`, add back all the non-dropped caps if !stringutils.InSlice(drops, cap) { newCaps = append(newCaps, strings.ToUpper(cap)) } } } for _, cap := range adds { // skip `all` already handled above if strings.ToLower(cap) == "all" { continue } if !stringutils.InSlice(allCaps, cap) { return nil, fmt.Errorf("Unknown capability to add: %q", cap) } // add cap if not already in the list if !stringutils.InSlice(newCaps, cap) { newCaps = append(newCaps, strings.ToUpper(cap)) } } return newCaps, nil } docker-1.10.3/daemon/execdriver/windows/000077500000000000000000000000001267010174400201425ustar00rootroot00000000000000docker-1.10.3/daemon/execdriver/windows/clean.go000066400000000000000000000002221267010174400215470ustar00rootroot00000000000000// +build windows package windows // Clean implements the exec driver Driver interface. func (d *Driver) Clean(id string) error { return nil } docker-1.10.3/daemon/execdriver/windows/commandlinebuilder.go000066400000000000000000000017531267010174400243340ustar00rootroot00000000000000//+build windows package windows import ( "errors" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" ) // createCommandLine creates a command line from the Entrypoint and args // of the ProcessConfig. 
It escapes the arguments if they are not already // escaped func createCommandLine(processConfig *execdriver.ProcessConfig, alreadyEscaped bool) (commandLine string, err error) { // While this should get caught earlier, just in case, validate that we // have something to run. if processConfig.Entrypoint == "" { return "", errors.New("No entrypoint specified") } // Build the command line of the process commandLine = processConfig.Entrypoint logrus.Debugf("Entrypoint: %s", processConfig.Entrypoint) for _, arg := range processConfig.Arguments { logrus.Debugf("appending %s", arg) if !alreadyEscaped { arg = syscall.EscapeArg(arg) } commandLine += " " + arg } logrus.Debugf("commandLine: %s", commandLine) return commandLine, nil } docker-1.10.3/daemon/execdriver/windows/exec.go000066400000000000000000000056651267010174400214310ustar00rootroot00000000000000// +build windows package windows import ( "fmt" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/microsoft/hcsshim" ) // Exec implements the exec driver Driver interface. func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) { var ( term execdriver.Terminal err error exitCode int32 errno uint32 ) active := d.activeContainers[c.ID] if active == nil { return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID) } createProcessParms := hcsshim.CreateProcessParams{ EmulateConsole: processConfig.Tty, // Note NOT c.ProcessConfig.Tty WorkingDirectory: c.WorkingDir, } // Configure the environment for the process // Note NOT c.ProcessConfig.Env createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env) // Create the commandline for the process // Note NOT c.ProcessConfig createProcessParms.CommandLine, err = createCommandLine(processConfig, false) if err != nil { return -1, err } // Start the command running in the container. 
pid, stdin, stdout, stderr, rc, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms) if err != nil { // TODO Windows: TP4 Workaround. In Hyper-V containers, there is a limitation // of one exec per container. This should be fixed post TP4. CreateProcessInComputeSystem // will return a specific error which we handle here to give a good error message // back to the user instead of an inactionable "An invalid argument was supplied" if rc == hcsshim.Win32InvalidArgument { return -1, fmt.Errorf("The limit of docker execs per Hyper-V container has been exceeded") } logrus.Errorf("CreateProcessInComputeSystem() failed %s", err) return -1, err } // Now that the process has been launched, begin copying data to and from // the named pipes for the std handles. setupPipes(stdin, stdout, stderr, pipes) // Note NOT c.ProcessConfig.Tty if processConfig.Tty { term = NewTtyConsole(c.ID, pid) } else { term = NewStdConsole() } processConfig.Terminal = term // Invoke the start callback if hooks.Start != nil { // A closed channel for OOM is returned here as it will be // non-blocking and return the correct result when read. 
chOOM := make(chan struct{}) close(chOOM) hooks.Start(&c.ProcessConfig, int(pid), chOOM) } if exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite); err != nil { if errno == hcsshim.Win32PipeHasBeenEnded { logrus.Debugf("Exiting Run() after WaitForProcessInComputeSystem failed with recognised error 0x%X", errno) return hcsshim.WaitErrExecFailed, nil } logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): 0x%X %s", errno, err) return -1, err } logrus.Debugln("Exiting Run()", c.ID) return int(exitCode), nil } docker-1.10.3/daemon/execdriver/windows/getpids.go000066400000000000000000000004721267010174400221330ustar00rootroot00000000000000// +build windows package windows import "fmt" // GetPidsForContainer implements the exec driver Driver interface. func (d *Driver) GetPidsForContainer(id string) ([]int, error) { // TODO Windows: Implementation required. return nil, fmt.Errorf("GetPidsForContainer: GetPidsForContainer() not implemented") } docker-1.10.3/daemon/execdriver/windows/info.go000066400000000000000000000010301267010174400214160ustar00rootroot00000000000000// +build windows package windows import ( "github.com/docker/docker/daemon/execdriver" "github.com/docker/engine-api/types/container" ) type info struct { ID string driver *Driver isolation container.IsolationLevel } // Info implements the exec driver Driver interface. func (d *Driver) Info(id string) execdriver.Info { return &info{ ID: id, driver: d, isolation: DefaultIsolation, } } func (i *info) IsRunning() bool { var running bool running = true // TODO Need an HCS API return running } docker-1.10.3/daemon/execdriver/windows/namedpipes.go000066400000000000000000000035771267010174400226320ustar00rootroot00000000000000// +build windows package windows import ( "fmt" "io" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" ) // General comment. Handling I/O for a container is very different to Linux. 
// We use a named pipe to HCS to copy I/O both in and out of the container, // very similar to how docker daemon communicates with a CLI. // startStdinCopy asynchronously copies an io.Reader to the container's // process's stdin pipe and closes the pipe when there is no more data to copy. func startStdinCopy(dst io.WriteCloser, src io.Reader) { // Anything that comes from the client stdin should be copied // across to the stdin named pipe of the container. go func() { defer dst.Close() bytes, err := io.Copy(dst, src) log := fmt.Sprintf("Copied %d bytes from stdin.", bytes) if err != nil { log = log + " err=" + err.Error() } logrus.Debugf(log) }() } // startStdouterrCopy asynchronously copies data from the container's process's // stdout or stderr pipe to an io.Writer and closes the pipe when there is no // more data to copy. func startStdouterrCopy(dst io.Writer, src io.ReadCloser, name string) { // Anything that comes from the container named pipe stdout/err should be copied // across to the stdout/err of the client go func() { defer src.Close() bytes, err := io.Copy(dst, src) log := fmt.Sprintf("Copied %d bytes from %s.", bytes, name) if err != nil { log = log + " err=" + err.Error() } logrus.Debugf(log) }() } // setupPipes starts the asynchronous copying of data to and from the named // pipes used byt he HCS for the std handles. func setupPipes(stdin io.WriteCloser, stdout, stderr io.ReadCloser, pipes *execdriver.Pipes) { if stdin != nil { startStdinCopy(stdin, pipes.Stdin) } if stdout != nil { startStdouterrCopy(pipes.Stdout, stdout, "stdout") } if stderr != nil { startStdouterrCopy(pipes.Stderr, stderr, "stderr") } } docker-1.10.3/daemon/execdriver/windows/pauseunpause.go000066400000000000000000000006751267010174400232170ustar00rootroot00000000000000// +build windows package windows import ( "fmt" "github.com/docker/docker/daemon/execdriver" ) // Pause implements the exec driver Driver interface. 
func (d *Driver) Pause(c *execdriver.Command) error { return fmt.Errorf("Windows: Containers cannot be paused") } // Unpause implements the exec driver Driver interface. func (d *Driver) Unpause(c *execdriver.Command) error { return fmt.Errorf("Windows: Containers cannot be paused") } docker-1.10.3/daemon/execdriver/windows/run.go000066400000000000000000000246061267010174400213050ustar00rootroot00000000000000// +build windows package windows import ( "encoding/json" "fmt" "os" "path/filepath" "strconv" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/microsoft/hcsshim" ) // defaultContainerNAT is the default name of the container NAT device that is // preconfigured on the server. const defaultContainerNAT = "ContainerNAT" type layer struct { ID string Path string } type defConfig struct { DefFile string } type portBinding struct { Protocol string InternalPort int ExternalPort int } type natSettings struct { Name string PortBindings []portBinding } type networkConnection struct { NetworkName string // TODO Windows: Add Ip4Address string to this structure when hooked up in // docker CLI. This is present in the HCS JSON handler. EnableNat bool Nat natSettings } type networkSettings struct { MacAddress string } type device struct { DeviceType string Connection interface{} Settings interface{} } type mappedDir struct { HostPath string ContainerPath string ReadOnly bool } type containerInit struct { SystemType string // HCS requires this to be hard-coded to "Container" Name string // Name of the container. We use the docker ID. Owner string // The management platform that created this container IsDummy bool // Used for development purposes. 
VolumePath string // Windows volume path for scratch space Devices []device // Devices used by the container IgnoreFlushesDuringBoot bool // Optimisation hint for container startup in Windows LayerFolderPath string // Where the layer folders are located Layers []layer // List of storage layers ProcessorWeight int64 `json:",omitempty"` // CPU Shares 0..10000 on Windows; where 0 will be ommited and HCS will default. HostName string // Hostname MappedDirectories []mappedDir // List of mapped directories (volumes/mounts) SandboxPath string // Location of unmounted sandbox (used for Hyper-V containers, not Windows Server containers) HvPartition bool // True if it a Hyper-V Container } // defaultOwner is a tag passed to HCS to allow it to differentiate between // container creator management stacks. We hard code "docker" in the case // of docker. const defaultOwner = "docker" // Run implements the exec driver Driver interface func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) { var ( term execdriver.Terminal err error ) cu := &containerInit{ SystemType: "Container", Name: c.ID, Owner: defaultOwner, IsDummy: dummyMode, VolumePath: c.Rootfs, IgnoreFlushesDuringBoot: c.FirstStart, LayerFolderPath: c.LayerFolder, ProcessorWeight: c.Resources.CPUShares, HostName: c.Hostname, } cu.HvPartition = c.HvPartition if cu.HvPartition { cu.SandboxPath = filepath.Dir(c.LayerFolder) } else { cu.VolumePath = c.Rootfs cu.LayerFolderPath = c.LayerFolder } for _, layerPath := range c.LayerPaths { _, filename := filepath.Split(layerPath) g, err := hcsshim.NameToGuid(filename) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } cu.Layers = append(cu.Layers, layer{ ID: g.ToString(), Path: layerPath, }) } // Add the mounts (volumes, bind mounts etc) to the structure mds := make([]mappedDir, len(c.Mounts)) for i, mount := range c.Mounts { mds[i] = mappedDir{ HostPath: mount.Source, ContainerPath: 
mount.Destination, ReadOnly: !mount.Writable} } cu.MappedDirectories = mds // TODO Windows. At some point, when there is CLI on docker run to // enable the IP Address of the container to be passed into docker run, // the IP Address needs to be wired through to HCS in the JSON. It // would be present in c.Network.Interface.IPAddress. See matching // TODO in daemon\container_windows.go, function populateCommand. if c.Network.Interface != nil { var pbs []portBinding // Enumerate through the port bindings specified by the user and convert // them into the internal structure matching the JSON blob that can be // understood by the HCS. for i, v := range c.Network.Interface.PortBindings { proto := strings.ToUpper(i.Proto()) if proto != "TCP" && proto != "UDP" { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto()) } if len(v) > 1 { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings") } for _, v2 := range v { var ( iPort, ePort int err error ) if len(v2.HostIP) != 0 { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings") } if ePort, err = strconv.Atoi(v2.HostPort); err != nil { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err) } if iPort, err = strconv.Atoi(i.Port()); err != nil { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err) } if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range") } pbs = append(pbs, portBinding{ExternalPort: ePort, InternalPort: iPort, Protocol: proto}) } } // TODO Windows: TP3 workaround. Allow the user to override the name of // the Container NAT device through an environment variable. 
This will // ultimately be a global daemon parameter on Windows, similar to -b // for the name of the virtual switch (aka bridge). cn := os.Getenv("DOCKER_CONTAINER_NAT") if len(cn) == 0 { cn = defaultContainerNAT } dev := device{ DeviceType: "Network", Connection: &networkConnection{ NetworkName: c.Network.Interface.Bridge, // TODO Windows: Fixme, next line. Needs HCS fix. EnableNat: false, Nat: natSettings{ Name: cn, PortBindings: pbs, }, }, } if c.Network.Interface.MacAddress != "" { windowsStyleMAC := strings.Replace( c.Network.Interface.MacAddress, ":", "-", -1) dev.Settings = networkSettings{ MacAddress: windowsStyleMAC, } } cu.Devices = append(cu.Devices, dev) } else { logrus.Debugln("No network interface") } configurationb, err := json.Marshal(cu) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } configuration := string(configurationb) err = hcsshim.CreateComputeSystem(c.ID, configuration) if err != nil { logrus.Debugln("Failed to create temporary container ", err) return execdriver.ExitStatus{ExitCode: -1}, err } // Start the container logrus.Debugln("Starting container ", c.ID) err = hcsshim.StartComputeSystem(c.ID) if err != nil { logrus.Errorf("Failed to start compute system: %s", err) return execdriver.ExitStatus{ExitCode: -1}, err } defer func() { // Stop the container if forceKill { logrus.Debugf("Forcibly terminating container %s", c.ID) if errno, err := hcsshim.TerminateComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil { logrus.Warnf("Ignoring error from TerminateComputeSystem 0x%X %s", errno, err) } } else { logrus.Debugf("Shutting down container %s", c.ID) if errno, err := hcsshim.ShutdownComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil { if errno != hcsshim.Win32SystemShutdownIsInProgress && errno != hcsshim.Win32SpecifiedPathInvalid && errno != hcsshim.Win32SystemCannotFindThePathSpecified { logrus.Warnf("Ignoring error from ShutdownComputeSystem 0x%X %s", errno, err) } } } }() 
createProcessParms := hcsshim.CreateProcessParams{ EmulateConsole: c.ProcessConfig.Tty, WorkingDirectory: c.WorkingDir, ConsoleSize: c.ProcessConfig.ConsoleSize, } // Configure the environment for the process createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env) createProcessParms.CommandLine, err = createCommandLine(&c.ProcessConfig, c.ArgsEscaped) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } // Start the command running in the container. pid, stdin, stdout, stderr, _, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms) if err != nil { logrus.Errorf("CreateProcessInComputeSystem() failed %s", err) return execdriver.ExitStatus{ExitCode: -1}, err } // Now that the process has been launched, begin copying data to and from // the named pipes for the std handles. setupPipes(stdin, stdout, stderr, pipes) //Save the PID as we'll need this in Kill() logrus.Debugf("PID %d", pid) c.ContainerPid = int(pid) if c.ProcessConfig.Tty { term = NewTtyConsole(c.ID, pid) } else { term = NewStdConsole() } c.ProcessConfig.Terminal = term // Maintain our list of active containers. We'll need this later for exec // and other commands. d.Lock() d.activeContainers[c.ID] = &activeContainer{ command: c, } d.Unlock() if hooks.Start != nil { // A closed channel for OOM is returned here as it will be // non-blocking and return the correct result when read. chOOM := make(chan struct{}) close(chOOM) hooks.Start(&c.ProcessConfig, int(pid), chOOM) } var ( exitCode int32 errno uint32 ) exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite) if err != nil { if errno != hcsshim.Win32PipeHasBeenEnded { logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): %s", err) } // Do NOT return err here as the container would have // started, otherwise docker will deadlock. 
It's perfectly legitimate // for WaitForProcessInComputeSystem to fail in situations such // as the container being killed on another thread. return execdriver.ExitStatus{ExitCode: hcsshim.WaitErrExecFailed}, nil } logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID) return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil } // SupportsHooks implements the execdriver Driver interface. // The windows driver does not support the hook mechanism func (d *Driver) SupportsHooks() bool { return false } docker-1.10.3/daemon/execdriver/windows/stats.go000066400000000000000000000004371267010174400216330ustar00rootroot00000000000000// +build windows package windows import ( "fmt" "github.com/docker/docker/daemon/execdriver" ) // Stats implements the exec driver Driver interface. func (d *Driver) Stats(id string) (*execdriver.ResourceStats, error) { return nil, fmt.Errorf("Windows: Stats not implemented") } docker-1.10.3/daemon/execdriver/windows/stdconsole.go000066400000000000000000000010171267010174400226450ustar00rootroot00000000000000// +build windows package windows // StdConsole is for when using a container non-interactively type StdConsole struct { } // NewStdConsole returns a new StdConsole struct. func NewStdConsole() *StdConsole { return &StdConsole{} } // Resize implements Resize method of Terminal interface. func (s *StdConsole) Resize(h, w int) error { // we do not need to resize a non tty return nil } // Close implements Close method of Terminal interface. func (s *StdConsole) Close() error { // nothing to close here return nil } docker-1.10.3/daemon/execdriver/windows/terminatekill.go000066400000000000000000000026151267010174400233410ustar00rootroot00000000000000// +build windows package windows import ( "fmt" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/microsoft/hcsshim" ) // Terminate implements the exec driver Driver interface. 
func (d *Driver) Terminate(p *execdriver.Command) error { return kill(p.ID, p.ContainerPid, syscall.SIGTERM) } // Kill implements the exec driver Driver interface. func (d *Driver) Kill(p *execdriver.Command, sig int) error { return kill(p.ID, p.ContainerPid, syscall.Signal(sig)) } func kill(id string, pid int, sig syscall.Signal) error { logrus.Debugf("WindowsExec: kill() id=%s pid=%d sig=%d", id, pid, sig) var err error context := fmt.Sprintf("kill: sig=%d pid=%d", sig, pid) if sig == syscall.SIGKILL || forceKill { // Terminate the compute system if errno, err := hcsshim.TerminateComputeSystem(id, hcsshim.TimeoutInfinite, context); err != nil { logrus.Errorf("Failed to terminate %s - 0x%X %q", id, errno, err) } } else { // Terminate Process if err = hcsshim.TerminateProcessInComputeSystem(id, uint32(pid)); err != nil { logrus.Warnf("Failed to terminate pid %d in %s: %q", pid, id, err) // Ignore errors err = nil } // Shutdown the compute system if errno, err := hcsshim.ShutdownComputeSystem(id, hcsshim.TimeoutInfinite, context); err != nil { logrus.Errorf("Failed to shutdown %s - 0x%X %q", id, errno, err) } } return err } docker-1.10.3/daemon/execdriver/windows/ttyconsole.go000066400000000000000000000012511267010174400226730ustar00rootroot00000000000000// +build windows package windows import ( "github.com/microsoft/hcsshim" ) // TtyConsole implements the exec driver Terminal interface. type TtyConsole struct { id string processid uint32 } // NewTtyConsole returns a new TtyConsole struct. func NewTtyConsole(id string, processid uint32) *TtyConsole { tty := &TtyConsole{ id: id, processid: processid, } return tty } // Resize implements Resize method of Terminal interface. func (t *TtyConsole) Resize(h, w int) error { return hcsshim.ResizeConsoleInComputeSystem(t.id, t.processid, h, w) } // Close implements Close method of Terminal interface. 
func (t *TtyConsole) Close() error { return nil } docker-1.10.3/daemon/execdriver/windows/unsupported.go000066400000000000000000000004401267010174400230570ustar00rootroot00000000000000// +build !windows package windows import ( "fmt" "github.com/docker/docker/daemon/execdriver" ) // NewDriver returns a new execdriver.Driver func NewDriver(root, initPath string) (execdriver.Driver, error) { return nil, fmt.Errorf("Windows driver not supported on non-Windows") } docker-1.10.3/daemon/execdriver/windows/update.go000066400000000000000000000004111267010174400217470ustar00rootroot00000000000000// +build windows package windows import ( "fmt" "github.com/docker/docker/daemon/execdriver" ) // Update updates resource configs for a container. func (d *Driver) Update(c *execdriver.Command) error { return fmt.Errorf("Windows: Update not implemented") } docker-1.10.3/daemon/execdriver/windows/windows.go000066400000000000000000000057601267010174400221730ustar00rootroot00000000000000// +build windows package windows import ( "fmt" "strings" "sync" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/parsers" "github.com/docker/engine-api/types/container" ) // This is a daemon development variable only and should not be // used for running production containers on Windows. var dummyMode bool // This allows the daemon to terminate containers rather than shutdown // This allows the daemon to force kill (HCS terminate) rather than shutdown var forceKill bool // DefaultIsolation allows users to specify a default isolation mode for // when running a container on Windows. For example docker daemon -D // --exec-opt isolation=hyperv will cause Windows to always run containers // as Hyper-V containers unless otherwise specified. 
var DefaultIsolation container.IsolationLevel = "process" // Define name and version for windows var ( DriverName = "Windows 1854" Version = dockerversion.Version + " " + dockerversion.GitCommit ) type activeContainer struct { command *execdriver.Command } // Driver contains all information for windows driver, // it implements execdriver.Driver type Driver struct { root string activeContainers map[string]*activeContainer sync.Mutex } // Name implements the exec driver Driver interface. func (d *Driver) Name() string { return fmt.Sprintf("\n Name: %s\n Build: %s \n Default Isolation: %s", DriverName, Version, DefaultIsolation) } // NewDriver returns a new windows driver, called from NewDriver of execdriver. func NewDriver(root string, options []string) (*Driver, error) { for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "dummy": switch val { case "1": dummyMode = true logrus.Warn("Using dummy mode in Windows exec driver. This is for development use only!") } case "forcekill": switch val { case "1": forceKill = true logrus.Warn("Using force kill mode in Windows exec driver. This is for testing purposes only.") } case "isolation": if !container.IsolationLevel(val).IsValid() { return nil, fmt.Errorf("Unrecognised exec driver option 'isolation':'%s'", val) } if container.IsolationLevel(val).IsHyperV() { DefaultIsolation = "hyperv" } logrus.Infof("Windows default isolation level: '%s'", val) default: return nil, fmt.Errorf("Unrecognised exec driver option %s\n", key) } } return &Driver{ root: root, activeContainers: make(map[string]*activeContainer), }, nil } // setupEnvironmentVariables convert a string array of environment variables // into a map as required by the HCS. Source array is in format [v1=k1] [v2=k2] etc. 
func setupEnvironmentVariables(a []string) map[string]string { r := make(map[string]string) for _, s := range a { arr := strings.Split(s, "=") if len(arr) == 2 { r[arr[0]] = arr[1] } } return r } docker-1.10.3/daemon/export.go000066400000000000000000000027001267010174400161570ustar00rootroot00000000000000package daemon import ( "io" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/ioutils" ) // ContainerExport writes the contents of the container to the given // writer. An error is returned if the container cannot be found. func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { container, err := daemon.GetContainer(name) if err != nil { return err } data, err := daemon.containerExport(container) if err != nil { return derr.ErrorCodeExportFailed.WithArgs(name, err) } defer data.Close() // Stream the entire contents of the container (basically a volatile snapshot) if _, err := io.Copy(out, data); err != nil { return derr.ErrorCodeExportFailed.WithArgs(name, err) } return nil } func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) { if err := daemon.Mount(container); err != nil { return nil, err } uidMaps, gidMaps := daemon.GetUIDGIDMaps() archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: uidMaps, GIDMaps: gidMaps, }) if err != nil { daemon.Unmount(container) return nil, err } arch := ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() daemon.Unmount(container) return err }) daemon.LogContainerEvent(container, "export") return arch, err } 
docker-1.10.3/daemon/graphdriver/000077500000000000000000000000001267010174400166255ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/aufs/000077500000000000000000000000001267010174400175635ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/aufs/aufs.go000066400000000000000000000326021267010174400210530ustar00rootroot00000000000000// +build linux /* aufs driver directory structure . ├── layers // Metadata of layers │ ├── 1 │ ├── 2 │ └── 3 ├── diff // Content of the layer │ ├── 1 // Contains layers that need to be mounted for the id │ ├── 2 │ └── 3 └── mnt // Mount points for the rw layers to be mounted ├── 1 ├── 2 └── 3 */ package aufs import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "path" "strings" "sync" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" mountpk "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/runc/libcontainer/label" ) var ( // ErrAufsNotSupported is returned if aufs is not supported by the host. ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") incompatibleFsMagic = []graphdriver.FsMagic{ graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, } backingFs = "" enableDirpermLock sync.Once enableDirperm bool ) func init() { graphdriver.Register("aufs", Init) } type data struct { referenceCount int path string } // Driver contains information about the filesystem mounted. // root of the filesystem // sync.Mutex to protect against concurrent modifications // active maps mount id to the count type Driver struct { root string uidMaps []idtools.IDMap gidMaps []idtools.IDMap sync.Mutex // Protects concurrent modification to active active map[string]*data } // Init returns a new AUFS driver. // An error is returned if AUFS is not supported. 
func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { // Try to load the aufs kernel module if err := supportsAufs(); err != nil { return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(root) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } for _, magic := range incompatibleFsMagic { if fsMagic == magic { return nil, graphdriver.ErrIncompatibleFS } } paths := []string{ "mnt", "diff", "layers", } a := &Driver{ root: root, active: make(map[string]*data), uidMaps: uidMaps, gidMaps: gidMaps, } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } // Create the root aufs driver dir and return // if it already exists // If not populate the dir structure if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { if os.IsExist(err) { return a, nil } return nil, err } if err := mountpk.MakePrivate(root); err != nil { return nil, err } // Populate the dir structure for _, p := range paths { if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { return nil, err } } return a, nil } // Return a nil error if the kernel supports aufs // We cannot modprobe because inside dind modprobe fails // to run func supportsAufs() error { // We can try to modprobe aufs first before looking at // proc/filesystems for when aufs is supported exec.Command("modprobe", "aufs").Run() f, err := os.Open("/proc/filesystems") if err != nil { return err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.Contains(s.Text(), "aufs") { return nil } } return ErrAufsNotSupported } func (a *Driver) rootPath() string { return a.root } func (*Driver) String() string { return "aufs" } // Status returns current information about the filesystem such as root directory, number of directories mounted, etc. 
func (a *Driver) Status() [][2]string { ids, _ := loadIds(path.Join(a.rootPath(), "layers")) return [][2]string{ {"Root Dir", a.rootPath()}, {"Backing Filesystem", backingFs}, {"Dirs", fmt.Sprintf("%d", len(ids))}, {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, } } // GetMetadata not implemented func (a *Driver) GetMetadata(id string) (map[string]string, error) { return nil, nil } // Exists returns true if the given id is registered with // this driver func (a *Driver) Exists(id string) bool { if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { return false } return true } // Create three folders for each id // mnt, layers, and diff func (a *Driver) Create(id, parent, mountLabel string) error { if err := a.createDirsFor(id); err != nil { return err } // Write the layers metadata f, err := os.Create(path.Join(a.rootPath(), "layers", id)) if err != nil { return err } defer f.Close() if parent != "" { ids, err := getParentIds(a.rootPath(), parent) if err != nil { return err } if _, err := fmt.Fprintln(f, parent); err != nil { return err } for _, i := range ids { if _, err := fmt.Fprintln(f, i); err != nil { return err } } } a.active[id] = &data{} return nil } func (a *Driver) createDirsFor(id string) error { paths := []string{ "mnt", "diff", } rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) if err != nil { return err } for _, p := range paths { if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { return err } } return nil } // Remove will unmount and remove the given id. 
func (a *Driver) Remove(id string) error { // Protect the a.active from concurrent access a.Lock() defer a.Unlock() m := a.active[id] if m != nil { if m.referenceCount > 0 { return nil } // Make sure the dir is umounted first if err := a.unmount(m); err != nil { return err } } tmpDirs := []string{ "mnt", "diff", } // Atomically remove each directory in turn by first moving it out of the // way (so that docker doesn't find it anymore) before doing removal of // the whole tree. for _, p := range tmpDirs { realPath := path.Join(a.rootPath(), p, id) tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { return err } defer os.RemoveAll(tmpPath) } // Remove the layers file for the id if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { return err } return nil } // Get returns the rootfs path for the id. // This will mount the dir at it's given path func (a *Driver) Get(id, mountLabel string) (string, error) { ids, err := getParentIds(a.rootPath(), id) if err != nil { if !os.IsNotExist(err) { return "", err } ids = []string{} } // Protect the a.active from concurrent access a.Lock() defer a.Unlock() m := a.active[id] if m == nil { m = &data{} a.active[id] = m } // If a dir does not have a parent ( no layers )do not try to mount // just return the diff path to the data m.path = path.Join(a.rootPath(), "diff", id) if len(ids) > 0 { m.path = path.Join(a.rootPath(), "mnt", id) if m.referenceCount == 0 { if err := a.mount(id, m, mountLabel); err != nil { return "", err } } } m.referenceCount++ return m.path, nil } // Put unmounts and updates list of active mounts. 
func (a *Driver) Put(id string) error { // Protect the a.active from concurrent access a.Lock() defer a.Unlock() m := a.active[id] if m == nil { // but it might be still here if a.Exists(id) { path := path.Join(a.rootPath(), "mnt", id) err := Unmount(path) if err != nil { logrus.Debugf("Failed to unmount %s aufs: %v", id, err) } } return nil } if count := m.referenceCount; count > 1 { m.referenceCount = count - 1 } else { ids, _ := getParentIds(a.rootPath(), id) // We only mounted if there are any parents if ids != nil && len(ids) > 0 { a.unmount(m) } delete(a.active, id) } return nil } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". func (a *Driver) Diff(id, parent string) (archive.Archive, error) { // AUFS doesn't need the parent layer to produce a diff. return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, UIDMaps: a.uidMaps, GIDMaps: a.gidMaps, }) } // DiffPath returns path to the directory that contains files for the layer // differences. Used for direct access for tar-split. func (a *Driver) DiffPath(id string) (string, func() error, error) { return path.Join(a.rootPath(), "diff", id), func() error { return nil }, nil } func (a *Driver) applyDiff(id string, diff archive.Reader) error { return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ UIDMaps: a.uidMaps, GIDMaps: a.gidMaps, }) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (a *Driver) DiffSize(id, parent string) (size int64, err error) { // AUFS doesn't need the parent layer to calculate the diff size. 
return directory.Size(path.Join(a.rootPath(), "diff", id)) } // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { // AUFS doesn't need the parent id to apply the diff. if err = a.applyDiff(id, diff); err != nil { return } return a.DiffSize(id, parent) } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { // AUFS doesn't have snapshots, so we need to get changes from all parent // layers. layers, err := a.getParentLayerPaths(id) if err != nil { return nil, err } return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) } func (a *Driver) getParentLayerPaths(id string) ([]string, error) { parentIds, err := getParentIds(a.rootPath(), id) if err != nil { return nil, err } layers := make([]string, len(parentIds)) // Get the diff paths for all the parent ids for i, p := range parentIds { layers[i] = path.Join(a.rootPath(), "diff", p) } return layers, nil } func (a *Driver) mount(id string, m *data, mountLabel string) error { // If the id is mounted or we get an error return if mounted, err := a.mounted(m); err != nil || mounted { return err } var ( target = m.path rw = path.Join(a.rootPath(), "diff", id) ) layers, err := a.getParentLayerPaths(id) if err != nil { return err } if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { return fmt.Errorf("error creating aufs mount to %s: %v", target, err) } return nil } func (a *Driver) unmount(m *data) error { if mounted, err := a.mounted(m); err != nil || !mounted { return err } return Unmount(m.path) } func (a *Driver) mounted(m *data) (bool, error) { return mountpk.Mounted(m.path) } // Cleanup aufs and unmount all mountpoints func (a *Driver) 
Cleanup() error { for id, m := range a.active { if err := a.unmount(m); err != nil { logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err) } } return mountpk.Unmount(a.root) } func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { defer func() { if err != nil { Unmount(target) } }() // Mount options are clipped to page size(4096 bytes). If there are more // layers then these are remounted individually using append. offset := 54 if useDirperm() { offset += len("dirperm1") } b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) firstMount := true i := 0 for { for ; i < len(ro); i++ { layer := fmt.Sprintf(":%s=ro+wh", ro[i]) if firstMount { if bp+len(layer) > len(b) { break } bp += copy(b[bp:], layer) } else { data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { return } } } if firstMount { opts := "dio,xino=/dev/shm/aufs.xino" if useDirperm() { opts += ",dirperm1" } data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) if err = mount("none", target, "aufs", 0, data); err != nil { return } firstMount = false } if i == len(ro) { break } } return } // useDirperm checks dirperm1 mount option can be used with the current // version of aufs. 
func useDirperm() bool { enableDirpermLock.Do(func() { base, err := ioutil.TempDir("", "docker-aufs-base") if err != nil { logrus.Errorf("error checking dirperm1: %v", err) return } defer os.RemoveAll(base) union, err := ioutil.TempDir("", "docker-aufs-union") if err != nil { logrus.Errorf("error checking dirperm1: %v", err) return } defer os.RemoveAll(union) opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) if err := mount("none", union, "aufs", 0, opts); err != nil { return } enableDirperm = true if err := Unmount(union); err != nil { logrus.Errorf("error checking dirperm1: failed to unmount %v", err) } }) return enableDirperm } docker-1.10.3/daemon/graphdriver/aufs/aufs_test.go000066400000000000000000000325451267010174400221200ustar00rootroot00000000000000// +build linux package aufs import ( "crypto/sha256" "encoding/hex" "fmt" "io/ioutil" "os" "path" "testing" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" ) var ( tmpOuter = path.Join(os.TempDir(), "aufs-tests") tmp = path.Join(tmpOuter, "aufs") ) func init() { reexec.Init() } func testInit(dir string, t *testing.T) graphdriver.Driver { d, err := Init(dir, nil, nil, nil) if err != nil { if err == graphdriver.ErrNotSupported { t.Skip(err) } else { t.Fatal(err) } } return d } func newDriver(t *testing.T) *Driver { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } d := testInit(tmp, t) return d.(*Driver) } func TestNewDriver(t *testing.T) { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } d := testInit(tmp, t) defer os.RemoveAll(tmp) if d == nil { t.Fatalf("Driver should not be nil") } } func TestAufsString(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if d.String() != "aufs" { t.Fatalf("Expected aufs got %s", d.String()) } } func TestCreateDirStructure(t *testing.T) { newDriver(t) defer os.RemoveAll(tmp) paths := []string{ "mnt", "layers", "diff", } for _, p := range paths { if _, 
err := os.Stat(path.Join(tmp, p)); err != nil { t.Fatal(err) } } } // We should be able to create two drivers with the same dir structure func TestNewDriverFromExistingDir(t *testing.T) { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } testInit(tmp, t) testInit(tmp, t) os.RemoveAll(tmp) } func TestCreateNewDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } } func TestCreateNewDirStructure(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } paths := []string{ "mnt", "diff", "layers", } for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { t.Fatal(err) } } } func TestRemoveImage(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } if err := d.Remove("1"); err != nil { t.Fatal(err) } paths := []string{ "mnt", "diff", "layers", } for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) } } } func TestGetWithoutParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } expected := path.Join(tmp, "diff", "1") if diffPath != expected { t.Fatalf("Expected path %s got %s", expected, diffPath) } } func TestCleanupWithNoDirs(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Cleanup(); err != nil { t.Fatal(err) } } func TestCleanupWithDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } if err := d.Cleanup(); err != nil { t.Fatal(err) } } func TestMountedFalseResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } response, err := 
d.mounted(d.active["1"]) if err != nil { t.Fatal(err) } if response != false { t.Fatalf("Response if dir id 1 is mounted should be false") } } func TestMountedTrueReponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } _, err := d.Get("2", "") if err != nil { t.Fatal(err) } response, err := d.mounted(d.active["2"]) if err != nil { t.Fatal(err) } if response != true { t.Fatalf("Response if dir id 2 is mounted should be true") } } func TestMountWithParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPath, err := d.Get("2", "") if err != nil { t.Fatal(err) } if mntPath == "" { t.Fatal("mntPath should not be empty string") } expected := path.Join(tmp, "mnt", "2") if mntPath != expected { t.Fatalf("Expected %s got %s", expected, mntPath) } } func TestRemoveMountedDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPath, err := d.Get("2", "") if err != nil { t.Fatal(err) } if mntPath == "" { t.Fatal("mntPath should not be empty string") } mounted, err := d.mounted(d.active["2"]) if err != nil { t.Fatal(err) } if !mounted { t.Fatalf("Dir id 2 should be mounted") } if err := d.Remove("2"); err != nil { t.Fatal(err) } } func TestCreateWithInvalidParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "docker", ""); err == nil { t.Fatalf("Error should not be nil with parent does not exist") } } func TestGetDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := 
d.Create("1", "", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } f.Close() a, err := d.Diff("1", "") if err != nil { t.Fatal(err) } if a == nil { t.Fatalf("Archive should not be nil") } } func TestChanges(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPoint, err := d.Get("2", "") if err != nil { t.Fatal(err) } // Create a file to save in the mountpoint f, err := os.Create(path.Join(mntPoint, "test.txt")) if err != nil { t.Fatal(err) } if _, err := f.WriteString("testline"); err != nil { t.Fatal(err) } if err := f.Close(); err != nil { t.Fatal(err) } changes, err := d.Changes("2", "") if err != nil { t.Fatal(err) } if len(changes) != 1 { t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) } change := changes[0] expectedPath := "/test.txt" if change.Path != expectedPath { t.Fatalf("Expected path %s got %s", expectedPath, change.Path) } if change.Kind != archive.ChangeAdd { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } if err := d.Create("3", "2", ""); err != nil { t.Fatal(err) } mntPoint, err = d.Get("3", "") if err != nil { t.Fatal(err) } // Create a file to save in the mountpoint f, err = os.Create(path.Join(mntPoint, "test2.txt")) if err != nil { t.Fatal(err) } if _, err := f.WriteString("testline"); err != nil { t.Fatal(err) } if err := f.Close(); err != nil { t.Fatal(err) } changes, err = d.Changes("3", "") if err != nil { t.Fatal(err) } if len(changes) != 1 { t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) } change = changes[0] 
expectedPath = "/test2.txt" if change.Path != expectedPath { t.Fatalf("Expected path %s got %s", expectedPath, change.Path) } if change.Kind != archive.ChangeAdd { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } } func TestDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } s, err := f.Stat() if err != nil { t.Fatal(err) } size = s.Size() if err := f.Close(); err != nil { t.Fatal(err) } diffSize, err := d.DiffSize("1", "") if err != nil { t.Fatal(err) } if diffSize != size { t.Fatalf("Expected size to be %d got %d", size, diffSize) } } func TestChildDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } s, err := f.Stat() if err != nil { t.Fatal(err) } size = s.Size() if err := f.Close(); err != nil { t.Fatal(err) } diffSize, err := d.DiffSize("1", "") if err != nil { t.Fatal(err) } if diffSize != size { t.Fatalf("Expected size to be %d got %d", size, diffSize) } if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } diffSize, err = d.DiffSize("2", "") if err != nil { t.Fatal(err) } // The diff size for the child should be zero if diffSize != 0 { t.Fatalf("Expected size to be %d got %d", 0, diffSize) } } func TestExists(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", "", ""); err != nil { 
t.Fatal(err) } if d.Exists("none") { t.Fatal("id name should not exist in the driver") } if !d.Exists("1") { t.Fatal("id 1 should exist in the driver") } } func TestStatus(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } status := d.Status() if status == nil || len(status) == 0 { t.Fatal("Status should not be nil or empty") } rootDir := status[0] dirs := status[2] if rootDir[0] != "Root Dir" { t.Fatalf("Expected Root Dir got %s", rootDir[0]) } if rootDir[1] != d.rootPath() { t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) } if dirs[0] != "Dirs" { t.Fatalf("Expected Dirs got %s", dirs[0]) } if dirs[1] != "1" { t.Fatalf("Expected 1 got %s", dirs[1]) } } func TestApplyDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } f.Close() diff, err := d.Diff("1", "") if err != nil { t.Fatal(err) } if err := d.Create("2", "", ""); err != nil { t.Fatal(err) } if err := d.Create("3", "2", ""); err != nil { t.Fatal(err) } if err := d.applyDiff("3", diff); err != nil { t.Fatal(err) } // Ensure that the file is in the mount point for id 3 mountPoint, err := d.Get("3", "") if err != nil { t.Fatal(err) } if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { t.Fatal(err) } } func hash(c string) string { h := sha256.New() fmt.Fprint(h, c) return hex.EncodeToString(h.Sum(nil)) } func testMountMoreThan42Layers(t *testing.T, mountPath string) { if err := os.MkdirAll(mountPath, 0755); err != nil { t.Fatal(err) } defer os.RemoveAll(mountPath) d := testInit(mountPath, t).(*Driver) defer d.Cleanup() var last string var 
expected int for i := 1; i < 127; i++ { expected++ var ( parent = fmt.Sprintf("%d", i-1) current = fmt.Sprintf("%d", i) ) if parent == "0" { parent = "" } else { parent = hash(parent) } current = hash(current) if err := d.Create(current, parent, ""); err != nil { t.Logf("Current layer %d", i) t.Error(err) } point, err := d.Get(current, "") if err != nil { t.Logf("Current layer %d", i) t.Error(err) } f, err := os.Create(path.Join(point, current)) if err != nil { t.Logf("Current layer %d", i) t.Error(err) } f.Close() if i%10 == 0 { if err := os.Remove(path.Join(point, parent)); err != nil { t.Logf("Current layer %d", i) t.Error(err) } expected-- } last = current } // Perform the actual mount for the top most image point, err := d.Get(last, "") if err != nil { t.Error(err) } files, err := ioutil.ReadDir(point) if err != nil { t.Error(err) } if len(files) != expected { t.Errorf("Expected %d got %d", expected, len(files)) } } func TestMountMoreThan42Layers(t *testing.T) { os.RemoveAll(tmpOuter) testMountMoreThan42Layers(t, tmp) } func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { defer os.RemoveAll(tmpOuter) zeroes := "0" for { // This finds a mount path so that when combined into aufs mount options // 4096 byte boundary would be in between the paths or in permission // section. 
For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' mountPath := path.Join(tmpOuter, zeroes, "aufs") pathLength := 77 + len(mountPath) if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { t.Logf("Using path: %s", mountPath) testMountMoreThan42Layers(t, mountPath) return } zeroes += "0" } } docker-1.10.3/daemon/graphdriver/aufs/dirs.go000066400000000000000000000015511267010174400210550ustar00rootroot00000000000000// +build linux package aufs import ( "bufio" "io/ioutil" "os" "path" ) // Return all the directories func loadIds(root string) ([]string, error) { dirs, err := ioutil.ReadDir(root) if err != nil { return nil, err } out := []string{} for _, d := range dirs { if !d.IsDir() { out = append(out, d.Name()) } } return out, nil } // Read the layers file for the current id and return all the // layers represented by new lines in the file // // If there are no lines in the file then the id has no parent // and an empty slice is returned. func getParentIds(root, id string) ([]string, error) { f, err := os.Open(path.Join(root, "layers", id)) if err != nil { return nil, err } defer f.Close() out := []string{} s := bufio.NewScanner(f) for s.Scan() { if t := s.Text(); t != "" { out = append(out, s.Text()) } } return out, s.Err() } docker-1.10.3/daemon/graphdriver/aufs/mount.go000066400000000000000000000006021267010174400212520ustar00rootroot00000000000000// +build linux package aufs import ( "os/exec" "syscall" "github.com/Sirupsen/logrus" ) // Unmount the target specified. 
func Unmount(target string) error { if err := exec.Command("auplink", target, "flush").Run(); err != nil { logrus.Errorf("Couldn't run auplink before unmount: %s", err) } if err := syscall.Unmount(target, 0); err != nil { return err } return nil } docker-1.10.3/daemon/graphdriver/aufs/mount_linux.go000066400000000000000000000002711267010174400224730ustar00rootroot00000000000000package aufs import "syscall" func mount(source string, target string, fstype string, flags uintptr, data string) error { return syscall.Mount(source, target, fstype, flags, data) } docker-1.10.3/daemon/graphdriver/aufs/mount_unsupported.go000066400000000000000000000004451267010174400237270ustar00rootroot00000000000000// +build !linux package aufs import "errors" // MsRemount declared to specify a non-linux system mount. const MsRemount = 0 func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { return errors.New("mount is not implemented on this platform") } docker-1.10.3/daemon/graphdriver/btrfs/000077500000000000000000000000001267010174400177455ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/btrfs/btrfs.go000066400000000000000000000173651267010174400214300ustar00rootroot00000000000000// +build linux package btrfs /* #include #include #include #include */ import "C" import ( "fmt" "os" "path" "path/filepath" "syscall" "unsafe" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/opencontainers/runc/libcontainer/label" ) func init() { graphdriver.Register("btrfs", Init) } // Init returns a new BTRFS driver. // An error is returned if BTRFS is not supported. 
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { rootdir := path.Dir(home) var buf syscall.Statfs_t if err := syscall.Statfs(rootdir, &buf); err != nil { return nil, err } if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs { return nil, graphdriver.ErrPrerequisites } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { return nil, err } if err := mount.MakePrivate(home); err != nil { return nil, err } driver := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, } return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil } // Driver contains information about the filesystem mounted. type Driver struct { //root of the file system home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap } // String prints the name of the driver (btrfs). func (d *Driver) String() string { return "btrfs" } // Status returns current driver information in a two dimensional string array. // Output contains "Build Version" and "Library Version" of the btrfs libraries used. // Version information can be used to check compatibility with your kernel. func (d *Driver) Status() [][2]string { status := [][2]string{} if bv := btrfsBuildVersion(); bv != "-" { status = append(status, [2]string{"Build Version", bv}) } if lv := btrfsLibVersion(); lv != -1 { status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) } return status } // GetMetadata returns empty metadata for this driver. func (d *Driver) GetMetadata(id string) (map[string]string, error) { return nil, nil } // Cleanup unmounts the home directory. 
func (d *Driver) Cleanup() error { return mount.Unmount(d.home) } func free(p *C.char) { C.free(unsafe.Pointer(p)) } func openDir(path string) (*C.DIR, error) { Cpath := C.CString(path) defer free(Cpath) dir := C.opendir(Cpath) if dir == nil { return nil, fmt.Errorf("Can't open dir") } return dir, nil } func closeDir(dir *C.DIR) { if dir != nil { C.closedir(dir) } } func getDirFd(dir *C.DIR) uintptr { return uintptr(C.dirfd(dir)) } func subvolCreate(path, name string) error { dir, err := openDir(path) if err != nil { return err } defer closeDir(dir) var args C.struct_btrfs_ioctl_vol_args for i, c := range []byte(name) { args.name[i] = C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) } return nil } func subvolSnapshot(src, dest, name string) error { srcDir, err := openDir(src) if err != nil { return err } defer closeDir(srcDir) destDir, err := openDir(dest) if err != nil { return err } defer closeDir(destDir) var args C.struct_btrfs_ioctl_vol_args_v2 args.fd = C.__s64(getDirFd(srcDir)) for i, c := range []byte(name) { args.name[i] = C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) } return nil } func isSubvolume(p string) (bool, error) { var bufStat syscall.Stat_t if err := syscall.Lstat(p, &bufStat); err != nil { return false, err } // return true if it is a btrfs subvolume return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil } func subvolDelete(dirpath, name string) error { dir, err := openDir(dirpath) if err != nil { return err } defer closeDir(dir) fullPath := path.Join(dirpath, name) var args C.struct_btrfs_ioctl_vol_args // walk the btrfs subvolumes walkSubvolumes := func(p string, f os.FileInfo, err error) 
error { if err != nil { if os.IsNotExist(err) && p != fullPath { // missing most likely because the path was a subvolume that got removed in the previous iteration // since it's gone anyway, we don't care return nil } return fmt.Errorf("error walking subvolumes: %v", err) } // we want to check children only so skip itself // it will be removed after the filepath walk anyways if f.IsDir() && p != fullPath { sv, err := isSubvolume(p) if err != nil { return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) } if sv { if err := subvolDelete(path.Dir(p), f.Name()); err != nil { return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) } } } return nil } if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) } // all subvolumes have been removed // now remove the one originally passed in for i, c := range []byte(name) { args.name[i] = C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) } return nil } func (d *Driver) subvolumesDir() string { return path.Join(d.home, "subvolumes") } func (d *Driver) subvolumesDirID(id string) string { return path.Join(d.subvolumesDir(), id) } // Create the filesystem with given id. 
func (d *Driver) Create(id, parent, mountLabel string) error { subvolumes := path.Join(d.home, "subvolumes") rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { return err } if parent == "" { if err := subvolCreate(subvolumes, id); err != nil { return err } } else { parentDir, err := d.Get(parent, "") if err != nil { return err } if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { return err } } // if we have a remapped root (user namespaces enabled), change the created snapshot // dir ownership to match if rootUID != 0 || rootGID != 0 { if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { return err } } return label.Relabel(path.Join(subvolumes, id), mountLabel, false) } // Remove the filesystem with given id. func (d *Driver) Remove(id string) error { dir := d.subvolumesDirID(id) if _, err := os.Stat(dir); err != nil { return err } if err := subvolDelete(d.subvolumesDir(), id); err != nil { return err } if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get the requested filesystem id. func (d *Driver) Get(id, mountLabel string) (string, error) { dir := d.subvolumesDirID(id) st, err := os.Stat(dir) if err != nil { return "", err } if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) } return dir, nil } // Put is not implemented for BTRFS as there is no cleanup required for the id. func (d *Driver) Put(id string) error { // Get() creates no runtime resources (like e.g. mounts) // so this doesn't need to do anything. return nil } // Exists checks if the id exists in the filesystem. 
func (d *Driver) Exists(id string) bool { dir := d.subvolumesDirID(id) _, err := os.Stat(dir) return err == nil } docker-1.10.3/daemon/graphdriver/btrfs/btrfs_test.go000066400000000000000000000024461267010174400224610ustar00rootroot00000000000000// +build linux package btrfs import ( "os" "path" "testing" "github.com/docker/docker/daemon/graphdriver/graphtest" ) // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown func TestBtrfsSetup(t *testing.T) { graphtest.GetDriver(t, "btrfs") } func TestBtrfsCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "btrfs") } func TestBtrfsCreateBase(t *testing.T) { graphtest.DriverTestCreateBase(t, "btrfs") } func TestBtrfsCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "btrfs") } func TestBtrfsSubvolDelete(t *testing.T) { d := graphtest.GetDriver(t, "btrfs") if err := d.Create("test", "", ""); err != nil { t.Fatal(err) } defer graphtest.PutDriver(t) dir, err := d.Get("test", "") if err != nil { t.Fatal(err) } defer d.Put("test") if err := subvolCreate(dir, "subvoltest"); err != nil { t.Fatal(err) } if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil { t.Fatal(err) } if err := d.Remove("test"); err != nil { t.Fatal(err) } if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) { t.Fatalf("expected not exist error on nested subvol, got: %v", err) } } func TestBtrfsTeardown(t *testing.T) { graphtest.PutDriver(t) } docker-1.10.3/daemon/graphdriver/btrfs/dummy_unsupported.go000066400000000000000000000000451267010174400240760ustar00rootroot00000000000000// +build !linux !cgo package btrfs docker-1.10.3/daemon/graphdriver/btrfs/version.go000066400000000000000000000007431267010174400217650ustar00rootroot00000000000000// +build linux,!btrfs_noversion package btrfs /* #include // around version 3.16, they did not define lib version yet #ifndef BTRFS_LIB_VERSION #define BTRFS_LIB_VERSION -1 #endif 
// upstream had removed it, but now it will be coming back #ifndef BTRFS_BUILD_VERSION #define BTRFS_BUILD_VERSION "-" #endif */ import "C" func btrfsBuildVersion() string { return string(C.BTRFS_BUILD_VERSION) } func btrfsLibVersion() int { return int(C.BTRFS_LIB_VERSION) } docker-1.10.3/daemon/graphdriver/btrfs/version_none.go000066400000000000000000000003701267010174400230000ustar00rootroot00000000000000// +build linux,btrfs_noversion package btrfs // TODO(vbatts) remove this work-around once supported linux distros are on // btrfs utilities of >= 3.16.1 func btrfsBuildVersion() string { return "-" } func btrfsLibVersion() int { return -1 } docker-1.10.3/daemon/graphdriver/btrfs/version_test.go000066400000000000000000000003071267010174400230200ustar00rootroot00000000000000// +build linux,!btrfs_noversion package btrfs import ( "testing" ) func TestLibVersion(t *testing.T) { if btrfsLibVersion() <= 0 { t.Errorf("expected output from btrfs lib version > 0") } } docker-1.10.3/daemon/graphdriver/devmapper/000077500000000000000000000000001267010174400206105ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/devmapper/README.md000066400000000000000000000112371267010174400220730ustar00rootroot00000000000000## devicemapper - a storage backend based on Device Mapper ### Theory of operation The device mapper graphdriver uses the device mapper thin provisioning module (dm-thinp) to implement CoW snapshots. The preferred model is to have a thin pool reserved outside of Docker and passed to the daemon via the `--storage-opt dm.thinpooldev` option. As a fallback if no thin pool is provided, loopback files will be created. Loopback is very slow, but can be used without any pre-configuration of storage. It is strongly recommended that you do not use loopback in production. Ensure your Docker daemon has a `--storage-opt dm.thinpooldev` argument provided. 
In loopback, a thin pool is created at `/var/lib/docker/devicemapper`
(devicemapper graph location) based on two block devices, one for data and
one for metadata. By default these block devices are created automatically
by using loopback mounts of automatically created sparse files.

The default loopback files used are
`/var/lib/docker/devicemapper/devicemapper/data` and
`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata
required to map from docker entities to the corresponding devicemapper
volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json`
file (encoded as JSON).

In order to support multiple devicemapper graphs on a system, the thin pool
will be named something like: `docker-0:33-19478248-pool`, where the `0:33`
part is the minor/major device number and `19478248` is the inode number of
the `/var/lib/docker/devicemapper` directory.

On the thin pool, docker automatically creates a base thin device, called
something like `docker-0:33-19478248-base` of a fixed size. This is
automatically formatted with an empty filesystem on creation. This device
is the base of all docker images and containers. All base images are
snapshots of this device and those images are then in turn used as
snapshots for other images and eventually containers.

### Information on `docker info`

As of docker-1.4.1, `docker info` when using the `devicemapper` storage
driver will display something like:

	$ sudo docker info
	[...]
	Storage Driver: devicemapper
	 Pool Name: docker-253:1-17538953-pool
	 Pool Blocksize: 65.54 kB
	 Base Device Size: 107.4 GB
	 Data file: /dev/loop4
	 Metadata file: /dev/loop4
	 Data Space Used: 2.536 GB
	 Data Space Total: 107.4 GB
	 Data Space Available: 104.8 GB
	 Metadata Space Used: 7.93 MB
	 Metadata Space Total: 2.147 GB
	 Metadata Space Available: 2.14 GB
	 Udev Sync Supported: true
	 Data loop file: /home/docker/devicemapper/devicemapper/data
	 Metadata loop file: /home/docker/devicemapper/devicemapper/metadata
	 Library Version: 1.02.82-git (2013-10-04)
	[...]

#### status items

Each item in the indented section under `Storage Driver: devicemapper` is
status information about the driver.
 *  `Pool Name` name of the devicemapper pool for this driver.
 *  `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation.
 *  `Base Device Size` tells the maximum size of a container and image
 *  `Data file` blockdevice file used for the devicemapper data
 *  `Metadata file` blockdevice file used for the devicemapper metadata
 *  `Data Space Used` tells how much of `Data file` is currently used
 *  `Data Space Total` tells the max size of the `Data file`
 *  `Data Space Available` tells how much free space there is in the
    `Data file`. If you are using a loop device this will report the actual
    space available to the loop device on the underlying filesystem.
 *  `Metadata Space Used` tells how much of `Metadata file` is currently used
 *  `Metadata Space Total` tells the max size of the `Metadata file`
 *  `Metadata Space Available` tells how much free space there is in the
    `Metadata file`. If you are using a loop device this will report the
    actual space available to the loop device on the underlying filesystem.
 *  `Udev Sync Supported` tells whether devicemapper is able to sync with
    Udev. Should be `true`.
* `Data loop file` file attached to `Data file`, if loopback device is used * `Metadata loop file` file attached to `Metadata file`, if loopback device is used * `Library Version` from the libdevmapper used ### About the devicemapper options The devicemapper backend supports some options that you can specify when starting the docker daemon using the `--storage-opt` flags. This uses the `dm` prefix and would be used something like `docker daemon --storage-opt dm.foo=bar`. These options are currently documented both in [the man page](../../../man/docker.1.md) and in [the online documentation](https://docs.docker.com/reference/commandline/daemon/#docker- execdriver-option). If you add an options, update both the `man` page and the documentation. docker-1.10.3/daemon/graphdriver/devmapper/deviceset.go000066400000000000000000002205771267010174400231270ustar00rootroot00000000000000// +build linux package devmapper import ( "bufio" "encoding/json" "errors" "fmt" "io" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "sync" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/loopback" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" "github.com/docker/go-units" "github.com/opencontainers/runc/libcontainer/label" ) var ( defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors defaultUdevSyncOverride = false maxDeviceID = 0xffffff // 24 bit, pool limit deviceIDMapSz = (maxDeviceID + 1) / 8 // We retry device removal so many a times that even error messages // will fill up console during normal operation. So only log Fatal // messages by default. 
logLevel = devicemapper.LogLevelFatal driverDeferredRemovalSupport = false enableDeferredRemoval = false enableDeferredDeletion = false userBaseSize = false ) const deviceSetMetaFile string = "deviceset-metadata" const transactionMetaFile string = "transaction-metadata" type transaction struct { OpenTransactionID uint64 `json:"open_transaction_id"` DeviceIDHash string `json:"device_hash"` DeviceID int `json:"device_id"` } type devInfo struct { Hash string `json:"-"` DeviceID int `json:"device_id"` Size uint64 `json:"size"` TransactionID uint64 `json:"transaction_id"` Initialized bool `json:"initialized"` Deleted bool `json:"deleted"` devices *DeviceSet mountCount int mountPath string // The global DeviceSet lock guarantees that we serialize all // the calls to libdevmapper (which is not threadsafe), but we // sometimes release that lock while sleeping. In that case // this per-device lock is still held, protecting against // other accesses to the device that we're doing the wait on. // // WARNING: In order to avoid AB-BA deadlocks when releasing // the global lock while holding the per-device locks all // device locks must be acquired *before* the device lock, and // multiple device locks should be acquired parent before child. 
lock sync.Mutex } type metaData struct { Devices map[string]*devInfo `json:"Devices"` } // DeviceSet holds information about list of devices type DeviceSet struct { metaData `json:"-"` sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper root string devicePrefix string TransactionID uint64 `json:"-"` NextDeviceID int `json:"next_device_id"` deviceIDMap []byte // Options dataLoopbackSize int64 metaDataLoopbackSize int64 baseFsSize uint64 filesystem string mountOptions string mkfsArgs []string dataDevice string // block or loop dev dataLoopFile string // loopback file, if used metadataDevice string // block or loop dev metadataLoopFile string // loopback file, if used doBlkDiscard bool thinpBlockSize uint32 thinPoolDevice string transaction `json:"-"` overrideUdevSyncCheck bool deferredRemove bool // use deferred removal deferredDelete bool // use deferred deletion BaseDeviceUUID string // save UUID of base device BaseDeviceFilesystem string // save filesystem of base device nrDeletedDevices uint // number of deleted devices deletionWorkerTicker *time.Ticker uidMaps []idtools.IDMap gidMaps []idtools.IDMap } // DiskUsage contains information about disk usage and is used when reporting Status of a device. type DiskUsage struct { // Used bytes on the disk. Used uint64 // Total bytes on the disk. Total uint64 // Available bytes on the disk. Available uint64 } // Status returns the information about the device. type Status struct { // PoolName is the name of the data pool. PoolName string // DataFile is the actual block device for data. DataFile string // DataLoopback loopback file, if used. DataLoopback string // MetadataFile is the actual block device for metadata. MetadataFile string // MetadataLoopback is the loopback file, if used. MetadataLoopback string // Data is the disk used for data. Data DiskUsage // Metadata is the disk used for meta data. 
Metadata DiskUsage // BaseDeviceSize is base size of container and image BaseDeviceSize uint64 // BaseDeviceFS is backing filesystem. BaseDeviceFS string // SectorSize size of the vector. SectorSize uint64 // UdevSyncSupported is true if sync is supported. UdevSyncSupported bool // DeferredRemoveEnabled is true then the device is not unmounted. DeferredRemoveEnabled bool // True if deferred deletion is enabled. This is different from // deferred removal. "removal" means that device mapper device is // deactivated. Thin device is still in thin pool and can be activated // again. But "deletion" means that thin device will be deleted from // thin pool and it can't be activated again. DeferredDeleteEnabled bool DeferredDeletedDeviceCount uint } // Structure used to export image/container metadata in docker inspect. type deviceMetadata struct { deviceID int deviceSize uint64 // size in bytes deviceName string // Device name as used during activation } // DevStatus returns information about device mounted containing its id, size and sector information. type DevStatus struct { // DeviceID is the id of the device. DeviceID int // Size is the size of the filesystem. Size uint64 // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. TransactionID uint64 // SizeInSectors indicates the size of the sectors allocated. SizeInSectors uint64 // MappedSectors indicates number of mapped sectors. MappedSectors uint64 // HighestMappedSector is the pointer to the highest mapped sector. 
HighestMappedSector uint64 } func getDevName(name string) string { return "/dev/mapper/" + name } func (info *devInfo) Name() string { hash := info.Hash if hash == "" { hash = "base" } return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) } func (info *devInfo) DevName() string { return getDevName(info.Name()) } func (devices *DeviceSet) loopbackDir() string { return path.Join(devices.root, "devicemapper") } func (devices *DeviceSet) metadataDir() string { return path.Join(devices.root, "metadata") } func (devices *DeviceSet) metadataFile(info *devInfo) string { file := info.Hash if file == "" { file = "base" } return path.Join(devices.metadataDir(), file) } func (devices *DeviceSet) transactionMetaFile() string { return path.Join(devices.metadataDir(), transactionMetaFile) } func (devices *DeviceSet) deviceSetMetaFile() string { return path.Join(devices.metadataDir(), deviceSetMetaFile) } func (devices *DeviceSet) oldMetadataFile() string { return path.Join(devices.loopbackDir(), "json") } func (devices *DeviceSet) getPoolName() string { if devices.thinPoolDevice == "" { return devices.devicePrefix + "-pool" } return devices.thinPoolDevice } func (devices *DeviceSet) getPoolDevName() string { return getDevName(devices.getPoolName()) } func (devices *DeviceSet) hasImage(name string) bool { dirname := devices.loopbackDir() filename := path.Join(dirname, name) _, err := os.Stat(filename) return err == nil } // ensureImage creates a sparse file of bytes at the path // /devicemapper/. // If the file already exists and new size is larger than its current size, it grows to the new size. // Either way it returns the full path. 
func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { dirname := devices.loopbackDir() filename := path.Join(dirname, name) uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) if err != nil { return "", err } if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { return "", err } if fi, err := os.Stat(filename); err != nil { if !os.IsNotExist(err) { return "", err } logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) if err != nil { return "", err } defer file.Close() if err := file.Truncate(size); err != nil { return "", err } } else { if fi.Size() < size { file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) if err != nil { return "", err } defer file.Close() if err := file.Truncate(size); err != nil { return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) } } else if fi.Size() > size { logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) } } return filename, nil } func (devices *DeviceSet) allocateTransactionID() uint64 { devices.OpenTransactionID = devices.TransactionID + 1 return devices.OpenTransactionID } func (devices *DeviceSet) updatePoolTransactionID() error { if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) } devices.TransactionID = devices.OpenTransactionID return nil } func (devices *DeviceSet) removeMetadata(info *devInfo) error { if err := os.RemoveAll(devices.metadataFile(info)); err != nil { return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) } return nil } // Given json data and file path, write it to disk func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { tmpFile, err := 
ioutil.TempFile(devices.metadataDir(), ".tmp") if err != nil { return fmt.Errorf("devmapper: Error creating metadata file: %s", err) } n, err := tmpFile.Write(jsonData) if err != nil { return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) } if n < len(jsonData) { return io.ErrShortWrite } if err := tmpFile.Sync(); err != nil { return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) } if err := tmpFile.Close(); err != nil { return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) } if err := os.Rename(tmpFile.Name(), filePath); err != nil { return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) } return nil } func (devices *DeviceSet) saveMetadata(info *devInfo) error { jsonData, err := json.Marshal(info) if err != nil { return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) } if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { return err } return nil } func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { var mask byte i := deviceID % 8 mask = 1 << uint(i) devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask } func (devices *DeviceSet) markDeviceIDFree(deviceID int) { var mask byte i := deviceID % 8 mask = ^(1 << uint(i)) devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask } func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { var mask byte i := deviceID % 8 mask = (1 << uint(i)) if (devices.deviceIDMap[deviceID/8] & mask) != 0 { return false } return true } // Should be called with devices.Lock() held. 
func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { info := devices.Devices[hash] if info == nil { info = devices.loadMetadata(hash) if info == nil { return nil, fmt.Errorf("devmapper: Unknown device %s", hash) } devices.Devices[hash] = info } return info, nil } func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { devices.Lock() defer devices.Unlock() info, err := devices.lookupDevice(hash) return info, err } // This function relies on that device hash map has been loaded in advance. // Should be called with devices.Lock() held. func (devices *DeviceSet) constructDeviceIDMap() { logrus.Debugf("devmapper: constructDeviceIDMap()") defer logrus.Debugf("devmapper: constructDeviceIDMap() END") for _, info := range devices.Devices { devices.markDeviceIDUsed(info.DeviceID) logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) } } func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { // Skip some of the meta files which are not device files. if strings.HasSuffix(finfo.Name(), ".migrated") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } if strings.HasPrefix(finfo.Name(), ".") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } if finfo.Name() == deviceSetMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } if finfo.Name() == transactionMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } logrus.Debugf("devmapper: Loading data for file %s", path) hash := finfo.Name() if hash == "base" { hash = "" } // Include deleted devices also as cleanup delete device logic // will go through it and see if there are any deleted devices. 
if _, err := devices.lookupDevice(hash); err != nil { return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) } return nil } func (devices *DeviceSet) loadDeviceFilesOnStart() error { logrus.Debugf("devmapper: loadDeviceFilesOnStart()") defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END") var scan = func(path string, info os.FileInfo, err error) error { if err != nil { logrus.Debugf("devmapper: Can't walk the file %s", path) return nil } // Skip any directories if info.IsDir() { return nil } return devices.deviceFileWalkFunction(path, info) } return filepath.Walk(devices.metadataDir(), scan) } // Should be called with devices.Lock() held. func (devices *DeviceSet) unregisterDevice(id int, hash string) error { logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) info := &devInfo{ Hash: hash, DeviceID: id, } delete(devices.Devices, hash) if err := devices.removeMetadata(info); err != nil { logrus.Debugf("devmapper: Error removing metadata: %s", err) return err } return nil } // Should be called with devices.Lock() held. func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) info := &devInfo{ Hash: hash, DeviceID: id, Size: size, TransactionID: transactionID, Initialized: false, devices: devices, } devices.Devices[hash] = info if err := devices.saveMetadata(info); err != nil { // Try to remove unused device delete(devices.Devices, hash) return nil, err } return info, nil } func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) if info.Deleted && !ignoreDeleted { return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) } // Make sure deferred removal on device is canceled, if one was // scheduled. 
if err := devices.cancelDeferredRemoval(info); err != nil { return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) } if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { return nil } return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) } // Return true only if kernel supports xfs and mkfs.xfs is available func xfsSupported() bool { // Make sure mkfs.xfs is available if _, err := exec.LookPath("mkfs.xfs"); err != nil { return false } // Check if kernel supports xfs filesystem or not. exec.Command("modprobe", "xfs").Run() f, err := os.Open("/proc/filesystems") if err != nil { logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) return false } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), "\txfs") { return true } } if err := s.Err(); err != nil { logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) } return false } func determineDefaultFS() string { if xfsSupported() { return "xfs" } logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesnt support it or mkfs.xfs is not in your PATH. 
Defaulting to ext4 filesystem") return "ext4" } func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { devname := info.DevName() args := []string{} for _, arg := range devices.mkfsArgs { args = append(args, arg) } args = append(args, devname) if devices.filesystem == "" { devices.filesystem = determineDefaultFS() } if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { return err } logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) defer func() { if err != nil { logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) } else { logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) } }() switch devices.filesystem { case "xfs": err = exec.Command("mkfs.xfs", args...).Run() case "ext4": err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() if err != nil { err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() } if err != nil { return err } err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() default: err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) } return } func (devices *DeviceSet) migrateOldMetaData() error { // Migrate old metadata file jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) if err != nil && !os.IsNotExist(err) { return err } if jsonData != nil { m := metaData{Devices: make(map[string]*devInfo)} if err := json.Unmarshal(jsonData, &m); err != nil { return err } for hash, info := range m.Devices { info.Hash = hash devices.saveMetadata(info) } if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { return err } } return nil } // Cleanup deleted devices. 
It assumes that all the devices have been // loaded in the hash table. func (devices *DeviceSet) cleanupDeletedDevices() error { devices.Lock() // If there are no deleted devices, there is nothing to do. if devices.nrDeletedDevices == 0 { devices.Unlock() return nil } var deletedDevices []*devInfo for _, info := range devices.Devices { if !info.Deleted { continue } logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) deletedDevices = append(deletedDevices, info) } // Delete the deleted devices. DeleteDevice() first takes the info lock // and then devices.Lock(). So drop it to avoid deadlock. devices.Unlock() for _, info := range deletedDevices { // This will again try deferred deletion. if err := devices.DeleteDevice(info.Hash, false); err != nil { logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) } } return nil } func (devices *DeviceSet) countDeletedDevices() { for _, info := range devices.Devices { if !info.Deleted { continue } devices.nrDeletedDevices++ } } func (devices *DeviceSet) startDeviceDeletionWorker() { // Deferred deletion is not enabled. Don't do anything. 
if !devices.deferredDelete { return } logrus.Debugf("devmapper: Worker to cleanup deleted devices started") for range devices.deletionWorkerTicker.C { devices.cleanupDeletedDevices() } } func (devices *DeviceSet) initMetaData() error { devices.Lock() defer devices.Unlock() if err := devices.migrateOldMetaData(); err != nil { return err } _, transactionID, _, _, _, _, err := devices.poolStatus() if err != nil { return err } devices.TransactionID = transactionID if err := devices.loadDeviceFilesOnStart(); err != nil { return fmt.Errorf("devmapper: Failed to load device files:%v", err) } devices.constructDeviceIDMap() devices.countDeletedDevices() if err := devices.processPendingTransaction(); err != nil { return err } // Start a goroutine to cleanup Deleted Devices go devices.startDeviceDeletionWorker() return nil } func (devices *DeviceSet) incNextDeviceID() { // IDs are 24bit, so wrap around devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID } func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { devices.incNextDeviceID() for i := 0; i <= maxDeviceID; i++ { if devices.isDeviceIDFree(devices.NextDeviceID) { devices.markDeviceIDUsed(devices.NextDeviceID) return devices.NextDeviceID, nil } devices.incNextDeviceID() } return 0, fmt.Errorf("devmapper: Unable to find a free device ID") } func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { devices.Lock() defer devices.Unlock() deviceID, err := devices.getNextFreeDeviceID() if err != nil { return nil, err } if err := devices.openTransaction(hash, deviceID); err != nil { logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) devices.markDeviceIDFree(deviceID) return nil, err } for { if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { if devicemapper.DeviceIDExists(err) { // Device ID already exists. This should not // happen. Now we have a mechanism to find // a free device ID. 
So something is not right. // Give a warning and continue. logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) deviceID, err = devices.getNextFreeDeviceID() if err != nil { return nil, err } // Save new device id into transaction devices.refreshTransaction(deviceID) continue } logrus.Debugf("devmapper: Error creating device: %s", err) devices.markDeviceIDFree(deviceID) return nil, err } break } logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) if err != nil { _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return nil, err } if err := devices.closeTransaction(); err != nil { devices.unregisterDevice(deviceID, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return nil, err } return info, nil } func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo) error { deviceID, err := devices.getNextFreeDeviceID() if err != nil { return err } if err := devices.openTransaction(hash, deviceID); err != nil { logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) devices.markDeviceIDFree(deviceID) return err } for { if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { if devicemapper.DeviceIDExists(err) { // Device ID already exists. This should not // happen. Now we have a mechanism to find // a free device ID. So something is not right. // Give a warning and continue. 
logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) deviceID, err = devices.getNextFreeDeviceID() if err != nil { return err } // Save new device id into transaction devices.refreshTransaction(deviceID) continue } logrus.Debugf("devmapper: Error creating snap device: %s", err) devices.markDeviceIDFree(deviceID) return err } break } if _, err := devices.registerDevice(deviceID, hash, baseInfo.Size, devices.OpenTransactionID); err != nil { devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) logrus.Debugf("devmapper: Error registering device: %s", err) return err } if err := devices.closeTransaction(); err != nil { devices.unregisterDevice(deviceID, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return err } return nil } func (devices *DeviceSet) loadMetadata(hash string) *devInfo { info := &devInfo{Hash: hash, devices: devices} jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) if err != nil { return nil } if err := json.Unmarshal(jsonData, &info); err != nil { return nil } if info.DeviceID > maxDeviceID { logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) return nil } return info } func getDeviceUUID(device string) (string, error) { out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() if err != nil { return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) } uuid := strings.TrimSuffix(string(out), "\n") uuid = strings.TrimSpace(uuid) logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) return uuid, nil } func (devices *DeviceSet) getBaseDeviceSize() uint64 { info, _ := devices.lookupDevice("") if info == nil { return 0 } return info.Size } func (devices *DeviceSet) getBaseDeviceFS() string { return devices.BaseDeviceFilesystem } func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { devices.Lock() 
	defer devices.Unlock()

	if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
		return err
	}
	defer devices.deactivateDevice(baseInfo)

	uuid, err := getDeviceUUID(baseInfo.DevName())
	if err != nil {
		return err
	}

	if devices.BaseDeviceUUID != uuid {
		return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID)
	}

	// Old metadata may predate filesystem tracking; probe and persist it
	// now so later invocations can verify it.
	if devices.BaseDeviceFilesystem == "" {
		fsType, err := ProbeFsType(baseInfo.DevName())
		if err != nil {
			return err
		}
		if err := devices.saveBaseDeviceFilesystem(fsType); err != nil {
			return err
		}
	}

	// If user specified a filesystem using dm.fs option and current
	// file system of base image is not same, warn user that dm.fs
	// will be ignored.
	if devices.BaseDeviceFilesystem != devices.filesystem {
		logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem)
		devices.filesystem = devices.BaseDeviceFilesystem
	}
	return nil
}

// saveBaseDeviceFilesystem records the base device's filesystem type in the
// device-set metadata file.
func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error {
	devices.BaseDeviceFilesystem = fs
	return devices.saveDeviceSetMetaData()
}

// saveBaseDeviceUUID activates the base device, reads its filesystem UUID and
// persists it in the device-set metadata. The device is deactivated again on
// return.
func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
	devices.Lock()
	defer devices.Unlock()

	if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
		return err
	}
	defer devices.deactivateDevice(baseInfo)

	uuid, err := getDeviceUUID(baseInfo.DevName())
	if err != nil {
		return err
	}

	devices.BaseDeviceUUID = uuid
	return devices.saveDeviceSetMetaData()
}

// createBaseImage creates and registers the base thin volume, puts a
// filesystem on it, marks it initialized and records its UUID.
func (devices *DeviceSet) createBaseImage() error {
	logrus.Debugf("devmapper: Initializing base device-mapper thin volume")

	// Create initial device
	info, err := devices.createRegisterDevice("")
	if err != nil {
		return err
	}

	logrus.Debugf("devmapper: Creating filesystem on base device-mapper thin volume")

	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
		return err
	}

	if err := devices.createFilesystem(info); err != nil {
		return err
	}

	info.Initialized = true
	if err := devices.saveMetadata(info); err != nil {
		// Roll back the in-memory flag if the metadata write failed.
		info.Initialized = false
		return err
	}

	if err := devices.saveBaseDeviceUUID(info); err != nil {
		return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
	}

	return nil
}

// Returns if thin pool device exists or not. If device exists, also makes
// sure it is a thin pool device and not some other type of device.
func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
	logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice)

	info, err := devicemapper.GetInfo(thinPoolDevice)
	if err != nil {
		return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
	}

	// Device does not exist.
	if info.Exists == 0 {
		return false, nil
	}

	_, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
	if err != nil {
		return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
	}

	if deviceType != "thin-pool" {
		return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice)
	}

	return true, nil
}

// checkThinPool verifies that a user-supplied thin pool is pristine (no used
// data blocks, zero transaction ID) before we take ownership of it.
func (devices *DeviceSet) checkThinPool() error {
	_, transactionID, dataUsed, _, _, _, err := devices.poolStatus()
	if err != nil {
		return err
	}
	if dataUsed != 0 {
		return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", devices.thinPoolDevice)
	}
	if transactionID != 0 {
		return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", devices.thinPoolDevice)
	}
	return nil
}

// Base image is initialized properly. Either save UUID for the first time
// (upgrade case) or verify the stored UUID.
func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
	// If BaseDeviceUUID is empty (upgrade case), save it and return success.
	if devices.BaseDeviceUUID == "" {
		if err := devices.saveBaseDeviceUUID(baseInfo); err != nil {
			return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
		}
		return nil
	}

	if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil {
		return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed.%v", err)
	}

	return nil
}

// checkGrowBaseDeviceFS grows the base device (and its filesystem) to the
// user-requested dm.basesize if it was increased. Shrinking is rejected.
func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {

	if !userBaseSize {
		return nil
	}

	if devices.baseFsSize < devices.getBaseDeviceSize() {
		return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize())))
	}

	if devices.baseFsSize == devices.getBaseDeviceSize() {
		return nil
	}

	info.lock.Lock()
	defer info.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	info.Size = devices.baseFsSize

	if err := devices.saveMetadata(info); err != nil {
		// Try to remove unused device
		delete(devices.Devices, info.Hash)
		return err
	}

	return devices.growFS(info)
}

// growFS mounts the device on a scratch mountpoint and runs the
// filesystem-specific grow tool (resize2fs / xfs_growfs) on it.
func (devices *DeviceSet) growFS(info *devInfo) error {
	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
		return fmt.Errorf("Error activating devmapper device: %s", err)
	}

	defer devices.deactivateDevice(info)

	fsMountPoint := "/run/docker/mnt"
	if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
		if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
			return err
		}
		defer os.RemoveAll(fsMountPoint)
	}

	options := ""
	if devices.BaseDeviceFilesystem == "xfs" {
		// XFS needs nouuid or it can't mount filesystems with the same fs
		options = joinMountOptions(options, "nouuid")
	}
	options = joinMountOptions(options, devices.mountOptions)

	if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
		return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err)
	}

	defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH)

	switch devices.BaseDeviceFilesystem {
	case "ext4":
		if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil {
			return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
		}
	case "xfs":
		if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil {
			return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
		}
	default:
		return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem)
	}
	return nil
}

// setupBaseImage reuses a properly-initialized existing base image (after
// UUID/filesystem verification and optional grow), or removes a stale one and
// creates a fresh base image.
func (devices *DeviceSet) setupBaseImage() error {
	oldInfo, _ := devices.lookupDeviceWithLock("")

	// base image already exists. If it is initialized properly, do UUID
	// verification and return. Otherwise remove image and set it up
	// fresh.
	if oldInfo != nil {
		if oldInfo.Initialized && !oldInfo.Deleted {
			if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil {
				return err
			}

			if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil {
				return err
			}

			return nil
		}

		logrus.Debugf("devmapper: Removing uninitialized base image")
		// If previous base device is in deferred delete state,
		// that needs to be cleaned up first. So don't try
		// deferred deletion.
		if err := devices.DeleteDevice("", true); err != nil {
			return err
		}
	}

	// If we are setting up base image for the first time, make sure
	// thin pool is empty.
	if devices.thinPoolDevice != "" && oldInfo == nil {
		if err := devices.checkThinPool(); err != nil {
			return err
		}
	}

	// Create new base image device
	if err := devices.createBaseImage(); err != nil {
		return err
	}

	return nil
}

// setCloseOnExec scans /proc/self/fd and sets the close-on-exec flag on any
// descriptor whose symlink target matches name.
func setCloseOnExec(name string) {
	if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
		for _, i := range fileInfos {
			link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
			if link == name {
				fd, err := strconv.Atoi(i.Name())
				if err == nil {
					syscall.CloseOnExec(fd)
				}
			}
		}
	}
}

// DMLog implements logging using DevMapperLogger interface.
func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
	// By default libdm sends us all the messages including debug ones.
// We need to filter out messages here and figure out which one // should be printed. if level > logLevel { return } // FIXME(vbatts) push this back into ./pkg/devicemapper/ if level <= devicemapper.LogLevelErr { logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } else if level <= devicemapper.LogLevelInfo { logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } else { // FIXME(vbatts) push this back into ./pkg/devicemapper/ logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } } func major(device uint64) uint64 { return (device >> 8) & 0xfff } func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) } // ResizePool increases the size of the pool. func (devices *DeviceSet) ResizePool(size int64) error { dirname := devices.loopbackDir() datafilename := path.Join(dirname, "data") if len(devices.dataDevice) > 0 { datafilename = devices.dataDevice } metadatafilename := path.Join(dirname, "metadata") if len(devices.metadataDevice) > 0 { metadatafilename = devices.metadataDevice } datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) if datafile == nil { return err } defer datafile.Close() fi, err := datafile.Stat() if fi == nil { return err } if fi.Size() > size { return fmt.Errorf("devmapper: Can't shrink file") } dataloopback := loopback.FindLoopDeviceFor(datafile) if dataloopback == nil { return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) } defer dataloopback.Close() metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) if metadatafile == nil { return err } defer metadatafile.Close() metadataloopback := loopback.FindLoopDeviceFor(metadatafile) if metadataloopback == nil { return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) } defer metadataloopback.Close() // Grow loopback file if err := datafile.Truncate(size); err != nil { return fmt.Errorf("devmapper: 
Unable to grow loopback file: %s", err) } // Reload size for loopback device if err := loopback.SetCapacity(dataloopback); err != nil { return fmt.Errorf("Unable to update loopback capacity: %s", err) } // Suspend the pool if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) } // Reload with the new block sizes if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { return fmt.Errorf("devmapper: Unable to reload pool: %s", err) } // Resume the pool if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { return fmt.Errorf("devmapper: Unable to resume pool: %s", err) } return nil } func (devices *DeviceSet) loadTransactionMetaData() error { jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) if err != nil { // There is no active transaction. This will be the case // during upgrade. if os.IsNotExist(err) { devices.OpenTransactionID = devices.TransactionID return nil } return err } json.Unmarshal(jsonData, &devices.transaction) return nil } func (devices *DeviceSet) saveTransactionMetaData() error { jsonData, err := json.Marshal(&devices.transaction) if err != nil { return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) } return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) } func (devices *DeviceSet) removeTransactionMetaData() error { if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { return err } return nil } func (devices *DeviceSet) rollbackTransaction() error { logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) // A device id might have already been deleted before transaction // closed. In that case this call will fail. Just leave a message // in case of failure. 
	if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil {
		logrus.Errorf("devmapper: Unable to delete device: %s", err)
	}

	dinfo := &devInfo{Hash: devices.DeviceIDHash}
	if err := devices.removeMetadata(dinfo); err != nil {
		logrus.Errorf("devmapper: Unable to remove metadata: %s", err)
	} else {
		// Only free the device ID once its metadata is gone.
		devices.markDeviceIDFree(devices.DeviceID)
	}

	if err := devices.removeTransactionMetaData(); err != nil {
		logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err)
	}

	return nil
}

// processPendingTransaction compares the pool transaction ID with the open
// transaction recorded on disk and rolls back any transaction that was opened
// but never completed.
func (devices *DeviceSet) processPendingTransaction() error {
	if err := devices.loadTransactionMetaData(); err != nil {
		return err
	}

	// If there was open transaction but pool transaction ID is same
	// as open transaction ID, nothing to roll back.
	if devices.TransactionID == devices.OpenTransactionID {
		return nil
	}

	// If open transaction ID is less than pool transaction ID, something
	// is wrong. Bail out.
	if devices.OpenTransactionID < devices.TransactionID {
		logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID)
		return nil
	}

	// Pool transaction ID is not same as open transaction. There is
	// a transaction which was not completed.
	if err := devices.rollbackTransaction(); err != nil {
		return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err)
	}

	devices.OpenTransactionID = devices.TransactionID
	return nil
}

// loadDeviceSetMetaData loads the device-set-wide metadata (e.g. NextDeviceID)
// from the metadata file, tolerating a missing file for backward
// compatibility.
func (devices *DeviceSet) loadDeviceSetMetaData() error {
	jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile())
	if err != nil {
		// For backward compatibility return success if file does
		// not exist.
if os.IsNotExist(err) { return nil } return err } return json.Unmarshal(jsonData, devices) } func (devices *DeviceSet) saveDeviceSetMetaData() error { jsonData, err := json.Marshal(devices) if err != nil { return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) } return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) } func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { devices.allocateTransactionID() devices.DeviceIDHash = hash devices.DeviceID = DeviceID if err := devices.saveTransactionMetaData(); err != nil { return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) } return nil } func (devices *DeviceSet) refreshTransaction(DeviceID int) error { devices.DeviceID = DeviceID if err := devices.saveTransactionMetaData(); err != nil { return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) } return nil } func (devices *DeviceSet) closeTransaction() error { if err := devices.updatePoolTransactionID(); err != nil { logrus.Debugf("devmapper: Failed to close Transaction") return err } return nil } func determineDriverCapabilities(version string) error { /* * Driver version 4.27.0 and greater support deferred activation * feature. */ logrus.Debugf("devicemapper: driver version is %s", version) versionSplit := strings.Split(version, ".") major, err := strconv.Atoi(versionSplit[0]) if err != nil { return graphdriver.ErrNotSupported } if major > 4 { driverDeferredRemovalSupport = true return nil } if major < 4 { return nil } minor, err := strconv.Atoi(versionSplit[1]) if err != nil { return graphdriver.ErrNotSupported } /* * If major is 4 and minor is 27, then there is no need to * check for patch level as it can not be less than 0. 
*/ if minor >= 27 { driverDeferredRemovalSupport = true return nil } return nil } // Determine the major and minor number of loopback device func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { stat, err := file.Stat() if err != nil { return 0, 0, err } dev := stat.Sys().(*syscall.Stat_t).Rdev majorNum := major(dev) minorNum := minor(dev) logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) return majorNum, minorNum, nil } // Given a file which is backing file of a loop back device, find the // loopback device name and its major/minor number. func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { file, err := os.Open(filename) if err != nil { logrus.Debugf("devmapper: Failed to open file %s", filename) return "", 0, 0, err } defer file.Close() loopbackDevice := loopback.FindLoopDeviceFor(file) if loopbackDevice == nil { return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) } defer loopbackDevice.Close() Major, Minor, err := getDeviceMajorMinor(loopbackDevice) if err != nil { return "", 0, 0, err } return loopbackDevice.Name(), Major, Minor, nil } // Get the major/minor numbers of thin pool data and metadata devices func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { var params, poolDataMajMin, poolMetadataMajMin string _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) if err != nil { return 0, 0, 0, 0, err } if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { return 0, 0, 0, 0, err } logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) if err != nil { return 0, 0, 0, 0, err } poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) if err != nil { 
return 0, 0, 0, 0, err } poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) if err != nil { return 0, 0, 0, 0, err } poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) if err != nil { return 0, 0, 0, 0, err } return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil } func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() if err != nil { return err } dirname := devices.loopbackDir() // data device has not been passed in. So there should be a data file // which is being mounted as loop device. if devices.dataDevice == "" { datafilename := path.Join(dirname, "data") dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) if err != nil { return err } // Compare the two if poolDataMajor == dataMajor && poolDataMinor == dataMinor { devices.dataDevice = dataLoopDevice devices.dataLoopFile = datafilename } } // metadata device has not been passed in. So there should be a // metadata file which is being mounted as loop device. 
	if devices.metadataDevice == "" {
		metadatafilename := path.Join(dirname, "metadata")
		metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename)
		if err != nil {
			return err
		}
		if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor {
			devices.metadataDevice = metadataLoopDevice
			devices.metadataLoopFile = metadatafilename
		}
	}

	return nil
}

// initDevmapper initializes the devicemapper driver: checks driver
// capabilities, prepares the metadata directories, finds or creates the thin
// pool (attaching loopback images when no real devices were supplied) and
// sets up the base image.
func (devices *DeviceSet) initDevmapper(doInit bool) error {
	// give ourselves to libdm as a log handler
	devicemapper.LogInit(devices)

	version, err := devicemapper.GetDriverVersion()
	if err != nil {
		// Can't even get driver version, assume not supported
		return graphdriver.ErrNotSupported
	}

	if err := determineDriverCapabilities(version); err != nil {
		return graphdriver.ErrNotSupported
	}

	// If user asked for deferred removal then check both libdm library
	// and kernel driver support deferred removal otherwise error out.
	if enableDeferredRemoval {
		if !driverDeferredRemovalSupport {
			return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it")
		}
		if !devicemapper.LibraryDeferredRemovalSupport {
			return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
		}
		logrus.Debugf("devmapper: Deferred removal support enabled.")
		devices.deferredRemove = true
	}

	// Deferred deletion builds on deferred removal, so it cannot be
	// enabled on its own.
	if enableDeferredDeletion {
		if !devices.deferredRemove {
			return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
		}
		logrus.Debugf("devmapper: Deferred deletion support enabled.")
		devices.deferredDelete = true
	}

	// https://github.com/docker/docker/issues/4036
	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
		logrus.Warn("devmapper: Udev sync is not supported. This will lead to unexpected behavior, data loss and errors. For more information, see https://docs.docker.com/reference/commandline/daemon/#daemon-storage-driver-option")
	}

	//create the root dir of the devmapper driver ownership to match this
	//daemon's remapped root uid/gid so containers can start properly
	uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
	if err != nil {
		return err
	}
	if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
		return err
	}

	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
		return err
	}

	// Set the device prefix from the device id and inode of the docker root dir
	st, err := os.Stat(devices.root)
	if err != nil {
		return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
	}
	sysSt := st.Sys().(*syscall.Stat_t)
	// "reg-" stands for "regular file".
	// In the future we might use "dev-" for "device file", etc.
	// docker-maj,min[-inode] stands for:
	//	- Managed by docker
	//	- The target of this device is at major <maj> and minor <min>
	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
	logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)

	// Check for the existence of the thin-pool device
	poolExists, err := devices.thinPoolExists(devices.getPoolName())
	if err != nil {
		return err
	}

	// It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
	// that are not Close-on-exec,
	// so we add this badhack to make sure it closes itself
	setCloseOnExec("/dev/mapper/control")

	// Make sure the sparse images exist in <root>/devicemapper/data and
	// <root>/devicemapper/metadata
	createdLoopback := false

	// If the pool doesn't exist, create it
	if !poolExists && devices.thinPoolDevice == "" {
		logrus.Debugf("devmapper: Pool doesn't exist. Creating it.")

		var (
			dataFile     *os.File
			metadataFile *os.File
		)

		if devices.dataDevice == "" {
			// Make sure the sparse images exist in <root>/devicemapper/data
			hasData := devices.hasImage("data")

			if !doInit && !hasData {
				return errors.New("Loopback data file not found")
			}

			if !hasData {
				createdLoopback = true
			}

			data, err := devices.ensureImage("data", devices.dataLoopbackSize)
			if err != nil {
				logrus.Debugf("devmapper: Error device ensureImage (data): %s", err)
				return err
			}

			dataFile, err = loopback.AttachLoopDevice(data)
			if err != nil {
				return err
			}
			devices.dataLoopFile = data
			devices.dataDevice = dataFile.Name()
		} else {
			dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
			if err != nil {
				return err
			}
		}
		defer dataFile.Close()

		if devices.metadataDevice == "" {
			// Make sure the sparse images exist in <root>/devicemapper/metadata
			hasMetadata := devices.hasImage("metadata")

			if !doInit && !hasMetadata {
				return errors.New("Loopback metadata file not found")
			}

			if !hasMetadata {
				createdLoopback = true
			}

			metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
			if err != nil {
				logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err)
				return err
			}

			metadataFile, err = loopback.AttachLoopDevice(metadata)
			if err != nil {
				return err
			}
			devices.metadataLoopFile = metadata
			devices.metadataDevice = metadataFile.Name()
		} else {
			metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
			if err != nil {
				return err
			}
		}
		defer metadataFile.Close()

		if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
			return err
		}
	}

	// Pool already exists and caller did not pass us a pool. That means
	// we probably created pool earlier and could not remove it as some
	// containers were still using it. Detect some of the properties of
	// pool, like is it using loop devices.
	if poolExists && devices.thinPoolDevice == "" {
		if err := devices.loadThinPoolLoopBackInfo(); err != nil {
			logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err)
			return err
		}
	}

	// If we didn't just create the data or metadata image, we need to
	// load the transaction id and migrate old metadata
	if !createdLoopback {
		if err := devices.initMetaData(); err != nil {
			return err
		}
	}

	if devices.thinPoolDevice == "" {
		if devices.metadataLoopFile != "" || devices.dataLoopFile != "" {
			logrus.Warnf("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.")
		}
	}

	// Right now this loads only NextDeviceID. If there is more metadata
	// down the line, we might have to move it earlier.
	if err := devices.loadDeviceSetMetaData(); err != nil {
		return err
	}

	// Setup the base image
	if doInit {
		if err := devices.setupBaseImage(); err != nil {
			logrus.Debugf("devmapper: Error device setupBaseImage: %s", err)
			return err
		}
	}

	return nil
}

// AddDevice adds a device and registers in the hash.
func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
	logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash)
	defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash)

	// If a deleted device exists, return error.
	baseInfo, err := devices.lookupDeviceWithLock(baseHash)
	if err != nil {
		return err
	}

	if baseInfo.Deleted {
		return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash)
	}

	// Lock order: per-device lock first, then the device-set lock.
	baseInfo.lock.Lock()
	defer baseInfo.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	// Also include deleted devices in case hash of new device is
	// same as one of the deleted devices.
	if info, _ := devices.lookupDevice(hash); info != nil {
		return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted)
	}

	if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil {
		return err
	}

	return nil
}

// markForDeferredDeletion flags a device as deleted in its metadata so a
// later cleanup pass retries the deletion.
func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
	// If device is already in deleted state, there is nothing to be done.
	if info.Deleted {
		return nil
	}

	logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash)

	info.Deleted = true

	// save device metadata to reflect deleted state.
	if err := devices.saveMetadata(info); err != nil {
		info.Deleted = false
		return err
	}

	devices.nrDeletedDevices++
	return nil
}

// Should be called with devices.Lock() held.
func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error {
	if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
		logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID)
		return err
	}

	defer devices.closeTransaction()

	err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID)
	if err != nil {
		// If syncDelete is true, we want to return error. If deferred
		// deletion is not enabled, we return an error. If error is
		// something other then EBUSY, return an error.
		if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy {
			logrus.Debugf("devmapper: Error deleting device: %s", err)
			return err
		}
	}

	if err == nil {
		if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil {
			return err
		}
		// If device was already in deferred delete state that means
		// deletion was being tried again later. Reduce the deleted
		// device count.
		if info.Deleted {
			devices.nrDeletedDevices--
		}
		devices.markDeviceIDFree(info.DeviceID)
	} else {
		// EBUSY with deferred deletion enabled: retry later.
		if err := devices.markForDeferredDeletion(info); err != nil {
			return err
		}
	}

	return nil
}

// Issue discard only if device open count is zero.
func (devices *DeviceSet) issueDiscard(info *devInfo) error {
	logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash)
	defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash)
	// This is a workaround for the kernel not discarding block so
	// on the thin pool when we remove a thinp device, so we do it
	// manually.
	// Even if device is deferred deleted, activate it and issue
	// discards.
	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
		return err
	}

	devinfo, err := devicemapper.GetInfo(info.Name())
	if err != nil {
		return err
	}

	if devinfo.OpenCount != 0 {
		logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
		return nil
	}

	// Discard failures are non-fatal; the delete proceeds regardless.
	if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
		logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
	}
	return nil
}

// Should be called with devices.Lock() held.
func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
	if devices.doBlkDiscard {
		devices.issueDiscard(info)
	}

	// Try to deactivate device in case it is active.
	if err := devices.deactivateDevice(info); err != nil {
		logrus.Debugf("devmapper: Error deactivating device: %s", err)
		return err
	}

	if err := devices.deleteTransaction(info, syncDelete); err != nil {
		return err
	}

	return nil
}

// DeleteDevice will return success if device has been marked for deferred
// removal. If one wants to override that and want DeleteDevice() to fail if
// device was busy and could not be deleted, set syncDelete=true.
func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
	logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete)
	defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete)
	info, err := devices.lookupDeviceWithLock(hash)
	if err != nil {
		return err
	}

	info.lock.Lock()
	defer info.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	// If mountcount is not zero, that means devices is still in use
	// or has not been Put() properly. Fail device deletion.
	if info.mountCount != 0 {
		return fmt.Errorf("devmapper: Can't delete device %v as it is still mounted. mntCount=%v", info.Hash, info.mountCount)
	}

	return devices.deleteDevice(info, syncDelete)
}

// deactivatePool removes the thin-pool device-mapper device if it exists and
// warns when the pool still has active dependents.
func (devices *DeviceSet) deactivatePool() error {
	logrus.Debugf("devmapper: deactivatePool()")
	defer logrus.Debugf("devmapper: deactivatePool END")
	devname := devices.getPoolDevName()

	devinfo, err := devicemapper.GetInfo(devname)
	if err != nil {
		return err
	}

	if devinfo.Exists == 0 {
		return nil
	}
	if err := devicemapper.RemoveDevice(devname); err != nil {
		return err
	}

	if d, err := devicemapper.GetDeps(devname); err == nil {
		logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
	}

	return nil
}

// deactivateDevice removes a thin device's dm mapping, using deferred removal
// when enabled, otherwise retrying a synchronous remove.
func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
	logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash)
	defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)

	devinfo, err := devicemapper.GetInfo(info.Name())
	if err != nil {
		return err
	}

	if devinfo.Exists == 0 {
		return nil
	}

	if devices.deferredRemove {
		if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
			return err
		}
	} else {
		if err := devices.removeDevice(info.Name()); err != nil {
			return err
		}
	}
	return nil
}

// Issues the underlying dm remove operation.
func (devices *DeviceSet) removeDevice(devname string) error {
	var err error

	logrus.Debugf("devmapper: removeDevice START(%s)", devname)
	defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)

	for i := 0; i < 200; i++ {
		err = devicemapper.RemoveDevice(devname)
		if err == nil {
			break
		}
		if err != devicemapper.ErrBusy {
			return err
		}

		// If we see EBUSY it may be a transient error,
		// sleep a bit a retry a few times.
		// Drop the device-set lock while sleeping so other operations
		// (which may release the busy device) can make progress.
		devices.Unlock()
		time.Sleep(100 * time.Millisecond)
		devices.Lock()
	}

	return err
}

// cancelDeferredRemoval cancels a pending deferred removal for the device,
// retrying on EBUSY and treating ENXIO (device already gone) as success.
func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
	if !devices.deferredRemove {
		return nil
	}

	logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
	defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())

	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())

	if devinfo != nil && devinfo.DeferredRemove == 0 {
		return nil
	}

	// Cancel deferred remove
	for i := 0; i < 100; i++ {
		err = devicemapper.CancelDeferredRemove(info.Name())
		if err == nil {
			break
		}

		if err == devicemapper.ErrEnxio {
			// Device is probably already gone. Return success.
			return nil
		}

		if err != devicemapper.ErrBusy {
			return err
		}

		// If we see EBUSY it may be a transient error,
		// sleep a bit a retry a few times.
		devices.Unlock()
		time.Sleep(100 * time.Millisecond)
		devices.Lock()
	}
	return err
}

// Shutdown shuts down the device by unmounting the root.
func (devices *DeviceSet) Shutdown() error {
	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)

	var devs []*devInfo

	// Stop deletion worker. After this call no new instance of
	// cleanupDeletedDevice() will run. If one instance is already running
	// at the time of the call, it must be holding devices.Lock() and
	// we will block on this lock till cleanup function exits.
	devices.deletionWorkerTicker.Stop()

	devices.Lock()
	// Save DeviceSet Metadata first. Docker kills all threads if they
	// don't finish in certain time. It is possible that Shutdown()
	// routine does not finish in time as we loop trying to deactivate
	// some devices while these are busy. In that case shutdown() routine
	// will be killed and we will not get a chance to save deviceset
	// metadata. Hence save this early before trying to deactivate devices.
	devices.saveDeviceSetMetaData()

	for _, info := range devices.Devices {
		devs = append(devs, info)
	}
	devices.Unlock()

	for _, info := range devs {
		info.lock.Lock()
		if info.mountCount > 0 {
			// We use MNT_DETACH here in case it is still busy in some running
			// container. This means it'll go away from the global scope directly,
			// and the device will be released when that container dies.
			if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", info.mountPath, err)
			}

			devices.Lock()
			if err := devices.deactivateDevice(info); err != nil {
				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", info.Hash, err)
			}
			devices.Unlock()
		}
		info.lock.Unlock()
	}

	// Deactivate the base device ("") last among the thin devices.
	info, _ := devices.lookupDeviceWithLock("")
	if info != nil {
		info.lock.Lock()
		devices.Lock()
		if err := devices.deactivateDevice(info); err != nil {
			logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
		}
		devices.Unlock()
		info.lock.Unlock()
	}

	devices.Lock()
	// Only deactivate the pool if we created it (no user-supplied pool).
	if devices.thinPoolDevice == "" {
		if err := devices.deactivatePool(); err != nil {
			logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
		}
	}
	devices.Unlock()

	return nil
}

// MountDevice mounts the device if not already mounted.
func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { info, err := devices.lookupDeviceWithLock(hash) if err != nil { return err } if info.Deleted { return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() if info.mountCount > 0 { if path != info.mountPath { return fmt.Errorf("devmapper: Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path) } info.mountCount++ return nil } if err := devices.activateDeviceIfNeeded(info, false); err != nil { return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) } fstype, err := ProbeFsType(info.DevName()) if err != nil { return err } options := "" if fstype == "xfs" { // XFS needs nouuid or it can't mount filesystems with the same fs options = joinMountOptions(options, "nouuid") } options = joinMountOptions(options, devices.mountOptions) options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) } info.mountCount = 1 info.mountPath = path return nil } // UnmountDevice unmounts the device and removes it from hash. func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) info, err := devices.lookupDeviceWithLock(hash) if err != nil { return err } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() // If there are running containers when daemon crashes, during daemon // restarting, it will kill running containers and will finally call // Put() without calling Get(). So info.MountCount may become negative. 
// if info.mountCount goes negative, we do the unmount and assign // it to 0. info.mountCount-- if info.mountCount > 0 { return nil } else if info.mountCount < 0 { logrus.Warnf("devmapper: Mount count of device went negative. Put() called without matching Get(). Resetting count to 0") info.mountCount = 0 } logrus.Debugf("devmapper: Unmount(%s)", mountPath) if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { return err } logrus.Debugf("devmapper: Unmount done") if err := devices.deactivateDevice(info); err != nil { return err } info.mountPath = "" return nil } // HasDevice returns true if the device metadata exists. func (devices *DeviceSet) HasDevice(hash string) bool { info, _ := devices.lookupDeviceWithLock(hash) return info != nil } // List returns a list of device ids. func (devices *DeviceSet) List() []string { devices.Lock() defer devices.Unlock() ids := make([]string, len(devices.Devices)) i := 0 for k := range devices.Devices { ids[i] = k i++ } return ids } func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { var params string _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) if err != nil { return } if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { return } return } // GetDeviceStatus provides size, mapped sectors func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { info, err := devices.lookupDeviceWithLock(hash) if err != nil { return nil, err } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() status := &DevStatus{ DeviceID: info.DeviceID, Size: info.Size, TransactionID: info.TransactionID, } if err := devices.activateDeviceIfNeeded(info, false); err != nil { return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) } sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) if 
err != nil { return nil, err } status.SizeInSectors = sizeInSectors status.MappedSectors = mappedSectors status.HighestMappedSector = highestMappedSector return status, nil } func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { var params string if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) } return } // DataDevicePath returns the path to the data storage for this deviceset, // regardless of loopback or block device func (devices *DeviceSet) DataDevicePath() string { return devices.dataDevice } // MetadataDevicePath returns the path to the metadata storage for this deviceset, // regardless of loopback or block device func (devices *DeviceSet) MetadataDevicePath() string { return devices.metadataDevice } func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { buf := new(syscall.Statfs_t) if err := syscall.Statfs(loopFile, buf); err != nil { logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) return 0, err } return buf.Bfree * uint64(buf.Bsize), nil } func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { if loopFile != "" { fi, err := os.Stat(loopFile) if err != nil { logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) return false, err } return fi.Mode().IsRegular(), nil } return false, nil } // Status returns the current status of this deviceset func (devices *DeviceSet) Status() *Status { devices.Lock() defer devices.Unlock() status := &Status{} status.PoolName = devices.getPoolName() status.DataFile = devices.DataDevicePath() status.DataLoopback = devices.dataLoopFile status.MetadataFile = devices.MetadataDevicePath() status.MetadataLoopback = devices.metadataLoopFile status.UdevSyncSupported = 
devicemapper.UdevSyncSupported() status.DeferredRemoveEnabled = devices.deferredRemove status.DeferredDeleteEnabled = devices.deferredDelete status.DeferredDeletedDeviceCount = devices.nrDeletedDevices status.BaseDeviceSize = devices.getBaseDeviceSize() status.BaseDeviceFS = devices.getBaseDeviceFS() totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() if err == nil { // Convert from blocks to bytes blockSizeInSectors := totalSizeInSectors / dataTotal status.Data.Used = dataUsed * blockSizeInSectors * 512 status.Data.Total = dataTotal * blockSizeInSectors * 512 status.Data.Available = status.Data.Total - status.Data.Used // metadata blocks are always 4k status.Metadata.Used = metadataUsed * 4096 status.Metadata.Total = metadataTotal * 4096 status.Metadata.Available = status.Metadata.Total - status.Metadata.Used status.SectorSize = blockSizeInSectors * 512 if check, _ := devices.isRealFile(devices.dataLoopFile); check { actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) if err == nil && actualSpace < status.Data.Available { status.Data.Available = actualSpace } } if check, _ := devices.isRealFile(devices.metadataLoopFile); check { actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) if err == nil && actualSpace < status.Metadata.Available { status.Metadata.Available = actualSpace } } } return status } // Status returns the current status of this deviceset func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { info, err := devices.lookupDeviceWithLock(hash) if err != nil { return nil, err } info.lock.Lock() defer info.lock.Unlock() metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} return metadata, nil } // NewDeviceSet creates the device set based on the options provided. 
func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { devicemapper.SetDevDir("/dev") devices := &DeviceSet{ root: root, metaData: metaData{Devices: make(map[string]*devInfo)}, dataLoopbackSize: defaultDataLoopbackSize, metaDataLoopbackSize: defaultMetaDataLoopbackSize, baseFsSize: defaultBaseFsSize, overrideUdevSyncCheck: defaultUdevSyncOverride, doBlkDiscard: true, thinpBlockSize: defaultThinpBlockSize, deviceIDMap: make([]byte, deviceIDMapSz), deletionWorkerTicker: time.NewTicker(time.Second * 30), uidMaps: uidMaps, gidMaps: gidMaps, } foundBlkDiscard := false for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "dm.basesize": size, err := units.RAMInBytes(val) if err != nil { return nil, err } userBaseSize = true devices.baseFsSize = uint64(size) case "dm.loopdatasize": size, err := units.RAMInBytes(val) if err != nil { return nil, err } devices.dataLoopbackSize = size case "dm.loopmetadatasize": size, err := units.RAMInBytes(val) if err != nil { return nil, err } devices.metaDataLoopbackSize = size case "dm.fs": if val != "ext4" && val != "xfs" { return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) } devices.filesystem = val case "dm.mkfsarg": devices.mkfsArgs = append(devices.mkfsArgs, val) case "dm.mountopt": devices.mountOptions = joinMountOptions(devices.mountOptions, val) case "dm.metadatadev": devices.metadataDevice = val case "dm.datadev": devices.dataDevice = val case "dm.thinpooldev": devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") case "dm.blkdiscard": foundBlkDiscard = true devices.doBlkDiscard, err = strconv.ParseBool(val) if err != nil { return nil, err } case "dm.blocksize": size, err := units.RAMInBytes(val) if err != nil { return nil, err } // convert to 512b sectors devices.thinpBlockSize = uint32(size) >> 9 case 
"dm.override_udev_sync_check": devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "dm.use_deferred_removal": enableDeferredRemoval, err = strconv.ParseBool(val) if err != nil { return nil, err } case "dm.use_deferred_deletion": enableDeferredDeletion, err = strconv.ParseBool(val) if err != nil { return nil, err } default: return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) } } // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { devices.doBlkDiscard = false } if err := devices.initDevmapper(doInit); err != nil { return nil, err } return devices, nil } docker-1.10.3/daemon/graphdriver/devmapper/devmapper_doc.go000066400000000000000000000053451267010174400237560ustar00rootroot00000000000000package devmapper // Definition of struct dm_task and sub structures (from lvm2) // // struct dm_ioctl { // /* // * The version number is made up of three parts: // * major - no backward or forward compatibility, // * minor - only backwards compatible, // * patch - both backwards and forwards compatible. // * // * All clients of the ioctl interface should fill in the // * version number of the interface that they were // * compiled with. // * // * All recognized ioctl commands (ie. those that don't // * return -ENOTTY) fill out this field, even if the // * command failed. // */ // uint32_t version[3]; /* in/out */ // uint32_t data_size; /* total size of data passed in // * including this struct */ // uint32_t data_start; /* offset to start of data // * relative to start of this struct */ // uint32_t target_count; /* in/out */ // int32_t open_count; /* out */ // uint32_t flags; /* in/out */ // /* // * event_nr holds either the event number (input and output) or the // * udev cookie value (input only). // * The DM_DEV_WAIT ioctl takes an event number as input. 
// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls // * use the field as a cookie to return in the DM_COOKIE // * variable with the uevents they issue. // * For output, the ioctls return the event number, not the cookie. // */ // uint32_t event_nr; /* in/out */ // uint32_t padding; // uint64_t dev; /* in/out */ // char name[DM_NAME_LEN]; /* device name */ // char uuid[DM_UUID_LEN]; /* unique identifier for // * the block device */ // char data[7]; /* padding or data */ // }; // struct target { // uint64_t start; // uint64_t length; // char *type; // char *params; // struct target *next; // }; // typedef enum { // DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ // DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ // } dm_add_node_t; // struct dm_task { // int type; // char *dev_name; // char *mangled_dev_name; // struct target *head, *tail; // int read_only; // uint32_t event_nr; // int major; // int minor; // int allow_default_major_fallback; // uid_t uid; // gid_t gid; // mode_t mode; // uint32_t read_ahead; // uint32_t read_ahead_flags; // union { // struct dm_ioctl *v4; // } dmi; // char *newname; // char *message; // char *geometry; // uint64_t sector; // int no_flush; // int no_open_count; // int skip_lockfs; // int query_inactive_table; // int suppress_identical_reload; // dm_add_node_t add_node; // uint64_t existing_table_size; // int cookie_set; // int new_uuid; // int secure_data; // int retry_remove; // int enable_checks; // int expected_errno; // char *uuid; // char *mangled_uuid; // }; // docker-1.10.3/daemon/graphdriver/devmapper/devmapper_test.go000066400000000000000000000070641267010174400241700ustar00rootroot00000000000000// +build linux package devmapper import ( "fmt" "testing" "time" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/graphtest" ) func init() { // Reduce the size the the base fs and loopback for the tests defaultDataLoopbackSize = 300 * 1024 
* 1024 defaultMetaDataLoopbackSize = 200 * 1024 * 1024 defaultBaseFsSize = 300 * 1024 * 1024 defaultUdevSyncOverride = true if err := graphtest.InitLoopbacks(); err != nil { panic(err) } } // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown func TestDevmapperSetup(t *testing.T) { graphtest.GetDriver(t, "devicemapper") } func TestDevmapperCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "devicemapper") } func TestDevmapperCreateBase(t *testing.T) { graphtest.DriverTestCreateBase(t, "devicemapper") } func TestDevmapperCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "devicemapper") } func TestDevmapperTeardown(t *testing.T) { graphtest.PutDriver(t) } func TestDevmapperReduceLoopBackSize(t *testing.T) { tenMB := int64(10 * 1024 * 1024) testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize) } func TestDevmapperIncreaseLoopBackSize(t *testing.T) { tenMB := int64(10 * 1024 * 1024) testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB) } func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) { driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) defer graphtest.PutDriver(t) // make sure data or metadata loopback size are the default size if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) { t.Fatalf("data or metadata loop back size is incorrect") } if err := driver.Cleanup(); err != nil { t.Fatal(err) } //Reload d, err := Init(driver.home, []string{ fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta), fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta), }, nil, nil) if err != nil { t.Fatalf("error creating devicemapper driver: %v", err) } 
driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) { t.Fatalf("data or metadata loop back size is incorrect") } if err := driver.Cleanup(); err != nil { t.Fatal(err) } } // Make sure devices.Lock() has been release upon return from cleanupDeletedDevices() function func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) { driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) defer graphtest.PutDriver(t) // Call cleanupDeletedDevices() and after the call take and release // DeviceSet Lock. If lock has not been released, this will hang. driver.DeviceSet.cleanupDeletedDevices() doneChan := make(chan bool) go func() { driver.DeviceSet.Lock() defer driver.DeviceSet.Unlock() doneChan <- true }() select { case <-time.After(time.Second * 5): // Timer expired. That means lock was not released upon // function return and we are deadlocked. Release lock // here so that cleanup could succeed and fail the test. driver.DeviceSet.Unlock() t.Fatalf("Could not acquire devices lock after call to cleanupDeletedDevices()") case <-doneChan: } } docker-1.10.3/daemon/graphdriver/devmapper/driver.go000066400000000000000000000135301267010174400224340ustar00rootroot00000000000000// +build linux package devmapper import ( "fmt" "io/ioutil" "os" "path" "strconv" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/go-units" ) func init() { graphdriver.Register("devicemapper", Init) } // Driver contains the device set mounted and the home directory type Driver struct { *DeviceSet home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap } // Init creates a driver with the given home and the set of options. 
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) if err != nil { return nil, err } if err := mount.MakePrivate(home); err != nil { return nil, err } d := &Driver{ DeviceSet: deviceSet, home: home, uidMaps: uidMaps, gidMaps: gidMaps, } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil } func (d *Driver) String() string { return "devicemapper" } // Status returns the status about the driver in a printable format. // Information returned contains Pool Name, Data File, Metadata file, disk usage by // the data and metadata, etc. func (d *Driver) Status() [][2]string { s := d.DeviceSet.Status() status := [][2]string{ {"Pool Name", s.PoolName}, {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, {"Backing Filesystem", s.BaseDeviceFS}, {"Data file", s.DataFile}, {"Metadata file", s.MetadataFile}, {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, } if len(s.DataLoopback) > 0 { status = append(status, [2]string{"Data loop file", s.DataLoopback}) } if 
len(s.MetadataLoopback) > 0 { status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) } if vStr, err := devicemapper.GetLibraryVersion(); err == nil { status = append(status, [2]string{"Library Version", vStr}) } return status } // GetMetadata returns a map of information about the device. func (d *Driver) GetMetadata(id string) (map[string]string, error) { m, err := d.DeviceSet.exportDeviceMetadata(id) if err != nil { return nil, err } metadata := make(map[string]string) metadata["DeviceId"] = strconv.Itoa(m.deviceID) metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) metadata["DeviceName"] = m.deviceName return metadata, nil } // Cleanup unmounts a device. func (d *Driver) Cleanup() error { err := d.DeviceSet.Shutdown() if err2 := mount.Unmount(d.home); err == nil { err = err2 } return err } // Create adds a device with a given id and the parent. func (d *Driver) Create(id, parent, mountLabel string) error { if err := d.DeviceSet.AddDevice(id, parent); err != nil { return err } return nil } // Remove removes a device with a given id, unmounts the filesystem. 
func (d *Driver) Remove(id string) error { if !d.DeviceSet.HasDevice(id) { // Consider removing a non-existing device a no-op // This is useful to be able to progress on container removal // if the underlying device has gone away due to earlier errors return nil } // This assumes the device has been properly Get/Put:ed and thus is unmounted if err := d.DeviceSet.DeleteDevice(id, false); err != nil { return err } mp := path.Join(d.home, "mnt", id) if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { return err } return nil } // Get mounts a device with given id into the root filesystem func (d *Driver) Get(id, mountLabel string) (string, error) { mp := path.Join(d.home, "mnt", id) uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return "", err } // Create the target directories if they don't exist if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { return "", err } if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { return "", err } // Mount the device if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { return "", err } rootFs := path.Join(mp, "rootfs") if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { d.DeviceSet.UnmountDevice(id, mp) return "", err } idFile := path.Join(mp, "id") if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { // Create an "id" file with the container/image id in it to help reconstruct this in case // of later problems if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { d.DeviceSet.UnmountDevice(id, mp) return "", err } } return rootFs, nil } // Put unmounts a device and removes it. func (d *Driver) Put(id string) error { mp := path.Join(d.home, "mnt", id) err := d.DeviceSet.UnmountDevice(id, mp) if err != nil { logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) } return err } // Exists checks to see if the device exists. 
func (d *Driver) Exists(id string) bool { return d.DeviceSet.HasDevice(id) } docker-1.10.3/daemon/graphdriver/devmapper/mount.go000066400000000000000000000033151267010174400223030ustar00rootroot00000000000000// +build linux package devmapper import ( "bytes" "fmt" "os" "path/filepath" "syscall" ) // FIXME: this is copy-pasted from the aufs driver. // It should be moved into the core. // Mounted returns true if a mount point exists. func Mounted(mountpoint string) (bool, error) { mntpoint, err := os.Stat(mountpoint) if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } parent, err := os.Stat(filepath.Join(mountpoint, "..")) if err != nil { return false, err } mntpointSt := mntpoint.Sys().(*syscall.Stat_t) parentSt := parent.Sys().(*syscall.Stat_t) return mntpointSt.Dev != parentSt.Dev, nil } type probeData struct { fsName string magic string offset uint64 } // ProbeFsType returns the filesystem name for the given device id. func ProbeFsType(device string) (string, error) { probes := []probeData{ {"btrfs", "_BHRfS_M", 0x10040}, {"ext4", "\123\357", 0x438}, {"xfs", "XFSB", 0}, } maxLen := uint64(0) for _, p := range probes { l := p.offset + uint64(len(p.magic)) if l > maxLen { maxLen = l } } file, err := os.Open(device) if err != nil { return "", err } defer file.Close() buffer := make([]byte, maxLen) l, err := file.Read(buffer) if err != nil { return "", err } if uint64(l) != maxLen { return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) } for _, p := range probes { if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { return p.fsName, nil } } return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) } func joinMountOptions(a, b string) string { if a == "" { return b } if b == "" { return a } return a + "," + b } docker-1.10.3/daemon/graphdriver/driver.go000066400000000000000000000172531267010174400204570ustar00rootroot00000000000000package graphdriver 
import ( "errors" "fmt" "os" "path/filepath" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/idtools" ) // FsMagic unsigned id of the filesystem in use. type FsMagic uint32 const ( // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. FsMagicUnsupported = FsMagic(0x00000000) ) var ( // All registered drivers drivers map[string]InitFunc // ErrNotSupported returned when driver is not supported. ErrNotSupported = errors.New("driver not supported") // ErrPrerequisites retuned when driver does not meet prerequisites. ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") // ErrIncompatibleFS returned when file system is not supported. ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") ) // InitFunc initializes the storage driver. type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) // ProtoDriver defines the basic capabilities of a driver. // This interface exists solely to be a minimum set of methods // for client code which choose not to implement the entire Driver // interface and use the NaiveDiffDriver wrapper constructor. // // Use of ProtoDriver directly by client code is not recommended. type ProtoDriver interface { // String returns a string representation of this driver. String() string // Create creates a new, empty, filesystem layer with the // specified id and parent and mountLabel. Parent and mountLabel may be "". Create(id, parent, mountLabel string) error // Remove attempts to remove the filesystem layer with this id. Remove(id string) error // Get returns the mountpoint for the layered filesystem referred // to by this id. You can optionally specify a mountLabel or "". // Returns the absolute path to the mounted layered filesystem. 
Get(id, mountLabel string) (dir string, err error) // Put releases the system resources for the specified id, // e.g, unmounting layered filesystem. Put(id string) error // Exists returns whether a filesystem layer with the specified // ID exists on this driver. Exists(id string) bool // Status returns a set of key-value pairs which give low // level diagnostic status about this driver. Status() [][2]string // Returns a set of key-value pairs which give low level information // about the image/container driver is managing. GetMetadata(id string) (map[string]string, error) // Cleanup performs necessary tasks to release resources // held by the driver, e.g., unmounting all layered filesystems // known to this driver. Cleanup() error } // Driver is the interface for layered/snapshot file system drivers. type Driver interface { ProtoDriver // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". Diff(id, parent string) (archive.Archive, error) // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. Changes(id, parent string) ([]archive.Change, error) // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. // The archive.Reader must be an uncompressed stream. ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. DiffSize(id, parent string) (size int64, err error) } func init() { drivers = make(map[string]InitFunc) } // Register registers a InitFunc for the driver. 
func Register(name string, initFunc InitFunc) error { if _, exists := drivers[name]; exists { return fmt.Errorf("Name already registered %s", name) } drivers[name] = initFunc return nil } // GetDriver initializes and returns the registered driver func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { if initFunc, exists := drivers[name]; exists { return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) } if pluginDriver, err := lookupPlugin(name, home, options); err == nil { return pluginDriver, nil } logrus.Errorf("Failed to GetDriver graph %s %s", name, home) return nil, ErrNotSupported } // getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { if initFunc, exists := drivers[name]; exists { return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) } logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) return nil, ErrNotSupported } // New creates the driver and initializes it at the specified root. func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (driver Driver, err error) { if name != "" { logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver return GetDriver(name, root, options, uidMaps, gidMaps) } // Guess for prior driver priorDrivers := scanPriorDrivers(root) for _, name := range priority { if name == "vfs" { // don't use vfs even if there is state present. 
continue } for _, prior := range priorDrivers { // of the state found from prior drivers, check in order of our priority // which we would prefer if prior == name { driver, err = getBuiltinDriver(name, root, options, uidMaps, gidMaps) if err != nil { // unlike below, we will return error here, because there is prior // state, and now it is no longer supported/prereq/compatible, so // something changed and needs attention. Otherwise the daemon's // images would just "disappear". logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) return nil, err } if err := checkPriorDriver(name, root); err != nil { return nil, err } logrus.Infof("[graphdriver] using prior storage driver %q", name) return driver, nil } } } // Check for priority drivers first for _, name := range priority { driver, err = getBuiltinDriver(name, root, options, uidMaps, gidMaps) if err != nil { if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { continue } return nil, err } return driver, nil } // Check all registered drivers if no priority driver is found for _, initFunc := range drivers { if driver, err = initFunc(root, options, uidMaps, gidMaps); err != nil { if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { continue } return nil, err } return driver, nil } return nil, fmt.Errorf("No supported storage backend found") } // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers func scanPriorDrivers(root string) []string { priorDrivers := []string{} for driver := range drivers { p := filepath.Join(root, driver) if _, err := os.Stat(p); err == nil && driver != "vfs" { priorDrivers = append(priorDrivers, driver) } } return priorDrivers } func checkPriorDriver(name, root string) error { priorDrivers := []string{} for _, prior := range scanPriorDrivers(root) { if prior != name && prior != "vfs" { if _, err := os.Stat(filepath.Join(root, prior)); err == nil { priorDrivers = 
append(priorDrivers, prior) } } } if len(priorDrivers) > 0 { return fmt.Errorf("%q contains other graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", root, strings.Join(priorDrivers, ",")) } return nil } docker-1.10.3/daemon/graphdriver/driver_freebsd.go000066400000000000000000000001641267010174400221420ustar00rootroot00000000000000package graphdriver var ( // Slice of drivers that should be used in an order priority = []string{ "zfs", } ) docker-1.10.3/daemon/graphdriver/driver_linux.go000066400000000000000000000046321267010174400216730ustar00rootroot00000000000000// +build linux package graphdriver import ( "path/filepath" "syscall" ) const ( // FsMagicAufs filesystem id for Aufs FsMagicAufs = FsMagic(0x61756673) // FsMagicBtrfs filesystem id for Btrfs FsMagicBtrfs = FsMagic(0x9123683E) // FsMagicCramfs filesystem id for Cramfs FsMagicCramfs = FsMagic(0x28cd3d45) // FsMagicExtfs filesystem id for Extfs FsMagicExtfs = FsMagic(0x0000EF53) // FsMagicF2fs filesystem id for F2fs FsMagicF2fs = FsMagic(0xF2F52010) // FsMagicGPFS filesystem id for GPFS FsMagicGPFS = FsMagic(0x47504653) // FsMagicJffs2Fs filesystem if for Jffs2Fs FsMagicJffs2Fs = FsMagic(0x000072b6) // FsMagicJfs filesystem id for Jfs FsMagicJfs = FsMagic(0x3153464a) // FsMagicNfsFs filesystem id for NfsFs FsMagicNfsFs = FsMagic(0x00006969) // FsMagicRAMFs filesystem id for RamFs FsMagicRAMFs = FsMagic(0x858458f6) // FsMagicReiserFs filesystem id for ReiserFs FsMagicReiserFs = FsMagic(0x52654973) // FsMagicSmbFs filesystem id for SmbFs FsMagicSmbFs = FsMagic(0x0000517B) // FsMagicSquashFs filesystem id for SquashFs FsMagicSquashFs = FsMagic(0x73717368) // FsMagicTmpFs filesystem id for TmpFs FsMagicTmpFs = FsMagic(0x01021994) // FsMagicVxFS filesystem id for VxFs FsMagicVxFS = FsMagic(0xa501fcf5) // FsMagicXfs filesystem id for Xfs FsMagicXfs = FsMagic(0x58465342) // FsMagicZfs filesystem id for Zfs FsMagicZfs = FsMagic(0x2fc12fc1) ) var ( // Slice of drivers that should be used 
in an order priority = []string{ "aufs", "btrfs", "zfs", "devicemapper", "overlay", "vfs", } // FsNames maps filesystem id to name of the filesystem. FsNames = map[FsMagic]string{ FsMagicAufs: "aufs", FsMagicBtrfs: "btrfs", FsMagicCramfs: "cramfs", FsMagicExtfs: "extfs", FsMagicF2fs: "f2fs", FsMagicGPFS: "gpfs", FsMagicJffs2Fs: "jffs2", FsMagicJfs: "jfs", FsMagicNfsFs: "nfs", FsMagicRAMFs: "ramfs", FsMagicReiserFs: "reiserfs", FsMagicSmbFs: "smb", FsMagicSquashFs: "squashfs", FsMagicTmpFs: "tmpfs", FsMagicUnsupported: "unsupported", FsMagicVxFS: "vxfs", FsMagicXfs: "xfs", FsMagicZfs: "zfs", } ) // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { var buf syscall.Statfs_t if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { return 0, err } return FsMagic(buf.Type), nil } docker-1.10.3/daemon/graphdriver/driver_unsupported.go000066400000000000000000000004571267010174400231250ustar00rootroot00000000000000// +build !linux,!windows,!freebsd package graphdriver var ( // Slice of drivers that should be used in an order priority = []string{ "unsupported", } ) // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { return FsMagicUnsupported, nil } docker-1.10.3/daemon/graphdriver/driver_windows.go000066400000000000000000000005371267010174400222260ustar00rootroot00000000000000package graphdriver var ( // Slice of drivers that should be used in order priority = []string{ "windowsfilter", "windowsdiff", "vfs", } ) // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { // Note it is OK to return FsMagicUnsupported on Windows. 
return FsMagicUnsupported, nil } docker-1.10.3/daemon/graphdriver/fsdiff.go000066400000000000000000000102361267010174400204170ustar00rootroot00000000000000package graphdriver import ( "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" ) var ( // ApplyUncompressedLayer defines the unpack method used by the graph // driver. ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer ) // NaiveDiffDriver takes a ProtoDriver and adds the // capability of the Diffing methods which it may or may not // support on its own. See the comment on the exported // NewNaiveDiffDriver function below. // Notably, the AUFS driver doesn't need to be wrapped like this. type NaiveDiffDriver struct { ProtoDriver uidMaps []idtools.IDMap gidMaps []idtools.IDMap } // NewNaiveDiffDriver returns a fully functional driver that wraps the // given ProtoDriver and adds the capability of the following methods which // it may or may not support on its own: // Diff(id, parent string) (archive.Archive, error) // Changes(id, parent string) ([]archive.Change, error) // ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) // DiffSize(id, parent string) (size int64, err error) func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { return &NaiveDiffDriver{ProtoDriver: driver, uidMaps: uidMaps, gidMaps: gidMaps} } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { driver := gdw.ProtoDriver layerFs, err := driver.Get(id, "") if err != nil { return nil, err } defer func() { if err != nil { driver.Put(id) } }() if parent == "" { archive, err := archive.Tar(layerFs, archive.Uncompressed) if err != nil { return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() driver.Put(id) return err }), nil } parentFs, err := driver.Get(parent, "") if err != nil { return nil, err } defer driver.Put(parent) changes, err := archive.ChangesDirs(layerFs, parentFs) if err != nil { return nil, err } archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) if err != nil { return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() driver.Put(id) return err }), nil } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { driver := gdw.ProtoDriver layerFs, err := driver.Get(id, "") if err != nil { return nil, err } defer driver.Put(id) parentFs := "" if parent != "" { parentFs, err = driver.Get(parent, "") if err != nil { return nil, err } defer driver.Put(parent) } return archive.ChangesDirs(layerFs, parentFs) } // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { driver := gdw.ProtoDriver // Mount the root filesystem so we can apply the diff/layer. 
layerFs, err := driver.Get(id, "") if err != nil { return } defer driver.Put(id) options := &archive.TarOptions{UIDMaps: gdw.uidMaps, GIDMaps: gdw.gidMaps} start := time.Now().UTC() logrus.Debugf("Start untar layer") if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { return } logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) return } // DiffSize calculates the changes between the specified layer // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { driver := gdw.ProtoDriver changes, err := gdw.Changes(id, parent) if err != nil { return } layerFs, err := driver.Get(id, "") if err != nil { return } defer driver.Put(id) return archive.ChangesSize(layerFs, changes), nil } docker-1.10.3/daemon/graphdriver/graphtest/000077500000000000000000000000001267010174400206265ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/graphtest/graphtest_unix.go000066400000000000000000000154411267010174400242260ustar00rootroot00000000000000// +build linux freebsd package graphtest import ( "fmt" "io/ioutil" "os" "path" "syscall" "testing" "github.com/docker/docker/daemon/graphdriver" ) var ( drv *Driver ) // Driver conforms to graphdriver.Driver interface and // contains information such as root and reference count of the number of clients using it. // This helps in testing drivers added into the framework. type Driver struct { graphdriver.Driver root string refCount int } // InitLoopbacks ensures that the loopback devices are properly created within // the system running the device mapper tests. 
func InitLoopbacks() error { statT, err := getBaseLoopStats() if err != nil { return err } // create at least 8 loopback files, ya, that is a good number for i := 0; i < 8; i++ { loopPath := fmt.Sprintf("/dev/loop%d", i) // only create new loopback files if they don't exist if _, err := os.Stat(loopPath); err != nil { if mkerr := syscall.Mknod(loopPath, uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { return mkerr } os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) } } return nil } // getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the // loop0 device on the system. If it does not exist we assume 0,0,0660 for the // stat data func getBaseLoopStats() (*syscall.Stat_t, error) { loop0, err := os.Stat("/dev/loop0") if err != nil { if os.IsNotExist(err) { return &syscall.Stat_t{ Uid: 0, Gid: 0, Mode: 0660, }, nil } return nil, err } return loop0.Sys().(*syscall.Stat_t), nil } func newDriver(t *testing.T, name string) *Driver { root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") if err != nil { t.Fatal(err) } if err := os.MkdirAll(root, 0755); err != nil { t.Fatal(err) } d, err := graphdriver.GetDriver(name, root, nil, nil, nil) if err != nil { t.Logf("graphdriver: %v\n", err) if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS { t.Skipf("Driver %s not supported", name) } t.Fatal(err) } return &Driver{d, root, 1} } func cleanup(t *testing.T, d *Driver) { if err := drv.Cleanup(); err != nil { t.Fatal(err) } os.RemoveAll(d.root) } // GetDriver create a new driver with given name or return a existing driver with the name updating the reference count. func GetDriver(t *testing.T, name string) graphdriver.Driver { if drv == nil { drv = newDriver(t, name) } else { drv.refCount++ } return drv } // PutDriver removes the driver if it is no longer used and updates the reference count. 
func PutDriver(t *testing.T) { if drv == nil { t.Skip("No driver to put!") } drv.refCount-- if drv.refCount == 0 { cleanup(t, drv) drv = nil } } func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { fi, err := os.Stat(path) if err != nil { t.Fatal(err) } if fi.Mode()&os.ModeType != mode&os.ModeType { t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) } if fi.Mode()&os.ModePerm != mode&os.ModePerm { t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) } if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) } if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) } if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) } if stat, ok := fi.Sys().(*syscall.Stat_t); ok { if stat.Uid != uid { t.Fatalf("%s no owned by uid %d", path, uid) } if stat.Gid != gid { t.Fatalf("%s not owned by gid %d", path, gid) } } } // readDir reads a directory just like ioutil.ReadDir() // then hides specific files (currently "lost+found") // so the tests don't "see" it func readDir(dir string) ([]os.FileInfo, error) { a, err := ioutil.ReadDir(dir) if err != nil { return nil, err } b := a[:0] for _, x := range a { if x.Name() != "lost+found" { // ext4 always have this dir b = append(b, x) } } return b, nil } // DriverTestCreateEmpty creates an new image and verifies it is empty and the right metadata func DriverTestCreateEmpty(t *testing.T, drivername string) { driver := GetDriver(t, drivername) defer PutDriver(t) if err := driver.Create("empty", "", ""); err != nil { t.Fatal(err) } if !driver.Exists("empty") { t.Fatal("Newly created image doesn't exist") } dir, err := driver.Get("empty", "") if err != nil { 
t.Fatal(err) } verifyFile(t, dir, 0755|os.ModeDir, 0, 0) // Verify that the directory is empty fis, err := readDir(dir) if err != nil { t.Fatal(err) } if len(fis) != 0 { t.Fatal("New directory not empty") } driver.Put("empty") if err := driver.Remove("empty"); err != nil { t.Fatal(err) } } func createBase(t *testing.T, driver graphdriver.Driver, name string) { // We need to be able to set any perms oldmask := syscall.Umask(0) defer syscall.Umask(oldmask) if err := driver.Create(name, "", ""); err != nil { t.Fatal(err) } dir, err := driver.Get(name, "") if err != nil { t.Fatal(err) } defer driver.Put(name) subdir := path.Join(dir, "a subdir") if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { t.Fatal(err) } if err := os.Chown(subdir, 1, 2); err != nil { t.Fatal(err) } file := path.Join(dir, "a file") if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { t.Fatal(err) } } func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { dir, err := driver.Get(name, "") if err != nil { t.Fatal(err) } defer driver.Put(name) subdir := path.Join(dir, "a subdir") verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) file := path.Join(dir, "a file") verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) fis, err := readDir(dir) if err != nil { t.Fatal(err) } if len(fis) != 2 { t.Fatal("Unexpected files in base image") } } // DriverTestCreateBase create a base driver and verify. func DriverTestCreateBase(t *testing.T, drivername string) { driver := GetDriver(t, drivername) defer PutDriver(t) createBase(t, driver, "Base") verifyBase(t, driver, "Base") if err := driver.Remove("Base"); err != nil { t.Fatal(err) } } // DriverTestCreateSnap Create a driver and snap and verify. 
func DriverTestCreateSnap(t *testing.T, drivername string) { driver := GetDriver(t, drivername) defer PutDriver(t) createBase(t, driver, "Base") if err := driver.Create("Snap", "Base", ""); err != nil { t.Fatal(err) } verifyBase(t, driver, "Snap") if err := driver.Remove("Snap"); err != nil { t.Fatal(err) } if err := driver.Remove("Base"); err != nil { t.Fatal(err) } } docker-1.10.3/daemon/graphdriver/graphtest/graphtest_windows.go000066400000000000000000000000221267010174400247220ustar00rootroot00000000000000package graphtest docker-1.10.3/daemon/graphdriver/overlay/000077500000000000000000000000001267010174400203065ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/overlay/copy.go000066400000000000000000000071661267010174400216210ustar00rootroot00000000000000// +build linux package overlay import ( "fmt" "io" "os" "path/filepath" "syscall" "time" "github.com/docker/docker/pkg/system" ) type copyFlags int const ( copyHardlink copyFlags = 1 << iota ) func copyRegular(srcPath, dstPath string, mode os.FileMode) error { srcFile, err := os.Open(srcPath) if err != nil { return err } defer srcFile.Close() dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) if err != nil { return err } defer dstFile.Close() _, err = io.Copy(dstFile, srcFile) return err } func copyXattr(srcPath, dstPath, attr string) error { data, err := system.Lgetxattr(srcPath, attr) if err != nil { return err } if data != nil { if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { return err } } return nil } func copyDir(srcDir, dstDir string, flags copyFlags) error { err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(srcDir, srcPath) if err != nil { return err } dstPath := filepath.Join(dstDir, relPath) if err != nil { return err } stat, ok := f.Sys().(*syscall.Stat_t) if !ok { return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) } 
isHardlink := false switch f.Mode() & os.ModeType { case 0: // Regular file if flags©Hardlink != 0 { isHardlink = true if err := os.Link(srcPath, dstPath); err != nil { return err } } else { if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { return err } } case os.ModeDir: if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { return err } case os.ModeSymlink: link, err := os.Readlink(srcPath) if err != nil { return err } if err := os.Symlink(link, dstPath); err != nil { return err } case os.ModeNamedPipe: fallthrough case os.ModeSocket: if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { return err } case os.ModeDevice: if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { return err } default: return fmt.Errorf("Unknown file type for %s\n", srcPath) } // Everything below is copying metadata from src to dst. All this metadata // already shares an inode for hardlinks. if isHardlink { return nil } if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { return err } if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { return err } // We need to copy this attribute if it appears in an overlay upper layer, as // this function is used to copy those. It is set by overlay if a directory // is removed and then re-created and should not inherit anything from the // same dir in the lower dir. if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { return err } isSymlink := f.Mode()&os.ModeSymlink != 0 // There is no LChmod, so ignore mode for symlink. 
Also, this // must happen after chown, as that can modify the file mode if !isSymlink { if err := os.Chmod(dstPath, f.Mode()); err != nil { return err } } // system.Chtimes doesn't support a NOFOLLOW flag atm if !isSymlink { aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) if err := system.Chtimes(dstPath, aTime, mTime); err != nil { return err } } else { ts := []syscall.Timespec{stat.Atim, stat.Mtim} if err := system.LUtimesNano(dstPath, ts); err != nil { return err } } return nil }) return err } docker-1.10.3/daemon/graphdriver/overlay/overlay.go000066400000000000000000000335601267010174400223250ustar00rootroot00000000000000// +build linux package overlay import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "path" "sync" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/opencontainers/runc/libcontainer/label" ) // This is a small wrapper over the NaiveDiffWriter that lets us have a custom // implementation of ApplyDiff() var ( // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") ) // ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. type ApplyDiffProtoDriver interface { graphdriver.ProtoDriver // ApplyDiff writes the diff to the archive for the given id and parent id. // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) } type naiveDiffDriverWithApply struct { graphdriver.Driver applyDiff ApplyDiffProtoDriver } // NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. 
func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { return &naiveDiffDriverWithApply{ Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), applyDiff: driver, } } // ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { b, err := d.applyDiff.ApplyDiff(id, parent, diff) if err == ErrApplyDiffFallback { return d.Driver.ApplyDiff(id, parent, diff) } return b, err } // This backend uses the overlay union filesystem for containers // plus hard link file sharing for images. // Each container/image can have a "root" subdirectory which is a plain // filesystem hierarchy, or they can use overlay. // If they use overlay there is a "upper" directory and a "lower-id" // file, as well as "merged" and "work" directories. The "upper" // directory has the upper layer of the overlay, and "lower-id" contains // the id of the parent whose "root" directory shall be used as the lower // layer in the overlay. The overlay itself is mounted in the "merged" // directory, and the "work" dir is needed for overlay to work. // When a overlay layer is created there are two cases, either the // parent has a "root" dir, then we start out with a empty "upper" // directory overlaid on the parents root. This is typically the // case with the init layer of a container which is based on an image. // If there is no "root" in the parent, we inherit the lower-id from // the parent and start by making a copy in the parent's "upper" dir. // This is typically the case for a container layer which copies // its parent -init upper layer. // Additionally we also have a custom implementation of ApplyLayer // which makes a recursive copy of the parent "root" layer using // hardlinks to share file data, and then applies the layer on top // of that. 
This means all child images share file (but not directory) // data with the parent. // ActiveMount contains information about the count, path and whether is mounted or not. // This information is part of the Driver, that contains list of active mounts that are part of this overlay. type ActiveMount struct { count int path string mounted bool } // Driver contains information about the home directory and the list of active mounts that are created using this driver. type Driver struct { home string sync.Mutex // Protects concurrent modification to active active map[string]*ActiveMount uidMaps []idtools.IDMap gidMaps []idtools.IDMap } var backingFs = "" func init() { graphdriver.Register("overlay", Init) } // Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. // If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned. 
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { if err := supportsOverlay(); err != nil { return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(home) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } // check if they are running over btrfs or aufs switch fsMagic { case graphdriver.FsMagicBtrfs: logrus.Error("'overlay' is not supported over btrfs.") return nil, graphdriver.ErrIncompatibleFS case graphdriver.FsMagicAufs: logrus.Error("'overlay' is not supported over aufs.") return nil, graphdriver.ErrIncompatibleFS case graphdriver.FsMagicZfs: logrus.Error("'overlay' is not supported over zfs.") return nil, graphdriver.ErrIncompatibleFS } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } // Create the driver home dir if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err } d := &Driver{ home: home, active: make(map[string]*ActiveMount), uidMaps: uidMaps, gidMaps: gidMaps, } return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil } func supportsOverlay() error { // We can try to modprobe overlay first before looking at // proc/filesystems for when overlay is supported exec.Command("modprobe", "overlay").Run() f, err := os.Open("/proc/filesystems") if err != nil { return err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if s.Text() == "nodev\toverlay" { return nil } } logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") return graphdriver.ErrNotSupported } func (d *Driver) String() string { return "overlay" } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. 
func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, } } // GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := make(map[string]string) // If id has a root, it is an image rootDir := path.Join(dir, "root") if _, err := os.Stat(rootDir); err == nil { metadata["RootDir"] = rootDir return metadata, nil } lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) if err != nil { return nil, err } metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") metadata["UpperDir"] = path.Join(dir, "upper") metadata["WorkDir"] = path.Join(dir, "work") metadata["MergedDir"] = path.Join(dir, "merged") return metadata, nil } // Cleanup simply returns nil and do not change the existing filesystem. // This is required to satisfy the graphdriver.Driver interface. func (d *Driver) Cleanup() error { return nil } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. 
func (d *Driver) Create(id, parent, mountLabel string) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { return err } if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() // Toplevel images are just a "root" dir if parent == "" { if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { return err } return nil } parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return err } // If parent has a root, just do a overlay to it parentRoot := path.Join(parentDir, "root") if s, err := os.Lstat(parentRoot); err == nil { if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { return err } if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { return err } if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { return err } if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { return err } return nil } // Otherwise, copy the upper and the lower-id from the parent lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) if err != nil { return err } if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { return err } parentUpperDir := path.Join(parentDir, "upper") s, err := os.Lstat(parentUpperDir) if err != nil { return err } upperDir := path.Join(dir, "upper") if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { return err } if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { return err } if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { return err } return 
copyDir(parentUpperDir, upperDir, 0) } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id string, mountLabel string) (string, error) { // Protect the d.active from concurrent access d.Lock() defer d.Unlock() mount := d.active[id] if mount != nil { mount.count++ return mount.path, nil } mount = &ActiveMount{count: 1} dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return "", err } // If id has a root, just return it rootDir := path.Join(dir, "root") if _, err := os.Stat(rootDir); err == nil { mount.path = rootDir d.active[id] = mount return mount.path, nil } lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) if err != nil { return "", err } lowerDir := path.Join(d.dir(string(lowerID)), "root") upperDir := path.Join(dir, "upper") workDir := path.Join(dir, "work") mergedDir := path.Join(dir, "merged") opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { return "", err } mount.path = mergedDir mount.mounted = true d.active[id] = mount return mount.path, nil } // Put unmounts the mount path created for the give id. 
// Put drops one reference to the mount for id, unmounting the overlay
// once the refcount reaches zero. Unmount failures are logged, not fatal.
func (d *Driver) Put(id string) error {
	// Protect the d.active from concurrent access
	d.Lock()
	defer d.Unlock()

	mount := d.active[id]
	if mount == nil {
		logrus.Debugf("Put on a non-mounted device %s", id)
		// The layer dir may still exist on disk even with no active
		// refcount (e.g. after a daemon restart); try to unmount it anyway.
		if d.Exists(id) {
			mergedDir := path.Join(d.dir(id), "merged")
			err := syscall.Unmount(mergedDir, 0)
			if err != nil {
				logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
			}
		}
		return nil
	}

	mount.count--
	if mount.count > 0 {
		// Other users still hold the mount; keep it.
		return nil
	}

	// Last reference gone: forget the entry regardless of unmount outcome.
	defer delete(d.active, id)
	if mount.mounted {
		err := syscall.Unmount(mount.path, 0)
		if err != nil {
			logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
		}
		return err
	}
	return nil
}

// ApplyDiff applies the new layer on top of the root; if parent does not
// exist it will return an ErrApplyDiffFallback error (so the caller can
// fall back to the naive diff path).
func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
	dir := d.dir(id)

	if parent == "" {
		return 0, ErrApplyDiffFallback
	}

	parentRootDir := path.Join(d.dir(parent), "root")
	if _, err := os.Stat(parentRootDir); err != nil {
		return 0, ErrApplyDiffFallback
	}

	// We now know there is a parent, and it has a "root" directory containing
	// the full root filesystem. We can just hardlink it and apply the
	// layer. This relies on two things:
	// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
	// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
	// These are all currently true and are not expected to break

	tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
	if err != nil {
		return 0, err
	}
	defer func() {
		// On failure drop the temp tree; on success drop the now-unneeded
		// overlay bookkeeping (this layer is a full "root" from here on).
		if err != nil {
			os.RemoveAll(tmpRootDir)
		} else {
			os.RemoveAll(path.Join(dir, "upper"))
			os.RemoveAll(path.Join(dir, "work"))
			os.RemoveAll(path.Join(dir, "merged"))
			os.RemoveAll(path.Join(dir, "lower-id"))
		}
	}()

	// Hardlink-copy the parent root, then untar the layer on top of it.
	if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil {
		return 0, err
	}

	options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps}
	if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
		return 0, err
	}

	// Atomically publish the finished tree as this layer's "root".
	rootDir := path.Join(dir, "root")
	if err := os.Rename(tmpRootDir, rootDir); err != nil {
		return 0, err
	}

	return
}

// Exists checks to see if the id is already mounted.
func (d *Driver) Exists(id string) bool {
	_, err := os.Stat(d.dir(id))
	return err == nil
}
docker-1.10.3/daemon/graphdriver/overlay/overlay_test.go000066400000000000000000000012571267010174400233620ustar00rootroot00000000000000// +build linux

package overlay

import (
	"testing"

	"github.com/docker/docker/daemon/graphdriver/graphtest"
)

// This avoids creating a new driver for each test if all tests are run
// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown
func TestOverlaySetup(t *testing.T) {
	graphtest.GetDriver(t, "overlay")
}

func TestOverlayCreateEmpty(t *testing.T) {
	graphtest.DriverTestCreateEmpty(t, "overlay")
}

func TestOverlayCreateBase(t *testing.T) {
	graphtest.DriverTestCreateBase(t, "overlay")
}

func TestOverlayCreateSnap(t *testing.T) {
	graphtest.DriverTestCreateSnap(t, "overlay")
}

func TestOverlayTeardown(t *testing.T) {
	graphtest.PutDriver(t)
}
docker-1.10.3/daemon/graphdriver/overlay/overlay_unsupported.go000066400000000000000000000000421267010174400247620ustar00rootroot00000000000000// +build !linux package overlay docker-1.10.3/daemon/graphdriver/plugin.go000066400000000000000000000017511267010174400204560ustar00rootroot00000000000000// +build experimental package graphdriver import ( "fmt" "io" "github.com/docker/docker/pkg/plugins" ) type pluginClient interface { // Call calls the specified method with the specified arguments for the plugin. Call(string, interface{}, interface{}) error // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream Stream(string, interface{}) (io.ReadCloser, error) // SendFile calls the specified method, and passes through the IO stream SendFile(string, io.Reader, interface{}) error } func lookupPlugin(name, home string, opts []string) (Driver, error) { pl, err := plugins.Get(name, "GraphDriver") if err != nil { return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) } return newPluginDriver(name, home, opts, pl.Client) } func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) { proxy := &graphDriverProxy{name, c} return proxy, proxy.Init(home, opts) } docker-1.10.3/daemon/graphdriver/plugin_unsupported.go000066400000000000000000000002231267010174400231170ustar00rootroot00000000000000// +build !experimental package graphdriver func lookupPlugin(name, home string, opts []string) (Driver, error) { return nil, ErrNotSupported } docker-1.10.3/daemon/graphdriver/proxy.go000066400000000000000000000113001267010174400203300ustar00rootroot00000000000000// +build experimental package graphdriver import ( "errors" "fmt" "github.com/docker/docker/pkg/archive" ) type graphDriverProxy struct { name string client pluginClient } type graphDriverRequest struct { ID string `json:",omitempty"` Parent string `json:",omitempty"` MountLabel string `json:",omitempty"` } type 
graphDriverResponse struct { Err string `json:",omitempty"` Dir string `json:",omitempty"` Exists bool `json:",omitempty"` Status [][2]string `json:",omitempty"` Changes []archive.Change `json:",omitempty"` Size int64 `json:",omitempty"` Metadata map[string]string `json:",omitempty"` } type graphDriverInitRequest struct { Home string Opts []string } func (d *graphDriverProxy) Init(home string, opts []string) error { args := &graphDriverInitRequest{ Home: home, Opts: opts, } var ret graphDriverResponse if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { return err } if ret.Err != "" { return errors.New(ret.Err) } return nil } func (d *graphDriverProxy) String() string { return d.name } func (d *graphDriverProxy) Create(id, parent, mountLabel string) error { args := &graphDriverRequest{ ID: id, Parent: parent, MountLabel: mountLabel, } var ret graphDriverResponse if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { return err } if ret.Err != "" { return errors.New(ret.Err) } return nil } func (d *graphDriverProxy) Remove(id string) error { args := &graphDriverRequest{ID: id} var ret graphDriverResponse if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { return err } if ret.Err != "" { return errors.New(ret.Err) } return nil } func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { args := &graphDriverRequest{ ID: id, MountLabel: mountLabel, } var ret graphDriverResponse if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { return "", err } var err error if ret.Err != "" { err = errors.New(ret.Err) } return ret.Dir, err } func (d *graphDriverProxy) Put(id string) error { args := &graphDriverRequest{ID: id} var ret graphDriverResponse if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { return err } if ret.Err != "" { return errors.New(ret.Err) } return nil } func (d *graphDriverProxy) Exists(id string) bool { args := &graphDriverRequest{ID: id} var ret 
graphDriverResponse if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { return false } return ret.Exists } func (d *graphDriverProxy) Status() [][2]string { args := &graphDriverRequest{} var ret graphDriverResponse if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { return nil } return ret.Status } func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { args := &graphDriverRequest{ ID: id, } var ret graphDriverResponse if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { return nil, err } if ret.Err != "" { return nil, errors.New(ret.Err) } return ret.Metadata, nil } func (d *graphDriverProxy) Cleanup() error { args := &graphDriverRequest{} var ret graphDriverResponse if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { return nil } if ret.Err != "" { return errors.New(ret.Err) } return nil } func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { args := &graphDriverRequest{ ID: id, Parent: parent, } body, err := d.client.Stream("GraphDriver.Diff", args) if err != nil { body.Close() return nil, err } return archive.Archive(body), nil } func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { args := &graphDriverRequest{ ID: id, Parent: parent, } var ret graphDriverResponse if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { return nil, err } if ret.Err != "" { return nil, errors.New(ret.Err) } return ret.Changes, nil } func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { var ret graphDriverResponse if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { return -1, err } if ret.Err != "" { return -1, errors.New(ret.Err) } return ret.Size, nil } func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { args := &graphDriverRequest{ ID: id, Parent: parent, } var ret 
graphDriverResponse if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { return -1, err } if ret.Err != "" { return -1, errors.New(ret.Err) } return ret.Size, nil } docker-1.10.3/daemon/graphdriver/register/000077500000000000000000000000001267010174400204515ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/register/register_aufs.go000066400000000000000000000002401267010174400236360ustar00rootroot00000000000000// +build !exclude_graphdriver_aufs,linux package register import ( // register the aufs graphdriver _ "github.com/docker/docker/daemon/graphdriver/aufs" ) docker-1.10.3/daemon/graphdriver/register/register_btrfs.go000066400000000000000000000002431267010174400240230ustar00rootroot00000000000000// +build !exclude_graphdriver_btrfs,linux package register import ( // register the btrfs graphdriver _ "github.com/docker/docker/daemon/graphdriver/btrfs" ) docker-1.10.3/daemon/graphdriver/register/register_devicemapper.go000066400000000000000000000002621267010174400253500ustar00rootroot00000000000000// +build !exclude_graphdriver_devicemapper,linux package register import ( // register the devmapper graphdriver _ "github.com/docker/docker/daemon/graphdriver/devmapper" ) docker-1.10.3/daemon/graphdriver/register/register_overlay.go000066400000000000000000000002511267010174400243630ustar00rootroot00000000000000// +build !exclude_graphdriver_overlay,linux package register import ( // register the overlay graphdriver _ "github.com/docker/docker/daemon/graphdriver/overlay" ) docker-1.10.3/daemon/graphdriver/register/register_vfs.go000066400000000000000000000001431267010174400235000ustar00rootroot00000000000000package register import ( // register vfs _ "github.com/docker/docker/daemon/graphdriver/vfs" ) docker-1.10.3/daemon/graphdriver/register/register_windows.go000066400000000000000000000001741267010174400244000ustar00rootroot00000000000000package register import ( // register the windows graph driver _ 
"github.com/docker/docker/daemon/graphdriver/windows" ) docker-1.10.3/daemon/graphdriver/register/register_zfs.go000066400000000000000000000002711267010174400235060ustar00rootroot00000000000000// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd package register import ( // register the zfs driver _ "github.com/docker/docker/daemon/graphdriver/zfs" ) docker-1.10.3/daemon/graphdriver/vfs/000077500000000000000000000000001267010174400174235ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/vfs/driver.go000066400000000000000000000074351267010174400212560ustar00rootroot00000000000000package vfs import ( "fmt" "os" "path/filepath" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/opencontainers/runc/libcontainer/label" ) var ( // CopyWithTar defines the copy method to use. CopyWithTar = chrootarchive.CopyWithTar ) func init() { graphdriver.Register("vfs", Init) } // Init returns a new VFS driver. // This sets the home directory for the driver and returns NaiveDiffDriver. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { return nil, err } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil } // Driver holds information about the driver, home directory of the driver. // Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. // In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. 
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
	// home is the base directory under which all layer dirs live.
	home string
	// uidMaps/gidMaps hold the user-namespace remapping tables used when
	// creating directories.
	uidMaps []idtools.IDMap
	gidMaps []idtools.IDMap
}

// String returns the driver name.
func (d *Driver) String() string {
	return "vfs"
}

// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information.
func (d *Driver) Status() [][2]string {
	return nil
}

// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
	return nil, nil
}

// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
func (d *Driver) Cleanup() error {
	return nil
}

// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent.
func (d *Driver) Create(id, parent, mountLabel string) error {
	dir := d.dir(id)
	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
	if err != nil {
		return err
	}
	if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil {
		return err
	}
	if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil {
		return err
	}
	opts := []string{"level:s0"}
	// NOTE(review): the := here shadows both the mountLabel parameter and
	// err — the label applied to dir comes from InitLabels, and the caller's
	// mountLabel argument is never used. Looks intentional (a fresh "level:s0"
	// label per layer) but worth confirming.
	if _, mountLabel, err := label.InitLabels(opts); err == nil {
		label.SetFileLabel(dir, mountLabel)
	}
	if parent == "" {
		return nil
	}
	// Layering without copy-on-write: materialize the parent's full tree
	// into the new layer via a tar pipe.
	parentDir, err := d.Get(parent, "")
	if err != nil {
		return fmt.Errorf("%s: %s", parent, err)
	}
	if err := CopyWithTar(parentDir, dir); err != nil {
		return err
	}
	return nil
}

// dir returns the on-disk path for id; filepath.Base guards against
// path separators smuggled into the id.
func (d *Driver) dir(id string) string {
	return filepath.Join(d.home, "dir", filepath.Base(id))
}

// Remove deletes the content from the directory for a given id.
func (d *Driver) Remove(id string) error {
	if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

// Get returns the directory for the given id.
func (d *Driver) Get(id, mountLabel string) (string, error) { dir := d.dir(id) if st, err := os.Stat(dir); err != nil { return "", err } else if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) } return dir, nil } // Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. func (d *Driver) Put(id string) error { // The vfs driver has no runtime resources (e.g. mounts) // to clean up, so we don't need anything here return nil } // Exists checks to see if the directory exists for the given id. func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } docker-1.10.3/daemon/graphdriver/vfs/vfs_test.go000066400000000000000000000013071267010174400216100ustar00rootroot00000000000000// +build linux package vfs import ( "testing" "github.com/docker/docker/daemon/graphdriver/graphtest" "github.com/docker/docker/pkg/reexec" ) func init() { reexec.Init() } // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestVfsSetup and TestVfsTeardown func TestVfsSetup(t *testing.T) { graphtest.GetDriver(t, "vfs") } func TestVfsCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "vfs") } func TestVfsCreateBase(t *testing.T) { graphtest.DriverTestCreateBase(t, "vfs") } func TestVfsCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "vfs") } func TestVfsTeardown(t *testing.T) { graphtest.PutDriver(t) } docker-1.10.3/daemon/graphdriver/windows/000077500000000000000000000000001267010174400203175ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/windows/windows.go000066400000000000000000000406731267010174400223520ustar00rootroot00000000000000//+build windows package windows import ( "crypto/sha512" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "strconv" "strings" "sync" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" 
"github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/random" "github.com/microsoft/hcsshim" ) // init registers the windows graph drivers to the register. func init() { graphdriver.Register("windowsfilter", InitFilter) graphdriver.Register("windowsdiff", InitDiff) } const ( // diffDriver is an hcsshim driver type diffDriver = iota // filterDriver is an hcsshim driver type filterDriver ) // Driver represents a windows graph driver. type Driver struct { // info stores the shim driver information info hcsshim.DriverInfo // Mutex protects concurrent modification to active sync.Mutex // active stores references to the activated layers active map[string]int } // InitFilter returns a new Windows storage filter driver. func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) d := &Driver{ info: hcsshim.DriverInfo{ HomeDir: home, Flavour: filterDriver, }, active: make(map[string]int), } return d, nil } // InitDiff returns a new Windows differencing disk driver. func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { logrus.Debugf("WindowsGraphDriver InitDiff at %s", home) d := &Driver{ info: hcsshim.DriverInfo{ HomeDir: home, Flavour: diffDriver, }, active: make(map[string]int), } return d, nil } // String returns the string representation of a driver. func (d *Driver) String() string { switch d.info.Flavour { case diffDriver: return "windowsdiff" case filterDriver: return "windowsfilter" default: return "Unknown driver flavour" } } // Status returns the status of the driver. func (d *Driver) Status() [][2]string { return [][2]string{ {"Windows", ""}, } } // Exists returns true if the given id is registered with this driver. 
func (d *Driver) Exists(id string) bool { rID, err := d.resolveID(id) if err != nil { return false } result, err := hcsshim.LayerExists(d.info, rID) if err != nil { return false } return result } // Create creates a new layer with the given id. func (d *Driver) Create(id, parent, mountLabel string) error { rPId, err := d.resolveID(parent) if err != nil { return err } parentChain, err := d.getLayerChain(rPId) if err != nil { return err } var layerChain []string parentIsInit := strings.HasSuffix(rPId, "-init") if !parentIsInit && rPId != "" { parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) if err != nil { return err } layerChain = []string{parentPath} } layerChain = append(layerChain, parentChain...) if parentIsInit { if len(layerChain) == 0 { return fmt.Errorf("Cannot create a read/write layer without a parent layer.") } if err := hcsshim.CreateSandboxLayer(d.info, id, layerChain[0], layerChain); err != nil { return err } } else { if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { return err } } if _, err := os.Lstat(d.dir(parent)); err != nil { if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) } return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) } if err := d.setLayerChain(id, layerChain); err != nil { if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) } return err } return nil } // dir returns the absolute path to the layer. func (d *Driver) dir(id string) string { return filepath.Join(d.info.HomeDir, filepath.Base(id)) } // Remove unmounts and removes the dir information. func (d *Driver) Remove(id string) error { rID, err := d.resolveID(id) if err != nil { return err } os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail return hcsshim.DestroyLayer(d.info, rID) } // Get returns the rootfs path for the id. This will mount the dir at it's given path. 
func (d *Driver) Get(id, mountLabel string) (string, error) { logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) var dir string d.Lock() defer d.Unlock() rID, err := d.resolveID(id) if err != nil { return "", err } // Getting the layer paths must be done outside of the lock. layerChain, err := d.getLayerChain(rID) if err != nil { return "", err } if d.active[rID] == 0 { if err := hcsshim.ActivateLayer(d.info, rID); err != nil { return "", err } if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { logrus.Warnf("Failed to Deactivate %s: %s", id, err) } return "", err } } mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) if err != nil { if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { logrus.Warnf("Failed to Deactivate %s: %s", id, err) } return "", err } d.active[rID]++ // If the layer has a mount path, use that. Otherwise, use the // folder path. if mountPath != "" { dir = mountPath } else { dir = d.dir(id) } return dir, nil } // Put adds a new layer to the driver. func (d *Driver) Put(id string) error { logrus.Debugf("WindowsGraphDriver Put() id %s", id) rID, err := d.resolveID(id) if err != nil { return err } d.Lock() defer d.Unlock() if d.active[rID] > 1 { d.active[rID]-- } else if d.active[rID] == 1 { if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { return err } if err := hcsshim.DeactivateLayer(d.info, rID); err != nil { return err } delete(d.active, rID) } return nil } // Cleanup ensures the information the driver stores is properly removed. func (d *Driver) Cleanup() error { return nil } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". func (d *Driver) Diff(id, parent string) (arch archive.Archive, err error) { rID, err := d.resolveID(id) if err != nil { return } // Getting the layer paths must be done outside of the lock. 
layerChain, err := d.getLayerChain(rID) if err != nil { return } d.Lock() // To support export, a layer must be activated but not prepared. if d.info.Flavour == filterDriver { if d.active[rID] == 0 { if err = hcsshim.ActivateLayer(d.info, rID); err != nil { d.Unlock() return } defer func() { if err := hcsshim.DeactivateLayer(d.info, rID); err != nil { logrus.Warnf("Failed to Deactivate %s: %s", rID, err) } }() } else { if err = hcsshim.UnprepareLayer(d.info, rID); err != nil { d.Unlock() return } defer func() { if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err) } }() } } d.Unlock() return d.exportLayer(rID, layerChain) } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return nil, fmt.Errorf("The Windows graphdriver does not support Changes()") } // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { rPId, err := d.resolveID(parent) if err != nil { return } if d.info.Flavour == diffDriver { start := time.Now().UTC() logrus.Debugf("WindowsGraphDriver ApplyDiff: Start untar layer") destination := d.dir(id) destination = filepath.Dir(destination) if size, err = chrootarchive.ApplyUncompressedLayer(destination, diff, nil); err != nil { return } logrus.Debugf("WindowsGraphDriver ApplyDiff: Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) return } parentChain, err := d.getLayerChain(rPId) if err != nil { return } parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) if err != nil { return } layerChain := []string{parentPath} layerChain = append(layerChain, parentChain...) 
if size, err = d.importLayer(id, diff, layerChain); err != nil { return } if err = d.setLayerChain(id, layerChain); err != nil { return } return } // DiffSize calculates the changes between the specified layer // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { rPId, err := d.resolveID(parent) if err != nil { return } changes, err := d.Changes(id, rPId) if err != nil { return } layerFs, err := d.Get(id, "") if err != nil { return } defer d.Put(id) return archive.ChangesSize(layerFs, changes), nil } // CustomImageInfo is the object returned by the driver describing the base // image. type CustomImageInfo struct { ID string Name string Version string Path string Size int64 CreatedTime time.Time } // GetCustomImageInfos returns the image infos for window specific // base images which should always be present. func (d *Driver) GetCustomImageInfos() ([]CustomImageInfo, error) { strData, err := hcsshim.GetSharedBaseImages() if err != nil { return nil, fmt.Errorf("Failed to restore base images: %s", err) } type customImageInfoList struct { Images []CustomImageInfo } var infoData customImageInfoList if err = json.Unmarshal([]byte(strData), &infoData); err != nil { err = fmt.Errorf("JSON unmarshal returned error=%s", err) logrus.Error(err) return nil, err } var images []CustomImageInfo for _, imageData := range infoData.Images { folderName := filepath.Base(imageData.Path) // Use crypto hash of the foldername to generate a docker style id. h := sha512.Sum384([]byte(folderName)) id := fmt.Sprintf("%x", h[:32]) if err := d.Create(id, "", ""); err != nil { return nil, err } // Create the alternate ID file. if err := d.setID(id, folderName); err != nil { return nil, err } imageData.ID = id images = append(images, imageData) } return images, nil } // GetMetadata returns custom driver information. 
func (d *Driver) GetMetadata(id string) (map[string]string, error) { m := make(map[string]string) m["dir"] = d.dir(id) return m, nil } // exportLayer generates an archive from a layer based on the given ID. func (d *Driver) exportLayer(id string, parentLayerPaths []string) (arch archive.Archive, err error) { layerFolder := d.dir(id) tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) if err = os.MkdirAll(tempFolder, 0755); err != nil { logrus.Errorf("Could not create %s %s", tempFolder, err) return } defer func() { if err != nil { _, folderName := filepath.Split(tempFolder) if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) } } }() if err = hcsshim.ExportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil { return } archive, err := archive.Tar(tempFolder, archive.Uncompressed) if err != nil { return } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() d.Put(id) _, folderName := filepath.Split(tempFolder) if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) } return err }), nil } // importLayer adds a new layer to the tag and graph store based on the given data. 
func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) { layerFolder := d.dir(id) tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) if err = os.MkdirAll(tempFolder, 0755); err != nil { logrus.Errorf("Could not create %s %s", tempFolder, err) return } defer func() { _, folderName := filepath.Split(tempFolder) if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) } }() start := time.Now().UTC() logrus.Debugf("Start untar layer") if size, err = chrootarchive.ApplyLayer(tempFolder, layerData); err != nil { return } err = copySysFiles(tempFolder, filepath.Join(d.info.HomeDir, "sysfile-backups", id)) if err != nil { return } logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) if err = hcsshim.ImportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil { return } return } // resolveID computes the layerID information based on the given id. func (d *Driver) resolveID(id string) (string, error) { content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) if os.IsNotExist(err) { return id, nil } else if err != nil { return "", err } return string(content), nil } // setID stores the layerId in disk. func (d *Driver) setID(id, altID string) error { err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) if err != nil { return err } return nil } // getLayerChain returns the layer chain information. 
func (d *Driver) getLayerChain(id string) ([]string, error) { jPath := filepath.Join(d.dir(id), "layerchain.json") content, err := ioutil.ReadFile(jPath) if os.IsNotExist(err) { return nil, nil } else if err != nil { return nil, fmt.Errorf("Unable to read layerchain file - %s", err) } var layerChain []string err = json.Unmarshal(content, &layerChain) if err != nil { return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) } return layerChain, nil } // setLayerChain stores the layer chain information in disk. func (d *Driver) setLayerChain(id string, chain []string) error { content, err := json.Marshal(&chain) if err != nil { return fmt.Errorf("Failed to marshall layerchain json - %s", err) } jPath := filepath.Join(d.dir(id), "layerchain.json") err = ioutil.WriteFile(jPath, content, 0600) if err != nil { return fmt.Errorf("Unable to write layerchain file - %s", err) } return nil } // DiffPath returns a directory that contains files needed to construct layer diff. func (d *Driver) DiffPath(id string) (path string, release func() error, err error) { id, err = d.resolveID(id) if err != nil { return } // Getting the layer paths must be done outside of the lock. layerChain, err := d.getLayerChain(id) if err != nil { return } layerFolder := d.dir(id) tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) if err = os.MkdirAll(tempFolder, 0755); err != nil { logrus.Errorf("Could not create %s %s", tempFolder, err) return } defer func() { if err != nil { _, folderName := filepath.Split(tempFolder) if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) } } }() if err = hcsshim.ExportLayer(d.info, id, tempFolder, layerChain); err != nil { return } err = copySysFiles(filepath.Join(d.info.HomeDir, "sysfile-backups", id), tempFolder) if err != nil { return } return tempFolder, func() error { // TODO: activate layers and release here? 
_, folderName := filepath.Split(tempFolder) return hcsshim.DestroyLayer(d.info, folderName) }, nil } var sysFileWhiteList = []string{ "Hives\\*", "Files\\BOOTNXT", "tombstones.txt", } // note this only handles files func copySysFiles(src string, dest string) error { if err := os.MkdirAll(dest, 0700); err != nil { return err } return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { rel, err := filepath.Rel(src, path) if err != nil { return err } for _, sysfile := range sysFileWhiteList { if matches, err := filepath.Match(sysfile, rel); err != nil || !matches { continue } fi, err := os.Lstat(path) if err != nil { return err } if !fi.Mode().IsRegular() { continue } targetPath := filepath.Join(dest, rel) if err = os.MkdirAll(filepath.Dir(targetPath), 0700); err != nil { return err } in, err := os.Open(path) if err != nil { return err } out, err := os.Create(targetPath) if err != nil { in.Close() return err } _, err = io.Copy(out, in) in.Close() out.Close() if err != nil { return err } } return nil }) } docker-1.10.3/daemon/graphdriver/zfs/000077500000000000000000000000001267010174400174275ustar00rootroot00000000000000docker-1.10.3/daemon/graphdriver/zfs/MAINTAINERS000066400000000000000000000001321267010174400211200ustar00rootroot00000000000000Jörg Thalheim (@Mic92) Arthur Gautier (@baloose) docker-1.10.3/daemon/graphdriver/zfs/zfs.go000066400000000000000000000220551267010174400205640ustar00rootroot00000000000000// +build linux freebsd package zfs import ( "fmt" "os" "os/exec" "path" "strconv" "strings" "sync" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" zfs "github.com/mistifyio/go-zfs" "github.com/opencontainers/runc/libcontainer/label" ) type zfsOptions struct { fsName string mountPath string } func init() { graphdriver.Register("zfs", Init) } // Logger returns a zfs logger 
implementation. type Logger struct{} // Log wraps log message from ZFS driver with a prefix '[zfs]'. func (*Logger) Log(cmd []string) { logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) } // Init returns a new ZFS driver. // It takes base mount path and a array of options which are represented as key value pairs. // Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { var err error if _, err := exec.LookPath("zfs"); err != nil { logrus.Debugf("[zfs] zfs command is not available: %v", err) return nil, graphdriver.ErrPrerequisites } file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) if err != nil { logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) return nil, graphdriver.ErrPrerequisites } defer file.Close() options, err := parseOptions(opt) if err != nil { return nil, err } options.mountPath = base rootdir := path.Dir(base) if options.fsName == "" { err = checkRootdirFs(rootdir) if err != nil { return nil, err } } if options.fsName == "" { options.fsName, err = lookupZfsDataset(rootdir) if err != nil { return nil, err } } zfs.SetLogger(new(Logger)) filesystems, err := zfs.Filesystems(options.fsName) if err != nil { return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) } filesystemsCache := make(map[string]bool, len(filesystems)) var rootDataset *zfs.Dataset for _, fs := range filesystems { if fs.Name == options.fsName { rootDataset = fs } filesystemsCache[fs.Name] = true } if rootDataset == nil { return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) } d := &Driver{ dataset: rootDataset, options: options, filesystemsCache: filesystemsCache, uidMaps: uidMaps, gidMaps: gidMaps, } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil } func parseOptions(opt []string) (zfsOptions, error) { var options zfsOptions 
options.fsName = "" for _, option := range opt { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return options, err } key = strings.ToLower(key) switch key { case "zfs.fsname": options.fsName = val default: return options, fmt.Errorf("Unknown option %s", key) } } return options, nil } func lookupZfsDataset(rootdir string) (string, error) { var stat syscall.Stat_t if err := syscall.Stat(rootdir, &stat); err != nil { return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) } wantedDev := stat.Dev mounts, err := mount.GetMounts() if err != nil { return "", err } for _, m := range mounts { if err := syscall.Stat(m.Mountpoint, &stat); err != nil { logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) continue // may fail on fuse file systems } if stat.Dev == wantedDev && m.Fstype == "zfs" { return m.Source, nil } } return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) } // Driver holds information about the driver, such as zfs dataset, options and cache. type Driver struct { dataset *zfs.Dataset options zfsOptions sync.Mutex // protects filesystem cache against concurrent access filesystemsCache map[string]bool uidMaps []idtools.IDMap gidMaps []idtools.IDMap } func (d *Driver) String() string { return "zfs" } // Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. func (d *Driver) Cleanup() error { return nil } // Status returns information about the ZFS filesystem. It returns a two dimensional array of information // such as pool name, dataset name, disk usage, parent quota and compression used. // Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', // 'Space Available', 'Parent Quota' and 'Compression'. 
func (d *Driver) Status() [][2]string { parts := strings.Split(d.dataset.Name, "/") pool, err := zfs.GetZpool(parts[0]) var poolName, poolHealth string if err == nil { poolName = pool.Name poolHealth = pool.Health } else { poolName = fmt.Sprintf("error while getting pool information %v", err) poolHealth = "not available" } quota := "no" if d.dataset.Quota != 0 { quota = strconv.FormatUint(d.dataset.Quota, 10) } return [][2]string{ {"Zpool", poolName}, {"Zpool Health", poolHealth}, {"Parent Dataset", d.dataset.Name}, {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, {"Parent Quota", quota}, {"Compression", d.dataset.Compression}, } } // GetMetadata returns image/container metadata related to graph driver func (d *Driver) GetMetadata(id string) (map[string]string, error) { return nil, nil } func (d *Driver) cloneFilesystem(name, parentName string) error { snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) parentDataset := zfs.Dataset{Name: parentName} snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) if err != nil { return err } _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) if err == nil { d.Lock() d.filesystemsCache[name] = true d.Unlock() } if err != nil { snapshot.Destroy(zfs.DestroyDeferDeletion) return err } return snapshot.Destroy(zfs.DestroyDeferDeletion) } func (d *Driver) zfsPath(id string) string { return d.options.fsName + "/" + id } func (d *Driver) mountPath(id string) string { return path.Join(d.options.mountPath, "graph", getMountpoint(id)) } // Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. 
func (d *Driver) Create(id string, parent string, mountLabel string) error { err := d.create(id, parent) if err == nil { return nil } if zfsError, ok := err.(*zfs.Error); ok { if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { return err } // aborted build -> cleanup } else { return err } dataset := zfs.Dataset{Name: d.zfsPath(id)} if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { return err } // retry return d.create(id, parent) } func (d *Driver) create(id, parent string) error { name := d.zfsPath(id) if parent == "" { mountoptions := map[string]string{"mountpoint": "legacy"} fs, err := zfs.CreateFilesystem(name, mountoptions) if err == nil { d.Lock() d.filesystemsCache[fs.Name] = true d.Unlock() } return err } return d.cloneFilesystem(name, d.zfsPath(parent)) } // Remove deletes the dataset, filesystem and the cache for the given id. func (d *Driver) Remove(id string) error { name := d.zfsPath(id) dataset := zfs.Dataset{Name: name} err := dataset.Destroy(zfs.DestroyRecursive) if err == nil { d.Lock() delete(d.filesystemsCache, name) d.Unlock() } return err } // Get returns the mountpoint for the given id after creating the target directories if necessary. 
func (d *Driver) Get(id, mountLabel string) (string, error) { mountpoint := d.mountPath(id) filesystem := d.zfsPath(id) options := label.FormatMountLabel("", mountLabel) logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return "", err } // Create the target directories if they don't exist if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { return "", err } if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) } // this could be our first mount after creation of the filesystem, and the root dir may still have root // permissions instead of the remapped root uid:gid (if user namespaces are enabled): if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) } return mountpoint, nil } // Put removes the existing mountpoint for the given id if it exists. func (d *Driver) Put(id string) error { mountpoint := d.mountPath(id) logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) if err := mount.Unmount(mountpoint); err != nil { return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) } return nil } // Exists checks to see if the cache entry exists for the given id. 
func (d *Driver) Exists(id string) bool { return d.filesystemsCache[d.zfsPath(id)] == true } docker-1.10.3/daemon/graphdriver/zfs/zfs_freebsd.go000066400000000000000000000015331267010174400222540ustar00rootroot00000000000000package zfs import ( "fmt" "strings" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" ) func checkRootdirFs(rootdir string) error { var buf syscall.Statfs_t if err := syscall.Statfs(rootdir, &buf); err != nil { return fmt.Errorf("Failed to access '%s': %s", rootdir, err) } // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) return graphdriver.ErrPrerequisites } return nil } func getMountpoint(id string) string { maxlen := 12 // we need to preserve filesystem suffix suffix := strings.SplitN(id, "-", 2) if len(suffix) > 1 { return id[:maxlen] + "-" + suffix[1] } return id[:maxlen] } docker-1.10.3/daemon/graphdriver/zfs/zfs_linux.go000066400000000000000000000010541267010174400217770ustar00rootroot00000000000000package zfs import ( "fmt" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" ) func checkRootdirFs(rootdir string) error { var buf syscall.Statfs_t if err := syscall.Statfs(rootdir, &buf); err != nil { return fmt.Errorf("Failed to access '%s': %s", rootdir, err) } if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) return graphdriver.ErrPrerequisites } return nil } func getMountpoint(id string) string { return id } docker-1.10.3/daemon/graphdriver/zfs/zfs_test.go000066400000000000000000000011771267010174400216250ustar00rootroot00000000000000// +build linux package zfs import ( "testing" "github.com/docker/docker/daemon/graphdriver/graphtest" ) // This avoids creating a new driver for each test if 
all tests are run // Make sure to put new tests between TestZfsSetup and TestZfsTeardown func TestZfsSetup(t *testing.T) { graphtest.GetDriver(t, "zfs") } func TestZfsCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "zfs") } func TestZfsCreateBase(t *testing.T) { graphtest.DriverTestCreateBase(t, "zfs") } func TestZfsCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "zfs") } func TestZfsTeardown(t *testing.T) { graphtest.PutDriver(t) } docker-1.10.3/daemon/graphdriver/zfs/zfs_unsupported.go000066400000000000000000000002271267010174400232310ustar00rootroot00000000000000// +build !linux,!freebsd package zfs func checkRootdirFs(rootdir string) error { return nil } func getMountpoint(id string) string { return id } docker-1.10.3/daemon/image_delete.go000066400000000000000000000321221267010174400172430ustar00rootroot00000000000000package daemon import ( "fmt" "strings" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" ) type conflictType int const ( conflictDependentChild conflictType = (1 << iota) conflictRunningContainer conflictActiveReference conflictStoppedContainer conflictHard = conflictDependentChild | conflictRunningContainer conflictSoft = conflictActiveReference | conflictStoppedContainer ) // ImageDelete deletes the image referenced by the given imageRef from this // daemon. The given imageRef can be an image ID, ID prefix, or a repository // reference (with an optional tag or digest, defaulting to the tag name // "latest"). There is differing behavior depending on whether the given // imageRef is a repository reference or not. // // If the given imageRef is a repository reference then that repository // reference will be removed. 
However, if there exists any containers which // were created using the same image reference then the repository reference // cannot be removed unless either there are other repository references to the // same image or force is true. Following removal of the repository reference, // the referenced image itself will attempt to be deleted as described below // but quietly, meaning any image delete conflicts will cause the image to not // be deleted and the conflict will not be reported. // // There may be conflicts preventing deletion of an image and these conflicts // are divided into two categories grouped by their severity: // // Hard Conflict: // - a pull or build using the image. // - any descendent image. // - any running container using the image. // // Soft Conflict: // - any stopped container using the image. // - any repository tag or digest references to the image. // // The image cannot be removed if there are any hard conflicts and can be // removed if there are soft conflicts only if force is true. // // If prune is true, ancestor images will each attempt to be deleted quietly, // meaning any delete conflicts will cause the image to not be deleted and the // conflict will not be reported. // // FIXME: remove ImageDelete's dependency on Daemon, then move to the graph // package. This would require that we no longer need the daemon to determine // whether images are being used by a stopped or running container. func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { records := []types.ImageDelete{} imgID, err := daemon.GetImageID(imageRef) if err != nil { return nil, daemon.imageNotExistToErrcode(err) } repoRefs := daemon.referenceStore.References(imgID) var removedRepositoryRef bool if !isImageIDPrefix(imgID.String(), imageRef) { // A repository reference was given and should be removed // first. 
We can only remove this reference if either force is // true, there are multiple repository references to this // image, or there are no containers using the given reference. if !(force || len(repoRefs) > 1) { if container := daemon.getContainerUsingImage(imgID); container != nil { // If we removed the repository reference then // this image would remain "dangling" and since // we really want to avoid that the client must // explicitly force its removal. return nil, derr.ErrorCodeImgDelUsed.WithArgs(imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) } } parsedRef, err := reference.ParseNamed(imageRef) if err != nil { return nil, err } parsedRef, err = daemon.removeImageRef(parsedRef) if err != nil { return nil, err } untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") records = append(records, untaggedRecord) repoRefs = daemon.referenceStore.References(imgID) // If this is a tag reference and all the remaining references // to this image are digest references, delete the remaining // references so that they don't prevent removal of the image. if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { foundTagRef := false for _, repoRef := range repoRefs { if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical { foundTagRef = true break } } if !foundTagRef { for _, repoRef := range repoRefs { if _, err := daemon.removeImageRef(repoRef); err != nil { return records, err } untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} records = append(records, untaggedRecord) } repoRefs = []reference.Named{} } } // If it has remaining references then the untag finished the remove if len(repoRefs) > 0 { return records, nil } removedRepositoryRef = true } else { // If an ID reference was given AND there is exactly one // repository reference to the image then we will want to // remove that reference. 
// FIXME: Is this the behavior we want? if len(repoRefs) == 1 { c := conflictHard if !force { c |= conflictSoft &^ conflictActiveReference } if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { return nil, conflict } parsedRef, err := daemon.removeImageRef(repoRefs[0]) if err != nil { return nil, err } untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") records = append(records, untaggedRecord) } } return records, daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef) } // isImageIDPrefix returns whether the given possiblePrefix is a prefix of the // given imageID. func isImageIDPrefix(imageID, possiblePrefix string) bool { if strings.HasPrefix(imageID, possiblePrefix) { return true } if i := strings.IndexRune(imageID, ':'); i >= 0 { return strings.HasPrefix(imageID[i+1:], possiblePrefix) } return false } // getContainerUsingImage returns a container that was created using the given // imageID. Returns nil if there is no such container. func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { return daemon.containers.First(func(c *container.Container) bool { return c.ImageID == imageID }) } // removeImageRef attempts to parse and remove the given image reference from // this daemon's store of repository tag/digest references. The given // repositoryRef must not be an image ID but a repository name followed by an // optional tag or digest reference. If tag or digest is omitted, the default // tag is used. Returns the resolved image reference and an error. func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { ref = reference.WithDefaultTag(ref) // Ignore the boolean value returned, as far as we're concerned, this // is an idempotent operation and it's okay if the reference didn't // exist in the first place. 
_, err := daemon.referenceStore.Delete(ref) return ref, err } // removeAllReferencesToImageID attempts to remove every reference to the given // imgID from this daemon's store of repository tag/digest references. Returns // on the first encountered error. Removed references are logged to this // daemon's event service. An "Untagged" types.ImageDelete is added to the // given list of records. func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { imageRefs := daemon.referenceStore.References(imgID) for _, imageRef := range imageRefs { parsedRef, err := daemon.removeImageRef(imageRef) if err != nil { return err } untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") *records = append(*records, untaggedRecord) } return nil } // ImageDeleteConflict holds a soft or hard conflict and an associated error. // Implements the error interface. type imageDeleteConflict struct { hard bool used bool imgID image.ID message string } func (idc *imageDeleteConflict) Error() string { var forceMsg string if idc.hard { forceMsg = "cannot be forced" } else { forceMsg = "must be forced" } return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) } // imageDeleteHelper attempts to delete the given image from this daemon. If // the image has any hard delete conflicts (child images or running containers // using the image) then it cannot be deleted. If the image has any soft delete // conflicts (any tags/digests referencing the image or any stopped container // using the image) then it can only be deleted if force is true. If the delete // succeeds and prune is true, the parent images are also deleted if they do // not have any soft or hard delete conflicts themselves. Any deleted images // and untagged references are appended to the given records. 
If any error or // conflict is encountered, it will be returned immediately without deleting // the image. If quiet is true, any encountered conflicts will be ignored and // the function will return nil immediately without deleting the image. func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { // First, determine if this image has any conflicts. Ignore soft conflicts // if force is true. c := conflictHard if !force { c |= conflictSoft } if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { // Ignore conflicts UNLESS the image is "dangling" or not being used in // which case we want the user to know. return nil } // There was a conflict and it's either a hard conflict OR we are not // forcing deletion on soft conflicts. return conflict } parent, err := daemon.imageStore.GetParent(imgID) if err != nil { // There may be no parent parent = "" } // Delete all repository tag/digest references to this image. if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { return err } removedLayers, err := daemon.imageStore.Delete(imgID) if err != nil { return err } daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) for _, removedLayer := range removedLayers { *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) } if !prune || parent == "" { return nil } // We need to prune the parent image. This means delete it if there are // no tags/digests referencing it and there are no containers using it ( // either running or stopped). // Do not force prunings, but do so quietly (stopping on any encountered // conflicts). 
return daemon.imageDeleteHelper(parent, records, false, true, true) } // checkImageDeleteConflict determines whether there are any conflicts // preventing deletion of the given image from this daemon. A hard conflict is // any image which has the given image as a parent or any running container // using the image. A soft conflict is any tags/digest referencing the given // image or any stopped container using the image. If ignoreSoftConflicts is // true, this function will not check for soft conflict conditions. func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { // Check if the image has any descendent images. if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { return &imageDeleteConflict{ hard: true, imgID: imgID, message: "image has dependent child images", } } if mask&conflictRunningContainer != 0 { // Check if any running container is using the image. running := func(c *container.Container) bool { return c.IsRunning() && c.ImageID == imgID } if container := daemon.containers.First(running); container != nil { return &imageDeleteConflict{ imgID: imgID, hard: true, used: true, message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), } } } // Check if any repository tags/digest reference this image. if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 { return &imageDeleteConflict{ imgID: imgID, message: "image is referenced in one or more repositories", } } if mask&conflictStoppedContainer != 0 { // Check if any stopped containers reference this image. 
stopped := func(c *container.Container) bool { return !c.IsRunning() && c.ImageID == imgID } if container := daemon.containers.First(stopped); container != nil { return &imageDeleteConflict{ imgID: imgID, used: true, message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), } } } return nil } // imageIsDangling returns whether the given image is "dangling" which means // that there are no repository references to the given image and it has no // child images. func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { return !(len(daemon.referenceStore.References(imgID)) > 0 || len(daemon.imageStore.Children(imgID)) > 0) } docker-1.10.3/daemon/images.go000066400000000000000000000104141267010174400161040ustar00rootroot00000000000000package daemon import ( "fmt" "path" "sort" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" ) var acceptedImageFilterTags = map[string]bool{ "dangling": true, "label": true, } // byCreated is a temporary type used to sort a list of images by creation // time. type byCreated []*types.Image func (r byCreated) Len() int { return len(r) } func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } // Map returns a map of all images in the ImageStore func (daemon *Daemon) Map() map[image.ID]*image.Image { return daemon.imageStore.Map() } // Images returns a filtered list of images. filterArgs is a JSON-encoded set // of filter arguments which will be interpreted by api/types/filters. // filter is a shell glob string applied to repository names. The argument // named all controls whether all images in the graph are filtered, or just // the heads. 
func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { var ( allImages map[image.ID]*image.Image err error danglingOnly = false ) imageFilters, err := filters.FromParam(filterArgs) if err != nil { return nil, err } if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { return nil, err } if imageFilters.Include("dangling") { if imageFilters.ExactMatch("dangling", "true") { danglingOnly = true } else if !imageFilters.ExactMatch("dangling", "false") { return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) } } if danglingOnly { allImages = daemon.imageStore.Heads() } else { allImages = daemon.imageStore.Map() } images := []*types.Image{} var filterTagged bool if filter != "" { filterRef, err := reference.ParseNamed(filter) if err == nil { // parse error means wildcard repo if _, ok := filterRef.(reference.NamedTagged); ok { filterTagged = true } } } for id, img := range allImages { if imageFilters.Include("label") { // Very old image that do not have image.Config (or even labels) if img.Config == nil { continue } // We are now sure image.Config is not nil if !imageFilters.MatchKVList("label", img.Config.Labels) { continue } } layerID := img.RootFS.ChainID() var size int64 if layerID != "" { l, err := daemon.layerStore.Get(layerID) if err != nil { return nil, err } size, err = l.Size() layer.ReleaseAndLog(daemon.layerStore, l) if err != nil { return nil, err } } newImage := newImage(img, size) for _, ref := range daemon.referenceStore.References(id) { if filter != "" { // filter by tag/repo name if filterTagged { // filter by tag, require full ref match if ref.String() != filter { continue } } else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact continue } } if _, ok := ref.(reference.Canonical); ok { newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) } if _, ok := ref.(reference.NamedTagged); ok { 
newImage.RepoTags = append(newImage.RepoTags, ref.String()) } } if newImage.RepoDigests == nil && newImage.RepoTags == nil { if all || len(daemon.imageStore.Children(id)) == 0 { if imageFilters.Include("dangling") && !danglingOnly { //dangling=false case, so dangling image is not needed continue } if filter != "" { // skip images with no references if filtering by tag continue } newImage.RepoDigests = []string{"@"} newImage.RepoTags = []string{":"} } else { continue } } else if danglingOnly { continue } images = append(images, newImage) } sort.Sort(sort.Reverse(byCreated(images))) return images, nil } func newImage(image *image.Image, size int64) *types.Image { newImage := new(types.Image) newImage.ParentID = image.Parent.String() newImage.ID = image.ID().String() newImage.Created = image.Created.Unix() newImage.Size = size newImage.VirtualSize = size if image.Config != nil { newImage.Labels = image.Config.Labels } return newImage } docker-1.10.3/daemon/import.go000066400000000000000000000052421267010174400161540ustar00rootroot00000000000000package daemon import ( "encoding/json" "io" "net/http" "net/url" "runtime" "time" "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/reference" "github.com/docker/engine-api/types/container" ) // ImportImage imports an image, getting the archived layer data either from // inConfig (if src is "-"), or from a URI specified in src. Progress output is // written to outStream. Repository and tag names can optionally be given in // the repo and tag arguments, respectively. 
func (daemon *Daemon) ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *container.Config) error { var ( sf = streamformatter.NewJSONStreamFormatter() rc io.ReadCloser resp *http.Response ) if src == "-" { rc = inConfig } else { inConfig.Close() u, err := url.Parse(src) if err != nil { return err } if u.Scheme == "" { u.Scheme = "http" u.Host = src u.Path = "" } outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) resp, err = httputils.Download(u.String()) if err != nil { return err } progressOutput := sf.NewProgressOutput(outStream, true) rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") } defer rc.Close() if len(msg) == 0 { msg = "Imported from " + src } inflatedLayerData, err := archive.DecompressStream(rc) if err != nil { return err } // TODO: support windows baselayer? l, err := daemon.layerStore.Register(inflatedLayerData, "") if err != nil { return err } defer layer.ReleaseAndLog(daemon.layerStore, l) created := time.Now().UTC() imgConfig, err := json.Marshal(&image.Image{ V1Image: image.V1Image{ DockerVersion: dockerversion.Version, Config: config, Architecture: runtime.GOARCH, OS: runtime.GOOS, Created: created, Comment: msg, }, RootFS: &image.RootFS{ Type: "layers", DiffIDs: []layer.DiffID{l.DiffID()}, }, History: []image.History{{ Created: created, Comment: msg, }}, }) if err != nil { return err } id, err := daemon.imageStore.Create(imgConfig) if err != nil { return err } // FIXME: connect with commit code and call refstore directly if newRef != nil { if err := daemon.TagImage(newRef, id.String()); err != nil { return err } } daemon.LogImageEvent(id.String(), id.String(), "import") outStream.Write(sf.FormatStatus("", id.String())) return nil } docker-1.10.3/daemon/info.go000066400000000000000000000132171267010174400155760ustar00rootroot00000000000000package daemon import ( "os" "runtime" "strings" "sync/atomic" "time" 
"github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/operatingsystem" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" "github.com/docker/docker/utils" "github.com/docker/docker/volume/drivers" "github.com/docker/engine-api/types" ) // SystemInfo returns information about the host server the daemon is running on. func (daemon *Daemon) SystemInfo() (*types.Info, error) { kernelVersion := "" if kv, err := kernel.GetKernelVersion(); err == nil { kernelVersion = kv.String() } operatingSystem := "" if s, err := operatingsystem.GetOperatingSystem(); err == nil { operatingSystem = s } // Don't do containerized check on Windows if runtime.GOOS != "windows" { if inContainer, err := operatingsystem.IsContainerized(); err != nil { logrus.Errorf("Could not determine if daemon is containerized: %v", err) operatingSystem += " (error determining if containerized)" } else if inContainer { operatingSystem += " (containerized)" } } meminfo, err := system.ReadMemInfo() if err != nil { logrus.Errorf("Could not read system memory info: %v", err) } // if we still have the original dockerinit binary from before // we copied it locally, let's return the path to that, since // that's more intuitive (the copied path is trivial to derive // by hand given VERSION) initPath := utils.DockerInitPath("") sysInfo := sysinfo.New(true) var cRunning, cPaused, cStopped int32 daemon.containers.ApplyAll(func(c *container.Container) { switch c.StateString() { case "paused": atomic.AddInt32(&cPaused, 1) case "running": atomic.AddInt32(&cRunning, 1) default: atomic.AddInt32(&cStopped, 1) } }) v := &types.Info{ ID: daemon.ID, Containers: int(cRunning + cPaused + cStopped), ContainersRunning: int(cRunning), ContainersPaused: 
int(cPaused), ContainersStopped: int(cStopped), Images: len(daemon.imageStore.Map()), Driver: daemon.GraphDriverName(), DriverStatus: daemon.layerStore.DriverStatus(), Plugins: daemon.showPluginsInfo(), IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, BridgeNfIptables: !sysInfo.BridgeNfCallIptablesDisabled, BridgeNfIP6tables: !sysInfo.BridgeNfCallIP6tablesDisabled, Debug: utils.IsDebugEnabled(), NFd: fileutils.GetTotalUsedFds(), NGoroutines: runtime.NumGoroutine(), SystemTime: time.Now().Format(time.RFC3339Nano), ExecutionDriver: daemon.ExecutionDriver().Name(), LoggingDriver: daemon.defaultLogConfig.Type, NEventsListener: daemon.EventsService.SubscribersCount(), KernelVersion: kernelVersion, OperatingSystem: operatingSystem, IndexServerAddress: registry.IndexServer, OSType: platform.OSType, Architecture: platform.Architecture, RegistryConfig: daemon.RegistryService.Config, InitSha1: dockerversion.InitSHA1, InitPath: initPath, NCPU: runtime.NumCPU(), MemTotal: meminfo.MemTotal, DockerRootDir: daemon.configStore.Root, Labels: daemon.configStore.Labels, ExperimentalBuild: utils.ExperimentalBuild(), ServerVersion: dockerversion.Version, ClusterStore: daemon.configStore.ClusterStore, ClusterAdvertise: daemon.configStore.ClusterAdvertise, HTTPProxy: getProxyEnv("http_proxy"), HTTPSProxy: getProxyEnv("https_proxy"), NoProxy: getProxyEnv("no_proxy"), } // TODO Windows. Refactor this more once sysinfo is refactored into // platform specific code. On Windows, sysinfo.cgroupMemInfo and // sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if // an attempt is made to access through them. 
if runtime.GOOS != "windows" { v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfsPeriod v.CPUCfsQuota = sysInfo.CPUCfsQuota v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset } if hostname, err := os.Hostname(); err == nil { v.Name = hostname } return v, nil } // SystemVersion returns version information about the daemon. func (daemon *Daemon) SystemVersion() types.Version { v := types.Version{ Version: dockerversion.Version, GitCommit: dockerversion.GitCommit, GoVersion: runtime.Version(), Os: runtime.GOOS, Arch: runtime.GOARCH, BuildTime: dockerversion.BuildTime, Experimental: utils.ExperimentalBuild(), } if kernelVersion, err := kernel.GetKernelVersion(); err == nil { v.KernelVersion = kernelVersion.String() } return v } func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { var pluginsInfo types.PluginsInfo pluginsInfo.Volume = volumedrivers.GetDriverList() networkDriverList := daemon.GetNetworkDriverList() for nd := range networkDriverList { pluginsInfo.Network = append(pluginsInfo.Network, nd) } pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins return pluginsInfo } // The uppercase and the lowercase are available for the proxy settings. // See the Go specification for details on these variables. 
https://golang.org/pkg/net/http/ func getProxyEnv(key string) string { proxyValue := os.Getenv(strings.ToUpper(key)) if proxyValue == "" { return os.Getenv(strings.ToLower(key)) } return proxyValue } docker-1.10.3/daemon/inspect.go000066400000000000000000000172001267010174400163040ustar00rootroot00000000000000package daemon import ( "fmt" "time" "github.com/docker/docker/container" "github.com/docker/docker/daemon/exec" "github.com/docker/docker/daemon/network" "github.com/docker/docker/pkg/version" "github.com/docker/engine-api/types" networktypes "github.com/docker/engine-api/types/network" "github.com/docker/engine-api/types/versions/v1p20" ) // ContainerInspect returns low-level information about a // container. Returns an error if the container cannot be found, or if // there is an error getting the data. func (daemon *Daemon) ContainerInspect(name string, size bool, version version.Version) (interface{}, error) { switch { case version.LessThan("1.20"): return daemon.containerInspectPre120(name) case version.Equal("1.20"): return daemon.containerInspect120(name) } return daemon.containerInspectCurrent(name, size) } func (daemon *Daemon) containerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { container, err := daemon.GetContainer(name) if err != nil { return nil, err } container.Lock() defer container.Unlock() base, err := daemon.getInspectData(container, size) if err != nil { return nil, err } mountPoints := addMountPoints(container) networkSettings := &types.NetworkSettings{ NetworkSettingsBase: types.NetworkSettingsBase{ Bridge: container.NetworkSettings.Bridge, SandboxID: container.NetworkSettings.SandboxID, HairpinMode: container.NetworkSettings.HairpinMode, LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, Ports: container.NetworkSettings.Ports, SandboxKey: container.NetworkSettings.SandboxKey, SecondaryIPAddresses: 
container.NetworkSettings.SecondaryIPAddresses, SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, }, DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), Networks: container.NetworkSettings.Networks, } return &types.ContainerJSON{ ContainerJSONBase: base, Mounts: mountPoints, Config: container.Config, NetworkSettings: networkSettings, }, nil } // containerInspect120 serializes the master version of a container into a json type. func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { container, err := daemon.GetContainer(name) if err != nil { return nil, err } container.Lock() defer container.Unlock() base, err := daemon.getInspectData(container, false) if err != nil { return nil, err } mountPoints := addMountPoints(container) config := &v1p20.ContainerConfig{ Config: container.Config, MacAddress: container.Config.MacAddress, NetworkDisabled: container.Config.NetworkDisabled, ExposedPorts: container.Config.ExposedPorts, VolumeDriver: container.HostConfig.VolumeDriver, } networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) return &v1p20.ContainerJSON{ ContainerJSONBase: base, Mounts: mountPoints, Config: config, NetworkSettings: networkSettings, }, nil } func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { // make a copy to play with hostConfig := *container.HostConfig children := daemon.children(container) hostConfig.Links = nil // do not expose the internal structure for linkAlias, child := range children { hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) } // we need this trick to preserve empty log driver, so // container will use daemon defaults even if daemon change them if hostConfig.LogConfig.Type == "" { hostConfig.LogConfig.Type = daemon.defaultLogConfig.Type } if len(hostConfig.LogConfig.Config) == 0 { 
hostConfig.LogConfig.Config = daemon.defaultLogConfig.Config } containerState := &types.ContainerState{ Status: container.State.StateString(), Running: container.State.Running, Paused: container.State.Paused, Restarting: container.State.Restarting, OOMKilled: container.State.OOMKilled, Dead: container.State.Dead, Pid: container.State.Pid, ExitCode: container.State.ExitCode, Error: container.State.Error, StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), } contJSONBase := &types.ContainerJSONBase{ ID: container.ID, Created: container.Created.Format(time.RFC3339Nano), Path: container.Path, Args: container.Args, State: containerState, Image: container.ImageID.String(), LogPath: container.LogPath, Name: container.Name, RestartCount: container.RestartCount, Driver: container.Driver, MountLabel: container.MountLabel, ProcessLabel: container.ProcessLabel, ExecIDs: container.GetExecIDs(), HostConfig: &hostConfig, } var ( sizeRw int64 sizeRootFs int64 ) if size { sizeRw, sizeRootFs = daemon.getSize(container) contJSONBase.SizeRw = &sizeRw contJSONBase.SizeRootFs = &sizeRootFs } // Now set any platform-specific fields contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) contJSONBase.GraphDriver.Name = container.Driver graphDriverData, err := container.RWLayer.Metadata() if err != nil { return nil, err } contJSONBase.GraphDriver.Data = graphDriverData return contJSONBase, nil } // ContainerExecInspect returns low-level information about the exec // command. An error is returned if the exec cannot be found. func (daemon *Daemon) ContainerExecInspect(id string) (*exec.Config, error) { eConfig, err := daemon.getExecConfig(id) if err != nil { return nil, err } return eConfig, nil } // VolumeInspect looks up a volume by name. An error is returned if // the volume cannot be found. 
func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { v, err := daemon.volumes.Get(name) if err != nil { return nil, err } return volumeToAPIType(v), nil } func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { result := &v1p20.NetworkSettings{ NetworkSettingsBase: types.NetworkSettingsBase{ Bridge: settings.Bridge, SandboxID: settings.SandboxID, HairpinMode: settings.HairpinMode, LinkLocalIPv6Address: settings.LinkLocalIPv6Address, LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, Ports: settings.Ports, SandboxKey: settings.SandboxKey, SecondaryIPAddresses: settings.SecondaryIPAddresses, SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, }, DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), } return result } // getDefaultNetworkSettings creates the deprecated structure that holds the information // about the bridge network for a container. func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*networktypes.EndpointSettings) types.DefaultNetworkSettings { var settings types.DefaultNetworkSettings if defaultNetwork, ok := networks["bridge"]; ok { settings.EndpointID = defaultNetwork.EndpointID settings.Gateway = defaultNetwork.Gateway settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen settings.IPAddress = defaultNetwork.IPAddress settings.IPPrefixLen = defaultNetwork.IPPrefixLen settings.IPv6Gateway = defaultNetwork.IPv6Gateway settings.MacAddress = defaultNetwork.MacAddress } return settings } docker-1.10.3/daemon/inspect_unix.go000066400000000000000000000045151267010174400173540ustar00rootroot00000000000000// +build !windows package daemon import ( "github.com/docker/docker/container" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/versions/v1p19" ) // This sets platform-specific fields func setPlatformSpecificContainerFields(container 
*container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { contJSONBase.AppArmorProfile = container.AppArmorProfile contJSONBase.ResolvConfPath = container.ResolvConfPath contJSONBase.HostnamePath = container.HostnamePath contJSONBase.HostsPath = container.HostsPath return contJSONBase } // containerInspectPre120 gets containers for pre 1.20 APIs. func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { container, err := daemon.GetContainer(name) if err != nil { return nil, err } container.Lock() defer container.Unlock() base, err := daemon.getInspectData(container, false) if err != nil { return nil, err } volumes := make(map[string]string) volumesRW := make(map[string]bool) for _, m := range container.MountPoints { volumes[m.Destination] = m.Path() volumesRW[m.Destination] = m.RW } config := &v1p19.ContainerConfig{ Config: container.Config, MacAddress: container.Config.MacAddress, NetworkDisabled: container.Config.NetworkDisabled, ExposedPorts: container.Config.ExposedPorts, VolumeDriver: container.HostConfig.VolumeDriver, Memory: container.HostConfig.Memory, MemorySwap: container.HostConfig.MemorySwap, CPUShares: container.HostConfig.CPUShares, CPUSet: container.HostConfig.CpusetCpus, } networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) return &v1p19.ContainerJSON{ ContainerJSONBase: base, Volumes: volumes, VolumesRW: volumesRW, Config: config, NetworkSettings: networkSettings, }, nil } func addMountPoints(container *container.Container) []types.MountPoint { mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) for _, m := range container.MountPoints { mountPoints = append(mountPoints, types.MountPoint{ Name: m.Name, Source: m.Path(), Destination: m.Destination, Driver: m.Driver, Mode: m.Mode, RW: m.RW, Propagation: m.Propagation, }) } return mountPoints } 
docker-1.10.3/daemon/inspect_windows.go000066400000000000000000000016161267010174400200620ustar00rootroot00000000000000package daemon import ( "github.com/docker/docker/container" "github.com/docker/engine-api/types" ) // This sets platform-specific fields func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { return contJSONBase } func addMountPoints(container *container.Container) []types.MountPoint { mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) for _, m := range container.MountPoints { mountPoints = append(mountPoints, types.MountPoint{ Name: m.Name, Source: m.Path(), Destination: m.Destination, Driver: m.Driver, RW: m.RW, }) } return mountPoints } // containerInspectPre120 get containers for pre 1.20 APIs. func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { return daemon.containerInspectCurrent(name, false) } docker-1.10.3/daemon/kill.go000066400000000000000000000073141267010174400155770ustar00rootroot00000000000000package daemon import ( "fmt" "runtime" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/signal" ) // ContainerKill send signal to the container // If no signal is given (sig 0), then Kill with SIGKILL and wait // for the container to exit. // If a signal is given, then just send it to the container and return. 
func (daemon *Daemon) ContainerKill(name string, sig uint64) error { container, err := daemon.GetContainer(name) if err != nil { return err } if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) } // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { return daemon.Kill(container) } return daemon.killWithSignal(container, int(sig)) } // killWithSignal sends the container the given signal. This wrapper for the // host specific kill command prepares the container before attempting // to send the signal. An error is returned if the container is paused // or not running, or if there is a problem returned from the // underlying kill command. func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error { logrus.Debugf("Sending %d to %s", sig, container.ID) container.Lock() defer container.Unlock() // We could unpause the container for them rather than returning this error if container.Paused { return derr.ErrorCodeUnpauseContainer.WithArgs(container.ID) } if !container.Running { return derr.ErrorCodeNotRunning.WithArgs(container.ID) } container.ExitOnNext() // if the container is currently restarting we do not need to send the signal // to the process. Telling the monitor that it should exit on it's next event // loop is enough if container.Restarting { return nil } if err := daemon.kill(container, sig); err != nil { return err } daemon.LogContainerEvent(container, "kill") return nil } // Kill forcefully terminates a container. func (daemon *Daemon) Kill(container *container.Container) error { if !container.IsRunning() { return derr.ErrorCodeNotRunning.WithArgs(container.ID) } // 1. 
Send SIGKILL if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { // While normally we might "return err" here we're not going to // because if we can't stop the container by this point then // its probably because its already stopped. Meaning, between // the time of the IsRunning() call above and now it stopped. // Also, since the err return will be exec driver specific we can't // look for any particular (common) error that would indicate // that the process is already dead vs something else going wrong. // So, instead we'll give it up to 2 more seconds to complete and if // by that time the container is still running, then the error // we got is probably valid and so we return it to the caller. if container.IsRunning() { container.WaitStop(2 * time.Second) if container.IsRunning() { return err } } } // 2. Wait for the process to die, in last resort, try to kill the process directly if err := killProcessDirectly(container); err != nil { return err } container.WaitStop(-1 * time.Second) return nil } // killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error. 
func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error { err := daemon.killWithSignal(container, sig) if err == syscall.ESRCH { logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPID(), sig) return nil } return err } docker-1.10.3/daemon/links.go000066400000000000000000000074721267010174400157710ustar00rootroot00000000000000package daemon import ( "strings" "sync" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/pkg/graphdb" ) // linkIndex stores link relationships between containers, including their specified alias // The alias is the name the parent uses to reference the child type linkIndex struct { // idx maps a parent->alias->child relationship idx map[*container.Container]map[string]*container.Container // childIdx maps child->parent->aliases childIdx map[*container.Container]map[*container.Container]map[string]struct{} mu sync.Mutex } func newLinkIndex() *linkIndex { return &linkIndex{ idx: make(map[*container.Container]map[string]*container.Container), childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}), } } // link adds indexes for the passed in parent/child/alias relationships func (l *linkIndex) link(parent, child *container.Container, alias string) { l.mu.Lock() if l.idx[parent] == nil { l.idx[parent] = make(map[string]*container.Container) } l.idx[parent][alias] = child if l.childIdx[child] == nil { l.childIdx[child] = make(map[*container.Container]map[string]struct{}) } if l.childIdx[child][parent] == nil { l.childIdx[child][parent] = make(map[string]struct{}) } l.childIdx[child][parent][alias] = struct{}{} l.mu.Unlock() } // unlink removes the requested alias for the given parent/child func (l *linkIndex) unlink(alias string, child, parent *container.Container) { l.mu.Lock() delete(l.idx[parent], alias) delete(l.childIdx[child], parent) l.mu.Unlock() } // children maps all the aliases-> children 
for the passed in parent // aliases here are the aliases the parent uses to refer to the child func (l *linkIndex) children(parent *container.Container) map[string]*container.Container { l.mu.Lock() children := l.idx[parent] l.mu.Unlock() return children } // parents maps all the aliases->parent for the passed in child // aliases here are the aliases the parents use to refer to the child func (l *linkIndex) parents(child *container.Container) map[string]*container.Container { l.mu.Lock() parents := make(map[string]*container.Container) for parent, aliases := range l.childIdx[child] { for alias := range aliases { parents[alias] = parent } } l.mu.Unlock() return parents } // delete deletes all link relationships referencing this container func (l *linkIndex) delete(container *container.Container) { l.mu.Lock() for _, child := range l.idx[container] { delete(l.childIdx[child], container) } delete(l.idx, container) delete(l.childIdx, container) l.mu.Unlock() } // migrateLegacySqliteLinks migrates sqlite links to use links from HostConfig // when sqlite links were used, hostConfig.Links was set to nil func (daemon *Daemon) migrateLegacySqliteLinks(db *graphdb.Database, container *container.Container) error { // if links is populated (or an empty slice), then this isn't using sqlite links and can be skipped if container.HostConfig == nil || container.HostConfig.Links != nil { return nil } logrus.Debugf("migrating legacy sqlite link info for container: %s", container.ID) fullName := container.Name if fullName[0] != '/' { fullName = "/" + fullName } // don't use a nil slice, this ensures that the check above will skip once the migration has completed links := []string{} children, err := db.Children(fullName, 0) if err != nil { if !strings.Contains(err.Error(), "Cannot find child for") { return err } // else continue... 
it's ok if we didn't find any children, it'll just be nil and we can continue the migration } for _, child := range children { c, err := daemon.GetContainer(child.Entity.ID()) if err != nil { return err } links = append(links, c.Name+":"+child.Edge.Name) } container.HostConfig.Links = links return container.WriteHostConfig() } docker-1.10.3/daemon/links/000077500000000000000000000000001267010174400154305ustar00rootroot00000000000000docker-1.10.3/daemon/links/links.go000066400000000000000000000102711267010174400171000ustar00rootroot00000000000000package links import ( "fmt" "path" "strings" "github.com/docker/go-connections/nat" ) // Link struct holds informations about parent/child linked container type Link struct { // Parent container IP address ParentIP string // Child container IP address ChildIP string // Link name Name string // Child environments variables ChildEnvironment []string // Child exposed ports Ports []nat.Port } // NewLink initializes a new Link struct with the provided options. func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { var ( i int ports = make([]nat.Port, len(exposedPorts)) ) for p := range exposedPorts { ports[i] = p i++ } return &Link{ Name: name, ChildIP: childIP, ParentIP: parentIP, ChildEnvironment: env, Ports: ports, } } // ToEnv creates a string's slice containing child container informations in // the form of environment variables which will be later exported on container // startup. 
func (l *Link) ToEnv() []string { env := []string{} _, n := path.Split(l.Name) alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) if p := l.getDefaultPort(); p != nil { env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) } //sort the ports so that we can bulk the continuous ports together nat.Sort(l.Ports, func(ip, jp nat.Port) bool { // If the two ports have the same number, tcp takes priority // Sort in desc order return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") }) for i := 0; i < len(l.Ports); { p := l.Ports[i] j := nextContiguous(l.Ports, p.Int(), i) if j > i+1 { env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) q := l.Ports[j] env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) i = j + 1 continue } else { i++ } } for _, p := range l.Ports { env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) } // Load the linked container's name into 
the environment env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) if l.ChildEnvironment != nil { for _, v := range l.ChildEnvironment { parts := strings.SplitN(v, "=", 2) if len(parts) < 2 { continue } // Ignore a few variables that are added during docker build (and not really relevant to linked containers) if parts[0] == "HOME" || parts[0] == "PATH" { continue } env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) } } return env } func nextContiguous(ports []nat.Port, value int, index int) int { if index+1 == len(ports) { return index } for i := index + 1; i < len(ports); i++ { if ports[i].Int() > value+1 { return i - 1 } value++ } return len(ports) - 1 } // Default port rules func (l *Link) getDefaultPort() *nat.Port { var p nat.Port i := len(l.Ports) if i == 0 { return nil } else if i > 1 { nat.Sort(l.Ports, func(ip, jp nat.Port) bool { // If the two ports have the same number, tcp takes priority // Sort in desc order return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") }) } p = l.Ports[0] return &p } docker-1.10.3/daemon/links/links_test.go000066400000000000000000000150131267010174400201360ustar00rootroot00000000000000package links import ( "fmt" "strings" "testing" "github.com/docker/go-connections/nat" ) // Just to make life easier func newPortNoError(proto, port string) nat.Port { p, _ := nat.NewPort(proto, port) return p } func TestLinkNaming(t *testing.T) { ports := make(nat.PortSet) ports[newPortNoError("tcp", "6379")] = struct{}{} link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) rawEnv := link.ToEnv() env := make(map[string]string, len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } value, ok := env["DOCKER_1_PORT"] if !ok { t.Fatalf("DOCKER_1_PORT not found in env") } if value != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) } } func 
TestLinkNew(t *testing.T) { ports := make(nat.PortSet) ports[newPortNoError("tcp", "6379")] = struct{}{} link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) if link.Name != "/db/docker" { t.Fail() } if link.ParentIP != "172.0.17.3" { t.Fail() } if link.ChildIP != "172.0.17.2" { t.Fail() } for _, p := range link.Ports { if p != newPortNoError("tcp", "6379") { t.Fail() } } } func TestLinkEnv(t *testing.T) { ports := make(nat.PortSet) ports[newPortNoError("tcp", "6379")] = struct{}{} link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) rawEnv := link.ToEnv() env := make(map[string]string, len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) } if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) } if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) } if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) } if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) } if env["DOCKER_NAME"] != "/db/docker" { t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) } if env["DOCKER_ENV_PASSWORD"] != "gordon" { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } } func TestLinkMultipleEnv(t *testing.T) { ports := make(nat.PortSet) ports[newPortNoError("tcp", "6379")] = struct{}{} ports[newPortNoError("tcp", "6380")] = struct{}{} ports[newPortNoError("tcp", "6381")] = struct{}{} link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) rawEnv := link.ToEnv() env := make(map[string]string, 
len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) } if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) } if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) } if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) } if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) } if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) } if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) } if env["DOCKER_NAME"] != "/db/docker" { t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) } if env["DOCKER_ENV_PASSWORD"] != "gordon" { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } } func TestLinkPortRangeEnv(t *testing.T) { ports := make(nat.PortSet) ports[newPortNoError("tcp", "6379")] = struct{}{} ports[newPortNoError("tcp", "6380")] = struct{}{} ports[newPortNoError("tcp", "6381")] = struct{}{} link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) rawEnv := link.ToEnv() env := make(map[string]string, len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) } if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected tcp://172.0.17.2:6379, 
got %s", env["DOCKER_PORT_6379_TCP_START"]) } if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) } if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) } if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) } if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) } if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) } if env["DOCKER_NAME"] != "/db/docker" { t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) } if env["DOCKER_ENV_PASSWORD"] != "gordon" { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } for i := range []int{6379, 6380, 6381} { tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) if env[tcpaddr] == "172.0.17.2" { t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) } if env[tcpport] == fmt.Sprintf("%d", i) { t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) } if env[tcpproto] == "tcp" { t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) } if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) } } } docker-1.10.3/daemon/links_test.go000066400000000000000000000042041267010174400170160ustar00rootroot00000000000000package daemon import ( "encoding/json" "io/ioutil" "os" "path" "path/filepath" "testing" "github.com/docker/docker/container" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/stringid" containertypes "github.com/docker/engine-api/types/container" ) func 
TestMigrateLegacySqliteLinks(t *testing.T) { tmpDir, err := ioutil.TempDir("", "legacy-qlite-links-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) name1 := "test1" c1 := &container.Container{ CommonContainer: container.CommonContainer{ ID: stringid.GenerateNonCryptoID(), Name: name1, HostConfig: &containertypes.HostConfig{}, }, } c1.Root = tmpDir name2 := "test2" c2 := &container.Container{ CommonContainer: container.CommonContainer{ ID: stringid.GenerateNonCryptoID(), Name: name2, }, } store := container.NewMemoryStore() store.Add(c1.ID, c1) store.Add(c2.ID, c2) d := &Daemon{root: tmpDir, containers: store} db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db")) if err != nil { t.Fatal(err) } if _, err := db.Set("/"+name1, c1.ID); err != nil { t.Fatal(err) } if _, err := db.Set("/"+name2, c2.ID); err != nil { t.Fatal(err) } alias := "hello" if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil { t.Fatal(err) } if err := d.migrateLegacySqliteLinks(db, c1); err != nil { t.Fatal(err) } if len(c1.HostConfig.Links) != 1 { t.Fatal("expected links to be populated but is empty") } expected := name2 + ":" + alias actual := c1.HostConfig.Links[0] if actual != expected { t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual) } // ensure this is persisted b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json")) if err != nil { t.Fatal(err) } type hc struct { Links []string } var cfg hc if err := json.Unmarshal(b, &cfg); err != nil { t.Fatal(err) } if len(cfg.Links) != 1 { t.Fatalf("expected one entry in links, got: %d", len(cfg.Links)) } if cfg.Links[0] != expected { // same expected as above t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0]) } } docker-1.10.3/daemon/list.go000066400000000000000000000346151267010174400156230ustar00rootroot00000000000000package daemon import ( "errors" "fmt" "strconv" "strings" "github.com/Sirupsen/logrus" 
"github.com/docker/docker/container" "github.com/docker/docker/image" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" networktypes "github.com/docker/engine-api/types/network" "github.com/docker/go-connections/nat" ) var acceptedVolumeFilterTags = map[string]bool{ "dangling": true, } // iterationAction represents possible outcomes happening during the container iteration. type iterationAction int // containerReducer represents a reducer for a container. // Returns the object to serialize by the api. type containerReducer func(*container.Container, *listContext) (*types.Container, error) const ( // includeContainer is the action to include a container in the reducer. includeContainer iterationAction = iota // excludeContainer is the action to exclude a container in the reducer. excludeContainer // stopIteration is the action to stop iterating over the list of containers. stopIteration ) // errStopIteration makes the iterator to stop without returning an error. var errStopIteration = errors.New("container list iteration stopped") // List returns an array of all containers registered in the daemon. func (daemon *Daemon) List() []*container.Container { return daemon.containers.List() } // ContainersConfig is the filtering specified by the user to iterate over containers. type ContainersConfig struct { // if true show all containers, otherwise only running containers. All bool // show all containers created after this container id Since string // show all containers created before this container id Before string // number of containers to return at most Limit int // if true include the sizes of the containers Size bool // return only containers that match filters Filters string } // listContext is the daemon generated filtering to iterate over containers. // This is created based on the user specification. 
type listContext struct { // idx is the container iteration index for this context idx int // ancestorFilter tells whether it should check ancestors or not ancestorFilter bool // names is a list of container names to filter with names map[string][]string // images is a list of images to filter with images map[image.ID]bool // filters is a collection of arguments to filter with, specified by the user filters filters.Args // exitAllowed is a list of exit codes allowed to filter with exitAllowed []int // FIXME Remove this for 1.12 as --since and --before are deprecated // beforeContainer is a filter to ignore containers that appear before the one given beforeContainer *container.Container // sinceContainer is a filter to stop the filtering when the iterator arrive to the given container sinceContainer *container.Container // beforeFilter is a filter to ignore containers that appear before the one given // this is used for --filter=before= and --before=, the latter is deprecated. beforeFilter *container.Container // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container // this is used for --filter=since= and --since=, the latter is deprecated. sinceFilter *container.Container // ContainersConfig is the filters set by the user *ContainersConfig } // Containers returns the list of containers to show given the user's filtering. func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) { return daemon.reduceContainers(config, daemon.transformContainer) } // reduceContainer parses the user filtering and generates the list of containers to return based on a reducer. 
func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) { containers := []*types.Container{} ctx, err := daemon.foldFilter(config) if err != nil { return nil, err } for _, container := range daemon.List() { t, err := daemon.reducePsContainer(container, ctx, reducer) if err != nil { if err != errStopIteration { return nil, err } break } if t != nil { containers = append(containers, t) ctx.idx++ } } return containers, nil } // reducePsContainer is the basic representation for a container as expected by the ps command. func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { container.Lock() defer container.Unlock() // filter containers to return action := includeContainerInList(container, ctx) switch action { case excludeContainer: return nil, nil case stopIteration: return nil, errStopIteration } // transform internal container struct into api structs return reducer(container, ctx) } // foldFilter generates the container filter based in the user's filtering options. 
func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) { psFilters, err := filters.FromParam(config.Filters) if err != nil { return nil, err } var filtExited []int err = psFilters.WalkValues("exited", func(value string) error { code, err := strconv.Atoi(value) if err != nil { return err } filtExited = append(filtExited, code) return nil }) if err != nil { return nil, err } err = psFilters.WalkValues("status", func(value string) error { if !container.IsValidStateString(value) { return fmt.Errorf("Unrecognised filter value for status: %s", value) } config.All = true return nil }) if err != nil { return nil, err } var beforeContFilter, sinceContFilter *container.Container // FIXME remove this for 1.12 as --since and --before are deprecated var beforeContainer, sinceContainer *container.Container err = psFilters.WalkValues("before", func(value string) error { beforeContFilter, err = daemon.GetContainer(value) return err }) if err != nil { return nil, err } err = psFilters.WalkValues("since", func(value string) error { sinceContFilter, err = daemon.GetContainer(value) return err }) if err != nil { return nil, err } imagesFilter := map[image.ID]bool{} var ancestorFilter bool if psFilters.Include("ancestor") { ancestorFilter = true psFilters.WalkValues("ancestor", func(ancestor string) error { id, err := daemon.GetImageID(ancestor) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) return nil } if imagesFilter[id] { // Already seen this ancestor, skip it return nil } // Then walk down the graph and put the imageIds in imagesFilter populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) return nil }) } // FIXME remove this for 1.12 as --since and --before are deprecated if config.Before != "" { beforeContainer, err = daemon.GetContainer(config.Before) if err != nil { return nil, err } } // FIXME remove this for 1.12 as --since and --before are deprecated if config.Since != "" { sinceContainer, err = 
daemon.GetContainer(config.Since) if err != nil { return nil, err } } return &listContext{ filters: psFilters, ancestorFilter: ancestorFilter, images: imagesFilter, exitAllowed: filtExited, beforeContainer: beforeContainer, sinceContainer: sinceContainer, beforeFilter: beforeContFilter, sinceFilter: sinceContFilter, ContainersConfig: config, names: daemon.nameIndex.GetAll(), }, nil } // includeContainerInList decides whether a containers should be include in the output or not based in the filter. // It also decides if the iteration should be stopped or not. func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { // Do not include container if it's stopped and we're not filters // FIXME remove the ctx.beforContainer part of the condition for 1.12 as --since and --before are deprecated if !container.Running && !ctx.All && ctx.Limit <= 0 && ctx.beforeContainer == nil && ctx.sinceContainer == nil { return excludeContainer } // Do not include container if the name doesn't match if !ctx.filters.Match("name", container.Name) { return excludeContainer } // Do not include container if the id doesn't match if !ctx.filters.Match("id", container.ID) { return excludeContainer } // Do not include container if any of the labels don't match if !ctx.filters.MatchKVList("label", container.Config.Labels) { return excludeContainer } // Do not include container if the isolation mode doesn't match if excludeContainer == excludeByIsolation(container, ctx) { return excludeContainer } // FIXME remove this for 1.12 as --since and --before are deprecated if ctx.beforeContainer != nil { if container.ID == ctx.beforeContainer.ID { ctx.beforeContainer = nil } return excludeContainer } // FIXME remove this for 1.12 as --since and --before are deprecated if ctx.sinceContainer != nil { if container.ID == ctx.sinceContainer.ID { return stopIteration } } // Do not include container if it's in the list before the filter container. 
// Set the filter container to nil to include the rest of containers after this one. if ctx.beforeFilter != nil { if container.ID == ctx.beforeFilter.ID { ctx.beforeFilter = nil } return excludeContainer } // Stop iteration when the container arrives to the filter container if ctx.sinceFilter != nil { if container.ID == ctx.sinceFilter.ID { return stopIteration } } // Stop iteration when the index is over the limit if ctx.Limit > 0 && ctx.idx == ctx.Limit { return stopIteration } // Do not include container if its exit code is not in the filter if len(ctx.exitAllowed) > 0 { shouldSkip := true for _, code := range ctx.exitAllowed { if code == container.ExitCode && !container.Running { shouldSkip = false break } } if shouldSkip { return excludeContainer } } // Do not include container if its status doesn't match the filter if !ctx.filters.Match("status", container.State.StateString()) { return excludeContainer } if ctx.ancestorFilter { if len(ctx.images) == 0 { return excludeContainer } if !ctx.images[container.ImageID] { return excludeContainer } } return includeContainer } // transformContainer generates the container type expected by the docker ps command. 
func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { newC := &types.Container{ ID: container.ID, Names: ctx.names[container.ID], ImageID: container.ImageID.String(), } if newC.Names == nil { // Dead containers will often have no name, so make sure the response isn't null newC.Names = []string{} } image := container.Config.Image // if possible keep the original ref if image != container.ImageID.String() { id, err := daemon.GetImageID(image) if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { return nil, err } if err != nil || id != container.ImageID { image = container.ImageID.String() } } newC.Image = image if len(container.Args) > 0 { args := []string{} for _, arg := range container.Args { if strings.Contains(arg, " ") { args = append(args, fmt.Sprintf("'%s'", arg)) } else { args = append(args, arg) } } argsAsString := strings.Join(args, " ") newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) } else { newC.Command = container.Path } newC.Created = container.Created.Unix() newC.Status = container.State.String() newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) // copy networks to avoid races networks := make(map[string]*networktypes.EndpointSettings) for name, network := range container.NetworkSettings.Networks { if network == nil { continue } networks[name] = &networktypes.EndpointSettings{ EndpointID: network.EndpointID, Gateway: network.Gateway, IPAddress: network.IPAddress, IPPrefixLen: network.IPPrefixLen, IPv6Gateway: network.IPv6Gateway, GlobalIPv6Address: network.GlobalIPv6Address, GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, MacAddress: network.MacAddress, } if network.IPAMConfig != nil { networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ IPv4Address: network.IPAMConfig.IPv4Address, IPv6Address: network.IPAMConfig.IPv6Address, } } } newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} newC.Ports = 
[]types.Port{} for port, bindings := range container.NetworkSettings.Ports { p, err := nat.ParsePort(port.Port()) if err != nil { return nil, err } if len(bindings) == 0 { newC.Ports = append(newC.Ports, types.Port{ PrivatePort: p, Type: port.Proto(), }) continue } for _, binding := range bindings { h, err := nat.ParsePort(binding.HostPort) if err != nil { return nil, err } newC.Ports = append(newC.Ports, types.Port{ PrivatePort: p, PublicPort: h, Type: port.Proto(), IP: binding.HostIP, }) } } if ctx.Size { sizeRw, sizeRootFs := daemon.getSize(container) newC.SizeRw = sizeRw newC.SizeRootFs = sizeRootFs } newC.Labels = container.Config.Labels return newC, nil } // Volumes lists known volumes, using the filter to restrict the range // of volumes returned. func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { var ( volumesOut []*types.Volume danglingOnly = false ) volFilters, err := filters.FromParam(filter) if err != nil { return nil, nil, err } if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { return nil, nil, err } if volFilters.Include("dangling") { if volFilters.ExactMatch("dangling", "true") || volFilters.ExactMatch("dangling", "1") { danglingOnly = true } else if !volFilters.ExactMatch("dangling", "false") && !volFilters.ExactMatch("dangling", "0") { return nil, nil, fmt.Errorf("Invalid filter 'dangling=%s'", volFilters.Get("dangling")) } } volumes, warnings, err := daemon.volumes.List() if err != nil { return nil, nil, err } if volFilters.Include("dangling") { volumes = daemon.volumes.FilterByUsed(volumes, !danglingOnly) } for _, v := range volumes { volumesOut = append(volumesOut, volumeToAPIType(v)) } return volumesOut, warnings, nil } func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { if !ancestorMap[imageID] { for _, id := range getChildren(imageID) { populateImageFilterByParents(ancestorMap, id, getChildren) } ancestorMap[imageID] = 
true } } docker-1.10.3/daemon/list_unix.go000066400000000000000000000005521267010174400166570ustar00rootroot00000000000000// +build linux freebsd package daemon import "github.com/docker/docker/container" // excludeByIsolation is a platform specific helper function to support PS // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { return includeContainer } docker-1.10.3/daemon/list_windows.go000066400000000000000000000010071267010174400173620ustar00rootroot00000000000000package daemon import ( "strings" "github.com/docker/docker/container" ) // excludeByIsolation is a platform specific helper function to support PS // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { i := strings.ToLower(string(container.HostConfig.Isolation)) if i == "" { i = "default" } if !ctx.filters.Match("isolation", i) { return excludeContainer } return includeContainer } docker-1.10.3/daemon/logdrivers_linux.go000066400000000000000000000010211267010174400202300ustar00rootroot00000000000000package daemon import ( // Importing packages here only to make sure their init gets called and // therefore they register themselves to the logdriver factory. 
_ "github.com/docker/docker/daemon/logger/awslogs" _ "github.com/docker/docker/daemon/logger/fluentd" _ "github.com/docker/docker/daemon/logger/gelf" _ "github.com/docker/docker/daemon/logger/journald" _ "github.com/docker/docker/daemon/logger/jsonfilelog" _ "github.com/docker/docker/daemon/logger/splunk" _ "github.com/docker/docker/daemon/logger/syslog" ) docker-1.10.3/daemon/logdrivers_windows.go000066400000000000000000000005041267010174400205700ustar00rootroot00000000000000package daemon import ( // Importing packages here only to make sure their init gets called and // therefore they register themselves to the logdriver factory. _ "github.com/docker/docker/daemon/logger/awslogs" _ "github.com/docker/docker/daemon/logger/jsonfilelog" _ "github.com/docker/docker/daemon/logger/splunk" ) docker-1.10.3/daemon/logger/000077500000000000000000000000001267010174400155675ustar00rootroot00000000000000docker-1.10.3/daemon/logger/awslogs/000077500000000000000000000000001267010174400172465ustar00rootroot00000000000000docker-1.10.3/daemon/logger/awslogs/cloudwatchlogs.go000066400000000000000000000270721267010174400226270ustar00rootroot00000000000000// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs package awslogs import ( "errors" "fmt" "os" "runtime" "sort" "strings" "sync" "time" "github.com/Sirupsen/logrus" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/dockerversion" ) const ( name = "awslogs" regionKey = "awslogs-region" regionEnvKey = "AWS_REGION" logGroupKey = "awslogs-group" logStreamKey = "awslogs-stream" batchPublishFrequency = 5 * time.Second // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 
26 maximumBytesPerPut = 1048576 maximumLogEventsPerPut = 10000 // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html maximumBytesPerEvent = 262144 - perEventBytes resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" userAgentHeader = "User-Agent" ) type logStream struct { logStreamName string logGroupName string client api messages chan *logger.Message lock sync.RWMutex closed bool sequenceToken *string } type api interface { CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } type regionFinder interface { Region() (string, error) } type byTimestamp []*cloudwatchlogs.InputLogEvent // init registers the awslogs driver and sets the default region, if provided func init() { if os.Getenv(regionEnvKey) != "" { defaults.DefaultConfig.Region = aws.String(os.Getenv(regionEnvKey)) } if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, // awslogs-group, and awslogs-stream. When available, configuration is // also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, // AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and // the EC2 Instance Metadata Service. 
func New(ctx logger.Context) (logger.Logger, error) { logGroupName := ctx.Config[logGroupKey] logStreamName := ctx.ContainerID if ctx.Config[logStreamKey] != "" { logStreamName = ctx.Config[logStreamKey] } client, err := newAWSLogsClient(ctx) if err != nil { return nil, err } containerStream := &logStream{ logStreamName: logStreamName, logGroupName: logGroupName, client: client, messages: make(chan *logger.Message, 4096), } err = containerStream.create() if err != nil { return nil, err } go containerStream.collectBatch() return containerStream, nil } // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() regionFinder { return ec2metadata.New(nil) } // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
func newAWSLogsClient(ctx logger.Context) (api, error) { config := defaults.DefaultConfig if ctx.Config[regionKey] != "" { config = defaults.DefaultConfig.Merge(&aws.Config{ Region: aws.String(ctx.Config[regionKey]), }) } if config.Region == nil || *config.Region == "" { logrus.Info("Trying to get region from EC2 Metadata") ec2MetadataClient := newRegionFinder() region, err := ec2MetadataClient.Region() if err != nil { logrus.WithFields(logrus.Fields{ "error": err, }).Error("Could not get region from EC2 metadata, environment, or log option") return nil, errors.New("Cannot determine region for awslogs driver") } config.Region = ®ion } logrus.WithFields(logrus.Fields{ "region": *config.Region, }).Debug("Created awslogs client") client := cloudwatchlogs.New(config) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", Fn: func(r *request.Request) { currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) r.HTTPRequest.Header.Set(userAgentHeader, fmt.Sprintf("Docker %s (%s) %s", dockerversion.Version, runtime.GOOS, currentAgent)) }, }) return client, nil } // Name returns the name of the awslogs logging driver func (l *logStream) Name() string { return name } // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() if !l.closed { l.messages <- msg } return nil } // Close closes the instance of the awslogs logging driver func (l *logStream) Close() error { l.lock.Lock() defer l.lock.Unlock() if !l.closed { close(l.messages) } l.closed = true return nil } // create creates a log stream for the instance of the awslogs logging driver func (l *logStream) create() error { input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } _, err := l.client.CreateLogStream(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { fields := 
logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, } if awsErr.Code() == resourceAlreadyExistsCode { // Allow creation to succeed logrus.WithFields(fields).Info("Log stream already exists") return nil } logrus.WithFields(fields).Error("Failed to create log stream") } } return err } // newTicker is used for time-based batching. newTicker is a variable such // that the implementation can be swapped out for unit tests. var newTicker = func(freq time.Duration) *time.Ticker { return time.NewTicker(freq) } // collectBatch executes as a goroutine to perform batching of log events for // submission to the log stream. Batching is performed on time- and size- // bases. Time-based batching occurs at a 5 second interval (defined in the // batchPublishFrequency const). Size-based batching is performed on the // maximum number of events per batch (defined in maximumLogEventsPerPut) and // the maximum number of total bytes in a batch (defined in // maximumBytesPerPut). Log messages are split by the maximum bytes per event // (defined in maximumBytesPerEvent). There is a fixed per-event byte overhead // (defined in perEventBytes) which is accounted for in split- and batch- // calculations. 
func (l *logStream) collectBatch() {
	// NOTE(review): the ticker is never Stop()ed, so it keeps firing after
	// this goroutine returns.  Unit tests substitute hand-built time.Ticker
	// values via newTicker, on which Stop may not be safe — confirm before
	// adding a defer timer.Stop() here.
	timer := newTicker(batchPublishFrequency)
	var events []*cloudwatchlogs.InputLogEvent
	bytes := 0 // running total of message bytes + per-event overhead in the batch
	for {
		select {
		case <-timer.C:
			// Time-based flush; publishBatch is a no-op for an empty batch.
			l.publishBatch(events)
			events = events[:0]
			bytes = 0
		case msg, more := <-l.messages:
			if !more {
				// Channel closed by Close(): flush what remains and exit.
				l.publishBatch(events)
				return
			}
			unprocessedLine := msg.Line
			for len(unprocessedLine) > 0 {
				// Split line length so it does not exceed the maximum
				lineBytes := len(unprocessedLine)
				if lineBytes > maximumBytesPerEvent {
					lineBytes = maximumBytesPerEvent
				}
				line := unprocessedLine[:lineBytes]
				unprocessedLine = unprocessedLine[lineBytes:]
				if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) {
					// Publish an existing batch if it's already over the maximum number of events or if adding this
					// event would push it over the maximum number of total bytes.
					l.publishBatch(events)
					events = events[:0]
					bytes = 0
				}
				// CloudWatch timestamps are in milliseconds.
				events = append(events, &cloudwatchlogs.InputLogEvent{
					Message:   aws.String(string(line)),
					Timestamp: aws.Int64(msg.Timestamp.UnixNano() / int64(time.Millisecond)),
				})
				bytes += (lineBytes + perEventBytes)
			}
		}
	}
}

// publishBatch calls PutLogEvents for a given set of InputLogEvents,
// accounting for sequencing requirements (each request must reference the
// sequence token returned by the previous request).
func (l *logStream) publishBatch(events []*cloudwatchlogs.InputLogEvent) { if len(events) == 0 { return } sort.Sort(byTimestamp(events)) nextSequenceToken, err := l.putLogEvents(events, l.sequenceToken) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dataAlreadyAcceptedCode { // already submitted, just grab the correct sequence token parts := strings.Split(awsErr.Message(), " ") nextSequenceToken = &parts[len(parts)-1] logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Info("Data already accepted, ignoring error") err = nil } else if awsErr.Code() == invalidSequenceTokenCode { // sequence code is bad, grab the correct one and retry parts := strings.Split(awsErr.Message(), " ") token := parts[len(parts)-1] nextSequenceToken, err = l.putLogEvents(events, &token) } } } if err != nil { logrus.Error(err) } else { l.sequenceToken = nextSequenceToken } } // putLogEvents wraps the PutLogEvents API func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: events, SequenceToken: sequenceToken, LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), } resp, err := l.client.PutLogEvents(input) if err != nil { if awsErr, ok := err.(awserr.Error); ok { logrus.WithFields(logrus.Fields{ "errorCode": awsErr.Code(), "message": awsErr.Message(), "origError": awsErr.OrigErr(), "logGroupName": l.logGroupName, "logStreamName": l.logStreamName, }).Error("Failed to put log events") } return nil, err } return resp.NextSequenceToken, nil } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, // awslogs-group, and awslogs-stream func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: case regionKey: default: return 
fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) } } if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } return nil } // Len returns the length of a byTimestamp slice. Len is required by the // sort.Interface interface. func (slice byTimestamp) Len() int { return len(slice) } // Less compares two values in a byTimestamp slice by Timestamp. Less is // required by the sort.Interface interface. func (slice byTimestamp) Less(i, j int) bool { iTimestamp, jTimestamp := int64(0), int64(0) if slice != nil && slice[i].Timestamp != nil { iTimestamp = *slice[i].Timestamp } if slice != nil && slice[j].Timestamp != nil { jTimestamp = *slice[j].Timestamp } return iTimestamp < jTimestamp } // Swap swaps two values in a byTimestamp slice with each other. Swap is // required by the sort.Interface interface. func (slice byTimestamp) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } docker-1.10.3/daemon/logger/awslogs/cloudwatchlogs_test.go000066400000000000000000000411511267010174400236600ustar00rootroot00000000000000package awslogs import ( "errors" "fmt" "net/http" "runtime" "strings" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/dockerversion" ) const ( groupName = "groupName" streamName = "streamName" sequenceToken = "sequenceToken" nextSequenceToken = "nextSequenceToken" logline = "this is a log line" ) func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { ctx := logger.Context{ Config: map[string]string{ regionKey: "us-east-1", }, } client, err := newAWSLogsClient(ctx) if err != nil { t.Fatal(err) } realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) if !ok { t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") } buildHandlerList := realClient.Handlers.Build request := 
&request.Request{ HTTPRequest: &http.Request{ Header: http.Header{}, }, } buildHandlerList.Run(request) expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s", dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion) userAgent := request.HTTPRequest.Header.Get("User-Agent") if userAgent != expectedUserAgentString { t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"", expectedUserAgentString, userAgent) } } func TestNewAWSLogsClientRegionDetect(t *testing.T) { ctx := logger.Context{ Config: map[string]string{}, } mockMetadata := newMockMetadataClient() newRegionFinder = func() regionFinder { return mockMetadata } mockMetadata.regionResult <- ®ionResult{ successResult: "us-east-1", } _, err := newAWSLogsClient(ctx) if err != nil { t.Fatal(err) } } func TestCreateSuccess(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, } mockClient.createLogStreamResult <- &createLogStreamResult{} err := stream.create() if err != nil { t.Errorf("Received unexpected err: %v\n", err) } argument := <-mockClient.createLogStreamArgument if argument.LogGroupName == nil { t.Fatal("Expected non-nil LogGroupName") } if *argument.LogGroupName != groupName { t.Errorf("Expected LogGroupName to be %s", groupName) } if argument.LogStreamName == nil { t.Fatal("Expected non-nil LogGroupName") } if *argument.LogStreamName != streamName { t.Errorf("Expected LogStreamName to be %s", streamName) } } func TestCreateError(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, } mockClient.createLogStreamResult <- &createLogStreamResult{ errorResult: errors.New("Error!"), } err := stream.create() if err == nil { t.Fatal("Expected non-nil err") } } func TestCreateAlreadyExists(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, } mockClient.createLogStreamResult <- &createLogStreamResult{ errorResult: 
awserr.New(resourceAlreadyExistsCode, "", nil), } err := stream.create() if err != nil { t.Fatal("Expected nil err") } } func TestPublishBatchSuccess(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), } mockClient.putLogEventsResult <- &putLogEventsResult{ successResult: &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: aws.String(nextSequenceToken), }, } events := []*cloudwatchlogs.InputLogEvent{ { Message: aws.String(logline), }, } stream.publishBatch(events) if stream.sequenceToken == nil { t.Fatal("Expected non-nil sequenceToken") } if *stream.sequenceToken != nextSequenceToken { t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) } argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if argument.SequenceToken == nil { t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") } if *argument.SequenceToken != sequenceToken { t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) } if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) } if argument.LogEvents[0] != events[0] { t.Error("Expected event to equal input") } } func TestPublishBatchError(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), } mockClient.putLogEventsResult <- &putLogEventsResult{ errorResult: errors.New("Error!"), } events := []*cloudwatchlogs.InputLogEvent{ { Message: aws.String(logline), }, } stream.publishBatch(events) if stream.sequenceToken == nil { t.Fatal("Expected non-nil sequenceToken") } if *stream.sequenceToken != sequenceToken { t.Errorf("Expected sequenceToken to be %s, but was 
%s", sequenceToken, *stream.sequenceToken) } } func TestPublishBatchInvalidSeqSuccess(t *testing.T) { mockClient := newMockClientBuffered(2) stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), } mockClient.putLogEventsResult <- &putLogEventsResult{ errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), } mockClient.putLogEventsResult <- &putLogEventsResult{ successResult: &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: aws.String(nextSequenceToken), }, } events := []*cloudwatchlogs.InputLogEvent{ { Message: aws.String(logline), }, } stream.publishBatch(events) if stream.sequenceToken == nil { t.Fatal("Expected non-nil sequenceToken") } if *stream.sequenceToken != nextSequenceToken { t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) } argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if argument.SequenceToken == nil { t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") } if *argument.SequenceToken != sequenceToken { t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) } if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) } if argument.LogEvents[0] != events[0] { t.Error("Expected event to equal input") } argument = <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if argument.SequenceToken == nil { t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") } if *argument.SequenceToken != "token" { t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) } if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) } if 
argument.LogEvents[0] != events[0] { t.Error("Expected event to equal input") } } func TestPublishBatchAlreadyAccepted(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), } mockClient.putLogEventsResult <- &putLogEventsResult{ errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), } events := []*cloudwatchlogs.InputLogEvent{ { Message: aws.String(logline), }, } stream.publishBatch(events) if stream.sequenceToken == nil { t.Fatal("Expected non-nil sequenceToken") } if *stream.sequenceToken != "token" { t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) } argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if argument.SequenceToken == nil { t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") } if *argument.SequenceToken != sequenceToken { t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) } if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) } if argument.LogEvents[0] != events[0] { t.Error("Expected event to equal input") } } func TestCollectBatchSimple(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), messages: make(chan *logger.Message), } mockClient.putLogEventsResult <- &putLogEventsResult{ successResult: &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: aws.String(nextSequenceToken), }, } ticks := make(chan time.Time) newTicker = func(_ time.Duration) *time.Ticker { return &time.Ticker{ C: ticks, } } go stream.collectBatch() stream.Log(&logger.Message{ Line: []byte(logline), Timestamp: time.Time{}, }) ticks <- time.Time{} stream.Close() 
argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) } if *argument.LogEvents[0].Message != logline { t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) } } func TestCollectBatchTicker(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), messages: make(chan *logger.Message), } mockClient.putLogEventsResult <- &putLogEventsResult{ successResult: &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: aws.String(nextSequenceToken), }, } ticks := make(chan time.Time) newTicker = func(_ time.Duration) *time.Ticker { return &time.Ticker{ C: ticks, } } go stream.collectBatch() stream.Log(&logger.Message{ Line: []byte(logline + " 1"), Timestamp: time.Time{}, }) stream.Log(&logger.Message{ Line: []byte(logline + " 2"), Timestamp: time.Time{}, }) ticks <- time.Time{} // Verify first batch argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if len(argument.LogEvents) != 2 { t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) } if *argument.LogEvents[0].Message != logline+" 1" { t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message) } if *argument.LogEvents[1].Message != logline+" 2" { t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[0].Message) } stream.Log(&logger.Message{ Line: []byte(logline + " 3"), Timestamp: time.Time{}, }) ticks <- time.Time{} argument = <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 elements, 
but contains %d", len(argument.LogEvents)) } if *argument.LogEvents[0].Message != logline+" 3" { t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message) } stream.Close() } func TestCollectBatchClose(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), messages: make(chan *logger.Message), } mockClient.putLogEventsResult <- &putLogEventsResult{ successResult: &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: aws.String(nextSequenceToken), }, } var ticks = make(chan time.Time) newTicker = func(_ time.Duration) *time.Ticker { return &time.Ticker{ C: ticks, } } go stream.collectBatch() stream.Log(&logger.Message{ Line: []byte(logline), Timestamp: time.Time{}, }) // no ticks stream.Close() argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) } if *argument.LogEvents[0].Message != logline { t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) } } func TestCollectBatchLineSplit(t *testing.T) { mockClient := newMockClient() stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), messages: make(chan *logger.Message), } mockClient.putLogEventsResult <- &putLogEventsResult{ successResult: &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: aws.String(nextSequenceToken), }, } var ticks = make(chan time.Time) newTicker = func(_ time.Duration) *time.Ticker { return &time.Ticker{ C: ticks, } } go stream.collectBatch() longline := strings.Repeat("A", maximumBytesPerEvent) stream.Log(&logger.Message{ Line: []byte(longline + "B"), Timestamp: time.Time{}, }) // no ticks stream.Close() argument := 
<-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if len(argument.LogEvents) != 2 { t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) } if *argument.LogEvents[0].Message != longline { t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) } if *argument.LogEvents[1].Message != "B" { t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) } } func TestCollectBatchMaxEvents(t *testing.T) { mockClient := newMockClientBuffered(1) stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: aws.String(sequenceToken), messages: make(chan *logger.Message), } mockClient.putLogEventsResult <- &putLogEventsResult{ successResult: &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: aws.String(nextSequenceToken), }, } var ticks = make(chan time.Time) newTicker = func(_ time.Duration) *time.Ticker { return &time.Ticker{ C: ticks, } } go stream.collectBatch() line := "A" for i := 0; i <= maximumLogEventsPerPut; i++ { stream.Log(&logger.Message{ Line: []byte(line), Timestamp: time.Time{}, }) } // no ticks stream.Close() argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if len(argument.LogEvents) != maximumLogEventsPerPut { t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents)) } argument = <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents)) } } func TestCollectBatchMaxTotalBytes(t *testing.T) { mockClient := newMockClientBuffered(1) stream := &logStream{ client: mockClient, logGroupName: groupName, logStreamName: streamName, sequenceToken: 
aws.String(sequenceToken), messages: make(chan *logger.Message), } mockClient.putLogEventsResult <- &putLogEventsResult{ successResult: &cloudwatchlogs.PutLogEventsOutput{ NextSequenceToken: aws.String(nextSequenceToken), }, } var ticks = make(chan time.Time) newTicker = func(_ time.Duration) *time.Ticker { return &time.Ticker{ C: ticks, } } go stream.collectBatch() longline := strings.Repeat("A", maximumBytesPerPut) stream.Log(&logger.Message{ Line: []byte(longline + "B"), Timestamp: time.Time{}, }) // no ticks stream.Close() argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } bytes := 0 for _, event := range argument.LogEvents { bytes += len(*event.Message) } if bytes > maximumBytesPerPut { t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes) } argument = <-mockClient.putLogEventsArgument if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) } message := *argument.LogEvents[0].Message if message[len(message)-1:] != "B" { t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:]) } } docker-1.10.3/daemon/logger/awslogs/cwlogsiface_mock_test.go000066400000000000000000000043651267010174400241430ustar00rootroot00000000000000package awslogs import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" type mockcwlogsclient struct { createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput createLogStreamResult chan *createLogStreamResult putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput putLogEventsResult chan *putLogEventsResult } type createLogStreamResult struct { successResult *cloudwatchlogs.CreateLogStreamOutput errorResult error } type putLogEventsResult struct { successResult *cloudwatchlogs.PutLogEventsOutput errorResult error } func newMockClient() *mockcwlogsclient { return &mockcwlogsclient{ createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), 
createLogStreamResult: make(chan *createLogStreamResult, 1), putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), putLogEventsResult: make(chan *putLogEventsResult, 1), } } func newMockClientBuffered(buflen int) *mockcwlogsclient { return &mockcwlogsclient{ createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), createLogStreamResult: make(chan *createLogStreamResult, buflen), putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), putLogEventsResult: make(chan *putLogEventsResult, buflen), } } func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { m.createLogStreamArgument <- input output := <-m.createLogStreamResult return output.successResult, output.errorResult } func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { m.putLogEventsArgument <- input output := <-m.putLogEventsResult return output.successResult, output.errorResult } type mockmetadataclient struct { regionResult chan *regionResult } type regionResult struct { successResult string errorResult error } func newMockMetadataClient() *mockmetadataclient { return &mockmetadataclient{ regionResult: make(chan *regionResult, 1), } } func (m *mockmetadataclient) Region() (string, error) { output := <-m.regionResult return output.successResult, output.errorResult } func test() { _ = &logStream{ client: newMockClient(), } } docker-1.10.3/daemon/logger/context.go000066400000000000000000000053751267010174400176140ustar00rootroot00000000000000package logger import ( "fmt" "os" "strings" "time" ) // Context provides enough information for a logging driver to do its function. 
type Context struct {
	Config              map[string]string // driver-specific options (from --log-opt)
	ContainerID         string
	ContainerName       string
	ContainerEntrypoint string
	ContainerArgs       []string
	ContainerImageID    string
	ContainerImageName  string
	ContainerCreated    time.Time
	ContainerEnv        []string
	ContainerLabels     map[string]string
	LogPath             string
}

// ExtraAttributes returns the user-defined extra attributes (labels,
// environment variables) in key-value format. This can be used by log drivers
// that support metadata to add more context to a log.
//
// Config["labels"] and Config["env"] are comma-separated allow-lists; only
// listed keys that are actually present on the container are returned.  An
// optional keyMod function rewrites each emitted key (e.g. to sanitize it
// for the target system).
func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string {
	extra := make(map[string]string)
	labels, ok := ctx.Config["labels"]
	if ok && len(labels) > 0 {
		for _, l := range strings.Split(labels, ",") {
			if v, ok := ctx.ContainerLabels[l]; ok {
				if keyMod != nil {
					l = keyMod(l)
				}
				extra[l] = v
			}
		}
	}

	env, ok := ctx.Config["env"]
	if ok && len(env) > 0 {
		// Index KEY=VALUE pairs once; entries without '=' are skipped.
		envMapping := make(map[string]string)
		for _, e := range ctx.ContainerEnv {
			if kv := strings.SplitN(e, "=", 2); len(kv) == 2 {
				envMapping[kv[0]] = kv[1]
			}
		}
		for _, l := range strings.Split(env, ",") {
			if v, ok := envMapping[l]; ok {
				if keyMod != nil {
					l = keyMod(l)
				}
				extra[l] = v
			}
		}
	}

	return extra
}

// Hostname returns the hostname from the underlying OS.
func (ctx *Context) Hostname() (string, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return "", fmt.Errorf("logger: can not resolve hostname: %v", err)
	}
	return hostname, nil
}

// Command returns the command that the container being logged was
// started with. The Entrypoint is prepended to the container
// arguments.
func (ctx *Context) Command() string {
	// Build the full argv in one expression instead of appending in a loop.
	terms := append([]string{ctx.ContainerEntrypoint}, ctx.ContainerArgs...)
	return strings.Join(terms, " ")
}

// ID Returns the Container ID shortened to 12 characters.
func (ctx *Context) ID() string {
	// NOTE(review): panics if ContainerID has fewer than 12 characters;
	// callers appear to always supply full-length IDs — confirm.
	return ctx.ContainerID[:12]
}

// FullID is an alias of ContainerID.
func (ctx *Context) FullID() string { return ctx.ContainerID } // Name returns the ContainerName without a preceding '/'. func (ctx *Context) Name() string { return ctx.ContainerName[1:] } // ImageID returns the ContainerImageID shortened to 12 characters. func (ctx *Context) ImageID() string { return ctx.ContainerImageID[:12] } // ImageFullID is an alias of ContainerImageID. func (ctx *Context) ImageFullID() string { return ctx.ContainerImageID } // ImageName is an alias of ContainerImageName func (ctx *Context) ImageName() string { return ctx.ContainerImageName } docker-1.10.3/daemon/logger/copier.go000066400000000000000000000034721267010174400174050ustar00rootroot00000000000000package logger import ( "bufio" "bytes" "io" "sync" "time" "github.com/Sirupsen/logrus" ) // Copier can copy logs from specified sources to Logger and attach // ContainerID and Timestamp. // Writes are concurrent, so you need implement some sync in your logger type Copier struct { // cid is the container id for which we are copying logs cid string // srcs is map of name -> reader pairs, for example "stdout", "stderr" srcs map[string]io.Reader dst Logger copyJobs sync.WaitGroup closed chan struct{} } // NewCopier creates a new Copier func NewCopier(cid string, srcs map[string]io.Reader, dst Logger) *Copier { return &Copier{ cid: cid, srcs: srcs, dst: dst, closed: make(chan struct{}), } } // Run starts logs copying func (c *Copier) Run() { for src, w := range c.srcs { c.copyJobs.Add(1) go c.copySrc(src, w) } } func (c *Copier) copySrc(name string, src io.Reader) { defer c.copyJobs.Done() reader := bufio.NewReader(src) for { select { case <-c.closed: return default: line, err := reader.ReadBytes('\n') line = bytes.TrimSuffix(line, []byte{'\n'}) // ReadBytes can return full or partial output even when it failed. // e.g. it can return a full entry and EOF. 
if err == nil || len(line) > 0 { if logErr := c.dst.Log(&Message{ContainerID: c.cid, Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil { logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr) } } if err != nil { if err != io.EOF { logrus.Errorf("Error scanning log stream: %s", err) } return } } } } // Wait waits until all copying is done func (c *Copier) Wait() { c.copyJobs.Wait() } // Close closes the copier func (c *Copier) Close() { select { case <-c.closed: default: close(c.closed) } } docker-1.10.3/daemon/logger/copier_test.go000066400000000000000000000061101267010174400204340ustar00rootroot00000000000000package logger import ( "bytes" "encoding/json" "io" "testing" "time" ) type TestLoggerJSON struct { *json.Encoder delay time.Duration } func (l *TestLoggerJSON) Log(m *Message) error { if l.delay > 0 { time.Sleep(l.delay) } return l.Encode(m) } func (l *TestLoggerJSON) Close() error { return nil } func (l *TestLoggerJSON) Name() string { return "json" } type TestLoggerText struct { *bytes.Buffer } func (l *TestLoggerText) Log(m *Message) error { _, err := l.WriteString(m.ContainerID + " " + m.Source + " " + string(m.Line) + "\n") return err } func (l *TestLoggerText) Close() error { return nil } func (l *TestLoggerText) Name() string { return "text" } func TestCopier(t *testing.T) { stdoutLine := "Line that thinks that it is log line from docker stdout" stderrLine := "Line that thinks that it is log line from docker stderr" var stdout bytes.Buffer var stderr bytes.Buffer for i := 0; i < 30; i++ { if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { t.Fatal(err) } if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { t.Fatal(err) } } var jsonBuf bytes.Buffer jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" c := NewCopier(cid, map[string]io.Reader{ "stdout": &stdout, "stderr": &stderr, }, jsonLog) 
c.Run() wait := make(chan struct{}) go func() { c.Wait() close(wait) }() select { case <-time.After(1 * time.Second): t.Fatal("Copier failed to do its work in 1 second") case <-wait: } dec := json.NewDecoder(&jsonBuf) for { var msg Message if err := dec.Decode(&msg); err != nil { if err == io.EOF { break } t.Fatal(err) } if msg.Source != "stdout" && msg.Source != "stderr" { t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") } if msg.ContainerID != cid { t.Fatalf("Wrong ContainerID: %q, expected %q", msg.ContainerID, cid) } if msg.Source == "stdout" { if string(msg.Line) != stdoutLine { t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stdoutLine) } } if msg.Source == "stderr" { if string(msg.Line) != stderrLine { t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stderrLine) } } } } func TestCopierSlow(t *testing.T) { stdoutLine := "Line that thinks that it is log line from docker stdout" var stdout bytes.Buffer for i := 0; i < 30; i++ { if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { t.Fatal(err) } } var jsonBuf bytes.Buffer //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" c := NewCopier(cid, map[string]io.Reader{"stdout": &stdout}, jsonLog) c.Run() wait := make(chan struct{}) go func() { c.Wait() close(wait) }() <-time.After(150 * time.Millisecond) c.Close() select { case <-time.After(200 * time.Millisecond): t.Fatalf("failed to exit in time after the copier is closed") case <-wait: } } docker-1.10.3/daemon/logger/factory.go000066400000000000000000000045301267010174400175670ustar00rootroot00000000000000package logger import ( "fmt" "sync" ) // Creator builds a logging driver instance with given context. type Creator func(Context) (Logger, error) // LogOptValidator checks the options specific to the underlying // logging implementation. 
type LogOptValidator func(cfg map[string]string) error type logdriverFactory struct { registry map[string]Creator optValidator map[string]LogOptValidator m sync.Mutex } func (lf *logdriverFactory) register(name string, c Creator) error { lf.m.Lock() defer lf.m.Unlock() if _, ok := lf.registry[name]; ok { return fmt.Errorf("logger: log driver named '%s' is already registered", name) } lf.registry[name] = c return nil } func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { lf.m.Lock() defer lf.m.Unlock() if _, ok := lf.optValidator[name]; ok { return fmt.Errorf("logger: log validator named '%s' is already registered", name) } lf.optValidator[name] = l return nil } func (lf *logdriverFactory) get(name string) (Creator, error) { lf.m.Lock() defer lf.m.Unlock() c, ok := lf.registry[name] if !ok { return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) } return c, nil } func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { lf.m.Lock() defer lf.m.Unlock() c, _ := lf.optValidator[name] return c } var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance // RegisterLogDriver registers the given logging driver builder with given logging // driver name. func RegisterLogDriver(name string, c Creator) error { return factory.register(name, c) } // RegisterLogOptValidator registers the logging option validator with // the given logging driver name. func RegisterLogOptValidator(name string, l LogOptValidator) error { return factory.registerLogOptValidator(name, l) } // GetLogDriver provides the logging driver builder for a logging driver name. func GetLogDriver(name string) (Creator, error) { return factory.get(name) } // ValidateLogOpts checks the options for the given log driver. The // options supported are specific to the LogDriver implementation. 
func ValidateLogOpts(name string, cfg map[string]string) error { l := factory.getLogOptValidator(name) if l != nil { return l(cfg) } return nil } docker-1.10.3/daemon/logger/fluentd/000077500000000000000000000000001267010174400172305ustar00rootroot00000000000000docker-1.10.3/daemon/logger/fluentd/fluentd.go000066400000000000000000000065451267010174400212320ustar00rootroot00000000000000// Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. package fluentd import ( "fmt" "math" "net" "strconv" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/fluent/fluent-logger-golang/fluent" ) type fluentd struct { tag string containerID string containerName string writer *fluent.Fluent extra map[string]string } const ( name = "fluentd" defaultHostName = "localhost" defaultPort = 24224 defaultTagPrefix = "docker" ) func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a fluentd logger using the configuration passed in on // the context. Supported context configuration variables are // fluentd-address & fluentd-tag. 
func New(ctx logger.Context) (logger.Logger, error) { host, port, err := parseAddress(ctx.Config["fluentd-address"]) if err != nil { return nil, err } tag, err := loggerutils.ParseLogTag(ctx, "docker.{{.ID}}") if err != nil { return nil, err } extra := ctx.ExtraAttributes(nil) logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s, extra:%v.", ctx.ContainerID, host, port, tag, extra) // logger tries to reconnect 2**32 - 1 times // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32}) if err != nil { return nil, err } return &fluentd{ tag: tag, containerID: ctx.ContainerID, containerName: ctx.ContainerName, writer: log, extra: extra, }, nil } func (f *fluentd) Log(msg *logger.Message) error { data := map[string]string{ "container_id": f.containerID, "container_name": f.containerName, "source": msg.Source, "log": string(msg.Line), } for k, v := range f.extra { data[k] = v } // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. return f.writer.PostWithTime(f.tag, msg.Timestamp, data) } func (f *fluentd) Close() error { return f.writer.Close() } func (f *fluentd) Name() string { return name } // ValidateLogOpt looks for fluentd specific log options fluentd-address & fluentd-tag. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "fluentd-address": case "fluentd-tag": case "tag": case "labels": case "env": default: return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) } } if _, _, err := parseAddress(cfg["fluentd-address"]); err != nil { return err } return nil } func parseAddress(address string) (string, int, error) { if address == "" { return defaultHostName, defaultPort, nil } host, port, err := net.SplitHostPort(address) if err != nil { if !strings.Contains(err.Error(), "missing port in address") { return "", 0, fmt.Errorf("invalid fluentd-address %s: %s", address, err) } return host, defaultPort, nil } portnum, err := strconv.Atoi(port) if err != nil { return "", 0, fmt.Errorf("invalid fluentd-address %s: %s", address, err) } return host, portnum, nil } docker-1.10.3/daemon/logger/gelf/000077500000000000000000000000001267010174400165045ustar00rootroot00000000000000docker-1.10.3/daemon/logger/gelf/gelf.go000066400000000000000000000074751267010174400177650ustar00rootroot00000000000000// +build linux // Package gelf provides the log driver for forwarding server logs to // endpoints that support the Graylog Extended Log Format. package gelf import ( "bytes" "fmt" "net" "net/url" "time" "github.com/Graylog2/go-gelf/gelf" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/urlutil" ) const name = "gelf" type gelfLogger struct { writer *gelf.Writer ctx logger.Context hostname string extra map[string]interface{} } func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a gelf logger using the configuration passed in on the // context. Supported context configuration variables are // gelf-address, & gelf-tag. 
func New(ctx logger.Context) (logger.Logger, error) { // parse gelf address address, err := parseAddress(ctx.Config["gelf-address"]) if err != nil { return nil, err } // collect extra data for GELF message hostname, err := ctx.Hostname() if err != nil { return nil, fmt.Errorf("gelf: cannot access hostname to set source field") } // remove trailing slash from container name containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") // parse log tag tag, err := loggerutils.ParseLogTag(ctx, "") if err != nil { return nil, err } extra := map[string]interface{}{ "_container_id": ctx.ContainerID, "_container_name": string(containerName), "_image_id": ctx.ContainerImageID, "_image_name": ctx.ContainerImageName, "_command": ctx.Command(), "_tag": tag, "_created": ctx.ContainerCreated, } extraAttrs := ctx.ExtraAttributes(func(key string) string { if key[0] == '_' { return key } return "_" + key }) for k, v := range extraAttrs { extra[k] = v } // create new gelfWriter gelfWriter, err := gelf.NewWriter(address) if err != nil { return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) } return &gelfLogger{ writer: gelfWriter, ctx: ctx, hostname: hostname, extra: extra, }, nil } func (s *gelfLogger) Log(msg *logger.Message) error { level := gelf.LOG_INFO if msg.Source == "stderr" { level = gelf.LOG_ERR } m := gelf.Message{ Version: "1.1", Host: s.hostname, Short: string(msg.Line), TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, Level: level, Extra: s.extra, } if err := s.writer.WriteMessage(&m); err != nil { return fmt.Errorf("gelf: cannot send GELF message: %v", err) } return nil } func (s *gelfLogger) Close() error { return s.writer.Close() } func (s *gelfLogger) Name() string { return name } // ValidateLogOpt looks for gelf specific log options gelf-address, & // gelf-tag. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "gelf-address": case "gelf-tag": case "tag": case "labels": case "env": default: return fmt.Errorf("unknown log opt '%s' for gelf log driver", key) } } if _, err := parseAddress(cfg["gelf-address"]); err != nil { return err } return nil } func parseAddress(address string) (string, error) { if address == "" { return "", nil } if !urlutil.IsTransportURL(address) { return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address) } url, err := url.Parse(address) if err != nil { return "", err } // we support only udp if url.Scheme != "udp" { return "", fmt.Errorf("gelf: endpoint needs to be UDP") } // get host and port if _, _, err = net.SplitHostPort(url.Host); err != nil { return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") } return url.Host, nil } docker-1.10.3/daemon/logger/gelf/gelf_unsupported.go000066400000000000000000000000371267010174400224200ustar00rootroot00000000000000// +build !linux package gelf docker-1.10.3/daemon/logger/journald/000077500000000000000000000000001267010174400174055ustar00rootroot00000000000000docker-1.10.3/daemon/logger/journald/journald.go000066400000000000000000000042351267010174400215560ustar00rootroot00000000000000// +build linux // Package journald provides the log driver for forwarding server logs // to endpoints that receive the systemd format. 
package journald import ( "fmt" "strings" "sync" "github.com/Sirupsen/logrus" "github.com/coreos/go-systemd/journal" "github.com/docker/docker/daemon/logger" ) const name = "journald" type journald struct { vars map[string]string // additional variables and values to send to the journal along with the log message readers readerList } type readerList struct { mu sync.Mutex readers map[*logger.LogWatcher]*logger.LogWatcher } func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a journald logger using the configuration passed in on // the context. func New(ctx logger.Context) (logger.Logger, error) { if !journal.Enabled() { return nil, fmt.Errorf("journald is not enabled on this host") } // Strip a leading slash so that people can search for // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo. name := ctx.ContainerName if name[0] == '/' { name = name[1:] } vars := map[string]string{ "CONTAINER_ID": ctx.ContainerID[:12], "CONTAINER_ID_FULL": ctx.ContainerID, "CONTAINER_NAME": name, } extraAttrs := ctx.ExtraAttributes(strings.ToTitle) for k, v := range extraAttrs { vars[k] = v } return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil } // We don't actually accept any options, but we have to supply a callback for // the factory to pass the (probably empty) configuration map to. 
func validateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "labels": case "env": default: return fmt.Errorf("unknown log opt '%s' for journald log driver", key) } } return nil } func (s *journald) Log(msg *logger.Message) error { if msg.Source == "stderr" { return journal.Send(string(msg.Line), journal.PriErr, s.vars) } return journal.Send(string(msg.Line), journal.PriInfo, s.vars) } func (s *journald) Name() string { return name } docker-1.10.3/daemon/logger/journald/journald_unsupported.go000066400000000000000000000000751267010174400242240ustar00rootroot00000000000000// +build !linux package journald type journald struct { } docker-1.10.3/daemon/logger/journald/read.go000066400000000000000000000201241267010174400206460ustar00rootroot00000000000000// +build linux,cgo,!static_build,journald package journald // #cgo pkg-config: libsystemd-journal // #include // #include // #include // #include // #include // #include // #include // #include // #include // //static int get_message(sd_journal *j, const char **msg, size_t *length) //{ // int rc; // *msg = NULL; // *length = 0; // rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); // if (rc == 0) { // if (*length > 8) { // (*msg) += 8; // *length -= 8; // } else { // *msg = NULL; // *length = 0; // rc = -ENOENT; // } // } // return rc; //} //static int get_priority(sd_journal *j, int *priority) //{ // const void *data; // size_t i, length; // int rc; // *priority = -1; // rc = sd_journal_get_data(j, "PRIORITY", &data, &length); // if (rc == 0) { // if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { // *priority = 0; // for (i = 9; i < length; i++) { // *priority = *priority * 10 + ((const char *)data)[i] - '0'; // } // if (length > 9) { // rc = 0; // } // } // } // return rc; //} //static int wait_for_data_or_close(sd_journal *j, int pipefd) //{ // struct pollfd fds[2]; // uint64_t when = 0; // int timeout, jevents, i; // struct timespec ts; // uint64_t now; // 
do { // memset(&fds, 0, sizeof(fds)); // fds[0].fd = pipefd; // fds[0].events = POLLHUP; // fds[1].fd = sd_journal_get_fd(j); // if (fds[1].fd < 0) { // return -1; // } // jevents = sd_journal_get_events(j); // if (jevents < 0) { // return -1; // } // fds[1].events = jevents; // sd_journal_get_timeout(j, &when); // if (when == -1) { // timeout = -1; // } else { // clock_gettime(CLOCK_MONOTONIC, &ts); // now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; // timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; // } // i = poll(fds, 2, timeout); // if ((i == -1) && (errno != EINTR)) { // /* An unexpected error. */ // return -1; // } // if (fds[0].revents & POLLHUP) { // /* The close notification pipe was closed. */ // return 0; // } // if (sd_journal_process(j) == SD_JOURNAL_APPEND) { // /* Data, which we might care about, was appended. */ // return 1; // } // } while ((fds[0].revents & POLLHUP) == 0); // return 0; //} import "C" import ( "fmt" "time" "unsafe" "github.com/coreos/go-systemd/journal" "github.com/docker/docker/daemon/logger" ) func (s *journald) Close() error { s.readers.mu.Lock() for reader := range s.readers.readers { reader.Close() } s.readers.mu.Unlock() return nil } func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor string) string { var msg, cursor *C.char var length C.size_t var stamp C.uint64_t var priority C.int // Walk the journal from here forward until we run out of new entries. drain: for { // Try not to send a given entry twice. if oldCursor != "" { ccursor := C.CString(oldCursor) defer C.free(unsafe.Pointer(ccursor)) for C.sd_journal_test_cursor(j, ccursor) > 0 { if C.sd_journal_next(j) <= 0 { break drain } } } // Read and send the logged message, if there is one to read. i := C.get_message(j, &msg, &length) if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { // Read the entry's timestamp. 
if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { break } // Set up the time and text of the entry. timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) line := append(C.GoBytes(unsafe.Pointer(msg), C.int(length)), "\n"...) // Recover the stream name by mapping // from the journal priority back to // the stream that we would have // assigned that value. source := "" if C.get_priority(j, &priority) != 0 { source = "" } else if priority == C.int(journal.PriErr) { source = "stderr" } else if priority == C.int(journal.PriInfo) { source = "stdout" } // Send the log message. cid := s.vars["CONTAINER_ID_FULL"] logWatcher.Msg <- &logger.Message{ContainerID: cid, Line: line, Source: source, Timestamp: timestamp} } // If we're at the end of the journal, we're done (for now). if C.sd_journal_next(j) <= 0 { break } } retCursor := "" if C.sd_journal_get_cursor(j, &cursor) == 0 { retCursor = C.GoString(cursor) C.free(unsafe.Pointer(cursor)) } return retCursor } func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor string) { go func() { // Keep copying journal data out until we're notified to stop. for C.wait_for_data_or_close(j, pfd[0]) == 1 { cursor = s.drainJournal(logWatcher, config, j, cursor) } // Clean up. C.close(pfd[0]) s.readers.mu.Lock() delete(s.readers.readers, logWatcher) s.readers.mu.Unlock() }() s.readers.mu.Lock() s.readers.readers[logWatcher] = logWatcher s.readers.mu.Unlock() // Wait until we're told to stop. select { case <-logWatcher.WatchClose(): // Notify the other goroutine that its work is done. C.close(pfd[1]) } } func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { var j *C.sd_journal var cmatch *C.char var stamp C.uint64_t var sinceUnixMicro uint64 var pipes [2]C.int cursor := "" defer close(logWatcher.Msg) // Get a handle to the journal. 
rc := C.sd_journal_open(&j, C.int(0)) if rc != 0 { logWatcher.Err <- fmt.Errorf("error opening journal") return } defer C.sd_journal_close(j) // Remove limits on the size of data items that we'll retrieve. rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) if rc != 0 { logWatcher.Err <- fmt.Errorf("error setting journal data threshold") return } // Add a match to have the library do the searching for us. cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) defer C.free(unsafe.Pointer(cmatch)) rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) if rc != 0 { logWatcher.Err <- fmt.Errorf("error setting journal match") return } // If we have a cutoff time, convert it to Unix time once. if !config.Since.IsZero() { nano := config.Since.UnixNano() sinceUnixMicro = uint64(nano / 1000) } if config.Tail > 0 { lines := config.Tail // Start at the end of the journal. if C.sd_journal_seek_tail(j) < 0 { logWatcher.Err <- fmt.Errorf("error seeking to end of journal") return } if C.sd_journal_previous(j) < 0 { logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") return } // Walk backward. for lines > 0 { // Stop if the entry time is before our cutoff. // We'll need the entry time if it isn't, so go // ahead and parse it now. if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { break } else { // Compare the timestamp on the entry // to our threshold value. if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { break } } lines-- // If we're at the start of the journal, or // don't need to back up past any more entries, // stop. if lines == 0 || C.sd_journal_previous(j) <= 0 { break } } } else { // Start at the beginning of the journal. if C.sd_journal_seek_head(j) < 0 { logWatcher.Err <- fmt.Errorf("error seeking to start of journal") return } // If we have a cutoff date, fast-forward to it. 
if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 { logWatcher.Err <- fmt.Errorf("error seeking to start time in journal") return } if C.sd_journal_next(j) < 0 { logWatcher.Err <- fmt.Errorf("error skipping to next journal entry") return } } cursor = s.drainJournal(logWatcher, config, j, "") if config.Follow { // Create a pipe that we can poll at the same time as the journald descriptor. if C.pipe(&pipes[0]) == C.int(-1) { logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") } else { s.followJournal(logWatcher, config, j, pipes, cursor) } } return } func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() go s.readLogs(logWatcher, config) return logWatcher } docker-1.10.3/daemon/logger/journald/read_unsupported.go000066400000000000000000000001611267010174400233150ustar00rootroot00000000000000// +build !linux !cgo static_build !journald package journald func (s *journald) Close() error { return nil } docker-1.10.3/daemon/logger/jsonfilelog/000077500000000000000000000000001267010174400201025ustar00rootroot00000000000000docker-1.10.3/daemon/logger/jsonfilelog/jsonfilelog.go000066400000000000000000000066251267010174400227550ustar00rootroot00000000000000// Package jsonfilelog provides the default Logger implementation for // Docker logging. This logger logs to files on the host server in the // JSON format. package jsonfilelog import ( "bytes" "encoding/json" "fmt" "strconv" "sync" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/go-units" ) // Name is the name of the file that the jsonlogger logs to. const Name = "json-file" // JSONFileLogger is Logger implementation for default Docker logging. 
type JSONFileLogger struct { buf *bytes.Buffer writer *loggerutils.RotateFileWriter mu sync.Mutex ctx logger.Context readers map[*logger.LogWatcher]struct{} // stores the active log followers extra []byte // json-encoded extra attributes } func init() { if err := logger.RegisterLogDriver(Name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates new JSONFileLogger which writes to filename passed in // on given context. func New(ctx logger.Context) (logger.Logger, error) { var capval int64 = -1 if capacity, ok := ctx.Config["max-size"]; ok { var err error capval, err = units.FromHumanSize(capacity) if err != nil { return nil, err } } var maxFiles = 1 if maxFileString, ok := ctx.Config["max-file"]; ok { var err error maxFiles, err = strconv.Atoi(maxFileString) if err != nil { return nil, err } if maxFiles < 1 { return nil, fmt.Errorf("max-file cannot be less than 1") } } writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) if err != nil { return nil, err } var extra []byte if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { var err error extra, err = json.Marshal(attrs) if err != nil { return nil, err } } return &JSONFileLogger{ buf: bytes.NewBuffer(nil), writer: writer, readers: make(map[*logger.LogWatcher]struct{}), extra: extra, }, nil } // Log converts logger.Message to jsonlog.JSONLog and serializes it to file. func (l *JSONFileLogger) Log(msg *logger.Message) error { timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) if err != nil { return err } l.mu.Lock() defer l.mu.Unlock() err = (&jsonlog.JSONLogs{ Log: append(msg.Line, '\n'), Stream: msg.Source, Created: timestamp, RawAttrs: l.extra, }).MarshalJSONBuf(l.buf) if err != nil { return err } l.buf.WriteByte('\n') _, err = l.writer.Write(l.buf.Bytes()) l.buf.Reset() return err } // ValidateLogOpt looks for json specific log options max-file & max-size. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "max-file": case "max-size": case "labels": case "env": default: return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) } } return nil } // LogPath returns the location the given json logger logs to. func (l *JSONFileLogger) LogPath() string { return l.writer.LogPath() } // Close closes underlying file and signals all readers to stop. func (l *JSONFileLogger) Close() error { l.mu.Lock() err := l.writer.Close() for r := range l.readers { r.Close() delete(l.readers, r) } l.mu.Unlock() return err } // Name returns name of this logger. func (l *JSONFileLogger) Name() string { return Name } docker-1.10.3/daemon/logger/jsonfilelog/jsonfilelog_test.go000066400000000000000000000136521267010174400240120ustar00rootroot00000000000000package jsonfilelog import ( "encoding/json" "io/ioutil" "os" "path/filepath" "reflect" "strconv" "testing" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/pkg/jsonlog" ) func TestJSONFileLogger(t *testing.T) { cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" tmp, err := ioutil.TempDir("", "docker-logger-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") l, err := New(logger.Context{ ContainerID: cid, LogPath: filename, }) if err != nil { t.Fatal(err) } defer l.Close() if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line1"), Source: "src1"}); err != nil { t.Fatal(err) } if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line2"), Source: "src2"}); err != nil { t.Fatal(err) } if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line3"), Source: "src3"}); err != nil { t.Fatal(err) } res, err := ioutil.ReadFile(filename) if err != nil { t.Fatal(err) } expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} 
{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} ` if string(res) != expected { t.Fatalf("Wrong log content: %q, expected %q", res, expected) } } func BenchmarkJSONFileLogger(b *testing.B) { cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" tmp, err := ioutil.TempDir("", "docker-logger-") if err != nil { b.Fatal(err) } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") l, err := New(logger.Context{ ContainerID: cid, LogPath: filename, }) if err != nil { b.Fatal(err) } defer l.Close() testLine := "Line that thinks that it is log line from docker\n" msg := &logger.Message{ContainerID: cid, Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() if err != nil { b.Fatal(err) } b.SetBytes(int64(len(jsonlog)+1) * 30) b.ResetTimer() for i := 0; i < b.N; i++ { for j := 0; j < 30; j++ { if err := l.Log(msg); err != nil { b.Fatal(err) } } } } func TestJSONFileLoggerWithOpts(t *testing.T) { cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" tmp, err := ioutil.TempDir("", "docker-logger-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") config := map[string]string{"max-file": "2", "max-size": "1k"} l, err := New(logger.Context{ ContainerID: cid, LogPath: filename, Config: config, }) if err != nil { t.Fatal(err) } defer l.Close() for i := 0; i < 20; i++ { if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { t.Fatal(err) } } res, err := ioutil.ReadFile(filename) if err != nil { t.Fatal(err) } penUlt, err := ioutil.ReadFile(filename + ".1") if err != nil { t.Fatal(err) } expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} 
{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} ` expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} ` if string(res) != expected { t.Fatalf("Wrong log content: %q, expected %q", res, expected) } if string(penUlt) != expectedPenultimate { t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) } } func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" tmp, err := ioutil.TempDir("", "docker-logger-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl"} l, err := New(logger.Context{ ContainerID: cid, LogPath: filename, Config: config, ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true"}, }) if err != 
nil { t.Fatal(err) } defer l.Close() if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line"), Source: "src1"}); err != nil { t.Fatal(err) } res, err := ioutil.ReadFile(filename) if err != nil { t.Fatal(err) } var jsonLog jsonlog.JSONLogs if err := json.Unmarshal(res, &jsonLog); err != nil { t.Fatal(err) } extra := make(map[string]string) if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { t.Fatal(err) } expected := map[string]string{ "rack": "101", "dc": "lhr", "environ": "production", "debug": "false", "ssl": "true", } if !reflect.DeepEqual(extra, expected) { t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) } } docker-1.10.3/daemon/logger/jsonfilelog/read.go000066400000000000000000000116671267010174400213570ustar00rootroot00000000000000package jsonfilelog import ( "bytes" "encoding/json" "fmt" "io" "os" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/pkg/filenotify" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/tailfile" ) const maxJSONDecodeRetry = 20000 func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { l.Reset() if err := dec.Decode(l); err != nil { return nil, err } msg := &logger.Message{ Source: l.Stream, Timestamp: l.Created, Line: []byte(l.Log), } return msg, nil } // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. 
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() go l.readLogs(logWatcher, config) return logWatcher } func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { defer close(logWatcher.Msg) pth := l.writer.LogPath() var files []io.ReadSeeker for i := l.writer.MaxFiles(); i > 1; i-- { f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) if err != nil { if !os.IsNotExist(err) { logWatcher.Err <- err break } continue } defer f.Close() files = append(files, f) } latestFile, err := os.Open(pth) if err != nil { logWatcher.Err <- err return } defer latestFile.Close() files = append(files, latestFile) tailer := ioutils.MultiReadSeeker(files...) if config.Tail != 0 { tailFile(tailer, logWatcher, config.Tail, config.Since) } if !config.Follow { return } if config.Tail >= 0 { latestFile.Seek(0, os.SEEK_END) } l.mu.Lock() l.readers[logWatcher] = struct{}{} l.mu.Unlock() notifyRotate := l.writer.NotifyRotate() followLogs(latestFile, logWatcher, notifyRotate, config.Since) l.mu.Lock() delete(l.readers, logWatcher) l.mu.Unlock() l.writer.NotifyRotateEvict(notifyRotate) } func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { var rdr io.Reader = f if tail > 0 { ls, err := tailfile.TailFile(f, tail) if err != nil { logWatcher.Err <- err return } rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) } dec := json.NewDecoder(rdr) l := &jsonlog.JSONLog{} for { msg, err := decodeLogLine(dec, l) if err != nil { if err != io.EOF { logWatcher.Err <- err } return } if !since.IsZero() && msg.Timestamp.Before(since) { continue } logWatcher.Msg <- msg } } func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { dec := json.NewDecoder(f) l := &jsonlog.JSONLog{} fileWatcher, err := filenotify.New() if err != nil { logWatcher.Err <- err } defer fileWatcher.Close() var retries int for { msg, err := 
decodeLogLine(dec, l) if err != nil { if err != io.EOF { // try again because this shouldn't happen if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { dec = json.NewDecoder(f) retries++ continue } // io.ErrUnexpectedEOF is returned from json.Decoder when there is // remaining data in the parser's buffer while an io.EOF occurs. // If the json logger writes a partial json log entry to the disk // while at the same time the decoder tries to decode it, the race condition happens. if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { reader := io.MultiReader(dec.Buffered(), f) dec = json.NewDecoder(reader) retries++ continue } logWatcher.Err <- err return } logrus.WithField("logger", "json-file").Debugf("waiting for events") if err := fileWatcher.Add(f.Name()); err != nil { logrus.WithField("logger", "json-file").Warn("falling back to file poller") fileWatcher.Close() fileWatcher = filenotify.NewPollingWatcher() if err := fileWatcher.Add(f.Name()); err != nil { logrus.Errorf("error watching log file for modifications: %v", err) logWatcher.Err <- err } } select { case <-fileWatcher.Events(): dec = json.NewDecoder(f) fileWatcher.Remove(f.Name()) continue case <-fileWatcher.Errors(): fileWatcher.Remove(f.Name()) logWatcher.Err <- err return case <-logWatcher.WatchClose(): fileWatcher.Remove(f.Name()) return case <-notifyRotate: f, err = os.Open(f.Name()) if err != nil { logWatcher.Err <- err return } dec = json.NewDecoder(f) fileWatcher.Remove(f.Name()) fileWatcher.Add(f.Name()) continue } } retries = 0 // reset retries since we've succeeded if !since.IsZero() && msg.Timestamp.Before(since) { continue } select { case logWatcher.Msg <- msg: case <-logWatcher.WatchClose(): logWatcher.Msg <- msg for { msg, err := decodeLogLine(dec, l) if err != nil { return } if !since.IsZero() && msg.Timestamp.Before(since) { continue } logWatcher.Msg <- msg } } } } 
docker-1.10.3/daemon/logger/logger.go000066400000000000000000000045031267010174400173770ustar00rootroot00000000000000// Package logger defines interfaces that logger drivers implement to // log messages. // // The other half of a logger driver is the implementation of the // factory, which holds the contextual instance information that // allows multiple loggers of the same type to perform different // actions, such as logging to different locations. package logger import ( "errors" "time" "github.com/docker/docker/pkg/jsonlog" ) // ErrReadLogsNotSupported is returned when the logger does not support reading logs. var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading") const ( // TimeFormat is the time format used for timestamps sent to log readers. TimeFormat = jsonlog.RFC3339NanoFixed logWatcherBufferSize = 4096 ) // Message is datastructure that represents record from some container. type Message struct { ContainerID string Line []byte Source string Timestamp time.Time } // Logger is the interface for docker logging drivers. type Logger interface { Log(*Message) error Name() string Close() error } // ReadConfig is the configuration passed into ReadLogs. type ReadConfig struct { Since time.Time Tail int Follow bool } // LogReader is the interface for reading log messages for loggers that support reading. type LogReader interface { // Read logs from underlying logging backend ReadLogs(ReadConfig) *LogWatcher } // LogWatcher is used when consuming logs read from the LogReader interface. type LogWatcher struct { // For sending log messages to a reader. Msg chan *Message // For sending error messages that occur while while reading logs. Err chan error closeNotifier chan struct{} } // NewLogWatcher returns a new LogWatcher. 
func NewLogWatcher() *LogWatcher { return &LogWatcher{ Msg: make(chan *Message, logWatcherBufferSize), Err: make(chan error, 1), closeNotifier: make(chan struct{}), } } // Close notifies the underlying log reader to stop. func (w *LogWatcher) Close() { // only close if not already closed select { case <-w.closeNotifier: default: close(w.closeNotifier) } } // WatchClose returns a channel receiver that receives notification // when the watcher has been closed. This should only be called from // one goroutine. func (w *LogWatcher) WatchClose() <-chan struct{} { return w.closeNotifier } docker-1.10.3/daemon/logger/loggerutils/000077500000000000000000000000001267010174400201275ustar00rootroot00000000000000docker-1.10.3/daemon/logger/loggerutils/log_tag.go000066400000000000000000000022511267010174400220720ustar00rootroot00000000000000package loggerutils import ( "bytes" "fmt" "text/template" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" ) // ParseLogTag generates a context aware tag for consistency across different // log drivers based on the context of the running container. func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { tagTemplate := lookupTagTemplate(ctx, defaultTemplate) tmpl, err := template.New("log-tag").Parse(tagTemplate) if err != nil { return "", err } buf := new(bytes.Buffer) if err := tmpl.Execute(buf, &ctx); err != nil { return "", err } return buf.String(), nil } func lookupTagTemplate(ctx logger.Context, defaultTemplate string) string { tagTemplate := ctx.Config["tag"] deprecatedConfigs := []string{"syslog-tag", "gelf-tag", "fluentd-tag"} for i := 0; tagTemplate == "" && i < len(deprecatedConfigs); i++ { cfg := deprecatedConfigs[i] if ctx.Config[cfg] != "" { tagTemplate = ctx.Config[cfg] logrus.Warn(fmt.Sprintf("Using log tag from deprecated log-opt '%s'. 
Please use: --log-opt tag=\"%s\"", cfg, tagTemplate)) } } if tagTemplate == "" { tagTemplate = defaultTemplate } return tagTemplate } docker-1.10.3/daemon/logger/loggerutils/log_tag_test.go000066400000000000000000000033371267010174400231370ustar00rootroot00000000000000package loggerutils import ( "testing" "github.com/docker/docker/daemon/logger" ) func TestParseLogTagDefaultTag(t *testing.T) { ctx := buildContext(map[string]string{}) tag, e := ParseLogTag(ctx, "{{.ID}}") assertTag(t, e, tag, ctx.ID()) } func TestParseLogTag(t *testing.T) { ctx := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) tag, e := ParseLogTag(ctx, "{{.ID}}") assertTag(t, e, tag, "test-image/test-container/container-ab") } func TestParseLogTagSyslogTag(t *testing.T) { ctx := buildContext(map[string]string{"syslog-tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) tag, e := ParseLogTag(ctx, "{{.ID}}") assertTag(t, e, tag, "test-image/test-container/container-ab") } func TestParseLogTagGelfTag(t *testing.T) { ctx := buildContext(map[string]string{"gelf-tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) tag, e := ParseLogTag(ctx, "{{.ID}}") assertTag(t, e, tag, "test-image/test-container/container-ab") } func TestParseLogTagFluentdTag(t *testing.T) { ctx := buildContext(map[string]string{"fluentd-tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) tag, e := ParseLogTag(ctx, "{{.ID}}") assertTag(t, e, tag, "test-image/test-container/container-ab") } // Helpers func buildContext(cfg map[string]string) logger.Context { return logger.Context{ ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", ContainerName: "/test-container", ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890", ContainerImageName: "test-image", Config: cfg, } } func assertTag(t *testing.T, e error, tag string, expected string) { if e != nil { t.Fatalf("Error generating tag: %q", e) } if tag != expected { t.Fatalf("Wrong tag: %q, should be %q", tag, expected) } } 
docker-1.10.3/daemon/logger/loggerutils/rotatefilewriter.go000066400000000000000000000056561267010174400240650ustar00rootroot00000000000000package loggerutils import ( "os" "strconv" "sync" "github.com/docker/docker/pkg/pubsub" ) // RotateFileWriter is Logger implementation for default Docker logging. type RotateFileWriter struct { f *os.File // store for closing mu sync.Mutex capacity int64 //maximum size of each file maxFiles int //maximum number of files notifyRotate *pubsub.Publisher } //NewRotateFileWriter creates new RotateFileWriter func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) { log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) if err != nil { return &RotateFileWriter{}, err } return &RotateFileWriter{ f: log, capacity: capacity, maxFiles: maxFiles, notifyRotate: pubsub.NewPublisher(0, 1), }, nil } //WriteLog write log message to File func (w *RotateFileWriter) Write(message []byte) (int, error) { w.mu.Lock() defer w.mu.Unlock() if err := w.checkCapacityAndRotate(); err != nil { return -1, err } return w.f.Write(message) } func (w *RotateFileWriter) checkCapacityAndRotate() error { if w.capacity == -1 { return nil } meta, err := w.f.Stat() if err != nil { return err } if meta.Size() >= w.capacity { name := w.f.Name() if err := w.f.Close(); err != nil { return err } if err := rotate(name, w.maxFiles); err != nil { return err } file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 06400) if err != nil { return err } w.f = file w.notifyRotate.Publish(struct{}{}) } return nil } func rotate(name string, maxFiles int) error { if maxFiles < 2 { return nil } for i := maxFiles - 1; i > 1; i-- { toPath := name + "." + strconv.Itoa(i) fromPath := name + "." 
+ strconv.Itoa(i-1) if err := backup(fromPath, toPath); err != nil && !os.IsNotExist(err) { return err } } if err := backup(name, name+".1"); err != nil { return err } return nil } // backup renames a file from fromPath to toPath func backup(fromPath, toPath string) error { if _, err := os.Stat(fromPath); os.IsNotExist(err) { return err } if _, err := os.Stat(toPath); !os.IsNotExist(err) { err := os.Remove(toPath) if err != nil { return err } } return os.Rename(fromPath, toPath) } // LogPath returns the location the given writer logs to. func (w *RotateFileWriter) LogPath() string { return w.f.Name() } // MaxFiles return maximum number of files func (w *RotateFileWriter) MaxFiles() int { return w.maxFiles } //NotifyRotate returns the new subscriber func (w *RotateFileWriter) NotifyRotate() chan interface{} { return w.notifyRotate.Subscribe() } //NotifyRotateEvict removes the specified subscriber from receiving any more messages. func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) { w.notifyRotate.Evict(sub) } // Close closes underlying file and signals all readers to stop. func (w *RotateFileWriter) Close() error { return w.f.Close() } docker-1.10.3/daemon/logger/splunk/000077500000000000000000000000001267010174400171035ustar00rootroot00000000000000docker-1.10.3/daemon/logger/splunk/splunk.go000066400000000000000000000152471267010174400207570ustar00rootroot00000000000000// Package splunk provides the log driver for forwarding server logs to // Splunk HTTP Event Collector endpoint. 
package splunk import ( "bytes" "crypto/tls" "crypto/x509" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "strconv" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/urlutil" ) const ( driverName = "splunk" splunkURLKey = "splunk-url" splunkTokenKey = "splunk-token" splunkSourceKey = "splunk-source" splunkSourceTypeKey = "splunk-sourcetype" splunkIndexKey = "splunk-index" splunkCAPathKey = "splunk-capath" splunkCANameKey = "splunk-caname" splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" envKey = "env" labelsKey = "labels" tagKey = "tag" ) type splunkLogger struct { client *http.Client transport *http.Transport url string auth string nullMessage *splunkMessage } type splunkMessage struct { Event splunkMessageEvent `json:"event"` Time string `json:"time"` Host string `json:"host"` Source string `json:"source,omitempty"` SourceType string `json:"sourcetype,omitempty"` Index string `json:"index,omitempty"` } type splunkMessageEvent struct { Line string `json:"line"` Source string `json:"source"` Tag string `json:"tag,omitempty"` Attrs map[string]string `json:"attrs,omitempty"` } func init() { if err := logger.RegisterLogDriver(driverName, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates splunk logger driver using configuration passed in context func New(ctx logger.Context) (logger.Logger, error) { hostname, err := ctx.Hostname() if err != nil { return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) } // Parse and validate Splunk URL splunkURL, err := parseURL(ctx) if err != nil { return nil, err } // Splunk Token is required parameter splunkToken, ok := ctx.Config[splunkTokenKey] if !ok { return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) } tlsConfig := &tls.Config{} // Splunk is using 
autogenerated certificates by default, // allow users to trust them with skipping verification if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) if err != nil { return nil, err } tlsConfig.InsecureSkipVerify = insecureSkipVerify } // If path to the root certificate is provided - load it if caPath, ok := ctx.Config[splunkCAPathKey]; ok { caCert, err := ioutil.ReadFile(caPath) if err != nil { return nil, err } caPool := x509.NewCertPool() caPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs = caPool } if caName, ok := ctx.Config[splunkCANameKey]; ok { tlsConfig.ServerName = caName } transport := &http.Transport{ TLSClientConfig: tlsConfig, } client := &http.Client{ Transport: transport, } var nullMessage = &splunkMessage{ Host: hostname, } // Optional parameters for messages nullMessage.Source = ctx.Config[splunkSourceKey] nullMessage.SourceType = ctx.Config[splunkSourceTypeKey] nullMessage.Index = ctx.Config[splunkIndexKey] tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}") if err != nil { return nil, err } nullMessage.Event.Tag = tag nullMessage.Event.Attrs = ctx.ExtraAttributes(nil) logger := &splunkLogger{ client: client, transport: transport, url: splunkURL.String(), auth: "Splunk " + splunkToken, nullMessage: nullMessage, } err = verifySplunkConnection(logger) if err != nil { return nil, err } return logger, nil } func (l *splunkLogger) Log(msg *logger.Message) error { // Construct message as a copy of nullMessage message := *l.nullMessage message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/1000000000) message.Event.Line = string(msg.Line) message.Event.Source = msg.Source jsonEvent, err := json.Marshal(&message) if err != nil { return err } req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(jsonEvent)) if err != nil { return err } req.Header.Set("Authorization", l.auth) res, err := l.client.Do(req) if err != nil { return err } if res.Body != 
nil { defer res.Body.Close() } if res.StatusCode != http.StatusOK { var body []byte body, err = ioutil.ReadAll(res.Body) if err != nil { return err } return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) } io.Copy(ioutil.Discard, res.Body) return nil } func (l *splunkLogger) Close() error { l.transport.CloseIdleConnections() return nil } func (l *splunkLogger) Name() string { return driverName } // ValidateLogOpt looks for all supported by splunk driver options func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case splunkURLKey: case splunkTokenKey: case splunkSourceKey: case splunkSourceTypeKey: case splunkIndexKey: case splunkCAPathKey: case splunkCANameKey: case splunkInsecureSkipVerifyKey: case envKey: case labelsKey: case tagKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) } } return nil } func parseURL(ctx logger.Context) (*url.URL, error) { splunkURLStr, ok := ctx.Config[splunkURLKey] if !ok { return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) } splunkURL, err := url.Parse(splunkURLStr) if err != nil { return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) } if !urlutil.IsURL(splunkURLStr) || !splunkURL.IsAbs() || (splunkURL.Path != "" && splunkURL.Path != "/") || splunkURL.RawQuery != "" || splunkURL.Fragment != "" { return nil, fmt.Errorf("%s: expected format schema://dns_name_or_ip:port for %s", driverName, splunkURLKey) } splunkURL.Path = "/services/collector/event/1.0" return splunkURL, nil } func verifySplunkConnection(l *splunkLogger) error { req, err := http.NewRequest("OPTIONS", l.url, nil) if err != nil { return err } res, err := l.client.Do(req) if err != nil { return err } if res.Body != nil { defer res.Body.Close() } if res.StatusCode != http.StatusOK { var body []byte body, err = ioutil.ReadAll(res.Body) if err != nil { return err } return fmt.Errorf("%s: failed 
to verify connection - %s - %s", driverName, res.Status, body) } return nil } docker-1.10.3/daemon/logger/syslog/000077500000000000000000000000001267010174400171075ustar00rootroot00000000000000docker-1.10.3/daemon/logger/syslog/syslog.go000066400000000000000000000114011267010174400207530ustar00rootroot00000000000000// +build linux // Package syslog provides the logdriver for forwarding server logs to syslog endpoints. package syslog import ( "crypto/tls" "errors" "fmt" "net" "net/url" "os" "path" "strconv" "strings" syslog "github.com/RackSec/srslog" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/urlutil" "github.com/docker/go-connections/tlsconfig" ) const ( name = "syslog" secureProto = "tcp+tls" ) var facilities = map[string]syslog.Priority{ "kern": syslog.LOG_KERN, "user": syslog.LOG_USER, "mail": syslog.LOG_MAIL, "daemon": syslog.LOG_DAEMON, "auth": syslog.LOG_AUTH, "syslog": syslog.LOG_SYSLOG, "lpr": syslog.LOG_LPR, "news": syslog.LOG_NEWS, "uucp": syslog.LOG_UUCP, "cron": syslog.LOG_CRON, "authpriv": syslog.LOG_AUTHPRIV, "ftp": syslog.LOG_FTP, "local0": syslog.LOG_LOCAL0, "local1": syslog.LOG_LOCAL1, "local2": syslog.LOG_LOCAL2, "local3": syslog.LOG_LOCAL3, "local4": syslog.LOG_LOCAL4, "local5": syslog.LOG_LOCAL5, "local6": syslog.LOG_LOCAL6, "local7": syslog.LOG_LOCAL7, } type syslogger struct { writer *syslog.Writer } func init() { if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { logrus.Fatal(err) } } // New creates a syslog logger using the configuration passed in on // the context. Supported context configuration variables are // syslog-address, syslog-facility, & syslog-tag. 
func New(ctx logger.Context) (logger.Logger, error) { tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}") if err != nil { return nil, err } proto, address, err := parseAddress(ctx.Config["syslog-address"]) if err != nil { return nil, err } facility, err := parseFacility(ctx.Config["syslog-facility"]) if err != nil { return nil, err } logTag := path.Base(os.Args[0]) + "/" + tag var log *syslog.Writer if proto == secureProto { tlsConfig, tlsErr := parseTLSConfig(ctx.Config) if tlsErr != nil { return nil, tlsErr } log, err = syslog.DialWithTLSConfig(proto, address, facility, logTag, tlsConfig) } else { log, err = syslog.Dial(proto, address, facility, logTag) } if err != nil { return nil, err } return &syslogger{ writer: log, }, nil } func (s *syslogger) Log(msg *logger.Message) error { if msg.Source == "stderr" { return s.writer.Err(string(msg.Line)) } return s.writer.Info(string(msg.Line)) } func (s *syslogger) Close() error { return s.writer.Close() } func (s *syslogger) Name() string { return name } func parseAddress(address string) (string, string, error) { if address == "" { return "", "", nil } if !urlutil.IsTransportURL(address) { return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) } url, err := url.Parse(address) if err != nil { return "", "", err } // unix socket validation if url.Scheme == "unix" { if _, err := os.Stat(url.Path); err != nil { return "", "", err } return url.Scheme, url.Path, nil } // here we process tcp|udp host := url.Host if _, _, err := net.SplitHostPort(host); err != nil { if !strings.Contains(err.Error(), "missing port in address") { return "", "", err } host = host + ":514" } return url.Scheme, host, nil } // ValidateLogOpt looks for syslog specific log options // syslog-address, syslog-facility, & syslog-tag. 
func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "syslog-address": case "syslog-facility": case "syslog-tag": case "syslog-tls-ca-cert": case "syslog-tls-cert": case "syslog-tls-key": case "syslog-tls-skip-verify": case "tag": default: return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) } } if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { return err } if _, err := parseFacility(cfg["syslog-facility"]); err != nil { return err } return nil } func parseFacility(facility string) (syslog.Priority, error) { if facility == "" { return syslog.LOG_DAEMON, nil } if syslogFacility, valid := facilities[facility]; valid { return syslogFacility, nil } fInt, err := strconv.Atoi(facility) if err == nil && 0 <= fInt && fInt <= 23 { return syslog.Priority(fInt << 3), nil } return syslog.Priority(0), errors.New("invalid syslog facility") } func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { _, skipVerify := cfg["syslog-tls-skip-verify"] opts := tlsconfig.Options{ CAFile: cfg["syslog-tls-ca-cert"], CertFile: cfg["syslog-tls-cert"], KeyFile: cfg["syslog-tls-key"], InsecureSkipVerify: skipVerify, } return tlsconfig.Client(opts) } docker-1.10.3/daemon/logger/syslog/syslog_unsupported.go000066400000000000000000000000411267010174400234210ustar00rootroot00000000000000// +build !linux package syslog docker-1.10.3/daemon/logs.go000066400000000000000000000073501267010174400156100ustar00rootroot00000000000000package daemon import ( "io" "strconv" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/stdcopy" ) // ContainerLogsConfig holds configs for logging operations. Exists // for users of the daemon to to pass it a logging configuration. 
type ContainerLogsConfig struct { // if true stream log output Follow bool // if true include timestamps for each line of log output Timestamps bool // return that many lines of log output from the end Tail string // filter logs by returning on those entries after this time Since time.Time // whether or not to show stdout and stderr as well as log entries. UseStdout, UseStderr bool OutStream io.Writer Stop <-chan bool } // ContainerLogs hooks up a container's stdout and stderr streams // configured with the given struct. func (daemon *Daemon) ContainerLogs(containerName string, config *ContainerLogsConfig) error { container, err := daemon.GetContainer(containerName) if err != nil { return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) } if !(config.UseStdout || config.UseStderr) { return derr.ErrorCodeNeedStream } outStream := config.OutStream errStream := outStream if !container.Config.Tty { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } config.OutStream = outStream cLog, err := daemon.getLogger(container) if err != nil { return err } logReader, ok := cLog.(logger.LogReader) if !ok { return logger.ErrReadLogsNotSupported } follow := config.Follow && container.IsRunning() tailLines, err := strconv.Atoi(config.Tail) if err != nil { tailLines = -1 } logrus.Debug("logs: begin stream") readConfig := logger.ReadConfig{ Since: config.Since, Tail: tailLines, Follow: follow, } logs := logReader.ReadLogs(readConfig) for { select { case err := <-logs.Err: logrus.Errorf("Error streaming logs: %v", err) return nil case <-config.Stop: logs.Close() return nil case msg, ok := <-logs.Msg: if !ok { logrus.Debugf("logs: end stream") return nil } logLine := msg.Line if config.Timestamps { logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) 
} if msg.Source == "stdout" && config.UseStdout { outStream.Write(logLine) } if msg.Source == "stderr" && config.UseStderr { errStream.Write(logLine) } } } } func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { if container.LogDriver != nil && container.IsRunning() { return container.LogDriver, nil } cfg := container.GetLogConfig(daemon.defaultLogConfig) if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { return nil, err } return container.StartLogger(cfg) } // StartLogging initializes and starts the container logging stream. func (daemon *Daemon) StartLogging(container *container.Container) error { cfg := container.GetLogConfig(daemon.defaultLogConfig) if cfg.Type == "none" { return nil // do not start logging routines } if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { return err } l, err := container.StartLogger(cfg) if err != nil { return derr.ErrorCodeInitLogger.WithArgs(err) } copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) container.LogCopier = copier copier.Run() container.LogDriver = l // set LogPath field only for json-file logdriver if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { container.LogPath = jl.LogPath() } return nil } docker-1.10.3/daemon/mounts.go000066400000000000000000000025111267010174400161630ustar00rootroot00000000000000package daemon import ( "strings" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" volumestore "github.com/docker/docker/volume/store" ) func (daemon *Daemon) prepareMountPoints(container *container.Container) error { for _, config := range container.MountPoints { if err := daemon.lazyInitializeVolume(container.ID, config); err != nil { return err } } return nil } func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { var rmErrors []string for _, m := range container.MountPoints { if m.Volume == nil { 
continue } daemon.volumes.Dereference(m.Volume, container.ID) if rm { // Do not remove named mountpoints // these are mountpoints specified like `docker run -v :/foo` if m.Named { continue } err := daemon.volumes.Remove(m.Volume) // Ignore volume in use errors because having this // volume being referenced by other container is // not an error, but an implementation detail. // This prevents docker from logging "ERROR: Volume in use" // where there is another container using the volume. if err != nil && !volumestore.IsInUse(err) { rmErrors = append(rmErrors, err.Error()) } } } if len(rmErrors) > 0 { return derr.ErrorCodeRemovingVolume.WithArgs(strings.Join(rmErrors, "\n")) } return nil } docker-1.10.3/daemon/network.go000066400000000000000000000134741267010174400163410ustar00rootroot00000000000000package daemon import ( "errors" "fmt" "net" "strings" derr "github.com/docker/docker/errors" "github.com/docker/docker/runconfig" "github.com/docker/engine-api/types/network" "github.com/docker/libnetwork" ) const ( // NetworkByID represents a constant to find a network by its ID NetworkByID = iota + 1 // NetworkByName represents a constant to find a network by its Name NetworkByName ) // NetworkControllerEnabled checks if the networking stack is enabled. // This feature depends on OS primitives and it's disabled in systems like Windows. 
func (daemon *Daemon) NetworkControllerEnabled() bool { return daemon.netController != nil } // FindNetwork function finds a network for a given string that can represent network name or id func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { // Find by Name n, err := daemon.GetNetwork(idName, NetworkByName) if _, ok := err.(libnetwork.ErrNoSuchNetwork); err != nil && !ok { return nil, err } if n != nil { return n, nil } // Find by id n, err = daemon.GetNetwork(idName, NetworkByID) if err != nil { return nil, err } return n, nil } // GetNetwork function returns a network for a given string that represents the network and // a hint to indicate if the string is an Id or Name of the network func (daemon *Daemon) GetNetwork(idName string, by int) (libnetwork.Network, error) { c := daemon.netController switch by { case NetworkByID: list := daemon.GetNetworksByID(idName) if len(list) == 0 { return nil, libnetwork.ErrNoSuchNetwork(idName) } if len(list) > 1 { return nil, libnetwork.ErrInvalidID(idName) } return list[0], nil case NetworkByName: if idName == "" { idName = c.Config().Daemon.DefaultNetwork } return c.NetworkByName(idName) } return nil, errors.New("unexpected selector for GetNetwork") } // GetNetworksByID returns a list of networks whose ID partially matches zero or more networks func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { c := daemon.netController list := []libnetwork.Network{} l := func(nw libnetwork.Network) bool { if strings.HasPrefix(nw.ID(), partialID) { list = append(list, nw) } return false } c.WalkNetworks(l) return list } // GetAllNetworks returns a list containing all networks func (daemon *Daemon) GetAllNetworks() []libnetwork.Network { c := daemon.netController list := []libnetwork.Network{} l := func(nw libnetwork.Network) bool { list = append(list, nw) return false } c.WalkNetworks(l) return list } // CreateNetwork creates a network with the given name, driver and other optional 
parameters func (daemon *Daemon) CreateNetwork(name, driver string, ipam network.IPAM, options map[string]string, internal bool) (libnetwork.Network, error) { c := daemon.netController if driver == "" { driver = c.Config().Daemon.DefaultDriver } nwOptions := []libnetwork.NetworkOption{} v4Conf, v6Conf, err := getIpamConfig(ipam.Config) if err != nil { return nil, err } nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options)) nwOptions = append(nwOptions, libnetwork.NetworkOptionDriverOpts(options)) if internal { nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) } n, err := c.NewNetwork(driver, name, nwOptions...) if err != nil { return nil, err } daemon.LogNetworkEvent(n, "create") return n, nil } func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { ipamV4Cfg := []*libnetwork.IpamConf{} ipamV6Cfg := []*libnetwork.IpamConf{} for _, d := range data { iCfg := libnetwork.IpamConf{} iCfg.PreferredPool = d.Subnet iCfg.SubPool = d.IPRange iCfg.Gateway = d.Gateway iCfg.AuxAddresses = d.AuxAddress ip, _, err := net.ParseCIDR(d.Subnet) if err != nil { return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) } if ip.To4() != nil { ipamV4Cfg = append(ipamV4Cfg, &iCfg) } else { ipamV6Cfg = append(ipamV6Cfg, &iCfg) } } return ipamV4Cfg, ipamV6Cfg, nil } // ConnectContainerToNetwork connects the given container to the given // network. If either cannot be found, an err is returned. If the // network cannot be set up, an err is returned. func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { container, err := daemon.GetContainer(containerName) if err != nil { return err } return daemon.ConnectToNetwork(container, networkName, endpointConfig) } // DisconnectContainerFromNetwork disconnects the given container from // the given network. 
If either cannot be found, an err is returned. func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error { container, err := daemon.GetContainer(containerName) if err != nil { if force { return daemon.ForceEndpointDelete(containerName, network) } return err } return daemon.DisconnectFromNetwork(container, network, force) } // GetNetworkDriverList returns the list of plugins drivers // registered for network. func (daemon *Daemon) GetNetworkDriverList() map[string]bool { pluginList := make(map[string]bool) if !daemon.NetworkControllerEnabled() { return nil } c := daemon.netController networks := c.Networks() for _, network := range networks { driver := network.Type() pluginList[driver] = true } return pluginList } // DeleteNetwork destroys a network unless it's one of docker's predefined networks. func (daemon *Daemon) DeleteNetwork(networkID string) error { nw, err := daemon.FindNetwork(networkID) if err != nil { return err } if runconfig.IsPreDefinedNetwork(nw.Name()) { return derr.ErrorCodeCantDeletePredefinedNetwork.WithArgs(nw.Name()) } if err := nw.Delete(); err != nil { return err } daemon.LogNetworkEvent(nw, "destroy") return nil } docker-1.10.3/daemon/network/000077500000000000000000000000001267010174400160015ustar00rootroot00000000000000docker-1.10.3/daemon/network/settings.go000066400000000000000000000012641267010174400201730ustar00rootroot00000000000000package network import ( networktypes "github.com/docker/engine-api/types/network" "github.com/docker/go-connections/nat" ) // Settings stores configuration details about the daemon network config // TODO Windows. 
Many of these fields can be factored out., type Settings struct { Bridge string SandboxID string HairpinMode bool LinkLocalIPv6Address string LinkLocalIPv6PrefixLen int Networks map[string]*networktypes.EndpointSettings Ports nat.PortMap SandboxKey string SecondaryIPAddresses []networktypes.Address SecondaryIPv6Addresses []networktypes.Address IsAnonymousEndpoint bool } docker-1.10.3/daemon/pause.go000066400000000000000000000021501267010174400157520ustar00rootroot00000000000000package daemon import ( "github.com/docker/docker/container" derr "github.com/docker/docker/errors" ) // ContainerPause pauses a container func (daemon *Daemon) ContainerPause(name string) error { container, err := daemon.GetContainer(name) if err != nil { return err } if err := daemon.containerPause(container); err != nil { return derr.ErrorCodePauseError.WithArgs(name, err) } return nil } // containerPause pauses the container execution without stopping the process. // The execution can be resumed by calling containerUnpause. func (daemon *Daemon) containerPause(container *container.Container) error { container.Lock() defer container.Unlock() // We cannot Pause the container which is not running if !container.Running { return derr.ErrorCodeNotRunning.WithArgs(container.ID) } // We cannot Pause the container which is already paused if container.Paused { return derr.ErrorCodeAlreadyPaused.WithArgs(container.ID) } if err := daemon.execDriver.Pause(container.Command); err != nil { return err } container.Paused = true daemon.LogContainerEvent(container, "pause") return nil } docker-1.10.3/daemon/rename.go000066400000000000000000000031141267010174400161050ustar00rootroot00000000000000package daemon import ( "strings" "github.com/Sirupsen/logrus" derr "github.com/docker/docker/errors" "github.com/docker/libnetwork" ) // ContainerRename changes the name of a container, using the oldName // to find the container. An error is returned if newName is already // reserved. 
func (daemon *Daemon) ContainerRename(oldName, newName string) error { var ( sid string sb libnetwork.Sandbox ) if oldName == "" || newName == "" { return derr.ErrorCodeEmptyRename } container, err := daemon.GetContainer(oldName) if err != nil { return err } oldName = container.Name container.Lock() defer container.Unlock() if newName, err = daemon.reserveName(container.ID, newName); err != nil { return derr.ErrorCodeRenameTaken.WithArgs(err) } container.Name = newName defer func() { if err != nil { container.Name = oldName daemon.reserveName(container.ID, oldName) daemon.releaseName(newName) } }() daemon.releaseName(oldName) if err = container.ToDisk(); err != nil { return err } if !container.Running { daemon.LogContainerEvent(container, "rename") return nil } defer func() { if err != nil { container.Name = oldName if e := container.ToDisk(); e != nil { logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) } } }() sid = container.NetworkSettings.SandboxID sb, err = daemon.netController.SandboxByID(sid) if err != nil { return err } err = sb.Rename(strings.TrimPrefix(container.Name, "/")) if err != nil { return err } daemon.LogContainerEvent(container, "rename") return nil } docker-1.10.3/daemon/resize.go000066400000000000000000000016321267010174400161420ustar00rootroot00000000000000package daemon import derr "github.com/docker/docker/errors" // ContainerResize changes the size of the TTY of the process running // in the container with the given name to the given height and width. 
func (daemon *Daemon) ContainerResize(name string, height, width int) error { container, err := daemon.GetContainer(name) if err != nil { return err } if !container.IsRunning() { return derr.ErrorCodeNotRunning.WithArgs(container.ID) } if err = container.Resize(height, width); err == nil { daemon.LogContainerEvent(container, "resize") } return err } // ContainerExecResize changes the size of the TTY of the process // running in the exec with the given name to the given height and // width. func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { ExecConfig, err := daemon.getExecConfig(name) if err != nil { return err } return ExecConfig.Resize(height, width) } docker-1.10.3/daemon/restart.go000066400000000000000000000030641267010174400163260ustar00rootroot00000000000000package daemon import ( "github.com/docker/docker/container" derr "github.com/docker/docker/errors" ) // ContainerRestart stops and starts a container. It attempts to // gracefully stop the container within the given timeout, forcefully // stopping it if the timeout is exceeded. If given a negative // timeout, ContainerRestart will wait forever until a graceful // stop. Returns an error if the container cannot be found, or if // there is an underlying error at any stage of the restart. func (daemon *Daemon) ContainerRestart(name string, seconds int) error { container, err := daemon.GetContainer(name) if err != nil { return err } if err := daemon.containerRestart(container, seconds); err != nil { return derr.ErrorCodeCantRestart.WithArgs(name, err) } return nil } // containerRestart attempts to gracefully stop and then start the // container. When stopping, wait for the given duration in seconds to // gracefully stop, before forcefully terminating the container. If // given a negative duration, wait forever for a graceful stop. 
func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { // Avoid unnecessarily unmounting and then directly mounting // the container when the container stops and then starts // again if err := daemon.Mount(container); err == nil { defer daemon.Unmount(container) } if err := daemon.containerStop(container, seconds); err != nil { return err } if err := daemon.containerStart(container); err != nil { return err } daemon.LogContainerEvent(container, "restart") return nil } docker-1.10.3/daemon/selinux_linux.go000066400000000000000000000004441267010174400175470ustar00rootroot00000000000000// +build linux package daemon import "github.com/opencontainers/runc/libcontainer/selinux" func selinuxSetDisabled() { selinux.SetDisabled() } func selinuxFreeLxcContexts(label string) { selinux.FreeLxcContexts(label) } func selinuxEnabled() bool { return selinux.SelinuxEnabled() } docker-1.10.3/daemon/selinux_unsupported.go000066400000000000000000000002351267010174400207760ustar00rootroot00000000000000// +build !linux package daemon func selinuxSetDisabled() { } func selinuxFreeLxcContexts(label string) { } func selinuxEnabled() bool { return false } docker-1.10.3/daemon/start.go000066400000000000000000000122501267010174400157740ustar00rootroot00000000000000package daemon import ( "runtime" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" "github.com/docker/docker/runconfig" containertypes "github.com/docker/engine-api/types/container" ) // ContainerStart starts a container. func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig) error { container, err := daemon.GetContainer(name) if err != nil { return err } if container.IsPaused() { return derr.ErrorCodeStartPaused } if container.IsRunning() { return derr.ErrorCodeAlreadyStarted } // Windows does not have the backwards compatibility issue here. 
if runtime.GOOS != "windows" { // This is kept for backward compatibility - hostconfig should be passed when // creating a container, not during start. if hostConfig != nil { logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and will be removed in Docker 1.12") oldNetworkMode := container.HostConfig.NetworkMode if err := daemon.setSecurityOptions(container, hostConfig); err != nil { return err } if err := daemon.setHostConfig(container, hostConfig); err != nil { return err } newNetworkMode := container.HostConfig.NetworkMode if string(oldNetworkMode) != string(newNetworkMode) { // if user has change the network mode on starting, clean up the // old networks. It is a deprecated feature and will be removed in Docker 1.12 container.NetworkSettings.Networks = nil if err := container.ToDisk(); err != nil { return err } } container.InitDNSHostConfig() } } else { if hostConfig != nil { return derr.ErrorCodeHostConfigStart } } // check if hostConfig is in line with the current system settings. // It may happen cgroups are umounted or the like. if _, err = daemon.verifyContainerSettings(container.HostConfig, nil); err != nil { return err } // Adapt for old containers in case we have updates in this function and // old containers never have chance to call the new function in create stage. if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { return err } return daemon.containerStart(container) } // Start starts a container func (daemon *Daemon) Start(container *container.Container) error { return daemon.containerStart(container) } // containerStart prepares the container to run by setting up everything the // container needs, such as storage and networking, as well as links // between containers. The container is left waiting for a signal to // begin running. 
func (daemon *Daemon) containerStart(container *container.Container) (err error) { container.Lock() defer container.Unlock() if container.Running { return nil } if container.RemovalInProgress || container.Dead { return derr.ErrorCodeContainerBeingRemoved } // if we encounter an error during start we need to ensure that any other // setup has been cleaned up properly defer func() { if err != nil { container.SetError(err) // if no one else has set it, make sure we don't leave it at zero if container.ExitCode == 0 { container.ExitCode = 128 } container.ToDisk() daemon.Cleanup(container) daemon.LogContainerEvent(container, "die") } }() if err := daemon.conditionalMountOnStart(container); err != nil { return err } // Make sure NetworkMode has an acceptable value. We do this to ensure // backwards API compatibility. container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) if err := daemon.initializeNetworking(container); err != nil { return err } linkedEnv, err := daemon.setupLinkedContainers(container) if err != nil { return err } if err := container.SetupWorkingDirectory(); err != nil { return err } env := container.CreateDaemonEnvironment(linkedEnv) if err := daemon.populateCommand(container, env); err != nil { return err } if !container.HostConfig.IpcMode.IsContainer() && !container.HostConfig.IpcMode.IsHost() { if err := daemon.setupIpcDirs(container); err != nil { return err } } mounts, err := daemon.setupMounts(container) if err != nil { return err } mounts = append(mounts, container.IpcMounts()...) mounts = append(mounts, container.TmpfsMounts()...) 
container.Command.Mounts = mounts if err := daemon.waitForStart(container); err != nil { return err } container.HasBeenStartedBefore = true return nil } func (daemon *Daemon) waitForStart(container *container.Container) error { return container.StartMonitor(daemon, container.HostConfig.RestartPolicy) } // Cleanup releases any network resources allocated to the container along with any rules // around how containers are linked together. It also unmounts the container's root filesystem. func (daemon *Daemon) Cleanup(container *container.Container) { daemon.releaseNetwork(container) container.UnmountIpcMounts(detachMounted) daemon.conditionalUnmountOnCleanup(container) for _, eConfig := range container.ExecCommands.Commands() { daemon.unregisterExecCommand(container, eConfig) } if err := container.UnmountVolumes(false, daemon.LogVolumeEvent); err != nil { logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) } } docker-1.10.3/daemon/stats.go000066400000000000000000000060341267010174400160000ustar00rootroot00000000000000package daemon import ( "encoding/json" "io" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/version" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/versions/v1p20" ) // ContainerStatsConfig holds information for configuring the runtime // behavior of a daemon.ContainerStats() call. type ContainerStatsConfig struct { Stream bool OutStream io.Writer Stop <-chan bool Version version.Version } // ContainerStats writes information about the container to the stream // given in the config object. func (daemon *Daemon) ContainerStats(prefixOrName string, config *ContainerStatsConfig) error { container, err := daemon.GetContainer(prefixOrName) if err != nil { return err } // If the container is not running and requires no stream, return an empty stats. 
if !container.IsRunning() && !config.Stream { return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) } if config.Stream { // Write an empty chunk of data. // This is to ensure that the HTTP status code is sent immediately, // even if the container has not yet produced any data. config.OutStream.Write(nil) } var preCPUStats types.CPUStats getStatJSON := func(v interface{}) *types.StatsJSON { update := v.(*execdriver.ResourceStats) ss := convertStatsToAPITypes(update.Stats) ss.PreCPUStats = preCPUStats ss.MemoryStats.Limit = uint64(update.MemoryLimit) ss.Read = update.Read ss.CPUStats.SystemUsage = update.SystemUsage preCPUStats = ss.CPUStats return ss } enc := json.NewEncoder(config.OutStream) updates := daemon.subscribeToContainerStats(container) defer daemon.unsubscribeToContainerStats(container, updates) noStreamFirstFrame := true for { select { case v, ok := <-updates: if !ok { return nil } var statsJSON interface{} statsJSONPost120 := getStatJSON(v) if config.Version.LessThan("1.21") { var ( rxBytes uint64 rxPackets uint64 rxErrors uint64 rxDropped uint64 txBytes uint64 txPackets uint64 txErrors uint64 txDropped uint64 ) for _, v := range statsJSONPost120.Networks { rxBytes += v.RxBytes rxPackets += v.RxPackets rxErrors += v.RxErrors rxDropped += v.RxDropped txBytes += v.TxBytes txPackets += v.TxPackets txErrors += v.TxErrors txDropped += v.TxDropped } statsJSON = &v1p20.StatsJSON{ Stats: statsJSONPost120.Stats, Network: types.NetworkStats{ RxBytes: rxBytes, RxPackets: rxPackets, RxErrors: rxErrors, RxDropped: rxDropped, TxBytes: txBytes, TxPackets: txPackets, TxErrors: txErrors, TxDropped: txDropped, }, } } else { statsJSON = statsJSONPost120 } if !config.Stream && noStreamFirstFrame { // prime the cpu stats so they aren't 0 in the final output noStreamFirstFrame = false continue } if err := enc.Encode(statsJSON); err != nil { return err } if !config.Stream { return nil } case <-config.Stop: return nil } } } 
docker-1.10.3/daemon/stats_collector_unix.go000066400000000000000000000115121267010174400211060ustar00rootroot00000000000000// +build !windows package daemon import ( "bufio" "os" "strconv" "strings" "sync" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/pubsub" "github.com/opencontainers/runc/libcontainer/system" ) type statsSupervisor interface { // GetContainerStats collects all the stats related to a container GetContainerStats(container *container.Container) (*execdriver.ResourceStats, error) } // newStatsCollector returns a new statsCollector that collections // network and cgroup stats for a registered container at the specified // interval. The collector allows non-running containers to be added // and will start processing stats when they are started. func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { s := &statsCollector{ interval: interval, supervisor: daemon, publishers: make(map[*container.Container]*pubsub.Publisher), clockTicksPerSecond: uint64(system.GetClockTicks()), bufReader: bufio.NewReaderSize(nil, 128), } go s.run() return s } // statsCollector manages and provides container resource stats type statsCollector struct { m sync.Mutex supervisor statsSupervisor interval time.Duration clockTicksPerSecond uint64 publishers map[*container.Container]*pubsub.Publisher bufReader *bufio.Reader } // collect registers the container with the collector and adds it to // the event loop for collection on the specified interval returning // a channel for the subscriber to receive on. 
func (s *statsCollector) collect(c *container.Container) chan interface{} { s.m.Lock() defer s.m.Unlock() publisher, exists := s.publishers[c] if !exists { publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) s.publishers[c] = publisher } return publisher.Subscribe() } // stopCollection closes the channels for all subscribers and removes // the container from metrics collection. func (s *statsCollector) stopCollection(c *container.Container) { s.m.Lock() if publisher, exists := s.publishers[c]; exists { publisher.Close() delete(s.publishers, c) } s.m.Unlock() } // unsubscribe removes a specific subscriber from receiving updates for a container's stats. func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { s.m.Lock() publisher := s.publishers[c] if publisher != nil { publisher.Evict(ch) if publisher.Len() == 0 { delete(s.publishers, c) } } s.m.Unlock() } func (s *statsCollector) run() { type publishersPair struct { container *container.Container publisher *pubsub.Publisher } // we cannot determine the capacity here. 
// it will grow enough in first iteration var pairs []publishersPair for range time.Tick(s.interval) { // it does not make sense in the first iteration, // but saves allocations in further iterations pairs = pairs[:0] s.m.Lock() for container, publisher := range s.publishers { // copy pointers here to release the lock ASAP pairs = append(pairs, publishersPair{container, publisher}) } s.m.Unlock() if len(pairs) == 0 { continue } systemUsage, err := s.getSystemCPUUsage() if err != nil { logrus.Errorf("collecting system cpu usage: %v", err) continue } for _, pair := range pairs { stats, err := s.supervisor.GetContainerStats(pair.container) if err != nil { if err != execdriver.ErrNotRunning { logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) } continue } stats.SystemUsage = systemUsage pair.publisher.Publish(stats) } } } const nanoSecondsPerSecond = 1e9 // getSystemCPUUsage returns the host system's cpu usage in // nanoseconds. An error is returned if the format of the underlying // file does not match. // // Uses /proc/stat defined by POSIX. Looks for the cpu // statistics line and then sums up the first seven fields // provided. See `man 5 proc` for details on specific field // information. 
func (s *statsCollector) getSystemCPUUsage() (uint64, error) { var line string f, err := os.Open("/proc/stat") if err != nil { return 0, err } defer func() { s.bufReader.Reset(nil) f.Close() }() s.bufReader.Reset(f) err = nil for err == nil { line, err = s.bufReader.ReadString('\n') if err != nil { break } parts := strings.Fields(line) switch parts[0] { case "cpu": if len(parts) < 8 { return 0, derr.ErrorCodeBadCPUFields } var totalClockTicks uint64 for _, i := range parts[1:8] { v, err := strconv.ParseUint(i, 10, 64) if err != nil { return 0, derr.ErrorCodeBadCPUInt.WithArgs(i, err) } totalClockTicks += v } return (totalClockTicks * nanoSecondsPerSecond) / s.clockTicksPerSecond, nil } } return 0, derr.ErrorCodeBadStatFormat } docker-1.10.3/daemon/stats_collector_windows.go000066400000000000000000000022131267010174400216130ustar00rootroot00000000000000package daemon import ( "time" "github.com/docker/docker/container" ) // newStatsCollector returns a new statsCollector for collection stats // for a registered container at the specified interval. The collector allows // non-running containers to be added and will start processing stats when // they are started. func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { return &statsCollector{} } // statsCollector manages and provides container resource stats type statsCollector struct { } // collect registers the container with the collector and adds it to // the event loop for collection on the specified interval returning // a channel for the subscriber to receive on. func (s *statsCollector) collect(c *container.Container) chan interface{} { return nil } // stopCollection closes the channels for all subscribers and removes // the container from metrics collection. func (s *statsCollector) stopCollection(c *container.Container) { } // unsubscribe removes a specific subscriber from receiving updates for a container's stats. 
func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { } docker-1.10.3/daemon/stats_freebsd.go000066400000000000000000000006531267010174400174730ustar00rootroot00000000000000package daemon import ( "github.com/docker/engine-api/types" "github.com/opencontainers/runc/libcontainer" ) // convertStatsToAPITypes converts the libcontainer.Stats to the api specific // structs. This is done to preserve API compatibility and versioning. func convertStatsToAPITypes(ls *libcontainer.Stats) *types.StatsJSON { // TODO FreeBSD. Refactor accordingly to fill in stats. s := &types.StatsJSON{} return s } docker-1.10.3/daemon/stats_linux.go000066400000000000000000000051071267010174400172170ustar00rootroot00000000000000package daemon import ( "github.com/docker/engine-api/types" "github.com/opencontainers/runc/libcontainer" "github.com/opencontainers/runc/libcontainer/cgroups" ) // convertStatsToAPITypes converts the libcontainer.Stats to the api specific // structs. This is done to preserve API compatibility and versioning. func convertStatsToAPITypes(ls *libcontainer.Stats) *types.StatsJSON { s := &types.StatsJSON{} if ls.Interfaces != nil { s.Networks = make(map[string]types.NetworkStats) for _, iface := range ls.Interfaces { // For API Version >= 1.21, the original data of network will // be returned. 
s.Networks[iface.Name] = types.NetworkStats{ RxBytes: iface.RxBytes, RxPackets: iface.RxPackets, RxErrors: iface.RxErrors, RxDropped: iface.RxDropped, TxBytes: iface.TxBytes, TxPackets: iface.TxPackets, TxErrors: iface.TxErrors, TxDropped: iface.TxDropped, } } } cs := ls.CgroupStats if cs != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(cs.BlkioStats.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(cs.BlkioStats.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(cs.BlkioStats.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(cs.BlkioStats.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(cs.BlkioStats.SectorsRecursive), } cpu := cs.CpuStats s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: cpu.CpuUsage.TotalUsage, PercpuUsage: cpu.CpuUsage.PercpuUsage, UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, UsageInUsermode: cpu.CpuUsage.UsageInUsermode, }, ThrottlingData: types.ThrottlingData{ Periods: cpu.ThrottlingData.Periods, ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, ThrottledTime: cpu.ThrottlingData.ThrottledTime, }, } mem := cs.MemoryStats s.MemoryStats = types.MemoryStats{ Usage: mem.Usage.Usage, MaxUsage: mem.Usage.MaxUsage, Stats: mem.Stats, Failcnt: mem.Usage.Failcnt, } } return s } func copyBlkioEntry(entries []cgroups.BlkioStatEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } docker-1.10.3/daemon/stats_windows.go000066400000000000000000000006531267010174400175530ustar00rootroot00000000000000package daemon import ( "github.com/docker/engine-api/types" 
"github.com/opencontainers/runc/libcontainer" ) // convertStatsToAPITypes converts the libcontainer.Stats to the api specific // structs. This is done to preserve API compatibility and versioning. func convertStatsToAPITypes(ls *libcontainer.Stats) *types.StatsJSON { // TODO Windows. Refactor accordingly to fill in stats. s := &types.StatsJSON{} return s } docker-1.10.3/daemon/stop.go000066400000000000000000000041551267010174400156310ustar00rootroot00000000000000package daemon import ( "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" ) // ContainerStop looks for the given container and terminates it, // waiting the given number of seconds before forcefully killing the // container. If a negative number of seconds is given, ContainerStop // will wait for a graceful termination. An error is returned if the // container is not found, is already stopped, or if there is a // problem stopping the container. func (daemon *Daemon) ContainerStop(name string, seconds int) error { container, err := daemon.GetContainer(name) if err != nil { return err } if !container.IsRunning() { return derr.ErrorCodeStopped } if err := daemon.containerStop(container, seconds); err != nil { return derr.ErrorCodeCantStop.WithArgs(name, err) } return nil } // containerStop halts a container by sending a stop signal, waiting for the given // duration in seconds, and then calling SIGKILL and waiting for the // process to exit. If a negative duration is given, Stop will wait // for the initial signal forever. If the container is not running Stop returns // immediately. func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { if !container.IsRunning() { return nil } // 1. 
Send a SIGTERM if err := daemon.killPossiblyDeadProcess(container, container.StopSignal()); err != nil { logrus.Infof("Failed to send SIGTERM to the process, force killing") if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { return err } } // 2. Wait for the process to exit on its own if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) // 3. If it doesn't, then send SIGKILL if err := daemon.Kill(container); err != nil { container.WaitStop(-1 * time.Second) logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it } } daemon.LogContainerEvent(container, "stop") return nil } docker-1.10.3/daemon/top_unix.go000066400000000000000000000037761267010174400165210ustar00rootroot00000000000000//+build !windows package daemon import ( "os/exec" "strconv" "strings" derr "github.com/docker/docker/errors" "github.com/docker/engine-api/types" ) // ContainerTop lists the processes running inside of the given // container by calling ps with the given args, or with the flags // "-ef" if no args are given. An error is returned if the container // is not found, or is not running, or if there are any problems // running ps, or parsing the output. 
func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { if psArgs == "" { psArgs = "-ef" } container, err := daemon.GetContainer(name) if err != nil { return nil, err } if !container.IsRunning() { return nil, derr.ErrorCodeNotRunning.WithArgs(name) } pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) if err != nil { return nil, err } output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() if err != nil { return nil, derr.ErrorCodePSError.WithArgs(err) } procList := &types.ContainerProcessList{} lines := strings.Split(string(output), "\n") procList.Titles = strings.Fields(lines[0]) pidIndex := -1 for i, name := range procList.Titles { if name == "PID" { pidIndex = i } } if pidIndex == -1 { return nil, derr.ErrorCodeNoPID } // loop through the output and extract the PID from each line for _, line := range lines[1:] { if len(line) == 0 { continue } fields := strings.Fields(line) p, err := strconv.Atoi(fields[pidIndex]) if err != nil { return nil, derr.ErrorCodeBadPID.WithArgs(fields[pidIndex], err) } for _, pid := range pids { if pid == p { // Make sure number of fields equals number of header titles // merging "overhanging" fields process := fields[:len(procList.Titles)-1] process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) procList.Processes = append(procList.Processes, process) } } } daemon.LogContainerEvent(container, "top") return procList, nil } docker-1.10.3/daemon/top_windows.go000066400000000000000000000004651267010174400172200ustar00rootroot00000000000000package daemon import ( derr "github.com/docker/docker/errors" "github.com/docker/engine-api/types" ) // ContainerTop is not supported on Windows and returns an error. 
func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { return nil, derr.ErrorCodeNoTop } docker-1.10.3/daemon/unpause.go000066400000000000000000000020751267010174400163230ustar00rootroot00000000000000package daemon import ( "github.com/docker/docker/container" derr "github.com/docker/docker/errors" ) // ContainerUnpause unpauses a container func (daemon *Daemon) ContainerUnpause(name string) error { container, err := daemon.GetContainer(name) if err != nil { return err } if err := daemon.containerUnpause(container); err != nil { return derr.ErrorCodeCantUnpause.WithArgs(name, err) } return nil } // containerUnpause resumes the container execution after the container is paused. func (daemon *Daemon) containerUnpause(container *container.Container) error { container.Lock() defer container.Unlock() // We cannot unpause the container which is not running if !container.Running { return derr.ErrorCodeNotRunning.WithArgs(container.ID) } // We cannot unpause the container which is not paused if !container.Paused { return derr.ErrorCodeNotPaused.WithArgs(container.ID) } if err := daemon.execDriver.Unpause(container.Command); err != nil { return err } container.Paused = false daemon.LogContainerEvent(container, "unpause") return nil } docker-1.10.3/daemon/update.go000066400000000000000000000027141267010174400161250ustar00rootroot00000000000000package daemon import ( "fmt" "github.com/docker/engine-api/types/container" ) // ContainerUpdate updates resources of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) { var warnings []string warnings, err := daemon.verifyContainerSettings(hostConfig, nil) if err != nil { return warnings, err } if err := daemon.update(name, hostConfig); err != nil { return warnings, err } return warnings, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } container, 
err := daemon.GetContainer(name) if err != nil { return err } if container.RemovalInProgress || container.Dead { return fmt.Errorf("Container is marked for removal and cannot be \"update\".") } if container.IsRunning() && hostConfig.KernelMemory != 0 { return fmt.Errorf("Can not update kernel memory to a running container, please stop it first.") } if err := container.UpdateContainer(hostConfig); err != nil { return err } // If container is not running, update hostConfig struct is enough, // resources will be updated when the container is started again. // If container is running (including paused), we need to update configs // to the real world. if container.IsRunning() { if err := daemon.execDriver.Update(container.Command); err != nil { return err } } daemon.LogContainerEvent(container, "update") return nil } docker-1.10.3/daemon/volumes.go000066400000000000000000000114341267010174400163340ustar00rootroot00000000000000package daemon import ( "errors" "os" "path/filepath" "strings" "github.com/docker/docker/container" "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" "github.com/docker/docker/volume" "github.com/docker/engine-api/types" containertypes "github.com/docker/engine-api/types/container" "github.com/opencontainers/runc/libcontainer/label" ) var ( // ErrVolumeReadonly is used to signal an error when trying to copy data into // a volume mount that is not writable. ErrVolumeReadonly = errors.New("mounted volume is marked read-only") ) type mounts []execdriver.Mount // volumeToAPIType converts a volume.Volume to the type used by the remote API func volumeToAPIType(v volume.Volume) *types.Volume { return &types.Volume{ Name: v.Name(), Driver: v.DriverName(), Mountpoint: v.Path(), } } // Len returns the number of mounts. Used in sorting. 
func (m mounts) Len() int { return len(m) } // Less returns true if the number of parts (a/b/c would be 3 parts) in the // mount indexed by parameter 1 is less than that of the mount indexed by // parameter 2. Used in sorting. func (m mounts) Less(i, j int) bool { return m.parts(i) < m.parts(j) } // Swap swaps two items in an array of mounts. Used in sorting func (m mounts) Swap(i, j int) { m[i], m[j] = m[j], m[i] } // parts returns the number of parts in the destination of a mount. Used in sorting. func (m mounts) parts(i int) int { return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) } // registerMountPoints initializes the container mount points with the configured volumes and bind mounts. // It follows the next sequence to decide what to mount in each final destination: // // 1. Select the previously configured mount points for the containers, if any. // 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. // 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. // 4. Cleanup old volumes that are about to be reassigned. func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) error { binds := map[string]bool{} mountPoints := map[string]*volume.MountPoint{} // 1. Read already configured mount points. for name, point := range container.MountPoints { mountPoints[name] = point } // 2. Read volumes from other containers. 
for _, v := range hostConfig.VolumesFrom { containerID, mode, err := volume.ParseVolumesFrom(v) if err != nil { return err } c, err := daemon.GetContainer(containerID) if err != nil { return err } for _, m := range c.MountPoints { cp := &volume.MountPoint{ Name: m.Name, Source: m.Source, RW: m.RW && volume.ReadWrite(mode), Driver: m.Driver, Destination: m.Destination, Propagation: m.Propagation, Named: m.Named, } if len(cp.Source) == 0 { v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID) if err != nil { return err } cp.Volume = v } mountPoints[cp.Destination] = cp } } // 3. Read bind mounts for _, b := range hostConfig.Binds { // #10618 bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver) if err != nil { return err } if binds[bind.Destination] { return derr.ErrorCodeMountDup.WithArgs(bind.Destination) } if len(bind.Name) > 0 && len(bind.Driver) > 0 { // create the volume v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil) if err != nil { return err } bind.Volume = v bind.Source = v.Path() // bind.Name is an already existing volume, we need to use that here bind.Driver = v.DriverName() bind.Named = true if bind.Driver == "local" { bind = setBindModeIfNull(bind) } } if label.RelabelNeeded(bind.Mode) { if err := label.Relabel(bind.Source, container.MountLabel, label.IsShared(bind.Mode)); err != nil { return err } } binds[bind.Destination] = true mountPoints[bind.Destination] = bind } container.Lock() // 4. Cleanup old volumes that are about to be reassigned. for _, m := range mountPoints { if m.BackwardsCompatible() { if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { daemon.volumes.Dereference(mp.Volume, container.ID) } } } container.MountPoints = mountPoints container.Unlock() return nil } // lazyInitializeVolume initializes a mountpoint's volume if needed. // This happens after a daemon restart. 
func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { if len(m.Driver) > 0 && m.Volume == nil { v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) if err != nil { return err } m.Volume = v } return nil } docker-1.10.3/daemon/volumes_unit_test.go000066400000000000000000000014271267010174400204330ustar00rootroot00000000000000package daemon import ( "testing" "github.com/docker/docker/volume" ) func TestParseVolumesFrom(t *testing.T) { cases := []struct { spec string expID string expMode string fail bool }{ {"", "", "", true}, {"foobar", "foobar", "rw", false}, {"foobar:rw", "foobar", "rw", false}, {"foobar:ro", "foobar", "ro", false}, {"foobar:baz", "", "", true}, } for _, c := range cases { id, mode, err := volume.ParseVolumesFrom(c.spec) if c.fail { if err == nil { t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) } continue } if id != c.expID { t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec) } if mode != c.expMode { t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) } } } docker-1.10.3/daemon/volumes_unix.go000066400000000000000000000074101267010174400173760ustar00rootroot00000000000000// +build !windows package daemon import ( "os" "sort" "strconv" "github.com/docker/docker/container" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/volume" volumedrivers "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" ) // setupMounts iterates through each of the mount points for a container and // calls Setup() on each. It also looks to see if is a network mount such as // /etc/resolv.conf, and if it is not, appends it to the array of mounts. 
func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) { var mounts []execdriver.Mount for _, m := range container.MountPoints { if err := daemon.lazyInitializeVolume(container.ID, m); err != nil { return nil, err } path, err := m.Setup() if err != nil { return nil, err } if !container.TrySetNetworkMount(m.Destination, path) { mnt := execdriver.Mount{ Source: path, Destination: m.Destination, Writable: m.RW, Propagation: m.Propagation, } if m.Volume != nil { attributes := map[string]string{ "driver": m.Volume.DriverName(), "container": container.ID, "destination": m.Destination, "read/write": strconv.FormatBool(m.RW), "propagation": m.Propagation, } daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes) } mounts = append(mounts, mnt) } } mounts = sortMounts(mounts) netMounts := container.NetworkMounts() // if we are going to mount any of the network files from container // metadata, the ownership must be set properly for potential container // remapped root (user namespaces) rootUID, rootGID := daemon.GetRemappedUIDGID() for _, mount := range netMounts { if err := os.Chown(mount.Source, rootUID, rootGID); err != nil { return nil, err } } return append(mounts, netMounts...), nil } // sortMounts sorts an array of mounts in lexicographic order. This ensure that // when mounting, the mounts don't shadow other mounts. For example, if mounting // /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first. func sortMounts(m []execdriver.Mount) []execdriver.Mount { sort.Sort(mounts(m)) return m } // migrateVolume links the contents of a volume created pre Docker 1.7 // into the location expected by the local driver. // It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data. // It preserves the volume json configuration generated pre Docker 1.7 to be able to // downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility. 
func migrateVolume(id, vfs string) error { l, err := volumedrivers.Lookup(volume.DefaultDriverName) if err != nil { return err } newDataPath := l.(*local.Root).DataPath(id) fi, err := os.Stat(newDataPath) if err != nil && !os.IsNotExist(err) { return err } if fi != nil && fi.IsDir() { return nil } return os.Symlink(vfs, newDataPath) } // validVolumeLayout checks whether the volume directory layout // is valid to work with Docker post 1.7 or not. func validVolumeLayout(files []os.FileInfo) bool { if len(files) == 1 && files[0].Name() == local.VolumeDataPathName && files[0].IsDir() { return true } if len(files) != 2 { return false } for _, f := range files { if f.Name() == "config.json" || (f.Name() == local.VolumeDataPathName && f.Mode()&os.ModeSymlink == os.ModeSymlink) { // Old volume configuration, we ignore it continue } return false } return true } // setBindModeIfNull is platform specific processing to ensure the // shared mode is set to 'z' if it is null. This is called in the case // of processing a named volume and not a typical bind. func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint { if bind.Mode == "" { bind.Mode = "z" } return bind } docker-1.10.3/daemon/volumes_windows.go000066400000000000000000000026321267010174400201060ustar00rootroot00000000000000// +build windows package daemon import ( "sort" "github.com/docker/docker/container" "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" "github.com/docker/docker/volume" ) // setupMounts configures the mount points for a container by appending each // of the configured mounts on the container to the execdriver mount structure // which will ultimately be passed into the exec driver during container creation. // It also ensures each of the mounts are lexographically sorted. 
func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) { var mnts []execdriver.Mount for _, mount := range container.MountPoints { // type is volume.MountPoint if err := daemon.lazyInitializeVolume(container.ID, mount); err != nil { return nil, err } // If there is no source, take it from the volume path s := mount.Source if s == "" && mount.Volume != nil { s = mount.Volume.Path() } if s == "" { return nil, derr.ErrorCodeVolumeNoSourceForMount.WithArgs(mount.Name, mount.Driver, mount.Destination) } mnts = append(mnts, execdriver.Mount{ Source: s, Destination: mount.Destination, Writable: mount.RW, }) } sort.Sort(mounts(mnts)) return mnts, nil } // setBindModeIfNull is platform specific processing which is a no-op on // Windows. func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint { return bind } docker-1.10.3/daemon/wait.go000066400000000000000000000010471267010174400156050ustar00rootroot00000000000000package daemon import "time" // ContainerWait stops processing until the given container is // stopped. If the container is not found, an error is returned. On a // successful stop, the exit code of the container is returned. On a // timeout, an error is returned. If you want to wait forever, supply // a negative duration for the timeout. 
func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { container, err := daemon.GetContainer(name) if err != nil { return -1, err } return container.WaitStop(timeout) } docker-1.10.3/distribution/000077500000000000000000000000001267010174400155645ustar00rootroot00000000000000docker-1.10.3/distribution/fixtures/000077500000000000000000000000001267010174400174355ustar00rootroot00000000000000docker-1.10.3/distribution/fixtures/validate_manifest/000077500000000000000000000000001267010174400231145ustar00rootroot00000000000000docker-1.10.3/distribution/fixtures/validate_manifest/bad_manifest000066400000000000000000000074051267010174400254610ustar00rootroot00000000000000{ "schemaVersion": 2, "name": "library/hello-world", "tag": "latest", "architecture": "amd64", "fsLayers": [ { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" } ], "history": [ { "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" }, { "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in 
/\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" } ], "signatures": [ { "header": { "jwk": { "crv": "P-256", "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", "kty": "EC", "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" }, "alg": "ES256" }, "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" } ] } docker-1.10.3/distribution/fixtures/validate_manifest/extra_data_manifest000066400000000000000000000077711267010174400270550ustar00rootroot00000000000000{ "schemaVersion": 1, "name": "library/hello-world", "tag": "latest", "architecture": "amd64", "fsLayers": [ { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" } ], "history": [ { "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" }, { "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY 
file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" } ], "fsLayers": [ { "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" } ], "signatures": [ { "header": { "jwk": { "crv": "P-256", "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", "kty": "EC", "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" }, "alg": "ES256" }, "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" } ] } docker-1.10.3/distribution/fixtures/validate_manifest/good_manifest000066400000000000000000000074041267010174400256620ustar00rootroot00000000000000{ "schemaVersion": 1, "name": "library/hello-world", "tag": "latest", "architecture": "amd64", "fsLayers": [ { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" } ], "history": [ { "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" }, { "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY 
file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" } ], "signatures": [ { "header": { "jwk": { "crv": "P-256", "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", "kty": "EC", "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" }, "alg": "ES256" }, "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" } ] }docker-1.10.3/distribution/metadata/000077500000000000000000000000001267010174400173445ustar00rootroot00000000000000docker-1.10.3/distribution/metadata/metadata.go000066400000000000000000000043721267010174400214610ustar00rootroot00000000000000package metadata import ( "io/ioutil" "os" "path/filepath" "sync" ) // Store implements a K/V store for mapping distribution-related IDs // to on-disk layer IDs and image IDs. The namespace identifies the type of // mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. type Store interface { // Get retrieves data by namespace and key. Get(namespace string, key string) ([]byte, error) // Set writes data indexed by namespace and key. 
Set(namespace, key string, value []byte) error // Delete removes data indexed by namespace and key. Delete(namespace, key string) error } // FSMetadataStore uses the filesystem to associate metadata with layer and // image IDs. type FSMetadataStore struct { sync.RWMutex basePath string } // NewFSMetadataStore creates a new filesystem-based metadata store. func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { if err := os.MkdirAll(basePath, 0700); err != nil { return nil, err } return &FSMetadataStore{ basePath: basePath, }, nil } func (store *FSMetadataStore) path(namespace, key string) string { return filepath.Join(store.basePath, namespace, key) } // Get retrieves data by namespace and key. The data is read from a file named // after the key, stored in the namespace's directory. func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { store.RLock() defer store.RUnlock() return ioutil.ReadFile(store.path(namespace, key)) } // Set writes data indexed by namespace and key. The data is written to a file // named after the key, stored in the namespace's directory. func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { store.Lock() defer store.Unlock() path := store.path(namespace, key) tempFilePath := path + ".tmp" if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { return err } if err := ioutil.WriteFile(tempFilePath, value, 0644); err != nil { return err } return os.Rename(tempFilePath, path) } // Delete removes data indexed by namespace and key. The data file named after // the key, stored in the namespace's directory is deleted. 
func (store *FSMetadataStore) Delete(namespace, key string) error { store.Lock() defer store.Unlock() path := store.path(namespace, key) return os.Remove(path) } docker-1.10.3/distribution/metadata/v1_id_service.go000066400000000000000000000020611267010174400224140ustar00rootroot00000000000000package metadata import ( "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" ) // V1IDService maps v1 IDs to layers on disk. type V1IDService struct { store Store } // NewV1IDService creates a new V1 ID mapping service. func NewV1IDService(store Store) *V1IDService { return &V1IDService{ store: store, } } // namespace returns the namespace used by this service. func (idserv *V1IDService) namespace() string { return "v1id" } // Get finds a layer by its V1 ID. func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { if err := v1.ValidateID(v1ID); err != nil { return layer.DiffID(""), err } idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) if err != nil { return layer.DiffID(""), err } return layer.DiffID(idBytes), nil } // Set associates an image with a V1 ID. 
func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { if err := v1.ValidateID(v1ID); err != nil { return err } return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) } docker-1.10.3/distribution/metadata/v1_id_service_test.go000066400000000000000000000044421267010174400234600ustar00rootroot00000000000000package metadata import ( "io/ioutil" "os" "testing" "github.com/docker/docker/layer" ) func TestV1IDService(t *testing.T) { tmpDir, err := ioutil.TempDir("", "v1-id-service-test") if err != nil { t.Fatalf("could not create temp dir: %v", err) } defer os.RemoveAll(tmpDir) metadataStore, err := NewFSMetadataStore(tmpDir) if err != nil { t.Fatalf("could not create metadata store: %v", err) } v1IDService := NewV1IDService(metadataStore) testVectors := []struct { registry string v1ID string layerID layer.DiffID }{ { registry: "registry1", v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), }, { registry: "registry2", v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), }, { registry: "registry1", v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), }, } // Set some associations for _, vec := range testVectors { err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) if err != nil { t.Fatalf("error calling Set: %v", err) } } // Check the correct values are read back for _, vec := range testVectors { layerID, err := v1IDService.Get(vec.v1ID, vec.registry) if err != nil { t.Fatalf("error calling Get: %v", err) } if layerID != vec.layerID { t.Fatal("Get returned incorrect layer ID") } } // Test Get on a nonexistent entry _, err = 
v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") if err == nil { t.Fatal("expected error looking up nonexistent entry") } // Overwrite one of the entries and read it back err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) if err != nil { t.Fatalf("error calling Set: %v", err) } layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) if err != nil { t.Fatalf("error calling Get: %v", err) } if layerID != testVectors[1].layerID { t.Fatal("Get returned incorrect layer ID") } } docker-1.10.3/distribution/metadata/v2_metadata_service.go000066400000000000000000000070501267010174400236040ustar00rootroot00000000000000package metadata import ( "encoding/json" "github.com/docker/distribution/digest" "github.com/docker/docker/layer" ) // V2MetadataService maps layer IDs to a set of known metadata for // the layer. type V2MetadataService struct { store Store } // V2Metadata contains the digest and source repository information for a layer. type V2Metadata struct { Digest digest.Digest SourceRepository string } // maxMetadata is the number of metadata entries to keep per layer DiffID. const maxMetadata = 50 // NewV2MetadataService creates a new diff ID to v2 metadata mapping service. func NewV2MetadataService(store Store) *V2MetadataService { return &V2MetadataService{ store: store, } } func (serv *V2MetadataService) diffIDNamespace() string { return "v2metadata-by-diffid" } func (serv *V2MetadataService) digestNamespace() string { return "diffid-by-digest" } func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string { return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() } func (serv *V2MetadataService) digestKey(dgst digest.Digest) string { return string(dgst.Algorithm()) + "/" + dgst.Hex() } // GetMetadata finds the metadata associated with a layer DiffID. 
func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) if err != nil { return nil, err } var metadata []V2Metadata if err := json.Unmarshal(jsonBytes, &metadata); err != nil { return nil, err } return metadata, nil } // GetDiffID finds a layer DiffID from a digest. func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) if err != nil { return layer.DiffID(""), err } return layer.DiffID(diffIDBytes), nil } // Add associates metadata with a layer DiffID. If too many metadata entries are // present, the oldest one is dropped. func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { oldMetadata, err := serv.GetMetadata(diffID) if err != nil { oldMetadata = nil } newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) // Copy all other metadata to new slice for _, oldMeta := range oldMetadata { if oldMeta != metadata { newMetadata = append(newMetadata, oldMeta) } } newMetadata = append(newMetadata, metadata) if len(newMetadata) > maxMetadata { newMetadata = newMetadata[len(newMetadata)-maxMetadata:] } jsonBytes, err := json.Marshal(newMetadata) if err != nil { return err } err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) if err != nil { return err } return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) } // Remove unassociates a metadata entry from a layer DiffID. 
func (serv *V2MetadataService) Remove(metadata V2Metadata) error { diffID, err := serv.GetDiffID(metadata.Digest) if err != nil { return err } oldMetadata, err := serv.GetMetadata(diffID) if err != nil { oldMetadata = nil } newMetadata := make([]V2Metadata, 0, len(oldMetadata)) // Copy all other metadata to new slice for _, oldMeta := range oldMetadata { if oldMeta != metadata { newMetadata = append(newMetadata, oldMeta) } } if len(newMetadata) == 0 { return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) } jsonBytes, err := json.Marshal(newMetadata) if err != nil { return err } return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) } docker-1.10.3/distribution/metadata/v2_metadata_service_test.go000066400000000000000000000064741267010174400246540ustar00rootroot00000000000000package metadata import ( "encoding/hex" "io/ioutil" "math/rand" "os" "reflect" "testing" "github.com/docker/distribution/digest" "github.com/docker/docker/layer" ) func TestV2MetadataService(t *testing.T) { tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") if err != nil { t.Fatalf("could not create temp dir: %v", err) } defer os.RemoveAll(tmpDir) metadataStore, err := NewFSMetadataStore(tmpDir) if err != nil { t.Fatalf("could not create metadata store: %v", err) } V2MetadataService := NewV2MetadataService(metadataStore) tooManyBlobSums := make([]V2Metadata, 100) for i := range tooManyBlobSums { randDigest := randomDigest() tooManyBlobSums[i] = V2Metadata{Digest: randDigest} } testVectors := []struct { diffID layer.DiffID metadata []V2Metadata }{ { diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), metadata: []V2Metadata{ {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, }, }, { diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), metadata: []V2Metadata{ {Digest: 
digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, {Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")}, }, }, { diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), metadata: tooManyBlobSums, }, } // Set some associations for _, vec := range testVectors { for _, blobsum := range vec.metadata { err := V2MetadataService.Add(vec.diffID, blobsum) if err != nil { t.Fatalf("error calling Set: %v", err) } } } // Check the correct values are read back for _, vec := range testVectors { metadata, err := V2MetadataService.GetMetadata(vec.diffID) if err != nil { t.Fatalf("error calling Get: %v", err) } expectedMetadataEntries := len(vec.metadata) if expectedMetadataEntries > 50 { expectedMetadataEntries = 50 } if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) { t.Fatal("Get returned incorrect layer ID") } } // Test GetMetadata on a nonexistent entry _, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) if err == nil { t.Fatal("expected error looking up nonexistent entry") } // Test GetDiffID on a nonexistent entry _, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) if err == nil { t.Fatal("expected error looking up nonexistent entry") } // Overwrite one of the entries and read it back err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0]) if err != nil { t.Fatalf("error calling Add: %v", err) } diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest) if err != nil { t.Fatalf("error calling GetDiffID: %v", err) } if diffID != testVectors[1].diffID { t.Fatal("GetDiffID returned incorrect diffID") } } func randomDigest() digest.Digest { b := [32]byte{} for i := 0; i < len(b); i++ { b[i] = 
byte(rand.Intn(256)) } d := hex.EncodeToString(b[:]) return digest.Digest("sha256:" + d) } docker-1.10.3/distribution/pull.go000066400000000000000000000157431267010174400171010ustar00rootroot00000000000000package distribution import ( "fmt" "os" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/engine-api/types" "golang.org/x/net/context" ) // ImagePullConfig stores pull configuration. type ImagePullConfig struct { // MetaHeaders stores HTTP headers with metadata about the image // (DockerHeaders with prefix X-Meta- in the request). MetaHeaders map[string][]string // AuthConfig holds authentication credentials for authenticating with // the registry. AuthConfig *types.AuthConfig // ProgressOutput is the interface for showing the status of the pull // operation. ProgressOutput progress.Output // RegistryService is the registry service to use for TLS configuration // and endpoint lookup. RegistryService *registry.Service // ImageEventLogger notifies events for a given image ImageEventLogger func(id, name, action string) // MetadataStore is the storage backend for distribution-specific // metadata. MetadataStore metadata.Store // ImageStore manages images. ImageStore image.Store // ReferenceStore manages tags. ReferenceStore reference.Store // DownloadManager manages concurrent pulls. DownloadManager *xfer.LayerDownloadManager } // Puller is an interface that abstracts pulling for different API versions. type Puller interface { // Pull tries to pull the image referenced by `tag` // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. 
// Pull(ctx context.Context, ref reference.Named) error } // newPuller returns a Puller interface that will pull from either a v1 or v2 // registry. The endpoint argument contains a Version field that determines // whether a v1 or v2 puller will be created. The other parameters are passed // through to the underlying puller implementation for use during the actual // pull operation. func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { switch endpoint.Version { case registry.APIVersion2: return &v2Puller{ V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), endpoint: endpoint, config: imagePullConfig, repoInfo: repoInfo, }, nil case registry.APIVersion1: return &v1Puller{ v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), endpoint: endpoint, config: imagePullConfig, repoInfo: repoInfo, }, nil } return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) } // Pull initiates a pull operation. image is the repository name to pull, and // tag may be either empty, or indicate a specific tag to pull. func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { // Resolve the Repository name from fqn to RepositoryInfo repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) if err != nil { return err } // makes sure name is not empty or `scratch` if err := validateRepoName(repoInfo.Name()); err != nil { return err } endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo) if err != nil { return err } var ( lastErr error // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport // By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in lastErr. 
// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of // any subsequent ErrNoSupport errors in lastErr. // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant // error is the ones from v2 endpoints not v1. discardNoSupportErrors bool // confirmedV2 is set to true if a pull attempt managed to // confirm that it was talking to a v2 registry. This will // prevent fallback to the v1 protocol. confirmedV2 bool ) for _, endpoint := range endpoints { if confirmedV2 && endpoint.Version == registry.APIVersion1 { logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) continue } logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) puller, err := newPuller(endpoint, repoInfo, imagePullConfig) if err != nil { lastErr = err continue } if err := puller.Pull(ctx, ref); err != nil { // Was this pull cancelled? If so, don't try to fall // back. fallback := false select { case <-ctx.Done(): default: if fallbackErr, ok := err.(fallbackError); ok { fallback = true confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 err = fallbackErr.err } } if fallback { if _, ok := err.(registry.ErrNoSupport); !ok { // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. discardNoSupportErrors = true // append subsequent errors lastErr = err } else if !discardNoSupportErrors { // Save the ErrNoSupport error, because it's either the first error or all encountered errors // were also ErrNoSupport errors. 
// append subsequent errors lastErr = err } continue } logrus.Debugf("Not continuing with error: %v", err) return err } imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") return nil } if lastErr == nil { lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) } return lastErr } // writeStatus writes a status message to out. If layersDownloaded is true, the // status message indicates that a newer image was downloaded. Otherwise, it // indicates that the image is up to date. requestedTag is the tag the message // will refer to. func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { if layersDownloaded { progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) } else { progress.Message(out, "", "Status: Image is up to date for "+requestedTag) } } // validateRepoName validates the name of a repository. func validateRepoName(name string) error { if name == "" { return fmt.Errorf("Repository name can't be empty") } if name == api.NoBaseImageSpecifier { return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) } return nil } // tmpFileClose creates a closer function for a temporary file that closes the file // and also deletes it. 
func tmpFileCloser(tmpFile *os.File) func() error { return func() error { tmpFile.Close() if err := os.RemoveAll(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return nil } } docker-1.10.3/distribution/pull_v1.go000066400000000000000000000242621267010174400175030ustar00rootroot00000000000000package distribution import ( "errors" "fmt" "io" "io/ioutil" "net" "net/url" "strings" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "golang.org/x/net/context" ) type v1Puller struct { v1IDService *metadata.V1IDService endpoint registry.APIEndpoint config *ImagePullConfig repoInfo *registry.RepositoryInfo session *registry.Session } func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { if _, isCanonical := ref.(reference.Canonical); isCanonical { // Allowing fallback, because HTTPS v1 is before HTTP v2 return fallbackError{err: registry.ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} } tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) if err != nil { return err } // Adds Docker-specific headers as well as user-specified headers (metaHeaders) tr := transport.NewTransport( // TODO(tiborvass): was ReceiveTimeout registry.NewTransport(tlsConfig), registry.DockerHeaders(p.config.MetaHeaders)..., ) client := registry.HTTPClient(tr) v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) if err != nil { logrus.Debugf("Could not get v1 endpoint: %v", err) return fallbackError{err: err} } p.session, err = 
registry.NewSession(client, p.config.AuthConfig, v1Endpoint) if err != nil { // TODO(dmcgowan): Check if should fallback logrus.Debugf("Fallback from error: %s", err) return fallbackError{err: err} } if err := p.pullRepository(ctx, ref); err != nil { // TODO(dmcgowan): Check if should fallback return err } progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.") return nil } func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) repoData, err := p.session.GetRepositoryData(p.repoInfo) if err != nil { if strings.Contains(err.Error(), "HTTP code: 404") { return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) } // Unexpected HTTP error return err } logrus.Debugf("Retrieving the tag list") var tagsList map[string]string tagged, isTagged := ref.(reference.NamedTagged) if !isTagged { tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) } else { var tagID string tagsList = make(map[string]string) tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) if err == registry.ErrRepoNotFound { return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) } tagsList[tagged.Tag()] = tagID } if err != nil { logrus.Errorf("unable to get remote tags: %s", err) return err } for tag, id := range tagsList { repoData.ImgList[id] = ®istry.ImgData{ ID: id, Tag: tag, Checksum: "", } } layersDownloaded := false for _, imgData := range repoData.ImgList { if isTagged && imgData.Tag != tagged.Tag() { continue } err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) if err != nil { return err } } writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) return nil } func (p *v1Puller) downloadImage(ctx context.Context, 
repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { if img.Tag == "" { logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) return nil } localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) if err != nil { retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) logrus.Debug(retErr.Error()) return retErr } if err := v1.ValidateID(img.ID); err != nil { return err } progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) success := false var lastErr error for _, ep := range p.repoInfo.Index.Mirrors { ep += "v1/" progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { // Don't report errors when pulling from mirrors. logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) continue } success = true break } if !success { for _, ep := range repoData.Endpoints { progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { // It's not ideal that only the last error is returned, it would be better to concatenate the errors. // As the error is also given to the output stream the user will see the error. 
lastErr = err progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) continue } success = true break } } if !success { err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) return err } return nil } func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { var history []string history, err = p.session.GetRemoteHistory(v1ID, endpoint) if err != nil { return err } if len(history) < 1 { return fmt.Errorf("empty history for image %s", v1ID) } progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") var ( descriptors []xfer.DownloadDescriptor newHistory []image.History imgJSON []byte imgSize int64 ) // Iterate over layers, in order from bottom-most to top-most. Download // config for all layers and create descriptors. 
for i := len(history) - 1; i >= 0; i-- { v1LayerID := history[i] imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) if err != nil { return err } // Create a new-style config from the legacy configs h, err := v1.HistoryFromConfig(imgJSON, false) if err != nil { return err } newHistory = append(newHistory, h) layerDescriptor := &v1LayerDescriptor{ v1LayerID: v1LayerID, indexName: p.repoInfo.Index.Name, endpoint: endpoint, v1IDService: p.v1IDService, layersDownloaded: layersDownloaded, layerSize: imgSize, session: p.session, } descriptors = append(descriptors, layerDescriptor) } rootFS := image.NewRootFS() resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) if err != nil { return err } defer release() config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) if err != nil { return err } imageID, err := p.config.ImageStore.Create(config) if err != nil { return err } if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { return err } return nil } func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") retries := 5 for j := 1; j <= retries; j++ { imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) if err != nil && j == retries { progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") return nil, 0, err } else if err != nil { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } return imgJSON, imgSize, nil } // not reached return nil, 0, nil } type v1LayerDescriptor struct { v1LayerID string indexName string endpoint string v1IDService *metadata.V1IDService layersDownloaded *bool layerSize int64 session *registry.Session } func (ld *v1LayerDescriptor) Key() string { return "v1:" + ld.v1LayerID } func (ld 
*v1LayerDescriptor) ID() string { return stringid.TruncateID(ld.v1LayerID) } func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) } func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { progress.Update(progressOutput, ld.ID(), "Pulling fs layer") layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) if err != nil { progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") if uerr, ok := err.(*url.Error); ok { err = uerr.Err } if terr, ok := err.(net.Error); ok && terr.Timeout() { return nil, 0, err } return nil, 0, xfer.DoNotRetry{Err: err} } *ld.layersDownloaded = true tmpFile, err := ioutil.TempFile("", "GetImageBlob") if err != nil { layerReader.Close() return nil, 0, err } reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") defer reader.Close() _, err = io.Copy(tmpFile, reader) if err != nil { return nil, 0, err } progress.Update(progressOutput, ld.ID(), "Download complete") logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) tmpFile.Seek(0, 0) return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), ld.layerSize, nil } func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { // Cache mapping from this layer's DiffID to the blobsum ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) } docker-1.10.3/distribution/pull_v2.go000066400000000000000000000514561267010174400175110ustar00rootroot00000000000000package distribution import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "os" "runtime" "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" 
"github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/client" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "golang.org/x/net/context" ) var errRootFSMismatch = errors.New("layers from manifest don't match image configuration") type v2Puller struct { V2MetadataService *metadata.V2MetadataService endpoint registry.APIEndpoint config *ImagePullConfig repoInfo *registry.RepositoryInfo repo distribution.Repository // confirmedV2 is set to true if we confirm we're talking to a v2 // registry. This is used to limit fallbacks to the v1 protocol. confirmedV2 bool } func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { // TODO(tiborvass): was ReceiveTimeout p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { logrus.Warnf("Error getting v2 registry: %v", err) return fallbackError{err: err, confirmedV2: p.confirmedV2} } if err = p.pullV2Repository(ctx, ref); err != nil { if _, ok := err.(fallbackError); ok { return err } if registry.ContinueOnError(err) { logrus.Debugf("Error trying v2 registry: %v", err) return fallbackError{err: err, confirmedV2: p.confirmedV2} } } return err } func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { var layersDownloaded bool if !reference.IsNameOnly(ref) { layersDownloaded, err = p.pullV2Tag(ctx, ref) if err != nil { return err } } else { tags, err := p.repo.Tags(ctx).All(ctx) if err != nil { // If this repository doesn't exist on V2, we should // permit a fallback to V1. 
return allowV1Fallback(err) } // The v2 registry knows about this repository, so we will not // allow fallback to the v1 protocol even if we encounter an // error later on. p.confirmedV2 = true for _, tag := range tags { tagRef, err := reference.WithTag(ref, tag) if err != nil { return err } pulledNew, err := p.pullV2Tag(ctx, tagRef) if err != nil { // Since this is the pull-all-tags case, don't // allow an error pulling a particular tag to // make the whole pull fall back to v1. if fallbackErr, ok := err.(fallbackError); ok { return fallbackErr.err } return err } // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? layersDownloaded = layersDownloaded || pulledNew } } writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) return nil } type v2LayerDescriptor struct { digest digest.Digest repoInfo *registry.RepositoryInfo repo distribution.Repository V2MetadataService *metadata.V2MetadataService } func (ld *v2LayerDescriptor) Key() string { return "v2:" + ld.digest.String() } func (ld *v2LayerDescriptor) ID() string { return stringid.TruncateID(ld.digest.String()) } func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { return ld.V2MetadataService.GetDiffID(ld.digest) } func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { logrus.Debugf("pulling blob %q", ld.digest) blobs := ld.repo.Blobs(ctx) layerDownload, err := blobs.Open(ctx, ld.digest) if err != nil { logrus.Debugf("Error statting layer: %v", err) if err == distribution.ErrBlobUnknown { return nil, 0, xfer.DoNotRetry{Err: err} } return nil, 0, retryOnError(err) } size, err := layerDownload.Seek(0, os.SEEK_END) if err != nil { // Seek failed, perhaps because there was no Content-Length // header. 
This shouldn't fail the download, because we can // still continue without a progress bar. size = 0 } else { // Restore the seek offset at the beginning of the stream. _, err = layerDownload.Seek(0, os.SEEK_SET) if err != nil { return nil, 0, err } } reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size, ld.ID(), "Downloading") defer reader.Close() verifier, err := digest.NewDigestVerifier(ld.digest) if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } tmpFile, err := ioutil.TempFile("", "GetImageBlob") if err != nil { return nil, 0, xfer.DoNotRetry{Err: err} } _, err = io.Copy(tmpFile, io.TeeReader(reader, verifier)) if err != nil { tmpFile.Close() if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return nil, 0, retryOnError(err) } progress.Update(progressOutput, ld.ID(), "Verifying Checksum") if !verifier.Verified() { err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) logrus.Error(err) tmpFile.Close() if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return nil, 0, xfer.DoNotRetry{Err: err} } progress.Update(progressOutput, ld.ID(), "Download complete") logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) _, err = tmpFile.Seek(0, os.SEEK_SET) if err != nil { tmpFile.Close() if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } return nil, 0, xfer.DoNotRetry{Err: err} } return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil } func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { // Cache mapping from this layer's DiffID to the blobsum ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) } func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated 
bool, err error) { manSvc, err := p.repo.Manifests(ctx) if err != nil { return false, err } var ( manifest distribution.Manifest tagOrDigest string // Used for logging/progress only ) if tagged, isTagged := ref.(reference.NamedTagged); isTagged { // NOTE: not using TagService.Get, since it uses HEAD requests // against the manifests endpoint, which are not supported by // all registry versions. manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag())) if err != nil { return false, allowV1Fallback(err) } tagOrDigest = tagged.Tag() } else if digested, isDigested := ref.(reference.Canonical); isDigested { manifest, err = manSvc.Get(ctx, digested.Digest()) if err != nil { return false, err } tagOrDigest = digested.Digest().String() } else { return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) } if manifest == nil { return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) } // If manSvc.Get succeeded, we can be confident that the registry on // the other side speaks the v2 protocol. 
p.confirmedV2 = true logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name()) var ( imageID image.ID manifestDigest digest.Digest ) switch v := manifest.(type) { case *schema1.SignedManifest: imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v) if err != nil { return false, err } case *schema2.DeserializedManifest: imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v) if err != nil { return false, err } case *manifestlist.DeserializedManifestList: imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v) if err != nil { return false, err } default: return false, errors.New("unsupported manifest format") } progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) oldTagImageID, err := p.config.ReferenceStore.Get(ref) if err == nil { if oldTagImageID == imageID { return false, nil } } else if err != reference.ErrDoesNotExist { return false, err } if canonical, ok := ref.(reference.Canonical); ok { if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil { return false, err } } else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil { return false, err } return true, nil } func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { var verifiedManifest *schema1.Manifest verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) if err != nil { return "", "", err } rootFS := image.NewRootFS() if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil { return "", "", err } // remove duplicate layers and check parent chain validity err = fixManifestLayers(verifiedManifest) if err != nil { return "", "", err } var descriptors []xfer.DownloadDescriptor // Image history converted to the new format var history []image.History // Note that the 
order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { blobSum := verifiedManifest.FSLayers[i].BlobSum var throwAway struct { ThrowAway bool `json:"throwaway,omitempty"` } if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { return "", "", err } h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) if err != nil { return "", "", err } history = append(history, h) if throwAway.ThrowAway { continue } layerDescriptor := &v2LayerDescriptor{ digest: blobSum, repoInfo: p.repoInfo, repo: p.repo, V2MetadataService: p.V2MetadataService, } descriptors = append(descriptors, layerDescriptor) } resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) if err != nil { return "", "", err } defer release() config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) if err != nil { return "", "", err } imageID, err = p.config.ImageStore.Create(config) if err != nil { return "", "", err } manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) return imageID, manifestDigest, nil } func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err } target := mfst.Target() imageID = image.ID(target.Digest) if _, err := p.config.ImageStore.Get(imageID); err == nil { // If the image already exists locally, no need to pull // anything. 
return imageID, manifestDigest, nil } configChan := make(chan []byte, 1) errChan := make(chan error, 1) var cancel func() ctx, cancel = context.WithCancel(ctx) // Pull the image config go func() { configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest) if err != nil { errChan <- err cancel() return } configChan <- configJSON }() var descriptors []xfer.DownloadDescriptor // Note that the order of this loop is in the direction of bottom-most // to top-most, so that the downloads slice gets ordered correctly. for _, d := range mfst.References() { layerDescriptor := &v2LayerDescriptor{ digest: d.Digest, repo: p.repo, repoInfo: p.repoInfo, V2MetadataService: p.V2MetadataService, } descriptors = append(descriptors, layerDescriptor) } var ( configJSON []byte // raw serialized image config unmarshalledConfig image.Image // deserialized image config downloadRootFS image.RootFS // rootFS to use for registering layers. ) if runtime.GOOS == "windows" { configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) if err != nil { return "", "", err } if unmarshalledConfig.RootFS == nil { return "", "", errors.New("image config has no rootfs section") } downloadRootFS = *unmarshalledConfig.RootFS downloadRootFS.DiffIDs = []layer.DiffID{} } else { downloadRootFS = *image.NewRootFS() } rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) if err != nil { if configJSON != nil { // Already received the config return "", "", err } select { case err = <-errChan: return "", "", err default: cancel() select { case <-configChan: case <-errChan: } return "", "", err } } defer release() if configJSON == nil { configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) if err != nil { return "", "", err } } // The DiffIDs returned in rootFS MUST match those in the config. // Otherwise the image config could be referencing layers that aren't // included in the manifest. 
if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) { return "", "", errRootFSMismatch } for i := range rootFS.DiffIDs { if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] { return "", "", errRootFSMismatch } } imageID, err = p.config.ImageStore.Create(configJSON) if err != nil { return "", "", err } return imageID, manifestDigest, nil } func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) { select { case configJSON := <-configChan: var unmarshalledConfig image.Image if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil { return nil, image.Image{}, err } return configJSON, unmarshalledConfig, nil case err := <-errChan: return nil, image.Image{}, err // Don't need a case for ctx.Done in the select because cancellation // will trigger an error in p.pullSchema2ImageConfig. } } // pullManifestList handles "manifest lists" which point to various // platform-specifc manifests. func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) { manifestListDigest, err = schema2ManifestDigest(ref, mfstList) if err != nil { return "", "", err } var manifestDigest digest.Digest for _, manifestDescriptor := range mfstList.Manifests { // TODO(aaronl): The manifest list spec supports optional // "features" and "variant" fields. These are not yet used. // Once they are, their values should be interpreted here. 
if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { manifestDigest = manifestDescriptor.Digest break } } if manifestDigest == "" { return "", "", errors.New("no supported platform found in manifest list") } manSvc, err := p.repo.Manifests(ctx) if err != nil { return "", "", err } manifest, err := manSvc.Get(ctx, manifestDigest) if err != nil { return "", "", err } manifestRef, err := reference.WithDigest(ref, manifestDigest) if err != nil { return "", "", err } switch v := manifest.(type) { case *schema1.SignedManifest: imageID, _, err = p.pullSchema1(ctx, manifestRef, v) if err != nil { return "", "", err } case *schema2.DeserializedManifest: imageID, _, err = p.pullSchema2(ctx, manifestRef, v) if err != nil { return "", "", err } default: return "", "", errors.New("unsupported manifest format") } return imageID, manifestListDigest, err } func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { blobs := p.repo.Blobs(ctx) configJSON, err = blobs.Get(ctx, dgst) if err != nil { return nil, err } // Verify image config digest verifier, err := digest.NewDigestVerifier(dgst) if err != nil { return nil, err } if _, err := verifier.Write(configJSON); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image config verification failed for digest %s", dgst) logrus.Error(err) return nil, err } return configJSON, nil } // schema2ManifestDigest computes the manifest digest, and, if pulling by // digest, ensures that it matches the requested digest. func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { _, canonical, err := mfst.Payload() if err != nil { return "", err } // If pull by digest, then verify the manifest digest. 
if digested, isDigested := ref.(reference.Canonical); isDigested { verifier, err := digest.NewDigestVerifier(digested.Digest()) if err != nil { return "", err } if _, err := verifier.Write(canonical); err != nil { return "", err } if !verifier.Verified() { err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) logrus.Error(err) return "", err } return digested.Digest(), nil } return digest.FromBytes(canonical), nil } // allowV1Fallback checks if the error is a possible reason to fallback to v1 // (even if confirmedV2 has been set already), and if so, wraps the error in // a fallbackError with confirmedV2 set to false. Otherwise, it returns the // error unmodified. func allowV1Fallback(err error) error { switch v := err.(type) { case errcode.Errors: if len(v) != 0 { if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) { return fallbackError{err: err, confirmedV2: false} } } case errcode.Error: if registry.ShouldV2Fallback(v) { return fallbackError{err: err, confirmedV2: false} } } return err } func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { // If pull by digest, then verify the manifest digest. NOTE: It is // important to do this first, before any other content validation. If the // digest cannot be verified, don't even bother with those other things. 
if digested, isCanonical := ref.(reference.Canonical); isCanonical { verifier, err := digest.NewDigestVerifier(digested.Digest()) if err != nil { return nil, err } if _, err := verifier.Write(signedManifest.Canonical); err != nil { return nil, err } if !verifier.Verified() { err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) logrus.Error(err) return nil, err } } m = &signedManifest.Manifest if m.SchemaVersion != 1 { return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) } if len(m.FSLayers) != len(m.History) { return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) } if len(m.FSLayers) == 0 { return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) } return m, nil } // fixManifestLayers removes repeated layers from the manifest and checks the // correctness of the parent chain. func fixManifestLayers(m *schema1.Manifest) error { imgs := make([]*image.V1Image, len(m.FSLayers)) for i := range m.FSLayers { img := &image.V1Image{} if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { return err } imgs[i] = img if err := v1.ValidateID(img.ID); err != nil { return err } } if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { // Windows base layer can point to a base layer parent that is not in manifest. return errors.New("Invalid parent ID in the base layer of the image.") } // check general duplicates to error instead of a deadlock idmap := make(map[string]struct{}) var lastID string for _, img := range imgs { // skip IDs that appear after each other, we handle those later if _, exists := idmap[img.ID]; img.ID != lastID && exists { return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID idmap[lastID] = struct{}{} } // backwards loop so that we keep the remaining indexes after removing items for i := len(imgs) - 2; i >= 0; i-- { if imgs[i].ID == imgs[i+1].ID { // repeated ID. 
remove and continue m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) m.History = append(m.History[:i], m.History[i+1:]...) } else if imgs[i].Parent != imgs[i+1].ID { return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) } } return nil } docker-1.10.3/distribution/pull_v2_test.go000066400000000000000000000651561267010174400205520ustar00rootroot00000000000000package distribution import ( "encoding/json" "io/ioutil" "reflect" "strings" "testing" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/docker/reference" ) // TestFixManifestLayers checks that fixManifestLayers removes a duplicate // layer, and that it makes no changes to the manifest when called a second // time, after the duplicate is removed. func TestFixManifestLayers(t *testing.T) { duplicateLayerManifest := schema1.Manifest{ FSLayers: []schema1.FSLayer{ {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, }, History: []schema1.History{ {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, }, } duplicateLayerManifestExpectedOutput := schema1.Manifest{ FSLayers: []schema1.FSLayer{ {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, }, History: []schema1.History{ {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, }, } if err := fixManifestLayers(&duplicateLayerManifest); err != nil { t.Fatalf("unexpected error from fixManifestLayers: %v", err) } if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") } // Run fixManifestLayers again and confirm that it doesn't change the // manifest (which no longer has duplicate layers). if err := fixManifestLayers(&duplicateLayerManifest); err != nil { t.Fatalf("unexpected error from fixManifestLayers: %v", err) } if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") } } // TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails // if the base layer configuration specifies a parent. 
func TestFixManifestLayersBaseLayerParent(t *testing.T) { duplicateLayerManifest := schema1.Manifest{ FSLayers: []schema1.FSLayer{ {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, }, History: []schema1.History{ {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X 
main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, }, } if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID in the base layer of the image.") { t.Fatalf("expected an invalid parent ID error from fixManifestLayers") } } // TestFixManifestLayersBadParent makes sure that fixManifestLayers fails // if an image configuration specifies a parent that doesn't directly follow // that (deduplicated) image in the image history. 
func TestFixManifestLayersBadParent(t *testing.T) { duplicateLayerManifest := schema1.Manifest{ FSLayers: []schema1.FSLayer{ {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, }, History: []schema1.History{ {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain 
--untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, }, } if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { t.Fatalf("expected an invalid parent ID error from fixManifestLayers") } } // TestValidateManifest verifies the validateManifest function func TestValidateManifest(t *testing.T) { expectedDigest, err := reference.ParseNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") if err != nil { t.Fatal("could not parse reference") } expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") // Good manifest goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") if err != nil { t.Fatal("error reading fixture:", err) } var goodSignedManifest schema1.SignedManifest err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) if err != nil { t.Fatal("error unmarshaling 
manifest:", err) } verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) if err != nil { t.Fatal("validateManifest failed:", err) } if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { t.Fatal("unexpected FSLayer in good manifest") } // "Extra data" manifest extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") if err != nil { t.Fatal("error reading fixture:", err) } var extraDataSignedManifest schema1.SignedManifest err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) if err != nil { t.Fatal("error unmarshaling manifest:", err) } verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) if err != nil { t.Fatal("validateManifest failed:", err) } if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { t.Fatal("unexpected FSLayer in extra data manifest") } // Bad manifest badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") if err != nil { t.Fatal("error reading fixture:", err) } var badSignedManifest schema1.SignedManifest err = json.Unmarshal(badManifestBytes, &badSignedManifest) if err != nil { t.Fatal("error unmarshaling manifest:", err) } verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { t.Fatal("expected validateManifest to fail with digest error") } } docker-1.10.3/distribution/pull_v2_unix.go000066400000000000000000000003611267010174400205410ustar00rootroot00000000000000// +build !windows package distribution import ( "github.com/docker/distribution/manifest/schema1" "github.com/docker/docker/image" ) func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { return nil } docker-1.10.3/distribution/pull_v2_windows.go000066400000000000000000000014101267010174400212440ustar00rootroot00000000000000// +build windows package distribution import ( 
"encoding/json" "fmt" "github.com/docker/distribution/manifest/schema1" "github.com/docker/docker/image" ) func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { v1img := &image.V1Image{} if err := json.Unmarshal([]byte(m.History[len(m.History)-1].V1Compatibility), v1img); err != nil { return err } if v1img.Parent == "" { return fmt.Errorf("Last layer %q does not have a base layer reference", v1img.ID) } // There must be an image that already references the baselayer. for _, img := range is.Map() { if img.RootFS.BaseLayerID() == v1img.Parent { rootFS.BaseLayer = img.RootFS.BaseLayer return nil } } return fmt.Errorf("Invalid base layer %q", v1img.Parent) } docker-1.10.3/distribution/push.go000066400000000000000000000152261267010174400171000ustar00rootroot00000000000000package distribution import ( "bufio" "compress/gzip" "fmt" "io" "github.com/Sirupsen/logrus" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/engine-api/types" "github.com/docker/libtrust" "golang.org/x/net/context" ) // ImagePushConfig stores push configuration. type ImagePushConfig struct { // MetaHeaders store HTTP headers with metadata about the image // (DockerHeaders with prefix X-Meta- in the request). MetaHeaders map[string][]string // AuthConfig holds authentication credentials for authenticating with // the registry. AuthConfig *types.AuthConfig // ProgressOutput is the interface for showing the status of the push // operation. ProgressOutput progress.Output // RegistryService is the registry service to use for TLS configuration // and endpoint lookup. 
RegistryService *registry.Service // ImageEventLogger notifies events for a given image ImageEventLogger func(id, name, action string) // MetadataStore is the storage backend for distribution-specific // metadata. MetadataStore metadata.Store // LayerStore manages layers. LayerStore layer.Store // ImageStore manages images. ImageStore image.Store // ReferenceStore manages tags. ReferenceStore reference.Store // TrustKey is the private key for legacy signatures. This is typically // an ephemeral key, since these signatures are no longer verified. TrustKey libtrust.PrivateKey // UploadManager dispatches uploads. UploadManager *xfer.LayerUploadManager } // Pusher is an interface that abstracts pushing for different API versions. type Pusher interface { // Push tries to push the image configured at the creation of Pusher. // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. // // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. Push(ctx context.Context) error } const compressionBufSize = 32768 // NewPusher creates a new Pusher interface that will push to either a v1 or v2 // registry. The endpoint argument contains a Version field that determines // whether a v1 or v2 pusher will be created. The other parameters are passed // through to the underlying pusher implementation for use during the actual // push operation. 
func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { switch endpoint.Version { case registry.APIVersion2: return &v2Pusher{ v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), ref: ref, endpoint: endpoint, repoInfo: repoInfo, config: imagePushConfig, }, nil case registry.APIVersion1: return &v1Pusher{ v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), ref: ref, endpoint: endpoint, repoInfo: repoInfo, config: imagePushConfig, }, nil } return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) } // Push initiates a push operation on the repository named localName. // ref is the specific variant of the image to be pushed. // If no tag is provided, all tags will be pushed. func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { // FIXME: Allow to interrupt current push when new push of same image is done. // Resolve the Repository name from fqn to RepositoryInfo repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) if err != nil { return err } endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo) if err != nil { return err } progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) if len(associations) == 0 { return fmt.Errorf("Repository does not exist: %s", repoInfo.Name()) } var ( lastErr error // confirmedV2 is set to true if a push attempt managed to // confirm that it was talking to a v2 registry. This will // prevent fallback to the v1 protocol. 
confirmedV2 bool ) for _, endpoint := range endpoints { if confirmedV2 && endpoint.Version == registry.APIVersion1 { logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) continue } logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) if err != nil { lastErr = err continue } if err := pusher.Push(ctx); err != nil { // Was this push cancelled? If so, don't try to fall // back. select { case <-ctx.Done(): default: if fallbackErr, ok := err.(fallbackError); ok { confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 err = fallbackErr.err lastErr = err continue } } logrus.Debugf("Not continuing with error: %v", err) return err } imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") return nil } if lastErr == nil { lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) } return lastErr } // compress returns an io.ReadCloser which will supply a compressed version of // the provided Reader. The caller must close the ReadCloser after reading the // compressed data. // // Note that this function returns a reader instead of taking a writer as an // argument so that it can be used with httpBlobWriter's ReadFrom method. // Using httpBlobWriter's Write method would send a PATCH request for every // Write call. // // The second return value is a channel that gets closed when the goroutine // is finished. This allows the caller to make sure the goroutine finishes // before it releases any resources connected with the reader that was // passed in. func compress(in io.Reader) (io.ReadCloser, chan struct{}) { compressionDone := make(chan struct{}) pipeReader, pipeWriter := io.Pipe() // Use a bufio.Writer to avoid excessive chunking in HTTP request. 
bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) compressor := gzip.NewWriter(bufWriter) go func() { _, err := io.Copy(compressor, in) if err == nil { err = compressor.Close() } if err == nil { err = bufWriter.Flush() } if err != nil { pipeWriter.CloseWithError(err) } else { pipeWriter.Close() } close(compressionDone) }() return pipeReader, compressionDone } docker-1.10.3/distribution/push_v1.go000066400000000000000000000317451267010174400175120ustar00rootroot00000000000000package distribution import ( "fmt" "sync" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/image" "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "golang.org/x/net/context" ) type v1Pusher struct { ctx context.Context v1IDService *metadata.V1IDService endpoint registry.APIEndpoint ref reference.Named repoInfo *registry.RepositoryInfo config *ImagePushConfig session *registry.Session } func (p *v1Pusher) Push(ctx context.Context) error { tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) if err != nil { return err } // Adds Docker-specific headers as well as user-specified headers (metaHeaders) tr := transport.NewTransport( // TODO(tiborvass): was NoTimeout registry.NewTransport(tlsConfig), registry.DockerHeaders(p.config.MetaHeaders)..., ) client := registry.HTTPClient(tr) v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) if err != nil { logrus.Debugf("Could not get v1 endpoint: %v", err) return fallbackError{err: err} } p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) if err != nil { // TODO(dmcgowan): Check if should fallback return 
fallbackError{err: err} } if err := p.pushRepository(ctx); err != nil { // TODO(dmcgowan): Check if should fallback return err } return nil } // v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an // image being pushed to a v1 registry. type v1Image interface { Config() []byte Layer() layer.Layer V1ID() string } type v1ImageCommon struct { layer layer.Layer config []byte v1ID string } func (common *v1ImageCommon) Config() []byte { return common.config } func (common *v1ImageCommon) V1ID() string { return common.v1ID } func (common *v1ImageCommon) Layer() layer.Layer { return common.layer } // v1TopImage defines a runnable (top layer) image being pushed to a v1 // registry. type v1TopImage struct { v1ImageCommon imageID image.ID } func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { v1ID := digest.Digest(imageID).Hex() parentV1ID := "" if parent != nil { parentV1ID = parent.V1ID() } config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) if err != nil { return nil, err } return &v1TopImage{ v1ImageCommon: v1ImageCommon{ v1ID: v1ID, config: config, layer: l, }, imageID: imageID, }, nil } // v1DependencyImage defines a dependency layer being pushed to a v1 registry. 
type v1DependencyImage struct { v1ImageCommon } func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { v1ID := digest.Digest(l.ChainID()).Hex() config := "" if parent != nil { config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) } else { config = fmt.Sprintf(`{"id":"%s"}`, v1ID) } return &v1DependencyImage{ v1ImageCommon: v1ImageCommon{ v1ID: v1ID, config: []byte(config), layer: l, }, }, nil } // Retrieve the all the images to be uploaded in the correct order func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) { tagsByImage = make(map[image.ID][]string) // Ignore digest references if _, isCanonical := p.ref.(reference.Canonical); isCanonical { return } tagged, isTagged := p.ref.(reference.NamedTagged) if isTagged { // Push a specific tag var imgID image.ID imgID, err = p.config.ReferenceStore.Get(p.ref) if err != nil { return } imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) if err != nil { return } tagsByImage[imgID] = []string{tagged.Tag()} return } imagesSeen := make(map[image.ID]struct{}) dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) associations := p.config.ReferenceStore.ReferencesByName(p.ref) for _, association := range associations { if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { // Ignore digest references. continue } tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag()) if _, present := imagesSeen[association.ImageID]; present { // Skip generating image list for already-seen image continue } imagesSeen[association.ImageID] = struct{}{} imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers) if err != nil { return nil, nil, nil, err } // append to main image list imageList = append(imageList, imageListForThisTag...) 
} if len(imageList) == 0 { return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") } logrus.Debugf("Image list: %v", imageList) logrus.Debugf("Tags by image: %v", tagsByImage) return } func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) { img, err := p.config.ImageStore.Get(imgID) if err != nil { return nil, err } topLayerID := img.RootFS.ChainID() var l layer.Layer if topLayerID == "" { l = layer.EmptyLayer } else { l, err = p.config.LayerStore.Get(topLayerID) *referencedLayers = append(*referencedLayers, l) if err != nil { return nil, fmt.Errorf("failed to get top layer from image: %v", err) } } dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) if err != nil { return nil, err } topImage, err := newV1TopImage(imgID, img, l, parent) if err != nil { return nil, err } imageListForThisTag = append(dependencyImages, topImage) return } func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { if l == nil { return nil, nil, nil } imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) if dependenciesSeen != nil { if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { // This layer is already on the list, we can ignore it // and all its parents. return imageListForThisTag, dependencyImage, nil } } dependencyImage, err := newV1DependencyImage(l, parent) if err != nil { return nil, nil, err } imageListForThisTag = append(imageListForThisTag, dependencyImage) if dependenciesSeen != nil { dependenciesSeen[l.ChainID()] = dependencyImage } return imageListForThisTag, dependencyImage, nil } // createImageIndex returns an index of an image's layer IDs and tags. 
func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { var imageIndex []*registry.ImgData for _, img := range images { v1ID := img.V1ID() if topImage, isTopImage := img.(*v1TopImage); isTopImage { if tags, hasTags := tags[topImage.imageID]; hasTags { // If an image has tags you must add an entry in the image index // for each tag for _, tag := range tags { imageIndex = append(imageIndex, ®istry.ImgData{ ID: v1ID, Tag: tag, }) } continue } } // If the image does not have a tag it still needs to be sent to the // registry with an empty tag so that it is associated with the repository imageIndex = append(imageIndex, ®istry.ImgData{ ID: v1ID, Tag: "", }) } return imageIndex } // lookupImageOnEndpoint checks the specified endpoint to see if an image exists // and if it is absent then it sends the image id to the channel to be pushed. func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { defer wg.Done() for image := range images { v1ID := image.V1ID() truncID := stringid.TruncateID(image.Layer().DiffID().String()) if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { logrus.Errorf("Error in LookupRemoteImage: %s", err) imagesToPush <- v1ID progress.Update(p.config.ProgressOutput, truncID, "Waiting") } else { progress.Update(p.config.ProgressOutput, truncID, "Already exists") } } } func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { workerCount := len(imageList) // start a maximum of 5 workers to check if images exist on the specified endpoint. 
if workerCount > 5 { workerCount = 5 } var ( wg = &sync.WaitGroup{} imageData = make(chan v1Image, workerCount*2) imagesToPush = make(chan string, workerCount*2) pushes = make(chan map[string]struct{}, 1) ) for i := 0; i < workerCount; i++ { wg.Add(1) go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) } // start a go routine that consumes the images to push go func() { shouldPush := make(map[string]struct{}) for id := range imagesToPush { shouldPush[id] = struct{}{} } pushes <- shouldPush }() for _, v1Image := range imageList { imageData <- v1Image } // close the channel to notify the workers that there will be no more images to check. close(imageData) wg.Wait() close(imagesToPush) // wait for all the images that require pushes to be collected into a consumable map. shouldPush := <-pushes // finish by pushing any images and tags to the endpoint. The order that the images are pushed // is very important that is why we are still iterating over the ordered list of imageIDs. for _, img := range imageList { v1ID := img.V1ID() if _, push := shouldPush[v1ID]; push { if _, err := p.pushImage(ctx, img, endpoint); err != nil { // FIXME: Continue on error? return err } } if topImage, isTopImage := img.(*v1TopImage); isTopImage { for _, tag := range tags[topImage.imageID] { progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { return err } } } } return nil } // pushRepository pushes layers that do not already exist on the registry. 
// pushRepository registers the full image index with the registry and then
// pushes missing images and tags to each endpoint the registry returns,
// finishing with a second index call (validate=true) to confirm the push.
func (p *v1Pusher) pushRepository(ctx context.Context) error {
	imgList, tags, referencedLayers, err := p.getImageList()
	// The release defer is installed before the error check so referenced
	// layers are released even when getImageList returns an error.
	defer func() {
		for _, l := range referencedLayers {
			p.config.LayerStore.Release(l)
		}
	}()
	if err != nil {
		return err
	}

	imageIndex := createImageIndex(imgList, tags)
	for _, data := range imageIndex {
		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
	}

	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil)
	if err != nil {
		return err
	}
	// push the repository to each of the endpoints only if it does not exist.
	for _, endpoint := range repoData.Endpoints {
		if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil {
			return err
		}
	}
	_, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints)
	return err
}

// pushImage uploads a single v1 image to endpoint ep: first the image JSON,
// then the layer tar stream, then the layer checksum. On success it also
// records the v1 ID -> DiffID mapping (best effort) and returns the checksum.
// NOTE(review): the parameter v1Image shadows the v1Image type inside this
// function body.
func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) {
	l := v1Image.Layer()
	v1ID := v1Image.V1ID()
	truncID := stringid.TruncateID(l.DiffID().String())

	jsonRaw := v1Image.Config()
	progress.Update(p.config.ProgressOutput, truncID, "Pushing")

	// General rule is to use ID for graph accesses and compatibilityID for
	// calls to session.registry()
	imgData := &registry.ImgData{
		ID: v1ID,
	}

	// Send the json
	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping")
			return "", nil
		}
		return "", err
	}

	arch, err := l.TarStream()
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// don't care if this fails; best effort
	size, _ := l.DiffSize()

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size)

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing")
	defer reader.Close()

	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	// Record the v1 ID mapping; failure here is only logged, not fatal.
	if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil {
		logrus.Warnf("Could not set v1 ID mapping: %v", err)
	}

	progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed")
	return imgData.Checksum, nil
}
docker-1.10.3/distribution/push_v2.go000066400000000000000000000321431267010174400175040ustar00rootroot00000000000000package distribution

import (
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	distreference "github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"golang.org/x/net/context"
)

// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
	Tag    string
	Digest digest.Digest
	Size   int
}

// v2Pusher pushes a reference to a v2 registry endpoint.
type v2Pusher struct {
	v2MetadataService *metadata.V2MetadataService
	ref               reference.Named
	endpoint          registry.APIEndpoint
	repoInfo          *registry.RepositoryInfo
	config            *ImagePushConfig
	repo              distribution.Repository

	// pushState is state built by the Upload functions.
	pushState pushState
}

type pushState struct {
	sync.Mutex
	// remoteLayers is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers. It is also used to fill in digest and size
	// information when building the manifest.
	remoteLayers map[layer.DiffID]distribution.Descriptor
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}

// Push pushes the referenced image(s) to the v2 registry endpoint. Errors
// are wrapped in fallbackError (carrying confirmedV2) when a fallback to
// another endpoint/protocol may still be attempted by the caller.
func (p *v2Pusher) Push(ctx context.Context) (err error) {
	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)

	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
	}

	if err = p.pushV2Repository(ctx); err != nil {
		if registry.ContinueOnError(err) {
			return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
		}
	}
	return err
}

// pushV2Repository pushes either the single tag named by p.ref, or — when
// p.ref is a bare repository name — every tag associated with that name in
// the reference store. Digest references are rejected.
func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
		imageID, err := p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return fmt.Errorf("tag does not exist: %s", p.ref.String())
		}
		return p.pushV2Tag(ctx, namedTagged, imageID)
	}

	if !reference.IsNameOnly(p.ref) {
		return errors.New("cannot push a digest reference")
	}

	// Push all tags
	pushed := 0
	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
			pushed++
			if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil {
				return err
			}
		}
	}

	if pushed == 0 {
		return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
	}

	return nil
}

// pushV2Tag uploads all layers for the image referenced by ref, then builds
// and uploads a schema2 manifest, falling back to a signed schema1 manifest
// if the registry rejects schema2. On success it emits the manifest digest
// and size through the progress output (for the trust client).
func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error {
	logrus.Debugf("Pushing repository: %s", ref.String())

	img, err := p.config.ImageStore.Get(imageID)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
	}

	var l layer.Layer

	topLayerID := img.RootFS.ChainID()
	if topLayerID == "" {
		l = layer.EmptyLayer
	} else {
		l, err = p.config.LayerStore.Get(topLayerID)
		if err != nil {
			return fmt.Errorf("failed to get top layer from image: %v", err)
		}
		defer layer.ReleaseAndLog(p.config.LayerStore, l)
	}

	var descriptors []xfer.UploadDescriptor

	descriptorTemplate := v2PushDescriptor{
		v2MetadataService: p.v2MetadataService,
		repoInfo:          p.repoInfo,
		repo:              p.repo,
		pushState:         &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.
	for i := 0; i < len(img.RootFS.DiffIDs); i++ {
		descriptor := descriptorTemplate
		descriptor.layer = l
		descriptors = append(descriptors, &descriptor)

		l = l.Parent()
	}

	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

	// Try schema2 first
	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON())
	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
	if err != nil {
		return err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	putOptions := []distribution.ManifestServiceOption{client.WithTag(ref.Tag())}
	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
		logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), ref.Tag(), img.RawJSON())
		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
		if err != nil {
			return err
		}

		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
			return err
		}
	}

	var canonicalManifest []byte

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		canonicalManifest = v.Canonical
	case *schema2.DeserializedManifest:
		_, canonicalManifest, err = v.Payload()
		if err != nil {
			return err
		}
	}

	manifestDigest := digest.FromBytes(canonicalManifest)
	progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))
	// Signal digest to the trust client so it can sign the
	// push, if appropriate.
	progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)})

	return nil
}

// manifestFromBuilder appends each descriptor's reference to builder and
// returns the built manifest.
func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
	// descriptors is in reverse order; iterate backwards to get references
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return nil, err
		}
	}

	return builder.Build(ctx)
}

// v2PushDescriptor describes a single layer upload to a v2 repository.
type v2PushDescriptor struct {
	layer             layer.Layer
	v2MetadataService *metadata.V2MetadataService
	repoInfo          reference.Named
	repo              distribution.Repository
	pushState         *pushState
	remoteDescriptor  distribution.Descriptor
}

// Key returns the deduplication key for this upload (repository + DiffID).
func (pd *v2PushDescriptor) Key() string {
	return "v2push:" + pd.repo.Name() + " " + pd.layer.DiffID().String()
}

// ID returns the truncated DiffID used for progress display.
func (pd *v2PushDescriptor) ID() string {
	return stringid.TruncateID(pd.layer.DiffID().String())
}

// DiffID returns the DiffID of the layer this descriptor pushes.
func (pd *v2PushDescriptor) DiffID() layer.DiffID {
	return pd.layer.DiffID()
}

// Upload pushes this descriptor's layer to the repository, short-circuiting
// when the layer is already known remotely and attempting a cross-repository
// blob mount before falling back to a full upload.
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	diffID := pd.DiffID()

	pd.pushState.Lock()
	if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return descriptor, nil
	}
	pd.pushState.Unlock()

	// Do we have any metadata associated with this layer's DiffID?
	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
	if err == nil {
		// We have metadata: ask the registry whether it already has the blob.
		descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
		if err != nil {
			progress.Update(progressOutput, pd.ID(), "Image push failed")
			return distribution.Descriptor{}, retryOnError(err)
		}
		if exists {
			progress.Update(progressOutput, pd.ID(), "Layer already exists")
			pd.pushState.Lock()
			pd.pushState.remoteLayers[diffID] = descriptor
			pd.pushState.Unlock()
			return descriptor, nil
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

	var mountFrom metadata.V2Metadata

	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
	// NOTE(review): the loop variable shadows the imported metadata package
	// inside this loop.
	for _, metadata := range v2Metadata {
		sourceRepo, err := reference.ParseNamed(metadata.SourceRepository)
		if err != nil {
			continue
		}
		if pd.repoInfo.Hostname() == sourceRepo.Hostname() {
			logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, metadata.Digest, sourceRepo.FullName())
			mountFrom = metadata
			break
		}
	}

	var createOpts []distribution.BlobCreateOption

	if mountFrom.SourceRepository != "" {
		namedRef, err := reference.WithName(mountFrom.SourceRepository)
		if err != nil {
			return distribution.Descriptor{}, err
		}

		// TODO (brianbland): We need to construct a reference where the Name is
		// only the full remote name, so clean this up when distribution has a
		// richer reference package
		remoteRef, err := distreference.WithName(namedRef.RemoteName())
		if err != nil {
			return distribution.Descriptor{}, err
		}

		canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
		if err != nil {
			return distribution.Descriptor{}, err
		}

		createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
	}

	// Send the layer
	layerUpload, err := bs.Create(ctx, createOpts...)
	switch err := err.(type) {
	case distribution.ErrBlobMounted:
		// The registry mounted the blob from another repository; no upload needed.
		progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())

		err.Descriptor.MediaType = schema2.MediaTypeLayer

		pd.pushState.Lock()
		pd.pushState.confirmedV2 = true
		pd.pushState.remoteLayers[diffID] = err.Descriptor
		pd.pushState.Unlock()

		// Cache mapping from this layer's DiffID to the blobsum
		if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
			return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
		}

		return err.Descriptor, nil
	}
	if mountFrom.SourceRepository != "" {
		// unable to mount layer from this repository, so this source mapping is no longer valid
		logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
		pd.v2MetadataService.Remove(mountFrom)
	}

	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}
	defer layerUpload.Close()

	arch, err := pd.layer.TarStream()
	if err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort
	size, _ := pd.layer.DiffSize()

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
	// compress is a package-level helper (defined elsewhere in this package).
	compressedReader, compressionDone := compress(reader)
	defer func() {
		reader.Close()
		<-compressionDone
	}()

	// Tee the compressed stream through a digester so the push digest is
	// computed while uploading.
	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	pd.pushState.Lock()

	// If Commit succeeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	pd.pushState.confirmedV2 = true

	descriptor := distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}
	pd.pushState.remoteLayers[diffID] = descriptor

	pd.pushState.Unlock()

	return descriptor, nil
}

// SetRemoteDescriptor records the descriptor of the remote blob.
func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
	pd.remoteDescriptor = descriptor
}

// Descriptor returns the previously recorded remote descriptor.
func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
	return pd.remoteDescriptor
}

// layerAlreadyExists checks if the registry already knows about any of the
// metadata passed in the "metadata" slice. If it finds one that the registry
// knows about, it returns the known digest and "true".
func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	for _, meta := range metadata {
		// Only check blobsums that are known to this repository or have an unknown source
		if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() {
			continue
		}
		descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest)
		switch err {
		case nil:
			descriptor.MediaType = schema2.MediaTypeLayer
			return descriptor, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return distribution.Descriptor{}, false, err
		}
	}

	return distribution.Descriptor{}, false, nil
}
docker-1.10.3/distribution/registry.go000066400000000000000000000116561267010174400177720ustar00rootroot00000000000000package distribution

import (
	"fmt"
	"net"
	"net/http"
	"net/url"
	"strings"
	"syscall"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/registry" "github.com/docker/engine-api/types" "golang.org/x/net/context" ) // fallbackError wraps an error that can possibly allow fallback to a different // endpoint. type fallbackError struct { // err is the error being wrapped. err error // confirmedV2 is set to true if it was confirmed that the registry // supports the v2 protocol. This is used to limit fallbacks to the v1 // protocol. confirmedV2 bool } // Error renders the FallbackError as a string. func (f fallbackError) Error() string { return f.err.Error() } type dumbCredentialStore struct { auth *types.AuthConfig } func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { return dcs.auth.Username, dcs.auth.Password } // NewV2Repository returns a repository (v2 only). It creates a HTTP transport // providing timeout settings and authentication support, and also verifies the // remote API version. 
func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { repoName := repoInfo.FullName() // If endpoint does not support CanonicalName, use the RemoteName instead if endpoint.TrimHostname { repoName = repoInfo.RemoteName() } // TODO(dmcgowan): Call close idle connections when complete, use keep alive base := &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, }).Dial, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: endpoint.TLSConfig, // TODO(dmcgowan): Call close idle connections when complete and use keep alive DisableKeepAlives: true, } modifiers := registry.DockerHeaders(metaHeaders) authTransport := transport.NewTransport(base, modifiers...) pingClient := &http.Client{ Transport: authTransport, Timeout: 15 * time.Second, } endpointStr := strings.TrimRight(endpoint.URL, "/") + "/v2/" req, err := http.NewRequest("GET", endpointStr, nil) if err != nil { return nil, false, err } resp, err := pingClient.Do(req) if err != nil { return nil, false, err } defer resp.Body.Close() v2Version := auth.APIVersion{ Type: "registry", Version: "2.0", } versions := auth.APIVersions(resp, registry.DefaultRegistryVersionHeader) for _, pingVersion := range versions { if pingVersion == v2Version { // The version header indicates we're definitely // talking to a v2 registry. So don't allow future // fallbacks to the v1 protocol. 
foundVersion = true break } } challengeManager := auth.NewSimpleChallengeManager() if err := challengeManager.AddResponse(resp); err != nil { return nil, foundVersion, err } if authConfig.RegistryToken != "" { passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) } else { creds := dumbCredentialStore{auth: authConfig} tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, actions...) basicHandler := auth.NewBasicHandler(creds) modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) } tr := transport.NewTransport(base, modifiers...) repo, err = client.NewRepository(ctx, repoName, endpoint.URL, tr) return repo, foundVersion, err } type existingTokenHandler struct { token string } func (th *existingTokenHandler) Scheme() string { return "bearer" } func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) return nil } // retryOnError wraps the error in xfer.DoNotRetry if we should not retry the // operation after this error. func retryOnError(err error) error { switch v := err.(type) { case errcode.Errors: if len(v) != 0 { return retryOnError(v[0]) } case errcode.Error: switch v.Code { case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied: return xfer.DoNotRetry{Err: err} } case *url.Error: return retryOnError(v.Err) case *client.UnexpectedHTTPResponseError: return xfer.DoNotRetry{Err: err} case error: if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { return xfer.DoNotRetry{Err: err} } } // let's be nice and fallback if the error is a completely // unexpected one. // If new errors have to be handled in some way, please // add them to the switch above. 
return err } docker-1.10.3/distribution/registry_unit_test.go000066400000000000000000000041261267010174400220640ustar00rootroot00000000000000package distribution import ( "net/http" "net/http/httptest" "os" "strings" "testing" "github.com/Sirupsen/logrus" "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/docker/utils" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" "golang.org/x/net/context" ) func TestTokenPassThru(t *testing.T) { authConfig := &types.AuthConfig{ RegistryToken: "mysecrettoken", } gotToken := false handler := func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.Header.Get("Authorization"), authConfig.RegistryToken) { logrus.Debug("Detected registry token in auth header") gotToken = true } if r.RequestURI == "/v2/" { w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) w.WriteHeader(401) } } ts := httptest.NewServer(http.HandlerFunc(handler)) defer ts.Close() tmp, err := utils.TestDirectory("") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) endpoint := registry.APIEndpoint{ Mirror: false, URL: ts.URL, Version: 2, Official: false, TrimHostname: false, TLSConfig: nil, //VersionHeader: "verheader", } n, _ := reference.ParseNamed("testremotename") repoInfo := ®istry.RepositoryInfo{ Named: n, Index: ®istrytypes.IndexInfo{ Name: "testrepo", Mirrors: nil, Secure: false, Official: false, }, Official: false, } imagePullConfig := &ImagePullConfig{ MetaHeaders: http.Header{}, AuthConfig: authConfig, } puller, err := newPuller(endpoint, repoInfo, imagePullConfig) if err != nil { t.Fatal(err) } p := puller.(*v2Puller) ctx := context.Background() p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { t.Fatal(err) } logrus.Debug("About to pull") // We expect it to fail, since we haven't mock'd the full registry exchange in our handler above tag, _ := 
	reference.WithTag(n, "tag_goes_here")
	_ = p.pullV2Repository(ctx, tag)

	if !gotToken {
		t.Fatal("Failed to receive registry token")
	}
}
docker-1.10.3/distribution/xfer/000077500000000000000000000000001267010174400165305ustar00rootroot00000000000000docker-1.10.3/distribution/xfer/download.go000066400000000000000000000274331267010174400206770ustar00rootroot00000000000000package xfer

import (
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"
)

// maxDownloadAttempts bounds the number of retries per layer download.
const maxDownloadAttempts = 5

// LayerDownloadManager figures out which layers need to be downloaded, then
// registers and downloads those, taking into account dependencies between
// layers.
type LayerDownloadManager struct {
	layerStore layer.Store
	tm         TransferManager
}

// NewLayerDownloadManager returns a new LayerDownloadManager.
func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager {
	return &LayerDownloadManager{
		layerStore: layerStore,
		tm:         NewTransferManager(concurrencyLimit),
	}
}

// downloadTransfer is a Transfer that carries the registered layer (or the
// error) produced by a download.
type downloadTransfer struct {
	Transfer

	layerStore layer.Store
	layer      layer.Layer
	err        error
}

// result returns the layer resulting from the download, if the download
// and registration were successful.
func (d *downloadTransfer) result() (layer.Layer, error) {
	return d.layer, d.err
}

// A DownloadDescriptor references a layer that may need to be downloaded.
type DownloadDescriptor interface {
	// Key returns the key used to deduplicate downloads.
	Key() string
	// ID returns the ID for display purposes.
	ID() string
	// DiffID should return the DiffID for this layer, or an error
	// if it is unknown (for example, if it has not been downloaded
	// before).
	DiffID() (layer.DiffID, error)
	// Download is called to perform the download.
	Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error)
}

// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an
// additional Registered method which gets called after a downloaded layer is
// registered. This allows the user of the download manager to know the DiffID
// of each registered layer. This method is called if a cast to
// DownloadDescriptorWithRegistered is successful.
type DownloadDescriptorWithRegistered interface {
	DownloadDescriptor
	Registered(diffID layer.DiffID)
}

// Download is a blocking function which ensures the requested layers are
// present in the layer store. It uses the string returned by the Key method to
// deduplicate downloads. If a given layer is not already known to be present in
// the layer store, and the key is not used by an in-progress download, the
// Download method is called to get the layer tar data. Layers are then
// registered in the appropriate order. The caller must call the returned
// release function once it is done with the returned RootFS object.
func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	var (
		topLayer       layer.Layer
		topDownload    *downloadTransfer
		watcher        *Watcher
		missingLayer   bool
		transferKey    = ""
		downloadsByKey = make(map[string]*downloadTransfer)
	)

	rootFS := initialRootFS
	for _, descriptor := range layers {
		key := descriptor.Key()
		// transferKey accumulates all keys so far, so a transfer is
		// deduplicated on the whole layer stack up to this point, not
		// just the single layer.
		transferKey += key

		if !missingLayer {
			missingLayer = true
			diffID, err := descriptor.DiffID()
			if err == nil {
				getRootFS := rootFS
				getRootFS.Append(diffID)
				l, err := ldm.layerStore.Get(getRootFS.ChainID())
				if err == nil {
					// Layer already exists.
					logrus.Debugf("Layer already exists: %s", descriptor.ID())
					progress.Update(progressOutput, descriptor.ID(), "Already exists")
					if topLayer != nil {
						layer.ReleaseAndLog(ldm.layerStore, topLayer)
					}
					topLayer = l
					missingLayer = false
					rootFS.Append(diffID)
					continue
				}
			}
		}

		// Does this layer have the same data as a previous layer in
		// the stack? If so, avoid downloading it more than once.
		var topDownloadUncasted Transfer
		if existingDownload, ok := downloadsByKey[key]; ok {
			xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload)
			defer topDownload.Transfer.Release(watcher)
			topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
			topDownload = topDownloadUncasted.(*downloadTransfer)
			continue
		}

		// Layer is not known to exist - download and register it.
		progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer")

		var xferFunc DoFunc
		if topDownload != nil {
			xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload)
			defer topDownload.Transfer.Release(watcher)
		} else {
			xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil)
		}
		topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
		topDownload = topDownloadUncasted.(*downloadTransfer)
		downloadsByKey[key] = topDownload
	}

	if topDownload == nil {
		// Nothing needed downloading; everything was already present.
		return rootFS, func() {
			layer.ReleaseAndLog(ldm.layerStore, topLayer)
		}, nil
	}

	// Won't be using the list built up so far - will generate it
	// from downloaded layers instead.
	rootFS.DiffIDs = []layer.DiffID{}

	defer func() {
		if topLayer != nil {
			layer.ReleaseAndLog(ldm.layerStore, topLayer)
		}
	}()

	select {
	case <-ctx.Done():
		topDownload.Transfer.Release(watcher)
		return rootFS, func() {}, ctx.Err()
	case <-topDownload.Done():
		break
	}

	l, err := topDownload.result()
	if err != nil {
		topDownload.Transfer.Release(watcher)
		return rootFS, func() {}, err
	}

	// Must do this exactly len(layers) times, so we don't include the
	// base layer on Windows.
	for range layers {
		if l == nil {
			topDownload.Transfer.Release(watcher)
			return rootFS, func() {}, errors.New("internal error: too few parent layers")
		}
		rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...)
		l = l.Parent()
	}

	return rootFS, func() { topDownload.Transfer.Release(watcher) }, err
}

// makeDownloadFunc returns a function that performs the layer download and
// registration. If parentDownload is non-nil, it waits for that download to
// complete before the registration step, and registers the downloaded data
// on top of parentDownload's resulting layer. Otherwise, it registers the
// layer on top of the ChainID given by parentLayer.
func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStore,
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			// Report "Waiting" only if we actually have to wait for start.
			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// Did the parent download already fail or get
				// cancelled?
				select {
				case <-parentDownload.Done():
					_, err := parentDownload.result()
					if err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			var (
				downloadReader io.ReadCloser
				size           int64
				err            error
				retries        int
			)

			for {
				downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput)
				if err == nil {
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-d.Transfer.Context().Done():
					d.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts {
					logrus.Errorf("Download failed: %v", err)
					d.err = err
					return
				}

				logrus.Errorf("Download failed, retrying: %v", err)
				// Back off: retry delay grows by 5 seconds per attempt,
				// counted down one tick per second so progress can update.
				delay := retries * 5
				ticker := time.NewTicker(time.Second)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d seconds", delay)
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-d.Transfer.Context().Done():
						ticker.Stop()
						d.err = errors.New("download cancelled during retry delay")
						return
					}
				}
			}

			close(inactive)

			if parentDownload != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
					downloadReader.Close()
					return
				case <-parentDownload.Done():
				}

				l, err := parentDownload.result()
				if err != nil {
					d.err = err
					downloadReader.Close()
					return
				}
				parentLayer = l.ChainID()
			}

			reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting")
			defer reader.Close()

			inflatedLayerData, err := archive.DecompressStream(reader)
			if err != nil {
				d.err = fmt.Errorf("could not get decompression stream: %v", err)
				return
			}

			d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer)
			if err != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
				default:
					d.err = fmt.Errorf("failed to register layer: %v", err)
				}
				return
			}

			progress.Update(progressOutput, descriptor.ID(), "Pull complete")
			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
			if hasRegistered {
				withRegistered.Registered(d.layer.DiffID())
			}

			// Doesn't actually need to be its own goroutine, but
			// done like this so we can defer close(c).
			go func() {
				<-d.Transfer.Released()
				if d.layer != nil {
					layer.ReleaseAndLog(d.layerStore, d.layer)
				}
			}()
		}()

		return d
	}
}

// makeDownloadFuncFromDownload returns a function that performs the layer
// registration when the layer data is coming from an existing download. It
// waits for sourceDownload and parentDownload to complete, and then
// reregisters the data from sourceDownload's top layer on top of
// parentDownload. This function does not log progress output because it would
// interfere with the progress reporting for sourceDownload, which has the same
// Key.
func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStore,
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			<-start

			close(inactive)

			select {
			case <-d.Transfer.Context().Done():
				d.err = errors.New("layer registration cancelled")
				return
			case <-parentDownload.Done():
			}

			l, err := parentDownload.result()
			if err != nil {
				d.err = err
				return
			}
			parentLayer := l.ChainID()

			// sourceDownload should have already finished if
			// parentDownload finished, but wait for it explicitly
			// to be sure.
select { case <-d.Transfer.Context().Done(): d.err = errors.New("layer registration cancelled") return case <-sourceDownload.Done(): } l, err = sourceDownload.result() if err != nil { d.err = err return } layerReader, err := l.TarStream() if err != nil { d.err = err return } defer layerReader.Close() d.layer, err = d.layerStore.Register(layerReader, parentLayer) if err != nil { d.err = fmt.Errorf("failed to register layer: %v", err) return } withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) if hasRegistered { withRegistered.Registered(d.layer.DiffID()) } // Doesn't actually need to be its own goroutine, but // done like this so we can defer close(c). go func() { <-d.Transfer.Released() if d.layer != nil { layer.ReleaseAndLog(d.layerStore, d.layer) } }() }() return d } } docker-1.10.3/distribution/xfer/download_test.go000066400000000000000000000211571267010174400217330ustar00rootroot00000000000000package xfer import ( "bytes" "errors" "io" "io/ioutil" "sync/atomic" "testing" "time" "github.com/docker/distribution/digest" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" "golang.org/x/net/context" ) const maxDownloadConcurrency = 3 type mockLayer struct { layerData bytes.Buffer diffID layer.DiffID chainID layer.ChainID parent layer.Layer } func (ml *mockLayer) TarStream() (io.ReadCloser, error) { return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil } func (ml *mockLayer) ChainID() layer.ChainID { return ml.chainID } func (ml *mockLayer) DiffID() layer.DiffID { return ml.diffID } func (ml *mockLayer) Parent() layer.Layer { return ml.parent } func (ml *mockLayer) Size() (size int64, err error) { return 0, nil } func (ml *mockLayer) DiffSize() (size int64, err error) { return 0, nil } func (ml *mockLayer) Metadata() (map[string]string, error) { return make(map[string]string), nil } type mockLayerStore struct { layers map[layer.ChainID]*mockLayer } func 
createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { if len(dgsts) == 0 { return parent } if parent == "" { return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) } // H = "H(n-1) SHA256(n)" dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) } func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { var ( parent layer.Layer err error ) if parentID != "" { parent, err = ls.Get(parentID) if err != nil { return nil, err } } l := &mockLayer{parent: parent} _, err = l.layerData.ReadFrom(reader) if err != nil { return nil, err } l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) l.chainID = createChainIDFromParent(parentID, l.diffID) ls.layers[l.chainID] = l return l, nil } func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { l, ok := ls.layers[chainID] if !ok { return nil, layer.ErrLayerDoesNotExist } return l, nil } func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { return []layer.Metadata{}, nil } func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, layer.MountInit) (layer.RWLayer, error) { return nil, errors.New("not implemented") } func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { return nil, errors.New("not implemented") } func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { return nil, errors.New("not implemented") } func (ls *mockLayerStore) Cleanup() error { return nil } func (ls *mockLayerStore) DriverStatus() [][2]string { return [][2]string{} } func (ls *mockLayerStore) DriverName() string { return "mock" } type mockDownloadDescriptor struct { currentDownloads *int32 id string diffID layer.DiffID registeredDiffID layer.DiffID expectedDiffID layer.DiffID simulateRetries int } // Key returns the key used to deduplicate downloads. 
func (d *mockDownloadDescriptor) Key() string {
	return d.id
}

// ID returns the ID for display purposes.
func (d *mockDownloadDescriptor) ID() string {
	return d.id
}

// DiffID should return the DiffID for this layer, or an error
// if it is unknown (for example, if it has not been downloaded
// before).
func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) {
	if d.diffID != "" {
		return d.diffID, nil
	}
	return "", errors.New("no diffID available")
}

// Registered records the diffID passed back by the download manager after
// layer registration, so tests can compare it against the rootFS entry.
func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) {
	d.registeredDiffID = diffID
}

func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser {
	// The mock implementation returns the ID repeated 5 times as a tar
	// stream instead of actual tar data. The data is ignored except for
	// computing IDs.
	return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id)))
}

// Download is called to perform the download.
func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	if d.currentDownloads != nil {
		// Track the number of simultaneously running downloads: the
		// decrement is deferred so it fires however this call returns.
		defer atomic.AddInt32(d.currentDownloads, -1)

		if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency {
			return nil, 0, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming download.
for i := int64(0); i <= 10; i++ { select { case <-ctx.Done(): return nil, 0, ctx.Err() case <-time.After(10 * time.Millisecond): progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10}) } } if d.simulateRetries != 0 { d.simulateRetries-- return nil, 0, errors.New("simulating retry") } return d.mockTarStream(), 0, nil } func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor { return []DownloadDescriptor{ &mockDownloadDescriptor{ currentDownloads: currentDownloads, id: "id1", expectedDiffID: layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"), }, &mockDownloadDescriptor{ currentDownloads: currentDownloads, id: "id2", expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), }, &mockDownloadDescriptor{ currentDownloads: currentDownloads, id: "id3", expectedDiffID: layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"), }, &mockDownloadDescriptor{ currentDownloads: currentDownloads, id: "id2", expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), }, &mockDownloadDescriptor{ currentDownloads: currentDownloads, id: "id4", expectedDiffID: layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"), simulateRetries: 1, }, &mockDownloadDescriptor{ currentDownloads: currentDownloads, id: "id5", expectedDiffID: layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"), }, } } func TestSuccessfulDownload(t *testing.T) { layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) receivedProgress := make(map[string]progress.Progress) go func() { for p := range progressChan { receivedProgress[p.ID] = p } close(progressDone) }() var 
currentDownloads int32 descriptors := downloadDescriptors(¤tDownloads) firstDescriptor := descriptors[0].(*mockDownloadDescriptor) // Pre-register the first layer to simulate an already-existing layer l, err := layerStore.Register(firstDescriptor.mockTarStream(), "") if err != nil { t.Fatal(err) } firstDescriptor.diffID = l.DiffID() rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) if err != nil { t.Fatalf("download error: %v", err) } releaseFunc() close(progressChan) <-progressDone if len(rootFS.DiffIDs) != len(descriptors) { t.Fatal("got wrong number of diffIDs in rootfs") } for i, d := range descriptors { descriptor := d.(*mockDownloadDescriptor) if descriptor.diffID != "" { if receivedProgress[d.ID()].Action != "Already exists" { t.Fatalf("did not get 'Already exists' message for %v", d.ID()) } } else if receivedProgress[d.ID()].Action != "Pull complete" { t.Fatalf("did not get 'Pull complete' message for %v", d.ID()) } if rootFS.DiffIDs[i] != descriptor.expectedDiffID { t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i]) } if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] { t.Fatal("diffID mismatch between rootFS and Registered callback") } } } func TestCancelledDownload(t *testing.T) { ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) go func() { for range progressChan { } close(progressDone) }() ctx, cancel := context.WithCancel(context.Background()) go func() { <-time.After(time.Millisecond) cancel() }() descriptors := downloadDescriptors(nil) _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) if err != context.Canceled { t.Fatal("expected download to be cancelled") } close(progressChan) 
<-progressDone } docker-1.10.3/distribution/xfer/transfer.go000066400000000000000000000245611267010174400207130ustar00rootroot00000000000000package xfer import ( "runtime" "sync" "github.com/docker/docker/pkg/progress" "golang.org/x/net/context" ) // DoNotRetry is an error wrapper indicating that the error cannot be resolved // with a retry. type DoNotRetry struct { Err error } // Error returns the stringified representation of the encapsulated error. func (e DoNotRetry) Error() string { return e.Err.Error() } // Watcher is returned by Watch and can be passed to Release to stop watching. type Watcher struct { // signalChan is used to signal to the watcher goroutine that // new progress information is available, or that the transfer // has finished. signalChan chan struct{} // releaseChan signals to the watcher goroutine that the watcher // should be detached. releaseChan chan struct{} // running remains open as long as the watcher is watching the // transfer. It gets closed if the transfer finishes or the // watcher is detached. running chan struct{} } // Transfer represents an in-progress transfer. type Transfer interface { Watch(progressOutput progress.Output) *Watcher Release(*Watcher) Context() context.Context Close() Done() <-chan struct{} Released() <-chan struct{} Broadcast(masterProgressChan <-chan progress.Progress) } type transfer struct { mu sync.Mutex ctx context.Context cancel context.CancelFunc // watchers keeps track of the goroutines monitoring progress output, // indexed by the channels that release them. watchers map[chan struct{}]*Watcher // lastProgress is the most recently received progress event. lastProgress progress.Progress // hasLastProgress is true when lastProgress has been set. hasLastProgress bool // running remains open as long as the transfer is in progress. running chan struct{} // released stays open until all watchers release the transfer and // the transfer is no longer tracked by the transfer manager. 
	released chan struct{}

	// broadcastDone is true if the master progress channel has closed.
	broadcastDone bool

	// closed is true if Close has been called
	closed bool

	// broadcastSyncChan allows watchers to "ping" the broadcasting
	// goroutine to wait for it to deplete its input channel. This ensures
	// a detaching watcher won't miss an event that was sent before it
	// started detaching.
	broadcastSyncChan chan struct{}
}

// NewTransfer creates a new transfer.
func NewTransfer() Transfer {
	t := &transfer{
		watchers:          make(map[chan struct{}]*Watcher),
		running:           make(chan struct{}),
		released:          make(chan struct{}),
		broadcastSyncChan: make(chan struct{}),
	}

	// This uses context.Background instead of a caller-supplied context
	// so that a transfer won't be cancelled automatically if the client
	// which requested it is ^C'd (there could be other viewers).
	t.ctx, t.cancel = context.WithCancel(context.Background())

	return t
}

// Broadcast copies the progress and error output to all viewers.
// It runs until masterProgressChan is closed, at which point it marks the
// transfer as done (closes t.running) and returns.
func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
	for {
		var (
			p  progress.Progress
			ok bool
		)
		select {
		case p, ok = <-masterProgressChan:
		default:
			// We've depleted the channel, so now we can handle
			// reads on broadcastSyncChan to let detaching watchers
			// know we're caught up.
			select {
			case <-t.broadcastSyncChan:
				continue
			case p, ok = <-masterProgressChan:
			}
		}

		t.mu.Lock()
		if ok {
			// Record the latest event and nudge every watcher; the
			// non-blocking send is fine because watchers re-read
			// lastProgress on each wakeup.
			t.lastProgress = p
			t.hasLastProgress = true
			for _, w := range t.watchers {
				select {
				case w.signalChan <- struct{}{}:
				default:
				}
			}
		} else {
			t.broadcastDone = true
		}
		t.mu.Unlock()
		if !ok {
			close(t.running)
			return
		}
	}
}

// Watch adds a watcher to the transfer. The supplied channel gets progress
// updates and is closed when the transfer finishes.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &Watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	t.watchers[w.releaseChan] = w

	// If broadcasting already finished, there is nothing left to relay;
	// return a watcher whose goroutine never starts.
	if t.broadcastDone {
		close(w.running)
		return w
	}

	go func() {
		defer func() {
			close(w.running)
		}()
		done := false
		for {
			// Snapshot the latest progress under the lock, then
			// write it outside the lock.
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// This might write the last progress item a
			// second time (since channel closure also gets
			// us here), but that's fine.
			if hasLastProgress {
				progressOutput.WriteProgress(lastProgress)
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}

// Release is the inverse of Watch; indicating that the watcher no longer wants
// to be notified about the progress of the transfer. All calls to Watch must
// be paired with later calls to Release so that the lifecycle of the transfer
// is properly managed.
func (t *transfer) Release(watcher *Watcher) {
	t.mu.Lock()
	delete(t.watchers, watcher.releaseChan)

	if len(t.watchers) == 0 {
		if t.closed {
			// released may have been closed already if all
			// watchers were released, then another one was added
			// while waiting for a previous watcher goroutine to
			// finish.
			select {
			case <-t.released:
			default:
				close(t.released)
			}
		} else {
			// Last watcher gone while the transfer is still
			// tracked: cancel the transfer's context.
			t.cancel()
		}
	}
	t.mu.Unlock()

	close(watcher.releaseChan)
	// Block until the watcher goroutine completes
	<-watcher.running
}

// Done returns a channel which is closed if the transfer completes or is
// cancelled. Note that having 0 watchers causes a transfer to be cancelled.
func (t *transfer) Done() <-chan struct{} {
	// Note that this doesn't return t.ctx.Done() because that channel will
	// be closed the moment Cancel is called, and we need to return a
	// channel that blocks until a cancellation is actually acknowledged by
	// the transfer function.
	return t.running
}

// Released returns a channel which is closed once all watchers release the
// transfer AND the transfer is no longer tracked by the transfer manager.
func (t *transfer) Released() <-chan struct{} {
	return t.released
}

// Context returns the context associated with the transfer.
func (t *transfer) Context() context.Context {
	return t.ctx
}

// Close is called by the transfer manager when the transfer is no longer
// being tracked.
func (t *transfer) Close() {
	t.mu.Lock()
	t.closed = true
	// If no watchers remain, released can be closed immediately;
	// otherwise Release closes it when the last watcher detaches.
	if len(t.watchers) == 0 {
		close(t.released)
	}
	t.mu.Unlock()
}

// DoFunc is a function called by the transfer manager to actually perform
// a transfer. It should be non-blocking. It should wait until the start channel
// is closed before transferring any data. If the function closes inactive, that
// signals to the transfer manager that the job is no longer actively moving
// data - for example, it may be waiting for a dependent transfer to finish.
// This prevents it from taking up a slot.
type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer

// TransferManager is used by LayerDownloadManager and LayerUploadManager to
// schedule and deduplicate transfers. It is up to the TransferManager
// implementation to make the scheduling and concurrency decisions.
type TransferManager interface {
	// Transfer checks if a transfer with the given key is in progress. If
	// so, it returns progress and error output from that transfer.
	// Otherwise, it will call xferFunc to initiate the transfer.
Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) } type transferManager struct { mu sync.Mutex concurrencyLimit int activeTransfers int transfers map[string]Transfer waitingTransfers []chan struct{} } // NewTransferManager returns a new TransferManager. func NewTransferManager(concurrencyLimit int) TransferManager { return &transferManager{ concurrencyLimit: concurrencyLimit, transfers: make(map[string]Transfer), } } // Transfer checks if a transfer matching the given key is in progress. If not, // it starts one by calling xferFunc. The caller supplies a channel which // receives progress output from the transfer. func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { tm.mu.Lock() defer tm.mu.Unlock() for { xfer, present := tm.transfers[key] if !present { break } // Transfer is already in progress. watcher := xfer.Watch(progressOutput) select { case <-xfer.Context().Done(): // We don't want to watch a transfer that has been cancelled. // Wait for it to be removed from the map and try again. xfer.Release(watcher) tm.mu.Unlock() // The goroutine that removes this transfer from the // map is also waiting for xfer.Done(), so yield to it. // This could be avoided by adding a Closed method // to Transfer to allow explicitly waiting for it to be // removed the map, but forcing a scheduling round in // this very rare case seems better than bloating the // interface definition. 
runtime.Gosched() <-xfer.Done() tm.mu.Lock() default: return xfer, watcher } } start := make(chan struct{}) inactive := make(chan struct{}) if tm.activeTransfers < tm.concurrencyLimit { close(start) tm.activeTransfers++ } else { tm.waitingTransfers = append(tm.waitingTransfers, start) } masterProgressChan := make(chan progress.Progress) xfer := xferFunc(masterProgressChan, start, inactive) watcher := xfer.Watch(progressOutput) go xfer.Broadcast(masterProgressChan) tm.transfers[key] = xfer // When the transfer is finished, remove from the map. go func() { for { select { case <-inactive: tm.mu.Lock() tm.inactivate(start) tm.mu.Unlock() inactive = nil case <-xfer.Done(): tm.mu.Lock() if inactive != nil { tm.inactivate(start) } delete(tm.transfers, key) tm.mu.Unlock() xfer.Close() return } } }() return xfer, watcher } func (tm *transferManager) inactivate(start chan struct{}) { // If the transfer was started, remove it from the activeTransfers // count. select { case <-start: // Start next transfer if any are waiting if len(tm.waitingTransfers) != 0 { close(tm.waitingTransfers[0]) tm.waitingTransfers = tm.waitingTransfers[1:] } else { tm.activeTransfers-- } default: } } docker-1.10.3/distribution/xfer/transfer_test.go000066400000000000000000000255371267010174400217560ustar00rootroot00000000000000package xfer import ( "sync/atomic" "testing" "time" "github.com/docker/docker/pkg/progress" ) func TestTransfer(t *testing.T) { makeXferFunc := func(id string) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { select { case <-start: default: t.Fatalf("transfer function not started even though concurrency limit not reached") } xfer := NewTransfer() go func() { for i := 0; i <= 10; i++ { progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} time.Sleep(10 * time.Millisecond) } close(progressChan) }() return xfer } } tm := NewTransferManager(5) progressChan := make(chan 
progress.Progress) progressDone := make(chan struct{}) receivedProgress := make(map[string]int64) go func() { for p := range progressChan { val, present := receivedProgress[p.ID] if !present { if p.Current != 0 { t.Fatalf("got unexpected progress value: %d (expected 0)", p.Current) } } else if p.Current == 10 { // Special case: last progress output may be // repeated because the transfer finishing // causes the latest progress output to be // written to the channel (in case the watcher // missed it). if p.Current != 9 && p.Current != 10 { t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) } } else if p.Current != val+1 { t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) } receivedProgress[p.ID] = p.Current } close(progressDone) }() // Start a few transfers ids := []string{"id1", "id2", "id3"} xfers := make([]Transfer, len(ids)) watchers := make([]*Watcher, len(ids)) for i, id := range ids { xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) } for i, xfer := range xfers { <-xfer.Done() xfer.Release(watchers[i]) } close(progressChan) <-progressDone for _, id := range ids { if receivedProgress[id] != 10 { t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) } } } func TestConcurrencyLimit(t *testing.T) { concurrencyLimit := 3 var runningJobs int32 makeXferFunc := func(id string) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { xfer := NewTransfer() go func() { <-start totalJobs := atomic.AddInt32(&runningJobs, 1) if int(totalJobs) > concurrencyLimit { t.Fatalf("too many jobs running") } for i := 0; i <= 10; i++ { progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} time.Sleep(10 * time.Millisecond) } atomic.AddInt32(&runningJobs, -1) close(progressChan) }() return xfer } } tm := NewTransferManager(concurrencyLimit) progressChan := make(chan 
progress.Progress) progressDone := make(chan struct{}) receivedProgress := make(map[string]int64) go func() { for p := range progressChan { receivedProgress[p.ID] = p.Current } close(progressDone) }() // Start more transfers than the concurrency limit ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} xfers := make([]Transfer, len(ids)) watchers := make([]*Watcher, len(ids)) for i, id := range ids { xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) } for i, xfer := range xfers { <-xfer.Done() xfer.Release(watchers[i]) } close(progressChan) <-progressDone for _, id := range ids { if receivedProgress[id] != 10 { t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) } } } func TestInactiveJobs(t *testing.T) { concurrencyLimit := 3 var runningJobs int32 testDone := make(chan struct{}) makeXferFunc := func(id string) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { xfer := NewTransfer() go func() { <-start totalJobs := atomic.AddInt32(&runningJobs, 1) if int(totalJobs) > concurrencyLimit { t.Fatalf("too many jobs running") } for i := 0; i <= 10; i++ { progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} time.Sleep(10 * time.Millisecond) } atomic.AddInt32(&runningJobs, -1) close(inactive) <-testDone close(progressChan) }() return xfer } } tm := NewTransferManager(concurrencyLimit) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) receivedProgress := make(map[string]int64) go func() { for p := range progressChan { receivedProgress[p.ID] = p.Current } close(progressDone) }() // Start more transfers than the concurrency limit ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} xfers := make([]Transfer, len(ids)) watchers := make([]*Watcher, len(ids)) for i, id := range ids { xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), 
progress.ChanOutput(progressChan)) } close(testDone) for i, xfer := range xfers { <-xfer.Done() xfer.Release(watchers[i]) } close(progressChan) <-progressDone for _, id := range ids { if receivedProgress[id] != 10 { t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) } } } func TestWatchRelease(t *testing.T) { ready := make(chan struct{}) makeXferFunc := func(id string) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { xfer := NewTransfer() go func() { defer func() { close(progressChan) }() <-ready for i := int64(0); ; i++ { select { case <-time.After(10 * time.Millisecond): case <-xfer.Context().Done(): return } progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} } }() return xfer } } tm := NewTransferManager(5) type watcherInfo struct { watcher *Watcher progressChan chan progress.Progress progressDone chan struct{} receivedFirstProgress chan struct{} } progressConsumer := func(w watcherInfo) { first := true for range w.progressChan { if first { close(w.receivedFirstProgress) } first = false } close(w.progressDone) } // Start a transfer watchers := make([]watcherInfo, 5) var xfer Transfer watchers[0].progressChan = make(chan progress.Progress) watchers[0].progressDone = make(chan struct{}) watchers[0].receivedFirstProgress = make(chan struct{}) xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) go progressConsumer(watchers[0]) // Give it multiple watchers for i := 1; i != len(watchers); i++ { watchers[i].progressChan = make(chan progress.Progress) watchers[i].progressDone = make(chan struct{}) watchers[i].receivedFirstProgress = make(chan struct{}) watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) go progressConsumer(watchers[i]) } // Now that the watchers are set up, allow the transfer goroutine to // proceed. 
close(ready) // Confirm that each watcher gets progress output. for _, w := range watchers { <-w.receivedFirstProgress } // Release one watcher every 5ms for _, w := range watchers { xfer.Release(w.watcher) <-time.After(5 * time.Millisecond) } // Now that all watchers have been released, Released() should // return a closed channel. <-xfer.Released() // Done() should return a closed channel because the xfer func returned // due to cancellation. <-xfer.Done() for _, w := range watchers { close(w.progressChan) <-w.progressDone } } func TestWatchFinishedTransfer(t *testing.T) { makeXferFunc := func(id string) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { xfer := NewTransfer() go func() { // Finish immediately close(progressChan) }() return xfer } } tm := NewTransferManager(5) // Start a transfer watchers := make([]*Watcher, 3) var xfer Transfer xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) // Give it a watcher immediately watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) // Wait for the transfer to complete <-xfer.Done() // Set up another watcher watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) // Release the watchers for _, w := range watchers { xfer.Release(w) } // Now that all watchers have been released, Released() should // return a closed channel. 
<-xfer.Released() } func TestDuplicateTransfer(t *testing.T) { ready := make(chan struct{}) var xferFuncCalls int32 makeXferFunc := func(id string) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { atomic.AddInt32(&xferFuncCalls, 1) xfer := NewTransfer() go func() { defer func() { close(progressChan) }() <-ready for i := int64(0); ; i++ { select { case <-time.After(10 * time.Millisecond): case <-xfer.Context().Done(): return } progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} } }() return xfer } } tm := NewTransferManager(5) type transferInfo struct { xfer Transfer watcher *Watcher progressChan chan progress.Progress progressDone chan struct{} receivedFirstProgress chan struct{} } progressConsumer := func(t transferInfo) { first := true for range t.progressChan { if first { close(t.receivedFirstProgress) } first = false } close(t.progressDone) } // Try to start multiple transfers with the same ID transfers := make([]transferInfo, 5) for i := range transfers { t := &transfers[i] t.progressChan = make(chan progress.Progress) t.progressDone = make(chan struct{}) t.receivedFirstProgress = make(chan struct{}) t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan)) go progressConsumer(*t) } // Allow the transfer goroutine to proceed. close(ready) // Confirm that each watcher gets progress output. for _, t := range transfers { <-t.receivedFirstProgress } // Confirm that the transfer function was called exactly once. if xferFuncCalls != 1 { t.Fatal("transfer function wasn't called exactly once") } // Release one watcher every 5ms for _, t := range transfers { t.xfer.Release(t.watcher) <-time.After(5 * time.Millisecond) } for _, t := range transfers { // Now that all watchers have been released, Released() should // return a closed channel. 
<-t.xfer.Released() // Done() should return a closed channel because the xfer func returned // due to cancellation. <-t.xfer.Done() } for _, t := range transfers { close(t.progressChan) <-t.progressDone } } docker-1.10.3/distribution/xfer/upload.go000066400000000000000000000101631267010174400203440ustar00rootroot00000000000000package xfer import ( "errors" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" "golang.org/x/net/context" ) const maxUploadAttempts = 5 // LayerUploadManager provides task management and progress reporting for // uploads. type LayerUploadManager struct { tm TransferManager } // NewLayerUploadManager returns a new LayerUploadManager. func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager { return &LayerUploadManager{ tm: NewTransferManager(concurrencyLimit), } } type uploadTransfer struct { Transfer remoteDescriptor distribution.Descriptor err error } // An UploadDescriptor references a layer that may need to be uploaded. type UploadDescriptor interface { // Key returns the key used to deduplicate uploads. Key() string // ID returns the ID for display purposes. ID() string // DiffID should return the DiffID for this layer. DiffID() layer.DiffID // Upload is called to perform the Upload. Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) // SetRemoteDescriptor provides the distribution.Descriptor that was // returned by Upload. This descriptor is not to be confused with // the UploadDescriptor interface, which is used for internally // identifying layers that are being uploaded. SetRemoteDescriptor(descriptor distribution.Descriptor) } // Upload is a blocking function which ensures the listed layers are present on // the remote registry. It uses the string returned by the Key method to // deduplicate uploads. 
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { var ( uploads []*uploadTransfer dedupDescriptors = make(map[string]*uploadTransfer) ) for _, descriptor := range layers { progress.Update(progressOutput, descriptor.ID(), "Preparing") key := descriptor.Key() if _, present := dedupDescriptors[key]; present { continue } xferFunc := lum.makeUploadFunc(descriptor) upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) defer upload.Release(watcher) uploads = append(uploads, upload.(*uploadTransfer)) dedupDescriptors[key] = upload.(*uploadTransfer) } for _, upload := range uploads { select { case <-ctx.Done(): return ctx.Err() case <-upload.Transfer.Done(): if upload.err != nil { return upload.err } } } for _, l := range layers { l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) } return nil } func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { u := &uploadTransfer{ Transfer: NewTransfer(), } go func() { defer func() { close(progressChan) }() progressOutput := progress.ChanOutput(progressChan) select { case <-start: default: progress.Update(progressOutput, descriptor.ID(), "Waiting") <-start } retries := 0 for { remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) if err == nil { u.remoteDescriptor = remoteDescriptor break } // If an error was returned because the context // was cancelled, we shouldn't retry. 
select { case <-u.Transfer.Context().Done(): u.err = err return default: } retries++ if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { logrus.Errorf("Upload failed: %v", err) u.err = err return } logrus.Errorf("Upload failed, retrying: %v", err) delay := retries * 5 ticker := time.NewTicker(time.Second) selectLoop: for { progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d seconds", delay) select { case <-ticker.C: delay-- if delay == 0 { ticker.Stop() break selectLoop } case <-u.Transfer.Context().Done(): ticker.Stop() u.err = errors.New("upload cancelled during retry delay") return } } } }() return u } } docker-1.10.3/distribution/xfer/upload_test.go000066400000000000000000000113461267010174400214070ustar00rootroot00000000000000package xfer import ( "errors" "sync/atomic" "testing" "time" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" "golang.org/x/net/context" ) const maxUploadConcurrency = 3 type mockUploadDescriptor struct { currentUploads *int32 diffID layer.DiffID simulateRetries int } // Key returns the key used to deduplicate downloads. func (u *mockUploadDescriptor) Key() string { return u.diffID.String() } // ID returns the ID for display purposes. func (u *mockUploadDescriptor) ID() string { return u.diffID.String() } // DiffID should return the DiffID for this layer. func (u *mockUploadDescriptor) DiffID() layer.DiffID { return u.diffID } // SetRemoteDescriptor is not used in the mock. func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) { } // Upload is called to perform the upload. 
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { if u.currentUploads != nil { defer atomic.AddInt32(u.currentUploads, -1) if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency { return distribution.Descriptor{}, errors.New("concurrency limit exceeded") } } // Sleep a bit to simulate a time-consuming upload. for i := int64(0); i <= 10; i++ { select { case <-ctx.Done(): return distribution.Descriptor{}, ctx.Err() case <-time.After(10 * time.Millisecond): progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) } } if u.simulateRetries != 0 { u.simulateRetries-- return distribution.Descriptor{}, errors.New("simulating retry") } return distribution.Descriptor{}, nil } func uploadDescriptors(currentUploads *int32) []UploadDescriptor { return []UploadDescriptor{ &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, } } var expectedDigests = map[layer.DiffID]digest.Digest{ layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"): digest.Digest("sha256:c5095d6cf7ee42b7b064371dcc1dc3fb4af197f04d01a60009d484bd432724fc"), layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"): 
digest.Digest("sha256:968cbfe2ff5269ea1729b3804767a1f57ffbc442d3bc86f47edbf7e688a4f36e"), layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"): digest.Digest("sha256:8a5e56ab4b477a400470a7d5d4c1ca0c91235fd723ab19cc862636a06f3a735d"), layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"): digest.Digest("sha256:5e733e5cd3688512fc240bd5c178e72671c9915947d17bb8451750d827944cb2"), layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"): digest.Digest("sha256:ec4bb98d15e554a9f66c3ef9296cf46772c0ded3b1592bd8324d96e2f60f460c"), } func TestSuccessfulUpload(t *testing.T) { lum := NewLayerUploadManager(maxUploadConcurrency) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) receivedProgress := make(map[string]int64) go func() { for p := range progressChan { receivedProgress[p.ID] = p.Current } close(progressDone) }() var currentUploads int32 descriptors := uploadDescriptors(¤tUploads) err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) if err != nil { t.Fatalf("upload error: %v", err) } close(progressChan) <-progressDone } func TestCancelledUpload(t *testing.T) { lum := NewLayerUploadManager(maxUploadConcurrency) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) go func() { for range progressChan { } close(progressDone) }() ctx, cancel := context.WithCancel(context.Background()) go func() { <-time.After(time.Millisecond) cancel() }() descriptors := uploadDescriptors(nil) err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) if err != context.Canceled { t.Fatal("expected upload to be cancelled") } close(progressChan) <-progressDone } docker-1.10.3/docker/000077500000000000000000000000001267010174400143145ustar00rootroot00000000000000docker-1.10.3/docker/README.md000066400000000000000000000002011267010174400155640ustar00rootroot00000000000000docker.go contains 
Docker's main function. This file provides first line CLI argument parsing and environment variable setting. docker-1.10.3/docker/client.go000066400000000000000000000014411267010174400161210ustar00rootroot00000000000000package main import ( "path/filepath" "github.com/docker/docker/cli" "github.com/docker/docker/cliconfig" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/utils" ) var clientFlags = &cli.ClientFlags{FlagSet: new(flag.FlagSet), Common: commonFlags} func init() { client := clientFlags.FlagSet client.StringVar(&clientFlags.ConfigDir, []string{"-config"}, cliconfig.ConfigDir(), "Location of client config files") clientFlags.PostParse = func() { clientFlags.Common.PostParse() if clientFlags.ConfigDir != "" { cliconfig.SetConfigDir(clientFlags.ConfigDir) } if clientFlags.Common.TrustKey == "" { clientFlags.Common.TrustKey = filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile) } if clientFlags.Common.Debug { utils.EnableDebug() } } } docker-1.10.3/docker/common.go000066400000000000000000000061771267010174400161460ustar00rootroot00000000000000package main import ( "fmt" "os" "path/filepath" "github.com/Sirupsen/logrus" "github.com/docker/docker/cli" "github.com/docker/docker/cliconfig" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/go-connections/tlsconfig" ) const ( defaultTrustKeyFile = "key.json" defaultCaFile = "ca.pem" defaultKeyFile = "key.pem" defaultCertFile = "cert.pem" tlsVerifyKey = "tlsverify" ) var ( commonFlags = &cli.CommonFlags{FlagSet: new(flag.FlagSet)} dockerCertPath = os.Getenv("DOCKER_CERT_PATH") dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" ) func init() { if dockerCertPath == "" { dockerCertPath = cliconfig.ConfigDir() } commonFlags.PostParse = postParseCommon cmd := commonFlags.FlagSet cmd.BoolVar(&commonFlags.Debug, []string{"D", "-debug"}, false, "Enable debug mode") cmd.StringVar(&commonFlags.LogLevel, []string{"l", "-log-level"}, "info", "Set the 
logging level") cmd.BoolVar(&commonFlags.TLS, []string{"-tls"}, false, "Use TLS; implied by --tlsverify") cmd.BoolVar(&commonFlags.TLSVerify, []string{"-tlsverify"}, dockerTLSVerify, "Use TLS and verify the remote") // TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file") var tlsOptions tlsconfig.Options commonFlags.TLSOptions = &tlsOptions cmd.StringVar(&tlsOptions.CAFile, []string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust certs signed only by this CA") cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") cmd.Var(opts.NewNamedListOptsRef("hosts", &commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") } func postParseCommon() { cmd := commonFlags.FlagSet setDaemonLogLevel(commonFlags.LogLevel) // Regardless of whether the user sets it to true or false, if they // specify --tlsverify at all then we need to turn on tls // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need to check that here as well if cmd.IsSet("-"+tlsVerifyKey) || commonFlags.TLSVerify { commonFlags.TLS = true } if !commonFlags.TLS { commonFlags.TLSOptions = nil } else { tlsOptions := commonFlags.TLSOptions tlsOptions.InsecureSkipVerify = !commonFlags.TLSVerify // Reset CertFile and KeyFile to empty string if the user did not specify // the respective flags and the respective default files were not found. 
if !cmd.IsSet("-tlscert") { if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { tlsOptions.CertFile = "" } } if !cmd.IsSet("-tlskey") { if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { tlsOptions.KeyFile = "" } } } } func setDaemonLogLevel(logLevel string) { if logLevel != "" { lvl, err := logrus.ParseLevel(logLevel) if err != nil { fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) os.Exit(1) } logrus.SetLevel(lvl) } else { logrus.SetLevel(logrus.InfoLevel) } } docker-1.10.3/docker/daemon.go000066400000000000000000000244531267010174400161160ustar00rootroot00000000000000// +build daemon package main import ( "crypto/tls" "fmt" "io" "os" "path/filepath" "strings" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/uuid" apiserver "github.com/docker/docker/api/server" "github.com/docker/docker/cli" "github.com/docker/docker/cliconfig" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/dockerversion" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/jsonlog" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" "github.com/docker/docker/utils" "github.com/docker/go-connections/tlsconfig" ) const ( daemonUsage = " docker daemon [ --help | ... ]\n" daemonConfigFileFlag = "-config-file" ) var ( daemonCli cli.Handler = NewDaemonCli() ) // DaemonCli represents the daemon CLI. type DaemonCli struct { *daemon.Config registryOptions *registry.Options flags *flag.FlagSet } func presentInHelp(usage string) string { return usage } func absentFromHelp(string) string { return "" } // NewDaemonCli returns a pre-configured daemon CLI func NewDaemonCli() *DaemonCli { daemonFlags := cli.Subcmd("daemon", nil, "Enable daemon mode", true) // TODO(tiborvass): remove InstallFlags? 
daemonConfig := new(daemon.Config) daemonConfig.LogConfig.Config = make(map[string]string) daemonConfig.ClusterOpts = make(map[string]string) daemonConfig.InstallFlags(daemonFlags, presentInHelp) daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp) registryOptions := new(registry.Options) registryOptions.InstallFlags(daemonFlags, presentInHelp) registryOptions.InstallFlags(flag.CommandLine, absentFromHelp) daemonFlags.Require(flag.Exact, 0) return &DaemonCli{ Config: daemonConfig, registryOptions: registryOptions, flags: daemonFlags, } } func migrateKey() (err error) { // Migrate trust key if exists at ~/.docker/key.json and owned by current user oldPath := filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile) newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile) if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) { defer func() { // Ensure old path is removed if no error occurred if err == nil { err = os.Remove(oldPath) } else { logrus.Warnf("Key migration failed, key file not removed at %s", oldPath) os.Remove(newPath) } }() if err := system.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil { return fmt.Errorf("Unable to create daemon configuration directory: %s", err) } newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return fmt.Errorf("error creating key file %q: %s", newPath, err) } defer newFile.Close() oldFile, err := os.Open(oldPath) if err != nil { return fmt.Errorf("error opening key file %q: %s", oldPath, err) } defer oldFile.Close() if _, err := io.Copy(newFile, oldFile); err != nil { return fmt.Errorf("error copying key: %s", err) } logrus.Infof("Migrated key from %s to %s", oldPath, newPath) } return nil } func getGlobalFlag() (globalFlag *flag.Flag) { defer func() { if x := recover(); x != nil { switch f := x.(type) { case *flag.Flag: globalFlag = f default: panic(x) } } }() visitor := func(f *flag.Flag) { panic(f) } 
commonFlags.FlagSet.Visit(visitor) clientFlags.FlagSet.Visit(visitor) return } // CmdDaemon is the daemon command, called the raw arguments after `docker daemon`. func (cli *DaemonCli) CmdDaemon(args ...string) error { // warn from uuid package when running the daemon uuid.Loggerf = logrus.Warnf if !commonFlags.FlagSet.IsEmpty() || !clientFlags.FlagSet.IsEmpty() { // deny `docker -D daemon` illegalFlag := getGlobalFlag() fmt.Fprintf(os.Stderr, "invalid flag '-%s'.\nSee 'docker daemon --help'.\n", illegalFlag.Names[0]) os.Exit(1) } else { // allow new form `docker daemon -D` flag.Merge(cli.flags, commonFlags.FlagSet) } configFile := cli.flags.String([]string{daemonConfigFileFlag}, defaultDaemonConfigFile, "Daemon configuration file") cli.flags.ParseFlags(args, true) commonFlags.PostParse() if commonFlags.TrustKey == "" { commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), defaultTrustKeyFile) } cliConfig, err := loadDaemonCliConfig(cli.Config, cli.flags, commonFlags, *configFile) if err != nil { fmt.Fprint(os.Stderr, err) os.Exit(1) } cli.Config = cliConfig if cli.Config.Debug { utils.EnableDebug() } if utils.ExperimentalBuild() { logrus.Warn("Running experimental build") } logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: jsonlog.RFC3339NanoFixed}) if err := setDefaultUmask(); err != nil { logrus.Fatalf("Failed to set umask: %v", err) } if len(cli.LogConfig.Config) > 0 { if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { logrus.Fatalf("Failed to set log opts: %v", err) } } var pfile *pidfile.PIDFile if cli.Pidfile != "" { pf, err := pidfile.New(cli.Pidfile) if err != nil { logrus.Fatalf("Error starting daemon: %v", err) } pfile = pf defer func() { if err := pfile.Remove(); err != nil { logrus.Error(err) } }() } serverConfig := &apiserver.Config{ AuthorizationPluginNames: cli.Config.AuthorizationPlugins, Logging: true, Version: dockerversion.Version, } serverConfig = setPlatformServerConfig(serverConfig, 
cli.Config) defaultHost := opts.DefaultHost if cli.Config.TLS { tlsOptions := tlsconfig.Options{ CAFile: cli.Config.CommonTLSOptions.CAFile, CertFile: cli.Config.CommonTLSOptions.CertFile, KeyFile: cli.Config.CommonTLSOptions.KeyFile, } if cli.Config.TLSVerify { // server requires and verifies client's certificate tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert } tlsConfig, err := tlsconfig.Server(tlsOptions) if err != nil { logrus.Fatal(err) } serverConfig.TLSConfig = tlsConfig defaultHost = opts.DefaultTLSHost } if len(cli.Config.Hosts) == 0 { cli.Config.Hosts = make([]string, 1) } for i := 0; i < len(cli.Config.Hosts); i++ { var err error if cli.Config.Hosts[i], err = opts.ParseHost(defaultHost, cli.Config.Hosts[i]); err != nil { logrus.Fatalf("error parsing -H %s : %v", cli.Config.Hosts[i], err) } protoAddr := cli.Config.Hosts[i] protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { logrus.Fatalf("bad format %s, expected PROTO://ADDR", protoAddr) } serverConfig.Addrs = append(serverConfig.Addrs, apiserver.Addr{Proto: protoAddrParts[0], Addr: protoAddrParts[1]}) } api, err := apiserver.New(serverConfig) if err != nil { logrus.Fatal(err) } if err := migrateKey(); err != nil { logrus.Fatal(err) } cli.TrustKeyPath = commonFlags.TrustKey registryService := registry.NewService(cli.registryOptions) d, err := daemon.NewDaemon(cli.Config, registryService) if err != nil { if pfile != nil { if err := pfile.Remove(); err != nil { logrus.Error(err) } } logrus.Fatalf("Error starting daemon: %v", err) } logrus.Info("Daemon has completed initialization") logrus.WithFields(logrus.Fields{ "version": dockerversion.Version, "commit": dockerversion.GitCommit, "execdriver": d.ExecutionDriver().Name(), "graphdriver": d.GraphDriverName(), }).Info("Docker daemon") api.InitRouters(d) reload := func(config *daemon.Config) { if err := d.Reload(config); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } api.Reload(config) } 
setupConfigReloadTrap(*configFile, cli.flags, reload) // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go api.Wait(serveAPIWait) signal.Trap(func() { api.Close() <-serveAPIWait shutdownDaemon(d, 15) if pfile != nil { if err := pfile.Remove(); err != nil { logrus.Error(err) } } }) // after the daemon is done setting up we can notify systemd api notifySystem() // Daemon is fully initialized and handling API traffic // Wait for serve API to complete errAPI := <-serveAPIWait shutdownDaemon(d, 15) if errAPI != nil { if pfile != nil { if err := pfile.Remove(); err != nil { logrus.Error(err) } } logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI) } return nil } // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) { ch := make(chan struct{}) go func() { d.Shutdown() close(ch) }() select { case <-ch: logrus.Debug("Clean shutdown succeeded") case <-time.After(timeout * time.Second): logrus.Error("Force shutdown daemon") } } func loadDaemonCliConfig(config *daemon.Config, daemonFlags *flag.FlagSet, commonConfig *cli.CommonFlags, configFile string) (*daemon.Config, error) { config.Debug = commonConfig.Debug config.Hosts = commonConfig.Hosts config.LogLevel = commonConfig.LogLevel config.TLS = commonConfig.TLS config.TLSVerify = commonConfig.TLSVerify config.CommonTLSOptions = daemon.CommonTLSOptions{} if commonConfig.TLSOptions != nil { config.CommonTLSOptions.CAFile = commonConfig.TLSOptions.CAFile config.CommonTLSOptions.CertFile = commonConfig.TLSOptions.CertFile config.CommonTLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile } if configFile != "" { c, err := daemon.MergeDaemonConfigurations(config, daemonFlags, configFile) if err != nil { if 
daemonFlags.IsSet(daemonConfigFileFlag) || !os.IsNotExist(err) { return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", configFile, err) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { config = c } } // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS if config.IsValueSet(tlsVerifyKey) { config.TLS = true } // ensure that the log level is the one set after merging configurations setDaemonLogLevel(config.LogLevel) return config, nil } docker-1.10.3/docker/daemon_freebsd.go000066400000000000000000000002101267010174400175710ustar00rootroot00000000000000// +build daemon package main // notifySystem sends a message to the host when the server is ready to be used func notifySystem() { } docker-1.10.3/docker/daemon_linux.go000066400000000000000000000004421267010174400173250ustar00rootroot00000000000000// +build daemon package main import ( systemdDaemon "github.com/coreos/go-systemd/daemon" ) // notifySystem sends a message to the host when the server is ready to be used func notifySystem() { // Tell the init daemon we are accepting requests go systemdDaemon.SdNotify("READY=1") } docker-1.10.3/docker/daemon_none.go000066400000000000000000000003431267010174400171250ustar00rootroot00000000000000// +build !daemon package main import "github.com/docker/docker/cli" const daemonUsage = "" var daemonCli cli.Handler // notifySystem sends a message to the host when the server is ready to be used func notifySystem() { } docker-1.10.3/docker/daemon_test.go000066400000000000000000000223341267010174400171510ustar00rootroot00000000000000// +build daemon package main import ( "io/ioutil" "strings" "testing" "github.com/Sirupsen/logrus" "github.com/docker/docker/cli" "github.com/docker/docker/daemon" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/mflag" 
"github.com/docker/go-connections/tlsconfig" ) func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{ Debug: true, } flags := mflag.NewFlagSet("test", mflag.ContinueOnError) loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz") if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatalf("expected configuration %v, got nil", c) } if !loadedConfig.Debug { t.Fatalf("expected debug to be copied from the common flags, got false") } } func TestLoadDaemonCliConfigWithTLS(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{ TLS: true, TLSOptions: &tlsconfig.Options{ CAFile: "/tmp/ca.pem", }, } flags := mflag.NewFlagSet("test", mflag.ContinueOnError) loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz") if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatalf("expected configuration %v, got nil", c) } if loadedConfig.CommonTLSOptions.CAFile != "/tmp/ca.pem" { t.Fatalf("expected /tmp/ca.pem, got %s: %q", loadedConfig.CommonTLSOptions.CAFile, loadedConfig) } } func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{} f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"labels": ["l3=foo"]}`)) f.Close() var labels []string flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.String([]string{daemonConfigFileFlag}, "", "") flags.Var(opts.NewNamedListOptsRef("labels", &labels, opts.ValidateLabel), []string{"-label"}, "") flags.Set(daemonConfigFileFlag, configFile) if err := flags.Set("-label", "l1=bar"); err != nil { t.Fatal(err) } if err := flags.Set("-label", "l2=baz"); err != nil { t.Fatal(err) } _, err = loadDaemonCliConfig(c, flags, common, configFile) if err == nil { t.Fatalf("expected configuration error, got nil") } if !strings.Contains(err.Error(), "labels") { t.Fatalf("expected labels conflict, got %v", err) } } 
func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{ TLSOptions: &tlsconfig.Options{ CAFile: "/tmp/ca.pem", }, } f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"tlsverify": true}`)) f.Close() flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.Bool([]string{"-tlsverify"}, false, "") loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatalf("expected configuration %v, got nil", c) } if !loadedConfig.TLS { t.Fatalf("expected TLS enabled, got %q", loadedConfig) } } func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{ TLSOptions: &tlsconfig.Options{ CAFile: "/tmp/ca.pem", }, } f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"tlsverify": false}`)) f.Close() flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.Bool([]string{"-tlsverify"}, false, "") loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatalf("expected configuration %v, got nil", c) } if !loadedConfig.TLS { t.Fatalf("expected TLS enabled, got %q", loadedConfig) } } func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{ TLSOptions: &tlsconfig.Options{ CAFile: "/tmp/ca.pem", }, } f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{}`)) f.Close() flags := mflag.NewFlagSet("test", mflag.ContinueOnError) loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatalf("expected configuration %v, got nil", c) } if loadedConfig.TLS { t.Fatalf("expected TLS disabled, got %q", loadedConfig) 
} } func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{} f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"log-level": "warn"}`)) f.Close() flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.String([]string{"-log-level"}, "", "") loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatalf("expected configuration %v, got nil", c) } if loadedConfig.LogLevel != "warn" { t.Fatalf("expected warn log level, got %v", loadedConfig.LogLevel) } if logrus.GetLevel() != logrus.WarnLevel { t.Fatalf("expected warn log level, got %v", logrus.GetLevel()) } } func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{} flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.String([]string{"-tlscacert"}, "", "") flags.String([]string{"-log-driver"}, "", "") f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}`)) f.Close() loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatal("expected configuration, got nil") } if loadedConfig.CommonTLSOptions.CAFile != "/etc/certs/ca.pem" { t.Fatalf("expected CA file path /etc/certs/ca.pem, got %v", loadedConfig.CommonTLSOptions.CAFile) } if loadedConfig.LogConfig.Type != "syslog" { t.Fatalf("expected LogConfig type syslog, got %v", loadedConfig.LogConfig.Type) } } func TestLoadDaemonConfigWithMapOptions(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{} flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.Var(opts.NewNamedMapOpts("cluster-store-opts", c.ClusterOpts, nil), []string{"-cluster-store-opt"}, "") flags.Var(opts.NewNamedMapOpts("log-opts", 
c.LogConfig.Config, nil), []string{"-log-opt"}, "") f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{ "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, "log-opts": {"tag": "test"} }`)) f.Close() loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatal("expected configuration, got nil") } if loadedConfig.ClusterOpts == nil { t.Fatal("expected cluster options, got nil") } expectedPath := "/var/lib/docker/discovery_certs/ca.pem" if caPath := loadedConfig.ClusterOpts["kv.cacertfile"]; caPath != expectedPath { t.Fatalf("expected %s, got %s", expectedPath, caPath) } if loadedConfig.LogConfig.Config == nil { t.Fatal("expected log config options, got nil") } if tag := loadedConfig.LogConfig.Config["tag"]; tag != "test" { t.Fatalf("expected log tag `test`, got %s", tag) } } func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{} flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "") f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } if err := flags.ParseFlags([]string{}, false); err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{ "userland-proxy": false }`)) f.Close() loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatal("expected configuration, got nil") } if loadedConfig.EnableUserlandProxy { t.Fatal("expected userland proxy to be disabled, got enabled") } // make sure reloading doesn't generate configuration // conflicts after normalizing boolean values. 
err = daemon.ReloadConfiguration(configFile, flags, func(reloadedConfig *daemon.Config) { if reloadedConfig.EnableUserlandProxy { t.Fatal("expected userland proxy to be disabled, got enabled") } }) if err != nil { t.Fatal(err) } } func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{} flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "") f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } if err := flags.ParseFlags([]string{}, false); err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{}`)) f.Close() loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatal("expected configuration, got nil") } if !loadedConfig.EnableUserlandProxy { t.Fatal("expected userland proxy to be enabled, got disabled") } } docker-1.10.3/docker/daemon_unix.go000066400000000000000000000033231267010174400171520ustar00rootroot00000000000000// +build daemon,!windows package main import ( "fmt" "os" "os/signal" "syscall" "github.com/Sirupsen/logrus" apiserver "github.com/docker/docker/api/server" "github.com/docker/docker/daemon" "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/system" _ "github.com/docker/docker/daemon/execdriver/native" ) const defaultDaemonConfigFile = "/etc/docker/daemon.json" func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config { serverConfig.SocketGroup = daemonCfg.SocketGroup serverConfig.EnableCors = daemonCfg.EnableCors serverConfig.CorsHeaders = daemonCfg.CorsHeaders return serverConfig } // currentUserIsOwner checks whether the current user is the owner of the given // file. 
func currentUserIsOwner(f string) bool { if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { if int(fileInfo.UID()) == os.Getuid() { return true } } return false } // setDefaultUmask sets the umask to 0022 to avoid problems // caused by custom umask func setDefaultUmask() error { desiredUmask := 0022 syscall.Umask(desiredUmask) if umask := syscall.Umask(desiredUmask); umask != desiredUmask { return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) } return nil } func getDaemonConfDir() string { return "/etc/docker" } // setupConfigReloadTrap configures the USR2 signal to reload the configuration. func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGHUP) go func() { for range c { if err := daemon.ReloadConfiguration(configFile, flags, reload); err != nil { logrus.Error(err) } } }() } docker-1.10.3/docker/daemon_unix_test.go000066400000000000000000000020061267010174400202060ustar00rootroot00000000000000// +build daemon,!windows package main import ( "io/ioutil" "testing" "github.com/docker/docker/cli" "github.com/docker/docker/daemon" "github.com/docker/docker/pkg/mflag" ) func TestLoadDaemonConfigWithNetwork(t *testing.T) { c := &daemon.Config{} common := &cli.CommonFlags{} flags := mflag.NewFlagSet("test", mflag.ContinueOnError) flags.String([]string{"-bip"}, "", "") flags.String([]string{"-ip"}, "", "") f, err := ioutil.TempFile("", "docker-config-") if err != nil { t.Fatal(err) } configFile := f.Name() f.Write([]byte(`{"bip": "127.0.0.2", "ip": "127.0.0.1"}`)) f.Close() loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) if err != nil { t.Fatal(err) } if loadedConfig == nil { t.Fatalf("expected configuration %v, got nil", c) } if loadedConfig.IP != "127.0.0.2" { t.Fatalf("expected IP 127.0.0.2, got %v", loadedConfig.IP) } if loadedConfig.DefaultIP.String() != "127.0.0.1" { t.Fatalf("expected 
DefaultIP 127.0.0.1, got %s", loadedConfig.DefaultIP) } } docker-1.10.3/docker/daemon_windows.go000066400000000000000000000031341267010174400176610ustar00rootroot00000000000000// +build daemon package main import ( "fmt" "os" "syscall" "github.com/Sirupsen/logrus" apiserver "github.com/docker/docker/api/server" "github.com/docker/docker/daemon" "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/system" ) var defaultDaemonConfigFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker" + string(os.PathSeparator) + "config" + string(os.PathSeparator) + "daemon.json" func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config { return serverConfig } // currentUserIsOwner checks whether the current user is the owner of the given // file. func currentUserIsOwner(f string) bool { return false } // setDefaultUmask doesn't do anything on windows func setDefaultUmask() error { return nil } func getDaemonConfDir() string { return os.Getenv("PROGRAMDATA") + `\docker\config` } // notifySystem sends a message to the host when the server is ready to be used func notifySystem() { } // setupConfigReloadTrap configures a Win32 event to reload the configuration. 
func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) { go func() { sa := syscall.SecurityAttributes{ Length: 0, } ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { logrus.Debugf("Config reload - waiting signal at %s", ev) for { syscall.WaitForSingleObject(h, syscall.INFINITE) if err := daemon.ReloadConfiguration(configFile, flags, reload); err != nil { logrus.Error(err) } } } }() } docker-1.10.3/docker/docker.go000066400000000000000000000037161267010174400161210ustar00rootroot00000000000000package main import ( "fmt" "os" "github.com/Sirupsen/logrus" "github.com/docker/docker/api/client" "github.com/docker/docker/cli" "github.com/docker/docker/dockerversion" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" ) func main() { if reexec.Init() { return } // Set terminal emulation based on platform as required. stdin, stdout, stderr := term.StdStreams() logrus.SetOutput(stderr) flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet) flag.Usage = func() { fmt.Fprint(stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ --help | -v | --version ]\n\n") fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") flag.CommandLine.SetOutput(stdout) flag.PrintDefaults() help := "\nCommands:\n" for _, cmd := range dockerCommands { help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description) } help += "\nRun 'docker COMMAND --help' for more information on a command." fmt.Fprintf(stdout, "%s\n", help) } flag.Parse() if *flVersion { showVersion() return } if *flHelp { // if global flag --help is present, regardless of what other options and commands there are, // just print the usage. 
flag.Usage() return } clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) c := cli.New(clientCli, daemonCli) if err := c.Run(flag.Args()...); err != nil { if sterr, ok := err.(cli.StatusError); ok { if sterr.Status != "" { fmt.Fprintln(stderr, sterr.Status) os.Exit(1) } os.Exit(sterr.StatusCode) } fmt.Fprintln(stderr, err) os.Exit(1) } } func showVersion() { if utils.ExperimentalBuild() { fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit) } else { fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) } } docker-1.10.3/docker/docker_windows.go000066400000000000000000000001141267010174400176600ustar00rootroot00000000000000package main import ( _ "github.com/docker/docker/autogen/winresources" ) docker-1.10.3/docker/flags.go000066400000000000000000000013451267010174400157420ustar00rootroot00000000000000package main import ( "sort" "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" ) var ( flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") ) type byName []cli.Command func (a byName) Len() int { return len(a) } func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } var dockerCommands []cli.Command // TODO(tiborvass): do not show 'daemon' on client-only binaries func init() { for _, cmd := range cli.DockerCommands { dockerCommands = append(dockerCommands, cmd) } sort.Sort(byName(dockerCommands)) } docker-1.10.3/docker/flags_test.go000066400000000000000000000003741267010174400170020ustar00rootroot00000000000000package main import ( "sort" "testing" ) // Tests if the subcommands of docker are sorted func TestDockerSubcommandsAreSorted(t *testing.T) { if !sort.IsSorted(byName(dockerCommands)) { t.Fatal("Docker subcommands are not in sorted order") } } 
docker-1.10.3/dockerinit/000077500000000000000000000000001267010174400152005ustar00rootroot00000000000000docker-1.10.3/dockerinit/dockerinit.go000066400000000000000000000002601267010174400176600ustar00rootroot00000000000000package main import ( _ "github.com/docker/docker/daemon/execdriver/native" "github.com/docker/docker/pkg/reexec" ) func main() { // Running in init mode reexec.Init() } docker-1.10.3/dockerversion/000077500000000000000000000000001267010174400157225ustar00rootroot00000000000000docker-1.10.3/dockerversion/version_lib.go000066400000000000000000000007021267010174400205630ustar00rootroot00000000000000// +build !autogen // Package dockerversion is auto-generated at build-time package dockerversion // Default build-time variable for library-import. // This file is overridden on build with build-time informations. const ( GitCommit string = "library-import" Version string = "library-import" BuildTime string = "library-import" IAmStatic string = "library-import" InitSHA1 string = "library-import" InitPath string = "library-import" ) docker-1.10.3/docs/000077500000000000000000000000001267010174400137755ustar00rootroot00000000000000docker-1.10.3/docs/.gitignore000066400000000000000000000001021267010174400157560ustar00rootroot00000000000000# avoid committing the awsconfig file used for releases awsconfig docker-1.10.3/docs/Dockerfile000066400000000000000000000014101267010174400157630ustar00rootroot00000000000000FROM docs/base:latest MAINTAINER Mary Anthony (@moxiegirl) RUN svn checkout https://github.com/docker/compose/trunk/docs /docs/content/compose RUN svn checkout https://github.com/docker/swarm/trunk/docs /docs/content/swarm RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry RUN svn checkout https://github.com/kitematic/kitematic/trunk/docs /docs/content/kitematic RUN svn checkout https://github.com/docker/tutorials/trunk/docs 
/docs/content/ RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/opensource ENV PROJECT=engine # To get the git info for this repo COPY . /src COPY . /docs/content/$PROJECT/ docker-1.10.3/docs/Makefile000066400000000000000000000045261267010174400154440ustar00rootroot00000000000000.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate # env vars passed through directly to Docker's build scripts # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily # `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these DOCKER_ENVS := \ -e BUILDFLAGS \ -e DOCKER_CLIENTONLY \ -e DOCKER_GRAPHDRIVER \ -e TESTDIRS \ -e TESTFLAGS \ -e TIMEOUT # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds # to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) # to allow `make DOCSPORT=9000 docs` DOCSPORT := 8000 # Get the IP ADDRESS DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") HUGO_BIND_IP=0.0.0.0 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE # for some docs workarounds (see below in "docs-build" target) GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) default: docs docs: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server 
--port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) docs-draft: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) docs-shell: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash docs-build: # ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files # echo "$(GIT_BRANCH)" > GIT_BRANCH # echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET # echo "$(GITCOMMIT)" > GITCOMMIT docker build -t "$(DOCKER_DOCS_IMAGE)" . docker-1.10.3/docs/README.md000066400000000000000000000253211267010174400152570ustar00rootroot00000000000000 # Docker Documentation The source for Docker documentation is in this directory. Our documentation uses extended Markdown, as implemented by [MkDocs](http://mkdocs.org). The current release of the Docker documentation resides on [https://docs.docker.com](https://docs.docker.com). ## Understanding the documentation branches and processes Docker has two primary branches for documentation: | Branch | Description | URL (published via commit-hook) | |----------|--------------------------------|------------------------------------------------------------------------------| | `docs` | Official release documentation | [https://docs.docker.com](https://docs.docker.com) | | `master` | Merged but unreleased development work | | Additions and updates to upcoming releases are made in a feature branch off of the `master` branch. The Docker maintainers also support a `docs` branch that contains the last release of documentation. After a release, documentation updates are continually merged into `master` as they occur. This work includes new documentation for forthcoming features, bug fixes, and other updates. Periodically, the Docker maintainers update `docs.docker.com` between official releases of Docker. 
They do this by cherry-picking commits from `master`, merging them into `docs`, and then publishing the result. In the rare case where a change is not forward-compatible, changes may be made on other branches by special arrangement with the Docker maintainers. ### Quickstart for documentation contributors If you are a new or beginner contributor, we encourage you to read through the [our detailed contributors guide](https://docs.docker.com/opensource/code/). The guide explains in detail, with examples, how to contribute. If you are an experienced contributor this quickstart should be enough to get you started. The following is the essential workflow for contributing to the documentation: 1. Fork the `docker/docker` repository. 2. Clone the repository to your local machine. 3. Select an issue from `docker/docker` to work on or submit a proposal of your own. 4. Create a feature branch from `master` in which to work. By basing from `master` your work is automatically included in the next release. It also allows docs maintainers to easily cherry-pick your changes into the `docs` release branch. 4. Modify existing or add new `.md` files to the `docs` directory. 5. As you work, build the documentation site locally to see your changes. The `docker/docker` repository contains a `Dockerfile` and a `Makefile`. Together, these create a development environment in which you can build and run a container running the Docker documentation website. To build the documentation site, enter `make docs` in the `docs` directory of your `docker/docker` fork: $ make docs .... (lots of output) .... docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve Running at: http://0.0.0.0:8000/ Live reload enabled. Hold ctrl+c to quit. The build creates an image containing all the required tools, adds the local `docs/` directory and generates the HTML files. Then, it runs a Docker container with this image. 
The container exposes port 8000 on the localhost so that you can connect and see your changes. If you use Docker Machine, the `docker-machine ip ` command gives you the address of your server. 6. Check your writing for style and mechanical errors. Use our [documentation style guide](https://docs.docker.com/opensource/doc-style/) to check style. There are several [good grammar and spelling online checkers](http://www.hemingwayapp.com/) that can check your writing mechanics. 7. Squash your commits on your branch. 8. Make a pull request from your fork back to Docker's `master` branch. 9. Work with the reviewers until your change is approved and merged. ### Debugging and testing If you have any issues you need to debug, you can use `make docs-shell` and then run `mkdocs serve`. You can use `make docs-test` to generate a report of missing links that are referenced in the documentation—there should be none. ## Style guide If you have questions about how to write for Docker's documentation, please see the [style guide](https://docs.docker.com/opensource/doc-style/). The style guide provides guidance about grammar, syntax, formatting, styling, language, or tone. If something isn't clear in the guide, please submit an issue to let us know or submit a pull request to help us improve it. ## Publishing documentation (for Docker maintainers) To publish Docker's documentation you need to have Docker up and running on your machine. You'll also need a `docs/awsconfig` file containing the settings you need to access the AWS bucket you'll be deploying to. The process for publishing is to build first to an AWS bucket, verify the build, and then publish the final release. 1. Have Docker installed and running on your machine. 2. Ask the core maintainers for the `awsconfig` file. 3. Copy the `awsconfig` file to the `docs/` directory. The `awsconfig` file contains the profiles of the S3 buckets for our documentation sites. 
(If needed, the release script creates an S3 bucket and pushes the files to it.) Each profile has this format: [profile dowideit-docs] aws_access_key_id = IHOIUAHSIDH234rwf.... aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... region = ap-southeast-2 The `profile` name must be the same as the name of the bucket you are deploying to. 4. Call the `make` from the `docker` directory. $ make AWS_S3_BUCKET=dowideit-docs docs-release This publishes _only_ to the `http://bucket-url/v1.2/` version of the documentation. 5. If you're publishing the current release's documentation, you need to also update the root docs pages by running $ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release ### Errors publishing using a Docker Machine VM Sometimes, in a Windows or Mac environment, the publishing procedure returns this error: Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2: dial unix /var/run/docker.sock: no such file or directory. If this happens, set the Docker host. Run the following command to get the variables in your shell: docker-machine env Then, set your environment accordingly. ## Cherry-picking documentation changes to update an existing release. Whenever the core team makes a release, they publish the documentation based on the `release` branch. At that time, the `release` branch is copied into the `docs` branch. The documentation team makes updates between Docker releases by cherry-picking changes from `master` into any of the documentation branches. Typically, we cherry-pick into the `docs` branch. For example, to update the current release's docs, do the following: 1. Go to your `docker/docker` fork and get the latest from master. $ git fetch upstream 2. Checkout a new branch based on `upstream/docs`. You should give your new branch a descriptive name. $ git checkout -b post-1.2.0-docs-update-1 upstream/docs 3. In a browser window, open [https://github.com/docker/docker/commits/master]. 4. 
Locate the merges you want to publish. You should only cherry-pick individual commits; do not cherry-pick merge commits. To minimize merge conflicts, start with the oldest commit and work your way forward in time. 5. Copy the commit SHA from GitHub. 6. Cherry-pick the commit. $ git cherry-pick -x fe845c4 7. Repeat until you have cherry-picked everything you want to merge. 8. Push your changes to your fork. $ git push origin post-1.2.0-docs-update-1 9. Make a pull request to merge into the `docs` branch. Do __NOT__ merge into `master`. 10. Have maintainers review your pull request. 11. Once the PR has the needed "LGTMs", merge it on GitHub. 12. Return to your local fork and make sure you are still on the `docs` branch. $ git checkout docs 13. Fetch your merged pull request from `docs`. $ git fetch upstream/docs 14. Ensure your branch is clean and set to the latest. $ git reset --hard upstream/docs 15. Copy the `awsconfig` file into the `docs` directory. 16. Make the beta documentation $ make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release 17. Open [the beta website](http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/) site and make sure what you published is correct. 19. When you're happy with your content, publish the docs to our live site: $ make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release 20. Test the uncached version of the live docs at [http://docs.docker.com.s3-website-us-east-1.amazonaws.com/] ### Caching and the docs New docs do not appear live on the site until the cache (a complex, distributed CDN system) is flushed. The `make docs-release` command flushes the cache _if_ the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID. The cache flush can take at least 15 minutes to run and you can check its progress with the CDN Cloudfront Purge Tool Chrome app. 
## Removing files from the docs.docker.com site Sometimes it becomes necessary to remove files from the historical published documentation. The most reliable way to do this is to do it directly using `aws s3` commands running in a docs container: Start the docs container like `make docs-shell`, but bind mount in your `awsconfig`: ``` docker run --rm -it -v $(CURDIR)/docs/awsconfig:/docs/awsconfig docker-docs:master bash ``` and then the following example shows deleting 2 documents from s3, and then requesting the CloudFlare cache to invalidate them: ``` export BUCKET BUCKET=docs.docker.com export AWS_CONFIG_FILE=$(pwd)/awsconfig aws s3 --profile $BUCKET ls s3://$BUCKET aws s3 --profile $BUCKET rm s3://$BUCKET/v1.0/reference/api/docker_io_oauth_api/index.html aws s3 --profile $BUCKET rm s3://$BUCKET/v1.1/reference/api/docker_io_oauth_api/index.html aws configure set preview.cloudfront true export DISTRIBUTION_ID=YUTIYUTIUTIUYTIUT aws cloudfront create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.0/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}' aws cloudfront create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.1/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}' ``` ### Generate the man pages For information on generating man pages (short for manual page), see the README.md document in [the man page directory](https://github.com/docker/docker/tree/master/docker) in this project. 
docker-1.10.3/docs/admin/000077500000000000000000000000001267010174400150655ustar00rootroot00000000000000docker-1.10.3/docs/admin/ambassador_pattern_linking.md000066400000000000000000000132361267010174400230000ustar00rootroot00000000000000 # Link via an ambassador container Rather than hardcoding network links between a service consumer and provider, Docker encourages service portability, for example instead of: (consumer) --> (redis) Requiring you to restart the `consumer` to attach it to a different `redis` service, you can add ambassadors: (consumer) --> (redis-ambassador) --> (redis) Or (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis) When you need to rewire your consumer to talk to a different Redis server, you can just restart the `redis-ambassador` container that the consumer is connected to. This pattern also allows you to transparently move the Redis server to a different docker host from the consumer. Using the `svendowideit/ambassador` container, the link wiring is controlled entirely from the `docker run` parameters. ## Two host example Start actual Redis server on one Docker host big-server $ docker run -d --name redis crosbymichael/redis Then add an ambassador linked to the Redis server, mapping a port to the outside world big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador On the other host, you can set up another ambassador setting environment variables for each remote port we want to proxy to the `big-server` client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador Then on the `client-server` host, you can use a Redis client container to talk to the remote Redis server, just by linking to the local Redis ambassador. 
client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli redis 172.17.0.160:6379> ping PONG ## How it works The following example shows what the `svendowideit/ambassador` container does automatically (with a tiny amount of `sed`) On the Docker host (192.168.1.52) that Redis will run on: # start actual redis server $ docker run -d --name redis crosbymichael/redis # get a redis-cli container for connection testing $ docker pull relateiq/redis-cli # test the redis server by talking to it directly $ docker run -t -i --rm --link redis:redis relateiq/redis-cli redis 172.17.0.136:6379> ping PONG ^D # add redis ambassador $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 alpine:3.2 sh In the `redis_ambassador` container, you can see the linked Redis containers `env`: / # env REDIS_PORT=tcp://172.17.0.136:6379 REDIS_PORT_6379_TCP_ADDR=172.17.0.136 REDIS_NAME=/redis_ambassador/redis HOSTNAME=19d7adf4705e SHLVL=1 HOME=/root REDIS_PORT_6379_TCP_PORT=6379 REDIS_PORT_6379_TCP_PROTO=tcp REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379 TERM=xterm PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PWD=/ / # exit This environment is used by the ambassador `socat` script to expose Redis to the world (via the `-p 6379:6379` port mapping): $ docker rm redis_ambassador $ CMD="apk update && apk add socat && sh" $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 alpine:3.2 sh -c "$CMD" [...] / # socat -t 100000000 TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379 Now ping the Redis server via the ambassador: Now go to a different server: $ CMD="apk update && apk add socat && sh" $ docker run -t -i --expose 6379 --name redis_ambassador alpine:3.2 sh -c "$CMD" [...] / # socat -t 100000000 TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379 And get the `redis-cli` image so we can talk over the ambassador bridge. 
$ docker pull relateiq/redis-cli $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli redis 172.17.0.160:6379> ping PONG ## The svendowideit/ambassador Dockerfile The `svendowideit/ambassador` image is based on the `alpine:3.2` image with `socat` installed. When you start the container, it uses a small `sed` script to parse out the (possibly multiple) link environment variables to set up the port forwarding. On the remote host, you need to set the variable using the `-e` command line option. --expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379 Will forward the local `1234` port to the remote IP and port, in this case `192.168.1.52:6379`. # # do # docker build -t svendowideit/ambassador . # then to run it (on the host that has the real backend on it) # docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador # on the remote host, you can set up another ambassador # docker run -t -i -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador sh # you can read more about this process at https://docs.docker.com/articles/ambassador_pattern_linking/ # use alpine because its a minimal image with a package manager. 
# prettymuch all that is needed is a container that has a functioning env and socat (or equivalent) FROM alpine:3.2 MAINTAINER SvenDowideit@home.org.au RUN apk update && \ apk add socat && \ rm -r /var/cache/ CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh docker-1.10.3/docs/admin/b2d_volume_images/000077500000000000000000000000001267010174400204505ustar00rootroot00000000000000docker-1.10.3/docs/admin/b2d_volume_images/add_cd.png000066400000000000000000000657271267010174400223750ustar00rootroot00000000000000PNG  IHDRHme.PLTE.l᷷m游ܾòŹ󳳳뚚ɼjjj񝝝'd0j^9pVSgggJz\CuV?||{dJarPPPBqqsHP(wY/yF^NBM3GP=:;:```|=лË]PTthorBBH+-;u`%jӖaZ`,ЙR2]:pw:YYX׍?>컷mB^*mZr1uLh>߽~1z3r U |KC| cd6?O;g5}~~zvV~sرoSLX+Ъ[MHcM147$e1|Ƿ =_ŤZa>%j Ap P,耺ho0ydiM]6?0JER0ŕ-ӊʣ U0[Dh0VmV_##OiT9TkN_HQ(}$ 5GCA3Kt8!iٚ>UOGz T>8rqۙcBZߒ %-Sj`62Q8t "g:ue/yQd=/?1"JEVw;ف?F:QU4ťk}ST:bN4qрfRY*U2)t) Y=y"ߖ)qH|D<C&E%XY77va:n?;.^o7u'vɯ}hz"߉CaZ?-X?|QtXaeUOY_;ϸ%fVAhvShzPԷbvUPd'  Ib@j5Y$)EmLVy&T!yctȹˈDd ʙ"}~-9IB[NI貒jP?ȁ`!봼wyZdG(/~קQg^|*I{#c+ ێAZo}*zߨ=Sv_drfyuŜ@Sa 2Ȼ'a9tU[NE'i}f, ڶ7RPNʑ>xJ'sZC,(OhXV韉4ODO˶ں~EUO}>[#Ov%̔e[-0W|dk3 Z)(fD^X%u$@آ ϻ!D6n:t#ndDvpl Z ӲbS p' k mhmuwJ̊nU,]Ҷem6"(T ub$1FlAUK4ORm9уQ]5c\T$XYږpY4D%4oXr )Ea T}Zf>I|{rlsFIm9@7z!p*iy.|IjUEI,VgMPp__m u6uǧ\l&Mni֬6*$S!KBP؍?%c\ǁzذbOzr GQ"䵿kqq5jd]pXV RDCYuQ(X\&ڲxsJ~ ݺU"( s>*{[eg󤬈o#8d7n/+E*:,LJuSIA?;1CW}"4>-8Mu!$6I?+0TP*\FAbgG-yűBQ|w🁹h3C?Kvж09_&g!d:3GaridGòL^ۺ=~Z=O*U"9#($bc`%vQh1&$_˜/uؐ? 
J}i ?~v-܇=!?{>: 3<gA3ǃ3ר 426:|a9y#vR+^a؁Oښ1ϵ་@x~"Mg>G|֋=Ez,{P|N^cD2ΌmgCH3k$FV2#"Y(#BQ L/D13 MW uG޷󥑳J=:F"*ڍtϡf.7"40o]сMJmdGG=WT='_`S|?سW0 ו~>:)#Tqf6yԒ.kP^&_8~7R-63~8;;;1l]nA͛7e"n:La֪SIjҤsRe&?ݾqGW:|u/_]$&Vt*М[7Ưݣ3:DG~a-^Eh)E]cf"ۓ d/)7R<߻]kf6zፚpI!w3=8ɔ㧿",ߪN(SF ps`uO㲗TjbDLT~#矍Yz 47ҥ 008|-?[iU{9-'F: _qdRKe~#Ͳ-^}wͿ|Z|:_);0R8ހia7𠄸n~gQzͮ6$gMU&'*ny|uqWݰ_HF #ݢ{0_e$h'6R &;Db{j寸P8DeY +꥙LR)k2$AYo͗Fj_%'7B\s\- MSɭ"hv{+S-jk;|3jՇL_]+_lrL z4}zC}1&bg_bF2@_.ӧ 7(C>_( C꥝Gf^B3O+#VJۭ%B>B}V0v#/wl]Îwymt4|Osiά-/|T?ZsNnMES|)~)Tm_]׼lo w]Q9}w`Q24MH^B0 EpŋSFz!d!Z@0 ^efu| gQBH k>-WY+Z|^7}OEUtzD%-֏FR ]I@0yH;ZQj_Q+8%Bb-Bha=Fog沩 HOᥝZң(9~ܻH, MJQ?>*fIYᤫ$㝒eJۭP X W3cIiKTaԼhdRIF?A׮m(b q[2 %s@Goo6zuttHSʽy 2R #6p,?S;x܃_je>c&o&hp._A4PѪvu 'mUw?̓9\S-EVwͿb%c7|!RvbEZ|w=WbE2EϤI/jQT;??h`K Ues/>5="pHv3-,,,_iΗX_d! 9o\{pϯA4{J%5fafx(z.q,6_@Zo3EZ(sϵaϯ|XNXB3zmF8>Z_TI O=^iUe+5_5&dR(R)>Ċ8 S?H.0\H9+R ۃ Hсÿ9r-T ʛ>{n0v;gp) =Y=7IH+1҉>?6R"ȣDÇz. tb3YK-do_K=B]6I뇅\TYl:r+/H]qmmh%PL\6bc_.S6%A Qw?_0"oWb kT)Uy4: dl,˅*@uMUۆ /~!TN )J /bBih#-zc\[5#-*WU|-إhakp) Ñ`t6Rk(2xH|a:# J9TbX#G0lR^yT>/tj/6yoH:ge]`m&_wn&t_ Dhmg+GG; A?PVY8Rl$%_Q9m]!0*_((YtiH#-0R"h29Ѡ$1Ϭ#$4w/S7RGׅ:~5c# > N;G)[Z%`ӏqNGXʝ!bc1?gȓ=N|T6+,$QM5_`#=i|EwͿFt-7\}|&4K}_F1v| d u4g7!z7Ќp~߳nHÝ;O^KCxre{*Re~N!b& ݜt 90RDܲOo]i i/Tr[s6r| WX^~iq9PJ5ʟ}5Ro:|MLNٶЭ?I0$fp{v aӴlj uŒc7ۏd}-_8Jr̟N>il\fNFz¢ZKa&|HGkxGW@o2LQn>w-^ 0Ȏ!Տs9 oVGQ}^J3ϷZJ/mk5!X 7.\ kAB%?MOHt~R@~;|>[Iki(A&ww8'FG ׋Fc(K7͛c 8,Qva6:#܃z|\ǶFz4Ҁ qe Zk5eȌiV,c 6ɉX4:P2xY(5D4%pGNH'o S&%nc$R8kDznոDKB]QP/n6JoY|86.[)hF h'Fr4omse90^Re|Aa[Ih-|55);tjcM9u P}c2܅"(9@`mmml!zm =YJ81ͥʼnمBovVꅇ?vQr((&=P|u6װOaaݸnd#F9P(j0.̳b#%w3RcO,O\5C43GB ڭ{rFSSW0E -bT>Pرs|+X>Otr%H54[| ?saP}-hWJOW׿]b5a!cD6 8@QPĎKHE85K_?~i#@cE[ƒxcѹU8p)WuDlq7F~XxH7J֊/^qzk .;'"5 ͧ,dzmwtOЩ$)윓tx@W׿=b> @ _ak5>}E$|}95,8bVN f C[kKKcKkkKt\[ ݵ5D,gꗵ@H? 
qV60@O|!J'x ^N4/xl93XgVw\Mŗhz oyy _j ٦s.[*Tֽ7XXb))ehD$dn*ov) )|)G1@p$|Pug3L!9q:OUQvqYۺH:?-3YfIr<E!tr[QKgmS=U3Iҟ|޹o6!skȖ/Wohg.I%QRbt9q~L8?s?Wf3TIO#mkmy$ӤyfEAU+tO^~̨h'GUsn DH)pH˽vor<CTV(1Pr^x:Hf a`zh|i絋6[fbݖ nP9YMkPK‚~SKCB"qf=[u+ҭwa$yiU&4&k9_7$mKPژ}ijDx\ʄ gu%&qrʡĀnV˧>i}&n&9FEϿo?(irgjIF9<>Q~(V dG**:*pGP ǒRd'iXQQU~ί:_>MX 2IJnm$]7@iRMV%-i>=]l|6A`nd/$`0F&WZmV~~Uϓo?Zg8 wR|# v4lsɤ}Tw1&钨/1h;J'[ q,mR]ԗ0L7*~<ޝcJaBBy@``R.3[Џޤp~ԝ_~97voDgn_=i04P7P COH;o?; d/Xadm߲O00&u$ O=?ݽc7x`N0wB~,ú玍'&4T6d+*qOPH~*ܤo忧0ȤO.cSsA!k Bo!k>5>n?IAӆG?__0h"k)Tx52hQUx8CV XF[@vz]ym50 Bo壐mҎoQM! )x aGJjZ P8;X+N׆}AX9mN?@~42hvD׊⓱&Xm:ؽV-~ܩPHA%c<:Qg7o*նL>d< BZ;6`q*V3FXyO.&T_0dKu.N~:w6Q$KіM_wEP66޳ߎ&T7좚iIQHH(mIBuֵ],)g>I"t:/qe5z]ߏZ?*D([xwBv{q|A-v1@ Yo2_ꝓZ+T[Am7cL5+I,׻7J>|!Y0ȩڈoeV5Z "MQ=wȱgnw$8&H{Wy??/(Tyi46+uܨ0CpqՄ#ܴb.2˗XU )Hw^@96Ē@C }}S@OWz/|HF{v\EsLU#QXEuX~?>/aVk!uMa] )qӔM90&BZ'_$OgUuemwinr[`" "7_ k]b5l9YB<䐧o"Ao<3jz"r q4fo.^]ջa}z˟?vNڙZp q.#fč^6V!@UDm7M:nB S},}AQx-HMY>xo1%tZv* @aGJʨ\FDe|GB!QPOj<4V_ÔT^~O Cfj k ZB4GQHs=~*PHnTAdS #PL*5.8qr$݃wDt6CoXV(b0AͿD!Ed>Ř(լMjRHBjG21rdgO1hNMEK#/D`&;xЍz@(Rl3*T[]c>pR&8[~r>cX{?7pCbNUדjXE! +hC! q[վ Wdk֫yt%17 GةLyD,\tb%V8Wx x5muB5X}e-rDDE]L226ւ`׆)#認61,0J{|#l9bfȃ_+?{}b ,dgO0_6F ,IØ@pMO G0Q%WD 0t'qmDKt.-bOpF 45mc`gDoZҲcSL槲f ~nBF)YȃMt@xi?ySgyR~eDbuy574\Ц-岣wGFx.,\H{&jnuid:)wGr# o0%\չ-j|G*FND\TȲSY RǓu~(qnx*r<g&qu]rTrGĻ#slV3ŕq:K#a>ّ, h9)LH|4#;b,U* XN IՂA=_^.%å1IB6Go% N6'!HsKl=OFj܉/&1Z1\dzܪ7j!3KPDa&p*R>􄹣$&Ad=|txg6a*YriƈWo1ߎ3+S#94C3\ȥ70{<_C#ed.0F$ ٕU$SX/3#gVIlT=P%S>V}+ٲU'L_&o8Y\e+3"ه 6%\Y ɣAEDuyCЅrČ&*E,ܔ$ɻ>k7A丢(4!F&vY%I D{M C\ݿ5LJ ֽ꘍|wڦ)lr޺g-+C1Wp"W/_},Qڰ tzҖ) k>xN>~>̬}b狟O' !% IH$$@ HB$!!IH$$@ZLZ H$´ &nX!_}5@2ooёIl<$t}sɚq=tJ ߀’+V߹YAoKdVL!Ƥ!/fP*$z(= P$ I65Z%N[t(lri~ VZ[\ԾqZxaG:U4=l;s%(f 3%i>[Lz&9 T-&O4L]hP%s$h\qć@ sb6 FR%VA8:!]f89.z Pt2Jq8/+zFn*&T>1ޑi9s<(&;-8H_6qgq 4[$g+MWK\qV)>^z晋ħHA,X$5\)RD֔lq͖܂IPx I  OH$I$@ H$I$@ H$I$@z{h1@T D^Z 2W%ŞVe.6H7WbO+\tH$IY$@bu=?ҙgBqY{$iӅi.@2 ~= Ov$|Y޵;لՎwBqRޞ17Fj7Y.d+y#D*H4*,}~,h$6r2A:H#rv!I-t_yC3 W Y^C! 
(9)X`#I_m `i׷D@z|\=7V: V[*27 !`|zToi1 [eB馫n#_y=8HU"볗H//soH<tj>lhRaSS]fs aMe+ Mu3W bޡ-ӌ&]ĿJ>^nm0ʟtQǡqOVBtdA7FVLR#ߢ:׹pqth2:xRAjIgGUN {Mņa94~x bAC7C]*&C_1 Szzd͒iqrȯ@kAjd (#)mSft-V?H< }kSItX w4bbh{7Tt̕P|*9H7G:obʐ\JG(ba.Ԏ3ȋ伣tv 4@\~&_q~8Gz?٦w.6&o qrwN[+6 SG$HHo[76|A:F2&vm|h_Š Ȟؠ}ëK$~Ӗ?apL8onkOXX\ yϢϑ:uu^<5941C jH@D*%&+ABNU %/˵\L3d:mc7ީ*4fQm3:EGP6gmR=F$iڸtv'z gs}|_z$N,YnI 4D50%@"<HR)tHY[ߊ=m =yH$I$@Zj  ź# # #/'} HT; #銪*ŦuYB:u7Rs}Uu_ӈ܀GjMjK Ġֽ2H$MןUSJͮm~GcI<4ՍqD3OA9 "jʽ[ G3\͓߼ 9 "SkkjkOI2A%#6/_YcڀyxM(3U"CJ16?@uY ?'.ԑDIaȅoLIht6:MƳHoAoF"7Ù#݄'=y34d* AJCNP EmmHU>/i&{&f#Oh,Ս ZMjb\ܟb),I >OmP743alrMh ^QnM) A3]gԹѠ۷ ISv銍=߬Uù[<ׁntG$L "$ngB4lA7h \U ޱOeHhbF 1TWTG-Trsr$=M{N6ި"QA2-$`em$W'[ O H;-nS*K ӑPIϯ~>)RƼr ! 1G:̛AJXt잞> ]=UU ۑ &7bt) vO4@5㳬k,ݮh#1 heOpT/56!dhr%`5E?%* $HP֊%bM 7ݖMzl$@ ZZRpqbER,;)c;+Oc@!q~#-U'Q:ZlG e"HHs"_r$c_F)zSo ˑ\$ۧcHdn1eyi,[nh\i~qksfGHHR|S\^g)HO+mdzIIIE!Jioq"mv ht[)0FЖ|Hpz7;\Z}|r5HQJ 5eEqf# O|(Yb@GH{'4ڨw]ƒ6T?XYf [W4(E~e3"܍ҙHH3G E e9 Q 󽺬~B#ζI,=4x͛~dUmUxJU*-ٓ.)@k:Vmj*9Skkh']U޹{`frFc3Q }R]ո\e  )iJ+lmz8]|`OyT嗟4VqB֦wEH;/+Wwt*(Hlȇ zq@@5a#Αڵ~5.چYegn@PF-ZF'_qhOr0r ^KBm?4Y3;9.S8RFBHiWYخiyڱo&J֬2ʘ^Ow>ƳyMVZ|Tb2LP>BAxaBRn er9H\`܀>z&>G2P,&HQ ҁm|tEiށ'0EblgI)$gzlblo9 #QǙ#q#O8RԂU_$Բ Ӄ <;p=ӈz$L]z8`ŕ-w.L7? '_|%)zA*i.en/`E f<{H5v64Vap:y>]nMS G^޹v'ߧ;YթqsYy+;;g>SƆ%V,@O(}pï}m ҫoW' &񾶥RrJzR7$E^[p 0@Z6L3 ]k Laifb-r/@ymþq|q?~~S 18v@0uL#5r\P0FHbABW J\;|R̾s:$vޮ{hy-R ,kX3KI0$ [iqO#{mkfrZj1p-VYPN#ķ^[5,G HG3T$rtW$p"-U(g‚~Ts LTUCmx3Z}x4ڼRN<1 K34 \R*_Xm$h #$%p! 
UZmlv&G R(ь!wHڭx )Y{$w/ V,s_+jķOx88s$+H eW'H|Ő2lviKyCaM>u}xQĸ!@ߑA`4jo=ggt6 :f H$iWBG(ZDIyb"$MOQJa[h{6!uOҌ!$}pVG{ $H c·tk{[XR'* ͲR^1 ڃ(ӘwiΤu'?oe¨Jy4 $rtc&3&t)4 C@MndRYҚB36~!1$Đ8 !qA K@`FFiFjFiFFiFiFiFiiFiFiFiiFiFjFiFFiFjFiFFiFiFiFi^iFiFiFiiFiFjFiFiFiFjFiFFiFiFiFiFiFiFiFiiFiFjFiiFiFjFiFFiF {fc'=nznzv핑63ɐJBڄJI*)uD3L0©Cھ֨} S+Hm6A9_YAz eŨgnYO ERe&-^=8-R,@"p^YFݝ(ID 0O1KW8H+HQ eQt)NzZm3T%KQ<^4h41F"1z,$ oPVW4H+H Jx$` wT0r)"G{,Kofn3 xk Aa$ ۀJ P$a)OAM)XO\M4u0.$GۄM{`OY2Ia|A%DI`+I+Hܷ?3H'IG XjKE*%7AA*HmloMo  ݶ HahZEfq҃)S΃&TO/ L7Qf @Ef$OI3;y%p }cA@1Al@[& (bGnU[ݠ 8P7DA?ğn Rh ڞ.uKv]']^b6 8 8lznn92H.AKF<~r]@YkݒhJ\j5EQ @<}Uɧ>+HAE4EzH." Qri؆b4(NT{j < 7c$Z˷?abȷ ұ ;d#OKU1HzRt4Nҁ;0)<p^v&iR!`R"(>x)$ce)C׃LՖ0}o 8ʥe6?N r[ؾ0 9q|_5@VWbw-zzjHe|=HoW 'a1>P.O)g%ƃt2W u ҚpC 0HAr1vJF;)wӵ#U d_m] ґ↶'H N /n@*d{~׶?)GJahIj}ۿRŧt +\SVEtҩC ) Z|Zr 4ۃ RIjPwH=O%xqtdIԦ?>M\L mC5۝c)^XV/튱蹸,A:wmLZ<:iTUS4w ų 5ՠb.GNNHfzC]ƺz`3׃^4ۣtLᒚ(cz ͏n[ n&He7^iM0PA׸/'v 1~X @*km&)D&τ&^dxb9 O_|,a6^A?i=  Z*D*2>Grpq}|>\AFu".><&RpS]r p.-_٣ReBm7fґismplw6!HX l?m(KW$SE?_~z1HK P(vIݎ %67<ˁXc-&y=yLZfe}8~d Ȳ,areљMHTvPB;qC,7ҥsˍeHn,)k|FK#D|0I;7N} O!fyiyybHZ$HQw6g$Ax# #M0ԮHqz= u(9hea~$܎4{cb[[ '+Azٱ?E4-,o6=U6N:Te[%U CfӼ!UfGm@`45hYVraU3+5ML'+:_5EfxӘgՊRs 5X #$Ux Z5Vgv|Vw&R$iuqQ1rE chL´ -+w,j_W$"V"Hc_Q)5a6LyɊY0ц <˨G5(`G U/>z1h($"#a87Cf&LbeVG`ɣbmc$ ߲9;8c[,B_|\$i CU-S2m AֲQ ,`ˑ 0TStC`Yƒ nL u9ˠ1>8]`YTfqLoVzF$6Kp@ "4`z=utfj] n6Zu 4H|oifz}傆u]?GE-& ĤNBPDg]t li?":ڞ}( } zpx1'v%Ho<IGoH#I$H?~ H$ABvF~ss$A dWַOIt\q1H鉍N $AzV<-˿ٹ{68^8mѻ^KWR# MZE)-bnB(D@.:R.}9|L)Ax>Cӗ' IL&3Q5} bΎlYYHR%&em#NLS)_nѹ`de!H+yb5 }I@KbNqM|BVR'^ң$dR=]xyo#_M_NB, ii!u#ł}F%fmךRWRۆb pw$cB 2/ 9?tS۶8 Xk[3Ana(.|\vy:y;$ѩ FC sҪY8$PF[Dz[ id26Ȟ;&6 eAH%_18XDQ̘UVГ6 `mV"{A&|lt+ D`O+Il&ݫYΒuH铤>#)V(UM)^3f~fv+Y~•epxx0 ]%U},Ce۪iah #}YG t\SI,g9s٪5p]Hပj*r\4}H]yJb!r9Ӛt[%!]y/GG LhƕJb*JrziEHŧXH?oKP $rx|||ư9?$RlÞDqz6i VdS+H3BLCN69 (>A\˜/謫>il)N&,%;? 
ͼ1Mw F/dR#Gʓ` Wj]иA8?PIPPyHQCG7ȫ[aNsmON#6T'\A7t$Y<ۘ(<IJg2D i1Lcd|qEu,{;;H"Ht M0ŝ Ht ʦM\6=_@)+>/ۯ aPD+{g=i,ٙt$HYet_Abu@fN6< 1H]¤I9D>|I8eaip5,)_}*Qto~o}B "H NA&FցH F=f@QEZ*#8t E!N殒._ dE:j?=n}w!wPVAh0H0,sdz#(Ρ"N_aG{G)]:~>"5󮍯H]"() J`|S +΀iSK& 0=~יؕO۶b}Zƃ1rc1H8 s^0F= }BWXr6,@ I%X>jXenFG dE>>8awX@FPo[W\"ǥWgfSDG %HKpǗPY^}RFT 6nc"Y:e.59x*:s/YJ_?5g3c'sqDeY 3p1H }nV'Vu4"J$/iFc{ SJg͈dEk#0 ]kYH?>HV$+@"Y!`E"Y/"۴wo?,ebSD իN$@JHτbfg@) DAI0lB5&eE;޷o`lwfɛεN"V7Z=W+<'HxKpv**l"}6"GErDEj5_}zL"Ysi@O4bAYEVA:*g)ynC͵4IZIfEE(Z-fjYT׿֯l)"*`Vf+͵_#gs-"*ƫĸ+c/ HiU}e2+F$7dQ:ml{@<nTCfEܸ ync1whJ5dї(xG"JĚ6~qTRDz ϳ&V3xM].izHC Πkx\"2ɐSP\cmCEZ+0Mo nF*#CVzM"5^"1!8׍ŤIELmVJHk6016Ԭ_"ryz-Q=n4fET0 S .MHչLV+D\ + _Iu\t\Db\hXhf~B\/R5\4#.p_cGX,nKx-e"cM0GDq"9-It:eD*-d#':ϞTcXX6 ihň`b"dAm\.dDEH-BZW'nYH\D~pv^F$)ʼn''j$/j7/T7m"ZFƢ\~FiT3")54v:EkY^$#VBHj%"U ;dԹq")/lN'ru8OlsI3m6iU8*ZWN&gAMMDwHIhCrr5x#K g<sC`vE0ˈ_QUtkǎ袜ZyoC:Yn"%KV6b{#>h< #,6 IJGν@iNHq22k$yj!1Mq#"N'(b1 [D}HY)IqFJހMvf;zta˺ ]}ܶh!v/³cmeDi(}F*RL^% ԈXEohBz:B 1RٶXTLN$v8e1_]Ǣã{m#"i'WBr*p]{GF$̊PF) HeNHQ F(F"ŋ^xFEq4N(gZX5+#RJdBP.䢧@($];_0Cij#!vKMS ?uH15%-6@&="[)DbVrEˆڄȸYGcH\JjE6$eEڔz3.QK")Q~ˈ$ǭKbwܵԵ&I!-RD_qg;PFTCQ_{$-e.4b劥)NR <>HX!HjTK2EIݶ6"Szg&7s^$ DÛ;÷^B]2""h3+hghs(Rdݜjw7~%P=QGb?MмYWQ aQ$Ẹm\$expt9Hʧ}HF0+'k`^;D\I\JӛA @&/#[hv;~$&{/|\l 2L,}!+QEMl۵k]1rLq\mh8pH~~qi;Eۿ&m(IGHUI/+"G ֏!/ ^5knr(Gc8&| 9jcw{NigfQUHƂz!ĩp8_KL|+udyjmv:BH1Dc]!^m8sjbb2 VԪ=f 8sS vSh.f&럒ti}+բcLZT{[j"%]3ӻzy8h" ^R)֣],<2%fh  ĴvsZ3ʨ]{ВJSL8a:ZC"1Jixx̑LאBD-PEPHsL@|jvΎDu$JEmΐY4]'IENDB`docker-1.10.3/docs/admin/b2d_volume_images/add_new_controller.png000066400000000000000000001064041267010174400250270ustar00rootroot00000000000000PNG  IHDRL/tPLTEmȳ.lֺݛ鵵ܲǫʁڹ~~~iii1j{{{eee'dRRREvrss^JVyyw@(znnn;qN~[IRVd[8]\\H N=NP0{.r@>BBB轡v:V2]m$iONNkG}_l/MTrg6q^^2{OlkЗGxqT+՘h‰>Q<䳋YUיƶr;uS+977SZ`r-`gں|ɚ=-YւnO׽4@^b 1\ϿqʥYpCŠ8>N԰[}"eu"#65x{[}κHJSğxծnIDATxUkGЇv6WMos4]񐴴'Gr/yB(luOCE0]c8H&ĖHrjkb>HϸVF`|> "q ڿ_ο^q୫?94S1}}nޙ+.lZZ^ J|܌|/2v=BNmD\9{s`?8Fu&ݼ,32wʹJFΣ.1S9'.~s``0fw8n`a(Aܿ'@{]G_1|ޅ趾[W*W]mK7.M Zq?L03Н󋿜}`a;x;_ HNh܁ N{Ӡt58c͢/1-< 
s;8w)f20qf]Dm(C|zK3/wP`x+$pF!v-n|? ᡃ h0.Ŭ5i7 #1gPnJ ~uD x`:j H'BJG7;XQıl;Ҩi]t4_GE޳4j5WVV.n)*.U5-+IRCQdC;ۓ$lR[X*&w=i? ' N% D`68n[+:j{n n'X5K'UUIJܪ븃^um-Nt[}ܔz%i-?HO#&ڭL3{4"HI Vqr {X8MCΰ6=h0 sI}m_ "ELڌ~ڋ;fԝR&Ъ7 YoiY?e\7{~>.%~V|VjWHU(ʝ aq7l8~;Js/ۺZyyUr)lh5HH (S͉ F9,KnxxHe¯%R807.{@W{ ;Wo="H*s:-]oRH,%2bl8t% (Q ԋ&*FRش?%N]5VqqZƠ@0-i@MЭ&pewnP )m,/r2/rVDKWP({a*AS|7m-ydmX!>_.-2f*awp#sŘ9R=Õ.+@ afR*ma븵zoR{e5.שekfļlL/C%Z;UKoo"hMݏ.j{sт|LWzcdTӷٕW~?*?&>,0n}V3):/A&İu\>VryKEK!mozz+Gt1/\:Xce26qΥ@D GU׿̀oNѸQ9Qj I̶%b#H3CysV_f|=I5R竂l'2hφ"Rd6dopB= =Jq%^(Sv O(|_xQ9nl5=1siJJ!Nאs䘐!4%Jq*~Di\G?!N@#!(C$jEƢfïfԨxץw7XRy$&YmPC3J : kY_c $b'G ]['fK 6yI;.;>kgU NœNppQ]Z -NZki,.މ ,sЈW _Xu퍡 #Bɸe [T)$@M.G"of\Lf тq?ɷLiȲ?4U=%z3}Ӂ.ׇRT96u[[RxK2ݸvʙ|iv%&cxli0΅e'\́@v.,J(ù)碾ѿ<ﳖXStڛhֻHf" a4teC yz].IC%J#MĥZvgVjC~^/;CkxQ5[]X]ojZ1Pxӗ7ŕ17V]* ,w4g~"\- 2\uR'7o8߻Q9\RE LSlOfg~5ML!H/=; n"͟0x hޟrգŞ[ǵ#h5pL&&Zv Z+.*Qn>tʄ 4MR[[Ծ\z@׿K'PțQŇn\Y@#Ez%Tɯ{Ƹ6i7ST`P ?D?OF"IlمZfZ^(gz`R_eA7jn:]Ji MWVwv /;w{Z'o̐7߫~;_nLL\Zyֶ_MCCl>LԲمSG|C 5X E_$ubfzl0M~rTQbf€C fm 0BuLAMcn~^},nJ٩i>Ң*38q~ohnI׵;~K+?evNc8?++@9a|.,,:;R2DNSIZT<| ^ZqْFid _ȃmZGfؘD:L@|f]Y#Xۓ1M(Nvcn6ScYo~ȑ7g/<]\2cvYsfGxhޤ M-){ ?uw?ĿD'8 f]tf4-L-K4ɱ q@*3(?&( E}}Ңgo⦥rg6;=Sl&2n~L;H>n$=+3IPdݸ}|pM7xUk!{{ןnH8$f*xsG|XnpbsL$UghL33b&I#fZE,~j. #aCC[eiV٥4AE"=7?B[irGUh! 
~qwsR4bHpVFمB^ܴ4L~ϖV;c߻*k{oL/IfZwf~C C0^m܋W^P( IzLq;"GEcѾ-Ly!o [孼vʆH;\>-L|}+K}5@d/f:,Ƀ]3,AŅ؎fjh0m]}g]w f`E5:&P9ixGG.-+lmYH t`lv~'_.p ,%f5 ]<퍍 (SAa^R N]}S67O,M Uhě{gww#S K!8LmH!,_<<<\}/g0SYj rWOVZle|vbb``/j6سeV[^.O}P[|9_ބ^/K2-w&XB7_~(3}2!Ŏqj>8?7O4xZ?)|t|n=-߽|ODA:M&I(^zxLˆKHG2&3S0f +G<i͐V'ibȌGl}0;,*+j%3oҌoY;PiPWԢX$yϧr'3=*&*i,R/zi.ܴyl C՟#̴Q,߳z IFE$H:U 3jz'}/@"Q$/\3?蝵juKg'oLC?.|fjR=%߽07 '/~KRdłu#H4ɼLϢgMaQ#А1-֯iA诧du"Dÿqw_~zogwv#;K%s\pLhD65d[(HqEn~g3=xL2Y*Yd*$&f[3Yk`0pgqI d<-߽0dK[YD:< \=j脆I1!D-h)5d j֗40B)Ihh(2㰘eVm`J_epulttt{v;vw7v71S4 o\Le2a5纠1vHX#|m n@p2c3`&gww$%WE7`N#xOn_q_}>}dŌ 3 ݴ϶ͨǜ-~Z4_jB 4#cУ'/@1:|]Cfff3P"Jrc 9|L.LD+>qlщVհ3mɱ6]~[V{)Qfl"m衑=Eq4N#XNO㥣\TvdHgL|^i0YXtH३n.NF%Dj8 ۴b&zIPaHfFj ;'j6O`()UҼE'vKCN|w̤ꟈф)EB s.aQLyh&P2_,JIe&'7_3槴M3/|Z<\#bK7}p&4h ^H %b/L 6xTB:/#dɟTy$cF̴-/Aor'+t551fz/9fʉzX3`];DߙRѨ$% V)i5V奯30 WhŝɛQ]WdX,-P"Qbj%:`Z"٥ E~I?=UTz쨨uw<-t~9ZkZ?6pCT2>7I;Lͧďw-~& s/?Y>ݨu@D'N3iO({|ĿUkB^jYBu~K*)\<,)EShO~j$ujnf7+&8,(5-2E`m\%qOyy(#׷v u{(n+~9?\ɟ//s7Q }/_[t>2+$򕨩gP+@d zy:߻U. F_?Zx`f??fR2[m&0kz!P)5⪩XϵF߸Ufjx$ɛi?Iºf\./S­s!? 3rYlfC!SkFfD`vTMiL XF9섀E2On W+dbnV eARH`2>&^wki5&+[jU^$ O|||>D^Zхf/,_n?=?[ok׌ΔNKnG R ݲ'Wx3SH$Akv̈`c`|ԗ [Tit 3ieK+e&[<5+8>o#zW[>ʺe7zQoIKc#XVGc/xY1>hBOğda&h!猇=bɘhM4Ĵ1x _SNic ,E83YsNkhgS,MO?,/ͨ'*-HU%53Ꮰ%x+U4+o,bD="/͡}~zCTι}USGiaSW+?YjEQ(iZegѥwĶr>.b͊j;bF1ZM_$-5̈́SbE]%%Ӆ)ZٙWpb&Q^!FPLa ]Bg䏎-f.[E`Gw󊆏Mx ւcmŶ#(kIh*)Nd 3'~ץG/]<˃ 3X0Pz/D1VVB#HԸZULwQ >"_!?Eu ̱'9O D?!34.fɍ ٪Nk%;>v_}G}R(!bˉo pV-lzg6^)N@XZs3y /QW%#&$+wp݅+|:ŊԚyi )l^I1mjWO1@2IàҴXM y47?I3,cY1"WF6 ת)4R3О`S\Jf$+L}k6̤%W!,ot5=͔>[>26RtcT`aX&uCe6G}%s%H DÀN,җ%O Pha PX:d +nG< `'$ڰJǜ^ob$wgRX*Uܿc???㫤az+~=|aCxf?' #@e q1_? 
Q^d&UuRdhYҟsHh(7@b?FE~ qW),\f۷`Ү_?׷:DLB]f(Glak9Z&8 KDV4*:e6C;.Q4Vk5`}'yuJ<ֳܦ__ݎvk jk)qM^fڢ ,1;Xx<6WW%lݪv 7*OUX3bBCB_֫ Pbw%Mot;b}$ykZ~90$q8 1i%# 8O>]4DDUAqJ(lЯYpcY/&V8pd?[áVPdSuǢ|[X?Ʃm-Y6Q ??t-v ~ fo9 SpLHR|DcjhlެVHTwV"cAmaZ آpK" 5"tGeZ9>/*ņֹJP2iGSu Gbv7|lރMrLr9y"qCL!b.B#fj&,":!~ ~,s|P5Bv%RaG\0>DwJB9Je@wxS ],Pr8<9>gdmQ ơ `Jn %3[ȼkSh= Vxh=}ov;,IYJKYEr* +&3Za0$H,ޫĈ&|`8zSeF]x*S!H#4 B7|K9MC ׋'!d"Z= FR8'}ג爱P NX$&IK"A~Sհԓs:2Zc=P_<B7Ͽ9(1-OQ^ҥzL>}/+$R%Mm`0P\Zշ0wX1)/ 6*8JuJIT<mP|+NCs%ѭ3LrQ8Nr _*Xޒ>pPPKE*n,:1 >[o/-Y{J 1XIR{E6:DEñ4%8AV+BR`M +>iM֯"s agN?{#̤ٺW|lS_֔H vK(}A2Hk&w jJduP&r|8Kd`2QirҴ u-J@s99ٜ)>Tx|c?!NR2qL.Ӽ˴$gy$gGdpL.Os.ep~|- .^e2^&d: pKf7;d#^sqihWeڱC8y&Ԟ"l#tJJL-BtDwuMso!, POCim2A/ڠ>ʦ,+(˯Ubs%?45 *Ho`#^j 8Ll2A>Lur %eLdk|gn¶gLV91Ivs K>6eX3pFclPfee}ueO(Eia1O@clӏة@6?0qYmanɘ睂 {22߰me2L[tYv8>tuv֠L>Ӑ/6DPU&n0,f#ӪHFק59 .͡L+ :Yf#+?v)PQBO٫WơR Y&9r2PW&q2;drC09d❊!@Lr`h̻_?(_K2Ս*cs26oB}4nt5e%J^ Cz ۠rY3]Lr dzT|RۿΐerЕ+)He0vȠ\3 S1kdO>uQid2)bsqJduZ[i(٢@0AжLkSdnYP8!!,gi-j >*ʤ ̂7o*XRc1)c$$dBԦkw0xgb1)c al[5Ls," lȄ6Q|~'-L 772qqdBV:CcPԑhU/E6B˔|8\&.ˤ9{.ӏ)r.g&.L\{)3kjje ɝxfjL eRi+k8 \{03)ъkM*Sxhѡq_[5e♩r^.GC}hK6OxQ*Ȩ_,Lub9Zߟ]t X'05Rޝ=bXYg2-?,h `<ptz?.f*0> <(֢6Vz~p/rS`ʴh:AkXhI,G8ƲdZ#0YxCo]_#]K?w CV*Sz{a'w~Ie*<BȈK0a`ה)]0?ezqAn.0Z鏞B].ЊLUW?z[/I:A֪dM435b9UPTEƇ&J.B8FGͻEp3ڋ+h PN 1@S@#2M&ضHLgX%_oi'+&aTht|JMvf $>`Ɇ[L:,/(:B=Q@nIKhhz  8wTt Ol)A֯f}?ʻ,k&[_F i_{,2^t]4 w_fWT!>G) `p6ͭӛ 4$S 4m'_ٔ5 럓Eeʢ[Gi}0f >R LܛKe,2rf:a*p@GM`󙭔d|(M(aGtTrb2NKhM& ?7>ָ-uP\ ?'7DRxFM2mۖ%.gsf,Gs-umùE,S)l'9Găb4o3|PF.idӂL8=g* oLqT}u`sʄ=6ilߔL*oBs( |&B;W/c*2Mf{Y2Z"6U3I.O&nk䲋oA&D ݵ84*D6hg) eZj̔~GK&N% C9'a&ml32!uB"aQ2M7))2Bx/ [/ ~e|ڋywiAe:\L\n/=ҭ$>AP*Ӗ1<3%.S&FY%EqY 8bec2l/;^LLsgl>FRS&L*< bRlsIw%6-6*@ ۪\N2=7ħ99P2 < *d*Vb D>ҬL/LnnF2@Cΐ}2u M_r_/ DWU^?,㊩lO+SW&l=)ͅLe` tC(n/8b@9픆ؚɄB=.n7w`)æ}+k.QqJj|~pQrXώdxs)}~wS>ݬ^Gj2%nN.ؔ5PK2CHHQ!gpLIK2m\&~eØ؟7Ȕ9DN,cS"Lڔi JUdJbݜT"')ߤ%4*ӂW7>fo|U兂I%rRMl2ʲLd.c%=P߰D ɫJ(b#iYZʳDsdQݜT"7]FɫJ26E2qK26r˔`ݜT"'AX&L\ؔ!.qf/V&MKe2=qr?))pON2pL6| .nof{r$w2>8`$znG;T!?Q!> Uo"4Od*`2`m=qHX7'Qt.?բi^TRdoyy(_E,$Xf -ZDLG&Ӕ 
Lm^LtHN|oH2ᶎc-Z$ ؑyhXG 2i\;wMN褓\΃ /{s E:׿c3{!L ~9"cgi-Z\CG;CFlΜ5yLi׮I]nr1&稍5l]q_Ei62I;FvX+iaV~6eҺLoSn(i=tc^,J,@-8/NTd2v2i\m%ݐu%L8l8Y&s͏AܢEqv,3y|>d*T[*rz96n (\mhWm~_eZt fY;h9ao46wJ2_8n%_ R1Mn `Г¶A% Xiq?2yYfd/,^]* jEZhx6~GG\ذWYsT4<û&2 s0O,aa*סNANiUCzIv(h*f,C]s\ߖDߎ YBhʁM= 4Ў:)M00]퀡i:4PG. L#<nO}ږY}4ct%&V2GRGKu}if=:çCbG)2/S&{ajFfCݏu*`@*D+`>P%0+puT)LizZD{cnig>W6y.ʧϔ'0i<äcn4 6 r|<ě>x 05gajxR/wEgo4=-t ӝk=;0I>= ;ʫR)09,^J or)U:@`ږ?U”fbua|vX]@90M+9zlί^FV"|GV2L{tkoNaJr+az]6ai[J~p7mmmإil30` 8m&z LizZR0*LF#5_^GeS"āPñ=;V T`hO߀ɤ_ardE SB00c vd(<@l0hcS$kI% /Oe],-hE[T# 3s'1<_sCVb_Ӣ{ksg&>LL4dw9{K,lL2z z`2roϮ>y0nS0?V-3'j~wfӺ6B}]>]ϹfIcDדiNm䗸r|ff6'W؄î s*O3mLoS#.#|g/c&eF~"gdV_=Qu%b}'ZjVAD4+Yl7z$!tz=4jCSYZ.ƚq_[Mۛ*Ԕ & D\j$K#UT )14*2GjN}tV06&E{(}6"&$_ $kmHtzj-.)Dt N#AE~䆇î^P$:q0Рٳq;(RHņSֶ0]uQ:]#-q%e$!$N$`q%&bf<=NOP& RVg/ hsk$%MHFWز|hny1) BeULUDP~&<7Sy W$ A0N]0}ri)q\XLٶk> )~niJrSD]9a$:$l94#u0 y6CT=Gά˭UH]lM,caEkW躶ᡮqd iJ#|MB󳹔%u3}`zlm?#Q* T UՔ[",`N5qM$uHmUz-v[ 4֎lzL$iL3BԈp64-ĆL%ity),^jNJ!(µ-v\ܴ͐]\_Z̵_JVUxC֤t[/!plG K it`Iʾkvb5!0P`D[:lmb`zhċ|Ѵ 'mI~dLSW[{dMo `!0{_[lXn oΖKw[,ȖKO쵷nSbWn'fB `z [E.p&ի]LYG% `#q]:n0L(割0L(?Lӟ)ʱGӿߢtr7鯑OOqx߅&N 54&C'Ǡ,H;wk04&0J%cȖfa`Jm orK'C (C*W LSE[M6h+ώf|h(jI\t7kt6AEvf;,2 ݼ'F!TC)KK}si"Yj:TVTc%8{KEX6Ù: *ΟpN.-+kdTd6UW|SdTT̺LL-ٚgʄ.O}}}kTMh.(XAj-٨DIp^' [L7ioR:8 Z;$:]#bw٭m}Z<ghb+;&`y{.`7w ܟ BB\J^}ԭI u_И X|(lROIJoFV+&]c1](_ H!E}2 qsXGe0wX2}RvAjMLG:݄̐@:V&@ #aM5cn 3 {k} %Bbqp͏V{9]&qʕ?6y-Ҿ$L M xB f: H7ljH!gCB{RIH%Hb;LRFP;R6O>7e)gh2̐a46n:!.ZOGO:FVJJ/,l5x3,$ĬEi6;RtTb1xr49o{4ДILjMp ^U XF&D͛M#B&<2Uvyrpx^IeH`;ۿLBίiJ<4LLVc]`Hb4N |2'9oVB1&;Rt[ $.Ꚁ–FX`6oꌭ֣IX;F~cdxRnL@EooI)ۀ-NT%Iz[g,22J{{rzp ) D߰3PGw&ū~.nZ7kp22${(􍪂= AU}I ɠsPU7%LU&]=eA-."CIIdGee"EF.eeZTpw0d?L⼀KdqDr28e?¥p9yCċKdzq~Gee+H&k**b5!N!gk2?W&qNC&.SqJ!e`Wb2R!lA0_~@&$ӟ[&Pi !tDY4t]vx`ͶOs29qSAL]A]䢺L.(\x*%d.yAY$P&GP 2347.W͜/ƹBc~ 2Ab,kt \&[&?>s@" "Q0hwnLR~ rl$ oy̝Cgق 'c mAԷP1ye24Δw[e, ݋ҲL#=9._&Z&`8<ʚU$e*G㟋ܟK7pWL#\ݰꈀ^⃗JL#J˼T&p xx 00Eqe:KYc8.|8p"H$L D" \ 
`e+'lԢ&^?}09-e7dW5ddU3M=bZN=X93t(2٪HTNdl)'CK>,f!J,0&hd%DI˯Q9Sp^Q&ɄtX5cEp̑ܟM'z_r\WcxL%*iv7,/0L\acvrh&lEi' 4eD!4dPBdPAL\ٸejya>zi7Caoys9sNHȑPr}:d')mջ&f4daU3oj&pxZ"bf};7 SU '^2ĕ+F^uha>^[{PmgC(:T@C{ RӀ}=0L~-ٕaJG8ZZn[h,liĩ<֢+gՎr8"Bɒ4];BQ*9rŇiϻ\` {wO`L@ؙZ0<ϑ&L0;)B|E\9m`ra(Шq.byH) sXU/p.I%L'~@W51Xjby{L"tGTd ƿ!U_3AnCK&p&+Ԓ\ R*9O%NV0h]f"B7[A)l Uxqe֯GT]+>LR S k8&Etd"Qiil_I}YΩҴ_G*4t`S&-{hxUwk>0Y"Znh A䲄ҋg.Kq EXΉ6!%ⰷm}D\\J,u(8siJajC1H4,x2L=( &J߃NnN ܖ, &ZD#i)ЂW )r \`2hZ0<, _E]Rw}\V=ƪdրI}ݵ_Ą0ȧ> GH)L n4L!&J& nNalz* TINiIRKhvdx0@ L ҙI&!$ j:j;5RG(] Ӈ&Y6Li8 "t[3!H>#&9"V+Hp6șG'6lp +nW@'w=!Z}j@S_/&(P_"T i P}tIsF;U$։b|L;y$4QP9d+]鐞3m/'{ Yyrm$0KG}zQqN0tþ!` LFiH>LQҗeuȴ+\C># \:6MrDm+ʫ`ⷖlۼ?Ϥ`@5pR ӁPmUZ nENV36t`ZB# \ ^3қ$,FeTSTl"d*'4lr慉yw?Dn~ fF1 hz 2rB ߭  v P]dz S3?y0LPwDž;ɧ61L=:3bdښNG/a1v#Z/½Ζ! (2L& r('V*Jy3!% NtoDmb>5<-$fEhڈmN^ LQ0yܸ"ә=EEE_%&ӛEUQaћ1sv6+HȔk)1ob;I?a;ҝj} 0FOe, 3}Gu߲٤iH EVL`Jr7'3Pk#*6dF f3Tp<~݊'7 ҹx;[N0צa3 7#0e _i%,{̝U+oܪM"L@0Yބ:CVdV1QZ JVqs@{7$I-ZV' _ wÔICk@)U2nC~&2'dx6CG^I$aJô) Si0%E0aJONÔ)]JUVnIôqaRYIW&i bMl5>J(O#+La Cۺ5v*?H{ܫSf(0%ҩT6%l$zszN]FnUP+n1w#T (տ;-QdfRD<: '/㱘eYiD̍fSPn4G%41; _vl|2x뇨7o)}Bɮ1'>Jѿ;狔 &#, Pbxh(38;3&Ke`brzQٮMT0$BIh꒸y; *٥Vؕ4u?(',=v$, L`)90&-4(0{0e0ZQM:D(u]Kv3%].%US6edBєHc KI`uNRS;M)J0зưYwKK$~RyssszavuݩֽoJ&,Zz )͠d7Sܒݟ0Qk3C&͛BYJ庱 R-AmUaDnpԪ'$Y<,Ib LIp:LiL+jkt#RU5[| OCYUj@S'PӦ9eeb,e7ܤC֭_:{k,~}RO\ v_hoĥdWH`eU#&;ÔpuVh냹8n#ʹVŨn-)0ʅIjg=1E&5]l J܇7Ҫ,O.2*t (}w\{ݮ!\Kn?"7]̤jo] %Q蘏%<2SYeE,ЉdC l_JVZL"a+A@7Da s911La75 MDj40y7Ul LLo+ I,6iQN.3@(m~=u۷+6hYξ-&L'0ey(q+ 4?Lď¤U[[uѩֲp$v"$&ɦ) AVe SAe!U[e< \XRt5+ [vuu׀P!U^ǵ+tYd[ ^dׁv.M9.9M(7F0MbQ/:@+ %kt~+[V%%F&ͅ˓Ij-A*(yY ,L# 4Ir L `+8ZUtnWTkiӉPMv/q]]rRTG3`&ISbi: jcmd}Skk[Zom,d A]B@aJu# ? 
f# b=#;i ,Ɇ)pZR&NݗL%m:/4q_-t`ic3_O`ҽ^Ct{O̴bN8wNdo;nËoL20@H&:aJFՆD=֑|̘#`Y՚XdK>5/Ai IV$tฟ&`2l72o''/!`cLi5a{g67:ftz+_WխiJb罏b$z$FI8ahѣ^Z$; O@XҌ@ 7Z0 `|&eVjEcήDž!պbt2.&=8?W^:^*\y:s~%tPioTퟂ2sK_bݠ=k|E~ZtNϾKHDK\`C#Mnr$@A]Ɋ_0%xajYӺ +3m_᧨ պA׀Bi Sߗ:f^6L@܆$n NogZ?L x.mÆICӯ_L鷗04ORط Nwrh18)iX:tH#DNx"PLv+/㙩/eǬ`J@.@m joh:%In߻˜z?zLxu"?FlhiqI7q.#-KG i9Ocu+MO,O.䱃\jQ%7{gw70;#PsfۀAA04Wr`z*,tLɆKtZ-J(zvdFC~J0N{4}8px~#60: 7ZxMmN"g\6݅ajhOslb&\Pv!1{&iSA`{W#90MLo1 a\vfx8DYO73 Vq'Z, 7j4;C`cOhHPT52`O +q7TZXaq6 ʗk([Q,\DFpҟl駥l7h&=һ@ɥ6{yGLQ5h %0aMN8I0`rxEq;8չ?cE` %%b'O~'ZDAǻȰbqze(u|>sqrS݂"ltQ %-smeA1|0/j涆}ԿUN;J!p; }/SKhHd~x†z*~wHt!tq & U:S<^O^?^Y pD $^%_u;N'L'P]UP:xx ]4y[Ԁ5$+_i5h_vpG@L*Ff4 #ڵK53Av VZR 񋘉^P/t[qk@f}R`Zr ᴠSkh꫽pnF8@6X*CidwhH)5 Z(h<Štno0zR֗yy>[ӽ/Ɗ<rK [ၤx_,ݳ{k򯇆/Z$)JZPe])P$>`}i ɿ4Vkz*oXV$Ei79pE>E/hvqb^<TZYQ/qo't:svf(.XV I-S7 hW2q'Q4.YLmE mG ÐrftHr]NG$bOu@Ta[dQI5т05eO2u.(џ.M]UiWX9 T+DY0hrQ'8zM[@\AI.:,A"ID *(Bxo Ȝ>?JLz>?pfUuwZED-C` %N} -n~ÃJw'Şhg5fqSWPAJD$|urZK[6w_shz-Q 2A=R) #J y"RqNŸS7(k~C_96mz+m1֟v&CL `J&†Ҥ s,D։Y+ Xt8)jGeٻmo6&i6si헓VTF&?Lwnnv@y.:J|-4FFfKՂEqiGjL[gƴulj\R2DZK3d;hb9ڔ84ֶ}l0rފTJ{5g)5fФ}}?"^RfLN:ǡ;mC ۃhY Y\go_-No\DGB$qXI3[ՠ^Nܗ!)&sP:.˽='_ST/btA+bQS@ak*J4keI@M B۶I-:*E 1IЏ".lCtTN\woHϐq.UWgN*5eM`&` !̘z̕ tfZibamLĔfI)O|T[vV{HSb1IML/ON=MB?w`*&m:AjNĔbz'*551B^h6)-BaN|M%n҇ ܼOq -;AL7դ>VywUZP}26$IƢ[e7bbjZ&ٓPWWהC@Ti}KVL%(LT杲픐>c5ɶXLuxz9wku0dҟOQ 0+5s1RY:%/t1O017@9Ĕ"'a'%V$&T 1 @n)O"'J'ZqSdQ1e JXژͨȔ䀘VS%% `LM `LuʎUL!`Ly0&1u%d3pu crE?< !H_g8A O{ۈ5$gtcZ-B݄ܪkΎis3ƴ8 pvh(Q[ziߩTL!7fGRi1oTW55 5Lo[i;ӱrN?Ę1򷱘t9?r]? 
:nljAO;^\11v1yNJ8_ v8S􀲛p'SITa]s w|EG~_5쏻f/Dk8O aLb9ivGgJ?h w\dq9 =Jx2qSC#Y {N'ijr"vKaL1Zc27<@(WAaL@%1AocrQA_ %11IENDB`docker-1.10.3/docs/admin/b2d_volume_images/add_volume.png000066400000000000000000000734521267010174400233100ustar00rootroot00000000000000PNG  IHDRK&PLTE.lmԳؽŵɞ֚}}}RRRЯjjjT,gK3lJzgffCu;qpppT\3p%cd`\V^^^H>@sF}J=wzzyPPPTa]'w X]s8E|-ޠGKQv.wEBD2h;9:g:p.Pj.ny4sJ‰rle^mU|z,V]?Γ?f4˖P3L_g?;4Cau0TЕ7Q(Za3}v\2g÷LXƬõʟW~Jqd⒯N~./..p®}*KM)jHYfЗm}΃ysѬV97_ةf{T6$"pTrhS ˨e^Ŗ@ǙA{1>Z}<)QsIDATxVA#-W)d)5qb6%o%@PP]]f,jm6 $6gˈ/a3v` KzhFvHp{^WOQujJY5MdJR`!@IT IפpXY~_ה}'j_HEVJ m $VDI)FDd$%`=µ $fX| P&_0C4bLR+4_"HOI|I4 ~b"31H+j9t"țX^" =McK; fOzs%D>KsC )  q2N'fXk|~=!o0k~3Mo֧kE5D,IBl???;oyR`1}dBD%! 1/ĥ6ضr6F3?62>9p8G_q\|anq_Q4xAI ,CHv!ϚXr2e+`[y~lTUG Y/!گٟFS[}(%CW#H %Y :B%w,X@"nmY ms8-x使d<}Kg{F߳?D/XQo>E` ~]O!ȘN qZgjЬ΄1Y 4^-78φˬ=-㋬ ?ˬ(L#|) Dž{nx忔8i澵'NjUI4#%_=RɭԱބ,0%Ѭabm쾪|o+CBLkz6U)|+m>⠍WJ5mqŗم,{λ ͇~\rE޻š dL-77+H̥W0b1fgF :u4\pͤD2u-1 eIIoןg,x`Zp vnkp/EFjON݅FC!4dT~о:0?, 9XvM}6) j{ߖ:DwN. #ےclF/$b #;eٖ$mWO`hQ~cgr59k/}.t={'P:ʳ$ܦ7?q#+]Wdµwn[or"ۖ$TIq"yCj4dXWr8KXܸTF]$)& =w{鑨<__^ q6C^'!j*?wO@IIt/ E\&+DB13[)wRhJ[I*QTTݪ.FlZ]Yӈ'ti$Fn(ktW]VӎqQM;eDX@֑ qї;PͶ Ke0zD M,!cku_+oB˻TEֻc*G Q'0@5h&qK >S]b9v4BTWדFo_i[b(!}ALO;AP0NA;g?aحvD.y؂vP IAm6уr8Pyy}}IkܔĂ/ oC,'}z/^6]aVyeuPzk Sǀѥs9j׫sVPGY =%*p~`)Ӣ9-#Ĩzq?ZqF YUѩ =b #ۑ1+9iꐣkrF-gbT:Ͽ^\sۭГnM{Ґ\7}߿(v2K_ƹ0؍Am؅M`&,gtF‘PQd ԍ( ޵WJZBJ+oDQ G".'uσ:c2|~.xv7+㦛IEnAFUHKLBQ/~&8/mz&tssb![l j];{etw^<8WR͠a~&NNk.mrPn-L;vBJHScP̔/)L/(#¸J}v~kzV,o NIeuPE9陹'|Clƕ\?W%P[o겮$A(p-8+ >~ ,}E5` K'jy*UuT6m*_@?kFԑ; e IXd2._*_'3Kq{Ws$gT`[hɮ͙>mc#$Lq2ڪ,&G CoGX:K18Xmoo^ z'O6qB0S>pI>̔u̎G2~:L mZHC X/>.~zwȷx 9gЮgm#.dhަA)0j6~]|0qL&tK)#7{m(z8Ғ'鴖t:8j漕z666xP  vPe緼7{M19~Jg;iE 0H55۵7x]qUwcC|_ 4$ǡq7\i33‚Fz=/zy/ ih}8bıwHC/kZ@;έnZ fz_l40]FfYf dɠcjuBI H =6[xo,3%>Uo[\}R :]/hͤ&!G1> |l2Ga_yi䋮}DuI^lzafzɻ /"Ajm/=xd)z];*,2x3`p([JQ~;oݙ|n|ssmn| |8Bc>8p6[/CC8O^+-`/![(f7+%0.ǐg#dү'Nǹ|9WYx$^RQqWuMg|f~A)PِIC;%aoiIruYߖ`'̴}g wǦUAG[ySyb1P<Cd)',! ?%MD+)/! 
}DOxiRgf|K_x4?H`|2r6#q?$H?R Gb!:~vֽK|7E)4!3 ; U_Yy3ҾG\#9/9>cbA^Xbc hӝdtnGKlelX8Lxi ~,CK1"(9>SSK6/LR&Xm\Oo4ƣpw\_5OJ%m^1_A'Z nzH׈Cf- .&|fAB#Vj4Sy3%D$;^YuFFw_|yh#u>72I6 ѨL VVV(XiY)K?p1/c6,ʎ #^Z[[1&O*OK_?J9I9z`a%ugkQalCp$>}IFK75Ri8ϘD"j$}_6* УdaGwr})R%/h 5y~*Yig8|Y[(ǵR%'[wʷ?XA!0;ҠHA&ͦm%u_z￿Rg} Yf@Jŧ}gkimH#8ᏤAɋfQFD .{"T)՟@+{GKppNޫ%^zy##D=qs|㶕vVKfPBa7>N%g+lґV5m6g z&)G\m#)+4-xy܁KkDA+ tG4VŽ/ʏ*{C#Q޻/(V`~ET´ˊ!_=E^t/o~fdRTIĐ"E]Wm+YX9pf_zGҎNEp_|/{`:64c/^B2QVq8Ŗ6M% }Ϳz;tv6_,z@ZSzyf0p03 kO@W#P/ܣ Cx)+%҅=Tw_m硥AJY:%Xr&"(5`jl  txWߡ~TIݽ;]sm= Ot]P@Zc4t.Up@XKA zI+%SLDc5uAOKi9= ,=DOWb Qm_JAܖaF61֮v/׏:;(bdr/tvR|b PjC{}hwRY=g瀞;tͬn|\x)wNZ0B{8DK8!јœYDdaDb }K?AUd#hCcs|K/E)S' A@i8DRm~9{Y3HG/]^jq 5墩j7Qs wx;~ d2QkK0V.VZ+mnnսjD7`hIBԑV5mal826o=uҭ~6%t?ՂUxY ŕ[?RJ4ܗ4 Llzoo>A/Mos}scZu>8hH}׃v~<[( ^b~(3E_ӫx9o 鰚J8bn^r_|KȂ4bY]ݱ-&f45WtL Kh@$'?DB7l8%z^**"RFI7FGNK/U8ܭW6J|UDmߝ v㲲\:7M2(]wz;[aQ&3Nj?V\8='YV==[KiYȕYyjڅ\5mox"J1&'=5K2E6I"7F/AQI1g>v'|Q2FʚN^IqQxvD|D+&T¬o'P(8@97_V* Dw0B ߹fmN$'u4^g4t=" (Rng S? B @cX6y ^aV XV޾o+/[ .(woὅ.MnkY:XnkfW*ͮחm}.%>~ScҝB_G&noV2pkj*t7t;>R˲+L OG5U{2(󊔹$x=퐻䁋C.x܄VqTG~>{`,vnn_e{)Ũ&iqtP>rK2]5;iߏW.IiX#zkmZ~6,v"ֈH;Yqˉt ~ Y_lO-Uj=Ѷo?oV"h`)˲HY5LOL(H1, jlDӝ4UZp8f\dS㙆iҴFM?~'¿Y;Ft'9h^报.K,0!bH Kl=#ӆdbο_}!fր𿬒NeD=OY aXzƒVh HH>c3F󽖚&Z?fev1L0%5]Cmy'!P U& ]7˛khn'e7-_f no'ðc4rr/2ΎH:fjẑzvLon{Ps`F;޴" gL IAbsv}F42BKwz2DPg "]uM U {Y~G͸T-uҾڂrVܸWn{G۝l<Y׶Tcj!,E$3O@C Yj3OobL"/#%cC'q^0R-Ei3ϙ^{9u^͢d_?ƿU4ß;Lテ1RKk͚ CxS<^@lGbrtX p>epa1_omug`՘Q)GGb/+U.g>m7n /~3? 
q2!m0 ӃL ' e;/OWcYoLCrBO+hn|vo-ס 䓤_)) [xrr -g()"pܓJuf,K^id?Y^bHO7rԒ'|Ɓui_[/SV 1zx_F`$=KFCpT\O|iu.?)*u7<>h|)0y{ޜ8o7o퇓FwqUy~]omJρnqg( I?NO$a{bz_D6޾ޘZ1, yuu ^ Ūj|+*NF㋬ږzha,O~Q7Tӽ'o޵# 5'OLi^{I /k͛{9,8[/ pV,bn5 0#sn{)\z\=tIzj+tqu)hn^H4U^‘畩hNJr'\~qccFq!&VJQEAWRuRq EKWIV1cb`Wzsvݰr"f@\"8x.=kbjs<{MK"/_FGYVE8{<>I%1W/}Ƞr/˭q oZOn*} MSEO%瘯 N}:;5lj&2L`F9uom%esh߿t+dFڹ1>D|Rf/-SMRd_u$Lp#$*f (D&Zp(LRŠk'+1`@(,|c,RhKU# []BwްD!-@΄S?ze]Rqq^OPXڪu6@ Ãkm=oxⲫ8-bSD$;lR,p&>4I[Wn +] !/\;ᳶ{)W g=N]揶ʟzX^,1 Ų PqK3+Nl2omhxX1y٪fE_>]ICo=x΁0[vQ2B$ Gd]@4vƬ?r1։>K&SEO%-]aFS?6˨Fi!AJ:5Һ}u:gqX˟EǃFaU1~ko`7b3 Qiq7W 2j g-QvJئKs[zdW5t˺]gaG ~x^vKkîV%x=>h8܊s/n>ہZXo`8O'J'( 8-Y0C|㋬)ȏ *3ǧ=*\axM}?v ^;/??c]Ғ!+̗:X^Mg1ڴveog?mHgAP"U`a.;RPٕ`斳 #u0f:jeF~e\[Z9pQe/d9rrhB R $ w9~c̏;]?(C`~aL;L5V]&P_,4Z[E\tL굶NG!呃2KXiƤ  ,9q//e+.3OXvk i?aCŠT5'VO?5S37Wgw1ě|BSFCV6ʠmW'x^ \`-(=or28n>^Z"3_{q$bJzwnl| q;$> %,S^ ]'%|p4 ƟzKly +4+e+8<˃Ԉ Hd</E3J-DـX)O+ I' ImQӱRh EF/3 H=uôw)U8@o*]hN露RC.gF >IK45)&ݏH>8=?M5Ք,M5,M515j֥ɦl,! B[hsQMf_7ܾN"6ǟq['%?@bI~#t$'RXɛP3o×[Nz0Z- _K9l Ks1P Pm&P/Zfǔ+Ih t_aoaIdvcJL{E)K[֐*`@5Lhf!Yvd趂Y*3]sPuGU\>M 7ݣ->ei+vT%c^\8Aa+3tD6)qeY,΁`2dKzlUǃahh ֥,]O$TEX-Ͻ7u*5^AG G XL$$Dzt:e 4f[<"]suDHv @{sF:aR5eI tBK,m2 p8L`\6GH L'ȯAN'rT ːuZVPwW6w^#B\5xh5Cw[K"lL)KZ9X"(_nWFuȍ,6WqUES۫T,LNS[ϒ>:@U奫ҭ9y408wn߫’x],G>LKW>N_awBLTsKzb.vkUx l6 dUlË\<&&砅ukbۡC K qWtQ 8~7rgRܑ$o! ^C]eI]Eky?}Ԋ~-mI=s{7aiNu6`iTJm$Pz V*MZ wWu2gC@6Ç,NN 򣋒Cy/VDq+}ApDoGuWXRA,UC'&sgGկ6Ͼz=n@[$x껒2l&%iҢ? 
i2`NS?֤0o_Gb{si:^'T=e"&k79$(@ >@uWYRQ{&?ӅSh{n|_}r1ZcZ`i/ Xz1Ξfʸ$f6bI zQJYs%Ȇj=z Z&y w$$v, ɿ\ XڙLd":5yn07.@C%B ʒT-tL귗l䥗SOzjsӼx-?rě_yjEaIK=ʩ8}?,m{aךC,YH ٧Ç[XX!U5Q*a"Kc-1.R Gnt_!ԅ Qv}tSH:Y< .pn,~XJqAa 8rNܒ,erNR[dᦔKF-֥=ə'+ԢhGCk6H>1&zH#X?oR4/ݷ'’:ljm1a^,>?um.IDA _}N|UW8XieLO6HtPd6T[U^NNYVu]毟^F^H~'^ߞYԛwUȏz^e|=K CkY+°9cuhQVd6Tۀ#as}2u f-:qGQ+?)$c X]c+",@}*r%\ᓪʒҞ>]ݓ\#,^a+W_}!\HmD,IKX!'64eH+ڈ R Bk Reٰ,},-e9UMnb0~XM,}( K"&^?Ke)9,.n_:`e*kPl6&CR{v9F۝757#GOtĎSR0aoTc%LOVϸt>G[eSb8wB KNH9jW}KW9wcOȫŻ\SHc{o(E!~6JAX>ꎼ}p(PqGNGAtwAzk?b}:zl1 z"b&GEHlR\9MyGT|ױר ԟ>|vdZЮWP)0EGLP};hFh?쀻 W#0٘xKpa Cf3,fu'?֣O˰* tK!L}Ԟ(ЧaPD5˖S/&;:iػߘ7nvHkwWJٖQ+.qH1M:/!&a`v#C2C2C!qt^ +UKF+ '}#Z,mԘyw>ps')js,,1K]fիdaҟ ;Wd,emN7`6g%{3KҊ4fO[~_-1Kxf},1Kb垐1)-)))C;*Kh*6Rל (I*V@KuL%\X|{mY ޼K1I"t_(vXۖ"d{w-.̏p"#չZiEN9ȿ`U0%JiuۯO[4Ӯlno" 5jG)~￷cXy}R\l]{*S{\V޻]PڡGK[fP+Ϟ=[/ Of[]1 @ģO>AH.Ww[_ww x3-ux<&,vRAKY~k_8u1l:bO<H.6__v/JMvo,95ڈKݰ3BV%I,KPˉ8?vR 0%H`)ǴkٜM4~M>A 3Kfr2&_$aB|G2)w{rQTdo(k\ꤏq( ̴6C57%atKg^DG9 RK*I$|,67#fQ@| i.2&~ .!δ#K.z6e%WyTw>#k\ZB;yZeF-B3g&p 젖h,m%&f%$倳QD%iޙv$跷.rKx-,;/?M/Hez}rkh,4K7tYI.iyi^ѨB,ю$r\nз~7niZ:iYJiÕ /,Y M~ڹFt@fvsX^&ܬE]OH \SBدxK͒_i wY 5K#weo~_RUKq9Mf0 ZV=gR;ƶ$?Ә)fj~j qtU%x )Kl!#HU7v~_KÝ*O/?CKε_KaǛ(Igɯ) =#3i_zWߠѯnȿ5WubИ?+%ĄfJǷЍ_DU}WҽBƥK$S҅td3`jiG _2` y,1KKfZzYba^m*jyYJ2l^rd%cg%c;sgzgK-q&J[{fYZҫ;C-?h`ٟ -%j)ὴ7\k[͜sUdb$@q+ٖel{feK}l"_=L=AjkDKS-h,~X jċ޾b%W7,PK+[7Z֮neHu]䔥`{&_qk_ƣlZg@DK8}x߃hR Z*2ߤݰ0-=7b<v,X+ⴑ9КfT9ĒC5_q#:d1)/ħe O'?r~XCWVVnw%؊G3f;xNiÀ^щ9%fKC,%ڕ>ҖWKR?N`3yg-qFF(Mؿ6lZbR9#&5KwuK?pC%tgYl%u˛ugYz|麬-FKֺL-LHI%|,1K,1K,1K8beyu9̓~ko=KKң(nTO7n]GK24u8#X"k\ _eIγtiqF1*ãe FSH#j)33q(8RҖAŗO;_Rj޻%v"Pk (VZXvaEM],!kP6HT .Qnq< RZFÂ!],u "N^9(U҆'ڸwDZӥSP߃}D5`!WYSKS5*%f )|WZ~32`2N6DKZ9 rRzh閰zzYJR#T"cƅp-iŕ)KNůk\?ϋ}n#ҩl`> TCJ#WPO)_),+~Rmwn) sbqtAKW,|4t8t'0aUz ki\-1K W>ZLcfhӆ'a} :=ZZU5,jT楻_BK$P%܌yǣ ׽ 0|Khh%j)cʣ+nZJԪ9\Z%fn"z:TӌVp3󈉯=}M\2ןGy ã>qhLV^OUYWK̒C->GBf) K/J'5Y+=k ,%aiYyiCY/֣%fi}LBR%pӣ%fٍgE&˪Ey,0K,1K,0K,1K,-0Kt7QfYJ1`3fY1r'O~{%[rػ0pvtK1ɵ7=p׀ЮZ+FK`ЅE T*PK^_)tTM/tpbK<3Ȭ}~d m)?1uϰ$ؗ*XSyipjZSrk:7Rw`+ӸP;ʰLߋD%[r 
.jS,<^08R?RN"%Z,סSy'z!62KƶTT Ҋ \^{oYJJprڦ[ed&hEX/j^uK*5fЖv{ڎAK6XՌN鎌gvgbެ($r8j  w.An9+T=W}KXXXXXXr qQ9 9x[a)o~F= 1& 6#Q\7(OWޔr" o3;`f@:,ҜyǒIC \a۟mR%%<'89X t]?)ղt5v\E3 K2?eҔX&V;~3KIU8j(@d& EHW#Iψi;Ph{/YC͒-82`B֨!B3|ޘ%], gKײ4{ o|9kFA7 3)T;̒$Hw9y9;)Ce(K1k$2f\,Uܤ@e  b}o̒ }@n~KkbZ=#"`iesS*rOsc,3B<҄nUM~@FQYJTtc)粷{, IXoLebZO<іTNiew3uA ; fɼU(VE}Z®׍XJбǁO,kne),=Mù͟߇1_(Whbw" ͷwzmb`'=0cJԢ|Q,}qZ{V?n[ӛ,}'7X*Dr4"K~,-yKGU4}b{XdKs ъO+Eg%OՎ!bij#>YBuR;K֬Y2WֿPKEKײĿ%+aQp׊bgQMEō陥{6+\s7ܤsg5e:K%wy\Z}Cqh,|,%R;nB+#(h( [ڱtZ79%)ryfz452,?"hY?=,kbj .[,9s8CSbܦ %׌p%9-{׽ޜqIY"6ίdvYR7~2?Ybw=KK:bQWʿhSʂ*f>|tF;QΥ K"lfY&`&3)->OOd>KXy/0H4/%QJb0ҵ4)n]^?g~y.ҽZ-pvt4Tߐm˗/:^,!sq|^- ˺({¦Um5OQ B<;~,)&g Ru8 픭`5+-NeQ4}OVqCAKףO'Q$eֱ;j1&]&_l;َ$ά>4p6+ /1))Čq&eCOrF,աF]?D~xdSVkgi?wM/ ׾z2X A) .؋|Mw:utHbȎ6EV;MX vӜ[=Vo@FV` P?s_ LRYoNFIY@ r؁9֩FuYN&2a7a/啹[ߛCK //k%  v bO7(s!RV H۶Lz1d Jjqjk]YjE yAE*J,h{e]O NaR?K,AsNw]ߔ!2b88K)_4v[_?J,B1^^YAG>ѼS5{U. |1O,yPc?Qw{UV|6sgWK{$ALi$ fa4L܍6eCS5d mAz+%zmٙe2,Bw+ea_ya)[}ؑ;/ݼ{FUŏJNړk)Ǐ^f&FKl9RPK߼y: φCT5%ꊤ3EׁlW{fvܿ]w˷EVȎuvylD{ $Nc"zc|z nT%9ҪmJ34"ڔ`EUghƒ4,q 7c]h)3Gs/ H -CnՊ<ǥ{J_!g΋%oi [ d(s%i)y͈8 F3:^8-]h^)ݣraɵX~M =x(%0F 7]Ҳ|zLn':[1XKMRU%t],>Ƈ}<[KK-EXJ&yMsD$Y@O,ݳCnqK_;^cOK @[Ң-ZW=4ŒCv-"+oy)KR \KҒ$H9O%Zr-Y\K%גkIH^Xr-~\K J}J =o0 KeZ5S4{ [c3:BS3(q2dZz5,UTT8VXًװ%\֛TǔoK(k1GﶯK%gBDF`h[XO:c R6X)!=% [hT({Id,u(D)7GkM,Mmd5u gZ=/-|$J3QJ|')8!FC4Rfl`:4Kpà;/e=PeKjgSJgZЇAMC͠ݶId6AG fɓ\Kqғ>]YZ,aR lՅCJMAT?t4O-x+֠`NK)QOA'q3f [LUoɵTH<ĉ8{WYF j%$IFB_"˪ת89yn}\\n-9Ҡ G|s3 wF1mOL J;` [C,,B JO6H{/}VeB{ v%%5B.'Ej Tgb0ZQ3 V]+.U:(8uʕr`%KH@^'"Cg+pIvb.4hu0;pťrJnKʩr~孷1]+xvDy1W%'<8x?W]*/&iy}_]jh:9D£p0JI >M~9Yjnl/3|D^?Ku dG:1-}.gޞ3s1F@wr. 
/ z)_*y<ťmڳ{{#;$K_ӂ]a}f̄=K57]MڹNdn z4˶IQ !"aߵz?>㦰;;FC4^S?B0mhfA`?dttAb})i&~^%*UVi&x> Dh4aб!ΛE :v :mŎca&6Ha\EϝLםJ.`moCTKq&hMƥDoQ0+.*K;]WP|a48݌]HTaK-!jJm%{ۼ](YcĪycqk~ռ_/hO<]nC󑽎0,6ьzy~Z|S_r:m.I,//~D/~%x_4׫In`\J'd~R] )D<[kmU?!Kac:gގF_F'(ґͥ*,I!d*y:5-y=& 4vM.Z&E{%",j~"HVM$Zqe |QA:D]O{QhM,=Gh6eL0/$i:Nt:~xAKUA%:,^sd%\*~lC݁?ti ~/NJ0_GP)q%%!+@J(_qkBSQUBVϕrQe^~T$IEk#)g$cQ*QDtC aS"QNRoǿG.vEYXP=t”Zq{8>eV_jkgZj2*D)&RҌG'm1<>fK@R+ BER𸧼 ~3MRig R)USi% &)KzsankO +֡w;Ƞ7&LA%Fzb࢔WY傚H9Eb3c0T +w|$8qi6NglR_HsAzdH/䂴gr _3 xI*8l0ڥ a !xw$ d<.BR΢(bVq,zWpS'C0:@W_t*wN0_\-Y "V~JT)hH>HZ} `s12sRuﰆ,aVY<4bvۇM>?IENDB`docker-1.10.3/docs/admin/b2d_volume_images/boot_order.png000066400000000000000000000674201267010174400233250ustar00rootroot00000000000000PNG  IHDRIXuPLTE.lȷ󻻻齽žތί٪Ĵ뛿jjjv☘ɡtttíS2n```Iggfܻ9ppppIw;tϿ5775A^tEv+)Ru򆡜ƿ%nTYRtg~&6hp8vJfe@=<Ŭ 0V^tV۷z򍫔szHƧAպթТf̈ܘY9ތd/ZȫAxֻ%*3Tgo՗&kxd77=Z/l:;՛uN"kIDATxk#ǻL5saR^"HI tU %rS,9 6m1 ‚FfY"|_.2 xޏU롶\˳m۳=Zl8頕:|j3OdGF G^9X{GgYmsF:2I16 s,u8=<۰2)9l&{ay0}EJu5G*e;X,dа|Øv求Jaq!YE5q%LWNۙ2H7#!.2osƷ8:iM~rsv]dkQtfQ(>X\şQLCQ!Gp1i`\rӜOek价 q߉>~:7_`:hشh`!-tY}RhO&$hزwY?,{;yAō߇_~ew~Һwudn"eBF`#OXrЖ@Fu.=<|;AbfWQ%Q4t]>8Js̒T$onŧ&Q$!3l׎m00V](ڞhG 7 )NG(V{(ZoA4 @syŅda*#mJo$5MddaNLoBfŌSgwğF}7xssěb?8r?$!'ip. Ri>:Rq\\~ZE C)ݣ^1ldbohx4W[2P2NE;( spR> .лT\CAZMAe+{ ;JZ8=3*PQ`f{6~>t_gg/׼/?qo*GI Zg_<{vk'I2iRȍ^Mg'-h,HYpЁ,+s:NdУTi!0>&)n5MZto1^`ӓ,h AIko%R/4>^xO )+;)%mg$u͚< 5˜╟enrjhʪ͊[2̧ۖQ4IIUN)NV$y>%ŻxuJ}?%^_%[\$YC/8LDuM{X~/+^e]uUj=I2_oQT kRLN (() >8zFj,>8=]>tS]uo& |_cVF_xnê۟]}2W-w?[~' BaD:IE/v6]w{?I'IR?/0ovm(vJj+J31IJVZ8C>h~o)'f3 "??cf"7|<Sƅ C_ӵby@|&˨z;>!ÙL]pd-lvWр;As!s|~W.5];4'ͱCݚz!~!A;NsLp÷^- 9 Pi}sss~/5fvKƫ3ٗ jZڮ P;)X 6@R8ߣ_*:_.( ?}ph_׍@wx`;4ſr-bd  Ns,r1">IǥF=U*\qRkRMɮl +%IZxN`/wKo?\^Mu~??[v@+Ab :W4m IMؤe*^t'MX) ش?1/sTmJ;]X}+b ~V_R/Gb%ǭE!NO%3uvXiI-VJ&r`eqRz$%nF{y1sA83rxl!i|))|rN|ӣtJ.*9R#77N)X8lWG3iSFDaF#dhrgVr.|ŷ6(|~k;Y rWffz2+\dJ҃d8#ZFA Lfr  '_- כ-՟nֱ~2J%Xi+Sz#{S|k]ڔb.AH;'-nwXSΗޥ(WVtn(7n1z(:ir+qh!#N6NSNB0(9N_IE' 'Ej/};ҽޥ [i_\A8 ^:jca2o6y~DzHK]Qm%qX}|lZUUn& __?j=! 
褫p /skfe&zZ4_ % HmKڟA{bny{{g"'zL -vz-&W&LC+{{McBy"V dVÇt҉#I __?}z=n#b<[8qE! [¹*2ZMߦ4tlI71$K!_p7fi|5;喼/oX(6vv*-L+n&j(V6u^d$G@zmIM').u¦0iV:=9*<X8I_.,a ҝ2=Nbcc%~8B߬IfNJ1ȱ~ +7ܹ!\-vby:j#BG [[ [ziHiއlӰ{~(SO/~=#Iӓݿ6d!VN- +IIկ|D8a caE'=[4BDÌ&4:x^IlpaN*LE ^߾<6lcT@aZ\{WYyw|ș1]Jv|(,Ja.j/igh=d$)=|:tkbN,AH4a Vʟn:~Im}r^e8)1rt<]CvID[:>(?7s;'0ybtPqqKdI \~Vuj-Ϡb,W!:`lq}OhƑۉv`sXV}aNTߗ߃ݖcYadlB+}+  .LB&Njw'@-I AX,愘᷿=Dqp8Ȱ-vN7_yO껕V3O' # )BX\~,TkT̸J1H67HKWIkĄ msp>#=8282NP&=ߟ߃Mb m;ՕMd0HͧQ#q9c\u4~ۜjVښ&𱦓NoUJ/ Z:^@TǾ@gD#-lB)KR=솢q|VaHf~FwA_+wWO0hsl31;ՔΦ, VB`j4_C@2NDQ'y Fɾ5D˾BB2I}ѳ=LAo9U[tç$bR?Gv0N"Cȭtb2#!H͇r~~ު6g~"F4'>V{gPW"IAb DxxLQ:J!Mb-8N2qxqm$tHWrvoOLO L$IơOzg#"qf2SM4 n2{ɱl`#E>n߾Uʙ{Mih VPZAb H³F4HE Y*Z{s]{kt\t[o_|Q61&Ok=1HJ1c޳3J4! 2c,s8T;-ː;J ⟞n:̈c~o )ȩV: /.@~>sxmwgu:+d`I߿hJTT&=?>+||6:[rPt$o?~t+y(;T8ư "LJﲯ>)؄.f#d`@]#* C7ù@z>z&#Nj? Łaj}(!wGU m|HO~6i? _]Ha"F@sp:b˜gmQQ7xmlYA7nq"l ܃!񓋝&/;sr(aX68iű+PIii:÷d >%WF0`ӅחTFvMW hGp؝y$pwH1\+ZŌT`kkhlIji I0#ex&Uؔ CCL+dЎ[@ϤhzSIc[8W^?T]EWU,K5#]taވIH`J8\UZ+}ϡnZ6ӄ+gŷg5.[*pɒ<  ţlʿwM Bd^%86du R,Z|AM€^lۂeHL ,0aO+6_;QW0C)%@crL | f8MmN|N#`:;w#X0b2K0u7A3T}5ubfŮ ]`YDoFik|B3vق@:0=9Jz~˲ V؏%|(7@.|_5>W8ǀ5.|`rt{`;wK6yD 傆g]i](Wlj9k| VzMO1$woTvba.?n>Ǘ#-&:J֪Çt>Nň%&0淑'Mh30?ZdR EC߷~_|CT2+PCy*qnI\@b Uq L9^ϓz=n~JXG]i~Np}#.7yQ[f{ݥmɰ5yԩ]J;<_bio1)ݼM<8^g Ϳ+֖Qe2%i\WrDJr)I8PZnpf˲B~<ғ}{vPIbGLj Y+et[m.a$Z&e egXZґp_^LZ~?矯6'Pvb۽^odfRdB%>xw{3gÛٙapb(?Кz\+uB)VXo줝(R^ؑz*=xzȯU;Lcnn'ig=公4+IQNm>j{QYѓbܗ+3#Rr/#a˅*ww;-!htnx?ŋכH%oY]*a5 XVW:utqЙm͉or^~lMnTe $U)7W`g7Z <\>=7 ;& S|ь so0_ F=~W\IGsaDNPu{~[>wdR*N;$l߁([Mѹu6W6:\:jeͨ<~ }s2WudYRlzz "͌*\\;r+r<[1mÐ 8;["eit7^Tfc\"\IVU8y,T, uS;]WTQmPYwQdqRo%F#?\#nh+ua"&*Wp%ŀSKoPvC滋6qրξ 8rWMї2ANS|?[2 By(H%70( T{J U9$~޵WW"k 7ZqZ)'3Y(ęroTs4 l.;\Y]޸ɳØ^ 3,|B~.@q%IEIyEiST|?]^%u v+ $ǂr`B JBP(5 D#܌?+}d<"%.||=V5a(iޓF.@P*4ĂMx \ErxSD04J Gұ`0pfab>0d(%&fU( U`֤al৒f`_ B#E)(2x>Pj0E 05t9mI(> h0(R܄>?#ħpdCDR` 9˜ڧʷ>Osn-E |juI67fwF@PQ˗i=eK|uk>Gg%Ƣǻu|( 1{o1n#9?a?/hG{ 6Qm{¬6#NXUzfKOs~ƦCUydگ2շks\}uO?{D0n}tvoA$5)E'PDN9"u맇?uðOƞWtT]o|cL#b? 
j- LM4ɜu%zl!aDּ]_{ l9zh X_Qm EZQFs?淖 TI峚1Q'RH8iD _IKa9g̞?TGc 'ee&*N?eKY' }nȨVMh;r9aq%9n|ѝLt?lAAE7KLy r4QNQ'dշu>KG#iڹzsmxC8?z%z~h"qu)0SODm>j9*IL!)x(q>xWb ;j>X W.*+ru#|%\wWUZDiF12KGwvԓM=u* q4Eʤ KUs=;QvqO8zG+oߢE;}5bZFTO<+Yӯ@kv0jľO/OO#Bû7{uphآNKƢ #d`쨊SXUE&ų:MA$T ɑ$ȁB|I60bWtDgv38 { 3& ?Պ       Z|\;#-UIBp6يեZTJjvsSjy R@6Ttk8H:$s->]Yy7oT K!"EA;E%-h=cNf9ˤ֦o~?7{\?n2ձ)&10c%>J&!٪`o+RNMC*w>'dR$&âgXLX-ؔ"b-I3ɐTߨ!1mTd#^p~u>~Ye>3L"`R'\0&AF/Xq"`|aTʏ-IYeh/Y_!$>D(:.ܻ Νy p])K4Y g5BۿE$_/çCt`d__1L5&Fe#!VvkZzoC} ׮8q{2-;Ӥ ¤FBp6F]!;1n+{nZ|_Kl3I"|wHˇ`ܠ\Ii'SO9(D_??QxϸLZUW$QPqEed\&pgD`qFrgDV%QGE6[ ū"e`?<:c79ǥspm󜷝v^{t 7h?e1+;GЎ[fi5'^(W7C,ƸUWl][6VyLBkdҤ1+`]PLk(K$֌ LAhū6IAw[Hg 6OLDޥi^f Z h2Y4\mCd!hO䀆mȿ#]S-a8h*.*EU$XQNۼc~03Τr& iP$K$$f֋W+ i?F<8b#n~L9牤Z4wd4ݷ3x&%MZ@C2zL)ϼ=m@QWW4F.{d$a!ܗ-AdXR&dN4WDF+&F 2Τ<&i.ȗ?G޺{B7tx|7czZJ3=!u8yb`dL{}@${rO=!8aoDIڲR@ii[M2* ň&!4'nd?*?YqOF<͠K&)LjiR؋"3=,t`z)hZ {-q^B s<԰lKu[Lj@JʤlDRg! 3(XŒjԪͲLZku@IwYlvYa[lP+۾Oز!QA;R1 Q7IZbcd&4dRH`.ȤT -S"nL*_R-Gj*ɤyEIrh#feDdA&dA&DIDIի `VWәDۍ 2 $L"$!Lz%2hxqATA&%!*b[Y7mQUzz2`T~z1>`&UԭU) ZՋQ!^{?#RlXo0u&IL8KzjkjUϻL2tt'mitrr7BȾbQWϤIz{a>jrY~L: ~pۤOa?s`i%9/L+Ԙm~p3 u|t7#3M2siq5:8QeoK{ΗS?./ Ilj0 V݊EM ]~FUõ94e^=O;tܶѵ+vT63Fe bKj>RB4,![A!aDb%"ꔎS!:X3" ]kֺ9فC #?ӳ-{dMSNÚb%]sޭgIRh.N^>?XPhk/DIxULYJ]Fnj nHhJ"n'$ Hj[6(1Әyr/g?$ݬ ?a $q͵8\օ1 )( $ōQޝ3҅J܅RM!Abx?=Rg;!=tIRqa>æe'>w'lA In5 }?;_4Y?4$۵#/JZ4}~>csOpFi%r6K+iIl_;𔯞_>Ռ894׈/{X&g#{xُ$ YзD%>,`\&@*I@鵙' -$8Ί];+M?Ir'm%{lz*J?$@"*S9 $MA$*s$*3IC+'iㅫh D?8ȝ;uh'0>瘷ax3ԙ/\E#\ %tb̝Qn7%JzJ^s ]8S9L%E(\E#\ %D\2(hJHu, 3L%m_\DH!q. 
I:' lCt$sjp$ =Rbԙv|[R|0[Y_IyTY h)QKjǪ*2Ңjq>%s>?8-Aƹ@JmRqji+] 쳧1>?L%I'ӎF V8H{AbvHZPl{;86im<ΙJ>EOd8T O(QmbxIviۜ pmVf%8iϙJJq.ec/"qj@Hc3$6X7K/Ј"T4f#E4AI`\oE7$ ϙ5цIs&cO#b\̃&ڻ 5͒x%EC4T' z8qRx/d?\O;Ҙ~S][o>:aJ+QIw| I!A~ȩKoшKRK'H5=(klSH:)"1I*wn~pڽj*mA(FITzJ$E1S(hK8(Ost9$E'GR' 5&5Is ^zqb$9/ŗJ$ҹ5!$OH>7 {}FZ>oL*IC>'!̡7 D@|fzWJ+N$-J$K֏FH 3O?cHҌ1/&0CiT=.SIb3YNK)]=GУNj]a 5踯:{#uSL)j“$OJ W"DL R6q57wURF {o&Zdk9 fRM)-ӡIb$}I$gPw$ڻ)(^)o))gssS}#o ?Y1]R7u3\:fvvκa/sxnP<^,JuDiQ#]"wmI gAߖ*I.NmNIᒤwJથH$dkF?\-ڤ{,J>G?VRh< 9n4JϘH70\J]$X`Pd2Iltim^V>%K`IT}y K > s )(#?o3ju>å&M˓aW8ۤj#btI!-J" R"RO\ǥs;'o*pɗ-VSNjEŗz7Ȗs!0ޕOWg׼2J޹Gmb*1^]awT5(cwvY%OњpL_Z0_b\RMj[!͎ODO205Uo7.6c#_1)l%QIdkMΒ|;nT˸rO pP'5ʗ{÷hI > If+CIm;Rx? 1P R3䂎_1 D%}f+R@v fiAQf%0䖲yߚOb̸JWuQ3{ܡfxJ>,S<)$HXj쁕tlnYwI>f 9zWn A=* ,J" &s=󚑳Tdg(-ʷ>8ImEoQX񌔓a"`="KPC?]:Jx~6DJrXqP}K}0ctc g+KYIݦ"DSI $fK9-FK?D\! *J$PISI* qKY]9v*S Cab ?ffIJΉve܇ΙyWI+d6J:OLMr-Mu: ]IERS}y[\q $-`V3lrJkVñ4sbC iZ y$ RI:[()n60jsOR+D(bY:DcI,((-D-uPѾKњukx60^et>zgOI*-)(4+k%EyXg`%"ݓ$Kr-TmW3Y)Y<jJq? & $IJ 7%INlxtcT%q $eˮ&[WyrO>Dz+If(|I5DakjVzݖ/J$FHf;x5H~8v^~%ጹ$36{$^J%[݈1>~AG9tIr|#H56nt {ET%-^.qI/$W$Aܖ_FngƭٳgO%[m<6KuTMnK7T%IW"l?@$:grb>hJ3ݨ$"JIqIguDLz2XGgJR])QIwIvxHzWWSIB%- J 0$k~@%C%=FJH}Pþ^"}T$^š,v$Y[7,)'@?'TK0^FHrf>"/ɰ.[f+]j&2$QI!!2?$Cp%D͵jKY+j^]ә=8}Id^xU3Uq'ճحⶾ--4p&߬#4*)$:R6n-[/[BE*F.:5*Jn #V$m$k> $*)DE"Hڲ)B)۴iv;d?.Ki'J JryP0ÏMsMq'4Sz g:q$}ڍO IЬ*oI_UV q_Mbl.X=$QI^?^Ls/WIqJn3W`,Y'J%ތF۹\C54UL7*JLt$$UJJ*~yc$ .HbH~J*i,ĐY;I*s` [.ARG4D%-cxYN&.C#&B6>E,`$:%5 iHI2$*)Ewl*+%iII-Z-ѿY-[ؤ$OX$*?{v?ڤmu͒bC[mWk$ CGY7;6醽(e{JR;͍$E۩O!IvڤK*4Bske]%:Ci!*=p0C&x\b$2N vڜV ޷RƍZ-ZgvN[P %$A[)PI>l{$)uHbHVBм}Th)?tϒz}!JFPR%2Ktl'/6'\MǕ< Pq{e$TR ys $J%rcȁv 6@#~ n6\( C" FF~Z򈯌"F(4 m-d#v zhB%5-F(jE$ۮ 6QN2~[( $ K7IqXKh$&iP $:gRн =3E׹9}Ho '{J%ub}{ݐUv ^5IT&  4I GE$QId| EZç.H@l$PId\/DN]zlCcB%8gzc4爠@b D(W Lz+x"N$*IJ(M}($qKJw~ I ptdP̃m* Ipw_D* 2w8:/J"ὃSۯSGD2P7m꣌!I6qfו҇:_C7V U@ B[J4!eTO,*ٺ"/XAYٍGQFM@EB?iIBsS Bڭ{3vn\^B6i^ )UIfDj,W+D VM:VN 
fܺwl[eJa6lU'o-3+ؑmc(n:n8~yjgHjA&<oPiow@5N!zj.ZnuZbRTZM*J?C43ۅ|MbdR=O6D&||lȪ8f۱3eg.M*:pz!wz0T/\kz[U$6)8%Z"_oF_IlRh L&ZLo6Zs@pss!7ؤ_I0bҤv]L9ZIsuka 24iEl7PV-.@,ES),l,廔&݅=[5&-t?IYUڲIB,W$ *F*D}h`I >  fQ-+:ֽ;,L.{xʬeY\9f-/z&H\İI İIl&1l&1 İI İI &1l&1lLC,9g2#8Ēs&sOC,7ϙM%f_~IcHqPh~0A3 =se3,S&i>ٵ*7) DŽ766e"A9\wfҗ]M&a J׻9t&}%DZ5C&8dU*0D4ϙĻ{Rma|#N@9'}% =mT9aTg ~b J7m g;&Q4B7 -G*"}'pv>"_}$hORk#.1Cvvun+̀,XI7ሖxYp*WkFh8>al5퍹F0| ?V{zjZE"G~xX;<%<k+ ^f!lR( hvإI4Qh&QW)䁄I O,p*hqM SrÀ<|/ϙ9 2?@Pf?۾k?kL*X@_Y'}%tCTUi&Js*-׺Iަ Ps^> Ni ߩs&x`ak8þvx~\L mB8AceRGdf; ARTWi'1Olv8v ݤ2iҐ@Lx^6{vh4Dݞplk&|<'u`_傔Ll܌_ d $:E0L6=$@WDh#=WGyKɛוnx3?Ӭ?y.Jf0&wzQn8L+?}zʼn,Y%$yIV9 ;1%o}@k8&IXWK jI;\{44H~|7 %gϲ@C,٤[7 3yC9tXC"B 筸_EM_7.;$;'3WC,s[9v" LLnsLC,$-[ҋI!3!3,!\İI İI &e&1l&1 D0lw My?tAd&5>q<zE,fؤb>o4 |Ny(/~ ]SBMiPNL3vD(􃞔;`.J=iNNKi"*V!v>aг{hT#1qapr?jjk=e p(4<]Յ-[6U/LJ//&Ѕ& AU.FNwK2q?ap6A3bdI4=m6NرxbX(kIa2ɤD7&<(g$̆wȉJˎ6JMB]+'5ν4?sL` eMU8|өnp{I;wa)b\VL䨱%L&MWY:6L@[#FP*T58s1@"D7#&Gl"qw_ xF{K&ъ@ߥ:z>m0bWO ݤ2MؤW~=NC&NyUe }Z&ʐe~7 nDٵw24mw0I~$D+&.blD*4bvMdɆIͺ)7ibӐIo tgfk!鉛fI€CQ4I(ewaɤ}ICͺ S]}yޘf/d }淒I.:q񚸿I-xpt5N8~FD\2-')q(zh.x!&̩7&پ̣p*x&L/ȸ*!}\C-b"ߖXV4Q:POQ Q4%ZlGsp)h]"v$U"/O(ES -<+ʪb"?HIEJ Mb$a6I İI &13/o ^r! L>%kH_$~aL gHb[gH$3T)RJ)]0 7/(rT-lҫVZDRƣ dQ^7c^8Q4 {әDSHM)ԙXay#OuH9MxEܓI_-lRaa*r|tZ'vR>ɇayCKL3INQJguⲹMtzjUwC'qcUJz[kt^gGW~>mqca2)Qtsxi^78,ix$0I3Kcv;l.\DZ[9*%@t*q}cҊr [6>y%hOFVC u^$R\IEd6aMb77ÿd0KY&q/%{)1lR^Jwq2q#ElR^J40wqJ uL2[R&A@cK| kl;/5n~5,jT٤A/S34/91Tb*qI@'L_|cK-b;/ܴ"Xq2ImڶW4tX+TQ?g3IxH+|Z}ZSsRRg}"ޞUӜ>𑣮_^O?193b.QHQh ?Z \NRlRIK>홌.N5c#7#Q2͵P^OdRYk(5kP&Qw'n2ImRgˡ'6)פD&%g28)!xsچ*#c^O1I_Ho'5aHMaX ?G&:M-4$rDt]Z!:[NHةד@=}%qd6 ,|fB&%ֹIFSRR"| g;2]fÕBk2j;zBBpz 㾠s<7\ ~yOZ$"{8QKR23 s'Sz2wLiy]et^'o&ݘcN bF~}*l>쿸`!0&II&a&a4II`&a&]GULdWl|`l}xY01Q6n˃k sWnwbc@S)rkGBڸ[nJEzMyˆR*defH&hsҞo9>=r8s HbJm6Y Q`< $LNVᚥ:n¬4'Dz)_ /eũ4Tl='g6!!rͯ͹> $ T@O9P L%3iDԜ$gN~ԍD%!5I61i^:{p Iy1B3bMR 1)D݀Fj:4Yvv򴄒eiR{w4BJck=Țt v Jor&m1Sc*ulg]dZMוSUD<g%$F.CH6ߤpkm? 
Iy([֎7s w=jcȋ7jM:g Nc:f0iRKH;NgfQMEy~5葠\_3I{h!jw>\yI]vN@d һa>ڗZ5Cq[ [BbWKie;2FQI~gϰ1&O^7|g^< ڮj>l|Icք|p[o \~a@ZnkXEC DLj O"&#gRQ1vc2u/Ö=ujdO3IYn-IZU?MŚaE5Setս-L4ĘӚr!ːe]8l&agqh0Ldh=L$LoY0"0O7??|TNSÔX$"SlI'NCLR!!wۣėdCǘӕ5FAxʓp P!Y wBlv*r(QMbv'颋IJF\b çH Id*oRg&~<+&9kDqIJĚtQ0S#fpj&&UK&S 侸lWu3INC;_Z|}8k.vn)&ơp=_>ҽ#l ]-Wо"|'0I|06(=lm 2ڮ t 7>W[,(h[ w*-qI$W<%X"' &!8gtEZ‡k7%]npu$XSQ!S$gñqoa&:_$L0ŚaMRz؇9I؇$lL;>@ K&i/ F,4lz=Dƙ\)3=R.'*O3W!RQ&6)ic'#*$U;(d(xv[1ԧT|cR~h)IZJ 6$ ED/I5VI V1֠-ίwl81vuF#Q0,H٤V$%H:[-q1}\ҥjśGG1>spuqBِK0_D,Yb*XŢ<~h{fn8{ QzEAsʵ)6DK kWjSAqS;mAiGsMZTȜf).! A$$$$$$$$$$$$# g(DzJZ IENDB`docker-1.10.3/docs/admin/b2d_volume_images/gparted.png000066400000000000000000002302451267010174400226120ustar00rootroot00000000000000PNG  IHDR5nPLTE.l򒏉ͦ몦[[[Ji麹HHHfWѰÿά^^^221jjkᴴmml||{֑RRQCCC;;:`{666\wĚ##"iii`{((',++WWV>?>rrp//.hhgNMLvvueedzzxbb`󅐟]xym뢠IKKú ugaѺ芖T꘭g٫M|^<`0)$OF\bZgϯ}ɔlUOu;9O/*DjPځ?_)砙v;rMpX@;K;,T}:͎8Ɍ}i(l rKM-cIDATxl?6?pq6~Ð kl͘SJ=6ibX خTRa S-HNt@P -PmjE$B@ls{SxO}y\}==f.|Ņ}}+, \dtw7 r]$ZRғ,Qso%~)*,ޖ[ Qe_lْ 3o+DJN|k]IVg4t==0Moړl6^EPsknfsf)Äuݶz rxv98|gWϢEDQfY 3ᄇ'_({ץb&ӯb+D~u+.JϭlZ?u3:ooq g㟈KqoȟYO\@IWgg1v]YbE) ą)Y6M:::x-`ݐW_+'U?O{ksK;oyKʛokn==9cwS~W>a tv U`u *8bj z.&kkK5Cp?;` )s?KpC zW?}Ts A-/KLab avj{t06 &yC۠Z^ҎOSo3) ܗ|W]uUIirW]X $` `44p <}U~͜RV'abX05q&$Og"!ʏ~:_l n[b /Dv9Aqb2+/]V]P1gdkf:S); >RW-.VBMEP ּX\T6ƒ+CJ@[jz buI0(<0?QrZzxRqT!a S)?;_eW)hP^+'JR`PVLϓETf=J@)JT_oIA, x@#IM'"? 
S)?38jO>ﻰ_a4$!RSW;k.#W& `kU#K-Ti{%GJ]&iN#,|'S)?2 edqe4p[‚%pa`=3J[J]-P>P?$TI?O3j GfoPxe(l,lGSĚ.Ssm1>XVXSe@ ]Tu?O3OuK}app-JV.\DRQ6xgTal$ 38o['păh84H2ۤ~I?O3E+I~48 (Kel&̨E˕}-^yK=*ֶ84Z~k`|!)eޕ -]+it5)Fx q]FNOO3Т^ҏe]OѯPbR GsZ{gcS{|SnA=e`2'%'q ' i )hWRj YKprZ-$ >N2+  __$?L\gh ~+ Vrͭ}m׮]{%V^c ȱw/ߵDz_( 8'ޜdK֭Q9UwH1DVxpW6(>䨅_ZUR(WQ|V*`WeN{Y nO9}s̟$si7}9L~%S|Ӛ8kI߻ R+*›Ϯ=g>>86t'L'yǞx'7oHD#$NsK@b$QI>d@3q )Ld6##L!3܃wm:?@nٗ{wf~`ӚnxxMw'K%z\ϸ)HKhX1{L7_x/r!PS8/{Q5-G12邐+ѵΑl>5wDAϡK9~lԳu6?3HcKnV!41If|99Mu/_~+3z=^[=#m]~W"#ןm|6ϥ) jߨ*;iJWxQMo^EzwHߺɏ~Ïg6|<ɓl~Ï?>|a;'O~Z8jgaWE@/%oGYMzU`ZuajU4aIl=[57^Y?'w9_t|b@i8tK?|~grU薕t1u}VHDjP/)UI{|p~o`Ϟݲ .?ZxYчj@SU4%sj)İ2';{Οy OG f0j8jBR󙿏߂iM}4'w9_V%S}qպiw_O[Hs?S.7F]U*v]{z@('j]gj~]tU 'O?y˖ 8}Nax`6TFikD%ؤGÛ]9b soX#m?@SҐloLI@)˿|%ͦ8?< ;92Rn ;52_'Iy΍&K3S@oP#d=ghOF'/`JѨJEuaV6#}Vl'4بUw:?!pfn9ZX¯jwیyFc>m!uCQ{+MW XV2}mjh>l%U! U"\fР$'c~܁S~?f C )_㹼`BpZӫ׭>IUm?7yjddÚE>Ii{ݷ*;={`@_^~-gei}Xlw~ۚ~{v۴mR4Jשn rW[ФTxHoH ;siwu`qmDUT2I_9xlJ8sXe?3J=6*5ѐ@:A[~&ThMeʄӚѣj {&5( fj Q Tʔlf&કH(+// |@ɾr H°7k,}ÄjuejO$4BT0v&,-5ԥk/ |{Fgſ|y,]ne2D[(Uj Jvۄ2/.fƩۑɬݘJ> mAy[~S /-8.2/862lUnW)Gv3A>ȆiM- fy,pfK>r[柩C=jRVWH? GqلI:s Sj\4u.ʉeƠ‰Jjj))Yu*uGJBöa9[=|WɪPk}^gw8k5 @aX>qXw 8#"[kn Iz7+Q*ۦŽF>Yt߽'u%m;ȟq^ڱBWחua0+q"J5ILB<=-'?[%9I]66[5q`wsFCVjJ? OO 19JGl6[_WIQ.7Z5rP^*~NyӞU6WO!͆V?&vc\~Av2??Cylu)׳}Bt|dՎjMI6.9jR铅ڀJC[h਄SG(Qۙ x2~3*lJ}4S?3[{G{xX+pj-~&qcp[EYyƪ//wH9_.m.`En lryGu"؈"¬tجz%g0Mc8:k41Vr,t &F?r㕇GοUE2DU&*)˿N#CWh$u:]_*V}MKtLJ#sCT.&}j)n *5l|?quQ(Wfw#JmpRN%7Z+n ߠ|]7RVe+)^ךbRqTŤ:W7DfpF  S[x/i Axo^6шB_ Hݰ;:}YO T#~HXĐlֽ *PoHI2n ar. Ub;#+Y(eVNy%:\6e$)Z9tWsfv Qu@ J k~lzm@dB]?h=;-YUJcἓACZTl\IΚJO\E񧫔+ wF Fe}mA 􈂝.zwّ+ԗt֫7: NޭF i 柩ByuU`s=XPÞ:"t _:pT ; Q o@6u<͆5;i ;`*Rшq5VaοitgZq klo!xHw[WB2UNU^GB}²}}aj bvִ󷶅gML)y񱱇cҁ66q f6fl:&? Gd{ H1;GEx ?.0KU'm%G.&()cY)UrlP cnL 0?DGTdPEwC#B[:~b/}C șg?3vRk.uaV&›;,N-+-q_P[8 ($#z mO>UShB\kPJF^QQdQ"[TZj^P (PLq+Mv3T LWma;{'΋(6@,VZ05?dsZtn^GBtFo/E R-X֣~9G-"-l%%K2/mʾCcc;evSr6Ob:S~KDi>ߓ i4iDAJ)qlkOEfgVµyRY,›A=pSNa7m!th YWAʼ@Wx⾠O ߂ )(B*0yP&Rf &{' d%K! 
VR'K(= A-pˇAtiQoa}rjl,:C+7ǁhR?QL[0eVW(,F --p7;sbxS__6%lj*qı'-kQY-o?Da`SS\}'mA9޷Yj k?>f7Zw\ ̦I8*k^$y#{?giR|jN-+\vշNMHu)gz|_9IM-}8}T@'Avu ,u+,J9ѝ٢u_.8JSevMPwdQo-"'2'xNeC:`-D 8CekඐI:{.DێO%?wODSoW_ů֯~W_=A@|;|/䥗>qe!rr},ܑyx9p51/20L{aG\g4 Cv=f㏅IpN`Rg=d_yD iGY*.jdx "VƍΝdž-xoH-#Rk^l?ffʝhlM*ɩ<=E]h׏/> m(Dn#' zM CMǖKT#HPTdaD/Y\58ӵvu ?3yDo&%PQo?2M2qd|X+b1yP\J*9Ce,Ǫ**bdU5ɞlbLBQ)p"j=K(dF=VQ&6yL\-oR ueY Ne=ˤMӞe=]ZvRGPO8S~X&<@?I~JYdYP|;ׁОe\hSUoG3# ~W϶Y`0kY;;uYorhcB섿C:L@]h_|_E!u W,ɎT73|Ca1g8HȖxt{OuRLd[ٮNft `塝rƥ&*i z,&NrY;NrVhu!Pr6h4O+HwRA~3xJ#Vx3%_\[1S3s&!ʑqΟiXAc-|Zwl2tC@k;Fa !dp7K,ٞﺃ>-t'Q!w J ݽlu96(HڮD`ٙCz()lQfe~P{V2Y*r,-)QC̚UHfgF&?Y$?.e}>-9⯶0%Nc*_O]ApSOǢ@q1a!0)G/k+kh9h -15vGF *p5(3HWy.1װԗ86ILCP;N nz+ dqB%T5Fl2t'S*QNԂ8!S*ER)pTߙG\3HsvҿK~P|.߅UE~u1%TbE|'4Dk$ Ȫ,qIS)?mոm):b' E+(I4Ef1إ ,` J1 nRVǃBppz.նl``!EЧS)$]+@4^}W5\qWS/HK2hz",&aPdH">f໅u,]%]Mv$&\ Z YOSZm9fnlÛӔ.R A*\0*^JV&e޻5(J+pz]GtI @/IjnԤ=[)\ w7 *ϓeL6z)(%J?} Ți!MzP(';G7n.'M\SU+[G*@v@UeVB{8GYmq}xjY+-)Zbw pX3H4 &a?J'~z+@_ lj,ZFf9kirħKlZzh>{S>}_{*@6q˖_(CU^v)Q] ~m$}IR)\ L4P`IdОN=Zx^xm+*@Z.BXsVVg?Q ^4_? P& !G+@R(eo%gY6aeϡ%=X wSHA,`@P @KX)|@"5ݍ쟜=zc}e M" @kq2l;ޱdQP֨$S8L|X_.x\O*S9z6%صF 4'zQ̴ aǙ]5Ln>N?Y2 jA/9KHn2pr"k֭0i[gR-2ׯi>~|o'[wP Ȋ!FT@b'2zZm٣n+#XnkN~I4䢉4r*2#jYO-ҳN:MF(P7?]?B- Hd}6)1]%m4&vHQ=4=ˌ /8i#6BMkFiouh䒽Fnzw7ΝߠUXX(ّuu1E-z6hU }X'kF'_W(Fʝ]$-EIqMGjAtdA }2Л V۪Y4޼yxm?;+@Uf<[ TAX3*=~?m3.ZEGr4 i8 iYh~Z"k>4 B@QA崐οd4_kXS4U l/0gZd0rLEزv; 7/j\^藰w}:>N$DJŝmEzstNZ#~Wd{[?7|?<{ S`yH-ٺYO2(__*dޮ]MfU#sqWd+d(Ց" sUx;f{ ~b{0Yׯ u?_vFq$df7XxKeayHEN^tu2̮'ɤ rΨ^;'G)'cvb=e6W ;DX,`u}F^#]8D_a[] !e{'k_ 5?; v=W ;7{\YE{Z 45ů 艞ׅ,E!%-p= W߽dq1d,4;<.=|t>>-͊xnU|ZQ9f@3xMvl~. 
Yoeq'@|e ;eav`683-O ?ޟ3YY/Qb?,+OOg_"|ۗ8'cEߘxdE?~Ybʗ/} p||\xϩ,d8,vA?,u@_ڐe6677 |Y C >L$Y6<O& c wȋElFx%ڻ,,tQ{ dBY8T%גH$bVIP7N2pPoY (P C,Y m(GxixZP˭$ݩDk3HHysVD=QpzhvP>T*tAɶzk녠5<;)qՏD #wYx <-thVO%wJ͖jXHC~k`Y4gjY$AuAh ,͸wG&|NjC ,?ZNVNѩn$Eo#ڹǗ"KȂG71VVâsfȭ|dd!<ia#X鎝HVp8.U8bqk &#J,dugce(W3wU)PQ]˶muHY%IfF^p/K)vX@MJD+ )Vv%\f롚)kܬÄܲSvs2C,e!)ey}lrUeGd!?=@I\o IQԜP,FzǖQSՓɫZmX'xѴ-k~L0}*+ gL{ jZѡ^V[egl#zk%|A$E*/|TѰZ{%lBPHY͎"?^j):Ñ8igA 坮0XouE*2vw^вےK")=M s)=MʹCbϚxݬwXRodݳDz:͕X6&x?3F˷ i~ un|,|+2'󶜦'Щ'1- =0YH/X{;Y0|5#d(fA 6)dM#/ʂ-WNw{A)Ȃof!e",KKv3.KEM+Q3jfKP 96_"8f2)E0!z ,!%! Hd{zea=Ѷ QJNNnq~r4loW[ae%癯چ)ҹ)ry%!Mde%U}!% !?oK"H|$2r-m8]:R$3ːVY2l\ - -9|݅?ZiϬt~F26zK%~8_ _^.gKlTwb+:-JԂr- |+9/$ =`yYxZ 9t?xy,tj'zﮟk~DT{^5"jWOP$Pے](BinoVCasǓ-nuFc>5T/P6W"9Jkdf# E0a{Ye ^^\{`TS{qLP] >$ 9c<*oP|@)fR٦~xYly/zoK1_wȱt݌,RmRfJ U_2{=ϚYll9oL=;fn,g#^0( $٩j٣DM%w*ݱUQ"<'XζA&B d!Y@޷,DQ~I_, @υ{ 9cl)d-yq$/^?`%d3Ҟq*@ j6+I{돲lƲ Uv<:?.63xV+JH0 I780RH91/'``c=\q6QJ[QYrXGuJG]ii"\tb;N6ArMyV=,H,hE dR & d~s-0P:]+Hɔ&ɲxHĂĂ{=>U4h;`( 6r[^:7f2HƘsV+m;,vXdZ\=h*iߐ`rg=۝J%Ƶڣ%$v]}C7 #żp9=GLĸf.3X6t!pDe"p! 4 fUӉFLG=J,Hi{] ff^ \-/LdtJ H10OJ 'ۊǏ {] N -Ne1UuֲIq3S(XFO8ٚM.ZpkTfK%!6\XXg3ޯJ69) "޿ ZaCބB7hS4F 7kol<.qAv^n+kQ+Jc]H NCVZ͇cndOCU**nwB@H,H,HC !&'00ĩ8)ʏYS".h +("깄"IڌB]Qz6Tnn Ypoα+ƬLƨaVQ$A$X"Nd # 4wRVxܼ_A,T** !X[ĂĂ_sv@޿ 2;;R i10LP8+RNX ţGwⲠ:=urmļNM4Ea! 41+sӚwZsg%s BX01$rp5`fa\9x}.(J^YۡacϙO][6C87Ft+XuX؛.-dTK5uNdpg糵7֢켎l6LtDŽovoɚ٦鐸 H@,QfAbAbahhh~i)nI=:88:7/&&Fgv/0 G^?3W4ądǓk#{0ز ;w>s'jf.t+b`}FMq0z<+IoiY)մffOxJ%W*luKs o(  U ,t,S ɽmá)E>g, 6Z3XcC /@6,\A\{^S#l*Z" \_'$y/BqU[' DzrGGpL-A[ǹ,,dOU PFa0Ы[XMV )KV(hIKqXP C9р!9-$D~Y}o_܆at lqS,,6xPH11t,s8 BжÈ}x,@^@0ɂ:Dhfmme68Ac4O&IW/!+_,zfijAbAbAExaNDBoZERb20lA[ YbܾX@m(/! m) P$)Gݨ@j];3Pr67eoԂĂDy AW' m(_q8i88cl h_-4pdmDrPC5Qی&9 _C@D|>a̘pYX@*@$fϤ6NOa>{ŋ:.旖$(-cv0WOrpz Wp/E~`:Pn@, ZqVY}S[;,䋥R܆爳 сTH'D`}{iPwd~[0{) ÜFtm|x))M2hj! 
@^{X$KMѝjL•t gQ ktI3e0巶 rQ.ܛ0w`ِT ũn\;gAbO 7gJ,v1G,PA Q{NuļNv:n U f `;`ҐH;lȻǁl}BXP#> 04ŏk"agM$ՓjTa!>l$χuBcնt6WN^ȵ Soj&uNwODz`oDmnfI@@ O)3] -,F"_@&Z|n7F{UreiU ,| *lo{=J1,Ao,0YIpn$ dee~.n+J#D`8`y&\Uh2;-LO#2*XKYxÇ\΍ڃ>{vtt TGD&"^慝|rA*g0ŢӃPz$+˜X=]6d\H%"6AKCI vF.,`BKYx;.?T;,tqM:'o +{Y-ǰج;,|oYRgT .&KRAYm-n!I,X{dz5A3Rg5zkDB,,$CN41@)՞ֳ  *Hb͂AF5ΎP{U_O+Mh"0&RPE"ܕL4Pu:ˍ=Ubd(IYH?];@c1ξJ i@ʯHڇa ?j|Q=0U vy{ݢp>P# RY,n ZDrT##nP~Q] h,u ,yJ&)+McA U],Xg ,>YHĂ[`L eAb d' qk!Z[LC?~$ Kj"ٸحtV1NRFMJ7߾,( PE,@Ypuu B]<ḠA&/(S 02plR @^(@8|™1כ,Y(P<FܿYb M@(OCd~=wܱ_Lc--kʟ w҆,, , joM&9. iA(T"5Fp'D&1 AHӧ/ PhbI OsBuda:\ |<:͂lXR<@(.PX]\ڃR:޻כa^'BP,?b1][HZvy\0Fsp-ٺ2 ZV[60VQͩT!ËŪPF,.FD!"& # ] eɂVuGM&U r[xV b!Q,{6vy3lxp͢԰$~ENgt m+%H: ȢD1~,d`.z=6^ nn/ LP`Xz;X {dv?99e|tyS(<6;\'X8$SX q`V^@ZSw8#܉Q! 0/zc^ƩuP !`+ap"` 0vF<\)v+<.dG]DpM ; ˷-, Zǫ6b"Kw&XnϾϬ&6yX(u $7~po9|v~"ҟz X(6۩?iH#)wb&vH[ymƓ~ffRW X'-R^YY6FMߕWJ987_& FXm`#=A,E=zHq? y&X w۷K _|pq.pbl(|'KV%Zw]XtASq@ms$qA#^d4a`?Q:yς-^V|kKs￟mVqm\o;ۭb&SҺzݒ)FbPipC3UEQ]QW益N2Oۃf 0p5MW 4fB[-ZMB ֳ cCYcb.[ZZ\i  U$qa3.\_{n׫V 맄 a=ݞw,د&{X6͠6ovXƪݯ\zowJ!mUcק -WxAMqt! 2GR PV.Y>AK4 *1IuKՉT&#h]ϤI w=)<2ꮬC%b,G ycXwf FTQ,LiXt[D: $^|%"R{o, bx/Ν i W \—`ya 1 SjU*B OK8u, "͑qa9$@l|;deEU'xA-Vũy52e  H執gonaayX SCs)Ekn)RMGj*!bDFSVm+rDw$kHI mO.L__IH鴢:P Wz N!VP㘻y(g]Q$fh:n͋% ZN6DJ˭4e՜: -a:uTegY,|Nq8aN[b^X4fu<:(8(\o, l(yw=Dë8q _g(QMET@+GUyOʊ’ӱ;6 U;XT %qhz+Tc\hˉ8zOt:x= 0R9r_5`(YZӬ6dc4 7?؃H'OfXt_0 7&ҴyHuE86 Y1 +۱c"a'i{ynҨE3V4trJ'[~"22iCPoN+R3cIH|@,TN`RW@_x"A`nayī`-P{Gk7+gR ùV"ŇaYPX޿o/bŰolv^ o߾(jsph+I ~˹"߀@%& ,Mk,S}*iaAyŃP+iaAFm .&‚vlX( Ƃ@+a;8,cD@Bͯ0{n//x^ƶ~_:]N 4pH7~`a!S{7#^jj$ӗ.;0H\փ! 
DBX&Hll5  Aa}`vzFn;֑y_ɏI0DEUwͦ֡aaΉc2A,3<lq;(ʉiZbm1Nye5MhV1BCsn3̦(oK["*n23uRkKqVXô#i =}ʖhx&vT0y()H`G®$az X|fp6(]B}joXt9xpx# sPi`tHpussϽ>tLE  aPUAEi-.˒+c|A4 eazpB Erx[~ผ ^hR}`I;p!d-B$ ocƂ3tCy.ocp6@,L6r( #H5ey?FQcafW–)ʂ@H0p ݿ=,a4t঑9K8H~b|pdTv<+~qױץ\͊lnaLdHbED lkS1NdeX%^\ܵ@z0P1hdDD$eRdw\ΈAy:nW5;7K{H(CߖDg< ag,2]ic6u,&n<*k,f;Ipo ;s`c=\Ҵ}H <3ys}9I|,z%Sz*n[0CTDTKym,0ښ%ݱCƍN8F~!bfR:e[H)*sH[nv0}k7v򙄳\Gr}4w-pL ֳ*/ʭ\.gYNb3l-$tmGW%|?voMT5yZD|,N6;E䵭;׮pfN|oz.ZfagxqؕBѕLSΥ;\Vj(@OkXnƂn'ŕb9Gl+@b@5v%֖cpCbRe^r/PJ }l#S'8ÿ31 t%^j3P4X FeLr07 zVNiIs-FKciaMM̎,ȭZ> W9t4z-;cR|=ݛvRf/r2qGX 52@S4kFFjQ_&^Oo?n>|iپ ӧ<Al7=;JVl56l쨚ӜKi[I H' :9݊hI'}L6Դˇ'۴~]<5rՔyNEb8i{N~t-X8c7 rw:l7n;3L[3ҵ>t zWvosxnp欇z[,o>|x_7k<֭kJaٮ֝D:yj(Zcx׏[ߧoXl,J|}tgWwDjƂ#0T/UVvh,9 |,cv# XXQhU*9*f+o@ݪ֝`[/ sZ| \c)0ct8?+XHU,XoVØea-*A |,<t`-:X5MSg% Xu0ڱH$ _G  ǁo,@3g->>\G_IlW%ǒaLN~Az|,xX8X/~⡿ u+o!(0\ވY<~t![`{Apbww,8F08P??#8:'k N'.8X?*fH ۵|,F(=G"-!}f,fgD~jk) 2Or:<`Q'X1 qHx 1Bt.`HdpZX<$kŕrq;=S1M'=GO]:JEĂ oDj[}]dy#̜FBc Znhf5<~;YXȔ#qzAvj);=wӕL@Υz1=;vc{Rhmen%ʵؓJ|>_1O>^ݥuUYb.]EF&|,oxCS' R2iciamK#avqaazh}qpğc?u'7OG.`M 󝫉kO >0TNit~XG94>NiG _'asDЗRpRb9Kyxw2ѢE hׄx#sܵ8|:>*n&E{"uNVEKZ:Ɂ=Q;.iL8pf+ѵn`0IiVeU-1L4y"!Rhd:o,fs= 5>*3'\ I籈HFRH6Jq "v,t~l?_+˭e<)X.oʈp]6bHLj:V V q @ G&wt5HWNAr'L)]ƝxK&w/Y ye.)cbjpPh28 wPv3khs|0i=R"K;٧c&n-JL4l_I#w*fh1Ǐ̇ÉJ'~R1^:wMT;nph._]"SMi_(wy&?gnˇLkW滜Vaa1.c»x3|I&җ2F 3 3$(0[.>:FA2ejVqqN~ =IU[swpҳמ^N} a!oS$@*m-,PX/ՊZ.R|zѨiG<=)J}< Zr  Mvc/Cgah\n[PJl"'Xûa;sw|\6!&i%-,6`}u(Ír'?XjϛwpqAUp8f>12p(cg}3~B,೯>^7?|*җ(},Ṱ_̠Ǟo\ע/R1Sd,k'"#H2([[ccsmIǬU-9c:W3Hp j;v4U[2 }LLf\W-, C }A<:wazqx,`M"Q\9JZq}']GU%EvSKIK( MU)}a?KIl.U3sf9s-M<,/%9'}7IQx8i?A NFVcfp>gbO2䘟ҹLxיy7&*{2h; J=z2+9>: kkZO,Gh5ie'L5 |{ Jq#p;3YȹrF5iݚ(TYM}Gmbe_iN'iYf    V;4޿83P]~{(XpG5\WD^?~hj|ZKfq>- b_'~f- +wI qV?A [啕yV`S;DzMh,x%hȬ-|LpԳ&ҳ 8R$cDۢ{SZ>-L kΫ.e~$/Nrܣ_x>n'bzcy;ZuՇι-u@9sݽj1N1\ *ۺjҎ_`s٢[Q&% [p"ҽ#_aGf0-浒χaS!'[*9}(܏XTAcڴyګEJbƧx(vi*|"{㢊WMLQ0cCxߞOJXDL~!I6J2;iQ b,IB4ϧZ @q,⎳P SZnssN4!T[4ձl?ʤP<~ݱc1}pclLD /_~wбIDP7 b4vJڎt^N`GٟCa\H<˧JH %T7*eGC ̷Mz Db-C<,IP,'{bӹEA^H p|;Z&gy WMyqO>;QzT߂W` 
X+4-~OLiaJ 7HM_t j\VU-.yW̷pO%dHW41WA{&8DjTSf;2Hڇ}m#e }@Uu Ra۫Wz)~G"γBVHU872|N#leWv;?B#[[="z8k]~1>1=Z^T(>-({SInګ$8Vj#Nf\θL-a8XM5:KOYaJ 70Fl` !-0Q[y$<JJ$'9pխPОQ1r8$j"+~|)sҨmdxpttPcrh N1r" gWޞ9񫲜CE >M3Y38,U!Z1:&ZgNR@ Вק iCZ1IGڟkeBZHt 8±-9M# SZn-S G>e/M$d#V&zQo Zٶ)cphOHRUo &#U^gW VY' vhR8 ֝ $N/7|cgxgxP~Taȁ+#+:VX2B2wgH`xPa[7U_ƁyQVgf9f?Ɔ]- 奇cqk(2JNoTZ”y:(Ze&ST/ Vg>=MuJNѯz3h b#RPռ6bl ءK%ϲ@@NG;c* xf!cƛ~P[x$1ӿ#οA pY]^6Zg r&hڂ-Wz<8o ]j'D 8?;_}z7BA҂7o ,WzZD 5Kbb~/e N}CߠFzS-X=([ן`}ŢZP;oZhZ-LD&(+YQaҰ 'h~wBz 6>I Ϯ!/teQc 0-gp=4eQ@-gp~z4eQPFZhID3C(cˢᆳ㻣| 8ψ.Gk$#'C \\-:~~wZZZRh~xqF-ht@-yM Z@'jpF-hAN>Gsz-`oh@  h@ Ԁ7h@ @ V7-\!3~U i,gZ_-@ Z@ Z@ Z@ Z@ Z`*U[Xy _----@ LPZ!a9r6Z_-@ ZZh-h-v'a%ο h'4E 糝hOpϭu ^|h0APkk}~@ ?K-`9lZ'j_.W]\ZHz%l l~y@ dZ:էSZkU 6Fg:ht0-dygϪZj-:}-@$gBYHS%h@ ?2H"Z8Orlv.=q mO"PΪrZLqef6TN*uRGiE(K2+Yjm҂.X 喚4s_qX'IQilWAhHt;ZvF'$})7kz+V(z_?ZZHȲU.KkeZo$׳.bS*=*b1z!ԯH¥śe*!&s3&-Xiv=VVM5aS\ kkab9mJVaM(b@ m:bkֱb"p+gDR\ҥ'dhwf=ge-f| V}ҵ62wR~<-`*Bw4ROF{yn҂(-VRfpHrkkZ+ ,]Ieh`َ"sՒK,[;|{@.+\jqxt]A%.s9]֬'ݺ;sJ Mmݸ36A ~he{zؘ1/B6OkF-v~>t ~_.Ϻbs$R!qNum/-l~躝Ju';\wv.}qjw>w=$w:O.,ЙC 7-D ZD#|y@(;Vf F>ק'"i7Nޞx\MXrnR/Z>lNZp4koy0k[L$3=kv.U"h;x p;vKUeE»4w3d6 B =h2\̗4RfmPZ,_B6dVł֟$"v0 -?ўtt}OIuLMgO9,~wu"pr%\nj]]Ds9Ef-6_'2,'b&Ԭp K3\(\+S@EBI h^Z-RJ{nW5%AmM릧iڴJ"Ru[RqچȎ i4 h )._iZ(QHAPjecjA4Lځx,p$-nqhj1RݚJ)2vAD唤~D-0 X%h1O3if‘ih$qj]!Mڨq&S%:mmŻ\)C GR?R ?S'k6%rY6- G,{2u_9ԥTHZӣ7TZ@ J p-> **dyHd e,8tى *Dўb%AY3ZiMb2m=#h h&뵔J&LnGgsZNV5VyiSu˻h$$oR45խ{H%9Ҏ,oG"DM=v[RZpκղ^$k%_vQDɑ'*O`vW+ZtUTlQqGmJQ'ɛYZ_MP XF귮K #6`9J<>\p(9rBu% T~:.k)L*iBBC\0Ne>F;фݻXUdHX L(u ljZ \-գq2SH*7aի jZh(P6B) 6xBn'f)I}3.B8_J]2tEnoT yq/sno8^+\vjt@t t.*8U-vInΝnѽBiD:>.(sw룡 4klͫFQR=Jƅ8kHw._vk1=K48.gbѻ #)~(9e{pJ8jpaarҮ]o7?GS]nK>rXBڤqL=$NsW&u:?}WabAdeK%y݌(2niPoc v'-D-8E}m}fHmSؗI$+-2&w@ Ђ=R%ٙHQa(K%u􏒦FI}<_SK*ծɧju:dC̀VVjBvBCKe&닱q%j4]hY/iEꐳ"mti!eoL"S%ҵ ?# OUe-4h%϶ѬSpr6c1^p0t?=:Ђnͳ$f{H* DP{|N ݻ>œ#q JqUMT"@DsGDjk-$`)$dW 8U{<)breHNj^|4K:^*JuqD+EфڨBmٙbȹ͵PNխXX)i±[JvjMʮl -hᴐI !g<;PSͫ|\%{0ӯŨBz-QUJtO{-\)oAl~q-7-iD0}[!ũtC W#)+Tcz'sz1ZN2-\L-hOk#HYI$jJ"QLs}z$[ 
W܀A Z;-.hkX l~'5ۭ SGD#áڐ,o/kh_iB-\|-s1^t~;aGKV(7#[8#PmclFw@ j¤a{հe槊Zœ6q aY,E+{Q[@~-{;r6-h6`68w Sp3vM@!i Gmvn-ŰVQ8E,MW-vBp_n~]?& tlx|"oWSF =nqt.6L*hZ AL|_4ER}ܤHIsN>pHքiN&KYhZ4IKȇqֆ]Y)ԨӮvęx_M I$iMZ*C4vʭd6jZ` LO%lv!ٽHL `cmh$Gu D Pm/3+./ /|JQ,ӣ˫:KK+ݓuK% ~_vkhI|}16qdlER/KŠKjMz?jڜul\-%|V p/נxFOE Vav& ++KKۓO [lOrN|v~W;}zV7< T[[\ZBhI$|maIMO{Yڭz-Lno~|ZY}95eTTӼ5N}YZXxgy>gQ>eyNX.HGhwDjZ79sF ݓ+`R1ٽIpt+;J 䍩qz2^&%yˇƧA8Yh-i--|I H"@/[H|8>  ʗݤaH?}wj0o?/Wv/Dž_TH =ӂhw(>_U*$:noƧOZ~ynf7=LQ --\D-fւ'Dz+//C ͫ_#ʥ,Rӥ0-Fs'05LvJyFOCIP0ް3*{VTE H"/J"yT'aKfcS,3پg7{7zb??tB<_=>>R<>GkA91خ #{}qƮ[(нv0<k@iu-xɭ,?%iQ+K'Zhx})G/mِD Z[H s=)x'o^~7dztCJ5t_iB>?@ (Z`_ =燛>SܺN-&W Ϫ|l} (hhTOZZr6hZZ//hh!chAZ[lG O*l -@ @ 2hZT༅3A^C҂yҽC[^5+1u?.g#rZ0oqS45Z۴È[mhhAX-ҺLUܢF>.Zm4Z.`xb~O mZ@&.qF-<]b8[_6=DhsZwf@6˫ =_9w٫71h7VvgO$ Kch7] Š:|N+ uGvy{wg'(ȺŲT;PZxII$q~8vqqQҹ8#}juRABi$8iCn}>嗟.F^oey>e2-pqw'mU ?kI 3NH1]{o'yܘE﫰>``>cY pJ'ia'2)G@#u|Cƀ b wnQեR(.r}[;Nq8;&ZPolNau kւ>>ft LL9=g D ƚJ'鏙mv>=1ٟ mN-c/͍kɵ1;|Lʨll>|'j=\^|-41ZH{#SC>xts繼׽Mv/mCӝЏ Vc5P Jq]pUbُ$l6~AH|طAv@dV)ah3vx(~3j8H; ,zZ𠝌,ùl6L+AGm6`c4S3 go`9ERZmo"X`R a "W4*v&~4"9[J׋RWk)kp0Zyoo(si~9B^tVXl(Rfw] o9U_ ǯ`Sk =PPap(˃vFٚ0X~؀M/vup߸T7ב ݃i2_+/P-%rIl>/V:hU{[QQJ&EzbH~H'=n/t r] {ۭIH(,-Xh&-@?tlX U̕kкW@5L8/ie;[bƂ)Q=6+Jk' T6Jʹ\U{٩QqtUitښň \IJ62?db/=Uミ;?BjA$XV-j᫕R4lӌ];6Nݾ6%XB?w`XzkкW;X}-J$)}ŋv<ݛ\"ˣ`H%))>ɔ"^ ޹Uh&"!%a48V/{!v7nUO P'kH|_a [$Zœ]}L-<&%ԙ-MǺ},PB?ҘH^ a!TƽT+X0X)g-AtA$ykL>V"k+QT)N0@ ksO𘙎Py͹4&ժX? 1XH{4CX-Kԣ) C:̔P;0)g!uÂ(QQ<嚍R*)s׀G, `i zd&Z6'%.1p3@Ept`԰9&KBq=f`V,B`]9.$f ^ 8 N p z$F5Aix1D֍}ٺPRݩĂgo8 dabu%J4{f8uǘr9])W "r,k$)Y-X8'SXV|PXW4hSdʶ14hvQ(Pf nU;4Xj  O1f `XX@[9j1'(]ǂ^ "0--P‚znw)fx5R*4Ġ+Rxq.dӪkR8 @O oO` -qs)ł9ʙ9T9vN<;- T[88 d+? 
Tu/8,PzDkB,sdL [[7_g,,DB7j5IHηV^>_ [~IjmwI*v-}<ޞ@w_ *@<~:X5S0C }L)DPLş\,p}n P=/Gո)P AT2Jk=$,"1blqt)Us&cw 'r􉿎F­]_pIX؃'$Z)hP cᅽ cX;*8-[΍c,p iAzeG0v11F}4 B,{7M}ׇFaDk8@nץI'I;@9 XcXcl=C&C}sה~/ b ErH$b}5ܣ t~T[ glT+%&Xݦ>,ܻWQ0u` 6Ie~5;OSu=\mpNIg"aO' ڪmcὅ73o9As^-_G}<_#02wl-)` 1(w:q7ձ`C`b~m>asCp[#Ԝ+Xc^=xpPKTJ$g:u9$NP&.D=z t1UujfZ#iBt@[Qblqs 9ϬCl?vw,<ӯ?= -tp1jrUoB@ZjCa .[k$f#a6ԜuÝZD*7 /& h [-^X>؂XsҝG&t.잍&B4;B5/n|fyNZOhz]pXX\)Ir+HycsU)*jBy\1H 5X`aa=?q]t һg k}Ϸ`ozGABݹ֭ND RWk)kp &}=DRBAc/b+岙8HB`]42 H knJJU;MxqӻʫZӌ?_baa--oRi}g zأgXx4tymˆy5\+J@b5HQ^4M,xG{tTf([YN7׋+h 󼱅,WګMDm7UpƂZ,,7/`,,N] %}Ѫ@S4咙TJG!,+%^#ełI zWFp'*~v,ժ J?WІ1 lH$؄p ޽ ׮]mHJ.mUEÍCj2&Z N9Kl5*@ۗյZTڨDX R ca,5w!:>{4ti-Y@>DzGҵ|=er[X V0`5lWU sUEePL蔔NUl+<+kMU,,럜/pOftd,X0u_QU wikum  WU.QX(3Vk` 4IJ%'$= z: V 5ݯa,`aa,}EXӏON½߲k@abX@RʺIPJj4 py.#KBq= ' vimFnz=L "\0|ɖ6pDP +,,sFTOB, wK¡W؉X3Հ_)pJiR BLt湤Q(+Z7?`7s#J.g+Xc:xQT>Bc\ ccPXX!+־{1*|Xynw)].9"}Xb3N `"J6+h}Uc^t}Z+qP$ 5D06˗Ha wg;Xh{<‚}@j, 5z4bc9[TQ, 7X&n;\5. XHyѠ>&rxw?RWTI= ,悩n4uK@1JcyhZ׳> Y;mcܰGSlXK+FKo/ѠN$tqn}5Bũ_rnr u? pjׄP򴿱(]?Hjy圐k tY2kfS0sԄla붒ȰȂ%X1LA  h*e^qX[06Ե1wT{X|{ b4yX 2P/+r~GV?Ns0[_cdf)2i3`aukGtX˳d[K'r$)jd\JӚn+I&CR;P`EWRre Z׽m1_&K[[cG],uͮ>S(=ǂNP,#jr"}GWw \o)ffl;ŒO&aӥ9AI/ b4wܿE`]6'?rG5"/4 [ПgrM:mqw~:sp2}fqͰc͌G]~륀y=}HZ 25ߜ"!nlg-ޛw"CnðF^a;|aW(H/gqX,EQ}c1cWqdl!GJ992Ӵ9sqX.4El(;iZn@R}XKߋff&uGNo7-})~.9gXʃQLLqmm)BMVrH/W~qfA2g#%0A3MQ!+.旜=1% I1MS/.IyPFpϱ i5`8)Q :D <6#24ϒU!Iu&v$z@^g4]YMZ^^Kts&Izj3Jo7-})Xm,xOr xLIgdArY2jLwX8b4f.CEW=}h1%Wдć7\>d9~֮}֚c[׽NO? KL<ՉIY`2]}簴P@ /?+E'<o/,9c]ЋgI81:@#@w3 z5Xxy<U!T_21PE؉3%z 1At{:6{bK1cc:K&F(ñb\>-v>uŸ{@(!3*/Z&-Gfn)va? 
<.W/O$ؕ<4h356Nk4 #K Rn»s,jq~n3m(pdߝHB0}/)U ZŸW?Ð%qJj l&h__Ⱦd<0M&J|yd5# T,Hɵ#{$ILb=BV~N؜7N@ɼ[}4&[a?LKybviҫJhjju-!fcMdÉj7\@#I!bH.߳DeraـycI-'Eq2輝y8c+Zp :?u&b `nѤ\ך /fDjߤ/,y5 .S"ަH$L-[Iqw?*Xłfɥpz&ـveaΤɴ1s dˇ+<΂z)3,uUb="{Jy #3+u^aKT(X~ܼN&jBzJ&Kƀ\#h2V`2 ۜ"ZQpa{-}‹-Бx!_.LYt#UzV'Az,ͅY.fQ":ȕ;٨ %FGa 7Ʃn`atC_'_j^U!+X .+d.qOw8MJec - `כ \{{.a&P#BjghHn1㎱Crl\Wabh0AC}Q\g"p/ZX)QCKE@}{H+riS=B2 sNo@k2Ճ*oy/m l.ofM)q'pu{߼yQODŸZ,@CA-}ao?]rc޴\x-gTxA oJj^?Cc"dtFzXw<ʡ#Q"! #|Gj|^MGUqw3pisPl hm2YB_3R@;d zh@#XqӴ~]ޤ .9]H5GCxX ~H:7ٳo~%*=}uZ6+ތ@ԃ0'e*B>Dˡ#YAhH7? 4-}o9l$侁Rzznhrۡrږ"C,,`U4R3[wz,,67AAy?X*j}`c í3H! bzA,t.0*}g@A?^V6c,`aa,`=-cV^ ET,,,uV|uc c a,`aa,`Fφŕt6b cw'߅1:GQ;g mocd=?i3ODa0bQ,}]OahL0a mv!_!k ?O~:j_|ŋӋ}~:zgz6c}ڷe:bGE@L93!pz0P@Ϡ<+,XH˅tVSOty%B@c Q,zt$kL,‚V h cV ݽpXV L;7%֘ `!PIba=:B]7,|s@2B3'zC*+ J蜯#= Q(Pp>oCY"$ވd٠U.γ z  ` hcb!P bv|o!?){srwon?p ]cQ(P pf{ m,^?bqoCP5\~#͜{A? B@& !|1Il?b`W;rhS_?YzUU'5A;-SəT. R3&qL<9N|h#c!P iPZ,Ӳ P}tS[Ob!n^^}}C%Y\}ի7;V\/ǟU+SI =VÃ畎/+ð-X8 V W ñPBӿfN(%?=~ObSy=}ZM\=-9bͯN!.:~: & p|x<'/r/ODRYj˧WZ_>}YLXڿjO彻\}W "A R׻x3,L :+xOB0&`JZA՚B8-CXwlbV.kjg|<6 :^#.|qp43UIz [#ubב&!,81K%A90)vJbLiwp-:c!> _^&>zu kc++[Cr-|·2sx>@ڇ> ñ`2atZ,Z#`av?Xbh ? 
`P- Mv*i>s̓W;V5sc_l._̾l@1FbQm5N,_XX1qbtt4[?U1guV}ǰH>}[?j!dN Pn\9j!@){ppTxO{Pe^rS}Lt޸wW]F>gA͘V[ /܈˺iR"a62Rܰ,S&`Re&!0fa rd*fmckF'ZgǰLǓf^Fبٝ: ݸMb(,CW oJnRa1kY\)S"79|Z,UM}kg8ucr E&5*1L#Rw-l)X 0f*u ߇\S7aXixCm6Vi^(‘SB>*eD[tco[8^Ke9oAy/T%h7]Ҹz-!ͺe=3Srb&n =z| P}Hz' 5g|^S1 3/B{cah9\.%#JHg[b|ZpEn򹾠#qWǣp,ǓŨfPע̧XmBPj :"݁N[8hr!'5k|[ Ƅ bi`kls/,dr7Ob1om3{ ZzΝgO5;}+  80#[CZu?7fM˘)`A-\XS\_-|A"&PUh-t}jS[Ӧِ8K EoqeIGմZUc@x-<,vlg5mmp;' `!\(ћ'H Jv.8=qC_UY>;jG/pgceNha7Ze'Pn-ɼ,碜F6&2F+Ƭwi\mT |?Ͳ2igb_GX' ϩ}˥e c3t'-."b,TnyI = r3)lڇ6 5j;1,v !Ui09EY-,D*v.&0*ѡYS5v} X& 93XY}F̦;-rIlGVDtr$/q!DFfr#'#mG1nخ+ZEjjn ñsO>f_B>)!T׎Xm<ٖL٥2aDz^,&S2hN6[E2hXl)#Ƀ*㷣~ºgqQa@6ɼM,y\e_}\~qskxK3,Zi w=~rZ,@  Īe+Waǻ,Eת}w9T,Zywf=hWz+j:31n4߿ fQx-4)K}< >c{, E 36[\iXYӈ(D,6[ZHa"6C$50,vw&7fwU=B6hŃ+| ϥW$cC窼r,ɦ~\JӶ*K̴ z\![ vܿ`AkF-5㔥x>xPev2EhR[j-.Y0J#󑖦A26D9˾x;w š'X-c Gbi3^@ɹ/L%Ŗ\˦4Թb=NVvJ/tJA 1% 0|r+ ]Yϐʗ1j7i m@̴|ڤx>x2G߉lN6QqحE,+b `H],cl7·lb ͸MhZ 7My6r;h[;Ww\XXi{]0yw996`Vg5)YvSkyp(b<G/}|,iZ0׎Z~:fEQ8qEDNֺvDam Qn0Ef5阰MQ1ָh6h04d0L|XKK;n+lx 2.f t_dp]6ju,6cr4߻A;zG]01;nk"`ߜsy x>xM^DĹ)y{-fHs%nb'2Ehv X#egn{-vmPE1#UY v/8,vr]hXŜT-cb'FOP>LL!h%hX㋘ΥIަLlaˁ:v/eq!( Z3]BйO#wȹ͠_bAZ;Im]l7.W8zZ0Ք܏tzj!`/"{EbDmjh 9_c?˹"*W]|᛺a-خ:TrxXx>`zW,t~MO.ŦEsd`jj(+ClPtugnB1\J&- 0Xkbw?.1}K;BGҵ ]/ØRXaicPjΦ?X-L> 4,pFF5XV  k@v,5VZ`!Pik߆SɰЗpTH)J  Ŧ@E:ZZM'?gAω':ZZ}ޙtf;Slw麿3 MO_?  @o4   PW-y+/C,@AA,< [HT҆- PP?8[az nN7/f3-mXbivVA:@,@AA,y ϟON^ue._K PP [.v}>y}yNNtW`ʝH,@AA,@oa箝O_;B0FGvkkE 8.((0S8[瓮ן>XI۷ZMWVv_NPPP| 6̭Xp,k;#t:6 =Y{ur*,dPX5sRviq - qm6i{dǢN0Y7AR"ЭsԒun] ibۚJ7ՂҒ% K';7‚ud1 h6k1 Rul =-r N'vJ|`U: j wb|S`]Ub `XX ^ 4MV2 ч m(upe3;[t]aA-r/U 8R;AP 03Z:22؂ rD  8٭+{z'uE\T: j wL[Fܵ3q?  
b~C թ/Q&l¶!s6ܤY}C@SN^h'σ5/d68⏪b3\f|2hI~Kܸ&?z8.S0/&G+^EOl WfO9A`04Y]=w(b}eLV#\Ez]Z0mW/LRJrW ~͸ y B$~}ͮK`Ac}`J-`GAZ!}2TRiFir\Q*7OX@`_-GU cᇁ$(ZK9%_ `AfXjڳ4SYjF{#(N%=޾vg|2 G5blA*1DA=f: bB X:{rͱ0Crϓ;&aY{iMk$)AHyam/,o%_=C]_Cki)]^r$%|!XV5c`xf z\o@Νcu{%,t}=y887SQ@}g,lXl@KR=hq`AcaBi6AfUt?frXhVSGYqӕce{_q;~} 9TA,|o,,bG^ɇù鋲Z/]Sq5)\ 4l$?pn/8j֌Fcl}#\DʫA5 2 "]=Xt[gu/u]"H4P |+SMYs0.젊 X׈T?..@z^/zo4kFMyiC 8qrnRsy__K9>?ri=ܺ=jݩDX93aJzecgMcc/vRkM k5x2W62Z~j4%յGA˚D170?:tB=3JЕ{vT[G ^X| sjq&ShAHa%u;&?V$N~Q{/HP ]4нBgenڌzcqr=qkf4@OMR5#@<\:'YcU6ևn: @qlg闝~=T wzr>A~AB=w+`܂:Uއ򢶾ʾq=M)3JaGN9CA,X(|675Z{$'8_N~ G\RDcMzx$4S58UڣN~NX 7ҙ!x5%> -Z}= A =O6dbmp [7 Tsi =D~",\rd_,/v N#9|T)SqTyiyR,烶'S``A}OT``aϕ۞g D cgo*U,4ɶHu``!\Bzr}1_P$rj˅O-um``!{~X/[ƻtɶ(H\],,^Yý{n |# 'ms|`r:"XqGf`3 jjٶJ, gB_hiqJ 0JɖKYLl^97)*,ٶ4pxeA,!ݽwoS/n.l)m]:S, lJ`[ %`gϖX_/NCفhH*eYuY^龞4X -gXݻ}s/4wyt`xx`_m |wT@"XXvkXڶ"ĺuՠZ+j!F>stT0+Rq4Cf~BI"c~=`2qnv*eafOr=ccctZ %:&VV&:!,JBD±Nj{|qo;j VpGzGϝJ:IR .OZҵh]T^X_L5X#Fsy6 2vv wnF4/r$߮,_l}-}/dJR'G{'M&eu~D"X(<{ v޽?*ޠU )% tkdztWdwrsʥsZ thdNKbo51(,<_oh+d D`Nz-gYyݻ,VX/;+T8׻E:.:р?==rez >G5p+ޖs놅X ;χ o9_3QdBJ 7`0VUD#pQnTPyZy4:&珥4US\,T^m,"-;<,5_m?d5 ]h)<5AW{RԈ9F-NNTYr&o>`A;1|xr΃ܹd_FFG/fEQ׌^522;11~y`Ź. _7{n3 `u敶Jc꫶_ʂSw#Fğfba$2MLȂxybs`>/$"Xؼ³g?W䨊(EZ]o(\wlq\X_˂u⟈`a3X[OM*+_P,UTG@:Sy=M5 `mdqAUMζ͙j"X?gl͹"Xʇ_,,BţXDzY XJ D  D  D  DP',_V~< , @D @D-,,znj D𰼈'XX g-<^e``sYQ(e@ f[%˲,BCY-Ս M@ y,BcOCu@T,6U9m o9 ڑ[DTA;Z,,,lyh@ D  @ăX"ڰ D @`E @@ @@ @@ @@ @@ @@ @@ |*ScZWѤ D0'D`!gB l^*S Dֳ.F<>15">`n5W/--n. xZc%:,FڗUP@ `&Xp/xVO:θ\,D<Yz`*6X=g>4Ts`!(7RN;X,|~{;;sA# C3_"X*B 0t(t&<" ?j?|`ikf-^4,gi,,6X.o/ߛծYx]گ`alЌ,,Qȅӛ,4'}{uw%U }u(~N`°P,Ȏ|$2{в(̙ ِ4=xg ;T ll,X , JٽkY ɸM|AՔU, ,{YUQ~4xۍq0t B|wY\ߗRyKXlǐ6,B, d:BawNowy󼔅@&s@< `$Ðe(^LtYwvܑ|:S=2RyO:_D"X5^|>*! 
YB`a Y0UI< jWKKM&Β !zVD*4J _!wXHuMڷ[Tv]GcFv:\S^tr#9**ѩ\*亙jKhK;v9<4B+ƒO~kn#}K p+.'./-`7sx cZ.N29y2 c J{awJY<X@;I*(/ch'Z~ / B`X@`, RB`, X ,  B`,_`!,Ck, V@{#X heX@,r`, n9 <6)e[c,­faj͜o-M`, n5 1Rnme v 7yhF'z$Ds -0Ē;_,+L:vurYAc~X c2ybDF[vqTwH~,栣tm3f!fYb( ;JhYwn8Bj*tt 6ܘšG;t޳ `£:* 'rsd;iF 纙||d3fݥ(X 7ʯp<]t4Y`۴˯7fqspDw cX \:m7ȫ,TlcyRse5EJEuf"f(l,a*X )N=V=mQpkp\C#4dɞ/ Qh5Y@,"R*wg/wxIUOت7H+lU0h5Y@,`9%fj< 's%A+B`77KW`hapQ?ZhpĬ``!ͱ ʾ-}nʹز2}, s,ϺMc˛7h`~vZgA*wkz}\[T~Ζylb) xg-%YKF^05}66|V=)?(YчhH`!ex2<ɢR k֌=BbZ@MX!vyh'wh'/<3+yEtWζ`x] ]D , +0 X,R$A`!X@,\D B`, X ,  B`aI ,  B`, X , BhX@, I2oG`!pKXKr.ގB` `!_5 `!KXgX@,2o?X `!S`a2U ‹V è"ahbi6挊Y&`,z,XC֏:CAKUp;َV<Bdgyrxd:"&H+péz~wV`,x,+og=d3bA9Q7,1Uu3#- .X  `x|)0+ v1vr B|'Kv'X(j%FfЭջN\n85-0 +XlcMXX~VT c0|0X­q0۾C,{͜RՓ4mdEnYtlT)ŠL$,5,UaSβ28vؙ*}?ԋ."Z\! /m㘅)}[ [S `aC`, c6{=Y8lw=AnX6p&X + `xAvh?+m[ XxPM%z鴯XH~Q~alɲ4yPXXX2<$eͲBpI G + |}JX@h `kem'n۴˯7L3J|0,"I,UBX %䧝 -JiV0QlN+N:]5*g4ZX`w3ͫ-rƂn 2~h~év6Kjﱯr>?=X@} ,b݊GuRhՓ4' 6*h>KE$ޫX- C l]A[oAiK.>mmN߻|Y -g`,0\`G#C xζ2#Anom90Z򣴖M/9]nPE ,zӫM9:Oߪbn8ڕ \QeΣ{-mkm~lKB`,B~ <~Ǐӏ2, n# ;OWyϪ'  `3?fNbޠ}Ғ XX;tJ! oeɋ;o2wY Xx씢7_xY<.ԯ$nHBo@:}ZpY1{}; !>yN~z ϟ|Y%#"; !;XãG=z@{;"sJq\eQq?[?[VFK$"/$͘ga , -aWrLaH^bqTGjsN%k%<[Q #/ga~ ɓ'/?]2SΟ\B Ά;<|β 2{QxKx|a*U=Is,\r`fB,N$zQk,|?w'ܩJ8w+vm) ۯ;X _xq aqҋ_ ^,28vؙjtEB,pr^̂಍`> + X j ( LdŅ,Iy YAl{=c!>}6ca%>*٧ô;go‚H;v}cpk$?g;&X `a~?gw:p@TQT1W7cA?^Ă{0da`C,dg?X_ߐ͏ ď;Py+vԗ5 Gց&G]|L-e? `)Z- [h@f_R{阔݊1B+qF4]#ycn- ?KwXbӃ B)Lсe7A2Ni;-=T`ν ;/x+'0r[զ[5,yn # mR8 qJ)_vd!߽=\)и5gen>,$eYpRBrSd!YhKR[CGG/ ЭQUHZe YCh)M! I,~º,+ ݩ顆驤? 
Ba/-YhSd= XdA & j P( pF2UYYWd,Ȃ,dËȂ,L͌\Me ŗY*Ie ]Ddak 1ؒˆ,BO,\*dkDa5@dY@dY@dY@dY@, , , , , , Ȃ, , , , , , MdY@2=34+PdY@Z@dY@dY@6v , , @fge8 ,d!d8 !lq!<3q2PX|) up~xTdlDYVCmdz7毡& MCX/d,Tva, DYYO!@JYNO=G$~to,PBe' Գ_gĕ., fgkY+]n{[EV-yiY&¹, б}<B@|7w7pg3@q,\ܸ1BYH_ 86&'3MY۶D) =΂BV% kˋ,[a0ӜcwM.ۼ<"$% V,7e!?oeŹ|-,BV^mƷK_vCdl+\7e7ue s >Ahp-Q cɖC16v,@xK~:$R^/_6U;ٙTefw:7,9ki8V'HE 1@Ee2z^3Grf r}9$D6r؞h3Aճ&%PT^H90ss1İ*hj(`nv5# f'ӣH:&In6z*9x&Y9xU%JQ~Fual)EO;s "O3>UCW)W/◼<A #}?$j= -@x,ՒոJ퉰I6iI6eҊy?dJm'>9I-OQ'0oiaom<I# vq3=[/^6f14ۭlt۝(hiֳm84hjfzM_SVퟲGvFe/(piVp%@ V`r] dғsC9q+zMZS$9cݚP}Pu>g2ȗhhX'#xP!~ D-\Z[ݖiv@ V`rG8z}\? ە>IB*dTbO<[t=t6ɒ,[E.hdsIč/8J|Bh —V!ZZ*ӂ$Kr(+,v =>ˢLv _F o=L5)LpO@uvg1H◽@heSH  h\+U&RۮKW'N GДhy!<2[PQ%~ D> +BV jGi*jnΫC/C7 aeTώBZ@ WC P+կHBd]p˨S7k[,[ھ9T&=(*=ⳣ +k"!`U9[#W$I9ZzY7H#m/1/S83A6NOE'_9_V 5Fۡ7Dn-zA?9nz$\:cw %/@*.{/hj|`\P-[kد{T^nWs5}NQ-m_ϩ* A2UY~i{*hث] r? <[._Y[mU!c$#s5V5#R`#"=_j=T%K/.աqSGZ@ ʵ* $h{:n R1M(6f.Ejגc;vEgu!fLJHh\xtŝjgTQ`ľ6-b~H V~3` /AT& 5>ǰjbg`Kϰ$O9[(_Y( V_W#ޱ#zIENDB`docker-1.10.3/docs/admin/b2d_volume_images/gparted2.png000066400000000000000000002136111267010174400226720ustar00rootroot00000000000000PNG  IHDR >4#PLTE򪦞Jiͦ[[\^^^鋇ZZY\w`{𸸸jjjHHH¾??>331fW000mmlлԹþUVVUWS$#",,+(('-664ghgMLKqqo}}{<<;ϖbb` CCBeecRRQEEDYXVޒxwuzzxuus:99@A@OON}ug]]Zzm777пx@SX98=?xvrEW޶^=gʭFdc=S˳spMFM5c]镍W̴zqdMi* ̨)$95eO,]e><ռ+s/lb_kIb^P`2'G}b>Ȟϗ=Z_`]9MItҷ$ǁ99XҾ[ɔ@@b\|}"ЮuտAc+Op"s7DcCt^[쐡ADIDATxZ_hzC"c:!…Zz/J II!q)(ԉUgj9փspYB 6*:NTqp߷qzjfr{v}jNqC?e8p '7suF9$! .Ov"ϴh 峟͂z=9SrKxUՐ0d6WĢְ %Lj[ŨθN0Dh<#VD?wGVzkG>>nlK;uwh~gy5K݄|t j n8co\_[~cjͦE-)*v<ǧ^*cVu!=Tbeߪ0'eڝaA׀vF}Tl/<, |ioA`UyDoV_nÿM G%;/mJG\U^ DQ`z7.c&up0 N%I& ^"PXg )"!p"EbynO/b_=igTeQc uSѐ hҒoOa*s<$dw5ub<+Z E_s$#~fE _c Q}t,J! 
i԰.,cOэXΗs^,oxwjOwO  ׮A"gg*Ytrɭܹt;8jmm.o֧k!Et.=;ِg/5r^ ,eZ'JPe>cIK8D7 hzS?K< h?@)ߌ?QʧJlM&۟2Opxԧwg&d# %|}ge?mY% >kڛ3#>Jo_z`JoO&x+wahyҿ L&U 6p~[33/}w.f2k~l n~@PXmʅp5,6 PCmDԼ."XIVDHR<Mr.T+ Ѿ1ocP'{Uf5^ s_[עBOH8 mzOcYX.ٓ*eX|ѿ\RCS@R"rBL?2UU 1[U|,5VGJ *8}w.%yL祍 P_2X@ȸ^{|.4w߽:>σf BKV>%V dB+2C!54Sf;?\ɼL| }*]@eѾoeߛ;\Swh{h=?.-U~!8FGffUx{n^n @oﶓ^C@-†>]#sQpHD;gg$3fefUFU奙g.b >-}p\F0 }w. \z%'oU kmp}}!h} ' PCBHDTCQqR&<EaڬJ1J#$G"/?I,OA& fQb|}Z-Dn o>6B#sde9~X&I9\!#wE=9ioZUkCxu#q’(~YlL\C}(3/B0e.˫© @@n6r4{A6s&a&\1sas\Q fS$|]eκ*|e2. FA ?{"#Т,p0b-t`jub(4U4 Ap.Yye(t!6EIj]?n3{X.8T1y߽V./|F[ga 2.a*(˿z`CCe3oZ;?Ǟ.V.LWAn^x*6?&Z*09X! OsωΜA8s|,.@]4:vP3BqsllЩZ-Q-&w@e˔s2N?p+6G B֣dw/[> s|c~Ӳ! ]0QudL^>pIYf̝xpcÀ'¥zPhEKƄui5l=%H8(d#RV$X.$v$ErN+~iUyF;|gォD 1mno.9|S6\;`!"?wYE>bhM ڡpgBd᦮(``n}s"^R)TXƥZ.E ѿ,.o l#|7tzL.`p0`Q͍qf`/_4&5<@:dVr) 5k ai4(T3>æ(?uC)٣?\2ǽTĵo/v ߩ_D"u6  5Ƹ [Pv$0OSA`6[761!{оoUØQJ1̀cxɖ`!H:4~6k։rk{S͠' X27Ͳ)/5ۚAj;ݲ ѿ$"4duZ (JSl3*\fp| ,|gC3_6dGi4 T*J&$#,bhyN*yՌԀ 4-<,UmJ16_3KyEw'{CE î;+gXx5}.)tDzo?<DJȣʓ31?⌔4%ҬL"9s-!Aho;'IW*€^s/ νlß8N sUEm@A 7։\b zKpIHNnW`NWDoLVNMms!",2ݫYF-e{AQ7@ (ST@!:S?92XǏj"?_6sߞr?<ƚ:ߖ+n|iLE0i$|7De/G?My`KVkU7Gd=_:KcPȟtM~MgAn$0H{g=srSX2Lw|f;CKǐ{o~뽫 .̣%ot f%8jb3o.d S\rC:k|zV\%^{% _Z!z+."{O@4>-–=y2E/g$vb<,?w_<]dm ?~ꉿ9y|hg: ȯO&rO(na}gAQh-ڏ0- s)~>iBGeT'Cl}S!P;$c px5f#5vfѯ`wd7Ћ|fS{Ae<.ڶ??xo ̤OJ3d de_ߚ ` 6kʕ$IuohbEx~ubL,bO(zpގ:|(:}pTT'x‹[ǥ<ҫޓ4ǽA1Wd*}n'# ,`*~ır 2Maǟ&'Ȇ : ν0[3 % 3n;7n<Εww,D/k$3&u;E?B+=lH~7|?~sAw܊.?~p蛹-Od;WE ;_鸊kӗoܸ R=d%񉫻r\ݽz"eOgi?z=ib쩚"83P1箭ت1TH(: -ĉ>lv@ Rk] fFpytf~3͠uzWgɓyXx#[luۋ&O)m?1z~=mVJsUEQE:fC$Z}lW&z CiIqԯzx{_$̵,~v_)G,N`MSD)n)iRZ7"[:&j>A3HollMZ}Βpq6)ai&jrzf h4?fԳeP$03j!t;(G뷀r]'A9M^ӏK|a-YxYy +FOl,mSB)$:t$jRv ᤲ>I#q=ZDMN$MRc(G=i݂9ʈF 'q0l:A!kb'O!n NQwCg'w_O'Eax#_ǽךN"=x}= :\.GSQBX0*Cea) n>nĥ @EjGR/v) 0ēIYNYro&<"3SA刺+wI=#٢(t]?_ε\ď+/i(_>pc?_`p}xPÄP&HBAP̲(ei2?MDJ&Ԯ̉;`Bܠ?=H̏M?n$.;r`q1$ƂGL~K/5  B:GTϕnEf*L @8zbt&- A~pAو0GQ2e^F.´,&6cӑnZ|>a^N `8Q`RNFb~;F$A#/pf42xmX$f"&b"F: A?ʺ dOhdfPn!dbڜI7'L}G uC#.JqYc8w{'C =IC>d{f0Lbwa- w5ηAkgo {M{S5Ofw;ed>LO9 =@TifA%0GCqֽ8 O m 
n5w䴟)+OvQ PIރA?_ov'4#k6ʄ lc[q`$ӌ, ђ h,k`rښZO0B3RgIm|1VkL0^d(70SVOţh0 ~h+E׼Q Mga ͦ_ˀAo6Af    AA2@4 6 y]A@da 2Zn&KU*1^.olW  hQN`GS8, ]^62e@mL:d`le@ TʓU8) Tyytt^(77vW7tܹ󚅅'!RBwDÔBEpڈZĹZ}Cܬݬ\)Tf2٣At@J3M_+FŦ\p2ˌ$e<`GW*rHڵVyCsɒ?M2 P"82@ z!ٍ@#H᫷{2R d(za Ol3~xw[D7b>Wg+6RY*FJFMhe3j$gd@Q2\o5(b AVW28/TWͣG F2 rOР#DXk{ḔQ(X3?7rĨ$K 1,VUc1dB5- 1l͇֘haf򵝝o<wdrBĴy>[m5Sfs2BJ3GGRGO"L{2n5w]@䁦ш/CG d/:L"S*wqӖӕX_}qo? %ֱ|f4SPSnc 8W7+$e|Tc\N2۴q.4mػ@P͖,tԘ zM/xіlߕr}V_|{O>!ɀ(d%rSfH$D83bG %B!kѱc@|7m{͘(A&Ȫ1QAA 6m?@o~Y2;^Y'/^| ?2o zHB uo @adAuV 8v?wsܯݹu _ zH!DuM[vGl{꟯k/ly@yȕ-FŜ=y)\3 "=d`b֬U# 8$Zټt&X -狗f5}}cc޺~a[,QL,MeP`d\F w㱔 Be5)!yB. xҞ~hz?P(%@BM^T&׋o8w|{oo '^n*fΤakQela2K\/eg3B8mػtaiBHj+9*mrLXciN]:vיk['l>TC޼[.7y#dBg<~,DXz-^ʕo) ]N w\(gd`h_ή7q,t]]A]]dgF##Veqъ"HFX^aKAQQQ-EVbg/f?UqvrrV*<9~8x΋ظ-z_fbbo/﹋`.ћj%ߨε~1>܆^sSujf5۟WAp>~Abք m?kgdzj1*b14##˵/~/^btp6y[p֦8_k+z/ .h~sw7b@D V6Xoe_yX*De`nſ~{vEoO M|7 {^{"[ ?1Ā϶uP$z[l[1x׳]֛xms]Jyd/*xʆKC>Kؿ2v78B 7 Qp%QD"]C0(=5j@M}b@&y4UjZHGCP#7#uy݅-J=ʗhx]5Rlz &b/6*ҧN5j@M}bĀjAZ j}9MeWD!;:zoŀ鰪NȆ/TW]Ä:5|b[:^,rWm{NV.~vǬO.ěC_G v]9qȜ+Z@װYsHxh|b5`l1ݴ;zSu#.`_ɲW֮|\Vz;\(WY@^ɲR;^FlIƒVkM)Y5]?08NI_ x .̎[jT%0Hlkw=!%%u0-@G_ -ci)Xfkh$b@9gE/l$+_)(%5&]5*Վn$c9|IHY1G'+Q]]oXŀ%P^>IN)*k~٫KrJb3GPDQvbf8$$u-j{yT1Z { PQr [(ؖy8/$b@݃;įӪ}g#bj݀d u]沥+Szc?;7ݮHx*5'V[H,-Z _9ͷZ$ZybM/^\"/#VKb5̼˛~URFqzԪWe"޻ѬtȱRF%ٖ{s5Hu] { gR+k"^6 tt4w45kWpt;If>|+,%sld.^mK?.M$8ڝ\rzbmM)7FI!5[G֐N4JIƮ9md[dNz>ѬtX۲/ek|w1u:^6 wdޝ$6p`iI gq,|55è_ Ӷc*}Yar.eFt\[y"oduUWY+7VHsS:CYO \ ؍ȈYSe~0.Dv\^&M7tkf$EIx=V9N'i4lI7}_1޳M-lŀ+LͮQb;~I˜y9ONJ 6{%iH4ݦx )^Eհ*1"lؖ& M BRz}-/#Nl=K9͐ǵc]uw;'4NTHYnC&1lY@NÍ(ܨx~\;~\Y_SUͿhrcDۉ)AH5;^#D81Z { PDubI[f ,=^i";.y ^'􃪾&,V iĀ:M!^,{.:k>jaWo7ȥjJ$˲,Kb@@}}cLgcMOk 0FlkâݘuD.[7E):2ep-x@^G {㈳E9Qd%CՖUYOt3tz9nŜ~{:ږ\w}} o.#NpK맜Vw,: OϋOK&e xxvHK1ŀ?淚o.]zHz0]ЪڧdTږWVeCS%0jvlidHGE /$%P J }[9Q{S'jL~i͹ݹd"eb)Ay s h2x,X_Ot6jW 4?_,533\ ?Zϲ9!/;:S |#݊G8z0[$bhE: q0H{z1T Ά =֗ (R5 Hȝ=k1l(z;@ v@)oPĨӤuޝ9@LbvtfǏ׋߻wfq_\ tFQY.RzD9A3 w 4޽"Q1% A"u_s0Pa[PapAB͘Ko`30r1|PZ,측`qŸn/.nCU [zo;a-(:O00 \Hzq&5w S3T" h`jRa s,@pgC"C?2ilY"z :cn XCw_ E (>Dh]u 
^/s#WU_~eddkTKSp) SZ-㽦r7wdW* ! G F DfafSx0iq_NC.@\f,Dk gA4Ǩ0FmKn1SD$BOϧGCS9_PYDөxbmmu3 Tg돉8|P#ϏыYNA4`+\ݑvylv:-L-@y/?lj!  :hT2&e~OxAS,a. \?R1""f ց/xjry=40{s ]ƾvn@`f 2na@0vVQǏX0`P({$n[N~at: ](qvûy!^!jz-Ø]q :wK[ȹ5EO DW1ѳ0:.|9:;=eQ.11Q`ol?݀: PPhLT c rb"zCl92_/u RjuhD:cp2a spNo n",0r6PPHmzKPUf{{6 ޠ`PΞ@ ~Uݽ-,Q1[_`W-í(* N Ԙh~Egz׹;}1HUi>嫬ɃIefUv?7Nx6kZ;1Q#ttH1Qz \+<&rV$&{` G2<*RM) G["Q0cK.Did* @! gta(y~* D8 0Bb0@4pCT*%ґ6 + ˲ohרEc"9#_R &AIq0sKj>U1&p?3 FLxe]ᘈ |Lq(&rVjiiWoA^[um'/ QxԥnY f r@C'ۿXaZ 6`+z-$û# 7{ X{;9Y 0؀0hX0G K^Y4s(XQRR)e< qQA;;vʳc뿯 yᖋ*5&Q1=F%b"U}INNz<폁=! `YUc Gr}𰝏 B9uzBB-& `FBN7qA1F@VSq7 c8Xuf冁a~?ŏš ` >@k,cǏ%x&oj< j(ѥ5 {wanSԵkpub0 vwJ FDRp8H* #J G_cp.BDb" h$BӾ991Ax|-6=4a911@T䣤XjE8`LTXf !)*:w[77APH W1ћRa3$ᖉ5Hʈ`Hj]i9a`u<}ozX#:mS@6_D,JI j)0xtbUa9Ts1 'y4v)(*t _` 0`T2ґ6* daWq(9ށUFaa-gL$I6,kA)=ߜ p&_ PI5`BcU>}oUp >DV*7>/3{w LL_CGQoPGmbOW"&{f6/-Ux| < tB&AU.rSu 0g[q(x[p?}g/?*9,d0c0|s[3 x>  &`(?]}88EC00D' %t< + 760xTd Vbjnc(܄0  n5M=SǩQ%=~pbr$WD11^`E ØL֭phpbt Cb"8%P+ȧQX;6;˘ =/lܵL.-ФZ} 7YL MPvclwCoVz 0_޽x5bEIa\\E'<׊.+ذg*I Iڥx`j@c{0p0Tチ) UUt]w =&^ |#n€]7!hEIDN1r) rd`GYV!S_iWD"$$v? ]בY@0P:XaDzaV 0L@eNy:abFK8oꪌxȀl1P(% -^ֽLqiDO_eR Ӝ"A;.񂽣RHRJ5)%G&{#b6 @yγDV02gfɁϚ,~g?#cnb%"Xڭ"5ǐqڴOE#D>"9۵jjи]>&0 Du>PhN18Aae0Ddn|%ԙSXJa7 RYa@gu -_dwd3әX [L2Rr Ǿٻs-Lτb&{=a6eN5D&;7X't 7LV)7=>0) {DzsP LV#0VwàKLBE!.Sw,hQuWo}MgXE5鴯aS ڮ{}W$X C418s͕:g7 9USzlvlL(RM1 Ԭ՗d[2 *jd&ߜS%hfeS28xD&ߍ6ktd2Qo2}u.uFT, ~HBv1.㔉Q_jgԥbQWYa'&T&:{v 2GoAȲT$Rw8vAZIӣ. fOZ-^o޼ycwJղsӛm@ pj Rimv,<emU- ,lԈH2L\8NŅrU\"Qz}=Ul  t02:XUAc:3,L{`YeV۵px]б.Ti+gvYxpE;X80dЛhO`҅՛M6U=a1*% , w. H`R4c25ۊ1>FA)GvB`pG+|f~⏮Sʼx=/`: $0;l:rAc֒gںd4ˉ1[,5(j_h( Me9s<Ə Xf1NU7}>yN:vsߐ:W`0{nad4S׍L\WJF^+&^$K>+*I.B駷 0[& 2Hf8XtlðaGrYR[=GH2Pg/΅.4Y+\=4o^'0ຂl2W,_22b6ҕA:;WU/5hs\!d̲04F}@%08Kj gXK#? 
Aj;0Jl+7~۷CM74rߨ F0V,zSς+K0LX pLvlQx^qnUΝ= ]Hd {) +A(^j9%; .C~0hy \qÀ bǽ;n|+foS/׬Zfc1_ۖKZ @XV2bjP|bd"ckkO}/}Pv0h+B]F]WWFVO>)s1,LA&re/p2![Эh6ܿUU5M 2r3N`a Fl7#5& mUU3j])_-Cz(/eyre{5K-euΝ(ʇ3y+];$4`#n3 =:WJ /nF7#T*!_O ٤J>e,iLE؎dJV&;p!8e007HˋMK+hj.qkÇ\/բQ+X%Mq}Ɂ4A@H8NOjòQ[z5@fNUM$8޽ S& 85շ0xsxlKvMTJvR0c iif ,d$yt|a).P:Lpۗózy3d/tF}XZJW-OtY៤+>@<>0"[qxfMMcZfv5/'w ,cwG#4*SbѶS)S*b15\X$X@zar&Za5 M]O. ^&:sj'=ͿC;9RT{~Nj/<hSι20M11l7LjUN!>6+>__J%B碳V³>~8\g>v & t@s:QCszÉjub#"c'v\ 蕕F.15eYÖ5=RL>q`l71QdUfJdh;&i;:PrX5@IL ) 2Pb0H咽+xo~ 4<?xW"c^mTk=5 #렵w 'R VpS_9؛%&ƂT kPB(!¡n*=t(:m;ySg`{73gxG}LV$IQP-I²Cv Cƪ#2 o`DWi(PNŸ; ~|@FiJ2LM3pݱU; 0"#b{z1&B)S* F[ä́2f&3WJޤCi}|M-X6*FoY7H[*AO SI-DzFu4ԓ 4B堙9;Ugma4 4Qhl6lH G;60˂d09baA8m7f@UUd}'߉~W`Fs0c4wI@Xn?;v`hL$g3 $9 P#x fv֌55M! H0M)zVX]^~˸xp|Q^{<2Y!յr,fZGI߼<u B'ԎIz}Ny:[cH9# 6N v1:ŢʔP"tMV+~Q/|NZ<\"u.$:[pkOm8: &6 \qA8'IdZ1>PǝBVyt.`q[~{@A|exJ.$*d`䔤Ӵ1ƊWOLlvի#/Jyu{ռGKx'bxr&iVGA5].crL[Cp3Dc]ul߯ߚ4k")M`86];X-(WuA>Hewhȟng'4`:A+ wJ@Ƀ5ΐPɀVj/>;bhUrfƔ3qD܁L.mo/= 6⦎Xe%D ~hd|xa>\,VZ *t~̧d@TUUXa!™Fݒ7}Ԅx.EE5{,??2(1UȔؤ/qDu~{F&̋D"]6("Jeh쟙/)z!}@ 3ZQ>WZ9mO|IAӤQ3LrV.V=ki|2xmwvn_RxDV$X{ti髵 pz"}kǵlzxn}+@#.lȃ+^Lo9.P7C- !rEVeI2$.24*7N0P)%E5o`)R8 TɢZ3Ú(YSHÎ~2(]+k; 37'ڳZ Qݏx5[8gd9ið,s~8Hd^Ubn YsM![d" qd#ݏ 1U끆Yݤ3k 4>% ɦ vA 3?m ͙J-3 rW'WWX-I>ޖA#[K$q>z[KO [BS~h?.9.+W(TkQQ$=eV ;lk۫BwM]R1>#aÎ~3\b ~lcl3qU^ N0!P,"D)Sю( ?_VZb[$h/-š'ڽs]G [4 d*am`DS4KR]{20)Tֱ ,Ji~8O߾|MV?TavUБ7W; 7ESzwAKZfMfu}y pk.`ոOkCRK|OK;!?7P.)>@VwdU935C|S<"%EPz~qX==|R?d ڊV?Im >{a"T⒢%E "]}$z*, ʰU2HO MJ`DɊ{Ud@ZbƳW/%Qil2XY#l%o1 ?Wέf\ZhV\6ext2h n&?<2J?׺> 7rtjŻ"tYл#~ DUHspW3Қl?A6d'SqV&ЭY@9FEKdV*ׯah 4(ᗠE S >4l\y72>S jI |~%?y dz)t ʟ~PrX8ť4OX6(G,-!_o,NĤO"H@b+?&'i6STuE[; &| : RÚXr3M0Ƨdxa]MB l5!,߈z}+TFdj* &dڤ  Y~t-;~z0L AWf}O%}YMIR(04eb^s7NA98 kAyt ܥsمr˗9ڲQt4d`)u_{dgZҖZVUn@ kԥ;JE*_ q0kF:owF>gգZ45ۊV&meExLm.Y2[/ˇE{A )*O8a>ꁬAG).m=a\R՚ U܌dpb˥6A|J y6 &Y@|m?/ecHQXb{kq <28d0l6LΝ A"q'׵"#W&` LL=X3z!2sߝ3ثѶC .WdY7 'RNG@S)8b\oXqJpd4d~"a>~R?Ű1DU >|EvYgz~tT~~+yMPUIp2N-\9>ċ}!2qp`WK2Kb x Ef`E,mʂ~ZoHt]XfX{ΛV7.B 4]41 7%VH<;,V|?_B1&Sэb -!G㇃DӇ` 
Ld3L&iZY csX>-PT4T^{8sm@٥#dHq{I>MIXݠI',Ttmě/斗 #H ZIa""51X.PPX+)?v )҉@V+`U:Ѥ`/(%8әr G*Z 䌢 JNgn&8' \)kf@bF۽7gX6S6g_ma2^}%l-wl(}!ME-2X:/Saٲl:׿t8p6#p<8XĒLihSQX.wv3R2_Pepbl[=2S,8 q343@. ׽,;6Ϋrv#FPVWGY>Mع<+P*>:pΥ *Wnm4e0O7S~t^`g4)6>k쬪sξؖ9 CfʻZrmp쥫Cgٹsݖʹ9tfڟj Y% H:P@"iR]tH6kłJԊ7~ǞKE?aͶu*7𝓑5O5Y)Yp! c0X5l7D KENK'CbmE yvyJ-%[Tȯ%XT+Ri \ H.yㅅ_)F8&;Ż%3 E 1gfꈀX¦0;;,;dYp+nCԐRKDҾE"bc25+L- !q;ӏjjҗ* XmJӔ\#{Fl9aL)v.4VWE%J))YS/ݬHX@δI);1 ]K%[9 aW6aHer  $+T4fN'NZny| oۉ궓n%h% 1mҗ1p+{9)~Ah4Tz=k!hG&AW>Vz:ÚmU t۫m;iSȐ&@K5YŽ#Q#9z!;$MD'pz Wi~@ I ]S@VAA?:5ݲ[+Yn7F5ՈaCߘׇzmqrOy|s )~dfء RXܥ&|',$Cb|Tz-vN#p#Ɨ WMg ~՚P vSzX3۳% gpg@A Sav`HY HUM`.Tq)S͞聭 Ͽu^-kxϏל^V 0PN( !ji=52e0P Ɓ8zy |=[30A;[ NT*N Q85YDen`o`s( q:kdpBM]푁G\kCz.PV(# 2 bu% <2?JK,Wmg~ڸG]Z]$S`_^[i8h,mb[R"{EHԼoO(zl|Ci&yy|9g\@ 1pj^26҂ )]Nwbcbpc? wj^U pXr]Α r_;b x@=1)!1SBbp5O !~J1@ @ \O ҈R$'tb= e3oz1۟pN 7bWx󵆉71xoxu[7HܼE ۈW]^+v'>7?<7_o~ ~.+9111111111111111x'11a˟ Ţqstwwr@ bx"74u1Z&1hz:@ ϱzK=bqGH hv}c~:9s/ ~!j=ˬ4_Ji-EC'R+=]]դmsn]^Ie;^"zf-_Ov|'D܉nKt- q~_[l%1`%XWzg+ŭsf-]¦?Wo6c`L9J-zVsg`.a%*X<evZ_N72C٤_/.gE]v}9˓Qf웭#!ϱҹHnVh D"`> F7lg~SrbueQFΚ[j=}?97sLDVerP%bHiƔŒћOsd: 6`_VlDD33xգ)>17A < 8NnlkzOBH-ܸYwYϿvt|1\}ԣ@>"OG᧫kDrpOa}_Y's1sh 擻;*FwljZi@kC/vF@ H8Zx|DScC F3=%lUՍɽ>Gltll`Ĺ&bH3Zٮ' ȕףݸWLD+vk<:RTo֭>Y'{J{O p"x(t"p~@YZicʇGBHTLUutXx1s9kH3ωgRHآXgJ͗?R[UQY[*{Tu;52^+F9^rwo7_qO?T1ęb7kf;Vbsato:3 zt bóL^l7;1h9Nnl$ũ935fgzC|yR'˩krr౦+ c.OL+=Sb-&ΞLlKÓ8eX[Jۜ,~SRɡ`@?sBBjeRv'% X(ͤ܋F_3 ùNĪRTʯs­/ @0M>ZE_{<&z67b}#X"nSf;j_>;\Z UW :Ƒp{fSyga٨l)X_F<1[b ʘ&@H.vߛ; NV㮌ÚlmRm4:::O ja'XM}?zol!6Rp,6\~g :nt?d*8h70mVMC'Oʡ`AĭDq07h;}1уr(҅W0j; wvǐ!E\-TG#za4BfӇZ">8;kMrwn_:nFs%- H?u;tnt[W{ ;;Ab &/v}2Wh.c hw>{AeM_:GG+z6RGz;b0 bH#p=&Rkiu4s/88[׋gxsw1ˍLd_ICu{̪.Du~{ߑEp`n:g_@~`za ^vqc[ 7ϸ1h.Wx}7?m1 }yح ;W1111J?1*<=6k\ahz=|Ҍ{gԚ:΄ffOH.6N9N@$ $C@'UJVm-ڋ|L;}'\ĬZI>ّ};燈J#܇~#1#;oZ 7~#1#;oOk1_E&+FbdGvdb^po$FvdG|F1@ّ=HȎ(7#;#;ȎȎb~#1#;HȎ(7#;#;+l `/ d#"X=C1I>0ʈ ~x1!\f^>8 qHD1#"|vYD*,~Q P Ñbbb1g)Kgb2&1Hyǒ Вl" 0!X1,T1d,EotWkX4RzV-Ha&Q 2ث!X1,T19u0u0&ޢ욎R @ɐnDb 7 ,7~!we2j4bjl 
'>Ur,{,=EUhz3Vd6˼_W$ɗeSt=)3bY~Hym()>,Z.4Kg W b݋+z5}vEr~'yLIM{ͳ?Z9.Sƚ?/tS2lUo,X)on ۚhOȋ+V,x|jBΠ`/ MS G[75u\K9;O,Ō뼘)/fzgMzgbm}Sړ1a/u"׹pwy%5Oml&vMMGy0 S\, vb!ֵ$u`W]zLK18xuevi&;ȋ+c@<_=]Fr=ԳBՙ~(jAvPjsx{TV_L(d:7] d'-T+:|"ywQo:G Gje:0ފnڱcvb!ֳ:uhWԽm(O1}̧+do\ `v s$;\-fsGDFG!OgNY4 S: jϩͮ嬱,=- QGv(T_t^,5.z[̝(M0|NVtgm)c}`&[G1{!Ab~Q|be&"/0x{E 0;8c0! X $Oa/iIn&xrf^=MtV8aٓSZYmӬͤvwkU>]a7!QY =44!VM,Ir11J 6?u`W V0S ^_Hl}27Wޠc0;}mX[n\:ڴؽ7/ܖ4=!qSqO>''f$kY\,5\/EvlB }.U.PVֿP Ln 1U$uhW}[Ow!cK+IʼD‹EbɯN!gq͉$/n;n^'[ĀҴIc׉&">߬zJ c[cܹOYO1SeUأ=KR> Gb IT>Q>* 1eF9 ABED ;s$v" ۉ8d r!GdYzxdWuES=e61Q[5:4ثOgb7a@78bYa^g(CX~d 3i"JW7XtYO&/o3M99Yk즼AS/]=Ţk~Pڃr⛛, iKA:4ث/ѻU>:;KV-ի S#C_n'4JigHBEb&. ïJxMC0eCUֳ*,cdi6-,\B2ͺ]1HR:{5tG X @2lկ_jBȳ[ŕ*iI*28 ȯY7 V`6Ia"x1_?ܑ(:'|cVʴNq}kc G Ѣ_hzR(߳):((SCCC1pJaʵeojhh(eI7u0wmgo],&/ւ IJ9ԧʟRcC-d\C7|ZiK RHv6Q(}X(zve}0Op8f*Hy:/_>>O>p59YMN(ɥRXT-^6# V2ٲjrQMDƈj|V,2~ODAnOMrJD۟G}ƞO"9 Ibf5ҔdISDލYzīi(N5ΫY6Ѳ}o2@թ*)2贼p/FR*  "y~gH|j.pJ#9:?_Hjk!"LsxF" J=kNh9#bhS4u9|O J.tf$2Fé4-20{~z2GJbZ@Oz0Q'9pD'(t%<@ҒU,+VT7_ן|~AG.lIQJUXgD*[ x{؊e0QӗYA2xt\dH!?S]œ0-I #%[2H,փ oo7I{z;${({P>U+tuY9 y݇z%FDIn~-FX{ 5 I9 j7Z2hodvgFφBO𷿽O[tNaE) נRhF \%|ؤ(.c ri#he3U"ۻO>QTR&dfH[Qq_(ir#@dDJ*e$&2QX6FC[ׇkt : 'xϵChA:zi+8զ:u3'Fj[-KGVe_Tl2 U}YTF\%R.'0 M kJRNp ڣ2[N5yEMrkY~+l^/*JZ9W\{(G\2LUƴM [=6dPbx5d *q8]Nymn618 m9P :UL %"UeEfK/hրMβt8*OKĹP-&Sz44ZYdM|`}oDK_ͥ{D M3]҄zq@ Nʈލ oD҅B|SOƞ~WL :Šzԋ*c#bɜ=nw,t/ O"khL{@D:% f|ά=2ns#*zFl#d o Nu~_2|_{~y#psO'QJ n`6JHen3YS(lQ. 
KqCAW䒔({j8b; ֔V +OsfQй=2ak9 l=$0e0Q<z肳#뛙hyd֝'Eәz|M++Ш TCۙX6K/k҈Bo 9ҖC{8cGg7wFjקZG|k8%638$ѰѨ,tpOOBxX krCȇQTx.9Zt^ȦuF GI,vb%H9TzόjIzJvs#w0|P6ף[#A ^L O9L$'fc:A5B9o [ ofgǢ!\.g([d2 CA:ݜܛ@^srdٖE޻'Ha8Ly}D*WgFȮȀ\ GqQzii7{h {ց{kׅM۸=~c^sc-ޅʦ|nM2 qo0xEϺqYY^T*Fmh& s7pG#,3s1  Z޻fhaXtO͇7nG;_ETT0tecVRTpʩ76Uo=޴Y  ;ᅵ26+Wp})ZGSTF kuu`!㋋SQb h8NmN NazoFPhpph -)8 xhgh1&l4Qs2FwdZze2s4p(,- <~dy@td2 @ d2 @do62=Ξ vzn $'GO2.2p9zd2 @W^93ϼ|+~g^<㺣oKϞ|G/k~ʏ N@q?=Z`+qLءP*6v/ƀ!,K>_g zЃ4R?C7[Jxq]_s_s\;/3>Yb#)ڒ,KWME^uwn''S1zpk}Zm `?R3ky#id8Im|o*)xGo\7[o͚]D|rb`n蜺#iZCŬɫm|Ɋl*M..!\=!;_\<110݅V6;v:qn; =cY6`M b0N?kM,F4¡T l]11P[7oottՏ7~A˥Ril?u*S1xnPV Mb0d8 noyf;ǨLD@r9F{J7_9vJZEX l=KGPQ Q`wJ֭aԤyEK=v"IlWz)} w1H]U\-h:VP>{0W) x}A j1 *5Npq6D`0NC}ޔ\ &j!qBS^2P ~^|f{I+ZqMᖳ2B o0r]cC6HsZjPbw٪9muD|25Q^f&ޯ{^Zz_v0ǯ0-v\'e?29H0j&<B*y ~^Z_`S'1ZxL`F+E[<.1Po:FC$ l!cc JT޿ƪE?Zzu325Z3S`ױ &@~y+ Y7mJj緫у D0K!(XlaԤy>n2tmj')~Sc|?xQE GG*p*7F NڋQW))efJyU"0 Û)Y06"lỳH@@QT*g8/"u]NVǯÝqŀ.#Zw9gxAWg. כl(^%uDm63KQ)P *r! VB lSjgJ$9j8OW(P8'wD@%^K/%J/_UUnJ~!QW'8R̔6D`0ыIA֬qk'T2 1P&5s*/n5 C (wu10o:9!TY9d㪋Uc4W*Kmg;^}&5W,&LB{F+w6+ AH`SEK g'Ҹ(o|ш~rhت!6bdr;e&/Y'Кӝq6ᭉ0j<#u 9TVX䨡8s~.h VFx0W)(嫔 23*Sax? a,u[@ HV^;Uw8/Bg;~AbM`57 ŵŵgg ~wuv4 N,4nӘ}{S]9p:#݆dHĩgWl@ |r$* f 8.B+4 ^եʖ(M)+'6kq.1BP[[b"r]íމ1Q!!: `Y>^<{b6=l\Ec--MuJH)36D`0NE?&@/!1PXT *-#30㳕j'\2p~^ُ~FЄsA7gshcM~]9z,iR鋛C3{'8iIAk,Ϊsv(d4,81KSriC8J-0j<[~7'[1B.b/nHXX7(@K]B|G+u]Xxbr__%H)36D`0D.ʘz`uk'\2 2 ®.Ջ%7yg[Ͼk3kbxNý{~Wdn32Z169ֲ|3' wrNgqMJ"𹈭*{-T ]fud2 1dػ Q]ódnF;Y! I=6oފnaԤy:U1%u!S4Z^jnGoi`Ѐ 3[R^3\e@W?8R།.}1%L`R}̠HPb{?/؂a+7F4;o}ڹpc=r/L܍m܋w"^^>LFOfv߽m9[|t6i֠x\i2DpFdkdtHJƸ$8=4lgfP]XD(Mb޶FM'W!&(Z(${W ]b#% ݏqcU2ep FJ%q &A-P;|^X7 5{Ͻ![7S5{ 'pqQFj0r.X `p^E:T n%_x ן}:ńɃ;m/g n>O$̟P fcɝݳ4DL\t{=jif&k&׈;[.?q7hFCWPs#j%lvw7䳷ph{UI0yMŋzut'LjЪ6DM'YW,\׊Y+uk$v([~nFO ~ǟ@" Y8Ǩ ј1&.8w8/2tT V'5{ᅀhT ?]&e~\BJtf4f?;VnƵA~I3?&ܸx=$'еJFshKJGڢn7j&n Z ņ@Xq:m?QB"`ySIG`TC&`$yb Z$d" A? 
xjޭb"Uf񗖂Rfmt,`]9Z Q;"^Z G*lᒁ{=/CiM`=n;b@1Ěb3_ͽޣfܦX-PX*%z@޾mg ʿǟ&5߆Z?qt?FoRزL!c,!M~N3x:b59b5bm1,dH lIĦ.d4(H){UTF&-Ā4CMNv*3︛H)36D`01yPl0ʛΠH_-\2pap^Cy|Ǹ!$&jZ^}) 3w:۷W_ߕPx p ӕw=x0^1kNc#J?i젵$~hퟕU#}SmޗoyB ըCyC; 4z+ԝSO 5Qd:ScTHT KpB_Apӫ{)efpF)H18sB%qH QZN'L p~#Q ~d۰Q>~:Jci/2=H^؏ cㄿ D?/d?92拸oӜu>٩6HȝH5s(yKa[ˇm5z,A \>7U3J'FX$cQ$4^`Fˡ{0W~eǫ1]R1``fy]"0  125}P髰K" L@fg7yoIy'ϟ?yO$]F=O_o. |ŗi>KQ㡧(z_s=X s x[>}dl囌'ST_0oxp~#]i>@"Lt~Ew,k/Z"T!',ė!kVBM'K?0o;x[<,0f ~٫V ~J~OlpT+8R03ż.S:q)֣b].ar$" #}pQyn~vYGQ?r\Әa3i%]?%+<ݶe-aMS7 zV8 j/Bv܁"#d@yC`QxinÖo?𺃢o |plWGY5Q pĤBlY8|/M!s +%Z58|UbG f%q>jmPOX(a{;qvk?GD?{yP(GqOBtiLŕ2O2*2w57-wThWeQ-cv;/Ӹ~?=Wÿb_:#i?Zpi@7?[ ֺ aG᫓Ԥ?4.3T6Uhwg{M6CY1? 51PV̔3D ~\.Nm/gGOy^u%IG fqNFeXJgg{}6]Z:?kVFMH ?{Ӛʶ@REܡZw5aUQؒgQbIER6*x&M~;\p{U5ssVv2klV9ǟ1Jg5-EOb1ig >xy`ۋׂBv$`no H/'!;\E148&c2ׇyAqfr"$$pV DK=@2@22drVJWW@x,? d!@nҿٛ$Kd{iI\ydd ]2X U м4*@ g l+t'g?7 A⌈Dɀ:|R|J]:Hma yU*'w_H,y1GoR;Q~w@2@22dT &AjOR$wpD%,^N/Qx`[|d {lZ4k.mpN2`m1_ ~Og$x_ ɀ0fG"UA)F{ش e}h-LʰO(nX$*tӾ=Azc#dV ֐L2|F,š&?,%jȿL?VqoR/L%w3q'F&cW Җ&\T|tS?kL<~iߞ@Я&Bt`2,e=Xk eKM79,W! _H^*:i|::iEzK [{*5Oh*nR#8SLHHR"OA.`o-tLuy˞&B2@2=UUgB;+Ư SlMGͳ;8EcajjA,M}J4nǣa{~㾽/[ݠ`btY|P%Oi">~{M3KTR;N;*/8 U$O(>ϳH,XkM=4zZx]KQPa:)xxK5B 07/:Z ٌ߳ -HE\D/HtìKuT:<^ؑ+jMVabՆ[ym6}{$$?\YE)b]MR`bs[@?cr?WSeA>Hst7\ܞ$QFN<:*;ٵvtpY`Y@״|M¬R"83: .EmCygj^⫒ 8T#qCPm:M͵A`>тό%쒢khDk="*/2e~[ v5A$u,5J59C pd-D\5A8; dh\m2p>֏ҵJ&J-2nY ՕRda,9P10, 㵦q=H/nOv=ҩ}8 Rd}s~za8T#-l2ƥЅ -f!{E G/-ɔA` ;WO<85 <%>ޑ5oΊ/ϟ&g2<2oK}Z(TZhJv8HHy I{;?[4Zf;h,f4"=HDczpY>C)83rج6GNRP`~q5(Z8T#-Ǭdh㑢* Ak hkҕfNSlQo) Kw-\mZO Ezo8؇@2?2ȒܪD/~6B|p*;𬭥FVH|0\V[/B*>8RTv]?A d)ьGE&j=S!I$} bM{3N-4@ 0n1 E9𲤪2|0>5Z!8:ImVA*'2¶\mZOHA,uM+<dd@1x%A$: /Ɉ<.u)#R#En{]]q<)N~dZpr5AX|F.uQ>Sچqt-?Zh3N-4@ 0n1 E~wk g1nK}#|78QTz6H{GBdVm|j`H8W >4N<euh%jvxd??" 
i< F,e7'B9z t>~oݖbe`nyg;MD'|~&꺻LB}:W/NmLJkXU:[ zךS6--릉6 +ZCv>)a4i"(1iƒއ@232PY5 b8)gch*}iy#yCچj9|7-E'j`1)AuFBmit$vx3hhrb$j’$QβQMKLa3q|8$$NoNXl~j~-7>d}J< 拾_:KT`<ڀpM,x&:m5w&hJA_|fxg.bRPl3=Kgنq.f> 4iYsjc`YFSN!ZN>}oOdd 5Of2hEQ,y1QyŐOvny";qZ"Z3@4lHهXe2vt݋E@Ykwڭ1]Rp6}qʝXq$.jv[jmvƠТY6Y;'f>X wkRf DZ"Inyg%H O }{$$2&.C!Jbu#BŢK2Вs#{;N-5 6;cS`.x.6f6If&Gʛ`F ?3Ҵpͪ9ܩ*ٕD^1kO]}gهd uk+žAZʓOWaM(bjk)$^1jW+mQ#2,}LƅjDy.cE' xZ ]MY7B8 gHQPs5d8'j`1),a /k}3hg{J%UP0,ftߣ'  4/a2 *1;|5 @ dUx>Ӭ76ie|&qW^xqqwwW籖:-MA-D}>.ZH_ e0.a X%;jVçŲd@ 3IW}Bw^}z/s0IDZN3FG2 HH<'|Q}=f2blJk1wq|y)"$V_gQ%N8^>N@y@xd$^~Qr߷C`HHH2/$^~<-'#0ܾ!  eAػW/CFn9wVbo3  < /C$VV?+CdC= 7K_ѯ {A֣$劏&DTr)D$$BN}8t ~d| ͓GK/! Bb>J~2@2#  Hj,ȲǪEJ [[DSU$UT5YW^=n%~r0 ߒcMF)&h|t{rҥ%i4Ŧ4 |'5:^NlM-40:#0`vK3@&]w)Mޤ(: H2|UM?;!#c<ʨMN:*dv]z5rU;)s?)}<і.FX JţιU@ "~8 i4[ H|u< 3Huy!ϩ.ꜝsNi+ь/9qNΰk`l=fNoK+ ]ngIa #@ݶ]r^iVF {QQ K;% $.Io T*߯59wԒB  &H29"?[4Zf;h,f|fX{ U~Y0ٕ>;,fǎD9p%jN4]N\wH0#@ Bc~pUK@-4n+H=(A%3 47jQ(3*L/(Aw+zRD9hF#iKȠcsĤ/lnY#i0J>q5(Zl Yּ];&Eq֔xnaeFinAPöG:>0sӷmUUG_cdqfZ]2;izAÌjP3@&wƴLtjQΪ6HHH/ A2o _VH|Pl2 J Kf>Bz/u_ZM{[ˆz>iWMJג\kxt55$ɲdhJh}o׵f%Uu'Afk*:8fo4cQ-40:#{QQ 73 47 |@F! ^ i]2,M{XI!7n44YH>>M=#r,>["?X:?9.$}~]kM^, >r; 4l% qsǤ}oDGlyi:@2@2xIHѢ.R)MR;kGK}<9XaJPKD30gw~57Lp4 [FhēvotR$O$l%CCdC+ F}`#J?ƾP3`&wf;7vSGG! ^[ >Ŗ ƹq%V;5IGcN?ŕM@K9F:Aw$Mu6DF)WVΫo0#P~6?8»yd>(a3J?j^TvQKgkZ ^Hϩ:q.n֬aޙE-VK`D_2Ő9PomZScx{V<ԦoWҙl{2E^//H!yd>(a3J?j^TvQKg2kG $ @!:VjhNA1Libp ʝXRӋZ%A ?oţEz[ 5C ԙHf9[Na7u*cZ..^x:kGKW: n{bμH=(AM{ 4S dqGd IFه #)PnHR]qQ0agݕ ;RQ:˱$hrcŝ ;?#2:65C 2Y,wBkN;o]E"[ f~ÌrpKO2;pF}QV0&m1S dyG]T;dXL4KOxE1)N~Z.X2Fݓv!dT/ L ג{TXHxN'2 #- [4C >ڌ`)n; ^b#U]q ES]UX%PF {QQL%.Io%zao:j\ 2>EHaRŁGȪ*|Y˓-$$oH^2C-D}>.ZHH. x~'F̈́XD2JbwԬOd*A$v7d;ri7VddvamO{-{Z?q" .V_${!d{ƺqGw#bp=;Ђw'q# 8tv}5# 'bՠXTocRX͏81dDl jbѺ-AQl9`n?erz>[A%1kƀV [*QPWR+@ZIissk42OS_dJDPŀ3HG+6=)☈Ʈ5$b Lnb!\x!@#ĀeԀǠX,1(]\ZJѥ׏ԇj @O +,y< g11QߝlZI<&]7&(nf~}/Xwyǽ]n1@ !Yg_b(r3Ο!38Nev+=,Rɞ}ngGuwʼnt+1J|7n8B|7gg~K!R1?? 
kd u m kTȺ%`wEݫjxx_D^?C'SH.o hE(4,ʻOĻҧmm{HgC|=xm ZY Fó.x`+ <˔1@ =1`C5$r -.]+`$ho1N Vfi7O)1imer{y JCKob{b)K嫎Bɣy3M<D|=v_͖r5-]\_%plR"0g?h"7<$phbK3=N? μ~c"93M׀llHۿ[ZXYN;W`lE)Ldv_7tVi wʆD"e]?'bD7;>q/~߷6[Ѕؓg3zƧ=+ƛnã R/+e1h Ŧ{S1c넮'^x,&UAOiKKraW*tt$أCTdx;D@ĺ8l>*l ,ECD&{}7V~0aXGO_c#C/ GYVFN>·Ck]UNAS,;"b1r\RPa/īH~Ƴ6mdG@ߟ^g^c@2l!aW lY8.@`2)@`0AN'45 /;R(!@[ҫE;v ؝A1M6ENۇ|D_H@gN3o>&rn>gܥQ?g,ID 4>_;Wz v$V?v];A㘨89  }d Z;5uk4 خ=ܥz:qӟuLtk@|9I^0bbc"^GdAWuȗ9.ЧGGF:c8&8_f soMuolַ<>7:T ccv&87}4؛Pd1hte~cp``'k6-\t O1|vE  T)& gYc";m{YiU'mjkuD81Sbq]P*qPDg<(x=2{xU ][Z:*\iOR6?[=R[dRx>gKIٻN 1(ه}s‹|#SWuiهdrrieuș 䏐Y 6=ٲRnY)?ZYV]gәo?]ˈbc"àğؠ+## ku@#U1`cgR: B3a/O]Jj/`!bO,ۯ?}Гd1Ġ\,`}z)M}fF wy#yEuހ/Ŧ|fs1`-`= ~bݡߎėCOſta>?{q队8}D=S6l0EbmPP 矽Dڭ+GjWXK<+%5srho=c5e=+]]`&_Y]w~{Ybw DȪA},mK+Z&A F-݌=ޫ{g_~@ X#@{$],xHT셦liq5m& $*nA2{>uI!Mt]Rs+UKVa1 TM aρBcbc";x8v )+g~n+H}Y=}麝E1@ :1TU{XR:"%3PYXHTdZB!MS%>{B!XTU ]r1> VΧř-,hgg HI/{V7~|Ls& mrrpZi&EVӭaTjnDc5#XH@m -Y Q}) odl_9Ǐg='kzKى1q1 #"1 Ld! _k$1v 1Ȉ@ j;[s?1f?VO @ m8sZ:9ƞTO @ ājxXg4!1m8S ~TE \V ꜚTA \n1b@  1b@  1b@  1b@  1b@  1b@ < J q= s+ONd\ d18@ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @ @;|y2@F l';W;wiɚJ_j56M1>^j5RԞ@ V߃ojOU @u쫭]@ vƩS7VbՉGg_d9/O-|p(>@ @ @ l6>ac &߼y_f`s8Q{'kf3U|8Aln?7xzų<18aWƹ,B'ކnoP]A~+oO=?6ҽ'`87164?6夯 b`0%ӧ/st(\!zΙ(Hf߻|ΦynP:"mxO*-nqb `zoLy2m̔^0I>XjIe2=G\؛SqݚNzeԩDn%Z8,GZ@{־w;s1t|?O95onkHzkƠkutTFvkcp?u%1/ž 2v-毑R|x?7(6k_k;knA׻yZBʩƺ:ь/۰3.]](yzgqE͟,k))H_Pp7t 7KX_~ ,՟XFXڽ849{l Wz*WK€5=9`ʪ?xg˫Wfgc#7s2- t@r?uHW`cpc>w(q==ëKk#회}vT"7(϶r=ݵ乞d8(m~>|]:oKqODfg\V:z8|1mdv8\׋ pN9e̯^$zluptk ]z*}eO !:띮 1xE>gd9PH? Y?;p%զ޴Cؗr‘mZ-?UZ(Ec% fþX޺ݍ~{%k9:p}U݆]?SWnǠO62p0L.M<3Opj=Ġ^Wu4Qz9uZ[boƓ /Nal-jdJ/ޚ}f2rsQxY凫,=J_r)z,Nh՞] mbjowqޕ曛r Tt{l$`wŦ.ODf&[]bo-'X= C_zZ/^oY|d/OkzQNL_z|hU:ģ%B˓Ę> ᇉDNZڿ–:0>2:%+Ƀ@랲ZNe3vOdJ*oo=3+ ڊ7-6cPkcct(Rw1eCКֈ?p\`P\Z/̙'G'kEuVDFg,S"ֿ<'b?kfA(fK&ܥcY_4b_|/ YruًC%@V'd34s] Gu*=[LB7 U6%P ABϞ=6n7Ϳ}\A0ߪJյ{Ry{m3-ltt\ĆfƥclI{EIprfr#M&lՋk+G3S.lCLBUo<]?/m:P{V8\W:_V QcuOR \klݺ9w<컫W=>M›[]-Lf`y?Xє@ %0ٳ2177[͸ԕ{e]> .E\λtMBSkRklۑ? 
^meu[N9"Dy[e:WרI6irzfA\ Jo.q˼L"74ᄎ~89,&殱|F]|kW:>9rN@h; WژvtwuDM Tb~gׯbW/ulC]4ڬ "P4Z'6z;VD20pNyW;zşk~?Hn9*df:SkH7J Q&7rYx,Lw}; -*S+/\mZh zH;&> 9{Ne k 5+@Yᠿ^/_|sh ACMCqX @<>YX}RX7'.ΎJ`n ,kh†VEg֟MinC^8!%淖P_;ٻSsc3R8˕߿z2>J Kvm.tLɕ:ׇ~_H8` ڿeo?ihoF 끜z(qB4&<I߼LL E]Һc.VW̄SSCk#-3.D`%pù{}}m= cPz@S߯+A]]UnlbF 7 ,_"Tփ/M**xOmͯ>@ۺL$/yA(p'.)2=ͭqmx+MCoYU:cn|ԛqz,4/L<~s>0iW+foޙCD.>t4"u|sf&W=ʦP]1z뭗j BsfrS=*ƌ!>A5w6߰nXr~jwC?B]D'kztGZk0zF#$V~l=t @ݶlorż@;h#ͮv[V+6þu쓋/Cw5+T4.{BlW.ZD֞8[\{kŲCVDdbt#~Z˼-2UX&Y.m@h6sb`]r~++O0. ! = G'.P+O8\l+^G[ίfwWԩR!P˝ 9*@5u~xl. ܢ0u.~qh{[Zm<4ڭɽ?RXWⵄ4'8e !5Ӻ7ߦk[_Q_YmYWg}؟eu>?[_e.\.MB^N])yM+Aš 96W<|L jj3{Y.5R18gg8V&5w2@ jdwb~:/p]_>y7N8X|˾ڂte"8F15u&bdy"m΋sS,DA*@!`?JSkmt?`u]Iwʲi3s7_ "U'bP?KD,/PM4C b@|;,K쌮0ɰl{շҬ(E-exbg3,+eyYX}7nb=Z*x+YDa^kv&zʶ8 q@ h^_ -n ˮ[6lmmdͼo,joü#pf|$AkpQof*yfmv=@tXl8 $//cŠ _ 7VQY\|_ gpde&j]'IĀG:NyrS fD1 7ΚIĀ d^zW2bK!= Ch8Hrq38o%_᡻<^o]uh9 s#yB-G!ZY8L B^@_ 1P1ML}46eXW<}/ldh6|tuI{&3I!HJЕy :5~YWo$b ECxgVhfw{:fvJCkAIjEFJX 7_I\q7ft- b^ @N_ldJr$ĀfB+b`7f^3=Zl@l%T :W@ $y$T ^Q>:$/x8?tx0}G1r"%Y[b ʟ9@\k+G6?vZ`z}`˙(99OS']'JVE']'ʁl.=@nɿysIENDB`docker-1.10.3/docs/admin/b2d_volume_images/verify.png000066400000000000000000000225551267010174400224730ustar00rootroot00000000000000PNG  IHDRF?)PLTE娨.l(_F ا !NᦦO|```WWV鯯⽖؈SQOֺGED666ɉؼKB?;jea着i0k闶GXժ1Booozyw f㝜r0ֺJŪ^\Y5Toڼ`7H,҇<̲ͅ˷,R"^ 3};׆?PH8ӹ,c~͘_'^}%PA&Y2}鉫Wwх7Qk4y˼6 *2|||1\mWm(c#SwГ>d= T*e_)b޶b]:pkIlFIɕﯴn0ciO-((}:H8Pc"4IDATx쎱0 D3ts$l{|33v[U$?pzNj9k}L+p< >̩X,Ё&OBPh)To@g 4cI ,8Q{enVRw8?]Nsf]ȷ4Qv5lMsH=f׬vHU!D % va kC?…IuׁlPan)tK;gf&&Wiww73MdÛO?sGÛ7WƠ>dϿ{ \Sx{39t(k492^|ܣ8\'ya>~uXߗ~HGm?:1}6IJR%:GB8Ka~&hs}=>F`vYգO+S_yRne=r}ʔ$lIնU7%۴ݘdCnYSGD0>~`YsEɪpy^j0eӷl5Wfma${00RmƑa,+ ]OٓQʨ^>gB^lM󬺻<8 XV'f>Sߗ~Vm,UVSYvI@/D%Nʐ`q . 
-$G~;4߼.?*?+.[)ҭlZ=TZU&e돛Ƃ}wכ42ZfeݮqgTlQ87R_{RY6eܶJsۥvb\ΫLY<K?H&d2,;"<)CN ta2WbK!;E͡Fc_}3۷r/Mi+o euW3I z^H W.ocf <,\5juSڸ766͍:IH#-m rkOjf1ϲVjl_ |&,9yߗ~HqEqybQݭ:3,;80:j'P;ze#*&x B0\DY,זLRQ)XՁŰQ쁤8Lp8ͫ-9̺uU{y^PD%rYg_(kW :SVc}̳wgnqNӎ<[:잒l1b)@9Ͽ/.RhTRE0.*ubhC 27T'8 er!ѷ:~s'2WJ1}"QY^vSwSbڭK #LsZa oium40)EM$Dx*҉Q,X o 4Ϣռ:AE`* whݔPIj2Ͽ/ O_{1kcaDUm,u>]{E8H;abf yt){<|7=, ?0?biag݃o{>Ql.Mz X,zVO9Z ~iu&9Ͽ/fY:>fldbFXuM4rVW,8 ɑ M y@h^?>ιTkRpusSBuIc?'I麹uzN?}XlvIJsB.&5X)UOе%I:":0R K?h#Xb IG4'd¹\hN𡱬c%m̀ L-ݠuuCk֎rKvTV*wTִƪ%Ҽe-ј98:j4}yAЯ,@*ꪥ9'ͪ7 4 ڑfwߏ~^ "D%ACXŽmaQQ',j"1tю[p#@#5XԴow賃O>bG7XD}tk֥DvnǏ[cG>>Cp,áIGP }"Ea!pN9ͅL '@?{~s^fXc,'in/ϿW?D Z^mqG$qEeK|.C m\ŎߓQ(sGtEh_Cs"PXaނeuLg؃&000q tO#,:?/( W'z*D= Cqͷqich*~5d\NJnAϠgp zL&3N.8AY3uy>ܤQ7?OLg~^ϠgkyQYҜܖ$);CRDנJ"șy9O:qG4ANE$lt><9OÓe"}iGDŽenw˦V lZ?g̈ψֳ7|Kh'3,H_"it*3=˙AמLߠkϓr>=˙Yu~L@ !S1,&Nn$Vii0!gB`Sa nB=#h9#UsFrFI9cZy4H ȡ^#t/ %Oa2cy'zw}D#;`}H}K}V+s7_?j<2x[?A VJ6}wJQDZT-BsE#+c[<]p7LiWUxD"[<ayv; Ovo\ؕv;qLZ,}m?7n$ Å],>%{M2C>:g'p`(r&tbpb/ګ2"9d5Tf?!B!Xy9 ~(!v&܄|,YWB=Dhj&kSM4 P`eU!>-Vz[֏sP:|G9g(0NL$_5i=țeZꨍ-r*S+* Qu}Ɓ<һ5u@4 g su+֭d3ɧ{R:]f꨽Ons d}֪FUϠBp~Ꞛ!*ya Kf_s|c f$MѵYClWPu}~>g-n<^&*fקҮ$No=uHB!B^}vKn@|E`k/m!>Vs5fG%n\ecр*t}x[:ϻnV0YyIܖΰghSy-%xѺحσK!B!B=򳡜n- Ů44XN439ʕ=ܛ,kz|ebg1<> kTӒf){6tW61iLc"H\A]U#>1ܟWߛ6A<gv آ"|u;2=:6 ZZJfgwGfY|GvPFv<"ӉM#Ysg]YCk<Ϛ|; }XD??$ft!>׭⳩gEvM7ogYu*>~(eϠ#ۅ~e}xk}q}W-KKϚjk&~w ?!DVB!B8VdW_y;nt9ۺӠ퐎jd{4z+tAtmK՘|69_Ν ozWurV_.qLeE]]΢9>곩g 4}կי[:ζs]ԘSU)b$AβUYPB!B! `k;@){Ns|>Ѳ?Y6C9?Y;6RbsSռZ7ޕOk}C!hڗ^K08>_j0c,zNK0Hyަv{ol׽kbgeq`9gのҜ5F}WvϏu1}'ic2kdCuj)M# >cyo\S}/+NgMQ_gXz]wgYmUo?Sm'd7cP[Q{%>]]g l^lg}o~Λ!9\R»*ҷ۝ӨJ]͗O<1~WAߺ _܍Rr-}!ܔrCR&V (cp,e\}ϑHwѰo_#Ñw܁mW?|BY&S%xutr lndAlm)zy>=~Pƶ9`T+ctP|~r>7qzG FA˜CUYuttXp>R]:^9seb[`T z\gi,w5}9g@+'tGst8t@ B!gϋ\V扚ϳHN Ϟ`߳gHEqӲTϞTHYxL?/ `hSi7<3gZ(%O,S' ,6a|6åY2?>)Y]ay,g#|'Hl?τ>&URb؆B!B! zTB*S#E֌LZ:>;P'̕0~{y,*Sǩ]C_g㳄0g]\smB!B! 
s˻7o.Q!]vӣ`̥!H,Z,+M7umoڬu\g!y]vlͥ 칔¼.P3,kY.}>Ò].@yϰh.u/]v:㳬Kܕ!,벫дg\G=Bt,x>¿C!B k~CHgR6kW/qܦ;]HssgFCB.ԿMjE jAT/JܦukQ=iuӦdS,)DY h({&0>,L&S|jG?n0qqyW^xl)nhGWoCshTtyr}jƳgE r\l?EMWcmzÙ38j.]A2ʖT&Xf"L&3<ߏBYTSmǫY}4>7wkTk(uv-HUvh/hNnSu*r\o=Ǣ=m[)_G<8f}&IKφT`34ˌ~z[ Ddl@c󧟢}}ӨPj/2AVwjӭꛝ\~qynљ0gg=e?Jص8 A! @hUYO_Yd?IShqiE 60ֹqD:.(}BGZ|~?1oh{I[Js)iˋ'#Ӌ`z/6f˯?/[Ɉ](eh 8ZjxV2i< I7M(9-O8Al8zےEVI K`,GˉP !sp8rh{TkG\iA@nRT2SA5z_Ƞ gwnw:o0]%rdn2@,G,Ǻ7:oY.帼yP7p {a4 bMm,ApU[x "NUU T8ZZӐ! |] |uj>  w%5rW z}^Y=矣+e/)=^>Ů®LDPgHb(G"N 6@Eyr %gX\j[%bֻ,:,e?HWI AI *䶼YPȺq=sBk -DpaPrA R u¸irګƇ8Y$B)}JNw}o6E@ \jҥ]l&*oyy*0f╗IENDB`docker-1.10.3/docs/admin/b2d_volume_resize.md000066400000000000000000000132401267010174400210260ustar00rootroot00000000000000 # Getting “no space left on device” errors with Boot2Docker? If you're using Boot2Docker with a large number of images, or the images you're working with are very large, your pulls might start failing with "no space left on device" errors when the Boot2Docker volume fills up. There are two solutions you can try. ## Solution 1: Add the `DiskImage` property in boot2docker profile The `boot2docker` command reads its configuration from the `$BOOT2DOCKER_PROFILE` if set, or `$BOOT2DOCKER_DIR/profile` or `$HOME/.boot2docker/profile` (on Windows this is `%USERPROFILE%/.boot2docker/profile`). 1. View the existing configuration, use the `boot2docker config` command. 
$ boot2docker config # boot2docker profile filename: /Users/mary/.boot2docker/profile Init = false Verbose = false Driver = "virtualbox" Clobber = true ForceUpgradeDownload = false SSH = "ssh" SSHGen = "ssh-keygen" SSHKey = "/Users/mary/.ssh/id_boot2docker" VM = "boot2docker-vm" Dir = "/Users/mary/.boot2docker" ISOURL = "https://api.github.com/repos/boot2docker/boot2docker/releases" ISO = "/Users/mary/.boot2docker/boot2docker.iso" DiskSize = 20000 Memory = 2048 CPUs = 8 SSHPort = 2022 DockerPort = 0 HostIP = "192.168.59.3" DHCPIP = "192.168.59.99" NetMask = [255, 255, 255, 0] LowerIP = "192.168.59.103" UpperIP = "192.168.59.254" DHCPEnabled = true Serial = false SerialFile = "/Users/mary/.boot2docker/boot2docker-vm.sock" Waittime = 300 Retries = 75 The configuration shows you where `boot2docker` is looking for the `profile` file. It also output the settings that are in use. 2. Initialize a default file to customize using `boot2docker config > ~/.boot2docker/profile` command. 3. Add the following lines to `$HOME/.boot2docker/profile`: # Disk image size in MB DiskSize = 50000 4. Run the following sequence of commands to restart Boot2Docker with the new settings. $ boot2docker poweroff $ boot2docker destroy $ boot2docker init $ boot2docker up ## Solution 2: Increase the size of boot2docker volume This solution increases the volume size by first cloning it, then resizing it using a disk partitioning tool. We recommend [GParted](http://gparted.sourceforge.net/download.php/index.php). The tool comes as a bootable ISO, is a free download, and works well with VirtualBox. 1. Stop Boot2Docker Issue the command to stop the Boot2Docker VM on the command line: $ boot2docker stop 2. Clone the VMDK image to a VDI image Boot2Docker ships with a VMDK image, which can't be resized by VirtualBox's native tools. We will instead create a VDI volume and clone the VMDK volume to it. 3. 
Using the command line VirtualBox tools, clone the VMDK image to a VDI image: $ vboxmanage clonehd /full/path/to/boot2docker-hd.vmdk /full/path/to/.vdi --format VDI --variant Standard 4. Resize the VDI volume Choose a size that will be appropriate for your needs. If you're spinning up a lot of containers, or your containers are particularly large, larger will be better: $ vboxmanage modifyhd /full/path/to/.vdi --resize 5. Download a disk partitioning tool ISO To resize the volume, we'll use [GParted](http://gparted.sourceforge.net/download.php/). Once you've downloaded the tool, add the ISO to the Boot2Docker VM IDE bus. You might need to create the bus before you can add the ISO. > **Note:** > It's important that you choose a partitioning tool that is available as an ISO so > that the Boot2Docker VM can be booted with it.


6. Add the new VDI image In the settings for the Boot2Docker image in VirtualBox, remove the VMDK image from the SATA controller and add the VDI image. 7. Verify the boot order In the **System** settings for the Boot2Docker VM, make sure that **CD/DVD** is at the top of the **Boot Order** list. 8. Boot to the disk partitioning ISO Manually start the Boot2Docker VM in VirtualBox, and the disk partitioning ISO should start up. Using GParted, choose the **GParted Live (default settings)** option. Choose the default keyboard, language, and XWindows settings, and the GParted tool will start up and display the VDI volume you created. Right click on the VDI and choose **Resize/Move**. 9. Drag the slider representing the volume to the maximum available size. 10. Click **Resize/Move** followed by **Apply**. 11. Quit GParted and shut down the VM. 12. Remove the GParted ISO from the IDE controller for the Boot2Docker VM in VirtualBox. 13. Start the Boot2Docker VM Fire up the Boot2Docker VM manually in VirtualBox. The VM should log in automatically, but if it doesn't, the credentials are `docker/tcuser`. Using the `df -h` command, verify that your changes took effect. You're done! docker-1.10.3/docs/admin/cfengine_process_management.md000066400000000000000000000143071267010174400231240ustar00rootroot00000000000000 # Process management with CFEngine Create Docker containers with managed processes. Docker monitors one process in each running container and the container lives or dies with that process. By introducing CFEngine inside Docker containers, we can alleviate a few of the issues that may arise: - It is possible to easily start multiple processes within a container, all of which will be managed automatically, with the normal `docker run` command. - If a managed process dies or crashes, CFEngine will start it again within 1 minute. - The container itself will live as long as the CFEngine scheduling daemon (cf-execd) lives. 
With CFEngine, we are able to decouple the life of the container from the uptime of the service it provides. ## How it works CFEngine, together with the cfe-docker integration policies, are installed as part of the Dockerfile. This builds CFEngine into our Docker image. The Dockerfile's `ENTRYPOINT` takes an arbitrary amount of commands (with any desired arguments) as parameters. When we run the Docker container these parameters get written to CFEngine policies and CFEngine takes over to ensure that the desired processes are running in the container. CFEngine scans the process table for the `basename` of the commands given to the `ENTRYPOINT` and runs the command to start the process if the `basename` is not found. For example, if we start the container with `docker run "/path/to/my/application parameters"`, CFEngine will look for a process named `application` and run the command. If an entry for `application` is not found in the process table at any point in time, CFEngine will execute `/path/to/my/application parameters` to start the application once again. The check on the process table happens every minute. Note that it is therefore important that the command to start your application leaves a process with the basename of the command. This can be made more flexible by making some minor adjustments to the CFEngine policies, if desired. ## Usage This example assumes you have Docker installed and working. We will install and manage `apache2` and `sshd` in a single container. There are three steps: 1. Install CFEngine into the container. 2. Copy the CFEngine Docker process management policy into the containerized CFEngine installation. 3. Start your application processes as part of the `docker run` command. ### Building the image The first two steps can be done as part of a Dockerfile, as follows. 
FROM ubuntu MAINTAINER Eystein Måløy Stenberg RUN apt-get update && apt-get install -y wget lsb-release unzip ca-certificates # install latest CFEngine RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add - RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list RUN apt-get update && apt-get install -y cfengine-community # install cfe-docker process management policy RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/ RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/ RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/ RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip # apache2 and openssh are just for testing purposes, install your own apps here RUN apt-get update && apt-get install -y openssh-server apache2 RUN mkdir -p /var/run/sshd RUN echo "root:password" | chpasswd # need a password for ssh ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"] By saving this file as Dockerfile to a working directory, you can then build your image with the docker build command, e.g., `docker build -t managed_image`. ### Testing the container Start the container with `apache2` and `sshd` running and managed, forwarding a port to our SSH instance: $ docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start" We now clearly see one of the benefits of the cfe-docker integration: it allows to start several processes as part of a normal `docker run` command. We can now log in to our new container and see that both `apache2` and `sshd` are running. We have set the root password to "password" in the Dockerfile above and can use that to log in with ssh: ssh -p222 root@127.0.0.1 ps -ef UID PID PPID C STIME TTY TIME CMD root 1 0 0 07:48 ? 00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start root 18 1 0 07:48 ? 
00:00:00 /var/cfengine/bin/cf-execd -F root 20 1 0 07:48 ? 00:00:00 /usr/sbin/sshd root 32 1 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start www-data 34 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start www-data 35 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start www-data 36 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start root 93 20 0 07:48 ? 00:00:00 sshd: root@pts/0 root 105 93 0 07:48 pts/0 00:00:00 -bash root 112 105 0 07:49 pts/0 00:00:00 ps -ef If we stop apache2, it will be started again within a minute by CFEngine. service apache2 status Apache2 is running (pid 32). service apache2 stop * Stopping web server apache2 ... waiting [ OK ] service apache2 status Apache2 is NOT running. # ... wait up to 1 minute... service apache2 status Apache2 is running (pid 173). ## Adapting to your applications To make sure your applications get managed in the same manner, there are just two things you need to adjust from the above example: - In the Dockerfile used above, install your applications instead of `apache2` and `sshd`. - When you start the container with `docker run`, specify the command line arguments to your applications rather than `apache2` and `sshd`. docker-1.10.3/docs/admin/chef.md000066400000000000000000000032531267010174400163170ustar00rootroot00000000000000 # Using Chef > **Note**: > Please note this is a community contributed installation path. ## Requirements To use this guide you'll need a working installation of [Chef](https://www.chef.io/). This cookbook supports a variety of operating systems. ## Installation The cookbook is available on the [Chef Supermarket](https://supermarket.chef.io/cookbooks/docker) and can be installed using your favorite cookbook dependency manager. The source can be found on [GitHub](https://github.com/someara/chef-docker). 
Usage ----- - Add ```depends 'docker', '~> 2.0'``` to your cookbook's metadata.rb - Use resources shipped in cookbook in a recipe, the same way you'd use core Chef resources (file, template, directory, package, etc). ```ruby docker_service 'default' do action [:create, :start] end docker_image 'busybox' do action :pull end docker_container 'an echo server' do repo 'busybox' port '1234:1234' command "nc -ll -p 1234 -e /bin/cat" end ``` ## Getting Started Here's a quick example of pulling the latest image and running a container with exposed ports. ```ruby # Pull latest image docker_image 'nginx' do tag 'latest' action :pull end # Run container exposing ports docker_container 'my_nginx' do repo 'nginx' tag 'latest' port '80:80' binds [ '/some/local/files/:/etc/nginx/conf.d' ] host_name 'www' domain_name 'computers.biz' env 'FOO=bar' subscribes :redeploy, 'docker_image[nginx]' end ``` docker-1.10.3/docs/admin/configuring.md000066400000000000000000000225751267010174400177340ustar00rootroot00000000000000 # Configuring and running Docker on various distributions After successfully installing Docker, the `docker` daemon runs with its default configuration. In a production environment, system administrators typically configure the `docker` daemon to start and stop according to an organization's requirements. In most cases, the system administrator configures a process manager such as `SysVinit`, `Upstart`, or `systemd` to manage the `docker` daemon's start and stop. ### Running the docker daemon directly The `docker` daemon can be run directly using the `docker daemon` command. By default it listens on the Unix socket `unix:///var/run/docker.sock` $ docker daemon INFO[0000] +job init_networkdriver() INFO[0000] +job serveapi(unix:///var/run/docker.sock) INFO[0000] Listening for HTTP on unix (/var/run/docker.sock) ... ... 
### Configuring the docker daemon directly If you're running the `docker` daemon directly by running `docker daemon` instead of using a process manager, you can append the configuration options to the `docker` run command directly. Other options can be passed to the `docker` daemon to configure it. Some of the daemon's options are: | Flag | Description | |-----------------------|-----------------------------------------------------------| | `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. | | `-H`,`--host=[]` | Daemon socket(s) to connect to. | | `--tls=false` | Enable or disable TLS. By default, this is false. | Here is a an example of running the `docker` daemon with configuration options: $ docker daemon -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376 These options : - Enable `-D` (debug) mode - Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively - Listen for connections on `tcp://192.168.59.3:2376` The command line reference has the [complete list of daemon flags](../reference/commandline/daemon.md) with explanations. ## Ubuntu As of `14.04`, Ubuntu uses Upstart as a process manager. By default, Upstart jobs are located in `/etc/init` and the `docker` Upstart job can be found at `/etc/init/docker.conf`. After successfully [installing Docker for Ubuntu](../installation/linux/ubuntulinux.md), you can check the running status using Upstart in this way: $ sudo status docker docker start/running, process 989 ### Running Docker You can start/stop/restart the `docker` daemon using $ sudo start docker $ sudo stop docker $ sudo restart docker ### Configuring Docker The instructions below depict configuring Docker on a system that uses `upstart` as the process manager. As of Ubuntu 15.04, Ubuntu uses `systemd` as its process manager. 
For Ubuntu 15.04 and higher, refer to [control and configure Docker with systemd](systemd.md). You configure the `docker` daemon in the `/etc/default/docker` file on your system. You do this by specifying values in a `DOCKER_OPTS` variable. To configure Docker options: 1. Log into your host as a user with `sudo` or `root` privileges. 2. If you don't have one, create the `/etc/default/docker` file on your host. Depending on how you installed Docker, you may already have this file. 3. Open the file with your favorite editor. ``` $ sudo vi /etc/default/docker ``` 4. Add a `DOCKER_OPTS` variable with the following options. These options are appended to the `docker` daemon's run command. ``` DOCKER_OPTS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376" ``` These options : - Enable `-D` (debug) mode - Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively - Listen for connections on `tcp://192.168.59.3:2376` The command line reference has the [complete list of daemon flags](../reference/commandline/daemon.md) with explanations. 5. Save and close the file. 6. Restart the `docker` daemon. ``` $ sudo restart docker ``` 7. Verify that the `docker` daemon is running as specified with the `ps` command. ``` $ ps aux | grep docker | grep -v grep ``` ### Logs By default logs for Upstart jobs are located in `/var/log/upstart` and the logs for `docker` daemon can be located at `/var/log/upstart/docker.log` $ tail -f /var/log/upstart/docker.log INFO[0000] Loading containers: done. INFO[0000] docker daemon: 1.6.0 4749651; execdriver: native-0.2; graphdriver: aufs INFO[0000] +job acceptconnections() INFO[0000] -job acceptconnections() = OK (0) INFO[0000] Daemon has completed initialization ## CentOS / Red Hat Enterprise Linux / Fedora As of `7.x`, CentOS and RHEL use `systemd` as the process manager. As of `21`, Fedora uses `systemd` as its process manager. 
After successfully installing Docker for [CentOS](../installation/linux/centos.md)/[Red Hat Enterprise Linux](../installation/linux/rhel.md)/[Fedora](../installation/linux/fedora.md), you can check the running status in this way: $ sudo systemctl status docker ### Running Docker You can start/stop/restart the `docker` daemon using $ sudo systemctl start docker $ sudo systemctl stop docker $ sudo systemctl restart docker If you want Docker to start at boot, you should also: $ sudo systemctl enable docker ### Configuring Docker For CentOS 7.x and RHEL 7.x you can [control and configure Docker with systemd](systemd.md). Previously, for CentOS 6.x and RHEL 6.x you would configure the `docker` daemon in the `/etc/sysconfig/docker` file on your system. You would do this by specifying values in a `other_args` variable. For a short time in CentOS 7.x and RHEL 7.x you would specify values in a `OPTIONS` variable. This is no longer recommended in favor of using systemd directly. For this section, we will use CentOS 7.x as an example to configure the `docker` daemon. To configure Docker options: 1. Log into your host as a user with `sudo` or `root` privileges. 2. Create the `/etc/systemd/system/docker.service.d` directory. ``` $ sudo mkdir /etc/systemd/system/docker.service.d ``` 3. Create a `/etc/systemd/system/docker.service.d/docker.conf` file. 4. Open the file with your favorite editor. ``` $ sudo vi /etc/systemd/system/docker.service.d/docker.conf ``` 5. Override the `ExecStart` configuration from your `docker.service` file to customize the `docker` daemon. 
To modify the `ExecStart` configuration you have to specify an empty configuration followed by a new one as follows: ``` [Service] ExecStart= ExecStart=/usr/bin/docker daemon -H fd:// -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376 ``` These options : - Enable `-D` (debug) mode - Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively - Listen for connections on `tcp://192.168.59.3:2376` The command line reference has the [complete list of daemon flags](../reference/commandline/daemon.md) with explanations. 6. Save and close the file. 7. Flush changes. ``` $ sudo systemctl daemon-reload ``` 8. Restart the `docker` daemon. ``` $ sudo systemctl restart docker ``` 9. Verify that the `docker` daemon is running as specified with the `ps` command. ``` $ ps aux | grep docker | grep -v grep ``` ### Logs systemd has its own logging system called the journal. The logs for the `docker` daemon can be viewed using `journalctl -u docker` $ sudo journalctl -u docker May 06 00:22:05 localhost.localdomain systemd[1]: Starting Docker Application Container Engine... May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="+job serveapi(unix:///var/run/docker.sock)" May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="Listening for HTTP on unix (/var/run/docker.sock)" May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job init_networkdriver()" May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)" May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start." May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done." 
May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="docker daemon: 1.5.0-dev fc0329b/1.5.0; execdriver: native-0.2; graphdriver: devicemapper" May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()" May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)" _Note: Using and configuring journal is an advanced topic and is beyond the scope of this article._ docker-1.10.3/docs/admin/dsc.md000066400000000000000000000124461267010174400161670ustar00rootroot00000000000000 # Using PowerShell DSC Windows PowerShell Desired State Configuration (DSC) is a configuration management tool that extends the existing functionality of Windows PowerShell. DSC uses a declarative syntax to define the state in which a target should be configured. More information about PowerShell DSC can be found at [http://technet.microsoft.com/en-us/library/dn249912.aspx](http://technet.microsoft.com/en-us/library/dn249912.aspx). ## Requirements To use this guide you'll need a Windows host with PowerShell v4.0 or newer. The included DSC configuration script also uses the official PPA so only an Ubuntu target is supported. The Ubuntu target must already have the required OMI Server and PowerShell DSC for Linux providers installed. More information can be found at [https://github.com/MSFTOSSMgmt/WPSDSCLinux](https://github.com/MSFTOSSMgmt/WPSDSCLinux). The source repository listed below also includes PowerShell DSC for Linux installation and init scripts along with more detailed installation information. ## Installation The DSC configuration example source is available in the following repository: [https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC). 
It can be cloned with: $ git clone https://github.com/anweiss/DockerClientDSC.git ## Usage The DSC configuration utilizes a set of shell scripts to determine whether or not the specified Docker components are configured on the target node(s). The source repository also includes a script (`RunDockerClientConfig.ps1`) that can be used to establish the required CIM session(s) and execute the `Set-DscConfiguration` cmdlet. More detailed usage information can be found at [https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC). ### Install Docker The Docker installation configuration is equivalent to running: ``` apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys\ 36A1D7869245C8950F966E92D8576A8BA88D21E9 sh -c "echo deb https://apt.dockerproject.org/repo ubuntu-trusty main\ > /etc/apt/sources.list.d/docker.list" apt-get update apt-get install docker-engine ``` Ensure that your current working directory is set to the `DockerClientDSC` source and load the DockerClient configuration into the current PowerShell session ```powershell . .\DockerClient.ps1 ``` Generate the required DSC configuration .mof file for the targeted node ```powershell DockerClient -Hostname "myhost" ``` A sample DSC configuration data file has also been included and can be modified and used in conjunction with or in place of the `Hostname` parameter: ```powershell DockerClient -ConfigurationData .\DockerConfigData.psd1 ``` Start the configuration application process on the targeted node ```powershell .\RunDockerClientConfig.ps1 -Hostname "myhost" ``` The `RunDockerClientConfig.ps1` script can also parse a DSC configuration data file and execute configurations against multiple nodes as such: ```powershell .\RunDockerClientConfig.ps1 -ConfigurationData .\DockerConfigData.psd1 ``` ### Images Image configuration is equivalent to running: `docker pull [image]` or `docker rmi -f [IMAGE]`. 
Using the same steps defined above, execute `DockerClient` with the `Image` parameter and apply the configuration: ```powershell DockerClient -Hostname "myhost" -Image "node" .\RunDockerClientConfig.ps1 -Hostname "myhost" ``` You can also configure the host to pull multiple images: ```powershell DockerClient -Hostname "myhost" -Image "node","mongo" .\RunDockerClientConfig.ps1 -Hostname "myhost" ``` To remove images, use a hashtable as follows: ```powershell DockerClient -Hostname "myhost" -Image @{Name="node"; Remove=$true} .\RunDockerClientConfig.ps1 -Hostname $hostname ``` ### Containers Container configuration is equivalent to running: ``` docker run -d --name="[containername]" -p '[port]' -e '[env]' --link '[link]'\ '[image]' '[command]' ``` or ``` docker rm -f [containername] ``` To create or remove containers, you can use the `Container` parameter with one or more hashtables. The hashtable(s) passed to this parameter can have the following properties: - Name (required) - Image (required unless Remove property is set to `$true`) - Port - Env - Link - Command - Remove For example, create a hashtable with the settings for your container: ```powershell $webContainer = @{Name="web"; Image="anweiss/docker-platynem"; Port="80:80"} ``` Then, using the same steps defined above, execute `DockerClient` with the `-Image` and `-Container` parameters: ```powershell DockerClient -Hostname "myhost" -Image node -Container $webContainer .\RunDockerClientConfig.ps1 -Hostname "myhost" ``` Existing containers can also be removed as follows: ```powershell $containerToRemove = @{Name="web"; Remove=$true} DockerClient -Hostname "myhost" -Container $containerToRemove .\RunDockerClientConfig.ps1 -Hostname "myhost" ``` Here is a hashtable with all of the properties that can be used to create a container: ```powershell $containerProps = @{Name="web"; Image="node:latest"; Port="80:80"; ` Env="PORT=80"; Link="db:db"; Command="grunt"} ``` 
docker-1.10.3/docs/admin/host_integration.md000066400000000000000000000056321267010174400207750ustar00rootroot00000000000000 # Automatically start containers As of Docker 1.2, [restart policies](../reference/run.md#restart-policies-restart) are the built-in Docker mechanism for restarting containers when they exit. If set, restart policies will be used when the Docker daemon starts up, as typically happens after a system boot. Restart policies will ensure that linked containers are started in the correct order. If restart policies don't suit your needs (i.e., you have non-Docker processes that depend on Docker containers), you can use a process manager like [upstart](http://upstart.ubuntu.com/), [systemd](http://freedesktop.org/wiki/Software/systemd/) or [supervisor](http://supervisord.org/) instead. ## Using a process manager Docker does not set any restart policies by default, but be aware that they will conflict with most process managers. So don't set restart policies if you are using a process manager. When you have finished setting up your image and are happy with your running container, you can then attach a process manager to manage it. When you run `docker start -a`, Docker will automatically attach to the running container, or start it if needed and forward all signals so that the process manager can detect when a container stops and correctly restart it. Here are a few sample scripts for systemd and upstart to integrate with Docker. ## Examples The examples below show configuration files for two popular process managers, upstart and systemd. In these examples, we'll assume that we have already created a container to run Redis with `--name=redis_server`. These files define a new service that will be started after the docker daemon service has started. 
### upstart description "Redis container" author "Me" start on filesystem and started docker stop on runlevel [!2345] respawn script /usr/bin/docker start -a redis_server end script ### systemd [Unit] Description=Redis container Requires=docker.service After=docker.service [Service] Restart=always ExecStart=/usr/bin/docker start -a redis_server ExecStop=/usr/bin/docker stop -t 2 redis_server [Install] WantedBy=local.target If you need to pass options to the redis container (such as `--env`), then you'll need to use `docker run` rather than `docker start`. This will create a new container every time the service is started, which will be stopped and removed when the service is stopped. [Service] ... ExecStart=/usr/bin/docker run --env foo=bar --name redis_server redis ExecStop=/usr/bin/docker stop -t 2 redis_server ; /usr/bin/docker rm -f redis_server ... docker-1.10.3/docs/admin/index.md000066400000000000000000000003041267010174400165130ustar00rootroot00000000000000 docker-1.10.3/docs/admin/logging/000077500000000000000000000000001267010174400165135ustar00rootroot00000000000000docker-1.10.3/docs/admin/logging/awslogs.md000066400000000000000000000062211267010174400205150ustar00rootroot00000000000000 # Amazon CloudWatch Logs logging driver The `awslogs` logging driver sends container logs to [Amazon CloudWatch Logs](https://aws.amazon.com/cloudwatch/details/#log-monitoring). Log entries can be retrieved through the [AWS Management Console](https://console.aws.amazon.com/cloudwatch/home#logs:) or the [AWS SDKs and Command Line Tools](http://docs.aws.amazon.com/cli/latest/reference/logs/index.html). ## Usage You can configure the default logging driver by passing the `--log-driver` option to the Docker daemon: docker daemon --log-driver=awslogs You can set the logging driver for a specific container by using the `--log-driver` option to `docker run`: docker run --log-driver=awslogs ... 
## Amazon CloudWatch Logs options You can use the `--log-opt NAME=VALUE` flag to specify Amazon CloudWatch Logs logging driver options. ### awslogs-region The `awslogs` logging driver sends your Docker logs to a specific region. Use the `awslogs-region` log option or the `AWS_REGION` environment variable to set the region. By default, if your Docker daemon is running on an EC2 instance and no region is set, the driver uses the instance's region. docker run --log-driver=awslogs --log-opt awslogs-region=us-east-1 ... ### awslogs-group You must specify a [log group](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatchLogs.html) for the `awslogs` logging driver. You can specify the log group with the `awslogs-group` log option: docker run --log-driver=awslogs --log-opt awslogs-region=us-east-1 --log-opt awslogs-group=myLogGroup ... ### awslogs-stream To configure which [log stream](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatchLogs.html) should be used, you can specify the `awslogs-stream` log option. If not specified, the container ID is used as the log stream. > **Note:** > Log streams within a given log group should only be used by one container > at a time. Using the same log stream for multiple containers concurrently > can cause reduced logging performance. ## Credentials You must provide AWS credentials to the Docker daemon to use the `awslogs` logging driver. You can provide these credentials with the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` environment variables, the default AWS shared credentials file (`~/.aws/credentials` of the root user), or (if you are running the Docker daemon on an Amazon EC2 instance) the Amazon EC2 instance profile. Credentials must have a policy applied that allows the `logs:CreateLogStream` and `logs:PutLogEvents` actions, as shown in the following example. 
{ "Version": "2012-10-17", "Statement": [ { "Action": [ "logs:CreateLogStream", "logs:PutLogEvents" ], "Effect": "Allow", "Resource": "*" } ] } docker-1.10.3/docs/admin/logging/fluentd.md000066400000000000000000000077141267010174400205070ustar00rootroot00000000000000 # Fluentd logging driver The `fluentd` logging driver sends container logs to the [Fluentd](http://www.fluentd.org/) collector as structured log data. Then, users can use any of the [various output plugins of Fluentd](http://www.fluentd.org/plugins) to write these logs to various destinations. In addition to the log message itself, the `fluentd` log driver sends the following metadata in the structured log message: | Field | Description | -------------------|-------------------------------------| | `container_id` | The full 64-character container ID. | | `container_name` | The container name at the time it was started. If you use `docker rename` to rename a container, the new name is not reflected in the journal entries. | | `source` | `stdout` or `stderr` | The `docker logs` command is not available for this logging driver. ## Usage Some options are supported by specifying `--log-opt` as many times as needed: - `fluentd-address`: specify `host:port` to connect `localhost:24224` - `tag`: specify tag for fluentd message, which interpret some markup, ex `{{.ID}}`, `{{.FullID}}` or `{{.Name}}` `docker.{{.ID}}` Configure the default logging driver by passing the `--log-driver` option to the Docker daemon: docker daemon --log-driver=fluentd To set the logging driver for a specific container, pass the `--log-driver` option to `docker run`: docker run --log-driver=fluentd ... Before using this logging driver, launch a Fluentd daemon. The logging driver connects to this daemon through `localhost:24224` by default. Use the `fluentd-address` option to connect to a different address. 
docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224 If the container cannot connect to the Fluentd daemon, the container stops immediately. ## Options Users can use the `--log-opt NAME=VALUE` flag to specify additional Fluentd logging driver options. ### fluentd-address By default, the logging driver connects to `localhost:24224`. Supply the `fluentd-address` option to connect to a different address. docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224 ### tag By default, Docker uses the first 12 characters of the container ID to tag log messages. Refer to the [log tag option documentation](log_tags.md) for customizing the log tag format. ### labels and env The `labels` and `env` options each take a comma-separated list of keys. If there is collision between `label` and `env` keys, the value of the `env` takes precedence. Both options add additional fields to the extra attributes of a logging message. ## Fluentd daemon management with Docker About `Fluentd` itself, see [the project webpage](http://www.fluentd.org) and [its documents](http://docs.fluentd.org/). To use this logging driver, start the `fluentd` daemon on a host. We recommend that you use [the Fluentd docker image](https://registry.hub.docker.com/u/fluent/fluentd/). This image is especially useful if you want to aggregate multiple container logs on each host then, later, transfer the logs to another Fluentd node to create an aggregate store. ### Testing container loggers 1. Write a configuration file (`test.conf`) to dump input logs: @type forward @type stdout 2. Launch Fluentd container with this configuration file: $ docker run -it -p 24224:24224 -v /path/to/conf/test.conf:/fluentd/etc -e FLUENTD_CONF=test.conf fluent/fluentd:latest 3. 
Start one or more containers with the `fluentd` logging driver: $ docker run --log-driver=fluentd your/application docker-1.10.3/docs/admin/logging/index.md000066400000000000000000000010261267010174400201430ustar00rootroot00000000000000 # Logging Drivers * [Configuring logging drivers](overview.md) * [Configuring log tags](log_tags.md) * [Fluentd logging driver](fluentd.md) * [Journald logging driver](journald.md) * [Amazon CloudWatch Logs logging driver](awslogs.md) * [Splunk logging driver](splunk.md) docker-1.10.3/docs/admin/logging/journald.md000066400000000000000000000060011267010174400206500ustar00rootroot00000000000000 # Journald logging driver The `journald` logging driver sends container logs to the [systemd journal](http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html). Log entries can be retrieved using the `journalctl` command, through use of the journal API, or using the `docker logs` command. In addition to the text of the log message itself, the `journald` log driver stores the following metadata in the journal with each message: | Field | Description | ----------------------|-------------| | `CONTAINER_ID` | The container ID truncated to 12 characters. | | `CONTAINER_ID_FULL` | The full 64-character container ID. | | `CONTAINER_NAME` | The container name at the time it was started. If you use `docker rename` to rename a container, the new name is not reflected in the journal entries. | ## Usage You can configure the default logging driver by passing the `--log-driver` option to the Docker daemon: docker daemon --log-driver=journald You can set the logging driver for a specific container by using the `--log-driver` option to `docker run`: docker run --log-driver=journald ... ## Options Users can use the `--log-opt NAME=VALUE` flag to specify additional journald logging driver options. ### labels and env The `labels` and `env` options each take a comma-separated list of keys. 
If there is collision between `label` and `env` keys, the value of the `env` takes precedence. Both options add additional metadata in the journal with each message. ## Note regarding container names The value logged in the `CONTAINER_NAME` field is the container name that was set at startup. If you use `docker rename` to rename a container, the new name will not be reflected in the journal entries. Journal entries will continue to use the original name. ## Retrieving log messages with journalctl You can use the `journalctl` command to retrieve log messages. You can apply filter expressions to limit the retrieved messages to a specific container. For example, to retrieve all log messages from a container referenced by name: # journalctl CONTAINER_NAME=webserver You can make use of additional filters to further limit the messages retrieved. For example, to see just those messages generated since the system last booted: # journalctl -b CONTAINER_NAME=webserver Or to retrieve log messages in JSON format with complete metadata: # journalctl -o json CONTAINER_NAME=webserver ## Retrieving log messages with the journal API This example uses the `systemd` Python module to retrieve container logs: import systemd.journal reader = systemd.journal.Reader() reader.add_match('CONTAINER_NAME=web') for msg in reader: print '{CONTAINER_ID_FULL}: {MESSAGE}'.format(**msg) docker-1.10.3/docs/admin/logging/log_tags.md000066400000000000000000000043131267010174400206350ustar00rootroot00000000000000 # Log Tags The `tag` log option specifies how to format a tag that identifies the container's log messages. By default, the system uses the first 12 characters of the container id. 
To override this behavior, specify a `tag` option: ``` docker run --log-driver=fluentd --log-opt fluentd-address=myhost.local:24224 --log-opt tag="mailer" ``` Docker supports some special template markup you can use when specifying a tag's value: | Markup | Description | |--------------------|------------------------------------------------------| | `{{.ID}}` | The first 12 characters of the container id. | | `{{.FullID}}` | The full container id. | | `{{.Name}}` | The container name. | | `{{.ImageID}}` | The first 12 characters of the container's image id. | | `{{.ImageFullID}}` | The container's full image identifier. | | `{{.ImageName}}` | The name of the image used by the container. | For example, specifying a `--log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"` value yields `syslog` log lines like: ``` Aug 7 18:33:19 HOSTNAME docker/hello-world/foobar/5790672ab6a0[9103]: Hello from Docker. ``` At startup time, the system sets the `container_name` field and `{{.Name}}` in the tags. If you use `docker rename` to rename a container, the new name is not reflected in the log messages. Instead, these messages continue to use the original container name. For advanced usage, the generated tag's use [go templates](http://golang.org/pkg/text/template/) and the container's [logging context](https://github.com/docker/docker/blob/master/daemon/logger/context.go). >**Note**:The driver specific log options `syslog-tag`, `fluentd-tag` and >`gelf-tag` still work for backwards compatibility. However, going forward you >should standardize on using the generic `tag` log option instead. docker-1.10.3/docs/admin/logging/overview.md000066400000000000000000000200061267010174400207010ustar00rootroot00000000000000 # Configure logging drivers The container can have a different logging driver than the Docker daemon. Use the `--log-driver=VALUE` with the `docker run` command to configure the container's logging driver. 
The following options are supported: | `none` | Disables any logging for the container. `docker logs` won't be available with this driver. | |-------------|-------------------------------------------------------------------------------------------------------------------------------| | `json-file` | Default logging driver for Docker. Writes JSON messages to file. | | `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. | | `journald` | Journald logging driver for Docker. Writes log messages to `journald`. | | `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash. | | `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). | | `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs. | | `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using HTTP Event Collector. | The `docker logs` command is available only for the `json-file` and `journald` logging drivers. The `labels` and `env` options add additional attributes for use with logging drivers that accept them. Each option takes a comma-separated list of keys. If there is collision between `label` and `env` keys, the value of the `env` takes precedence. To use attributes, specify them when you start the Docker daemon. ``` docker daemon --log-driver=json-file --log-opt labels=foo --log-opt env=foo,fizz ``` Then, run a container and specify values for the `labels` or `env`. For example, you might use this: ``` docker run --label foo=bar -e fizz=buzz -d -P training/webapp python app.py ``` This adds additional fields to the log depending on the driver, e.g. 
for `json-file` that looks like: "attrs":{"fizz":"buzz","foo":"bar"} ## json-file options The following logging options are supported for the `json-file` logging driver: --log-opt max-size=[0-9+][k|m|g] --log-opt max-file=[0-9+] --log-opt labels=label1,label2 --log-opt env=env1,env2 Logs that reach `max-size` are rolled over. You can set the size in kilobytes(k), megabytes(m), or gigabytes(g). eg `--log-opt max-size=50m`. If `max-size` is not set, then logs are not rolled over. `max-file` specifies the maximum number of files that a log is rolled over before being discarded. eg `--log-opt max-file=100`. If `max-size` is not set, then `max-file` is not honored. If `max-size` and `max-file` are set, `docker logs` only returns the log lines from the newest log file. ## syslog options The following logging options are supported for the `syslog` logging driver: --log-opt syslog-address=[tcp|udp|tcp+tls]://host:port --log-opt syslog-address=unix://path --log-opt syslog-facility=daemon --log-opt syslog-tls-ca-cert=/etc/ca-certificates/custom/ca.pem --log-opt syslog-tls-cert=/etc/ca-certificates/custom/cert.pem --log-opt syslog-tls-key=/etc/ca-certificates/custom/key.pem --log-opt syslog-tls-skip-verify=true --log-opt tag="mailer" `syslog-address` specifies the remote syslog server address where the driver connects to. If not specified it defaults to the local unix socket of the running system. If transport is either `tcp` or `udp` and `port` is not specified it defaults to `514` The following example shows how to have the `syslog` driver connect to a `syslog` remote server at `192.168.0.42` on port `123` $ docker run --log-driver=syslog --log-opt syslog-address=tcp://192.168.0.42:123 The `syslog-facility` option configures the syslog facility. By default, the system uses the `daemon` value. 
To override this behavior, you can provide an integer of 0 to 23 or any of the following named facilities: * `kern` * `user` * `mail` * `daemon` * `auth` * `syslog` * `lpr` * `news` * `uucp` * `cron` * `authpriv` * `ftp` * `local0` * `local1` * `local2` * `local3` * `local4` * `local5` * `local6` * `local7` `syslog-tls-ca-cert` specifies the absolute path to the trust certificates signed by the CA. This option is ignored if the address protocol is not `tcp+tls`. `syslog-tls-cert` specifies the absolute path to the TLS certificate file. This option is ignored if the address protocol is not `tcp+tls`. `syslog-tls-key` specifies the absolute path to the TLS key file. This option is ignored if the address protocol is not `tcp+tls`. `syslog-tls-skip-verify` configures the TLS verification. This verification is enabled by default, but it can be overridden by setting this option to `true`. This option is ignored if the address protocol is not `tcp+tls`. By default, Docker uses the first 12 characters of the container ID to tag log messages. Refer to the [log tag option documentation](log_tags.md) for customizing the log tag format. ## journald options The `journald` logging driver stores the container id in the journal's `CONTAINER_ID` field. For detailed information on working with this logging driver, see [the journald logging driver](journald.md) reference documentation. ## gelf options The GELF logging driver supports the following options: --log-opt gelf-address=udp://host:port --log-opt tag="database" --log-opt labels=label1,label2 --log-opt env=env1,env2 The `gelf-address` option specifies the remote GELF server address that the driver connects to. Currently, only `udp` is supported as the transport and you must specify a `port` value. 
The following example shows how to connect the `gelf` driver to a GELF remote server at `192.168.0.42` on port `12201` $ docker run --log-driver=gelf --log-opt gelf-address=udp://192.168.0.42:12201 By default, Docker uses the first 12 characters of the container ID to tag log messages. Refer to the [log tag option documentation](log_tags.md) for customizing the log tag format. The `labels` and `env` options are supported by the gelf logging driver. It adds additional key on the `extra` fields, prefixed by an underscore (`_`). // […] "_foo": "bar", "_fizz": "buzz", // […] ## fluentd options You can use the `--log-opt NAME=VALUE` flag to specify these additional Fluentd logging driver options. - `fluentd-address`: specify `host:port` to connect [localhost:24224] - `tag`: specify tag for `fluentd` message, For example, to specify both additional options: `docker run --log-driver=fluentd --log-opt fluentd-address=localhost:24224 --log-opt tag=docker.{{.Name}}` If container cannot connect to the Fluentd daemon on the specified address, the container stops immediately. For detailed information on working with this logging driver, see [the fluentd logging driver](fluentd.md) ## Specify Amazon CloudWatch Logs options The Amazon CloudWatch Logs logging driver supports the following options: --log-opt awslogs-region= --log-opt awslogs-group= --log-opt awslogs-stream= For detailed information on working with this logging driver, see [the awslogs logging driver](awslogs.md) reference documentation. ## Splunk options The Splunk logging driver requires the following options: --log-opt splunk-token= --log-opt splunk-url=https://your_splunk_instance:8088 For detailed information about working with this logging driver, see the [Splunk logging driver](splunk.md) reference documentation. 
docker-1.10.3/docs/admin/logging/splunk.md000066400000000000000000000122001267010174400203440ustar00rootroot00000000000000 # Splunk logging driver The `splunk` logging driver sends container logs to [HTTP Event Collector](http://dev.splunk.com/view/event-collector/SP-CAAAE6M) in Splunk Enterprise and Splunk Cloud. ## Usage You can configure the default logging driver by passing the `--log-driver` option to the Docker daemon: docker daemon --log-driver=splunk You can set the logging driver for a specific container by using the `--log-driver` option to `docker run`: docker run --log-driver=splunk ... ## Splunk options You can use the `--log-opt NAME=VALUE` flag to specify these additional Splunk logging driver options: | Option | Required | Description | |-----------------------------|----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `splunk-token` | required | Splunk HTTP Event Collector token. | | `splunk-url` | required | Path to your Splunk Enterprise or Splunk Cloud instance (including port and schema used by HTTP Event Collector) `https://your_splunk_instance:8088`. | | `splunk-source` | optional | Event source. | | `splunk-sourcetype` | optional | Event source type. | | `splunk-index` | optional | Event index. | | `splunk-capath` | optional | Path to root certificate. | | `splunk-caname` | optional | Name to use for validating server certificate; by default the hostname of the `splunk-url` will be used. | | `splunk-insecureskipverify` | optional | Ignore server certificate validation. | | `tag` | optional | Specify tag for message, which interpret some markup. Default value is `{{.ID}}` (12 characters of the container ID). Refer to the [log tag option documentation](log_tags.md) for customizing the log tag format. 
| | `labels` | optional | Comma-separated list of keys of labels, which should be included in message, if these labels are specified for container. | | `env` | optional | Comma-separated list of keys of environment variables, which should be included in message, if these variables are specified for container. | If there is collision between `label` and `env` keys, the value of the `env` takes precedence. Both options add additional fields to the attributes of a logging message. Below is an example of the logging option specified for the Splunk Enterprise instance. The instance is installed locally on the same machine on which the Docker daemon is running. The path to the root certificate and Common Name is specified using an HTTPS schema. This is used for verification. The `SplunkServerDefaultCert` is automatically generated by Splunk certificates. docker run --log-driver=splunk \ --log-opt splunk-token=176FCEBF-4CF5-4EDF-91BC-703796522D20 \ --log-opt splunk-url=https://splunkhost:8088 \ --log-opt splunk-capath=/path/to/cert/cacert.pem \ --log-opt splunk-caname=SplunkServerDefaultCert --log-opt tag="{{.Name}}/{{.FullID}}" --log-opt labels=location --log-opt env=TEST --env "TEST=false" --label location=west your/application docker-1.10.3/docs/admin/puppet.md000066400000000000000000000055021267010174400167260ustar00rootroot00000000000000 # Using Puppet > *Note:* Please note this is a community contributed installation path. The > only `official` installation is using the > [*Ubuntu*](../installation/linux/ubuntulinux.md) installation > path. This version may sometimes be out of date. ## Requirements To use this guide you'll need a working installation of Puppet from [Puppet Labs](https://puppetlabs.com) . The module also currently uses the official PPA so only works with Ubuntu. ## Installation The module is available on the [Puppet Forge](https://forge.puppetlabs.com/garethr/docker/) and can be installed using the built-in module tool. 
$ puppet module install garethr/docker It can also be found on [GitHub](https://github.com/garethr/garethr-docker) if you would rather download the source. ## Usage The module provides a puppet class for installing Docker and two defined types for managing images and containers. ### Installation include 'docker' ### Images The next step is probably to install a Docker image. For this, we have a defined type which can be used like so: docker::image { 'ubuntu': } This is equivalent to running: $ docker pull ubuntu Note that it will only be downloaded if an image of that name does not already exist. This is downloading a large binary so on first run can take a while. For that reason this define turns off the default 5 minute timeout for the exec type. Note that you can also remove images you no longer need with: docker::image { 'ubuntu': ensure => 'absent', } ### Containers Now you have an image where you can run commands within a container managed by Docker. docker::run { 'helloworld': image => 'ubuntu', command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"', } This is equivalent to running the following command, but under upstart: $ docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done" Run also contains a number of optional parameters: docker::run { 'helloworld': image => 'ubuntu', command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"', ports => ['4444', '4555'], volumes => ['/var/lib/couchdb', '/var/log'], volumes_from => '6446ea52fbc9', memory_limit => 10485760, # bytes username => 'example', hostname => 'example.com', env => ['FOO=BAR', 'FOO2=BAR2'], dns => ['8.8.8.8', '8.8.4.4'], } > *Note:* > The `ports`, `env`, `dns` and `volumes` attributes can be set with either a single > string or as above with an array of values. 
docker-1.10.3/docs/admin/registry_mirror.md000066400000000000000000000012341267010174400206510ustar00rootroot00000000000000 # Run a local registry mirror The original content was deprecated. [An archived version](https://docs.docker.com/v1.6/articles/registry_mirror) is available in the 1.7 documentation. For information about configuring mirrors with the latest Docker Registry version, please file a support request with [the Distribution project](https://github.com/docker/distribution/issues). docker-1.10.3/docs/admin/runmetrics.md000066400000000000000000000476511267010174400176170ustar00rootroot00000000000000 # Runtime metrics ## Docker stats You can use the `docker stats` command to live stream a container's runtime metrics. The command supports CPU, memory usage, memory limit, and network IO metrics. The following is a sample output from the `docker stats` command $ docker stats redis1 redis2 CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O redis1 0.07% 796 KB / 64 MB 1.21% 788 B / 648 B 3.568 MB / 512 KB redis2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B The [docker stats](../reference/commandline/stats.md) reference page has more details about the `docker stats` command. ## Control groups Linux Containers rely on [control groups]( https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt) which not only track groups of processes, but also expose metrics about CPU, memory, and block I/O usage. You can access those metrics and obtain network usage metrics as well. This is relevant for "pure" LXC containers, as well as for Docker containers. Control groups are exposed through a pseudo-filesystem. In recent distros, you should find this filesystem under `/sys/fs/cgroup`. Under that directory, you will see multiple sub-directories, called devices, freezer, blkio, etc.; each sub-directory actually corresponds to a different cgroup hierarchy. On older systems, the control groups might be mounted on `/cgroup`, without distinct hierarchies. 
In that case, instead of seeing the sub-directories, you will see a bunch of files in that directory, and possibly some directories corresponding to existing containers. To figure out where your control groups are mounted, you can run: $ grep cgroup /proc/mounts ## Enumerating cgroups You can look into `/proc/cgroups` to see the different control group subsystems known to the system, the hierarchy they belong to, and how many groups they contain. You can also look at `/proc//cgroup` to see which control groups a process belongs to. The control group will be shown as a path relative to the root of the hierarchy mountpoint; e.g., `/` means “this process has not been assigned into a particular group”, while `/lxc/pumpkin` means that the process is likely to be a member of a container named `pumpkin`. ## Finding the cgroup for a given container For each container, one cgroup will be created in each hierarchy. On older systems with older versions of the LXC userland tools, the name of the cgroup will be the name of the container. With more recent versions of the LXC tools, the cgroup will be `lxc/.` For Docker containers using cgroups, the container name will be the full ID or long ID of the container. If a container shows up as ae836c95b4c3 in `docker ps`, its long ID might be something like `ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can look it up with `docker inspect` or `docker ps --no-trunc`. Putting everything together to look at the memory metrics for a Docker container, take a look at `/sys/fs/cgroup/memory/docker//`. ## Metrics from cgroups: memory, CPU, block I/O For each subsystem (memory, CPU, and block I/O), you will find one or more pseudo-files containing statistics. ### Memory metrics: `memory.stat` Memory metrics are found in the "memory" cgroup. Note that the memory control group adds a little overhead, because it does very fine-grained accounting of the memory usage on your host. 
Therefore, many distros chose to not enable it by default. Generally, to enable it, all you have to do is to add some kernel command-line parameters: `cgroup_enable=memory swapaccount=1`. The metrics are in the pseudo-file `memory.stat`. Here is what it will look like: cache 11492564992 rss 1930993664 mapped_file 306728960 pgpgin 406632648 pgpgout 403355412 swap 0 pgfault 728281223 pgmajfault 1724 inactive_anon 46608384 active_anon 1884520448 inactive_file 7003344896 active_file 4489052160 unevictable 32768 hierarchical_memory_limit 9223372036854775807 hierarchical_memsw_limit 9223372036854775807 total_cache 11492564992 total_rss 1930993664 total_mapped_file 306728960 total_pgpgin 406632648 total_pgpgout 403355412 total_swap 0 total_pgfault 728281223 total_pgmajfault 1724 total_inactive_anon 46608384 total_active_anon 1884520448 total_inactive_file 7003344896 total_active_file 4489052160 total_unevictable 32768 The first half (without the `total_` prefix) contains statistics relevant to the processes within the cgroup, excluding sub-cgroups. The second half (with the `total_` prefix) includes sub-cgroups as well. Some metrics are "gauges", i.e., values that can increase or decrease (e.g., swap, the amount of swap space used by the members of the cgroup). Some others are "counters", i.e., values that can only go up, because they represent occurrences of a specific event (e.g., pgfault, which indicates the number of page faults which happened since the creation of the cgroup; this number can never decrease). - **cache:** the amount of memory used by the processes of this control group that can be associated precisely with a block on a block device. When you read from and write to files on disk, this amount will increase. This will be the case if you use "conventional" I/O (`open`, `read`, `write` syscalls) as well as mapped files (with `mmap`). It also accounts for the memory used by `tmpfs` mounts, though the reasons are unclear. 
- **rss:** the amount of memory that *doesn't* correspond to anything on disk: stacks, heaps, and anonymous memory maps. - **mapped_file:** indicates the amount of memory mapped by the processes in the control group. It doesn't give you information about *how much* memory is used; it rather tells you *how* it is used. - **pgfault and pgmajfault:** indicate the number of times that a process of the cgroup triggered a "page fault" and a "major fault", respectively. A page fault happens when a process accesses a part of its virtual memory space which is nonexistent or protected. The former can happen if the process is buggy and tries to access an invalid address (it will then be sent a `SIGSEGV` signal, typically killing it with the famous `Segmentation fault` message). The latter can happen when the process reads from a memory zone which has been swapped out, or which corresponds to a mapped file: in that case, the kernel will load the page from disk, and let the CPU complete the memory access. It can also happen when the process writes to a copy-on-write memory zone: likewise, the kernel will preempt the process, duplicate the memory page, and resume the write operation on the process` own copy of the page. "Major" faults happen when the kernel actually has to read the data from disk. When it just has to duplicate an existing page, or allocate an empty page, it's a regular (or "minor") fault. - **swap:** the amount of swap currently used by the processes in this cgroup. - **active_anon and inactive_anon:** the amount of *anonymous* memory that has been identified has respectively *active* and *inactive* by the kernel. "Anonymous" memory is the memory that is *not* linked to disk pages. In other words, that's the equivalent of the rss counter described above. In fact, the very definition of the rss counter is **active_anon** + **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory used up by `tmpfs` filesystems mounted by this control group). 
Now, what's the difference between "active" and "inactive"? Pages are initially "active"; and at regular intervals, the kernel sweeps over the memory, and tags some pages as "inactive". Whenever they are accessed again, they are immediately retagged "active". When the kernel is almost out of memory, and time comes to swap out to disk, the kernel will swap "inactive" pages. - **active_file and inactive_file:** cache memory, with *active* and *inactive* similar to the *anon* memory above. The exact formula is cache = **active_file** + **inactive_file** + **tmpfs**. The exact rules used by the kernel to move memory pages between active and inactive sets are different from the ones used for anonymous memory, but the general principle is the same. Note that when the kernel needs to reclaim memory, it is cheaper to reclaim a clean (=non modified) page from this pool, since it can be reclaimed immediately (while anonymous pages and dirty/modified pages have to be written to disk first). - **unevictable:** the amount of memory that cannot be reclaimed; generally, it will account for memory that has been "locked" with `mlock`. It is often used by crypto frameworks to make sure that secret keys and other sensitive material never gets swapped out to disk. - **memory and memsw limits:** These are not really metrics, but a reminder of the limits applied to this cgroup. The first one indicates the maximum amount of physical memory that can be used by the processes of this control group; the second one indicates the maximum amount of RAM+swap. Accounting for memory in the page cache is very complex. If two processes in different control groups both read the same file (ultimately relying on the same blocks on disk), the corresponding memory charge will be split between the control groups. It's nice, but it also means that when a cgroup is terminated, it could increase the memory usage of another cgroup, because they are not splitting the cost anymore for those memory pages. 
### CPU metrics: `cpuacct.stat` Now that we've covered memory metrics, everything else will look very simple in comparison. CPU metrics will be found in the `cpuacct` controller. For each container, you will find a pseudo-file `cpuacct.stat`, containing the CPU usage accumulated by the processes of the container, broken down between `user` and `system` time. If you're not familiar with the distinction, `user` is the time during which the processes were in direct control of the CPU (i.e., executing process code), and `system` is the time during which the CPU was executing system calls on behalf of those processes. Those times are expressed in ticks of 1/100th of a second. Actually, they are expressed in "user jiffies". There are `USER_HZ` *"jiffies"* per second, and on x86 systems, `USER_HZ` is 100. This used to map exactly to the number of scheduler "ticks" per second; but with the advent of higher frequency scheduling, as well as [tickless kernels]( http://lwn.net/Articles/549580/), the number of kernel ticks wasn't relevant anymore. It stuck around anyway, mainly for legacy and compatibility reasons. ### Block I/O metrics Block I/O is accounted in the `blkio` controller. Different metrics are scattered across different files. While you can find in-depth details in the [blkio-controller]( https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt) file in the kernel documentation, here is a short list of the most relevant ones: - **blkio.sectors:** contain the number of 512-bytes sectors read and written by the processes member of the cgroup, device by device. Reads and writes are merged in a single counter. - **blkio.io_service_bytes:** indicates the number of bytes read and written by the cgroup. It has 4 counters per device, because for each device, it differentiates between synchronous vs. asynchronous I/O, and reads vs. writes. - **blkio.io_serviced:** the number of I/O operations performed, regardless of their size. 
It also has 4 counters per device. - **blkio.io_queued:** indicates the number of I/O operations currently queued for this cgroup. In other words, if the cgroup isn't doing any I/O, this will be zero. Note that the opposite is not true. In other words, if there is no I/O queued, it does not mean that the cgroup is idle (I/O-wise). It could be doing purely synchronous reads on an otherwise quiescent device, which is therefore able to handle them immediately, without queuing. Also, while it is helpful to figure out which cgroup is putting stress on the I/O subsystem, keep in mind that it is a relative quantity. Even if a process group does not perform more I/O, its queue size can increase just because the device load increases because of other devices. ## Network metrics Network metrics are not exposed directly by control groups. There is a good explanation for that: network interfaces exist within the context of *network namespaces*. The kernel could probably accumulate metrics about packets and bytes sent and received by a group of processes, but those metrics wouldn't be very useful. You want per-interface metrics (because traffic happening on the local `lo` interface doesn't really count). But since processes in a single cgroup can belong to multiple network namespaces, those metrics would be harder to interpret: multiple network namespaces means multiple `lo` interfaces, potentially multiple `eth0` interfaces, etc.; so this is why there is no easy way to gather network metrics with control groups. Instead we can gather network metrics from other sources: ### IPtables IPtables (or rather, the netfilter framework for which iptables is just an interface) can do some serious accounting. For instance, you can setup a rule to account for the outbound HTTP traffic on a web server: $ iptables -I OUTPUT -p tcp --sport 80 There is no `-j` or `-g` flag, so the rule will just count matched packets and go to the following rule. 
Later, you can check the values of the counters, with: $ iptables -nxvL OUTPUT Technically, `-n` is not required, but it will prevent iptables from doing DNS reverse lookups, which are probably useless in this scenario. Counters include packets and bytes. If you want to setup metrics for container traffic like this, you could execute a `for` loop to add two `iptables` rules per container IP address (one in each direction), in the `FORWARD` chain. This will only meter traffic going through the NAT layer; you will also have to add traffic going through the userland proxy. Then, you will need to check those counters on a regular basis. If you happen to use `collectd`, there is a [nice plugin](https://collectd.org/wiki/index.php/Table_of_Plugins) to automate iptables counters collection. ### Interface-level counters Since each container has a virtual Ethernet interface, you might want to check directly the TX and RX counters of this interface. You will notice that each container is associated to a virtual Ethernet interface in your host, with a name like `vethKk8Zqi`. Figuring out which interface corresponds to which container is, unfortunately, difficult. But for now, the best way is to check the metrics *from within the containers*. To accomplish this, you can run an executable from the host environment within the network namespace of a container using **ip-netns magic**. The `ip-netns exec` command will let you execute any program (present in the host system) within any network namespace visible to the current process. This means that your host will be able to enter the network namespace of your containers, but your containers won't be able to access the host, nor their sibling containers. Containers will be able to “see” and affect their sub-containers, though. The exact format of the command is: $ ip netns exec For example: $ ip netns exec mycontainer netstat -i `ip netns` finds the "mycontainer" container by using namespaces pseudo-files. 
Each process belongs to one network namespace, one PID namespace, one `mnt` namespace, etc., and those namespaces are materialized under `/proc//ns/`. For example, the network namespace of PID 42 is materialized by the pseudo-file `/proc/42/ns/net`. When you run `ip netns exec mycontainer ...`, it expects `/var/run/netns/mycontainer` to be one of those pseudo-files. (Symlinks are accepted.) In other words, to execute a command within the network namespace of a container, we need to: - Find out the PID of any process within the container that we want to investigate; - Create a symlink from `/var/run/netns/` to `/proc//ns/net` - Execute `ip netns exec ....` Please review [*Enumerating Cgroups*](#enumerating-cgroups) to learn how to find the cgroup of a process running in the container of which you want to measure network usage. From there, you can examine the pseudo-file named `tasks`, which contains the PIDs that are in the control group (i.e., in the container). Pick any one of them. Putting everything together, if the "short ID" of a container is held in the environment variable `$CID`, then you can do this: $ TASKS=/sys/fs/cgroup/devices/docker/$CID*/tasks $ PID=$(head -n 1 $TASKS) $ mkdir -p /var/run/netns $ ln -sf /proc/$PID/ns/net /var/run/netns/$CID $ ip netns exec $CID netstat -i ## Tips for high-performance metric collection Note that running a new process each time you want to update metrics is (relatively) expensive. If you want to collect metrics at high resolutions, and/or over a large number of containers (think 1000 containers on a single host), you do not want to fork a new process each time. Here is how to collect metrics from a single process. You will have to write your metric collector in C (or any language that lets you do low-level system calls). You need to use a special system call, `setns()`, which lets the current process enter any arbitrary namespace. 
It requires, however, an open file descriptor to the namespace pseudo-file (remember: that's the pseudo-file in `/proc//ns/net`). However, there is a catch: you must not keep this file descriptor open. If you do, when the last process of the control group exits, the namespace will not be destroyed, and its network resources (like the virtual interface of the container) will stay around for ever (or until you close that file descriptor). The right approach would be to keep track of the first PID of each container, and re-open the namespace pseudo-file each time. ## Collecting metrics when a container exits Sometimes, you do not care about real time metric collection, but when a container exits, you want to know how much CPU, memory, etc. it has used. Docker makes this difficult because it relies on `lxc-start`, which carefully cleans up after itself, but it is still possible. It is usually easier to collect metrics at regular intervals (e.g., every minute, with the collectd LXC plugin) and rely on that instead. But, if you'd still like to gather the stats when a container stops, here is how: For each container, start a collection process, and move it to the control groups that you want to monitor by writing its PID to the tasks file of the cgroup. The collection process should periodically re-read the tasks file to check if it's the last process of the control group. (If you also want to collect network statistics as explained in the previous section, you should also move the process to the appropriate network namespace.) When the container exits, `lxc-start` will try to delete the control groups. It will fail, since the control group is still in use; but that's fine. You process should now detect that it is the only one remaining in the group. Now is the right time to collect all the metrics you need! Finally, your process should move itself back to the root control group, and remove the container control group. 
To remove a control group, just `rmdir` its directory. It's counter-intuitive to `rmdir` a directory as it still contains files; but remember that this is a pseudo-filesystem, so usual rules don't apply. After the cleanup is done, the collection process can exit safely. docker-1.10.3/docs/admin/systemd.md000066400000000000000000000126161267010174400171050ustar00rootroot00000000000000 # Control and configure Docker with systemd Many Linux distributions use systemd to start the Docker daemon. This document shows a few examples of how to customize Docker's settings. ## Starting the Docker daemon Once Docker is installed, you will need to start the Docker daemon. $ sudo systemctl start docker # or on older distributions, you may need to use $ sudo service docker start If you want Docker to start at boot, you should also: $ sudo systemctl enable docker # or on older distributions, you may need to use $ sudo chkconfig docker on ## Custom Docker daemon options There are a number of ways to configure the daemon flags and environment variables for your Docker daemon. The recommended way is to use a systemd drop-in file. These are local files in the `/etc/systemd/system/docker.service.d` directory. This could also be `/etc/systemd/system/docker.service`, which also works for overriding the defaults from `/lib/systemd/system/docker.service`. 
However, if you had previously used a package which had an `EnvironmentFile` (often pointing to `/etc/sysconfig/docker`) then for backwards compatibility, you drop a file in the `/etc/systemd/system/docker.service.d` directory including the following: [Service] EnvironmentFile=-/etc/sysconfig/docker EnvironmentFile=-/etc/sysconfig/docker-storage EnvironmentFile=-/etc/sysconfig/docker-network ExecStart= ExecStart=/usr/bin/docker daemon -H fd:// $OPTIONS \ $DOCKER_STORAGE_OPTIONS \ $DOCKER_NETWORK_OPTIONS \ $BLOCK_REGISTRY \ $INSECURE_REGISTRY To check if the `docker.service` uses an `EnvironmentFile`: $ sudo systemctl show docker | grep EnvironmentFile EnvironmentFile=-/etc/sysconfig/docker (ignore_errors=yes) Alternatively, find out where the service file is located: $ sudo systemctl status docker | grep Loaded Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled) $ sudo grep EnvironmentFile /usr/lib/systemd/system/docker.service EnvironmentFile=-/etc/sysconfig/docker You can customize the Docker daemon options using override files as explained in the [HTTP Proxy example](#http-proxy) below. The files located in `/usr/lib/systemd/system` or `/lib/systemd/system` contain the default options and should not be edited. ### Runtime directory and storage driver You may want to control the disk space used for Docker images, containers and volumes by moving it to a separate partition. 
In this example, we'll assume that your `docker.service` file looks something like: [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com After=network.target docker.socket Requires=docker.socket [Service] Type=notify ExecStart=/usr/bin/docker daemon -H fd:// LimitNOFILE=1048576 LimitNPROC=1048576 TasksMax=1048576 [Install] Also=docker.socket This will allow us to add extra flags via a drop-in file (mentioned above) by placing a file containing the following in the `/etc/systemd/system/docker.service.d` directory: [Service] ExecStart= ExecStart=/usr/bin/docker daemon -H fd:// --graph="/mnt/docker-data" --storage-driver=overlay You can also set other environment variables in this file, for example, the `HTTP_PROXY` environment variables described below. To modify the ExecStart configuration, specify an empty configuration followed by a new configuration as follows: [Service] ExecStart= ExecStart=/usr/bin/docker daemon -H fd:// --bip=172.17.42.1/16 If you fail to specify an empty configuration, Docker reports an error such as: docker.service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. Refusing. ### HTTP proxy This example overrides the default `docker.service` file. If you are behind a HTTP proxy server, for example in corporate settings, you will need to add this configuration in the Docker systemd service file. 
First, create a systemd drop-in directory for the docker service: mkdir /etc/systemd/system/docker.service.d Now create a file called `/etc/systemd/system/docker.service.d/http-proxy.conf` that adds the `HTTP_PROXY` environment variable: [Service] Environment="HTTP_PROXY=http://proxy.example.com:80/" If you have internal Docker registries that you need to contact without proxying you can specify them via the `NO_PROXY` environment variable: Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.1,docker-registry.somecorporation.com" Flush changes: $ sudo systemctl daemon-reload Verify that the configuration has been loaded: $ sudo systemctl show docker --property Environment Environment=HTTP_PROXY=http://proxy.example.com:80/ Restart Docker: $ sudo systemctl restart docker ## Manually creating the systemd unit files When installing the binary without a package, you may want to integrate Docker with systemd. For this, simply install the two unit files (service and socket) from [the github repository](https://github.com/docker/docker/tree/master/contrib/init/systemd) to `/etc/systemd/system`. docker-1.10.3/docs/admin/using_supervisord.md000066400000000000000000000101711267010174400212010ustar00rootroot00000000000000 # Using Supervisor with Docker > **Note**: > - **If you don't like sudo** then see [*Giving non-root > access*](../installation/binaries.md#giving-non-root-access) Traditionally a Docker container runs a single process when it is launched, for example an Apache daemon or a SSH server daemon. Often though you want to run more than one process in a container. There are a number of ways you can achieve this ranging from using a simple Bash script as the value of your container's `CMD` instruction to installing a process management tool. In this example we're going to make use of the process management tool, [Supervisor](http://supervisord.org/), to manage multiple processes in our container. 
Using Supervisor allows us to better control, manage, and restart the processes we want to run. To demonstrate this we're going to install and manage both an SSH daemon and an Apache daemon. ## Creating a Dockerfile Let's start by creating a basic `Dockerfile` for our new image. FROM ubuntu:13.04 MAINTAINER examples@docker.com ## Installing Supervisor We can now install our SSH and Apache daemons as well as Supervisor in our container. RUN apt-get update && apt-get install -y openssh-server apache2 supervisor RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /var/log/supervisor Here we're installing the `openssh-server`, `apache2` and `supervisor` (which provides the Supervisor daemon) packages. We're also creating four new directories that are needed to run our SSH daemon and Supervisor. ## Adding Supervisor's configuration file Now let's add a configuration file for Supervisor. The default file is called `supervisord.conf` and is located in `/etc/supervisor/conf.d/`. COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf Let's see what is inside our `supervisord.conf` file. [supervisord] nodaemon=true [program:sshd] command=/usr/sbin/sshd -D [program:apache2] command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND" The `supervisord.conf` configuration file contains directives that configure Supervisor and the processes it manages. The first block `[supervisord]` provides configuration for Supervisor itself. We're using one directive, `nodaemon` which tells Supervisor to run interactively rather than daemonize. The next two blocks manage the services we wish to control. Each block controls a separate process. The blocks contain a single directive, `command`, which specifies what command to run to start each process. ## Exposing ports and running Supervisor Now let's finish our `Dockerfile` by exposing some required ports and specifying the `CMD` instruction to start Supervisor when our container launches. 
EXPOSE 22 80 CMD ["/usr/bin/supervisord"] Here We've exposed ports 22 and 80 on the container and we're running the `/usr/bin/supervisord` binary when the container launches. ## Building our image We can now build our new image. $ docker build -t /supervisord . ## Running our Supervisor container Once We've got a built image we can launch a container from it. $ docker run -p 22 -p 80 -t -i /supervisord 2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file) 2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing 2013-11-25 18:53:22,342 INFO supervisord started with pid 1 2013-11-25 18:53:23,346 INFO spawned: 'sshd' with pid 6 2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7 . . . We've launched a new container interactively using the `docker run` command. That container has run Supervisor and launched the SSH and Apache daemons with it. We've specified the `-p` flag to expose ports 22 and 80. From here we can now identify the exposed ports and connect to one or both of the SSH and Apache daemons. docker-1.10.3/docs/article-img/000077500000000000000000000000001267010174400161725ustar00rootroot00000000000000docker-1.10.3/docs/article-img/architecture.svg000066400000000000000000005557261267010174400214210ustar00rootroot00000000000000 2014-04-15 00:37Z image/svg+xml docker-1.10.3/docs/articles/000077500000000000000000000000001267010174400156035ustar00rootroot00000000000000docker-1.10.3/docs/articles/index.md000066400000000000000000000003301267010174400172300ustar00rootroot00000000000000 docker-1.10.3/docs/breaking_changes.md000066400000000000000000000033601267010174400175730ustar00rootroot00000000000000 # Breaking changes and incompatibilities Every Engine release strives to be backward compatible with its predecessors. In all cases, the policy is that feature removal is communicated two releases in advance and documented as part of the [deprecated features](deprecated.md) page. 
Unfortunately, Docker is a fast moving project, and newly introduced features may sometime introduce breaking changes and/or incompatibilities. This page documents these by Engine version. # Engine 1.10 There were two breaking changes in the 1.10 release. ## Registry Registry 2.3 includes improvements to the image manifest that have caused a breaking change. Images pushed by Engine 1.10 to a Registry 2.3 cannot be pulled by digest by older Engine versions. A `docker pull` that encounters this situation returns the following error: ``` Error response from daemon: unsupported schema version 2 for tag TAGNAME ``` Docker Content Trust heavily relies on pull by digest. As a result, images pushed from the Engine 1.10 CLI to a 2.3 Registry cannot be pulled by older Engine CLIs (< 1.10) with Docker Content Trust enabled. If you are using an older Registry version (< 2.3), this problem does not occur with any version of the Engine CLI; push, pull, with and without content trust work as you would expect. ## Docker Content Trust Engine older than the current 1.10 cannot pull images from repositories that have enabled key delegation. Key delegation is a feature which requires a manual action to enable. docker-1.10.3/docs/deprecated.md000066400000000000000000000117721267010174400164270ustar00rootroot00000000000000 # Deprecated Engine Features The following list of features are deprecated in Engine. ### Ambiguous event fields in API **Deprecated In Release: v1.10** The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a more rich structure. See the events API documentation for the new format. ### `-f` flag on `docker tag` **Deprecated In Release: v1.10** **Target For Removal In Release: v1.12** To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is not longer necessary to specify `-f` to move a tag from one image to another. 
Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use. ### HostConfig at API container start **Deprecated In Release: v1.10** **Target For Removal In Release: v1.12** Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of defining it at container creation (`POST /containers/create`). ### Docker ps 'before' and 'since' options **Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** **Target For Removal In Release: v1.12** The `docker ps --before` and `docker ps --since` options are deprecated. Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. ### Command line short variant options **Deprecated In Release: v1.9** **Target For Removal In Release: v1.11** The following short variant options are deprecated in favor of their long variants: docker run -c (--cpu-shares) docker build -c (--cpu-shares) docker create -c (--cpu-shares) ### Driver Specific Log Tags **Deprecated In Release: v1.9** **Target For Removal In Release: v1.11** Log tags are now generated in a standard way across different logging drivers. Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and `fluentd-tag` have been deprecated in favor of the generic `tag` option. docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" ### LXC built-in exec driver **Deprecated In Release: v1.8** **Target For Removal In Release: v1.10** The built-in LXC execution driver is deprecated for an external implementation. The lxc-conf flag and API fields will also be removed. ### Old Command Line Options **Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** **Target For Removal In Release: v1.10** The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand: docker daemon -H ... 
The following single-dash (`-opt`) variants of certain command line options are deprecated and have been replaced with double-dash options (`--opt`):
Due to this renaming, we're also changing the corresponding environment variables - DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE will now be named DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE - DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE will now be named DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE docker-1.10.3/docs/examples/000077500000000000000000000000001267010174400156135ustar00rootroot00000000000000docker-1.10.3/docs/examples/apt-cacher-ng.Dockerfile000066400000000000000000000007541267010174400222230ustar00rootroot00000000000000# # Build: docker build -t apt-cacher . # Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher # # and then you can run containers with: # docker run -t -i --rm -e http_proxy http://dockerhost:3142/ debian bash # FROM ubuntu MAINTAINER SvenDowideit@docker.com VOLUME ["/var/cache/apt-cacher-ng"] RUN apt-get update && apt-get install -y apt-cacher-ng EXPOSE 3142 CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/* docker-1.10.3/docs/examples/apt-cacher-ng.md000066400000000000000000000074551267010174400205610ustar00rootroot00000000000000 # Dockerizing an apt-cacher-ng service > **Note**: > - **If you don't like sudo** then see [*Giving non-root > access*](../installation/binaries.md#giving-non-root-access). > - **If you're using OS X or docker via TCP** then you shouldn't use > sudo. When you have multiple Docker servers, or build unrelated Docker containers which can't make use of the Docker build cache, it can be useful to have a caching proxy for your packages. This container makes the second download of any package almost instant. Use the following Dockerfile: # # Build: docker build -t apt-cacher . 
# Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher # # and then you can run containers with: # docker run -t -i --rm -e http_proxy http://dockerhost:3142/ debian bash # FROM ubuntu MAINTAINER SvenDowideit@docker.com VOLUME ["/var/cache/apt-cacher-ng"] RUN apt-get update && apt-get install -y apt-cacher-ng EXPOSE 3142 CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/* To build the image using: $ docker build -t eg_apt_cacher_ng . Then run it, mapping the exposed port to one on the host $ docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng To see the logfiles that are `tailed` in the default command, you can use: $ docker logs -f test_apt_cacher_ng To get your Debian-based containers to use the proxy, you can do one of three things 1. Add an apt Proxy setting `echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/conf.d/01proxy` 2. Set an environment variable: `http_proxy=http://dockerhost:3142/` 3. Change your `sources.list` entries to start with `http://dockerhost:3142/` **Option 1** injects the settings safely into your apt configuration in a local version of a common base: FROM ubuntu RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy RUN apt-get update && apt-get install -y vim git # docker build -t my_ubuntu . **Option 2** is good for testing, but will break other HTTP clients which obey `http_proxy`, such as `curl`, `wget` and others: $ docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash **Option 3** is the least portable, but there will be times when you might need to do it and you can do it from your `Dockerfile` too. 
Apt-cacher-ng has some tools that allow you to manage the repository, and they can be used by leveraging the `VOLUME` instruction, and the image we built to run the service: $ docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash $$ /usr/lib/apt-cacher-ng/distkill.pl Scanning /var/cache/apt-cacher-ng, please wait... Found distributions: bla, taggedcount: 0 1. precise-security (36 index files) 2. wheezy (25 index files) 3. precise-updates (36 index files) 4. precise (36 index files) 5. wheezy-updates (18 index files) Found architectures: 6. amd64 (36 index files) 7. i386 (24 index files) WARNING: The removal action may wipe out whole directories containing index files. Select d to see detailed list. (Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q Finally, clean up after your test by stopping and removing the container, and then removing the image. $ docker stop test_apt_cacher_ng $ docker rm test_apt_cacher_ng $ docker rmi eg_apt_cacher_ng docker-1.10.3/docs/examples/couchbase.md000066400000000000000000000221021267010174400200660ustar00rootroot00000000000000 # Dockerizing a Couchbase service This example shows how to start a [Couchbase](http://couchbase.com) server using Docker Compose, configure it using its [REST API](http://developer.couchbase.com/documentation/server/4.0/rest-api/rest-endpoints-all.html), and query it. Couchbase is an open source, document-oriented NoSQL database for modern web, mobile, and IoT applications. It is designed for ease of development and Internet-scale performance. ## Start Couchbase server Couchbase Docker images are published at [Docker Hub](https://hub.docker.com/_/couchbase/). 
Start Couchbase server as: ``` docker run -d --name db -p 8091-8093:8091-8093 -p 11210:11210 couchbase ``` The purpose of each port exposed is explained at [Couchbase Developer Portal - Network Configuration](http://developer.couchbase.com/documentation/server/4.1/install/install-ports.html). Logs can be seen as: ``` docker logs db Starting Couchbase Server -- Web UI available at http://:8091 ``` > **Note**: The examples on this page assume that the Docker Host > is reachable on `192.168.99.100`. Substitute `192.168.99.100` with > the actual IP address of your Docker Host. If you're running > Docker using Docker machine, you can obtain the IP address > of the Docker host using `docker-machine ip `. The logs show that Couchbase console can be accessed at http://192.168.99.100:8091. The default username is `Administrator` and the password is `password`. ## Configure Couchbase Docker container By default, Couchbase server needs to be configured using the console before it can be used. This can be simplified by configuring it using the REST API. ### Configure memory for Data and Index service Data, Query and Index are three different services that can be configured on a Couchbase instance. Each service has different operating needs. For example, Query is CPU intensive operation and so requires a faster processor. Index is disk heavy and so requires a faster solid state drive. Data needs to be read/written fast and so requires more memory. Memory needs to be configured for Data and Index service only. ``` curl -v -X POST http://192.168.99.100:8091/pools/default -d memoryQuota=300 -d indexMemoryQuota=300 * Hostname was NOT found in DNS cache * Trying 192.168.99.100... 
* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) > POST /pools/default HTTP/1.1 > User-Agent: curl/7.37.1 > Host: 192.168.99.100:8091 > Accept: */* > Content-Length: 36 > Content-Type: application/x-www-form-urlencoded > * upload completely sent off: 36 out of 36 bytes < HTTP/1.1 401 Unauthorized < WWW-Authenticate: Basic realm="Couchbase Server Admin / REST" * Server Couchbase Server is not blacklisted < Server: Couchbase Server < Pragma: no-cache < Date: Wed, 25 Nov 2015 22:48:16 GMT < Content-Length: 0 < Cache-Control: no-cache < * Connection #0 to host 192.168.99.100 left intact ``` The command shows an HTTP POST request to the REST endpoint `/pools/default`. The host is the IP address of the Docker machine. The port is the exposed port of Couchbase server. The memory and index quota for the server are passed in the request. ### Configure Data, Query, and Index services All three services, or only one of them, can be configured on each instance. This allows different Couchbase instances to use affinities and setup services accordingly. For example, if Docker host is running a machine with solid-state drive then only Data service can be started. ``` curl -v http://192.168.99.100:8091/node/controller/setupServices -d 'services=kv%2Cn1ql%2Cindex' * Hostname was NOT found in DNS cache * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) > POST /node/controller/setupServices HTTP/1.1 > User-Agent: curl/7.37.1 > Host: 192.168.99.100:8091 > Accept: */* > Content-Length: 26 > Content-Type: application/x-www-form-urlencoded > * upload completely sent off: 26 out of 26 bytes < HTTP/1.1 200 OK * Server Couchbase Server is not blacklisted < Server: Couchbase Server < Pragma: no-cache < Date: Wed, 25 Nov 2015 22:49:51 GMT < Content-Length: 0 < Cache-Control: no-cache < * Connection #0 to host 192.168.99.100 left intact ``` The command shows an HTTP POST request to the REST endpoint `/node/controller/setupServices`. 
The command shows that all three services are configured for the Couchbase server. The Data service is identified by `kv`, Query service is identified by `n1ql` and Index service identified by `index`. ### Setup credentials for the Couchbase server Sets the username and password credentials that will subsequently be used for managing the Couchbase server. ``` curl -v -X POST http://192.168.99.100:8091/settings/web -d port=8091 -d username=Administrator -d password=password * Hostname was NOT found in DNS cache * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) > POST /settings/web HTTP/1.1 > User-Agent: curl/7.37.1 > Host: 192.168.99.100:8091 > Accept: */* > Content-Length: 50 > Content-Type: application/x-www-form-urlencoded > * upload completely sent off: 50 out of 50 bytes < HTTP/1.1 200 OK * Server Couchbase Server is not blacklisted < Server: Couchbase Server < Pragma: no-cache < Date: Wed, 25 Nov 2015 22:50:43 GMT < Content-Type: application/json < Content-Length: 44 < Cache-Control: no-cache < * Connection #0 to host 192.168.99.100 left intact {"newBaseUri":"http://192.168.99.100:8091/"} ``` The command shows an HTTP POST request to the REST endpoint `/settings/web`. The user name and password credentials are passed in the request. ### Install sample data The Couchbase server can be easily load some sample data in the Couchbase instance. ``` curl -v -u Administrator:password -X POST http://192.168.99.100:8091/sampleBuckets/install -d '["travel-sample"]' * Hostname was NOT found in DNS cache * Trying 192.168.99.100... 
* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) * Server auth using Basic with user 'Administrator' > POST /sampleBuckets/install HTTP/1.1 > Authorization: Basic QWRtaW5pc3RyYXRvcjpwYXNzd29yZA== > User-Agent: curl/7.37.1 > Host: 192.168.99.100:8091 > Accept: */* > Content-Length: 17 > Content-Type: application/x-www-form-urlencoded > * upload completely sent off: 17 out of 17 bytes < HTTP/1.1 202 Accepted * Server Couchbase Server is not blacklisted < Server: Couchbase Server < Pragma: no-cache < Date: Wed, 25 Nov 2015 22:51:51 GMT < Content-Type: application/json < Content-Length: 2 < Cache-Control: no-cache < * Connection #0 to host 192.168.99.100 left intact [] ``` The command shows an HTTP POST request to the REST endpoint `/sampleBuckets/install`. The name of the sample bucket is passed in the request. Congratulations, you are now running a Couchbase container, fully configured using the REST API. ## Query Couchbase using CBQ [CBQ](http://developer.couchbase.com/documentation/server/4.1/cli/cbq-tool.html), short for Couchbase Query, is a CLI tool that allows to create, read, update, and delete JSON documents on a Couchbase server. This tool is installed as part of the Couchbase Docker image. Run CBQ tool: ``` docker run -it --link db:db couchbase cbq --engine http://db:8093 Couchbase query shell connected to http://db:8093/ . Type Ctrl-D to exit. cbq> ``` `--engine` parameter to CBQ allows to specify the Couchbase server host and port running on the Docker host. For host, typically the host name or IP address of the host where Couchbase server is running is provided. In this case, the container name used when starting the container, `db`, can be used. `8093` port listens for all incoming queries. Couchbase allows to query JSON documents using [N1QL](http://developer.couchbase.com/documentation/server/4.1/n1ql/n1ql-language-reference/index.html). 
N1QL is a comprehensive, declarative query language that brings SQL-like query capabilities to JSON documents. Query the database by running a N1QL query: ``` cbq> select * from `travel-sample` limit 1; { "requestID": "97816771-3c25-4a1d-9ea8-eb6ad8a51919", "signature": { "*": "*" }, "results": [ { "travel-sample": { "callsign": "MILE-AIR", "country": "United States", "iata": "Q5", "icao": "MLA", "id": 10, "name": "40-Mile Air", "type": "airline" } } ], "status": "success", "metrics": { "elapsedTime": "60.872423ms", "executionTime": "60.792258ms", "resultCount": 1, "resultSize": 300 } } ``` ## Couchbase Web Console [Couchbase Web Console](http://developer.couchbase.com/documentation/server/4.1/admin/ui-intro.html) is a console that allows to manage a Couchbase instance. It can be seen at: http://192.168.99.100:8091/ Make sure to replace the IP address with the IP address of your Docker Machine or `localhost` if Docker is running locally. ![Couchbase Web Console](couchbase/web-console.png) docker-1.10.3/docs/examples/couchbase/000077500000000000000000000000001267010174400175475ustar00rootroot00000000000000docker-1.10.3/docs/examples/couchbase/web-console.png000066400000000000000000004750421267010174400225060ustar00rootroot00000000000000PNG  IHDRT A iCCPICC ProfileHXSBB D@JM^wA@:IPB;ł]Qp-"` "*+bʛ{ͽ9̝@ў%dJd󄑁>D& P.+X~RkQps 9,5aNh7'[ȪB("Y̩Rs%>ё SY,a* |v*̣ l@`8{ OʆHl]˙q3Ԏ'CV|Ǒ^((F?c e2belJb? q0;RGq78Ts%?3lw!Ù/HD[ya wfDEb͆ߦ">zVX0+6KAW^Zt4Ƈip0#ӌ)-dF]H8cGrb855B{'ȋjq |` ^` %m ,  ,e8I ^@ HZ ڿ[WK"i͗DdpMwC V[wc*=O#f:Pu&BO۷HSB1&pV.X!g$^L0 Yan U;>; \X'޸'~P4X<(++8T$1wcā=1 ;]Na v촘gL{ZD[󱮱Og%qo`_d.3϶Ĵq@}~>0$mq-hLfcp)wlZNH/ g(•t0} x0hfQOYP\,ŠvU08 )p\WA' ù^! 
BBh@t#E E"$ IEY,GJ vd/RD!..ҋ O(RQUT5F'Ψ7FT4-@5V=֣ЫM}cf9cX8`Bl1VaX-u>D3qK8?v [x/>%Z +!OH%%''# b:qq5q'L"I$ɂN 'Hyb6!YR7,O%ےȉd>\F>H>C&?#)ɹʅq˭/$wM_nL1S)eZyyy}y<[_HUS}3"j%zFӼh<Z5<]J!XD\^[ᥢl2c䔌|XJʕN*VV+(+g)V>|Y IX_ROJ}lr~z*QD5X5]TjꐚZMО=;aՄ ޫOTR窗שTXѠP4ל9Ws&'L<:e@kVְv@{yAN&3:t]]&ݳ2՘LVf+sHOK/HOWCoDD?FPN `A4Å5䌜Ҍ7613^a`D$ؤ)4Ǵ,lY9j`f^n~pYD2?bmKeee*ԪЪdɉOn:z}66M6mmٶ7hvvK^[swq;LsXQX8d鶳sjK.%.\>:u-)&SSOswgu`z$ydyVx>2xzm}{_WE~___vG5C A!Ank:M]45=qy0i:m괍Axp&9M'N^>iMȶ(zԜQ}Fߏ1Ŵ*Ό}!'~r DRbl360xY&ͺ<[svsK"$%L gUw$}[/8^M;wY{ʆS<yWAggTffeeNUly] A'5gsΐ0Dx ɝۘ :"SO|scyJ~eOUdUkSuAkkQ:n[Ǩ+=kүm9|''JC i = ]'lirk:oNV; Lљѳg̓Ri>B.\p|wKN]v|󕆫WOǎkN;]:t>w7 u+֝3oy~7{F/}@xPPa#GQsׯq}Or|/zJ{ZLYs:g ⿔n%|5z7o߶ G ?zn}U?}ldg_̾4} `4ktT$[ V4%ו( 3?&)TzPGd*^bg+E‡7"9:e?{O\pg:_˿l3:s pHYs%%IR$iTXtXML:com.adobe.xmp 2132 1312 {lIDATxw]UٷyoEIBf&TifEE JOPQA *bi"tjz{s9m |k}du_Μk'\Îm<ꄦOm8嬆>VSL5LB~+s}SVǏ}bcszLg/yo}ޚ4,s?pБ ]?q'N:55$XⓟJ]zEug}sB[ m}suvzpcrxxòP~; ߁ů E#|̷|)}`]k?öx|;jq[O=+M窕2:\#)\s=s?u`fa.ܰ8a|78Yaa۶6#%jj :fKUM#7#jC~لɽD[U џHT׿;Dv 0m[*?`īRGKTl23UGTQfQg_ϛ@~^G߆ݷFͳ=[*1v֪ë7 XXSUv#kwR7J ?`kvYXvW^U7v6}Txcg>UU|X)FԮYq[R޶ ՝~1pH+ڮv54Ccjq/t}][pv7lv֭w;7nsp_e; _Y:'O}~}~ˋ_xJ7]l*ڶ}ےWm۾}H^LO;N|5Vvƞ}n=)V.lqc4iIgN?{ec{SGWm;N>6 9k@~,#WyK'4}/-@AT瞟dZiiuwځDbLhުhdÆM<>0wlmkkhhxθ`fy|RWg{ѧ3GϿ0cw\i-|dg`շwW5;#Qva驞®JSNjs|7A~oǖ=`ÕWmUJG|xcD̠䏱mlB୊t ?LioB$-_~b3^h{ڬhnj7 ~)z)yXhV٠+JyvǖXY4`μz'yc}'>C_]ƔH9|!qc[2y|秥.rߔ% 9)O\>#DG׏oʿMǦ*꣸>1\~(_eXjhݙ]r?~ m{ /_A^>\//ziy_ u+lc܂Mg^ʏ$*Z6"HP# %9xdc5m׮s}깄Y㛲G/kCf=iO~L@~kv]ssķ^~l GԬ!Q}̇r>^#k_Uc]6LCeGVYtيWTj] ~o̲ %wR4yiUc[m ]ݲuQ=xBi.VHHhr:/(Uk̵CCQjUTL ~r:J}2nծ{"NW["1YQo77-'7i֟=w;E+N|*[6/a|>wBZ`R~[s/-XT657&_vpɏ<5ҳiL:Vɱ+7`-UĿ褺E[bD}eQv>f5m1| ]uDfYKJǤʤ?'tsxt~h>m=MY G=Mڼ2*Ljw7GڊhʫrV ڊ\?毨I^Q_n*27 ?T[򧟛=Gw‹,v,i{o>/m;n}Uڒ^G1JԾzqk/B~v/1SEm UQfĦ>":{ia(7"lkNml꽹pPv\fUlƾnZU*?Xew$Lh>)[;]`Sfϻ5*&YL%% q;ITeYz/{3fu;$S{ѨgT~)7k,˖4'Zf|u׫$sR$?&U|_ONl0 L۲ʊΚ .+SbJY_b13~|BmCNC&2G-=\,ǃG;x]RIO~ga{w?s7Zz=];9\SkZ_PBc{OtqWJr>d>Q5/}/7mcLծ^^u!ݞ/~Dc@".GQaݺ ~>j(3f 
"~GC}M4Qk~٥gf^W|Y5WarXyg5E0|}ţMZ۲W&"_ëI]+:pM+yrǝWQcVv]0if9{MŪl%鳂=}\3F[m͚Eό.8S;-{(TiYeQ*?.x.a Xae+-JUj[c~\+?cg!*{ ?`7ʏ2#{];z Y1AaE _;o^,7{sgwΚ1{N'hm]Cc׎>|dڥUA·=<1K ɻD~T2__(HX~c[,cn|~' [0F(]&Fu^lӈm>'FϸǷjߗ_/ I%v]; *fS`} W80ON} a?3}  kJQoJpO1k [ocvL jZRwW%n=.c}ff͝?@R:5)JXYwIvnҌǘk걭y^ZzPZu:,4ևYui3s b&98sG~G\/v\|y%Lt_a;/{\W_mW|+[.҉M\x _^e_ >37qڃWW2#}Q#v{lݳ<;J}zaf/8ӌlgw9ٽ?)l}s~S0VvU('m/E-ӿ ==j`a3?&:ex;ɏ63bLf}T=$S{pv!1zo:@|dǶ|dAG4tXA5x?%<QV CtwM*6_x7ु#$?oB~6.BJRDhewFZ[#?lBѩsXԫRP{W6mvC^KoL(jRt̎2qwR5*XvO*exx}[|+1"v=Gvt~N޸hw˟1o{r$$׷KrXOCЍx0ƣۅJYb'Wֹ- @~^~1ƐȎ8ܿeԘ#4S?r9f1F:rFlY~Dͺ5*uorjdᙏ0352 =,5%#?ltUT}UK[Fx" %C`z2A#?= xsNVU6~T78= + :L.Qt;E;cR=-^+(Ur.ҝNw44糃\ᛎm~OڃZL}rZ[[Mjc]zzL+='vak֮s5<>=gwj0YC+Ttu|Z ,].ޥd'?'et5s(vv;#x ^V=ÅvڲrVʈ ?̀BDbc}yG$P~xcv(8~ik>n Gѩaa|lcx&*|^뙏|Tycو(c>bԘyY8v܊K'!?ZuG 4/Oب E#M9sƩ dwXE> [ʽ{ɏlv^{jǝJM$K~lڼ,UQ"%0fo0خECf'!;wwkUvL&|1K2c eqa.ao^1"<ckǩY LȂ17ntM"a儌ESGw%`k2dLr%t"N/?Vvy-ԏS]Ɍ[yc[z(~7;/0زWݠ=ܜ|T{|Tl6f0/CccKKr><ᛏ,8j&Q -{%+V^%=.ZbC=2 UWWདྷ8NnW+2NjjXݼ|zs u.~Ͷ y#e C!D ^i4u-m1o~yNh]ߘ`Rl#&?~|SԳwhٷRGU*| 4zp0Lr?0l·7"?hT`b}sSI;Ɔbm[$W*N!`굁lkNd_OF&EʏC?.4QmȂ 0r}(?)mXx5]7╽7M{}#>q}Ss>jW՞0œLj|,G풑ՋGV+o|(ca1bc+&?_VӦͥq^O V4+3 SGQ[QZ0a&QSev78kymw3Dmkh U,MDUn&Şk|WtwZɘuu6H:P3}miz TL{\nBO?B#%+ K&EwYIBS}+?^1h*%(sh6=”!S*)K;ٙM7{~Uc=52)n}nN3@VOm淿&OnzrQ0I|s~C SG@~=P}ܩuz!li^<{V}Ry֭}˽S%l e](R27I~[~"{oh>F7뛏VS77U^χR(ZX27jpd}2H~;qUW'zn`Ϥ-ݦV,yu٦ ƕ;Q[~%)岛nUd2;Vr5^uؤijIo{CCyGy +ͣ3ٴ."J-]R{ {-Q)'+H$+KȂ |WWG%+\\.{۸q(W~\Fk]c󹲧Lzi'gHQi5zlxNdwTJ@DM;>1s֙|'f IƎUHfstvM˫ -=o`׫M/5t0F:gM[g{Qx'PMwf=:7:7Xp>Zٯv0oQ#;o W0>&#m+6c17FYp҇^󣆩Ow5$o_lpwqyQwwK\/Gtix>3cvsv*_tw?ܛ$?ZV7979 ^^G,irn ^YQ0|(c/?q֖6B 0_ emi\իِ~zkA  mA TNʦ ˗uw#~(WU}9~9^0cY}| Z}X=W.~Ҥd$SFۣf Rc m(֙SJqN?taܮo}ӲWoJ·)x5SR·)xU)c1sه]|g6e2;C 6fΙڤO1g{vvl/zߐ&!ΙZwMj ?m_;rTv5}GgBX!k1lt˰ëW7V}U}΍>60f}Uڵ#jW]5vȰWG־2f(ڗF.5f1 3o?V3ua9~]5>Bwc{hۼ纮)gsG}PV#'UH{H}*ct۰ѭë=1FaxMTUMΔ֌]=+{e~]6v|Xs>?c3>?O=;Yg?|h'WOOI04? 
'?a|ם0~㷝8~㷜8~8M'uǟ3WvZqYO;{.;#ȫgs[z|W++&5un% o/N;ٞ'yў>z{wMݷ_t֎[~~Oo%-nΊoolnlG7oon񖆛?5[S"y/S!w&ߥ~g??sCmx'h2.[ڱeKOkˎn# پ#ПLoߪX}6֮)Zշ|y߲%ܷhQa¼s̞3kNό=gvO=}F׋:_xCcFnYs[_ hQ,ɿJ~kWw_߹qcז-]uۻ=͍tw' 3_xгsGWkuhgw2;tMvdG[֖&(wc(T@c &ޖBkKoksn-d2lMr\GCΣkG@~ ?@~ ?xWeĆVZn۶scwz{m-[lZz߸a}cCjGb'=nяx~iΎ|_|4-^]wy?_<3i|@~ {.]-͍[6ox%/pyiт%K^^r2<Bot]ӧӟwǞ|I_ճfNfvm۾'ƍw1\psjkm@~ {(=ݝ*4s;{%7|_>Os'\o뗿\|j@lѫ!瞣kQJNENG{N{>餓;L1)*~œ`8ِۦ?OI%7T> O??F,w]o# _|٥ >|r8yoM j5moC1ǎ9BrS@~ {۶n~1cjR9?>ySvA~Ea&4>}Ad9 vP^'b4f˗/Հ ukhНwA~{NҢ Etwm-CECB&ÌQNƴ_P]~_y4&:B֯1}D5/I?яw>w_տeV!"0M+5r?c~}ǯ]z$7td]~Ъ4?|=31 Dۥ#i/\ .!Z>h5i@:kͺC>XHra[7^*D'5A V:֩NWйcΨ]:ӯZժ2; g#[nV-IWثS+S~)3*WFȩڵG'!ie(D,6CEǎRW A, }--U ڍDTЄ j H. .嫞20 e\e3 QO~MD JP~G&B*i%}45i7'ЏF-(lP)W~hytа>HHE̝3,Yn+?T"Ly{ޣcP0eOu 9RN䇪{%oa~"%z;ik6L^('sgeq<6rNʘ0%Q5-u.M>l0WG]H)IH[R("+'@Pڟ,ʇP)'k0u9y׿5|C4&P[j|%Uh6Vyr'AD* HR?b]BB%d/iWN%R &JgR UP*#xt&tN0gԕJhbԽ\utuQMyH'0/HvjX_gN= 1P^@az RRdW<]JPʟPD^)ʖPD^CdSobQD^eh|.%:~:Ul߶E9Vic)DWTN腌֠/0:AgP)O󹔀xާ RRJ1Y0ret.\EI%Ɛr8TKGBNHS=+G>F̥1.b(3לTe{RZ4]*qiuR 3 u m~;7{"ѥiԚE+\v)EjD-@~ )Sz}CW_U^I@tq ˷A}MtP₄Sn W_C *SVDA<ЃЩ\F9f̊\I%ѩ4HqGRḺ W]]LTJWdUZ5^]%-T$\f͜K.䇚wtpdSL&t_t'*:-Mk~kt{=ѵE);GILzI)GD ȏ! SJ5W:XYR1%UFID(BE=\Q~?񉏫)"/(Bx|b\R(Բ”۲Y#ʨU{QH9ahIJжrtR-kTI~ĖRY}SU- ex1 Wq_TihL+9TJVVbuhMV.)PYGWweZRÏ?,er^BQvERDSvK[RٸLCFlۺYwzJ0Ci_TǩH9/V+p0pۭN0C-`u{Pi&M+uPֈr,1Lr<0lG%!=`WvO(DHF~R5uP~9V^rn9:NJQ6VvD;t) ]f0QEr-2.#QĒinZJQsu1IujI&勨̍&#}S HSD. ] .ocH܊+nzT2PJPNU'UR,/]EU2{ VTx wA%^PrGa5&Q5p+n F~e4_:dj#/iai."2pRR[JI~hUJ х$i6E :MöUׄ21?-v?M-wmL[{@~P_ K< )P&>UyBj8WNTjԈb[%ȻlR&&bL# ƚ8AsJȋ[m(UsR(储o&u׎;VC3O|\+QZ?6z_%0܉b !- yiɃ7W~(B#M?LTc$K{!ڒ˔ռ"Ҷ]~B+媬0=?d C_dt:P瞻奔rqǙyT+LI3*E{ӵ\H6lȏJP%%i s[Q{UXs^S?Oq?V^ͺW΁F̞5Cg~`@,\֓BjwOWNfVs:S6oڨpuka7TIjA(dVT JQ{ӏD9%TK*|#.T5XsjĀĉKNI4HNȗLWHZa(CE+ZGa [p W\. 
u(8WC~B 7JxT4uw^shtw|Җ@57E'R.#c5jl W-2ݎSO=[ۤeZDTL@@~ 5TLIի$ *P я(A+cC>@s*dZ;Dr*F(CCAˈZ Nc5@FAAq|l$8/B'QR gq"e#dD~!!;C]vf\:RO̕,H)B tőUkD :Pkv^tJQ,hl:5ٖ8@ 4Kh͞!'j=Ȣͣ:(_PCm'jWB*3%7`0EЏJ5P0] CWB*L:VyyC;" y.^Hiye`%j!"Z:PgNйt"-C1zn:Q'ȵ(%B{+Ԫ "r-*Bu_-;JPk j, K*% !;}!rt"q55cJJQ)ժ%+ѧZkͣi3u:5Gr?V, #>,mG^ )H_BE=zD%Ze(ͬtZGJF굦ַ~C(F KL}Z/"1(tn12@azQP|u+V,SWVPt^AvE핽e9JQm*_t^eo(GDjDyW^ *ߤ[pG˅pHϨr5+2H?o|ZLDgךͣ%RGRf=3V~(Ak~tQօ=LgrUwj#GSD:TZBs*Ec{Cg!Y9."ҵS-R.4^FDU^ʨ{.D^X}+_6FJuN F^B`ifOpZt;yc9Jkoug=WR1QNc\wݵ%S +~"r*$%aм26rGԽ\t+n%|_fd*ȋ(4WX_DQ{՛RX_r"1áܑ-$ uP#?HW,j@mJWsL0i7ݨ%Fbu:YGF5~W?v퀊Vi=\.j r{J{B+/0UC9錪ע]Qy$04RCL{y~T_ ]6V4BHQ9/RC{]>=(?v Z򣿯`Ȁ%? =C -Q2 ?`hɏޞ.!20dا`ȰOgG`ȰOG{`Ȁ%?Y!>胡]> }bGG1 nNg :ⵄY SAlTZs|K8fp; vJՅ}G;UC{jo^O?E fcYc^})_ʮ~ {ݝrk8'0>a a,=># 浡xC1y ~c³T< ?s0,ddxj~+a޹c ggû+{4U>4y2-==$ZC g?ֿn2-YgubSnp;O|OnR }@)vz:~wzz;{1=!2=z{=B?:K!:НM Nm/3wc5kpožnzcQUc~L^xo{d*jwǺ}zbޅpwyφ?s{EF{<xs:g|ѭ{W~pv3[;ܻ{bg7OCpu؈p vǽf.uwO{w~ 1{Xpl{:w)t+ehEtf#wJwvdãSovgqgO; ~kfwz2LN] wtY[=2{"c9w39y ~(LvU|LC?vįo!5"~Fwl:>kwϏ:fs~; IwF5 sKI'9 :[;>ўlhhɷFw̘|4-3hGc[,6N93CΟYg8Y\G#\h}i):K0Ɵ0XU..wS|C8sw}h;Ok%w-!ѳ7&K7*М]Qx&\Q\0cOk]UrDŎ_[,ÿhop{6Xa'嚂W1k o.ޫ|kq];F$ C0V۔4\#ac j^(6 zwyrY h?PkFF5K4|8U>͜ktLhBc3M"#4?;ʬ$U;;Ma GMHs]X<ͮ}5Zk֖M腙_gٌh `]IC 1f7b͉tC~LD."s"!|sI2OcWqw̉{ݫ+~Vv%Y ͪrd2ﵻW 1V/ 6ػzjsR'$ ( k>>ٴ$l^;a<&}ZX{pp&ÏgRYUL p-z'D {Ґ-3mѹ`sX\dڒLᱻO8s5jSJKk =*>?3f]Tו%w_72'?*aAڌO[m~t[i)w/=W}=JŞp%vocgq'~s$]1lOk2]c8ᾟ򟥔Ut2T6~wo1NCl[<@S3A  RX6\M^:2M凑%?ēEwW~EðsT<͞ۼ798 W*._'"J‸߹Rwa?_3OÍ J:dřQ i]H|4Dz#? 
v[ʏŕ c0/Q `Hp-j-Y#P•R#Bb͹goc+I tvJ93ǧIw`(uR7"1aNw[ h$J'p@N%p?u0`G0wgW DYC oQ~/- Gf|ҥ8#cGf#?jXȩxf0}GcmsI w&?3$W p/ŮX2#bU #Ĺ W~7$U\ cob'`/y2x%8>~s@C׮;~ pK6{6u<&;NLVG){2WX\xG16C"ȇh"܍`Fc"Qfmq #=wa(!6Y1L'obz.|W}+[&+EMݭmeIMS'Z넑trT}`,ZH5e)ׄ)_1"!H(-rd*@$mξו|L @tGlQ/$Z(B=(eZ)+ WH8l0{Ӫ'%YGVJ-1lGkX)jWedJfZ"k IrdrLmna(wTt+(j ʶi{~BYeXsYY+z*-:lO~6'ƚ6p YfLyT.,5ۂ'`hx^!& aPh2+ A6ݶճfuGPA~AyOK_E ?\e 2Z=(({TԁLcG[*SqU)ʏX O~$wY~W7cc3 +9]@L1I@~ 5at@7L'?ln(bŬ9 ruZ;eTT)~l,$46Qڃ8㒦FI|_3*֩}cڋUG(6{l('& sMR9o= aMP*>1c# U%ҭnh>f&ޖ#v|/!V2iLM`|ɰȘ)<ʏTb[[=\J@vRd2HQ %!ERGuqt;a.H2̽pC)B Ӗ<~)eotQ( cN@|X)=] .ΧNQÈ)D&21}1FA#"bw*dF2< 3BtMV>+vZI+{/#?F3&tePo6aSFai,/Ry!~n鄑ZX;_/'?nȑq/ji]~pN۠F,l +kb$?c_HZF~WtL!$+qD!W)&H~l1؜@D#ʏH8>lD\~xzi|ҹr#lSsY9a2KLIɏ< ?!jImnP>hF =Ep!fad] Aڦ5gT@`aBƼߛ=A[(U %JPx*M蒦뱕}& 'd&Ahg~"ڙd?~$a+_ll6Ӣmx R( oSs@}`3:0o s+rP=<"jZ -nvS4'foTMTm5 2-mIkwC^~$M]&G~};`A  G*j6| Z~ȏxD~߅w[GXƝM "*ʏt!2tP=r_~Jvfϸ-C;n}`ʒRVy qf6 '-QFK 'w dGN})G~8-#?f~FHeGW2XnϘAqŒvV5\I+G n,ކMyxZM+z}>n[j)#v;jo\23"yඪ}=8cnfA: ; gẽTQlqXvtvّМĞy&0GNPKs%*ddN&_ npsO~mr-r&g}|ᖷ sP¿B5bB zU @~4{ZRNT8]Av#?ڊG5Nɢ -VV*?G=4#xJG|m_Aɏ0O(}>b=PV~Dd\G(jȏ`m\qrDGґ$-6omA DJ#*rÝ)Y%򣷻 ~ DNӮ hK= RZ4*@jTL82#9<6t1qۏsUݶS1a[]y}ت:Lذ=\~KOmސDoz?:đFez-}5JZp;d*γmbfn*0L`2|mUP2*{]JWtDqI+F3l['2e+_B2ʝ1ލÿ/&&| f.o= ^f6HW]US+ޅϪ'ſ7Z#n*/7[F~8VG, $-'?8%ȏ6c̕{&gTGxv۰=iˋE#L $?L~XD~C=V(?q$?ť$Tpk%3ќ ɏ67 dسDʹ(#?пia-d;I/"XnJ#cG[ex&*ȏts" %i *b /'?T}5b)S@~ۍlk  J0!4Qґ Z[Pl2Z%iHea2Rܔ+ iǵ8$X@HUUjb2& a eظ'I*LULb:]w6HH9},2VS3#<2E)mplD"^I4-NeI*+ﱜXS;HX[X mWIMbIP*5wŌn྘3:Z命091m~6y luxyOr RGkQ$ ?`Hȏ(!P=#U$?JsJ2iC#icMlita!B,#?e]•MnnGi*_{GًGs N_[)s%ޚ;R,?n(eG}]Uɏ=`R;X͌qTeF&eGoGX$͓~V ?Z eGҕZpj]1ctɀ eCSɘ' Bv\6cFzӃ0}U~٥Dqg}lC‘AIL ߗW&PUk˸4J y9l3CMiͱH-UdVQͫTQ`bW&Wl7XeZ{;ڎ:xg揙^k Yme˔%YdELJVdʢH1S4)D*b@H"@<{+\u} xIsx\znSNQXe2I.ZjkN``CLX}@?|v8A -'ծ.˝Ub~6FsicJ؏ӘCL2Z*𘳩>\!J5ZfS'}wia1(17'Y,bX,bX~.a$**CyfnBP!e6*cI? 
P)#G|PQuGo}a2Ҷm)%=GeZy5M^92U*~(,@ !+$*mrv9.6#)rhWA/g6=6~P4B]CgPaL$9M!@&~d[~TH0`X,bX,b]6_fU٫N5^Qv&&B^OjLi_ 7T[s5(B[ɔҪ:%>m^^eɺXϸbPSSSLIU$E'FXb"jcf5lJMCʅ9\UZķ1מ(miѦ7Dk_!6G7v :a-J#z׺$3䈸D#KiD*%clN/i"jnv{.㽩$@,bX,bX,`>Ql?tL~T=AC=㫑Mb @ !xSRWOA D5 qp*9-Jr?)ՎilikW L @؅~?ڡ_ ?*AsQu@@ CG}bX,bX,uYόf:&dhMMbM.Ri[r1:^D7TcyDzZ*6O@Vl[(b!bm cQѦX:ñnN·'@:+bYӄ4pRG&(#.nxJRh/9f~d'DmC~(yg F;u| PŵWm𑶄Nɸ~1~(Z0 1#~ GLtѕ~X,bX,b.?/U RG;1A8a8-ڣ=hԂI\uPڝL7:HʮI.d\㤭Ba,_+۞_ZJ|'eh02Q&\ckpLU%xh)d*DUt6DzrXnWN*w B{̌]?ge S* ?!vجx$2.$[sv]2@Hcsb2f%L ")"Gm_eKliMbX,bX,uyb$T?#xKa5jV|<艧^yh\?Bp"^ @C tAfߟ/arM/9#I'mbR/h?+Ҋ~PXB5HPЂ^C+p$} -@>>YA!*xt|!F sbLᇽ9tjԦL5b1U[֪JIb.ᕱ7Ð6ba 5b;o-c,e ( aFf*Pkq,h4;RZ!mrTJ)6cZ!"DJN+Y$ 2( *T\0B> W*lY04w>ФG̲ |p ',bX,bXA ?f%;CGr3<A2-gᮝ6`F3:^.uOlzC_omk`(,Vn&J+D.TG1Ӧhm\(=G֏$ ?R?"9"05bMb#SɃ2@G~h(Ei,8Y"w2?ğ#?|7h,=ԯB p=\;ʟKqY%l ?<⛗ĦT?\s7SQ[#2Y}] ?X,bX,bXUʂH]!,ZŠ{qʹFS;-%uӯyݾK -?pDXXƴm+bǭD#~-Up *WkdžɕQd[:&D72CܥuLSZ\bX,bX,uy&#a |k`܍Vx4yl}m+now.|C+eçtG-O%~t qzm!|n=t<_1#ZXo ? B~NS #jƞ?Нf a26 00yC)~h|BG2MPaJihό5lޏv?M>~D9CKbabX,bX,?}6ie0FMƱA^AɁ k|ܿ{8_=[7΅_n_V|qF&r/")0I9}54dh[}D.o&@0D庵@Yj?MYE0+EIpM@S@WjyUX)m؃˳G5T ׷h`N|&%Tː{@Ji1l.:]>v'(HLF 8B\l"Л~8#c pȕ LFmr5(0۶i.lv,c};l˶eZ0#07%$neͱŝ|ogX,bHjvAGnwbۄŚ\DpYk)ul ĎÏz|ttylх7΅+ _HNG ? yPBd0%_U3,s{wwn/߶|<0H%~DYAD?GC!t#8T#p{^1+pf_S}eWl{/{w>sumH?xgefλ*Srڽt;9~Zc}@}pr|nq"6yPwQ8pA5Sa]z#)쩵Ƹ̶1# ?a/z<<޸,{xǚ ;ww o_9Z:/9;p{)7DFӗ}SA+`'Ś~pl ulBgq9OoL·oH! FSD:B`Y}҆EtئTc".<FGC=W-}ДS>W |o uqp'aSRI3u\y*ַzuN-d#Et.}j7 d׌K8ʽ[R STJ[Ʋ[|Nb>FwiPbFP$(`sq-)F#͇ z9}׬+)?'O3iḲx#r7 Q]<V]GԖ䢽}ݱڻ|E1k޾|…Ͼ8yRݽ?wUԶ^4yfO˫~nYp9֭;g_.KKgc/zx9O>6oYl}ҝjS;,~T\hlU|ݼ,DÏR-‘ ?h#~،"qvzR+9#nml*(zR߫֔+x~ ?| ?$H|qV}">֐&:$~ e*F#z3JOڼ$,Vt>y.o{O3{hU|W?ͯy/V$AMo(8Co݆'*'& KmPyl}:yqOJ/0y `Ex{d(.6݇Gօ=f|Ņ1E5ص[)3 p%fO0 q_Q,?0XPRxC ɹ «NpMiɸ\R|OXjQnkl \oJNLf!"? [)MvF!2 Xy)c. 
N"D u^~Xkİm Cx DѲc|4rR[ckMK&\9 mmvwe}wǼf_Y.8oZ\:}f/(wkG^jU7u-<΅CMOo@.=ą5b^]cas=Nd~tIg_'fFH?6 u1&zhgYnOz93g _yPp(B7qΊjͮ - \Z0|NA JX ?f 9kp9Sj ~|)Ԡ ıX ?.;!Cv*5<A|] y0ͽ0az88|m??o9yw}kŐ>6JԏHLQ*?՝4xT_{\5{VwUyd~؜k]^mM)!kdm̕ӵˣǸ b )+'4[eH)ULήڮG$QY#R M+N8co41ص8մ]}7i.4}dTEH@-񤸎D -=Tܟ{ӼPWyRPA}[*KkD4fn8@,*E"S+l#(uN.ul?- E:/Bhp> xx-obX|R/9 u3`Wfp}A"R7v4I .0Cc 1ya;煇E49zZw?agqt5^+{yͷx3~s W!wo&]A~\՘Pµ#Ms [9|lbC?j6꜏;ˮ G ?RRv5I*#iOF&2Pʪ67kJ=GPD|0@Fj@- Wi.8PV~{v]{QdAgHq("K|:44"pb#/I?-W֑H^ëVV00ThǵJ&)v.?%=PB*EBNć GGg7/a~.ߤ,.֞ /,7"@ώ:#6e<͉Ɂ:Q&ovU{,yj*:ȑ$@[(s’f4{Jձ»-W 43'7F_Ӽ쭴J.[*C^!Kٷ~XDۖ A6Vtڧ ~<)C3GU v {wu„K&W=tXxBցVwH#DL܄as;XRW{Rm/솷&m'~lpttC9y2`:0]*%*u5}`Md3Zܣy%D?[JUWɖ:Uw>Cx.@ .C\-77O{d+dGF:|z=l'e Ȁuf!̉'la+:FUjWrY TiaWZ.Pz~*[>*;L8;P@e$bݰKΡEp<~z۽O`[t#!:Z?|( }Bq|x$q8lnXNGUi7W.mt01B=|P8Po.ɧ7IJ8Q vv-#:'F%;lN:|M!I4:'hGL:yp@WK>NV9dVX Έp'7Rt;e0WF UP*( Bt1׹%-+{ea fѕjJ̫Hy$k'6XD A=P֟7ǠIU޵2N<D54wE#iH W'r3?m>tJLFK:)h[qZ9|J;ĻvN/}g*9 Ğ2_~ۙWT'p]٧ s@!ݽ-|?ae잷'!'z =qiRXʽ:O6Sc@}j)wEx ϳ]+0agsOٰ3U4L[Dz$W N^ 9 c\ɉ'pwcfӇ O0\tUn9` AݻVjw(us+FAQ*KGG/6}K~i|R훟;[+n~f6I 2a4GZfbp(n–ԺݺѮj"2\V\"$ ? oH0L `*E ?޺ttJqr}jCIxHFΒTVP.xϕ;YZ=GnEx4CC_g󞕣iHeAV  @#7eG?$Iw($.['xG~SS&z@ԉȉ=K`Qmq-(YD!%?bbWLTy\}}\0Wud_T`S HN!Zhf)`>boD^XMӁO B{Bs=ėy`7>IkttZM ֌`#p_Oy3/r"9tX{ bES\>F[>Oj6%XN{ ~~5ƗLoAwPF7*?o[ }Ƽp(+nGUkz]#3A. 
#- ?u,#ڤ+4op_" 3ӂrÏ50)pr ?zV y/pZz?wszT# RkU4Չ`xbz9iWAkWX^xwu;[w,w-7O {O~ }K~l×& :m,Id,ݺϤIL01 Dh!r Y#r5t 0wDJ?r0S7_|L#J/Iܸ|s#"+||GG[ܻ+Ѐ]Y|,zPŃ),VC':BY;J-AQ=Lehq}F%2l-p|爂J("7m-gVL9vh^4򺓁26i+Mts9Wl4>e&z`,`f$2)I\1vt=Z8|@iȐ@P 1}IPQ+r$ő@5MOb03Af|P@3$Ag߈Ԁߙfm;G]38 NaM'zA4iuCYMq ڇDEwDmh:NVÛ;;J5$N]cg*xYSpNamtb))V^$A A;~LF+rX2P<6,Y}gGm /-,Ӡ2!Țx\Ohy,y.k]',dџS>/Uxw^D*iu)^pG{xb.b,$YH&Ġs?;W02?B{f?&fteG~֣G% wi,(s؃qVQJ߹Ёκ7x~OIE/tMFQn޽5/c`xbzu (5B9cO_W co i};k>=w-Kvۮs]i U9 ߹I9\Sc=L L̙0- ?b?TW5ows>L۶، ?d"S,ehs׺SRYD\nr%)TݷR-~i(4$~r):@?L0CѶ-=p 7+O4xBtFSq6'D JԊD`'Հtrj>Mڪ]T PBƵ`b 4 `{An`Iar7s^)8lŨ-WS:MALTpngAct*̓&EǬj'!8: ?t` -mNHI03 K}͛A` /;n]}9Xqn$Kù.1*w04=}h b/ gJ46PUmihM ^V/t+jƮ7bsh8d6dSXE251iͫ9jBc'Vqs:Y;>py{X.d~x?mZc=a=,-?dM= N~k|,HW#Qw Ewr̺Rz ~HJf<*a1<:KJMLYBWNw@˼Ҕ@~YP~'|Nf~a^ t-(Z ?xbz9F/ m.*|TUhQRy͑='z#^yÓZ8~\vۊ}϶fƿZ;Rc/2_@*,֜cOm0ա@UE~wzdĝq_ըd-3?F?+!(T]#$LG"qc|@@j(j?T1( `2=U##n6NZ,I ѺSvǾ ojlU ְzTTNU3JIݣ!߲ Ḿo *QC0/a PVK۪$9&~Mii$XMG]g'NK*-Jgg`хT_mnaKԐEDŽFvu=Q+wĄ˗W:]B4zCsu~~$GC09~\eP S<@0"ZOep"9Jͫ[zNKG׶F+1JG(wkCd ܊z2Fhٻ& )4/hξ? >-A3NAN6Py!~)(7-A'۾/%L3nCPQɜٝ~CJd~jdd f+Giͫih|!bi5 s閴).N]0[c&WTLVBX8B>a\ē+rt7]AW~!Ad6'I 3SD X 9zv3tmjBa|[Y%?^`e ^]@Hƃ dFxj~~ㆧ~SVOra|>l0L; ʀucX(>iSw̏7>#0qb Iif?<" ߇GyYCvq.A'@pQli7# 6C٫UD'חT-D2ہH]W?B|[RLY*TnwmuSA7+V t׫ܵ'ª`&(ToaÕ<b}UvY>pNZZ)q(Tc),o.ou+K A''Mcyh{D=.Yc 3QJF[yK9*oeNkKSDV$ ڄ 4eIqX3a a/no,%TJ3q g6_=l#A>om k_?^A8$oQ]_iCoZ"~x zi:^8|ޝpJNnyvނB(P q<|O_6YbNڇ<Ű؆R261Ώ>]ljg_h;݃oAavӟ{uώ4n^zځ9@v*S-jݻFk~4\>rЁtO3gg|g˽ =>ՙoZYr !]LǏ5HҞ^x`GŞ6Dlh)}x@Vt0~ݙfmv,=E qcUB>aQ[4{_cg lc޴2S$zozkHC/vh lo=qYt7W{oɔA }`탫CjD%R(2wUT2siyyĕyCE.E1U?yQ^.PvmwxK~~ÓJan/ܭNS0PCĠ uPG?sI#F|U7 ?Z#(.l-P^h~@c0T4FҊ9Q~L2Gl}4FYaJ|`1?'ܸD$݀-s@3uH[ hM\$6Q3!{y1i (qƑpF #~M=W0KK) KlD`l>w)F??l FfٻW`6Se ?R~ ?7C\@F܈shէ*ӺR(so6R Pow0:Iʇ :7FbQВS-lx0R ~x՚ߦ :s+Op{Yc'm{}np2yus_y9xPnu}]~,=U{] CfvgqWyzY`]Z1SXdl?hJlј @4}̽5φ@%\tq1?aQwz :;E3L8'/{Hu|Xx} ;$iupVa[ 3 ΐ;uE ΡVt ?Cr\ (#"X8{f5k䕇G7H!C's#O>>?@-Pj}}͕ۡO~o٧ܼ $T:N5f{"K\eRauj_bn 2A2X \~e|JS)# R!CٷqBU(NrϮf! 
nD̐G&yE {J]' ?T+c9&z^d۸G9 '+e9HVF}֦nS굋F-QRJ zI`ǥfK(CQ7e;ȒiT<fe`Ԕ`{`益XP,00$JNK_kcN9$@! 5c#`j |XtXZalsx.;SX],|zBmGeZόZϏ4oiՅwV`CWf6e~qqibr 9cZ;ҲmrҼ7ao!]SZCݷ-;-EݼsGR>C/r4s7sg҇ml tOD#֝]eO~ߪ^ϵ<_KOQwuzTxZDpan)C}E<7<1`OXs эdpp z|~S~iSiLl4߻cP1 Dǻx ]f -:ħ'ZwEi^~|1P諢s](3d{  Fvã֥( ~{9=G*BZ8HHÏѧ uH}/-9U'E ȴq0' ?qY"Gڤ JJA,(q6$ʹCҰDt 7jaaF~%Lܹ=*I;۶Ӄi!;Ј. Hֿ߬|h)Cr!~{GNaJW4հ~ ?{ȷqvy*I`굥{wz{m}eh@tw0o[PK'Ci 7n%'I[L"sq4,uqxu  OcqjG}.D05 e)CGr4 S쮶c~mVx↖lu~زWCzo♵7[h@ K C5#ta4xz9FB.^Zz]+A { ` @ҴU)~,<Ѽ EsshZw~*lZH Uy. ?ux26s,Ԧqg *scm Vc ?.V 2nP|f{Tp F#pjωxЮC;|e1On~ОCu``o[V~Zm׉QS|t}4ch_Q/ʚRd]ĻJcsM0@!v JO>wO[]7tx76ЯXOl?~Rnc/س;DurCa<5+MEzzt_ F$ˁ2{?9j nWQfZ'`(1؊ ծBEGCyEydVJJ3?F[4aSɘZ8 ̞5<:Fif?R ?-UBAyBGpw^q+%G̊`#d58TFwV3x13UbnTi"7tvb`@+D"Na+N2$d/bx.[oH0yE9dƈ]}3y':eCA6&;JP EUi`ɖ6q!{xGK)q߶‹ ^㐌ĩu[j}jȜ%E:yt9v5 8nhKзܭǝzznGF͸}#JRg EzQW xɮʧ>%/ ^u1{bF.Tn'ux酷h^3K/SǛG4;t[Z"Щ4֔$M)׻1M;!~6Jܐw'{O\MAeIe.F.~?[5E=a|hMN6xIW㔎I6OܱJQ ~LD=ޣCNϳz?lX%Q+z)rq"ߏ?Iǻ8aʑ#-Zխ´jUz/IlWwo%𣟁"TW{ybPd=Ṭ/3 (ᖧ_UGʪթZspW? x?*4OMu,63ÆӴ!*CdQ ?t:S1d/4EP nރqԵ9b'|\`փuᇡ%X ~8hp<σQ68ɷ4),Q+Z]~Ecϖ<c ?lUz5?a71V$~??1GwiAD]^>\= %h @Bn3U0ݱd#"sJE,VYiԷp!U+Da nnTp=s ?.?Jw㪵vu!ܾ87//Ds$-O2)28=}h΢Nf0I/G9 gPӃ$gM2~TzM88XmCLߗ~iI0*6ސGK~7١( y蘩*b֡cA>?oܶ|=#xsl,_|?XW ýK?s[ţ@B@*hw췑^ CM- dPvje%{K>aǛZ`yD>Sz!΍oNm:OQ "u[i8P]Ow)bHtcYC9+zaHtvd~zAe?assS5~h}Q-Ǹ3!>W7h㵜;}dϡ} "X~:< ~=ᘻeɡ1bJ*Cqho8bM?>Gґr5NtѻJBJt&A/ib?d:, v%Iם,ڍk̏v%&H`K  m8Gli/j C@~|@=29ña?\"B r<R?UNcR~y]&p2$@v~l1sBӂ˚߳3@~KPP0I6Zr+ui7yZlFC V!Erԥ#~7ˁ֯Isݵ[]ʁ` ďOcȉ!N: בc899>-EzvOliĖ?1ܠ`daU=Ǟv[E B|=xj-n;Ʌ15 0CU<;52%ىΆµxSɻ+l3@_lhry+׎ jHRbJ^ amۜUK׬-ysf"i^kg@/'åt?ݰ>8_hJOFL}~R /@.>Tm+ ?yj1ɡ1E:?VD]q-O;w򾂒Vz etY3)eUI퀲!gL̾-\FA^ a Y`f|7IGB(4!ȲW%ܾ#jhZcv޴Y'}D =~ bFļ0YQ\!Yj@Nr,xBjፋ޾[vք^"y~vS#<ߠ/yjOV@fǾ-=x,?&C:ЀyXpϾ8iGiX;ѣ͢`[ovY!9m wpne{+`Ӊ€Ǐ5M+gC9kIUejq+);ẓB#M8\#ͳkgx W9\sZyMa 䎝cnCyGNDKٮYINk]_ݰu/P)9X$/MX.yQ#ͫ9u{Y:nYQ>0uƼG8h仇iǃ]ugnkGv;60nDyOX}Ě K>L ${H ]\Τ>JH;>(\p=?6y+Єg7f`U?zx 7pSݻ׹9gG>1U9>=^X"69H: jpiNW no]Z^ ?[R*znן;vWwsjGeg69O(~jcA0<`]rb1G of8O\m?鿹y߽O< |+|,/= ԯ^wq='> ? 
^8eҊ۸{[6bz8| C/"Ge39% z9V I›џ)U,8SJ ޫ׬bt玀(L ?,p^/w&Jch|TUm"cW},w*( f G!h5#J>jT~8 ~ g(؈&UˠK4}aN"/r[0bc}5Fe^3g&΋.QA{#r{,7L9]Aa6H28{zd_ҡ42EjdM~@&7/{=~.quEX=*6;\dcSL#~okٰQ߷:ҍ1kYԞgx򺭕@]#js^X$W;KfZ4ܣz}`f':Vb- >`Wߴx?Ž.~LN? Mzӆ rg9H=1ޣЩ" k/EiLW;}Ҹc[l)CzSsz. ;qos|^<~L)x?bind_޸N,'C,]]ӂ0ӛ~G,~MHQ6lؙ̔, &kI 5y;-{b<8kGYd$fx 5 K:Q"QˤNS@E'p`']n,Y $AN~fC_B'*Uf͢0P񢵣(A\&J+^RgZFvןS;Pao%~+f@YBgR/A>1 .ӍVѤ.w'7v|i B'~pSip8G )S7NDťd 8tڟmXT|*ޗ %  ̵b. *r[gnoyϡ[`Zw6uE$Ш7 `'\ˆ^fغv01v hoeɧo_ϓW71Kz-JOD}V'FYJs*wT4;W N:ůp<7[vw$xXa0Z6CP$$VP!}H 2Pb|bү&[ƇPuZ h 3Dik97*T.O KpW퀗ʣ֥?^`A.p~THX\8UԓHy0JQ:V8#L*fPUN@'{d1w;b.a< ~t?2 |?n* 8%C0$m)?>PiyiڧU t( Y2YT=[ƞxE?@GhՉppxEЙ:jj?r(_^ IS{!Ў:, fI.*7ZP-~p(] &iԩK-tB},C!EGA7o-wn/?=uM\'zX%ʢȁ(j6y`OoZ}`O9YVjZ̍` ܢm\qeW3BGᇴi kJ-u~\7f5(h!د0]CΔ%߽ [>q=5؁;v A/f?>045hE'[7lI#(J{>ڧ56 P:WvU?>;ι- /KYl-jyj26B7`b, `Keംp-CZz[~u^;J==w=TJ< LlOը7Sއu j$9="_[nÓr1>#/.5+;P cxs{s U5>y!J,jwOIg_ 1 mմr80TCoy9s!v+7ޛ k_s'8 HN(uWΞgu0">.C2S0X\ȂT* z&/E0N=MBd/-_ϞKjֈEII@ ?6WMe*4W+窝)hޱD[ot")h->рGG2b[%rk0FS7@A\@>o9fbM|$&K9<9*X eRpKX_~,-ExIu@zо^Y B|?H8P?5{\Sdڤ Rꦰ]"\9%yD:hRG]QH*pM iBҿ( ;]=RJ2QV2}رW炔t շNf8eʯ%ԇLjv쒀|dX,bXYL|=A.7+Wۗ={WPKZ?YFl(9d,x? ~p]~XY:Љs$I0ţ ; ڇ6g=G4m@na&nŒ$Ji8y ~F3B) ?WT#T m!!?h-Kyȃf\s?TL^kj?:9#u1ᇓ"zCʄD+UX,bX,,8Л7&OsТ)DW?.jqf~ Y#ˀlL7ڡnVAD h kEU#e4 "2nG6KE ij~̈́e΁N >o< hf'q; Ҝdi@,wmH-a]ZY4?3ϜGHH""H[ڕF_!".7b`'j>w7 MaFđf`12+J]VfP0 |N1HN ia,V:tt|ez8^SMa]&6IXqϙRczUL".0<[%bX,bF5ݱF*pk-<87k6,~" ?-IȺM:GAeTuj(eZ6hkO ?2yÊۡ3gCCPשB4bz?D0W]"'B뛥oCg+*~^'K E)Z;Ѱ t9R}rm1YJ1`X,bX,b]./Nf:^dTuhm}/(88x-E Z2Kfc P>;Qr(I)@"6j[ΩEu[F'PKsbR֦y! ~sr5*PI$G B( Q~LHHEք1?@RQ(,o&TdzV*hVHw號_znja]K]*^qX{-!,'}=[YXL_zIT<Q6|Vb$bX,bX,rÏ?\'~м Ldj+NyG~`Ng?*H #1u pu4(/1ӂn*F엨l<?*B~ 6C%'RAI!Z.PM!c? 
?hQGMiSElF2A|H܊=ՔʁWmFr)2(3t[YaMډ?DL,bX,bX,ÏY?L`Ew6H9N5rB~ P +sfU Ϣ.ԁ^6~LU #;H6t<6YmJȈrlQ2y :CDs`lJPJ TBW3BFw04o'E2S*uѤіR)6B@tgAdHA!:cCx+!3cT|'6ƘkmW>1fep"%9([FwSejpaJ |Ҁ_K|dX,bX,b1`]=GJ =4 ~h@BG-qH2Ӏd}D݁d,03w~8)]mU ?L>;҉$#b_?Iiy+ ZRT6fY/– ]3E\+Bٞ4&?f]ȡMUYJ0s(l{~xǞн)"i&&G=N p%v?Jw$&Y@{Ե$bX,bX,벀l{2ICE ~R`S"\pE9KtL[J}"ïuZ0_;kAeEQBOšR CwR>0y(dcǶ5&eQjKh&C"oY R&DHƠ ,U O/s@<6\Cr o %6*3ľe{s~ C-65Q{1X zxYJcΐ.IB,Ae3rZa&/|:bX,bX,ź, Ϲ .sr W\"jZAKW N)D Y.\ѹ,jGꐅHFy9>0= g+ g2#tztQ-[qy9v  !W. lEᔷ$hQh`J1K-S,ebX,bX,źLNjgYFm+&8+d5*'!v|b[g6=L{OLQ)mάGJR]З#qЋ? ޔQD_1P!ugm9[ul1RZ[cJyPL`a @ T$i$m7=5?t ,A>)%Z:_<_uC.|rB"Z ~xքUI3tUB\ꕽ`+d{XR/ ?42R&ɫ E^)3$!;Y4bz7{J6~S,bX,bXA1֬ l}]JkRi I. ct>-*82)^}.HbuCjj=?Ls|AΖFߘ+Ӳ ?"LwPht?Զ0p66 xh)lOjUYUGh*# HSlq Cz†AQ QD3(0qU:oCeۘ0M ? H rG~ŵ禤XҌb.b ?TFOX,bX,b.ʙ.k:)O 誚xռu&!{gr} 4cp$:lv܆RbgQ℡]QԲA`9fnL*- ^aK Fea@!yʠ򓺔?ʠT*#B@&);Y#LVmV'M 3HnNG0DA"Mf'Xx_%dyZ)KÌf`f' -ZIrh @YٜLRT(F, ?K c-*4m^r%T^.Q/|dX,bX,b]b1$ By3q|e#0t3/#ĢȬ\Li' ő\gS$4k b]0ᇎ_ b\=??S*fP ?d҆ ?L(o*Q2\WG CPHUexHU(UKV!PE;l((fȿG8^54Hg={OVT}[lzg=ٖ-[dI "L$@d9g r suӿ{oϝ Sbw:{4jDHCx[yI,ȈLP f#2"(nX#ˏ^1&aį,i~TeO; ֚{~䗿BfD~EJDDDDDDDD~~POSvXM'`01نޟ3jJ|O…ݽY{x$!FezC}w9(Q|=.dj) ZUfo߾kggk}]yUEAU-R[Y` ꨓה?ODJHEF%G3t|YJf1 չ=Laڶ6FGE@M &yPaP46 鄿GUmnV ~ݰ];f l$"""""""":0o Ë́i̞7dL[oMWJ6sj63ȺWS̎ ano|Y>8NRN(GANx V2c9) %+ͨ{LG!>o'}3|]K] [5|TT~QXUY'EMkK|7_#~G-%*odV~\␚HmPb~: _'PMU'qHYTV1eڇT֗.$ kfn=cCȝx#9ԇW: /,z&IC}7oq:#٥{:斚>foSWͿ|1oMQ96m`P? {ᇯ{,\ |=Xר)i0h*&s¹*›m7{jyfRn*-Tz&3l#kIXʕQoF %f`{o|=LX[yVUJ͊yKK݇t~/ΖZٷ(G~J"' Ԓ ްĶUo tz%ċ.vaR3(4B 6`J: V * \:0.<B'ꌒ~JocxJxW[\w9^FsKXƿ|胮6դTRdSv9~z0;<ݒ-uE_ELgLmصwC1d=r} " `z4ieK~EGgW9뺲;|OʢSci?K2 ;z:Tϼ&:ZqܨU__7|s;|(? ˥f*oj? 
Ri.#/늭'6ha>JJ3wo&kp[} cj7'8']9?rxc.,'9ޔ477J.1 ol5j}̷_,DVqK/DY;+6/XH>r5[)xMp4!ie-!79Su DU`?:oXWFf(]I95ݨ iR5Y6ׯj2GԨÏ+njFyRZKak>4O?ikm%$2)+V^v]P-@Zς[F@jMRS$Ym&0H{mƻk~¸ʿ| ƺ`}p*63YǛ0-6^ԚHGEj8 $9]f`D2fR8h]Bf&4sk RD}'aRP9EL=1xօuAl:LP,Q:~a|l2/GWefy^Qv],u%W%.mkkϾ&l(+eIHq+^ H VD.;Yt?La4w,Z) sB G0ex:J;šj13OЏ1Եz3~ªMYPZȥՊ#y6H·(I>&o}yo!rDDDDDDDDCmM7֘Û|^k=UQd{02 g#D =~MUxjca424߮bMyU~VW)aGK8mm*'_ +%)4 ¼HU7QGuU$""""""""zN'|^UL'%%5%ޟJoXi[TՕfR՟VViBsub3ǗTV*&mt~'Tv.:XŚt5_DPG OW& "kwO_TQU[$ ȌY7<;!CHqEWILJt~~9#.|Y]]RTt;T$f$.7ސL"-QTVYZ}Ed}ͷ ̾雩rϊ"loBnLܖ ?07\v44ߪ[x;F2Wom ^*WpgM'?êMaxO7-w&'m"#_wGVi_W-DDDDDDDDC_}%"""""""""""bm< +?  +?  +?  +?  +?  +?  +?  +?  +? ޅ3gg͜= O5fㆍUGO*>S^8~B3OGo-tҗ_~ɍokkk[ xK/qld;k2Sg~##1{lߟoG}ԓx^!~ x֍[lM<Ԧ6RD@DzҲ>E+=z4;oa>3U kb^!xիWM0h|ğ\*~6 q[ O}7j#nݖpȣk͛6_~HG||℉%%8n޽džQqs9ttt<رcˎɍf}#G=B|_x[4v:$rJsejNW;I [␔]?*++/vUyC}<ܭc z\#ٙL+Ϟ5;cwV ?Ç3mmivǺ:}QcC NpWO>Tm0dG]/T!n)e.zbeKWv?qjtz(Jq^7ަpK(RٿoJiCw] I1iz[󛚚e!/9Sn}ڵ9UUUo== zY*sn(EO4iDbL6]-G6d^·:@|rÃΟ;ͥLj01ٛ|?F)7ߤ22G{UsÇˏ8%lK^:-w%i/\x捛{lfdN-Зk[WFؔx@;l}!ӯ0r]EnQ3l~ԒTH`p _}9s-[lW*)O!ӧHe豉{^Zly<yz*&EAF%/2d? R*fIgu}2XIZ_ 1u㪕_>rlk\rix.e֔K&Cu^ }Wwl?'A֮Y|miGzyC߇ΚWSL#g ?rop$/Rza|{A3StN!3ٔK *e"27n{b]dR+#JEa9/](\ 6DN!qh@߇7w/pZvyc2gJPMfmokiVdRw;sVʗn_:}b.8A={%O\J.Jy3YQʉv '}^& Kt Hi{!Cٺ]4GN!: 9?Y>:p=i.щvnpU=gcHQ[[x 7"[l8tI- fΊU}KfVZTXP]_j‘x1mxqN_t[x9H;nb|/KLdq~ؔNo#}E{l}~/G=2En{~8v![eAmce{`lI+__!ūQn޼%ٝ\ ?mڵk~.Al}Gщ g9,: ɱ @^Bq͔/[>Bt>,J~;$vjz-4ۙ񕯜GR:^h0I$NV; L'W߷qc%Ԝ#F_w%7TE8ҒgeF;}1r[wNѻFN>9}o\!VrqFY3g`mg׷:ސCĩp8+6FnGJ| |`z=={SG/}~z~|thTӧ͐ >]F`gL3uׇ/~.=©?' -LPgSrGdD|clwI~Ϯ#~8MtDDi-L/5WgΜ&Owdʘ3g+**tR`@?g ĶR9;?R,14b.ٷo|EuzЧСHpz~ėgu)Nꛜ0n Gz`K,MxE_E]Ck#Qɐi%"6#&?~ HaWq 0vzgl j9 p…͉Rl#K5tnt,唤H9:趣aPtWB=RRb}]4CgEleڴ755Og>B&mJ=Z,iŋ<"vf)Y2`\\x9~; ?5=}.&}޼i|õk:C\{wi8= ?ƶ1o/XΝ[a>31nnoPm? $ N^ࡻqg})MA= ?,2\[[{ni I#5cé Z.\!k^9ʜBG&FNϝ2\Νe,{%m<߸~O>sUW'R$}w o+,WsQi5Y\)mTMGb 2A|H`8; ?$f8abJǎtuKvQ¡fruƦFzU #"5]az~8'ÉǮ_J駶cORrˣ1a={]n*5ܕY)K/GW̜9~WG>$.[}nj-Udj~Gou$X]Z𣾾^߇ZF|l©#I,I<Ή za`gI!Gzq4ɐw˻FֺvCT}tmC?Ezݰ: 3n"Y--C?_d{/ۙDGͶq^g6G,fҲnGbslJ ?cǎy+t^O)EݎwO\9Sٔ"Cw͹? 
KK9*&r}HնNZ?н946`YW- ۷mL N>.Hi֭d\G.-%dz郔;cFN1wܔq.;looܰ1dq+9)VP Ae܇ .$XVZR =ɼol+?=y;vLm[[؃D Gtِ\>ܾgA澏9lgN8ysޘy󖂂HKkIsi)n%/K19n4H)0H6 mËʤgFbqۯ%dM-9(7Aډ߸qoBTuH6Lq̘"\xޏj5y sΓDA Id,yEٳ><-| +?  +?  +?  +?  +?  +?  +? Ïo%pgW)C}|}5@pIAOf_|@s, DG :ɇ-0?,S6JX!&U ?C&O>CIL BwG߄&XFFf>ȂD (H` $n0 ?lهM>L!hoookkkmmm!!D (H ႉ@l?Ea>̂WRbDDDDDDDDDDD^kW(Ö}"\R"L]]="""""""""""bABlǽ ?l[!)<DDDDDDDDDDD%nXYxKK# r!"""""""""""Z$tA~54$!BDDDDDDDDDD;Q W-"iNw :Clmm """"""""""(q=t;q""""""""""""ZL! ?Ӻ?$@DDDDDDDDDDľ ?$zBb~TW """""""""""ZDDDDDDDDDDD$GUU5"""""""""""b%@DDDDDDDDDDD~TVV!"""""""""""{~||7_}'|Qx'J D@H !aD&< DDDDDDDDDDDD$GEE%"""""""""""b%@DDDDDDDDDDD~W """""""""""Z4ϝ;}իW/_f;v=sN~ۃH}!ƒ#M8ǟ|'9nG֭Iq俓'M> W~^re#R2ѣfK5]ԓf\p߅eуW{$* sGySOEŋ%lي6exp=ʏ$f=/Z`ӧϐ&׮]w(!:t}CUl߶<ȓʏNk> ~؉9r^+..nU~ʰU<ȇCBA^pŒUVa&g 6~ """"""""""~d2]nc\v\),,?q?.\x_.EI}^6܎{*GiQQS~Ãa ;v7o޼zu^c]=:$=!""""""""""GZ!X)ŋ_ w߾}}uGׯ7mYp8ۧNI9͖ }mKS2W/[qUg-[,|{%ˤUxK.YF2ݳw1?ʳW{ {Ք-ҭ[D{=R\Un0g\'H<#""""""""""bZ^Ib ƶʏ-e*'E2d7Ο` N\!=u9GNƮ]ȉF>JLHYVqIb """""""""""=vwiw[s>)/g(TZHޠ׿ڸa\l7|·QƜ=]4rc'e9~D#FoرcQ$""""""""""g=~gRW˗XB#,ZXtեr`#qU+q;+Sbx!#r~1߿_oټ~5~;wHJ.trޔ-|)H<™3tM5a\Q1fq7^/c%#O w%ȓ4v]vVX~NdY=tX*Q u!"""""""""b ?d:^o_ƤM[W\ٳda )[^x!}c)/*)G|9GYT=~Ј##ؾ}wC^".'?~5kO>#0}aj:dG߁d8x?nM.R‹zE,DDDDDDDDDDD*?={yo}*?3gJ!E<Ў?a׮=r\?੧Y+?nЍ7 T~&Ky3f K.2y)/?tF6%Xzm~egY9mtDDDDDDDDDD$h{xgLGGyO?c'GBRؽd'M?-{شiӻ1cf3g̲z}:MFDDDDDDDDDD~c=ɘ9;1'NkS2urnM0BDDDDDDDDDDz~[ 邎$%rŌ9F"XlESؾSG:6VWZ##v N#^=7lht5DDDDDDDDDD$h/waڴ:`xe]n^wgGc97ߐ2$fvS^J9To,u'Hq 5d]'X]RD{!i {Uoϲ~c?ƏK|o'nmq)@ ARnn"MuAZ<֭eN?4:l}|骔C2y}ɒǎ7ghbZ׻ao1')=5kvHr3n8o/~ """"""""""ǽVuƏj媭[mڴYupر@z4nj{nIVv%'5 ?#⢅Y.^;R:88gμ7 C>|\\ I+""""""""""~ܟc`[%H\*=]~POL/X ~ŧ H}YJ'&rGuBUV(XdYCyǓQqYgN8F.[be)* XSRgKr9&4E'@DDDDDDDDDDWfzۉɓ>}&8CR/ķEts Ksf%w8G7wIlX!8wd0IZ&OK?d,DDDDDDDDDD$^zm۶#&$m?}}x.) 
Y~d,(ӏ>ff/nH&N$W*V;v߷_eRZ,@RsOgٸaӅ uBDDDDDDDDDD:@DDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDD$ """"""""""""""""""""""""""""""""""~~ """"""""""""""""""""""~ """""""""""~ """"""""""">pGaa"""""""""""b}3g/ """"""""""".ذe7"""""""""""b}CDDDDDDDDDDD\҆{6֮U HAHBDDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD$ @DDDDDDDDDDDDﮝMM]mm& """""" k\zfk+:;U755+wSj5i~O.7j߾7Mgccvizߚ?1;_n?5:n\ɫ*7amZ!/@WGgB*pbǕ)@gyiǙڏp*mǽ몫)(-lO\N׺e}LbNY'òq:544tV""""""ޡcO yt{vIS:>[O?5'H?3Ϟ9x?i۶p}r߆yEf̿_0~\%55Oy;."%:wɓ.]ږ.?Q6;|]]N#ǽklٟzblmξ|67#<'ړ?2,߹s`ۋfΘ%=Ld39r46f'2Wo\gʾsxMriO74xs~_JT+[>r%NV$wf^_ߐ)IF̈́7Y3gn%Kq}c?ɇo$ @Sy_F#ڈr?r.H` 21?o|ǔJ~S?f־o cû ^v㖉/{gM<$YoQkk$d-#IܬҔ}@ȼ1\|=ӻY'?מ61/^E19ˡ]wT0ȴ|(߈SÏQ|~qW~ȤO_ S&OL~7f\sc{vzT&;f<">ʺI6H)G.myn[Qp=mm[Vym6vvvT(K*,--f:[VjssҎ{~_?_]Eͮ_NަP.mϦ;.wäZZT~ TNΆ#]8;~͚b%oh?s+enS[WKs^Gޢ. ==n)*xl]O,S=9WVV̍Nw|bxǠ_ȥ@&ķo۱eVipEBH2QXPd&Ǐ sηKKd9,YCݽ)(%vQךfr|?x BlKtD*ch|B+ILCCCBHLK 7Ȁߘ=\n+< i!vБjU3-J&[Dd&g+0tX%N2AÏx,a/.O0^{ߍG2(?,|=;F;>qhƾ`7 H-EpjGGiv|.dŸsݽܦߓ]څ)=[3~HnWH.!Fr,_<ʫ).QMDM,eia/uLoZ׭-1z !%O""""""ޣCb^i.[ڿ,sJ!z^zmt3HCTZt;R^v#(쟥0z(;ovVv6%05z%+9'_}MQvͫ98a *l#-ܝ̊[tXIrs}˼i˗wXPV}y3G|+xl^lSm=LSE YH&d|^̍0k .*; ?伧O:USem ?lG,ZXwk^eʇUr|V:Ҁ' ?^$$a*#,Ѱky%V_~/_"rӌg$6A)OG 5zr:1e4╕?F)Y7qㆍmL"Sdqm?/Ï;rܱ|h.[aB"v= vM$";=j}IEHJe"EgWӟ~KKPALFqU2?CfY/%=K֞a4YS+#Ŏ[׳iݶKADDDDDDC'̷e}!ݧA:y/^a8rp='O9ӦS[Ia/M:H`;ΜMC2K8_htll'DR>=~{6}ӧΘu$\dd ?R9t@!5ŗKj[F WM5K;!h[2Cя2;l;B#{- ^˜礷yrTǕeUc%wmkiȏҎAEƅƏgfUr ?Ǧv3e~63 {LP_f̑Q~HU}7nvڮd5 f٦z^[:@HByy.,* 5> ?v~?ڂgCzARh} )!k19%#sy|)Kv0$CK`J|O>Wd{a~Hu[t2S۷CDrt3~ ~,GK!y]ZRRܯ_51|>Z1-3^mIZSufՆ%M/bE8;";fGv59J7A'?Q>%.).= ҎYVDÆ6斮].KOG4ꐫ6kO$<#SlEyISZmے~ ~?xQv$mD.M=nܮ+eVMeiSÏlz}2ɟ%V;u# ?]r ?lr[K{QwX[[gg8+J]p)qv)0OÏSwm? 
1t{foM7nܐb2!i!c-["^pG(`~׊`9s#{+32[-]:[JqG -tD>8'+,~v] L2=w?+)i?v%ȩi?~л]g(Hӵ6Pe٫ޅME Au=ܾ͐ |O&a܁`yocUT7I""""""bi8D^p)YȞ~tO">-B_tݺp['ʏyR;Ȁw#A)ج4ulCg@),p-@ZObgMM܅d$fQMPdH$4-w\5gWn<{~42|Y،wAa{Ao߶#Di_a`=//+A9[C15'N%ncn:I'ta_qݝjA*'$wiAWe⑆gǎ][6o5={~Hx,au6d ?̚Zep)*grE&:3[N-K~Ǥ͉z~56}9">DvۛomHeHHLj-?l 0'=hV[[I:e֨Nw';k/-g_z!K_wNKMF0aH46! m=Gpz{--M={[ڣ0xؔfn8U+8~49>Q3Y\2٨[s ?yj!q*ώ_7=~z]*t w*:ô4WV2c{;y.=t|Xď~ ^1gl0,$Q_"^iE[[:$TS8P05m w' ;Ο )#L(Q隞Ck[1Wm+;93Wj6R`:?GalSSSkME e2/gΘe?t&uIORL`uͶmn"R€z=k/%UUޔ]I%%Yܱ~h'z~xՓ2V·O 4}ߓ)"juoo=9Aa|"VF)d(cţ$!hݳKNrr-v衿LXu[z6`h bEU 5:FD|콶G1d7Q[d5\[6GI_W'j8eM\vmVPPK-';p-q^fHf7܋du34RžQ,=VO/?1!+ ttePeRYݦUqVZϽh+͇]нzCŕĿA]zsUJ.W^dRs6UUgΔq־GKxhHVm^!a=ՇJ>t-_:ncee)]:5`m-G9[~NDݷ}۶:0w {- M4q=:Jx Tu8y5봥afz5EmX&TWA) 9WIh%r]lkcDBI ~n8تns uHKn"Cs3b ?7ƫwLǬqEhhbfkZ`pS7*ڗl7U?2#VfvQ+7-:MѺo+P\|T ЛoU#W?@B?? ~~@A? ~~@A?? @{qƍ7nܸqƍ7nܸqƭo?qƍ7nܸqƍ7nܸq֡nnx_gtH?]Wnܸqƍ7nܸqv?Sq?ԠCrƍ7nܸqƍ7nȍ@;E@WCrǿpƍ7nܸqƍ^?Cr?sƍ7nܸqƍU  ? pK NLjKޞ_Ʌ7nܸqƍ7nܸ"7~|P@~::$~?@O'V|Ʌ [v ?:Ϳ ?j~:)WB-@;[ިϣCrG*-@;:AC+W*~7k>tH.t @[CEEEy /JeU.~@qu.? ŜsW,/(7=TqF?@ H IH ^a@gҖUTs, uDz]3-Vik'UͨԱc'TԢ<DQ ~^ǎ;SԔ? 
HVZ)yFP(B)HIIIJϫ|Z۸|lԗcbҲ:dU[=$x3KF~?X3?Py%kVIlyc<Գ6<ȑcŒ '&_%n~Gᇎ34=}[omZM#A =Æ'T-.>jOr玨RDe O6=:#/|C:SoB4!:v%;NJ[3 ~~_aKn݆m12HƎTNæ͚YэKJY +'T-x/H-뙧z.~@[|]{O&qmY16^/|Z+˱ iԗyvmG㫂GtB~??C+~,}o*Sւg޵ǖU|5yPI1L҉ Z8$=֭텶lb5~}GʓOχao Ki(V5ee)#XX å8C*3-ٟ;׆c挙v$M&3ﱝ4æe̞5;ze)gdGC;QIz!ګ萂;v첁ʢ}楋 ~~?@AݕJ"l>ᯕo[` A2idS[.!vzTJEp+BH*}к_bڰ~cl {<~%zf98p[c#a4DoVc0 ~~hE[!RkGҲo٤[pwm4V1x~Y6۟חX?ZߙӥVd¥) )!_?1~[N3y7$Y}PMC‚EP0-l5㣏8ǕO1pŋT8kl{Q]HyЕ'VC "DCUիUWu/GN?-knk=:2ᇚ/nQ>S\6;VOV%WWqaL 8Yp{G5sV֛oL.!~~$ ظq-/_2ײfV)`Ż=}!xk?dqvZ D-],:#E{+\Y=ҥZ;fO=W=kNN̫YnX~ݫƌx˗6^IRjMbF/-?@_ssz\S`cǮc]Cݺ}OJwY߱qo f6 ğ`L?M';S$~׬pb~Yzp{?_ݝ%+6?@7Q/kGttk۶~~ Z(W-AyumG0\Tf(ڱZ p#2c%7 ƭNJ_/?\ ?ꂫA(&/FzQ*I(@ UYBΉYA1Thϫk??Xh ӾT-LS6[i0ZTmXqVLz 'q&~_X3?xhȆ W|>];w_S@ hsz[w ~ˣ %.Uz>?&MmW+5nϵl-,^ZҝOпt I ?\vҽ) +,BBnQ9>ȑ訌D}R^yՔV/zcb x;+ݳ7hJNzNto7?@B?@ ?@ @UQ ~~@@ @ @6?6U@Iٽ\qe-@;}aYu{cT|_ݻS'O{{{'No.> {GEE[v ?~{ߙ{c߿w6uݻc۶{{{'NOޛ+Dիn )es=y>3ޅ .\{{{@;>:W>Ʌ3f?S}ޅnj3v,s=s=s=7=v ?~{|4{c߿0ݻcȐ!{{{'N7nܸqƍ7nܸqƍ7nܸ7nܸqƍ7nܸqƍ7n:ԍ~~@A?? ~~@A?? ~~@A?? :KU\. 7Bm}핚K\. ]Z f -=tϿ\y]8`5Gc7Wz?~]w[[O.<\?^!iWˊLWsu:|cVŊHzJ}CcI~8ls}^ϗ}@Xt]+c78rqwE[uOȨ|UY΍[woY?^^yjb \sbʘ*\US_/-]4|=. ƚ\]ٕtz [}鲬86_Ҳ_?߳Wϗ ͱ>Z_p#peagz;Z!= ǎuSNw}#=;_)twh΍o{^?ZM:X 'Fq:?rYQK| :Y5WsƮCXTߐe%ȝ\Pk}YAEE w{AϘ>S{^[ˢgÆbu׮w箉C&NS(/WT~f&NWTISl{%I oOX:.Y?ܛhϕǯF|==^Z6[4_Zq,}㗋AȂOlabkI7ZLEg}b[ Rs6go^~`ϕWV?sz,0~`G>;\2bTV^tA3gdW`5 :A\Kt|W2vؤ =Qǣ<Yp.~8{[J{];yT&tJǺ}IXW^Owo޴%")5.?&׋~UBBx{.Hѫv`IR :u#e9QB ʧBƬ#~xu}N%WV}2vsþRZr?~^UXZf:u ۃGS!=iLل}"-9/rU26Zh DЀ+6q)UoWg3+j}+:$W.;<"8k@qY9rt\eӧMW8E ك W-̞k^Q]r.ȑ;v#j]>t駞uL?!efϚf۶N$~ؾ3x:=ԩm;,rYgި18WY5z7dNkʯT߸C/K#7hcƇz\ t MU4DTAW\sbNiebq.5RϮ3_n>jZά7vQ ˝KR"{. 
7Zub{_~8љ~ϫCF0EosWT{|}͝jݚKJJj;T^vm_C#-*CZd6[PDb)--akrƚk)s\`зO?]R:[jGacfi8i'z׮\e֮wk`t.E}ǽ?q,_2x…<䈦bAYI4W* SJL?|=MyzƦm:5xu?m+~|o]ٜܬ-w?P'e3-\~<-|*?tj92sk?6j`*)F8J}\k޶sેnLg:D_+[Cw=c6q!A'=۪vSӹY`3V>64#gɦnMkiS1vLUU7&}>.;t<SIݿ`ݻϲNDᇫ^!)m㏣Wo vۿ *ո͔[~C<1.6u{e%Vx,~D^ rNŲhTCf ?+Rg$MvM!ca}:J\bk7qR]p /W>>e{PkK^]#ȭ#6#рeEg*kݘ6-Q)՝_ 3ʒ;ٕK'm;jКW4#W.K3JJG֦cfVT;oSZ`>D ė za`жqo;[Ʊ;G/fVT}:ˮ5զ .b7+d9qUY U孓l;,#BEMq[`%#PKՅdĆMYX<*nZ+ /ёĆzdҾOp-%%v,~]w=o_o+Clbujֲ?ic7~h|{ʑŖa,^_ߗ_?S]ș36JWɒhN]m T{8`{Pf֊JZzPTJ4\+{\/tE\9^^ݷؕk\DnW8X.b.)*x6QDCKZ-m`It̓i'MqV7Wyhe øU܂{0j5rw,p?x+Iheus:%2o MDw종64_`ZzîRqkK.9_}GFF[4>=iR+I? PAS[nHbWsY./YlRW[W ?[DCU]Ğu':k%Q( ?TaR'іYQ/OO-.dq @#42p|}E:rm`~urw+5.;qOWJ?N8ťL -\xsh_rAm]O5!%” 'Z w^_ۘb=WJ̞NV#ٳ9Ȟ.ޗ#nݞx~h PǯW!:斬h'5ъ{ޤCzonX):[mBE m ?Z3|b*g(B'!Xv^Nb<o@Gtb~X%=rq͢1zHm<+7"$=et\1zrQI!_תFaGICj\5Kvj2h`CQbWVďdlnkl\| X ~[juW)V . 0(wG^3(W`Ϛ5sv$$0@M(iJ*5x_T{D{D42u͓UAb*qo4W5:ezʻv@?dI^{Ce֎,z$vyLv A37ןB?o3nL\qņO\V0)S>+GO?|J?5.ZT꺃e86$D.]S&o, Y.$ [x\,:~ y[,~($yh5hķ͙Գ-&]^v?1es.s-,)͸?|=G'~ __V^לd'X8b[3_u ?L" (  *e/@yLUuH,Ԝd#mmq.~HkKɐ~^J ?\pR<׹@Nw b7m-~dGWV;\yCc~XzoiIO?R}QZQN"Im *olNV~萴7G ?qTM~Dlmns]?\[r%|uMQ_N?vc\ߪC6#*Ck/z5'ԒfKD~$|Jzy "#vX 8Dmb/Ht6]XRIhaKJJlF^ɊW\6գ~6į<6Pl?^]PxŻ ܖ@EsB<$~0V䓻H5]Y=WixeO: #DFyəU:`y~RfY_b*sIFPsR|pt`>~H`~$a\>Uw\Cןtͧk^VZ*u?؈n,ZUPj`S %MLk0NDÏRR+IlbʙR'ȇ-O?a, MvyI@4HDM5fW D4z=ڣ'.]b;vqu'БU4$͛t@ER[i.\3а.H?&8Uq++J*RN~GL#5rͣZA:ĎC;ur/m޻h߭|"G:we˖.wΚDH]οAP[E9p@l&v0]zSg7 I Sh`9"K,K.[TAU9s=.+W&=.BصʁFh31(sɓ ?s7_=I'~Ӫk?>h]d~k R9{+~|A;{dVq%{ϊm$u7l/Iiq_T#D {VTTu5sZh!m){kE7=p2WH[@NZ^=TӼmzI~XCZZ>ɵrybU6h3e w7O%b\j/{Ec*W )R;\*/Mn|)Zm_^yʶ4/$6pqg0)=Z'|d碗–Hi„+M?{O*=^pk6xBk[~okUbH[$??ᇵ *],IRJ[.XAZ=ۭjzoq8'h Su[TMLiF ?䳕0)Tۡ %g-Gtؼ S?z=XrREPoƆ76 o6q朻:ء6OhH h㸵gne߬>Rr<ڃ#-hЫkȹ;=/~V~DsYL:5Ğn=#sM)Ć7%2D߲pK=j[(ҿՔ3wif]|%Ηt;?xn=\զ)it]J$̢A?ev_slټtqmG8he/_i3Rxg ~+Ahae)\+- ?s/F aObu(6"N%yI\́t!M2wQJ"#V`UXGY> .baXd)Du6W KZTE)b,ݱ/ Y,PV^BƆn_np=k޶`˻Gr9{"i/J2t>eIz4rERi*FrK9ZuU~AƄukㆽOUT!ik Wx؄WZj]Wۂsc)QIJ./m? lRo&C q{ C)x`o{Ҕrϕyh!?^UFm]U<}Q=kM/ֺNA[=bͻRX8? 
~YOjlGj͉9(BP;$w_aCu.n{gKE!>c'BbWUE_*Hȼve &+~ , WZ.]~g f6B9lذI$G pP0>2d$S2._=bݨR,0noIxu<3\R,Z(ںJ9GY.ܤ^沒)ވO>Yu]]Mvt [)O)tPћ ]wP˵}pm6}qm!N] 'WAntD Fn8CWW64r|orq˙ez^Vly^pݩC0L/;w|WaEEst ]~5rߕr ~P"oԞ)(HƤg~xDqfm`2cN̙RvUںe'Pe5֬^"ZV j|?+ԫ7䶔kvK&LWѰ?7~Cś/x X} v6;ȇj\YCRrc7>ez -M8|ڇ:uN!zU Y["ǵzuȚaԔ}2ps\ Qvb%~=u\{.sW&' Iu8KM؏ :R7*kŷEP#(X0ƽ  w^—j,,w'wkT .˚tY4:%jo>G[_e ~@A?? ~~@A?? ~~@A?? ~~@A??Glqd)@NWW~@A?? ~~@A?? ~~@A?? +N8}]]]իq64;pX}ƚy(--+.>zG/_r;r]]3gNg?6 YQQ荻rzժ [CɫOU}s'oSSS/AuWV^}ފ~]6U55}O'Uw{Dž;6֭]Vn@`- KmyuݦU{6^ض?UoHWݰ~6{vN (BѮsfϫi^=_zg)VUUEiWx#C^80s]pE\Uo=~v0ZoEZ}b霘f&%+NQ3F }_tM L4%XMҽj}yÆn[i߾}7oޒ^|9 b+H~zUYiY41JDg={#u{AXх2_$m˯kt떭K:}B ]dW}v32"~-ꯛݛNT>X>W~LeJzjW- Aj&|~X3PpcQ}[iu#. ZW*VB5lvvfwY+Mf/%•*w}ܖV=s~'5w]Cra~(xs;޲eZcFu۫j#RǿrmMY*[ǟ3{3NYGb $[׭]l8ӧͰv\mT .L2=vo g)Ϗ{f-[|]y=;͜13[X72Omyf'H-/=zvή//_U+[gSmY;R6o(ȴ*?.d`TWaZ6z醋t궬@ͪ{T+-eԭ\`!DiGXW_ye/CY_v:x`-ߚwZCcͤzSu+:;iW_wk.\X7(/6nVs M~|5a hou)ګ[fC%Q"yj-g-M> BM{5T\q'j%i(sr:$.w[ '-BCsuYPH=df`W6]8]-$-Y'K Ǿb~o.-ZdJI\qǷ.]?;w~TV-}/'nne*q5up!M6@'Oi}%K/=p࠿X'OrZV0prR) JYױgeC6V/D$ikƃ8Z}|?۶m ز5vD G*_UQ%7nN9a WEbV51BWX(+`o:/`:x-sUJb;J-?WхڱcWx W|\a]=%ƎD `_3LBt㙢n.?w-?bVZݜM_OR蠅/>m}f*MGxc;W/яJ~(# '+h|ٜ6HtֲͣD6lW (=װde\JVT ?ts'j%TaViQ/97ŇÉ]TRCf¦ehnGeU]~wJM6h=M]!^J܍*8n쁍WTn3stwVոx5_5竟O]+|խHх}?ḰP5.k-*_ 楋bz)wEb'|Nf͜^ڟHbAU`f]h6x4}6>i-hJGpnyV~R}Iʇ߲,=pMd5}Pu ?2=͠-<lo^UMWy-y$ӒR{^[W2V?|!~{qiš<hElVRmMnq\Jf[5mj'ɔA^[y_Җ9iᙏDSQ=ԣG5Gŗx#.KOTT勱+B>?XZ+}VRO?R-k䯡k6% P}?VZ ?%eyjոKx+j4mWiaڏ%oeתzPbot ]?GE6xCGN_'e^P[|(UD deU.^t?~"0j8r3p;62r?G?~ ;bkݳ5VR/R%px [׹ ^@; z1]8_"?q@~XLԱ`jfaZLiYvO`J\@V$P5C݊yz5Z>Bea&h^,}e-N,VJZ2Waf&ODZUw@CMȞ~ٸ½zdKU?]y_4[r4#&>uWÙ̿N@uhMlZ_&~c ?\q+8s&We ~Uo3U5Nܝ0ֿo Q;`q?U4w ?p3%me I%5A+}y rW/--~ pCP 3мPw[?ď4Zֈl܅4!?y+I/j2>{5Rk-競E=3'z+pI$-ɇً^ClJQKJ#oe9df`LhhtFճ/mU=˟K}.Q!u9^5+IdH8FaRo՚ (<2ͮ^.LYҔDx\'GjYWcO8%sUm/"_қ:U:VGWh2_V|(vJ⟎|˖- n-ݙ{>&h_{Kl{m| PbmAtTIWcɱkt?57>y){UGXP|-=^8x(&JfW# ?ܘ黛M?-&m9ƦYǏew2#-V[ծФ)i6JĈW 9kR$ޠLtѺWˏK^j,Lٹf}jN;!x+$^4:_#S) ޲ F҄Q}hH!eМ=]8`k5Z?h!EtbvU3vܲ6g6: HVqZ^*Qٲfp|.u:}B!\Yvߎ?UϡR@]ᖾg'O+N?R7f\+Š:𣱩?Ey=( m ?ܒw;T~葕+ 
^=_tW=_a]'볩!nhqFGhsiǩ ::&ђ|G~ـd#1Bٞs`s5˾ާw;oɌ`ٳ߳njrl+sWRSaȖOH~l:}4ڂ|xk.~3=h~9AM-~hU0ѻgCDdU=-K}>=bBRÀ6m>ZP!2y;SFG&3F{GD(,RfŇ<'Uסj '3$ sxEJT(ׇߤ(*-3P~ B؜ HD?EOڑ#YUT_e 63z,a^ʗk̮A/_`QZ WM/:qthDKASۥضW1+ Zͯ]05Zp#hO'-kZdNb줄@ǩ&kf^ڜKGakmI]yU'|*H8OadӬ9{s5-̏z 06KY}[2CӳP)/ǖyS PuH0gN-Igδ,d>-^Wo/kf%ҥm ?*4}?fA:{%Aa@bÏΛjUqrZ.,;WZZPzkęx[ⓦBrLSpC?qGa~f-e/2Sji*W9y,4dcMKuZV\:eu^ jjWX/uH?K[O΢o#?q3 ~ ^l=,ʼдQ _s ~,17g>:;pZܛ؞$M&;ElYU,KU(THQ")) $A l  D!)ٖ-qr^$}g%nFv쳽oҐfzۇxRwz~>0~Cn*RO[$ M=Z@s,3:rX ,tY'~OGV,_@}Ss/ܼEbd ?p]2˜=BO~-AVWŹdRwo6 Cn?5-I3ս5#bๆwkG(s`s_E]BR)ߩPXA4ylWW>Ӳ+,v3w,ݷm&t_[SoT;BlnYJ20ҒQ8`Ķn?suDvv,+j?Ԋqgv+o3ҠE{9iiGGϘ5kYڭk50).qML#K,0ۧ'x~qƑXbB mXkT4}Ƽk߲yB}i;*_X'|KvLHTҠ7+?uH_x} J }>zM,M8bc` s0bP ~o/-W8aܳ/h,mS5ym]u=\``g$MII<'6޼kV5 kj.B}0u232bS:E ʹ[E\PyQ@uHڏ_os.ȝY&fZN/..?:N:*ݾGیbB$UHqi =w}*Y:wtgx?&s)M^tF; .+wQf _k8:-9qÏؒkn_liҷY5Ғ-Ȩ!wꮙ+ -}1~Cl0(3*ÿX Ò *݉fj;6 DHۏv e9C;{][[l_&Y܁oa9NFӧP{l]:_{'N\U+ܼ~Z ˟7sP.t⮠e%9wW^Qr[TÇO1~dV>++HS5?vk?{[[pNpòJLCe)]zVC>Fv+5zSN l\ZwWۖlj}zh&o݈Ot{cGW,S梹bf:])|vZj)va3St5".lXIcۗk]{[Jؕ?hܶ :W:7;JN:x^eWZ0RW.k/}WB9\w*J~{]󪽫 -uvH_n!lS*u\ - kۉV^:|F~3E7invGj:Z"ޝf7vv55Cv(Й3gw~ii ftTڹ{=Gk^I}ke:#w@'w94Bo}5؞=aEǧ$@98#9v,?t tF%'J{3]2k^ƿf}okҾ`mڼ2n$Х"3ӐaW͊Unδ-טZw{]0`Ï4dۚ55eL @M4 3g6+M.6yRoϸO$3}pڕ``M i71O?G_=3rlPǣ<+e޻~` ?Nw^Ǿ{;?ǦM.PC{'W H<**N)R1/>yL5#|_~@A?? ~~@A?? ~~@A?? G!EQEQEQEQEQT((((T~}?((A_*u-~KQEQE "We@5EQEQ5@_'EQEQԠ/}~GV|/0~((E @0~@}9}~{Փ@hni Gs@cc9}~T-߸ A@_'?  r*8}Q--^vU=*kjj~ }lٶuKJӧ޾}'l.]INڬ-3vgFlzrd玴[n rsF7-}aOW`>2}I/^"?@1/?{7'o %6quWtHN淕 W'З\'?@1|+ѓO{vRZrgtiD˽~lKI%'JFa?S~ M?zo{O?G~Fw~z7+s}ȱ zPv? r^t~ ֐6lܰ 6ܝrBd ?ΟNiiY'mJrT~ӹ߃f><ԝ ~#oر|7$(,ϸ<ܩ7n܌С#g  Ǡ ?'vv,;?ԅE+hzwQcM;=ʕ] ?jk봁q>ƽn_b:`tR'OwJYaWԩJn4a)ӦOi\VGJ(?e~ tũ8eQ\\t2lwY`qVVvCu6Z,삻z?)~Iw6ڈ"lvhᒰk׬O0~;߳/\|EIZ:]"N{ľĉӧφ-fyoژxO>TqQט*(;Y+`[x`k:ytW^dcႅNtyMF|1-jam7n =~Ȃ ]>o)gj? 
ڑ'6Cr.+##~MiWZ:?@Aq6#ztp@nԍ?Z&Ix'|a7ѱ=%q;t$,x7?0;]/ y]q~dH4ׯ[hB~~ᇽ/; }տ앟PW[ߍCDyfGXn?,Te9ҍ#'gᇻmXV`N;K]vq-~ߚ9DkWj^E~~[p eIA&NOIE4n[/8GyN\?w|xֆPyNajٺ%%.ȥ :<7q~()IMq`.MVZZf?qh1#Fv۱ M'.ZU?@ poq04[k^:Ntc)h.p$;\3DA=Z-r#:y nG˦I[R2ᇎ9pJ;DF}iP٭>)4zr ?@AqoEÏ \ʎC]v'"NǎwL|^e犸Z^^gj;{_~N XdY{iWW6I$ٳ|cXfF,v鰰]JඌeK= )}~)?|IO=JB~~sa\) wVG`ǃwEᇞwuxv̝3/S+*NE,x.v,u]\]5sl}yGLWD ?쀐p xls˼d>| .5F~~C/^riDjFb;xS޻w{)LPWWyɓx9VJ伔E{:tڛbbڝ\ jEC2ܷS`:c.-{iK) G?ċcpgQx[4#l /to/vEg۟mq6nrWmGZ+p>G؜YrU.^$4g[OձԝKoUعuzz,[VO~~ X=ui3u\Ysvg޵4$--}cd[WD =sܣZJ>Q]]s|(v!(6&[^AN ??~@ @~~ ??~@ @~~ ??t?~[}Zm9}~9.<#<#<dzΟ;GyGyH?@Ii)9}~iGyGyGG9#<#<`~$GG?@V^ r(ؿXyGyGyۏ)۷#<#<8 ?Q>aF r(X?72<#<#<%K.]GyGyHh?#<#<~5k̙3yGyGy̏j9EQEQEQEQEQT((((T~~@A?? ~~@A?? ~@A?? rf \?<_̩Bk\;׏B󭨯k=^<}Yt~ihh8s\PU_ksw?w/lo,{뎞;8s j/1'?'l1IeU^ޔSQS~)jo\\pvKcChXSS煻ue&fo3%k=aήZ{[V+C|k;4*玮ױU_pC͑DbplM}^7(Ot.g[]PU0Bd=޸z"Okⅷo5ZVn~9y؄5(O6ҏ> G߿}n\vU}ӕg^oG {O׿෿S_}С#.-9\V{] JN}yy~:Wfڛ7:5gnH~ggntpD1M6 l܆>>Yz߿C>~ ۚo"Ja[<~çZ?S6F]"͆Ym<4MJ8GϨAkovu޶?6 ?~+6,nE?uO\yŞF ][-[lv}>YhDZTf?g6&R`T}K=Ff/Jf;PUxw濳-#bGG2_b'x*SO>]WW/mN>OPHXlOfhvgqf;v쌛|;nK+-ر=ev\ύ6Pw9 M!}j'HH ^i,IuQ\[.lɋerR.pȲ܊O:p:`W?ɗ=moiMɁ/oEd[m7kZ@`y\=ʽ϶45] >y踰7W-W5#s %l3e)]5%KOkx%%KãGnn=~J& ISaã'L&0poZI7oBؠ'.hGjyt?,_6o_#@$9Q]iW՚7Y잻(OF yR$\r7)X5GؑGʫ=dOD 뗎[ BϰzU(3ۆV2Ԋc#v]s8jN,nV@`wTpV~8nVĶfm9іmQv@sNЅLk>M/_̟4tکGc _UGmJeqmJSSI%'|ɬǥO?~hjNqPP[[=h//=(xiKuf\r5CUP~;|$n/QXP{7MqD)fk׬foe*=?w.e3X2O*A֞4gɇ))nfC?l$܆m9ݑQ/PPǥz۴T|qS 0lC[5<3\vEoƔ?Pa'uSw,r>M!)Whh3}5Z՛glbWZ<+?m%%)˿ƔxCLgmnz&_t76[t6/]ez7h{BgkSnrgI.78O>ȼZC# Rq99m1LN HSi3O@?~_M~`tus2xb,65 lOn^/{jMg3-DLCH&pK4B'01aQgV\xamh?>9ɜ?I Cb>r'Qx`eWo4{^";}OZ,xw#0Qdz|U1/-["mOAyuNu3$;$p(Yc;)mىSfuOAǙI7j6naT+MZ~$ڈCuw}ɫQZd#;9Lc<hؤt~GQIWJR*ۿ~ʸߓN\=b!WG?ԒY.e ogҬoK6>QxLLGHDi qI&fݰnZVp%U[~U~7sy# ?5zRZ͏X8KE"; ?lJNu6hu i3fj4ؒ;E㮣\ɊSw:&q;c*+O'[@` 6xgG~돝6.uolYV/O_ y[  j`Æ-o,ʐ5/qSӶǍ[?;}ߩKm=l:W.F IG',] ?b~x_[pr=#aJOm[xn'jSyc:eGKټmQD/)֥ÍXwd$ inR|t7 6PGÇhELGZ-~?_жYfb!00M$aF<#5e{Xkȸm{#KGxG4i-'MvaIN~K,#0; X_m6V[I@Kߺ8wDb@71 }ksKGDžjO).@̑nqrm@bT׾%utѥ];#PȘXB|g|}҃E .WrvgGBG컴C,`v+O 7m;3b'jO^z군OII5i 
N$p0f-[Z+ m+*xVQ&cf#?w2qao˰mG9p#ǠLZIiy1g_e)/nM jZ:d9 ngi$2~4ަ7 |i-)g{ǯ4o5jkyOƉmms>.v ;'>e0M~t|L+~ĶXngGnւfm^:_@?w{=e_`rQk3CADzamS JM>_ZVv*h+4ޑn~4cPw4lf+WC?"{.`ۯδ{?L(fz;tS^mP8vOB˵Ss^ |zi,1K?_NNq=Sw'ɇ̓fs?lEPCҬeҚ:치ԒY8+_YQDa =]ZyQVq) :̔bRg_oy?v?Nj]e+~>˷Z3ZN 9df)kp "=qgf1Oɧ4cu+Wt3S]ZB*p3:—^| 1v*Z^7[꽴W]m1lOc ?$mJ2vM-9*|u}~|unw DDa [ʽn 7o>|x2<:ӬpBϛ*8OR<󹱛.zܺÆ(J9vSp3?Ɯh S@/T_Gh]َ{#Ïs@w^9G;nx'KLJ|(0$4ၓn²G$.q93!EfMh$2$wɴ#x9}GgLalD]]};w9Z?Ǜ#aqct8wWGY>?}KWƏ -7+[L2ͽj 80_KHkV -çXqboyMD3BOjPPQhh?-vQx.D/ќXtlɋq{]JS@߳wjD9~7_,#l7k+ڲݕB%M.^PwaovC]c*mM׫jl>= tHk_*z:ȽRWKzmk^/~qÇdee+l OUy:{o"U]]Zqs6'J4C碑j(K$jՐjE,дh ; 35Dȵ̓R䞹wJ<uXõө:\;#*aiL c{~]a]:/Wd-psb˗t_:|qޥ7aWZ.&~@_ؐF7?;v`n8zi ``"cf·F _92X3㝑g>}1]m[S޻~@'}T^:x|O*Q1<(0{[O8HO?w77vAx1bCB/~@A?? ~~@A?? ~~@A??` ?ÖГCqB~ ?~7O>>֭['@(P&jB!= ?7i :PܕøW~ .7_Py0¤}~nI}؁}~+_-e2jHт\HMMMEQEQEQEQEQEQR(Ab L}tuͫD ŮMQEQEQEQEQEQR(CM>i~ůt(jB"\:2 ayk~JQEQEQEQEQEQILXA" &p>.D/EQEQEQEQEQEQQbO䣧ᇛ֯((((((rs,N>z!F(((((e7^;ᇛ)EQEQEQEQEQEQT[NPEQEQEQEQEQEuzVrAQEQEQEQEQEQտEAQEQEQEQEQEQԀ*(((((T~PEQEQEQEQEQ5(((((UEQEQEQEQEQE "((((((j@EQEQEQEQEQEQ?((((((PEAQEQEQEQEQEQԀ*(((((T~PEQEQEQEQEQ5(((((UEQEQEQEQEQE "((((((j@EQEQEQEQEQEQ?((((((PEAQEQEQEQEQEQԀ*(((((T~PEQEQEQEQEQ5(((((UEQEQEQEQEQE "((((((j@EQEQEQEQEQEQz9(((((Խ~x)(((((X^L>̑((((((*'{CMkک_QEQEQEQEQEQE9&YAuaa2'|EQEQEQEQEQEQlX!&] ?S$naҎ?)EQEQEQEQEQEQS&A0ibx"~C}ч~xΝEQEQEQEQEQEQ!(Ab(hP` ?lۇM>LuVKKKsssSSS#EQEQEQEQEQEQ"(JPXAႉ@l6Ea>̂WjB1G}}@CуY= ?lۇR+znSܠAуmNmjH#=mW~h- aWjkMqBE !p׼(v $!=AUjB@v(:n߾GzBqBE~i:kjMq ?@OIzE Cq5n#w3r*@~~\|46o|'G}t֭7oGPܠAу #IVQ'? ?.] mn55~𣺺?aӦNo}2iӦ\*;;GC풢&NN|-𣦢_~!]5g .޳mN>{g 𣪪>}楗~ПR^^ǜ89͜:sϞ5ff09sly7M*unc͐^C}IYY2sǏy a]};xaU;ܷ& v[PP@џؐNθ/mtzYF;,`<̎U-SSwDGn/[yuV6Z03=VQt$@]Z^ysdܱcGEGOyƌY;wR0O"G{1}RZ|m`.sv\1Y'~U] /H|7ΏAQRR;sߋ+I&ZZji1j{P":?=7SSm$ aZcWuj(L*..JKOھ^ ?Rv_;Gt\zeʨMD,q.cwF"iSp"k>XO=^ǵ010U/[+QcZ&ܧƏCĻm۔ hxq˺ eR)7oe>p7=v au2|Sݜ[.St̜9Kqb[^v\'.m7o7z՚׾ڛnޝ]jl\lذiRx1得kZfUysT0q?Vfƾ?Neg$ry/ #7nؤצnW(~-v$r~^ju-RfFOz73l׽0p?"QzttÏܼcEVDܣw'n?iAlZ!V:T~6t7n"n)Pҥؽq) ./Laa:;_ ;醾MǾ?=t4iiAA&{ O8O],OnSs#wFqwDm`oژ? 
wϴ3zZr(@1հ7ިOw)Y3gG{ؖ6t *vNri@J[iyRiSW33P;qk^yC5L믽΁uU"k/Z90-GeV_3:Ȼ6qW[v{EMwnjyϳS9gAA#b_;)hZ˿MNλw}}y%%m/f{zxU-M_}X_|Ã)lww~nfw<+y v֔ssRku;?$-mW$gE{fսv~蚇}+W/ΏؑXLlom6#ٛ UN6.D|յW-Oǜ=tJ%'O6j؝wDm`oU+SuLzxU!Z,1X:?[y>[B>*"Aמ8%쵫V:`z?o~>5\aժ_OȭWGݻ6J!٥ۣGnʯ]nk׬=~г7e0n*4~DZ?<=KWliwtC&Ol_}uU!' @ ?5/7ǍY*W遯,%l'~Ⱥu܅L?^ Iu)C=.an%%]m,׆u ,ԗgڢPJsT术Xw/~UfdrrN2̙̤L3[ӝ};=:E)-˶JR,"J M( "r~?ᜯU{AD^f_=ZYkҋk:??gi1uWǘ7δăGfOUx;lܸhR{ "nboK_!M~ѳE0#?4Oݫ˻Z Vn?;3'.]֮B >=Do  n M1g-)}jp7Wч&.ч'W ? EUWMQ-?`~}sssC) t5?Fe'yo Nذǽt@Feeܟ\w.2cD8;Uoi~Ƚjs|iy "6,C_ۍ%^?pJ/|rϋx" ϸ+_!՜Qc!qr%}}7`)((tǝCْ?f>Lq 3p3x@|MG)'r'&7k߼; 3glwGn ɣ%x|C/XYQg;6kt!Oz\CՀddS~"%{],*e9tGҸfɩ<)K u[8+**~7 ?smt;z^wR6_ߢٴw'CITގ\>f7xt|QZE<[93"D/Gϊz 7nq+Pke߭fܱn5C{u^baNV—>iŦ8oƐ'6.Îzjܱ3hW9=*|=!Ju; ?Ct-Ulwdzz\QB=6>uݵ- [t?OYjC#-,*[Z))bbbeT (Ӱ|gvj8p4}LO?NGq#<& MEO?-bWF<4ӗ-uM7fZg.I=W avwxi.Yj C9~ܼ絖;;xمY Oͦ·$laoEeeeaaQyy9Oz~~@@?? ~~o 0jO\t `Ԟ#p=qGQY5=qǵ7FI ??/////⋯z3gw7|~~@_ ~@A?? ~~@A?? ~~@A?? ~~x]t4!~K䚵~}Ԏ]UUW\}:>s=g[:y ]u5<|szHsw+*ege1رfX)my?e*T!MHL1_=ꏌS??౹}+,="uotGk}oqꗺwitn|\v>5zQa{9Yrs5iTX%%eY'OTTRRCu֛7spVHκwQ^&xv'=-T@[6Dmٵ3ibK!666y~d ~mZsVXeމjz۶m'~Dbg؍Bt"D;gi,|Li)Ţjᐺ'55Z͒,fV˖qSmkGTJЯN:7tG444ݛg޽{ߘ?߽߫~cSSUU?8qޛ]jI֚juoÏO5rrrMTtƹ=& Ve}(-0{-mSNׯ! 
_h#] ӋLal;~%]ְRw/:nzbb2QwEkH_DY`^3a.]Fuuu C27k!lRO,Z5VEE6D/WUVZjwMu^aayL3ځf\ի]Ρw"?lSRQ[(zMon}CV&c~H=9c=IVZhZ*/RcRLe#Z:wK C<1V.X{[9CL Ӳu5_mK` ݻ/[^^a.hzR=S6k9Mvƭ[ vsv@hi;O!~~`P3^/}OB،7'6(-- ?t]7ϹwQ55gWIJڭQ+77>ۑ#=}~?FSFo~̞٪A:MS`O;wgDϺk$=`]2K[}}cj۪ٛS觞29-uܙý}=@> zkĝ*D̽JԉrOp;9_﹛v6x7,#ؿgh?xc&w69M1)ʓ6=vm;o+7W^Y}["v2Cm6\Wb~]N ?΀d`p{Ze#om\n&]qxk=]2mk{̭v"fr#iin~=w_݁ZB"mmmnz9&day_繈wdډ#.+~pv@;!Wasc U[-ކj2U8m36AY ?<fh,SqkiB_ƪ9Jhi{~ݨ[ڶ8#͎z`.Rfd&~~Nd<+͕4ms~;~S.:իcΜ XY{&,҄Q`۬w 6^jkkD݄N?t;ơݫZ<3&zS}&uX8v>37Lo(ÙnrDŽWOLje]#خN6L<9&uL^dGm#ȏp Ju-0e{f ـ3 k./~~Nd<6xjxniٵ3x{G:W566._5wze.`(3.H85?lϟ jr y xי׬<+JՇԃҊbG%0?]3ƽJ"f@j1ުIYז Gdsh's<!t:LRۦhʲ67*6,6VR-LCm۶{:U, T3ѻOvƭ[i1(EF3p0&ڵI?8wS)ݶ{ ){v؇=l6;̪w61JcMJ]{ZCWWME,X^vƠ ?){߷=ʆ ^$>=ðw2+OawcޚvK?ɴlXzrG{Vvvvzhmp/~{qafvE%99Q۩'[:OdEQS}6dee Yvmo??&hII?\ ?8r" Q^|ܶ!'Ok& forJtƭ[i1nuG-m"6ږv۝}Wf7 !qJcijjjtXmn99yNgYFy{)-[e23k, Z,ľ={ >l=++5_RRK h$ٴqclSc~?}7Uz|oxO;:6\b7eҋ }Rv1q{/~@I(#Kc]WzXg=SvoMVV>iUe筺q+'ҟ-{ G:^V֦zNx w#׬5tGlVKm4z-klQmibKMMYe ?poX~o}}q}(t17؁F3VM5YOYϖvoiZzdMQЍM[KsC}ô?SG[[>Nss;Щ㔌ǜ-B3vZq{%&P yk΃ w3=+,/r=@% =Wn= HuBl/7Ro C*ױ12nC/h) ven 9Nx!ں;4`d[#?nn†nT '\Ry^6Dh3wؚ)K#ju삽f£ni-y-m9XK;`h-ᇿB?8wS)_AAQ-<:-q-ZPx˦{];j{o)55i\nvP'НYVopoftw j3??@=zӃ0{#a))'wV|S7ho!@:yK.^y4}:o6W]AvgzpPghin -c3=h's<<ڟ?m$%%ۍZ:Wf j͆c#n?_%G)**8RV׻=MPSkjvZzglj#ݙzM]5Eu̗Kf/|تjx=LSeeeO0?)e>ucveh9PWޡ"*HMȽB-,ҎRgL h's=Q x6 :Nɠ@%J@;! P t8%J*d*@=@Щ *T9ѩ *T9 :Nɠ!T@%P dp<UT@A[yyy5<ܹ)))v%edd Ϙ1cժUoqr_={iӊKqJ*T?8%͛wЏ,--F~(XpydLL'S)TP  ?8؝;wO?]/L &;vN> ?G||~:Nɠ@%J&7n͜9KD*@=@=6npCڠ=쳃f2}6M?NdP @%P xdh's<UTq9F~XBrܹS,Yᇛ|hNdP @%f%<*@=@GE+sT#99yC?Zt&"ڨ{~(@~|fqJ*T0\~C7o5CP@U < ?7 GJJ v,V8cԩ: 'NaoP TP O]i{ =v2CP@U5m[4I8ǥKmZre8/vL4-'d\uK߰7*T:%TPEMA?O+--3K<̏𣠠 &&|A)TP *@=-j~I ?V*3~us#;::`LP @%A`+*@=-j~ ?t7o7v_.xn_<^OLY,2\@%J0v2CP@UcӦMf 󶶶0uM@S2P ~p<UT<#33sڵyyyw96oWgϞ z ~{5t8%J*CP@U nvc˖-۷.iǴ _qqnᾎtuu͜9S :Nɠ@%J?VXATP?ߵ|k Ǣ6h]Iggick~8HGѩ *T`RBTP?ԢS)TP h's<UT@A:u9%J* *@=??@S2P Nx :Nɠ@%J@;! P*~~NdP @%TP?@.Nݭw޼sm;L8@%Jh'! 
P*LN2w^T)qF%JNCP@U+7 ;/8~.AgTP dp<UT@A ܩk5]{˜gTP dp<UT@A ٩3}~_𛌺CgTP 8 ԙ>R[{~Rcr>1wƁʮN3*TAUzU~~`Bv.ݸ6p9O{}n߾)4Ψ*J~! P*LN[-.m?r~*Hɳ׮]dP @%TP&|ꍚ1U$;}⥛7oqJ3@%J?TP&XoΎR~rOYfc79%{J@%x S}Z걳Kr؄H.=TA㌽A%P p<UT@A ֩;3t-kQv7~pJ*T@;AUzU~~`wN \^گm)4ϓJմv\z4Ψ*J~! P*LN݉QGN}_/0Ǐvo-on>}nuNɠqޠP 8 i܍mm3fXjw׮]_ٳgO6dԝc.Urg_73;ۻ/_ g *J~! P*d{mu붛|,\pO.2N]sE-?L*<~|?p]A]ى.~@%P p<UT@AQqfmm̙f$&& ?l& w!:u=C6};>?pm~mދW8%{J@%x #$cӜᇛ|x9N]y6.Ss ?8%J* CP@UìN&v%I{pfjͧbr>M)TP 8 ~R7o-]4XdMeԴUt L;PmNɠ@%J?TPƆXjUcvv3f ]inԩ:X._x%?=bNt\r^__'EL*T2k\UT@A'(|\2>۩2eJJJ[N'N>zckyvԄRTնvv>}.lU7SJ*v|Z  P*##!Ï:K[ڢQ08|\@%P Tƿm\[[v033L ZO[n~p TP?0һKL{3x7J^ַw\xiR|&@%J`+ԕ}?ǿ'/~]Tvi-G;M?-d=k sǗ~ T@%JƑ:sU$^xq[SzբK$%g4@UzUL0EG7[?ɢ%u-O]?)b\b/m}C-P @%ŕ+WtUuC YWeee8^1u P*W`J[~9<՟#o~xj:gv|~Sާ7|cK.oJ*x\#?BS?~z͚5&X>JOɝ9s6lAUzU~~`)mu;['[s;4Օ؜[ D~pJ*TxG~Ï^iUu˄uY۷o'x Og~ޞ?}%/ m]}Wn֝z9ۊ>7~H dP @%E^VhQPPn7k\kppݞjm<*@=??0G~|*[ۋ_kJ?~'Ri5J>*:qL~ܸq4@%P T-CѥKj%? RRrr2CP@U өO;pNZu|u{[%#]eWu768~j9#Nɠ@%Jf\Gvvٲo>Á k׮˻̔jժ?hj,4*@=??)is/ث _؜ӤUM=f0~|-oLWݫ5' ?@ T@%P q03g˖-sطoV^a5kZAUzU~~iSTQoB~|zcΔ2kkݸyS~~'s>3u[sI-]ufu6d8ޠ`曷^3ڠlj]IvL{KB+?8 VXZpP1@UYGm4wl%_|oUҪ?8%J*xh+^㕎6m(O(kZWĢ~gḳ?@JP 8 ש?dXo|{{y [NW/oX?~TCۏ4~pJ*T@*@=??0?~g2{Lz^]YUmE_ޜ;}OU;+)TP 8 D?7_۾e 3k, J\ҷrwU2S2P p<UT@AЩG?pƟ.$NȢgӏ~uK'[#*6S2P p<UT@A Щw?L?}%7(~?)b\,I?cA%J~! P*LNWg/ɘh(矍B-HcZmFmGE+W~*Jp<UT@A'S~G|pbl-ږޘZc_UˡJmrFMɮK.߼yS2hJ`oP T@*@=??&;w֥,--qOkWRFFFuu;آ}ƌV}k׮/_ٳMV\\2l??x>ɘٜ' Gf]kVөc,1ǢCե/^Dg*J?TPǤvB $̙1SRSӦ=5s6Ж7QpBO.Sǟۯ m{q/7?YxgLqNɠ@%J?TP>f [ !444Ǽ y ?l& hNcO {OwjihjN<]ޘ-ӫ ?@JP 8 5DyyiJEdd~l2;B[4չs=M>ߩk7կ>?y{|":3H1F~@%P p<UT@AA1m-w> ,pbcc_tl2fE~,YD C˄Sk 꿛Xd%_ڜoʳN ]*hUJ\$-iahQ T* CP@Ue̝;WDccݤ/,~C>}ƈx#)itTEFԩW֝a_d}\γf4>}B3ɥOW$I7SJ*FZf?? ûr媙~(?K]{~﫲}kʕNݗ[Tx^}:&k9V7jyROv.9zS:"|V/}3y>5P n* .` (G~?͛?J3eYG993g2[,TB#]ܸqS2hJ`oP T@*@=??&fIdg-3[Uk׮˻̔j*.eYS4KU]&&#//չ5\ogΜ-[f󸟎뙯^Z!~֬YfeN~ܺu]]]3g̚SWPP8pjCѹ ۺ{z/_ǡ{_~S2hJ`oP T@*@=??&/x5Ӝ v 6ڕdhȴh*YMNNg/7u4T75<}BƠ(\v.M;qhJ*jkjtiiY8c NJڭ=! 
P*~`i+^5r"+9y?ohh>hS`lñJ>ZOm3 ?Wj'c4twF|4@%P Tݚc憵_3g=E_AUzU~~)) W4CcJҜ;cV;Ȓ9t s?]% eM熮\ gT5J.\hҋ-a7oNʢE/iGNN Bf̘v/CP@Ux she( Єf+MPwnpQV7|&6*NtS2hJ`oP T0>51Fyy٢u̖OkE7! P*<|c``P-iz|cЫ ^\wm~mދW8%@%@UV)Ml^dd>4&&patttK˱aSN&=q P*&3g U<j̑Kɇ=x~py TP ~ddd h)jjF,V nVWϭ?2X;yʔ)Z v!]o*@=<4s"X% ~ < 9.oJ*xԩ;wFwŋo9Ԙf3բEΧO X*@=&W@%Jk6n߾}Lx O[q~2d8ޠ`Qk׮3ܿYlѬY.,*@=??@AA 8*T0\fI;̙3|}tt}eWҏ0}EDDh3CP@UGKE-?]Ÿoo+Z}44@%P TMx+*jZ6?q׮$/-_oU;AUzU~~`mN[jW `ֶ8ޠ`i+^SΝio?rضTP&p1pyWݩLZ$9v<hJ*dp<UT@A)('3@%Jh'! P*.\hE?8%J*CP@U Pa3-a7o`OYe05AF{^ppG||BXbtNjG> ^!3P !CP@Uѣ&(// N4[LeRWWo ]4GQQ?p+c۩T1?ۊLWݫ_lp dP @% x xjilVP&q&*j6FFFO؟j;c--<ᇛ|ڕ4::=GsyI8%J*C \GFF,m\`??\XK?=~ܹu܊ &Xd枲Gjjڣ~pJ*T@*@=?? ˘;wFwI//^9ʕfEO>]tm?X#M:5;;[gN%׶Q_ޜ;=x}~]ξK.~Aĉo&*Td0y&@UzUc&uC?/1O1Flll\\@NO|\2>۩2eJJJ[N~ <_ɶiYU ܣKB?OZuKL *T{ X@UzU~~`#?BJ\{ʳ>If=GAAV1{&D72+n-&d8\ޠ~ X*@=??&ȏ0Gqq}Ϟ=)6())u" $vګғ]늛9ٚ()S܀J`oP T^\UT@A1Shf|=3Y<ĚeBSvsM0+k.Vî~gḳα9hJ`oP T@*@=??&-ΝK.FM97驩i׉Mf'&O *TAUzUJSS$s̖}-v̵k,dh,΅0=yNɠ@%J@TP?Hhƚ5kL&1gΜ-[f3|/ 1il#gΜ988D)TP ?8 x̡C]S.^|kS"##_a~_2K+11хg ?@ T{Jp<UT@A1i+^|ʹs=ZZ\76U5Gں5<4@%JAUzU~~`uN0̆'._Bg@%P C*@=??0QÏ67(;qҥv.Nɠ@%J@ S7tfGOxwg6|sKKKZOkU8*J?8r<UT@AשrVg= |S19_ϛVULg*J?TP&h۽#Y+~*Jquںe7n jޝ!x ;Wk^I9IgT fϞ=9s6ZJO7oޭ[ ?8 ǽBWg~*Jr #!aKbb^aF8-z)*@=??@A)TP cvhI/͖N$s;;O"/&x  *T1Vv}A###C,Fa/"#?8??8%J*x wYڸ`xlڴɤ&mmAUzUS23*T`]v}ܹ.&1Uee0cƌS# ?N1'NaobP#-D nsÏ_)jH.tJ@%ڕ+WgϞ袶~(?KgΜ9r>8cʔ))))n U/}UTq|ᇨXy ?7 t7n *T{x#?BZGK.O-f%K~p TP ?>R~Wɥ mB'P T0n#?ÜĴ_{-((O0D231SL{EA{t ǡ{opy \*{er)TP lmdg-6̖d]6///+t*@=??0)s MIWÝśJ|S2hJ5XfI;̙3|}tt}e˴e_,qm*@=??0ں%MK,h/ޝ7_j5-4|kfA15 텟~bѻu{/^ g@%7qvoqp-?q׮OF~p<UT1M:l 5s,m 35Qȡ̛7ާ5% .4_7VGN8*JƓ2?7/NTPFv$Tcbb^aU-:laM>G~kk ?@ T{J CP@UGkM&Q^^noX3[LeŋnpQt?8%J* CP@UNZvJC44.DC_|1761dP @% x ###~p6.X]cӦM&5OrehF-nԴGש# *TAUzU~~ ˘;wFwI/ܙ<*+3f  ~L>}&#.S7u;;ޜmG4IӜ4'R'*q$M" F`\AWH`!6 b`Xuؙag`@rlǎѯy_A 039ù;<,e3×|?߱[V?rʕyz&ܿ@zN'Ik!'x"‰;jZX?YQطo`ƍuy*)ѣ;s{衇ϟS{D1dߘ~e#V~5 m^7/9zXg^[[n+)8N(Μ9*p ~ #=`D1bĈiSI:<6j34iRz\ֽ?sa t ~@aR\QSSm۟~˗lciiHE79"Yn|aR{U:#m{v/^w 8N0?+?cذg|y9Zm*B8#=;<{z}tAIU&|lۗ:x9+M2t 
"v**"-H[ŖSoll*//wK/MH+?Zx֧KG^7~xJ3r=/8?ԇCp\?%Ǯ])\J"H%UǏ_reޙG<~ɧ+oݺko.Mݲ#Ӫ^fop:gp Pp ~ 7.e{ևk p\p ~g+vkck׮ ?sN'~Շ ~?#Ï7Ï)w8+8?ԇCp\?7uǚyeWomxuD+u-M2t -{;)[)_^it  @>T+ @!@;sڪSc|?v̊GN ?sN'm\~WzM]뛎}yߚ'홫j8 C}>WCySw_ݴwWW<:cճ7:gp'\~Wdp8@>W\?~h pP;v,\b --7>ԩq̙dɒ ! CI' @'x,}EaOVB\~WM28N0n7tД^L6}̙AnܸS5?ŋg͚ӧGCp\?~ccW\~@ NpOپ}G.SIdu &Lx9;pT[`b(-Cp\?~|W[jN4Ïglp+蜁 d3&B7/bVʨQ⻛7oɗϞ=; ^#TVV8tnA<WõkcyΪC~{mˁߝ! ?6?v0N'ta'˗.]O?t8q>bĈΝ;IIKz8&=ۅ-6pEp?tV|^kǎ UTT)--7>%q NByqQ<Qmp7GS6dǰwwK';;`$#ugڴSVtpƦHd톣D@X+EaNoȐ!inQd@N4@-p8 ␹ԗDÏt|sǯ՘Ncml]?+`+wKY&1m3gǃ j/6KĽŋg͚ӧO?*&|L:w~\~@9?5&U~l-ڲf: Nʏ:iuJ眇|'C}?+Dپ}GתN%`6wo8p`sfX-欵?G+<즮+ח;G7yk=蜁 ܧ55Ïmc#fUT,xrC}?+%;1VcdFzF5*y|ٳpqϖ?GFE?UZ]:gppOaD_*_aF=nsH, IXHݙ?U\~W8K.§~:hGvw7bĈu")~ĝ^%S3?C N'El[sD4;|iنNC}?+%f 0 x M GKOBpu m r{E?Orplpψ{Ǯ][YxҤIqZ[p8A;v\>ÈStv^:ȑ~WC;^QSS{G^lF[lo9u#2[o+ }W?.q,p\?jÆ=g#(8`H, IƬYZف0]j8}فysòqЈϡsN'?ԇ ~?pL6 aĈ孙;qʔ#ʿ'e}DI' Cp\@{B;v\>Ø0쨌?(3gn|yW4̙3n֟?O\˷;~5t \~WGI\Zm?Ի;>WCySWsyزڇgT+~{֚k<~UM2t țѲտ0yo\j6?xI3p8C}>WCySZQ?^]_c#g/^$C N Pm+ @!C N'?ԇ C&N'~p $ ! ;?\I8@>WC~\z59'?ԇWC~58ye ?9|ҭ[isN'~?+=2rgAqś7odp8'C}?+QU~oκ_zmV]}eMnܸ)8@>=g/4oÇWm66 ?sN'~6p ~Gh4i+SWe޶} g ?sN'~6p ~Gtq5 .X~CKˍ{wS_tyבe;קWW|tAKjVjlr-M2t ݄+W]w.[VY[[=g\~W~OW>v.M2t ݁w<=zt~p ~[s{CM7rӦM9sfzWC1:>|${qSD?N9mCkV+ihhhcPuO}f}qXE}?+f SQ ?WwtiΝ; gujjj?G?k~G/|?5aOR m/}ѥ쭤`{|ŋ<렟k>"?9j>짾0VZ!=ʕi:ێ5Ï8xl.vÏW_O~? ߈Cxd]? A?9v5RZ6ÏA<=k{oU#n#W--7Ν;ЃҾ{ ""=j?|Z%J,x*+TSۍeLt3g17R_nN['{1of:#f);/06JGW^p3q?/@Z! 
y7.Bk׮7oފ⏸G-~;{G҄ /lcD?b{ 6 Ʀ,HZpQ'gYzz0TMMm}w` ]lݜ"ZRX*{{RG6Q ?@!~?**.Yqƍϟ?wΝ;;>)^a  W\w,n8ഺ 3,_ Qpᢊ7đw('JhV\u)ڥleQ+Ι37-z-9r4fъE{[oŌvk֮]; -mA D!q|st<q/}Kjjj ]O\0r(o5c8VӧO4邆[|" o͞=Ux}h}S 4h͛MR!;~DTb3gμuŞ}uiV=1VhӦM; 2zlXg.@B-Qj~X~ʔwZ~P]YjǎR ㏟8q27l#FHL0!.Ⱦ7n\\QWƻN77_H׬X2H2֔D.\7ޘǪ6_03ttʹiӳ׏xMQ:NP'ndáCWƽz@hC{0rՓQ1hqA\obQ^n뇝;whc2S~) &O2qN^tZ#$8wdJV-Tj҄4-5 IYf65.nsxkԨQݳg{ŋwrNw,یuG+X%YR!.qKo9s665w҇3 T5#$vM9DN+_J~ڐ!Cjݙ kҼ{eن޺WeQETQ1FZ\^cJQOɦ.EIԓQ1$r?DF9f([ޮ)Ѷ:u:V9TPPKޑa~?PheƾQد_WJK s d65/V9:aY0 6f̘xzg0lذ1946,x8䳟lvM;e4!rOKN4)̘u坐}Foa*(|q7q:w{ ~ t[:ӐSX_v7J[1M;}=6 ,zAZ4Sz#$Ϊxxb&b,F|I6.CډVC>QS)FN!+z?+%lC-?@G\V9gf i)60//_N1!w@UNx[iǤҙnNiD8Z~+)-ؽ{ƊlJcYYY*;!k84ӊ)' ڋ+ל7o^{@w!K5[lHp9yiFQӲh$F6om}Q ՜N)YŘm9>p10:L.C paݘveb٨e2Q PPKR-ax ?C|u[pFQ26dOэ/˱eZ_0DbiA\%/^!4 ( dZC~8slї-ퟞ'6Maf jΘM0V0V!c2~L*ئqϴLűfL /zA{cЋ]љj+zj @!&Qp.֛oގFW_ xgzʹ#Jj.3|;K4u҈J7;B_6S'm_~P N>2uB/@Cm;SKL%yWzkFXB+0$5@fo䟵ftU ؈&]t9J/ rŋJ5*zAV出L]al2Qf.[Cgmkk/06J\ DC-?@dyXbgpņŏlo6 k9OSiP,5iIf*3!7 al+(&9{6Fe9![B8`jI1}KC3ѵrB8A=fQ0MmqCCBCmX҆Yy޼yY/8peTei0V(&g?a٠UVv0Ut:`ӦM-k(|tMb@ s~QְJjCSC]"I65KjqkeiEZlimY[m_-z3?-k_.n :$  璦͛7gvBtbw)ᨂ:xSN('z29R=[) t[:hȲMe9r4:)x֬Y=R: cŤ|Һ׷gh}tmݧ%~H^]4jG.(CT`&;JjRCrZ ~;by'jCYlxX51}8]v e[d7qM>ۋajo+>퍭c~w~3o uB~S(ڐ)Qtp\TׯDd,V||>>ͫWPPK0?@wrKp'p c?+ ~ <` ?! <*  ~=8+~pxZ?gU@!~?C ~?@!~?C ~!6|[׿{p7G :DDF)?@7!]~DD&#~D Ïo~7n"t_n޼)w~D ?ooo֭[p7G :DDF<#NZOg"#ty;w&5? P|qC~ϫ#R*qtG~M_q?WiGldbAJlā---׉Z)B"PX!…RCN#GRQGDDDDDDDDDDDԎ"J@!1dG~ͯGE( Wdq7ou"""""""""""R҄"\!|}ܿ (@"-"""""""""""Vʢ b|m?% )I)HRR{| ,G B"""""""""""…| ?G>!""""""""""" [tM^ BDDDDDDDDDDDtG"""""""""""+* ?WIADDDDDDDDDDDJ"""""""""""U~QzDDDDDDDDDDDԫ}0Bp^IENDB`docker-1.10.3/docs/examples/couchdb_data_volumes.md000066400000000000000000000032731267010174400223140ustar00rootroot00000000000000 # Dockerizing a CouchDB service > **Note**: > - **If you don't like sudo** then see [*Giving non-root > access*](../installation/binaries.md#giving-non-root-access) Here's an example of using data volumes to share the same data between two CouchDB containers. 
This could be used for hot upgrades, testing different versions of CouchDB on the same data, etc. ## Create first database Note that we're marking `/var/lib/couchdb` as a data volume. $ COUCH1=$(docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03) ## Add data to the first database We're assuming your Docker host is reachable at `localhost`. If not, replace `localhost` with the public IP of your Docker host. $ HOST=localhost $ URL="http://$HOST:$(docker port $COUCH1 5984 | grep -o '[1-9][0-9]*$')/_utils/" $ echo "Navigate to $URL in your browser, and use the couch interface to add data" ## Create second database This time, we're requesting shared access to `$COUCH1`'s volumes. $ COUCH2=$(docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03) ## Browse data on the second database $ HOST=localhost $ URL="http://$HOST:$(docker port $COUCH2 5984 | grep -o '[1-9][0-9]*$')/_utils/" $ echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!' Congratulations, you are now running two Couchdb containers, completely isolated from each other *except* for their data. docker-1.10.3/docs/examples/index.md000066400000000000000000000014131267010174400172430ustar00rootroot00000000000000 # Dockerize an application This section contains the following: * [Dockerizing MongoDB](mongodb.md) * [Dockerizing PostgreSQL](postgresql_service.md) * [Dockerizing a CouchDB service](couchdb_data_volumes.md) * [Dockerizing a Node.js web app](nodejs_web_app.md) * [Dockerizing a Redis service](running_redis_service.md) * [Dockerizing an apt-cacher-ng service](apt-cacher-ng.md) * [Dockerizing applications: A 'Hello world'](../userguide/containers/dockerizing.md) docker-1.10.3/docs/examples/mongodb.md000066400000000000000000000152271267010174400175710ustar00rootroot00000000000000 # Dockerizing MongoDB ## Introduction In this example, we are going to learn how to build a Docker image with MongoDB pre-installed. 
We'll also see how to `push` that image to the [Docker Hub registry](https://hub.docker.com) and share it with others! > **Note:** This guide will show the mechanics of building a MongoDB container, but > you will probably want to use the official image on [Docker Hub]( https://registry.hub.docker.com/_/mongo/) Using Docker and containers for deploying [MongoDB](https://www.mongodb.org/) instances will bring several benefits, such as: - Easy to maintain, highly configurable MongoDB instances; - Ready to run and start working within milliseconds; - Based on globally accessible and shareable images. > **Note:** > > If you do **_not_** like `sudo`, you might want to check out: > [*Giving non-root access*](../installation/binaries.md#giving-non-root-access). ## Creating a Dockerfile for MongoDB Let's create our `Dockerfile` and start building it: $ nano Dockerfile Although optional, it is handy to have comments at the beginning of a `Dockerfile` explaining its purpose: # Dockerizing MongoDB: Dockerfile for building MongoDB images # Based on ubuntu:latest, installs MongoDB following the instructions from: # http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ > **Tip:** `Dockerfile`s are flexible. However, they need to follow a certain > format. The first item to be defined is the name of an image, which becomes > the *parent* of your *Dockerized MongoDB* image. We will build our image using the latest version of Ubuntu from the [Docker Hub Ubuntu](https://registry.hub.docker.com/_/ubuntu/) repository. # Format: FROM repository[:version] FROM ubuntu:latest Continuing, we will declare the `MAINTAINER` of the `Dockerfile`: # Format: MAINTAINER Name MAINTAINER M.Y. Name > **Note:** Although Ubuntu systems have MongoDB packages, they are likely to > be outdated. Therefore in this example, we will use the official MongoDB > packages. We will begin with importing the MongoDB public GPG key. We will also create a MongoDB repository file for the package manager. 
# Installation: # Import MongoDB public GPG key AND create a MongoDB list file RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list After this initial preparation we can update our packages and install MongoDB. # Update apt-get sources AND install MongoDB RUN apt-get update && apt-get install -y mongodb-org > **Tip:** You can install a specific version of MongoDB by using a list > of required packages with versions, e.g.: > > RUN apt-get update && apt-get install -y mongodb-org=3.0.1 mongodb-org-server=3.0.1 mongodb-org-shell=3.0.1 mongodb-org-mongos=3.0.1 mongodb-org-tools=3.0.1 MongoDB requires a data directory. Let's create it as the final step of our installation instructions. # Create the MongoDB data directory RUN mkdir -p /data/db Lastly we set the `ENTRYPOINT` which will tell Docker to run `mongod` inside the containers launched from our MongoDB image. And for ports, we will use the `EXPOSE` instruction. # Expose port 27017 from the container to the host EXPOSE 27017 # Set usr/bin/mongod as the dockerized entry-point application ENTRYPOINT ["/usr/bin/mongod"] Now save the file and let's build our image. > **Note:** > > The full version of this `Dockerfile` can be found [here](https://github.com/docker/docker/blob/master/docs/examples/mongodb/Dockerfile). ## Building the MongoDB Docker image With our `Dockerfile`, we can now build the MongoDB image using Docker. Unless experimenting, it is always a good practice to tag Docker images by passing the `--tag` option to `docker build` command. # Format: docker build --tag/-t / . # Example: $ docker build --tag my/repo . Once this command is issued, Docker will go through the `Dockerfile` and build the image. The final image will be tagged `my/repo`. 
## Pushing the MongoDB image to Docker Hub All Docker image repositories can be hosted and shared on [Docker Hub](https://hub.docker.com) with the `docker push` command. For this, you need to be logged-in. # Log-in $ docker login Username: .. # Push the image # Format: docker push / $ docker push my/repo The push refers to a repository [my/repo] (len: 1) Sending image list Pushing repository my/repo (1 tags) .. ## Using the MongoDB image Using the MongoDB image we created, we can run one or more MongoDB instances as daemon process(es). # Basic way # Usage: docker run --name -d / $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo # Dockerized MongoDB, lean and mean! # Usage: docker run --name -d / --noprealloc --smallfiles $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo --smallfiles # Checking out the logs of a MongoDB container # Usage: docker logs $ docker logs mongo_instance_001 # Playing with MongoDB # Usage: mongo --port $ mongo --port 27017 # If using docker-machine # Usage: mongo --port --host $ mongo --port 27017 --host 192.168.59.103 > **Tip:** If you want to run two containers on the same engine, then you will need to map the exposed port to two different ports on the host # Start two containers and map the ports $ docker run -p 28001:27017 --name mongo_instance_001 -d my/repo $ docker run -p 28002:27017 --name mongo_instance_002 -d my/repo # Now you can connect to each MongoDB instance on the two ports $ mongo --port 28001 $ mongo --port 28002 - [Linking containers](../userguide/networking/default_network/dockerlinks.md) - [Cross-host linking containers](../admin/ambassador_pattern_linking.md) - [Creating an Automated Build](https://docs.docker.com/docker-hub/builds/) docker-1.10.3/docs/examples/mongodb/000077500000000000000000000000001267010174400172405ustar00rootroot00000000000000docker-1.10.3/docs/examples/mongodb/Dockerfile000066400000000000000000000015511267010174400212340ustar00rootroot00000000000000# Dockerizing 
MongoDB: Dockerfile for building MongoDB images # Based on ubuntu:latest, installs MongoDB following the instructions from: # http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ FROM ubuntu:latest MAINTAINER Docker # Installation: # Import MongoDB public GPG key AND create a MongoDB list file RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list # Update apt-get sources AND install MongoDB RUN apt-get update && apt-get install -y mongodb-org # Create the MongoDB data directory RUN mkdir -p /data/db # Expose port #27017 from the container to the host EXPOSE 27017 # Set /usr/bin/mongod as the dockerized entry-point application ENTRYPOINT ["/usr/bin/mongod"] docker-1.10.3/docs/examples/nodejs_web_app.md000066400000000000000000000136341267010174400211230ustar00rootroot00000000000000 # Dockerizing a Node.js web app > **Note**: > - **If you don't like sudo** then see [*Giving non-root > access*](../installation/binaries.md#giving-non-root-access) The goal of this example is to show you how you can build your own Docker images from a parent image using a `Dockerfile` . We will do that by making a simple Node.js hello world web application running on CentOS. You can get the full source code at[https://github.com/enokd/docker-node-hello/](https://github.com/enokd/docker-node-hello/). ## Create Node.js app First, create a directory `src` where all the files would live. 
Then create a `package.json` file that describes your app and its dependencies: { "name": "docker-centos-hello", "private": true, "version": "0.0.1", "description": "Node.js Hello world app on CentOS using docker", "author": "Daniel Gasienica ", "dependencies": { "express": "3.2.4" } } Then, create an `index.js` file that defines a web app using the [Express.js](http://expressjs.com/) framework: var express = require('express'); // Constants var PORT = 8080; // App var app = express(); app.get('/', function (req, res) { res.send('Hello world\n'); }); app.listen(PORT); console.log('Running on http://localhost:' + PORT); In the next steps, we'll look at how you can run this app inside a CentOS container using Docker. First, you'll need to build a Docker image of your app. ## Creating a Dockerfile Create an empty file called `Dockerfile`: touch Dockerfile Open the `Dockerfile` in your favorite text editor Define the parent image you want to use to build your own image on top of. Here, we'll use [CentOS](https://registry.hub.docker.com/_/centos/) (tag: `centos6`) available on the [Docker Hub](https://hub.docker.com/): FROM centos:centos6 Since we're building a Node.js app, you'll have to install Node.js as well as npm on your CentOS image. Node.js is required to run your app and npm is required to install your app's dependencies defined in `package.json`. To install the right package for CentOS, we'll use the instructions from the [Node.js wiki]( https://github.com/joyent/node/wiki/Installing-Node.js- via-package-manager#rhelcentosscientific-linux-6): # Enable Extra Packages for Enterprise Linux (EPEL) for CentOS RUN yum install -y epel-release # Install Node.js and npm RUN yum install -y nodejs npm Install your app dependencies using the `npm` binary: # Install app dependencies COPY package.json /src/package.json RUN cd /src; npm install To bundle your app's source code inside the Docker image, use the `COPY` instruction: # Bundle app source COPY . 
/src Your app binds to port `8080` so you'll use the `EXPOSE` instruction to have it mapped by the `docker` daemon: EXPOSE 8080 Last but not least, define the command to run your app using `CMD` which defines your runtime, i.e. `node`, and the path to our app, i.e. `src/index.js` (see the step where we added the source to the container): CMD ["node", "/src/index.js"] Your `Dockerfile` should now look like this: FROM centos:centos6 # Enable Extra Packages for Enterprise Linux (EPEL) for CentOS RUN yum install -y epel-release # Install Node.js and npm RUN yum install -y nodejs npm # Install app dependencies COPY package.json /src/package.json RUN cd /src; npm install # Bundle app source COPY . /src EXPOSE 8080 CMD ["node", "/src/index.js"] ## Building your image Go to the directory that has your `Dockerfile` and run the following command to build a Docker image. The `-t` flag lets you tag your image so it's easier to find later using the `docker images` command: $ docker build -t /centos-node-hello . Your image will now be listed by Docker: $ docker images # Example REPOSITORY TAG ID CREATED centos centos6 539c0211cd76 8 weeks ago /centos-node-hello latest d64d3505b0d2 2 hours ago ## Run the image Running your image with `-d` runs the container in detached mode, leaving the container running in the background. The `-p` flag redirects a public port to a private port in the container. Run the image you previously built: $ docker run -p 49160:8080 -d /centos-node-hello Print the output of your app: # Get container ID $ docker ps # Print app output $ docker logs # Example Running on http://localhost:8080 ## Test To test your app, get the port of your app that Docker mapped: $ docker ps # Example ID IMAGE COMMAND ... PORTS ecce33b30ebf /centos-node-hello:latest node /src/index.js 49160->8080 In the example above, Docker mapped the `8080` port of the container to `49160`. 
Now you can call your app using `curl` (install if needed via: `sudo apt-get install curl`): $ curl -i localhost:49160 HTTP/1.1 200 OK X-Powered-By: Express Content-Type: text/html; charset=utf-8 Content-Length: 12 Date: Sun, 02 Jun 2013 03:53:22 GMT Connection: keep-alive Hello world If you use Docker Machine on OS X, the port is actually mapped to the Docker host VM, and you should use the following command: $ curl $(docker-machine ip VM_NAME):49160 We hope this tutorial helped you get up and running with Node.js and CentOS on Docker. You can get the full source code at [https://github.com/enokd/docker-node-hello/](https://github.com/enokd/docker-node-hello/). docker-1.10.3/docs/examples/postgresql_service.Dockerfile000066400000000000000000000045451267010174400235370ustar00rootroot00000000000000# # example Dockerfile for https://docs.docker.com/examples/postgresql_service/ # FROM ubuntu MAINTAINER SvenDowideit@docker.com # Add the PostgreSQL PGP key to verify their Debian packages. # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 # Add PostgreSQL's repository. It contains the most recent stable release # of PostgreSQL, ``9.3``. RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 # There are some warnings (in red) that show up during the build. 
You can hide # them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 # Note: The official Debian and Ubuntu images automatically ``apt-get clean`` # after each ``apt-get`` # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` USER postgres # Create a PostgreSQL role named ``docker`` with ``docker`` as the password and # then create a database `docker` owned by the ``docker`` role. # Note: here we use ``&&\`` to run commands one after the other - the ``\`` # allows the RUN command to span multiple lines. RUN /etc/init.d/postgresql start &&\ psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\ createdb -O docker docker # Adjust PostgreSQL configuration so that remote connections to the # database are possible. RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf # And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf`` RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf # Expose the PostgreSQL port EXPOSE 5432 # Add VOLUMEs to allow backup of config, logs and databases VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] # Set the default command to run when starting the container CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] docker-1.10.3/docs/examples/postgresql_service.md000066400000000000000000000136271267010174400220710ustar00rootroot00000000000000 # Dockerizing PostgreSQL > **Note**: > - **If you don't like sudo** then see [*Giving non-root > access*](../installation/binaries.md#giving-non-root-access) ## Installing PostgreSQL on Docker Assuming there is no Docker image that suits your needs on the [Docker 
Hub](http://hub.docker.com), you can create one yourself. Start by creating a new `Dockerfile`: > **Note**: > This PostgreSQL setup is for development-only purposes. Refer to the > PostgreSQL documentation to fine-tune these settings so that it is > suitably secure. # # example Dockerfile for https://docs.docker.com/examples/postgresql_service/ # FROM ubuntu MAINTAINER SvenDowideit@docker.com # Add the PostgreSQL PGP key to verify their Debian packages. # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 # Add PostgreSQL's repository. It contains the most recent stable release # of PostgreSQL, ``9.3``. RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 # There are some warnings (in red) that show up during the build. You can hide # them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 # Note: The official Debian and Ubuntu images automatically ``apt-get clean`` # after each ``apt-get`` # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` USER postgres # Create a PostgreSQL role named ``docker`` with ``docker`` as the password and # then create a database `docker` owned by the ``docker`` role. # Note: here we use ``&&\`` to run commands one after the other - the ``\`` # allows the RUN command to span multiple lines. 
RUN /etc/init.d/postgresql start &&\ psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\ createdb -O docker docker # Adjust PostgreSQL configuration so that remote connections to the # database are possible. RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf # And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf`` RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf # Expose the PostgreSQL port EXPOSE 5432 # Add VOLUMEs to allow backup of config, logs and databases VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] # Set the default command to run when starting the container CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] Build an image from the Dockerfile assign it a name. $ docker build -t eg_postgresql . And run the PostgreSQL server container (in the foreground): $ docker run --rm -P --name pg_test eg_postgresql There are 2 ways to connect to the PostgreSQL server. We can use [*Link Containers*](../userguide/networking/default_network/dockerlinks.md), or we can access it from our host (or the network). > **Note**: > The `--rm` removes the container and its image when > the container exits successfully. ### Using container linking Containers can be linked to another container's ports directly using `-link remote_name:local_alias` in the client's `docker run`. This will set a number of environment variables that can then be used to connect: $ docker run --rm -t -i --link pg_test:pg eg_postgresql bash postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password ### Connecting from your host system Assuming you have the postgresql-client installed, you can use the host-mapped port to test as well. 
You need to use `docker ps` to find out what local host port the container is mapped to first: $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test $ psql -h localhost -p 49153 -d docker -U docker --password ### Testing the database Once you have authenticated and have a `docker =#` prompt, you can create a table and populate it. psql (9.3.1) Type "help" for help. $ docker=# CREATE TABLE cities ( docker(# name varchar(80), docker(# location point docker(# ); CREATE TABLE $ docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); INSERT 0 1 $ docker=# select * from cities; name | location ---------------+----------- San Francisco | (-194,53) (1 row) ### Using the container volumes You can use the defined volumes to inspect the PostgreSQL log files and to backup your configuration and data: $ docker run --rm --volumes-from pg_test -t -i busybox sh / # ls bin etc lib linuxrc mnt proc run sys usr dev home lib64 media opt root sbin tmp var / # ls /etc/postgresql/9.3/main/ environment pg_hba.conf postgresql.conf pg_ctl.conf pg_ident.conf start.conf /tmp # ls /var/log ldconfig postgresql docker-1.10.3/docs/examples/running_redis_service.md000066400000000000000000000054151267010174400225300ustar00rootroot00000000000000 # Dockerizing a Redis service Very simple, no frills, Redis service attached to a web application using a link. ## Create a Docker container for Redis Firstly, we create a `Dockerfile` for our new Redis image. FROM ubuntu:14.04 RUN apt-get update && apt-get install -y redis-server EXPOSE 6379 ENTRYPOINT ["/usr/bin/redis-server"] Next we build an image from our `Dockerfile`. Replace `` with your own user name. $ docker build -t /redis . ## Run the service Use the image we've just created and name your container `redis`. 
Running the service with `-d` runs the container in detached mode, leaving the container running in the background. Importantly, we're not exposing any ports on our container. Instead we're going to use a container link to provide access to our Redis database. $ docker run --name redis -d /redis ## Create your web application container Next we can create a container for our application. We're going to use the `-link` flag to create a link to the `redis` container we've just created with an alias of `db`. This will create a secure tunnel to the `redis` container and expose the Redis instance running inside that container to only this container. $ docker run --link redis:db -i -t ubuntu:14.04 /bin/bash Once inside our freshly created container we need to install Redis to get the `redis-cli` binary to test our connection. $ sudo apt-get update $ sudo apt-get install redis-server $ sudo service redis-server stop As we've used the `--link redis:db` option, Docker has created some environment variables in our web application container. $ env | grep DB_ # Should return something similar to this with your values DB_NAME=/violet_wolf/db DB_PORT_6379_TCP_PORT=6379 DB_PORT=tcp://172.17.0.33:6379 DB_PORT_6379_TCP=tcp://172.17.0.33:6379 DB_PORT_6379_TCP_ADDR=172.17.0.33 DB_PORT_6379_TCP_PROTO=tcp We can see that we've got a small list of environment variables prefixed with `DB`. The `DB` comes from the link alias specified when we launched the container. Let's use the `DB_PORT_6379_TCP_ADDR` variable to connect to our Redis container. $ redis-cli -h $DB_PORT_6379_TCP_ADDR $ redis 172.17.0.33:6379> $ redis 172.17.0.33:6379> set docker awesome OK $ redis 172.17.0.33:6379> get docker "awesome" $ redis 172.17.0.33:6379> exit We could easily use this or other environment variables in our web application to make a connection to our `redis` container. 
docker-1.10.3/docs/examples/running_riak_service.Dockerfile000066400000000000000000000017531267010174400240200ustar00rootroot00000000000000# Riak # # VERSION 0.1.1 # Use the Ubuntu base image provided by dotCloud FROM ubuntu:trusty MAINTAINER Hector Castro hector@basho.com # Install Riak repository before we do apt-get update, so that update happens # in a single step RUN apt-get install -q -y curl && \ curl -fsSL https://packagecloud.io/install/repositories/basho/riak/script.deb | sudo bash # Install and setup project dependencies RUN apt-get update && \ apt-get install -y supervisor riak=2.0.5-1 RUN mkdir -p /var/log/supervisor RUN locale-gen en_US en_US.UTF-8 COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf # Configure Riak to accept connections from any host RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf # Expose Riak Protocol Buffers and HTTP interfaces EXPOSE 8087 8098 CMD ["/usr/bin/supervisord"] docker-1.10.3/docs/examples/running_riak_service.md000066400000000000000000000063071267010174400223510ustar00rootroot00000000000000 # Dockerizing a Riak service The goal of this example is to show you how to build a Docker image with Riak pre-installed. ## Creating a Dockerfile Create an empty file called `Dockerfile`: $ touch Dockerfile Next, define the parent image you want to use to build your image on top of. We'll use [Ubuntu](https://registry.hub.docker.com/_/ubuntu/) (tag: `trusty`), which is available on [Docker Hub](https://hub.docker.com): # Riak # # VERSION 0.1.1 # Use the Ubuntu base image provided by dotCloud FROM ubuntu:trusty MAINTAINER Hector Castro hector@basho.com After that, we install the curl which is used to download the repository setup script and we download the setup script and run it. 
# Install Riak repository before we do apt-get update, so that update happens # in a single step RUN apt-get install -q -y curl && \ curl -fsSL https://packagecloud.io/install/repositories/basho/riak/script.deb | sudo bash Then we install and setup a few dependencies: - `supervisor` is used manage the Riak processes - `riak=2.0.5-1` is the Riak package coded to version 2.0.5 # Install and setup project dependencies RUN apt-get update && \ apt-get install -y supervisor riak=2.0.5-1 RUN mkdir -p /var/log/supervisor RUN locale-gen en_US en_US.UTF-8 COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf After that, we modify Riak's configuration: # Configure Riak to accept connections from any host RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf Then, we expose the Riak Protocol Buffers and HTTP interfaces: # Expose Riak Protocol Buffers and HTTP interfaces EXPOSE 8087 8098 Finally, run `supervisord` so that Riak is started: CMD ["/usr/bin/supervisord"] ## Create a supervisord configuration file Create an empty file called `supervisord.conf`. Make sure it's at the same directory level as your `Dockerfile`: touch supervisord.conf Populate it with the following program definitions: [supervisord] nodaemon=true [program:riak] command=bash -c "/usr/sbin/riak console" numprocs=1 autostart=true autorestart=true user=riak environment=HOME="/var/lib/riak" stdout_logfile=/var/log/supervisor/%(program_name)s.log stderr_logfile=/var/log/supervisor/%(program_name)s.log ## Build the Docker image for Riak Now you should be able to build a Docker image for Riak: $ docker build -t "/riak" . ## Next steps Riak is a distributed database. Many production deployments consist of [at least five nodes]( http://basho.com/why-your-riak-cluster-should-have-at-least-five-nodes/). 
See the [docker-riak](https://github.com/hectcastro/docker-riak) project details on how to deploy a Riak cluster using Docker and Pipework. docker-1.10.3/docs/examples/running_ssh_service.Dockerfile000066400000000000000000000011311267010174400236550ustar00rootroot00000000000000# sshd # # VERSION 0.0.2 FROM ubuntu:14.04 MAINTAINER Sven Dowideit RUN apt-get update && apt-get install -y openssh-server RUN mkdir /var/run/sshd RUN echo 'root:screencast' | chpasswd RUN sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config # SSH login fix. Otherwise user is kicked off after login RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd ENV NOTVISIBLE "in users profile" RUN echo "export VISIBLE=now" >> /etc/profile EXPOSE 22 CMD ["/usr/sbin/sshd", "-D"] docker-1.10.3/docs/examples/running_ssh_service.md000066400000000000000000000050171267010174400222150ustar00rootroot00000000000000 # Dockerizing an SSH daemon service ## Build an `eg_sshd` image The following `Dockerfile` sets up an SSHd service in a container that you can use to connect to and inspect other container's volumes, or to get quick access to a test container. # sshd # # VERSION 0.0.2 FROM ubuntu:14.04 MAINTAINER Sven Dowideit RUN apt-get update && apt-get install -y openssh-server RUN mkdir /var/run/sshd RUN echo 'root:screencast' | chpasswd RUN sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config # SSH login fix. Otherwise user is kicked off after login RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd ENV NOTVISIBLE "in users profile" RUN echo "export VISIBLE=now" >> /etc/profile EXPOSE 22 CMD ["/usr/sbin/sshd", "-D"] Build the image using: $ docker build -t eg_sshd . ## Run a `test_sshd` container Then run it. 
You can then use `docker port` to find out what host port the container's port 22 is mapped to: $ docker run -d -P --name test_sshd eg_sshd $ docker port test_sshd 22 0.0.0.0:49154 And now you can ssh as `root` on the container's IP address (you can find it with `docker inspect`) or on port `49154` of the Docker daemon's host IP address (`ip address` or `ifconfig` can tell you that) or `localhost` if on the Docker daemon host: $ ssh root@192.168.1.2 -p 49154 # The password is ``screencast``. $$ ## Environment variables Using the `sshd` daemon to spawn shells makes it complicated to pass environment variables to the user's shell via the normal Docker mechanisms, as `sshd` scrubs the environment before it starts the shell. If you're setting values in the `Dockerfile` using `ENV`, you'll need to push them to a shell initialization file like the `/etc/profile` example in the `Dockerfile` above. If you need to pass`docker run -e ENV=value` values, you will need to write a short script to do the same before you start `sshd -D` and then replace the `CMD` with that script. ## Clean up Finally, clean up after your test by stopping and removing the container, and then removing the image. $ docker stop test_sshd $ docker rm test_sshd $ docker rmi eg_sshd docker-1.10.3/docs/examples/supervisord.conf000066400000000000000000000004331267010174400210470ustar00rootroot00000000000000[supervisord] nodaemon=true [program:riak] command=bash -c "/usr/sbin/riak console" numprocs=1 autostart=true autorestart=true user=riak environment=HOME="/var/lib/riak" stdout_logfile=/var/log/supervisor/%(program_name)s.log stderr_logfile=/var/log/supervisor/%(program_name)s.log docker-1.10.3/docs/extend/000077500000000000000000000000001267010174400152645ustar00rootroot00000000000000docker-1.10.3/docs/extend/authorization.md000066400000000000000000000260201267010174400205060ustar00rootroot00000000000000 # Create an authorization plugin Docker's out-of-the-box authorization model is all or nothing. 
Any user with permission to access the Docker daemon can run any Docker client command. The same is true for callers using Docker's remote API to contact the daemon. If you require greater access control, you can create authorization plugins and add them to your Docker daemon configuration. Using an authorization plugin, a Docker administrator can configure granular access policies for managing access to Docker daemon. Anyone with the appropriate skills can develop an authorization plugin. These skills, at their most basic, are knowledge of Docker, understanding of REST, and sound programming knowledge. This document describes the architecture, state, and methods information available to an authorization plugin developer. ## Basic principles Docker's [plugin infrastructure](plugin_api.md) enables extending Docker by loading, removing and communicating with third-party components using a generic API. The access authorization subsystem was built using this mechanism. Using this subsystem, you don't need to rebuild the Docker daemon to add an authorization plugin. You can add a plugin to an installed Docker daemon. You do need to restart the Docker daemon to add a new plugin. An authorization plugin approves or denies requests to the Docker daemon based on both the current authentication context and the command context. The authentication context contains all user details and the authentication method. The command context contains all the relevant request data. Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). Each plugin must reside within directories described under the [Plugin discovery](plugin_api.md#plugin-discovery) section. **Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication respectively. ## Basic architecture You are responsible for registering your plugin as part of the Docker daemon startup. You can install multiple plugins and chain them together. This chain can be ordered. 
Each request to the daemon passes in order through the chain. Only when all the plugins grant access to the resource, is the access granted. When an HTTP request is made to the Docker daemon through the CLI or via the remote API, the authentication subsystem passes the request to the installed authentication plugin(s). The request contains the user (caller) and command context. The plugin is responsible for deciding whether to allow or deny the request. The sequence diagrams below depict an allow and deny authorization flow: ![Authorization Allow flow](images/authz_allow.png) ![Authorization Deny flow](images/authz_deny.png) Each request sent to the plugin includes the authenticated user, the HTTP headers, and the request/response body. Only the user name and the authentication method used are passed to the plugin. Most importantly, no user credentials or tokens are passed. Finally, not all request/response bodies are sent to the authorization plugin. Only those request/response bodies where the `Content-Type` is either `text/*` or `application/json` are sent. For commands that can potentially hijack the HTTP connection (`HTTP Upgrade`), such as `exec`, the authorization plugin is only called for the initial HTTP requests. Once the plugin approves the command, authorization is not applied to the rest of the flow. Specifically, the streaming data is not passed to the authorization plugins. For commands that return chunked HTTP response, such as `logs` and `events`, only the HTTP request is sent to the authorization plugins. During request/response processing, some authorization flows might need to do additional queries to the Docker daemon. To complete such flows, plugins can call the daemon API similar to a regular user. To enable these additional queries, the plugin must provide the means for an administrator to configure proper authentication and security policies. 
## Docker client flows To enable and configure the authorization plugin, the plugin developer must support the Docker client interactions detailed in this section. ### Setting up Docker daemon Enable the authorization plugin with a dedicated command line flag in the `--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID` value. This value can be the plugin’s socket or a path to a specification file. ```bash $ docker daemon --authorization-plugin=plugin1 --authorization-plugin=plugin2,... ``` Docker's authorization subsystem supports multiple `--authorization-plugin` parameters. ### Calling authorized command (allow) ```bash $ docker pull centos ... f1b10cd84249: Pull complete ... ``` ### Calling unauthorized command (deny) ```bash $ docker pull centos ... docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed. ``` ### Error from plugins ```bash $ docker pull centos ... docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?. ``` ## API schema and implementation In addition to Docker's standard plugin registration method, each plugin should implement the following two methods: * `/AuthzPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request. * `/AuthzPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client. 
#### /AuthzPlugin.AuthZReq **Request**: ```json { "User": "The user identification", "UserAuthNMethod": "The authentication method used", "RequestMethod": "The HTTP method", "RequestUri": "The HTTP request URI", "RequestBody": "Byte array containing the raw HTTP request body", "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string ", "RequestStatusCode": "Request status code" } ``` **Response**: ```json { "Allow": "Determined whether the user is allowed or not", "Msg": "The authorization message", "Err": "The error message if things go wrong" } ``` #### /AuthzPlugin.AuthZRes **Request**: ```json { "User": "The user identification", "UserAuthNMethod": "The authentication method used", "RequestMethod": "The HTTP method", "RequestUri": "The HTTP request URI", "RequestBody": "Byte array containing the raw HTTP request body", "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string", "RequestStatusCode": "Request status code", "ResponseBody": "Byte array containing the raw HTTP response body", "ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string", "ResponseStatusCode":"Response status code" } ``` **Response**: ```json { "Allow": "Determined whether the user is allowed or not", "Msg": "The authorization message", "Err": "The error message if things go wrong", "ModifiedBody": "Byte array containing a modified body of the raw HTTP body (or nil if no changes required)", "ModifiedHeader": "Byte array containing a modified header of the HTTP response (or nil if no changes required)", "ModifiedStatusCode": "int containing the modified version of the status code (or 0 if not change is required)" } ``` The modified response enables the authorization plugin to manipulate the content of the HTTP response. In case of more than one plugin, each subsequent plugin receives a response (optionally) modified by a previous plugin. 
### Request authorization Each plugin must support two request authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. #### Daemon -> Plugin Name | Type | Description -----------------------|-------------------|------------------------------------------------------- User | string | The user identification Authentication method | string | The authentication method used Request method | enum | The HTTP method (GET/DELETE/POST) Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) Request headers | map[string]string | Request headers as key value pairs (without the authorization header) Request body | []byte | Raw request body #### Plugin -> Daemon Name | Type | Description --------|--------|---------------------------------------------------------------------------------- Allow | bool | Boolean value indicating whether the request is allowed or denied Msg | string | Authorization message (will be returned to the client in case the access is denied) Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) ### Response authorization The plugin must support two authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. 
#### Daemon -> Plugin Name | Type | Description ----------------------- |------------------ |---------------------------------------------------- User | string | The user identification Authentication method | string | The authentication method used Request method | string | The HTTP method (GET/DELETE/POST) Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) Request headers | map[string]string | Request headers as key value pairs (without the authorization header) Request body | []byte | Raw request body Response status code | int | Status code from the docker daemon Response headers | map[string]string | Response headers as key value pairs Response body | []byte | Raw docker daemon response body #### Plugin -> Daemon Name | Type | Description --------|--------|---------------------------------------------------------------------------------- Allow | bool | Boolean value indicating whether the response is allowed or denied Msg | string | Authorization message (will be returned to the client in case the access is denied) Err | string | Error message (will be returned to the client in case the plugin encounter an error. 
The string value supplied may appear in logs, so should not include confidential information) docker-1.10.3/docs/extend/images/000077500000000000000000000000001267010174400165315ustar00rootroot00000000000000docker-1.10.3/docs/extend/images/authz_additional_info.png000066400000000000000000001315341267010174400236040ustar00rootroot00000000000000PNG  IHDR$w IDATx Mǟ{8p%c_[[Ÿ޽;}}&F%$ޘ^~]>,˗/dٲeqwСCҼysiٲ8q" g`į]&M6/R\>xH(P m*E;V^*Yd-RݖFE@Ivk׮N~嗥L2o~1Ν)N"tYaHM 6I]΄NQ~Xj׮mNS"E2#`4hCDg޼ynK{878q:)Ȑ!㮻r/qkյB 92ex__~*YIwn89sk{ym}cĉի;PftM6;wy6k̰ATq^x> >3ft;w;+qʕs1;'Otӑ?~G,Y?cǎu'l޼-H>#OXj/Y[F7;V3HOk_ޚg[8]%HΟ?n:xݼySeqQ%@?y[xqeQJT7V<DZP?qAbŖ{.9r8~'eX_+ tخ^ZQL2N7_GlڴI=XBJ$cmVc(OЫWT|WO֦_VQOQWoC*D_C@q4iҨWdIGT:y .j~aTĴ:n _l -LU0v}q}uu5oWZegl$rٱz񷏡+/ aI߾}%?t;q}g1*޺uku>))1l0ǖ-[T|Dm׮<ʖ-裏 .tTP8 iӪ2;vf3fpC;.]z QQp3,vZ5O n۶1n8#OkժXf*TRys{XM-2(||G "_mይն m S]_}'OV#mlf}FPtrf<; ˩ mի ԩSƷ:8Aq p'޽{25[8`3aʞbǏ7'K6u:ܹsG}D]1r 8gHG/jji*URyc iἾV#'6,_}Lt@(؋$rCޞmrzoF.S1VS^80ߌͼoQij _`tՎBu-|Vj :co}tĈ:!"=e'}̪}nw ۡmJ*u6ly*o~AܹQFnݺ%?\|Y< e˖UStiq"+*w}*e֭\rƍn&xLr=ǎgϪ}qPbEԩT^]/^, G .},Η/QVXu]q y>ʥnݺN}%C 3%`IkwSYSoRJe{i6ȑÈ5Nk6ҨP;l9*.ҥ3SX`%o ƾ@ޑ裞|V̛7O/֭Z QyfiѢa^x94akJwC([p;UP73SA3tP?iFg|GFgFuXA=}m~3?˳> >X&t=k@"׬w/c-֐GѢESee>wܹT}Gmcj|A,Asq'|͵P[9ɜ9٥lSweVvWVʰz/7x+p V >\ʐ!CDBS/:)}ꫯJ~>PS 8zk׮]b~`bϞ=:'ј*ґ؂&N(x-ŋ~jڤ~*K< rC?aBTVx.'RRRoR'x.?/&LG}Tl@w… oco>yu^VyBi+Lf҇8ǎn~d(1G(l?g;uNcG6S]܃=a>c>Ov{:od$0^2^„W^HHbŔei*<̙3eˌ3b'T[cɢEdbcŊGB[+=} guZ75V5j_5L3/_\_Vi#OUn"zJg :ZKYXcOnݩ]MƾCOfL{c ¯^ϛXŁU+:\zZvKּرc,2},K Ѱo~1[*_s9X`+V;s+^+kmuyx >8 x?xWSxnݺx2N]&Vhps{XM <:SNFݴzt:=}Stٮ[O؈<Ǽ/+t z^dXݣ^Njr[0*f[[bkBn`GtBw1+`}jb_[mo3s_.Wv< <|MC4+0)|.^]!Vz`*=tH v0'VG&MV ɴaÆ*.z~GY i2rHOhD"ECJalo1oF Ĭu`.]^g&2(ï,+F0vGAZRi8bz spTSD=&s`5kz}OL5-X@j. +6 r6|UvW:U ==SV3VX(dvkoOv:+R\rÇU'R?_O bɒ%i~$5j̙2 @$ Dmo$+ɲCwQ#8j߾z8.\h8֩SHHH 8Bq+pժUҪU+Τ s}aY,f$@$@BV>$=Z ֯_/xg |Z"3`>7=H @HokVHHlK#$mF$@$@C$qښ5%   m0  H$֬) ؖm @ IfMIHH(Hl44HHI5kJ$@$@%@Abۦa$@$@$8(HYS  -799YիgۊźaF*ϟ/+WIiH I&Gf&m2D,Hkiٲe`6fX$~|gRn]s#bBHm0f,H`ɠABn3 Հ lA!.\ blkd,uB a! 
1PfG$@$@$`ufLA$@$@$b$!HHH Ό)HHHBL$@ u$֙1 @ P(#  N:3   1 ev$@$@$@ PXg$@$@$@!&@Ab̎HHH: ̘HHH (HB ّ X'@AbS I2;   l)H{իTXQd"9s攺u̙3Ν;NlҤ$%%ɵkvک'N8 7|#6m GƎm?iҤYJٲegE!'дiS{챐ѪU+UΙ3g|SN@?ێ1BӧO/y Ȃ u:] A$!4Y@0m9k^{MF)C }vٲeXB-[&EkJf>PԖ+)SF+Ҡ\|Y~W%R?#Yh?O\PBa}&QˠAʕ+5f"E`ܹ;wn%H0eQHojDȑï,7n,mڴ7o!(!G=o: dFj܈чtعstY [ ,S~:Z#?h֭[B08FP#<"ۮԩSo%JPP';v)})"~k.iԨmplO}ig`wުG/g]N<)O<N:U*W39ui/]tQlٲ ʮڵkhzf_7$m~/[#E"EE3A! / ѣHUh޼رCE޽tMぇaTMmmALaTR $>>)S&8qC0>R~}9xzD_8|[ TCIeRT)۷TPAM&nIs|G#;vT[<0hQrJ3-|W0o=i%LWbVQͅݻwEus)]o|jT*q]rf_wG%9 "]ȟ??<7nܰ飏>]zUk۶:>~:ݻ:~7]b:ߩS'|˖-չz8O?T۷q~޼yӍs 5PO>#Gt <؟~3f<~x{\Rkܸ#))w9[xgnP~X۰aCǝ;w2yu~֭ƹ͛sq;SLQz!㼕>i$ b')>[n9 *Ȟ=CSn޼(P#C ӧO;zwՖh?<3t?K.I&pWkMDvw52 b''spf:1h 6T]w%7|k7%K5˜N0`OzUŋ75j8L G0*b{:y p=] `X|#lgU#=7nTSkfV` #`Uj߾+z$` Ft+눙Qe,ҥKS4iu >&L &`5$`S6:N7K*Ew'cΉxH(^Q1`~D:,YR節I`O.]rj-[sO?$WFU"+'L x`}ctG/sZp`ޅpJ*9e 6mT_Æ.LP_|'Ƙ 6`~":O,R k]=$p ٳg`o/MCٛO܏WK/w!|ݖO BF'4³Q|r98گJI9-j]}U?[0چ__~wH7~xKy/vsOQCwݕǾJb ֭[{G c2l0y׽Vv3|qy1v ƌv ?anRZ5:8>\Wh{8ƈ!Cd `#6c׀/KpRT 7qi\>-ލׇ~X9݇"Ooy[G-]/\K !t Ap uêŋ˻ᆱ6- \Zǵ[Ho:$m۶TUӣ. mZ1e7b KӰ[|0g4Cu47˜9s~<=4ۀ9.C"71c( vFD_2,)|ƛ: IDATW) 7}\އo0{loW_s<qOvbw-S4F?(~o a…Ft3wS3꧞zJa4 CBo1%."Va| śZ#o*2 ~pHâx^u|&7lؠn6? ?,f> ~5|o=l[!@j*|[+nfaepO0jϫa7~37`wLNWv? FxVU 0@H@;U0{t5Fb jTZjmO׭RG|A/HOb. 
x"`v|Kx@D`Z{Xɡ]% ,~/"xAhB u9xWya$&HHHR I'HHH"M$Y @*$ @ PD8#  HE$   4 Hgy$@$@$@PB$@$@$@&@Ai,HHH  THxHHH (H"M "@A O؍KԩSv3 Ia*W\tT+ P[j%IIIn?%K_בILgΜYeթʎ]vłHH @$ W3gÇէO>ҠA?뺶kF:ݻȑCN5jHr-˗3f׭['UTA9:`P<3 … KaÆcM6riԩc:_nIHI۳sjJ*~Z0i$UQٳ"qjtf6l9znZ*Νݻ˒%KetŋA,'NիUe˖)KϞ= 7‘-$ AP_h?N uh߾`Yf FY<Lad֭!C2dO^V*}iӦɊ+B 0:RNOYWZ1OwI5 g3f@D#tQ(JAe $:t /RTXѩجY:`ĉjS(ߛ/^\*D 8i;ݼyӸR '^ؚ,ݺu3jLüe%J8Rf̞HL(HL0!j &|JƏGΜ9F3nݺ%QذaJ)9s(Ν;5oyn޼Y9ݻWdɢ0}8Gp(iA$@#@A?/ʕ+`RF _ >\… )Rj֬RBDL2E0]U>k֬Qcp[ps,ʀA9pf>B=2n8u?%2HHznoԨQr9rdª,/wM:=Q8j@2] F8 GH%qM#%qݼ Pب1h= E V1 @,7BOZjGx QqF~xg{s$ $ I.z XRp/IgHH%0{) $܊]|[F$@$I#Xi6mĪQի2o<ٳ8{D67o^Ydd͚54Ȝ;$@$@a'@Avđ/KW˔)#,[L;fo>5"q5\&MdرҦM9}tIfΜi\6m*UJe&ݻw7okwFIܹ|2c u#+rѣGK͚5hѢF:2 $ڷo/Jdɞ=.]ZfϞmرtR_' Æ Sב… rپ}ܹS AOe ͹siӦҡC`ׯJ<@@ ގ;$%%E/^5C);x e#-<٩- @dPDsJzϞ=j@1b`TRJj~3f:dI>TZU+MeÏ%_|RV-ٻwvg p<uV5RRH塇Rbb@rqyA+VT}' Yf(;O4I.+VL lbԛ;$@$`'d m FGZD #3L`駟6Ιw0}-@$1cFu9gyw[n>pڵk-/ G }Pا-#p %j̙3˔)SԈ b0U5:C}{7n>v ʕ+`RF _ >m|IOe`J8¶'|RT^]z)UV}j&X)O O/L}~uXo۶M|]fϞm]vrq%fgϞ[i( $: Dț7+VL}e&3f4 *$kxʕ#Fȵkd:ݷo˗Oz-9r䐩Sx5ܹsKeƌFuI*U$O<ҬY3С;VFwP5.O! … KaÆcM6riԩc:_O[o6x+Ǜ}x*I P$z`c@ΝՔ UgaaҤI< wٳg DDƍ |704l05sQiݺ :TΝ;'ݻw%Kŋ= < XN8!իW˖-S"?={:i[9C9f>` 6m UV̇lA@#H7ۿP FX ʝ5k`SFJn*2d!CHjժҷo_6mXB*T :ux8j*y¾'OQ={Ș1c![9vh> UhEIh2;Hg#h oE+:E̚5q(&N>0 ]^رc'\n߾n0}RhQ}Zm˔)tl>4 <ի'sU8ޒ.] 
x_o5}-p !u$ނBLe˖+ GJV@@|穦h0SreI?7oTCK;zN5%%E\InO_frweU-C)Jxv')#"ТE g„ q+3gΨь[neÆ vA"3grl?ܹS]͛y޽%K5m#>/B~gǛ fk8֢7<#I$Y 9HvʕR`AQԯ__Vc…~)5kTD "L".*5k֨1-O89e@Р8bGҿ7n:;[.x[9sWN8Q*4$$t,oԨQr9rdKeQH})U9XΫ_S_套^J}1θȐ)a_v !?]m}9RM Iff%I@/yTR<-J^%$ZO{/:'0x/>bRkV[իlm;M$zΓ[70' ЎK`,'@ARxf.ǼmV&O-jN&eʔq:6`:[j.#xlU։HD-o>%J 68ETFS F 0# w}zWTi D۷3-OII+W(qҭ[75ruek 0ek[!8<矧4Nʕ%M|y󦚺B![p Tت2 M$@#$@$ m۶{*V<1v., IDAT;|6Ο?/'O[n)WdɒEZhF_&L H?;Ə/7oVNqD{ʄ|`/[/_n-4La͙3G9Ο?_yӗ3fT>D $Ҭ' DFB04?.=`.\|I|I4 V_^V\) 5j~hnҹsgTc(]Uuoebfa+Vgf͚*K)SCYf1-O_/ƍez\OrɮR`… 2ry,`_QJ ix$]0ƒepҍVIz%  0P(C$@$@$-\e-,HH f !8BZ̍HHH $@c    -OF$@$@$ 1 @h P's#  IИHHH (HB˓ @$hLB$@$@$Z|1Zhy27 i1`qd<I5&B$@Ci%)` 2= @(HF HHH%@A,A'  I @(H%$@$@$@A !3   IHHH h$A#d$@$@$@ ӓ M$h̀HHH X$dz   P K$XLO$.]SN%ofJ$`?$kZgr%K.MU&MȀVZ ~ݧdɒ:2ɔ)S̙3Krr^:Uٱpvڲk.~ҹsgq"uȑ#}9n8AȞ=9scׯ˃>(ΝH ^PKK1M`̙raӧ4h8޶m+?{l#$ڵǏ(1={W[o޼)zzkH]̛7,YDfH}$_|g̘Qyy<$ҒGLìXb-[6H*TH|]ו7+W1B]&wQ~[r!SNU5j$sˌ3֭*UH>˖-1waڴi*_} wQh˖-Jp@lܹS=nyմ lzeƍ*~%K,eQ-\pAMl߾]{WMabŊJ,Bl:tH1rg3<#iӦ-[*q8 $8B-zEۿP;k,pT0muVɐ! 2DҧOF+xXB*TH:ut<ի'sU8NҥS^`'lWsTB4p#(A1cB޶ft|='|H Psn#ȕ+W"VQlYw![xV|7nݺ |he}]&lJ(!6lpJ)+xe{) BF9lMS6n "FeբE g„ ʿp'֭[|r)9s(V}o[7oV#JƀF0}8G0b4FJm۪| [y%\fk@ԣ6t$`w$vo!gk"FgcʕjJ5~2|p  .T+bo#SLQ+UX+O8b ʀA9pf>B=wv 8Z  YvrzS׼1q? $z7k9j(ȑ#㸖Z$71VX_ 2l Y|9ܸqC.5WH 8BM,;f x#1[3RG<,"F(HmY0j k3b.`1cUCxDžW{ ~!'`yU>#xy嗣^EE݆pUGV~۳gOF$jYp3&Vx!IXmD[$J}Y')oC ^Y % lVV*k%II"6R%!HIwV?8%cj (H4 nI @%c2 0 1. 
J$PrLG$@!@Až@!"EIc6$@$P(HYp(III w1̟HI5)+m% $@$@PX$@$@$@a @A̒HHH kHHH (H5Y^tIN:mU&9ت9h MoT/ ~oT`ڵe׮]*́T/_vGtCkN|O4iDƎ5psرcb֮'x«mH$@$z$gpW^yIϞ=wߵٳg-_d̞=RHE{G>3g@u1RUe9$@$@%@Abh")Skl29vaݾ}7k׮HC6mҩS'9sq}ڴiRT)ɖ-t]n޼i\{g矗|,]Tj֬)y\rI.]`DǣGV׋-Tڵkrʒ5kV52iF ӧbŊ3gH~ߖ֭[kS% t*A`ԩҧO뮻G_F3%KBdҨQ#%^N<)u֕ ʓO>@ Dŗ_~Fсk֬zH,/zɅ ۷|^AL3fPb?nݺIV<I0ѣҼysyꩧLmfʔI ~rӓ @XPLӰg5ʁ޵kW2dHh>S6|TZU|MqrPH知lٲrE%> ( Ǐ7bfc˗Ojժ%{U0=ҹsghFםHsh H5d„ =ƴNܹ^Ib' +T`GSi @ IcT%JacO;|j?o޼ƹ3ʭ[c:w\yG?8߂)4iF@`ҦM+wQS#i$O!n߾-p`E0rzU& '1V~}Yb<Ɛ $6(RՏ>H>`ĉUai1;bd"аaC`S}Qټy:tHVZ<1Usi/A]dΤ: Bʕ+qU?VH Pؠ̎}~I6m$ VΩX !xܹӰ# "CjP>Rׯ_1F]0*m۶ɍ7 p3^!%={+F$u$Qoo|[}Y  PPd$_#|< X'Xg$H-dݺun$ g!̆WHoZxZwFH$@ J$Aa [HH.(H#& Ģh.~cHB.-A;b@|s]@\ eE ʕ+eƍһwpa$@$`edL+XMXA~}]%D/^?(}'X"eZƌ#\<=R~0/+_l._̣-$@$ $$$rz/BRLHHH/$~ab$_~YvufРAK=  C#3iʪUN:pF# I0 dȐ$;N$r$!G EI4I$ $2EI6-+F$a$EI)kD$y$g㐀YaX% ; #fB@VZ%JYO  dF$ Qh"  H"0F'_ JHH ƋIHH@$ P% 5$x16 @P*$  F/&   0@e$@$@$@PX$@$@$@a @A̒HHH kHHH (HY X#@Abc I1L2IRRɜ9$''իhMGQv?<HHIϚ̞=[>>vR]vrqi!o޼dɚ5k4`$@$@a$@AFvbŊOrdĈr5ٽ{۷O'{9rԩS嫯uJΜ9'N(CUoQ&{Rti5jHr-˗3fKH;h )\ϟ_6l(;vyٳҾ}{?+W.=zԬYS-*ݻw7o|֭['UTQqZh!:u!ChӸ% ) 6L͂4i`*p!;v̚5K *d٧O5RqFXӧO gі_|Qp e֭jdժU!C5*kUV}ʴieF^ԫWOΝ+k׮U#裏T6ޮ":MVܹ(Q>%Kt:A^bsuٶm` Fsz+< I((H([)SiTl># sTFS F 0# J* Va5jQ~JJ9 Dlh3~I ?ѱ$@6#@Ab9m۶U pnŷi;J@4iDɓ'˭[ɒ%/&LP=|JƏ/7of͚|k2?,͓7nw_c d|x񴅏0Jvc   KK5|%qy0pBKO|a#ׯ+WJFR~}>|tY1|Ypnٲej5Ѡ<.ӂ4g#;_F8)NAl2 ]$9:NX;À#G C.8 F{?SAm/aj b >? I ^j|kGHQṘ$→,u͚5_wdh4|}0@$@v!.=#ƍ}+་wl zm<" ( "|zx > 6mRKli& X#)Xk1kk#x%> 5$x16 x$1e $@$@ PXg$#W\Iu͎'??   Y&F+l҆HB.-A;b@,Əci4 -p-F"X#xI ^% $@$`!KKЎ"bŋҡCiڴԪU+X &@Aڅ^4 {CUTrʩJ fI$@M0Q 3Ցd|X}#b}m6u]#={n׮]JkN?pK$@~ #@|ț7+VL}e&3f4 *$kxʕ#Fȵkd:ݷo˗Oz-9r䐩Sx5ܹsKeƌ*AI…%ҰaCٱcvɚ5+R^={dС4_}ԭ[Wr)iĉahRfM)Zt]n޼{*y>T @H I.̄HV:w,]vu ʕ+; ȤITN*Uf}Yɞ=߿_._,ӟ_~f_S"$]td% ">֭S^rE ͛7˥KN:J`4mT7n,olܸQo޼dɒE .ș3gd[^5'O]xQf͚g$NnڴIlE,A HPf$acǎU# Uw:o^0‚|HPY,0RuVɐ! 
2DҧO/UV}ʴiT^'OT#&{1cƈ#:^{MBK,XEŊAwM:@X6ԪUK뵼UVyS-F JH ^p$^Z "EiPL<&DtŋWcǎi}jS1sU#.ޒ.4 IDAT]pŨ'NRJjCWiʝ;wԏ٩#4p" : XoAOQ$eZ>*U8pi(-MIIL@,tMM .N:% PISD ٰaNm1=K(y+ϛNEl(JEl‚ "ТE g„ ʱѣJp?^-SQ L a_8^zU Cڶma>3;wwyG[ytWiQHgy&Pe~$@~Ocǎ2|pSSׯ/p*-S,[L)o0XĻvZy98>sꃩONCOvz/(MɳPHrrfy5J]?rHYFsbR[ w!DδԗAҢ}L@S6JH GJ8},F 5 " hQg^%$ZO.:'r ^${g(A?e X"@Xj-j x l m; $-$`w!{ > @@;.]B*F% ~  -F֐d&&@Aa,HAb$Tg$ PD6" 0 Tfq$GIH t(FBǒ9EItt HІ(Hl(4H!_O\! { > pC %%E Y|1Z6 'Hd#^ȬUTNw$HfY$@$`sx~#ұcGqFD{+IDq0  /iJ˖-%$HH"NdѢEH 75 $  {Ȑ!CE #  ( J3P  ?H.$@$@$'Om۶+[XB~ٵkӵPP$!  'P~}hrΝ&=zW_}5P IC$@$@1NUVNABOyHHH b(H" x"@A ϓ DKԩSQ)FIسd I/r|o|ߤI;vxڑ#GT~ϟWj׮mp6 N۷O}ڵ rWp3 Wm)HE @z̛7Oz)nHj7o^Ydd͚UwY#_kx$(H<6)/ o)SF^{5Yl;v)ڵkrJ\tA._l\t }H6mҩS'9s!J֭+9srĉկ8p@r%G5kJѢE{rMUҥK33_\VX!*TPLZjRs$5i. 8\x{) GyDƎh (^}U|i&>G"J;k,1;v$$$HN2g%@ \P'o߾]v%V\!@ V.^.k׮UǛw|u%\6lؠM0AeŅÂa&Of͚ gΜ){^\kРo,YD 9`9}TZUϵO0^RܺuxkNAY Dܝ8p@E9?i/K.-O<ԬYS۾źtҪ޴iJ*Uo߾2eG FrAɟ?ݻW5jDKll:uqx >}Y%(t~3Եf}ׂan5xJ-Z$x<;WbEի=3f<9p>|)>mw9eg]ΆuxNt8)**Jݻ.{mNzwT;0$+eȐA0̣ʇBB Eo!+ʕ+aGa/ 7СC M:U0̌3T}i9)Rļ$ 5 `*..tS?x 8s:q}X]ӵ| ͛]v6 O] ^u>q&)Ȉ"ylߜ0qFŬL2F6aw`x C=`\e,*Q<%  E-[TwQ֭['ڵS'N;w \tsŐ>}z5dN(C0 Xq"#FPis>e\ "O nݺ:K1/3fp6Lo WĂĨ8xFX#G*բE f䅗C5X6OPG`_O$؄Y> Dx0D \Pu=]1 ^\s-1 >pڅ20|1}tuqnݺG}2C6 HHlN'O$@$@@$z6 P`IHH PC/  9 l>  p@$@$@6'@Ab'  p C/ xW  I(N CT0DNL8dcE    wJ$@$@$`"@AbE    wJ$@$@$`"@AbE    wJ$@$@$`"@AbE    wJ$@$@$`"@AbE    wJ$@$@$`"@AbE    wJ$@$@$`"@AbE    wJK^~۶mNm+=v$$$HN2g6lL ] Fbcc`O^0ڵk'xXn߾-v#FHbb:6S|yɔ)4nX>S)P)RSN$`1$0^(F VC4 ޏ&MȨQ\w9 A<f`O>L8ӉAU)A>& !9MaKH u P?% 脩H%JPBoظq^uΘ1mӧ9 p"=$ةlR P1k  {7[4KLA$@ O𓀯i(ra"Pȝ;z(m`$ h1/ H.]d^=$:VH T*/_U&!<+&&(~  dAv"РA;u8J$t$AG "EI,E$ $:#EIt%B$b$!Vo}%CH (HB PD@' $@!%@AR<hQImb[HH Pic %k׮E[H $ @dY$ "%L$@$@ s IH   PNjIHH@$PY$ l)H2dȠ^WGEEI٥e˖o>1w 4mT}|-[6ɒ%lݺU͛7F  &`KA.raY|@ѣGûho߾r!ɕ+c;HHG$gΜR`A)\ԨQCfϞ- {ы͓իK9ԝx׮]E߁ٳG4i+JI&9"(wޑKx_:bV|A7CC aTJu;xx_AX?_ʼ j?,6m"E(o:bk\\*;)yQ'vnwҥOI7D65J A4ZJw]tIDL3S{zAA)C& '$8{RiʴidMy'0| ɓ'Ր>ݻE "!R'l̙3RhQY}cw:c#qjɼrj5)u$V]vRzcq.= +>Oڌ!8pܹsV+x,Y"mڴTc;߱n;wرc~W_I͚5b:$Ɛ-. 
,PC7c ?~lذA xEͫL2jAp,.h$x04dN(Gcm~) 5. TNXۡĉq[m°ŋCH0"HVj<ʓשS'~$@v!`[A \@dw}+xL-u wp#Mi ҟԮ];53n8UbLF&)Sca/\ {_~*%Puc7vʁd'NT DW[ntY|*\~  `V f@@|zJU#@U̺… , |РA~+۞bV#h451 @ #`U1xo-w`h$ݽl\XY k֬=z %  P؝!*1#3?TBdΜ9]GvKH |po-Ç ޜ 4`OBٳj*)]zuRBb+% O(Hqݻ i߾:uJg7  TJH r) T̙3Ky_5|%K͛7eϞ=:СC+W.ѣ<2n8I&={v)UL4I7 C/ܹsKÆ eΝjߑ#G$SL+Hz\ro AڼyK֬Y6=Zq 6LW. '|Rn߾{e=Tz6&:S'Nwu&;svW]tnݺ9 o坶r1cƨ *-… %K9|\vMjժ%{KyJDGGܹsׯXB{uu߰a\zU֭Ff$!!A{=Yf:e˖˗}v-*UR[9rXߕ+WTlѢv&e#⩍|[nItb*%$?M$`]GA|GS|CzL"xJ"dӦMKڴibŊҫW/0a*̙3cr>|h1}7".>̜9SҥK+2Tf`NZ5jַh"v2 ^ژ:5G䤎; D  D  HI#ϟ_pa60]vӧOKΝVSP!ѣrI5/>C~M L:U-[&UVbŊɌ3"|!@ݸ-Zԑ X?vc%Z7;u9|% 7yjcRۜwҪU+3.[l,}4RF1%Jx'QLA0NΝw@YvZ .܉jy1ov:UfE` " CEHɭC]HZI柍7*Q=2,@X$,FqFKO8ȑ#!p ?uB7AJݥ]vBs3b%fqNj*ٻwbU< *5}te2N%c#Ğ%I@V(Hª;h X\V\) .}G[}DYxˬYD'|f`FwVJt[Y27R$ ;i = v k#:Ȑ!+%ْϥw;B`v J7|fK6ů]|}l%=|s/! N $@#|XΚLOI @A@+HBDÐS$$(Ap1 XgXhkXdaaU@S @rx e#@A<,9B )xT: +pvI *Wf$!+@x~VǏݻ i߾:u*v |$@$zr) T%K͛g9tʕKz!<7N6o,5kVAѣG驰x۶mJdɒE+&;:&MH٥TR2i$ S[F|Iܹ36vs.W59u9rD2e$ԫWOʕ+'oڏe&Æ իK'۷oc={SͪV5v Ș1cC9*Tk׮Ʌ oI:uޓ5k֨ i˖-%&&F{1i֬x_^v!7+J%$!!Az-K.K͕V/E~ϊ+\>w̶Zj]'ʽ~e6lWJݺu@wغ|zpJ*ɼy$GqlZhᮙf 20` Z&ϥ`f"sU7S 22eRϓ'1sLu l;pZU:8bVZƴiӌ%J8ݘ>}oF֭111СC{wuϑW[[篿|G}Q^=[q֩m۶;㵍~WZ琍u#-%  HvڥFN>-;wvPBj̙3RhQ}X?v8 SʕѣrI />x4:<.:u,[LVzf̘\yYۚ:QB[pYAа6zӥ˭Cb.$@$` ڵVk«2b% 6m*.]?Xܹ\VZ)˨QT'9r N(yc,{Xr"ƍJD!{8wNweYi=$V-J$@"OV^~e53C/}Qx&^W_}U#ǏW0hʕ+_Jtcǎ2h I6۷Oׯ/,^̟?_2g,]t6 $NM 3~0[nݺڏOW;<}c $@%@AbݾaJ֭[ajY7nS .,<;gǎԩ?^̙#Æ ɓ'KVdʕݻwիWeڵjŋciѢ:H PB ر,;wHΝU{ bFyIҥӦM+UTQ^xFZn-WVޒUVIe޽r%YlԮ][gx# t #_`A vI\k׮1sL9sF-T^]bV_^,Y"mڴq:+$@%@AbݾaD@7Ug Rf̘!_~ 0@Ńxȑ#NY>,yUrZl)/Vvի'5Rd$NԸB&@Aba@,F_ #P`AǧW^?ʺu<ծ];9xrmٵk1B1KLq駟JH" k V0#@1;"qM~4iDF˱OA0[C= }QdN RZTL 1 Di [BKb,ahE'LE*Qz;Dᮌ3JŮ(.@$;M >3f $@"@Abfk@ݻjy,HDC6< HOÇhs#1BIkA 0 XUzv<[ú DV"3JE[Â/z߿XJ#K`Сre`-G1$2jxrEnݺjsX? D F6" K$> &@AEI0l  [%@QP,H(Hllz`P#K!7 {?[ fQ"Y ؊ &-JڶmjX6 D$ V6*T JfϞY/ Xe+&  PNjIHH@$PY$ (H$@$@$@A @A(2[l2o޼JoڴWml 5ݧH"k? 
ɐ!3fڵk˒%KHHH \ Pg&O,ǏW={J [n_}ݻw+AҾ}{9uo  k$!잜9sJ's̒>}zz޼y~m9_ɒ%erMٳg>tʕKz!<7NkҤdϞ]J*%&MR ÐK|$wҰaCٹswɔ)+R^=)W曂c6o,5kVMGVpGÆ իK'۷oCh;;UC$@$`iі>ߺuK=]K.ҭ[7'+q-_Ӷ@@3F TPm.\,YÇڵkRV-ݻ,]T/͛7W"$::ZΝDD~gŊׯ+azԭ[W f͚IBB{fu|˖-%&&F ˗/e[*Ur᱾+W2٢E F  DD F:v^{G^A|GS|CzL"xJ"dӦMJ6mZXK&L:s8p@.Zrx ̙3(]9(J**n6aPF 9x-ZN]&IHKVZw Dϟ_pa6' s:a8)|oBD]P!xQ9yF޽n03uTqy뭷TtUeE@-:A\>}Z b-7o;v̱ yu{OySo   bdƍA5RD 2eau:w@ ]V0<NCgϞXu(S tdպ8!_B[}t+$@$@$`!֮ !5j ,=q#GĆ >)WCBX A7nP+ݻwvکc0]vɈ#ǹV7;ݕm$@$@"`K NR \R^|E8p@<͠ATLɾ}~ŋU R018A˖-_~YmC@l>}C/=Շc<e?K?{ ~3>#̟v)1RT)ٳ7(+$]t ~7S3ouju@w}W kJ՝ZVP!Yx8qlٲI޼yn}&/ـru-fRԩS%{J`O?U^Ԫz _|E/HR c۷G(I(2 iΝ;*wnȑC0& 5!wݸ˗_bŊ)W&\{s9\_~Eq5ƚ1cF]ݪSn'PuO?w; kbC:ڵKt\ ą^zjO#8UYdG}T ! s:#Ķk:{5..NݡL՞B UkԨw\`0!=G ^pdȐAF-ݻwqԯ__=q=܁t1Z|RhQի<2ay衇?攔sҜ)#Rǎ7.HbHnJoD… ù~Z +a-FnvٳGm-Pm¹Yf)/7ڍTֺ+;*6f$3I;wn<[nU裏>q:]vjԩSjGj}N~Wj{Ν۴i=SmXXxޫW/iӦm'Ntl Bʮ3dcIɚ<ÇWG5U | F4i9s|>9ҸqcUnÆ {9|M69lRm裏۰'5rlt@_ Lyh0ls玑7o^#K,M}kK8w]?z u1^cƌQexa S$ 0'=ʕ+@ ᎶZjN[!#SJ*USr"&Hp/YD=bƜ*OϚ5kyI3-s6X:sE{OafۧH3^eL@˼yY{̘1 >[IDATX#$QF b:R0ĐNhWv#!6x~}ḑ#Gz}\.;%n5t]}}׼i]aG]0D4O~< l A @`6ܭ!C7Um­sxm y6wG=%ϻT'{'PWz= GxfU۷~D,`HoቁSjԨ=g?m ~o`I@3tH@q <%)LG\-0*(HR3+! 
'aHLN1 0&5)G>m!I-[F$@JoO?jŬ,"0$"   k vz  $эl X֓ @D nd#HHH(HHH"IDt#A$@$@&@Ab$@$@$(H"  6 k'  @AF PXh= D\o,0RaJq7 @ P1k   A &  > 3f $@$Ο?/iҤC6<ҥKO?j,_?廐   2k&r So1  H'HJW"\' $\5ʑm۶RjUdɒ| ZHj~Iy$SL'?ө/R˜9$&&ʵkǍ'E\Q|~J3H{ojȑ#"|=4e)Vu.ĉ]d"痷~[Z62ݻWU>uX!oV5j(;=ȿo>W* 8.]Z?}Hl٤sj2ug̘uvI#uQv>_'ooo܎;&۷9r(?/7oT&.HHO3ڴiw#G#::ڸr׿e:I-լYh׮gcFʕ^y9(Rn:cӦMF%^zw62ed,[_^{Ȓ%q5[7n0ݻǎ3M1HǏWu\׶?ê/=s;w˗7}Qc׮]ի\r&LPm֭~cĉFtvz:^z%_~1njTPxꩧT=7o4 .ltطo駟3f4|AԩS,}m6eׯ_?ldȐț7`CsGe_qbŊ-[Te)5sB$f\& $pٍw懲.ޥJ2/_J(^du d>Wj.p]~QDIڴi?S] H͛_ӧ7p!ꫯx駟~b W8,^P3<㸨$E9lUuv9sƱƬYԅ g;SfAriw5nݺgСF5_|Ι?ñU̟?߱WK.u6;FF~o(o̙cdΜٸt0k a KH Pn]u޽[ҿ{lܸQ `A|&&ՎS*wzܹE믿JΜ9v=zH >|X =`h*UHVg cT6f` CE:լYS +uo߾׶.\Q .?H&)X:?jlDZW^](쯅_e\>عs+V4ڶmc *PFՍ_]߸q@@*b>Nġ*AժUSu!eܹNA؏`۫x"HXi*_ێR>\rj#mEk׮5O&Mi;beԡC?s;m3ǐ̘1CnذAN2ňq؁311QȏKVvփft:uC qelC֏o>q}L6# HڅcԪ $.0X鄋*@kܸ-c#)9rhժ9sT^tP`v ,!9~xuсgLGU\$Ku_fQlYD0cǎu|m'A1YfJ0,X@NFÇ`*UFv  AܹpBU}Y>8 !| aN*]۽ ]-o:KllE}$RHHD7oK Pa_ A(ڸqco[lc]CbמgIHRf pҖ.]*|t!U8&\{v D̾As='SO}#m lKǑ l $&{IENDB`docker-1.10.3/docs/extend/images/authz_allow.png000066400000000000000000001013411267010174400215700ustar00rootroot00000000000000PNG  IHDRe IDATxE?r9JJP D xp≰U I AE#g$ i[w3;3;;癝ꪯ~;oW\.KHHHI #kJ (z^$@$@$`z7.F$@$@z^$@$@$`z7.F$@$@z^$@$@$`z7.F$@$@z^$@$@$`z7.F$@$@z^$@$@$`z7.F$@$@z^$@$@$`z7.F$@$@z_%YdS\vҥ}v|e8pX$vcwH)8DÎPU֗CU.IO9l S[VDn߾- ,0jxAٹs'NHfͤe˖rܹdesGI &Zh%K~[?syh"Y|ybN 9_׮]U9ٳgky8DҎVۆ94UD Gwd >}*0eʔ-[6x̝;WF%iӦ1,SNOU69"N%uo ӧOڵk 6^m_zU˩[nƼc܎g͚UT^3<긷{ɠAH"sQre#el͙3KNʗ/ɤUV<~z)^IFj֬y\rQ?fitoD%uԒ#GS̙3G'Qqfqrk9_W_B )`4c s~ 8k+6G/\[nFul޼8ϑ4imÇKժU%cƌ*U*ɗ/iF;w?\srR Og8S (7\{ěCb+vI̵o-1CI`.Q3fve7h ^vjvO>]K7;ԉ.+j)St)SFiu>2exhj駟 { ,h8ЈݻO,̙3F ܮzovmW52u: ;u8}GPBSN餮`+vI̵ }tz~;8*tҹ_.]GO?UZ5#ޜ^!GZtl6mڤ&FΝs-XcG N5kTBf͚ھ}kĈ-ov`k ٷoرc H(Z/v_~0F~#O)U:_z+E*m\谢8yd: y:u긾K޺Áɓ'Ug}VE>k˖-ÇmVgk=i`%1> }p1(iUdǎXv|+sFqިQifa4hq޽{.t<aAGz:#Xsuc/1Q9'k֬8?fϞ:]ٲe8#y~F>4xcP^ll+ܔ)S@d̘u}:̣|ou5x[N]CH{n:/_>7:uViР`} ٳ3gld٢E Yf9R#TAПu֩x\.^im?AюF-޽V,?~ ~hb8/_l"'`rap/^Ra`&MȖ-[x,!JlHj}4O63Wb;vܸqC=pI =۰m۶)!+VUŋK.CСuπd̙Һukv횑,Xo<cf:.Jl`j 
lIk?۾e:;r<-VS~$uwB;wn#K)N:eDaEgJ@?z$׮];y7NGG G+ܱL-&T' =vZ_s;ydw7JL֭+2eR6c%ԫWOׯ7l zĜh^WoVۧ@G6C 0D^\YdѧȥKml Fı8 COgH.^izp8wų_nL4d5-DGO"-FJRٳ}1'O)V=zT0Nt8Njdq f;1uw駟VTΚ5KJ.-{<믿6=o"iCSA=[5PC7o^TG\xƄH.YDMݠmqle YB`Ov1_{o߉io0ξ(mx_pA>q?cDlt0?P`;-g/}['e^&L]̷ |l߸N[n9ȕ+׼jׁW풘kv%}t}mw@gMWn߾Q^$UpKM>`^+1spG' W8Vc;FNX(V20_~$FpY`ɓ'n|OL h7z:1u'02<=:?QF?~bE<հqʭQ5Hٲepݣ쯾鏘v<< AbBE۔bj$@$@$@pD HHH!(iHVHH{8  p C   o(ި0HHBBd5HHH 7*#  ;!Y   FB HHH!(iHVHH{8  p C   o(ި0HHBBd5HHH"QԩS'LC IY?eܹ2sL.[C CCx=bl1VI&Z[iٲer kĮBW__-k+#f%K2v\C}= 7z&ZQ=~@ʕ+ ^lA%׮] B2&3 Es7 @LD3$@$@JB-z  }L43+I$@$(7 @LD3$@$@JB-z  }L43+I$@$(7 @LD3$@$@JB-z  }L43+I$@$(7 @LD3$@$@JB-z K ݻ_2eH%K,Rvm2e<|Эa7n,ɒ%;wm۪sι Ύ;dƍȚyU{'y!C)Q~b@&Mu3τVZr.]`9j2U}z4h5R9rHzdnz N$OCh6 c?wyG,.K =._Uv)[l%KȢE$e蚾rJiڴ|g1bŋKUMqܼyS~g5k̛7OZh$XEM5k֨UԩS5ʕ+}oݻ&M78@ȣ>Zzl2ӧϺMk֬7o^{Չ˭[kڙ ̘1Ce˦{XiԿS2gy5֭[˽{#B!l,_|ܿ_#ʕ+ٳ гk[6t5y7Xb]p^ŋnY`_~EڷoҥK'5jtx^PXK~Iw [Ə'!':uR\ uQ?:~gU2e?j*V\(~ZЉ .\ .Fk#3gθ% t;)vж  Gرc^|6Or*@^ 9sƴzz˙ף7*s%2`P+W. ]w *?۷oڴiϞ=w?|,Y;vhķlRŽF6+߳gO#/Pq&M2Bj+|슋 $i@i 85onݺtK.U5jJ,k׮]n͟?_3q( |ׯzQ+nj5kL}F6>SSO\IIu:?Ty2esΝە:ujŋxF胿 :PfM׍7tr׸qT_8ύzKZΝ;] ]3gTrƌ]Noulժ:ծ][?:{9oI6q$۞j=FIp▒PlŔ#<"pFY-X@50g&@L`tWjU71G!FPbExux&Fp._\7/3~z65?`=<3 `FvT{VÃac)ѣG ̓p4׽^FޣfbE¢*;۷o.=ǜ.pPH?6DnܸV-[TW\^up=nD;!ThyEW߸ 4h"OyZG`[@G4_|1,#8p9RL3} 0B.zϧa!b%NoNl6o<_G2D۷oW%t+AFؚeu'aN:ٳ ,n/KaP{ tp?L>=W.Z_=csZn?Æ Ssx|2^kp[ЛonFemt`\vc9L6M=׿\9]ncA#o1=zs u;3Νk$ǵm:/x/ !!pc^,0L/9xC%gXc\oxJ'|pkV[N@}`,IL/v!`ߊ+,A'(Y7!Qn0r;su٘<kW%e@GW^Q}\x^;~r/2mh{xBhpmp'ڸo߾* X)I"<N/?u<`1~P0 ;28eF +F5N4ŏ8#r,kܮk!ɓ'zn>xz͛7W.^o?oxa*}^yG=0Ó𣌇`t,CXρw&a%x9!Hp]#oڵKp;kXY@ɒ%=OᾈU f'!6hgL@q;-1y%Z@R_q݆*SG\_:/w/6B0@2< 1E@yjbsvȐ!~B~~Dg1P􇞦'x9 tIz;R#-  p'c^~hn;U  KSp24~1g 'Y"4.`Q1! ؟%JX6',XHh^EkIH IO$)#lMSP  >xf<HHlCBo$@$@$< }x ؆6MECIHH x     m @(3$@$@$`z4?x IDAT %  Pg3HHH6(i*J$@$@όg m$6qqq( rE@2CP_zY\Fc$s% `qFs4n.&Kmc۷˦MQڶl3f4iy敄ӕ,YR $wܑ$#G$gΜҽ{wɜ9L0Akذd˖MJ*%'O6ү^Zʗ//ٳgMJe|@9:[c([xU'^{/_>ɕ+ԯ__ݫiݺ\xQ:v:9:_~ FBԩrý?k֬ Y27N0jΔ)=zT ΍5R̍cSӧO˳>+ +WH׮]e޲t~Y& @ W;K2efȐm?;cǎU.pґ?*THm?~\Μ9#X<à n h]xq}S̘1Cy}]e~(;w6gŋKEwL"R( ! 
0>p5K(!@DS!/[\zUXۭjp#$O޽{q_X|[7lؠK.-u.#0olٲE=GaI"BB*#Z=z@!䘳5jt}߿_0^n :*>}Z7sLٷo:/͛7E}ӫ󱍀8#nݺ%Hf$@ @eX tRɓ'TZU֭+T1>w\HժUS58駂i_bZ-E,C(,6Bn^#FHG2 d.=ҢTƐ!Cڵk2x(YbJع*`dֵ&N#z'*2ه %3" }X0=>`  ;v%Xz*U,s2_NgH$m1#)STs6po_ -'%1#uy $Ѻ^ܼy_%  }X2S۵h7 /z_dO$@$@ @w@# $@$@$q\p!G39 D>B#]L>}o[۱cGPE?䓲~uαcT~SN2^Ñg' ;۱_K/$cƌI ˗/Ga\F9d!C$[xqʕK̙:={dK/_.sΕׯKF^zjN"{8ڵkr%ٹs۷O֮]+H siҤo^mϞ=[z%}e;:Hw^ٰa̟?o'NPv>sG#N]w~ PނC:F8#i ˖-Fƿ~[RJ%*T={ F:lș3rAyꩧT'ܹsٳg#ѹ(S<+W,}S$X0vN4I 2eJ)Xe  )cSKxpFOgހ_&M?~\M Eà _CuH"<|Pz>w$J*]؈u'O\*VꙐϟW<1 /FCb yiӦ2zhՙ4H 8O2Bd[f͚%_~rmctO\\I=ώmB6P wѣ_BQ/x&6oެDzٲe7o^#<\9NcNF' lA4^{5'Vu"BB] pg7kLqFɗ/ZT7}tΟ9sCuƈ.vt͛Q"F4>}Zͅ5i꘯2(+OXħMV1:۷ݻw٦MUx:`' 9Rt钠NLEXrʠot"։bA/a=FXQN.|j<ܹXeCnhoĈ:7׬Y#K.c.+|剑;D.-ZH޽lS,mxya p>d&9`Ȑ!jUݑUpʵ5 k-G)S@pD%! L0 8m@<X $@'@w~&E $@$`zp|QVH(hG x  XJA[lK@[lQcH(oc0&Xy@$|z1kFvyXzzAP0k Km,4~*x5/Ԃ1 8Goc0 *ׯ_K&MԫÀY Xb BsOz;-xBdɒ>'M IBAPPd1E FVϟ_իWFh2IDB%,־:u$/;wEٷi9 cYݤk/_.ӧOzf́HLBfޙ;mY+p Z}P  $'O%= [h P-D4b>}H2 PЇ(YZc+N$`IzK6 + j>&p  U.= X*-A;HHH (a,IHH*(Vi A$@$@a @TfI$@$@V!@JK   }2K    UZv @PY UPHH@B̒HHBBo$@$@$0@e$@!p pBx2g$Pz6,emYf 3qҷo_ߪU+I,O"E$$mڴnKNjԨ!˗/W"|IٿL$`z4 !wSL'OO=o۷oܦMfзmVΞ=˶UP<ȑC ,>3f4iy敄J,) ;wȁtȑ#3gN޽dΜY&L5lPe&Jɓ'W^-˗ٳKӦM}2|pA>F1-O%|I\~w^uN֭ŋұcGgr獏OV!@JKЎ#ЩS'Z{]֬YVqƩ F͙2eG ĹQFʛqL7{iygerڵ,X[nqׯ_':sI*U_~E)3gKny3tl7;c$`f!$F?p:kNs(wԩn*S~[RJ%*T={ĉeɒ%RtiAZj҈_lRL.t݇,%h޼Z?zhhC1g?j(5?%5,^X֭[j O͜9SۧsJ>,ӧWpc q #]V<=_9,#}-tJBIBJ̋FsX tRɓ'TZU֭+T1>w\HժUS Ο~`WXV㠿<@P: (Э[7ydĈjrYPǂ[$@$6e 'IH (I%BBz<H @O>Y @,Џe{t P˟ 8^n`ṮICL {9ˮ]$uԖGlE$@ ॣX޷Kݻ-G(QϢIHM EҲeKU =' 2yV{ }/O$@$`S}$@$@ `U[ $@$@ `EuθX   (8l߾]J/Q,YD?ݎEzBi,HH1֭+xAlթ[n[oŋt>Y cjJ :'  ;Y  EB IHH7nȅ ! 
5ځV @HMV-1|ҥK'5j.!|ɨF@(1 ؈iɓ[ m۶gڨ77RD(h&I$@ȑ#,XP}J,)xڝ;wrə3t]2g,&Lm۶Iڵ%K,cǎUoC;vPL2Ib_Æ խeJɓ'Cx^%_|+W._ݻ7c:_V_eիW ^={viڴo^ иqcu W֭[ŋcǎ2e%)lE$@!q7nDH}QQFsɥKd2tP%^W\&M(ٳW^wyS .{OM@p,XܹsRJׯ*1o56 q<}<2`]vU6xz,ZHub̙#/9KmS-$4HF]v1cF=FSNyCׯ_/eʔQ*U*\Wށ>}SuV5_lqo:VBٳL8Q 0?t 6LQ8u0cCۊ}Е.]ZPwkժS#~N֍[mh $\p۷OcT 9*THBt-j>O8!8SPR%ɚ5?~\Μ9ϐ!CԩSRN1c\R5k1s9mm2F/P4)^۾ynyy&VsVHBDsxֺ9k… ˺uܒ¥?Dm?JT˖-boذAnݺDK.+ۼ˝;(c[ !UU/W$O1{2 []ޢ CHH ڴi#V 9R ̘z?^߿a=}Ҽys-=z<*G%7oV/b\w,+XcuFLeL>]-ț9s'gBIFVz+m# 0.vEһwo;Ν_xϔ;+׬Y#K.qXm2qVǚjժ,OXsb c*_ يּk2bĈLd.=QU3[8据]&oAx-9#Z\OpvH Ņ Vh/,HH"@B,HHEE 0HHHwtr$@$@NBWO$@$hzG7/+G$@$(~$@$@&@wtr$@$@NBWO$@$hzG7/+G$@$XX Qm֡Sڰ 3 6lp֊pe$@$@$e(7'  pЇ.&  (GX< >t7 D> IHH (˼IHH (QnO$@$@$@']M$@$@Q&@rx  ' }82o  2 }œ @8 PIy @ P,@O>Wر#G;wy-wHJBU,O_K/$cƌH wH< ̛7O/.,ZHΜ9mۤvڒ%K)Y;V.+ԩS9ϓ'ȣf͚2sLc$@I#@O?Mlܸ1^#&L =zGyD~iOΉ'QFsɥKd2tP2e4o\֬YؿܸqC6lؠ\"wM&X FB'"@ׯPZ;$ڻw:tH:vի|g9ya…RLATRIʕը#-ZȺu~ڵҭ[79x\zUV\)5jԐlٲ˞H @?yD1yrN IDAT.\Xr);wV#ٳgC!ϟE>FժU)R:c_jUٴi,_\ZlvwHFB4~<-nr g͚%_~e3>qqqj_E18v[GJ޼y*f͚W_}VשSGz)%VлQ $> CpȣI!srOϞ=WhӦ>|Xw۷OF)]tQ}ʕ 2H ? H"E|ex D'O!M"a.zπzÆ eў}1ߎpC{>HqFիWWsH@$Z)Cs#!dG+ٳgcŮnC(Qk޲euN.oF\4iIFŸA &؏E~-BI|"xA+DZ#PM$`)t[9h$tk3@Brʥ-P:''ύI:uŋyJL`Ȑ!rڵ3! 
$]sbV.ӧi< =PNbիGX (޹0$@O X@Kbo߶$+(ҬgPÆ > Py Xު-ClG@ $@&@wtr&߰aCey$@$' 3 UPHH@B̒HHBBo$@$@$0@e$@$@$`1)iӦUūDSH!ٲef͚?`v%иqc>|W#FYJLdMܹ5-#IH V ĤУM&'OGʪU_vm9~x^ 7ڰo߾rə3$@$ 1+9r䐂 #<"?̛7O *$}}…RZ5ɞ=9vYҰaC (UL9r&܂{Ũ n~P=3i$UY 6$T-Z),Ģ?g<6;P ĬcnB"Q`n_UX1xfA$@> P}Y,3?12  Pj2(w $@'@w~a"`W<ta0[ ؃EOtkd ;Y ,˜<=cƌQ?|.av$@$,(4 &xWu(*xڵkL2ꍌW*G J$1fAN" ٲew8 o/^GH:5>Z&{۳hP-4H A1 &@͆GHABov6&@qthdV1bX NB8+$طjo:$ HG6r<y_OVH>(i+Zj{   UZv @PY UPHH@B̒HHBBo$@$@$0@e$@$@$`z   0Ї*$  [%h > P% X*-A;HHH (a,IL k֬pBsnܸWmwɒ%)R$tMtI5df @N[&0e9yCիgo߾]:6m4+o۶={V'7 @X$#G)Xd̘QҤIc͛W:keNWdI4hܹsG8G9sJ%s2aaÆ-[6)ULk׮ɥKdΝ[*V9gׯ<ٴiS $uUGe.]Zٮ9&Oݻw->ӄp¼I `ĈPv <`N:U0U֭[(ے*U*PS&N::tH &Zuڄ2{l%eʔQYre.6(6Yvȟ?@CGgb߾}j^ܹsұcG*TH?~9sF1jgȐ!r)咟1c\RT"ŊYfb!Y (YhQ#6 #iuH"<|oy0૎9^x4oޜbo>Y K(TM#`lٲE|:\xQ<:jAӧ5J0o1Q8\n߾: XЭ[7iӦ: 49Ruy޾NvZ9xlzLkL>]uNfΜlB⤔鵰FcŞn|x,\("|Ifz_f,]T#UVu "J-ZHmjb5>XCsТE ݻ}+ϟ &3glUO?U+qꩍ0S$'̥'"^t <#V<8r$GYq vo*_=ڦs;Sԛ(& >&$!jժx2Z[%b }l7kK'a,X@&U&G`׮]z%B2FbYcT'@O<;8<"BxIɓ璀7zoTG~TTI*H@5Z׋}>@p>\d/ x!E+$@7 } 3 /e7,"EB),H PcZy }Tp X @Vn)mZF$y4ͫ@y| k0?ږ:;eY/ hժ\Cb٦gIM`޼y5j`//^ToҥK G9 $ypoe˖1ܸqz}r…^z>Y $-`/x#|ť]v`=uZ  h"?RB),HH mieG$@$@$iGuY 9H)HHH "5GqY$ @l_dȐ!,Y2OҥKp }2_  /I& n '( }82o  2 }œ @8 PIy @ P,HHIBN̛H"L mڴnӥK'x#l  UZv @L6MN<>WB߶m[9{lJ`6v"@SkV @9`SdI4hܹsG8 G9sJ%s2aٶmԮ][d"H?vXuڱc(dʔI+&D6l(ٲeRJɓ!B7˗Orʥ߻wotۛ)رc!Cy7N:cɀ:?˚5 :TU& ]ʽ{Թ}٩o6Y D~ܸq~͛rep=zTN:%5k֔>H֯_Yf>}ziݺ4iDMdҠAP(QB5j$z+Vȏ?(<)S@ SիW >xcИmիWL{-ټyܸqCjժC=k׮ܹSqX,\Pgׯ_ɦiӦު8W bb Z 7?1דC4i\iӦueȐA}>O<ٳg>BܡC#\UTqpȑի W%܎ٳ믿fΜ*\۱AZhڴi+}!C|跎DHfY$@$k_3 .,֭sK ~޼yCaĉ裏*wٲetx۷z[^>܄? XFpj<\{VqO>'Oϔ;UZf>@RN-:tJT~u /^\-Z$3fN:<&S&;0-ł^?\BBe , yc+'lsh ZqY\Oik69d3"  [Mh >d( XzmBHHH d(!CɌHHHz(kZD$@$@!#@JfD$@$@#&H{ILBom 8<yٮz]/ k $ CiIHHf(6k0K$@$@CiIHHf(6k0K$@$@CiIHHf(6k0K$@$@CiIHHf(6k0K$@$@CiIHHf(6k0K$@$@CiIHHf(6k0K$@$@CiI F G%K&;vp#p1o% @T P裊 ܾ}[ y饗d̘17 л 'yIwޑEə3gix6 (Zl"pϚ5KK%f||cp1$GyU͚5JSwQ.Xٳ|qFumӦ>|Xw۷OF)]tQ}ʕ 2H ? 
H"E|$@όgA" b.zπzÆ eў}1ߎpC{>HqFիWWsH@$Z)Cs#!dG+ٳgcŮnC(Qk5.]:tRI&8bT$`[h8]"H /2'E#iӺ.]:Q,_<^ٌ  =()SɓӣGW}vI6}ڴiyWB߶m[9{No  %@bȑC ,>3f4iy敄kJ,) ;wȁtȑ#3gN޽dΜY&L5lPRJɓUz%xj|$W\R~}ٻw:v1ɐ!RNydsm&kז,Yl;v::tTVM ( ]v{C7;UC$@$G_,wޕPթS'yCʕ+qK|^|dʔI=*7oޔիK^dŊ?3<=eʔ`%~իUnR͛7ˍ7k[!M4FG}$ׯW7kLBo_tIv)bŊj#{>˻~ӛM6ZOF pD/":z>|c??uh׮#QԩS^_SoݺUup~mI*TPAz)'NTy?^:$Æ -:wyGm/ٳh)SFugʕպؤk`?.[7;u& O G͛ɟ?@PVp".T<~9sF(>\3fPw}WJΝUR,(\sEh'NqHC)Çj WyHH >z-[lOF1C/QDPSW{ٲetx@PnذAইwEБ.\ܹsS1D…eݺu:;i: g[A! p#{Cb5G{gcfZ5BEA[h25v*3Sjh*"-(IRY"BT4r{y_}s{}ysq?*Ç 1 z̎[\ffvгgOҥƁ|/_.Æ N7"3TZF$@$So0Ϟ=[.8p΃_aРA:gzjiٲz2e30qJ:Μ9SG;SNy.u  4cCcC QOF0{G@[-c<qLıe tϑ @0Fù,H>UR @`YYYaaƉ0']$VJGAbɲ (zK¡DE |e^laJ!HT0G$@D 0s} ۨ6o<:d HH&Ep+&HH@=*>#F$@$7)z1=  d%HEʠO&| I OLHH@=**dHH|#?js饗J-|H"Zx]}IOOW֖⅏v$R=>'oy==>NJ\bA*_^=mliNyϞ==7LH"9Vb }0ꙥ$ (*Vb @R*/Bnf]/^\N;4IOO_|Q=fڷo/iiir޵kW=߾}{p8Yd̝;7I3cqOԱw)H%FrM7ի+:t/@:##CsΨi޼Vm #hZ8*THʔ)#Z^{-s߽l7}<祐fR='J_ V}Rpa߶eQP>*9sHƍg-ӧOW3ѣ6O?]*V(0R7Xu/{o!%f^}U93Tcz綾^RK;L2z]w⭷ޒSO=5mVj9|Z0bS ~O}yIm8n8\r!.]Z0-L$ԜҞ={{jժ &o]vؑ- á5\cŊK/T`ҳr˟g=/֮]ko7 0x YY|ORs.FDP-ZF~PW]uYT)i׮N%Ԋ6?AЉ_r:ZCق&E vŊ^ӦMB*pٰE\H,5Fr/D5k&~aT?\{{^zQA %ݸq\wu+Y U4_M6=e{tФA(zH1I|}YgE E>_sM4n/L'`.cǎl2 EzP$0ա@99B]N S=nZJ@PИ۵oAmhѢSOIϞ=U-[͛u m &X`4fe˖-ҠAygsΑ޽{KZ^F >nMyGԜSFnڴi'@=AI.]4c1%љBXr^~n|о~_uܘ{*ePTɡ`Pퟔ+WCtsСm׮x]tm۶-ܢ̖ԩSzݝ;wkzr`zwqѣG;:_%40CUÇgϞnڴim۶&--|M4Iù؏6ҦMM.3Guٷo_pBZǎO?\Qz֭I'R.?CѲLŊMR}>lʗ/o .lvؑaoڵKypﵭf͚}fȑF>}kރh xohm|ږK,igLsPe ]+(Szz^oҤlS poH.~r4#z`^ŒR0{7u[`OINփy޽AL`tװal9$mx\xz0[[bӑLn2}m8 Xcq *ktMӎ^ YET'O>ކȑ#JKa< #F̙F Ͱtoe3Va{Tr;k\ ^-8UaL}` cNsShVVj׾P $o߾luչsg=_믿Nc&T_m8WV@0 C }xAnV(.L` 8%B;lB(PmݺQpx0[B=e˖et 1[j}rXО0Zӧt?|]Eg#8!Y6bЖ܂eAw^F϶b_v-/nuw 1bz:zY`_8\bn-7':BMzR X dħX<<`s+a? 
$͈x3kYJp D=1z0>̷hq''x '3fzjbB)ڹYaΛPcu#8fMb%6qo<62#۷o_ݼhدܬpm{X"P VBu(_?W",Ne8_|NCxʈ1 '7X` 8R+̮@qq(gу^X*TTPA͇  N:7/T:kZņ(Tny7sF{`TQf3e*l<ρJǑ@BFl~~t($~Ys]* vY8(vBlbz ;b{m,oĴ-m{mб5/ڭ_OѾ0ux]1$_g:%r(F'2DWE/RRwzG4r*6X+,"~ nn~~rCxܤHHAV ,OƼ[ܗy$}>" @RvX fsX%1/ L>fT H$@OpG] ϋ倩O1J@EZܒ @nJSSQ  }HHR}T3J$@$@c @ObFIHH ~T3c   HT)SU( O>~fA$@$@)C>e%  Pό1HHH ePѧLU1$@$@$?*1 \QLAѼ#eɻIx \Bh("i&_dT%q%/^,Kt 4mTZlc|DlAcET`,7 ѧRm1$@$@$'*818 *T-HH$@E'0'  T"@EJż @$@$@$JSW #sNIKK 6\:Ï?`Eʀlٲ_\\m{Ǔ SP-]#ͧ D!pEɈ#PҠA|ʔ)r;5}ꫯ%JHʕ_~rlIzdɒңGٿs瞓s9GYf9fݲXbj^ߛ6mrsq1RvaرRZ53ڶmql?\uURT)9묳䡇"@VtM۶mszMK.֭[+ŋk>6o_zhBB{ڵk۷+'xBN?t޽^- nkVV\)͚5|y(HhHܶl"]vҥKEo߾r|ᏏPHH itYsQStiS`Aw^vm; I&K.fʕO?5]t۷&c'SjU3w\pBSF ӻwob SD 3sLwߙRJHigffʕ+={-[qiZٺu>Z:tמGJiӦ/eӧF23ڵ3˗/7})[y~mޯzf3zhSpa'ovҼ޾}}o5/6ի>J*3W6/)VU߶mr,Y5M[-[[-ZTX,]ԀEF-!Q3rHZjcǎ&27_x$>IN:|8 sUP5k4fҼU^ݼ{T -xRߜ$ *d-muw 7(h>ZVNޟx ӠA=G|~r_W;sBޡm'ŭon}Qs!9C 1_|[f~W>:^E?e~4mg̘āb ~aӺukD6i$SdI{n'P@o$@IAyr!Yb|'j=r,X@M0GjJ ,k8ocǫfr9I`uaL2zݚqҨQ#5oܸQM01_SNr7J"Efylå tLc`J&MԼl#FKJ*N20a~ 0WTIʗ/ܿkfHyG\o7ni:;P~2fY|rϤnݺmN:YKdTWGKDN vh0m|5N;jժzݝHB`*z%H>PsU~WG^ӏ,]X󑕕s~;}ٲo>= :)\ü7eԩ2n89r,ZH GoBs, Ҵ-}wwk9zDKB @+/By!ÖJBx:Yj%Hڵ9s樢-YDO.۷ww8'c_V-5j?8ai[[I믿 ͛'ÇWaÆ ah1Ex<ʲeXŹgFVoÅň|֭?;Ahi׫WOw^';E&Xs_*@at#I)n{>7XPw#X@m[޴b:w&x@$@IBoQ,8>YPIKKS-{ppX҇/^hͲe̗_~i.9նm۪Ν;Mƍ?!xIDATO`#1 4GKΊ 6ga7指pڵ;I΍v|X p,DEHB:):G$lek^3\6m{Xߴiԩ*2eʨufx9 ;X _|Bh>8*KM:u>lҷy><;t蠊RJNrV"07n4`w:K8vL6MiWcq8͡ 裥=m+ 6(Sp)_jxCr4Di@$@$@IO'OIY2v8iI鞙3g:ׂz9rg7pIYr<HHB`e$@$@$$CLIENDB`docker-1.10.3/docs/extend/images/authz_chunked.png000066400000000000000000001006201267010174400220720ustar00rootroot00000000000000PNG  IHDRj> IDATx9,IHE AEȅUHI .E@DH d_oLnlgsթ_stw2&   H8( "@%͎@$@$@%@%ІX$@$@$@%>@$@$@%@%ІX$@$@$@%>@$@$@%@%ІX$@$@$@%>@$@$@%@%ІX$@$@$@%>@$@$@%@%ІX$@$@$@%>@$@$@%@%ІX$@$@$@%mbbb$Yd7{lgLRϛ7YJ%Kƌb|M{sqW^{5NHrI۴icsŰ㯭*xcǎNs^֭+2dP *ѣG+ׇ/^hJ:[u#9~<ҬY39{lD2s<ʭ[ܹsGd}(4 K e'"PZ5ܹT٣rŋ˪UEĮU5Jn߾-ӧ8Z^-c$uԒ#G56o<3jڴioHʕYfySN)+8W\ʒhРc]gGsy\/<{.]ڬY .4?#)[KNP?A ݻ'C~X1X,\t- |0!?c5 a`\?,3gVyIF6mdr9ٳϟ_p [7%z-#^xAɤK6doXbJ&X:/?x@>\ "UT/_>iٲ۷O/kV?O>zb 
`kdl%ķ_Rg9sT+W<ǧ9ڥ) ~yDDKHBϓ'q|͞ԩS*T0m^z… )%K3w\#SNy .le='O4΍o[V7n,5kgΜٓ)S&.]\Bٗ{mguȳ~%CZ;aW}MVSF|O|>֭_V\9CN/zyIK:Yǎ]G{+ݻw%5gQ߿`2~x'ɓjGWE)+`4hJX_k֬1 Z}vu%Kd9駟w}'ߍCSL1,sa ojåm۶);e֭j~O 8qB`dsB?TV>K.vL4I &/|U^R.\ ӦMSl nCȤυP =X"Ň@DW]&`O|>L:UU}W^yߎ>dd&"ɮl׮yW͖HwlI/z2[s(K[PK6֣n:PG2R޸q?Pڵs/Y[AU_#cJ|0ϸQL|~ɒ%x?iO CqYOu_scA J}Fo/*UJ;},X@v)7oVh_zGݰu$W!aR͚5Q:A… eY%w%U,=s8Ȋ-jXVr9sFC$rujڴ(թS8kT3C).eSJZOyX$ + }PB(R)SFȶm`CxգVϒ%#2>L믠ĔWOL}3W_s2?z@ 0M}/_ "U' smV6sx4F<-i)JqJfʕ+NŜ).yc%QWǍL 8Ksz`W瘯hc($x>Ǝk)S&coÀ3p5Ӄ -d(2(Q%@"aM$K׮]c lzLLqs;/nAM:Vyه⪓]uJGv]&3ϨGLx D@P 6(VSLX}J>ƪq2tP~BB;^01c0b}!\q lO[MqܸqYƷx`CԠ0]V|LvP .dM[Fabs=W㵄Ŧ?5 o|衇ԴE;V""<?D%>}R3w<]mm}ƌ#GQeX e_ƫ=eBd>R1/Er1o/@Xu! O0/ajdɒjݮ>2]Qd樹n9iDV^݈D}ᇍgfψ* HZsXt5"1Um!SG~<3a_lٴnf G653>}Z=w'9i9[G#:|*o#G=rj;-٬S|Z_D D<94}9&&&\Z>,Mf('ćU}ħ s ħ_m6 J*yۇNvt=ܼ%m .CXiln\-`!9R 0?e޽ |IF'$D] j߾d˖MV+tX0W `.]Z }iʕ^o͊h䍼 ?DX<21g`MnvxAf9- V=0v5,JQ#<A'}c#x 􇀹Fڵk՜bX#eħ_͝;W=5 yhcP'/إ]`ˋadBK .AZJ@'<yii{[j1{H=x_iIҕ9XBт@Akp Hx_S H 0R<.su1ߊwXp3 5-iX2'  mHHH>TdN$@$@$`+*i[q23  }, JJV̌HH#@%mKD$@$@'3#  PIǒ9 mHHH>TdN$@$@$`+f͚Rn][afG`kݍo2E&'xB}Jz @0,|+N5kLN111 Jߔ8*7dɒũF LgK\`% JspUJ:,hlM#W^Sxsx`EQ}sQݼ @$֣$@$@QMJ:# dTґzHH PIGur$@$@LJ:[ D5*n^VHH PIGrQv  &@%ʑ D2*Hn=N$@$yY9  H&@%ɭGIHtT7/+G$@$#(; @TeHH"#][nRT)I>dɒEԩ#ӧOxnԨ0;w_|Qm={Pl|yPd<5jjdɒɓK xM">瞳w7oʹxbԮ]>tPsSJ%9r'xBΝU렍g(kJ餚B2l0ywٹslݺU-[&/)V5kHƍ>S2:cRX1)P&̍7͙3G+iڴicpE0^~]VN|Xu 7MQ?O裏L2J)/_ :<:?.+V7|3`M6MΜ9#sx0ȗyM^S x1l"?\rE i۶:t(x `Eַo_6cƌDl'|bACe˖JA,YRz%oVy֭['SX޸3:ESYQP~R6mUzq.X\RN#g͚U+p2<+111ү_?u떺ٹ[YfIlٔƴlN1oI̙aÆҢE wuM'ieM_Uϕ+fϞ]0&W?)Zr?UګW/pW_uj(]tRfML:Hǎ&1??0IH I{Jv픫 nuU'OA_Bۿ *L23cV&M?OD~Q߿ɛ7'SL}=Oܹ=S\p+;>+`S]ͷVZׯ=&LPy>ߕs*WsaO\}w٪f̘|仭 W͛+yNS:uի{`F&X EI814,!H] ѕp.\X5'X4.\f1XwL'#j]ʕ+{  fXTBp裏UXHpkZJY:pwwo|  Dv/ZjI[f.v#-Cj"s-ѭr„ :,[Xp#;V0Ga` :N(ςe%sLYAgH}7j[bZ~XM_n͚5Sۗ/_#}HpU"_0`Gs  HoEl W=f}Ҽ뢱j% 9A`jL@V(RҥM"EIzsôkpCA#nRsI[H/rzU_E|E}ܔ&xF `7ni M5Y⭇?|2zx/Cu:ر#V0XÑ8w7$yC<{n/`x<f<k(7$MlՍ%3g +3h|.CO7͑#G9Z^I_1yw.HT(l(Gi_|^߹myѶ:obBlϓ#VH7NGJ.~/XX`bJu;6 M'`ڇ)8ƒ6q sIͅEp\Yx3ĉ8\RܸqLJ#S$dd zj,%,PxC`! 
J7nD#@ވŜ0#B_!u믿^pu7<L'G5K(I`|DңCq!}hbw1,= ĴZjjz.O1`}/eqD뾎@8 KhGtXa cTq@:ɓGV:3na6iDBxAn$pz<:n'c `u_E JqS 7#Dv# 'a?H/Z%n?+KDBy(QކFҏ$~^0T2kh8pnbWbw7{Z?ڕSGLGp={]3H'r)S/MHq&Z>\EfLG # Lh׾ w;>VM̒ǚ,ikQyHH 1UCb6ׯoupԜ9Q4 x[UE-߆E9j<m~YTʥRIYm %P q;j%Yd~?uMtI͚5eժUʎþ}"ATH$@L>]N8~=z'xƧ:%/ࠤ_|E9s>%b.]Y)( AJ!#GR@˘1IΛ7u\d>D2tPmSÇ%gΜҽ{wɜ9L}q|ʔ)RHɘ1tYݻ8p@4h]dI6m Ν[Μ97/RJ)ȳbŊҧO1c>ESNU5K2(P@0ٺuQo 8@JgE +^ 2KV+b3k8uJ&ܿ_;pk :A9"E y<{R&惏=ڄg0@N̏=Z:ux0/f͚x?L$@#@%6DL 4P#L$@#@%6DJ 4P[N}$BSljFT7/+.yNl]a"pZkJa"UA;^7ۍχ2 8ڄERQܑ$nrI%GF"9E%WUM 7qOg-K0!'xB}=f͚\sHtgA]vtRKK߰d-Hv`P!OZJ}:r$@BJ:RZr:\TԎm FMJ:;SZr@tdmug$aY!@E4Y* D+*hmY+PQ'zLQGJ:Ꚕr e \Tґv7mp)) PI;u([f" N(9^G$@$@!&@%b̞HHJJ:x t3{  H(*鄒u$@$@$bT!IHH Jב @ PI0'  N(9^G$@$@!&@%b̞HHJJ:x t3{ ~?>10T`7 d͚U-Z5>}͛7dɒ=qd6mZӥK'5k֔UV*;vԨQC RF8 $ĉףG;$Z/¸ J_3gS"fyҥɼH9r _ƌ%M4v޼y%Zy%JCʝ;wcyaə3t]2g,'OV5h@e&%KiӦ[Nʕ+'ٳgƍK֭eԨQ|@9:KcH(_G'\rIeϞ=-Zȅ m۶j + T>ɼH ڵShoA\S4'Lʃr`fʔI9"P 6TV< Zn)ΐ Xx衇TR^'eȐkێ+1J9 ,V;&O*7\׻ղXb^ [̚5KY R~Ҿ}{sV ^2Xc%.Xz ??Dv&"n%:pҥ"׮]˽ xٲe%yuݻwO֑quӦMr-;usVVJrq*^2SQR" (#ФI>vX…1c|ŋ{}YtlܸQ3gٳg޽{1*jT/1ydMbrqÇWʰa.)ŋ$WZǣU,w$4k~ܹSy\s'Sc% w 8;tX1iQ;i"N0*k2 L$ 5;g"N(9^G"2uTwd,?5 $J(9^G"ҭ[7:'H07m׳:O.Eڛ%cvmPIE D%z ]ϖ-[uEk];"|HH@@)Jsҁs? $1*$nO$@$@PI"$@$@'pu9|qN2,HF mڴ* LKNj֬)d&jԨ`$! 
/ĉ_|E9s$lq.]NR,HF GR@+QKMwܑÇ%gΜҽ{wɜ9LsK.5?sL<6{lUOgg\IFI<.HÆ ĸb8 _v䖕~")M4+V<СCzn߾m}vOZ<3g*T3|pσSvmuD޽S~}Ol O$@$uY7  &@%GIHt4.F$@$#(< @4LuY7 %ԟHXpQ&8t5(C$qoCj@w]$ LJf̎HH"@%mIC$@$@6(#  PIE mHHH.Tvd>$@$@$`3*i2;  ]$ LJf̎HH"@%mIC$@$@6(#  PIE mH 曂O&~^:|1{  DN4Bf@I_J׮]eܸqYI֊NJ DH |WRX1yerDپ}ԩSGd"%J \B1cy>}y֪UKfϞmlsH@JBH`!=YO]4i"ׯWyLw IDAT۷O_.6mRۗ/_]vIƍ,'PIYװ_~X˴={m۶*^zg}&J-RJ O*TXQY˰6m*7nTV K.?˕+Wd͚5RfMɖ-U\^ڳډ%@wwb zW@3^đ>}zWs`IBCJ:4\<T.joVIJ:YVڦeH I PI')~M5Yp*igTQҐ 8CbD*iKքtRˏJZQGeX) j6Pԛ6mr[Y_  PIY/(j& H(*鄒u$@$@$bT!IHH Jב @ PI0'   uViҤȑC2e$?\Ҹѣ37n0%vɓ*+W$6+iԨ5*Dr￯0vXӧWuv-Yf  p *-pBiذԨQCm&KVdnXfׯ/{Qe(wߕuʣ>*۷K:u$K,RD ?~:fUPU "RB%ۖ-[` mn:i޼|m $1 S PI27o6hΝrEرO2zhpڵK~{ w^ٰa,Z8ʪk׮rqYvRP԰VeMޢE yᦇ Ptr9rDYbWٳRR%۷[% e˲eȍ<۴i %ӧOWUyV7kL7n(jR(ʕ+u ^y@'$@$JT"1,D.\ ɓ'ܹs]KXٳgҥK+_<_իaVVM:)K1ÑVX!SHT|ҳgOẅ%ۮ];I2`T^J=E e(tɹs~A9rH9_٭[7;w@:B֊+3fUyV725]ڃ=bo:=*u,,A$FWҸ 7n 9\p5ZM7oޔ˗/{F`NiҤMvP:HBa4Vرcri*?aE9sF (/WKRa|ƞ5k,.Z̙36mڴjwN}6꥓[g%S/!kLL|';C3`lBߠ#VpٚS*Uy̻qU%n`#a~Z'M?##1w kwUԼ6zԩSXɒ%S=q/^\ 08ԩ1y0n(B <5o޼0*JNsX駟bպ٢w}ڗ IEJ:xnjߔ>@)O5q+͜9SΞ=[́:w?^{a3bƣDPP˜\-[(S*KZVbJ +8\X eoߖ}Ʉ K.J\4Ȋ9bC[%a0k^h:'5W*i+q+;ḩ+WNPaŕ&M"_zm6˼c 20~zY|ɓGBիWO ,TX'NT2?n䕱φUF:e-^Xl G;t|:'pk7mTz~:K Dx#! s{n0TP9$@vH(y[6L/2uqCL5Άr~Ҷ$̟R!D؛UɢN p\eIcA;(^E Ie\UȂvY$5bHH R2s'gwJܛq]b<&PԈ@; k4.]w.Z圴&%X 07L!I @bݍGc~t(1x{ 1؅HBI5JKEykD$@&*% TԡRћ?t-kFN%:%vjwt\TmJFLJ JE޺QA˓ OJ(n= ڭ-z3ZI vFsP Яu\H\P6Y˸Oo0 @RoѣZ޽[v%?۸z*eݻW6l -2N9~]V)S(j(<{yuUpCѼ{ᦇ Ptr9rDYbWٳRR%۷[(C ^-[&@n٦MUOx0(>}6Pʳ!_+`qFU|Rre޻woύK0Zj>n @PIu3Ӆ $y;wnr ;{Rtie9믖냽zR6Z(CCʒȔ)ڿb I: 0@Y˗={*deɶkNRL)`T^J=E e(tɹs~A9rH9_ ܹsjŊ|3Kߪ<V`׮][A|^7HS>AEm&u ppMq^qÆ֪oy4G`NiҤMo`N)RPemX6ͯ+=v옜>}Z.wXgΜ .xT`Au*_g͚ -*s1M6,n(vzVYɩK#|; Aa#[z& *j_*&W+i5*UW]q)J*G+U'M?##1w kw5>c5SN7c'KLĉRxq5X@_N \ 满1+d͛7o,9;ʳӜ'%/Xa~49j 6% *jdH \)h@x̘1* ~M)ã:PZ j$XV9srΞ=[=:w?:Aٽ0a1Q"((aB-[ ԩS%mm1m/暡ga%uXvmٷoL0At5J#\>U*JN<1 Sڲç~ZbZ5~QTԾtM$.TV Zo߾z nP̧+WNP! 
JPd&MRp^Zڶme^1҅{c^~,_\ɣ2dPaQO8QES#x $|1ǍuAZxZy# :AawAɃt NJVY' FB"ϵLd%gDE @2Di(KK1oaÆEX.nPׯ_Rڶinis2u0`?1z| ʒƂ/@.ެ- $5(iDX~dЊ1L$@$J)CƳq%͸q1v (jm#ڝHBE5JIK.;rNZm&$IH 1\ƣL1?:pֽt^D$@$% x5u(S4 4`RQKEoT۶ 84کѹrQA;m( D3W*i4(u4wk{Fm/OF$<*i L*hD$@IEJ/'UtnmJFn!@%햖f=IH"t5& p *i4I$@$q#(0 [PI[֭[I&#G9\V\i\qQ7nrI+W4jHF|"9Ç￯0vXӧZ_l+WNkbdĉ\MN$4… aÆRF ٶm8p@}UV`WqYF{9%1pߤujsu׿w}4) PIwޕW_}U*RJI…^Xf;vjʔ)RHɘ1tYݻ'V;wse 9|6^@Y{ 6ȢESv*Ǐk*e E ?^?x`U&O- 7=<ҥKȑ#ʺ / PPl*U}޺uK02nd Z 7lӦ'L>]]VY0ݸqԪUK0Xorj r)``RLl}7HH LE7cX:]pA'O.sֻ,g.KV㯿jy>ثW/emêV:tHR<7L2+VԩSˀ$UTR|yٳRK,QlvUFՍCYPM.;wNY#G*Ԝݺus<#dXr)Ϙ1ø_ʳi|ڵu} k߿_̙#|>T^HA$fWҸ 7n ;\p5ZM7oޔ˗/{F`NiҤMvP:HBa4رcri*?aE9sF (/WKRa|aeΚ5K `E-ZT)4oڴiEoN}6꥓[g%S/!kLLR; ֡0Sߔ@_TԚ$@&j%4\TJ5:on8eB[%0?\&X}Ցޘm1өS̛֓%KY{ )^,`pשS'cWy0n(B Œ͛7VYi?_~Q0@ npò7'}SQR6 @VIR 1cƨ(w }|RZ j(XpΜ9SΞ=[=:w?^{a3bƣDPP˜\-[(S*Kbw*_5CI6K:!۲o>0atEɃkY1G|(xdUyb?  0eAh[Ȇ<%*jTH \޾}{Յgj@(+AM4IE4zji۶m\y@Jüe'OU^=2dPaQy_DS#x $|1ǍuAZxZy# :AawAɃt N؛6m*{V?}UyVr"s9<_[<17?MEm @ $Npy[6LYM qM.@[DmûG-in<~e>$`&~bp%_<]hQY[HjQ҈*dA'u#"5bHH R2s'gwJܛq]b<&PԈ@; k4.]w.Z圴&%X 07L!I @bݍGc~t(1x{ 1؅HBI5Jk*Pv[+h%& 5W)iumY3p*)i4Ss墂vnP2fThP*h֍ ^̍H xU@DE|GqTnmy֛AJM3R8~<$@ z%f6MZE}}H_y3N.r'6BJ--z D*k2 L$@$Tnii֓HH PIG\Q`  GKoݺU4i"9rPg|eʕFGU_Nqㆱ/+'OTy^r%YIFdԨQ'R38t萊ΐ!+VL͛U$@.!@%dC/\P6l(5jԐm۶Ɂԧ [j% ,2TOi֬Ԯ][.]$'Nnݺɞ={J$K$@qݻwW_CJTRRpaydc\L"E3JΝ޽{rae߹s8W[f*#FUJ댓Ej`PfMv:B $[lRdI6mqɚ5klٲ˱u‡l9sݻK̙ej/_|s_~/_>ɕ+ԯ_Ptzwnݺ裏E{RNɒ%(QBƏYߪ]]VY试_~EyI*qt]rHBAJZDp3E(]pA'O.st~Xٳg(X@zR6jժ Pu% iŊ:uj0`R>˗={*deɶkNRLիW]CYPM.;wNY#G 9jr¥AE텄$@a&z%pƍ-õ W3U߄˗v#L4iRN(gRH۰Dam__z19}r]ﰢϜ9# З%\V`UpcϚ5K `E-ZT̙cd W2(:N}6꥓[g%S/aǼ߼AEm&u pp UTQǍB(qK :leG ]鍹kX?Q\9:uʼk=YdjU-^,`p#:u20pu3/TP,y k޼ycaaU<^L@Npuc*oPQ[1 PpVA>cƌQQpoJ`|V 8E6{l5|p? 
eoØg(MJ޽O_oiUyx #HxsH&;1jw 6l_%!m ס_CZh3soLKUtB,Mut/kGN#%(@8V|c" PH̝7Ս+sou &cM=PԈ@t; k4.]w.Z圴&%X 07L I @bݍGc~T(1x{ 1ȅHBI5Jk*Pv[+h%& 5W)iumY3p*)i4Ss墂vnP2fThP*h֍ ^̍H xU@DE|GqTnmy֛AJM3R8~<$@ z%f6MZE}}H>$"B !mkׯ>'111qv+snQzB87tfleД\~i^XhJ`$Wlu@GUtBK"KC(1tB"EƂ".(UAzQBHJ$ I织yeݖ.n='yMoysg3km?3ÝXέucFpiXHtqF`ܺ\7F`FK7`F`!έucFpiXHtqF`ܺ\7F`FK7`F`!έucFpiXHtqF`ܺ\7F`FK7`F`!έucFpiXHtqF`ܺ\7F`F)C,ʔ)#2g,f*Ebbb6l(˱ަMzuVtxطoغu#X3O#9,Y%J>H:uHNre5jDݸqcUطL{ԩSG냪/2xH>}z3gNQ~}1w$5ےDUv,vaΙj ?aÆ )% it;wHtR.-ڵkE&Mɓs&ݽ,ŋ j,>|Hϟqyˌ8#IY߾}{1cFlowܸqZD?nݚtR'|<;v 녇׫W5f:ӻFHC :-[D5\`Aj*R}O:dGΖ-ț7) J~7|#={F]ꩽt֬Y"{$a9s&iٜƘ4~嗴_dx7*j``hժ'́U씈L; 6N#"""DBBuܹsn9l;mBnW=z$kQX1R?AUګW/qΝ$,`xh׮ي2e$~m;>a;m̙3*pmDRtQ+ u%J/Kh-[O///Dw1?}]ɨ!+B j <yf$\mv1;|+W 1S[hX}"/Țo+ݻSWlܸ"o۷qlҤI|`|#k^tItЁ4OOO'OrժUK%cfjgNz4BZ Ay po.Bg}Fϰ%|Xz}qTw6mT9r~ᇢSNt/w׏#ӧOL%S3 +l_ _2f(~7ѹsg%KzիWɄ~&lX-)v효ZEѢEEϞ=EҥŔ)SDZ=Y?]h{GW@Գ5.&Y+VpXO ܁L*yYKMMf0q }7&/r޼yDaÛgsjgTaFYQܹ4qqqbDbbb(_֭9**wNσ J722CBB-ZPX.]0ܬZ{쩅GDDPԩS0{[{+| &l&ӏ103f4ܹ3[b iҤLo…N/ķA211Q{gXXڵK kڴ)7N ĉ)_ ON26]r +㥷wIpP< "S߮WFGGr#44T 3 4ժURߝ={6QOOO2 d^bTU:Q cA] qPcG:J+!%^J .,V4X]-ZV8ujp&G@y>~ V_ժUKRxAP7l/X=RJpZ+WnAzjZ@ݭ=zF`ͤǥ <逖mR;U{ûEЖ *Z@8vxxtt?VX+[;VFn8Bu" Ek)53kx^>&]ͱW"8`o=h6$쭆vthEEQtU,q&G :::Ihт߿/?N-Ɩ>T {OApv& x`oog@U۬OP! B-G[o-oڴi3Gׯk öID_?Ǣ$W\}5aRJ>;+pUP;Si8M@H={V`)n, ؔh29fz2ՉM9G@mR }jTjk15R+}Su={$kTe؅&Yw&I!OT8| `/F:K47f[H\a,d1n}όÜFH1c׮]/ڜF'_ĀDbf~}Ϟ=lZt0`b /k@*944ԥ*T }8Fѫ/(#Ћ[GMeJM4`Xΐ0Lp)Ҋ<n+né, Lk&}X#Lg,ÜҰ Ԝ輘Ŗ5Cښ5khlj[XvNlXB!1pã>ĠG-lvXA-FG8{LH`xj )l+)4kKI0|p}ȋ/ʽ{ *.]{bcce…eS̙3eLdҥ)>**xh}Y^^=sɌ3ʼyH`Qzuxvg7RX1ٴiS L0OnJz`Rٳ/^VT)n:[xqzjLgXhkˁAٳg ϟ!xb-~ٲe2C %22R̙3<ׯ8KW/[n$-WaʕZG%VJȏz޾}[_`7o Cs,_?eU u:t3|pYfMz^d Oj KjxYF%()\;j ^ޘm…S>|Pˆ2M’[.0a_`R@:uD\\8vرcC_x!vIjnpׯ/ҥKgtf"Udܹ5XNj˗/9sRRAztm_*UDfDnD TPs gfWC*Ylj*{…56P}@P1(P@x{{kڵ{~͕y ^F 1{y'OѫW/1m4qQe\r}lٲeSTvmaHŋkϖx"C3k@nU~u5۠AO֬YUr8(Ra\HBZoF `޺u+ &M5w\ aUnm9ּqF}v/X{ 0! 
{۷H!Ə/#,-]ӧO=c!"KUbU69sakƌ2sZ9`F=;uD_L*r;X}}}a4G\;"^߷OpC;Pr˅<8F?F't1+A!3]@@yia +Wf͚̙3'y[+5«L~i'ɓ'@ yZFWw5q%yfYlYrT ě V'IDATnJH?<5jDB@]FK.I`*Ug*^H BBB+ 1q&K cBOcu7'shطvx{{KxdK UKnN0#Pzdv苜9p Jտ2]V K7lN-fF 9|p 8Yf8qh۶_p FH[,BCCE|Է]ZLgu gF`Vw;QcpQF`=RA}IENDB`docker-1.10.3/docs/extend/images/authz_connection_hijack.png000066400000000000000000001135741267010174400241350ustar00rootroot00000000000000PNG  IHDRN'ng IDATxƿC^PDz&(•+xEDAKG"TAADlp- ;HMfl{l7|3s\. $g &    P(x# (d$@$@$@$@{HHH$@'(&#    '$@$@$@$' '?A1 P8    ? P8 HHHH‰ IOPLF$@$@$@NHHHHON~b2   p=883geRHɓ|(VRE%C F{6ڰcǍp +Vȶm_p祗^2ݻw,[tbs$6A۶mΜ9x= SHq0 8t<ҴiS9qӫ @H TVM:wd˖-Y8K.իW[V/[~afϩq$@ '7H o^E&H?o/ o͈)"7|Y ( RٳKZdnб!iҤ[^y)X`|ѣG(gΜ.]:W]`q[IBs)˕+O^gΝUdbQ_.+WNҦM+hbe|*+G*=x 2D\<6ƥ͛7W8 oݺ pWXQ/^v{1N~P?*dٳgdʔIF_nذt$ . 8@~\"^g϶]ܹs7nJ*qCϚ5H۳gO.E2eʨmkW^m?~@N>+G8+FڟYvy;h}Ν;*^Qnc \UTQ3glmڵk5jG7|pKyi˗/:}>6ѣ… [֧`#Gu񷝍5OCAIhdžf-rJ*oܹq_e3/cdSOhѢX\\k޼yFAN#@D,O?m̾3~Νxx y:w\5#jԨQF:+/۷ڵkO͍7dF>FGQYf,YD [oɟCڵkF}mӴuV ^O?T;ӧOiӦI&IժUձ_~E}Ymo54hl޼YF)qyawE|wK_o˺uD{X8q| /INjlg_ƞue˖_SÆ vy&> 8ӔCf|x{7{~WM 4Wlٲ\7?8~]Wڴi7_x`wq{:d/7`xPNٳgWtYvr;7o^# |/_WO>.VT)>ƍ޽[ 1.$S\ɒ%SiӦzѣ:onS 3fmOIj6n0혙 28s->{g*fS hNѰL G썧ۭ˅2e^ǎEeƍ+TdXٽk7 τx /\kԨ!ц.\(CO̻}gϞUEexbŊ(z?~\U\Yyt&M^P&<5k80}d,۷:spмoX}駟ݻ*9fm۩=l ^7}ãP3ȽYx9UTIoKɱ]Ê@Vcǎ`9\R Q`F[ʔ)/ $Kf=29fgN{ese( yꨇ<ϙ12Za؞f>v9MJ~mf hPo3#_j69@Z`z@%0IFN%`$qjmY/BM 6T ̒CEUWyNY̙ϟ7ӧfP}'wF9V 4⏙/:[X.myq+qMJ1cF4k,vj6ު@>Sa' رc}UI (Š/>ӡ7o^ |1mNmp  '(XEofĿ7N 0 N:`u]ch*<\-t]vUIѦˇ~(AآE Pt>,!F" !>=jb!:uK!NpݯahTRj;)`֟k-*+YUۆ u!U @ډ c"ҥK8jX4z2T 'C4p"88'Vu"NCV 3p|0O@=l1 r|xI`oynZX _wd#X*gϖ?\Kwo{Yp z) "rJi֬t a ' x#]jӆWֻ|' ' ((QB{9{ᇍ{U߳VC QyYH2eJɞ=ԩSG͛VsA s"fVA$"y5+7|S*.K '|oپ}l޼Y/_.K.)kJFdʔ)@X%Pxq)P?˗/$̝;W.\(M4u֩/9rĸGWUŊm~rMI:[58NܹsGΞ=+_z۷Ov wH Dwޑ{O%~WH||AYr+ӧ%W\^yz/-/Zsi&駟Í7]vw؂㭅`x3g&H0_ƽjٲMJݻ|oFyS I*C١P='B&NI<6lUQ(XZJ M6-K,'O"~իWՃ%:',YfU a ~', }')S& _hBnݺtptq92D6k{ǤD#}c=7Fr<*!~wz! 
B/C 1Job6o2okM$"GꫯrDߣ }i 1o3tL"i@ /[L nݪx'ȞyIGLaWL*3f2:ļ8}ȬY^\ ǜ$%duZ ׫L fpoc Ak֬Q۴n / 2tl <52&I@(!Y0| %v ,Ni^H0}/MQ^0|yb-~o ĬEh8k=v \(%8bJtDWkēy B<[Z{c_Z5N| i#l=/Ɨl|n0SQ&N#M *mf Vy\.ׯ͛Wr)u֕;v,Zh!Ov)Ѩ5ϙ3G+&E5>pO^^uU{2p@A]`k  VEZ(ڵk\r,p|Q/oދ/oC_vyb?V^zKJe'ͳi6A4OQBa|iV)SٳgaDO ǡ݈'ðpslcݻ[) 4mn? "l"|$@$kkT"xx QhB `Fe˖/ aV_Fw0%B6m'Ȩ+0 S%ttw]&ē6x~MAZ K2%СC:;)tΝ+SL mF,4+; F|b0iƍ+*asfûg%TM$@N){ئ}"[2vX1OcƌQMB۷մ׫FY(AΝ;9{'># S1Ye,`V=ܣDD޽ ^>@2gάbxG 5 s X &L @>3Y.B<[œ#5q.k-֍Rx8ǺA `gy`&`K 4}-WV裏*ǚ{ 0 J?l<3=x?"HJ8%%]T ~ qX[y~' <.hRfdŁ'+*L,?2 DoıD$@&q2& h G?]T$@$("X{9hׅ? $3 0w'H"@Iź $ #K,Qe˖MߡCA;|uVYdΜYJ,)Ǐ%v4  0 Xjɓ{Rpay>Y9s+oQ{Yz,X@.^(ח:uꨘ(A… rپ}ܹSKAoesIÆ M6j{޼yҳgO駟ȁPC;vȆ dѢEy:tHճm۶>H<0VOv @,pޏCPk&CxL2Eyf`Ȑ!/Oٲe?4V0`LR*T =zxlYȑCU&{է,?3Ⳑ'▶l٢` /MeHL)@@\`'TJ<,7ny Yf9s$x0fg7V^ x. Ž=bƌcw: CM8Qy(kIt4iҨ t8kTE4`H P8' X< ^~I6L4Iy0̢֥K5j>duɊ+$wRre] <220:Q~Zy*U$<*3B,f @Đ\&MW^eS,mcv!4 #awu 6l I1-lDg1^Xj)|YrR) 3 5 HH#l; ps$@A! | D hELnݺ!F$@pS H KHH ZP8EKO$0Z4]za5D# ;NvtxH A"M4Qu4 #@dGH& /&`^@$S#1l, $-HM.\,~ z "U4]xQڴi# 6jժ^&$MNl5 ~")uo/_^J,~8P @TP]Tv+E%W\\\h+eSf˗Oԩ#ϗ5jؤ)   @ o^-[fq'./#p9;XL իWKt"5 IOPLF$`O^OxH P8E~$O VH P8%XfKJ)V{& @V@H P<7 #! fQ$K(bVNl) O!/ @pJ"̖H 6l@$@$(p6' D hEHHH $(B D hEHHH $(B D hEHHH $(B D hEHHH $(B D hEHHH $(B D hEHHH $(B @t钜:u*i2g$@P8C$@ %KYdI4h }UǛ5k&qqq"E$M4nקMVjԨ!WWv$^ڵ+:@Tpnd#H 6̘1C>^ݻw:u۶m_5>ȸ©UVrq$bϞ=1ueEI P8EC/ $#g. P 2Hԩm e[r_~7o^ə3ԭ[WvءiѢ>}ZڵkDۻ]ʱo8 D hE@PkݺuI: & x ^3b~ۅ" /4H =zT7o.sIΝeVYx~0vƏ0t.`jr1A0;w!; ϟ_Vŋw7`x fgZ?V^zKwߕ:J𶮃]9vk>zCG"SK$h*QD@@d ʖ-6 ?74rIduߺuK "/m'Q 6իWԩ˕+!vu+Ǯ~/_Vk>x8+ c 4nX;;v@A!i̘1*̙3;tmYl_^b CfRs̑;wsvynڴI{Jtp!հ Cam#0$@$@ nQDT&@~TR%"g^J3" x yꩧ͛bd$p~b-IHH "$O\6m)"nCVHH"… UMw!_mVΜ9#-Çˌ3qƲn:Ǯ]ҥKa9QF>` '@Y 4uF ;v.ڵSmٳL2EdgK,2eҧLR*VJ45iD֯_O_~tE#ϟkJ5$k֬v @P84!X$mx._, 9rHi޼y]|I)Z[UjUI<@Tʕ믿իWKӦMݮ @P8=K&&EիWB;w|J@տdP%ٿɓG{'eժUj^ZGQ"?pr/ g$Q4 r%9 0^=z_~EnoֲeKٻwcu-ٹs=Z:u.pf+WNҧO/>|ᇒ?~)RlyH (B ő@Vф~C,< ޤzرc=O,!^ 0ԫW/B"\ei=* h$@!9UaMH"@4&?쵋0 K J(ď>1yf6mZrq,uMÜFøAN@VB ES8$@%@^,fa=$Qѝl ) \n4E gΜl|y pg$Eڷo/˖-8鸟(j6 &.\H% Ñ@L2,Ԙ.]Ɠ D? 
cBBN:O!!BHI)Y6 D(P6H  xHxH 1(Cג 8{#$@* E/LLD$(@1= @$p^bI B hgIH  xHxH  6l, Sг`O4 NЋl @HP83 !  NЋl @HP83 %.]SNRV NQ͚58W"EVn޼Y7n,ٳg3ʃ>(V2c#M4nMVjԨ~-a9rDjYe׮]*`器 b  p f4c 9|zu]0Io۶MUnR~}}˖-{nСnZ-Zր>ȸ"©UVrqtѴ1 FӧOTΞ=k\< A$@$VNa.PzeȐARNmɓGn޼)/ 2Dx )S.\X^z%6lhqkdίdɒׯ+Ә߿;%*V@xiۺuԬYS2g,kroKr$w;ȸqB +W.y뭷T6ݷo4h #GTefɒE.UVKΝ֭[yu],Yϖ- ?PעE 9}kN f=c^ |' ?B`ƍl߾]Μ9#O?tj+@L0A /_|Jm;wN6l(Z~GyGɑ#rJ '|RҥK'\{]uÈXݺu;w!sLyz-)S4y wgϞ\Ŋo߾*/s=6UޱxҥK+/uR $q2 P޽{Rٵ)_|g%.^Dc1a^n o4  zqaҨQDKͫ<;텇04u 'E@(ĉjX|W'OEO}x_e͚UmB0ܧ eHZ} 14h6&mɓ'w+ڄfϞP?x._ ~zB|Վ #=8%ϵhzjseUT9s"xBDRxq[ф[e±S *̅߿gnk%mm}ݮMsΕ)SȦMpʕnشpGB>x)!x @4pJ@oFhBSH!cƌ_]F!48{O % S'N(oUk*e˖jl*KGN:%(YT6Pn f&K6 'Q#| a='nS<% 'Nb$&D4#6Ä@oL1Ka6ڵkeJL J*)O A`I&ҫW/JhyCL֤I=q`mצgyF*Wf"x5wayX.]O>2j(b3,* @d$@M  ~1U)0\a532Dׯ_Xi2I$@IJ?_u$MpA8Nt&ý   p#QݻnNtߣaAh"  DYmSL;&˂=ڼyڮ^zx+53]?tV&)Wjժx8p@{'Ḱ#G|U3{/Ol߾]e* *?Zl)J ZޑQ)ē… %G֔7l5jdHΝ;W P!WٳgWKPZ E? #KHJd̘1:9IH bDŋWEK/$YdO>D+uշf͚I%W\jѣG{r߾}Ҹqcɖ-zkNl޼yRlY5kVi۶h.\X+P_RkU~A~a%@]!X|Aʔ)ܺuK]$< ֳgOw߹1R\rE:}pn:SK.v4mT%<ᅪvߵl2W9\ɓ'w=z4^W^u-ZT1C^xA]ߢE ޽{]ɒ%sUP7n΍=ڵb ݹsg'|j֬+]t+WN<ʒ% mРJxx3tPu݆ \gΜqũ#F|)*\˓y=5kV'Nxm};SL)SN:eۖϻr̩8pwF/UOgJ"kϞ=*'pݺu˳ .O6Z\9U;^~eu/:k׮ӦM޽ v55#k+:ɟ?m]ٲeSy mԨW '>}z%mF3_|۷ojuEyf%E׽G9HN`Mz!oڠ4@B˛|駪:u2k4`b p*C:N nxp$&p P {W2`-˗Kհ{&M_T:c M:̙3.Dof, ΝhE 6= ?8 ?!dȑj8 CJ ,Pi1+C}Çˀd/^\l٢Ȑ맠y ??v)SFJ.X~Yzgo+WlB/kC`a}0?~\30M_2;ԩSj6mEؾcVbE#߯uۍ _ŋMgnݺI߾}ՐYWs" 0pB޽{U3bhxVZi8g (E*_4&1 ӂ_5#_W%#FP2L:UAވ30LkC5(Y|TX^OA&V;wO?T֭+CX!^8*+Ck^̮-nmvbCcĂ{y7!uVI6 23kmz^6ā! ak-p]7O$@!SڏN3fx. 1 ʫY F=! Ҽ+NK`ڵ^zJ$BbŊxm<rL뭷̢mO ʕjժ)qj+3LDo^M8x7T /`u9 ?c:^.; M6\2e: fC3E+!PI]_+顇җ%@Z+C8?`e.#ZRB r!AիWW dɒԩS#8)[W_~#.J@y.wͫ qVu}Gy5j+s*VVm0k |PqYµng}օ3s=իWdx{'pb焾5A v7efpdP4 @Rϲ(ڿAF$@$@$(g`1) D!ۉeE3XXUEO7J^ݻw:u۶m_u?#]v)ԪU+9~Nw ߨH g. 
P 2Hԩɑ#tM2e$'OVի'YfRJUz%yJΜ9nݺcu>}zyץVZrn*5k̙֔3 4~xu7|pZϟ_:w,nR罕<zm쁉6xF28NN){'%о}{54Zn]`„ [uYɘ1߿_ ׯ]NRË RC}_|,^X 'NHJ^3z#6mxȖ/_.SNC<۶m+gΜE)… zz IDATeΝ_rʻxzJFb4slӦQƍmn@$p^cI `[֭+rgΜ)Zy3 eI* 0@RL)*T=z(N<>^v!yJ߾}%{2i$ٰaԯ__r!H?a1 Ci&exe˖MJ*%0tf͚%ʕS(2d*TH'7[r?]S%K,K/I bŊkyȗ3gN9rԬYS .,>\~] TGHO]pvRԓHH0&N(«\\xQN>-08 ԭ[WF%WVvI̙֭[+#cڵeiڴTREqcetRٽ{iFKiӦy)Y0<VX! f]{ڵ\&^|Y֭ .Hz!ܹsr)ټyVjU?Ν;`=Ο?M۶mUӹ׌!kD6=bHQӟ=2d`d̘Ȓ%C sQe۷O=ݫnjcT^ݫEƎkԮ]ۘ9sQL~8s1k,Xb^q#F0:t`]Ȝ9kܹӸy'U'&~MӞ"}]AUu>|ЩS'7ߴZgvMs9곞Suεi M0Նm۶)ǏKnݼ-ZT@J,Ç a5}u!9z:ƨbccf̘!˖-ի)ٳg+1VqrZĖ Yq(DoNӤI#nRS~h. $@$@&"LYC(VZ+) ,&LÔ)SB j}'O͛7S:ʏSSd00zꥦ^t?~sQc+|S&a?]`Yߣ8~D9G}zye.]:ٵkm(]tiY`d͚Uw0.X1Ֆ2܍~:={)@!X=HSG*rzH0D]<槱 &&&t$x)#1 ϨH|>%W`[LXM͉!}2DGGT{ u"  p$Nl*E$@$@$DqrbP'  HF%Jf"r)HHH p6gIHHIS"1 @ym $ Dc6   #@)ڜ5&  H$Nl$@$@$@GS9kL$@$@$H3HH9o:G#jV4ڲ Do4Cɩ:4   p>No#jH$@$@$4THHHh89! CprHCP    6$@$@$@!@! A5HHHOۈ 8 '4   p>No#jH$@$@$4THHHh89! CprHCP    6$@$3HTTlڴKb>J %@)l +W̙3O>;烙,H pJ,H |gRtiyWerѐ*a_ȑCʖ-+&L0jժ2}t% ^x#nݺ2k,9HR cϒI bYUu4iH .,%J$IR Hj4˄9߀Ѥf͚}<YVa!?9HBڵ1 8@ZBMH&O?0@(S2~uCׯק^2eK.yeȐA4H p)꓀Shr gA$h8.N aX=W4'+A$`ASupE$`K!@˗OMt'"@)ȳ\pݻ… -Gߏͪ$X9w\2HHH)$ 5fΜ99$@'@m@hԨ!BHR ԤϲIeh}Ze&/JڵetRٽ{iFKiӦy)ߊ+˗/+mݺurW2Zn-͛7QFիUvI̙s99ul޼YKժUմfܹwy%ӟm۶[O^$ 4V00¡G0(.UҢE x s;Nv*iҤQ.]$ ӧ FL bi>}zyWT*UH~dʔ)ˉ'TerރC u3g@+c ժUS~] #F0>ٷo2W_ԓB$@$h8%`4axw*Œ[vLȄ!V.hѢСCrQ5}n޼07c 5+oC%c:Fttqel,YR_V8?|֭[j/PyVzj9$ _4vh4ChTL F|nS'OOpT\FM^ԔbZ?AVX1Yj>1-̠*JOxB$@$3² )4g)8?~r?a4n8F0$ag+W( U{Ν;5QXꪘ2"MS5t0'`GNF)Z<>-kճ>F Fx+)DLF$@$@$h8ЀXe:IHHHBbϟ`AOgK!<)HFS g Z8)LJ'ŕcHH|;~?K,Z/ ۷QF4Bdd$@$@$V4в4IIHHh8%Qi<% pJDcxJ4f!  FHpF$@$@aLSOOI$@$@$@aDS S\\\0; @8dC+xb   p?Noc֐HHH&4lI1$@$@$@'@m DM )HHHh8YC   p $Ő '1kH$@$@$`N6   H*FGG'$+nܸQ^*XwW4 +x$;*)EC>e,w!!$ &$@$@$@$H<,X n!83Fv-K*m ֬Y#ҪU+U![#M]! g5jTHEd8=! 
sc";wN N$dGL؟L0xHtwdP)   'VN$@$@$@$@ɑBHHHH[: 8 'G6 "  p"NNlD$@$@$H4,THHHB9nţIy&M48hBK?^p$쏉(>ⳄfrP=9(MIڢE Umޤ¼ '6MxN9sC+8Ugc    @HHHHLIHHH ' 8"   3Nf<&    4,0HHHh8iHHH,p(   0dc    @HHHHLIHHH ' 8"   3Nf<&    4,0HHHh8iHHH,pڲe<R|yɜ9ȑCׯ/ӦM[nyhժDEEI||ޥKu~qtqi&YfMrLxU?EwqdɒEʔ)#?e׮]."кuk/ڴi(Iԩ*ԩSAիWWu9b%_GtI}z}z*sh*W<^8|7j4uŊ>}zS2s"%IJFzK'=h"6L7d4 #;J× \׿TV-o1Az|9R0}v֭[UG}Tzꥎ6 .G/]{pŔ5F_4i3Άadg̘Q&L {L7lP:qaW-Rzuydɒү_?뮻dʔ)r jʽfN mBx'L-%6}/8ï FXL'b*l0d ~"vء.\Ol.~'j47}IfG׌T"/˗CHƵk$e˖*ߕ+WTΝ;cǎ}aÆy]hޭ[7;k=/^s}̙ڇ~fׁl+Ѷ!=z?ƍٻwo/T7onDEE?WstkGoڴ۸qc֭[2 kڵS}]5|z&M<ry2%[v `Fl }vu#F鍓'OzӑgΜQX@W֭k\pA'7&Nds`С*M5xh#X5kYf53o|u!ةS':կ__]Ul>'4=Bp' DdVW8mXjW L/^\0ax[ƛy9ZakL1@J+Ο?3FjԨF0UؾQ&sZ:uLPbEuQ(L,YDcz)5rzj5d40Zu9ĨݺvSF #:`$wPc y5Vt8qH0&L!"?^sX8ZL:a!*Om}=0 gXdGƕ+WM)7߀;9pp:(QBO $ .tر:믿V<{V>A3/9XbJ_>lf2#g$k09'vrK, 4iRDG=x1mC/Ap_:r7o^A_FE71iXhӷ.1ruWNpw^~!0:߉`>Qz7&ۊ99$"_#z^fz; iD@/-FXU'Z{4` /\P_a/>)״+C'}>m#F"߸q,Ѳ7kR菊;p#~m2p5Jx y-[T-m֬YiIN#Fh Scx}Ѥ)ʕ+՟p~S)#9~922q74:գ:q񉽥8޴iSpV2B6R//T(^P00>|p;vZ\߀/ei  ~ƛ2 2L6pxӃX'$:!yBj%]Luhnz 9\}\Ju_bD:2N@O>@#X8w\yw1GG`` To{ /6 I0SbP눗mqƍU F-"+1Uề=i~'ŲU,K oX}P|GE7l3.BPGV>?"0{^Recwg`sOk2c8ӱz!/ 싅M#-OO?ԓ}[BR =XN(>!RGl% V*{ $ b tNa`0v0b71fr`@̟ P{,LxP᭤}j߰9cFJe: V;ŨvCGbŏ/1uo9bA6q/>xPPk)Te˖se @ݱ)%ӱ0 ~ A`ߙzKJkO[BBb0~` ynإ?8@vJi`0B)]T)lSej5gLLe:F:g Q Gʑ 8 ''u!  p4Nn*G$@$@$$4ԅHHHh89y prRkP   Gr$@$@$@N"@IA]HHHMʑ 8 ''u!  
p4Nn*G$@$@$$4ԅHHHh89y M-ek $)bcc I ::Vlq02 H f'   hfVHHH4H$@$@$@ASD43+I$@$@$`NvP    @)"$   ';(R @DJ A)HH E:uJSp.ߖ_Uٳ۞={¹JН#J @Ș1 :T͛|DTɕj @4bĈT p : @s?۩S'^|RlY;BG|Pd"ŊիWD~*.k֬ҫW/x'~ҤIRdIr/_ &{ҰaCɔ) ]Mw,]tܹsAI||ʚU@$@$N6:v*w-#wFڴiϫkO=Ov|UѹsgcǎƆ {4hyIQD c͚5Ʒ~k)Sׯ߾}%Kcٲeof :Ȗ-qEo%ʕ+FbŌ޽{6fΜd!‘#GTِѣJW/U7n0*Udlضmj*#o޼Ɣ)STl{Ϟ=ݻw~>}zngΜQ#… <ƯjlܸѨ\crŋ=z0ve|GFLK;vLB=6m *S׽aÆ#cƌF{WE".J2ڵkd)5"i8H\F\r7oTcʕ3/_jZticɒ%!mg:_1җ/_.]:ի i/Ȑ!Eٳ{Qȳb L6,ӧf8_}G1cիWWȏz8qg|2P,\6ail8?~x뭷k׮yʉ5>u矫>s%O(P 0@N*۶mS\6o,+VT)W*T Mڵkʕ+,]O>gJSQןV܆ ȑC'}DY+%HH}0afe,mV=4̙ZHәiǍ7_mή-*.\PxqFYn,ZHfΜ)'NN[ni3]tsl S`f `/ F 8:Ȗ-[/O_ZGT_v,Wή>ͷ ]O:9\S' @˖-%..NNpߦMdҪU+ONA(.(xeʔQplRz_~矕Gkʸq㔃رc0ђ`1ʂ(O[Ceܹsk!|.'F9"O$6ɮTf=E #/iF>ڠFϮ\*i_U;Q+f0ӤI=~<  p%{*Ga8P@#**J9kvS1stE>|Vjԯ_غu?UT1:uh͛+SN5k4^y~z'n$_N &5jPeGj޼y^ሇSz.]l׺A[>X:|;(?..@;M:5Z׮] pOo4{l嘽n:=}t#s=zKG|啞9|߾}F0^n]#&&#Ǹ`Վ7-+nׯ_Wz)Qg!z!:# wU5`gMq2BA}hɓ'Z妝{ e^Γ'OVGLxHN6ͣl$:tZgs8/c6>zjB }x+Z@c^֭aSHN:XF`'jժZwUh[nƗ_~ԫ-y$a"3Ww+ Xo߲~\o`5!V!$F(d ihHHH `+H:pݴiSO_u5l2ϵH=Sf&  $GYW˙IENDB`docker-1.10.3/docs/extend/images/authz_deny.png000066400000000000000000000647331267010174400214260ustar00rootroot00000000000000PNG  IHDRw? IDATx a9dA%H$*D^E("Q"tA" As}[[ vgv'yvrOWթ4J)%   M -9 THHH@ HHHF$@$@$@>@$@$@6T6<%  *F  b) P1  OIHH}HHlm0xJ$@$@T$@$@$`#@hZ>IFu]A/7|#k֬qI_ܲe_J\KV7nL,(GR+{}ifYfMj"0Ύ1ҥK_Zغu[κNɾ}ycɑ#GTD%n>Rm)#3t!_ٳg]&O,իWwqKŜ9sdI8_Νu>si~8d9BZI&ؿ²Y|'2en\rIbb̚5KF)2e m橘z-RCC+4\C*RCMK~Ahܸ~y PT#DDM:U/uFn)WΝ-?GگJ*ڽYfVX,:?K=pSR%htٳlٲYtr3eZpf/a ˖-S2dp ?eORTu˷`êhѢ+K,*o޼FZpau ?ӧUtt9(i„ E8?npWuۚi:x[~x/]iK1T)5kZvYo]Q7rH+- oN>9s?޾}1cr՚S 88#yN<͛Oy(312,b[~NkѻwoGXaڌws^vtU{MVf̘!e˖/_^ڷo/UV/B#w^V@O[ߊ+ZYb73gtZju\kn~ 6 ⃵*󗐐SNVm_U+xrwj+WJFk8bÂ?#gn裏/#FkwR<ÓUj:'|UmyjB0Ɠ?[0L*ɭ|ӽg7`щ(~AǸSFh@m۶M0*Tiy̝;W`&>7|SO.[^ypJ;NjB#;&d:IzLQ2oǞF#xȍ7Č[ G_J$Vrոˁ,'OtX+PxPCv^s (J(EXa kU0-YN. ]$vrKe9,X& %[le~F\oͨqΝ2o<:XP~Q>Z7mڤ%K }g_{ݏ? 
ahx(띜o?OnrË1$X'jFѣ6Iw1d=b0Ũ\r:ofؕ=/P*UJv-*4iTssb/qs:bQ!mge/~}ٳgO:tFQ=8$Va<6<4;GXxHCCv%URL kʕ֏|uB_}~ܱc,^XO?2/?M6~I/[v-XB/% hGl+`;оeSل=Bn( A)-u[F%x'+R<1 A*/[?$@۷oɵmRց}N?\#ij?m0|~P*F!%kg?b6l0=ESԱk Zƺ,^{=VE:IL5lP灗1?Wҗ\xQe8q)SF04/67;C<҃vľF#<򈄲M)W_3fÇ޾9 1²ٳ|Μ9V8C:~K?s)?/cDxX0 ?BѾ ~ƲG#l߾-tVN2I<<؏غaŋޮp8ǽc$Te *~:ڿLyL\cLW"?;ތQ ,1}&4hi7ix9k?=x` ($zF)!bz (0Q &XӪϟ*U_-_\+_mҥb #D 3Fyz \!1 q~ v=&MH…u:BC=Y1J45:zK`܃/%._r U[SZX 7V+z &Em֎5kX AKkv,9 5as9dOSI j pMˊ@ ƾ)ըQCVZX,91& 1_D$.=X烂׍۞X@1[# _$@$@N1[# c@HH P1F{ ~$@$@b  D;*hoa֏HH Tb`  h'@- D1 \ L$@$Y?  $5+uJh 2kODxjc_`E$J?RT&'~&C>R}}!{#>-HKbd0Yv<|'Ljցy{' FKIh&ܺ\ctfC  $@* 8btfC  $@* 8btfC  $@* 8btfC  $@* 8btfC  $@* 8btfC  $@* 8btfC  $@* 8btfC  $_{Nʗ//3g9rHݺueĉrMfz衇+ve޶m[}}pXv,[,IxÆ MVd"eʔg}VmۖbG}G Z!<3-[?~g>uԱ駞iy>}zɓ'<2c |ݟ).AHgUIN#zqO:%֭Y͛'s̑nKݢ/ZH5k&|.c8qLNYJ.-EI=Ο?/~)?G}49Y0n_L/^,ER{{z^Z^*3ft)_ƍr 駟߮]7tË&("]([bEn:ۧ~ /X~M6n.]nNRV7nܰ„dڴi:=y& :TgȑDv횊WiҤQ3gV-a`_0p*at8p>4)Nc=HLL;-OG)W:s_#%KT TաCtgR*8)a3}vyw$tRVJboӪǏÇ9sJӀ_~ꫯŋLS'uN*r咾}{nz'ˊ2-_\^y=ꫯ${~I&Һukv!)H $6cڴiruݹ籪s^oOgΜRT)=ł^xA]Z_%?^Zj C)Yt]7nw}]i{{h=hCt &LǤK7|#0mZa ^x c[lѮE̙3sNѐS샞[R|=?/_>>pՀЦM!]voK۷ZO?moawn;Xq[R_k1&.]h|vjҤ^{_L}/t8;`EFt 4P7o޴ٳv_rּysXn8㏵{Æ -@ފ`_0pUUlٔYsZ:gFþvܹs&3fNGIu{W]|[ӧO7k֬ ed⩎-[AFPuj5jg2 {zX$$' a3bS]0: S~`.]`_ѡR`J)XcwYSI{.h0z`*Զ(.UTїp+S"![`~kT]y=/#|Xb:kڵmhFvf:U(f%`6>7eŒBgϞ}% 1c(#Qa"x!F%G`=?|ݷ0juŒ}_N {l5Þ"{!Xst钾a0%.2cH%̩>Xsι0yɓ'/5LuAX.x\rEFuhLz40m?GXk{0=i<1%0r TP%Y(t@﷔^~%_l7o^Aŋ+6lݾh҄1^XN\R=l#w!ث$xkEkZ4hcpũ;F9K :^޽Rc%-^|,uYz]b3w\|/=Vhf^z0}ȸc럘L/9rK& `.ױ=H QXd_r|w^#:_8p 0+3󦅷ӧ{ KO  ֡h`=t[J5/ ``$e[!TP0 /0r9 L f: 8/=Gl!aNFq\0u4/axq$xY+T'/QXOxeĈڜSb/ FP2uZIu2"f $N: aRz-Q:ļ퇲-ʕ+~ ^;M?iT܃Xs/_ ba 䈯^a=xJ3]5kn^*̬-t;ac|TLu` {~7X0>bF&p0,%/L2Tۡ=lCa4tPAq.09״>ʶmbo|bvOg*bFK'0En돽0h್YfY.{mO?iU [GLb j-(C lF@/` oF]܌N# d0]/U|G~/]`ozLMIyE 0Ypg (΄FxۅQ!} CL,Q`Z 훦C0a={ C,9$@$[ÚRc}ҠAc50Ģ Oj 8+1rĚ#,_Nz'sImh`-,/aK>T-K,aT# TlRǟOT3I 4|srљ }HHbc 6:L$@$Lљ }HHbc 6 IDAT:L$@$Lљ }HHbc 6:L$@$Lљ }HHbc 6:L$@$Lљ }HHbc 6:L$@$Lљ }HHbc 6:L$@$L Y?;կ_?rra@7 ^TwٲeCz衈)󫯾구kE<2dot8jc_>cb?}WdI̙3IŎ'qbRy`DJ 8bt"Cw  
$@J 8bt"Cw  $@J 8bt"Cw  $@J 8bt"Cw  $@J 8bt"Cw  $@J 8bt"Cd8w;v,Yi02 *Fs)gϾ%~O>ڽe˖{+QG"2er'jՒ ܒw$8ԬYS6o EeIH ]L8Q߯u&iРlܸQiݺ$&&JK0OHH P1H:tS41j̘1:?(4OQYlde֤I=Zb<(Z7|SN<);w/S.ngϞuLi@q9rDV*}̙̙3gqIׯ_ō$@$.n s90zz]sϹ\]v.]:ԅ @2i$I0uŋ%C o蠕+Wݻ˸qiӦrw҇`8%iϟ?1޽{ѣGE2tPIֿ>HP>L9SHH P1" ˻̒%u0.F$1)D_b޽{СCc!#7nS,RqҥK\/0M fzdԩzT;`]wyG:vhO֭[W^tR*GH${OPX+S@iJWB9})VX2v!U]v73҄ ~IN-\V%K+u.[S H}TA)3a:j(Úȑ#{sΕ/1 L pO.6m~\|6ٹsdΜYOb !0@3:ŋuTNN$Sz) 6yJV05k6ejt)>c4.b.\I-M$y@"ҥKye 4,(V R !Cș3g$>>>r,`Ŋ#)Yjk%F59€%>\Ga\WøqX41T1\]*pnb@Ln'O> ©T NzC/bfmݦ?=S ~($@$bF1}۞T.-Z^ @jcjPg h,S=# c3yP)ljHBO1􌙃T>ћH E P1(nfNJѝIRcj@ OÍϪ@b Ɖ֢_.Zz D*k/qŊ[ߊ D/My5ѣ  G1cDyY=pTހ,> @p P1'S# pTЀΝcǎ4P4C @P1&K/ ~fڵɯc͚5e:={tϟ+y8p@qi{ 4=fDG HETI%6m<3ʉ' <#_~dɒ%)IYqB8OHH LP1&!>s)]?9sȡCvڥGh/_z!6lnZ}2qDܸqRdIɚ5tY]l"7ֿ%Y\90avH3gΜ{IjդH"V<<ڵk'.\q1­Ud˖MJ*%~Uٳgrέر\jԭ[Wr!e˖ѣG ~^[9LyB$@D@ŀǫ~5jP&LihBJΝJDԥK,MC ^:O>N8mۦɣNΜ9 ,~muUi&UH*O>͛*11Q.\X͘1<߯8u#W\jȑ:իW̙3_U!C`PBjʔ)^ܻwNc̘1: 6貌?g9uC}(9b;.GIxKٸqرC^O>D2x`(B z_%C oHrҽ{w:g޼yzsNw V>4nrJ=r,\lݺU6l(gϞǏK2e/_^s@sG&Mdh@ʉIרQC "^3@H $ ;v~/^܊3f?O~iEoee$cƌruٻwŔ7nT@!I.ܼy\z<9rD-wwk릘M6TREWُ=bZx߾}SDD_Z aTqwW_}äx, eag}&_?PA1B@N)S&}n7\(!FƂq> h_SPR Fxc=#_bRN+SxAHHHdR`Ahq߯͛'͚5,˔)UgH p*5Rb eٲeRP!mD3eO.6mrˆSPޤEzsԨQZzj]))K ZPRcn # @bd!́I c2=Z+Wl.<%wTB,G0HM1D^ @)˗O&O @b6C)b ch@DW'+Ta,7F)b  w")$@C1r*KjW/^He*H rP1FN[lI#U)}?({6+NH1-Jmd>&SH"?"9ms%d[WvcI P1Fz Fqa} CHYSGu|Ow`)]tFIJvZ9U8W,[lRbEiժCJ87oF{ ~1-SN&|눱_~0|23: h|s+ JoERHH P1i0lr # $c22zh P9/S'LfìAXrTQR=*hiIփŸcK?v'|B$@&@lL/r1?~e˖&M%J_H$SL.VZ`+H:Yl޼9̲@ P1)'Nnݺ_0k֬_~ c۶m&HO81eeAI TCE=傢3f ͓`T-[6ٽ{@5iDV߁؃JV7ߔ'OJΝ/ٳgӄEPGUJ߾}u9she9sLyg\ [>ʇ||"I֯_/W^E7H6*d#dJ' kN0IN'-F+Wo!ӧʕ+Keܸq2o<;C$ԩ㔤>|4QGQ;dС%l񖏷r>){iӦ7':tK.ے@(\/_ޥxYdqѣ$67h+V>ݻw:tH`,dƍzJӚE1Xtik/!ҬWL:Uj ;HǎI%ܔ[>g26|̵/W\-ZQaxMI%@ŘTrG#2eJWBKP(D(ŋ2uջKҦɞk׮i[e g "uҥrE$;uꤧ^ϯ-2x[Ο?7|ʂQ6rtD$Tj1" $F<^5j@Aaqȑz}ztwu;w$$$L1u;em3}tٴi˵Ν;%sz:Ӟ8 ~d֭2x[u n#U}Ѣ%p$XCA7|# {Wׯ/o^C߬YQ -UKe qapBmM Ooi F<.]/,Ç cO V>OrrEH̢@1#(!C̙3Ab*-f +Vl0)8#XB֭['{0fh~\2If͑}/ cb9|Ɉ6GLQHDD3$:HrrES,J$(08NaC5>^k>`c$P1F^$@8AL> P1D$@@",cb 6aH$@ 0J+z ;*po+cKL$?T @ LH 
wQ_ iL<P1Ts$q9rXZB1\Z SO=%ǎ?w$$? P1 H‡~e$[26l_]_0XlYw<*G I‘ӯ_?~m)8~Ү];IiHw(D$@$ |)HH–< )ö;`$@$@ ʑHH rb  @J)ۈHH•,ݥI&#رcSNɾbL6B&@$@$ >ʕ떤{nY|9-t@$@$ƏX79s?9\cL=% :TQפ @rP1& sG⃞ R1 Hb@L\>.'jՒ 6Ծf͚y_Tqb( O?T߯bl۶>|tHĉAL-g$@1J O}tN0|饗~P+W#a~7_ʕ{2n8AG#;vСCh{ vbʊϞ7oq:u긥teRqkJ)BŘ̙# @T(.7mڤ1jÔ]+/J,iӣ=L9s޽{СC2d8p@ի'SNE"`?37?{>sSVoybZH"hRtikI!LJYy 5aO$5WLx⒐S]B)B#;S+ *XeB9ݸqC KŋķB1 z]6O~geB9x[N1z]wIڴ7v횞RFp[0`rje4O8bLa̎H 6iFvܩp801b'N>-~\~]`0sҢE =5j ȑ#Aat>B9狸<5Rsε>^062TJ t b\C|NjƈI`LZTjj*% KTa,, @jUjjg$@$]$#hn]֍HH `T#c  h&@ͭ˺ L1`d@$@$uY7  P1HHc4.F$@$0*ƀ1 @4beH"o Z@mH`~&Z K$'G$@$@$`#@hS  bd   *F # P1`HHHHHF$@$@$@>@$@$@6T6<%  *F  b) P1  O#@L J.ʕK7o.۶m슱$@)J1Eq3POe{nYx@Y֭[W꬙> @bd5@}\;VVZ{9lٲ2zhQJ{\  K ( oRreɟ? 0TFꫯJB$_|ҠAٸqfϞYΝ[ر\> IDAT|Y]dTTIhBڷo/os*?55*,W\QK.u 1cF5|7\ 4Hy̙3`VW^U6mREQ~ӧyJLLT V3fP+VP3gVϟl۶Meɒź%SQ1C J1{SN.sR}V<}tUxqxV>H7ҲeKXq]T)gi_]=S&ף>`u@)G`z($@$RS4 6S9HI1=C˗ JV&t 3Ta , D9*(ohc$AO1ۈ%r>@! `b 6QRF9?Ҽ8 @lob#P zHM#`ez)BH @lbfmIH|b$@$@E1ڛ% A z *FΚ5৏N! ^ܮm'L M6u 6-%\H^4O`RZ5ɝ;̙S:v(/_q,Y"*U-Zoʢ=H"WZӥKgɒE}.n;w*W^yE%&&k׺\o*sj̘1իjÆ pj='I[oj+V]7o?CMVٳG9sF,XP:M6"EoV-ZH,YR?~\]~]I&:Mo~?C *!!AuBԔ)SU\\> `TK1B)DP2eR8zycÇWUVua;bu[u ۧ\˷n:+h\O?O/ndGPC Q[nU7nܰ!C PׯWUT9RU^J'5Ҋ[Y\"q~Dap!>䂃5e˖If<G(힘Kbb=*%Kt},7rp;(Ĭkf˖ 7o޽{СCzZx޸qCO֫WON*cƌS wyGOzK>L6M&NR?^OSlŋ7Yc%[Y\"H˜@L+F/^豉Hʔ)8j.#!!%wiĄ5I9)RD*T 7o'&& u.]*#dNI&rG?!~'zTR::u#+p|P/oeq  c1k|K)N8?VQ>M6sNa͵kdӦM2b|F _>,Fv/_G(G̙qP\\>wiɔ)6̙3e͚5gx ٺuQg+t%*1( .]#6#5XWwkҥK:Ys4p\j]ʞ=^Zgd9sNjb H6lgb{jԨ`ܣG-'ff{$blgONcj*՛RLX{) 6h㟅 իo1R%&+ @tX&5tX >\ڶm+GҥKˌ3lre$@$rbF1Ka%+hǗnZjmb}Hb@(:ܹs>w_R§-X51ƈ} ,[bY_   Čbx$@$bJ1c$@$7S Cw`@ 91T1Ya @*FСr0 VhicYY  G1A?D!  
$DCy|}lC?%Rƾ>}Hn~'7fGƾ>:JcIHN1H @$bcIHN1H @$bcIHN1H @$bcIHN1H @$bcIHN1H @$bcIHN1H @$bcIHN1H @$bcIHN ,㯿*=/_^2g,9r䐺uĉ͛.z!]|YmV_9r%\(.֮]+˖- E)a474iӦ,YH2egm۶xaxu_xGVOL˖-u>ǏO:uiuZ5§O^#<̘1%_gg/RYbDR`#`bD=uꔬ[N~g7o̙3Gn-uh"i֬|'fZ.]Z-G{?^ORg.>hjX~tr_$gիի1cFb5BM`O'qم ,0haÆ/XB:@Jm)S&=ztE,Y"h޽{wy kx7o>Z|RdI޽q2n80z">B@&LI=oF`!(;f$Vpk Z)'h`^ׯ lٲ)n58u@Ϥ3\83}ڵչsLp5fF=,70{|կOo֬Y qS[l˃tNu5j0Ye8II(O2VQfĈ`F,uhov4]0H/ԣCFvS.ٳ1{]`T&$mQ]T/)l#+VԧEB0-`֎Tb Иr$i`~xFaz.xcJ{a PKPxҥo)5KB{51/C,+`- ^Za䄩s@t0q_8{0BÜFi`- h^IVP!O^t 3aoˈ#9=99BG#_,p1eX-d0DHHuL',1 ZLtye[5+W rw(14Ө.u_|!~A2<ˆ _z #fP5kR%TY[<vSZo.`R 3}=V L| `4=XK<_~e2eߛ.Է~.{rȡj.~ɹ?a)tqqq| {8=}#SNpº*T.\i﷊ܶ0VXaɓ'rh|Whs|oW/=\mo`_00eE޶mیŋuGXs$$$(AD_ɛ7;=~`vt8Gtqޭ[72817uܟ)Ho߮g-#h(kƍݟUN?VBI@$@$@6T6<%  *F  b) P1 Ǐ >\k׮*W8fذaRfM]ӧOknG85DQ’ g2eK޼y=I8sa@  b|؊>~ziذf9sfA9k߶mԫWO˗G.9skwoigmO5m4@߯FFko^fMɓ'uz衣_~]u]iӦjӦM*!!A͛W7N{K۔'T۷oWǏW2dv)]v;wN޽{KYFUTI=:˗/o]uQm۶MM|Xz ֮]3MS+*SL`j ,>v?oTRy:M0ŵah%>I  A+W.u )WZxMҥՂ C8Q[|pUjU}ѣG-?\͜9S+ o\Ν{K١R+#GwyG]zgȐ!z}… ?^9s,_i/\ЊEh 6lNq/T֬Yӧh(CB#b2"ЌhbPj_,`G)ŽLjX)6(2)?+FlY` ܮi;7}kг cTD .9ViSB>7E,^;)wN\Bel^,p!7xxx(~X]]brrR#555U[B'~]c!(3fDX#&4އ6qrrBx?ƁmuF:y7_u,**t yg\Y %QW/FQ|>r eH&uf?OUbS,6Bq_5nWWW r 퉉 "Ќ #8%? H 7OOOjN,FwWTtXj+`hii!'G~___apzbSol6uNv)99ޭh]qݫAYYH~-Dсb(IIIDຟc,G.\Srzz)sB DyxxP244D@t '|H(S9 A|-H1 3qH? Ql6B?+5Tn(ƶis D|DzU222T;{<@L B< Hg(cccQ[Gk-=" LA$œgI va$tD#& 6wEOs:.>#"d2% i`` dzz6?8Dl333 zX +;(V>`NMM}i϶fr\999 H1,z'+$N\]ļ@٨KY[[g2[X >@b9F#u~p&LlV[JD@" WVV(oN/攐/t~T sr@",rCƆx +++ title = "Extend Engine" description = "How to extend Docker Engine with plugins" keywords = ["extend, plugins, docker, documentation, developer"] [menu.main] identifier = "engine_extend" parent = "engine_use" weight = 6 +++ ## Extending Docker Engine Currently, you can extend Docker Engine by adding a plugin. 
This section contains the following topics: * [Understand Docker plugins](plugins.md) * [Write a volume plugin](plugins_volume.md) * [Write a network plugin](plugins_network.md) * [Write an authorization plugin](authorization.md) * [Docker plugin API](plugin_api.md) docker-1.10.3/docs/extend/plugin_api.md000066400000000000000000000111531267010174400177360ustar00rootroot00000000000000 # Docker Plugin API Docker plugins are out-of-process extensions which add capabilities to the Docker Engine. This page is intended for people who want to develop their own Docker plugin. If you just want to learn about or use Docker plugins, look [here](plugins.md). ## What plugins are A plugin is a process running on the same docker host as the docker daemon, which registers itself by placing a file in one of the plugin directories described in [Plugin discovery](#plugin-discovery). Plugins have human-readable names, which are short, lowercase strings. For example, `flocker` or `weave`. Plugins can run inside or outside containers. Currently running them outside containers is recommended. ## Plugin discovery Docker discovers plugins by looking for them in the plugin directory whenever a user or container tries to use one by name. There are three types of files which can be put in the plugin directory. * `.sock` files are UNIX domain sockets. * `.spec` files are text files containing a URL, such as `unix:///other.sock`. * `.json` files are text files containing a full json specification for the plugin. UNIX domain socket files must be located under `/run/docker/plugins`, whereas spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`. The name of the file (excluding the extension) determines the plugin name. For example, the `flocker` plugin might create a UNIX socket at `/run/docker/plugins/flocker.sock`. You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. 
For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only mount `/run/docker/plugins/flocker` inside the `flocker` container. Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under `/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as soon as it finds the first plugin definition with the given name. ### JSON specification This is the JSON format for a plugin: ```json { "Name": "plugin-example", "Addr": "https://example.com/docker/plugin", "TLSConfig": { "InsecureSkipVerify": false, "CAFile": "/usr/shared/docker/certs/example-ca.pem", "CertFile": "/usr/shared/docker/certs/example-cert.pem", "KeyFile": "/usr/shared/docker/certs/example-key.pem", } } ``` The `TLSConfig` field is optional and TLS will only be verified if this configuration is present. ## Plugin lifecycle Plugins should be started before Docker, and stopped after Docker. For example, when packaging a plugin for a platform which supports `systemd`, you might use [`systemd` dependencies]( http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to manage startup and shutdown order. When upgrading a plugin, you should first stop the Docker daemon, upgrade the plugin, then start Docker again. ## Plugin activation When a plugin is first referred to -- either by a user referring to it by name (e.g. `docker run --volume-driver=foo`) or a container already configured to use a plugin being started -- Docker looks for the named plugin in the plugin directory and activates it with a handshake. See Handshake API below. Plugins are *not* activated automatically at Docker daemon startup. Rather, they are activated only lazily, or on-demand, when they are needed. ## API design The Plugin API is RPC-style JSON over HTTP, much like webhooks. Requests flow *from* the Docker daemon *to* the plugin. 
So the plugin needs to implement an HTTP server and bind this to the UNIX socket mentioned in the "plugin discovery" section. All requests are HTTP `POST` requests. The API is versioned via an Accept header, which currently is always set to `application/vnd.docker.plugins.v1+json`. ## Handshake API Plugins are activated via the following "handshake" API call. ### /Plugin.Activate **Request:** empty body **Response:** ``` { "Implements": ["VolumeDriver"] } ``` Responds with a list of Docker subsystems which this plugin implements. After activation, the plugin will then be sent events from this subsystem. ## Plugin retries Attempts to call a method on a plugin are retried with an exponential backoff for up to 30 seconds. This may help when packaging plugins as containers, since it gives plugin containers a chance to start up before failing any user containers which depend on them. docker-1.10.3/docs/extend/plugins.md000066400000000000000000000114211267010174400172660ustar00rootroot00000000000000 # Understand Engine plugins You can extend the capabilities of the Docker Engine by loading third-party plugins. This page explains the types of plugins and provides links to several volume and network plugins for Docker. ## Types of plugins Plugins extend Docker's functionality. They come in specific types. For example, a [volume plugin](plugins_volume.md) might enable Docker volumes to persist across multiple Docker hosts and a [network plugin](plugins_network.md) might provide network plumbing. Currently Docker supports volume and network driver plugins. In the future it will support additional plugin types. ## Installing a plugin Follow the instructions in the plugin's documentation. ## Finding a plugin The following plugins exist: * The [Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) is a volume plugin that provides access to an extensible set of container-based persistent storage options. 
It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS. * The [Convoy plugin](https://github.com/rancher/convoy) is a volume plugin for a variety of storage back-ends including device mapper and NFS. It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore. * The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin which provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines. * The [GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) is another volume plugin that provides multi-host volumes management for Docker using GlusterFS. * The [IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) is an open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. * The [Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) is a plugin that provides credentials and secret management using Keywhiz as a central repository. * The [Netshare plugin](https://github.com/gondor/docker-volume-netshare) is a volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems. * The [OpenStorage Plugin](https://github.com/libopenstorage/openstorage) is a cluster aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. * The [Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. 
* The [REX-Ray plugin](https://github.com/emccode/rexray) is a volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. * The [Contiv Volume Plugin](https://github.com/contiv/volplugin) is an open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption using ceph underneath. * The [Contiv Networking](https://github.com/contiv/netplugin) is an open source libnetwork plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards. * The [Weave Network Plugin](http://docs.weave.works/weave/latest_release/plugin.html) creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. ## Troubleshooting a plugin If you are having problems with Docker after loading a plugin, ask the authors of the plugin for help. The Docker team may not be able to assist you. ## Writing a plugin If you are interested in writing a plugin for Docker, or seeing how they work under the hood, see the [docker plugins reference](plugin_api.md). docker-1.10.3/docs/extend/plugins_network.md000066400000000000000000000041261267010174400210430ustar00rootroot00000000000000 # Engine network driver plugins Docker Engine network plugins enable Engine deployments to be extended to support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN or something completely different. Network driver plugins are supported via the LibNetwork project. 
Each plugin is implemented as a "remote driver" for LibNetwork, which shares plugin infrastructure with Engine. Effectively, network driver plugins are activated in the same way as other plugins, and use the same kind of protocol. ## Using network driver plugins The means of installing and running a network driver plugin depend on the particular plugin. So, be sure to install your plugin according to the instructions obtained from the plugin developer. Once running however, network driver plugins are used just like the built-in network drivers: by being mentioned as a driver in network-oriented Docker commands. For example, $ docker network create --driver weave mynet Some network driver plugins are listed in [plugins](plugins.md). The `mynet` network is now owned by `weave`, so subsequent commands referring to that network will be sent to the plugin, $ docker run --net=mynet busybox top ## Write a network plugin Network plugins implement the [Docker plugin API](https://docs.docker.com/extend/plugin_api/) and the network plugin protocol. ## Network plugin protocol The network driver protocol, in addition to the plugin activation call, is documented as part of libnetwork: [https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md). # Related Information To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`. - [Docker networks feature overview](../userguide/networking/index.md) - The [LibNetwork](https://github.com/docker/libnetwork) project docker-1.10.3/docs/extend/plugins_volume.md000066400000000000000000000115211267010174400206570ustar00rootroot00000000000000 # Write a volume plugin Docker Engine volume plugins enable Engine deployments to be integrated with external storage systems, such as Amazon EBS, and enable data volumes to persist beyond the lifetime of a single Engine host. See the [plugin documentation](plugins.md) for more information. 
## Command-line changes A volume plugin makes use of the `-v` and `--volume-driver` flags on the `docker run` command. The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example: $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh This command passes the `volumename` through to the volume plugin as a user-given name for the volume. The `volumename` must not begin with a `/`. By having the user specify a `volumename`, a plugin can associate the volume with an external volume beyond the lifetime of a single container or container host. This can be used, for example, to move a stateful container from one server to another. By specifying a `volumedriver` in conjunction with a `volumename`, users can use plugins such as [Flocker](https://clusterhq.com/docker-plugin/) to manage volumes external to a single host, such as those on EBS. ## Create a VolumeDriver The container creation endpoint (`/containers/create`) accepts a `VolumeDriver` field of type `string` allowing you to specify the name of the driver. Its default value is `"local"` (the default driver for local volumes). ## Volume plugin protocol If a plugin registers itself as a `VolumeDriver` when activated, then it is expected to provide writeable paths on the host filesystem for the Docker daemon to provide to containers to consume. The Docker daemon handles bind-mounting the provided paths into user containers. > **Note**: Volume plugins should *not* write data to the `/var/lib/docker/` > directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/` > directory is reserved for Docker. ### /VolumeDriver.Create **Request**: ```json { "Name": "volume_name", "Opts": {} } ``` Instruct the plugin that the user wants to create a volume, given a user specified volume name. The plugin does not need to actually manifest the volume on the filesystem yet (until Mount is called). Opts is a map of driver specific options passed through from the user request. 
**Response**: ```json { "Err": "" } ``` Respond with a string error if an error occurred. ### /VolumeDriver.Remove **Request**: ```json { "Name": "volume_name" } ``` Delete the specified volume from disk. This request is issued when a user invokes `docker rm -v` to remove volumes associated with a container. **Response**: ```json { "Err": "" } ``` Respond with a string error if an error occurred. ### /VolumeDriver.Mount **Request**: ```json { "Name": "volume_name" } ``` Docker requires the plugin to provide a volume, given a user specified volume name. This is called once per container start. If the same volume_name is requested more than once, the plugin may need to keep track of each new mount request and provision at the first mount request and deprovision at the last corresponding unmount request. **Response**: ```json { "Mountpoint": "/path/to/directory/on/host", "Err": "" } ``` Respond with the path on the host filesystem where the volume has been made available, and/or a string error if an error occurred. ### /VolumeDriver.Path **Request**: ```json { "Name": "volume_name" } ``` Docker needs reminding of the path to the volume on the host. **Response**: ```json { "Mountpoint": "/path/to/directory/on/host", "Err": "" } ``` Respond with the path on the host filesystem where the volume has been made available, and/or a string error if an error occurred. ### /VolumeDriver.Unmount **Request**: ```json { "Name": "volume_name" } ``` Indication that Docker no longer is using the named volume. This is called once per container stop. Plugin may deduce that it is safe to deprovision it at this point. **Response**: ```json { "Err": "" } ``` Respond with a string error if an error occurred. ### /VolumeDriver.Get **Request**: ```json { "Name": "volume_name" } ``` Get the volume info. **Response**: ```json { "Volume": { "Name": "volume_name", "Mountpoint": "/path/to/directory/on/host" }, "Err": "" } ``` Respond with a string error if an error occurred. 
### /VolumeDriver.List **Request**: ```json {} ``` Get the list of volumes registered with the plugin. **Response**: ```json { "Volumes": [ { "Name": "volume_name", "Mountpoint": "/path/to/directory/on/host" } ], "Err": "" } ``` Respond with a string error if an error occurred. docker-1.10.3/docs/faq.md000066400000000000000000000325271267010174400150770ustar00rootroot00000000000000 # Frequently Asked Questions (FAQ) If you don't see your question here, feel free to submit new ones to . Or, you can fork [the repo](https://github.com/docker/docker) and contribute them yourself by editing the documentation sources. ### How much does Engine cost? Docker Engine is 100% free. It is open source, so you can use it without paying. ### What open source license are you using? We are using the Apache License Version 2.0, see it here: [https://github.com/docker/docker/blob/master/LICENSE]( https://github.com/docker/docker/blob/master/LICENSE) ### Does Docker run on Mac OS X or Windows? Docker Engine currently runs only on Linux, but you can use VirtualBox to run Engine in a virtual machine on your box, and get the best of both worlds. Check out the [*Mac OS X*](installation/mac.md) and [*Microsoft Windows*](installation/windows.md) installation guides. The small Linux distribution boot2docker can be set up using the Docker Machine tool to be run inside virtual machines on these two operating systems. >**Note:** if you are using a remote Docker Engine daemon on a VM through Docker >Machine, then _do not_ type the `sudo` before the `docker` commands shown in >the documentation's examples. ### How do containers compare to virtual machines? They are complementary. VMs are best used to allocate chunks of hardware resources. Containers operate at the process level, which makes them very lightweight and perfect as a unit of software delivery. ### What does Docker technology add to just plain LXC? Docker technology is not a replacement for LXC. 
"LXC" refers to capabilities of the Linux kernel (specifically namespaces and control groups) which allow sandboxing processes from one another, and controlling their resource allocations. On top of this low-level foundation of kernel features, Docker offers a high-level tool with several powerful functionalities: - *Portable deployment across machines.* Docker defines a format for bundling an application and all its dependencies into a single object which can be transferred to any Docker-enabled machine, and executed there with the guarantee that the execution environment exposed to the application will be the same. LXC implements process sandboxing, which is an important pre-requisite for portable deployment, but that alone is not enough for portable deployment. If you sent me a copy of your application installed in a custom LXC configuration, it would almost certainly not run on my machine the way it does on yours, because it is tied to your machine's specific configuration: networking, storage, logging, distro, etc. Docker defines an abstraction for these machine-specific settings, so that the exact same Docker container can run - unchanged - on many different machines, with many different configurations. - *Application-centric.* Docker is optimized for the deployment of applications, as opposed to machines. This is reflected in its API, user interface, design philosophy and documentation. By contrast, the `lxc` helper scripts focus on containers as lightweight machines - basically servers that boot faster and need less RAM. We think there's more to containers than just that. - *Automatic build.* Docker includes [*a tool for developers to automatically assemble a container from their source code*](reference/builder.md), with full control over application dependencies, build tools, packaging etc. 
They are free to use `make`, `maven`, `chef`, `puppet`, `salt,` Debian packages, RPMs, source tarballs, or any combination of the above, regardless of the configuration of the machines. - *Versioning.* Docker includes git-like capabilities for tracking successive versions of a container, inspecting the diff between versions, committing new versions, rolling back etc. The history also includes how a container was assembled and by whom, so you get full traceability from the production server all the way back to the upstream developer. Docker also implements incremental uploads and downloads, similar to `git pull`, so new versions of a container can be transferred by only sending diffs. - *Component re-use.* Any container can be used as a [*"base image"*](reference/glossary.md#image) to create more specialized components. This can be done manually or as part of an automated build. For example you can prepare the ideal Python environment, and use it as a base for 10 different applications. Your ideal PostgreSQL setup can be re-used for all your future projects. And so on. - *Sharing.* Docker has access to a public registry [on Docker Hub](https://hub.docker.com/) where thousands of people have uploaded useful images: anything from Redis, CouchDB, PostgreSQL to IRC bouncers to Rails app servers to Hadoop to base images for various Linux distros. The [*registry*](https://docs.docker.com/registry/) also includes an official "standard library" of useful containers maintained by the Docker team. The registry itself is open-source, so anyone can deploy their own registry to store and transfer private containers, for internal server deployments for example. - *Tool ecosystem.* Docker defines an API for automating and customizing the creation and deployment of containers. There are a huge number of tools integrating with Docker to extend its capabilities. 
PaaS-like deployment (Dokku, Deis, Flynn), multi-node orchestration (Maestro, Salt, Mesos, Openstack Nova), management dashboards (docker-ui, Openstack Horizon, Shipyard), configuration management (Chef, Puppet), continuous integration (Jenkins, Strider, Travis), etc. Docker is rapidly establishing itself as the standard for container-based tooling. ### What is the difference between a Docker container and a VM? There's a great StackOverflow answer [showing the differences]( http://stackoverflow.com/questions/16047306/how-is-docker-io-different-from-a-normal-virtual-machine). ### Do I lose my data when the container exits? Not at all! Any data that your application writes to disk gets preserved in its container until you explicitly delete the container. The file system for the container persists even after the container halts. ### How far do Docker containers scale? Some of the largest server farms in the world today are based on containers. Large web deployments like Google and Twitter, and platform providers such as Heroku and dotCloud all run on container technology, at a scale of hundreds of thousands or even millions of containers running in parallel. ### How do I connect Docker containers? Currently the recommended way to connect containers is via the Docker network feature. You can see details of how to [work with Docker networks here](userguide/networking/work-with-networks.md). Also useful for more flexible service portability is the [Ambassador linking pattern](admin/ambassador_pattern_linking.md). ### How do I run more than one process in a Docker container? Any capable process supervisor such as [http://supervisord.org/]( http://supervisord.org/), runit, s6, or daemontools can do the trick. Docker will start up the process management daemon which will then fork to run additional processes. As long as the process manager daemon continues to run, the container will continue to run as well. 
You can see a more substantial example [that uses supervisord here](admin/using_supervisord.md). ### What platforms does Docker run on? Linux: - Ubuntu 12.04, 13.04 et al - Fedora 19/20+ - RHEL 6.5+ - CentOS 6+ - Gentoo - ArchLinux - openSUSE 12.3+ - CRUX 3.0+ Cloud: - Amazon EC2 - Google Compute Engine - Microsoft Azure - Rackspace ### How do I report a security issue with Docker? You can learn about the project's security policy [here](https://www.docker.com/security/) and report security issues to this [mailbox](mailto:security@docker.com). ### Why do I need to sign my commits to Docker with the DCO? Please read [our blog post]( http://blog.docker.com/2014/01/docker-code-contributions-require-developer-certificate-of-origin/) on the introduction of the DCO. ### When building an image, should I prefer system libraries or bundled ones? *This is a summary of a discussion on the [docker-dev mailing list]( https://groups.google.com/forum/#!topic/docker-dev/L2RBSPDu1L0).* Virtually all programs depend on third-party libraries. Most frequently, they will use dynamic linking and some kind of package dependency, so that when multiple programs need the same library, it is installed only once. Some programs, however, will bundle their third-party libraries, because they rely on very specific versions of those libraries. For instance, Node.js bundles OpenSSL; MongoDB bundles V8 and Boost (among others). When creating a Docker image, is it better to use the bundled libraries, or should you build those programs so that they use the default system libraries instead? The key point about system libraries is not about saving disk or memory space. It is about security. All major distributions handle security seriously, by having dedicated security teams, following up closely with published vulnerabilities, and disclosing advisories themselves. (Look at the [Debian Security Information](https://www.debian.org/security/) for an example of those procedures.) 
Upstream developers, however, do not always implement similar practices. Before setting up a Docker image to compile a program from source, if you want to use bundled libraries, you should check if the upstream authors provide a convenient way to announce security vulnerabilities, and if they update their bundled libraries in a timely manner. If they don't, you are exposing yourself (and the users of your image) to security vulnerabilities. Likewise, before using packages built by others, you should check if the channels providing those packages implement similar security best practices. Downloading and installing an "all-in-one" .deb or .rpm sounds great at first, except if you have no way to figure out that it contains a copy of the OpenSSL library vulnerable to the [Heartbleed](http://heartbleed.com/) bug. ### Why is `DEBIAN_FRONTEND=noninteractive` discouraged in Dockerfiles? When building Docker images on Debian and Ubuntu you may have seen errors like: unable to initialize frontend: Dialog These errors don't stop the image from being built but inform you that the installation process tried to open a dialog box, but was unable to. Generally, these errors are safe to ignore. Some people circumvent these errors by changing the `DEBIAN_FRONTEND` environment variable inside the Dockerfile using: ENV DEBIAN_FRONTEND=noninteractive This prevents the installer from opening dialog boxes during installation which stops the errors. While this may sound like a good idea, it *may* have side effects. The `DEBIAN_FRONTEND` environment variable will be inherited by all images and containers built from your image, effectively changing their behavior. People using those images will run into problems when installing software interactively, because installers will not show any dialog boxes. Because of this, and because setting `DEBIAN_FRONTEND` to `noninteractive` is mainly a 'cosmetic' change, we *discourage* changing it. 
If you *really* need to change its setting, make sure to change it back to its [default value](https://www.debian.org/releases/stable/i386/ch05s03.html.en) afterwards. ### Why do I get `Connection reset by peer` when making a request to a service running in a container? Typically, this message is returned if the service is already bound to your localhost. As a result, requests coming to the container from outside are dropped. To correct this problem, change the service's configuration on your localhost so that the service accepts requests from all IPs. If you aren't sure how to do this, check the documentation for your OS. ### Why do I get `Cannot connect to the Docker daemon. Is the docker daemon running on this host?` when using docker-machine? This error points out that the docker client cannot connect to the virtual machine. This means that either the virtual machine that works underneath `docker-machine` is not running or that the client doesn't correctly point at it. To verify that the docker machine is running you can use the `docker-machine ls` command and start it with `docker-machine start` if needed. $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS default - virtualbox Stopped Unknown $ docker-machine start default You have to tell Docker to talk to that machine. You can do this with the `docker-machine env` command. For example, $ eval "$(docker-machine env default)" $ docker ps ### Where can I find more answers? You can find more answers on: - [Docker user mailinglist](https://groups.google.com/d/forum/docker-user) - [Docker developer mailinglist](https://groups.google.com/d/forum/docker-dev) - [IRC, docker on freenode](irc://chat.freenode.net#docker) - [GitHub](https://github.com/docker/docker) - [Ask questions on Stackoverflow](http://stackoverflow.com/search?q=docker) - [Join the conversation on Twitter](http://twitter.com/docker) Looking for something else to read? Checkout the [User Guide](userguide/index.md). 
docker-1.10.3/docs/index.md000066400000000000000000000110771267010174400154340ustar00rootroot00000000000000 # About Docker Engine **Develop, Ship and Run Any Application, Anywhere** [**Docker**](https://www.docker.com) is a platform for developers and sysadmins to develop, ship, and run applications. Docker lets you quickly assemble applications from components and eliminates the friction that can come when shipping code. Docker lets you get your code tested and deployed into production as fast as possible. Docker consists of: * The Docker Engine - our lightweight and powerful open source containerization technology combined with a work flow for building and containerizing your applications. * [Docker Hub](https://hub.docker.com) - our SaaS service for sharing and managing your application stacks. ## Why Docker? *Faster delivery of your applications* * We want your environment to work better. Docker containers, and the work flow that comes with them, help your developers, sysadmins, QA folks, and release engineers work together to get your code into production and make it useful. We've created a standard container format that lets developers care about their applications inside containers while sysadmins and operators can work on running the container in your deployment. This separation of duties streamlines and simplifies the management and deployment of code. * We make it easy to build new containers, enable rapid iteration of your applications, and increase the visibility of changes. This helps everyone in your organization understand how an application works and how it is built. * Docker containers are lightweight and fast! Containers have sub-second launch times, reducing the cycle time of development, testing, and deployment. *Deploy and scale more easily* * Docker containers run (almost) everywhere. You can deploy containers on desktops, physical servers, virtual machines, into data centers, and up to public and private clouds. 
* Since Docker runs on so many platforms, it's easy to move your applications around. You can easily move an application from a testing environment into the cloud and back whenever you need. * Docker's lightweight containers also make scaling up and down fast and easy. You can quickly launch more containers when needed and then shut them down easily when they're no longer needed. *Get higher density and run more workloads* * Docker containers don't need a hypervisor, so you can pack more of them onto your hosts. This means you get more value out of every server and can potentially reduce what you spend on equipment and licenses. *Faster deployment makes for easier management* * As Docker speeds up your work flow, it gets easier to make lots of small changes instead of huge, big bang updates. Smaller changes mean reduced risk and more uptime. ## About this guide The [Understanding Docker section](understanding-docker.md) will help you: - See how Docker works at a high level - Understand the architecture of Docker - Discover Docker's features; - See how Docker compares to virtual machines - See some common use cases. ### Installation guides The [installation section](installation/index.md) will show you how to install Docker on a variety of platforms. ### Docker user guide To learn about Docker in more detail and to answer questions about usage and implementation, check out the [Docker User Guide](userguide/index.md). ## Release notes A summary of the changes in each release in the current series can now be found on the separate [Release Notes page](https://docs.docker.com/release-notes) ## Feature Deprecation Policy As changes are made to Docker there may be times when existing features will need to be removed or replaced with newer features. Before an existing feature is removed it will be labeled as "deprecated" within the documentation and will remain in Docker for, usually, at least 2 releases. After that time it may be removed. 
Users are expected to take note of the list of deprecated features each release and plan their migration away from those features, and (if applicable) towards the replacement features as soon as possible. The complete list of deprecated features can be found on the [Deprecated Features page](deprecated.md). ## Licensing Docker is licensed under the Apache License, Version 2.0. See [LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full license text. docker-1.10.3/docs/installation/000077500000000000000000000000001267010174400164765ustar00rootroot00000000000000docker-1.10.3/docs/installation/binaries.md000066400000000000000000000162711267010174400206230ustar00rootroot00000000000000 # Binaries **This instruction set is meant for hackers who want to try out Docker on a variety of environments.** Before following these directions, you should really check if a packaged version of Docker is already available for your distribution. We have packages for many distributions, and more keep showing up all the time! ## Check runtime dependencies To run properly, docker needs the following software to be installed at runtime: - iptables version 1.4 or later - Git version 1.7 or later - procps (or similar provider of a "ps" executable) - XZ Utils 4.9 or later - a [properly mounted]( https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point [is](https://github.com/docker/docker/issues/2683) [not](https://github.com/docker/docker/issues/3485) [sufficient](https://github.com/docker/docker/issues/4568)) ## Check kernel dependencies Docker in daemon mode has specific kernel requirements. For details, check your distribution in [*Installation*](index.md#on-linux). A 3.10 Linux kernel is the minimum requirement for Docker. Kernels older than 3.10 lack some of the features required to run Docker containers. 
These older versions are known to have bugs which cause data loss and frequently panic under certain conditions. The latest minor version (3.x.y) of the 3.10 (or a newer maintained version) Linux kernel is recommended. Keeping the kernel up to date with the latest minor version will ensure critical kernel bugs get fixed. > **Warning**: > Installing custom kernels and kernel packages is probably not > supported by your Linux distribution's vendor. Please make sure to > ask your vendor about Docker support first before attempting to > install custom kernels on your distribution. > **Warning**: > Installing a newer kernel might not be enough for some distributions > which provide packages which are too old or incompatible with > newer kernels. Note that Docker also has a client mode, which can run on virtually any Linux kernel (it even builds on OS X!). ## Enable AppArmor and SELinux when possible Please use AppArmor or SELinux if your Linux distribution supports either of the two. This helps improve security and blocks certain types of exploits. Your distribution's documentation should provide detailed steps on how to enable the recommended security mechanism. Some Linux distributions enable AppArmor or SELinux by default and they run a kernel which doesn't meet the minimum requirements (3.10 or newer). Updating the kernel to 3.10 or newer on such a system might not be enough to start Docker and run containers. Incompatibilities between the version of AppArmor/SELinux user space utilities provided by the system and the kernel could prevent Docker from running, from starting containers or, cause containers to exhibit unexpected behaviour. > **Warning**: > If either of the security mechanisms is enabled, it should not be > disabled to make Docker or its containers run. This will reduce > security in that environment, lose support from the distribution's > vendor for the system, and might break regulations and security > policies in heavily regulated environments. 
## Get the Docker binary You can download either the latest release binary or a specific version. After downloading a binary file, you must set the file's execute bit to run it. To set the file's execute bit on Linux and OS X: $ chmod +x docker To get the list of stable release version numbers from GitHub, view the `docker/docker` [releases page](https://github.com/docker/docker/releases). > **Note** > > 1) You can get the MD5 and SHA256 hashes by appending .md5 and .sha256 to the URLs respectively > > 2) You can get the compressed binaries by appending .tgz to the URLs ### Get the Linux binary To download the latest version for Linux, use the following URLs: https://get.docker.com/builds/Linux/i386/docker-latest https://get.docker.com/builds/Linux/x86_64/docker-latest To download a specific version for Linux, use the following URL patterns: https://get.docker.com/builds/Linux/i386/docker- https://get.docker.com/builds/Linux/x86_64/docker- For example: https://get.docker.com/builds/Linux/i386/docker-1.9.1 https://get.docker.com/builds/Linux/x86_64/docker-1.9.1 ### Get the Mac OS X binary The Mac OS X binary is only a client. You cannot use it to run the `docker` daemon. To download the latest version for Mac OS X, use the following URLs: https://get.docker.com/builds/Darwin/x86_64/docker-latest To download a specific version for Mac OS X, use the following URL patterns: https://get.docker.com/builds/Darwin/x86_64/docker- For example: https://get.docker.com/builds/Darwin/x86_64/docker-1.9.1 ### Get the Windows binary You can only download the Windows client binary for version `1.9.1` onwards. Moreover, the binary is only a client, you cannot use it to run the `docker` daemon. 
To download the latest version for Windows, use the following URLs: https://get.docker.com/builds/Windows/i386/docker-latest.exe https://get.docker.com/builds/Windows/x86_64/docker-latest.exe To download a specific version for Windows, use the following URL pattern: https://get.docker.com/builds/Windows/i386/docker-.exe https://get.docker.com/builds/Windows/x86_64/docker-.exe For example: https://get.docker.com/builds/Windows/i386/docker-1.9.1.exe https://get.docker.com/builds/Windows/x86_64/docker-1.9.1.exe ## Run the Docker daemon # start docker in daemon mode from the directory you unpacked $ sudo ./docker daemon & ## Giving non-root access The `docker` daemon always runs as the root user, and the `docker` daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user *root*, and so, by default, you can access it with `sudo`. If you (or your Docker installer) create a Unix group called *docker* and add users to it, then the `docker` daemon will make the ownership of the Unix socket read/writable by the *docker* group when the daemon starts. The `docker` daemon must always run as the root user, but if you run the `docker` client as a user in the *docker* group then you don't need to add `sudo` to all the client commands. > **Warning**: > The *docker* group (or the group specified with `-G`) is root-equivalent; > see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) for details. ## Upgrades To upgrade your manual installation of Docker, first kill the docker daemon: $ killall docker Then follow the regular installation steps. ## Run your first container! # check your docker version $ sudo ./docker version # run a container and open an interactive shell in the container $ sudo ./docker run -i -t ubuntu /bin/bash Continue with the [User Guide](../userguide/index.md). 
docker-1.10.3/docs/installation/cloud/000077500000000000000000000000001267010174400176045ustar00rootroot00000000000000docker-1.10.3/docs/installation/cloud/cloud-ex-aws.md000066400000000000000000000233601267010174400224420ustar00rootroot00000000000000 # Example: Manual install on a cloud provider You can install Docker Engine directly to servers you have on cloud providers. This example shows how to create an
Amazon Web Services (AWS) EC2 instance, and install Docker Engine on it. You can use this same general approach to create Dockerized hosts on other cloud providers. ### Step 1. Sign up for AWS 1. If you are not already an AWS user, sign up for AWS to create an account and get root access to EC2 cloud computers. If you have an Amazon account, you can use it as your root user account. 2. Create an IAM (Identity and Access Management) administrator user, an admin group, and a key pair associated with a region. From the AWS menus, select **Services** > **IAM** to get started. See the AWS documentation on Setting Up with Amazon EC2. Follow the steps for "Create an IAM User" and "Create a Key Pair". If you are just getting started with AWS and EC2, you do not need to create a virtual private cloud (VPC) or specify a subnet. The newer EC2-VPC platform (accounts created after 2013-12-04) comes with a default VPC and subnet in each availability zone. When you launch an instance, it automatically uses the default VPC. ### Step 2. Configure and start an EC2 instance Launch an instance to create a virtual machine (VM) with a specified operating system (OS) as follows. 1. Log into AWS with your IAM credentials. On the AWS home page, click **EC2** to go to the dashboard, then click **Launch Instance**. ![EC2 dashboard](../images/ec2_launch_instance.png) AWS EC2 virtual servers are called *instances* in Amazon parlance. Once you set up an account, IAM user and key pair, you are ready to launch an instance. It is at this point that you select the OS for the VM. 2. Choose an Amazon Machine Image (AMI) with the OS and applications you want. For this example, we select an Ubuntu server. ![Launch Ubuntu](../images/ec2-ubuntu.png) 3. Choose an instance type. ![Choose a general purpose instance type](../images/ec2_instance_type.png) 4. Configure the instance. You can select the default network and subnet, which are inherently linked to a region and availability zone. 
![Configure the instance](../images/ec2_instance_details.png) 5. Click **Review and Launch**. 6. Select a key pair to use for this instance. When you choose to launch, you need to select a key pair to use. Save the `.pem` file to use in the next steps. The instance is now up-and-running. The menu path to get back to your EC2 instance on AWS is: **EC2 (Virtual Servers in Cloud)** > **EC2 Dashboard** > **Resources** > **Running instances**. To get help with your private key file, instance IP address, and how to log into the instance via SSH, click the **Connect** button at the top of the AWS instance dashboard. ### Step 3. Log in from a terminal, configure apt, and get packages 1. Log in to the EC2 instance from a command line terminal. Change directories into the directory containing the SSH key and run this command (or give the path to it as part of the command): $ ssh -i "YourKey" ubuntu@xx.xxx.xxx.xxx For our example: $ cd ~/Desktop/keys/amazon_ec2 $ ssh -i "my-key-pair.pem" ubuntu@xx.xxx.xxx.xxx We'll follow the instructions for installing Docker on Ubuntu at https://docs.docker.com/engine/installation/ubuntulinux/. The next few steps reflect those instructions. 2. Check the kernel version to make sure it's 3.10 or higher. ubuntu@ip-xxx-xx-x-xxx:~$ uname -r 3.13.0-48-generic 3. Add the new `gpg` key. ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D Executing: gpg --ignore-time-conflict --no-options --no-default-keyring --homedir /tmp/tmp.jNZLKNnKte --no-auto-check-trustdb --trust-model always --keyring /etc/apt/trusted.gpg --primary-keyring /etc/apt/trusted.gpg --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D gpg: requesting key 2C52609D from hkp server p80.pool.sks-keyservers.net gpg: key 2C52609D: public key "Docker Release Tool (releasedocker) " imported gpg: Total number processed: 1 gpg: imported: 1 (RSA: 1) 4. 
Create a `docker.list` file, and add an entry for our OS, Ubuntu Trusty 14.04 (LTS). ubuntu@ip-xxx-xx-x-xxx:~$ sudo vi /etc/apt/sources.list.d/docker.list If we were updating an existing file, we'd delete any existing entries. 5. Update the `apt` package index. ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get update 6. Purge the old repo if it exists. In our case the repo doesn't because this is a new VM, but let's run it anyway just to be sure. ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get purge lxc-docker Reading package lists... Done Building dependency tree Reading state information... Done Package 'lxc-docker' is not installed, so not removed 0 upgraded, 0 newly installed, 0 to remove and 139 not upgraded. 7. Verify that `apt` is pulling from the correct repository. ubuntu@ip-172-31-0-151:~$ sudo apt-cache policy docker-engine docker-engine: Installed: (none) Candidate: 1.9.1-0~trusty Version table: 1.9.1-0~trusty 0 500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages 1.9.0-0~trusty 0 500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages . . . From now on when you run `apt-get upgrade`, `apt` pulls from the new repository. ### Step 4. Install recommended prerequisites for the OS For Ubuntu Trusty (and some other versions), it’s recommended to install the `linux-image-extra` kernel package, which allows you to use the `aufs` storage driver, so we'll do that now. ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get update ubuntu@ip-172-31-0-151:~$ sudo apt-get install linux-image-extra-$(uname -r) ### Step 5. Install Docker Engine on the remote instance 1. Update the apt package index. ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get update 2. Install Docker Engine. ubuntu@ip-xxx-xx-x-xxx:~$ sudo apt-get install docker-engine Reading package lists... Done Building dependency tree Reading state information... 
Done The following extra packages will be installed: aufs-tools cgroup-lite git git-man liberror-perl Suggested packages: git-daemon-run git-daemon-sysvinit git-doc git-el git-email git-gui gitk gitweb git-arch git-bzr git-cvs git-mediawiki git-svn The following NEW packages will be installed: aufs-tools cgroup-lite docker-engine git git-man liberror-perl 0 upgraded, 6 newly installed, 0 to remove and 139 not upgraded. Need to get 11.0 MB of archives. After this operation, 60.3 MB of additional disk space will be used. Do you want to continue? [Y/n] y Get:1 http://us-west-1.ec2.archive.ubuntu.com/ubuntu/ trusty/universe aufs-tools amd64 1:3.2+20130722-1.1 [92.3 kB] Get:2 http://us-west-1.ec2.archive.ubuntu.com/ubuntu/ trusty/main liberror-perl all 0.17-1.1 [21.1 kB] . . . 3. Start the Docker daemon. ubuntu@ip-xxx-xx-x-xxx:~$ sudo service docker start 4. Verify Docker Engine is installed correctly by running `docker run hello-world`. ubuntu@ip-xxx-xx-x-xxx:~$ sudo docker run hello-world ubuntu@ip-172-31-0-151:~$ sudo docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from library/hello-world b901d36b6f2f: Pull complete 0a6ba66e537a: Pull complete Digest: sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 Status: Downloaded newer image for hello-world:latest Hello from Docker. This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal. 
To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash Share images, automate workflows, and more with a free Docker Hub account: https://hub.docker.com For more examples and ideas, visit: https://docs.docker.com/userguide/ ## Where to go next * Would you like a quicker way to do Docker cloud installs? See [Digital Ocean Example: Use Docker Machine to provision Docker on cloud hosts](cloud-ex-machine-ocean.md). * To learn more about options for installing Docker Engine on cloud providers, see [Understand cloud install options and choose one](cloud.md). * To get started with Docker, see Docker User Guide. docker-1.10.3/docs/installation/cloud/cloud-ex-machine-ocean.md000066400000000000000000000276371267010174400243500ustar00rootroot00000000000000 # Example: Use Docker Machine to provision cloud hosts Docker Machine driver plugins are available for many cloud platforms, so you can use Machine to provision cloud hosts. When you use Docker Machine for provisioning, you create cloud hosts with Docker Engine installed on them. You'll need to install and run Docker Machine, and create an account with the cloud provider. Then you provide account verification, security credentials, and configuration options for the providers as flags to `docker-machine create`. The flags are unique for each cloud-specific driver. For instance, to pass a Digital Ocean access token you use the `--digitalocean-access-token` flag. As an example, let's take a look at how to create a Dockerized Digital Ocean _Droplet_ (cloud server). ### Step 1. Create a Digital Ocean account and log in If you have not done so already, go to Digital Ocean, create an account, and log in. ### Step 2. Generate a personal access token To generate your access token: 1. Go to the Digital Ocean administrator console and click **API** in the header. ![Click API in Digital Ocean console](../images/ocean_click_api.png) 2. Click **Generate New Token** to get to the token generator. 
![Generate token](../images/ocean_gen_token.png) 3. Give the token a clever name (e.g. "machine"), make sure the **Write (Optional)** checkbox is checked, and click **Generate Token**. ![Name and generate token](../images/ocean_token_create.png) 4. Grab (copy to clipboard) the generated big long hex string and store it somewhere safe. ![Copy and save personal access token](../images/ocean_save_token.png) This is the personal access token you'll use in the next step to create your cloud server. ### Step 3. Start Docker Machine 1. If you have not done so already, install Docker Machine on your local host. * How to install Docker Machine on Mac OS X * How to install Docker Machine on Windows * Install Docker Machine directly (e.g., on Linux) 2. At a command terminal, use `docker-machine ls` to get a list of Docker Machines and their status. $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM default - virtualbox Stopped 3. If Machine is stopped, start it. $ docker-machine start default (default) OUT | Starting VM... Started machines may have new IP addresses. You may need to re-run the `docker-machine env` command. 4. Set environment variables to connect your shell to the local VM. $ docker-machine env default export DOCKER_TLS_VERIFY="1" export DOCKER_HOST="tcp://xxx.xxx.xx.xxx:xxxx" export DOCKER_CERT_PATH="/Users/londoncalling/.docker/machine/machines/default" export DOCKER_MACHINE_NAME="default" # Run this command to configure your shell: # eval "$(docker-machine env default)" eval "$(docker-machine env default)" 5. Re-run `docker-machine ls` to check that it's now running. $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM default * virtualbox Running tcp:////xxx.xxx.xx.xxx:xxxx 6. Run some Docker commands to make sure that Docker Engine is also up-and-running. We'll run `docker run hello-world` again, but you could try `docker ps`, `docker run docker/whalesay cowsay boo`, or another command to verify that Docker is running. 
$ docker run hello-world Hello from Docker. This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal. To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash Share images, automate workflows, and more with a free Docker Hub account: https://hub.docker.com For more examples and ideas, visit: https://docs.docker.com/userguide/ ### Step 4. Use Docker Machine to Create the Droplet 1. Run `docker-machine create` with the `digitalocean` driver and pass your key to the `--digitalocean-access-token` flag, along with a name for the new cloud server. For this example, we'll call our new Droplet "docker-sandbox". $ docker-machine create --driver digitalocean --digitalocean-access-token 455275108641c7716462d6f35d08b76b246b6b6151a816cf75de63c5ef918872 docker-sandbox Running pre-create checks... Creating machine... (docker-sandbox) OUT | Creating SSH key... (docker-sandbox) OUT | Creating Digital Ocean droplet... (docker-sandbox) OUT | Waiting for IP address to be assigned to the Droplet... Waiting for machine to be running, this may take a few minutes... Machine is running, waiting for SSH to be available... Detecting operating system of created instance... Detecting the provisioner... Provisioning created instance... Copying certs to the local machine directory... Copying certs to the remote machine... Setting Docker configuration on the remote daemon... 
To see how to connect Docker to this machine, run: docker-machine env docker-sandbox When the Droplet is created, Docker generates a unique SSH key and stores it on your local system in `~/.docker/machines`. Initially, this is used to provision the host. Later, it's used under the hood to access the Droplet directly with the `docker-machine ssh` command. Docker Engine is installed on the cloud server and the daemon is configured to accept remote connections over TCP using TLS for authentication. 2. Go to the Digital Ocean console to view the new Droplet. ![Droplet in Digital Ocean created with Machine](../images/ocean_droplet.png) 3. At the command terminal, run `docker-machine ls`. $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM default * virtualbox Running tcp://192.168.99.100:2376 docker-sandbox - digitalocean Running tcp://45.55.139.48:2376 Notice that the new cloud server is running but is not the active host. Our command shell is still connected to the default machine, which is currently the active host as indicated by the asterisk (*). 4. Run `docker-machine env docker-sandbox` to get the environment commands for the new remote host, then run `eval` as directed to re-configure the shell to connect to `docker-sandbox`. $ docker-machine env docker-sandbox export DOCKER_TLS_VERIFY="1" export DOCKER_HOST="tcp://45.55.222.72:2376" export DOCKER_CERT_PATH="/Users/victoriabialas/.docker/machine/machines/docker-sandbox" export DOCKER_MACHINE_NAME="docker-sandbox" # Run this command to configure your shell: # eval "$(docker-machine env docker-sandbox)" $ eval "$(docker-machine env docker-sandbox)" 5. Re-run `docker-machine ls` to verify that our new server is the active machine, as indicated by the asterisk (*) in the ACTIVE column. $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM default - virtualbox Running tcp://192.168.99.100:2376 docker-sandbox * digitalocean Running tcp://45.55.222.72:2376 6. 
Log in to the Droplet with the `docker-machine ssh` command. $ docker-machine ssh docker-sandbox Welcome to Ubuntu 14.04.3 LTS (GNU/Linux 3.13.0-71-generic x86_64) * Documentation: https://help.ubuntu.com/ System information as of Mon Dec 21 21:38:53 EST 2015 System load: 0.77 Processes: 70 Usage of /: 11.4% of 19.56GB Users logged in: 0 Memory usage: 15% IP address for eth0: 45.55.139.48 Swap usage: 0% IP address for docker0: 172.17.0.1 Graph this data and manage this system at: https://landscape.canonical.com/ 7. Verify Docker Engine is installed correctly by running `docker run hello-world`. ubuntu@ip-172-31-0-151:~$ sudo docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from library/hello-world b901d36b6f2f: Pull complete 0a6ba66e537a: Pull complete Digest: sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 Status: Downloaded newer image for hello-world:latest Hello from Docker. This message shows that your installation appears to be working correctly. . . . You can type keyboard command Control-D or `exit` to log out of the remote server. #### Understand the defaults and options on the create command For convenience, `docker-machine` will use sensible defaults for choosing settings such as the image that the server is based on, but you override the defaults using the respective flags (e.g. `--digitalocean-image`). This is useful if, for example, you want to create a cloud server with a lot of memory and CPUs (by default `docker-machine` creates a small server). For a full list of the flags/settings available and their defaults, see the output of `docker-machine create -h` at the command line. See also Driver options and operating system defaults and information about the create command in the Docker Machine documentation. ### Step 5. 
Use Docker Machine to remove the Droplet To remove a host and all of its containers and images, first stop the machine, then use `docker-machine rm`: $ docker-machine stop docker-sandbox $ docker-machine rm docker-sandbox Do you really want to remove "docker-sandbox"? (y/n): y Successfully removed docker-sandbox $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM default * virtualbox Running tcp://xxx.xxx.xx.xxx:xxxx If you monitor the Digital Ocean console while you run these commands, you will see it update first to reflect that the Droplet was stopped, and then removed. If you create a host with Docker Machine, but remove it through the cloud provider console, Machine will lose track of the server status. So please use the `docker-machine rm` command for hosts you create with `docker-machine create`. ## Where to go next * To learn more about options for installing Docker Engine on cloud providers, see [Understand cloud install options and choose one](cloud.md). * To learn more about using Docker Machine to provision cloud hosts, see Using Docker Machine with a cloud provider. * To get started with Docker, see Docker User Guide. docker-1.10.3/docs/installation/cloud/cloud.md000066400000000000000000000051631267010174400212410ustar00rootroot00000000000000 # Understand cloud install options and choose one You can install Docker Engine on any cloud platform that runs an operating system (OS) that Docker supports. This includes many flavors and versions of Linux, along with Mac and Windows. You have two options for installing: * Manually install on the cloud (create cloud hosts, then install Docker Engine on them) * Use Docker Machine to provision cloud hosts ## Manually install Docker Engine on a cloud host To install on a cloud provider: 1. Create an account with the cloud provider, and read cloud provider documentation to understand their process for creating hosts. 2. Decide which OS you want to run on the cloud host. 3. 
Understand the Docker prerequisites and install process for the chosen OS. See [Install Docker Engine](index.md) for a list of supported systems and links to the install guides. 4. Create a host with a Docker supported OS, and install Docker per the instructions for that OS. [Example: Manual install on a cloud provider](cloud-ex-aws.md) shows how to create an Amazon Web Services (AWS) EC2 instance, and install Docker Engine on it. ## Use Docker Machine to provision cloud hosts Docker Machine driver plugins are available for several popular cloud platforms, so you can use Machine to provision one or more Dockerized hosts on those platforms. With Docker Machine, you can use the same interface to create cloud hosts with Docker Engine on them, each configured per the options you specify. To do this, you use the `docker-machine create` command with the driver for the cloud provider, and provider-specific flags for account verification, security credentials, and other configuration details. [Example: Use Docker Machine to provision cloud hosts](cloud-ex-machine-ocean.md) walks you through the steps to set up Docker Machine and provision a Dockerized host on [Digital Ocean](https://www.digitalocean.com/). 
## Where to go next * [Example: Manual install on a cloud provider](cloud-ex-aws.md) (AWS EC2) * [Example: Use Docker Machine to provision cloud hosts](cloud-ex-machine-ocean.md) (Digital Ocean) * [Using Docker Machine with a cloud provider](https://docs.docker.com/machine/get-started-cloud/) * Docker User Guide (after your install is complete, get started using Docker) docker-1.10.3/docs/installation/cloud/index.md000066400000000000000000000012451267010174400212370ustar00rootroot00000000000000 # Install Engine in the cloud * [Understand cloud install options and choose one](cloud.md) * [Example: Use Docker Machine to provision cloud hosts](cloud-ex-machine-ocean.md) * [Example: Manual install on a cloud provider](cloud-ex-aws.md) docker-1.10.3/docs/installation/images/000077500000000000000000000000001267010174400177435ustar00rootroot00000000000000docker-1.10.3/docs/installation/images/bad_host.png000066400000000000000000000653471267010174400222530ustar00rootroot00000000000000PNG  IHDR{ WjIDATxmTyo+CJ"UbJ&iTMEʇiN+۔~pVIC,CCmDjb ,0˾;3w^mvgְs=g$JzLcg=9s<_̌~3g?7?04\PEQEQ=92fRxg/>~g̣3((8oOqjf}h K(4b%v(wt77!]ĶgYk?KAc@=$ΎNTJ80 "r(M`b0C6( 1Кbzq0L6",A` ix]"֚[`d!9,s n($'AƕZ*bb'buiw(g:;;ϝ;cSREQO)ď?MMMԋMkp佾%_Qh±G{+n7xA4bdhC][o~B.Fappwޙ'DDUKA`7A,ƀa 1/ Bqq(IP[QNظ@#),CXaHXc cֹZxF*[QYH.QI c6 IIu[E:1g6HX,-&&y`xlxx(i~!9аiӦ7RR{*&ށ O_W02CUmGY޼!Ŷm"7_?`dSmmmL3 DU_{n-0(F'rNNI%'#B 0!rd"Dsd6"!K1.CLa M27 PPHDuiҦ&&x`xlTq*(<|/um۶QƲ9\/n`;}ybX܎uajk<ۙ[p^JE{QܾDҰ\ 08>^EbX'KtKn{tTQQ z.\䊢(R"lꫯnmv:ת8j j!=`Ubܬ8ahGuI~ ?EDT>{PJ(NEX3an# D&Qb*8G1ZQGpgd dRC`@,[^& r(䐓uRH$}Pmi! 
}.sti /m"ۓt&s~?gsTt>Cy?cML2l&l>/uuu )(od$rNMKB Ԣ8֢8֢8 ۻ)k+PhXT(X4 mV"{d !F !5!WSL#ЌF-c`"^xruNźEAEtbMj Rpf"w{i,ۻI,qi&{KQ\.!M4sN.~>:]L.OA3yir\6'X錟zѝ~bEQ]v-0ׇDhnذ6Sդ8{Oq§8T-_[T*D"J&&'EAeQ2l!Q6N&ܹrRho FC#&45WkR&4_%W5jsgB|-GdEQk;іvy}{sl󴖰23h;LPQD4Ze2y$zv劕PnRˋ0.e7-  6w6+qK py=oj,y=xk/6_v򍥤pY]ݲu>- kyde+V,_|ʕK0vci|7^5/vifzkk""u$*KœSdGd^ٯ̤ЎL?I'D&TO4)t* l}fS\UDŋEQE)N߬lo|6Yq¡Zl sjϽvoPX  &bJ& AY5(79Oq\%ԖE\:n;=>y?lmyv̆!EF("lDOKKg(e]گ#1CؾT"jbƶ:C8 \R9RGH>( :Ɗl r}p(&@ܹ҆VKusW^[ЪK8pO-%@)I e\!@r֯8 gԠ+TDv8Tf}#cYM߄E>X~| j~u='Y&`6ꏗ7*:{({Q_m+yO;g~Xw_=Ϗ6 xP~D4l=T&>Ld'xhB6GNF-=ܹ`/Jxn׷k?;^nL躇:Nک7pΡ cKM^|B[q ^Jr5Lu y&Ԩ-R9F{aCr~;T<{:Bg>j]U۾y6Gyd|r\s֭l ̈́x6();pl0ZQ%կRse5uͻbd7͆6O6t>H|=@l/NQt\1/f&d;L3ǖdhQLz~bӦM e_]_=鬩n)DWغTW6rI.+:%YKt^졅\ҠpE2;v옟;Y}8qߠRAB:H׭cs3zA#K R=!=/tTdi3*mJG_ %Cǃ`iwe7WkAwBĎy274e=;33~O@4uر jS?XWt%̏(ؤ49ҼwO_9dY;fR;me&`VT +SZbL}Jr"x5mM.?ghvtŗ?!+ Po8|i)Yqbn}y$'7~'KjCvA{wW=VWm[jO>SO=ոؤ&$K@ 2qø1mEիfE/qW ?-CZvMKk{2(9I&|WMss|8uMrPl/K2B@,~vrY}if}i}t3ՏǏٳGzxxxxxl:^T=!<e2:aEtm*qOxTW_ dXAB Jg͗?#1m,/Lj;.ċЬ.YI؏ZeԚeZ{W NWEr1% -X <0g0bxhp[@)tѺC[ƭ̛AgS2qPxK:Z%) <#w RSJibB+D&SMFWB%%5MʅnyM:?(3C| .̇:%$)O«flS Z \߰۵!UY$*NԸQGHXQY.z2#Uƽ K.=ٹli} V7?MM¿j$a՟YCu¡">h%SXe?C+s{E8N7;[o:|Oxxxxxx@@/FEsA' FnP؞r?mxic,%QILjf|ɨ{ît5(m.v'Rha{wfo慁Ǥ ou]?C=:ɃW/SdfT8;V0R7'2'WYUxohU1^onۺyk1,<#aU>/@\IctZ1ukePW;K,nkm;u\mR=VCfj.\Eu4'>T#z:Neҽf R+.d` 2,t/t_&(5Q6jp4v_XHyԥ(:`]EʼnӧO+m]5SK f ״ ;v`sR7edg 8|tuZeRŠ1 ;fK;!t؆`iw~plJO][W,=X C/7b(W*Q'>T.MV0a0B7sW3 UC1Y-]{>Fjѝ+ܱ[AٴK+cx /df%Xǿbpt/gTq!;%$KmSԘ1#W퍭۷q{'~oĒZ1)2L9mqXaXW]"uJyֻU̐p#O82֜OJS;᪝(&a8CWWOSzsTj @*{`(G٨'KUfO3P= [sG<#7q^?WmjQ B4eg "5PYWRoF1XY5+PCLp$E רG-睫 EBXhVGK{|Ӏ%ŵ&Tgg f8)& ȥV.H KMܔvA=RD(KM]YjNd0˅K 9!K[9ZT.\ RC|fLjE+\͝\?;tԪW#鮔!h ~Y@#4Pf l'!, (i&Ԟ\&%ρaȯ`Vgel_ԔRtVJJb%V;6*vHm\)mbqmhiJoVf N<xK,tK!$,Q tDB5"m#FъhReHnbpM5ސ)+)Fh[(>xc:^(֎U8)m6xA.+nJj|xOc&>u(AӧbWe `PʗHdN:-h$:@?mY\HPP!bpgLUuyiZ{fOGSbiS<}Y&XMI-Qz|kk[]Dxxxxxxx|u.ߩAX%$Չ4%To9ZxEBsP(6#H zXYVeO#m@!0"xHbF79 EbMr9x)Cd vnbjmN @u@p44~z9&ms/n;g逪))n<*F^6.p8E|#@l?g_2(i8D!$/6BG~tNMz *B`CAձ'G RH C\X($[H8oXh;#bVDS1Gr'~ [|j>5OS 
h{0C+%-Zb4 3ЧeS|LXbV&bT4] ? gܢc8R1Lbڡ DTeXDF! h}j>5OͧS|jС+6X|LJ'Th1$ycjCQ9F ˜1MZiYJ Z#.TKW̋{wc29ITdBXi SJ1{Z|j>5OͧS!b.LHFمH(dO"%axթa m*IA%A`- mM5(FZCSD3~bѽŪbƥ 9=+I8oH1j!mv{1?jq10afïS|j>5OͧSԔOg#|jBL(+ǁiP;oml^TNZplsg@,k cҊHIzjB W$zb#ZhJ44s_]jXr;;ڷmۼv-[6n[6o6nEZej7mٴyfJ Ӫ.㵛gp]bNO)[\ !-X9עTP8E=bOͧS|jWڶ7w~=S v$C,FC(+4J8\DB!ccpWR{'JoReBfwҎ83&e#)1-pgP2,#KR3:qtDX,{xxxxxܮȉ}QZ_?ٞ3pޅL.XIFHbpqƴۖ,%j7T%>e+4 LW%FBfM -ADb;.&`#\]j覦c~Y5&$Og#|jADBA%5R.J;!`0O^̕s>裏hl 5[sSu֚:CϦUǛs?0y*[[B0iyFBK.%c$tԋel"oFB:ĸQcjf+KKP*9.!I| VU&|:R8,[̫|.50ˤv3N[ovڵdɒ-[׿8.7啛7oSΘ1c-Yȅ;8?f۴M(Q\Z6#ǟD#:餓N:_N租~ ̉EĔ BW*O32ҁP}4o 4邠+A#8G#|TC_шNڝr6`i1"ظ'uS|ju;>saA#5ȶ[̙/_Ĺd҉[Nۛe͊f7jZګA`khPc擐7ǐ\Bڽ_XqS~c8{x^W]UHN^tX--G6T PgaЊ"KKJ22NAc- d/cc4! F=-SDTlٖ-[ /צMyʔɿWp޽뗿%e pm8KK'kJK;ꫯxOhZ:uI'tGEEE$JJJ233#⮞>&e䦬{ccf`ѢН;-a[eGGOi|\㥚 fh䣏?Viq5[ݩǾWHeƺu3b:_O*sBS|ʩe1a7nu8]>%KBCCcbbuE-w{ۻIcvN2a>W -Q/~7i|$IM<9!1 ~BHmQ,ׁwɓlO^. gt7P#qEJ崎 rt;s&h/0=1LaxT 6+JBx#Ě_?0~gp߿)S/t\+ [Z\.#4jk] N8:NHl?{ul'~SWz4`0r @p>J-!au'C#:餓N:={b 3L$vmٲ%ȍufbb1ݽj2jQu;ߔ}2;{֬D4 /3eG /S'L^p"$8@>|Iΐ'KKCA#77~Ppo'+䝺rߌ6KvPaO򇫷GD;H`"<׸"xbt'2֭MhEcʹU*;{8,ÚǢF-" |WO/ӧB<3;ÅBqM@T5OwfϞ@hE1AQ]z^e¦#U6F*ƪ| /lB΋IhU dL5!x]j5h%%^A^PX\Fh3f̤w5\C#)`v2رFɇ8!ꟅC?*Ruq#$dߩ~!!Gxǐڠ ąQvCc†ׇ崞YO /B(&tI'tzĤaF^Wew: eeeʖ؛)3YZAܹs/^t&ZmDć6e3_x$[n[78BEjb@>YaN4Yn6|UϦ<79O_ s_ǎKӧ歊\C#K;g,OumOAVQQQk׮ҽ[,tiXmM5 jݷ{v37sUDq>ۗ}2oZpōfOZ D>(CE$#&rJ:e^N]j58yj\-x=m4W4|>"ݮud]D:ej'J1{ )ix*0N1q{,dmZزevN&𪯤$"%g3=(R0Sˋ Ɓ?N^ q#Jq"0i[K_pP!!^Ʃu"=tI'۹s;҉'|W\]1E4%%ÇZ)sbih2e3eD-}y7ͷoN7e8exrڔEAIq$GhdB8-Y!ξJkviүN<п:w./'υ{ .FkO6E'6;A8Ep*,={䩊~7_tZz<h~%gWn&Y9ꈏ9Q ?8njZ aI0?y|ϔT[1 7 &&$pZ×Po.Am mBvI>4"lOTgJЖL됺j<$29IyRrKy%6*E -`1EaKxTbY8 Np"ὴÓ'So,JysfʶajzH MY}w,|9ӣ4ǷՐbO?Ggwj`#?_Z"UpiվDjBwgęCǩ"Ρ=#kBCdI'tlR躺 ߾}{̷Z9PK 8&3,)HHAN3-)JU%Aڔ j݂ܺ$);a^-K 3݊@>1wFB}+:ȃ\λ+lYP7)¸.ںg5#vح@4s >ĥ\+3HHJfXog0a;q 744B5js^@k!F?-?7ϓ~aaf]VO_|P"G-hN% Ƅnr .koӟ?_e@3>vk)X)B0}AnezR~#&hSfIX5Wkkj,fSAAݎWƏsn )7H_3?]\UBTbw˅d?H]ALB-y9g`"Z8@C|J8]Ay܏ض,w]~Tq3!{aVƇ!S=@kyf9II#)T54kMY0?U7As]uCǵ<_FZ7 +ܖy>=M GCdX6RWWKZ݅pUBXMttIgF;vb bI/,,rn-eeeVICryqn^qyl6m [JS0 hI9F#b@41 gHqFP n8 
N<vy68gojҹ7o7s.\^sF'k#.W : tqySVSc572223KJǠ ߄,a-y@$!L.K'=PhK_zʡKV#:n{aʞ={}8/ &$㏵FXv<'}r)O-. w`te& i6kjkk ekMnpjQq6*r":k\\hwX4©򠜗x ["C@<x.eSԟ8M0YsZsؾ@S*~&c#ohbKVK/u )g4d" V=:NF,}Efzhsqc2gI'tzD2==ccDӺ0>$&<5W+Q{{LƲ'",>q*q ⌷"7A1KMeƺkuh2 At5\8CݘR]S Ğ;woyuCRs+jowi}8+qźnc) ޾>OE%x#hXޘ2b$MpvAZkAYgM6=ˆ>pwOg(&Fe9A4t9qtȏVEsyhƂe3>^(F|U4ڹ 8Wv%M<"cM~*sNlytsn8?jW=W+qKRSMJ m[Fm<u`$C/-w4a#mNXש`9A ى24C\rG5 :.P(3ssY0Tf,Jb:܃uQ(@2U'ʽ̚tb/W^}%--f7Rӟ'0= X3\Ec} ;_BS]Zk[YZH,34V󕼅! U2k <~po˼Q-zVR[[";FZ iK{]4"zv "t= ͒n;`w7'^p<9hLMGli4d=ljZ]vzD'tI6Iy Q DC)[` @ `# Z[g19sfgߔ 6AuDն~z *Tj4+шĵښ.m5wr"րAmݵhՆw.bwA@Nш O:5^ )5]NNݗF,7)Yx4~"Ъu4o?N:{[L'6Fbq>_U?jVtP"V˥p6ۤm\_WgN3{[+4Gmn29p8kc0aХ%P,;7f6gŽj'#(y/BX,fk׭=gOsN47 o/aS$15QMoyUhy ˮ>Aj%Ay=M.M822ٷf>٩?_1YQW<05y+ee %+ϽsX qGﻬ׿*ēm2.а hS˴5>%sLԳ8[MG|k2ٷFtI'tzD }7I3s;iii CܰS9)!jTTo.**_~GShS6o&ݥMGKK]G#pxP$'+9E u %ak>{Å:kW-^?HWYږ؂u{Yv{ƠM48#2Ώ"r;nnQN&x"rF-" |啿Lhő6j,dϹJR:{PB}yVN y:\2N*9N*)vn BM]-﷬V+۳cQ' GK(ǠX3ջJsf5=;n G,stv '"ν{}3>QDS 4owXat4T^T4pB׶9u7Ⱦ_p0!!hΝ#Y]]B10[LcwdՐ;p&^VZR5ɻM{3i8KY#5]ʘ&oˇ$%~E fZovJSvycL3MNjF-OE#c\O<{>)+hNAM7v4<>AiuzCOijFmm&0 ucIC}}\z%1$E! 
/SnOb4pS ыiHB3b, mlc>RZDhxA%"1:;hK9\0@z9Ր;#TdH::z>n g'-zD簋} [JꤓN:/={~+4fjvb1Z̉l['8e#Ŕ}lAtt<ϙsLTMN's`hDDFFT i'Ff,z̒rΰ5Kɾhvf氉8Lμr WmM @+n'mY8 'SnC$ jR9IUzF$h426Y΢ZB(!1\hW4:oG%Pbm]~@~h_P'tI'F4|vS)?/c AhIXLX{nb¤І US.Jj )U[<<Fp7Z6}IYXX/uXс0r.6hȉ0=yjU"ߪ&FY68M /1"Sm5F1#fsR;;:o66Z,sXa0\rsT^QnQnFbnǪ-fSh2Y̼#EX.U+H)4 1bDr`2SE9:H+P/-f h Q p6lEɈ\l]ms10[Ñ SwMwMwMwMwMwm"1u@g7eȞIb`@lj'pq:Nq8ػW@CX@| ZK'g-"(8g9J i ()N@q8ũ8ʼn'@q*N (N 3߁'3+Э:8QDqSSq (N'TP(N@qvTDqSq*N (N+'8Q X$p4 8'cq.TDq (Nũ8'8QTP(N@q8'8' STP(N@q*Nũ8ʼn'Sq8P8PvaPľ "0N0N4Nq`'' q8880N8ح FPUuUUgUUyUUgUUYUUYUqVUuUUuUUgUUyUUgUUYUqgUUYUqVUqVUuUUgUUgUUYUqgUU%Qq )T&AEED*01Zd#A҂/tޝ y♽5kf53'!B)N!B!))B!8B!B!RB!BS!BHq !B!)B!8ׯl5?}K!^L!_իWs\iiִ-[u/^!DˡC0*++}ջ6B)Nq]IfgϞk׮qb NǏ;z(ۢP[n+ip$v#7[mӧOl)o ˓mdee֭[?gGchS,}od=6jQ }WG4"P =B_B)NQ@1eb3wMMMEkۻw/ʷo)U9s&Ʊׯ_ى'V^SS1|bbJ>}z޽5'&fl*fn xcvܹ{Gpr?|vy2= J.:ֆK>tXyNFq>>ubxkJv9cx(`]C<}/(B)N'RҊ.-;+**15 ܶm_ϳ6+#f4pq)uFCWrX~1߿f`||((lk8BJ.OOǏfxxy,ȳS W9>1y0|q{^;vؽhSɾw|@ n.<ʕ+VeK?]/DlWDe@y}xB ɱt_Bٳ{]۹s'4JL8B/!8E(ND?$ɓ|LhQBwTx(55 ~ķR&qPUH S+8˓yxʼnEm(<0Kl?oafMHRj 6Gr1+D٢t/L^!=dwX9n3#!(ʼns OLLbKnj&:9UUU\0e6t͘ȩ,iNu3<)F$n߽{w^71&iɾw*K_GQOg8myT(5O!;??(ť/L^?9 0g#DB)N'$S)N_&h,R 3g ׉:aKꄲ)qrD̺u@qh'a@u|K(-M8f[5(3: u3&(Dz茒3BSӯ#U a=ccc#2OCSpqwyS%i"p1,Pq]{G/8YLm+fڊذ`@j+@q* *N^*DqjU]!)gq(MB"$$!M#?~frLYa].+sři;KCo=N""*N9Oř1$e&= J{NV½ѳ:cLeϑc1Ҿ:ߝuʼnKD?+dŹd4*3ÿmL= NS  JBDDTrn3/U= 8ꔔ<-*伔Y5 vɿM8<]x'F}2ky+N>@$#H@|///[F;L("ֻy *-*noo?>>we?hQbPVHauV±/. 
k6 hNxr+ӆF.쫱YW9F2+ h(8ssӈ oލ5kb&CSl$ȬC-(Ε ڮy'.bΟ|1MEζdTq2[ hK)48eҾi7.ՅSث2:@٘wq_+gzGo6d쾽j6>-ZMH=ŕ ?D/ ;8O,:FFRۣwwwߍު}2ۂ&ntR&뼼DʄGg WRʻNbi{0˻:RHKmI7*ai_}N7HS:ýu|xxȰsz3WXv|nǘ p %/.M)LB**a&|1gO>NQqQJIMOt"]^ V?Ht4wly<W(m!*3ֿÓ/"s?P]>>>RY"""Tq4ц ,Ur 9'݂ ""Tqʙ@ܸ 5RMl ""Tqʙ@&CTPiHDtғݑ(m"""*N8EDDDD)""""8EDDDDT""""SDDDDT""""SDDDDT?ح FPUqVUqVUuUUgUUgUUYUqgUUYUqVUuYUqVUuUUuUUgUUYUUYUqVUuYUqVUuUUqVUuUU'v`h{S*mq0}7[  @q*N'ũ8P(NE~Xo$%7L'P+~_ (Nũ8'(NʼnTT(NP (Nũ8ũ8Q8'Pswn?U\QSIA%T%i"RR(T*o"pQoا ‹9~_ٳg͞e`'Ɖq 1233SSSȨ]__w% MNNz酅˗/:@9fGsT t{7`ߘ*0N+O:֭߬[;O<)Yt@y3;;;(X{9@911'a3<[3q:o߻81N3ސ/ /%vfxxsppP3b||<++pnnƬhn=[V85Pl;%AK;\{y/ XvLvۇ?_usٖG^peZ{1NO/,++KOOܔ&4tqqQ>j/U\3)~~Rƹqy?L|H]j_q5X4>JIl)qTK|^{Y*uġqbaJb}}6sR.wpBAA|VsrrlVUlJJJ8}r S֨ݜTUP̯k}5/㒨S<6B+(|I饪2QH nzƉqbbT]]]mmmrI&5P-ή{kkΝ}6ΙYZjjjdʕ:6Q[lgLPgnxjEvWwsgNĞzmַM JU88Jt4/44N驽I-uq*rttʕ+k 9Uu ~1eUg2Keaƙs[ KthZd3b?xf81΄R{Q^^KNݵ9Ί l͛7Ϝ9N㜝miiF###iii%%%$qY/IY4hsc)CS*h9T8 SU814N5UVV\usm $SeUU|Q[TS;qJO- 1pqڽzt__}bV4k\~ ۇC kFX^O33ȺϾXwq#X!g k׮ICQ!ܬK$"Y5QSj)eOW|;`ʈAq~+{n*nWkb&ouF1NLPrc37T31K)s>\D:__4 PU=8UIilN.O뒯\bg\Uq`'`qb'`'`' '`'Ɖq`'`'' ' 'Ɖq 9qu2mm(\DT#+u׮QQ@_CtnٮQQ(N @q@q8@q8a_! 
@9>989MS'}9jeYڨ8Q;WqbL)r 3XJy!Z6YIENDB`docker-1.10.3/docs/installation/images/cool_view.png000066400000000000000000000575011267010174400224470ustar00rootroot00000000000000PNG  IHDR_IDATxiu;Q[)٪[RTʑPNb!I$^JJvYIJ JĈ @b1af -ٗ z|H~R7~pdq~_s=w^~PEQEQEQEQEQEQ%q `M㊢(( &BJR.+ r8Ot:::zVQEQs)|ޘ*CmUEQE\JdLKEQEQ[jEƴPBQEQEƙ1-EQEQn=ƴEQEQ>Kɓ'w޽`8AuUpC(w0 MMM顂((g]vCn$HPWٍi+(|غu zHQEQ =D!R=U&9“%˚pҥ .^45Ecŋ.^pء kCSK.Mq8y^gpђhXQdO1s2bcNEݜ+N\x`huOmrr4f, LNQEl_r?} v1V2v }ty:W1 v1V2-նho[EbmPow=mN`;v]18x!p|0k~^y`|6l]aDq& o9 ]v׵l-a#$!"3]8tDˑfX CL:&';::lEQ"W_zʘ:7k=,4Hc{:'dm7=,`#]3o|Xc/`#]CYޛ%u69; kk/m$v Ѝ!(R= j8'Av8Ж.]=@ħslۦ&iO Ka%${~آ9KMtj||jr ˆgHQE̤+zzo߾jժ+WPׯ=_?U Fp(+mѭ=ϸjHƶ"j#]8\jmmMR q_rL kM> lYіvھ %{$c̮.A=9mƁ]6ϕ HHa Bs!Krs:Oqj\0\6EQо}^~oذ5m,7+w JAip(76{x& ]8)~]a޽{7oJm!L$a=59Y6(xH垎2!mcHӍ%]rHԼfXT`cOOL˚W"rEHPH;rdK$ڧ85e#%+Hh؉ԩSi*(![l)ߏ Zv-kPׯxTc]8ˣBc!p(7G;VNх5200@9vTx 'iٗ4k˶p0R:l`=7za)"̥6-l)REkEH${FSE2>'IXvE"]Q#P-rE&NML]PWC10;CRnx(781$kzeX:Ӆ$jii(<u^ B*C<0TNхC<=)¡X恡rz.ѣb1$$Ur: /~ےTɔQBZ~xGJ $q@)~:س9D`f.k|HnQ~h%YCǏnJ-75$&~:_9`ˆKhB(dt̘i̦T&ΤҲJ&tʚ)kLҐʤS餙ͦT`(<z=fkX*eLKTʍD[d]]];wt(-CL4]ryMM-CDzv]@2-!%);1x@lK p؆^`^l>C+>R&2֗or(PsHD\ܰ,);5"f }~"Ȃg~jPS#xeSs'&'w.!.VQ0g2ؘ̠d2I3$Ti&Sd6BJe$3xbI'Sf"B~ WEy뭷fF VX6C]?| rc[32ʍo| kC|>ONLH~[MIV%-[jC,t6UҮg[ym i{c=u>y>o̘n}."YDZͳ][ 9G}yC_7kO̞<]7g acr$pZ 9}jwz12~^=/Q9X3ҎGrI0z!?1SPMc ӛ39*s857vHCޭ3G^?9{ ʇwl;rγtSlZ Tqtl{[mO`PW|gx75I Er ]Y_c'z3OxcX2H"q3Ko'm%c${L'~EQNslP&FP_6HӅC݉DoO6Śr߷ ᏕH$:2Oj0agI1r%:o\}rѶ(!1=;֯['5P? T=^G#W'K/ɱ9Wl[> Y7ٴ?1nh{(/7P1?78>OƢωHA1 Oㇵ]{ H 6Һ0#VraX w֮߼ll6aNml\㫫Z ͯ1oYfj'?=@wv?}bcOƲR8soT?[kǺXK)o`K+3 >a\~d(/梩qs<Ԗ/_6{ѧѾ_Z]E4'77 E.S1;~lſ5ʰ &B(SM'(/L&zD.!xL @.-3S(-Luuv;PxWW^y5k6nWG221bk汙O#Ffnnt'#]Ç͟ B#GXEuiӛ䂝^_IrXN1|XɠNMmc􍴿a6P%}Vׁ*j6{{8bFe@#ͫ+XrڻfQ?ΏֽGXgͮ2oV`ND=cT[ۦA)< 9Xx"og^+DSkYyϗ_zjN gP]R\(z.,xgdjg$sk?Q@ආ3y+%WW|)}u_ss /]~},Ɗw4rwv˜۶۽sYVgǙH\.GO-8_y_҆ *xGy2:3:xɜ3m/s299L̉sƱ3F1v;%l'/1`A` 0لAbZZ+[ !^{HB5n[֭[V_mH}SP H1P(DC `Q=k4i`)R _,dh͚5<133{޼@o$JOOfp~e!P+\*"‰mm<. 
CDVD+ZB:`("j &x=@_0GЈ-J$>) ]f-:3!Te^A:f'Z>:_MlWn{Z \bB-U-v'n t\xV[8VgS;_ߺWo(uSBKT2{^ou<0r0=2D+zGy%ݞG^JڡW`a=r7ow{nyn]_Bׅ5$)l/9A%<1ڽ"%n{ϡ5gȌN9w,֌=MzW$Gvdg;៽y= I$u&q\v$4A0]+3W8F[9j\Y~H|R㫪j@HCɞH4 ~2/'B!k2~懺lIkkxa9Fu{\zE-r }>Ϳ 2۱yMcuy涊WJrڲ(fw[m9XNsk=l1^-Y.\Gg1(v,[)mг*N2իBo0.U-z(Q Q?$C\?DW-X']4Lhss\yVvd%PWOjG9R=] ;jXٻx%Hg tg|x(x_>544۷@@u P`Hc.mXK}}#ZщֳD`mKs"E]X9A&& MF+d1M=<FqI\xc([!j[ rmj%łKN9IL^73z8TPŪuvvtMױJsȮeirL"{3܎Iěҡtqkf-=cdKiJp>+fx ٜO˨m@4Vkxͪ&f#`u/e~؈hաKhچ׎$e:КpKCVm;YzWg{+'l;N5B?+&@tjS ꒾JN5TtR(1+Q,ZGSm[ɒ3|M͙;GᨙIs݆Hshe;sv~{-`l])tǺ ;fYУƉ?PPZ’pə3Ғ _K\oG#^[,׉/UY=6Dj#  SZ< Ha H]a]-L]C$B-IXm)Rt?\,dj[NMԢ󅦮\<ą9gZٞc1 HذOQ+W`cM"t׶1a9u;u7jrh(<Kӿϕ7=%sǪ~j8)A#k˙6'D>糛=)ݎ; TЃ+ -g,p)O`8+ʝl=\Cmb8̆~EhnώEcB^.1_tJ("2Zs ~TS.ՊثKfhd4W]껝>Ji]t<,|5ܟ2{rآ9`‚¢Ah.̚:˫gq! 8H@See%jc(R[S[_WSF >X}JQe|[=|CMUiQH+x^&$f0>!H1UrtXJ;W0[Ar.,1 #9RJDKAACC7pj MBSTh*F%leR M[hQ%mMgC 떡u Wm5K(5m XQ55P_Яu,k8mZ2βBZM*4 MBSTh#B JҏaekblՇY-汤%F 霂AQ'i5Ji[Yq#*"֩V>Ԑ@ا b.3>-RQF `ЀF%Fc/T{r=E oSB:(FuX;x9 V.՘.dΕ^Р֐X!<|E "l8,Py :HBSоyؖ3B?1]ôGBo8 @m|-x6E|ipF&15<>`` =.T0ܺo_V-PBCx#QASP楅FKFt{7lڸaY롍6܆YH6nM7lZѠ[,)R 1shQEOj( PGMDԈutfE/`ņp0 L*~RzTE5 MBƅsJKK,ѭB?Шd$Ǖ#t,4q`PIKZ26h'b@p2)chX4U&CѧK4&&0IC|ZFJsYʥXko;H$4E)R5 ~Oh۟HB?1W=(q!4|u KH 'q ^Y3 {H1 KteemD@HIReLGc?4A6b¥f;vA^G}H"E6rG穃_5F%r= hBGe%M+\Ȉh33Kx=CPK-jG".ʀP&&3"(#5L018T]Zhg 9+RH\TX8?*-4*x1GB3dgfh -aC= |RK2fT{(pOâ)cO-]! =AōxA!pNGM:F,',>0SA.]Zh"E)?Em_ڏ#ysMS&O 5>4<_}Oּ-ٶ goQξ-{[[۾~^?p )1_Wg]_V>P׏4w*H44mjQcg[]& C[7IM$8I.%XĔԈ5E,9'^d'h%a! zMA ZTqnE] YM>`hn- ’b)RF}\(mw>Pqa8_䈼ٱnݺ'u=wyǽDk֬踄i-['ɷl-_ܦ m D{K_[[qs7s*ݞG|Ĕm۶iӦ͜9s͛1cƼyO);[дǥW,xq ' J"A&a0,G߶e@"#q&`dSFHM*S>gL'*>xN7SJ);Al5Y CU .SN>}…9lX`$X36<4`LJpqK=|8'h"E) %/g4WzWɏs)?Ѯ;ǟI! 
k[裏Gz64e;}6>1L$>FpH6)P\1DD'G܊kUq?͝4iRk@q]B0Ҁ,y#(W 8^$px˖ʹha&84\iH3CĒ`wv>3\sM7tylogstwFk?Ou Ĝc⒋~wܹs%;[A"E),***Zv- /H޺uxqC&'gHj"G 浕+OO.[БL]X9|hٲȩ|#3'O$MI=%%g؂PjkZtި/~aeVՖy~zԙ{w:kU<d^qs0e&ڴi?ħz*55u>>͞=kDʦȺ'ˮx?ʖWt">⨍F=_^|.n@h$4*ѽDWSFCOZ|֬7p#3eyLR`1lI\FũD-,:1۵kwIy{JJ'":aZR1cgX &Ⱥ8y-7sP{l|Mw1oo[ 1(eӧ=T7q"l(M*O?O?87TGm|4Bcm6iLFBRL0_21Z%4İ2>{-0d>ڵktD{\Ds隶u"&@(qP$&Z"XJZhyeymmee 211acECP6Qb%y9s: %=ӏ<Oio\%u:p|s!'a>@xj"sP߫\({G ?4UgZ^O0l›pɴAM[03gp*ޟ/C mж/l|vX?њs~|ˌw@T:10$'rj3dž7⚭/ƵW_}sTobdd1 Crc" ;ƵNk{CG?K>>լg^:f]7pVobNӮ(q푮z;w\nO[9.]4)dcw[OhmsX 3؏?4:.ԘZx8&|WjD#,=r(?72њ8F"|wKXbݰ^U3S)Ko@Cq(?f )p+̙EJ}(⯲|@zY}mM>Y"C>Zţ[bR^fU4f,\`gO ëɞ-Y[^HPPf*v p8=TFGW xP4#-sg&FMrOg+pꖤ~~O 'uUްe5sM{:5ߐi8=C$/se7M~v3 ?@Ij?a7*9-Aa5o*9" o,|#-DW6_V3Rd J*[he~0w 0,~,jj`y̏+V>wzg}UVh0xT̆S}Sl2<sGdVp.7D|>C<(1sڝ;Cd:DRJVxnbj1GQXe$RR{޽^(g5xhHQQ]2˷`k5D-(xpf ٲWwR\(3utr^Y * lG)ԝ~cxJ~xTֵq-')e;Rϸ=.=mwUO/NO#uuusd<E5ҫ_Շ>k0xqWB}eFvC񹼡qplRD7oGV0R B?Iqmw%TOy[ hIr :6W2.yHEk6go#7w^߲y_?mxi}S;{J <~E_q:%ƂTW- 94];sٌr-d6ΧM)>$WKUC-ԧS h+o#H̔ĸw~:+w^X`Ɓ>ZiR+`O>Q ƞ9{v+؎*5G8' A;UUF JSed &KSccsSU!BrTiUέnӠ_?4e oNO)8!<1Sf$͹sH U 787 ںЛ.$$MGbälGe39R%oP~OVt5E⑒<25t|g4e]JH.+ԵKE+7|k7os]Ӊku_PfϿ~0+9'؉Ll!tˠ. K@[2Pn!~^~΂Il|А~摶짞e?u߭G!lL"2N&-TȷPD0 FAe:dہ`p`O"&Ad6774TUZAnYP_|@_F#NJJd߹ lK4$<ȇyk`(Z`&)ѣ߀{ |Z yf0nXX]b-)m@B0?dQ w_Fٲ"mIh 0@VB-;2N5^w[NN??DnڠWm;u{0tB@+ۖf}0.m}|ev^ϻ9ݽp<Bx(? QNϱ𦤤$^HPU!Rr'PC!FV^^и!?NЛ?2H"/D@Gx\O*grK阔ؼ[nZY-?z=m_0ф(q`BfnIʥfKGrKeVK{ǥܢkdنw9\{d\Z]Mģf:ˮ;~≴Tݦļ̲buVb!(Iz愆r.*Zc]ʏ?8ldXϮMѮZ$4h5gYҏ ޹C$&F"#r@+t DZ%lMViѤzep)Je&ff)ME%% Jϖs'LсO:pN1ۃ)MX_hە%%GRg$G9 BzαtիkedBo_^䥋 Ӊk3)q Ri8˽;\e5ZL8"9z\s{\VZ^dݖWoYyeF \,:'vON˅GƵP =veؼӎJWCA#BiC+\[Ӂ֢7¡oz˩kw$O}-i!ޠ,1.F?J9V\\Krr. gjVOӳG#8\~ݘ @8~֞YQ@R5f4-_ie$fxnX i&/fM~~LU7ՁH|o,4[*Jd?5B[+Hj(>I]"5<~wb.c`LC:-OMTVVc|nl(_ۥ$W;T ϣ Dkz͟Vd=.tL,'iLF$ N'N\kll,(*ٔV hZ.Z/L#lZvjbґ+ל,/g5-̭ui!s{N ݲ5GRC?zqY^";虆Ү#phm3^u?$)>BeK'~Qx NUK~?Y"͟7X*+/=> D .2 .$#1fy"0d6UUV4Dѯ052.+ơ>xr"1cs=ۨ9‹pxϿ=MrMΣ/'b56$p?^:-<yLv-\> #rFOvB/%q{Ըv便q-Poi"SVZN\[V;kyRF&4>V\sʮZZ*G݅q,X47vy܊5ZD3gۂBgH>>)do>%>? g Fթ7rBBdóf\5v}՞? 
ln j=/ԯ~h<_LIn'pXt9:m>]- _8S<7t9(t;9qOG&dP#-nLX]_hnm%rA+;죏8ЀL$ԗF@)<Gv4av w O,@?7UC12*x};C@#tPTHA,kPh 7R5 hl{G)ccCCä kń{(I'>HT%&O޶m;?^vP- P:h7 S)i5I/ffwx1/ͼx򴶶^̼cDLntN\[U?KJz/k%5ñvk&C͙STLZȎB]zΜtUXJx&t>x /iEQ N/Tld qԞN9mN:aW-(^LkD+ٳ,EG#1SrN:1<|J_)^_AFf@- D*&Fs@'b^.*A)gΓ] )}el"( ?^ўu xWuvv"Y29p<)5Rٹs!tI'Al1T6b?t2C]vѣz!`2UM#pH*19$S^^Ə8_5 יV7΢i|Eӛ9 S9jvZVi䌜^elہMӉkkG\r<3;-;ĩv1)dxR;nD*$A^АF~h7EG#\~FcppT9 G) 9Cz`h` T<4{9b؊uHEvڢ +ԑ?92XF%x^/G80!(aׅ X8lb"$+Pp=k~\[_7)k,©ՊR4>uޙf\UW*OV%g[/z؝nv6ﮖ~wnYC6X=R5zȾmh"4%7˾l将_$r:8w/9W '(A $#Qi4:Q2$ .(HXK 5T1a1F#*:mjC^@P͢EpT9ɉ!Da|iW-fJ"1'n`zaЫb(k; X  ࠜ 9¡"j!@E*TQC: J(J*/rP:UU 2jPA"8ED!=x9 q0_k5ͽ=bSo.`Svb$61G:O &Y2*z^W/Ӡ؏;Ⱥ$ \6U?H0:%imhP*' \D jlh]KڂFDCMr-fJB3>&IVaxBX2lbH0|?dA{^+^'0X:"Hy+5lgZND0.Պj,WR`.t`(z9Nkkkkkkkk\tV2+9cLa[Hb3bj$ fd(5ĘPZBO-4z1F@튥+Ujm(ʩ&5555555ݵILI_ꤓN:餓N:j(fJI'tI'tWC:I'tI'~!tI'tN:I'tI'~!{wVp|Lf6>̢+)A>0RLt[ b6fcRMC&-/-Otwzjo;p)${?-~GC:CRl7n )yRQ'LQ1 3{H"ЃX@(ws.VP(}6k{5{yR־ ֲ=tf- h2oJ)ͭA5Y7jatXΎǖKyrn_ֲ!jV(m8(Y{jlM lzz>C"<~(ѮKɵ@ "\pU,ng⣽'Jy']ln\НZNͧդʚb,6' QH>quƵk,fjjƻ9wdX[j}tDx婀PT,qztf֏XRXƑYg ;(I}q&N3^(֭cg zҳz>sc\C^{?$u=α_R Vڞ!}OW$-g NKV?k;N w9avЮ{!twe7!pCw>pCw>pCw>pCw>pCw>pCw>@ց A.;|!·;|!·;|!·;|!·;|!·;|!·;|:o=Ew>pCw>pCw>pCw>pCw>pCw>pCw>{(`Vw-11@c%0Y ,_(.CC!`{e-608m2޲,Coة!PCJ"= .I|.DzD E CөDzJ UUl~\8C@czV{Z.]5C"˲t:5$z4|>CQBiϷGUZe/KQ1t^cr}(N尯KfIeY14$zvۃ$vu Ű%bbL|ëD6p8ǖ! gQBhlCO 6=Ԍ!/zD1@dz質;PA֡0/@VdC>C>C>C>C>C>C>C>C>C{j@UG$=v[_jh(IENDB`docker-1.10.3/docs/installation/images/ec2-ubuntu.png000066400000000000000000007206611267010174400224560ustar00rootroot00000000000000PNG  IHDR2fZxIDATxxWuw_AW"C"B U.޲]ڭ]RP T ߹st~;a. 
|#gÝOܙ,N&s;??S駟u]%sFh4FCy@:CFA+T"裏~{|?ev_h4Fh4͉<}t@ P']"6ɴȥU,T;'>Gy;r>#RFh4F9qQ~GףtA:!PɦU4?(6 ևz[<2c=vSO=u,?]Fh4Fh4Gף耦 ~nHG+ wK-V%C4U;e~(LEȲh|,Fh4Fxt="tA:!ݐHW3d48 i2)xh4ҔdaϒȺL"tD"t/%TFYJ[s)BP#uRQM2|h4Fh4xut>ML5E3Hg4tI5y&5ϔlY~ƉFh4FhvMz~V!$tG:$]Rb)|\r999 ;MON8X'c2t_gb?~?t_8s؞ϩ}r>ʧױ:M mܷR2tF#.I4CњFrT(B dff:m .p.QXcmlܜ;..uPӡO1W_gWM'G?y;/̹ܰ}ي8a:j:itH$n`~|7 ABym5+h4Fh4 Hg;!tJ!b_Rǚ&6edd@:MH[ʺ9q<:O\'è} + ur\J"O?tf49w8ކNuvG\GG+6Y:!1&~/Ҕ|ĠtX7nܠA##~juq3wu_<eݴuԑ4s_+n˟c8vx>];:snsbV6:]{.dlJǦ!'#  b0TK|+Q$MW8o1GpkHNNfXӏ-:::C};n{uu_g_f?^#odanêtF#.$k,*ʏ3GȊ8L4X$MEaV{*]XIcqxسشX7 T(Wǡt)p[Gde5d{MPeرOMY{+1c`Np[qއXFh4˺8i[E9kKuU]]gߡ3o =sp(tI:Sl0|P)f@b_gB^FJqnH>[qէ'kN֝: ~\9\^"e⑑c8^7%㼛D?)Qd/) ,#`Kc'"CʩHz$9㜳:L@ H)#v3q\>Vr*\-,,KڶQDe{.*8}9 á_+}O?n?S"//q#rk` ,??/GϗeѺB)8ol/66bbݭfP "X~[7ȤRYFa2|kn6:fY2qxM;C{\י[@ӑ!`cb84N磷L+ 31yi,Db|j:aނKSWXT6OƂ \qe}|fܴv?s*2D>(QQQ̓,P\݈ _n]9}HI:ƺcҜ09OO B(MpyoRe $MAleğx9T z<8h4;r X(.F,R(eE$PI*?s%(++Cyy9eZVZ*cN\_QJTUV //+˸K c2ǺzD3aJLmOUŚ_GELe%(2֩1U0GQPyVb}AbSzJLF8KYJ_fl;R|3k˄ R0|9}ee??g3׋tD cL˨͇Vܾ>Ks= W| o^i}7piQq : 7}O^)Z GF6'W"&ଣP0y9ƌVb?S '! ~}ϷEdDVq90Goq?c``'֭ W߁yݎo^lD넭Ύ?22Ra夾1Ȯ>2۵ ;v][`nc$#q"qqFh(z.'q*&ɷ:i\n}5l{/hNq~⸭e/L.KOnA:%ӠƠ_ `….@\֝1Ww¹Fνx뛈tMG[u>Qp+\/.rs󻟡s9|3J2A+ubã:;ǿS:xNQƏ c~|2WT21O(ϕU岞h)P*TWW&L<]eǬ:444 hG݄:Lm l/JSb7Dؐ MeSR(e DL 5 2LJ'35ըad BʳrRnPn b.0bʗȜOF)ӆ|iɇfJܼ|%1j#ݬ%/a;H.f;nOu;vd/O#b%_? X}+D"@ǎ+?+>6&"B;6P. 
O@FuxoAܒⱭ8s^t1"Z느7jl?/ D.<<WDz:cs;uB|XQmq_x`|^^K?p䣥zQ](+ECCmQ3iI"k@Uy:'{")5[ۍHvyOJGm}zzz0 ImGݝ7AseiX戒\P83ggAci:Ʀ"yh3k_ 8yUZ\b .݊ 9_g8M:9mSSط 2֕ӏVs~Rg+C_2}8EѣPp 6q IblH^}}P'MMd9iliE 'ĔSʬHef M<Exڊtvv h@{[ZZ̲-2->\׌&m"Ijk)2hrp1%)Rb5_/fddH$BĨFdyiz4I}DFn$Po'wR'+i3j|ȭ)Ќ#G8dR,8'N`KGsAaPΥZS˘yV2 y:O`%+Qg*~l0 "e(92m%l?dR0mF;vu`^:Nqyi/R q+2#XtYxf/؂[bc}HD9?+Ws1w/Wi/N 9)XzsPSZJ .)3ŵKauKpu˱fbŗ/3`˱rYLkC| Qݍ>g،ئ?Mr@zxQκrnj\yH %'E<(axQMYO?VCxBЈf6v]nDNZZMZ~u>ڍiskHT!VF-er$P'uu͉"M5=}2e)EtLyYjɘݍIh1d"v u6PRL Ui0[MA1G B^~J1̹/gӄy5g{Yg+GlUr&ynnnb(Ih}dy[m/^W+%$,3;'WĹTdڈEdux#R'Á̸ujl}[X\86<=x'xW[_-BTd"_r39ݷ?ډ V^qRxKmqg}6zjޒ)b?QuUNر%-խ'^<]8v7Z;ݷ]YW܂  -=8[7eaġ#{n\z۟q۸|fx-̾oևDGe;~F\Vlu/1{ܼ;Eܯ;oo-, 2d'J; sNuy1c֯_m۶aʔ)lrL!v GqK[+W,5,yW]v>*J'ekqEQ._u[+Eqv=\y=iCl.ou-lo2֮]3;opo!!.ڿ`FYoTOWUup$&a8o/?ax{~&.k݁ؽk2_ `ϻ;w`nSJ̜3g^y"I7߾w=;6aьJG;osxx{P_[܆Oc0U Ca];[kq{;7;]y)?c؎7_ykOErK t"*2qoC?//BN\ԿQT؄{_ѾCxy䦠|lۻ/>q:YІ6vbub֓b[c/_eJ ć0N3Q=& 3 {ƣw]Y7v `7pddWk.B[xW/9Μ1!No݂w}օHz3۱{Swz/b,*}_W,Fee Q0BnV1X7xp4ڜ{l[h4FÇZQĮ.I'tsXG19/'#D .>cB`v# m>D϶6AXrT#pQޣ#4G.g٠TŤJ=Z0QR"co{xrCYYFFkolŁzL̎Apa])K%?*xAc2Fd4 ܏#Gcm^ܵ|Z'M(ffn^ЎWƈuu 5Fc^e:+AhL ~i/9LiĆa߅W_} EYm6/]h&D!vuTu OM3N\u8㹯]E^ Wyq_gwdzpy~؏O?gwfVͲ &P$ĸUn5A9rD v~'ϲf44ab ӦV.kEs`r/TP>(Â@)UdYV+t귗}=EMAAQvJjc'aYP*'ֶP5d"~YaR2}O1X757y\2`>٤dq4o!L%Gyklyk侑12eyK,2Ǹ7TRibZ䙿HS)ςUX;\ۄٮlc}}Nu 3ΕJC.("l;Ʃb T~vg`pVXsT0MFGX܆2ztEs?b?G 4Vq4% Wsc"Å1M@c$LhFVZ  Gqu{\PCrR1mΙ"ppSQQAr?5u.F([h#g$ +0{B]wpSDTNęW]ˮE6d!P9GRL_Z4Q1qmĤ&F S.^Wa(Dؘd܋a٨Ȉǘ؉ O\(K1A`n`ݸSc ik %")9 㛚Ek͗#!) Yy\quZF6JP+t[~\f_CjjcHe]^>X>!VOdo"4S.q. O"M./(D-EzSS˧ʚOhqE[ ZM(&IͲ=GH$ZTs\D =|WPL))qh Y/2"?Icm9/e,gɉ䜯OQ0_z ("lmw(C7%ATW|b, )%dAAC=&])b`\F&GG2(+eD‡ރ r-[-GyٶGZ\'175G_cSWȹQTP? 
PE(Sbi;G<,YS۶p}H̪9 &#Jʣp3ǿWKמO 2Bw)Gxr8'"ۄaԨ1DjQ%6YǷ9!2Ϻs xF!~ȐsT.pQ$8"D0o(DdQ!g'#Gb,G:&!0ˆѾ1QFDJ^hh3ώpK;G`pFvtO=(5B)u%9`SBxp~XоL>sQ~\J>A˯+nγyYW5y6|~DO?t~7]YCP|/ľĀrWZRj CҲRE0IqPϑuۢz?-fRc=׉@XhG)8/?`Y]VQqi"`JG,y1e*)+Ksr|0_{9g)~ctFY y 1Y2ooP*9bgQJVBW,rELɌ;*|/ۧM%cᕶ6Zdp[z sQxlF9no/4/h4~z}{pQr|<49o}=͆=γx~.s0_pSUma8?睦v#{L~k\H91]#G1G>u:p>~/^W~LS ?t߁{|/sq<<>7ŵ9; Ft=il^\򼮜1"X5v\á}L+V"?O?tzl-sz929p.A\cSx{ŵ PO{qGGYϡþY9Q)<,ߠh4Fh41 g.҅Iޅq CaN:6"23,3(K&e[ RUXj{uyUI'jgAAAAAAQ`   AAAD)   AAAD)   SAAAx(0AAAA   (0AAAQ`   1D)   S8܋.1ti,"T_A濆Vϱ>J`lUUJe1E*Y_`$Ż !y#eXZebr]2H xTNœ&$-\BIR,ͱssslRC1y]f'&e(ο]5ֶ6ۍUAdxVL!"+ 49^lI#s?4d)E$zDsDbp/yv4ShR$h -#_2YZ^fiq26 #e)O6/%t-s1f&!koNX^['qT'X^`jr]r2@%&^Pre|l099{W9K_X_#d{{yn0?un B:gao~qRJp}ů;>S~_X`+Z W|UB-f$;R&˒/S.Idi R;/v,Wo'K|?s!iʤ;~=OS\v }h%~~LԿ̃ӊ\~z=$ ekyZ4Lۙ f$Rt:CIѸ#sYdHKeo*mhk咲,S*I}5ʯ# jYb3 ʥb嵻%*U LL1d{)ӓ_䥑@mJI*!JH"J7ڞ%ɡjrf&&CQަq R`axjQtF&vb.C&G@T9ҙ6ɷ4z]"vw c:GJFDOp U)#KVd*c0}+|o|J.~Sr»hbߏgtaz#S/`6hK%YemRb݉kkGw7*cDJ$( e:q&ʭP m6@h}54tEƫ$+9rcu|R4Z=JUUA$;qBwW]<~0o676`fjI22@5/+}VIU$3[W*ׂjrU~^@SeR ScB)ަ2LlH\3g}_!/}~h(F.s?7~BΧ܎SHѷ<1r%s)|)v32X{?_~k:$aӷD/] <8)dW3ۇi CϟyUEw (}fnQ=&D.nDo ](8Zn27̒J]1f1cs.r5l`42ENWj¬74O&y@͊e^}!Dv7qY0]ċ9:zX 8;ȗJlh41LGiwY1+Th4}\]\tV!}`@ z#6 tdl]&7\j)kN&Jn]-}R Y*Ye6tDdc }cc#8m^MtL._HG`Υz݁(+a4pw^k5a4ߤ̝ Vc=}:Zp؝韉P9Ɂd1}F8V,V3zuўvl&LC<>1bmlFz#[*Mt&ѤKFZs@)s@ɎդE*}FۈCJ:ѣGH> kx'_?cj#L7L`)Ӄ`a~ r[K$!^l7Kkޅ Vy.PyR.!t1vFlŀh2BF*s5^qSc<-Q%1Lp唓 L&V{;<+#]XV&B;8Oz6czfLF31m8zQҒF71A6c26uWN1k .UT eՂYrI<]M|xztV򶽭H$DenN6fkG u]K n8J_Wus[Qx us:.r6&fs+xV@T˽i78\d|cJi ,0Xp}@C,Pbh5C ?Xsلŕ9=FαE$y-eX,fdo02zʃ̃B!Olk:cqMS<4xVb-UG.F)yDgg7s㽌]>+װE6]>FNUb"S`Qȥ1k)V`1+E+_#DZ> g=gbv{|򢝵v~'czr_Ipnc~3/g{C}8~7 ~s~ɇןou\wQ`j*R!R2I}eLsw.;w5&OϘ03ˆBggX /mm=i,lo_bak ؞++'FzW"/rnhbeWcnkɉ[ sdph(W,tclvSSbs 3bmp]Gp7. 
i|z6bih^fH}ӋsFZlFgi759$j\A%V| rϫz Q%:D1Xocx(J7:k;Ijײ"rAN 117Y;8m;ΰc]=\Y PHDU`8S[^xLhw1."^YYڹ$seoC l⳾%p:%stO2詧ypy^'rSࡕRx=ͬmo`J\nN3?XĩkC$~IώBSsF|=YǺ14`=U^&<Տep]htn6?}j\73Zǡ3q+602aeOSiw2>ADF)YzM5̇..q@6i31qItgL o>V5&73bѹeӃ7pmcԿx6>s s zv蟽`|%DGrFlw =O {܄x!#-V<6a.O4ъg1'z\ r|uCl^C'9ptfmH4>3qތb>xb#ZPO_pl6qZhVh4ڈD{^97884UJglŭo yԼl$=dՌ?h`nˍ!̞Q&}Nni5`k]]},կ;%>;#uA3Q|iFl:%]VS%Y&6K+/,s6-5c>K'~?K6_~)&(e_s>x~_~KSq|{?9)^~F.e Hsڬf^C>]s qƆl.awO7AzSlI+3{)MSXaݎe{mϿSwqx#U`z LdєF3;gLxhr0X _o3&۠aBg,_@M+o3<;=HYPI 6Z470zɛл7m3 z,fX=C4N֦ySf>' 6β<4)wC<&fcZ)Sr x2~V?)IEUe:Mq$eBlzF#j2ZyN`iFNgCX8B)R+ M^&W`n2{8(+4\sᰙz:Z]JGx-4ZnȚRJs٦[Z^Yxh9?_ccE-2au8,< 7cXTQrۃie0tUIL.;I^]rgs9Csu9.RB(t?W0l#s=gӊRaieN+wbN}?Z`΅,;y| VhsWєkz'lϴ38Edt ٘na2=`efd݅C_o s4-GoafRV).X xXU^|J"O>qS_OBqES<"0<`{_DlƬ&fs,'hafRASe:F7h6rp.CQ fg7|̭0zD"4P3vV&y%V&N=jג9{3{WwtڼDdzMF%,L 5 V'c~]A]4rQ)/ o3;|JC'6+M41s4cT+ F#ƻpXɥGX hS$+|w8:3Nt24CBZT3E\GZ\\44ژ\\mi& .x: 3q4Y?͑"Fk~#F&ix'8?'x /[E_ [GtW?1bGaփ~kN:.y?s'h1bm&I*[U"eZޣ~/3@.YSgm#`=el^{U8ZzgWi0t@&W_Kp/=įu6Mc]-1nN6prQ#=K{x,uLf@n28P$%]#;,8[0=A]jZX],4:l]A8]-,riFW*=D_:]YEcSGgt1tW-5 -FHhqt]%2xu=$:-s.RH3piLo:Ȗ!w4CM|=JZqb|It4F\cLG⠦=k.JrXF*&N]4 >P6f#l{ybOsW|;(1&}qj_9ILlSK#|짋<FLU ́SҗGx3A7L5d~MJ 6]J9]=zsrOS03ih17hid,I<˧"+<o[m #Xcˣ/n gvQxXZ^cWO _$ȿU`^\;c-Vy3#x_>ej<|if'lNu02}gf!7 l,`r CW4.p6w"u5l]dGp:Ne*[?0xp&9O"#k9gVR$:,], diF}zA Z Lo2I=@`m^=]:8 Ͽa7ӏ:뱼[g8_?GQ.]y5'D!z14b(tFf_+c~!Zڂt-k ЊVkzuDD6~_IV}tl7Xkk]KcyJ QO4Ml5FUj_6{!U#EމРgX<"k&PU5B<{:m[/tyVⲨ37Eu7Gl^r˗F63l=Ae9x*O>5r!/~}UCPO^ Z6yG|7|3xkrs}O_?*ȫG|]cf,/>x>uYKhnviPL2e$\duFӁAgcu9ںV v*۬9J\Lb3Y,C\pqmVvή,`4hrXi_ARyM&u،1wz^%Ljݣ0]V#M7VS9z;ۘlv{i|Iz;|@ xձ&F#k$p̸\. o:5X3NVU  O8C+nkubq8OZ^!sɆncL`?2\ we|=QܮvRZVb<3^M |9'gt-N_w!d_W$F g ]K.}:[k(-^?{af+nwg'' 3].-08^u/inx&aŊaǠw1wEt&_o\Y}'-K`Ӭ$딩: lER54@ᴸيFw506:J*q@Ex'QRI;m\MvN06эCX,{.wi8::Wj^:0rp~)7V'1ٝ4Y.T'08&<h ߣYۻ&ԏ`jpu݅!MNz>x㽬mYaf (1hO f^yD<[˰2օfT,۱599S<<365 sZ|4(n? 
w~"_)ּ&3d^#|,_mc(Edu v|ŧް=يlSopxEK9+8oV4(: ۍldt)Lb Ӆfci :+Mv #ˤx&&^1~XW׹<JQ&.ioI5ޔ6U6]@%&,&h@ G:wDtъɆk gk=md~s=+UAR"PVA+9ֲT TeJ E.#+*LkR4%E_U:~E"Y]TSd)n R dI8LS7IN*sKI& \$W_."[yp%Ym5B>GIQyST Jp"]i(׋rlM~J$PeI$Sdr}UWMHUW{# BY(2lT*A&RSJ\26+>eUxLJg*Ǩl)e|ABS$"kl{U`Ckl.Ek{D"Es Bd"A:EVT4U&/&$ϧ,ɤ$Vk"Zu^UWQQicvgnnoI2LeOcTv1OQVc\XIe)׹*K2y;7ϟ7rϽv&7ܐ*Wl%dnTAۼy۶b`Rm,hXױȣ(8Z9ػ&[JZ!LD\u?/ X\W =|:ޞk_+ir\eYչr{\̭me+y),wV\ijXR#[~ qj& }et 9ױT"חb.[wA?4M\`q"['Y63<0HP8&cà yxAYCxP(al0]6b뉑K V# m4CKy?':BbUO9qp/6=נkkh5g{,cpZIY ;񪑱Q$7r+yG re^LQ:K%JZr;k+c/²n6nLR֫D,_zUS 'h,`h ^%fƵ CrZ崏*h5Iq2\ϓMbSQ lw*0ŖH+@(N&x >R&LJ*4ׯ#[s[_d˦\2q"q)---Teo[8Ldn᢭ 8c#6ldǡ_? 7\8M6Fe|7׎q&lI1GX~>2zsmp%q+6= 2?6Z%ckѲ'auwGgck4in -zc2[c51섩 ^K|0GgyǦPj ѡzz{9"6jZ8✾!f$K}K~~z>_@ǓL2KO ?'vŒA1~FFE+-,H$-ꧧ9TZ6* Q.7ص(^ؼ^3y>VzK U+eLN16CLJPNc?4eq~>> B*Y@ҜC诟m0Oco[v <[ֳuT1/rOo S(xݘtcȿc0?@0E8fчF>SsrJ>fbNZS_312DwwCb|?a0w1H_TB_O7}L-}L^~zrLQ?LLͲc/N1$2:ªA#89{@.kgp!̬ZdxaY}]ضq q%,`~ ֯[ -Dj 9}=ogUs:λ&^qp+Ѳk|WI~2^ApHa!x90څNr$|R?zGX,u{J`s Ζf&סѝ7&ߙGsa%C{JSX\<ϽGqx~]'^^+p bp׏nuGzW4_Ή׈IL ]5sY B\9KD^yF8ӂǎbwDse,a'sqYΞ95X5z'I]=hCgq;29Mwѻro~X]_w="?6g 83& ˵ xZ-a,amka4?Ll.;I?Ύ-$ky## v^QtO/|66l޲Pl.qXWglh΋at}>/]%"׏C8qҌG \9ȉc3tatuj1/]<Ó_/:9ٮBΜr2@"G8gANa)|8rNw'N#$1 t?qw9}\]NBX39_j"7.??āaIloYSݿXQ>бS8=)Gg뺍dC \Z?fʇ̮vJ G8x,g\1[^hA:eF`tbsVț% ,Xc5XCA1*T-p&p%䖎>|lcGXc 9C_ O\%*0KN,Q&H<~g0KD#?_YOz1o0Ȇ/1/0fnIh^9Y)8[|lކ]`lvʶm8oOjf6a~֢Vwpe2Ɉ]Bw}? Ŧ [ѳ ]]pBϞYVܳ;MgI+s לxmtJ-:|Bs| b0U{q Gu9:[]U[o #b(ysC-Z̐y UQl߸FDO wLi5Z$viˋ~Ƅ YlZ;Ht}˘ \u m̖].rmvaϺK潗(Z`&U7>NS6aDH@pN%QLxh_wVkk3Q-kx7ùQăQjHuKœLp#a&)3k)&*an::1ѵ1M邂yXP(H%f %+6)4Tp!f+1H0{A9_k\(gD8%4r~✳bɭ Ǿx +tp)]\3?˲B05$lٺ=s34j[8r]@\|<1 x;a>*,l)F 07h-j J͏i/u\Dsm2qgDn_}?nc߂bˇ}Θ1)+hzm1|HCaJ]J]C>6DU'lذ'ÔldQbcNlF %Owt'wg)Ǖe~ZH_wԾ.##%`?g-" sl7$&IپleICUBaiUUU?OhǁkԎ.|?bwm%+yf%=KEa{$<aJm]e.\`/p{ߡAv^kjeͅ0修[sw#]S`TFi~;hHDE6B_sI4mu`ГjXc5X%5c*4HſT*-3jkxm^APhkj8_3fF#N!k\TP|CY;ԪOrsgD ,V(?yN O ;GۇN] UJ2zȏ&a=[4, Yle&?ӧ8u'qcX=(o$qU,/o [.0. 
#ҷo"B $S5=U-3`U {9xÖ%V!}Åxާ8ދM]j:dTt-\qRW/bѓgiہ^ep|_عNFq\+,=ʅ1pE.^K9,{n`?̳[c%W_xkʦشu/Ǐ䉓8~-a65?"z E[_Z^g Ӻcgshf Yh)nL-C/47e .pQl܄g+ = WjtX埿'e_QKIci,Mc !Gi$ od]=~R;y$G 雟7~sZ6E)AJ>bS0+ëxIyN>ȦtS=}cw5vc]s slڲG\N~;S=x TT0g==3MDXaolݶc'OrRa߄VQ @`)⠂HgȆ8_2i0{kkZSB){O^y6 ֫Z\e0cϯv --M4&74&׍u wq%&d.cq.ێ_7Yuflgad˴Sާ{je+4H NoMX ]شnZXz& fyJ]'Gi"-!k&3nZZEEEQ5,f79w#e Cw?SSm؞ϮKv `G19s',{QDEEuooC&Wig?̃l1];8|&Ohh`rjxOK6n=`ljuc&\ip}!g\M@jI'4s,M:Ǒx۞[]h)b0&ur;î}GqOջFy_.~Eg%=VP /yi#⻟34̆iLN}*jinji=R^VIڿ`&`|z:a&R{?~DVLKUDU]s<|747^^5F̥?E3l9gM2*IVBq/NQm8ld=_ NF mi8O#ؾa+s[ l)yXc5Xc5 x+c%%"|ǭe+`sG1l7샞"UQ?~U^ZS_8g͈Dr9}=;2*SSd|had E,Yp|{\}AGԋ~l|k0-c@#%2kaKWY"YV1R}5 {JVS-嚹M U*ݳhM\{30P-ćqX fxg()˦ݤG\F2ZuՑ%66+fa |X=r,wc_a`ꞛ˜ĕ$S9kX8ʣ)@γ'vo݈YP:3ZVM5cvf;_etcp_iڶ [waϔDFU79#3ޡot*RM՞#FP!??9s-I#Zm /gU}|1_6+Ȥ}`=e1gFaDz,1>at̡M؏s0sZ2b 6o,/Q_-LD#=A;ۅ4|>(kldY6P8:cG/.fT+@ >u5em(5|| ZqV^uwthΛ)bw3&(С^q6n?c]ƍ lgvl0 X^;1_5Xc5Xc`&g&liue/_RΞA$K24Z"_.hJ&Gikj]M L!Wh.}HyP7{WKGRZ~\BoW'S)P-1Cg0RoLVtaVV ^UٷzZ%#} Mae.FgVWʘ飹E^RU8?Iׇ16;ĒT2Fm6>S@[G7s4ڕsDڨ}},,E?L ܈ ?0!ڜ֮QTRD &dR9G-Y4o9C}=tqL-~Ӗ`}c,(},|"&&Y&GX\}-[/ǪE.ȥ vR_1vhbZs|gOή0],)|1IU11#\ <}}}b~2d gHUL ?7-+Aw}S3s(5RhIt_^_{r~iU=N7Ig'cV7Vj^kE{&$t։:ia~IP[Uo388@LJVڻ1ѣCef'Fhs^ W<1,O7# 58`0nQkWd5Xc5X3+h4jT,CRcъU*j*"]6~\w9*J?J??htsU*b*1v14mkc_'q_9Gq\=/u܈?2w|M5.1o_=V9*]|:I dLŘ>e%Z]=y+y8Ɋt1z#]XIv<= M,axr>rٻe=mﲠ^o/&o._O^ȼWbuv7">kkƚGڸckYεcص(._3nJ'_J6EXf\ߛіٳχXְkk`ΏvLkjַ<"OO>8Rر5d0ߛRR kk`j5j295Xc?oQ!.D"aq Z \L^F&Se5Xc5X?\c5Xc5Xc5Xc`k׷Xc5X3j5)}ߋdJ*h.eVX%!^eN !CͲqZ,]ݓRVdjt 8wlhBwd=Uo;KOy1S|fQain%Hi}Iʅ! 3(iabbe# IzEzɈD?!DU 3ǛSL4=y6Zɱa*Oެ߿3]kcfjUȵ4ҹq&gmyś>S@SU9Ӳ^yJUU9_U/lOE-pj']~*{g3PRE_.*_OP9_# 룦CEci> UL7?gpBMJy^֊ek^S9d7,M3*6 FMsݔ. }MJXi@P3Eu_qS}VdI^>I#"$4MOc`J'Z9W<iy>g˖}}܁ z=aHø/ԧcqr+^r+Z"9rXj;%d{ڐZ3Pg8ŗ^0&X1SˌT?BCAA;A)ohʼ;>|ǓG&u];yJZ ەWr$2<2;+hƈnRrGNwghKd&$#4wyR*#D[ L6 fII6{j~!GۻLPME5q6DUaͪg?m~|9"N#RXm[ZeZ ΗK,GE|4OA1^᝜4؂ I@+DR9m֋bnV%g1+Ü?W&c1AfxJwQ:f,VƎ-[Oyr&2ضː)i a 3,ysX3 9a;p&^U]Bz{`kJnE7ZVnr fV0$$Kgu;O]`)/\j!. 
%*p6季hXAav>K1I%{D.yHUj>{7  Ol],lI'?5 ;K |ʄL˱rcxYD>~9#PR7{;yػ* (1b~xD5Ff~`zGќ`\?vTV#Zm1 Q]'!"AEJRi".TD|a+br7##*[gW;]Sm77D{I,eQy[َJ41!8sD/#-f Ka0&P7|_lm JkJr࿮'*A:ý;Xp^Cj{ ʞP,+iwwvэFi"mMyvԗ=#'; 7/޶t<=J;tO@#8c\ApP{Ld<{":K$:&FMq}\}E b*-jŜPD5 ŵU(]LtkجRDߒnI.%=3y?i"I4vR8Y>N܂bS4~I|_1g.?8g;Q; Y&0XWDLKZeuɮBCXk`Ne(&H/S>P $i tHT=v͡N~E9I {+Za}Em`p ٯ:x}Q`2ԬK%ңqwIU?.{F0G1~Qk`C"ֶNdY,0XYYDdPhٛUfB!|_Xra8E X,LfTTb 2Eƛs8;M_RW‰8y#rLp|`<$"*o?{{_^ZZzG@=DҊX y r_W}cm*Կ^ԸO`N8q/cUE*oXt KGQ,Eݰ r2KYA3K)^^9/QUw~IBl/s vW##9>}L&C8EbHSɶj½D [[E|~\gBuFbdf)/CM$-)I5 2Ln!wJbvt{++knGg2.ӈ_Iu -oTo]PhY0{OꑐSHnm޸ͤtchx'7 s;:(Dkf95L>?W2 }4|`ζU`IJ3$w eIW_`rӆ4.K_2OQf8N_&Ezl-L|Jn}pmlN{jq1m Mmx90:Nz-6^\y]p}S :ȸLeE掃S^܅,~ =3*e|A1&V&%[Z&fq}{cu+?&Ǹr <}3i r sJhUS[kNXJr,"Z>d2u ¸jhNbZ?Ғ1+ 7 -.'SDCM.tT.f7Ȯ|j庾zsRŘl}}͑%+.cs?W+ ,ő\708ұ#'|P+^fWpMIvfsYߒaEXq';c%.x>eІ#Jix^@Yz\w?#I{D?(=V#xFY2nm/LZը%11nFq07 ͩ16E̵T`(r;9-r^!,TRo)}{򊋹몇oJUyA1}ΓN3$E0i~_5NI|\ԩX {yȋ>/&Ӑ|+07#q!fAQPۚT ʣwo~Sz,{ i@_w^%lY}> RMx˖ O{ۖ҃gtɶB .}D8zBq)8<&+1}jZL,7CD OjEʄa c]1Ign '+Sj-q}K(:iCjs㧌~VB=W.z34ħd?tfA oӸM]C1NK&m~dJt V,7c}˔0a$azˊzHIc,Np9i~"S;Oq~7]YI7@˳hKQL{)7M\:\8=an 'QMC3'fsF_Ļ>rqsO30&#v`o:/X.r&xUCx[R9 hyo}ǔLԠM*{ NĹ &"oغS3`NrAn4,R}x 34] Kܳykr219׀_+ ^00o(wN32ÜH+b0W9Zc8mu؉8y .W}|ez99d"u#/WJّ;2P\7 ""̟3(CKPzu'MyV9`c<,CP jd@6"H}Kr򲉿 )eYށ% $rUB#٣)7$ 6}qޢ0n #o--S4CPΞ:DlU%i "_^)5tz5ϠP*O5Ē rfD Q\&Kwn]n L M +G *@6RMzZqjHq4+BJ(v5G]Lߔ|%#e*2e,zdV/Wv3oseB#r7ė#/Sn/|xșp &1=QY#"( u-x=ք!jVDeN ׮#i <|A{ݜ19{jd}ܺhJӄHN:_h$zWꞅ˔TB.jlS-3zΔ>✑ 8w czFXO-b4o᢯a]$q…֖ba@J09SuG/,!In>p20샒EM(XD=& ܾϓ1wn Iؗv2'" pLy;&Icާs؂5O6 AD` W7W2GGw?ݍ;;B]]GӸj{2(E1'L8zX@Z*x8yDOJz&w YX*L݈qs4'VjJ4e]dEZ^ bwˇVV7 -_"0Kc8w4.ޡDz1:qp&qn~iXˇWԤ#ԘKMGz%brՏ|0E;\@Uk2m9\4gu(f?#ŏl<.~B#ߊW|Aڀ-{?/jrE.>̵U3˵k17rBAD Zp OU*YZ'hZ.30;_㓎1-!ʈ!]m/MGI?Ed G]!z<'5#w!6I|7@061̲`}ҾKs 3#]<sxu.C9&m~xRu$caNt#u:GI@.^8E&+!4-܈VdGb&>ob|z`  hz80:׋٭}ZJ I{ɷ 7rj>ry jԍoH+Yd8+sk^`cCfK[bNIyY&t4݈Q;ވ[QfKD;{^bbG.X>-]S勱U"]o9w4Dq>~n;ӵܪP=/RD|Ù]wzi{SjKn8(iae^$y bIޓ\,.F> h  9Eq&,j5O<,,[KYdQAF}~ a`vake6KT|@f>7)AǹqFd?}PΆз%Jh"T=-&Q ;.\7\:sȧTLCԅ0߼3r+N1)ޜ^q񽍣Mxl 
}(ix+5zVwXү8,*m,|Y`ѷZ2 4 ɩd4kqS4ؙR6n**HZ2+:_c w{J-⁽)1U|(K}ÁTl=putiY:@ ؏S;HP.\#9/,򼮇/TcOtR+&4O О珸j獎_0/xH'XÕ`6l-fȜ9TRMXk*ReaKT\6lТpKS^+a7ؐg叱1 `vX'xM{Aᦡj:6==OK;E:5% XN^Ӎ+7;.(&& Ýt!Y)I6=BԂw x`6b*je@AՍqǭ焱fFՠZ)}DBh%!#gz 񦴘w#>]e<ԆK|l:#(̋Ѩ?6A) lڬ=I|FOfI߄*ZҸnj'LCK&Ds}.:$|`;z%-jkafrKt,ȗ|BIf)u7qUў|k05 Vւ[Ƅ'az*ggǃriye 3Bq0u NM^VٙiDDsC1t/]_&qĴ,2d{5+˖rmĚ}07S1щ =HrBpW}|3i«u+ȈUOwk ֧{!Zߑ.r5 F4Lv=媉0sEA 41U?N#ÅFV>%Rxs8]B.WI$$7P) f7s?\t,,Nͧ$+Lߋb, |RHƩ~KA#n٣gC~^{Os#.*^}Nb|sLM$.5 j#[S#_?8o %tP)bpJ7)uS<}r™If5 Wa¦964tt77mn񠰘(WRѪ= SKdR&;?5Zs_$):4_5#C,;'1qdFbdh- f2F,D[ls\q}c*-|9nxყOݤnHt[ߺN| Z(X\B@F-VB¢S,#I&ӈxU 7ox2'+Sót#3Qډz,oZV 7ԫKx~wKF$V* s p?GTe6iIĥi6EzuGm^0d$a`3cSZl٢:?UM ;8{W,Bu+dR| NB`abLeE.7,lhi ]eRqh>m0r=KU7 0RK|YGGKimo&ٌwiz9SxXS9HIz0>tԔ~:bKCS=mpđ._%M+/9{/yAT^9ӏR8z٘3ܼBhF߲utGy&L.[Z ~y%n?P< '7::R`R_dhKa=il#1߸zr5'pk-x%ȟ=/c/H:p!Ϫ?PUCGH?(;1EC\p]ĥ0 H@k.D3Y@ t6%`똸'IA8]k^ˊQlb)(4aC_Gabr/ޖceL|)q^EnIh1!<*k[qև]%KIr9񶹛E{Qm.:ƈ4d{^H4cؚ [SSU OےT{PG!wowf1Lhf#=o1p Z+py&E idcser>7=3aT<ЎcG{{+9ސ74WfpŅi\62]̒Z0bgnaZbKJ-{8GZ̵nv h &*T(T+Z^%zOEV(+Edtk&׿/]qWx9Tw%*%M@[w0XF6뗯SPl읩&)ߣ>ʼn[B@Ók`' 1#r< y,YK#O[F\%ZR]Us]%0sm Me\>tg/c}^o[szƈ&Hnܰ goM{C.~ tftWH&gf`jxsT^ W(ioUaR"OI4¯K 5|K'9Km/ZPL8WwOg~iE_M =U?kH50VS8\>FXz xi"]AbQ5Jqys/_$${cv?"[±=Lj/[/P܂V̵8c -+ rˉϱ&ix-iWju(AЋݍ38O[]13PLv(c2 _Ucr Oimz<Ky_k#4a[NsI-\:I !<& Z}w#ݢu{@/n}A{i i XW-O6Bi1g |%5:s=nl> #x3V=K;qO`RFIͨu20B=!3')*{'W\xB< Cͥ8M`'yS?2 ~8cAīH]qu^z!#-&08M,vDZnfƑ*e2cB)mEBxsC&K;c$8:8JB[VP~14Td@ĥpzg䔥Dbk"C=3|AHS)68y{9R>MJnvV<ʯFښwɊG@u=~#..fVZIr8:q?@1汶9L3[[p$*9""ie:I%M9 mrJ"DxxiEsgFJ bm7T Vf;sNmloPAe^,|5 .-z|ܝqtr5y䣵 cT $_WTG`SPg3Hݲ[щWmBg*:rpqّ{N u?@ͻ؉<=|mzyK0MI_oX&~/>L1=NRm<< Sj?R~MZ@S L% -r1tw+} >;pXϐ)OMוtWŔ} E1.~F2*ln}UOD⎟6μmE,6n^xbJ 蝿I@BoHd p?:B#S(Jgbx+~wnacM@t*op&)sbX9 :R!ȓP*hʉhw5-oZ Es770}S=!< ^7KG PIȍiuf FWgHDKm,pv`д3%u ~f=#0Nbqv]o6 +Wss"3Rƅ[Vz t&âd_X8{*(Y'9>dE<]ݜwKc4u SL5Lݜpj+L{"3-  , B,3c3w30C˹VQy~ۉ rq=A4(ݗs9{fgW/C3̓0nDҷE;Z:L 1>"xsi+hȌڛ)Z3pv&${%RF>1dyv綯΁wt׾s?&u9ّ<>"0ލB":&>PhRLlpu%=r3u5QԂo0LL*(A[">׃7@P3YNzs'$9~_2vUiwB>{Pq"O:kᶗ 
.N8y{bA0D*ІHcbr'j"tO6Aj>Td`ckw"KѰ`#_r}3LVlKo,IJndUj,qa;TT&YMWWGq,(3JVPQGV饹,5Z`s; =5٘8gh^ Z5}u%:Z&jNh5E%,nƧn+gG'<0$%;'nyK@[Yv7YX[+ε>z=ѭB~4ɘ~яn0ZAXxSZf~Awj/ϱ$]dA*%eqa>E$YR,-HXW6J5ZͲt],UkHǜvS2?gٙy49fh|9][3,^XB25Zd_岜 ?#r"<s*͋E E{yh[4.*ՍSwju-TX\R|8+e>瑈c^ bF27.&~Ϣ8g)TEӄQ)?-9їT&g$"6o2u8%T/^@*b^V by6u?)",(2_q<)Wc\$s_be92旿+̈}>ɒ׸ES"b(R欅m|dQ(+_)jK^Dܴ: Z],5N>krQH <QW_E&^QN3s1G"J1?s]W/Q#q˕ˈskBJ]jNsB_c; Ƣ  ѪcUN|xo sAA)}\0B+;#(4шW4!97/'A%-~L\8i65<͗Ja,2B=nHhId7I<`sӗI>y]KWSsnW3 OT= &:oQ9Ѷ._=Iov },Bis/ƒT."5lI|ʘku,,ku/JsYɕ8eѾx{sYY tXXD,k 7 ZK5,KA>[żukНR5 ,}8n^j }j^paI{N>J\Ts0s^M+7J6g?G%qW4Y}$Z K{V\%~<ԓ~ `%ҥ):9k{]0i蛱t5m^fGsuc%g }ҌRkJV.sR~XjGD[q.6_kyeRk[3/MwYVX٣n~|wwZl^"~iV#W=|jDTƵ^ ˾m%J@#ښGP=T=a%.+݋]7ӕj{qQWU\W&VK߳^{XOn7N\s%+HDhW{D;/➷JhXO>kwts>ctOsC p|҂d4|_,# C(2FT3y\qЮ*> k}D6J-/ H52([/Ⱦ'ɄAO0;=iKS6ʘlպ|a]W38>Gz^33 ]Cru[?~K=D}3k|9^czhm g;{bh_lG#r&&V}pv6/anNiZ\fG|Qh"ٻﯨߟ{ZNNrrN^J]@Q4^QD{PPnAP)LS~ޙQcNNH>|ce}k{_ks/l9&'?W (R>,{Oٓs\F凰si XBg8ʰzT Q_t'9uĔ<&x&g ??ۀ)ţ_I׍5nlS QqxUorqLXS8l=#&>+)i?Tui_؅͡Ki*?>CͿ?iᴿE`ϿS v-KTlsc5[qڭ 1G;jjԑɅQL#fS/*syZxSx8-\;ӷ1I_>tc#<)VGZF¹P_{S[Lُ5ij `Ϻq4=n&:*[zBe.mcc6AoɄTn׿~{sY 37cpX nL<;ɜUO?5 AeIuzTèm37?;& ֍ xF83Zؕ?pYnӳt3gc \yy}p82єosV귯Q;Dcgn[C!B3OETLۨncwkQZi(/-YĄe7s #&cC9;C8p0XG#Xǟ`;yy4pU-̚l3]c:uȑOZXoYfG|B!iTY0w- ,Nft"MSoq:Ahh8_(g/[Ùk TABr IDrvc)?12RFjlQD%oEOL¢ ջ(ht*!%:2^`pf7{wr{JޅԍphBm륫:DF6Ǘq?~2`@ST"<:(8ryEn)ѱ$FO. wBèjBe b'is֩d$Fd6G-6":>IDE!%TÖhbbHNBSSA;L *eKl8!eS>iIYlݴ`2 ֩R;``)H3yDFBM8wN[ٺDMPfع5[S_ { &z8ӻv6\}8]#)"Һ~ Dhh$QQk"+ Cӏwα}k Yk5(2XO\F.;Dd^ M5Ypf̻)=JlL4Q+ƠB!LEud؉~*{(ܗAX1ivhbuWEw 5t=uȚtr1?V63{XؒGE{cW! 
sc_撙[֬tE7eK·1a"D尿X.{O1AcJbs@/3"-fq.042Lanotah~ c9DzVuu8<%yupeG8G}={RH=Psrh/aHHI!%#+UI'e輹mL^ w.bיtw+.z0='.Q8K{U?LOEhk%5h7y𵜯d'#޿ԍZtju.~OfntH 59znNo "+Vk$TYDţ~{I{#[8Wqζzׇpy[yQo ̻| 9ddz].O'|!]Q?4&s^cuO(ĔS>n'$~=T!$DkjۅYisQ'<(e4{y\q ,᥺8eXf{HZ.0-k,;F#E0E7vGߵy!B_|TIYݞ94Me`6##6 \l$'.1pnPU3{gt q G5\!24mĤa`#gcӀ1TMyghKFNfgWtvT4vqVrŧ"™ 6[H ӈקFe.4-E );Nrqwakv&df,'!o|Ν[y畿/^mPerbh+ԝ–؞EYV:f&h_ǩyYˏg_ QTKyC[b8W #E~{W0<8EDp.sN7c+?W0n2SJg3ÓH:TDž8]zOyɊ_@k30cG6O =$6&]S1[ڒIZ-^ߏo_nX$Oq#'-Ep{}hN ;;`"Ssi8Lj9z/-Y|Y$JƝW`}:mt5qO6Mcgqhv4HTP*UXqէg[r Z8 s O`S!VQ:=I0 s&uEB"3~2Ws:hw2/}VvAxFdR_UK؛EѭJN mS o5#=3Nn#~ <>FcSUXLXǎ $쾍4jeRc̘%$<, .fE!05I?I5/9Frb;zH[[ȉIlO]_閄s&?(9nNL)qґN2Vqq x/#=4ˏJzT5^/]Lsp##1N(MZ `.1Rw$·'9r<#F F\ \,֭\:D7#|&îē2־.Meĩ2xMۙuix6=/{!`pqGŹ ce6&MSQ39l=TLgO/RbȺԋ| _C5%`ׅFڮT4_Mc͛(6̆y2W5^MbY,ĬMˠͽ8 sܮ'="W* 38d. $Zxȋ %e`멫ttvR[Ʃ3t"yc!=uX~#ͭtr4u7 J *N\oԽpwU+;ho+R[UĚTtn7Q_˱$6lǫ6q$]iJgRM$elbb\nY k{=![cB8R?bf)7'b׳?}@Y%Y[i5ױ.is^c[I>\t3q錹X4Xա\*ɭ[|z'lHepnаo\ccfl i*oWŽĮTOE!0)ܳep IA!8QM彬_KI[O݌F n AY3Lͮ$ k+?H#M]&dq&:3Ro36]=F~©bOv07.G&68Wl [xNDQ)_$^]7qat> ϤO_˖?̚N\Lslf~6.EA\I"g6 =5|ӍH{׼QPOH9 sV~3sX:n#DZ |_rU'+!mXA\~\:M'o:{<͏)(zɶkzJԱ+S2)b#ir^>a"G8YM|'Gql[£|_H;re.\DN)[!?6ܸRHX=`_Px62")4# k`m}F/k=顬MKlXsM軮~*^g&bVGs\d#[lmh(Xf\8p ~'c"JbEQNzB!~p(|s~YԼi:犊()L>*&*ऩ#;~ wy2ie)FP؍mN<>7jnrY}y_`h)q{ЏQ4P{狯W@3=7{>˾h._Ds_KMS7>ֆP\z1#Fx:]c[Oaz=DUhB._W蜞RvY}~Y~p<6j/>OC4\"\!B ?}𺕟뭡88⿆fwj!B B!B!^&S!B!)B!BB!B B!B!~$` !B!zyx><ϲKJ!%%%%%%%%%%?`js-+CcnUII}@1fff0?L-ڡO?rIIIIIIIIIIIII\?`N!B!)"sB!BHB!B!SB!BH)B!0%` !B!s B!0%` !B!)B!ejjl>4M eTU:FFF?$`ZN8TsţMSܫmb_q<͹\q=*zgpzT+aA?gb B!DOO,rhiiann{A:::>&h|j6QN4BO26e)nhCj|;7|8s^&06c)/C]444507׵Sԁ]<ߣ{kǩWI~~|A}e KFUiwyذalEgᛩT":8sdAzᛨ<[cyˊU1XG˔aӶ2 B!`X h_`{38r_y8s/:Oup Tn"d}:+SWIXŶ\"""9r1u^N%Xb6-qywJpo'&d 'n6J778` ꃷXK%@Q]kyl?799Bi`Yx(, O&j)FsT!snP*8BM=d,ai˧ӗ7>ğ^y c5r`uAx?rT2WˊϿ yx4ddaٜ›>FIᥛT]/d[f*k?7dmB!Xnt:pCVk L,?CNNO>@i-/Yxo^P{xYN(K9'iƯ{gm8ɊSt]g[&r;UUZr'P{"uY+ص--9yyY$VoELͻB!̎nekkP1::Z ˥y̛M>Ug>y#_md6q`O9Ξg+h2kVUC1Tv2è (< [Hvs/G 54Ͱmk Ik>ؽAFz;zd6JlRv1u 44{qADGP*0\w8B#i0t3Xg:Is7tr;0X~įr}}}ฬؙ ܓm~ 
k4UgG觼. :ǀWX~YśooGQ^yʞs\:sGN9OLcq}cZ罽;Vhǧ!B!IJѣGʡ!凨=+ykxz?A<#V"M1ZMvzb >y'wm&tz&XpTQau8qX8Bay7=u'HzWe >"-r1m|88+6ln$ϩ)2#dG!շ w9R9 ~6ee28-#W+ؚÅ_OlDŽm̅T={CNdWWײ&i 41`Pl*ecJ*&+iy7?ZE.Bvx06zYK^h*>|MrX4=DVRW0 _U~X[y3.wZ9ؗɟ*G4PΫ+W?W1 B!IJ?"i2c\.]0 [/1Ɍ*P<,D\\8@RA oz H9%4-7ǡ v420rKgXJzGlr*ٰB3N`v:)Ng%cdKB ̘ؓV 4uEWLZO+)'=kK/5{nSQ߽nUoֺtJKKI]7qa^yM^e_ʧtO8Ⱥ۫8xveoGJzگ^! NOdŵ[Mԟȫ+0jٝm>B!XNC2DzC``GljjB>Uu^k!C(2Nwa>8}tݿMGqIEA=WNwW7=}LdŰ!)2ySiq4tY{WQnq9h a΁*:hsdKkEOwCcӸ5\ !D9&23wsQ#!kù3`$}>4i1<CD`ҵG} `&¥K>-s)u3Y'|'^N?D)gkxDp൬[5!T~|éhbM;\xs'ov9~جXOC(e}hɉ|JNxă+ؐEouVg{1(|B!BwX2;[Ő<.ߣS59ql.djfkGO !*Ǯ3XC|ħn &΁#ܜAdebtbSI e_I6cK<7y֑46fpy O 3c}SG^J5=y)̗̌ٞˣ)˶ nNs˧CZJΎ+(N3Wn%.eid;27XbUU@y`VSӆavqF#MU^&I &3Fvō2g>h`rb9 a9B!*_ڥ2́}Ccwyb`S5\v+YfsX K&ܳ]B#Fj6a4>n4bXJiG?;ͬހͩu;؜,0ch2PX-f FnjFvT|̛46˼мXLF\PZ,xT`E?;Ǽ͉LMM-cЋsbbf̯/&?xQhB!PUi~и?ic5M[ܽ !B! !B!)B!BL!B!)S!B!$`JB!B!S!B!IJHҋB!BOL˯@_Բ`0zB!Bb71;ڎhZvv_8!B!$`:N1RR߫fC!BHa~~^’Բ, B!B%L) B!BH)%S!B 0$` !B!$`JIIB!UF" {PÆ3 b->ֳzocctKѻX& RR0B!͏ۼ.~~ͷ[^w{޷q:2[yug;}NAL!B13O~bm-XDogzaM6g!O cB_ϻ nñdC17NߏnC_{ l.93׌Mlgf-CCLLMc0:/&'fkmG^32:.ft2f6S "Ar돑,*/HD"g2T=SS5X8zsUWcgj~ N8ڒ-y:bee%v Zl 5%D.y"{z4@ 163F'd00\#xbh`!F?QFab`(_1&6DhBP~,=n5 p֘9$зv#b3UY"=CLL077] w{H)H\ˎ-7&>,]^f5:,p2Ax7WA4NcÛ,l'eRȷ rF9]̍rb_^FSQz)|;lIǨ?yBJLz/ m<|O^YoodOh_æ` s&PX@(ϵ0)?킹Џtf`tCPio KH+wH}3H_<-O@Ma $ "\?fE, 晜Fa @"f$w;8+~B])F"Ex1&[ʲx%Sc1_1'MuձNnݼÛy!y,Lִ'Y]F(deM GH(dqQ@)B"Rp9;M5PTP~7K <.Z3}5XUL}Yr> },hyrb6V:QUS^\ {Δ359}x؛E;-L5]܈Ѓg()Fn5:%b'ˡGl'aޡގpf<7Υb& [+$:b嶗gؘ|ujNd EYˉ}3s ([P4ĵ8gTUUP2N+PVFa*1) >/DM͔^GߝLtU5HHc32UQ#מQ>GiG%*:<yK_1#raܷ렬ER6˧h)l%x%R`cm\fQ"eEIfzOR3F!H HV. X^]gB1KK"kwQf&[d#&\^w?)\RODOI;WDbSX|ܢg/V0+[iDKkds0?;<@v%fjjKyNdXa{3\b7LqDѽ6Vp'/訽$aefHD4H:SF }tڛg ">2KIsxJz"y3qL* )\| .߮ r+[Y$%jT0X]TU4I e1T@EU#3w鍪{Yg~!j_P$Lb ۭ9u #UM!doomDxm:̖+ɘq8>aI"aQ0KPD3h1A. ۔5I uME0#/>k01`w ^+hBɣAZˏM6dC(UFڣ*>qp1Ee`FG$xA'ntɨ)iz~q^p:ڇmj|!?ő q!{w&v*G?'ad;1erm'PV%NOH܍v|,\0Rr V'DzNk?On% 0}ѡXY)YZ+$ss̜ēt ̲e1BgQVT 1w Ks\8#2n{ð0?a`2"|026+CHXY\2!z{ÄHB4cOD(^>dpho^$^bvYʳ“:]Fp?' 
=M{Tw_.F{YlRi)%eY wxkcgEvuys N*k`qWsΐUPH|Cݟ+n'+†G9qsn4c8w ;J&l~lIb4rYs-f"n׾>IG.FƸQ7hy83恞?q-|Y07w0 'dvQDO6X۹Sr(kG*fS-՜cnd?/r6X,KVd`gGS1&ȊVn%(8\\caaM|sI/"-YY~i#܂t Do>َ,{H;'}OUVfߥ_؋y&X@K( r r0@Sߔ9dDDI˚ҺZoD1JO@][qF{#@CfC1ƹTuI8ՓP%MQRЧ& sGo:9k Ζf8j#,M = P}X'װRp4Բg;||wb`^Suy^=;8)  ]m<솂I#3n6*:\8԰upcs'*7QQ[q>TTmHZF2x掠sPT$nƋ}=H<{3\"J{a^C-ps&lQNѣ!uQ1:5l#H\AUט&Z8PZ |}15 `ER51 >2ߊ`=>ae_Pe2?GExk;!daaUĹhoT6 g]^@IaMc"/samCH5nF4ʊ5s/ M5[zoW&$oWYgmuH$xi/k)'*.P Cy1hpUQd}A_!h'x+;BuT |l/?V<l$*4OOԳC[7xQ'̛ 0EpBTUQ1"z5LVb%W٩+ c^LS 'i'N'\ҙ=hz`"8M8XÖAOGKrLqaq4]oHR&;PUһ̞ݪgh١GJJ4G䂉h:dVRw7g'<}qwFMM^wpVVlE_Q)x[arz!v;h dNDEl@qPk̼ڇv2qrY1V&D$Eb?oW<ѽ[dN`:Z쓉kw#p6ܽ&.jHq#sV>4,%R_XC̴pc!3mmbS+/-1t[=%e{t;H@֧]E`v[@WMG3datylSPş@O݉Y==/cM.& ;\܋1y%-nՏ$s5)CK"PE3#q1dG,mh"&(А Ɍ/YNde_Սs#)ؼ:V?1k>}83bꮑ@XX(Go,!6ĉL=׮@3&lcyPC^L@lSS} !4"(RR:/b;Q B3YHX]%)ʝ%&8|1Գzu':eS0 l f` o"[e__DG~} -{qJi̝v/txj[&1=Nh,vTP+oX V]Q8.1 ܔnDoZxL0m `xxU#x́0U?!)Ÿ>巏"v{cxu\M D[O@\4q`;X@L/QVp+T>IpB;坴=."1! Ow#y_̦`V -EY<%\4;B_E.%r]` q6x-Aᄅ`COaÉkO)׳P`p FdaAVrbQ7wfJrI4څW碥F[M6.>Lcהo {Nê`Rr6RNw%>gRsm<ꁥq"-wb@$"sj**^W%츒 ΞDDj/kF|~#CE(w4UQW7n]FhP;|1"oAGYԒvib` r̽k\9}}Z=܊ǑB_PUdik*]PSlAdp8(`eos!%0J*4})ihULEAʨP˜_AD+~8HӉYr3_&-2njg.2*hvv>H?|^ۍp%An{zq-y! 8]tgu4VLr\9c|>#]OwƑ+U4U <ַ$cnaqSj@v,[\0-.r`J:T{cÔǟqd2jWR3wUՅ%x}/ z"w>IzآM(5>FE]8zs١oP͍1cjS;ѱpGko OnL/Eַ {vs>'>2w'et[,⦶B{b=^+bŵ맱TW' gCvY+' TD$xW#]bw;O?"c&Zb*v{x=3@c6"߿-< *ne˶mpm'ξWi5n5puk9WǠNmMQqIwCб~hƛ0RVkc- m5'70ew ?R^X0nlW+-|8*qbF`}aZ}I~fCO"+ݯU. 
4vQ|֦j1|K,Mok%J飼,2d<=quBCϜ#E)?} 5m0E̞8ŖOaG{[ 1v"q&UCTuhXBw;u:tQQRHq3R$-U COK 'L^CK7BCCqaNү,_(Dڲ 4mwSx]85H.xB^B ɥau11 Ö[BqTa|),kX%+ ϐxndoFJQ5!=J:CY9vّ]b\<9}KbAE}z3TFkT]cJDOSs<)%$u=9p&; 8v ]LjD` qbww'A a$0==ܪ>]wS9﩮 \iwXs>dF~9Բ kρiM.`8uZH"uGn3ۜh~ME0fOJM(㝵ռQL֮㡄Zc<'$c&l e^xUgp^w>Ig0NOb_ Ay?&EkxD 9gW%k-/wyX=ehuS}lD wd׉Zv"av"V0cùZ*i>3Sʌ30: 2.DZ*Zu,ˊh4OFR : PmEgAzKXBZf6xQ&ۏrx*Sdfa7YNbR(j$d)ڳ7Ke$֟j]էY 1nTQRqԔ\Q[Y>6egr@̝?e+V|7g6ҲP>si,y=h!q^adT iޞK8JcVo!O&ՅD.;Dƒt b>u\jojt_ ;5IxN]MZbʛn!1 䚦,d*|0MHN^IkA7M+IZ(K65Lj-J^,w_^iu3S8ThuaF韱MʪE:\ʕ]Pz͖,CYsO̔TZKRV>7t7Fq @<Ǥ$ SI]ydeSwl"+}17-"O?/#sxg)Og,KHMI>ne0;N7D f4MJ6Fc1K3L|Z`_CF"wf˒,?IiAQΆ=B@hm68d/ۏՌ};,Ր4LV{8VXi.$;;{1c]JfV&K9QF$F{%]" =ݾlr׬f\JZ;ؽzZkxf N\BPc0|0YxlVzC&7H)/Wx0B};FE`yѐm0>i67P8067p2O$m}30]=j\yxjNgb0c.Hˍ~4g?5iƭ PSYA[sǎ ^aAW㨾s% i}ך)--H۵K!q}f1rڮrj gNr1.8(e0Oci0xyh4 :['d p5 W0u/&`{Aihwcؚ[#5?d0[;md `x=/H<[cڒ׺чy"(Dbb~@ОC+ԥtC)%L$79b!ڴѹXG#bh`d ,dI]sĤiZt]M0C4<Hm L|7-.!SF[}>#b-pxuH?Xf q!oQvMq _܂6z1lZD=f7(-uV/ m&0bX}2n?A{X|?o y[O3菌/zd`x5r/ygs^_f(:4ȷ˘g'$F}#4-;>ǞGHc17 rq^GEA+iÀ'#~cK{ xΉf=r"zr.n7758gU#v0TӖ;mC"Eݦz{֞j@h_sX^g+⮇e,vC Fۤa sX5`̃B3Z/tP|\a4Çn"`=D[5@Q{oFv:-6E߸[>- 2?=_U<0q=4[^]Fʛz6@e}7KTBa}-}::J9SWw+ZVPGaM,)籩LJq]_${eLrʺ8]u|{ʇ[J=>KR1ɣ+fDbn5E?WJs}=Z\38 ]{jނ3?y諘pw`0_*9нBCF$C>zv4-?}?d}rgx uP({ V̏ϭ䁄 urzK(qSSxdEZ"~cv1?V1Nq<0'"~uv ?(&G|pR ?4_]ijblwvd?=" 2J`* BP(&ƕ^a0']檄oQ7cu[oo6:N)K{XoDdlIc(cd9jɞ{#G9w2`*)P( B US39<5 z,Dd۴h(gˎ5*n7m2/V;x ?2cW;@L%%e0 BP(JJL-(=<^9M$5~yDQO/_s/qs i5JJr| BP( "he|-6_3@Ĵe04}#ϗ`* BPL_Y|줯HIISP( BPt:JJR`* BP( e0)4 FJJr|b3sQUr+=|7P^qh./BP( b"YٮM]` ߏD"7pI+54SsB?n]E^^Ms"ucĞRJ)+cdxoA,K8"'?$qNj`g㧴&}/:1xZ;ZIu! 
+h.H|ïa(%]QSQg54_P|C)(`h ?_v4YtT!Ӂ7( *Cir9Cdd;"=M}R`c;˗C/lZzs6^!|`#pAl"0-Ӓ,Vȓo~yWJA?*(,!h7|yQtuo1Hgm>m$[ yMwh#1/BHQ6s{R&]CJSLHcp@Q.92'`iږ&c/mgĘhۦ5>'+-&wPQ:|rөZE+6/xz0U:v(c['0C=iYӳ]lPWGZt49}( :ٓ`qwBeTQʉ7PWJ6O~Et~'O̞2՜Dқ{+㽭(̑f'1{[+w(ymxJUnKIM(jON'pET[mW5PFL&B\Jb֙i֩ܨ5hTrzJhCtgSxFZV:1lX(7K&6>)&SnDVj$zyt.Ezno ԛ'PPgn|hRptfW'%X{ COq?>q0HFA~BjJ.R#bP]]GB|,E (bڪ:iRw+J;i$#>dTN|R&H(HI$&.=t{3]dOiB $:gP\mHJv6vVVqvO4\Ǩ)H"65+RL&=CmĒ+@Pk)>/bhjۙ!3\.#K?DŽVDTPLI:J lwWdK*7sƐÛ:k ML"#:(?MJ|4)8+jLO#Ίlx]ՅT# 4̬SO?sd`9OU8*u 搐89) عV eyEES\'I0gTd2WK|l4y]HnǨ1Jwn2qxR;F{>?NJ+*:PbcI2I}]LlX`TKdtfLO|J&{W<1_cP.N,_boloԵ~|\[zˣ-*NEF*m|i$fn;)\i9_+on,'Էu7ħqEwO{O'E)~)`dbxbߎ˵y #mF_͵ʄQ}EEAylSI :=0vwyn^ԔҋҘ0!&*ƾ9 GP?|5+R޲:Dbt,9]-:-TAsJ#Ëb7 ܬ±B);}҆y%M¿gSԖב𜯾f|ig2 IM0t X7\Mbb,9o:LXL$Vʁ6OGz8ˋ-L66Ht'$ 'oNTbJlW] Z+44X|HJ!OHEw1wQ0pvO` C{4DY?/_AїjHrpIԇ<)WfI_hOS{0*ψ|)55b ^$52Q? cRLﱂ$or0>FGo;/+:_3N{^# 4da2KFi)'cƓ..jN4E:/7gDg6)7(J|F #hqz9> tV`O>=$13҈v#0&]ܡ$ԟ "Ȯ|s7gƩL'x5@ev}ƙP`ǾRA[2{tN/ݥxgs}6C j9 8::Mc[ +M`4XLI/$&>9==8:ȟ3щ'A`E|y'c|4uZM4{[;[CWc) QϜ(n"?;&hxYj%do?%b;x6SߴQ֔SA@l2 1Q1M }RTg!^t z.6trudBR8:jcevPmc|Kkg0 Dy{GWU2A)`b2khzH|7kCcUcqbYe@CB#UtK@Q2ÇDž jr©[&epoGr{Nx͝7R5R\(LNwyNba u%= `|eLgG׈~1d}{oZg:'hr#C TźxHdTo H+ w&HrqSfvim};P:yLK_KS>'4+ǿS>@7Fa練5 i|,r~|O K*J"f342v3m_P96E돔^>xD OuOZFəT7Y eyf *`8ˉǾ9G:zYhk郯UX.Wj_(0=K@'>$̓+Z">A xƓ}E9lߘEij?+N0MplZTʛ-Z^Nt.[IJr* a|c0gr#:q7 :ʉ=êX <VF{~?{ϓ - iq}?Nt ~|ɽE,֟GL\iO $vHgώcw:FO?"0%:2Hq.n~D&[χzvwcx W( u{VCrX틇\e5SHgrhhhWĖI8;eڊB^k7yDDR>iU\T7qb{[zbJ).K^=믿T* 1|3Rӈu૗E*J4}""c4?/?{HjV.u`6[%C<++# Hʳ @qyv,m- FܛCb ^ !6 ݝ<z ?=̷҃sZx[ :sq JEѢQ p&GDvj1nDwFL$J."r)O bHH(.Ƌw}4w(taaul_kPl4ꚎXpz15$WAaܶ*İ6TIHj|D&÷_ٲx訉uMΝ/n6L(j$Hb_}L7e\ZoG@B&Ob#%Q^-Kn,Ƕ񞝞d\}Q ׀=FC}JE<"݉3q:ڈO:"dKQ2Ks Xy :^ '%<.Ѽ[-#8Wxʣ!ܪZœ\AӲڒmT:}9Auf tX9lc(;r^kI?^c4ܘmi9җEi/'sZIL鍭d08{L{z1H6ǰyB +.ctvܢ9XşP;1dx0rD> ו|}?%ęIx>+8+`iIGP#=U?LWԏm:_=RV{p-d?G{<q1$OWv.{0{HC68]TyFr'7 ۋڹK3&<!)DFZ5lU"|Ob.]8<_K l6`̉X`&.kQ3IL0*1_B>ã MSG 2h.ȠJmG;VT-27XΫ\HeEUʅ\ #|eU N!Վ jç tyC"1&-%~ \hh ~w+/Tyо%ִМ{~Gkg0cU18;< q./'#ey'W_֏/) u 
6`lq290-C{W2GX^ѝɅ}Jh:LDE1{NǛx͍ZmhMTۼ]~S'֦U5ĭNZ<RvYZZbem$Ϝ=QR ($6ƛ1gzq~HX/n66k-U,oHy}md3V̯37NzI3ɞWa'8Ds""ݠ礔73<4JOS ϬY82VƧqLiIw5|>ѨQ\#o.![St N,s(y?nesg5]k8kEbIj&`>jyԞUVfL,PB,ܾR.KILl7y@M+*1U@{Ù^Ygy̌zN J"!/ه&Wf w!i0O>;3wJLpO`x7:p@Lv@X]'-Ď.l^ ImIṬo2VAnaQO`u'>)hd~ O`NEo+kR&.Vndʉ2i]$)IU}llM6M# u1Pc/B>m:t!1Isy$0FK}<3Pt4yRֽ,|R*-u'b혉քٽCki+ 5y-\ln 3$+b Ǡ\p&eof7qH nۻ\|b3+Y}]gԇ1F[K)i&ٖskwq'0K"i`uI%d[:ev"1  =mXp 7Qf.kCFs;~.O0{7F6/B< 5r l^)P(I`¤#AM0N@k/V s5o Ok' ЕGț)4r1R.0~,E`Z%Y0tH"T:T$  j^B,Qb4葚]_p#2KQ\DI7rQrs}5r0"IPlW,1댼G#]LV R&|N!,+~kZ,)W*zF@A hHb[ di%UROh8u݇p/7jziEDN)Y~|-9'S]?J{~(adoC)-edT ۝Hǧ%:tZǧ~'0izjݩh_==JrcW,FZ ; ݼc`(gcb{} Ũ9Oq(BkThG $h[0* ӭxg;<(ʊϾ75P'F '0&ZsVˇft[yyd%`oOGs>~FBV!eyzHI z^xtTw6w6(466Gb+K⻯u(1ERZa%# |=Y,֕a<\H.&%:^ֆI+%9ܖ! y!TÙ:C9jFLQt/4,^5?Cv`PsJ++I U(k'rwm)Οr^_9JQM9q\]8f7_Wl->𠳱//Wz->5DG$;9Cnb>>%\ Q@i-\YVL=mK,!~l?="ܖ#eK9?\#- مdDքxLV)% N%#78|c>3=iVp5ỤY=='s8+.aȀ6s}[t}4ܣ9$#9Fx(+e<~DF^3yěuޣ\ՙbH.co{W'gbH6 Ks\7`㛅 8LǻxN7vH jë3a]:F81um-oumtj<(=}61W[,\r05X#E+;gzfUL== ~p%~N-airA!.J|~ǧX1ĝ11bΓu`}`4_39 {gB˜UF2*ŭCtzj9# 26:ֹ;jVW6ɯfLǘ\C2GfvaI[)}SGIV12:`no`\r 1qs5z0wR_ clv^#g}n\ёqf7/v1Or,R ,m50>< SL.pfyiZBǨazlsiK{K-X~q9Fm}bbl~ +{2ˠهED [fce8bxh@7DX-/c:CiƦQ;120\Jj$,Ng.;F3fWޖ_Fzܪj/dw]/ͱw@S0M_O9fWQi asq@:zp)v<aqx0o478@hD{`P^/*/X8@~u)2# O2?7.{l8B̭Fbqe{mkg7R.7lozpͭV8L-n]' .#Ui8ٜg$bْ~sa6Wi܍5yY4;àӰ][:j#K&̱_s&w1i\FCyy/F69Qp֙X| zk\%'އĚ! ;շ#=K u[,|/Sm܏Z3%вQ)i25JG1KE"r:jZ4UE t76UD@K,ROo.m+iwbG.xc҂zfxLJ C] -b7Wg..syr2ٮ6ZFאIioQt{ c &nouh{e1 幟7b45?>i\L/\׀VX4)[ۦc9~c7BLC51w Ľy$gNhK76\BIKLÑû{0o1^f߅ߟM{ٽww߾oWޠ7yV+}iL4ii rws]h/w`?5?ǎCopK_Q⏵-rLx혍?θxG3/Ew2KbZ%+sXwgִmW(j`\F`= fPFv+ Jt=BOe3΁14PQʏZ>J)L3 >q`jJu7bШc\^\[)s# rn$ՍP*߼-{T.Fte>D$8X7)2 " .I,*uj[[\squ]d?wL*!>I ̎RRRȰw)))~`xlϏ |Zzѩn՜2;;ٯ|­N3_ M`ᯁū_TV&nОe^9 lT;+4h+$ #jGVqJw?u*dwE{3⿎L=2쉊O=["$S=|B1X?yƳ/hfg蛚%Wqx~L|MSIӻ(2AE?Qo>!~6I8;`Bq8WT%Ahlѡ6O#5M 'z ܱ& l qe̎녷v$qRQ 92|prt5TJ)%99boBTV#72rƖVQN3..|ז.wcrr ,V=L?hNOaŠ J HYpׂlRtTSX# 451avnɅ\V+V7ad;鿧E`BWݙi+w"|h$60Ӓ{\!EDW17n^0ڜͣ~]x-^K?y}@VQyGs$Ӱ[+/jXa?' 
W$G}蝘$-ܝ 2amwD01 [2;;#psGgt,beL; (D_B+mc 47Hs~o\Ase}@ q/=78%1"5}8es R q&"X8yd'[}2hK.D}|xE#\J`~'|WM_|)o'~&I/\˯ ~a3a~jNƧ2~9_Ps-!N],X`  ޝnȍBgg]ȕ\(.٤<%W"{YcYn'/bP`0#﭂Y;!0-Sl D\l,ѡ|4̟۟ӇOxeN)xe+OIcj T4Wc8  k۲ ʍmB_b&'U5SUSPkJxQn+6>1Dz1~AlA ug[N>lKu`ˉTV1 &Z .&;7J:) l"8Sj-Ou֘'_mZ ,X`5Z15)ƑOzc҈iO%+;x2JY=WoS^҃Ib 0-SV1Z͏X:F,A&9% [D|kvh?}} -=5Q:q|􀘢vn*f *:79Zhx[XyIu8CC}D;?ǎL/keMĤW7 <"x/k|Szsq!.a<|LI,|ɨleӵBS9 $xY}wCjH+ig[QCwe36Ђ+J-+ҧ}O\'҃YJ )(,"9؉Gs?'_am}cu_KW_f>Q-G "Kw|ŷ/XXVzGq|0C ~mdvfpw|`Yl%΅pj?^VحOhvCQ_ _M& 51u!:^ F3 z'na2 }߰ۯ'h9F? c_{Gp͡QG#zn ;^ sn5`Vh*w*5GFčD{30^C&-2LRD昵{}7d_m]/&} Lh +?dž[-(ueuUeG"n:5:iJ,d2dJ%+oM抚02 )TzӉM(D^/4F\XA񆭣+OsB&: 7&SٿJqNqZ5c; Na$$M(Yj4dR7\!2ץ,͎DpH09m#ޥ0):;G[£ #췌\Y%$(Rqsfrv2& cDc 'u9;"0 NtTn>-8Owc5c U_ Sï~'_> dFNǛvi~fƄ]\e{yH[q6>tL27` .MvZAPJ9,6QGxz }Ԏn 9f|jz]3\$ՇIsLb F9y)%HTl< ]KhMU(,CzZ U"O(+(Ht'Mc|,QJFћ+ab{Xby7̪ʨ?G T3ܲ:ȍ/!Zm'' -?|;e5S2*&;XǐNbR q1dԱq,b&k-63JsIH&%!4vIϨ.yáԀ&Zfw(R҈1,Rql #ޣW\QM?$! j9)x@vy3""* JXahI &>"chFϯè?$B  iЫh/'?@oBs۾`cUHΖ(h@ :e~QNSi!s"] , L|+%5^r}}X;x#6,puuǧgcw2͍l_,݈f;±D,J߅֋skPO,6۲7 2siwzzfknC,yޥpn#BDYn;+# 2^zWyդ<\o1Ol=:"Ց:ں21x~o`mG4 w=J%OoΪDRa>o[ȻSwJ-33c%Ӈj*][FӻO .Ioo*Uzצ 2>XЪkzST6hzn?\}01wy>E|9!|e8sfz:Zm^wPxh넓z?aeJ[x/&ZÚbrڟ$kL-F߮ >ޝ&Z-ңEFV0ٻ߮mJn Q-32h=wVm+OL&],j۶/He`ݼРT1r6la|x?'q*3k?"wΖt+5k1 խBj-mgzzBGqJ+"ޡ.Ή}& erfQ6IXC >5IfQ|oKBגS B78k:I%$(\*1UG@Ȏɍ '4Vռzddd' o:0qdO>uγw`3.s[uJsњtռ˗}_#9"6<Os?fyxHގí2Vh;f{@os_W9Zx`?_|}ȨhE|} gjVˉ.L, ̝JXh0Cw(3WC3[an,uC+K 83shdG:Rٹh>^ѯkV?"|̛S%)ɅߑOkE{l֐QTIsRK{9*hpV 1gr4Q\?K qpeRt{W jٞj%_o7: 7 ]= ߼}mX ׉xG@bKzfS)ٛO;CU3!EA/IF L-9 )u;Io8Y%& ?Jo$*"@o\LH 6 !VyB&$&5\_/HaC k= _ ʳUbB&"sDҢF2QDtxޞ$ԢiYoUhdw/|}H.r/WOvz{ˈ}&YQ&\zor Ԝ]X\5~o`v < +D .U*ʻ?Ư.y3>3 fnvvbs܊vf{_駿K /i+n`aj7Q! 1&]LOo˯xR9`EcXdx`dk{Ya|0 LnlZq1Ȯ^#?G`T$2RSikw HbÑ9?C6NVߓT9!ޤuSGL$Z!ANL|"xFff&hJ,51߅9o Gvm[*Kggǎ &(e]\h"Ŏep?$0qOhsFwrįh4\SZ0i{<.4 /JfU\@/\ۧ45ab<)hegkhOkzVxTy.YQ;FcA,N~,ooQOJC1,잱Wk;KG]&_>p}|{(q23 rs;~*@s /~B}L23x5oF$n޾'xT1f`w2Slb-7d{QܳlA^{DMy8%"5rAK'w7 paA~FoIP7^$w3>_!ׅʖz^[h u-llʩ4ݓXg&2WW_Zz`(\ #K\i[4e6hsoh{QmӽA\ ! 
LeqDd;:ٛ LpW;ݢ" xQ~Hg_I6o6jZ υfmdf ^Oo[2@O,זps&kr3Y!ՙ4{(lQu=?}rwqwp 0Myxa06J!rqfIY\*#f3G6ʨu9~{z8rg ~WX qQx@ZUfRVX:F8"+x+zqpHݸX\|gi\]Ùh%0+BBã Ӷ%1V`z l/“F \[Jȫ#}#HL9gc\oq#>!_gc̎&bLxT/;Yz **)cɮd=<LiqMV3xWs>O!S|S 7E" 7(+sfQhgXz#:ٚٗT_⚁Fo8>b)6grᝐ e'ׂw)Ks%0flt Cf)eEc _ZNV~&;G\[a/k@z}!|́"=L"춏ohE?*V/ITw?Y3/zabnHN'p gy_:ˢn.Ě1tWx836Ջ {g\\]@kVv4I6c- L|WV,- WOHdށ!6w:q1kaQPy %WHhN?7-QMe>"m(n6e8}zp70Mίo8f~(?kۉr~BJ%<څiƚHōֆ "!.?|K2~, Ś0ORgO,䝭`ε朠k/frJe}"tR|Ʒ6' <ɁR})8n쪄KCL6ݳ~x6l8渁C.NVqW*W:Ң#G2z!zQxzg?Xb}bE\BQA%k1qV)d\Yӧaܞɝ+]_YDsڗȏɍ`lnWϒiEzô ػ>TO&꫒%Τ{щ%Zt[L&P/gmPDe>ce:Z%s`N`\F3tWbeLRynmCjI/bVԌm6p?W MeEB2 q|TwkȤaYJ{Ȇ91B_+)qӳqJrLRo*gy {"AAkTG3 4~1R+~~=tUQ\B~Q \IըW&9ZBgA8n~\IYh"20NhH TXlKDv}zO4e{?uY^@1δ/y>KVI壨u*"# 2R^2R% ƛ@xfzi94[" g?Jh7.(HO}j7WWIyIZQDq@Rp*j|4_rR":d1o7OBy 2;fyj Ow|ptg<ܜ %;H6&'yʃ# zHd?OLQjIkY_k\*#C$Y`iW: q#(8pV?{M֢ۤys S'LEaeoKpP9V;^׏ Wxge"Ii̊+`O"< QGq.a-?nTm l)AhT7TeD-C- IXD?oOxW/(ap!."VB}AK`m5h$kĻ:hubʲp~+yd®݉*|}<#86}\. ށ{G:d<]$"kL<==xVbHX6nm bn{tyP_`<]eI0Jr;-!u|CY1-͙A4 ?UAh Mö;Esa ^yf#k%~ ^M)&] ˅FnT\O` vէ_XpI N~MMd m ԉ1M'^ѥPo0g!nEhId%DA->duF؜$Q ?<=Y8\P/N+|̑\'(8@[X(.-TtEKTD/?&1u{\HHx(Ͽ)<'8o_:<с%~an.MOXf6ɋ {G‡(O x>YY%LuaKX'_ʎ҈h O ޔNXx$m?#cccALz$b vK(zT2ŕэɀKIH6~bOlOjGvcTqyv bRx9WR9JJx_F ǧ\}u1 `ooks]#[5Wg 5]1VՍ[Pl+.(H:>j7ǧUJnЩ(5:-7gR,-HJL|96;?1hd0).̾]Jg#UjM!>BíVDb I?=F}kj:%q8ܮBp6KΝ4 e hrx|\C522х9Dؽ[,Uk[[Ϲ5>;1=F$c0}f../vթsln[*WczȋB%/Nf\έVn秸o͵݀.0J'G]\q}qեTFo^g"+BxUuŵHͭN1f?rpJ0ǯoP(.O9>=Gt#R2$5WiM&r 'oKZp=/υ 7UcE`Z%Y𻥮.z{{%KKn. R7q{WJ]F{;_Іh˛h9Q?y ^G1Mjߏ(??~^)l+A'?e}?!AQsSӽ:LB\ V~VPCBL Ll`sÈ񣁽Ⱦ cH.O9:c{ϫܣA(˾3兙%1?|,X`iIwSף,~D (0}U1 XjT*6,0-wS*IM1 `2P*\]UȄ=w[g`J!6TJ9¸^-X`iIӂ ,XYł =FSNNN,ɒd7Q*` ,X` bIdI`Z` ,X`Gd- ,X` id,X` ,XE`ZE`Z` ,X`"0-ɒL ,X` ,"0-"0-X` ,X`dIi ,X`E`Z%Y ,X` id,X` ,XE`JQRgqd! ڳL ,X` ,"0erG[5R]7q$buqók7"_YꂕY.߉, gG;LqzuK9!̿ :&9$&&@IUJ_ĨdgCxS5'(ҙb|c5+vk5=4Uбr1;;g[cfS- VѾz֮tZvkp+Yn ,X`E` LLDq! 
WH~ӁH*VT5 *\R@7'}*!JRuWOX)@02JЖR4Cc _`b*dҷTD&GRދKa:Z|Bԛ1s{oH*O&c\D@qSC9\c4*!;-Ol [?f6\%ϓ3  K3,2s:ζqq`#0iYfvf$OO291[K]FQg{+LLq-@r7*ޣFk<'gghA.1=3ǙD Oe-G[Hw u禘yINeVu}L,rLo0>e)r>[o6JVITM]Lm^ yK2\]уA+RJg[E~kd!VX>dq~U67__bܧWr GS]y0wxRP\351 +7GEth1,r%a N.8?a~n-tF{K>\Fe[9ۻhhOpwOc~~s:967ޜ`noK)^ʴX` ,󯔤H/t~Zg\^3ӕ+;f%N+|-4tO!ٚLRZ\(<ƖmEG#)4O0UOJB"5܈ogj]WG+6 =G[G7-%6d񂖲R)UJk]kW(JN֦j+J=k024-9+ Fjo5'Vi^-G+gOP^oX򚄄4:FYƂlnl:C+}kT%]@rL cˬDA^21om)ѹ~hyƕ (xCJd*]ĥRHEC 9OiQ69Ul-AEEaY+HxYu0+C:DrN5uY%&/|IM)Qq,SúZ:+)&;91j 0O":Ņcb)%5 "- 1N9?ɾ< چM ?!Ҵ(|I<qilL(HHn9Bsurl̠e`Jx#%ƗWyOuAIN:1- 7-P[B8E%ٱ$ +FSU|iOSmqUh SJ59v(c}>G_Q;PC2_~ȸDlޜL~Y {<3P`:)v#Uj8Z+R$Fx>}=i#+™O?]:ƨ%%RnrBl vFqiWvb+nckӳrAuNy6V8& #+hNpɇd=pehZn N="2x8\LC/s(䧤44D_W:=HdHfz {Fuc6 щ錎IV`oCcxNcS.`➘ن4PDIuyEyQ/mm'њud FО H$/#=WSJٯiBq_Xh 8:fd~'Nhtb|CYʜSt m-y@K9%Һ@s`E '{3T7S3VgHX*u"iL;hP!')qOE<'2ȟibS#ݏ-ܣ)r'<X;S_un( ػNK\0MdŤ006IVR sȨ@q>_R&{2,X`LawWȥV59{czjCSC.4>I\ɘ!\9Rk([<~ fq]+ȯsmr$G ۑ*tw/at댳i<^8еI 3GlLbp^qsG r=~V2T'.-NfeFRyBn;eqp"g{ Nuy%c2UXncȹ\LOvx,Mxyr3q2@@bI._bҞGQyWOeU[sk4R7uJS*>~:adyQ0xz}eՍ%Rݕ `:%3] E|dLNSe#'`$;ԟҢ lXؠ4m$+:#|1UL33P R ?8P}X9rK N.*7W"|!u۳>ܝ!%ۛ5|^JA0鸥3\Fq]5%OSHI,ZT"X>8Rù+)͍uNI 11\KPF7lJ06d-UJC*n~.d<^l*Ȗq N#?և6xֹ5]{֏S)t],bIksƊTG)-'e+Ƃ ,X`yT-7 W.Ѩ<%ҋeBSxSMG ZR{RS}|: Xq-r8W[zFU|li_(? ݞYJ(F1p8V[4Sݙ8ئp.Spsq`FVqxJkc,vʅG[o_m;b9;xSY-;N`6??Hq^:ζغR;T_~!\LM4-E@>3MxL|n 7+(F ՞}B -S*R "08s z ^V ݠ=ߠ8!>½ !R <>TE[̤)FFhMች/D%qnn0#H"X?>u6+7 c03Rs/BI|],L~_Q'"!؞ RG(kCb[O*;i)O "@dS_(>N8RK]=͙dd1dd^W?^avm(Rk)Ib`a "#HdDJoRݻRL]#jw*ZVkpkb-}GpATnp:hBBBI*hjbF n,%տIM[%.62[CU<&Kؗ` ,X̿!"GlrED\ml!WZ# y؇r-Ӽn ȎW𷱧k|`7;ڦ.K5U;@E)!P*dhzT2GoIMRpznDBĵbwXWo/9F3_s><"AD)vO$7B.i(5: &,X` WHf[2.lr$ǝ'N#Rū/psSOWv14\sd 0qew{ws`Ggz֮1 revՑj~|Oo?m`Lk<ъ =[8<.Ͽ=k,>G2?J,_p)V q*UbKmdӐ+lqPҜ߻0*g{Tή6p`yhY*v^ **H-/ L?Z2jr6W7{#>?fyaiV6%\챱wD&͝#A\Htgf7ll 2MasV晝[`c}C.[+zW"791]mOq/\8ȑ qHwZ9(׽xg~ctwҠ`ӈ-&T ;$D !' 
uvkwZusZ kih~Wu4LjT7R#K& YOFLTw٥ @ .N[@ddx1Jcc(!@gL]tww#M\?2u~u^y->x{݉bҊʩlvj+͵N{yﲩ{)@ 9}T29d"bk'#dr}_j޼GkLuL'Ҫ=ڧ'B0@ oB05<"L@ #Sx/` @ _B0ED` @ B0`@ ` )@ )"S @ lkkO玈ܨSk#"""""""""200Ǐ$ DDDL@ @`266Hgg'O>)@ qd2 @ @ S#?@ L!"B0@ !B0E` @ !""6@  ȹ}Ե)""S @;Z>G!U|z[*> ?USFǕB0`|3h蠭n R @ F,ÃC+xRJA} ^Ac}s]=]?F7Lv2oQyutm|Yj'N-&)SD敪Aoؿ)? g~{&mew\ aDpZRO_cLgcDݔǐI{$Ŭ@yLp'єla:C]ܨ½'( &$$X $4*A9@y5:F,o!y:⳴]'*<;Le6|!>>bȿ\))MXp*6Ms2J ]ǹ2TurxWoԽ%Z p@2 ;:NbFP;`)x˃2(y|5Alq/gS \?-dgj> % D,%T0MתEĪ O^7"h6&?c[x kiڰժ׬!<uSR?iZH&R[7Jf9^_sq^_Oj9j3 `l9s&O[7~~]~pai(4D d|u0ͧq> Aw&L2V>+2E[W2k xͱ%rk&/&.ēz8{၇1ȴTЅg}|(:l({15n&3i5jf(mAaᵘC) ΚMXuDƎ8jI-@_!8xs~NV-Gɛw= m=KMuM򤺘[|56hhrpotlT5 $dP dO`7U tnV2o2^|<.OܜF''17*6m:D+'H ^ݱr'|%5DFFśذqI1_}~ L8~!-)x{qZO\# `8\}ظ#9JcX@ N0WV23VZ>Z`H"M ᷡ>|TPNjt+Nef=!B0E`NR<|O˷I[۔ts%2筟|ws.Kw>Nn]':?{KlJZGn>ǎgO ,LJH 4*lƹSǖ %mZ;!)`z`j:ioo&r3)+/#uFj 6D.AC݄-ES;Щr0>L!759y3ݶ- Rv6s@j!wJ8KZܻv5)$}#J^b:*c*NeQ|w+9y$Ʊq:y@v1[z|쳥4!35ƅeG6y4=J}-*+/sz9зp½&q0V' Z[]5ʙD9LNԭh3MK^@s0unm9u]E0Wdb6BMRk&ZZX[GKvan&atv5چy# xYjH}v20b 5G.Oi g*[e=v80.\1Jn<}eSx)IBdJn53wJOH´M;ӸFKus,tp[ɭص9s'3%8SpٷcI[pq/*T;JB:d5A.q>k/پ( 1[plOH%jo}SSx?`GsU2JF2ƕn?P̨RȨC7ڟWKa\`#)$śEYĩ+ut>y̑DLe-WOcsBo"|;{ kYy)/WTI}>ZSÇkj'AxH"XV;5Ym>E~t_?9{R+!51#VS39vH?_͓y8۵/ fL!"B0Kg-|wަキImg{If@P7N0Ycč&5mm[Hrwֶx:>\z2Ƚd- sf<u5L~E0+q5͗%EPDqw`gk01p`gV1<Ԋ2S3> ='gAzZeyx 6:z:{==-mDl(7hi 79|qt;3S+J֖-QZajc{,՞J;c11`bnOؚP<ްIqa'>s pN0%5xM0Zo͔ '!޸;٠æro`;rb躜kٲI͗7aalĊ-9/ɭR@.Wvg3=`\dr%t J%5Wxbu=\bٚqÏϟs `jnT=+fLN]О&o-L]hra0g17&FZ t7`1B8p4{ê8HBc$ (.LK0;E(r\<O":̜BgGr+.6XۻhtCYz}a.nǹܾ| =}lXx9r9sB2 gʃHv*#0"W2UKlzz/cvP;hMMfD<6^|QdUG>9acɊƾ_B62 mNcVEl@ L/O\5U~i#I۩y7O66QdqFG )jcF}~/{ͪ2J2\pc-ϯ&Bjy1op5P{o}]iܯx4K)SD6otcI_? 
J9$倔?w7K0EOOO-MuBy˖f{/$+IMSl e5_'Q3m )V.o .԰x#&i6N;ލhi]H[)ƬȘfb|Ե뇿4B\A3ϜŒ ز 9~n$fP#kD МEsika쾌5<}3Z t K9Ed+6\[N֞tM|ili }sw9ʮIϯztX40w\F݀ f-5?&͡hjl mYd{\UyX̚zcc+}ڙpjyp7cљDs92>:@{p6ra$wOq'+* F[/{woX #8g62[MiiDJF:707kJm,ZN#4kR0vnoDJIѡww@OO]C 8͠|T,]-L\9TހY`.u8k00Ikl4g3~'H{[9Cۥ\GoS% pˢ>&s7 Cus[%1"!ٳ,q6}5z,=oʯ!-Eas7ĥ`)xJ.Z5uldn7n)ݍlp-khMN9.…֒t5O=*vI8A?eɽpcyݏ*uKQS/2sN^?g紆G--ɖ?NBZ{a++aY]))0k"ȺX%!`{>3pq%C2+jQ*ԝH@h'@V!Ig<6$.H{/5g tY彐 Rm]Oʐ+R=)z<|ddI.#S,Қ{~$\SgSiMĜ[b˲Z9)Gl%=(J =tc .{KL!_;COx$҉II0SLo1}_?MYii(.JmEIUVؙmNDT c dwa^8J@)cjk\^ ˣRh줫+Pm) f=W27Hֆ/}Y% GL3vȐuwKsv38&ffd9.n(wobl;nq-g73q >>PZ$ JXW4daHPW2gA4=r0H$: x7lK\3v6,MH,Hܨ5aG%rk5yޞ̌!ST$8I>͞ [- CI_, @h= V/Dc 0UGl/z:ΓaruSG}d$m:l&`=fn 7Xd55M)@ \?1Nf+U6F`a?!'bPӝT;<:ToI;["-16d^:Q1)T?뤽KgqSbnoEw& crPP~l 3 UTضu}s,X.O ClbL ȻY祈UTM O6I\P@`>ۄ͠ o.. 1\=lm7T |=$kVid:ʣAnfx Jd{NcH6[;'Ns7"̵tdӞ'Ko@Jf\m^.A0Jv/4#~V\e\y2@ x`by9}wn;;Z}Vz8UOoKXDԐZ;C 7I#Kk!62<>NCrivw0c|Y 2FJT)"sbo+TJ%%3WfKr9<FߟH?'fXEB쵨0؅ sCgR )=ª+9Q~qP7d"ra_77"paLP3@%' ǖf^*ɩ=lh{tV`ihhW"k̞ 2ѣdd$ Iv=}_`*ȌYfR(<~q)-)aoR(n1sfs ;6~k)|Mhk}N8-&sj fm}{e9n[10"js:%l&0}J&372/hGeΑ\j/ _t$45wuY7W N;(++`sts)hの;*fNε&&PrvV$R3fj@vrOr3^h!a6ǐIhh[rg9bC0.c_9ubН׹|(]X-a٭=\oqfG4ik9Hc+*L&܌1r^I hRcAX2%N}hqWO^Ñc&v52\HOBs,fj!|P/ 8_F}Xf:o~vd]n乬J{Fq2ahlzLLX*s"Wi"ʫ'=:7LLyptG/wN$Ѷ/kp11 bK1ixzPUG3hge7T_WwKutY5}ug>nmiUcΣտ~q#G24g "\ܻ%^*W/6`6LZшo-d 8H~c(zqe7=k'>߇8o?Q^9̄GP?sodO{غE3r?-E}z/t+1ycY`[Ows5ae n񺡫.^޲)3g6@8*3+m-Fh4o]KR^.y+_jleH4c-wc&"({+r]h4->ibch{)>>s;WrC<=`^f ;!f!  a:os X9ޜq֟eӓQouSLF4znRhH1<OJ=ӎb'f7>p89'QX9Xm67SKK ;vkdL%0-V+g90 <~?ţ<ʾ;8g/DSf;22e!nK V-0 >A_FI\VsdOQ!v;nO![k'2*37{\1VhN1Vlnm( ]Db2Nyo8N42uDwwfS~[cuЈ<.6dcX,& xartzkXm.b2b2!eF`$H( iG)x4>v*i jr>CpZ{2Ao7N Ӎߣɕv}zqQ?jD[I('N#5.ņ/jK+{HCsٱ;$  4"6?$/G\w݃W6#N)!|pX\DWwH,>AKpzCq3мGDpqGG|gC_=F!t+vcQ=}Nl80p`ِF.NA(!QT+n xqOg! RWy3~ϮN_Vq1{GYӍ4D`8+!`Oѭ1<~Nw>iN(HP? 
ʿ NX|IvGI!esF}?l裷'@ q>oXq%ݠ' \h+X46|GQ(r]]}w|}ר_9yO8^~@6H .1u98idUKna_  -"ƇE=OqwV9,fKݯC|C }GG8q'O&VYOܴ]<΂< (**$7ϔ zX4mDhRj#+ŧczN9x<>lΠяKv'|1Ck^vw7ӧ.²QSgg)90xF::8[Co_gᦛzy~&ll?N.78PMt7%g['vuiI:wTM5(KVB!S;4-|^&OĈ83YuoylQǙ<5!?OPKu{ɹbɽb'q"_SOMh4o8F8^Ul(UO4!{/U2Qq9R> NhWAΆ ~w< *_p|>,:oZ]l*9S&)df(O1ee%6mD`>mihcZx]lwKese?ܹL1Y+2c謢(c>SLe\N5u#3n.S gcu;)0k9#k0}LkpAc7-)dQQ& YJȴ"OA '+;{/b;GEgٙʅ9B~+O+z"QSN%ť0!-Z$%3ͺ>k룹Uf4_( BPX+sjޙ/X{{^+_ɻӫtԑ*yGnxlnN'w%J~vrߋ;{>_ZRd L#đ\f<8R`6ô" X|%ms,}ykظd>P\N]s;715Aڈ s Y4s,^y=k{z1.Pa}eXq(޼^z{?rt^hrk-+ngyPmp+}k)ʪMcwe\ݕdLKT!ZB\'_%o1wIRtӯ-FQ ט+JBP( %0;K kxzMl}?z\1 جjT|VJ`Nihh#3:} %54낱`+ ww wvSf䰰SH_KgԴL.($#c>6muᖯu#30ۊY0GYȍM&??rs~7`dΒ'ܖ,Y}x=-~ޮZ f/ȡm Tx%[18䷤ "07vBE&Xl+P( Bq]ffYP-3oPV~_Zet˫-K{#Q1W{X\}= L%07dlFf4/^k+ 9jc:)n):ڛ9ҳ<܋,7ێ/ ~|:\B`..0l3{9pZJx\4֜`lߵ8mf*N{Љvu{EwK ζm;8Zрݭ~ e]x^L]M/eh'1YI=q($bDVnH;VVz@P( BYw/|\*u+K𥔟/K-SLǵ]KO8~IK hn+9SLn?Q$)&~qdAIdvN+̌mYd-LZe׋Wp$~r&LGI9z[Mא>~#Xh3Ƿe=8T'Թs[^^~}ߺ:6mD__fT?Sr]P( Bb<|{NM?v:J`Cd)ǴIRڻ~/Sl,h!6%Kf3r|Vv\Õ77O+ʊcm6fYċxeV,#+yݡP( BEx_&E\5h%D[f6ex8D,BP( BzJ`*SyL1)wIM4zOdɪ^$s^ԄQF޴3~lXŌd?kcec6U][c:!@a1xuoѤ0NZWJȯpB!z!`uhE A2O)ubw{n3p݉9\8v Nh؏㱔| Zq o 0!q$! 0Âyg~͍_g8Ƙ&dIv FRU &=~Czg B$@W+M $ln?[a1 f8@/Q@sqzIaJ\̏@8ƫIs}{-;sug=ͩ>l?ý2J sYg?z~ـLBW`uqcb k(87k|ήCԏ/2TaFR=,N!@<ΞxcmmW`*sm/JzyH>Ze (>n$Ռ}'i/lG_Ooptu'TDݭUvɛ̊h/ 0^w_DVt{m{܊54`ej%1'09Jޣ5:W%=/GxKunP,al`Ji(c uwFX9%3({7e?>s+LON391LKk3+X"' #xV5C}v ѬjNfw)Fimme|v IfYP.3J[G?2 Z+ns;wMEaŻd .4$=-@2=qs͞zż\ k61mU.1>h?-mLgPV6ư$thlQݯXUmrbAZ:YTtuu5K8|zWOut/}勗Y- !uqݜaRE$ Su5Ϻ }/U#>t? 
Ѣ A>uNby4Q$ 3.illi*gfnv|u]fufz֯yiNƞYYX408BanlGs" ;41#5Vo2W(eOͲ8̬׃ 395`vebHjUzf)m|~5 Hݏ\{LaNCq 3$Z@q8Rft7&: 'uxX5-8{ɞy yQN/ώ[3ndIm7 [+BofEs[,0uBXښaiJbU&Gbcゃ>ASYV,pklMN}β 'զnM90s#3jz'ͺErJҸr3R ̉hHFTHWҸq1Y˽hlr/$e~[c ]0zw!5h4&Z`nN甌'ty5nl/]bh*ԇ)2anD406:(LwFQЌʱUrؠoiShdc&XQ1IAJ(a%.U91'04&gQ3 ]LN]-<匱SuL^[[whjR\#>[{{-ȩ.f8`hd̃.fzw[[)͉0fc -)YUWVo6]ؘұ֮pmgLijn&!< @= N=Wocok#V1;?[ll, gd$piiD`km9jRB#]:|[%tc|!b9sҚg Mje2 Sכ(ji`sd4LzoQgpܦqPNieLre%)16 ϐی *U\}x4 @v+鄹+旰p+KxĤАgkcfW4|3 FV t$~P˽z2##7k̍woZw{*z'a0C}D6ٛ]}:n>il?ċbnF0 OwMF&[冩ƶXxݪ^\{̂'\}ȇaϕt {DcU>/lx̥I.:/'/Q19WxGE#Ή..8 Ye 1D/NF;ujV7G#Y\ֆܪkDm5xC.72B1TY}]e\obWX^0$+?ScC^k 弇fk&<@^\h7}ϴ]$̼ b\8y66֒x AHebiN 8{;Hv$,%nwWD|w8v6oḺHEakkiv<#gif{8Xani-伥v=8:`ȝB^Ds5s>P.cmdFU_v&?!Bp?WeP:2!J9Z@Y]fRqG=7 o>5[ڒ[^ONm{3{Crq myPUbP,'xj3`~ ms7{ČAl~3KjGͫXt%`(q5Vqk{@t=S )XyDjC< %? [y󲳅h'3ڗzM|'18G3v~ǸfmHy/p̐DRf!r6~5ꛛy% -ύcq%6SQqҫxb{M=p͛#y'j֪,ǜs5r+¬XxD0\[ُB1p!iR AEŸs=X,W3Uc=~ɳܯ{;F*h4Z,gn8pԃ8l w196K^&3=#2љ?NjZ'"I+mOkt;~>a! cd3<2Hz#&.dq ō]vhI+1r|7`jEAV7k،F1A)vңcd'z:Ȫ $niˡ!~TH.?BX <(\+M )|`eSGfFq׽dCCallà PxƷ&5)<#瞿 Qi䧇I>P#~NKstwk0oq9ԼZ)cy5tuBY:J- 0B`hԨMVjvoWޤGZ97iƹZGZOhs1Q ."x ^`ck.$u7š qY/-ȫlgegܼqru'8!@rQ*V^ Y~]UGe,Mt>4?3QR!NcV\R+zri,H+@9NdTj,Np/,qPPdb[ypk!Q6mn7^^xy7[4K}Ӷ;Xk-@IN+iV%GT5)y&YoE10bh5ؠ>g˷{ '_x}GG<==prtfr*DIsٙ;96B=qًظ?x88:Njs)ӭeDI&(oM#T?}ēn@CK<\q !R{w7"b\T~O"s!j4U`Gtvv"(9*5j~5r%SEzJLFetel/ݓĬ:*ͻu,$R89*sq3d gGF3+g? 
\=qsqZLK '2ϛy u͔ШHU꫃1)Ȕ߈{fL<>ybMvUo>CV@3ݨ:4(f{y 7>C"dmiTJd?Krtwvc6}[z߿7`oyvv7WjR'>ؿlg\Ўz܍J@x+k w@!㑿5Q,QFVY~(ݏjn˗kWK*d^>RIR4d0S{388.!i>9F6Mancr_ds)zQɂ3)D WXZTg@@S ՝.m(k_:C yDJg|Q7aS"mw ظ>< DG.$ć9:ɯ9z[b;۝_`gJ{kǏp=em 0B`s;.}!]aATAg%kisQ~goVkN/j.þ#X!60 ~^<Ϟ&bBN_ mixQj{w@SL!a0ٳg^з~tm[@ T8?Rbs-Q-[بXQũ=|{-oa0@ *`tbFR*Afav)^ IugdV07f2''߯/ 0B`:0Ӕ 03&2)iIn<9rؐۏK9zY~>[sfICzz&ޗcu]*71dHĝ{i&G@ @L!ߏ< 0Hs9KƠ{p7]-*@.,!ڈ{O^v)wa۩RR cݸhE9qN;i;w≍ŭ`gn'S^OQM+3qŽ@cQZR.իɣh._r!^Ff$H}(//#@ )$1~M?r'N!mt~'qx?ycG%~hsrG8~$Gn/ܽGQrjhȻũ#<)_[Fw?RiQw'KS @ ` ͆k1ΦRJ+gwq?x` @ ̀vڥI]:vwAEEŏGUh=ѠQ^52B@ SH;w`ll:YZZjkddDSS?X f8ylox&(9jP}(Ȏ GQ>>$PW ?d=|˞e~.Z2TFq;:16PS^oB}c?}^P.X^B%yf<#r~SԳ6@>^Epx@ So0 $cCbb23DU3d?OҒ~Voj/%9-9R8CQ *PLRJffִ/`3)T 8=D[+4do -mTGRJ&h 99 23T9n{FSK3slz^ )oꡫ99(y>l~Y$g=gxAFY]RMSK yOF~A T05K myic/x5bTimzPcTWEZ2C8iA~Q=hV:{h-!+3bV5w^?%-4חk]**#N6( F5xۏ̼xړ<Gz t!$ (i 4%3pfQk^$<~ٳLy*7A(˺Wn|.#~*s*(FKw\ss!WozV)JlJw͹q/A8Ē&!@ 0drEҋ"݂XU(o.J<헡Tq;؍Rܮ2ְ44QNmNnM ILNΕ;OkBnIW"bik כ0{7H,..P%)sqI$Z4FjEz HF9mp'*V/^R][qxYKhX 9:rE}VE1͸b@W}RA_$iQRIbx@ ` FB.WRRFR(H&C4jUmB@)!WޅRh 9h8JsBfT 6vO]>\h\*IQd4ͯQ ˵_m2TjF~@B75j՛ViԦBݪvU sIrJ-I!Ŕɴy*)o\;>ZR!)Gオў4}P(xFG%)*VWFyOTDۦvײvPm]l̉rmSh+{4 @L!` #}v F5}L-Vlkbhzyt5@ SL!a0{h4՚ j5A 0` )@ ) 0@ @L!!a0@ a0S @ SL!a0@ a0S @ )$$ @ @ 0BTVQTSH4,,,hJHHHHHHHHHhyyY̟.!a0@ xDVHHL@ 0BB` @ ` )$ @ @ 0B` @ ` )$ @ @ SHHL@ UH%Tس_N"W@L!!a0@ u|ȿr{4Hjqp0O~ٟO?^νi_S @0d_X+ѣOˀnMݯksmڇw7mTwüHhQ8Ji?r?wػjԮAa00B`U2'*)M?*mVddb-j*oQI[RiТT0=6«E!IuWߓL@;4 JJ~4M+B ڹ+U(e0sKr~2ai#,F'n* Vj o7=+rVVRFJb}wrToTfjd ~mfqf!QSF{WXeYZjJCPH2Tl+F ^h]XUJƪſ_xt{Nu7tQY6kf}+Okc:a02 }/<ݓ/1K,3?-Q:\/?ʾ9xzaO$VΧ8mAi$Sxr~:̉3ɀ3gpR⍎J4*x'гgQ&+ {s?VyysM |hP!)ԗ#oKdY㺏4Gq (5,)_* 79~D)xD>b^n0Oԩ{9%i}ɉlj|5Tssd7 }Le2y@`S)[ux?*.du/ܺ_ˮݬ?}'Wa0SH̹ *1o'(?keF{*:Rʉ_qiů?j}"yKKl=Ęz78w_4{[{la\} kqtOn25;χbjZ0~L5r/~\/?t&q!j~% Osؑ%{4 _Ϣ&{[˃7 >۩K,VMƀ[HKG}zVx9}&!4z|u7nq$]侭:Ga:|u?Q樴:xg<%מ=?GЏy>Dp{v;KEw:ZǞi ˃fۧF?Q|OXD{} >+lò*Eg>לwB`#'?RT믷,>շ%9N&gDtQ϶pԙoǟ|k 
6-̶8lłXl=H瘌6Λ]_N(?7Qΐ/}_3/KG>KAQ+7҆-dDpƒ¢|.p,9Td_sᴞ;E%D{s'#]88[#s1ʘ"%!))'~CDF=jާ9s{P`)~\[ώ@L2ҿ]2X3Nkfi~Yڴ?r~C2a[}ҶYY_ka0sKY;Ym}Xe5ivd'_rXcǗ`v+, 5ncOTEwϹ\`ɗ'y^=#S|^#V.T p[>kIXJG `*LM'm < [nlbj(BT@ad?#2ЏI.L~:aYRo2 \G{ӯ浏agÃ{1uZFi̿_c\gPV{\hRVsuYI8;VXHtu y,iG6?H\HyDxYTT9Nfp^ 78q1w+5cVzNRk+V)y=;-Ov1sJu8Bv|戮|' 3S]1@D|x=`? \"(125nb#͖M[Ɩ/n0_"l'&Efy3SN3/tǒt `rikze͌L?T_ʊLAoS, Lk<֑>CNVCt.e]sE:ucl5SSX\KfABAr4zlbGꋗ(xoٹ{eߏ%&!y`ujbaHH%=SVzRv4p1WpAǀ[tͳZ2Zrӏ J4 WGO'h;ՖBTt81i }=+kQ1?7[l-bn&pfv<)"-VfG>dtUl',]zla:40̕3yT؃Jb:.2>nI9=9AKrZkɳO(JؠoyOR>+c3堒So .z9G(@8ٚG"s%X;~Q",_ܥұ]Io c(&zC|?qҖ/?>$45[:2['tI}cLvSL!a07РTQVYV7=/7޹4J_3iܯᗄb/9s (R20Pa _}{ f~0u_mv΋[wRZۉBLoc\%88!73w e|c\ŁC{H e@}~15op߲ | v0?([Og+.Zbg[LmAGv`F>'w[=DTz ,ɧ8vDŽ8x2N9,#hl(l߹Su~An\9ݣ܈Y4vɔ:DԼENyucv7On'$RЉ\`3??fr)m#x^8֭[9|<.8]8)gbQ'F{>\:<au9lJU5cL<ʀO?G?\mٳ};Z==k`~yl7Vɻ -/3[>UzǦX]!\W201tc{_p Mm\uǟo_W3v|9||չ *|%ɞc:$>dn?˕/_>g^K I# 8wQt<f/tL]鷠r4y*5QՌ7rr@} OW2GY42Wĉ7w0s( 8ÞTTslCϾ$QMiرwP %9<0pL2ߐT;;MD9ꊫgeE2Ci^Y\ ׇ@c_ mXV(Y+-k%:[`~vq=77w|4E]3 m6^/ 7傩:'Y(F8De78ȳ9J[$K:yݩԲJ-' ]YUkp\h;L"w0RܽRB>ߍY֌0;=@ƣۘ>y7&}d䚿_-?[dx PrՌ1;nx+*y;l_97a7,p!Ex6?loZSfv rn=۰~r{V4?iġ(ja00B`VfIV[$evr֎KiVk0/knK!:zFdth p3F'ei ?dןaȼ@AE~I*6s]V='6'lhfyy%)>ʼ\JDTR+3۶&ٲkIJZ0 ]8Q|ܪxc0{?__p:ew q9em$i\ٚp x?1CGfga|nnq7A~Docž㺴ML{c0X7@U|GI Ih4KD1ѼyMAF bbcp2mڌK½8B O*AF|vHg]̽n2>'׮ɋWO1ΗGϞw[ 8I /&s"'^/[>k`irX9^rj-"ی؍DFfe6W^L.ʤq,ǒb;s>ϗ{/ZLgGY\Uk_;c>A2_rb/_mtL\Sv)VI(ޢG $=dL`Jm/%\3ǖm_ٮ36vv,L g'cr|)v>nX8P5 s\"ɍqϿQAɡg(a3(|6#[ܾ휲^'^up릒ӱV9G[3!PcKRQ!;^&=cXI7oLC̫td㛝d'v\rˋ˯QJ>޺k{q/R]Lt`|N888`miKVy56Gٮ@YC|;OHd#T cwv"0,'ϸKlj~#RXh6zmm`'QjڊЄLRhQN :__?Xvz67yʅ Ix @:[) eU9KBz_*,~WR"9c 67ȸ7۷HaDp n?m@:H7XagH.&4[\8sОn\})k/ThQu`g|;-<9}Ҙ۰ x1Zñ/ۃ& oYmw͍dXG1Ci>r'&vq\>~/>LjeLNCr4$G1CGZ19 a#vƆ)5KyטG-oVQk{%[pHLjkQQ8ȶ'H+C({}&NfM2h4k͌fscTQv21=0 EwϜu͜+ٽGdM,ݙ+M=kX ͭ t6+;:_ {z7fQQU]EmS;2f0vF}Ks47IϨ)^ -cr,cϑVXMSSL:˪A.ލ-jH㖼(‘,e)&1tdIϊ@?1/?{4m&ݟ&KMcy:LFT05σf0jW,aqd~3jXыAFլN.Rf!)RY2o]Ν%=tM=(nBW}r31x /h㒡 1IϙXP}Lw \{Zd~isgh䄩yBs[k|E+GA=EQ&?5]WT=JjWGG Vdup9σRcr2|xxK*۾%S.JX[e(TjFڊq3=4g/7ik"=l15wst]x0(K|܂鞔w#!whTtIc02 
[k[l<"[`meM&0]%,ͱkǜӻ,ŰFJ&V)u] Ty2cj]{_& <(!Nz{bge%[ Q)W)ϊkK3BigG Y M2Y uJ:JF"jfiw1ڰ9@E"r cs֏>8G;q927G:8K_-h  |+ Ps-ݴn'ޘz0` eH:Cd))OwXd}QԾM6;_jRܛ5_z5jfceUm&UcYX;"ohyY[wR喗bhZMWkqk?^v)oťk) QPxӿUmߡFaˬ娴1&`=8h}IZd~A;gkirԿw?_ƶ-Ֆ]b+h9Wk+k?6NKKs67vK αiMШk01o@&՛mߦ_X듆qH ?c+߻7s/X v{kZ}6lޗ2G׳0j?iWWߟGJ'amMWTm>ѬOx; ^?~' @6Na̽X >~>.,-kjk!4ˬh^2o6b[ՍuƿƜimGoi6Jic\7_=2ƛ죭ɫюeqcokǷov653C\xwl^lmvMص&}fgϛsI8?kWhcT^&W~]byr}o&--f皶R1#k?疵E-?aX5ųxZCw4!yTzAEE|> ,`B㌚q:虥5CH" r5YLhn&/fmӎâgU+Zd ) 0<&;8D@7m/3 Ҍjk7zas.bzy?99zP7fe^$413ۼ^Pb|zǗ(膫Wk4_7C߻g5Od6GuaW.?ڱz< LrP-Gk5'[ZeIʟ#N-ج]Wx>Q _ #Ry4?Vo[+mߵ|c0SHCJԾIv^6L m8?k_S@{wJvOanBмq4T*4~zn'w)e4o7`Y1ڱ?2]Ȥ?ϯ,9^̧&;_&Ut~йv9qj?6䏌džTȿ7zΟiT^.15.sr/߾j^%*a0GL!a0.l $6zZzKڟ*A@ A2lRĿr124Y?TnJ̠Q6Ul-I/ӦiIƵ|-OJm]I?MvΩB2U`CL!a055ܩq Ҕ˄RGlW}:@ #"[cIed!ߓ&}-Rm]ҦmYפ( 0B`0UI+I!5j"@ twа/lq8j6}.1*: ) 0@  LU!ݽbgbuϛ\.v/k6KA=9]:f69ԓ9ys_KcC)ɭ0BB` @ jm1Ī?1)揌?6h{ڿ$-ҵҦzm7+y6i1ڮ?7+Lt+?a0@ 5pbk^5+L o6퇏~z֏u6kcLfdhzODL!!a0@  trF 'c@%Q]+5uSHHL@ J + 5?l*h4)$$ @ @ 0B` @ ` )$ -MMtt Ԁz׽-)P,?8Ȳ\{@7MR1V'OAej޲27( 먙|=,@ c-#I|?47J*13CR)VRLSUݺfݝ1)=q<2BkL@!f/yۈ g2֤<р}Ryߑ2`Cx9}]vALGE? 
]Zjȥkd5pu2 5YfT(k#'#^J'ՅTAm};ḻs'̍{rï =R:owuIich~<(X@{ߎՉ r2)lfDKdȕOؖ-25ٵsBMi.9%L}!Jds=>ٞS*斴SZW{Lsy-sI-.!7w6Ybnbս l222:CON04 <H)klL"ơ!6~=?BAV&֏y~r+m@Zl/ϰyO(ZGíI;9k(.?zr<:Q!GB.;̯0!YczuOObBgMsrï>a{zzB}Rgdx%wKg8>flt9N֗AS$ @so=11+CѰ<d~vKH.O]\GA6if zQd08$'Xm蜙t:=V:hHms# qt掾 `ΚPBtN ]$g'H+I/Nȏsr\#?_ l-p'&")cg {,N}N={w$2:[k#lTj7!n'(Hɐ m#;` 7cbI~ Jca +=#Y`u9=c9spTBYwY,ZYɚP(~B"!h.(N`hޙ= 켭8f~'[ΔѸ8*jslL|cR( å182hto-5oLi= q7 kGDI ^r"48'+sLX8P>R{KBc٢kDY?xg`_DA.sg_ݏtq?qAu'ﭝ +@/CqL UmC$;|v̕ΠV/qW#А.:Jq)ޟz3w.#{S-s6KS9nltTZAqFKFw~!ɬdy2ТE+0MkZ9ۘ7~W0US[yvO`4!̅jC]?H5vػ#?Ӈw?#ĹZbWH^+_-A| ɍ$1t- tg{]_WWkb *SpJ]Gw`thLWrˤ8ٻ2qxUG.hv͍BO(N"O(gef2<XX`}<+59f|pbt%y/9*93&Ʋp_suDw&)*>aym>fbhں&eo*cɩIVw(6S#nf㎦|:;z99Q愖tq#s} [?,̏O43LK]K(>LLqe5t qVsښA~yG|Rpie.Oգo;"yˉ0WdT6qgqƺ:&X9@_7k7n cnbc%؜kN.~i#uVdܧ$:^mgL@4p`u L4 t ۻBۉ#kJL'jx;<1bL}4$x;4&CCJ}w̶B>6*n$H魊7|s{\m#2G.)#M*!N6{0rcZR(O)v#_:02\P;~@WG*.VLsxs}]%3kmuZu4tpp&KHos`_6.L`k{A?Bz.䴎}ܟ1ËL-q⒕N/Xbqf1fF\W hؘYt_0'9fCL}6bܔCϯqoaʚr )8$#ʕ[7W^אھ⎳ F )58VP]2 mVpJlX;x[_coMӓ4I\`C6-}hF@yxW#u vn`N˓,n<`rln!?_]ìM1aCk=1G'[LˎXn& iG#MŽxvF)-zZꨗt}|}FCyi*buRTíOFZqfM1$mN,ai95Q"[8uPLMj (xV 1xOwʦ3 6)fQ"r9s}RjkjXAQ-[bfjq]# (P>;D]m-KV&{+XLMI QEV`'ִS:n~u|6Z;Fۘ?wӏD\?Q$wq|&0u~`^$gs ( wq7K " c `WoE$[ hPb܂ifa~SzHL`8Y=EM;:vdT1r)DJB*bq~$jD$UuDtQe{{2R#07grsX_fhTFq7.{@=)wN)cb[@cC06f|v G#%' QXQ"֐ M^X[{7óTGz@j/F)42p>a3%cJIa*&NOyLJzLfQO"2ܿ3pL=Qpå.l=غV!5">zzɎ) M\ i?C3=sWbb75Z9Hլ'X[0yoFޞ.]\Qg|C4`oġhѐ\瘟[Z2i1fAhT2;!ƨ8\ LdChb76X{ 0/%)nN$genmNZӍk/(Zqp# c#sDr]˭~ߙRR/29hNw{v7?6fR ^<7"3#SS#{Xj@I롯4 kor1ruZF~.AGy%*5Bb۟]7Nќ,b5͝- [x 7dhTDYV6FC#3|BsJ哠9$ƀ9j"s%.*FW𶠮cx h.JV:FKYϿ%!1s=̝IKҎRB"3[SJ>Cs% $`X#/^9 -z/)E}h%3 XZytt")Kx$7+d1qDZcⓀLJ7C-;srE#6Q${c;7>Zqg"׏(otlE }MhQۻh},g&`KU!4TTy^(E)%dc㷁 6adˎ1`207oQFU_ےcr08ؑY3̈́8Gkor1qv|PMH7s-ZlZ s11,.o_H?a|!{(Er-o=, ҈FRY|qC#LOhuGi \A`&Tae*[>~W[==?#ШP }cbs,`ʱ.׈u}$%dgW<'oH䅋7޹(0hI ٞf!_*re|xw&hpE?,'3.^"<=JGG 3ȯW{nq6u-6? [ۛ3k MZ9E." 
:J­ϒrusAAmCHҝ0w ;qsup{Q!8`fH;FN*[aV^ k\"bpR$=efSxҜ8Wc9AOh4T{v4T*֧Y̱B* a{8aւr,Ҵ{cOUC3`M{u a_6T{ۙ:)#Ӄ18t,gzWo^_*  ̄o;xJ6.%t"gyz0~mDdv&.^#3|f_ˈ2忼 Vs 3 6)яS)ϋuLD6})LPgGtE7Y؄ZJ" /('*d_KrcSX)n\K.0|[]{ .CK'ܱА[\T~޸؛TdK!߽0T_^ѐQ{rvZp$ڇFIc(ͅX9's|y(%eմ|f/j&gDG~3G{{*ؕubcGkiCo_jX{}]8GmCdEph>TaDiMR[1{=Un+Gώ@ͮ`tZJ9G T%튅)T1[X$0AC>٣W$>UXXJBf6|{XPC u+TGِ_(*9g5 ccEXr ~NV$?=0raO;J: r5"~Br_1&n>dy^_s/'8悃M[t.`vdX̬c9ˑlkHUsI~ N4a`&] 8K5ܐ`cFLqrv"f:|g )m"۝A`yQ4Fe[¸bW_:]R9 1M|ʔ}%=؊yx*ba"t,nREXQT")ES2o?'8S#Į q!X<̏3<4.~45!''Kjv]ejw0h?iM+0a'Wx . S7#>_<"*k)剾||o~LHQX__?a;so~w{7w{@cC\.5fX^B=l[G0J \'ׄ]Ѯ%}-n4$66Aa;E:!Au<|['vOw -wc<n>3o^qy v2멯&)1\ؽv'L ZGrɞT ĸR1 &J3㰱2'=?(G\SBTD&+'rwJ `hp鮆D%FoO|A5Rch`k̉tl# sqbt=SWUDRB "RjK50\坓\g2܊AaL 1 <ʏ7f",&%HP_m#r$>9'{3<ӎӧfG{䍞9ŕ$$029=?k9)9q>GgRWW8P3d &:&Gc`|d=$|[MI8%_/XADy#L0v`xw/5oΓA ?ڃOm% F>Σ3=ͱl掃2s JX=>X8e0ZK]*멩($9x쬓QGǐc^TGkI #ޒ`<%݌A`rә>N99=FDцdw=[ڧFqu`T bLi`3G+<=,ɑ.p[kgflo$Gk;01#q} ,rGȆU~y|gN{g;Iu\ޖܖo7ƒ6l=P"p7{= GO}q1#(v7зpfs*H(0+)*R>۟^oq$V"#%>x|4rl3=M(]h C'wb]]w Լ i1V,qzr5*i7 cx,ES},(~Lw`a%66N|w%G88{5،/S߸D@Ax~ 9"wZU .T7oaN!̵Оlsӱ7yIrx!p2EՍ4/b ug~_D!­l8ރI$bLA wvꤏw|TJ3k tԢ(5Ԩm kajzók4Jygme *n.XYYauu[[][\~5%. VVavO8=ecppKTg,0&'lnlܞZ`cE#8C1Mawo+=2"#Ğw㣏K ]3vl +<}9vyvi4V`\ISܨ?ȉ#qU`&< H/ )N2  E0r|_P=J=nitJ2rS%'0~5oE4-tc֌Bq1\Ȏ w`4#[#hmi XK jJ2=*,M)tRMmjq@ UI|?ں ⬭;: {Q2^>6K9aet4j)w-3;ؽ$0֪l^x_Z.x'URXXز.Sa]$J>l csx wwv݉)цX,=Q z3N5s-fw@ǹf[C6Ultb!l^kv/߽!J#?K(jk(MgzȖ҆ X'VC 5kNoOdy'.ty˳턘^\w>f^֜T|bmCKO5|H^z64bwŗHPSCOƧuv\aM Ϊ }'O5ܱ;ӊw I*kS,ݓݐbfh/{3"sTeJ %>V؆PW_F-%30$x<_7KFƱx| $w=bjQ(MHxeNO{ /u-(k!;ƞs/nss:{0`!ޤLr_E3XGAAES:¹S V+X:=15=suBUQ69EeV7Z򔦆:WOc{҂[EWP˲쀩储c.,*kQJ ɣgj:C\@aĄ r2kFfaL68۞(,:V*ٝ(82:-ʫ#5eddg J* ~nˤS\OinPGT4iX@V0Fmܜ YglqPoÙgTWQXTLIqyEun_ܛ ƬLJjٻTQ+h$7'Jv8YsvUUl_i@ux{ ; 䒑{Z,de;e|. 
s((TWJUkk3吝G]AyNUi7|∖6=Ri_cyхm_{ j&%7Bه6nFA_[͍_)"n.̟ߟ ), EE0ANn>BYA-}3(TTcz[tqTs] 2?1>ZzhTK&S.&hE\m)BR0YGvvu/һtllK˨~F6ﷂr!fql K(ia耾sz8W)ϥXőBJqJ_CYYI:S ?@\\DNv.e_*9٘ |+ܿ."|OV/^(/L+8A}"])@-GaK9h8ۘ"\'1nai˽Es3/)V]`C@p[3KGr.w() +_r5?VMa)=k1愤eRCaS~ڛS%8?XK 8Yb$ toq=AeI!eyd3)s?*dh^5+dh~2?DqN&U4y,Gs-wN6g)+ȻKK<۲1r)*@$ژGa~1eT893u򐎺:#?^C\MdefQVVHs49SY^JME=̬ Jr]iWOhV`jM+0T(۟J4|^J]*RɔQ}v\TVh@VT&tJ-ԹkJb_h?| S_4ot`gʦRmW]#F-p}ˑ߫sep׏_]#YZ8OT~6fwͼ[hCNU߯Mw?'T@@/P|aBn?}h"Vwr5 Sru}Us3M(+jF)nZ|>_?ws聹&nb] oOַS]!a vVR.5~?'2,8_j]>[w> E~T>|r}W~}|1uUR]yݜtF#sJ຺o sU+>4nyGghpz{#㞟k9s܋O姱R)+_S qZJ!_܃5T}4骻y}&]\aMk&Ϭ&vvwF:@#:Rē7:-Aa8; ,۱z~E7ZNsff:;;?988CbGx鹊~hvgf5Z3!  𿂓˛EÎl1(7hw}]GWhV_ -,uyo焹{>Hή؛(d|SP3w Q+,C+8Zn2),)پV`j; ̠ hZ/]o-ZhѢEOs#6h4.HPpwB_{;Z&STTHQy|͑\5blnFSW \||]O;ZٳgӧOx'{PSagǏ鳿ÇPW0 h'<_8y},o-ZhѢEAi9.a./SboLL133MQ_-BV]" , C1rOaL+0SkZy'ڞݚ+<s_z?bnnx7߿33s,F K#] n1} 2z}|^LԢE-Zhܝ"[{{]\ڿ{`]eKQk վZG77]1w cA\acH9UhV`jM+0}`1;46Z9y;B۟ߖQLQE~3?чzݖP!.ӝ?Yǯ|> L-ZhѢE oBRDT=TQw}AqsZ OhV`jM+0][9s s(inX-RzzɍAGׅږNsA7˷^%bj%\JO58[R,ie ۸yƐF:H00dzprcݰtg`dq:j3 /*ɟR4Zq$8ULWg7x^x L4L(PݰύBïRٹ5쟩toܴ|/}nTb].V)˯fTx5MC zWQ=m͔ OכWnSU\+k!Wؾ?ֆv8!f~-Qǵ0wg47'45 ۻ/bC̨hhѢE+0SkZ܀zifua䐶tC, 7RPsN_,&o1/ҞYSY?٣0=f%E(OJ煰3i/dwܬRvHMf\Kgk,ש &(Crm R;P=LnH7%Yn' \޾!WSC%z//Wq:T즂_TS;$㎞2韞6 cRCDT|W@.~LU>I%5`Whn( rΗyg򙒿bҔd{7<~ ζY пq87 :0}'6 f |QAG7>w4jqF%#GeDyےݿ?B}&#/.w/shS]Hfb޷1rk[(H:a;,(8R]Z@?qiWdLY*ޏ%NPaeBm%zR̖\rc!^~~O$}sGyEYf,^"K?f,<+lRʳ B$r|D}KQTȯwIOaPq~xzS6\rI)#'D#r'@>&:3 KR ExzP}+glP^P\\1Lnvy%xxx sgP[8Gx[)w""JټPGyܞ~Ȋ;4j#R= (GИpD4&H'N8 荗8+FncyCE-ZZ L&x7孁cX1dfU<ߏoՉNkXY9|/Y<%+$j™ޢ:#I4#JG۰z>}&< *rSXY )$>`S)SȤD0Y@2g#$䱺/#:~a'tmmlVIr׽rhwLl J3FQOt߿!qi2>9,V̀bíX;2L)9AY) &!v5acnKp?& +iGZGTJ6'>1Ֆ 6k)'Ntidt.*m/ԑՓ-2bnc1s;fY+Kw߼q _k̞⭾?l{r 907ڎ=*C0 ,`~pWْ bG^S/'Wݾ85}+D8YK,;k m+ЕQRCqc{Iik/L?,?UrBKwh)LLGXf`+"2km̗-~ tj )ίJ/[]sw. 
-nRFh\!ڌ~CpK 7?SCt(vTtthJl$Sy;OVtLXvX*DԕQrl'Nm =$L?VDMaoknSL i 3؇иjDVO;+ 6'ӏ3Z09  :L2ԔƏVȋvak,Bv _p&fO6)HCWtDt;Ζ{J:HU4f}GV]5fp|4_ s&ct5_&:rzx7k0&Tt#IGv+2y+FGE-ZS+0Dtq'«F~xio~˷&ĤfC-/Na~^x!K0>~X3\S0vl=ӏyKV=VR?})υ޽R({*P Dxcׇs'"{/zGx8w?> ldbt)$9|+x=y"}H-Ļwm۞ }z4;*a{SV pގ imhfzYڜMA(;D~عLĆY! չ94ua8(0It4}@,\YM" %|- МHpxv& m@ PC#Jb,beqDӟA`z S$F I5d,=K *5c~Eh"1yVdwҨ$ PY^Y z\h`w6TͰq' ?KCk%͇6M LJ=H,ʁvĘZ&$؝s8oJkxgC"sdg9 kL P1 >?+x0O;- ԔfTOq&vػbo[/Ņ< з$n8_F(ve@AqU}a⇯OQ"[j$c瓃RsEcѨOs1֞:c=*jp1.NwaB6Cx;?!t ̝甶_~~^2N=؆cPqf}sPKSc{5 s,d?2q>ƆaohAb0 mEm muqPGxnHh/:?88{;oҽ`A~> TqC{y-̯oQ& #fV1LhLNh xcHP/ (joȪia3ȅZR=d[}K|D͟+p[ jPӝ{{BC}5&)_ @W 㦣?6xƀ{9-{yႇo6S;X*ֆ6V[xiN$#SC[?:q|?,Ȭ4L%WGZIpfꐡhLt bކA>{+폃|kV83s3MXDptKV9."|Kgj =3C5zObwȏ׈21!,w@zb_>6G_}?^6XƱDm˔y`cn9lnnn;>;wG}|?:#@qw{ HqwwO @wwiqiUᆹ;;ss﾿sSݥoU2z+Ox~j7bAD#" f@@#F`ܸq2vƌ3? LDVJ"11q5[F*SobZ2 \CZR"ɩ4vj$Vu ʵ2r1X$ 54wtZSI]imTaе֑LJJm]ȼhF]{lC݀DmU2-*0buzZj0JVե$%(SGyC|l8K"ɬ47MTkڐ$fZz dJlL]T5 2]MU'%GѢljtPn2k2Czqm;XTdzʼe؞QJE܌tRҲhQR2r)//&ZU%&h]kF($19BTȼc,">.tԍdYVFnZ*qIou!YT鱝ހZUcK8rjH2X壪NM6^}F=ly5٘U~?wcشiݺ Xe>=H_ynIOγt6g7Vw^Y7;k_0N{q{$޽6;x-OASD;/gc2L͇$I ]*0Y%1ԔQTZdB;"KX?$ћ[X&j+/,Uc@Ŀ'YiiucIC7PUۂEAQ0Eh -J`+;5NADSAADSDLAAASLQ0AAALQ0EDAAALQ0AAALQ0EDAAQ0ESAADSDLAAAD   `)  `)"   ) (BBL/^ԜJdɄ PU1]墓0,?{&4w4fWF/ 5őSq| tj׵UQUHei&_Gls'Yz(-LNcCI&#g5F?AASLQ0H.]ˣH^<ߖZM'Z]}Ln:5RQ4Mȼc֠ikVz˦YيjjסhVlSي)jd2# >[fn49,k^[dz=NAc.nk7)Wert>>E[K.g?_"А̱ٹ4W/DOM'/ yz/tr~;{FM=7bP{_0*Η7Ëwh+hPwq |#rvlL~en{)ܽy};߶cp+<맯RWsR4Y٫w=2UO%o̩+\Hx|B8 O_?<ɯ69vq'$MkgɌ1ego7^NQR7iTH®dߎڵ'əljH  \0̟8k3V 0M;(oQs'pfWTtw8|lҫ`-7/ovUH|G\Ʌgu-$e{=%*)/#٬o!˚y}$7#I 1<8ɅW:8w/lZ^Ddy܉*^&.؝g9r*aE4(rm/!š5 Yxzn2nÃ9HHM 2>||)1!8J)i09鷅E+3C,g9 mc*El:vm _JLTL붐Mmơm7uTa!냩nNĪa1 N|Y+Wtצ㣔TFFAQ0ED>)hTa胪\<4 O@zma4;f-$.;͞dwH upc/C.zS əW.üJk,^T_GE6x識7UJ>[|y{!>w1m[:(9ysr+b3bH̵`\lHyq}.Qé0y!3'v| y/O{OEYJ鵏vzuy |-bx+_AyN ad%GsET-f3MOC na2p +lHTrTqkiQs  \0M nދFe=wp3,+{Ȯ[Kx*2R] :9F%(PpϗGue74MZ@HF|N 69*fGgG0N6> F6sgZ6IYF9G^v\U:ΝH/-S䶚e$AӛwvV*=Γ>c*[|S릳20VsS}A,[XeW2kf)q d|!sq4r$ gW.^'ٶɏ#0mFRs9}%')ݟDʁٶчW-{D+aG2եtcrV 1z0[rٌ  `)"   ) (   
:"`  (`)  `)"   `)  `)"   ) (   ("`FZmzQ0EDi$ *"""""""""sID   |KDSAADSDLAAASLQ0AAALQ0EDAAALQ0AAIGSDDLAAh8VL>9Χ{3pe~?%Q0EDDAALj+z!/C^* r{|~ދt_NHn(("`Zhî&ϑ̦"e#%AOu`PP(o_n_:}큯WUl&Q0ED&n_v?G% jz/~Ic#[4F <C{!ȩ:#$3-MhF>l ?)7opMFQaߓo$>)Grd4@xBJ#]s%1Զ뿺ψOOW}!O % BeHHbS[qQ[K2CorU14wDL 2ɉ4E(/BgFf>y>}Brx]Eyc5׸zaQs&C^J-X5DF_I2a$b;́'O:>-U<{&\Fr_9|ی^<'vVd֐Eh(/ //<*yI]HM;4PwJJ\,q,kkˉyRL2vuDFE]\GVTy4k-*l\_Kr[⧲/q>G|qp?Se *2D% (dV7jBՈQ5Q5Q5t ey4n Ibhgo:ES76y>Sn$)8~:v)*p$# F }FbPgVnFϿ'GpnޑH~v WaWYɎD:m7Hi|2h"b8j gOX1}*2i.#F쌋F(/Lh=f:juYDJiWމ)g}p+_|6K{iKX1ޙ 9Çl:p CKO侂hV;{5f  9wWgGObH7uc{h[%]5i:#ݜ>s!룘0f.#aC¾sw钰ԤtgD; (}1fHOQґC> gg噺gؙz@Sl\n՛8q-+2f$V/\ʽ56r׏hR]Μėal/ae,[+b j %f5}r7a%\,bĖu#·^ǯ+RtW)`F\jm]mm=R~׷a;@ٮ;7p>|\3?] GtQ0{)" fwu*R=wІ]m[B'?G=T9w:1 *֏saҖ34i.x <i9&`ꢬn;t4$Jې%#Ϯ1e 6RlFYa :3tn.%,]kVMy^Uk@v^BSr M5eddPT^I{zJK1=]d'MS]2Z4MdЮҧ&5s(e _~6`W25;+Z>牫L%`lG⥭$ u >:ѕ8jʎ`,>F2R }΃es'svr5)Ĕ(>Axn-vsץdU鰑0ޅI`,} P{C`0R?I)C̓.\LaC2r9E; iKdDYGkE2+&+rO >e{.ЦSu{+vT`&81h"^G6wp5`1CFL&Bec涆z=V};9I$&Pݨ9p! U5z jej;tT*hji8'<4%9$$&VէVErbYL|ܴ#rg7n_bP^TJS{'*j) 19N}IMJGc4xeyetA6uSSVUDRButwԓ@v X/xs -F FBN2c$>uRו3Xh1;9H{+QhTgi TiTkbj6ysa\\~a^ec7]Edǚyc[0c%S'fEYQ0@1?Y.;1uo #V2T#oe{S) rwu~ݳH)kpR|(DӪia'T(rh=hC.}~Cs 1hn~QZ?_-.C``fPgΝXWα[Qt'1g .>O =3#þ2m6Ni8fzI86̛%Pc&񴠙w[Ge1G$@8j:>8YSpwh,|Jd K)/`\|d5p7p53S\Xm ]"(3g/` %]N`͛ąĕu`o FjS[rY-G1|</2>ĤdT̈́NL[WnFTtijjG7MD]χqI}dB0/ebhx͔CXxbm]HWsZ'5f1EUq&XEbq'jسq&Kw>6 Kfd s}<=E`i{W{9օ݌JrϪ+"~G)w.|NE}' K=7 JϿym؏쐚,=Xj[!cv{U)م|/SDL]ϱjG=R=W輶 Iel":c|Q̖_) 3U9x-'?g7N< )4%fM],8v9gro=Wh?s !9tu^9 1tuwzj3S [mevb8σu^ {חX&e Iqp@2i8~>ɶ+1a&O[PEdE<"9:ǹ:9W8se%D?9yIMf鴉[ùعi+ݠ,% ݇lv.^< q+Ȓ/W֭!RLA fP~ϻ] QP~{{3a/*bYz_,hcOVToy݉,w2PC*vCGR& ltd xSYU*O)c1?)\Q0EWuT zps}%u_i/#MÐCC>G`-gh5`Әq! 
'9U,v#FkL5ӏS..fR0g/[i-yA_p$2u2Lex@Ƌ38 vc4wC^DePUːGX'i9psF0rVիYlO2=h5[$s⾘~[0ȌbX`_ukVbbN$!gC1К\A\*$ObR_f fꗛlڟnEAW lDniu_Q*&*)>k'I6v:{QY֨췔>lN)әhkbڙ 'Lil̞e 5"`=Ϊ8f)r^136]ʹq,޶n٨>ׯL}%eĴU,;3f_Dv^1ϯ3r.xM}QtVlj˜>lÆ bʝwxrp)ql&޹K}9J mYC+OԱJ)F6g)Ex6Sy)҈հ\EQ,4+N-uc2gs;pS1]|&k fdipqufĸ <ɓ1y+};VaxeLήi+%g<)3fΒWini̖ 8<֊U[ 3d&'J95v"&}j%Pt/4x ai 9R- c1lkY6ʼn>X%̧.Ruu1.(:̝-~WiX0zU<1zUUꯔϕqqqQ̐#yWMK^$S|BfMf)}rvue4t;W~+n7qk*?t)6blW/pvbnnʽOXķ2/8)Ek䅜VH2ucBzsX2ҕ'3u8]>g=E-:Ŝn2 #,FNaTz2K~̞YM ^Mm^Gh/wvY 6YOQKY;|ȋWxp?'Nc]r7"i-cϦ/v;A>L0[196n\繞+ܭ靸^ͱGXr1c\qu!O*es)byl: kw2}J2klތB/kwK-'JCo`R0nR_woer(2T1Τz_USfb!JKgXL4L=:[9=wj1V:=W!á3)xm 3+ōYƼ۱?SLQ0{sbBߨ[ S'R3wPu }3,3J't =ũ|L$ U~:a/B %*. -ǓBdD84uB"ÈI '' 4=yaLVjՂl D 4שx3*liљqd5+c+$""7cKIKXxLCYPUJb +}:6 UYRWKp_]R*m(#.1\iU哘NΊ#EnzY%HQf2aDǥБ82kJ'>!KY9IBzB ٥ f W+/QΗ8Oa\MY&e7R葰6KZQ>Xʛ:A2PJFv2PWj;g{G#׎dh&*ek|-8]"aa*sM2Y1v6EhH(/"H)Eo"ZHNN|7/I1)G(JITKQk]'[i)Qރ0ISaHks'SVAi9de^TYNFJ2UmDeV"UC{=)Z X:3liDT "tSKr +fT\;Xx>,8 b˩$'%INYV $ dxkJkڐd>,TQDTd)..UD$uj K0H& h2/IQ[IKx{h;Y,ʠEgHY$5 UBDD2W896wR>gT4bK#L : ̺7cEX8Ehw<  |sC<K)Y[5lδWd]ͯmSڻ痔w܈dĕ\~}e<'4 zLZz9_,2@br4@SiSTGAN-)|Zr&_$./7*c2Ɓۺ޼=6V3Q0DS6i97PSڎoTiR;*mGVvﶂٸF>6$aX(Z%dYnEzjxm[mg%2d$e,ۯӻ,ٮeuXﰿ$jY~{޾qQƣDvmޱ癩s gul[W߼X?NC ;LC oّs\:>w(b渉\ܻĎ3KJxm1 87802yn$;j}ڞq8^vgo_7l0p>ȗƳpQ*یZrY޻Ѷ_k%ߩw+lp:W,,޿>Y^؞IxsCb;ǷO†ނ7;)_qȯ˯ؗ73])FYBccnnal«tXVFV3ϞU'g}P6:%]"/Y| L-M: l\Flpa!wmy lESD̷zNW@󮉴^IkLeX}FtOdZ-"ѵVUHA@c$*ʥCS^ߎ ̂Kt~ecRy~e}'~^l[,kߞ~\89l&G$;Y)`)" f?l2bVSM 0r :  uW'{h-/럟n\$_X_z') (_CAEAA f =:E)`uKضٶYŶ~@)yvx{,~cNWމ(`AAʛ X+󃕱7Yf{|vLu~.s:1z{z/u 0KspU)?{))""  Xt_Z?HPU<%W{K%}ޮW}M}پ=>p̎q*rOּZ~J`)  ԶYvٔmWJ/?8t7c(}Y,SSDDLAAAt&&E zS6U=,2߀(""`  2Jm#"   `)  `)" cY[@ʚȊͭ-Lu#Z2vdEedIR|7$3n =S2?YdY#`KɌlͷŬ|V"ӫǠCۭCqRb$~@ڮcK3=F _G7udei]g_^Gވnz$ug`݃Ac@j1K|dgd}3~|=Kûc?L~od8m?ko",J9 M?x>Lf+?k=n[X% $/Q0E\Y,|xVFl=y\J*됐i@F[C55T䒓_Ψ'./^N\I35ըcT;Xuu\xvn@Q#KZM2k|t^}Lfo]Og 竊\G̕;hқ#xz&Em&wV45E GPAG@HEc;Yoǃ-r-8 \?{Ope.},ܺ\ ˠG]ȊHq;cX~b_s&=ۧس/#G+ȍNUɋhИ#|gk84D<-T :sߦ(ތ;%r=pڻ{NyUek7[F:39g@TA=:B><~*ZTq{QƮ*cn='$žUϔx0{s 0X>&W3#O e7[ DAo[}S9O_IiijC3RH_?{Vӭps~ &8'0! 
sN q ـ9s !BBs]3;w:{T*v޽;,zsFERFi</(Ẽ(%n`{V8189" '+֝ad fī0?\^03@w#nݐ1n3X~Us ~JXe1S (%3Iz-g5HCi/[(Z3+/t#_M: 0U?/k|ÓmH+)*Ɂs;ζZ}V5zkRf&q+e0NZ'a3[~amV/#BUgYONh u13Þ TkFȶ*̟o4|FUea-k!;؃جZg)ύ{[G쥫Mtuv0Y9yڗ1oJ^U?c<ei~O ?>+_G÷:=9ˌ>KO>vٝCRύ|1ʭ9!L9" tWF{nPǗf=-ghP78 w |9|mkc!J/vndg_&u!,Ž Lc;LSe`qgqegny$Q6 70sWNsZsiwo!k`&+hix.`#O?V`'1u^`\@CJB~JZ'̴\/ W`O-!Q U<$luT9 Ntȷl걘uT}ϒVB|%sS* ё+Hy1pqC֗Oϛ'M#Vu$I`ڎx܏1qױf $<;C`mwWft 'rԟdэpS2r,lg[|ί ZocOH S8EoVfL&3f|ܢѨPzޡwCךݟ˒6p&yuìWe_c!RI"VܚTzL> &ɖ< -Vh q=G z[>is/@3XCBa<65ȐP|Eޤc43^/2%գ[I%$@ɋuLO&7u6%~1?S@h -*il?cdfֺb2SM%9.`%/Lf7#3'l`#o# 0b؞$1<͵"V$J?ĴAeI{m]'-&ŕ։R_;vm$?އ4< M3su'Et@EU|E++յU^8b1I9 {B`` 2e6'q}:fol""'~9;O΄"~<*ldie3"6piK:#m`{GQdR$A͎ʄexT.X4} |}|D7fnK~4v(2(i)ix'y,O~EF\"xK?7[9Z>im/t8QGScR$~R FY .-)b+lrixA*oYÜdfbIgfcLďW7R ?_e:u aeر L]`!uMNn_4Uf$ٟ[7sZ"p^Qyg6HW-6?榳;Kǐ+\g9U{}.0 CH_-ßD4k$ 򉠳_U2&@7ބ?OvO"J*D3ہ7M#UQAsqH9zP%nWفvgX}V<ހݝ6Dto[Ej X\]$3ҏE< gc -˭K>Ǵ\" #qJD(v$i\$ICYyl,`9ڢ$'?oblq&s L*ūaX7a;mF uI P}0+M-flft]n8W;F#%;DžG{9ѯQ70 0L۰6xHBj2_<>c&{(Ǜ<(t wG(\΁gw 7v(\Z_&޿ nyIOO'=!G$'DLi'4L;8{z$a)^"{t7D[! ޕ=/7iM䆊ɭF""(n=D*4~]"bFV9 9LV˞O̵䛎G“JYYYUIZ 1ldg?шY^"WM4QISPԎWSx;yЩ6+' 7v et^h}=iol35+Ni,{ o aPO>KHcjf ٭y8f~<&>/X^>3lZ qR$2JtޖPDj9ϳhY`7=3xO}V*ѯٞmV6yOHZ#ia_##,5W2 ^N0,A/i=Jx*sp fpnڼ($N{D$u x*Z`'Gmq{d6JWQ,4>?ux.ᫌ 2<''56( bt,DC^_KNT-}:rCBDD2;IOY;j*yս&Y5d&s'e)|A*8]Bhn5B7o~&03[9msUϰ#Cg-/|&FұUhirTowށ?oG6YqqGpd`ӎbb:)I=:6)J|H:1(>>!ZiL |40IaaDFr'+}Il~/B?4nJx_UX4y\HT 1(4F>ѐ)'*],WH؋zOZż+CFoâC*l680`bgv9XjxJpxnX@zBbri+#0u- ]IH4+N$Q, s a`{wҜTBB t1+ӎ L?Srț(ahGa1qDD1G< %(6d^WuYF'xwP_523Aldc"(jp{y: [M &hNU}|in.MAucdu" ~82#4'e q0G1٬+-x۳VM {Q%g2P 7"McPOV7! Sp?^>`ܛ%'9؄"~r ۴GP[\&3/#Τ8#y;D,N> Rf180 sQ0r*&#pŰj=a])8ƴHapdjZqaEk.r^wIEI#?XDxny10Eͳ0aM#R ,MO3jlq©c*?mbqV<0 {XǴq/]χ=@Zց뉁"klo4i-R0!7ݔT/ыQ.gS["f`,ɨ*Gӛ+[d zɉ` ,Zߴao}Z&g)Pul{"%DHܕG )TQ1Gq; z*eW/WfX>UL뭌恙z q͞) fX})};Xm1e/q~YwD~e#BT/UDbQ#JGBqd0T,8=4>TV/(ctO|* ێ4Ol8P+;>K汅`n}CQ?FH^wPOHN5e.a 7d6 'lNrLJ!4%,eeGh Y/YD#?"R1f'xDZ=D~߃*d.,ވD P+ #8J}e. IO%΁Ywf`zS`CR_< :£s]ב}k` AttC52cB).z*z5̶K:+nsl6}vz͆t,L&' >//x/+'B$t|Z? 
uYx;PBLF>Vm8{zݑ~PtOi~APb=H^ppGZic{#+uGSYε"q,PLA`D]#uY^((2Q/ (E-R<.<]@"/b%*Y1d/j8I %uvx/W$rn *7XNN 5s8ʙk^(rd~ބ}e(T(Uk(zT" %rX0s*_A4޾H|Hάg{mz FOQT<"H-D6 }x:RIy͓޹EfRܽ(@[ܼ/"@ơu-ƩxyH,fK3D?@|}y8P9Ro7d2~FZ8XlCaYgf)nn(UH {= Uy8; kÍ '2?F!MwgoWfewU:}!o6X^Z`Ehvڮ5Ԛ5G&4Ã#~6og 8ضU5ۻz,֏Vr^91 :}Z4j5Nz?K^6;;loo$Zy?_R&u2K+l1[>g% I oL>v^oj;<@x͊=2a>2Ic:@X6 V7|ak ^/hh3"1 T *fwgm?_ǿ7Zװ,P>Kg8B;;D,=4k/ۮ1)6 Zիr8X7W5lidO{ ϲ 㽎z}ݾu,V̧soײ͡^d3 ߠa|c]Nf6 ̀j׷86w|F!se*qfUƮV6z^Kg`8:"D Ϸ |;zr ΆFfawPk4l{pᣏ/ 9T,nnQWLrA#G [7s+ 0 r]NkPЍNγw Y+ L]`l/~gر2|~1ce3RQ֣-s_o#?5鿱cnyُ2|~ۗont wbkSD C?`< $C B^Fm}u<7(}YĎ\UGQczw>&! ~/} AW2L|ͰA|KD{y0qUCjq+}*g]`.0pqqA$ݜ?7'cL'6n,M~ev8z &a:A(lyIxN 9FFFGLK4BL'J.0fqqq|\|׿kfcǎ;v1i%dL8c="iyťKwHG\7tS'8^>-pSR3P@ȫ>5iv oW_7.0رcǎ;vHO^s#޾HOIA*.T.r+=ЛWkY`5.2GuOT zށs]`.0/]~ɾph| .|2}/]u\ 6\1">ܿpү] L;vرcǎ}Ý[$?xˤ ߫5L(f<6W/szڅ~x6yw;S?8]`x*I{{(=}s*Qx˟] \ (oǏh[ eA~'3)Q59Y/Lۄɼ)ekcvMݡ8aM0YGѠ5F vCL{ޑ c "<k+[7GGַШgGoƬא.{{.'#"bw˛okfN |$9292܇2V|*N]ehp#BI*ZCC,ohO~1Q~>]cxxf|] m6 Jޏop sX@`c>@1776f+{l020adb,}&F[ 9Ya=QӵÎ;vرcvi*LiB7[p+(^g% ٌ0L[)Ϙ T|? LqIQ`f]y|Klh2=NͅKK`4mxa6RbGT sMoǛ8KBhniC%$nHW(KG +0VHhj>6tOH0;#y $^9Q;ߝ+hx> $TL6:> u+-gװW J$TJPC eR{':Մ 8!$Iy̍u[{{IHa>̜K <伟@=TÉXNb^O g/oc8:! 
!11 }7Zׅ snف ž$$- qlo&Vz_DLtnb %#k?O%y2ZѫPI=MH"D| v,d''/!j6RH-,S; Ii:F(j \W c <5] OE َRdd@ݜ4رcǎ;vJ`~ȏ$g8\i-WC3gyRuUx5/KwyQRYKyi"yp/VۉJ{Ocv-;xo%~It5TF8=7".-#u!dMe4NQKN(inG|ەu]C57cޤJyӱ6#oe#M.!Ng~rfc2Ddl+H3B3˙ %@ka>6V pij <|Z٣(Z̳GIQXu fef*_DZ1KMTִ!&4#_pWO{y:ҘK2Xh 8C-YA<,GF  O2 k{ALҢɩ??zZs7Y*=s:3ydG4;<-iqzwK.qKMӉy/q$KJ4֗p3C ;{O{M!7ozQ3L]~<7f$q!Ⱥx|Baǎ;vر "i7gJ~ ׯ^ᇳ~ի?quw՟ܺ瀇7>Yz:7>OW oPγ^PʍBk{~L^Ex[h9wo^O?rC7pquƵ+B q6ܹymO`.d3^J[1:yRѲHYU?]^|]U6 ll6[(m 2kT' <|ƒP9q<(xwg@L|zVh'^q6Ԭ +3ЕE'yRbuY/ q׍̍uHZ4*Tv,gذ]*(u,ӗ'Ƒ]|״b|_Gy:gFm[8t EùGE2ےIٲw:(gAd:cҵoFG()ϧrn3ϸ*}Ih7r;;pG#8_tERHW]vŬ%:*5q;bR\{{l޶;]$D"q)C M=z4'O~bҤI"^{Bo޼?)X,flvX-͊hr^L&k͉gd")Gc6n/6-Q+V̎6(+Y-& M=&VEWENHS|/9Œͦ`\tcQ[cM}!D7F"H$L)2`J4 s)Wr$1{&fĻGCfO Xn9g/ [] ϥw;otg"I#gb)96$D"H)C %S9yi-grL8m|iG06KHEj0:׭ 򜙛79kh󑈾}$JG:{Y]D"H$R0`ʐ)˚L&q.rjMuk2WlD#6Y!?,} )D~3ŒPv}D;ч/=~r)D"H$R0`ʐ)yɴX,?՜Ƞh[0=w'Rk=%|@F|fwpG_ˬM~kkMk\*})TcϥSѭTj5 D"H$)R0eHKfE?zWr1&T _"6Hseքxʈ#O(:-yTftu.]8C{D"H$R0`ʐ)QVg744PXTLƞ]<^蚡Kiы77bH=.pq'b֝*lMXM c74D!-1698>u)h|PV1?T/]D"H$R0`ʐ}vEDl[Tٴ4ʃp,+ڭV, ǯqޥnu]Q;vTmw~*(M|٪/%b6nD"H$)R0eH::W+)v%$͚IK$bVaeuwaHC-"B:"ӟ}b [Yǻ]/!AǝF J.@',<]F 6{rk7R{is(5+ٞmH$D")C"3xs>/KXƍJٸ(gHì fBw+;jNܰ'pP ^>+~;/u+ ]@}#NW-~*"VqUkx5kN^d\ )a[q9a tH$D"S )#ݣK̚3OrdokV|Mhzꯟ'p9 >-oU"zNxᴐW;lHL9Y.vIL=fN>O{Ɗ5A0[JN%.!E{c)M{(d5Ru犰>&bN{H$D")CH$2q̉[4.TCyXxS`7{W:MvjKr>&ɢUD"H$)2dHf P bh4b2nSi5<~@QM2 UL| C">A>0ٱ9mk fbSӘM(6;`j`l`bbG"H$L2`~_P7X顺gps1#H$ac< tu٬ b0^MD"H$R0eȐBQt:\?A1&"yц~c?%:WM$D"H!C 1뤯(9XJx$s51fL_/ڮߏ(H$D LR0RYqLfdu75tZ9z,b^V,9"ij߈HJܨa(&eYKGEY1#Ǝ蛦{k9CA"H$R0`ʐ).qa>fcC,MB^FM.p h\ {m9W|K+a .;b $ؼ:zLD"H`J!]*}PU ˮŽ S38U&U0uB[Ko o.MUϬ%ޘ#HkuG\nYD"HH!C k]-Z_UtQ˰uvhR5!ug[05 'qIWY3nzygt9(e_fN㒱hK" 4R\¢9X䦢 }f9׍Sbl:zNJՊD"H$)2dH`vn1:%e_zSEFK0eK<).)G\"<ў$U;ƌ$zvU(;g3 Ŋ)u?%cvAXL`N$k?t|jW;%%5;9 `-^;O\,W뿲_^3bl1J;wlKf%D")C̷hNWcAVKW_]p *dmP[<ÚʛCG^v ؁k33 ^WTa/t5<^4D)%<Ü۪pԠyҪ򭽗)ƚ*PJkk+&IJD"H$R0exi8͂i%~Wrݳîc7YY}9ZQUhQb_R"zɉ^8>1]ZZ[0ŌI~8}q^km9ƌe;<DB\i<||;1JɔOCC"|:B 2dPE 7 R0:N-8)\Ľ1Mq;CPR|4UCPe{ f LR>-Y{\&%G''OJjK4?4 }J:䠡Yf93sBxݮ | SZ<}iCP)KX3T5!?ݟj wWoߛ!; )2`m 
JSv2ұܽ=k]e&--AF;ov6m`XEZaZ<DrUϯ/)G5:w>=gDÕU(5G 7L5_nshxW?.LR0M1X Mׯ8}{37Ǩ_8հП{`Z1cTQ{6̧Gk>v[4u i;t=³id 'KH!z}Пb 59Fsz69ﰻC..n1Pi~p(5NAԦOWyM`ׇYVj<^fG0%z`ѻ҇:};cX<Ӗw]KCUX =dX왟3k&kw`A^˾xʺ '_27`%E:y-^BoL^ICL-?s~?MZz8!wh,ff[O2V.7T7Cis[v4i3z6D9] o-a""#59ȮI_^3Ù?ϏMys;b ۛלÇ_PėH$R0uq!{? ?XU2)nB(FoW'sw]ǵwv>a_+\?zhQޫYg}(S 3`ʐipl۞?o_~ʃYΦ?b{ɻ\x;oF#Z_U4njt-y%b@皝&c5 Zt,ǎ1) }v Y_vϥ }+l sܳ!%(zURO q5ԌaнaiR۞NIKϗ?5}`1,8?'13)k6Rv,GMfdv-1/W1w r-=k7>b|2m?FRc0qpdwPQyQs4KޏY7rN5cŐ[XɄ9Ń{-[C/aHdFMɩ+׸tf/L:Z[`7' Тձe{|EW˪uʛIq;whycšZ~ii7nﯼ?pfL)2`<1Fu 4_%;WGIU!'F ?bO9#xŌJ0uiU<ÒZǚM%"[0hqQyaTbJ鮯ϟ$$TJ~re2Aߤm`cOTߵX1&LG ˢ͜NTT);By?Dv'71i4ݯtj {w%!?¡|X>ȩd35\+< 9FǓuYO`VaÅ O#(`n4r;*/_r^]:=g1g.7*/0m\6>J 'kP1>de9x"Y;iD,a9,Z ʺKfΙ-h"1{\N>L[z3GwqNvgǹb$乂^Qo.Zqp]rזV2`6-}˾kCC* ^cC~vO->[kI}`ڧ%VFL'jtܬEPQF-'ZXt_ ]Cg}5}9)2`Utx؜6>q?}UqiEhnAEjvnўbg]rגDIHNϵW_9|[ek6U`p q^}ξvt벛'b,~>ۿb n4I>z?~Ӆ]Oޛփ순ϿhN^ţ<6&&f?^ce=`Xs$,_0[,D- l-z{W ^M—r`b .2cvg]cϘ*Rn\ˣZ|?U;S)-+`몙M9^D:ÃwȍGzEk[ mNFLxɿ~›SɤQ޾˹w8uԵAL[\J(mB"H,E淖Un7^)DJ~gISyIA`sz8ӌ.?5G4OI{B[ s s7Xr-Ztv}t`ʐbk.l?P_͸_OWttBf{3 ItKZJI2ӄ!zp664U LMt,} 4m8U>*huffaLVh`mQs ^|YrAAl>S a̝;yY:+o //fx6J^.-#xE Xx1'mw=nli+lNBSCwƗ/Vr yGį\, &ߏm ׇe4 xRXOUܽvU-OI^`gAlESˮ:EŵX{8^?bC"H f &!K+5G.=?_^|U:oeZd͞^@SONk˥-T=zJ݃&63Я%V =eaJ>u~*Lmж?:*]GI)C O>m<^q|kr%Ы51^SgǶ"x{sFV蒼;\(.Lf}A RwYUp'^ R@lj |(cw-e F\k` ĘϹgl9y$ X,6vl|}v0lRJ$U̿p֒R~eq)_YR6|[D h{cNnA1c [p-z*sk,X[6tmlJO# uww~467PVw1wG?m =s,.7KRdJ`ʐWUެ#+Jv)i8RxXpdo&?!Q\;ƣ߱؎R_^bM>GLM\*=*Cê;4\y~`jnJ#ǵ5 KG7JmgϦ 9-4'&a Fm{[]v L?ijg`K̜sS]]hD"H$#K0*_]|_ p ~lF:No.cԛnh㋸oqK;n2zK%ls_NLV#ݎ|}yħ*n7ji֐pǷk[KJ))CSniҖaS~?2:#oM_%o`jCħ+G(e&%~c+Osmx*+avcg;Y~5Bni!ʰR+6&\^V덩sɱK0釼ۅӢϝN@@[l͛hZu&O"H$#B0):Z/y_?5+?ȇ˯s9YԼ+,[r?O?72?3?[x_f_G/;s.S G99/N%~2?Wg??ț,(x-mپ)S )/j657RSrGu#`X_\pkML{O;JKP1iכiMi/.d®0G eL)2bhf; i5-؄`3,bl̙7?~GH$d f!?+Ce߿zyo>Z S W0%yۿg3:bl?u… YbGÇf$D2"!Uy~:߮!`^wAyOebhCCM6mj;o8")SL9)L!d߿G#H$,]S]_t_RC\-K/jWo8G2{oL)2N‚`J$w0`/^̞={Q%#D0%DbCFy'8_s|zGy|BzZ n63o:ⷂre/ᦋ#,6iW|K+ᇆ%[+"1ee8̅`J$D"S )s5/%.P3=5CUm懄%M'DzHĘe׼o~YD"H`J!nჇyҥAD݃Zy ;/FʣGm(,l=_µ~^m64uӡj C0ՠ雈cbaoA^0&E.2칙B( ŮUAs bu|] 
BYJWuC1aLrav-ͣ/.c~:l6!i45"ޗ3Yk~,zcrR5him 9cPgU޳XSmק$ T.K;% cZ+o2!َY !vdanyZߒ@iLQwI6%!g4d&uY 4f5c@[LX3Cdd$ 2&[yl۳ic\|ےhgoLf Sz!ΉovqzSMH~B#pS^7X̪7./)Lq_TR4)D!\3TПH_\;ڞmS۝VjC,90Mx$\tLm ,KN\>MtBd]٘5QgaZ),ڶy$&ϨO>oi7oj4UE () ~TW;g\Q}pEe][o6}3[Wf= xi~Cq%[Bɺ Xw?և/#2!~ ܺ©S+Jj%B>}f+ 9~kX,:su2N~ `4=PZz s2I?{zEN^YNn=m%͞Î47=QiGDY 6yR͹\Qs?Ù7=VqA|;d99 *s,,_>m<{WWoVcή]ξd>Ine/_K·D"H)C f+LC>t,{2gȧҪrjwS033ciĆ,~{uAP4ԛ9n s3{ V0q5{3h+=iSY⥱T4h}J7?)ޗ4;e*r)=;U8 E !lT&e/-u jb [U:Œ&f {Y*=6 dtxWKxh?{OCr.v4b ͙Ę+?|O{FA0Ba71Oٛy]k9!K"FXΜS9tʼ}|xz/kRbl&k*+WElҮ`ՑaLKxOY5}n#91srt^3'd2=Ǘc ciDn<Ƶ- bJڵ$L7|ɻ61w jn1.#v)q { 1Pho΁,^̛:)9{0O$*l'-7>u >KC9qA9rx&0yl'6t;#79T7v53s0y?A#KR뽈]k4+wJ2+Pu]<}>9yЙAdTsq?_,Pr|REge H$)R0eHjΧ.~GX 'w-anL:"%/HsX"5467r>>+Xʏq 7U3o]FOβOgRϭg9v[yQDo@: %䒡r=% d33 &4XKD4qT7yĭʵDk ,u_%tjbIPf|cQ<6J$=^;-9.fv *۱&K.MO{Fa&-=j:, 7yZ_ݽ)c>xܘNSuK·ǂ=<^Ê)Km Να]ˁK(XqJ/ct/̸]BdDS؏_6,Cºy[̺SUeնc>̻XHccOiΝ~uy IɪLD"S )vv#|(/U 黗AB\t4}\I4m&{6gL >sM3sxu444#h4`fKb?&z-N ̭hNBp:>ov,#1֡4aLyj,VӀ 2V7c-S%!w Pݘ3]$srKБ=LU(vtZL(GX̠v$apA,g(Aiδ*wY ޽DY_Ce:\}ϭ`LwD]͂i<={FX@\:"cwM6c{c99$g%OJcq64D){Xn/֢D$kKPщCY~??`ΘcؕCɍKd^@nC!2`ս8I!`sar]؞DnMKD.`ϵ$ vа {7|Pʍ› ^`jb{He^HVQK2HJ˥>?gOz/gsx2Nd3-F h{֨h;-t83p^@G9VD!%c_<ߍd_Lc)H`J!lihņd`dfO`AMCc3^ :A˓&<Ťiݎ_Y'jj6V7p7"fT8Y:[udU &w G'z2 ^0;%#}Z5I裟 w0_,Zq)j7̱d#-VҬJc=LӢT7Z\BYSg.y+r~Gb6_3z{  {W8Ǻ³ ۿMN!)r GXl#:Sxa-wAKI/Ot`-o[u8y^ܨO M޶jlJ&~Νj,]DFnF`&; 4l^5mɻtS~Ws1ëĹKgY9G3kR*`&qCG,6n' {8ɗmv+CRV}؀DJlf_U>wE' 0^8K8oH@ Zؿu-R*U4<_0gk۱9C\,OQZڱiM&#_|&lZ=;-[Xuύ XE6ttc71qOU:4iĘv_LOЊU9sd0 \z #v[ =AʝL2nܡjߺɆKg|${OĢJE.fwT~ePQRLzΟ9Cigѷ']EkIy1 &NCu9u::]%h)T|Lgᣬ ɺc0b>$`ڇؗOOY9'͞w,wI?r3pmg$1鷥`J!S"l67Dݠ 87@Y1vo͙˅ ֭[)**8CRsߥ\,keyw:B _kkYQS"|0+ėhaM90NYgP&5?$)C ;wʕ+j¿\ ?>ɷ>]9WD"jC.r`.Z0;ÇL$fnȧNý[LKzy=6vS7][َD"t0Y7 &7f?W.dmY, uScvV~49Nwen>g;`J!sڵ|'2\CQz;"`WF2b1{r\v.9%C_= gMfts5yӖ%-rQ'v6Ozy u^K,M\Y)2`nܸQF1qDOq~f?Z =竇_`$#2cќ,? 
đ@>hemm-D"H~(zLGI#8v-<0zO6Zvgg`z '>y!;hJ)C 3|[AtjGD0I'\N2y0y- aQ+1i:qΙsX;E, ||x&>t%Ծ7q`WNF]cϸƊKs ߽H$(Vc[N@`Bעc0JΑ| ]ZJxD8+Oi kW[7Bo\|8sR L5dH3vWQOȽr+WsIIXqŸЍs̞M|eq058~@<]&˱1;G;mkc]uc,[}[q]rRѮ|;hbOl#(OS,'Ԋ^ķKqnÔ_M`abdcŮBaҨG_}a`ܨyN`*.~1eO/1o1Řci̟7Ͻʕ+9pt:4V"H$)MȐӻg "]̅RoݢNcHQVq3Q`LVy2|1ov-*t J_UűMz1edz (UNxo^I3HNܫOzs% I߸i+U)Jg+:׏"];)B2`?~D{\蒝|kъ%Ce:QftU|sЅ&b5}r*m=_q<sZ{wJxJnRJ2ED/[L|׈(8g2繗.Y͛7K{{s K$D" )+7\B1kx\_ 6GnAɪqy6)goFQáL( J%~;gfst6:O %g[VfR Yϑi<Ւ3c'|c3?bs7Y0 aLUNѦT {NE?(]Z L. m~.(4_VKь1[vD"HCܻE.bowkOV|9Tܿˉݎ4]NIn4w2P}-S̋ŷN]|0p97ȿ^@Wk-9ٹUʨ1㾱` l6saݹ<;I̔oT0uBjc _GaVӶs|oj4 kn",5? ]b:d,.)y^垥`>syOPePY4ß*f QxF7zQ~`kWz}]Zgp*vQsu+Do4qߝ`'twGTTsjcW"H$,dHyݳ̛:QG3sl(A%3x\{ 7Jh[ 4>(e܉=~1>~Ȯ-&ews0!dסkNBO|YƎ{wORW^B46gBrҨe:;f&4}X]v_DMp l'x?Ws6p6KiKplGVW\sLS7&zw}݈1%Ɩxv\K,-W\cRMD"H$R0!sݺuB޲>k.'?s)p#mM=|ȃw=wgY }4,JL,e|}\Ήby LMbnL5eXkBE㒦WL qM<%?k Kn:iR1dlT-&<11eLQN]ڄD 5 a`WL'/cIVS%13͙L9r)رW(wH$D 2`^tm۶sNغu7]kt¢%Qܙ'Opw:x}/ӽT~\O_vwcu@t4cJ r꽤`f2J)Ϥ7cu32U^;*8gxjsWi=|݊lX+b~"%CYƄciiGiÔD-usiϛ!!!!b,eb ʙKD"H`~7!C #<"`z.S4^&zrvkƢgφHaN͝D 3k鴼:ViC&|y4AeDy<$V0R:ݣdU &񅂙)҄gl>Kg5/ȡKm=M34ij{ *(Nc3b2:д1h|O̿2~Gijnr)H$L2`dt.9ƾH>_Jq1׎o"f_)1ˉswD®-,5?٘ 0QZaӛ[O76/cv89]ϛьCϛƮmҮl&^b1#N8Bg|{ϼ'?%Ncg3}8y E!1IH$D R0G2ں,]L~Y9 2Y*/f+ホD&v~Wn7p0>p ǓR7g`fqc4JGA"cCx:} ?/3m4{XGLXkQa ؑH$D"S )#sxFa@`$es 9]CJ`ӉKd$tr/$*,Ñ8!<9^0^8]QHb,1!Ɔ#-|??|;ɿ8eK|?=Kul9r sg%7laD"H`ʐ!s$c4QPt3joqI'MI;| Wxjq(5.JeY ˩}ȹ˾.Θ҅/1XpO$`\fLDŽg/L?8875<^}+zUu׸Bo-w:'nNwww>4"" H*ġAAi}#':y]sc̵\s @ SP1&/_,Y[ivO4 c}ξP?җ;*Eaa|~!?cH@ )S(SD?'N`ƍl[iF&KgKakW'cYfzH^^ֆsrpp q,@L]` ~$q_ŋs/_XQ?=&:߽%}\F e&HOXeqqla%F8V8f8v8@ ))ПTyiW^y=]]XDB,IǷoϬ;!R)sRGR_Xrت*t: g-@ ))Gw5:tk׮Ŝ9sىyR$% _ـ}>cߩ~<)Y_ 3RbcccccE C!S()|N{..\`ݺu?>1{~YztxMoG+o3ГnPXֲ{ e@P(SͲ{xjÆ 2ɾ> WU`[N2n [įt]$>`_On}93-Y6??%%%g߲s=[+@P(Sf4ٿQ|XhW[By/{[ݻKx:l{ͺD#, ]aٗS-X2c)BLk(߇wqlٲ/ 2q% Oq'%۔m6ַ9࣌8,LFYv}²;!}ʾe?a)@P(S&?7z/p)ر˖-ôiӔg$6Tcv^&6e&&0_mȶԷ-LJEQV}W򎰕#}>d_Oٷ",@ )McB$wa+_۷oǛoӧ555().FON&V<s3mo1XA3# fff"+KT7a_O7#}>d_> BP|< E`&!YxwH!/`Μ98+"fESS***dj2 ={b_J[[g Uo⇏vPKjfn;ۀm6w+n~j<ʒ )))HOOgqɶf }ľb^BP(d%1)Kys 
\*1q%`_O7#ӏP( <.)%)4$q'O?ƶm۰j*,XSLAggx2t4]8[QgN`_@ SPBS(8F_W>SH˗c޼y |g8Q, 2ђYIXbINUx)|Q/~|=:y}>]߭{mޠl'skWw~`{4c9΋伹 .䲹 I&m$d 55UaZZ/w{*⦅mĶb؆lK)ۖm̶fBP(>#&W?bZ)0iޥ/_ӧ~IΦs0ʳt(**2{]N&gᝬ$|s u[)JG?6$/uqfn;ۀm6a۰Vl3ޥ@ @9rv ix'?gxTeNA_x@%$ԪssИ ea*%wd&c w}93AYzx4#dRF4OV;pZ><8/Γ2vYDAÅ$]_Hr۸Vn3m`md@  bGf .ٳ8v옲Ν;qFx7t*<O^RP* ,,,U(ԧojzx5?2L|uF I3םm6q۸Vn3m`mF"*@ (NYܩbT%⏩ TCO櫖 @ @ @ )@ L@ @ @ L@ @ )@ @ @ )@ @ @ x S @ "0@ @ S @ @ @ )@ @ @ )S @ "0@ @ S @ "0@ @ S@ @ D` @ @@ @ D` @ @L@ @ @ L@ @ @ @@ @ D` @ @@ @ D`@ L@ @ @ L@ @ J___W \]]Msoz4#M3s"uvttDŽM&O'_XSc{=mJ'7`_1ytO:S&1l}\i`ff$/qDbQy0v'LLLX?_c1c{WDb1{'@1'O?x,nkk V(ԔE19>~I{| "䊰(dGxXAT BP(I2200B,t&B.=1Ky;w>VWc<:mVLv̉ ؗ߫| ~r=~O?t{=C}?I8 L++{Ǣ&O_ L0 Wxr`ʿɓ'#.. b$> LÃ)bbbžgP( BcqK[ZZt[pl=~/0YL<P^Յ?ׯ_wѣX`Y@PzPYƖ;B.ig3Qr씼쨎v/Ձet-또Kꫯ_eQl/}1T?raPyv6h/@OjgP(m٬P_.)wz+Lgqĉ"& R_Urư/= 0l߾_?o7|`\rռv\HRk6 8>FAu\7;G'`h1h <y=-sBNM;I><v׉&7ToINN"gϞrtvv>w}ƶmxqnkc\t' b̂30 *Aqc/ okjj[\W;kd>pL? 5uf> q ^tX>|,R^v~:|cޗX )?81=\lO)*:sg3>P`l_%&\'O٩]hc?p )gLJؓ~̸xS?rS7To?*7ySMl3Fßط-IZX5B*YZkx\Vha? ڱx3׿mşZYZūᱯfLΏ-0Љ 622R}o3f׃ KCdyEDEE rH G`擔+ܹsw~ lH<@|;Ă#s(yP'Y"e%6. 
_ȆXVZ(9A4w@KLh,4X(nݍgjY{q نQ<ܬJnɒ%ʌe8|4f(Xwwpb?jٙB('%{leܜ~7$fwExuQ^`5f4-áwpdx Ʀug|2Y3XǨć?[X[78 )BlT(m'\p:1 A|B#ãOghɱ񈋍#l)űxŽv.|c ];68#8,11 ?G#1'e|SwDEG Z=ݡz>?BȾ\=\&E}얝G ¢"yR#?W Â.Ppr y#1) c!q[q ƒPu\N?^ceO"cbɝRp7}|}(޽~ d;gEZ>*O/o9)q}C&sssUWg+ըUX /7] 7X.2Siv>(rr3a2ՕZ";'~(I <\| LU_k{uըDm]=jJSC׷ i e+B9Etw<["t38q6;|lɅN(^[nL*&(2ϟAp9ZKhðSQLiEfv> m[%1#p5Q0Ukc;+ZKA阷xVZ|;>Ǻz7p~txk%y{ sIXY KƺuQGxsZ]sID9c棸~jQ?8W.Ccf8;/eL{mq^WWX!΢E]zcppwPB"))S,Ɯ3xj]&VH(5kŘ7wBjF!]Vضz684)횵0\;B9s 'nח`-}?P{ ك_WP(ZYbrr!:P[[6tQ_ G-}uw-)C?<(_cdm: "9ulE학$օ D:Z[14؍p_D)+zڻ^ 7D%}Yp;a57 n2pݱ"!Vë f!5Vu^7]o^iBr[ :Rӛ}i~ek}A1o<B놚ΩJ O-W*8RSɏrze;:Ge"T:{`G?utAi] ai=6VͰq]l̸k_&Q uzsƎ;\Qn;_\QOn+Ǐwc!5|Ė[pfOlؔ2r7ñ~p,^E'z+!&)SBm˿ ga +"T]_rz2©#XDXhqi8p[|u?gw.EBb~"{z 6?Ù+Pև+sc(zܾ9:+7͋;`p7_G){{ ==?F GPu5/EC]#ڛ;;p GK[5|x`F$dף2 Dc Uȏ -̌AM Rc19%Ôٞ ,xpA>X I˟ rkPW^^Mv-}=(NA\zZz] ;+Sx䠧11 AfeVs &݅.4WJcupZ:?4x}}}nH4BSI#?!ipp FUc;:10LD+Pt"3y9sK㏮^ƥo;9SGЭ!=@#Zʇgmi)J<gG4Pj,G j[}}AMn ے|Xבt'SC`)Q~pBOK]q:5-J> q#ǘ"MNhGWw90R^jB'?x:`o\. ʗԘ5"UU:%4e!' ^ʿ9qŖl].EOG 218c&1g aQ쪧ɨ-F_ jz Y6g:zyarf9 BTz!٧7ǩ֒lYٺ]zEws5ܬ4ϩGmi*RjQ[s%b3ⱋBES+ RBAܴ$Դbu|UZ̚7Sj_DOK-eZQ7UJ~:NH.BbndE+ X ߠ`5w w E D_w+"o^Q=SgLCY?뇝]ir䝥X,lX;&윍wG";qb m$}ε蜾/2۰lP/c;<@r +3Z7çNSXr'sў,h%=Ų-qI|cf Ẃ3ị,g6,y̬TH&bo|)ʼnna3unس[\:zdvoŖذaX}/:Fb ,ɏ K&a8u >\ KQ`j{N4 \<0aTdaq; ǰdjLMNjd?O9qgk/%NBOY8wy#`@b.-e0w_s7nwŧ;yv K܌_~}ۗcda|Xu3<Õw޽vcgqz"۠sN|UtWՠq5{.z g>ĔYIH{EcWYU݅[wW_"؎1uhVlˁS]'o.\%]jjr2ןf/+EK8s$N>E^r:^[f`~fw.o+'7"'GY`*V>{Vb׾'m;7!8;N\ǵchvFi!{ c`Nh9c_m>iY}OΆ˛X`J1-OcwH"t$B;Z樜\Z\`k,[ jZĢvCk""SӀEtL2CqW2rj0<N%w ^Z+QUf@+ Em@B:m #Hhh]RF`oBPH<f0 (Mƒblb{5QȯkAkqRYIc[ $YXtQ]3QQ?tOA~z" :@zf= Vw*zQ(I9,?5pLPoF} vCHF jJRRXi6N+0=5UM*AEs?AdMto`,zILJQׅTuSP\7-M6 G !U ~TVWg*Ȟ9AYC/kҺltM޿nEZ;ߜv||,Z/8:t #$ a9lYԊ>:) 6akєBv /D\L.9_Ae8a:\`F#"ϟDIrG*ه9+q0//ش$/СJal:~;9NxZbJֈY3?ҮVهOʴ `5<~~ǎ5kxXBr4x"XY"Օh o |0b3ܽC`;pqlٲ/?on{v`8yroۍK4kkFb˸MM8nB*W1}p.68'a=8>=/-޽(-eʐ`޾}nKQ#cc}[鿪7cڵkIOFVVnݪKdr^7ξ/_@v\\GZq "җx}?^yW֗7_;,zWOn@vZ>F&+vgW#-3=phǯ{fkH`^FWLoj|dbqhؾO}?<'2]XgYקgx$ 2T6:?>a(Fym#ʒ`fQ.QhDkmm߆X<0Hbo!xa:Ipu#1A 
5;XN׉btV&+-14ԇX{DR-MlB9 MH`f@WKYy&&HlC{},'Mz68mӦ _ɾMF]qM-Q;$j:tMCC[1e43pFe[*3Bu Bg0bM)2;^S[hnF5L[ͨVf$[胊^5HGog jkK/ue3z;g4Ӏ>.}G!ps/JH :PSWN]/[@vrƋd>VWNQ=-uYH֠֎){(1faNuI`6]c1ZTI<`vp _f5ScS-2HTVÄkf&>ڒE)'磥~)h"adMK-CgG;t](Lf!{PԶt74^H*FmF \c\k!%F$>ʜ3pK?w pSbX~ |[,Cvqq=_/?ßu WǬؾe;S񢉆o\ L>7滈CqpXn=:Wl™+5Efz_ӁIƎb"0لɩxkڽ{wldS=.iD?'t{{7BvtX vxE066CFFEA2CZg͘2ocջp~Tyd6l X#Vdڑ5$kɟ LDK:ȋ>".tB`ǎ< /xي Nos< ’Mg#ȋs/hҋZI3P؉ݣtJ=_ڄo^"]cA|)|}sE`~H'1Xބ776wLT7Х8wb+ZU30-|7<~/ށ*܍ɎEwPԛY`{eۋ_ɡx1::Jbb?ϟGppAup嫿ɃXd-{ o-}G6mpQ\?mSpp~tނ|s둝c`epiT[omw?7ѱ8n\-Ms߽ ]MǷ>(P(3ZPWYƆFЬiBO{I}#)HrߑV " wMkFt7 Y1C#d#C {8}uѿ$VRq {QKb1~NvW$ ̺ XZ!e"/uuF1NTć*;jB~-jJ֨QD"Vl :uaaf2B;8x)(v.>h@zJH:–iF% 8zah&!e6,\0 a!(oӡ2{2l<_V$`p!ˁbJS~R(ȆAI!܆\^'Qׁ /;Lz uIbwtFCgs*1ӊ@G F=KEb377 A^r"lE*'5k3\5pEWS++RG-JP]tW;llAˣz~ʬZZV>Zs?:154MH853YpFrq3Fzem&Y!:{+JoI/jQ8Z u نpTfG{[ڨpuBrbSٌH8e` 'Y+,E(-B;"*1ka [_Q_ؔ| 6S 'idlf1"&)eu,%؏ʔ 8gyikAAL5nhnE;ZLDp\f "Fjhn2^;RKt*lD :hܐSX UpEAS M`|2TwS& (Oo0<z&U`^ ,fC OP&/XSI0`î]BTku]5d|}IiO>ۃ%cjw#[#P"LH>͝y #S>(?;pHGuؾm9pxرn.&EZC1Mۅϝl>:e\>$0Pf0pi,+w(f/YEB_3N/ O'RnXz6݉\^DxT6vJacs;Vg ,r^E~lz VSc{V!\K7q8g5r0Cٷ.~c]=tl6TO¿jzLV]J̢YK噢c[!vN!z:ebFw.Yc]8bQ7m8}{wnQ^"r9Z6;B|3\vU خH-g}0,Lа`;?0[>'_]b(dx5N%~4\O 7Sߴ}ǯ)όL-hk@ksܔs7'Sʿwt<hFOWzxvGWWlH.nBowt}$>cH I&.k45@G'3.a:]/+sakaI&MFSy ,M'90\6կA6H&^+##X <합z^tU>MHFC6=#(QfITl@hvHEgDmK6L Sb| D%^EQEg; Ahn L AHPTo^ȩn!_uP>=ʦM^?ե>!gGkѡȯ@[m._D16 HʆZ814mBMgb >^(@I\^dFz ! U0G)yR,X9g(HlFyN4 )q(N[xtW;1cғPRՄP;$3[\:SZR'KSlKQZg SغQz*zSҌx$~ZBH`BL!x;ZD。.n7"}?OS #;YO:,*y<޲fk48jJL o.ў֞xsFtg{1{>9)$-}l~ص{}M}}ؿw~Ed b(/>Ii xƦ<%*_ñGo(LGv\>ƮscMk:~ս_~u5Vtg$kJ֖svϔД9A 6Jv>By l PNܛ;wt 0LaA53Se,A 'IF!ʮ?=ow AW3km 7[-+x|B^aGi"\?N@c4㵉ȁowl7cù~$ʬl+*Kܯ䡤k,0I,CE^,_ҲշR̢ZrݠPvR6$QPhNvpv.Kv6v_@04_+wvL[}A)R>~AykLm0RmGcS5v[8/Z.Dm^~1X^?11 ){%bw?mGozBͯ J]x:^{eӟM6ڪ{E60vD~ |4z.Î#-̉.ziɏ};~=_^+. 
Q( ךO8ώ];p0ny[ {~hFj`5t kR;z5;뗣7nN,#S8F3?Oc Q &q}XMm3!(nD{{#caiᾥhu2\nN?D:3Q{o|IzmTOt`$ddF18Uh@8Ci_w՛mƁ_vN¶b*'?,.d<{~3?Jc_v|n4p|p:?cYCs_пH{6q=:hlg{\8_Q?#If_ؚ|vq/kD֊Y;d-ɚR6GFᵳtՌ^Yr*1zwhN;=w|,S?Î3<X mV>zǨms6QQ0?< Hk?O'q<?qBYX?z5kDfkH֒)Y[.L[{{HTgD9 :>e?oվG|Ǐ_ө?y~yI{f+ONUmHc{y7xͶh`Z5V7}~BP( BP(|vڎ5k=|cҌ!kD֊Y;Tg/Y[*Su< Y'XAV7SNC\؎ZP( BP(>dmޘ3e Z5!kCֈY3vd ^*ߪd7jzZV o?49s_(sBP( BP(|k>~Y &dm"kFU\-y)N^GˊlV۶mw;w-2V BP( BKU߱cڏ5 kAք Y#Vdxd2rY}ɪTLZ<5* BP( gSuk@UT6 Dd*< BP( BTJ}aiH\^M_IENDB`docker-1.10.3/docs/installation/images/ec2_instance_details.png000066400000000000000000005567021267010174400245320ustar00rootroot00000000000000PNG  IHDR/}9݉IDATxGYwwE݇F`$ $ $!d_݃w |{O32$ps~ִWu뷪ɓC}W^{_֭[bX,b1=AG%f b ˖-[cǎo\[ww˗bX,bX q a!1pdZDZDXV^?|ɏ~ퟨ?~FbX,bK+ CDF$F21"0 p% ;PGf͚5[jwm,bX,BO+ AKD`$ȋ).L,[Gڈ~͍-wy'R}F+CU;S;bX,bG'/=>B/1ƑZ 15C*e Z7 r"[sFکybX,bX,BAP;~BO[m} ʆZk#JU>jÕjUjZgwMX,bX, =>A_(o?!z D{-͘?f{>}8\jes%_ߦmO]e[&oٖIز-/5cЖ!z B_4c F?5}R`X,]v,ECK' t8ʟ>gLݘf˶l˶l˶d|Ne[%!k˶Eflz}^B?H1G^'BԅbX,bX!z B_qɋ~2U6ޅbX,bC# =%/7\_~r6G ;;mٖ/fMlٖXe~Z,Akb۰I!zWyQ oT|2_σm2f#CrŃj[r1/scߦ*۲-۲-7)D+G^/_=ׯce܅$#3 )9l夡(EHBrZ2~22/}lݛ-I.%x~, ۶lYڲmæBO): "^wHNFNVGꞭcܯ _}[`He$spQDEE?/4c{^nm&b9/EѾ:t@ii)Q^^ TVV:twP+ʝu6j)-6,N+w_|eonǽ-²LwGSЩS-UPUC˱r=Y{,e<6*;Ǭϡcva:m+ܹCuu=~\ǠisuuKPRiq:E<<ׄ'zu=}Rt(Ut JHFOSlg:sz w\\nt/Quk:N;ycPmھ~F{HWμn[z?~7C-G%AMzzCS3}~&{ o*-⤚va֡@nV*2߈6|#9s:g]O҈*7A6A]/Vxx8e=8bIǖm\-3HaC)-0Neȋ/6I/|CXv2 %#ЯhH^XCLi"CizRSS.KkFDD^( N-Ro)9 +Z6W)0%pYXO^g 5տ apSbLeٸfKKkf;ӆij =ʁA4/*|BRz]s)ߴFˋ2IGAN2ѳ$ Sc}*rfᗀ%̀[㍫}e(kn#JߨcNMֲlOC#bJ8OMˬ]FKKMQ˥:%'#)))P"3=I)j~cڳ랮=)!9+WD$:G8-9\\&4Lj*R൬3XӚ&i[er˶+Y .T/bEL֕j/!aGD2( RT%Y!2|@%RӥN/ .o/Xcf\yܯȊHRCHSZjtݺusn9ZnR/։ms\'خtf[nFWmGm@mڕ E`L$-Yqe:;'52 ߵ[yfD^K/g anC}@29!<1AA+SFj1Ő_>akU37(7^^:XM.eqe?.-ѽ8xz܏ʄbf_(5sVyԯ !^=7͛~PZs݉'zO=n=UeEHHJ HHFuaXt}x#wc֨a(+j9T$[k0{_tG H2|CZ6+9YrPSd11Ib~YY:]| 3/ >aIVAuuKHi.A"C-؆lG/*~wn%6he6B0>*8jE-[I a9~KrDн;z聞={: z)jQua::x*r̀ 7 09'0j;Zz/4d9'P ,mAYQޣ3Nzg/u{(KCha0/]Lt x6$ҥiC}¶qdbJWjN5uKqmVu٦~OE`D<i3-.#bfFL2O%%%w)Qح%f8sۗq뾅7We% ;# *cc5}n sG34LjKi>999uXZÇ?#0gqil~nTgg2G2qU 
Ƒ]kp 8r=⽨(DBBR9ZSy#8t`/^o F#!1#|'%%tߺsmN:痌SOtO|fRrNO47]J_0~eިco1`ʵ1yr-]wƎ $'a#ĤqPSOW6z A}:#%s>l—m7>2t~Zy؅N%0@@$uEŐ"ɖt53&"~3GIaڳ;`/w|NBTJPZԲ.v0+r5DlJ=<̐޽{O>Gѷ/q}GH  $Swc-Yw#ցMbNmC8E,?$F*|:tL7OJoolҏi63Dts A@^%XN[ӎ`ݥ-f>)"/:[eGg-%r\wW&{ ({ %F׹3_S^‰'pQ<8]N >X'.ľ Ą)u8p]9 ѱqG\\O`!&&4XJ płǿb'Qw/NZyLEAa'**ʐT/FWэ?HORN@AQ@~1l մWJDfV>:v0qJ6t8jˑ6iԌ((+WKu%rP7X-ccceۑD¶ya[J{rLu)/'bW?VǙtC( 19qu(W`fWb0g(d!/"btM^iX.RS. 8t-/ ,y7:͝ EziLIESbDZ$0c0edM)! =TQJz \#z ~sm'Y3aL tm K~П/8v'Xv#dR\l  :ݗ<'6K=iyu3vnhy21Pqqֹ?0@QwΣpYͬ #"c͍|dtţ-LȋxwNqnf= oArB#"#\D{*]qv[6OkǼA%$˅BAMSAy%/]QEYqwKXLZ\$":<S?#'O8t8 #% &rv<Mo GpᭇoO߉aؿ}W``=?w1j:@ZN#;b5b\C 4^`։B;3#Ʌ=矧fܷ7zMZǟy> y%tvjسk\:i=culٰ?YPk0Z -{w,x[vl=s!-7 ܳǖ5oc=iXn:O?݂ yw۱]x祇1%h/r?%Өf/A??jlˀ$m/[- An[I!>.yyܴpn?7߬μ=a*j$%KnKcQS2?30`Zub] xzŏb>9}RXkQɂSt ;H D$CH: ߅cmgIĤChg͌:nU--  p ^: [L <)/:ctsbWOoJoFr(R '% fF0єdiSDZT; d cfbt6F 7ri`ޗ}QsɋA 64ǝQ)/ERʋI{P\$%HIof>22R>M( f'3-0HƉ,(as_;, 5MIxL2wG^:@a !,:%}4x^,N|\4zww'vo oſO~3Dx;>Þ={wAl6|5 ;pq:;nĊW)Q _ 6@u ,X>uO@tl"G}GqBeߋ╧ՃcظmYk3mƩ{q˄Xa;Jh^|%&=u; Gޅ8U/>n^㹻ao䩓x)su _ g;ЮU7f$Un$0GG_1 w y M^bO^zyɋaoC^jK@Ք?R^y1Ƽ0'R'4PoSF^GmR'Rnf^HQ hq4 V-/}.ByIVEX Dm;۰N&AP;Sq Ǜ9$qi1EwcI\Pb(0 fdD`$3"uί2&׏cJHHK!.O// h萦(E'a|˘(ȌW^ZE&_h@/#U\=l`j$DE<nMczj%I{pwK@`](/ԝ-./G.a-Ua8t>uWϸ FD0 nܹs]hL&۲-[ef;}Ɲ *D 3A9;Ox/"2).w?#"961>DS{RjQe}F ywc=:#w1K܎1/l\buO @XTv}қS;y66ց[+0ȸƻ֯d[3V{=u:3/P7뢖:ޙͼz0c^ 4x#ՆD0˲d=#~C?qa\V.yQۢHPKmO[83_}2Mb//f06yψ (//h~'堖L¤V̙S8Б*x>c?eUm?ێqԟ9Kg\j㧰'Qо{pڌv)Q(3o>"#&Lx$t۷oqtr~뮶׶[Q{Y_F5GeӰ >n܎GNMo% p +e~Rm/ބ+кEsdWn?<[@y~ZDy9˫̱m^10pߝKsl'xزs;=_x޶?^'bs1ԑP@k;-LȲĪWr']) 1]kG&׈s4Ek9Qr5ڥ(yQ\Flb!PĴ c'֋y͞=SNQҍ̛e[&|˒}qKv6ws{² .c#Gbd0rWRGxIPzx;"BQq~X~S5ARa㡌0bf2~]$u58ɣl3{k.aקt-/[(Kv?*eyA%+R"3q{wtHEb$X70] d 4ƽJA음@^g ěهiC@ g̙Tq|˦}A 3FKwяͺp.cҭ}xg 7\_Dˆ;KS*":2h. 
4̼1kphdJJNca萡zeXe4[<<)Y9{-tYs8sXy_A>g؁xɇ/cǡ8wbX0LS܍'^^C'Ε!lw,]OdXSv ]6amdd┪iYu8&eam՘xvtL^Ji_\xeIX㓖<7c#s.7`K*\7]͛>։+Ƽ۞ĺ;._'#5&Q/y]%gX2lU{{vĺWk#RǸYaر}'v؂7NUx÷л=~#"sҭxزm_?bȔGuz3{3`3snڵ ֽ[&AJt,.݂;c j?_߁5j[m|ELD$,:eeS^n @dc٘9cfhfΜWFlT )qiJŨaވRsx̚;׎DDq Bi fe[&lw j>NȻNj|d7̷z/TLDr䅁ƛXs9E|3twȴ  %%DՊ/Q4mf=(Feㅝl Y8㱾j9<x˜K\+q ˏ/sd}I3/;_EGb5FTaxBN{=t7Aq0^R":Wdžk_w=] ^P0zc(%l^~;!rsϺޔcȊBa>e#{l$/m 7 )i̤({`pHIMSǨvmʒ֍6!+@מѭvӤm/V֭:oo}lMR GXVhZ!2*R}oQV-j<n<:Y,C^jL}ɑ24KA!1z[@Ӧm#v8 ReN|YR$Xjz,Kq'#[ʜy]"9F7~p=ҁH=Jg|+#t\"&cD\>]"/Y/K+مL$FQ@$%K6v2j rB$E(JLV2Tמnw)?X]P~`d=͐xRb܏79|ܸ\:/4_Jiԗ"Y&f\M_|3hyaPӔDDF!51J~}~giA{3=rS|R_%,L  Fix:}+ SDjoGdZ/X/J/͓\)ZO%躛m;_vV>pmGan$w溲H݆Ѯir(aҖg`x s9 s&D2lKcuX,7 (. ԐlfpYDH1" <(p;1oH"R7v͌[$"TRXTXAqjYٟQ'uè[uDFd@8u:9#"w=ƻ89"/m'SK>?<"1,|֕89o̬)ZBЏ:ȵSys hd3 Z}a]Y4^ʌ 11ߦovd[EZ".F}f{K~My Ke4MND-KkEk TV&+9 f} K~C:3#!6 '$A6=loe 3/Җms/sbX(oΐVR^  ;S@\'-%ZoCd JLiљsY}"B-.nD` =>A}Cʈgz3.y1""#72Zv8w6a ,.2f&w d r.)倔3r?WxW3W9lS$h d@yy733<%Є Fˋ7)ኰ0UfWp$DAVVAUDU9)5yZ&7~:s^n۲uúe[&l̸0Q0eEPyOy1RppP* pS\(6Pg_ $w=4/E=M11E!2pP.r$`T i`2\w]i! c).%nt!*",N(%r=id[m)rHlC{sEf&TTJFƃ"gʋ.:])1.g ڡt/(/5Z^> xbX,D0X#1=3:F܁& dylBAoNǀ}cy.elծR'́`A$"AyC}$DDWd.2[JYoݝS ʉ.{\wuڳhFc+ {3" ̾t2~ԷM3S]FA!5X,vȹ4΅ba8K ǀO^㮮GBΤs:x^@3Aƺc.&)&\/Hw;¨l9ncn-%ul%Zg<U`Q SE@FH37'fe>R1RT ,1G..q[wFջލ/Qxmٖm/r{ز- _ev!D2C/4\DՕt0.[#2(0ydSlv3o=<ջQz~%.e nm!*YFΫ^B|f^BMk4X,-Z\l8P2ۆ6vh,K4oޜ|K|X,;*۲--6$"0MB׿5.e~_]B^;bX,F˴ip)3u/غX,b4Z^N< bX,b4Z^?{w8zqwv]mY`[P('BI2>>(yzvy^>sn>@wH"H$D"HE"H$D"HE"H$D"H"H$D"HSRxH$D"H$)H$D"H$)H$D"H$^$D"H$D /D"H$D /D"H$D"D"H$D")H$D"H$)H$D"H$^ĐϏ_I0WD>߆D"CˈkBO/%HHE^:맙1ښռ9̟ry7s ΙK -')6Z_p۹TIl&Yq3 EA0 o6h:m(h1}7 a O# 8{a6߄Cж 2v,V 1jԣ;=KZ [Z2nFq\[φBôhW^23jBOIU`ךhRaL"n2jd\\Bp1=4árF7i֠!@=~l[1n[Q:NƦTКDx Mh3n C،I#N_;L3LW1y||C6#Kr'5旷֍TPjlѭiP]`0 vtZ3:V"} T0Z]LD p}?`ZsΌGuk-(g ̚+lM{L@b\^mFuLqܷ3w)zzjloZ:}vX^ކŨcܱ^y,|ޝAL81u!6g/KZτAD>-! 
Z0j8<^Z+[ff;ț0u:tPWA]: Cퟠ'*߆i̓ҷ5*~k`UByj?>~Z != 0Ak4ZA9(%tDC0>+.({@`o\Vj Zn~zܞuN]4d[ԴwQOqEܦ Z(*)wl:Gc['ϫ(lb~q,=+Jl6C3Z +*J)..wx8Ĉn*|9knv\nTSQʶ˞֎r+{c3usiHQ[ &yP"翿MkkqPťN&nKUm/?ya` ͒ o-*On^?nj0R*c_\Sp m{O4n僜LWP0dsf:GzN/7*0Fa~uWΑq:qz|t&3oiMXY}ܫaڗ,ix/%Ü$vWW%MV -uCybz[w0خay/ .p0[fl+)2T9/ɰե9}ݢ>I!ԈϡXK#~˝U0f[J%*+K.z)Lз`A .^;((Y7VLpa?w |pwu7#q_a۽2ZfzDbRGY},Q=TO0X8B)I]f7nwl1Nʉ͓s$ceG3|j ,~^Z#FF 'SxXp9e#'U6 TgиE9{8H:2yAIbLop>_<|a,+g%wXE&i?{I Tr._ɵnn W mW8u5vp QνB Bl/p"uiKo[,*6{ts_Q} oVp)2b{4JG|!q"urz=,*f QNA>cAΩ+V~{<ੌ'#鯍Bj^|&^FW?{x?raɃ,MȵN-ވ"s (ם N'vxtӱ])B5LRQ5Ee0ExifxǍ0É$(̀T԰d".MICjs}(> FC=W`T@6:lP=+g03 .3tjsБ K09t(VFa&T/e*ꮀЕoAM"J hOepǠnta7 Ao1 vX.0_@h( {B𱱪aye$N}ri _p㓣zT$c7jYYmSr^ޭk?6ϢEܻOX>̧O(Mڝ8T+fy<~r5 ɋ@b>9fODX =K41y;JRa=.9C\:w ͒r &tepa>EqC/ys/5ԇ\ k<{3rgEͣʫ~7ns䷳2FD݋ k$ 1Mk83+$mۜ|DvA7/Z7p.);v[i]]E]6nf$q a6eÜw)ru)x6^w{(b3CJ*7xfpj#=2?U26ʯ3¡ѝT/qf1UsJ7m+nF3w(1#g?/]4k)XalIˇmwh)yզMN7 rykN2ee\-]aw'#CKYnT nP![jwQ,l܁_JM)2111GSlX YhWih|N70٘lfoLW7')_gLܘ#xE ٬<&fPqy/7Q*I&o9xlFKzq(֝ E6,[I0MRd\^`la3w{nHJ%3em~ 9^^S %`v&I/D35ϓFbK|eJfqOny'Y1']3EW(7)x=]8䵛m1@HǕ %Z~ e7 Lp; /-+mo!BQh+ z`v%< ;y"Dh>ePw."T7uAw^P+? ]ؾ^za:{τ؜Ghzl3pzC͏;FBPS 0BD z#4 #h=zxyUx ٠LB_4uxKg]>>_e{<~o / ?.K 5=~8 9,&lF3\LxDg.\o.Dݧm@Il]fZ 'UxpT2˳=DlIڵrnLT>:\f;rgt-H܍tx0'QXwxz15Ø4QJq]*5_9Ž2O G1\κ͆ZsԒŇʬϞ8|7e6c*]Vn~t{EΦ 4Q2yJ [V92AdZ [ةnƊsu!V<"SɃy biCKss.TX%ò%~!dsNLpHtbuG@0DW5BF3@t<' ?Fj:Y2N9t˽erJ=#u w81G,=te*i]^Nz?Mq^=EVi0019%*Z@ {1-nZ?z4ђ N qȞ \]G]̻DN4Oc"N_7M9JckKW*9S h_-ؓ9x/IΧ k $ˉҐ]OոhZUGKy#I /5Z>tE\]8~' sdwr\H% b66P|NnѪYvz?0rr?hz|Ӷr2z)YvsrV>\Ss0z!~5 pVVa`דTxL~ڒ3d "φ9xc7rcF?fqƌ`E5ct*8V:GOy?cMn ۯh9œVa_J&9wZ$l>>RVq:@W6 a6 J;T ~>O SPK?Yh(퀦_A}>t_ul“_,j>V[=¯ WvD tʉφ0K N@#ZL{v#4:sa6tP} 5u0Q ltBuh.#>E=_C5.Vˡo@kPu M; Kϡ8Y yJr\r4೓DVC;ğ=;rA"O'8iz)s ziIʽR"e^|Bsmp! 
۝\z@K{?"k<ߺ P؏1IemyG?gn ms9 =]]Tve1D Zs&ߡkAG_cuU$G_gh6W)}36ǸsiO+y^0>Jd# )R IHGR mצkuv\WAnN>{B3ANji#Mf56Dv I1=loN5&k ~q#~6Q;X$AB"^{(5fTj~v_*{ތyqFy wuR6m¨,%C[^^2:Ia11m/~3Y'M+^OrQ/1zNĶݴNEiGJpXLdew< |ֶLVq?v H^dLA٨RR>f`rG1<Ȑz$1RZ7Psp u0PYG$ ~ϵ F*%E#&NFQl(A^ xΝ>?΍u~}A-qwX"}z%PU % /$Ѭp)JF*6ƶiTpsi#w;yإ!NcBɃmf<#v|[m\T p|r{2Bު&?w0ȍ>fDzՉqyˀf, \L鋄UGU|U=˯nuQK̍ݜ,2Βh5!~=\ Յfh 1?;k},X\G-$ 8\$Af@dLv*x-}@ۍB%“8%K.DVǯ?롦o7 2bCqlF%ӥN;dQ#3Qƥu. ECMp]k僒VyJ ׻v|Ҟ3|gx%`LRQD%=+.@hg}Y#}>C}~cuKyn*CşQf#'WLS)3D9Q^2 #@mIT1 Lqitz$+X~ e8` !Ճ8 ڷaj,crs*Ɠuh5t=o4P: MwUԽoBk.حsZCPae[@>BL d!Կ m8psE4@Տ)"&Ӡа@{ehzچ*ۖ|/P"w T%83h~1쉐i'9s=ܜTN}rV21K&Z8~ߍQ.{lk;ws-G5"K G3S(4Ֆ7_X !!?9ALr5*y{^;·'SY]g^4`):/P_[IM_Y.~qϙjڭt>WoS?J潫dT4Q˱㗘X28z:B^U E9xe2/\ X H?ye4ep<-)'%fV2j̇WRϳܼ_bkI˕9< r8=;^Ì֜z` Esa g2>m&`3 ?de_gڵވg_ܪ󳠚OCRLr0e?97ɖAS(`#3w׳T}6闍zsח^q038;iJrV'7&O@ J`_0o%pb/vvPvE[qtl5| \)_ls,MšAd1Nrd*9Gbg*: ϰ?m0)t&>9z4ā!Idx͉]ͽ~獰/yn/br{W˔ϯH <, m)C{nDL⸑.nJJ9l$>z0y (.!eD;kFDn29tu[40^Mf.*KDa|\:p!&o&8tg!HOK%5%fziT)x%53RFbB" dTnBh:HM&-9~j|Jek?0Ntd=e`щeE,Xl(\@c}7.N^/V i fdMw(MciXg)ѣ42atR8IAZu]*G;(. F)(fNgCPLfFF&)kR$rrIJejCK9Tw ti$--njWLa 6O?'(0y%bq#&ŋ.c` "~| /F[&vOEF "&ϯE0ccyX <."o@K|EG(Ҧd !wBPd\Z327aB ϷW8?~}X^Kj5hr*[|O77(pݹٲ:#gS١wnn,zo  |gQeqÈaÇ=O"{Gcqm!ح/ŦºZC!LvWFg`H/S' ;㦥X%>ѵE ''_ pGj^#g@kpDj7( q8^/#{Jgaq N3=@ݏC9=2m 5X؜~ @;ĮW5€gcp)II5zY{*b^zY0a<}:Baf'ZOxqU}y܁C!6o094N?@F_2?싈?<o5 -Iq98N^a1uA7=Á%,\wύ^?a! 
}B %i7x!yY)={ }qvr"oX܄1,;_ n ?x E>c>ksns@w/2~Ѿ (w #XANg:aY ;V 1Y2|G 0=LVE o=Ҫ?˽%Zx^ Ž!ʼn eck3MpBoK>s{;Ϝϙlvf6cU~a, C7?V0 !&‹ :n6Z椑Niy-V\lOOנ)_" هa*nbOw$K 5R"3d|r / bd2#I2K  "   ‹   "   Ëh|([V+#˲:L&>J k 3G3 ( b=144'ؖbW‹mtwwelc$I}{\gۗRp™?iKɟ+gŧ`lȗd2)ꋿkȲ6W^xQv3GROl?ss_=!>GQbnEAAAAAA^AAAEAAAAAADxAAAAAA^AAADxAAAAAA^>AAADx1v5RMfz:))deQG{kd_Ajj i/!PYJvV>/1}QICs2  5Ub"|Bw+UTWWTT54cC&4vy2PAAS3ŏb =𰕧gׅ4 fo~΍7yW'q ֯\D_굱ʐ¯"8wt+sF>rx2w:Nzߑu[M5{aIl{3 ĥ$7op7=A&K[;)QQD 558 C|9Kʍ{aN`(@{eÂS  KKi2|qz1izpϰaD<=Ԩw% sN *Bc5h<JQ:M{gs5;9 C _VnC?& = [#As6‡(ڢֲg t0d4!IVTPօ ~,,`jlvKეxkKvõcKsvhF-===EgG#I ;qN1Sjʄ|@i ڻh|LoX'˟  bآ09h5,C`Z|lbfr2gv|wnl 3!X8rFת/Sggp^∏'q~)8(-V^me#.4Z i7ϳ-!IXc%1Q;pzC:4B7<,l5CC)aKBqa~HH~GORQWͥc{jxvӺ.*2+~-:{vVwα->v½b̼ӻع-W,]_(n^m[%>62(“WIͩvg8;;4u/ovfɃ+$nB\p.#ǗH_G^߸m( {$$uk"7ҟ!3Qľ]{q{e /2/~dD&gزc7RKAE89:6})O+2`Hk^R]EWv?lDDGXD8h=YM YG$|l75!aass/l6R+A჆zkt$I5X&(og;cH쉙oykaú פY0+/QPTNOPxh] !<,ow-z 60sЩLϏB|Qt}FNX'Z-:o<=Y8H}^͋|pUZ{zI끻/aӪ7m -<w='~A[~D"^h}l CO$f5P_wQJDX:mLN)[w;}uhܽ '4)z>29e:< S#mRR>#Axv*0Gmݪ{יeϜDRsPYPmZ;h$/=n>v^>Gܱh"ȳl愸8bb 4GK2~xN`U~nϦ=Y*/upvvat>#1A; ŁiSviCHB-#f?@6KTN,=@wg p#xRkG!&pp%{4PQ©h]]Hxى^'3IE /ط>'dHLɖ3wnl< s`$V3E'S94=ihm#CF_^:/- [LH,֗e t$ A̉An1/?e~G;"N.Ar%zo*л?9:\Ere\:U Vs8nU7# !‹l5RY;>ZNGo8b'_,fNéHLՓ헟2d`N;օ)1tIwt.,q#2,1]%pP8Q/`^VE披o(`a@?] 1mёEc%L:ryu#OC_Hёqя`o3Qj&9a1*{=(2C-/X clAؐlޗ,i((w\5䓗* FK2!z쿞Co&ۻquaƊt\ESƪz}fSeE0.nlRȀdy9q}:-Ȓ\=s*$Kfh\GeOjJ7}AȻvo{&ǜ{^A^GĄ YY8;8-g;"m^;@T$6aH4@82m_^04p|yj/אtt)NnIz΀ʺF^Ĺ{03<ԌI:ҟ*Ƒma?UQ7O^N} tsnqte TPF?'  
|5Ȓ>).xʽuR}и~jb?Ux)pTi2yԜQ;6v!E- N,ΛRJM&.\Xp2_bꥴ\.[7\Uvcʭt+|Ԑ6f*3loG$dFVсCg任 +}2uۓ <&F pr?v.aέ]}qRE|qx r*^tYywpW98s1oH-MoBJ@UaN,ѕ rhB*gL1kb9suVK%s|[KaDO[#/J yp6={AD-~^v¢G1)aKݙi6KNOO"N{C3ք8R{1mo< @uS Fc9DGGs?w3WEعNN~3)d> f!͕={P% I2SϱuwVA9u?:;|gećI\[7_q|Iea[xYpC XX!`\Hx&58X; r\_ƸMQa r"&G_/1O"l" lK;Fg͘'_^pT%ytxyrpxk{n-^̩qGEp4) (7ɂ̙<a;,^Gn'`yLO7ۓ4r:._)UL[zËlO3Nzb͚FH7$à,# N# a^LY) 5 /+Fjaݢ8{q<֖mRMtr*1K}dZN<(F]&]1g!Wcqran#vNlV璺2' ?=G_, `x=m{{> tc#Grr?vKλXJ=VRc>ƯdQ^|Veܒ4y#P j7By~6t<4^NV|NEu3==4V?']$GLoKGgps$ j-?=̥ẴDa%KvN̩]f.'q-j}y4SCu%YU6!}Ȁ{^=]-0W/TJ#! S"aۏB\ZK% JJk顽'9$ƑVo@Aߝ/XjY<{O%>h]C/0/oYɛ0F(\_[gQ3̚ۏ]ʼnE(oΚ^: Oq&x:x2XIi,,e.G}EXU<`Z!j2v1+d8GybEQ@2uq&\Zq_ /qX!~+$?>^ N0΅SsdBL2=6υS“mȒUb1 ʝ DxαP?coȒ?ax{0?)eE͸e XүC5ƉS1j 8;83cTx ጋZKałK^]_ WJ:A' EvG˂{H4t3puq{RR*ބG'&/˷ȭ7S̬P*7?6>AJF:)A9r:> Wm!a:is‡,{W=VݴǤܹѺ.#`d QSrdt*] ٻb6N˽_^nzT_U}'IwrzϷzQi#n㱷S3gNR23It:¼'^lA݋wV1yY<7vkgfoi1*/.H)lsǖ|: w=cDwf?SHOhG^2m@$@r=n?;o#3v֗V6Q*8;}2JQ´ =Non;FJzwngf7Zv*@Qd^d^W&0#W QfpTs6ʬS?~C{Y8}.*<`g]072-F3虹<6dRd+%X=gZWcme)CR^cu1Q}Vshr>cY+m\5ۗ&Ͼ /GX(q֖ei Z<|X>Գ(id*o 7#΋7dX#yd /sw"*&w(t=cx4=lhfYȟ zxN'NN9ܦ~%Wq2+iɬZ15jw,DjQ2mйm6cM[9 ;?Wv&2)o\YgpWX߼[HXZ-9I,fu: uFg9m+JJk<+)OgEy:tя*QUK(|^II$:(**,DWk5EŴG掉J 8- mzg) v5QRRBaq1/Am8/ǩ>΢y%}*JrIIIyU3nS]߆[g%EǬ`#YM־$/'R2()w4j[iRr2ILJ_0hq-νQUZVAkg/F$*rlVZz:{K5WE}?=-oI*zz(~՟4ʫG[M};nJ 4#zFcrsYAAyӘՊld2a6[J֕$$Y|dږ+<8Gr7E9Izsne%Kչb(|,KX-[%dEɶ|w֗m>{YeݶZI>^eGВct Ɍ۱)QXȲmNwG{sgu-Wxbכ?o<$Yy|j>]_cYmی dۯ}gW" /_Xd7V M9KMH`xhqi=oEAADxɺ[xRJ[mw2{dƅGs-\G2!AAAE2clB,1ov[H2   ‹   Ÿ-HfCemA>BAEe^ 4hBL$IoKew؟ǚoױi5Sr)yP\ߗd꣮%}&wq-jۆRNCy%=DC}-}f~s7o]kS/奵d:Ksn!  co UX>HsJLAğbe- (Tq=oO{7u`2tP 0@*|BtRr?on߽ǃuJ-:29g ^"lEC7(  Uw|m&Frej%SʥW0Vf?jF%7PKK mܾ=7aV.%gckZn@atn>f+- c)WVas(DnF 5$Cai)We&yvDUk&C+e5m@_Mw~πyu]DxɻiQp޳i3gZɗ2+M7`j[4)WֵH8/+#׳[.@i7!;)j2iknPr.%/b3ǭ&Y9s nz.FK"y+l]osZxpBˋ*5h $xKQ3+I{w}x.2y޿]"&e#09un ۽V!q5['7|g4 k`'[H 1EC^nOq7%r3Vt .! 
Ụ{(..#;HJRy/GkdjDH\1ï;FgSr޾e/SF7v8N^~!QH(+4}PSϏp&13;ӇxF@){#($G޷S1z̜J* =OcJn{PR d>o1?g`O8Y}Kl/r$>uΏSTFivRJ/k(d %oҨ)NwhKܣg"?a{ {#>j_f:^ᣔ#I@{-7.W/`qޒ,'ɏpx6$jFV@NMfl'.XX,%$'HN2_8ٻn*S+, SP=:VNMf"|* [cW1IESIǼ2j'9\X|;;0H*0\̑SBEi**k gpN yTII7p%%edDz#Hbgs%{hEdJ-{$"Q1z/J3)Jkl8;;.NOۚ+OK)*LEY[:Jz\ґ=~ZwHcv OU&7fcpoR|vNuM"+pDb8ܹ!Ԏ2ؘS~sF^ђGUoޤ<=EEV' RF3pZ!WD&' edTru{H}>}ޡ0Mʒ$4p/OigwuBV(#<6?/rU]_  F23ЀBPE'.,?0NIY9*" #pF@Bk%2=ק2ؼdC7^r" QEu|~yGa<$tpq4d2U( K|s@<"29rs񅪴 J(Fv/5}ɼoHCotTQUl1ܘ\*A(f#Wz#pb:{K F."h+mWS &x[ax uqvܞW\$DrWĭ:R$RO9ٜ!=&L/}6f)(,(DLTv}+$x0]xM'b)U}\WVҰ.33=5%ў(o:vfzN^k)cW/ʋfcʪ*+sP'~zΛ<$b!r(w_"5PP",>lИBhK,`C;dF YöSL*F .; 03"Z?.OP0ȹkMC:}oΨ/L#.&@ *η_НlѐuW~O]Ab=#%wS/3RTL+'\jTdGܕY ch^LG]mt|8qOeaW %%# Le@ H3~w똂nNtpsBWy*RI@KLtV )kAÖD ONBq,zrƛ\@;S/1SQ_3XJ!C*RH #pEwy*!qgS?*u\r2qȾ@FB?!JJo3pVABR+=u5prNeb ,ΉuSAUM!*1"y4aW($Ir A21eEyuq LP$$:?/j+B.! ut9܌QhKĈ~1yteacX'U1SJ$DH_YGq_JGE&rLE],2xt*MdUtqqsX[)1by SrNsn Hx~y8T {ܣ ޷eaebCjI9Y! [f'~2?Ll-i"7X/fTv і2%_MM%Z>ȷn L]0ؘWv3s\DJ^ӖGMդx Tr#Z0M}" Ɩ&( _ޙ'LfN`f)5E8ۻR?-|M@l=+&ֺ-lX>3 ^ t8l?+9OkkDg00Ѕ5lbg6U 6&:[XPZMCY*/wMrBWh!*4?/炭8*B$VrQk` H,k" %csdlL{6RF֎ɔ-ϋϞP#Z< j?&MIu]5~VWy9',5 y36nwq7t3 [#΍2j :9^MO/2ZɏD御7 '"nOst&1;aHU|~ӌ{QM}?J*,e+Kx=/Ds >4"çq$DeUPm;fcBhJ̖Wfik]=$#Dsÿ-[:7zFXT(/B-K#uXY]AuA*fT}f1e g(\)Sp7R m̗Ĺ$ :䶆ԍ}b?YE}U vp%vǍs4S+uݴEc$e8Y3xy&.56)CS*7K4 7įTrkhLԎ7;gsDM͜)#5XfgUQ] [ԤU6 ,R7fvVHRPWGfO ȩc02'O&7Isx s5 -dcdV}XC\pt#9(<4O#uRiJ3֥"+ g&,Qn]`= Y1HUj fw}hdYƈwLuDfP_=c;kX@I]EXKBF-t:@ kmJxxb_>EXhu G:H裯<S'2JO ܑ%5Q<{fK~E-فB?IdN IгtG  DR3 ,bCeCQztzs۾$09.oc'wB{؝/{%QW]S6b]b7RwE4V 4#Pwԃ瀞(9Bo$>teEĊNCQ#Y^G<1/l*WºFrr eIFvKZ> ~~?bWw$J{TAaxۓc;o꺻ɏPWNo[=ʢ[L#]QvA|k poOT`kH pJ-dH50H3b$.֌**HsFX7gO1wK2n3"@`I` : .6ǗXpJCsI=1458Z2-߹b8SK'ҋOܑ9rCWMFJ~OS6Bw&OW$aɴ7biE\:8d6R΅UߝW*kckJJ)؉iJ,In~NKyd:--B+RDQM-~ )&gxpyN5НRhIl;ќ"Y4Ļ ~eXN{D9a'KgOw`K Vf1vdXs$O3]<3`d 4-L L _$$(x[6n |rs8#^-"ubǛ<~~fFDF>yx:=*+`U4{ӱtd VCM A){đ,ZJ͖oNwp'yӀ/L]P(F5jh/~ǩJogf\,*>, ]\>B{J?.fV`'kp5x/zPh_IT6_4wvЮ7*˅[=pԊ[Cp *cis1ha,g!Ck'"2Vٯ 
/9YA^ӗ,)c)Aቨ$u̢8?ԈQ8qpsB#Gt"=_-]*L1[dDxdLqm.q=&Bo;4Z-j}Hff}-W&E%N_#PbOه}V:*S^2Psc'biljc|v_ǡ_/=&.,9FN/М̧Dȋ9ry#?Ykd(InyDžVt>6w6lXM a+>|>@{IA#Ž2Z;xDb7֮-sDOH\GbHB]?ec| C9{gweC 9WhO.RGV -mXGӈY*Eɷ9,RWGfW)OS=8HCعxvɵ=,k9b-e9ZS<14&4~}@@R3SA`;y6GHݛOF;QFrN9--],m?3kvt\l3z(ʉĀ7՟Fn#c9{7v5Gیbm-O FfPOS0|aDLz.m/s=m|NNqL0; [G8xGRTH{3&yGCU! c#؝gKhLiBe 1AX-sM7C"(,Bbι0i֧q{7ByRÓH a4e-b+]#(D,}E^c+cC' M؈ FΪ}jʘ%TN"qzw?k7/mMe/(~˛n$p/%v!DnEJ:g"Z rwg'۳xjauhuW`.1a3jt:-m1JLkҩmheg4yRְ嵆WttSY!iNaĖ6mNDZ`e(Pqʼn>f?dJC{j* *\(/YMF Wr:KoS!*).F֡^Ea)\pw%-5Io1{,gp%V #V'? }}X5_H=b^|}o󝛋cF{O@쌳CK$;[8("%G~2{"{ sdJ|/G܄4 !53ftr<e,`撂?[DfE4'99C#(-A(2lFh(=) J=-H 0wMe-!r17įm}koGdjȖL:M$5S{bO#*<܆ 6%L^M V8S8url} ]M8;;!Ojz m\)Ot&ߙA,ʡ1 C2.JHt6~|s|G2]q 0 CLuF`$c./7T@xr0pp1G< =YN | [+0OT=qPsqpJT@_e%X{q~ \!qubfN^PU{_M^^p27NԪ(?UHyM(=7u&nBq77yW%Jm%(\l{ާarfA3C+~df`L$%&E`P,C2fҴ({(\"+w]lco56f8 ӎYF҂p au%YyqELwvN. 0{ 踦0RYh;ߴa#N!Gi8#\,imy{߃}(3j=GwV|`a'4$.4L/2%q 6^aލH£rXd@l𳡲 ?;[0ssv6.X]fr%G.'s5b-+)^N^vg͝/ zjU8-sd9[{RݾR.MLb^>nƤ֍pv:E-yHFY{ "2ݏE@pc01BQJnxUts%!B]|&wRܐx#fsxFQVlqI~'&Nx؛ڌ+GޏըNHi, ͋,ec)~ I$$$F|FИ5ӟ"78$qOlEl5ӓ3Z@vgokL aE^$4mII2i&W8;Op9~#qbE{K3;))I_qD3C^w dYO51 9pum*/Y߂O1> >X4o"hea/-y5yAv؎nD}5X{fv#ckfp|CKQyͣ\KOʝ^NFCVEuc. kvοtqf!Ly2.ָ5ڀC.hDEO/XVNth8OK"{;:bik}KXg+Rٹ(M(G=Sg./1{/wDz%0"ܞ1B5tW:FjbPCR_oхS'>#X>p'&ԼYB;c)C+9KؘP76nVXVˁ;CCK[PMI(Q2+n_eD y܂컝Ē Uqxavw}*`b[(d0GnH蛼r'/|*#}zjix[0|dcqT}[zLe|>8l8Wk@Zw\V&1m"k;RZc#l%Â)޵c$ eLE^l K#JGG|,繕3;rd'1VK[7瑻7M^q3 -FCk*yY?𫼼$ ,y B[1v>Ŝ9.|ۥ1WSR#ګ >vgcȄO{T-&߻2OK ͜\ް7 Z&Vxgd.eA2ى-_kcy&b\)}Gmz89<إ%Z!40vx \Ycce@w kD->% "zv7ɷ5]ks32?p|`C&5]f'^{ya ?JDG-oq6 FǤy:`WX?! 
uA?eoȗ]/uw` 'rN b_|7gv{?n&<2jfC\8_#sڻcHV2xprx u5ß99RӞKl'Ìؒ^gmVƴ-)bb _ïx0N,_h(`_Ǎ&vszv}P6+{Gl-D n Ho{%ӝi>eq?{Y]i*p{fBV8G'i$0|RbϾ}?j: H*ig{rL~v*%*c+8RKޮD-}M`h gA$u 7̕[\vN3aUlid:a~M* k I!S-377kNfJ3bԇLuci!`RKVڷ,Lvb9oٜh䩱5NwiΈ$%9^Kr>EqZf0|iFۇ-4 VFORfwys#«M(` %v(ήwְ z?.J> h1lx%F(#XjK+-U:N^ɓ=oWX#/eT$^Ɔͳ?: ?E ;glv KZp?L^2~"ّTMs!Ios-8svgQ,B!DlO$cx$/< ix-7@{# ,o:IBt,#&\i^C^mnԃ=!A~8ȁ1$Kx88"Tų~a## .$x97Upsf4'4g[/ET`%P+Lf敀@hV(|}8?)-dV9z~n4/}U8xstDC;'W|d88Zjv=Lw' fe4{yJ 5{KyaoP0";Sj\k$ M녫sh橔WIq {c\J<>fi3k{wuĿp\'\@c58hkIl}4'+H= Pɰ0f3#⥕Ǥ:۽(c5?S/d?Y%> fNy+0p&=W7'8`$K7BYZʋő/\. D 'GGC'6"|7F1J7Ά/!rs3sޝPU^*ZV8Szu w33ρsdNd}yYjMTd`7l\= o0!/2'ޫ9]Lk?=Q ?c+es.`dkO}"'ޫ/8Y;?.eek#+>; kck=b'>{ 1775!D9ÈDH(K?5GL,1ݑK[||qu㹍o7VM^qPK[!A9y9h] 4%LHR*FQ4Bk6^EȋP `@ʛ: ,Ü'X8ؤ!5IJ*qpP38Zj̉U |[FU~(\̑%>=e:C3||psW4L6& Rwc|~`U4 ?ܟWA~8W^1ixZ*q22EB/oY-K+$?Qg{~»oj0VV?|-ݷ2;!c/ )%@j _166&<czz;CgdO6^ɕo+^ KM֎؉UR-kHACSGO:掹疮P"v0Ua@<:~|OP&R^]e.9djTO=Yޣ*^_rgc|1[c\yf`*->98 >Sin/(qC3G791[/͖=c AG8XM f-D+rW/ވ־L-}6#K[J'gxƢ9-'+#}3rk{Bec,Xφy.YfcHav0vI(ˌ2Ke|O+xf]ϥs[U޽[*%K"x; ȭ<-SN?~F-/t6}pz_DrNvH~83/NuP#SHVr2Y־TJl ?}e ks-?><ʶ<,3 F2Rx^C~L67Z,8R--v23@s ZHQN;+f}ĸyu.S 6jx>.Ybinnaa6v 32FzhimOY' glЋ`jhEyhfnCbK.08,zGE:Z1󢒗1>CT /~1+|HRU'= _4ob`Lx V802SFB>;t ? vzLz/ qȞFSsŽasиڎ֏  x܊ v. 0847W򚳻>Gz8팉9 TQ~^C;-};ep,L165 Hз)*^ QeBͅ1Qgok^S[c06OY 2+=21wGcG3 M(8vro7 яI-~"C]8âAB>7=+9 W:G%BYFG'/!ëTo`B@ayf*~~^t}3e`ļfbHOa%?~s%'@BEd)Ӧod_0ĢIqn%FFqxB4nT`7O1<5g3*^Pu/PEGDf̓ ᗁ 'Z4/$S*2ri&/-_'j+<344OfmAgw }ԷiJ/eK?KB>=~·"?;ʂN=~.nP{[~ܫ׽+zz$ vAQ^I>f x1 3S/4+{SƵ8KJKNͣk=+eî.\0iGFaiSbAijM\kwcw X0ѯ1u6nFǗ)梅9;nb|Wj}AxK>ڷ/$($l8~$$ںj~RuU]jO~[S @7mPрZZLyJkbd#jC^WPp-:>^Y۾KV{%j.p0%3͋J}Ծ_G_$/|wRo'=R+zDլ@?a^_K0܋ߺ9U>\2۾vN*ބ7+{}v޶bz+j^ongpQW*֞qfʲ2p7^^Us*+e]Ob[3x7 wc1;Wƪ,='z况Z흚;|*uPrV{ /_VsW+U{!J %wa{%Pe}Pjp4{[&/)JHGu(sU|K3=&OjO߾+~=r~3+LՏh3>~ʧQ ,}<^2d,.hhüvvJhhGu{ %0j`%_~OFȇub/S:$4 oG0! \d)FI$GFC? 
z44rP?UAD{DWkh 𢡡^444444444444Ë$Bd~7HA|3=I?Xʞ/ )(vxga7 U8{~g?RS{kF,q Bk>=mW/Psx'T{E7'اx5.pO7R{v i_.3g61hWt X:;JݳBE+3&3Nˣy\$p8.&E*rՌynǍC|NfE -I,YgnZ^4cbqى KC<~Rϲ۾$^sPݍ*W3dzz}lVLY쾯Ĺ ;D9un/=v,+u\v|.E:7*!?KsfLYa!.7>GѼGX:Lùy:\M炨bּ/o]c. _@ ^e^\Wp=<q݌S)𛒛rq. \J zqNAW 9_ڗT2@p zm@ίNuCu:v-'-g9^f_K_w+ ,CLn/>>,} ^/( trj,z~y|sJrt$ma!%51vH=~gqER%Qu%wNM.>qu=Ebl Ix9C`3UUgZQEEYijs~rtDfqa|~~T2(bhC_8]b%a ;o$7]ՑFC H% nvY-'9EhLIF>qbW ħRo*RDܸxβGRS)>Բg]l]..]`GN:qqqLĵ73?NuTl#K@JFV>WUҟ8v'6wr,i^J*]xe! +lUvKpPqdDGP4 7ܡ(51hu&l:n0eQT{h'E(?qʜ6NsBx ؝Irr":}$s,x-ܮ-bͨEf>[;!j+KH"1>O>QY6ɏ7E۞w_EqI1BEM&~EBʺ\o'8Tu9ޮzAžMOʇ]jHI'oiƍmdVf:e7]j)<O#8$P~W3mڑCJ̴\~}߃}zBѫxїCb\-̢Yeeoes4>]U_&Lrv9&=&[ު,2*v*8t`Iql?̕#1 H:u$%,9Bi8y`U}R jŭ^زW1uSe??GcxOD%niG[g7q7 vq,uL?Lf%&=9v{ߐ!+ -?R͉uz.}7N?6eśIȮo; \ /I(dp0?B~Kq9*>I[O?jILfIF%8I. m ť :^%[:7 -6G}T|VWe)}?˶˯m"1*>dD&m僾W7H}v')sjȥwt4ݥ|n:;ȋ7o6 =*̥ߊs&0=\;R?El5Dl(dl?cPh>?2i|w#[837EԮU" 0ٸ=5x~q14ꇹ+J.#1c7=S6̮Kh"I;L .4DAl$Ǡobq24D ?wmt'.^Ǎ)^Td |wΧ^NC'wkv!yf8d>3dž$Go,['8@LqvoQEGn%lƻi~cL"vbپl~>G;04#66˴\":렲nkb4QRB$u9{Nu ߼8Gʾǿz>qhG-z;ɖ,^0%.m'|~tH{ײַ#|zvBD&-f.RtN1^S+wS#їT%Qy_Kmqe7<ȭid|ݜOG!wPҰË{d1:>m^K<%z%AB'KY2yjU~)e|&ؾ W70i]n)*=MxLe0lLΦhmK"Rx32Μ |1Hp]RRr "{i~/s~#:&i?͟Skz")y7JȇnpA#g7԰%7 /^xR4j>=ƍ,/ ӏD$5wY4<{@^fac^f~%H酼hzCx3jˈ_jű[hhhhhhhN/:F}(xg8׍dP74@Bם -9V-br/qδg~QEnz o&*cƟrlj՝DoʸP'<āJS9񸙙ZZ?rD5:rb|KVN ##Md=Qkk;]Z~TmS dErY?IS)>e&{qjIj+2.<Ƚ/9u8x}*e&hf_L׺Xϔ33S]m g߽[8q]Ctu*UIN̦c>>؁=Gr~(Vh;Jas9WIN!2R9G$Tt==IT|Wo%"" :]97oIONG26Gp^ї\_!m[p nT&ީw[h0`Exs:B2^$ݎuפs~Aymo#7ْreV Z?ϓOɎƌb P'j~ί56 eo ]e F,nǟ3i5ʳ ᥞ2gvvqWcڟˏ]`aZ&{ϜyF_ 6;2tu0AT.3eroHw0o|Aa^uCqy6 jI>g\<Ǘ m'*i+ue9$e\V/8q ;9e',L)/Ѱ'𒗖azSɻHpz~F?6Qŕ8#0<-ɮ 2=obDiCnr,#WMؒN6$׿$B׼1ĩs̄Bl IݰYCwGl>[0)O-q^^&/;r: bsഴ(E{.drce:O1:XB6Vo~Ǜp32+444444KN&~&7/] gfھS߱ @$D%AJ!gI~FbsP8(JI #ѥ&RqҒ9adg'=-ϻqϾg}XBZs(ߙˏ1pxW.'ް*qfAlL|o^#62 wp_?oA'OjHHL%m[IO{J'HKL$35=Lڼ'8Ĵ,RuiT]iqL$\D^gvXr]blƛkINI'?/]n>Q/p@.hiHF@`[ə=b ͺT޶0^Kz𞉮zb/ xizf`ѝfWP@1.IEN.sdl"nm+|ok8|Ieg @}7[w^;8Pf#(9qk')x:lM$5#S-ͣer8yA4q:_sVZ"{. 
B4߯%%* 2819&Q&P4+'1%Mh*-;-UEtY=k䠛$&#Z,|(VҜ|rk#z  d^){v ෍S'W+ W=Gvx Iӥx%@^rχ HKKKx~$_YREzH+S$!7U %pc/ ^Z+yNXۏbyy:rhe͆S  .՜f,&M9Ld>/o WǣV +^t8Jkq2oYBQ[tD^| J-WZTB`eseK!z1߮ #(%BAk~dYV4չ`h?n7.*=\>WŅoX6d 岼#?t*FWE { $% ^?rPgDlKKg!9LZgKv7@ȡۍ_,"VjU?a:J}H+ > #0~~Z$<$GH#6vGj_lj_ܦOgUk~kD~O0(zUoI+aneCw:wmJ2HJBǷVкxkR{:4vu}VF`]H(  ]鸻[w^}Su YyfvaJ]]q!_!RTM܉܂լZJڭ6򶠂^§a#3~+Y֛ek6q7 )T"$`5׮e7W_ebU;!=d BOA .:q(|b*N J+赣QQOrROYi)-YOC(n'mMQԆí (,B /zj''~s-B!$!B!!B!"B!"B!^B!!B!"B!"B!^B!B‹B!B‹B!BHxB|tvvJI}2!^ ZZZ萒$JYYB!!ğ^\.JI}rݔ#B /BJcc#z555p8>kxq:|*BhX‹kbX׷UUU賢(!$?⢧6NuuttYsv6Q]߂թzhI]]>/TU/#55U{ꕒUWWՄGΧ$$nmu* c['=v?6MK&8FKuOBPGʛj V|LG :؝_dHƐۂż<}FA?B‹qgfFJ-{C ދv.)g}*~kWc/y)$6r%R;W?AEaѫ]?`Bg /.cG%WnaOIHxƫr ;oe\-#s9VYýF%!<^[1k[7Ym|N]KࢱV;mH__u4h㕡 SEg`hFV;fɵK]YT[MiZ9yeFzڍ6)-vmp[."؈^q7+j!x7)7IG}75hUwjމlF'ԈKV^7۩n* ՚$ZT_/h ]Dg<ىKg|MLJ|%=:{h1nh~v=_uPUP_FUOUI *̀n[7t0yV_OZڭh-F̸(4vbtb0QXE;ԵHWW~FGGݭWCC/_ԃB‹ v 樇X;1~ aWM̼Kj\֦x.cƄ;9Æ1s'f20b$͛Q#7m>ד˰[>9b.nV9v͟53;v{6\de ؉3u i1,7am#%Np7v~ifsƌΨq0‡,)30i r=<' _ŽLni9c ߍ̪fEX}8VNk36#-d8~Vߣ)?%%B3QVwp;/䢵/5Aq,*ѷAg/eU`u@"r{(aȖ[ړBZg) S1u K#0Nv}&6%<U͓n-uّȸФ~S\ɌߒIJ |=+wЀ DΦP<{U,vz[9W2{Hf{m&-7-3w*g7㹛\JE?~3ώg7[;v([O 55[',b =}PPVseӇ3}!*+s #FO|ń)w3^fٜO =2ÿ#+ n2qh6Ƌ- ͷlFo rn`컑Awo%VbkY38~ _[ߌLZRXo-]|3y $=;I3nLZ'>ʓ8VM7S"[~26~2-i;A{voMFd#[1CT_ /)\@Mrj]<K{vqB.+nrFOVLU IeGMD?ݒCvs}+ra'-99RM_m/Ro݀b`d2 #/3@9z~RyZd"bcp7NZc{'1X1.>PVcDPCCSFģZ,^L4Xic\rYs2E=UeK*tb,ËIĕĨR;ņTM`=V2b OKRo ɼ(+aǞtrX8^JuKo.mϡB۴foe3@>NqD$P0Misq׀hGﷵ#/?ÇZvI-sHx8i[4i8{Çdc5*2ulĵN.yg^^*0}TB= 1(gߎV36ח.Lu B13O̔ixkev EUy6sE >Mel"G.STK&MùVP-<;Xh!G~j: ۣp** 7KONfqxo=D[r`2t1m"'Nf|O{Rɪ Hin"Oi|&b~d}׮Ð)ޤW]נb2 /B‹~.7g3EeavdbHx 01Q;_`ARPȈ-_V&UryWf2e9v})^]H&G1|}w@k9Cу (DE0j)6ҭo^ ^^Kz^O1Ξ zM_MEw=vcf,{3}vS^ c&r8psjh&)GWr(bH;|Br<ԛoNХ,3yWœGQ̙:K8s ~iTn4BϠ%翙C|nW62zwXȊy*eL0;ീis6UOyLMsxOC04;jkkvISi~\x2 LZ˲S\;nFryg""8jNM_^DebY=#8F$q|ߋ9U^Ϙ,8_JR| #7ә]xdD/Sxȃ)L>U.'s#EeKBl=uL\K"^6t |Ǻ{R_ƿYE' f\Deu|/̭Zn\moI.bNU cT%%zc$SjԾ6kdOu!l !=ZZ ˭h.Ezܴ=cc`(:Jb.muZP焅CՂč\/ŏtWsxw(6uOpЙ{𲰛u$DyE?LAqn.މlnd[CBغ%Qwist`'AI/"1lŽ;ٻ} 
;NSPUp.~h)~FLs{'r%SmQ>[Ev~_HjMO]WVT3{2'r I 5TܽWJN\Ĕ< nA:i}>yq(.Yq_s{+ Wbc]x])>,"RvV֞#A-. C\9sNhˁU*SN+ìw}|ـͭy@_Hٝ]f.dNd>sqz>koVP[UX\BA7 Ӄ[,j`E st6_K%TM*#sq-s;wiije۝Rup[r9[Euv5y,=WKDWY3X]Z0^^K}{^it9m6\;6݆BUR>DTŅfw˩M<6>Юp~/U~^6;.nX݁KA߭:Z.Ty. nc`vuQї7h]F3xyCUq<6m] ?WAA_ѩՍ7ؾ}62p/&&f pDO^@PQ wN}ۣ r8PUUVm>Kٜ mq>WWVi Orn}s:\ *VXޯ֦{ܡ`s,c`]6xSti}6>q' Rp**v&r CRQBUTE}~iϭ룿G}u)v-1Gʰivoh?F@x9tn|eUڇ^>xi`ب(϶tvvj?_v?{xS7@U/%Pv}8Cc*B.ii3!d;,"B!"B!^B!$!B!$!B!!B!"B!"B!^ZZZ/))))))))))2f?nZJF^B!B‹B!B‹B!BHxB!^B!B‹B!B‹B!BHxB!B /B!B /B!Bw^ۋhb*7B!$խ`nw86NϜ k ).<)`_7(xB8rss fbx9墫&zx1aw!B‹Z2e`ڸOS5OV6sWn $'xNfuK(//DOO^yuuu|.@JJ 111*))IJn| &6n b >ڬD+PRƙY嵂eݼfR܊59z 6r^]!B‹hH~o~ęk8|d/<1|HBg;7L6Tթ.Lݝ-.4Z[[2YXK<_|#s*kokiKBOW-f4 #ՌdD{k+=|ফ,}}{L834ihkl]_T{,xɁĕ(v wbuh܎>V__˗/O -zIKKFcUUUH>Cww7k}T%%%czS +YPD~a!YiY9u62Ѩ 4wsO' -6l:r1@bow(h# ttym@Ssdʃl:M}< TUs)|9}ЮBItںphHSsv7쫬XHIG{1_ӫ80L4}=hnB:.g.4Oڅ[܋jN*)t4؄FZ:yG{K mX B!E4?b/Raj.`ѤQxl (||jSAi`5=M1_dˠ>>3zhȓZ>KodYn2;"k.93a$NS鶘1i'M̌ZMGC6K" dk*7ɬDdLPV nZ*Y#2g9eF +=׳iZ&`bRHytQÇO? #$>M<3(`MŨv..:3~ZBofıLs\yJ- |֭[޽[ +D}VSSCEEsZ%!wS&N]Ո[+KAK< ή-<0z1Pfnƍ\"c,HtzNk3d'.hJ>b1u.᧓pj~!Q׷TU=%ܼΕ}A\+=Y0w1cV@5s9b3,d܅RvňvF#L5s&-oٹu'lbٲxxhT ,[ɢEK9-i0yHFR]xx,bB:GCU+{xȰ;)+c! 
˭?y |*Y5a%%p2|=K|[v~&͘ϥ:aԸI9v1rhO< &ɲD?ƂQX)67?Yss39:;;эɓ'ttth/M W%lOm:'4ݓ={vm `Ĕwpf 7%7Yh sٹz磩ŵke TG{&*lEAYoDdC8Ϟ㻱<ͫ$7죨K[Y2%|VeӘɦm=~>T̄z270ܒ r_]c%ʨ'&*9KB)&?= sfskbOӛآJl.tfm.RX5k.?3K(ۆWy l]hl=5Uz|#3fM3@ r S{'aH-v*#7_TQXXEOC'q\?;ȯQE]^g#'d 3rBRK݃q㧳`'C*LKf:jG_r+CY4&}H|fQxi-}Ì;,xŒ)r;/2rro:Oϫ.lf`: 1im\f, leTm iO2i "Ur98x itϺh#Z8)Z]x@- z,==]<뗦]GKNN`e9xCj!%;?j(nZglZ?糇ЀL[b㻘׳I^$`Sb%zҏj¼L>xl%9Ҥp>x1#_e=G\nx::ua=T%n<;:ڎ=^vB֬+ף*܉fR* M=c{YpK^A9$ux~ϝ7xswK7VPUko흭Z:Km[6vسa˽ֳ} c3i6B!$^m7)^nb0u^j9&2XNƎhN/&r;Mg]ϟ&Rk tүt=Mӌ2BNƌDD@ KFm;ozC;IzŒ/Fq+@⩵ _M^J_V)8n;OocكKKkk<@[eDYDIsiho.H'g,6ke^t}hl>@HΘǹ6٥zxO}$//4(8q///3Al6kED>$<8Xvl26]Ɖ' ^<~xp ޽U9YeM\{$}t68{ TV}a[`9I=!RQGCx]؎(=yxBZTZ3\_Ɓgܼ#/+y/z^u~[?^Y7olhJwF* еVZPS|';\8{*JIOxm,XF >+!B‹hJ{[TU뽤@95!9w##^0i4vD$Nk— (/RQ^@Cx4O1H'xowl_5o#>7p ,<«İ$?doFp3> OAMh*ofۖ`Nň ]6f*c/Ɉo]Ѣ_66˯ Y3[F_"d&9ʼn5dΠT-]s1{bNJ¡d<;w_cj.\9XJ֣s,3y^k7p+ {W7^szNqXr-;;7!,v/mmm3bŊv?`, 0mtħ>߁xMyvMKZ/]~Kׯo3fxr*WG.Vy7x~냸Wԋ1}YZoO.ll._Vo*6@E!5x ^JͥyL_ɔ:٩6ra&Vq3.'7WX/KȾ^mf`V[ݤ$@fYyn%!V{{~7is`*xx^b>rKG1[B /Buaw)Al>;Xi9\z=g ՆKQAU[-]*.c6_Q\m>lRPr+6lvAIa~O AT|LUu:$=_O& ^+.ƭO_ݩ?fc']lv'ziifwh)s[Ŵ:{,>>>KJJ>KII6R:555c]]]I6nMoGw>Szq JٌffT@oY1uwYKog2a4?Vk*m]tuO}>ٌF},&zz *?ǁ2@[kxL{n@_?LF=KiwM+c6 لaVԁ鵾hNK/f kR{0z1~XNnmځ !^3q ʥzz*e9z ځ690RZZJOIR/lI -Q<8̕N>^NB!E?6c颭S]6z}闐mDC0LLՃ禪ﭿ<7ƎV67KE!^?v mFCK[[B!_ /BUUrnkmB!!B!"B!"B!^B!B‹B!B‹B!BHxB!B$!B!6FJJJJJJJJJJWee%^~ncXJJJJJJJJJJJvKxB!B /B!B /B!B!B!BHxB!B /B!B /B!B!E!B!$!B!$B!BHxB!B /BͰ픗JJJ?B!^ʅk$ՠyy+wsG9EmnTW7?ܾ4ƪWٟ]0IA[ж/^ˋɓ'3|p&N7OַqDEEa!"DBD}WCf2N.Oe䝴Z}|"ǰ`|Ə$;f0dڻyp*QS+, Lؤ[ac ifzJbn3ዯX|<Ʊ,*/3}P<7lbj&ޛyXSY?wÿϬE++dm~NX1Syi hǏA}L|Tn?\.\?*O}Nr9׉r+SC;ˡԧy:6߷|?rZ ~tAїG}?`=ݸǝco3g}}?Z}ӠR?ZyRʜ KO^/&b'Fn|ZCщNj%H!`O*;VPh3B‹|h1I7Y޾ψT}֗^ϙظFr5g v8pff8]|Y'wY4z(qclLE [o A+f13ښL%(a+gF#FyvC4,''g h?F^vޭ6򢍂R]pر߷a5c61tIWG?{gWeݰŊ,. 
J]hE*P(ZJi)4d2wwne3]7Gn/ۇ>6iSAAOg:ztK$/G y/x]]h//T(yE(yd3&SHbHf]4I鵡+-tG>C&'I?ٚJs:=NY 9NgJ++Pg9Ht/PR)vj#Lk+[GS,n~yJ'9GyR~Tx%JeMS9wԆ׵؎BFym^Ԯ%.'%l\j\ۗF{(~ΏRQvQN+/RR{( $-UOHmIzq(Ćv8 U"^yDŽ^r!5 !!/BB:~=Sڌ%W\\ٍx؃M7]|/lڌ-/nsW%c\O#xgR\&u=عg'nJ\*2)|\c AÛ=݊Tʉ-ڏgI6p%-Mbxٷo4c^Gdr)q^_:&&16:A~FWG;ڋsY2id Sܼ[(/]Z:5x IT2t:+ع‚ Gvqa5A! JHDBGFD=pYv=> Gk&|T>N;X s:FCpyp<]n|"qO4AN$𔷏A0@$A2Y: N:#qa2'J`J3p{Q9z<P$Q0vFPvVvD+AǀJRQ=(9 ?0;ByR0!D`(PZm("2Th JR?H"P_ ΏK2#Q.o/㺣sprۉ26*X0@w2eUe&فpe?/Ԟ={$xyᇥmۤP;nyɀU%LL z˘C|T@ñՕyt%Ogt*që6|$+b`6Z#I<3K}7b ;P0[^TDZȎqTm;ݰ$߲ǁ)ϗOf0Hk:Gw ,gx.VW$W1<9'q;~rs7`_za ҋo<>',AK&t, !a.t r= Gyn،6ufX6v -MjhMZ:hu:Uj( 6 *{o^^; Gyz )}+ & 3 ;0 V .\N'&+R',VNVʋ@)]B&#@S~>*x>@A]a3Zᡴ}7|>7AQft% 9Mr`4$pH0CeE>­8X]nΛVܺŅZUAKF@8ݒƩS>` ,hth^uVي{Mxp}]}XÈuhVpKFm,n< lU/ >̟9p(\4~و:LXu4TX7v}jUoƠs> Ob3l ] iZ;y,2Z-x_uNt Uv+n\w&+!Kfx+qӓ[웆~r'W&X=E?>qF| <ALxk3<$iiLOgnϯ`h c9 _2Ol 262Nsi#;f=Z *%t--Ъ4iL0Zp#_#,f4h[TPhQј˲n>MiV`@ ʆ&(˫Q/B:6ԣUpl%UTr_ xp;n8 8\T@`awlBO0MXr|d^/CJ~8q$b9jV z  Q6NSVEtT?]s3-ltS;- mj9d5>JCl_TGTAI QWӈzrceLrSJ-Gl7F5QSpR]xĆ `br 3^ t1|o?pZees?0jxy$x? Rϟ8tMb:ӁTcacZ}3xfv$YٯǏ{ql~~FE0׏aݳZi4Ag87wV‘(Nٝ:\vN?};LlVxb)]M&g9u7vO㆗ЦFvwSfFG1I"$t4&MEZSH0]iZ 729QʥLg-EB)FS7>ԁd2 Y t  GPLi;I 3tt ;rH&ZO umodT[. ?1_. 
T A6::jkk!oG4ZЬx`!=NA E\ Ќdj45i(f-l=㣊EPCѠ@cU*jQw j隠էp$V&Wl983+pYXQYFN E1l&:1/S~Ok||'Շ/ g_k|.R>}IΝ#UxqNV[ D .n|n_+xՎxQy؍.A'gqmaǧ{pλ6}rZ Kx zz#Ў)tcK81Cit ntFLV?l(R%XXFjy!HJCyjM6ɡ#11pCD!Db>B*&c O#Fc}a3i-ʤ&x!$/Ry6࿽Gm@1Zk𶪍~VlV5ލVicjNpˆ[?HbxhG]a8 ;e'100#ܶ?QLtv=6Xsc_ez?/"x57+[(M>|T,0o>aW?~} ~U}V\ZO4BJC7e4׉V,_[7kPAHHt_ULJg`-Eټ&d2`kEavI#P m t0<؀&Zj:fC0E"DדDx 6 z*jtB.ؠnF:7mҨH甗QmAE=,#8l$8hj&kG t0-P$و 87d#SH&Sx:*ל8.՗!DN$ᆓO Dr@Bh†vxa5/VNi͠EׯܔWt |"Q/^z-EK3N߇Hxnژ64@+RBLC#'q;*ogZL:>R5Q8+TSkԇFiNl`:icyx୓/VBE6vUhݹg!Rx%0Yj|`=H$(3jЍ{ 6=g3GBE7CRy 2?z#zˁ7 )KvmB8Oͣ;Y540ڱ=|Վ[w#(`ZV,` XjcWo2=v\yx3qLf;qٓ-<1ko7ꛚهWG%hqF.{ف{ߴ{ϙ3KHH=ogZr9$hI&"I^xvCW{ $A?N\&=: z,&3l6/ӧ"cVFg3Hc0^8>:p=n셅J; B`gJG[`'켰FG;A.*5.Bt&#;ݚiT4ZYd[sg"" dަ8Con'.*S $MvKmk_Qy#qʇn-P^mWӁB Lw6\c JT^rcPyK']"˧JRAf#r֨I*{Pp찚mԇ6 -FnMnڊs9osPRN+s9HmJayP·gx QU?~xX';~ BawS|g.lذo֭[;3?P)T.(;KL S@L q~a$1(sy R(cݝ袲|x@g'+}⨴#MBB^΋_>w:0/([-}=ĔY*52iimR@Jm}J+}et݅6%"Kd.K|s.;:|Dg;gfʑ'萜'K}\ƙN{%KP=:; oMT/)NНG|(n t iuR=ک)dNK+ <0He{(>Hue%hts}Kc[1G0C*EjFq.E2\m;onB %J'ՌP([gSbdw`HKh `vWm]okNQgn׮]/~/YD޾};O.3dTUֹ0ߣ2B|*|0T_] }~1/ ](`qbr"Q?KMH_y}˟l"<7!vʼ-2XBGNKυ$$$ģS;<Oƿ[Sƿ[O ߝ #65F,EHHHCBBB /9aao'cȗ{pt !!!!!!!!!!!!/BBBBBBBBBBB^ }%EHHHHHHHHHHH!!!!!!!!!!!!/BBBBBBBBBBB^ x"$$$$$$$$$$$EHH[r appPXX[AD'\:]$iJHHhzz(%O[k/2O33kf񎩌2"$T15u#OFxfO|^{t]UgQ*kh'Np?abxggI=b}3Tw!!/BB3xtwкs.*hAt n ^/J*Bwwd%ţ.###B/c#| t=Їޞ.tvu]Ao_?00p~_չ]]^gÔ(FGF12<"M]gm",r,(Q]EOate2tso7z/z)L(] | h5 0:.Se8A!9IQG Pޡ@(_ STG8ԏAL;—wq~[I}B 1 +CŚ8- C-Ads MnWɹ<ʿ e9mp`NZeP( T=wd2crJ9#/2nzv P=H$b(esHK/ cQD#!/Aj74C~[;A \cAϨA)(Od: A(/N@H`!fX2D mq6n Vx@DЕmR%Ƒ$`$0>~W1 Gaߤ~C>QW k6G#oGdՉ9A΃fl#4K"_ }`: a>. =;VU,.&/=ce,<1zۀʀ+^tHbӻ6{?p-VbqZ3F? 
]UYQ+!fNFzȏ_7㇯п0: -?xTӃXr݃ܰьZw`ǮZp'Q_ey|0>{džq?}΀Ad# Mf / _; %!>]o܎d΂/(?K.ݏ[[n/;bʊ5\ny[owYGѼ.l:^\y|j{q 7ᡥ[a~b:NxnGCCzzzS˾їl69adx]%b{][̰pج8-FuTP(PZ7`6<A^FRBY !%x4H0Lp0L``1`&%0J/*DJEMr475B.'XAU9>w'3U8}zt~\QQz4؄fy3T t*-z#T#c#pr} TO T&Fp{yK-_8n0\uJBpdG _y$&N#8R8ڝ@,ڡgM\&-M:47Ҭ#TwY#=MmTNB Gg"!%t|.mDToZK}"YIGH֠@c zG7+'Gn>X X&;*r 2-4*+&F@ Dm96{b\| :ފSYV'F+LJP$&MK&l"<"K-l23ߥF#6RA҂ΕZzF}d4*_2#JKp̅頾%(v4֚.c㘚\Tm~tc`M#p t ئ.'pG/O_ `jcoވo| !|o.XՄ%a7PRD&ڃy?_bjfaS/F^#.}z`ք7q3xm} 2brr0~q mp{Kx5N+u;^%x MHjn8*זּu6Sdb :p~bE"$G=KݎGW]y%a|ToE W%köS.C/KPn(l~= kOlo&׾ )^\x#ر \qM}3e|]0׻.|dΝ;ye3y_x??^| д4I5A@ך&( jP]K/hɥZV!QH4A@ajQ]݀Zdr:; q= jmYIOpGDeQ[GTҭDuUj鼞5U;qNT8zO?3'ܙs3͍ `F#> ^\<3WAE -pCuP pS=781 ~ٶXa&1%xϏ8az -WQMP@n" PiP{yu UrT)bThR( b~ s^'V+N-TVKFQr?jQuU tl$7M}MH`bS4kTޚfM##S,F#^9ZԾ60dH!O MͲf4F45!P|Zڇ6)hh2R֑k\5N k~Su6\Q"^Nx픟!U}B8BCcWNԉp+.m@2n` y'ax i1Çk` p?7U &vrvpPzxKHHv#=N8?^'Dݱm,'0* O֎ \;zF >xKZP=͸{~VuVڈQ-w=~5/oN#+e߾}xꩧxЈ T/F1R bIT_ Eրz48^5 MM^d/ryx7 V=l33Xr$V8lfL / TJT pб94P^ZkQP=7RI#/gSgg*qTN<4g鳨:[: ^ j' H` #Op90:&.P t<2L`jE~iQ*$x/v27/&JECBMBCڞJmR!h$Ȕ) dff4*Qǐ!xp%0/.#/&WCug(S`GeԷMl>'pCeE @ؤAЂ.yȋՇ?x EoFhK"V^c 0$p!nߢ &7$a#!Gd r( HWOzԐ Zv|οi'DeLdk%p=fsWBklІ0= sMgedžz|g;mP?/w#l.3Ɠa8CmCVdoщLf#VuM'{ыi` c0/`gx> z^EAޗqgG!2Z{?z~46;N] +_>[FpǵWiص\q`ڰX sozܱ*֖Jl~eԸ{ִ7r+6ЈQ|]SB$k_ԥzr3rrP^%4j%Tf(Z kɚP/SҽzJ u h$zriԅB  5E:#Fp@*픠j%r:) #6iT<&sɤShM&E>zkc-6JE 4}. :#ZF:F# MF=4=T:= D* PX ?>'7HD4`J WiT-hVR6s_P$+ &x-Qڒ\>RIJ٢GDk{#nI b!g^/àFuCHѳl:MmHmcry OGqTP-x} 4R{Yţvd[%m,.̿i r>g-Tg=OH!SJ%(f:F}:Oiڠjw#KޅA ^xE_B _p nz(mqqE,=ć\Qk>H` ޮ0X=xlp^\ь,<`O_t`UZէ h ]cg6e $vgH-^ҋkW)q"2V>DpPtc߹l2` gNǵw̠zR:E ^+~ɎaFV>[Ť)^#d~޹,EeP߱HgVwc3Őy~}CuP b*S X򡼸Ҵ+4)/^ϻQRzNʟ FXH6SPl 4zA%"AmBW2@R+Rm狀A[BB^xd` IipØ-1%~ c8?܋ݍ,Ʀg _ʥw6yNq6.f 7&Itw2Fu/*Wpm lٲxg6}~e`9 Ähhnd*[1㤄R/.(]KINNm)ݣxKY:;okK>v]]##ѱWfJG{\<_T./mmd.7Y. OqJT<+輭R '7^(>ηw8<-%6~J=t͗`s]K ySz綷>81(}Ef|/G #u29^"e_-oi]. 
x$k&g%O31kHt15a˘g8d?} 7Lc..K*WӟK_se|fSٿXg|T2/*Kkvqډn xT^^B-ޅ/8 Yد̓ۊ_6y k`G *++'.GBMŤ}۴9Oyf2]W)'xY~ !$$$^xxW1V[T-+>[ڍd2I߂oZ iP($9 ~,,,q} LNM|O<mxo2)gxB;5ag!`V܏ (a[zzyj ~62$LCv,v9|m36afv ^}o]&8yFxv}1Iw™ByzA)lX._ z9[k,_;W84%4ǚgϽ m2fU+b*VY,Ywo˱zhmHg_4\_祰k6)ڍxGGaz;6. _rkXrV^3T_g6=׬5/ΔKqmrƂY*,RϮ|Q̔!$$$$$$OWBBB1|zzYZYeOb24=്ka+XS=6s龵bw)66x+ց^0x2 ܼ\G!D[<|<1ؙ8X*IU"˴`ߙ^­qP^~0;;qխ=NN7 ?" C G\|Z W*ik0z@cvx~IR-ÃMdp Nq)yb&f ^.2󦏊l"rs^dy-7WKe]U">TDN{G"⢔+i|W=זP1])Ո$pXHDq-ՙ$VqRCxJ1px}̻{Ć+2_8 I`>5:)ƗXrexDa*-Ἱ~É  x?ܛXiX? 1{@%s;XG9'#/+ d\icfgfȂo@ϖvH CeJthOVjUDrhbӸ=w*p'QJ"{GlϽ$5췤ԿQu !z7mp%5&kYqG \i[6'5xLrw;,sy8պ͝ )sHSn1aeg# r!9%ºLBF\pncVs0{t_HWSɁ' q ;K'Y,KIlv;o񒩏4^#&$Fvר:-1+#IgIС^$qjBbŪ0<;Kuq*gC0uKdد^9V7n.z~y>XW믞=_B)/B!B!E!B!E!B!!B!!B!!B!"B!pHyB!BHyB!B)/B!B!E!B!C'uDSSKks#& !HdV봘B)/B&JEE%%%&OFK'"L1|yyBGөl'N!Ef7rrr ӧ*QPPl}PolX,|:ƌ{ّw5U;:iINBʋHG$m4HN-]㝫-̙[607s+|rrG|}Die'%]nP7nRT_Cy444j@TV:ic|g\n5N֒Lc]76I>'ëUNTq:l߾JuY:~Ry孇5f=OPKx?K RUHZZV:($'5UHOϤimh蚙VWi23/@lVZ)ˢPIKϣͬfxIӥR[6mo[A:rQJʸɧ4g(6VcK3_y,2s idBmJ{/Εuj6c$6>63&S+%WrHȢ Wߋt2Kр[J`k'+-F#$6BP!~c`wU` *.<̄+IVÐK:v:o R^ķMI.\6$s&:3h"&NXoUƏp?C=o&8eR6/e`aC5\= :q8x*E;:>1IIJd Ʊ3~$[rvI,%Jf\<}{f(^cҜE[{24[{0bI^)Nt]J\j& `xV:Ђ\6-KCXj3gqxAULFh~g[Xؾb*ưvF\G f䄥[U].8_Dxz:Ia7F'"{.uQѣGԨ˪աet! =,x<ĂdY|&fLerO ipԚeS6e NZY5cLwr}>cƭjZ߱np}!7 S{^+pF}؉N%y8uΰ|d\/,,zšm;ٷ˃O(ʋbV q19d>|aLZvsc6%9~:[ٴptݠԽlsC_a8::2df ű,J(A$8q'ma^ wLx,Vܡ$IxvЏזuk7юN hO~Xt f3uhXBBBWqQK5c5:5RVVˋyBԐ =_qጯ?oPlOv3{4i-Tcx=x緟lYqHɓC,wV"<,Ao1km<`HO\◿tAl7'E`oP,8؍#(6n2sϵ?*1\b٫<ͬaKi4qӃ+94y,hkcGߏ [sYvrm]bxŭ$1yON~=bw2[i- V|rzlž㗿X‹<ɫ#1ы1 +~~>> y¯OvTg~uRQp7B)/}!}hLP{ ;L3ՀqZxv&ڹvCf(b7a5IEyl_81c2fr{R&MUX|vv+*/v3dƸcʌ$1u<8 GwA,u4]4I,G)րkG2zO7a=",6̅,;މ(4b2q9ts6C<ώQ^V&/c>B[} Eoy8K~ :>&GIJR3+Ǚ5k:a5#z她9|4b:`dJ ݳL>'fg\ I&}ySJsEyz S#8,hJ/0?(z} NVMڋ%fbėќ9xv\JX ]Sy;g8|BǾa؞\M&'YcL:{X5yʳ 9ǟ\܂rѾ@p3nM-ۨQj1e_e Z+ps;3"c*1yP7x]s\^wQ2. d_Sg92j^&,! vY"'gM %-1y"VO@B9vs#,=\MYp=:g`0$_O!KU$3q*ο+ؽtg(>ӏa8 IT~|d743| ǣL4p~lz9fƬ 4~0''p2qwa@F!zӻ*(c쵳^9s #2rBiluғsi;wK!X C? 
Nߝ  !(ϧ˭V_HC2Uo81*: s-*h06TFJr2*6im ==\ ϣhSo8))&%5 躎4RRR+h0Րg˙8s9>큇o _6+ )h@[MT`jDM:5eMQa!&*ʊ9%fLQE=6jJ)2w)-PVbb%)5BOt)_T\Mt*iNWRUTN<ɼyʋNN?mL&oXfXm|fm_5,j\vM7w}u{]]Wk]c+ǵo]װiuYݗb^fZ^]cXiwD{qSu4]Έ)kTCgx]qU}OzV+֎mVk-:ڇ/^+9v=&Bygwtÿ~;:;gaL!];Bwj[FM宼ګƺ#o^myss8a'1g!RK[tN_:sZu^~lOBkQeA}>oV <\.}Q?W jRZbl7nFu*ۦ۬a ?sWRe#Up6BʋBsu9iff#T}X`` jL}9R߮ F]]SEN}Dsjj>D%'# cullNGZr_n厍VBy^`Z[[,:ǏUZh4hf,*PK}]5YE-ey{^ZȭjB)/B`ZbԶLH$j@:B!E!B!pB!B)/B!B!E!B!E!B!!B!!B!!B!LMM|O$D"H$zn2x )/zH$D"H$O'D"H$DB!B)/B!B)/B!B!]B!BHyB!B)/B!B)/R^B!R^B!BʋB!BʋB!BHyBTUUR !11:ⷥ Z HRBHy)!86zBfr|0"bi”(HN6hGGQGc4y# @rssUHHHŋjwͪAj%Lx!H>$ߜ w:Kb2R-1|؄R^83m4_jAi cxzޛt҉x>pp٠ 4ܝ?h(sW\rbBRW,Ԩ_֪u5fcT"22 Šrg&NK$-:;l|;NFxH(][NϬY1bӧO`hhhPXV>(B)/B˅C2`%1?I fb᳞՟{.c0_? `奱4[WgVa37{ kL33z65{SAfXK\K\{J. IIIטּ$w<8fD"ab]+#/k1ZDz#c]>ϸs8\Z2'#GΎcoo2Æ رc]ٳgE!EHy9VmZ: {8i:}&< c ۸u"f,^^ aq ½P<7,da3QC1(E-={(i䒹q):8=Ycc:T URTQ!00JMQQN[[m-f*, &3J$-9zScƽȴ*j]WۘtKgE:^qVEAqQWKN8fa8p }eΜ94Us^ $p7CߔdՊPPA\\bRk TBs/u|t&ҙtS*,:Lj{GF_|]|G{ʐOy#w8G'gfΜƌYӘ` |WP05ǏDvijjRŨo[ξDͺp'EMƊD6m%VC ANQ;Hq:Fͪ88^ՙy" XmNtF\}IB<\Iz{yQʕ+ٓ;w\;vҢ }QEFfƷ22{G3si105Zkٺ#}  DQ{'3왆gPVXɋ)`4f{XG#1/FiB!RYx1 #LFbꋩ,MQLnh%DXds+˾Z,tI?&)N5l'Nwֽy"/Ph>ˀ۹4 4%7.~svϤq5!ˀIHJO` wۆl޴Es&bo8wAƒXύS>8M_A~cu=9 :I>E@[6cWFn@2V.Iג^+/\k_dom,uWe[87D0@:em:;I;C;ҽ@re&߯ aΑxDUl81Ch.qg&`Ύg|BHyBos}&ډM #p$}~1f`v?t[!Ki΅缄_p:E7Wpt<::q,p]u&NLm}t δ k4h|W:Lo-/PIdﰼ44=hwVRZR81͵tXuЬ9O< V1j c&!L_/_ʔq38tOBiu~`pNDFv.$|}y1L,]T5ļe64G?_cTP9|ZL\BǎVŒU̍f<_Aty+ /]hwCg_ED`-zwR#]ZX1ϟ/*1mg=T׊^_! ;O8zNZWGnb]4q\:GWK+`-1=2Ś`|[ZmL[NcA)qEo)/ou nfH$yla28t"YwJȌKat*+MG8Mw'!lDΝ<|H~E Ghl2}Q`F绳X,{\(,,T%acg*/*M@lj勃s*| _έIN>[V Q9ItaZ;]ڵ O9S&Ckf4!vϏǧ#OɎ̯Nv_gl6ۧ ?曨?ȨC|ּ|>q%<`"݋`45^s%;X#/I+gpd^qFRq;W݊ס`~&ٲ3[¹[׭,_đzƴfmMEpmI0KvsG붆Bgs<}'#0 {I$`F̝$4z `FY%}9DzKܹ͒{%%prRV@-aWQI',"HNDUn? 
vcW7L2խ:߃R^l6l6 MSK8ulu Ն޵ݪ~yoz+gj͆ڗc+((ٳgjFע }F.S'1シ4db!fc"a?X0km($a.F(.rdگX~#Ը0 #t0̾2$\I7?Ý;xo(/?w,\keQ'>H} ߉5;E hW2r}Knr^$QEȅjZkZ(B3PT'$ #/_Lv z!f˫x:`D~Y^lM?*_IHz#m5$qi1V!!24stQ^V'572k[nwJ04#Ud~[h\Nk"%;>[#(hq#^h5qP:g^ӐGAV::a_}*F(*W2r_"'3}K(ڜ@VUo2,m;[ő{LX¸{-BwWA~r.=v=w/Na(%ܭ,:dS7$Q=alj;)_0x~6&%m4gO_ÛR&%kf(!e&t4l-u̝Άk@g[OM'4_m%UGΧ KbHpg5S'{ѵF ӵe@BʋM/{{_,^:~UQc>m[):ݙ[xfNE~^`l%u$޷7hO%;(5Y*t#!r?mȟap:$W.wˋ0WUAa{lذC@ B!7"tHm&vVquG>H$~?ƿtUu.ͺÿjLs8n@yߙ]Q3j.#R^?1p)NFZ8l#H'–g[=:/Px\JB)/B] /HDr+Cv X5BʋGD"oܺoꌠ!C!7DWA!!B!߀B!BʋB!BHyB!BHyB!B)/B!B!E!B!E!B!fFD"H$D" fIyySUUH$D"H$@hii&]4M"H$D"" !B!!B!!B!"B!IyB!BHyB!B)/B!B!E!B!E!B!{B]ikk2JKK?H$bF63G Tm,X ӈ.ֱ!!Ff<U0ͼ/ jjj4M0B3vƬ\2unz[cR!"~T͕ĄGqU3 VJ 1X[[))4Po\Oa~FDPSVLyy# Kk  Gvv6AAADFFT"""6 xTiQ.j :cLɯv1>:CuBʋH=iLpx\&-o:L1Xtv.H@zo fg$Uۥ牤NE^?nMņzLnۢf4TQQ%HTTq ??RRRy(..ڙK[-iɉ$%fPihsEG3h7DHtBHy?K VL~x|bi2zV2#dKI0F , p5Vk'#18\a\xFXkH~IxDE(3inl"'-šV@8?< g(- * #XY8΁g,/gpԁK@3f䤙y)x-x|͋Gey<|,kU8A3a 1&#0q`o·$m,֒nco' EX}!KIr Νۭ455dʋ! CoՆƹ37ЕC-c\7b0i~'nwsZJRQ)fЧ0-]D'{~cy0ﴼXVdn)sR pZuz3ώb?awS gʆn]K~Y4Y޿s|_SԗQҕcǎ1{>iL}*dz'췷U:f:im7LK[Mjj6NpVV !O'ovÿձH02/w UʋBʋfITd2zŪhV3&To6h X-6]a2q&Lf?j0/So7ˢnl֎˦WuM=Wj6+&u]ӵ/#&36@Ǣb{5ƪ^hS_kžci]m6cӺ*fmQ~T**>>>lٲEͲt}ߋT%?aj< MG1sl2_ZpBS-/BuX:E} rii)ĔJ:TL-UqQ'l6%Uˣ6c*j]"¤k~~Pףm:1y!!Uaa="""PԌi͍:%%%E}D"kՌcc7rs9 V³lO!!*'jvCʹdggR\\΋Q׽}l0Yl9{qB)/B]QeAEB!!B!!B!!B!"B!"B!R^=!B!!B!"B!"B!R^FRP)++l6B!)/JCq1ůb0R^^(k;2 ťXyl֗QdfM6J T7Иk1 4 MPPԶ~4c(,D" 5ݧ7QQUM'V[V~|:͍6TUVcPfV+B!3Vq`+'NbL<X+ߏ$͋Ʊj ܿyPf;yq4[\[ 2cQ-|_m4[Kg>X~?o~=q'@s 9 *A, T0!%(9㐇IwN/9^wӟPSUa?[N m픔O'J.[4n\5J%MHajjeׅ8 !AT244Dyy9^@&!@EccmW''󘗩U<.BW13QCRxk;*~~X eim ,2+j~Jn˛)(ʫv؟'v{OZ&#"1opYTj[aIԎ D"1'[%F;s<|LrJZc#ӳ5QՎ6he]V1AdeJ`7uu452 -Z ?qŚCݿɋ)MM;& JPeHFPِdox4qbg4sFc 9۱8J@B~V,?YcJ@Do7x> 9SF:FSN]paxH} 6&imd3|dΖzYQ[ RUU%/ϟ?kfqqfX[[hƆBxM/R˼t5rQ OHnicySzw: wҚ;U}HDdUỌ%7OGc UZT7.;<>ZH]u%u~ SYY4#)6hW3>\}M{;|danjؔ)ؓ&$Kwɿ郓_K|cq*͘:_Cv** ij.l0XEe*j:k;5cGUm>7ͧ9F;먬cli "e \NSr`ka*T]%:%Mڋ`J^y/ @PGյ-l*3c}dBA>TW;1ǦtƚjW*5]ff|)mbKT44RUYIS z,I~:UUL+\ه6Xej~:*k\A$Dbx݆Xݿp5^]=,l+.A 
Vv6=qR'\ٟ9w^O%4 |LrBߘ '9}$F sEЛy5t=Hcڜk<3u>ġoX?q1fvμt$ t挮>g' mΚA ay _$'#S洞IQf s3h[ӻ!+]}#L8rg.SP6BC&ƆϜ:NfU;s\ӏ'uSCC~>vmmM= MiMQ *''?w(̹Oc~']/Q٭$٘G:zSXt\>6:֑?xΎ2==%פ K訰:OxFXwW~4uB|W)ߗ\0u" /00rc a[;bs4}rP-0H)&9u |f){>#q2%(5 !XYYw}@3A v=0""%1挑7a8aϚz܌xrՏ532' oG=/9X>s)$xI36,e_#I=J(w4l8v7^5̜rƓƱO; x^#Gg<13ǙoG>DL@,ƅ`tNs#up#**sc^4-Ӈ`lN@?7(gw?g+=psu&r]ډx9ko0/I$"89L>?6fbbJ^ mo0#ơoN`$$VLG[ 1>ށ6?Q7B:)ccĸ[po{z49+:ȿ)=j8 Gifa3y=]x]?c7Kkhs{08G%g>O/_y9X;YZ+笥uMhoy!nzbbII <=Hq"uMxX=LVGN_aE}U9n {i-m<2ۀ)-Wk V3jkk),,!\'hVdevv_7᥾$G\-1s}?g=L3&~^M̳4HcK/4cimƕɽY`*oh]f}IBF7;y-6QsAϝ"2579}-J&+!uR~ciONG}G .۟NF2:K?uBxEe[ޑ56sU|+̎{k(p3:ǭtٿ#k*^yc'#T$qVǐʹoK#&H*Ԫ]nY{,OS8w92Ts?QTwdӌ!էk]TC13&YgpcGnz&|].AV6 6702 `AS?gt-x0 \LaCBr!I4irwP5c>DvRW7'Hms._o/,-t› ׅ-&`]5H(ͻGll>v螳a|w ?AUQxl/oDq&"H$3 YD*V!pqꊱYmUX+IOh=bo.2@R~GN y0:.WijjWS(Xi3`IOSs3O3pa1ũpj tt} yMHfyOF{Մǭ_KBx9GWVɉ"Goѭ!+2_ v2Om]8JPN=8'-ޏP~3gyPBPoS8 G0' qׅɲo˟5sx@[hys_Ѷ&g^ܯě,R'.]I Az3̼Ϻde6a\tcrM/SiËKww7`MxIKK𲱱_&|xq/9*5;3y|tJ* Q\ݼ_lxX;Mեd$; PҒ3oC={L*S\vv;yQނ5w඗; PC!vF^nP?uH>t=y@nv 7ax'<_QYCR6;9pёu_Ëc9[:jRք zG'xGJ~^y3e]y?4UoGt#PSnBr\j{X{'ވE${$[&?#126 F#i}w/7jge%pFG?Њ3ccpy=IFg{2Br Z'  Z%5'Bi,gt<  w0^SgnAExpciOF[-~A<2M U4M;CEk;~䰩wa{?kBA0q7Rn.>gqcf{.qLpɛ=u&-0wŖr猼SI/Eh9K13$b gOp8]Gy^]O' m Xϐ4$D{q=\0<0_H:;gtS3C~>#V.9$>g,#tȨH1 ](fjSSS^VVVܟ{0R*>ْA&ƄxJp9 r; `m).'@JٌblhJ7ԽF6LwUx4(-#GNeҧ y%G[zNC\571"{He~f|/az؞A)J55hH:^L@5O /"Dbx=K(H !$4ZV;I <'īY^I5BDxd$qIt09PB %e' LﳢV#Vb ~)iFSo HH}W/^T|[SDUzACMUP*'<(tVfȋVj&:+ * HDt0ǰpj&6Po0-x<~Ƀ8VZP/jߤ}\^Af"Vz5JJYڷ%ȉsg/3\Η&\:FHHDAR/G}7X%!&iڵFx3I± BYTDkb4moVW(_G)_士h_ #a9kr%+/u@NyzXWmooS^^.!|.7oKxĄd2wZ622#M\ϮaOɷg))aA2ڧ)\ IeTt. ᥤt|V)h~MXDd>kb[F<ȍPn2ݨ0f.ɳw`Fdx$sewQ!|TwavKAs""݇Yd2!Y RE*&eiE^y n@%[V& rD"H$!5r{2eooOxv^RLRk>BiT4? 
i*jZ@&KxRQ)} +2a{fLg*\W#WP md)OSf9jƍ\8^>*A{\ͷcls=c/ 5~> ߵS=_c[)y_ϗ'|m۱ MauukP%226&_QQ!accc9oa \q \spl|¹|Sa[8h+\|s} I&\ GNGaMkG=5=o혔x }׎F_5E\z,e/=#r{D-mQ a2^)l+|o| b_A}*l'\p^ ׋^<8rOB{J}*|&AocH$"\HQ(»,!F!۲" .BHP*rὛ۾#&c$J`mz$RD"H$E$%JB_`4XXcc#{NXhhh@S'T*?JD"H$ËH$EB8VZ2==-#H$D;bxDjZ+"Y$D"_^D"H$D"^D"H$D"H /"H$D"H$H$D"H$H$D"H$v"H$D"H /"H$D"H$H$D"H$H$D"H$ËH$eff1βH$D"?^D%$)$ӳɔ P2djUj~3ؕ)c HUZfxxjsBihh&&&dks p %J+s6,lV-NP-M,JT{,,,"WQ0:Syd_.\i{*.α*;̒2d2R0{ d{-uƦX\?B5"H$ËH q4d3\D_GtͦZaiLA¨\ෘM3ǍR )LMMP^!3(J(//իWߔ24d2h,{5^x{A kLqob~Ϳժ]js3pg'](Y@7ʥpOz#)1WD: H-v\z1jDkonR-rj[*~xKi񁾑!<ɮjNYt>F0r6(v4 ǡw}R-LxR¼T3!\}(ɪ,-l;4?9@uE}#< &8 2u5T5kDDYzGkK`o{%TVViO{OJos-LLK4>@$Dbx˘~w'8G Bm* ȡ,s |.ڇ8r,YL.h}䏎16f|v!Fly/p0=a?yFlpbXC|7+#') 'w;$<䏞)֦htEDjcvx~Ǘcgt FS}+{CEoxb䎷FOKJϋ8T\* t>&8xybo[p׀@6BW1a8g9*rqLzZ.%f8`~-)O=%8̬݈YeݼȑZ{FR;&6ƦajfOzA;uEiicGELܨup0%`%`f#14uoC\U@O>&GFbkdݎu8$MAdv[cyM6Kt=`'L5㸑W#blj_XX{@WN9gvLjs';chDR5>{cEɷD'<8N9)?JI?W-HyP| Wܜ-`Cm#Bvxx䉑#ZA1GP̍]ُ17@$Dbxː^P7@ Kp  LLmts־4R8zvjn{t,}>zZ??ZP6JwS~6iL{OzQ=D]Q&:Z?UMKYg yYKw^?~P'NY7My :{T}=E YX'ṊIYjҘY&+p̡6]+'^346ԣP@E.7-5J p{j6$?eve&ɪh-WrQ Dcw VgI{:"oh ɇ\Yኛ+]ٺYBi/3$FFR1dlkN>fkGK*ؙZ}B|Bȥta~39my=W;s<$V1K F>`mKF*s1HCC[Cw]lﰴM뛇$ddrzWƳ@u/ >2QdzsYSC\6bC"ܲeI:'2X5E01a9}}5Aד0L_feQBf9tu=-(| Npz $1pk Ya#H$Ë_m2:F nXZa9w]bp:'zJ͖/?WT7]kVriuԦgMxYbmiKc#(wq1pGGT1?똒0JV&^+T(rnf+& *RLc[‹IWW/,d}L* nI$ex\'-u~;ؘA*1Q:r42mPlKhR(_Gž\܂.3ђ%=> #0m1bWHj%uI /"H$ѿ;/ɜ1aw"|wڞy]~2S:sx&[Pz@DA 婮6 a?һtxǵ&:~a[Ҁ6E4wzomxq4Y@&)I}ܩiGu_=x%va(Kww7`MxIKK𲱱_&|x~Bvjԟ SuØRPGģ~!ao \)n&W -gJXyɦm 睂d``nl f4؜'Oh.+ sxp/}pjz ף%zbJ}Fm%\>z)^{,Lшއ8Zк(@܁;ehٻ}xwmҝw3Ӌ+Ԧ+H$Ë_u"GO5&_&pOz(IB#_4xbe|Zs|I>GOrjI!5݋Lr9gDVi'8xk˱SnM*?f7zuSz<&==ofT1ZZJkb[]t Lq1LMejJwKPGeee){/u)ֲ#i[|ִS@f7{{w0ACnE}Ĝqnqg;x} O0^LdtGE4זONjH%-۝* V*Duex[ӵJ]Q25ͳ4\g0k+ɈG }<%j!dwjӡsx7 _.;X-'IYC؜2)ҭqu,A"'[d[kv7O08 Ax>+Ix򺺎W8MDU8Z]Aۇ<_GR]3UUdx`NcC5i9nE}d-+8&g{01!c2uVn3]IqO3$jȵrCR!ceF`<|>| MIy,uLy?(Ñ~/yJ&)W!F5145;\)XW;l}2;[ K{,<^3lM^'b t'1o2pp 8D"H /+%&\l@Z֣FMyj$ XW0S" "08(Tj_|Tzy<`kR@-_5B¸_3b{D÷UjF:tѵ=Vz{99gjR6kjPvwAfyx=;TԿHj۔ 
@.͛7^211!S2V1hk9сD)Qd/db9hyziVGI '"2;$^^y?v"B"ȯA W)h}C|Da!eyO7J;H"D&4"*Y+a{w]-Sp+P‚cy0$0@HBbIMK"9>v-g8n$q5:>-ux#UU?J^m?BjwoITMf&sm>ni, JؙijtD%>ddEH7=a])."On^#,$k7xZ tNDx8+Tb}ԫqgdV\QU?oS?4xV1\ʥp#HNBJ٣$M]81d]em*,tq']adݾd`ƂLjfA%,pMfn[ K¶¶eݡwB4js,Ͻ09g!fIAtafvq e[ffd˶X'Nwtϼ==ggI*ڵkW-,OҐClݲ`2Ql @!SC1V's ]Ttܖbrp.vy,Z B!$?ۅnǥ(NÎ餓afyʎӥ}pvpTŅ@Qr9unK}z,6{:>vnE;Nz2l`ocε0g^Fh4 *ׯ_g֭ޫ,] }.))>ڽ 66lvw];(trq;p)]N;vw@}ݏXuO95T- \;Uo%-w[rڻyi;cjv:\ʻͻ C͡ӭM{;\knxۼ;'1ovtgn.N~oyg 06_MXؼh,'bB!E۷]{*6,}%::jo>}oHp8Ns7]up h%-֭^ɒ s>5BHxvxsuVyLnnxRRRW5ˊP!rbYZ.TBHx7{z,E!=^zÊB!$!B!$!B!!w]B *. )mSxPPMQ}F=TlmvlNP̩"`BuRY\I7Q\yڶ)|o8dfVTB!$! ٥_ω!ٻN/{Dk(嬽 7CúuZVk9!;cH7!^p:JJQâqjx[x0irQ8j[+a,;geܣejh!4ziv8l#3Zt;\Y&@WE\wrv&Ix:x4vV^ʧKl}NʒJXr& ,nWSb& rYzFBA_o/UXIU~ǍtκzN^9_@Dx ڨj,6?g)Ϻe*,z 2» »n* *Wrl.QFM&n/ޣ{<FmQg:* |\٥"BHxB|Oxv&J0V{E, -\¾zz#Dbt&E4ÎrL?]Oqh7X=S0sb l~BSldʙBctG1V.S i$&Υ7˴|&ˈI9?-`āl-v.N`DO°lzrZ&K70h.ȯgu4[gq.$k(و/fG48c 9q wI =kk"/_ \ CI_^i eB /B6#Cj"=9kGq'MCa?ZRa[h v{sSG3V ">۝J®qIjV<–\gBlnNg,5`Uf7;cxK]k)i +@Mxq;8Ys^cr/+c -bwIc+kq/ftqL[ϙ$#1uI48M58q88ԕg1l_AJj{O3=s5ETc㝖NȧHM!?XCŊ6<{hq8,;G3(e|!ځ&?*OS(gXr+[w(0͹6B /SBCJ1=3w370OKٍzN>-ge:8!7߅+6^j;+Ee\ ] Ct &t:_ǹ$#Ɵ.E t&\ ~N1:AI~#e2?@C5q_A*?ώxt+//RNyS{wy K`dTkS=.EfK-`4;B /OB㧴̿\FO'5 97qW$:ppp,if=GYz*.`Npۂ|e[{Ƹ-l#`4C8z;I#K2猆CFQIjXs-{c`[%5_iM Y9,ەEfd..bgb=ΰ+1Cpd?HxTɷe2yC'x+**ׯ6xwbᷥkK[q(=~mdF }:wÇ#^6vtg;SRa'=y2313$}k9|8.`4U>Ǩ͇iv8p8<7q_ }JJ 5< MSl߹*HلV8LAR.JNo1,fӏd CjtR&2SO.a8s88wžc 狏 ܗ8 ]L\޶L619R4&RÙǤ0Iv!s"<ܕ>dC2w4a9L@(6#-ϔY;u(ILZ cNTQ&sG0zV** vsVCF}2M5ԇG f2޻m,88wEo]W<ɮl %PclW!C\$GRbܛK\OEUJ$N]{ȳwٷc?!G3uhy̮S~ bc̔yQ̍Ylvb.Ϗ2ǑܿuGa!=kT2lPtj_ߧ(*-a Cx4+2,,zs'hLj" m`\ >ɇWuV9t1G7/3_?,>J{ھg%f<,;M ,.8[%,6Pseо!='E׬bJԑ};Ỳf,3OW<|ݧ`0&Y`8QO_U !!jǧ@:BVN6oqW0e,5U;Ρڃ2M||0cy,>ХsuxYz^ /752C*4L N<J-F~0z&w2e~mx9~8_|{^ _L-`yH%?c{%:9m~n:=Ȯ='IyŜ19zsQ f?b<Y͢IKC9 z1!{QL /?7bqrsRX=VxJ|9@ˉ K˃T赜:>|W݃O1v~g6S[CpYJDBGNb>{?gZ#RbqdҌtj%f3Rƌ 'U=x6ϵ,}e- cIXcML3s$hg3 ͞ϓZ6aD=JޫU~ĆpAi. 
#jap?$ߎBHx"/[1@&ϜABEsseS(@f?Dصh=& ` CzG!KM`ݰ҆l>G+`NbD899[װfq:tԲq2Ϙ&*p?rV*'21hX6cs.J=stE@u^pX7I1{rFҩ*MJ9͉Jʞ`ָ |rj%Z[xv.'`nbp9Umf68YǁU<[W|E"^"*kǟt2(eμ6C[g`i &Mȶ;(HxԉXa3/س{6&};9~*=o?E˃,2 s-nF7M o;׽_<xUq}ݳlOB /B0'f.NKWϸ^DRTӎŨ@{CO^Pb4yQaxNY]3$e`S^K0JMdB>ރP p(nSvr'}?'rN'^opvؼy‹h40RYYI{{;_t`1-X.v bwQU͂lfyʁaGQUTōb˭;fՆ).N]TʼnݩxMV[6v's{Wflx_Gzebp\I'f?mofjr}lہíz{iq`sߌvs?V|r8w۶v΃å|;twly;\V\)۰[_oS tzlvΊ[@,~]/v:q 6I-!"(\nTSQ.\.Ww*^nEQVvvuyt7nE[_juT+WW.fıHQ_EYY/_^ikk'Ox455a2y6FJJJ!K)Om{Szݲϫ`3~!$!T3O_ޫ*S__C+/999JKKm\jjjTc}5Y9G"oVU(B‹rVP a@RRޫ,Kjj7ȼnp8W` ߳2;K{5kPсB /Bߥ/ՠn x+CޖH#nT-FH׶L$,T+.j9ry aJvl.GSVh;){'Ē6M6;ip:BIxXGb,cYPv$s|WnСǓEݸFnOeN%* .6/3k۳p8IAIO.gS MhY ,Ha#NIAr;ȰBVV9]ɬ9H-W09TUZ@zz!h(Jyv{R0*X*B35ےxnĩ<6L&Rܤkb߽\IZ\*l٣f=B 9FHy4{F+8ZZ8r4A> lmOw-h.a{ =1Y41{w=9jlV_Og h 򖭑!qؗEU]~lv˩,?[yY :Ȉ ؛܂3|;[;CY).oXVL% ZWÎi;B /`vN3Z5zŐk{t ݝNtd)o˫|!/Kr69+wkP*;bG3PمFCRYuR~\KНB1Ll QegE&?!{|#rLdbTc^Tg<7pR*<آR[R-f #ϗ`/JKI̧_SZПM-{ \&Ҳ| ">s0Ll 1D%%$H'kcҕ|Bψ=YtI̋t|wᶙ GE.)a$ &B6߆>_ /-irU9 O`U 6>,&Q6xj+ef^)D_?-+(W@eyL^̓R mTJVFbSxğb_LcyZ`AH,uYZZ^-$Y4UiM̳D1id'_!K(?_Z j9v.<Жp@ SS+c9 _ASV6υbi8߂˥ഷj7kpW4n+; ͌DKa˵bN1vvŹD NAٴ;ܮ6(D^I r^tg~}xqZa N 1 б'[!3-{il6,RN\΢8.&ўJ惭*2001)=\js /+48[ }IŰra[q5=(0rгΐ:9Fߍe_tz6M$sC;%=B:kRiR|7܏|OϤ^k0upI{uثZ2K\EpXaj<;ffհq-hr CBƜ(~t~Dfz>5[1[͚4fj.Sq<\LS~ d\f`X{ZxPJrL)gsiv%y<˭R̦Z7"^!dr ެV=?΢6^~Z ^j9a4_ϙbO0REg|37bx_.|x 8IH1'󨶨heyCW39|$ ;=1Yp Rm`xFIT~:RgayPGܻ)=GM|)3SkF6Ƞ*^kiVUS eat?D"Pm\cN.ųngjBCcy@gy|2\}îTμǓ>Ql+I;XDc~ d/)ctR5Z+N=ˏpNWF[a=9S=緖gS9Qxk9JgYMٖB +|ϩT k{_BTK%Siڙȹ+YmaVLUZ iwz{psVް[mu4W3$06VnE!$aBٱ7 0txOWsA>7j+Yq܌`nsϗbZi+-hfy?}Aq9eR>JL*8ٸ9 Eg3Mx`!=« nm-S73=s4PGri71T1n^ <%V?ޚDLkI5Dz#}H*+oVP?~ gS+~;̳n:kqX̤DfZ>W8xɂ!1 ГTس3^1Z9mΠɪb+ΓɞyfƐ]?!ԁU:Cfbh-b2iI1v9}<TTCՉA02}~߯="M ʥՂӇ#p8Ȧy?I! 8~4Bq^VyL B0Xq]#% /feq%-) LXf\~#VUcѓXن)܍+1=?ݓÊI"j`҆D2ĦӵĝjIξDBވssrgxkWHĐBjZ;S/MĹ$h_(Xe6`hs!^%LX#`-@ CCrHʦFHdºz/q)2G3xo"(T4 ߓZ~oѵ3O3"#Q;c=[Ϙ=X'F[e 7j>\ⶴ~s4?ݐBAUm? 
$vy2>ޅϮXp,?#o뒉|E:R$h5hb mh%/׎BHxB8l6k8ܼlюB\^y5fk,4ݠ1@JE;U5Z]S`EmV*)7:Զ@U$6_YD6*BCFN }q+%i6;i4iNUT x&_DSEu;i'6ۜ|AYV*M.TZ75Yh(8ڭ4Nu(1TB~Ce vZ B!BUZj݊}}w(ݗ}[۶oO~no7EWlc.~4\Ѹ&-y՟7Ujri B*5E,?Y~um;OWϣ~<p\ ݋ [xqP_mS=n' ;P椩d)f-Ծu ۟m絸hvہ6& gI5]aD&Ч@|FO`O/d؄Զ8?1dzR 3w>½ :q'33c'q?<ʢE3}pd2??;ǐlwNu֣0y#}GzrLI5|$sүzo4g5㶷se*|bg}`]68YXcه S'2l  q~$fbeر4)'0zPy px~r#G $&I8P:*9k#FaҤx֝*MC;җ)Ӧ0dzKxՄyO2|pӽuB-e B!$޽O>~u={& C-WϘ$VᥦMUC& Wzy#0o;e <>c8|)9磡oX~1˷4=cC+PwgC$d_68i|{o-lJ5ƏO0k*N_ӧ=fclEc c9?a]f{1s?9MV"P_$63|vˤ6zČ($aFcn:]y>CFF{%''G!Bmc'NO?א }N#JR>1yE6+My=?Kdv7>?Mv+vc> $c{= X;4fuC͕:W`Ք+t1DelAsiߞG 1>gbS>lև[+6Cf{7:R3-ǢLI7̲*o.Ь! ֌U(tR8q)Ɍ>S_^s5<.$I\A]c)~295T\ Gc7{7ىϙ1CƬM2&D P陗neԨQËt:<"B'EQ裏ػw/^R NM̝=?g"< /jn}1u4O]ә6e4Æ-@TK[~SxVĶ8uQ2w8.1w~pMK>zJOc)FyUOs C>7b>c{t(.l3ވRٖLF \ l~0>O}4f$>QhwA 1u\Tj ;ysa 8bmeĪt8 6q>C:җi3g2}tOeر[ZK3g>}3~Ajp6 /oK{!By`3.\xqm#cچFXHOgGrAuF+-UQBxz ]bnfĩ؇CTU}Pfڤ:7n$M-)p7dxurH˽c+QΆ|J46pKe6j\@[5Gb\qb o=9XX=ϟ<< f[Їc߮L֓ 6Rت|x͛ fwB!ËW}}|g72v ςw5 ]뢽& O8.L;~{b.Kݯ/#'. ^T">6q>hpFD8^B8l vlHz^k /cn`BO,[uX5f}F4h$n4rs|>b ԫ_[;}V#1s=9M5[n{$.?!,<6PiRp5uά+Y4g=f\*8}~8haԑG @3|1aYt2U&jʬ%˙7ˇ^}˴)P~CsE]YU\`+X7k(=dμe̝9^`8T(O2r,V2g0\Lt9-EA^30O/֡\.t6JDD!sB!l;~r*.ndWl* NN@zlt(x?ƍ9r)7 N=ZKr3": 'l3я@ ǥh4q͋V/gV#%5ff^U;rxq_篗3]lCCC#x2Smec}|鯔c99Ms3_2xV?J寝de~>N'ϩes>Gyz"ƤX5AibZ$I`2*+*(NM={LrzfJmz6ѾrfhBbh|-/ݣQ-׌ Ȅ_ \{/LܫedzPy.Ãtw09H|{TݣmXoo+RA7Ns1n?ai7.mMMTݭ}jlg}Lht~Z6I}ӣ p (+9#_LF#}SVD^ hPYZJWL ^e9J)KwhE׏v~Tܸ9s&[z~60vRw6 /'j56P^YAEE9eܫnshTj}z{.)Q QgWT1~s@ ̋mM90/sqx;P*UћL:LQZ-H:łdΧ7,Zwf&mduj79žYc/gCŪn)< Ia=f8'p\ɽXMLft7jjAb0r4?~4=VYR9,&#vfr2Oj2]h:O׬N ',9V+FL&,cX:r1:3+uRl4`|óHJ^HQZ1[R[Q ROvǏF_كۨ3N }f63s^G{ڏ~,2/&UzOJ|$nD=B۸^jو>Ij7딅KPGQ50ŧSljV'-fIꛩ|omM&#MkX׃,F8:}q5mH_̖믰LHsVfYב~JǦY[5jCh|_st#//!d F&W 'Y8+3~ /ez^eA$p%h!7ߡ7y:imcqYWu7}~h6?$~MZuH)qb>cr꿟Ko;f&۫47NZLViDi-syY"I~9uxzLDXnį΋1UwʹI:Ė3{~ɝPȂy1:]1nsQRcLZeq,IZhL&zUp)Ʃ޻]ĞL]uH)gFal%#g7% o0MhG)xM䞸H뀖"V) #k%چ׈Twy$􌏽bRF-܌xe i9F oL;~?=#t~̋ԏs1"nl>yqbR=YW*}R[JnN[SVߊVohfzٖ۹RȄ([IhA)]#: ?EKFf6ehtFcsD8뤖gw ٓ̌lN N(΃|Wpq>nԭjzGf3<$;-T?`/ߺ[҄Vo7zFp~23ΓwL&:(8?x(3Jc0yU[;x 
+JKO7ZUgq]~GvF:;rx6{$y^ta4T~[3NF Aë"vdek.U[%33ռsd}r{w4}ͽ VKH <@A] 'u&. 7q́}#V*(wU$ _ty'1jQ Ą!L3y࠴YݼLopYV!m_0.rR2Q_u(e #cٖ[Zy^7GgW ݜyS QF[x>^QŲo%2' Wsa V#ݼĥZͲ(/|DE%/LBu?tXC]، al=pgt cյb~(1['y[Bɑ_ۋb"Ը~WWK3IO _œ\9(>SCt9\u"UMRގ~-qx.5B=CM,OQ[Z\":p52/!aDG#$CmqSF AW/Wdts'0<|Y]IKa=LL%m~<>~l:p3p?ȟ* N(n^4\Fҕ+uQG/۹+ oaDt ]ϫY쉗*(T>/ukr 2]t#vy&/_,?5i+yXqd,^:}_AD&LHtݞǯarcWR>Y(_21=/ٗKl&/k12?IU!ۃT&W&,7:?3ؘOT%%>=(nj0A~,˪Y.¼H\w#VMEލ:]Z/U)kcҢɼd$2O2/U[S!88 %N=CғRیS~6#*xyI8$3'j.twfCq=Q-MCp'rǫ OXI˗\޾GH.]̂xs5K9[#'N%! FK*l'@&cg$*H=J.T4: fyߏygvha6P>g.M\U`Q&[ojUƚUiDrFc Z6&,v?f|x ֤ 2.y$%-fL\yJUyoV`)9%}4\م+(3(+:EtU` QqΛOb N*;1q$͟d#Yj $ū'Q(y)~B[ǨWEM e;yw~dJ //wn`v~ZVYv!S޷(*'ٻGso^Crj I˷q%C>V?}qg̢oUd^JldN|8{gVⱚ f 䃾 qMdl"RX$evjj ҘZst֩<@;1īݐ{9.auq+ 0ZI`x,y5C]Fx\"LJ'![EX}<8<)DT\q)$ůKoFvh1贳ٛϔ1=!$-@"j^Kܮ'%<)hh?yUIjW^$V|ԼbpR0U[P'r Wy1DGHRj;[XVǷq!sJ|I{ͭ;HK%<&(9M<&KN^},?&߲ꢭ8xE4smԊZZ9ONdFlmU":&iz}[ʥ'y^͎5x8͓,ݗش\\ĖU{Cd<-"kU:sjF e=<E;{ljE,⑑i.jȚ ߴy1Rgc4kg`=*7Ҷ8% "e0ڡVy:j?[KHXywkhytxv8Eֺ$kxMzrz,Fis7'b"jۇ0O)U34>KuՃB[]9̛8Fik;pE<ʟ6@|0+ 'EWm(sE=vA%,O e5 F-ZM,uÓu>[c7/r.`KYZ>~x^uVor=ٞWHH2z7v%MJ*wvRPgGtx@B52Tu7G7vW0ƞ QS1ۋ0MxzQJn<|HRӢ#D+YuﶯŋiDl?r*yv~'~>\y5N3!=[%Ww/<|Tl{(: :쏍UrKy{k? IaVilo$[ʧwpHmS3n:ȳ^jD;Gv*CP(_ML:GqW%_wbzOKQ.KTGTww7ݐG t[g>줳PGپ"ON5aH}dP+pp ogܮ1//Y:?ym#+Qsb0h̋T&U_cڅG0Yn__`当4_c܌Ÿy ScW*E&cîJf}WO.߼6?Hx8:p)r[}XΧd$(qusGC݌4jFS-oqtuEPgmqr\偄ŭ `igrw"ԅv}KBpuu! 
7%w`κhn1af3O;/o &@&[v>f J]K\˖E];p'04 avpx^,vwo`K Ws2_5~~2i?t<\[D׀4 o5@ ؘ/h5\WOcc]GsJ{3`f?9ln 9m1`[- oZdq=} +j:}AF0B{h;䋇>ڢhtngCJH9**ڷ6&v ;hX>iYL=[&#AoV Q=]blD>{(v.EKu<S7P3pa*x'5Crq .GIgŖ].z~²8-P@956{>J;=TYcٷhh^t7}j=F Q(*zz;x.,/PYw@>,nNQ!D|-K[f0 vLVQ#df 9,j'{oKK $vrYcX@:jwpr!cp!ʕ1 WF.Tm{(k۶CI_5apgӃ]WoQثb(cm((Wa BlRJZhwJ਼f 8%;_2_VNn5%X #hP]+:n& ,F{u\ lVQґySכ"G}׽%n_qOޏ:O}?D@wXm#y^C!F-lesz]j0~.CPReuTF;7@Z88mǨUhd岁./ۨ E:z-dg[pYEt?\;m| N56гKRDX_Vt%9ut?t0:GD=~;d%z؀w8Q)-M}tٖ1p=tFSF~m7# 2x)h=: 8epri+#OYG}֭7 c4QNM"+4MZ> iY<'(݋E "auIɇ Ǝ|';vP]dži L!=H˄)l|`XOeK Oy*O2L٠t٢.3VRO𔄴|[ wj{| zAkJ1!m @Vzuh,hԞNzDT.>{is&($%p߼ArQI@n"{T/0-K~$bBX7ey<CAӂ|h͓N/v7/8Q4>,ODI(kO/KIĞxC:S]U'[^ L:E KsAbow?1~x?zy*Tx_rAe>εiZseQG̙Mcr^ x~a=R} _Y}R%z=}ltySx:~X!^RgbEkҦJ/w/!^׽JI̯!`96.O8\e\aN~ >r߭Rt|!D%~z=EҡtC6+&dmg=s̿vN{NCsfBƥB tm;W)6+kЏ뮽̖7ۉa/?qy"B?%s̙h0_='P>cpzj0hm`<0sc؏*[N{)`0  O̙3g"N`Μ9ssZśqx< 7/~3gΜ9sœ3gΜ9׼/sl6[^HO!`̙3gNs̙3gesya0 `0`m/s̙3'0gΜ9ؘeY`0 `^eq?30<3sᆙ SB4 3svbc)ffK3$}_qRMn'?k.]&e]?mp݋d .%E!B!%E!B!E!B!$!B!!B!!FimmEQ]]MUUWPRʱjʱG!z"W[k :ꙶ_|><WXRgP9_5Bmm-˅ JJ91WrDB!$ӉZAb]aQ*oG<Už_qIl6J4 & &BHxiR7sXuF|SWWbA'@/vԛT-5;ǟmׇؙc5nxË8㎏wJG :@sH@䧚τD%X/+**P&硽ˋf !/hxi$;8J@ q'6}%*#=6WwrgMMy5.m6KMeǕ?O\.5CMħ17m3_oWK+_AJV56ݡ"*vyZt^̟R|LqࡿJߝ">S^vsdL~3U5M{||^\6 (**ϢAYa%Z(ZM>Υ "rdr|q-ꛬ(5TVT|RP&|.$cgNWTãx^B!$s=3NgYc5q\3~^eCG2kHΜ9|:ɺCCX$m@~3}>7'/KQBk7? k/c4?3g3yw̧sx`fʳ"Bx-dF\Jǧs(>[ ~[7XeZl=K׸z #pܸxwB9}8R˩ 㗹}" @ci<sGQGTt__[\zTFV*5ddVHxBeb-ɜ) H+[2t,XbhLb-ğZWHyCAl^ 5Ǿ\˹:hbgi{ E3صu'odĸ;58H}gpyQТ4ԅ;1鍥d啟7|79[3蜔?J6t&#C~>b3EMٴAC0f^J]EQ,ۇƳ?4I-wJȥGQs99߾1~C8y vRNHR NF$7Zo#X 4?\8%Z|t#8wB ;EB]I:$CSOZ}(u%$[7FZа0BC")i*!J_()YŸUFXX(1XZ04^uMa!GFOJf6DRcW^/:t8|7хr,i4Qj5Y BP̓L5Bo!! 
g; ywd +9u3/DrHfԃ-ak=;?蓻9d5e\תKdöVbYT̢9]"떜 G~M-{W dP||}9ks8cqY$'/bN\BaE>Zd@Ie1Ƿ$FGim~GnPXÉ{KGHȪnjDIXqI%|IʴjrbOp5r)L6x2Ǐau1὜|whj*/^ 9[8}0xygrnBS8Egs}GRQUrnOx"=6Z\FBrxDaWHGԪd%Gx7|2[Aa58GQ|A()/'+%4tyGqA6A嗐DX#JK GR^#ښ|#21˹p t1uܼFd Dxz7<ص64''.~i*\tv?0QoeN%z ٓ?-~y?CUq;\A "$RN0g ʪqR7ibYhjbDյ(M8m'(;ʛנ"`y//o^=CyC;e0l5̻ JTvJpж>s!޼ΡSW7I&9+X2u&w#I T`(mv:# Q'ZF\0n_?Ʌpn<??h S]$iH ؋B9s&CGrt8\>w?v+A Íۡ=|TiU>ԨtՍ4?TT!)O/FK~L\z8V͞z6{b=6p*>X0MiUAATG;ɳф|;8` 9$>;b >pgn>Xw * N d榲jԯYz!L˱c])p;Zי goSYSN3}ȥ N5><~ʲb8z@S;]D^\/XF}inNBns7:wH0NR\5NpQNs@#0; {@Dɣ$""K$=aq5cTGxx(wC)hfѐBTd8!<ʯDU[^2¸ॾ<'%:2o|xiցt&^ǘ\AejЇ:r>YJJ[9~i/qK287C^z)jkᯒr~97z7 B!Ea|7n:D(*̹3<{Pb/P(M6~fN= Ƨgx-("VJh 8MXBd EVyKym)ǹoH;b'lcqL^Rs; ;"b̘2UY!?_oMlBFDdy8aEl?UÜ7n6u1ATv*STD&`Ӭ "A, q1hL48Ȍ 'AյEDd*IP* X|?^Qk /yijN>Ӟ_iAa(KQ6h$QxML|3)^"e7㘿6y8ݨ"BBQϭ!,nj!=, w-IBi'/%]\ESF[p)؛|JAJw#00;.+#>2A!DFhk5= pwɩPVd8;;w @>b'Ɲi /^~Q\Wa~TE< Z "!;Sx񀢽}%7K؇asi d)(52B /_%!ǃՌKm.Z[M.'.Y݁ rk4,vZۼ|O[ e,ۅ-4c68Qmn3Ӥbs8i䇙4h *rB }XAǃ?z.w+ /n;olgŝFunJYW?")~](+*X+\9B!!B!o>!B!!B!"B!"B!^B!B‹B!B‹B!BHx !B!$!B!!B!!B!"B!^B!^B!^B!B‹B!B‹B!BHxB!B /B!B /B!B /---'%p8x<`B!𒑑ALL $$$Er """Pռ`B!#jh4<~F^@B!BHxD755EN#77E/B!Bȝej`0!B!$Ԡ70矼Ǿk׳ʘ]c/P{?/^FE/B!BHxS&τ Ɍp£1u]DcyBIKzZ<ѱ4&$DIxx8ѤfӨc6wng):"BydVpfǖ^@B!BHxd2)K6dbW3|bJq8.ڼbzW,V;.w v#{{f`11ۜ4EWݷC o^;S1c10-8].nv8qmn,eՂ7ؼ(M6'fDYt`10v.T86L&3kX(((xAËB!^ɺ:Fn^D^}{ 7Btaq6ő}07b q8d`M<|%odhX&,6'>1wN5n}rn V4Ɔ [z/EKcL? 8>[V _"B!它`:b.!yc=ˆS_ 3w0cO`Q?㉥T< ፑ}?l MȐ!0e:6Å0w_7ami_%IWУυRVc2nL=p;+׸LiAOXL&n|^% cFx)*2f`_Yw&B?|1xl7F cK~?8MʊtN^B!ԤuP9 c-x$ϲw.}z1 }zp|9 9 5?{ʨIۯ7ǿKɅa܊87䥗ӽ{wua|*[ dޙ:G1ypzt\6#/2菿ߨܯuj+dސ_yS2WFB^xgi5UZ9}kU;^.r())yQËB!^4 ]%(vYxx6>˅ĨⲪ0IG`P( IOaf'9n}&Dta7CTt 111DK)loT߅{w}'l6n%brpu<{@ZV6w Sc KDl5MVؚjxxhv:?gKKK_"B!VrA 0J9ֆNpr7ڌnkkW~-nʺ-'yՆK\njokzLsK2:U6x[/ׂv֎RI܂K577E!B /JxQ:(/ZZZP~6V__B! 
/* ^b/ՊZF sv/l6+7(^oI|!%%%%%%%%%e>BB!B‹B!B‹B!BHxB!B /B!B /B!B!E!_@ss3& шRk|n<fHTlMN>+χf{[iOHxB!2GVԄRk|RP^Krl~>%l]6ݶy< ^V vKxB!ij\.dAyT& ZZZ~iZz'i~1?{kxiO-/{jzc5]ί6eWl;.Jp15^V΋eoRkkBL, Jxq:誫C$}{ryiWQϷWEI);fg1|FJ_R /] !/J[4^Sn*;C[9빯_l wL&^k^f3UUU^>B!n3oLC϶֯SI)R~‹zW) /B!Ll67 s˷Wl'rנ-m圕ʞR3_R^BE,XO*g_R&/+SRd3/AyMw~]"T~}v*ٞ϶^U̿ب\Ͻ^l6W ,u`5*J‹BUzisyhu{‹bQsppڟe׵Ki+֊熗(GJenv(U_l-{k,m-%X?fO7]Vv]JG)%۝evg{_eEb?m.\J}kOYtv9? h/k-4cT6zE 츛m]}!FQ‹BU\\jbÙ֜oC Z/:Niз>WxQ& fY<(OVU>WpI;q[7h;l]mebeԒxbi0_ R& /Zm/Qk xHz7y nݤXXtoUlŌkBÓp]gWГjk[Ϗ?ci+IQK; ?˝ZnP#r+1? 3)WpLmSMT="9=тS;mdC=p>n &^dbsАÙ3 jajc1jP.P>N6j>L^``{} YgxɄėv?fg?kITA˗K!h'!oa_ Ӄ V4efodFTs;ӎ5[k9v]*Vѵdom`Q62t;9ņFzl ѨT䡣mcxV&7""66+Y<R7a+w{xk>*TXeڳuoe{hlTŜܽ'gwb&?2V&RUU±Ky% ]C%laiL2é1t맇Z^83˜z,wgc|kMjZ1<4]XΨ#j,OkV;I f7n+[ӌ`kVx _Ran%>˹WBM^{Kg…+%|<svx0i0v_hPLv[;j:~1#4QlhC6 r$e^ry|kg% 49j`deێ_*槻ʹ[Lq5'ZȪqQg2rIMRc :&2cj} n 4YZI+4MJy(w>Kx \'vޫ'=zfèmx-A೻>y"Y՘,"[)Uqv{72yX?N;> :iTg3gDOF>juy?3Λ,g2G}wges5fⴛ1͸z~Fr*@ڝbAolxʔos;d㙻d31X *BOmdK޳7n#8SV2ck>c>[P1cRqqj&sf>8qImv"W-` ,Zb`vz/BsuVV07P5|piGFTf]G[vupP,9ϷNzmem=+/趫6#XxQ5UT&cghaeLĬGGaÌW3s/19ld8|+U3F0~ ԉØ QCef 33lEȯyt867㲈?[Nz~֚ ϔ]!<:L=ua J(LX?SxPYwZ1ᥡ\Ul7`heեj2>p {zp WNÇ{VQ՜j_;iTD[vkgXΞ$LJ 4;=OBJ?4VLzInݦ[L)\rGjӓ7>&V XAF kM&kgfӷgof-Yݻ9=q熗Y_lz*|kU9~`dz*zB:)vVڲz[P$GQ V3T7hVx|(7@J+۲(}Y\3&8D `0fnR8Y4s(L+.۱̸Zϩ۫rPW11Ҋ"9{3-zǹuDii*kttw^\V+Ϫq*~ {J`kiA'oPeqL:[µU !Dzm+aҜgΏ]ْ}zxj30CM$6'=o bwXM^L\NN`ĩ>rkӫ[o^boܱ=w+yM.Zۚ|/ZGˋPKBB I^hDSSsf2s8V_ _FZq%a7r$׾KHǬA/C3fH`tLu3nh\Ϙ!yn8˿/iĤP_˃9v7EAeFu\:=MLv-r8wDMaYW'x97OP{q2u\8?^&L+ײaz֬LVU,~3e VZM1bq2әڧ/cj񡄔51sw0[6d)%Zl]4i EsQ~j6qkHCox2q/=lW(˲ tޞ1'h{[5Kq&(Q=ֽc_HqYϕ7/.lό`} E j_ I=l 8"+pvr^1DjPcG7ـhF[+{>6g363krN<(?L%43},k"%_@XW7gk,ܷ3aÕĖ39v=FMsǏFf6m.k֬cӦ,[j?ZV˗M!>ok 7r-NC#vۃ*L\ü&*k/󻽍hO˾&\q{k廍|[ANv}|~xY¿YSd23֌0/MI7{j >0551ugRu8.KP@61jG#B(+w. 
Ow3kV֌"/~{ycAxREV/df)-@eZ6E6a,f+n/YU T -F6-gj%*[-@#?{~sM /uuut^]yfH%&2MšxGwM^D\Ia̷#;9M˶UIV!*?7Cή,&ο3QSfq!:0YMЃ tZC6ա~7nqLݟI'ܝ'x?勄1-';3as?%#k0h(;/VވM[Ȧv X.[=s1<Ҍ=]^QFܵ{O}1G2Q+R`֔ˊ:n5~^p<ӧw?V=r>,YFo_Gش8* &}s:%!_vnAA(wq4d-?q7ȫr$6Z[kZQidaZ ;քnpX#=v|b0h4ϖֈ&YSF3=4h:^jTdI\_ 36u~=f<÷,ky{HzMdz-nS;2t묨2w`zȫlf t1G2|h_$7'Q5sRN}QË?/KO~Aei&QG-h fĚP&W򫥩ϵwLd#^Kk.,de:,(Bo0BVtkLە_m'H.-`b/~m3<&3h+Lv5ޝQAN5dlFEXxF[;5ϼAc%h[lFڀ¬*UyWYJ}`ŕBBt=GLK{nx{z#1фga:D2}@MZ@t cyf@uڻSW"79Fe)im}BRna`tt{UN܊Bg1i*QCp5vhq7CkߠHȻpӽ;M`Q6i1&372b`oz´)<5-bu\f3X7gz}`Z38ޙЏn{p~YG3׫#{!KS-׎a3Z5}2f$F GX*kc.>Xisغk'FfѪ1؉r0hhM&ʱB!LT(w{dwPvPkEmmG/w߶yV/:ًIʅ9W;U*'fLN/ N;IިGQ: : MךЛt4ih5M(1Ŋ2UzN3ke]ͪV,5uz42[jKbc21UTWѠԈZoda2h&шm_<0$v(~4?䯗-IoaY:+.0nSDbْ($'tY(_.JaޱLz4+-+evQY^vn-NDVͥh~KËFE3jh¨*?&ZrN,5TV2[574;3F- u fԪTj &F5zΠV5 Z*nZFoPmDga6*Tk0gҊ*:Ԛ&4bw(_*P]AIi)U* AhŠUuɢ,NUKYi5ZvΈNZg8>Z9*ԍ*T*K&}קRc/1v oaen:ՁNl!B‹2kjjӢ״ʄF>eYłO*kLMf3&SGjR~'0fer4ZMKO/5txxiB)/!$Ts=[Z/UY_)JǍ߹N]JyMyݱn)$+}]|uGR?tgs7u][y+>CEjkk`I5ڵOUfF#'!AȵBN`uwJ=NJeM6xI&0~E|u; {NIxB!>e2Ro}2sBR=-JR^e^&O&T_OӾMʎR9t~Lk+U;lܱg.UNV唗bu4>]3w /B!sIx2a}nx`UkrTR94~‹F1Ǝ&rIBc)F‹B$(9eM.%~jx|Cw:BYo'R׵^jjjoL#/>`$!ߚ*me,eZYYIss3N|wy2}+b¶_ Hfܡ|ݮ[ZRTʿEy"B\.2x%t>~ʾuWokDeq"0y7I%*Ζ$$ʹW&J׹;K7U‹B!e٨R~|3<~J)^/צϋ݋]R nc:zZR-RTD^BH5Qe2p:|Ne"̭LנҺkەk^, ^B!B‹B!B‹B!BHxB!B /B!B /B!B!E!B'E!B!E!B!$!B!!B!!B!"B!"B!k-*vC-Iw<H(` D$$X8"8h ;ߺZ:uUg?gWlBkk+򿎻ȱ蘅СCS 3hnn땣c'/899!s qSg...XXX`ff&ia La=zXa)055o^` Ҭɋ\Bヽ=vvvx{{OFE:t GGGlmm%|:|}}1777C:wK&/B^Ct\<==oNΟѡC:wkkۿCuGR!/9.y~ݝ seQ. 
6%PDN666ҩM/nM4ʳ"e$СC:佩ۿCuhEd6cZ]Nwɋ!-Fuhh(%%%F1TUUErrf|"D&!v3tG*(--%00P2~.l'z{"I]&=^H .nx*ʒr<%Q?8:>YG\\` yzz1.=2t˗ЈH"r<㉫_.xtsWdd)b"g`qu$84Doo ܄$mQL\=Tӎo<2Vssq?9d`*'}1SnބGKll 1Q co ""*+sh*?V/2*I&Tm q򜃺Lw,$3K[BYw,տ}#,$>I=ͶZml:XYGcU{ SҨg yوa꾝k%bgm!t iG"Zkw"`_7*z2|MF=0Q ̪cd,ˈ*%%lDV+\\쥬if?EZ_N{[o.JI r:;JqQetVdlKeS1>*MlD NTR効jtߧ ?'ME^rStDduf:h&k$NtMsݛgKslhIB#,&[*WN_sgr:Gw?Hf,7|pVfXXƕzZ]daܛb+YXv[Y"k^"?{+>_ gUpOyGr{J?wNfу^ U},J=cieUudh6%/ٳJNWqy>8{OSSUI-,|j)5[Kډ'=j`w>/?=k 3Rz̙wɰp||S?/k+ 1k̚A<[?#((H ޖ$i ̵/4TQRRFyE)>΄x=l߸Po06D8>*+'bFxb.|'?#ɬz _<ǓknntgU/_jZV޽'n5'Kx)%<]wmzJI(oyC[Jڒ0mrXf B"JK)'z{v!yN[9>)e; JK)++(7۴dN%ÝiEw\Pm~G\cog:mPԵ_}cB碵%2$+ֿfǫ2ٵ}UyXZZ>BT1}BihZJ*hlu8쬬RO+`H^*bAtl#$yHqy Xd38ם4p=nS1:8{Ѿx1z8,ʚ,Mݓa # o5+(Oʞ؜fz 7'즕5ՌMѯ쁕ѩyWHi^%U6-X5XGjwH4uJJ zJe}-EakIF^!%崨gl;Bbr[-CeԏL(>xS٠l'%ߴ2:mPq d`o=a7\G-6wS EM+EBljgNU"+م%P >a)44TC3 YEMT쀫wJf)"ab8;3NiB1iUQ*QBG? khi:&lo7Mjh9KYJgϔP_jљT[ĊtUGaem?JrpU$P?;?ErTm{Qj^`3g*Uߒ_h%yJGhVy7WFnm+W/$WGKU*wx2uM-L6OǮGC*3[ khRėV mQrVd-=[ɣUeEMc3MHY{7=җ040qf }PCd"d_PAEf,~թom =k{gJhVl)&{'UV*SpTz_DMm#٪s!Uɘ OACSCNXzhQ_pX-/ݦ}1=l,Osu41Wʙ[u?6oa'7аl#+[qR8Ȇ5-dt*`.b-̉+V؎=<ԓ?ZEr6,$ 6nyL,-#*4msS)빏=O=Ş=ӚIOëhLA;MHLVکG_S1Ci"${v>FkOW6Mlڸf;عQY6%=7@haإڹ壭Ƈo{lO)x*W8Aݱ27%krf(ME+[<x QQ?.۳{;9yQ?8)[voĨPyNd\gY$14,'@\w,쎈PG,"/9y֭[Ś׳rI >}~K^ ɍiS$ʥ|\R ƍ\t%|r.tMCWW2ĥ/gx|W}p+%y~ |9_W}G0VUk_]<!7&'Z2Ȥ"˭Ҽ k.M\";9A1!8\xQ Y>x%r n~t;g,/a}Op?|5_{\8r~5Ov9Wޢ3'.0x)}g.|5~n>ȵ/%OU;NYM6Ӷ]ֶGD]SE~JIL&:!*\K)~G b+#65xؘ`H(9eei;]T;@G3LG!MgjokcOIx9Revd/|)k:3+{rZrb>z+ qgrʕ>RH*57+/X1NIY-Wv-)j 3)kQ&7’2\AI -Y¢.*2ɉZ[@mfL%-Z9ACy0eX'?FIV] yѻx ͕v1H~A)c5x :l%&"l;oG}y)\ʲ J]6i,V14:FUF󶽓ZLC(nPmYFKy"SvXYYM{ %,. 
*ғ8EB'B-S$splǖRLiCc=Tѿ*"k#!ci#;>ҩl_ɡ(J&1ܺGQ/Js8-5UwXZ9 /E"+Kio+VaR4(|"Eډ~p2(JZ:OA~|<9A]ѱx eu8a+RASW=-W1{ډG a`غǎq3l~W.fpێGر?OK)Hp!g7Wp LLpU  \8>F{y9~^<~Oy¬~|gKl.Wx׸<.E1;_YcisKiE$[̱e8DF9{}IFV܉ڧ|ts:!7yvb NɅ33xW̴\7nśݸe3׿'w̓lof#M.`bcn5}ۥں_:tXYK{c 9adb_PQ4V&$D[[bMFq=/&%؃"pT#ڑ1+j[w:Fhʧkh&:zFIYFMU 9P?kYҀG`<݋-ލef`aiATq;=YXbeHI["/~YڨYLNRx$ 3Ksb\JY1Vdb@bbݰ Rs IOHTFc}ٚafMy 9ѸE3:K,#vJZw܏!*㽰Vu+ӊi72fR*I%eYĒevvߊ,[uk%klB`B)Wf%TeFS5N"iwN]K}W;-e TAGM*uHOC.eu5 U2w $:IKVy6-_@v L';C6B<~&ykXWv ^ œU[,!& 8y$)bC=J&TwH[Ɒ=SŌ*rQaDA5,]Oi^&y8HXI fr:񋌧>;E:I %:yi!(6<\]/"51jE:G:I ;kgo Uɭ{hLE2cp ɥG6Tb(ONr74&d /LџŔ`n杻L')d 5#d&FPICv &6TњGJ%80:23VCaLO0%1nX+,K[ zMZWJfF&с3Sڟ"馳$s+2yN!8:RW +z&TxP?5i!XUDP"/w:jYn~KB`kiYD'/\9.y[A<[9yx->5Ʌp~EW>ĦUCxt//~KϬ gaCtsK ٣{b#YktN_ʇͣI֯^Lc\:_GrxMX[ȒGA*򥽄:bD\M;{s12Q^Tr 4yV,pI`ѭ|lëٳn?ρ͏p=(>x/_qj)]2=0®iX;6m2 3 +9tVfc2ǂ܎~zy(IpK3cL01/jy+ofժGxG^{^gúlS *EŬɋB84< Kްa-*eYXXXk{r4ԔQV^NQ^*=xO}puzC>RN_VG<$2/ڐ?E>xV=_Ŗ_;Dzp;x^*2r\ԛXz3c|ӊ/Τ.y/=LrFy"E~iT_"8q$o.9ml"W9NYgXX䲱ӧXFZv6D{зn_~uW6qq<[wpᳯ9j|"/h%g3o( =tE]_~x{a/Xz9W|A:}Z] ߩa=lia[@ ]W3BEe=Vv\-R!*q BmgW"*$g0E ~tuRHO 6&1s WXNŎx0gZ[yQ8X`lF["zӱ0ցbE^2|y'!%?G3ͬ.:kcS,S32*Y2AyEe,^4J^TuĆz`*%E^\,{ nEaʨ`5{k3l\R`Tq/ Utf?B^c= s%fVT4l2*c0wed'b0I$%PXOfu+88٩:PK{Q8ʖbC1_0#sRZY6\IyMy$&$WD"5ň1`W twPOX,B^"SnTܞȄtZ[zm(%Ia"u%i]AY"\ąx /'s{oE"yQ^Ez=ŪnY0c<@|7޳^-qTe@GS%g[iNŰw0^`k"/,t䉣 V1\"9&* #.*掔<ι?e7,K e3)R~?C-A/";$çy~J1InAuz S9űm.yvE&ހ,a{ͼ{!,_ǙÃGplq^}a3>yǟ|?MϢqA^)nAeƶ˩lixE BtU[X.KprdP@c\W{^Ng[#ERr/k##;'sX")_^ҧįt/ڞ'&y,x^}I)E g|x~%ϭɠukY$_^}j9.9l;6Ob>޾%gO(O߈B,fv`L. H7nD;QL{eٲe'F;M-{N~.SㅸqM_<ÖAF-b\} 9\MJֱ*N%k%"><ŚkN I^&j8{~(^}kcys89}kD:݋vh6qgžj۪/&[F 'Sc3BYf("b E˒ 3b(ieiO .P.M u4)c#ѥäG_Ǣ%]۠{9(D`j2N A\L$vT2z"YDE7RGxY5Hm4*\Hxl:CuQşq#,jInS=-ȋjOe*bKK_95*rHQel≎#?ֱE4Wd#$Gާ"&tQ_lblNJY68T * a|`ѾZ::Jen<~! 
uדWY5J Dg2d;Š ,0-Hpj$@r(l &*X;Pҷ&:MJŲH͉EQe2٬6)k6 c`bRbb) %4o)wΎ rRTVd9$4QʓIʫ<%Y&2Vm3HQި1)"IZbnb_F #yX9J#T䧓A[UA!4 ,<5 +c#Ll`6$',DD$ucEhU%<"+!,8E,Pd!8UaI.Pi0%6 1<-&&&jwQ7Xbnfg|#QSKcNqtu`!Iy@l}U8EBG-N#5kF[Y+& ZgjP3 ÈJ-ab_jw":Y~ Pz6HKqGooSq#yX:6<$F=we-K+K'O=d޹;joPH|`pNOޢbA]|`Ϟc͜z5#=|oOt~f"YcE:g7o-ܵk[$ڟl$idrSrDtt-^.3S3.2t6jI0";mixZi!0>|XȊ| B-Kkb)'>GS?_;ȳe}̙Kve#Y/_ ʭ|˯˳?/ᣣxFfGtD;uM:f]Fclm`wE\D&((Yf@af*~a eDqS$f'-j\]ZMy^.V1)b%7Gb2jQc 9P l'7-/̌M`b *H'5"WXre7CHoFA>~HU:Jp\mj<\D" 1Y~0$léSy*OcKrCz?&R"?f8>ilFt8#K'2EG&z ] YH)')=X79QJHDO igtA;?8#/s;pxY7'?zW?̡ؿi±SD| ylqEwjӷzK%Ř%tbg)YCy%hiNIa`lPơ24~`dd|VH1KsZZ 0Qy&735O`:e\LJ'u"/[< S1,_ҘH^Z 7dj"īCvI]xUE6꾊T]o<9NM!gPGyXo,U#&L*SڣҙH yim06g*R)k r5ҿH_kuWjWH.j#engZI'$_6k[&I'r:-,EfZ&H] e|JNJ7JҗV-i&,D3'4xJK%KCƆ{S\)jCG|WFZz?%:afHy0Mt+;,/Si`Qi2԰nRVTzg 227iӭ(\!/b2I000lNk/ϋg֮]+^,gGA0G y|_gc -y=ᆬܙ}]~woԀ|(9!$w| cQ+/xqAKZ)o)hzO}i)HYͶw.ig9O,4e0]= ]e|;MI3;Y$fn_>lפ8@71.݁N*L/սYy8 5k6@R}"mȦ{ʉDܝ͖1!f6vJN*2;`"6,\i 'p'>T qid{:&#NqSE+?"/?СC(b~Qwtl!NA_Ov yy5iE:Sٲe ۶mc B<q kd=_Lfb?{+ݽ#yСC򾘂e8}7)~YFT_#!{3KAw.S6eS6e###5!Ӑm8dSȞ-Cѳ>/RwIbVi6d49Ev(cƍl޼ ###########Qdm. ȑ5"k?x?_|q +""+,Pxx8uV]fdddԛ'lʦ2 نC!}@YU_)BTlB‘@$ 5n<"dHh|넆 Y>dl_~Fe6S6e72!d C!Y# F$&^xᅟHjWrXɆP  GBxe{Ᲊ#p1aĉw'W`s@}mh%p?bg\idddt&CWy?v8LC!ur_Ƕ~L??lw.6zh2ԔMٔH. 0D"k'n\|<(JB.۹b۔Զx96K R+IyQjEN\<Ϗd UD9W粼{KbxLlC}Tj_%&C"55*s밭lgu9jcT$?U^K繝"I[]MGKɧu"vHKK,nҶ.yn$~ o 3(>|7]}Fۑ ib9vUFI#bYW}?i{R:?4kq}~*ަ8ʪ]mO|!}gS&*ZuRKq펧~VӶoi6dGgB]`1u|MB!_2$_zh9~<>8QICy' =mϯ'?|#P?GVb&gz +~ x^WgMzf*(Lw*H:lX_:r,:a<.c|` "bG,J@Tl!} FTl$!AA6ҧaA:?2hͭ_.S=~uϢ]VٔMٔ݉D h(KJX!tTt Q&?)3h_Km'0S-в2[@(RFNNrE9Y@Zj$ < }"9.+ ٢9Oe`; )0u9eH[-耝eڥF% C/mE^^Q@hnTrsi۷v)(40EK:t.~ŀХrPDOt3Mli؜v;jH[]ج2 نC!}@d!2QY/rx5S,)$9`=(돲P-]K]#(4Ɲ-ʐ 1HX_L~9ga.? 
ܻcQP57޴7^߉ Asܶ6,hAX?DgVbU'5*ܺr29a3?OWt(7܏>@ AѤ X=X11 m ` /mތٕyzC`輪ԍ٭xQd ATLbSSuAvMٔM).n43128g@>&S*@A{l+В*X&4d؃uJ RyEE(-)EYi)JQ8y9 H:e Y+ǩkJ E(cKJJDr9Wɾ",Lv*t>9Pc}}:8gluDgyn^7S|=b-//Gĉ@eeC(XGV,<!`BJT`+R1tkn?gctM&ʱl![BAJ$Z|S@߉?K^ ’C'g`OA-PB",% $$Q,g2'EDOb tOʾE6 \7 ʰ;P4dQZldkYp)NHLV Pvۤ3iA2 ن#$}@dn`$Ԓ!T)|B0p8ۡs^x` Xg 'VIբa 9&p@ '^ǏͭIǥkxJ"Fz8v$NWG#t*q'm7\ o N8^xi-lw?l'MZ񣇱o^8x'N .FZAN۬L g|,o܄cǎᙿG@@ ytّq~o9m/39mOx̸r\)W\",0@a7ia:C$JkaZm ?WU%Ugm]>SS6bjHSP+G˭6f5aI> i)cBJOw'>+b}Bb[6Ԏp?4d2YC!9H/09O|voLkL0f!7+0" QG{sHąc;Zh~v΀^c}R} |q^ydG#m >} ? uE X૲ Q}v^݌f{T]r3>>)ڧgi1n8SNbom:bвY|1lxK U78ŋ/ :HJGmm qAHHNAzJfOAyzR{8!arPIeM_"WZ&!2T:sP"?<ń,4$ _>;eeK7 a#]rd,i-Vߴ Ŝs0w[i/A١"nmgJ}X$נ&E> Rf#; :/"Ӑm,#yFz[vc9 AT$߯a#Scƣs lb`:`t<|z}] :v/x%aiS.uѴb3Nqy<hKW9C"`ܸQ!&p"} C -TK3Z[$0'P 0(cTH'SLQD j49v::ftCmhֈ9Vb>g" Fxhk @A3Air^B\]-f  8tY$ E _i,&9GmdNpev&N$\s:^\UpjBQMOc ĜY0PY1s&mm9"ʵ@ p1eFeUA}> pc Β*k(!1YPT*vd O>$KN 0cɌ=3!Tn)]{gݳh?0̝4Gϒ}팥7 7dI#!S6*!3s#pu6S(=> PsC,0JHe3nձm~ͨcfLT7YFB#I~|@POlh~H(LXJ,篭#@\[aAdSSS1}pD`anw$d(C!ʍ$Ge]LLظh@t = FOt^Q>{uM;groƗWC/X79hǨQcq#؉=xOEɧGcY'~Gž]'U$'`M&|?C_o"nS}=YuUsL<0uKɁ$"V΍[WA=nŃ܏7cǎ`-3P׸> f/yx/`^ɯ?mt_oHVqmS<=z?| |}*ܴNl߽OZi|?N6l]Am:}:}R~rx,Xq+nq9]׋-ZDTa՘[<\VXt)0kr)7#,>\u".Dr^6W\իWP,sN.##sK(8csERR Fjd+{L BzRo6 B $fH=2O$/W#B|2 `^V9O'~͔mOgֈYY"T8m`a!3gQ%"<0X&@0(03e%("p M,>VJ_hBeQmefָollL3G5LJeJ${! 
# 0+4ӂK͝\y%H̖Xv5|U*`>`t*1w.vaKHL`fv7>B3.!c̤@uO & I"~ /c]SDo'$j55x??gկDVx> _ф|vfҟ)|p3o$c4Qa}A!-YA=fĚDRnmmb ;̤6cvY6Mk[̷ b2 FL!}@dn`7rTť3U4QhjIT=|0#K07H2F#z}]   ///Lh=_{':w#RF"e=/Y=]#'#{~fgE!4.I~ix%\ ں ~J/=qN#ؾ};֬Y(Nٮ )IJk3bGqX^`ߎG>څǤuLm݄?>Yڍm]:Yű#baz~=pJjڂ'_w>~lyyYJUa-:9Fղ$y|\xp kD]עqAOg# $sE5H΋/0v"{@2%OE1Әl.T=njP{A(IXŮrΕÑt$6ߪM$~im~Q D87Cxn%:SϬ*٬~c wgd23O$ X&k2F|(!7Y9ae$53 F8(nx),C43E D Íff Hʾ#p_S.dB`/)=I犍\}OzJ Ef7곿ud b[0R(]OS#<О7% Btw7u2|zɑSpsK&1+@ ;`¾|__;#Ǐa-}v:Cvŧƃm'ؾx}F60(8 UpHpO /cd| _?ދx=nZ*75\/}aZU.FyaX8xQ<]~A__P'.Čq|ez1FŮ5xkعcwxjnxxPvX2y]]6 zz;p0Lω, Wc>qà(ɖj;:z;RBMnjdqM@II9!X\Ag.R3l^r" K*Mqh+utB B"J'eJ{U].X`B|;2L2B"9rR.ۭ\MT9n#UnjXTc*eu~gZx!5Yx `"^^bn^Q'>Gf%s :iZEMna{ ER?koc-.e5:G7}E@0*9^j%\x$L>ySR|]E DV+ŧL/JI"dZKWG)$}gDp0>%,WhC4k 0SQOi$0 pha#j4@s>y|AOUbms G$^YɾL73EPd7&26颐FP0AU(*0w. ~){TVn$JP:h` 4-+n[[ó/AtEw`c@c{"0fx{D%ƛ[;%%>|ul|0*$o ܅_~_TaXͽo>؅]_|gAXsxT!5mck[02Gb[E 闍wlϾَw~fwTͻϿ!˰/?l+o}wlg|ן{+1g78\:8}JjH^P$Z/W^y%j\}ոk0dHl5K/ż- N5|FAV\u"\11]K实FS6eS>L8!ıJ~njz~z'^B1EjRDDc__^r<9=\65%]b@L;D1S7P~!*K藼Ij{kj]F|oRALű|/erg)238gCO`k(F1WEsqەk6EE1{u0(wp=?((Fq7|V@ lxaPノ``ecJ"},</01g\z^ɜO_Ǎ=r(.aCp|xpxwŞpE_`(oZ^ÇbH䕔ˏ6 t{U;(t <#Fy*{F @BN~Rb9\~ xx_ֹUY FګL@6k勻>SJ$Y׌E$״E@H@]Rg$0?+qNq8<"#} !ҿBԋ^_6?l\>Y(֡\)M;-oWl#&*I UHl8o[P?Z&-Mfs ߝOU~xNN1;q'bS@$GޱqD?pLw`<%C?1񛢟!ygɛJF9k "= tp>nŨH5ƍh9ؽp?^t恣&˧nȲ>Q`&|kF9cYC $~.:vٿKF7=Cl%: 0(3b~Q,A1,QC|d Bs3fu`¶ ԗknPm mFxt[E68 H?Oy"`^.5 'Pi"u?֡LLX-]p򱓢I8ۥMvPz~j3>BX;^l?/;>k%X(?^*JdZ<>d!sb5x,=]x!;ɇr xoyJ;|d 7yQCpUxA{WnO% FdpwH_ :Hp"ZKOOo!gjۧ5~߾[D(z*[XTRr]kHr άwSvkZO۪X'vq:l6-~Dz ӵq :E.9_MfJN3 _[TQF W>E~҅l_O?mAi8Rhxv]JAf0S3B.@{BQ_;0rPq]322:yFNwZa!N 2큗3 v0\I|]u>V:.};OcW60&ӹϸ[Wg˕/\g7>vҗ.K[Fgw!oo;߹#8)re.pwlʦlg@9S6)kDٶzuϰ=e8G<3w8:w|S}O#G=77go]ۀȈsvȈs3\mN!2226lYÇ{ȈKI}FFFn+ks_;0rP 2СC϶ֹyNTFF%ԙ52ꋿs}FƠA(_8p)));"]wܜ{O9ޓÞ'9FEPDDQt 59ؤn:Bm+øYSezQ4)m+亿^aO\\\me[Vme[Vme[Vz,(MiJSҔ4)Mi?¦4 #~ BP( waP( BP(JaP( BP(JaP( BP(JaP( BP(JaP( BP(JaP( BP(JaP( BP(JaP( BP(JaP( BP(JaP( BP(JaP( BP(JaP( BP(Ja$ qz8^\>/o)xL(?2y"^A~|M!W뗐y>$ WrE/=>_BO0eiɒ_cVsD$|^Ǎq8Nk/Hn9e|>&Ͻo4W߿vA}ktk]>$&l 
Aq/"q$_f|~<ʻ.|~q?kD(H)d MuiF#|T=&Jm⽋mt{ؑ?$Qzǟ <7(WBZ Df?ơT΃K jtф"+6+& Z ۑp8a XۂYH4Tvaba]X=ϽO #$6ZZ o4}\jɫŃh0s8JF.d- Ikg.ѓ>My3<1Wx45ql??lj܎v qf/',Jr l`m7Y-Vd~XcbϥF^(chc°8&X'90˭d7_4Ȋ[dL\ƣsEHn"nldz'TKۼؘ1O;?v 0fUOA5ᇉtAdt?gH$|w[e)8꘢(?s`$#y\nޤwֈ(,K[?I6۳?='h{ lf[MzG5$]vy@jԂ M<N[X1XvKmƒك`e~L2఑{Ff-nd,ԷOgu>f-Lq064jaƁu GEϒFF\z-`"೯ojЄ+\#ێ~o᷇?vּfD٥zNT%s`uV0f-ǧ>o=F\zo8&5t.Yl γ'"SS th{4DMoQ8 l~v ;]6D3c#K`|jcPhcFFN9RkvVf2O .cVC'NR#bk4{ɽz>r<7;)Un]k}|%rU|ĩ :{ 5lhf˹jҵ HXG6𰥇_Gkr /r| qUqd :60p;3=Q5:s=!YXe6/$9֎߼;0@I ).* Yk<=jBezJnq5wE<@_!y9ܮj闐|6ʸsfl>)MDZsܽ};_i ImFعyi6]]̾>%4&iKm#/0NܒL>wTRz+0ی,zD? U\z3 U}oflɎ*}-Ğ8s@|p܀y;4vvp;%E\ͻAۨ1XٌcQ!}H&FF,ud6+l, ,ڽVFX-Dz*B̓Q+tZILNuK5,~.,StUˋQZT|<΍SgkPM/Yj *^OB9K]U_k8zژX:&>L竬Afs^a_N UK:U\ S^η4]ƹF2ŽzN^mS<[fsO哐 AdsUhg)[ȕ@R3i7kIgkz߃ =Oװ7#31}WZ"Ї HK쌮hFr|]_?'䒆-ZlNV?z韲 oL`t]ӹF2O-/5iIi8Ź h(GIf[YnW/+9Au[>]}Z2Јܜq>?hY,w9n;()HX9T5laoz m}|s>N[:q071fLO):f);-ftCV #pNQ1¡L5j9Y79,O 00y·cK5Nd͍~R3c$.p ^͈̓B3ʶP \o $԰3=,T4`4-:歰zehx;]g ZnN;)jR(F(?c$RH #7/TP5W}!ᜊI)~Egpx5nvhw,쉮bK"]<$ںx;RGy|.U6^`lhXj3`D۵ĔLc2Jjm'ȇa7tI:Y٠j([v{MF?5⢁}ϷRX׏aq 3L..Pr==]G)qF*;B9:Ji$9v!NcN ؃:6I~Ql+C? 
N/9D:tJbYكX=7_"ڐX#"~~>It[VHPkx>xTΑSZ{yM&>笏uӬhd~3E2և` 9YW]ĦTrFm>42yGkݸ~&"oEk0ڹ_#~޳u]6QV-u+SLjⵔ4>V˝lM]qeCaR;n`nN>4c5Z j kD:W\"jmbo_lg'^ܦ@:]Mf| '2:VkAvUr$zFTsbZ z[r^$CqlZa$ˈS>;WaC Z,xo7/&#I0NT v =$fsz3.vɽUMzrT=vi ewaz3/$1_ͮy#_VFZ?^aci,.z|5VvNfF K"&ד~~.73@,'b{j+gPՑ ,OPu7Ç\؊o%>S.{I^DnA!M=mcd*j{3Gbsm,F79rp?U+8p(t=\ ƮPXZÂ^υm_P܍Gr( 4tOPqӉWd;ȅt22c0yJ]IEu6_l{fF],Xϣ/9cCSZOFsUնs,8V2c6sJyx;pVs/2AbjŅ'=?0,"2kzox+(?%?Q 4ҕ.TyКhkawsd`0@ ZK2H^r"՜o )2U?&&Mō@+!G*]Zdwjnˆ3=v^`aOѹ`--t̺^Y@Uʮ&l9VɽrZ63Z:x1ɜed(o˝|p]ҾeSXx= ;aZ Z'4R :Lnkx95GI⯨s ۘJ'\waJ}!~]%'M6,28ޔuy3Mib8_Uj寧?6|u qzjҢWDnh{9Hffhc Z> '4MiB3]L_S3/^f`a'DMf1Q9"+DeZ:_^&:SMi _ෑՄLʻ9#Zx- vrEbIYDM Ӵr"w <:TKٿ!XZL> Y523q7 ܮ 'EYUdZA9#cށ,mVf&h]tdu~r#Tϝ 7>e@ZXhtD,aQvQ&rq MX20?г"Dž~j/ŸVk;(I$b8.f /Is\:HUdԺ6NM41~~NŠM Fobմm,vN^igrL du~VHlZ7$eab[q)Olep*uJ[f)U/l\r#QeD(o0*[+z4틈iʋ tOqmIj/a~ۆۿ290jx8j>=ۧ)jawqOYX-mD2`tQ\96f C$`}aüfȰtb͉zq"&8PIz 6M7>-jhiPKԱ MelϏsc-#I<5 ϿKY8>ð&s;Vu#.J}KXQugs:eR^%?Ja~gs*. ]G %ŷ(̎c˫#WH&G~Z=7F78u"r Uݦ&Gٹ7!bͽ,[ ?W(+J/7Ia)/WPz،K\ʈб˸~:^J3p  _<-`'طw?wqDk!_Ie>ř[OvSÇwNOat PQg=XNz4/e&YrrM*8}>tp,T)i_".Fioa 1>N1M$10u0.5Ls9Wåy>>[IF4W.y0 q\鵒uQ'|ROHxd=)`N:5 1:9޸PAY$/TpK7MniVgOi:,8z68ösj N=3;n73g'm250AߌU<[S>`Ev&/T\Z g$jgobKv[Z6lvN j ǃ]?NW2nh%8f,['k bH3:,6圖9>9SOݸ~T Ip}7E^ /ip̈́&Ula[}l(BT=7*$gbp^3ì>dl$:]z%\VQՓ]&Cg@L>;R,8-S4y5Y[:Agb5ovRcǵgt]\bv5GnOtɹ"O,+, 3*:|(;-`ŗ66܍uv%YNZ.GQub-TEjZ7uT=CV\p:sF6VNF1墳{q <OhbգvB ڙt tɩl)Jh2~QEjNj8l9Rζq&㽼3SJaTފv PG!Frɼ~لvʉl0?i:>/ͼH'Kq}tTvvd/Fͻ9CX]N n6!8iy9ע5dJ2w;ٖ7\ `^sc/Ŵ$RC/okI9x<Դdҳjˉi=r?݇#b2g`γƧ( NAC/8k 6{>nt/]w9r&[EW8sY(.LDT:zy,ʪ*"OqȺh'7z"=0_wGQ&|<(JcW'7:ܲ+%5ܿ˖3AAD(IY*B~\oO[$> Z{Uz'7+)˿HhX&z<3=DJHV3ԗjvcY3v?+dDcq߳+pb 9Ek5r"Q__VQcF[IHÜ5~{茊/rd=3womh_ĵg[Ey[*3M);{յ@$UuXѷ jZ#vyx>FӺ]Xel㭬>iszt /V ,]Ϯj䖎bUZ/v h_W#Q/Jm 82?Cٽ@FVYy5 "4vy-'i)-|F=#U^Dd:y?.6JВ1V1G`Bg<9c\V+z?ȞEb[8vZv,.k>SMӒwSHcZBX,8ƻ;6dVZDM\wsZJoawf7:>Jno̞z^nJǶI;:~Kj^JmWٱLϰ%^ɍ|Hͦ:JlDig|"cawD-\|̬gV/'wp9\\mnf!Uǟ'] _fi#,IR^l~|ϢQߍ X&&wR$:+^LUTl,7:q I慇Di;(/'7FRcnz.uGUOI^ 7*EOVI|#o=6~6SGQNjXX6˭z ԔXoȆre?3,JMٸ 6.x7Ry nk^FCVo=Fkwl vĵ[V K8ϼohyҊ`nꎢĺE>I'JO#?}~ 
Ifsxf@RR"qqd^G7PO\lK^3tKdT$bEh ҨB=h 1L0HK#..ܻ̻E''DJZ&/n &!-.4nעp-7#`!33E;mI#11R-.yscʼ4INI"6&jos\#ZK\%tVdECy>11D_HnAYϷ4ō{햭\~؍ n$%{7սjI Knq=vQb3 ݉+>~-A{d<./ F+fCqٍSGgޕ#Kr0VdYƭ,)of"kX@q8ś_De8r\.<]AY~6ķ,Nn? (y=,{ucXXq>}/$'yCzﺵ9m=Y 3ħc>?F'G>˒lne\$`Bp|Wl2WNYji &s{=+^͝?o[$R-kRIOA\}ϔhw^,W0nGk>s=DK`e`/qduD>ŰdeBU>Aq~mS'nX?>hu}Xe8& "eV/!=y?>jy>:Gx%ZCò(! dܧ$ɒ,NODGuʼn[lB2`]b$=:nN(x9gl2>AD?)oB$2 6gogsͮP7gnb16#G { _ՒJŐIG2@|iYYZ3oPɟKv.sKJXqЦٻ5/Ἂs?}D>Q={hf{Q" " X H0}vD5}<7e u]ǒ`$6q Zj nWAgH;CB=y嫄^/CC!H!B!$ !B!#!B!`S-/B!F^Vڤ.Jpa"k%_ˋ8k5ykO?WeJkF?G>>[޵O_inn~s%_썿\ F0 2 Pۥ GRf22R7o|H!B!$ !B!#!B!`$B!B!BB!BH0B!B FB!B!H!B!$ !B!#]i6 sFص͌מa_&< _?3mT(IB!jn$f`fGfaׁ#?3GlI1x,s3S;N_e\9p -#&7rnwG/5x%z}~! G@Opdo 8-݀"F_2:yO+qy]įMĀXNWQ9+f89sp< n9s7w.sffɊ︯!BH028q@s率UWd`H4RO8F)~Zp]忭c"]MŬm'>MӋ=d[~h±[n!|ҥh=l`#bn4R>fFU}b3 6 jhZ|ٞa==,dvKO<f<1_ż$I{nVlH~ў׵}tw fj k>}݌sXߩ3^ϓ޽|>EFzƥ!B sc03T3'9̏ gp kp?bw7ZД.^Iel'aͤѫ`PMy<:M $dzs!nUj}i(Rݦqga !^#Sq=;p>=3|0%f,)zh+ӡ&,+lN'៵?1vG ].uK<غ}7 z+^駎c&|lڶÉix9T 8w77%)~w[>)>^dϮm6s8160tZxx?۶ijcl#(o$ *ky+2iR_)yjgan-0;n"޿w~lMWc֚|Ϻ_vx:͝PJґ$$$m.Ng\ͧ>0Jgrם?/[9x,M> ^N`xi:x\o{p(9^* 9k7'SZ[Bݤ\)$7--[6ev^,ݧއ.:}u.dCc-1Cp:DDEcs3kt2~{D&,"0lf&r T2#*U0衷057&?`҂owa#lժ}Om- n҅ŢCm¢1|0!"" aY~77e8 whQ8mVQCYiZUs~aLDd.&I˾VL {@!HypIۉd_"u3c5Y  2"X|7 ga@P !LddN&G; ^wG3,*c3` #=J_0 )AH ˅&8؁;bU=o՛Wӽ,VfNTTnk~8Hf؜nhv0yWmT`ao劖^s|fbvS}X5`(n\!}d͢hZK {>F݁JDTA63?!B`LlL&;Fw;I:{wiu袪Sb5~!ݾGu+TfbX9CSǘJ#u{y>AN;,( M45[MQ1 o{;>U֣[u .@o:a2$׸ ~kǧyIBL[\n0|4o$LF4W{ܘp0ܶ-#&\.MfbX9 ʼnf~? >Cim5n!9[hR jeldq|w$f[9X4.'{y> GȽ[El:*qZ섎\ȉ+whoo0 rdn/J r9[p(Pʶb OqǙ}=bV}Td8|.;* qF`ԴI^NK[iG&&߃8Ϊ,EǃiԵ4S%3`} 'hgݑ5vLaɫ.c0;^9W\͹?2e=kqX 4qfFF !Btn]Iӵ+4n8! N6wd,k6$QXfWS?P'+c5L^oO=3f&_O[cn< =2Ol)y=%ngtKPagQc]a3Ӗ^1#&Oψ+˸SYLO8e-tz:F4mmٵڒkrOb!:3uG~.-^ƹfGKg=bŊvpқ?b01-/ .p9çۓi2x%gf`P}JN|-456^|}ldϪ-ȮndʗgyZr(NEF r1prҋmdhL!dUQ.A`FUVMO_ l b̙3q}8aA ErGTzn{̪6q,NAһ.bPZΪyq!Ղ50;Nʋ2xNZU_e2,*[1jz389鋮t} t1~ Nڴx9q175O1p_[eUJ꼽ru7w/1iFyb:Edn7o3V?[CH+iC'^-0(*Ӊ=C؃>zzj$NEHRyJy qL6Y3g0jH(v{<SX97kc4tؿ=B=6{;^] *g %v&NZLdʴ6eMV03߮ir۳= #j"n6,U|({>LfLL!3מx)̘:i_NCB!f02S5z. 
28BzNf Fj5-l2KiaՕs-/녥\<9y#mg,,q2|@K{3 rj*YKna1*ZnFHV~ 1X> R{Sg0jlii|tr<&,zIWOuNnTp`Yh~=yƦuV~.ytj$#^WRbAO0>qKvƠPLy a\*nu T{9Ft!Bw3pv7v$RC_NGT:X7+~0>JT$2t_dgGh)hô11,ې@ pe|6f}ŭ64MG4*XƾU? Y~ؒQ(8b0'uo+zs!BK%q;Mވdeoiס2 F0nZ;_$(| 1eF\pFd_vyOz~^9Ϳ~MocġXCu>Eއ>'vH,"B_zҷ 1cs3m?H$ Ձ'2_tÝ:1'Ga/Fg V|y̟X+F)`4hN<c.>_>%}߳h,f74.P8lfܱ3ug҆$|x{f !Bw://"7+Ӊ'8~8gRʽFm ʋ/F!um xjJ -5t K鑵i6ITJB23/:y2 2@Sy yb r4v΅L{EW+=+<WJэ|.FƥS]|iyw*PRˍ2/NVVyܜˤFލ/ B!%9TַRå.fIٽB,ƫ^! iez]|av%WST 4Uۜwkw}ƮivǨG <=OMxtT7{'2^c%uYh\H߿_{vLeT R~֬ݿ@zO?o~|J>6s'h5MEnRߔV ROS\ɛyʹp:Wz[(*CڛK9q>"=x]N=GIΫh?SOqq-^TԷp/#U shnx]mM8Ns&tԶ*.O9Gwofˉ\ 3ȩ;C3}MϏ;~S~Qʚ:6IZʫiwYDүΞMϬ rNtR)/d52syK?=9p G}=rNǫ&]I$%::1*coyM.^|z;R0H^_>]k+ks+?Lg'$&w TpwEZH=Cǟ5m'\㣣^<@}W{H=?IO'&&p+k9R~!/֝[RSSIN id%;_2XH,!,"kSCܲ?r'WBRR`_rcTî\nʍE2ò\>Q}Fڣ:ӁgBPJy+Z*Juj} ېMc y{~E ^Z]*:j [N=O~ pIy|?:qw dAf{y,.!qV~SC`߾[VJ4CĴt^|V ӬUޟqT͛Bkzt=5)u;׏FZ}c>2 Lv>콫kk8 Ԝ.n-<3]u|If4?%}Ih[*Cmw$4GmΏ10Mjf/O:(1SL5@G#y<\&oRkm/tb>p] :!m,{,jT㻚XNR٪?toYTcYߕ|#[hSǙއQP,Ou^V3[isf_aRMcgmEՔ G7~|zsItW˱+5f־Ǥ܊ ƾ=2d0_uWj^廀?UK]ǚZGN <8?1NFOi`?B>M|*'ߕ5N|g=8q>zD$n_o衈'>3̬ok披uNkv$aᏖNӱZ[KD;O3b?jMP= +M}ӶdIG'laE]Faznrw6T\-C^5剻79O{9E wx6!} \Ϙ fq|ZQf-8'Kgly ZT"<>3=5(+mpFF{]8VnPS1$Ȯ~uF .PnΒ,FT3Vώw?#G͗/9_"3ęݸwb%SÔ?d T s] dhvA+<ʡp_Wb\$"W%Լoy/?qW hOrK\i~ ՕpCEׇBj@/=|hfc-^6\3&Xzl|<=xS:‘rΪb=}LWLǻ NE6΂@+!^zSKb C߾CjY%iښ0->rSHOMpC>Ywf v5'o@$#'$]=I*j`Su4I$FP\!8։L&L.wͼ#G "&0B#H|u 3[O'\gӰ3{xt?矹){dg]^lpS gE˛|8[~o%=`Iϩ701K}at_JJ$Ŕk#Q=Gv?&HE_TTZKT`0?yWx#o߸urBGɥXBv/mHKrRiV&6Yj' wwO[9:D}py7q% ^!''+9ۓF :w':ct*F.>4V VG |YlmQU˳'(\ cxaC^6>8!Tbm.?q=Q*wi.OKo7o?vsƙS6H d$.);,rbO 7("^WǶX$5c~c#/yҳg5RGT|8΢g?}۶2RPWQ7U8E`GJ'>78\MD yy4h郄l 9nk~=ɩ>aª6 H.˦ >rM_ 2ցDE)puSpsqeUEխLZyٌ XOOl/#O໰Nƣs:gg>NaZ*i|EE>-+0~[}=[CnHlBONhZ_o w0Ms.P>O$%q+L&V6$EG)w'+O,TU։ZcP^6\[ PN%֧ސ'!0:M[3}PSYМM>'51#<{ty4W 1OYQūEgP\P@Y?oc/ןviOAO̾|3ù#P>6oR2D3wo-inzvb򊋉vMN4/Cc &!P/I^%;ȉ?lFt# c162yx:a(OZi;_aGyHIGQz,n+s&Ǐa'MFQc(IMize3:D<‰z|@aEr~׿@GܻkMfCCX:xabk 3 %T&c%p`R"Kp77.N@@%C%>Jf?:w6)cd@Yc"ݬßMhMy$wŞjGJ=611~EAXZ:0c{OJ+J c[B.ތa3bx=*Bnf@ti;󟺱tVӢT2iC+Eiz[#q `+YzPXRQc0ss溙Ebc@ 
Z϶anx7E"%iX`O)f~+K44ON,3g#'rʟi98 _NX =F7%rJz9< 5?ff(5} v[>+J0pɳrln#Kbj;w)(;W vf+斾1MIx%6p o6L~3gYb%Ğ!?DngW\5UQ}W,"YK[l(*ԏL ƈ<3Lix9JSΙYh/7wL߆^Y<.NK.5Fj޴EMM,3O?۞ZĖgSirE,B_9񥸚k$T3H휨};L .Pj!!oܒGdFɸ-ezCurFtr+o斈pK;K bJ߳3݅bsGzލpS.O~IY__$/q.5]?1#8Į jPX➁ob&1x-8CG45-5A/t#aq|l?z.OkR}oK?r9T'>a˞rxHLݙ?3yv/ϫeJa wZzh!&#42Ԟo_`DIINF8܁ѝkB)>A]||1ٓ='?f\=gObit>4agiDd^֓rt\mmu;*W_FtC3ùa$奰YG0ӛX)/iaAJ8n:Ud92L[Ƴ|A zyd uO^AK?3ß!u&.FBdإt#8>ռ]a7)cC=S7؝#Z_tdԜm-46cx:ޣ<yz-kX8;=@ؒaN.fv7ya@-uXt*6KxOg]=/?Imy1=xFN&w{S)Cx)Ƶ<DH ZY߆m"1 =o{I<\1z[- ygqJ:=ĬWx|x ?%6 S<,'INXx%pBH[ >^2>ӌX"ʆm\ᎩFXD% =OBdºUE`$ Ł3(\ |<x1 b{et~AU]_ܹ4p ̏tSZ` 117 -9_oR/_v`~͂މ6>sܱtZi{K_(@ґ-3g?iv3F+kz̝WW{f OnRHψ:lcl(3|lLFb+FFL3^)=;&!iF71޸dY0z ,/3 wH_S{2NؘB!~=cC 6\)zM}`u~U^Q|M !t{89pL≹U g="'l6{qWmQtv kFd^ a^8&:j9m]@jwk]WWO%p2\*v`_8f&g) uX:m[>@|''3/0I?}衴F# e^$Zfe`*Mm/xDEA.:KN,Ou#c[?>1ndkcGuN0UW.蛓]ZǫWimlcM k;]EyEkA,2 ՝xh8Tgˈ{q=0-9Q-pQN lN%3>-ew5m(LLVmJHP0޾!eb!DXJ}(C+EY18:8Ld]jw&~ѾT*p&MaOE'-]iɂ'hsR!SbޝBC| N:FG[L?vg^=.j7F}fBg/j YdOз8!S-bnCC["^|p@+ r\wK>[vGד{x!~xuqԇ'z)$#10ݛv}V,eV%CJhDi.(*/T"6Fk%-6;-M81:oĎtTxYl9l0]{a ^= ؉w8PEgQ$2̣1!9LW[MuG8T{]3^BWMOy9ǿl}!KnM ⦱Gfxe\Е33 $:#ePƧ7yM;cHUo+ټ_^c{s$۸6j+ޢՄ2ynvXF7}=Q?E1;+wm=؆qJ~ˁ eMI["Mkac+G_-.OI)cyM09Q-%4UB'r{74}|h$-ݙXp,jYڍwN7c. dz=UeP _-y1T3+ _Y:;LdBI4*Í\22 l2"=kdDn_tIs &v!7ңul\ ؁/lŸ^>b򳜉1Z2}c$5>k +X<`ogÍ؋ufS-T|F3-+Χ"rpsbQ.#'(JS0^ij^.12UލLrpt坟msXIߋjS rl:ًTw2W&19^7pΏ9\D_3.!uW`nh@T}ZL$!#ߗE~ *9ۄ6˒D9391vB׌Ymp0g3fW}֞qLەl)^.~c?ĮQXz[iN""b5|Ndd;R2~?i:R|鸔Og` ̬iW!t<ɦ -Rm,Ukʎ9& 趋b)q07"cYioAPKN~+#}FP~}# D.Km-i[#;uY lĒ65wόqr?~NDuV_Ϧ ,M(7$}fy8AO/.G,ݳGr{:}r6l0?J#Ε'Sp71Y~?9ۅmn2ۓx[ҩyedn!uWvf;馘w+lOgIb ^f#Ef(I04oRFi|F̌De'6 ,n>ߋՍ;>JWㇵ<fiߍyPpbm,;"%-̌[Z*8ܽ@"nƺ*q6$d|2hGeDJd/FO٧6-{I 5:m= {F? 
bU_US7~2ispKÞQB$ױ!*sK[&؟iD/x$q .j@BcB҈)hdk[k aIDr1Ab|f!1=zcp8Gw,yMs]{q =s|؈ <$z\fBc2 /MQX>bl7-OLf| 64~\L$gj"Ab/."&-ʅ\͈`u#6xD勾/立9X2E u|ʏDd7 _X)XVq2w&~[ï)Nxi'Fr''&fv10:>B\1d&`i@:fLbn 9Ԛ~C<7%F dr~< -HJRlmbk=`DXXyM {z m_kxqepgId ӻCLq5@oNbcqAp]cN$!HƱV5 k}IlfjnIj'KoNdd C 2A.FX%wvcԝ |95i 6!Er;v-nyƙɵ\/s8]|/ӖRåv߸.|ϻoiy.3u \*̔8̉cvɖm- ~w%;N4OwJs gd+toit2~ׇ'qDWn}&>_p׿~C{p80o\,Xx_#/_?[[ |;|+'FrBK9DEQhfZ"iF3l8]+3?52ryVnVd ͥ2(9[ \\e|*ZQT$;=xf\M!"P\[bd*ٹ pM +-Y)$lm8#cM W[ z:X| nm:ļFX>;߬3?3:f->;X 9,{M)˫frU#fH%;:Fq&:<~QQ]u yFDvbL\~v +ʠRHvD͵y2 6 EDs'}݋H4*ی0aGr8}xFl̳Vk 沌32ȽޑJ,V"tFAR aOsZ d➓""Lgk VgLNQ0I!S{>,f:2jn׾/j:%ePe}9K^nRѫE-%aV $OBg-(`wk!ɉmJm4K;,fRXTKԞ8\[192gtV3fRg8&6A kԎqFm$b92c̯nc|__ab2bBa}MZ9z 6טdX,Pڦnjvt>aemQБ,?w \';1A/jWF\Qx/-0CkK*7X_cyL[IQ`/G0̈cl+ü mu0kpT1Q%;>xb<'>z)"U =*%T+x,RWlbR""H_8ۢRSP fT d3AlskrXp(nE~~Rʞjg/D&$_{Q'm[?TT//q'=ǧ _T[T*J} W>DU% M=ƍ]ϗdz}{\wxpkU{Wg$"Y/>1J6KSOQyu!8:o\`*#ZKEMZyF&,^+_yzE]fXnrG<|Ϡ6srn Qh+u$IYaD8E.#I nea $1H{-"`oz$IorhB`jeR9|wmZ<6}in3̬+"O<4M'BL$Kml/"l2MF\۳;u4FDtM YMO:^5s9{yog 쁧Y\Zbq wa>f#<)G./sr?xOƎa:uYFiiضE.czhF?~EE$YiȒ ~R$>롛,EDU A"{>.t{A^9>;oh!ASI̛_KaݎA.ϥzm&nb`-$'BlSퟹ`t]@`*u:nN~lh-%YW 1n%nk@*'=VgY""nұ46WO6:( Km(.MDzLh“mh0G;=8899&]g?w}9+M z\Ϡ$NN~$ U X8;f;zre:)sK|[Rr\h+IښnxaOry`}Am pwRڱQDk')]pmKk\0~GO}a)~gYq_:xp}i^M|\g·.TniTzI.zץ1Sy )]S2LCQL]|͊_n@|[躅x:m#1=<A2S9Ϣc|K+ 2{$'̝>Ä́Q1zzr>_|[>]$EI gگtM`oۥViф.jn%9| "zr(lZm_96G6S(C$f\cq'ޓyX!\tIJt{3JOy&:uB9!@_{Va_XV']mBijZk%DJj$q^3aO*=OiX]?J\S^OHzޏs]ia\,{|.#nޠ7̵^_ _06N%sRG=E%̮>61:~Vۏeqs( I̽$^g?"M\XxטRIrw<ڟAfå5]Mrao`ιuhB?v(+j'@@,jG`ŦKً.PQ''Fc-nWWÕ/pn}{iVeU@\BqwwP I5Na׻GڜF/}1g*uD(YI')C,`/"09+OϏA8~,|FR'R=2HFvlx35HE8$0naYlBTdk[Y\,g L5YRA ol N]%TD((Tz] S E^HZըD4f=B.(a}8X')D@$AJjq[ho?S{ۉOqB#]FtR);ݨѴgZ7hIV L`o{_  G~ƝyR<ʊC#C`' rd"W+"dpWwY/]_=b{;H D.rA>g4%   q LciJ||wF(K1sݯqxM0^BI31B3Mx쑺< '"^A3rA..N)M.FߟxNzD^P$@N:(7!+ ˬxY6 ͯp#z+::]Ǟ2U\P -(=y]\.XGw#anԌhx H-F( 6dS8x@C^,jF'Ȍad}To=Qhb5>H|>d۹kzd^0#g|TE!Eh/">w'l@#9Y42D GG)N2= l4,oKky䩦aln4nm|p1NB;R1m&' U"wD"UNIۂ*t$uB>(CS#yE5 @]F! 
%6sp%v;V^LV6 SH@ %/廿}%*\[2ias ec[|1*qcoH/`w@9RHq)YxŒZFwLSlDGڐݿUg<'ߏK*޿4JG r2Ŕ%Ue DhbTt  Jх&1MSw~uCH (g~Uw9>4>rcuB(68 afCfMZ]Ϙ9Nb'bܝҎ^2BHŶ2Y?8_w\l⩒Ӻ:0lݗ,1<ϳ\| w ONd~ksE]QmL_͸g͌7Dܫpųopuލ[alp!+3 W[+9\ArHdhM4>FETfD"v$2Y^BpՆ0u%h+!*"uW" 4%2HRDB1^vb<>fmTKwDhܨ`{G/!ߔU0f6"I8*HxF0IkIe<nƸi&[KΙΥKVuctϮ^Q]@lvpIax0QJH|A 1~1 RK($Bao%9!YDT oH(Odjw+< L:H,d5__ͭx\OkQn:P[0>>8 qol"MDꈣԃyI>_KPWO1Ol1aŽKBȫNNȎ!2gm<@K@t7h2tݓKpމ.o"CWkzeM4V磰׽p;3h̏?^0U܊M5'b~g*_a<kdj/ {x膵0qwEF]w% r`ymg;ԡ945% m_IzUOGufMX>=Vօa%?'bDVg1ёSR{kj>5ǼNpq-snlqϢz7"ӛ譌ܜ܆nSf~\MEZLwɇe< # H{`L?1rPAf!}pW&1nl gw 姻fT,\9"zMmolLbkwT6M%;%]E&mWD3g6I/œލ"xۈAtw&7EBIU;ehcPY>OfL$Z߸'.t pO/۹HD+ZJXH<)kM:Vw(^(gxv*-T|?³D=xP[hl'RokCziNn8}eM6`Zxჹ̇ŏh,aЋo&Oğ)Y0^% o8=`L>9fvp9%.^1ښ@2{˞+y`ku-"_(jh,9'hKV"| * kdi=`)08۶$[Ҝ S Fr瞒ZJ2`%}g83)7Cj5'VPDn?w<Ykңp#m K1p;TA Z]?ӫRTDg}P jKM&91bz SH GW[h6ܳ&4F=/؞%u6' M;ytTgpyym on߲?L.)m9-=Pk@&m~cVz1S?Ai+dT75Ze)`w]tTeaimE2l7bˏ7jF,oOȧr,_]4M[(n:&3LuDGm0i$2XKlZ(teBJ:  oA4uw EuR +gB@jNb'3|9Ok@fQ8[" 4 &u3pWLsG+.hihn$1@#Z*o"4uP-)¤6(2hI @FFpьe`Nah_yP\J@Env ZA"o >ґn^$a뜛1_T-.]6 K9Y# (sLfk:[],#/{ļ2Z[]l}"b[9J k#Ӈ#Ri7p!qp$u=Y^|װ{7}sz(Хq .-C,8J "YnsQ8C_J&gP:b2FdR3tUS]Gј߻MRlwb.ڄړAt̨~&l;ėpa@iA0.8k$fSQr2ڞF)/О+83b/ hkÇ,,awܷsF憳̉<68[Vhw>]nBi[Rҵ@c ϛX(Eh3HH#OVEr_7#pNE6rpx[=u#d#t 1i|CeI8 ,^lo_9~,Wb#tiϙ}\D7lPvU:P88CSOl&.!Gqmo N/Gޮ+KPGZJeF5R5b 뗸$1̵qGgh26% Z`a/'!!p̿QLȚudVݬ9O s瞕;v9ߛ__nś]ZpK.e{mOkG/"p~ KcŻzR5[%I EbJ[r;ϙ\lYݻ`"m&{p"; WC4+U*oXdviL52NU9PԄ&$ĔwwP?sUQ5K0{a:Ǜx2~UV'qK95ψL'[1Ϫ+pqqk4S2s&q;攴Gwljc {$U^ K3Lr{"i({H6 oh97b؉"8:>R[MUU!Cmmv#Y8ӾS~6x/R+Ofo4KMus?(pzBui]i;TsOu%*I*iyh K[?MtKJ!%k~{5^VMRl{Q!huVl0鏛m18DެOs晜"F¯&_|DmjBQ"' I"U2pDgc$f 8gq%6\7F1T!W1:=^W<%E$ sy펽-o8y\D"nqv~v2ٙ>#cr\~ 씱7Ȥ:{p1K"r|zqXc`O/OT!GGǧ\|e,'XʇtVkim(̋z섿c7mkǻħdGvfl-4V@'E_'ŨӒ0\w*&޿龎"9Pr΍\X;dkdZf'pX`uK0HysLξ"+*-1q??;8 7D*|^DnsQ h ?n261|#jKV)БT܂i >N"Ҟd*pexGƣ#ή Ll:R_Q$37y(OEg`wdzl3]SG_=]cVøJ%d5sY(RjR]Q0~Zޟ#ւeb!ޑ_0}go>P# l%~$G_r{X"pBRAlq+!Gnbh{C{wIN/y%|0];P7:F}Ns?VDz^*~Ѐ-#}̤뽡Dr|Fwy2vޏ=97ihkzY2U(SpFL'cdpx:Wrc<]L\0KCW G{[⋚IquCc)<1 nB+["yHA~yO2OgtiK0SH 
'<"l,TfCN5zjL_Cjͧ|`$YhNG3Sz[k*Mȇ˺Lcn8b 'Iי枡W׌F)`4i52'`Z1I?̸ϫD7{8: >Riǧ-/>|f~wwut%N(pҍ}\Gzbщ)0 F 2ЙOH~ZhI1/E&IiU wM}Z #zzRo4 hNN|\2ywtDTf۽M)f6l"5&L/W(uϿƻ/5-dyY=LSM\ACl vp[C[Ik!3+XV,YΦFʟDa)eh6f侳2[hy@ <w5_QRJl$"]PevuفR3.Q텕} [[ 6<܊cEAW5z[߈)hfu} ?+NuM4EeB1 ɨ#wkIV8wJ{7yltjVX1R˥T.3VŨ,m~985ũЍ̍h{R;ƕfTL~eZpM*0[BҋYd<Ǵ+ VƫYߥ Zhʍ^4I$>k0?E[' P^$q11Dfj:񕈾hȏDӭ/17SӾhkjٙ]ˊ`w(st?/ҠBNT̅Y;ӿ; 7$aoI.j0җ'2*mRZZB:3Lm29&S렠{] i^$ٷM>_ޮP>ib*ۂ?WHSQ:";)z/7Lrqh 7F@ኛ{SZ^!FrmߓBhFF1wmmb]Ho[ք4ĸ)Qxx{.wDc`t_RveoYR킣DFźS;JGVy O+7F`p FR*$$F0 /2 Hۋ$$B1Zj#>Q,r=FFECg *n^x*HUOn,C>qrj{ׁcj "xZjO9lX~Z.CYB٠7X>ӭ´tLƈO0bj3ΪpЗ_d+/1==;g,b:'l$xQpj\&{LnEn#@AG2'[mx_ꊋ#ʞ t]JC29j+}Y>b<$v8KqӁD- ?Q"/_Olledora W͗s8$YIxF=U<28N{L*EYCۆ~q O yZ1=Y\DYs7,-q2[-Fyn >Ͼc%6R;=qSM.b9fw/|t8){a g^6pO[GtF"㉻-ED{}(6/+} //r 2UsNwӢRIm{=/I4FGR߷~=t^hľhi6:Jp@FhCHZR }Ymj>WD|fҳ8`S5ƑhapW4nb6u}uHoZ9FE1A$p8)?bܥi} V6)u۲=rSj=eEgGpPEN>!{HON .f[^&b.r6 !\gF#wS2[Ɵdc2*ׄ~:N>n!JoĝlfJ9UGxF"1 2t0r |ƸqܲbYΈfc*qulh567T,ʗ6Ath^,FUhuƆٌO Q(%7m&̵x$:JPluYsEҪ tƊF`nre f;2ZE{al073?+j%EkNSRW(ÁSЍSypZ\ph~Cg/(㿷݆AmT ֺײ񁫅{gcNNW RTa4۷QPz~muu&mG`A.gu]Sj zGIv&,W%Qקze ᣖi튃s6>qaQW9ʢ;nPZXӪG)/9{Kz;FaӰm.nj.塭f4򊳼_/ +VX\Rap48nuZr͛j6 }w[MZV]:e9_.Q0"ln츇{IGiЪՎ}FqT"0Ylz;bDF#l?m>,6cGQs[74ZOT b$b=8_Aph#j+as]ʶkOS KC!rΒYܕNsX-[?n;r^?ɄfS"mtxUˋ,(ЙN?:ND1՚ێR,sޫD:5 #zew\Z\`E9βآhZ}w}_cLVv[]e19lw5+l=A_71_Ց:0j5Xik7uTհ3×(׽bYFgdcMhqރ:dwq 5/vF3M3j7UsM羡_׳i0u`q*ڈl4ZqF)v{^äfqLzq+ɜ3)m~ՂD<u:Gca-RP#uok3FEpL+m7_Hy},yX1_k~K:2b/d/pna#J> ,FiobvYOKl#l/M202+L,L!kwހo)$l,O14>,f*K=$6r,hBB]z3wf^+.;j%a~h 7VnĺggVi֨mwּ#]pwEֽ v|HHHHHHH#Q Vܧihu䨊MߤS)4 WQ5ƪ^,&&ˊ%z:ew [MȧF=̺S 01;6ɊrLkݹ.j&ШL1=:H[KCr,6;vgYibUg`ev^T~Ѿndm10o.1>%g~f0ٶ_2Ch+aV!Z|gЋNm s?603;χyKՌK ϲ0C=]u0)_ŲЭU-.Cjү2)'cdRųbbb2:ccf8 sv9t Ԛ/.F[H'IB=ɸD_OK_^Ç16Gg0h0^f2C#1423:yw|ې6 6&>}nd0>яmO$~&GFX0~7Jĸ]̭)q@̎;ֽkM&ik50de'F"S msT-L&aFAwŌBվ&IFMlH z9 ~V0碓Y,6+):Ÿۅuرܙvɝ;z#EVelFZPLĥ9j/brFz>E/9BEtE Xv M.lXGHsI>?9<^=GEE=M&8u v alktASYgLN ?L2?=X) Vzxv \PhȑGm\ nڄ~s/Y'JG>c$$$$$$U>)Dk-BN~Jo:r^˱[Կ3o-\k[Ū@f#ZI sB3Ojn $zؑ6LDT$ɸ݀^νs&:6o^t<|_P(yϦ^G Tb9ˊG}oOŷpBWޫe)~Q$%!L4N7HhX$^^>;7/CQDDŽv(ǵ(o͑ƒ} 
JvZqeKGb %<ЇELML4G@?1kmlzjd_oS򀸰DY9Bn"\&<(j 3K?__R󞲤R(~JN5FXt ~?z)eNABL(=ތ,baP)M/ @2fVD1<~t&LV"I":,}CyT;NoEx| Xѡ~|_ V{w2V(9!+oP6@07_ `A% 6NOI'ﲸIͭNl-3?HLba>'e^\=WQe:l?y쫩Zh)h|<=I8v4!F210P.>WM`x.={Fշ /;J0g"}OL$>F;G1ogDdl$b̧duXwy-KJe!brn3fD5@f\㺻W7^57 =< ^1}Xa1|3t.ꑐFs:F:!V4T=m^E35n5%;gIL?.14IYa3*A.=6qH3d?fbzg!e;ߐr! ex'G}xP_P(h{%)oEVehjGys 'o3z_}Ii4*Dq|NmU8w*S@_v1ɋJE/SS#\ )eKì+ɏ"V'u:м\''so*~R8Ȝ-V9VU ?٪1V%ѾW 8F" +do.Mh*A.|ϱn`tt[R:NyiKq IeFH=aAJgc3>P(icmMAT~S#"$3W;9\AоܬEXmI6eS8p6L9yo( aź;>ۇQ}h'wWbMEřhn'(~^P7KAke!.2[_6>1T~}&E\G>^'Z%S^:Lh96<=Jo%ÈH,Tsx:ʢ ߟ~ {+%Yg\`w#ȻZIaN0G34LB~<ĻiN&sA#5 sۡ?.(N`j># K'Z8-{/b=O_𺩗5=`ߗTv/p30yxs)l-Te eos]9H{itb²H^z8ߍ_z70>Ho_FTQ{/ϣ% Ue](\;ğD]w9f~XgȺSEE94YM>_My8R[@?V.< $$$$$$$>パenD0s#7vQOh>Q,Ne Uo7e5Obɓ~VvXiM.!o/^6,`?jFlDRRq+r)#hUV]LB2 Em,bT:r떟),mfyQFu2MlVEv zѷ;qX?~GfaʦB"!?c'_Q|lh(R'#7վҲHI" GoQ0`b|c D1wt*PDaN@?+PN'q `-6ؿ/&ޮ}cT}\°Cg$c䦑S$\iMX$LoY\>KT5&:+8CtrDynb!>oc$C/gݳR$?hN|V'ċΗQ+wifW+1LO68lp-uR_d Ο%dj(i*ۨ+,KiN)' JubS%n]bޗ;1˩;[_D9S͏hUTTFՕhI" >w !H dG2wŘXٙ iZD',jF6)`nf+*iⷅ#ZGoI-LlX(&fL젶i_"7}viy|q5v<9wde5喝Ͼk,AL F"Y9 OOb2kȎFoK +xVeDriVIKe/Bm>@ǫ lD13LG{9$1) ܈ &|?r: .I?ۄm{P9c'p#/M-ռ}KyJmm-կR\P¼ւ iQ~T 60בH? #VY߉( &,f#D_AnI'y=jYt7Clɝ"q#d|y^Hue furb|-`i-~iIaFXf~_$Ӯp/׾1Ǒ^#/j-fH-^^Q- 8w(A w:4膏$# vBEA@^CdMf!<.;^?AqI-ftΉ"! G"Ciкj Ф"6 f,f=CmtOYJd'^̺/ʧM_`tO1 bʴP2O6nc(kVãzSIVP#tL#*2#9_9N.3lPLfMnfE]viD MWq+`qӄld=5-¾nߌ% f@BuV?QN'<*mU7)E^Nj!.6ͱ ,XuD{K85hGDSj&槧a+:kNe?J#0^[רn!ߗS%Ծyo>Bݙ F#v5re|Gv&-MlHHHHHHH|~ }ڎё6׃Qf2Ax 1Irv=_叡oc`"EQ;λ).UMOjc:(O3蛜~>~?NcG/.rh&g(`X6[xv#3NO5~佬I|럐-6B2`'MI*~C_5D3<%Q\Bc?IH|Ager9Wo(a]~Q8Fd3Pyww?.?ϻCtEFFyyqdyyYe=`E;M\zh \G%Ɨ9̌DQv, M*paNlD Ż&*^'O^Ν߈ܙ _5scu%kzǩn:\e{ʛqD8d30L~|W%?FPE|o*[`B-oS#;7Z _IW$s?'n=])чۤۨ8QMֲ ˤfBk!?ifqԈ9U_M8EK_":#sC$<QK|T8151ON]leY2!!!!!!!_2fjSҵ8/g;Ibt.2h"1%(8 }7D;9Y9bR2I;M޹|NIe8Oq4+G҉yi.:BbR2G(mŜTS29MKhCJ l%0,+6G.Ų @K-ec5S=ErZ&sxbxM$KQA'R 5nU7ϑAZJ+eX 3HKO#9!~9ն-<+">(O:URï/49 :^{P2*z%+jyRSIK &38"bBsϫ]|vUBƑ,.xn_(Uv. 
n {j8HFfGsϑ[Ȉ:O}s<>^%U A^S X*`OXG2nϣ{~e3vjH%]9A6L!<(i9±,.Q|3<oT"|]SŚ^Kwc{6IEeɦbs n^e+ltDJr*g.`dú9#"óQ|\T2Eg`r,*G3{wa (3 2333疙C8g+X8=d!>71dѼFz)PړTL־wǵWr$QfVw^}~}?˞x[]Inɒ+ْw^ED$?ƨ[ufbkΝ}+I -\(8#4t7o~ u"}Dq_H~`TNp2}Ž߫o_ig?)u?{هnC_.B]xǵ4Fc N8Vni~?Am+r%'bS# Sw`# F@0`# FS%QP`sj6TInuݓ@v'wq'$č(NxIBmt .qwwAbۙ ݵW_VUtz oRUR@VV6ŕ T_Ѭb;GVAǙ׆(_l]_ۜV+ɾv꯰oZfˇE`45~xUU2;hF˴#8SVvh.5ڑ ʐ'M W}GW_߹WǐZؖx7It*}zQ?MdNdNd&86WW<e^/2褛hTRNEp+ٿdN019 &(WFocчF'DYcL!=(0~I׿_u4ffgl=@ik?aTmx{xr|.IqxzEðN^(8fT3fZZ:> :=#^8ѳQJBirӗjɎg]\/?ڌMaX}v̿n]K'mU)6īU(=M888企GK,G8ԥ9H9JqrD;}>c[jJ^PBlTZ?觔oj-ݻ|;Hּ/8%)w[8|/7ڒrA=H.k Nisee4iQʺbZVRUSCa~94J:h"W_Bk &t"i2{eRߏW%?*,&E!>t_ߏN' IoZ4.BXz5wr";b BeNqA9=2rkQQFMY>e5UTJeq>=2*"JF9H]Xz* iBU{#e^f×SaٹTRQQ.H&jyEN~hTRWCyE5uz :˧VEqOӂr0PPoy9GyS'8}>%:Kkt _iCJiI% "߅ܙxj4_/f%URMSV:s];SǂQJWQ9|9;2..YGYa& SӨXH!z=)gf-q ZZ{G%hP(1ޑnjj.21>)IG2%r-1KSlԒo9fqR' ZlΩwNh`2ʟU ވuJjբ^q:.x!{3WG[c# h߫'WoFJҥGj^թ𪧹U}WT$ 7Sќɜɜ́TzPկDP'OcXBs0Б%Kcڌz9Sc#,ױ\ 2Gwv{c'D`0VXmݙ5k2@O/cFrײZ=pHAr[о(0ECޙN8ؙbsnGX[I~b(֖kT30<"x07[8Yj^Q'ivMMօ+ϸY|-{ORX~۰ m%A ؀զN[g #SS9{|?`+Wda N˫G-=E2jDd` ^a=>N1Ƭsخu•<ݶ ;[ LݟDSDJk܆h$=9g +,,9z1^UjqUTku {YbT/m|%`4 +!o`aGt'{3 19{9g^n Ɔl)d ;YjoGsJ}Mva)qhy_eժU:,[UnXFEhlM`h=pr™mt=NGv<\<Vaj%&~-DVF lM021c{qtfaGv`e;KXnvV:7jrV9Jڗ1wKFSy:<`ldQǩel-ned31j5"lL-aWsV8j]08Qb-DYKoۭi{`E/yS{3XULFE7Kn:11e/(f(jϷeS {IcI-?vH%F'e}'@;[/Y_x`ɷ6dsJkC9[&=Z NFwfsnIsq@%E27,oupϟѱX&jE gsŬb\~-+4= 9SN3 0?L#ZJXꑉw&8-gdNdN>́Q%Ff{8j[1+,C\l-ٵͅ156c{9tdFOZq;Yp1<,et`t=–qQj<=4"-L^9X;Ht_ I TJ嬺Jz.B$k8zHoܭ@>F3O5FbfzAB<xS_ lH/0# )9_OAG\9[1Kذa3I),uQV6gz1Wִ~ +v  Mf)[SPVHfr6Rv4>?ȪKbeAC4bW3Ƈz'?# 0 8%j+o<$3V9w1`W z< m$+Bn.[F<KuO.ݢجtY<#zmNC 8oFns_g1/[QF/t>.ٿ @AeS{yVҎ ~*~$ H+|vBGa5'2Y8 ŵ)ye2?K*?ߖ}K= Mm/5<dבl>=|9KtR dR& jw_7Oc} L_e|QĩB ewql_^ 0YF=?ʹB~s}>wHXu~_ ?5oZt;?+NE/;}G̱M|A<5rdNdN>́Q ߳8wEA,8/a10%O:=XF@vP8cl`DL.cEiEzb˂U^Ɓ7r2*|Vnb#գю21Wa4ͪk[ﻁ%+tm,]gai1,נ@RJP542P/m`上MGKTeceiã.Fcokzp7Q]+aXn9Qjz{v[DdJv-DTkxDža" bn?S㯑ww>0Hʱ `Z'&x,qEIZ]yx _+ |CO=' ,K݈D30'C&[JslPo0~kƘS}lrvwq,z*TJ{(J0'[&? 
>J;DS 8|H/}iympb,6lg'05 Y~ǩ SĪyd6]~E|N+#C>+o3jʥRgy۰l Ao$go_BvWϱx7˖bw;ܶbilOx4O02"do_Zn]9́]1Z`S}3v^z&@Rǃ;oJiAa&-0 /`0o  O̟uJmΐ\VK)nku 7^p$<&;w71}\ٓwЦprRD[O4LOV#4_xg=B.9Z0m*= 6b p.҇K07ZOcFƦP6R^+Q`4Z|شq7mh GRY[KkB MW~b4 F`v20i;UwQg.p:0r0_;L1B+Vu"1Zc*AkcaF5:U=?./.Dav-*{ɺys@ Lk:sPzyKGWK,7^GM <PhWyjrc8'> Zs)Yuh$w^RϒpBR vJ"EKLGRNb|vO FԠuZmQ~Of|︈l 7 wb)`Cӹ)#oy%!Ǟ2RK3X.s)!5;(LLTփsBݴd`G0(SHQku'͙>E.)`i;\^J'1KK&N~?` 0RT3,O?1:#"?$B7/:Pyg` @i JEܪVů2ɜɜ'902 nFNB# Kb@u&fv)_D{`mkϱ ɆM4r+Z `n;.2/wybƀ+&bc+my\ Sä]t+t/2 QͬTON?l N+MVCXڙwαΞ0F X[ iQLvycmGCuNq?.ڇy$aǽ\vY䵛[)7ph 4oz/DGablNͻˊƄƽ t(6]aO݀%>t7@|Q|mz(MlԂ=30(Y._sW(Zcgm%Qb˲[yZXws$1;Z[Tξ훰w{)HqώGrMh{0IH-Zyӵ:l! '{+Ey'w\G@"023Ti5ZctCZ{1Ňs運9*k9O!`9 ]4e?bŘmb4%r5CbhhO`N6f,ME`Ǝ}9v(|Q-=ikQ>š,*ʐClj݇FON_'VKidH"W;\ lBv[[c#8—W'F "X ߰Dlؘj(xt5bUm + srS"J7Qw>A'>J~-? .dC\kY$sqo*FF|4 F  {Up8rMR''h,mO}OV-Gӱ@А,LZfOP+JJb$AzbكlVnbLهYH'|56fsZb~G"oKY!f[,~͊ؓ\3=/Ha,~#5؅dZ) JOG.Y8N.0:W̟!TY|{k*%0# = =1ޙX$uu:XzéќɜZHFBd/ݮpN^xʨȈp!ރ OK뺸'= ROk=iH/=E~7GlnnS0܂ѯ80R(3E";Acݟ(?W%g?Rs,{"#iI:Pid=)OR;8~TvQwSح**!> ɼ}^Sg ? ;I=mWJAHo~9J}Ipx ݌j(yv_OV8$"WO>@F'Jnv{- vDŽ9BRJr)^`$t~M9C:Y_^o^_$%퍾M泳xKVNH?71,y@)8 |8ǃjwkXxQW/Ir5WZz -f7<PѾV-\}&5ǖbq)DzLUrb8ŔrH Hiozo7]e $Tx uJY~*W}5q+o݇[پ{aqT$$g`@Ǭ<&P(<DsT[u;zV>]e*/axWlyW? xZ(Ww'7,K4^x=XdA毿o >KmWzyL0s6>`eˮ'$¢g'= jIrM-G;g0ƯܕKɊaTnL3q8]:lNLCm3mrj> ́i0Fo(b2(˄Ѣݩw6%XNe].;VzZ-K؃v:q9mV<&Bm2>`p?3$XC2Hnd͏~s}/???IBAFM <ӃhCWR?}|\yKGϿ|0])Ĕ OΣNmM[o;|ӵlsh>Փ=s]vzjG;t7)vj'Zߕye~`5G4hg]R7chZ0LO)|w V]a9Y _RiZt$\VhO2>yF/AAWfؘ_QM=fhh `KQ~'BA~dYV.rGGGt;\?v] ZNwYDeAA_-ϧ\􌌌( +`",jI~nx?~? [A#qnW^/QJxq\HjKQʸ  F   "   `#g?g%J(QD%J/+\.y{(QD%J(QD|_N0Tjttg)QD%J(QDh4$ILAAAPYg|   D0AAI#AAAAAAD0AAAHAAA#AAAAAAD0eAd~5>?oLtnc+ApX8dy=@cw+#vϓyx}~qؾ}>2 9t`Ӄ,ϓguYpy?^|n7N^?}χnfa)OoWrc6LRmHI8]nm%Sn$?#bWLp$doؕ.gqux۽8y$qt1Nt9䄆qfN9yBq9~vB}㋏7;6'a9ۉ\>;}ʺй&O/^QɊKzJ ӌ~kw8x)}ao6_ 0=p=4;6&4h8sDqY.AD0|߳{:)(]ˁ[nd8yy5.\ɦnl?c8}/$ɓ6: s9;yGs[ ɢұ \䀃Wx-<90ݗqx\qe:{r y:04?pl.^EyS7SXSQJQq%kpa_gZ v'9w F?8/ߟpk=Ϥ[V3vl2i<×Rڶ.ؾ1sc 7s-&8s"CN;β}G-Sܿ|'/-vog쯫n.j_aĠzvާeM:ɳI?pdvJVQRVML?Gpevj. 
Q7oWW9[zlmt0h^p / yXSqu>+_ŬCBOgH Rue#uM4uqH4XOĂN\EgG'ߟM\2`v&M #MɅY^O9W_R&??Snvu"GS{krTuK6@kK3Βd{iL8$BFZr'ܘcfwdåƛl-#r6TNL9VsK=s${*$..mw!""a >U7 Wd'>ba]Lf+;LJ~̘ZnLҧ,SX o|+f7iuKr;m-nh7`28Fv>;zԨ:{\m^Ivyj_]~>`bu"[lA6d$m A>B&{ K';p\z͏#1})MA7#HS}{}9޲-u#~ԪaYNJ"8ݫ)ZB/ 'd'-'7gQz2/(]YAwYvzl EHL {JV\ε=G{}t"-%x3ݽZp>%{|.4Pe1o:+@ó+ZGxt49x󊓻+g1g\oaqrIQ,JbFL^ӻ\Nz~1?cA1T1Ur <%u;|Q+8zYt$ @MˮG&muLM!5#K݃>tݬ-$)%J18ڝ( yܩ^?s =-{7KhGن]Ԭ-")1 )geR<?0#EOꔱs gsiIdmŃU4]8MCSc7XvV F?<&s_cہA7o>4q#*N/sbfE%Q?ѵY|9wek7QfZpsȫ\Ouq?_s;YYOݱ-c<:ì ga1P0&"`j|U8k78-> l. #ٴi=:T'իb6m(cΟ"cy4C-Dl޺f[F,K-a;[g_%|5|9m} g_U5%o+>A_o;J[>^߹H~|78:> dۊR)k#0/!oiZ.fvvDW 16?t>:ԊbNÁDҫ/T?*J  4c5j74nL$l'g_i;M#,/v5m%b'חIK{_ϲHnM=, -ܻ~ Yw >} iy i)bzn52aEM F׈z SF5ly[s:R28ws3c긴ʄlH]ƢX3Y(MOγTa"HaͭmÈ C~CzN_ؾ-D.ɢkȓ;ܸuڜXf^αOnomy ]Տ ܻbW_Yb}˺ hٟG 457"CF`2Woe%O'.67oqn_% Ja+H)MS{ץ^ʿٛɾ5)$o;?3 PpqYN?FK(z3-g:ZN;{)7U<979c5/>|{m |{j&t;];际 >'?rCi}XRdv<}6״ܺMkk O^򌐞[Z+3X >u("*ѸދH_%\on<'C7q.ӥT\u}krYRAD!s}=7X8144]X=- c߹$VD5҅oǨ&g% ^ZvRIdX쩓J]jQݜcdDΥp#ީ, &:E.d1$giiAE"7_d NpEb4<^ =Eh|n.щdAD0#2~μe[0 /^3:OM' ?02:Ω0~u#,sЁwszrnϦD>hhfȥm3 5=7'^NcL4L IG|ynEP0ڒ9 u8ۙYPH³|IQrsc%飼f霿_ő +jo͎ Y^Gь|b*mg|{r/}fGg,om PBhiW'U:qhU'INߤ<7 ̤łEWˀCQl$a> ^E}CqB-׈Kq;YK+bh \ X󽊠YOxw2wz3(͌&|VC;Q1Q4<^KF6 ~5z<;U8"IZL+X;)kwVPyfQLxtQ{ ={k͝6;?l ;}wwL#GwƩ)N$>-{rz;#zϝڔ^Tq~}ߝfEN(Jgwhu/,H!/;Ԥ8kңrmF|GQ^zǫI*`ޣ\ijc@kSdsvWJڞyOBˍ6ժJ'2$cW +byEdl\2>VBLβYTBCNg`Ų :F<@˛TsGdjHNT&rOdEԵ;_ǖ,"ch{L~>8wU0?^ΰ>#GJ$}ٶv(< zɉ^ƍT_ȥqԝXq'!|_<ϙ'iy1ݢM߸ʁDEqn ۈ+P ~J0J[/7:NC2cS;l}2v%iU:ǍGXIZ2eUrFt0Zh8sښB’~)~lo5 CdropUH=; 3/W2>$RuA{Ȭ}$Ė0Am'jY.-]"4Fi&2)>c%)T`x O6p!Ns1I_]ŋh8uĤnd`f1n~Uy,_uk{Y(Itw]%^|5}=d-_਎ 5I|ph'I|D-ɠcԃ G#Tǯs ܒSά9^Oo~$s`tx>_g*Jm|K(,fuu5E9 ,c vm dWS5{mFGqH^:o~|sM_w+Ր²߰86d Neլ*&x? 
<`Brj3Wzu5Źo̝;ʣJڗ/xrwH+E9r!_|*l8~2?>=Ŝ>fvE+2rNjzrb!!(| _ FK2OK]^ wm{FMIK[!7zVo ȉLdٍ;V%U]%H2 RǁgHLLz dR<~}ɨǢʕ QSKV^w_$#55kpa/ᱱζbRAj ;u rַ|}/ٳk+'D,XW_"m6yps*qChU:isoeS<]}~0ZEԒ^ i{)@?ű$m廭yu8n=ɬ1s2+v+#9CF>;|q6%r9Rh/ (]BL*`X#xvܞ OM??d [f!'xreቌxype7IOaS=spԺiB^;NXאUs `{@’L:F%AD0`(KR[.0!QE~%o*Bb9q9bFF*F޼Akv8y|Q=6.Π{ %]Fw"t0Ϣ#:҈0A" 7u}I{p&v'RqQ }.ri8/V%P~ۇJ3 wa 3GNm#.3ͤ Р.rZ"ccToe—Gr,^~zϑkO0&uva&ͣ,C@lקr-O0uz4Eķ|jH_{ i%*id԰#+X4\ځEe %q-ZB]׍,'h&F{.9Wja"A#?fj">2-g[PLh_,MM+Rw?3ȨˍBVjsˆqFFSo4 >E;JskPyʼy֧diYb"}C;aKKalw';y1t+цCn7c 0DmYoQho}Xwh~]l8z [Y45ԗ$%M1,H ɩ$Nm.czcctƲ\_>x~8w{#IGѷKyflʢ\~3C#qVuYF1`$ F1\i0)`HmR2@꾟ևN+g?xdby2^CMN0iUf]t7oɝD/l HԥDQ]`Ti:ƦsoTᲕo:؄lز/IHZat0Jb1[H^BݵnԚ1.U$m==df@V(=4޺ͳ1;3At nMpg s̨h*x^$2*{;Z7+A%d4_B|n^޿J\A7rw2}Xo8> 85AF$# Ǹ&iI|j֗'WpS Ldax / Z1$GC tk~-̙=}ۜHDYn?N?-=-ss>"")eKQ V{XhKXkp&dqWp/Y@XD:W 4̒E X`>#Ҹуme̟5P29 [8شj-8}l.cַaD._Nrb$5$Py3]aEl .#,,UZUH^fbhX1#^b=#;^8>c0X]}Hirb#(.+eel2 3YOQ1$XQ"2SVP~Kf'*ُ+BİYlm@pr,*Js)b|ڛQ^1 `a7aV((-ȥ$qAVF-4Jy1i?TJKȦ񙊱'$G$ޒ-HK%65Ғ"23Y]{I)" ds~W11IiUTȚMTlb9UܛtrnsFGnN q;>US=4V3[.[>|DrMKbé;XtϡY5Rd̯7/}"ȧkgEl>LG'IYBBZy9dgU^KjDDOvV +yyyXK([KLj![p} H̭y؆U3Y $wB}XRZh=2/nl'x "}_Bg`q,v$AFUZ=z >Y<_هŤGg#Ӣclp5V Qda УeĄފêWopb6be~\%c xi  1<>}z]3>P&b0m'-Ji>703gierbI8-Ɔdud124ȨV9xl,> y%'qV UTS:}&U 7!_;l6&lXl.e~!8@Z6s|Ҁm^foRM1!Z VnfXc$ͧH6 Ftz_UV/Mnתy׃N)N5 :c/3^CA΀Wa2@ 8neRgEO+^Ee mXGE{1M%' 6 :SUrJ62=NM@ ssz;h4 TiG5 0kh8g%;nIN`No)2qTV>t$'fBC 0N2:2ZbfŒn dRαdWεj|n ;p ~>,{LEcZ\lQմ2NIz \Vwc-Ԙ&0M04دWn/шk `3|,G9*Й c˻1m!R2y,uqMQO5hgVg;f׽A~غ7Lގ.zSfNtx=TKxifO~"/ # /yr+.V?[hhz-t?/E;+1i|­#?lJb>,6#>>}@bOmXk8\Obf)=z;AʈA~GpOpیho=Aő%z?E L8x^QFt6AD0AAAHAAA#AAAAAAD0AAAHAAA#AAAAAAD0AAAHAAA#AAAAAAD0AAAߌ   "nr   ;F@cHa @ND1WL9 (9' w3ŗ\<]eP鮮1 L`/&0 L`+30&0 L`&0+'0 ˬ3&P섌mʴxk0)ikͻLpEzG5Hв'DGL q{)=s˜ ~q{yP5OJًb#3ˆpnΘp12S# '&ޚOh:s5qU3zU*U 3@VfL7B;=vK{DZWc|l9vtL)V.7Ȉ?'mj;0#4)G|2 _م%"7}IoL@aU蛚gd`X V359$*E_'4uFfC_r3 7gBgrJЧ(4ͣ5e!0%}?ӕs fzGryΜ<+?(-غys)!cwizS 8wR!4.ԯ?̐|5 9y:|K^!e&&Q> z2-^iOQk4v^I *џA5>zRfL0u<%q.#j*R<8Si#QD?Ԅ vwHo}+5X;R֩FYC~gKE35..7i(z|"xҏY ޫ&U>ߍdl| 
SA2oEO"Fdg>Z~k̈:pÉ5xd O +"8fwM>6N:'O13<{rJƾ՘ ^I'2]aB)'z!X51^=,vGєVB=Z4҆^خR'z"k yr&X^1".' xks~Dp EOK{cb8p۽ :LuM '4޾9‰]Q\IdR;N΃+̥%Ȋ엗Q`E{=!z}:ڑc;ډsۄsPUct*# }?>kH=-]=T=Ӽ7\ԭr)N52O}@ggٻg!D7_g֐0GRI\l$͛q~/<M8E$M$Nkk"D#2v7jkHwcU96@v%&́~/FW29K+k,7p#ω@2ä_?Ǝ@HM!`/W{bvqH2>6Nڹ8}?!@*#>Fp2Fq}zTZ/tص&iwXm7M.'Noʩ$+z7}jDjq MR3 Kw(QQQAUU^ $P2#4Dzl-6m3 ~l%!ОV?ÝDpV6QR툾vvILDZ#`;yUzGWaFY?Kw%t U/x^\L9ϱӧv`w,Um7njVMWg5|7w gD}9ڲ#0cm9rʹG.:?㬊'N#v)t!V.yWYBBh kWw+lr qxo |Y8@}zg@;"6VD#0t Qiۉs'r(/xNq66XB2&!ȝի7r5̋Q#(n/WdkkeFur 6NZbLCcfN<)9&z'o޾e7_nf; ݢ*"k=ɭcjfh;Ht'T.U\zt`fAC[+?‹td{JrIـC-^҃n>GHLlJSn?M"IL' $]}r2'.6e~nw~./܆[́5_5WHoy~兎!SKO\6#+-1rȹw;]DaN;JEG?9W`v&_+ȞkscUdfɋn-w)cLJoma63xHP"v6_&ȼj.= GR10*g OL0<<;tZE^~FE)yUitUi\h{6ZzH يێ#Wcܶ> 8;=O<1lo^\zR]z&QR]^89Q.h㧟ge]ZQ[\ݞDZ~Ons5W~`$ ^Ƴ~rqV#h:uz:b M/q6afQF ʷrM>L1Yt֘ʵ\\?g JKKG@!Zfq-Y#h;42%[/)t{glt^)فw;k3*l8Q<{]'{bˋ8ٹsqv ڎ˜ u=d'O46nMVMRC_WR6} -=T=1w̮sw!)̿I ;?moB Lxv8kBO=i|]]6gGvmx[LPufGH!Nډq3}Ş <\< ;d%p?/㉍7ڷ#̽Һ'~,x`F6nTsKط 7of˖;mu cp߂oH,]Y e/Bv3J]sƮqFjaW?bcBhρ"Fz8䊕 Gnr7T\ڇnTExҴJndQF]<}+ aGp47/qߵۼ8|BpJd[J?W>U^U\+v5?Ύv1[ +R*zUBQD-8qޔ?8jƹ*K-@Gy7*%@%^Ơ_^86ɾz#tvK^}<*bEDg}ЍSr N&&HB/c-Y wz.uhl6\}؁/W إ!ҋqwu'{Kx{1tO>۹뇃w 칓O$rPId[MFrFUڋ Ï'/:h(ᱻ p'(cZ2Q2W͗tpY?^4ox|7{?p]2 :Ʌc?QE]AO*17.!Zgh}YDjܿQx(+T? 
λ_~vܹ~#Gq-1M W}^xIf*_,𨨞q:kx1bx?yі!&y$''<ņ&|os'/:GXX9MP׷0:h.˼S+4wR6Ǐd5:߱hzo(k;f!⡓T4 rJn|*_3>CMI9}* 4We8/NfV c|Kqa5tw6IN~UU%WfH ftS(Ek/YF"#3]Vl,/$3# z5L/,FPCaaYUpסȧ^/ۘih/#+32(y=554#{$uj:ʕ2RUVD͛.3:+)O)+|%gМ˖>dH z(~,~IC=Le"J(-)Pd48@CM TTdDCVP=[MVN]#L/9y\E䖖58qmy8Y|7B~nQ5ZE::.Ȁ)rkVhTgUt+^fY1-= SR.Ek*J 3SCT(IAi=#槵4|NvCEQ2쾟Ӱb[mI:RKqU- C=TP:ܬ¯Wu,]|+GN{^ȮL4# {y` X U"if1 lq sZz^PPȄV@'%Ե}UHE(yrb_\VQ/O 9j-C͆~Ԕ)?X_ e MYhA7;HK 3#I=K(;=hxmJex!0^~h.i]~Jž/a 8^x^>a\H']2_BVA/M8# eDLμMR {c17[_Cűmh_c[<3Ү=Zys`p%FIlMW../PXZLPSGڳed6Z=J}YD䦧[DS+xGˀ.>MOL %U OMfaO>YY<ϰY0\/-:30HYU7x`nKJZ)k*(+$5=ポJEA""2_€jҟ˖^ ,`7%BE7h..%vaafɔ\zv0kyPʔVw%Y z7+9ll(%uedY%_34mUWW 8|RSSy\L2(+<‹[C`V98WN`kƺk~?}JZj).8/giԿ~Kyq.OQ "jKy)t Mת++l),cL=FKqVM]Ë-F_əijɻ9'i.+Ou]JwGOx3HHEn>YV.g-NjlL3a^=I ҿmKk >J51:5o;׮q\]|.,VJEJ "x גo(#yIwy-59ױ^a羧܂2c%@dBnx\68nk+KK٠Kks 曹SPAJ;I$؇O^OW ߽;pOT`'//|%O}uYq+Ct]7IڕH7zpX8s7l6捿Vl7nŴ.%7/_9r1ʘ579JR6Kϗ;JɆO֣ݝ݇.'ˌ8e=Dyr$kݹWbnAne`/,/FsKٍg\HaS axz}uӟ[lQQ40"<<}Uy q-<ñl-)Y5Ue?v]pwwz%yc%\nnɉS7oc [pws&: P*誸KՆykƅ=['׏aVEwGERxPXY&^+Sy#1ߺI~j;e&0rN `Cot*>dx܉-yG9}u=Xζ Ji\|Y.,V$ܸq#ȵcWV`$5F٬]6" 8s9sI:9M^䨟#. גu|VY^DJQIw$LV'K7ibnT4 Mq1bN9bz98}Sws \p@ٺpN:j73k}qZ pnڵkf[ AGj"ݞ(H9/s܀gCYXIJFcnav O7|v_F8^VTzQx&'gwVUq ̞MۢzϐK>deH (::Ij(y+}ySIľSD$q{# Kǚ[DbNnq9^5,|7}r/f;U{:`Eic;-uOٴΚ?WYD73u7WΛ9E7x<b(KIo b# ꌵc**2_͍)_;`w>"{$k+)>ƣGD&ܾ}[R^ʓeÍ_nrqC`dz-(wÇ t1/a|b-D\D3!;%Qileg[.Z''6\9Δ^y(FVi\j:#X!0jg~nh3{p ޚSO(̙ x?7I&Xov!~QO_]_9,BR6.#`pc2OSp!w H+uK[m}):swy4ICm1:4oqndiz{gwl0v9'\Db@70T^Jr0}m#U䓜/=gwҋK9촊i `&gcisR"\qE=MsXzP\򄭎ẏy^b62K JܡLX# kg]nf񰡟gⰶ'LvqnkY)t\֊f{$-3:^fqj^V\)ibJE1v~K*ג/ tJ*+'r8Or-tH+LZSj%p l ^*[d]ޱrVFT*WyIA윆dl|ŒŗX761y ufC/︆٨T܌#j:5i7NUc>kQBJ;ቴQtko$UAV<-uF/pʛTM4r&qݿc"B<ݻ7܂{u\`'YBR~9YaqJ;I_S^YZq,Q!a\m K9bLacU7XcEve?$4 r&J4޹WQ^Us6):V̓?I.OŐxKY6xDRmlֽ$^γ07yf2WыgCb]jܯ̒Q&̕k!۹_. 
FN<!EP!j^r%ƇӏKQFyz:Qk?9I47W<;ie1!VJ‹wƟZίBP{A;SߡbvZf1'i);v_dX\\-6JdxۃɭD;9G &((X]wh] KH-i'O=C!8nqhT}xL?8D:e#z{^qa_۶M`a_ӌ} OeD'o'= /!̔k?4d:)y+:+ QI?\˪fdQ\H+A5>̃kUUa tPwG*/&xǾ v(i[N'$r> o>AS3p Ƞ8۬գ/ʼniHV˾}} '^e qF5R"};\!(90w-"eת'lzE{0!Z𚖎&&i5ittt(i]ݽ)&Fy5C*o%L tKKC㏒/N_駟$Vɓr[_/KCljNE=#>Iz;;Vd>16,,02.zFrSUKIfaa"HƉ)OVs䞔[CPȐJ¥oKW|`$`x]@A`i(J5ƈj¸g =U9k;.gTN茁%OXOB[ܸY==o&)8*EڏjFU1*wd}"#E|jqub|aTjic)&IGf|BHh(Vc|Rx4ˆ^.H,N8ɋY5Jy_֟%c)^t&(|Udg!QA8:p& DVqo [C<$WAF&67ebc\NcdJfف-#5DmF 9Y녣I<(n`rjKDCLL ;w"ݶ&wTIc|$ ٳG6?򥜔77ߒ!0Z4f!:8C~JNے5̐+swrcZ>Wa ,fwX<m2K=G,17 ﳔ4;;Çfo2F+_Y~Ȋ^zˆo z4Zoz2GG[Vm5)7SH+a`bJuF{o\˯{}yӦ2Id^#q"UOi㼂x+ǟD@k9wRn4L2rl}wܫB p=^u,8+>.> .T5Rt%cGQݻNԶwǪUV;;漑 v(iH6¯1fhﭓ dTf +ΫT*C`~ȔFnjq[;Qji~FUR!' |>݋+y}-vČv~tdR?M[c"era%[yeR~zz>߰箾^VES)1舔}zu.;nDK̕/,V:.II318,KPrh_:#8Z4)|,8[18lp^$ 2[ U ;2%Nz 7ݡь$?8{7}ncvz;zxU;;YƞǕt7s.:-> Ly"'{&M}=Y GKȳgdpOI⇈UƓWEp[TP 'CV䔊ۇ=vW\>аp8q'AU7WʃRZ/Ĺ7LcRGXXA$22ReYgٯnw \|XO">7ixφ哸;nfkR*oӔ5J/lt u}:G&LjY }dBh wp(Kqy֘'ˎ#R]^b"O+20|&0Ig8L&xdR"e+ކkiȺHDv,V&\wff.>"#lz.ffSlS6o)q` ,_>E=$olfY1"6n39d]vg.fO˛i`ob%s3ܣ(]O#IWy{g''c@>?3Č)hƫθNC[2#e7F2o/ H:`x&Yf1mXXm⭉&;F=a_GG[=Kg"wlc|;.N_"'u?fVlڴ-7y~(j%N\=)TUZ$wjْO:zC`$'Q`lV{.~dWP4'DF'mH8i bG ؄{E $x:pwʙ[ quG"gu "/Pػxf[WVQv\ȭ#>=IH `1WSRvJJgh"mWʷE+M~l˞ߕI8EC\%׆M 5U M<1d4>.fQ}w!bZ;EwK2x)U5TUSN+~X޴13H_܃/10f튣!6Wq GK_?DhwIU*+ ԲBeM%ytZO-hԓ t";3g䑕_F[O'%TԼfEEEr #MU?}JzNݥ+/\ҟW-'C2ܭf5U9X DxҁIjtfyan≉&;0!U˽&䤩)YR+q?[WR7ռK'*))O"Ykɗ +J7 נyJC''>QE(^l8wj :VViEN,\~0T( 6tL.]IZigb1_V,}|V@7Y0JDq%OItA|WZٲV|1F&'&19&ޚdT*Z3jbс~޴2`2 -rmC#Ȋ+Jg\|Y|%0F2$Q,.'.\~7&ܤ3 d1Hxb2&;0RI=+r-zGڕ?_&_IeV_} nƺiD"_,y)ӳ#!Zf>MR34&ޚdL ?f4vE&^CAV@I/$5 L: 4xk0!⏘x#?&^U$7ݙIsD$ HfDŽs,9n"Ys\sNtW_->O\)JY'7@3N»@_2b#_ș1W 11#»h[@!F!FBq̈.V !F!F!FBq̈.V !F!F!FBq̈.V !F!F!FB_<".m+###^NbuNnDx@@@\.g}}sss[6n@P|]@"m8 b$b$bdܶׯ9880J҇qq/r hj z? 
@/< Dۊq@ H H H* Bqʊq/vrӪl.LR)Z^P៉F j@$!z-:A^wh b$#NVg䄏.{HGNq'D3H#HtfggO8p-:_&tה䆯?9:Y^ǡp2HGzj&ēθqYoǓV|+),xZ'dI2;^$Iu zbj~N= D-5 f``d}%\V( Ѷj/h`lbHl,/0:28ccT!FNˁjjihhZzfwxښ51a<)$;KS4<-lyпi*rN@RLI<_#;Tbܡ,מV 诽)"voשv'q&\ZG֠?\ZEjWxrg׏Ɨ|:'V`;QC||wk=ϓxroԛ<*)``YՉ@Q{;9::"7n2`!ՃH !gHIr5GWykgX90p0߃Ù-kJzɼl<1Z Vh#.%Ɠwx_E-ؘ!/2 TuN诮[4ϳ_i= I@ޥxccu|Wpbhuױ's+n6 bp;|АiYdo%odvf`+\kh++o YxȹӶ DZW /13=E@f5;2f۳8e` 9o.N..!hN$-((⋇؟;EI]/Ƈnlf:=wN{rI=hkL7bNk='h s`*)a.x^Byq9PC >$`~ֆW̴ibjCο=6ps!dk gyZ k8Yb7HqWo*i1* :Mt GF@?[ C|kE]#5ThWu7m3K:$d`{8c*K}gZR0\SM\=֌##;Z!FJ$ M>!Fnxg;Q-`G^~7~1H:*pʃ{uCt>҆WI 悝'mrޥ3' ^{9oz{xnԮx51aeeu`oq\ӢD3J~ `B %>g? /:K3#3}w~:+[K9AVF,6vdV 7!x5hoo+Fz$c~^B'[NE9ׯbsg{8D uMD1Y ̧6YC TU0he '+=,N@iЇH+KZPmaIUm .\y~,F8x#W': L,_w2 ~[ZbdCptv"b Fܙp ,, ;3XW%% eEr D?Sb0=!:*vn*O6'82h}5GHbX Vf8^f|'7@\E-p, #t`ˎVURlxҵIOɝ,7`gVGuZ > DA@{=555ԷZn#Ç <#4BΛ+ 7xѴ뜐k.\فHm]-Hk]:p.8.?A.ܬo$חM7c|pHJ'K~r0Av}y5~qHtuq)鷹䏙S&7c;5MW[f֤l!Ûě",NQXAWW'8'߂#Lbd,6^゙#7P5h4*dkx[]$c=f28Cs<\`!AyU`bI8=Ajs< i`oKSS117Ii OzXk37Wrp u Vlu೶< ՙ'Gw:s1̼l3fۯbZEǃ ·R]k@*[ T*w0 +cqFx\zTΊ}LMN7jQ@p9ϬFTK'Fa'g$I:sd]ӟ$ɕKDa"CS̎@Mz7O[)pNo1$!F%h5PR/je Ш=P`-5Wr+E˧ !NFA1tg|Y?Уj8|J^ YiIz J/'H0%Ǩߝ:ǹUu^4 jZěf-~bUxٛ`| ޞ'g?U'56EC O-!ngr> R\ O(Kޤ_\Bq+15ur" $`osŕoo>ǕמDws̿si4ksyې4`]{泥PEvu/S˾o^$ۖ~9\-n\ۘqk3q)@SWr-#mM.ca-BqM Ĩ49|JKM\yvmDz[iX[1\@˭t/ Qr~h{" yspabmP:AgaWQ)q%ބE7Z9Krr&_As7\Sg8*D}LMqTprtbO̽1\/qǓv2WdbvޝkEPR9n'bS-l?:|#%)d^^Sbjrc|(cq-I#5: 7d1RSNjx+#v~g],΁ŽiœNSسAz9(|' H[昺qtRpcJ% 3ʺJ\;;DlTܺK~ZVf<xSϯaDʵR dfG`Y >bAI+B$Q7ZbU"Ĩ".x%t.'_o"w5.??|}| F-M$[ԓM&4}= "1.`JEDRrR(0Wr _rtv#(H!C_-`tP_<<MRc%j0jLIzIRiFg^??Zu=?i>$٪ %\}M/_Z#QxxJkq 94\$aO"9ÈZVؘ\U# FbF%)|}wlij%\ OqBxH i'FoKA2щbbSPCJ\7+_%!k̉W'ħ Ts݋)đmՑ~-RoHvbs7SOh4*h2狛;!Q _s0RAqYY xݤ|jMntzJS~G[_i4sfB2J~׹BOs%ε\I>DXv-ŠMhFHX;6682 )Q;ISR*OJJJ8y$WiX*! 
$x0 =I6FbFbF솭V!W1#DKۊ8 &`$&`$ȂLnb=bFbFbFFbɀ$&m+〘D2 I8 &`$&`$&`$`$& HbҶ2    I@$bFbFbFFbɀ$&m+〘QmmLnb=zֶbT"`HZ[ؚ̕UTT(33SmE"gRHQeEuUW`PpQ^^9<%%%HQfVRHQn^)uUWTzF&V!\唘kRR3V$Hg!Pm=mE2 夦gH]Ou]}L@ʬ|V#sDHtuf%n)m+q@֒O"u]}L@ʼ3CFeU<MmE{ψ5_jVRҶ"D:@[$#u}6##Dme DFF" $xI8 0 drI@Ttʐ"#HHH$Q5Xw^&:;,C!i[DF"#Hj0j)(`lefbRFr~צ_)bbL5ws~% me DFF" ,ՐovQтV7#Ϙɬ[qk*TфxW6c(oZPB9V|g/oT Y-7LghnMʰ5h[vmDF"#Q5*'7D h8>B2bxXqȟi>BY{D\"Cr՗Soh:M'|cJs=kڛ)C=Q "8YVњ?^IdԴ]$m+`$0 ujEuʧOn"#S!ko/Lʅ W|Z,g:0.82| tZeq?LU1!4JnX˘p0A㖱׿+pG8}xg tƳf9Xke.0 DF:K & /Ա"]:NnwyW"ڥ/ vu\8oaw@6V=2GIC'ʔt( !=# ^OulS@hηǯB^/1yE2޻J].0 DFzk-ELF&32+:EmoWLe&嫞 ML?d0LcT >;Zm&?= 1pfDBmg8ٵ`Oa>Nha,@ؕI;ybofbA14%?: 5Pi+bΨ|@ucW+ =m,`tWoO\`Q{[GDF枿{],`dl&PRFp1ʁp&ڶ 3s!Jc|U=Nnj0Wo~Qdg\s.wmۿ]:KqEU˲y􈂑#<k0 0FdP2E:ͯAVA!fB]+0"LY,ݟ9_h`KJ!:8J,Uu\ {8n2l'ZDf9S´P&ן>fU͘*+{EdoKUX`^KZӍ{h2j).Fk}s+[Tُ>51T4ߟ^8 nOo 1+J=ܫyJH=ɡy|^OzJSooPhyTv5^5x?5z.#Ną|_}AKAQjJehL8.n<_Ql+cS/LZE0~X2~4cs< ,QTK}Q3ى)}{6눋 ͏JjQRpt@.] #$ 0W7̅ n{g< JE_n JԶU[4e$kq{9VGDF|,~9k\nGo+)-%S_[̄x.x^R6YTe`R`Q\N[VM9_mۯiQ;H4ah~Yo^Kff&nՙt&uyW?02@Qeʣdp56VKJQ^@Zr:KVT$$g.W7-&#~!G_n"J#4BHpH KDr+#}@bm\YOltד#P;VES8RJՍ^]2)Ws-BSI/4Hh@V?U`n_@.haDkIOhR7כ[A%#MM+2f cLdy ɣ(/Cfp6_}*0Am}#g`kqCXB.bIL1qwUĵC_A,/>s3aƍ͉c(6X `[Y⭧N^k% I|3löT^sX&ϘäGtҏS0Y'ۋmRVY:~*s'Lf 2ujuE=E&+f4\AѦ(Ҷ|8/NKSuTjP^ޢ>2`$0:6ufTqSՄZUhi{*cl@gk[Y}*BKLݟkk0">*oxg;um[cxpiPlFV&,:KkrӾfJ5`djFoAgkѦ|(΋a̭ě|[*o؛o0)LUw:dk0R7ΧW@Em##%E|r9KȎbY 5'r`f]·&ѿ0~th8#F0jz,]{S>fY{$c4݉I2dD\>I FOIs:e 狭g7l7Qq|;(.s7Q474V\N HVVSwJ5\Zi΁s)dۀ>ShUJ&Y-NWVx|祲y*^g@O FO)ՐDPׄϮ:|Q*L5$陗Y[Ii!{eRj&;MXʆNoǎ=rrm{## 3cg/_#To-ņ2bwm%8u|ϓmp:6cs ymF3x_=a OܵL sl0~z/l__ +/N#/O/hP,g5;IJϓc./B"hD?FH@T %ed(֢Em-]rO|w1k.>1zbTLv~1Q  0m 2R<Cz jj_&}zD" Д[^ o9g|3)8w=gl6%M\=c]oEu jk;gMMF୭`t,fF3X:?ۑ hnd|CeAZb(qm7],]ܡx}'<3j F#`2ZZ:#G27po!'҃>3oH|0#!&2B/!8-\3zuĜcMϟ%Q*O#{K ǻKb2c[myR_~׆-r vE7&~m6 ?=g>2 }όq}~JQ(l6 Ngat҃KFNe6S|4z 2|e@D2xޜj/UXZ N?̣TFIp9w8|b>۰##';t?w[L\JV[=Wܽ8y"iz J7>;FT6sR/{ ݯFՀʑ}O-`d޼}0\݄qOtp#O=ٙQS0y,F}͙kfl55=”#40s>۾a*c]Ev>:NgpyT:vR+x Ŏ@+&3dq W/[u;o%`a}λs90|]Ωx~|CׅR:NvK >`,LփAc%?5 T7#e6[0WX1k"zO?q+Η"8} 
f]clZWWIg?JWʄN]TLIV S}gTwe򩬫衁QBVݺM#6\K>Ȱ:{%~ρ-w=v*w&$˙p4{~:?ߗ '=˾]ؼ˛ªG牑zWXW^QUڪ*y\wKu=&`N^O+`tϟ2yH+2]L|c}3>OJ/%5W97}tD+"9= ǝUmTmy ^Žk4MKflg୘* &ŀɼ??*kzhdӇb[ L\V.NLXk%j!(S`_;Dp|7|p4hoZ2*n• ^>'Yػ;?PQvM `N;λi ?`8yzz6 nx|^уYc0zP}}kp|ݷZ fz֌?ck7?|ǗKɎ,RYgV x;V}- cd}0 F6^^Y[tgcE<&ϝǛuNſwvu{(.ƘA ]ii} _-&_lu!($b0NU@VB]_#ru{eGؾ#o#]DŽ'C3*~ߟ ɴZ's7(kOϱf'>~LK~t#._ʪqx1og{>&.RH,.6ѷDܒ=f=Xkfd0,M`nGce=`a7yO0b(c[02bRlRNB^]9|׊)З_Z֥74aNJ-A}DZdEpZ{ ]-yz09X$/3I\mT |H L\>0w?FN^@wFsDcUXk.4o B^̭F5dZlZ.Pl*eW'13g@(|}LavI Ą!}h *r RYֿ;7Rx<KCR^YĄ~^ߔ̌ 6w;Q+e sΜ+)6X?;}F%^4װd^O= IgŰl<"}|ʋ*/Q^] +]^w4Rp([CO96o}˿(0R|[]Nn_gs7e`-8^b&,iGL8]HW 8Ys"|܊&b<0e͘ɌykV`'}9Q:ʶ +5i6sc2j.ƒudV#_׸E̞o$v`S6Ifo(ad>1 ˟·?aʴi̘ Ujq?c2qTfM$~> ӵuLBŎ&b1}̘cmG/(Z[fYaBpov,Dr i-#T+/Xk+C m;{V09 F;foUFCfr vz ~;4-=lڲ 8!ҡ Vm>]ٰn)Хnw:e%N_fн//P F ^ɛtW̠f{ GmŬt2;p?<Gc3wybWe Ѳ:Uz:D!j-{p/xe⨭ǥ//%Ԉ ?ӱe; tB/wgr?+y['0\R*۞w~s \X7k/Uzf{~3i~GyX({CXUBZq;OVz<$!c7G9sF`cX fDEjyxͧٵKzw}rG_owgWRr|(}]?C>:Fwj1J6gWI|I矫|<9*oւVOn;jBO+5~=z;?ML-+5[Mmky~#ބAU 0*6VP`vhy)ע|jI[ף//ej\̨T+mٗ~ױtśߪohW^9I+o|EW'ަˡu7=t[RZ=NCÝyo=h_'Pa] +gq2ю!`t?dX L\-`tdp̼EKXdY3p>[/a[ŭ]Bd+,f3o2֟%J6;_ʂqV{9S=p%Kl MEYlx=z`J|E,V}M$`;8cv_b?Nbj1*M㥟qe5\odV܅q ͹mPRR~>|a/)matMMy3Oݣ)kMG[-;dӞ p- s1fD͸މwLx^I X3̪Q띹nh-Y1˾w!TV`&| 9iMgn_ޠ<*oտXV Sq̻`|^uW`M+d<ɚ^!Qs~ϊjm0뽈+y's6\ʺkE}kzvD酽|ynmiq-M=KG秘跂mWd^dtOk; {oN+su#=9K3ؖxIoG+CXxuC{myf3{o ٌK&:9OSj{!!#%km۫F{m {v`Tx{O q{}__y5ij^jkZYMkϋ@µ\9ʱ3I\r Xh` >뙇9t"`/g ^{g2k35?=OG}VvJxvst:ԛELYE9Uޢ7LtdY?&/}Z fdTfx`hLQC ^ X楣}QfyMgdk; 93}gu2?!%cg/1҇TJck@H$m+`teO<1uJ.ߢk  I5ߣS68*[)(%+NRǔN'unyO4u| [΍_9ouV%=s{+ʸ1~ΧS6("5ۛ[&J2PhM#Vʵ>O|>%F#y$奙Ύn9Ԙ)(\X:+O~HZJ-W-^>XJIEDJ0sH_ ߉\(LD>,[)I_7݃l'!T@9yy sw=%0 J]2'F.ߑ[K!gX3&bֲ8xf2?SCa7N?¡@Z,|5'wsuJW{e5zFxh(hu2..ߡμ/㲮츾2QZ40ZGxY Swf!4vCqSSXCޏi2/Џ"WQTs4~n42F"##HH$`$]?1*rȯ/Spi)v=Ӊ]bx[e&4r } 0z'vo'^!kg'0/:6K%iY㙾 G}ohJZqdz`X50 3j;Ηvx^&eќ(3]KT3lt?"5 !\ܲ)`7(ODF"#1 )_v=SL HDg\NH>.;fk"wPvY䮧xr6-i_г'jhf-|?OcY74Šlj&({9 t I2q*z9_F k?=LGDLq4b?DF"#q9^ ?Q=:}OQK\wҞ2}NC}̮yp7{.+v 4/t,;:n Zw:}ieM}os5^;/i*_ǟVWpn03Z:T_:D=xHM !2VG%DP86=D㽴b%|.Nߓ5p{'oNtQS[۟$QHHH$`$0 =`$> 
XEïK^zMzϭi=~KZv'=Piݵ=U~mm?M TynvJWu)uQ]|h(i-^JZI_#oFm|mn7ꭕM+-uAQYw##HH$`胑Hj/JRܲ|Ib{\NVCHHH$`$0 =`$ȤZMW ̭]yFF"#HHM" $xI8 0 yjܪ=#[~Hm`T-zTt+I{W&71 $xq@LHL2#:eeet:)))i*ݚ&&Ҷb2 =&`$&`^3L U ɭ | 1ihjAZscJHwiGdh\{(uqޓӌc` 3H IPD% $ (96444Sg{}ǽYOU_US5.SE H d2$="'I:{#d2iOtp\q&_TФO8(ixԂڂ*luuC冂 T~lU6TaQnncA*zݝ--F *+ro2{4ȗªb(hTyTq9@{ o!F lС?4vT,?NeG}c4B@`tpp 5)VZGZSI"jPSSMjn), oMX7ZfFDKklXM.˘_XdcG0Rk\[AjԣX^baaMLVP.q%m{c źK+kV–b V-V#ۛ*{ȖX[C"`kXYZdAm 6M wZEk0`3!_f~au)3-lZ9 =l[jyTIAQ!oq]#͆nws2KމQSt 1f>Rϣ;B~ˣ¢"?hbQ0j,!f&EEERPǣ_wH^aEro?`iςݸCGui)GGpZt=GKoySEe+GG؅ >C1oL3|HyoM2,2}* *Gv3GQ}Dŕ|pl%6#hvvNЇ4)VZS{>͔R1oNs+:MJxd *JM(ĸ0#\b12(BCBIƮJnVA^($FGIhx =]8amR|#$DC Fԏl>FnFYS&:TŹr&ƨG, Z^_)߼6>Or <{3C|3bҩ/)nfaa=xqnHQx5:$]v`[΢lIFG^ftGBXY\b[ɲXfg"HىQO8[u.53S. ?h7Ĺ VYLl,0&'PxT?^ =W~%+iZ+FG_#SպL3x39`CN70M&^12:ƲR^]a}c(vbk^J8{2SFGG]hb$ jG=R',F6|V ]{y'-̯o3\UG6p';L!?==ƺ݀B@DWΗi|E)Srh6Bq#h◔DȾ\ L))`ܘo{s=GλQZYɻBOW.XՋlȪ=]&^?/w[)M '*I.&2;:_$ gV $E$񰳉vL`5jIg{׎bOpw[k#_$"6oyLp/W8P3AG]z,욎<<3loVgyXYj_boG# aRHB):~4oS|/SRu9P܇lU6Oyz/ls#wo_\YXm2~EzR6t6=&;.9!=[ 1bԟ{h}'-$^@wFc>3>x;fmvkZ32ʗ_OFVkqr<ɉ3yjŰ9KIj$nԆ$y 8EU1/ /W"JYVMO &6ggj{N&zq'9x籬g/\Hg`~#1h4Ri)VZ#3wcpNjXȈ&** gOR=pH;8="'-WÅ"w'/ZnE\5t.y@ma"j[{yTC9[[v5]o?''n\g sK\t;ϕ,gs7]+7p&| uS'n\ou|Ν{M444tdTϫ Lpm/YWS+y<RpD\E'K>/Ib|3I*nS 1NT>飯?qBF -+B>:&vUj-Sg\1[v>Bv.04̓q%g]EY))*&#) '&.d$%QX͞+[Ee zrvChHDD,E=emc'y8"1vxL|T(~Ai5|<|Ehz룧oHG!As HJL,-ފsb-e<;R: Ot$9E>ea0PHCC%cVtMH{ϋ#j[Qy6 >B²1|@yӜWL}EFNp->a,K߭0< %A)Z[n]e^D%LON a^tohq%|oyS_d7# .zѿ]o\1Q rĬR_I#=KnF 珝%s&՝OhZ.YYD;qM?!FG}ӟNMD1)SX][?+=t2#OɛQxgkq?ׯ;qqFM++^(yt#.Ql$'NrY7n!vh g̩ey]2.pgpq4X<ٍ-vyŁ~*OFfX[0^>岃+,ν!; 瓐Y̒2NKT*_(aAd^kFw(_ 0X`Ш՘Cc#8ثV>,J.Qߥq,MxƖ1XLdr%*͎ŨnykW^hbvLRE?(!$%p6btEmOA[h&:7h4a5w}g>;z4[xy9ՊkF7QPڞ~GSzߗ \+ƴ3A+a\ BmV}b< (BwhbW| 9 tp`b5Оol1]3Y3#s9BAy 7¼IӆCUxSr;?/XU t <~cDdQN1lq;!dғq`tyXQs+*64spsa/8p}jW63`ؚ^a12./ M),rr2>Ő M`%f8w͓gx^/)@^ْL^$ 1蘶It9_2C^140uTd' #f2$'еzq(^+uhV_C$bla{cnYKKQ:L5P4ZDHborF*)ɑDt3ܲ&q23P`Z ӨbkgI$,7![&!FE@||l'1!jط(`xӊ` ≏fI3k:MsI1$ő,D @s $%Y>Yԍ`ڛ8!dHJl~5;*j2" 
U΢@3Q$'%ȢDjEP~):Z.JH~jlhIo-Ҵ4OZǘ׈Kmg،J?>l&z=6xhjP3F t`b2a}lZ0YXMk80b2b1,VL&610[mB>66+F!z^jB>os?ڡ63`x[1uPjw!v=(Y+b&X-&Lf+9ڪqfAcb$lf5sn94Y۾|7ַЊh[0&VSؿ#Z&+m -נaI &t5{:=&-Ѷ: jF-9fZLFtZ:^Rm"f@ jIEInE'HͭMo(} ":}$(ҧ7Ebx(?B *e !F!F!F6 MPv?zS~mO#m2~$V@@@? DA$@[BBB DA$@[BBB DA$@[BBB DA$@[BBB DA$@[BBB DA$@[BBB DA$@[-,,!/KBT*:'iSSSҧhk'h Bj>c+Iu$X|vM:Gu^mmmM:1ZZZB&I;qS45y}(h"Ą8_Ϋt~űlGbctxx<!kF<%@T,--!Ϋt~?7FbpHqlE/B&(؊< b$b$b$7(D.V###1 DA$w8"  M "Q ıy@ H H H nQ] !F!F!F6Czt9-HbpH D1|Vb$b$. mIM "Qc+y@y@@nGղF@+$KMKˤ)v_Z,&jdw90ۭM(؊< b$b$bd6;AIN;zt)veeEZhT/BtZ&wcWm] mSwL|xjket1121YLz:vw0Z`&=:8?1 1Dg~~H~>I:Һf+WsiLPL)&̇U;wX݊~CgWvtegG tȁYZ=ta=l.t!`@e_ͿmoI3:=64𲷷xȾv]T;FՄFfG%靎jGNZLV;`Echfdab#`gG6Nܻږ.jÏ,Q78͎Z΃֧hx结Ox^A ={B>f/)(.Ν;RU/)B_Q;4=Cuh})"1&i4t o?a֧3b$b{,||v^jqebgv߫ƻ),*gj%KKS3,|́Y19#i)VZۜ9qyE1эu_v05ljFw0ΎVjUPVQԺʖl{US6VobcV[#Y;̡#f#[\DFbyi| `|%6UBZoeux4"ߘ%C %;f=C6vP-,TEҲCg]u[~yb[o8Fb˪%9$;UTTTP^^N˘B+7 +*SYIqV_J~UJuu5Ź7vލ JҢ*vvPȑrіZ@W#aMl+79,54g'F!Fp J}XlXP7pfcG˷hIV#ʭ=Z Wr_nOyhI%E8^t5[]DP-2qt7LwrRp&:NA AQdqu2G" ";s8-3 6*c9_ڍ 2oQTMLx)+'>6z7XL̿hF|,ֱ1pN]ia$fGHq+3հ5Iz-F(Z)h°?CfRwnHmJ}]? maiimpErwэqGVWR[wW.p pCc>dg[ɺIxPhv=2',Iq ?;11v=4C޾S1N;Ofbk&ݶykɪy~4*f9%:Νԛ+<* ,,85g.B+݂69ގ˅?i_`nȰ0BB"ȭzRk¤ݠ20SL#,tW1ERXӁZo;UGE|睏%>:[Lq/ËׄeW5"IFb*|ⶎ#1&J|bd"[]XH#$47&yp T:vA̓>"[n?j2QƹF(iW7/ēLN^dnSY ;_𬷅Xs=B/H2,V+˃\?ya|Xb'W<]ܪdf?nahXs>P.'7ayZǹJ'q3wVσ,Wp~NN.p=$=/ 2s$"1 k-Fg $ z3G|?{\?|nJJEJ;Z"$9<_K71g]nn6N{qav <^60ۓ[H:;9G| ik?qZ mOgDqn2[X"#5Mr\0zpu#J:=eNϫb~uܓG 446p;-'/TfzCj7׮x[\Imc3}/'wF2B8}5Oʏ3} j^zűӔ?s_3TILc ҝ@d6!F?'V3]77bQb{h6W&q­%,}k`Htv$ioQ)q"x@Ca8k8Kg.&2ūą:S5f,TX28sg]ȭ~nϒ׏s8{֕GזrUl;:_ \ϟwh/Yoj)ޤ=O]$a |4{Iu{ȦGh07 ogx>đX;w`& OF(jM J2.l%s:{߃!FVuUqulwRe'?d%. 
fZ _FvLK_J8ρ-BΑ<(H.Yv=Jk=2=]h"˛nO[` sщ1iʣk\wP/')|Wd8p5y]CQ:Kq]5N\{|DURm,-erȚ^;Z#݉~«hbn@x`bJ8E ('1!'2 QAħN\ };pr@n՛c_GQa cA$<ҤOZV;ir O\ے+wqɝU$Wlel-MDypB/02ϳ0bRߜ:IyUT3EU0=ՅG@+F8¾ٯ9qݟ sF9 'i\Ln1nNJ?~!,`y(U@UY>}9qťS9gZǖ)݋ue†H||yk׾~b$ 9)?+"k\XDĺZdUW#r7}Ơ܌$F> 9&d\wJM+s<*N^OrCUVftߒvzpp@9S^sKÊnr3#FnH|,`' אlM΋j_#b7~Af;qǸ|ģt~_Yٹy}~kg/s$Oo-i'Ꞿ@|akDUua<d_LT=^Rr"OB21S{no{t|j]ύݎj=jv>Br#o 52 &6.Å %]/vf`,q {QIlZϸxY{&!F?)y#!Ȥ)ۋ~eJ<[`n3$L#=#(?"B(,-';%OF"Or7AYQhL 6qGx0ACR1>K11KBzՍ[dr3)yVF[i> r9 U9y9|UHrYXyTkMExd$EGһa !+.,BΤ_f6Bf)yD\F5F*xESZKH TR1 vXR JH !Y.JH}vh-+B\¨bdXXYYATR>I:Һ&Z&MKe?{i@o_6L\c/ 2hNN`vnI&&&gy{;Vv3 9{:z6=դen|/GX+ 9^061 ]-&\If׌ϠPf3,_Ac6r>/Uξ^d[{XHX^7 1Xح7brr-m003%]ҼF-5S< B&P[lo^ǭQPjѩx{^+3#ufggz&qxlPaL=1Y96> s3VE^H;:_JB&r4Fz7y3;om2!4kkˬ*fSR12Fvl׌Y/0(`"24F 1?5΋#.cbhlllѧޝ'H:&=7::4oyҲ?ƺG,UooϿĂ`ۑMIӀLn|l @Iw uLZe ? 1b$ b =&It7;Lb?M6x4-͓}1CCC|c/ z%Gw?ԤiiY__'^Wxb$HX葉 ###Ʉ^G}gcYm۶m.m,m koJ˱m۶s7Ykn߭My=77}VJ"GW3=weRx`ƾ(} 1c?Í}Qsϗ}Qƍ8*0:taÆ-=zt={ $[(e>ݻ7O #Y )xy`„ ƾ`r@)yW?8ϬNLT:|m|Ku1bܹs/Y$g u0 -4 >}Qs*GԩSvi6n_Q6mܹ޺њϓ*`m.<^4e!$6M"ӢQY-q۸quvni_{GΑ|(z+!CdLiLH-M2&rDüq۸q:n(dߨ UQ; Δktzu9y͍5w[`ͣ4<Rm;ZqK-qșn7 * #}acakͻkCi)]~W>Uf!j r@bIzD4sD떑68y}6 |>zz6j۶Vza.|6:Q]wo+jQTX+iYCsWH)ZrUYrnmqy>n 6 hGa?9F%'v'u}͟jG 冚@J""nhhk6nqհa£"xazS\Vr%Oѻ3 hϢMrn407EӜqy>n PM=skz댼? 
ե~@V<)/;<@Jnh(*JM2gm8nE@kj({/%iaGGX?\,y [y?Qy,1x4h,nh34n7[x(ܺl/D))I5oMrCrq)n7qdEn0XϩRX//yCWUGE9M "vDfv-q۸q:n/EFR*M;߹Ap']5>iO]Kxo]ꂛmFqDq˸i6n(VFQ,-oN.xmu!7[v8f=7Na8b,.,:\]%) Ydfqa r۸q~EqTHY$P2/Azb0,)xP L %z0BX}ڔ/0IENDB`docker-1.10.3/docs/installation/images/ec2_launch_instance.png000066400000000000000000007125331267010174400243530ustar00rootroot00000000000000PNG  IHDRm\D "IDATxxպ)v:^I# TJ $z9W=C*Hb׮# vF9Hw͚'d,Sff}if2mڴ>?𣚚!|iX,bX,0EôfB fW_}W۷owyo6nwDzX,bX,QV,8`idDD{OyӦMZ'X,bX,E>E_ѳ["p"oyqseظM QYj7l͛77wP;BͿ>hiX,bXXO(~EϢoѻ_0di37|_ ;裏Zׯ_ X,bX, E^՞Eߢwѿa1Siq$Sq4;Rt?q'ܡ>PGUTrg.jyW,bX,BGѧU+E"}Ee3z}J8RM9n,64e}jSQvGD>cX,bX,=>E_iJwѿa1z5cʍdLauM ֭ViZSjXuj[U^?WWzvμ׹>[uqjY5Q_u\ZycrONOul|p+6Ķ)=yĦcwǦ:wl>46#qtl܏f:-ڡ:(j]rs&sNlՂs }k?^:6+uk?ٞxVcƦv u#Uc6rL%6}}ը%nn'u$vهy^w|=DZnǵVεV|yVՄsU}4Z#5ך"Z#^f{ ZzjVH5Ҹk^kFl j\rǶQqz_k2kMaG#6#{^ky1HlҾk;"bjwkMSߑuzBMg7ZS2M>kMbwbq-ߑ?u>Z#yy\k>g]k>#5!gsY5~6c 캯51 l3R?Z3~ZuV?iXEg4Z3c&V1zeZEE.%/ھ9m2ظ.z+',G{k[y?o\~=c7DkKZoL$cc2=LFq|z c+ҥ "bX,bX_0~&C$i/Wuۯ}S\nܵkW[neN#[/=[˝;w&`,fl66V1=>F/\Ҧ_B?-2K "bX,bX_0~67ssy,bX,Ei/w(m jqqe[erǞG[˜e_bma8DwEi/z}͑zŕ|S7_dggNS6,g[,$lH.sjX.o-۲-rEgiSfR]/}tFnN [#=3jYFt8'33S*2%&_DbX|dҭ[ݻwG=УgO 򐗗|u>O0+hM|Oaa/k/Q}7 _BĿ쎡ȡXĜG{IOӞ,Tn#F1mv9De!u ( b\%ͺc%RRzCWf!5!6#;"q~$6wE,m}_$ ;+C cqsE;f#,7SdMb@*16'.]ot;!֝k6 i7NV򴸅f;F&͐6S^ Q`dA)2Z_yLy"YKw=vG:乡X'!{ XDܼYwF|xdtH3%;y1(?S˿{Yc|ffӊf8x{_aUr>{:dST}Hi4d(?TYvHSp^Zj*RR/d'yȈL=,Ӱbg|)Ip6ғwN7SY:lYX}IKKCGұ#ˊ1ʲNal˗Y&^eT\@QK N1m);lYed0dD aA²)9SODn=!ff܌ [П~%~b+xʚ ˸Mǥc2Fi 8/f'}kvvZm2Dx3/HXu@oY_a`ևY'Bݛ֤͊R^\uiԣZ^]:ƺ|W_JJI`AAS#l)mn闸}5 liK3:('gƲv1G*{ϒv($. /]OxRYOxuvX0WxIY>d81ۦuLAIkR,y sґ--'SB:c^Wn3sqS9YTx5 bߠ_QO$&S"[甶N&gw0,Z ^Wsb( ɩ\'c' K/CQfu.ղ0/dC::MNv'z8>x"mNS'IIHLJvIfl˗SӦ_:G Ck;:쌉9gǑ@'Zg:ĉ[  Ȳ죄H.SsDB[-@oλ kjݒF=\BJxHZE ;T\uԺp{Kթmcfv`𾍁! 
U!~N2&̥Scd& v9*uvfH 'J DF}: Zܸ`|6[@޴ivɋŅٟN>מԡG"mʓ[eH"c =RI-/ϐ65Ϳ|/–6?&f(ikw3|87HJT'Bd[λ( .%W/~UtL/Y_1??#Ѿ}{^kdt23Mf̘[nl꬗L {-Oy1fLu<2ҳQZ5GB&׍bXc țyڬ|v).* ;OH),;C<5P󸌝Hv,e_H``` ǩ 2% T?.I̚ty\.td} [JܟRȐ!CP5 CP┫\VYcc\il[A ɲu-m#KqfVK[7Gnj;C_),FD؊ͩ ige܊3ssLJ/9-NR%0ԣ[ZGMDژdݰH}PWc-Spہ > =^rVxAV%fJ[m=%licg.(!a`k@IVpH%pG }ۡsV1Gښ qJ[TBzcKqa)hH28'(-'wR;Eƕug'p1F]蔙G vQ󋑛WR%''uIX<.WrD<4ީ.[{YMô30cY:suSfb劻P!Jட0ipmwcŢ[йSŶcI\u#Y'qbX.3qlaZ"mP3FbG OFِAZ yN!$sǎ#;Nɔ%CBRqJNb٣#%n5c:hIuav-qȹZ| ɰa0 NX<-+Nlwq+u䶟ζH% wÐ}1{&֛_;&7q$嘂"¦ۙZՅza>zQˇS(p< Ax.%󔟯JgitcҢP=eRD(f>JsÉg0#a &6 FR^yŭO_#fS$hж#=} [ؙ$ V"m.8Җ薕cʴU!.qa$#}-{GOo7q`U-<o9?ކ'tIOwg18yn^7g^SO܏{#N˗ta7u:ǏƘ.^[IEbDe9O'^zO>[%ೝJRO]xұ >dv;]۶cW06TzN|ܻ9lݽ/i7aRu?9A,۟pûq3c֝w> >1Xoꪫ#@Qc}?c ]Ĕ6~f}zSߋw\)jb:aܤaɊ{l",ZK43Mgc͚UXz%μJ>a\_n9^$c9?ƌ+x;`X,MNgքָ S,ĀUq*(&YeFnac'J ;҉,fD֪P81ЉaLɰ L y#ԯ r2hy:uNl1&Cۼv9r+TlQ@u[z{Xv2nlk͕ MϡZ֜GbQF)Xv^B2oDI.*tpSDb$fڴʋyowfp]d1icPbYZ̡*&4},P#3;D!Zy6CD%sf#cJZ–6v"IT%m-6.뒙CngMmq).l6h8$wwxx]J'MdzeΜ;k޺{G}xOp <7o07>3gNooVsw=3 Iu/7ov3g>|WfbL5D7['qa޺?GbXtIOmĹ3Gqﴹx? 
qܿ  OUQ5b=O~7ϯQuwo<5,IU7[ֿ܎}_A;q վKӫDZ"dD ѧ.)k׮ž}0ydGֶoߎ)sLOi׮/ne8θ{z"̝3gƜs1<c0J3w,V1,bԅ|=:M/SPعSy[bm6b ̛7ߗqyn˸, Cp5ôTZgڤ3mHKتHMfƍ&hi&I[i-`i9rxV%me$8VJXv[U#mSڌ{ڤ:/+N %$ei#ޙ6#I9R"l526˹^h%n B"m8l֐68I=zԡ u:If)MgڂMXbaC(zyNl(ZpRJ-nqsm̂S(Fv O Cn;Ew-liN D}l evZ%#WF4".6h(5_s'+<76gڇ3wCwPAY]u_#ػC*anHWBL]xk=n߃NXAHV} ~X~=ك~ZH^YNDn.(.Kƛ7a:SgpҺi|sh'ޮyױJ^y o٪d~-1j:s?Om¹sД15U˛왽XP-[A師q18p,كjDZ+ocߑoOOwbZa>Y>127x[nŖ-[@Oa2!"m-ZUK&aĉ|uT %g8' j&ONJes+Vږ/1[Ym ?03g˱d,XፋFʶl˜^>e[61M-%yw=mz<Ñ 72$JPD76L;cYsU:<ߩ204R,2)6g3z?Ȇ$l[UÆGD19C(K"""l UGۄQws*Bm=mlc2q\NMq[5NoWc'qI=|ߜ8JN݌k*s-;3yS1u8q ?^]8wj .w(i<\p5rڨcȬMzQsOON?Gvdl Μ܃n'FFZHۇJ^rHevh.7Z~^Ю5xuv1nxf%'Q1>1JGھ#l111#۶m[!/uɽgXdD; J!6%#gOTnqr%m=3`%j &!)3&OY!E}XcIL3~?>cr߶eSe[Ԗ~9yuU64?6ד lv~Mai270yBG ~`Jbg 6mtGOY%$ϓ#EaE$&Fɐ!a+K%tK͹ׯq+( }rd>7^mdDTY]-(Z4ÈHI`HdF*)x݇L#u0A`"(b)luرe14y Cdh[ܤ^ 3&#C#Gy?[|9^1Cj-hTS-J+6.I[n'ԨcIֿܑxgQ9u/z#ƌc-q۳xi(vJ3ٓJ};bm`߱s89F'ƢpLe1zijp̿m; N9&֠ع]b'qJz2qwU qxSWcqrǸ}cش8m.ůQ˃ƫoNhۮ:Yq9:w)>ϟM^Ǚ`j<3zڈEߌ(۷.\'~&؋ޭaж}Dkݍľ Ѳm<vىW]R,}}ݯkȬHg.'_vspnlxPX?~= }>q(FX&/ƿFqfR21yk]jz,X;O@bL F\spY}睸}-蒖oǜQa¥;`\cxYmٖ9]_ڌl9Mf߽e7JˋK.ኤTx}ȋ:ȋ_y_F^ǮI:j}oD^-q婆1듯@y/1 m[@W]t-G0pt EZ"FJ@Q3sooS∖A '&/(IH8yj[ R8a dߞBJJH rPeL;D؜!m2$yߥnv!!"!Q)m>pBdyw7sm8ːX2"NS)u%\Ω/Bzq2ɺ!I:d=C]Z.rN6;IvyJVQ$Ո9On;{O;s$::QW[p~ھ-X́i|a|/5G\#Mi␱wjIX4V,{[(U%( *.1z3 7Ye$x28FMڕ/1biԫoY'&x}9VD-RCMkXl#G{l8\YTU*rgBe\-^%ZV}St QS)QBURUџ8IVMd %g)b<&0A$'Bsz6b!'L|03ԇY$D:'p]@J);S JVgb޶ΥZŽ;gA/@1~xT빎i(+g: 9V$t»X.!ڃؗq <^D*-|.c:Ueκu+) %\ —;{kn[baGm.y0܋#0"HyQF0z$x=cÀ6n.#b ۏG<&0lXcهq a$iT݆߶u0W]6?03cر ^sҐx 9O˖- ɖ/պX,k 11kqu3iҤFk}}P/1UN4\v6NB❌ɓà}]/9—_/-7~3) ul˶l˜r.x㍘:uǴi>-iv.N.:X]~ՌxL&m.|ium\1ڲ-۲-sjM}ʎ;[0U&fk0~cTYihSu>={:s{{{IJehjj"33Ihtv?pCB$&:H,\A\R6ǯT18AAX:RUTr%ZZZȡCB 66oV¼)ݖ3362HoOÑJo=hH0׃ԪzkתF=NOwϗ20:ίf000<ߘaF5"x>>)9;:z]  f–-[ldy<''\.xmn|Q&08d?r1-@]$_|v k0zo,; -]5{s;ЏR;ܓj;;׉wWIwڅeeeT*il>-]뎎B{7-"GlL$ 2ۈi-V$JPPa,qI3<,߅26q$%u5K:13J|H}y֏E_[ioB?9oc!B2:gYìύ=v:сɢ%ibl=ۖ&挫;)s/O? 
tgom=ƻA5綒ˇI3?H"2`̞ee^Tbr3{צ=C +錫 ~a!Sz350?\]qsw/*K0"·'b}?;wF־V[[+g[dMOOF=jJ%σ&rcĀƩQF|iT}>?,ʫ,LI "p>.C@bV9ʅXnDs  9]ˌAFæ+ZZK\NoY+n/LVCD@p=8O4_+`1Z Tf슳XN(sSIZ ggg=Y}儆fqY{깊ڻyGsuF8p%yp;K-z@7u1!W2Qu{8p@; ws&l=QŸ0iiw0]\%\{Ўt=K… ٳgIMMsKQ*h".LEˑ{JsS+m]ÌH|9 S3:6.bEpB0:]x&ӮaF}1\汧Vf]~X9-~9H҂ ||qqODx\|}cK t'*B=q(ݪNVFHh7rb>as[醃#a1l`jJ#+>d9MgW;[CFсޤ-Fͳy s%dA0NΞ,]_"fJ$#=T_OR?BHB/_t.zcAiyQ8r/=o128>;;9Io+iQjJVۯB[rA[uv8I[wGbm%\pU8*&4m@'|" qd&kI,u! )Ao{{{.^WwVҚ/E}wLv<#1DAxt(qyW7hC܊x3G$',u|iv$.[˅8|/~nǭ].'4:ӗ.qRݱO "GBfz9hu(إ> PȈZK`!+ BͧI$)QjMi((yЇEW]7G@}ܺ[7X B滐MOrKpgܕ|C-8q _<>+8uO$ g<~7aK=e] GR|,>̛gY=iETQŔqR]/^dka<2/T62]G 99w"N%*T^TWWK?碿_Z|-mGVd/# ,E1фv ڪLfƠc0Шy1o:ИQL m|b˒T¸vf{Ϟsa|gGb;?S?=d*] I36% NUbpnZ=>q%oQxD9]!,|۟#V|["F`ѳB[˓r\j~p;{huzFn1֖[٨ %6wnFRU2SGN^Qiy}(&l1zTh#vmĈ  |)czwFL<8=%zz]OغX,f-ozHz5pΤ;x/:* 0Ĵ=JBGq,rcC7 DJS"8^λU|T$lfhB?ldޱ\y.~X 1/*0LJO4T]a$$,$ 41Ȇz&zjɊ6 #B]w{^,aE<:/0ыgx F?WN:,>?JrHM VX;gI8ʜpAlgoTR3&/]]]YYYu?M4\=nDNLҡynx-n}i۹*iA "Zcǝ4-. t10ml &8: q8K myRqNQq*k}pMits%Jq KYE }ωPXuNbgX<8S"Ց[_͑Hl\Km]5b0ՑDD^U^~#mҢر]vOtɡK5kJFkyD;)~Ƚsc}8x%G쓴iHۜ:^ʦ||\q9S\z7`Յ|mq ).̴֧gfŊ?V+:VՇzl1Wv)'&w{ݿGrIލvpeJ&79g#> 3XB{03چ?׉;uX?FܚĂ-9hk\GJJ8Zr)=nn];sR"qŚ 7wuu>ٌD+%$*xG)^ (‚)8`?a/WJjr9KӣcF]U)?;4B}p{S>éKl^u K9aIգKU| RKy[ "h5IX[SVYj"\He[Y)RPG", R`C}K3Eʁkl^'Y&`;ON[ab̈́6Y*fkce7yy1x~RN'>bSjwpzT+Iji{@OSaLk̼8BzZ:9,_29[/tK"-#C_pr{ŻH/śNm(bIV9$%(Y!I29rSIIQ|/>b2Mr:R2%Y&ky'W$Z , װzy)IJ n$?ot!)Y9sM C/krTfnDaHҚRZ[iix X+QƱ /p3ZZh|zDW `Ur ?_ESkni[w&~\|'DKS#*Yچ[4;Gޒ芃_go=!s@*C7+SDS~7o4XnhoŢxHKs7ʊk-𵗘AAzqĉTyzzȷdo GA`J*Ӭ8~J °qZw9g6G_T]`?1̱T Cp ˠMu e>^5[rðqX2^5cm,gQe}G=XͽMthi >NJ ñP9Ek:a4@kG>!~-k['bWgacЌwPjO&n)kbkGfA Rȫo5+ GV'ĭgirdn»E/7e2 H!faaan֭#6va0 z6˴;Zh]~4:̳F~ Ѣ7f[3S?s|Ɉ? &VA7~ykvL3sn<L! 
n>|Ko/E`*7 3i0mYqptv[50U z[3 ?rrW!~Nv88{hoW0N?A3_9D3Khl儯0Cɸf5ѡxY4 o@akkvW7 wtb%#GQX5ZY?B arm;2\ۗ']LT:pN#)[ᏝO$(]>ɇue?11Yó+pQ,m()x̵X6&3w)J^2'B idcXci^0*:7īG,0?sٜQH ,|UV"lg/gpnFw|*$$J˽ͬ8D{㑋s9d2L&ɾ6B)E?2°~N|T=@S*01RG|S '$8qdG2OTR!{#>vE+WOAcx?gL&'''Gm2L&d59#xeWqq NE3"ß".rDŋ3ѪѢiy].D+YX16^') 9GB9m(O|t|\+ڶq}6d2L&dc^8Y" L8I]1^AGm&*8 y|݅xq!G$pcŻqx9AFNY/-5K?+1G]?9mqC&d2L&>>HQ6碞p/8VQ`0o^ZMj MuhN !|0G/.OHmQd2L&ɾKxs%.\8Oueq:?F0{#b %WOCeoGCKCLQ6q-#}!x)~w@m?X2L&d2X3JJPZZʃ(_B$~ff3^S@AaJ`5*Š:6R2=^EYOm)l\e8˪6?55~? C䛙M&vV=9W_)G jbaق4inLWӥnqK8% ,Y K ̛V݉,ϲbdټՎDcNH׈_g.1ax槧Yoc4⊍?}s,a8<`e8fc?uYۭ-,-xWc@867Ѷ^{4K]:LULKk~8lOk005b6c[ss s̍-`^@b2,39yʼnLmR9;vFD(s"A9ӉG(}c{NY_&@m]ONn;-wގX+rP\">*DĹ۬M\↲[YDvV*CNPIr< 5_kT^/ H""" &! 0='Yx7 V@1QDϫIkT\8Ly0IKn<0"X]-~W'"?WAE#E ۛ}*&iDKV9Cv0m,gy)uF~cyު4ӏYu f.ƥ:Ϸ,TI' @}JD;ķҕp&1>zVe~_#/=ˬMnGx8^YAr~eja%ǴM件<=!VmYy[ uݑ:>HkMXWXY^Y fsKƐXjGxֱLm[29YF8Qc2NNN=h kKwhѴ2,`. 1O2g^יz4jmݘ ,YZ63͜D=>]IeN"SE/OӪz[S[A~I&LЪjCo\❕ TL0=5wDr9J~ LkHLƍ 85d2kaiy C73fXԨۻ4۶it;gji-D,b]ebH/* cF3׿=9 芎r0XAW/UthT P巳a;9T^ Xc ,Mtۍٍ arCws]m~7>N:ӂ/EZF}?jlcQu ``ysI~7ʍ zp8&L୸2 \,ӱxD*GWr9 ᑡr s EPuMq,sۗGukJFH/"%x%KvJ|[Nd^aGX9]BC@h^|8Z;9%?ğɳ4~1D}]Gg(ܼ9ALrks;jD{7$777d`Ʌd=נV]!x'>j\ "L7_c0 kOB2Mcbȹ8BRFܩGqItL4!x`75޳Mȥ7b^cc%HB4l$\ocjrHuAZFo+'$=AyS#>?AGt*w@(>z.v;ג∊ }+W8DTl#(沈g7OCѸra2J|heC '.)BBqٵ{o(.Hg`XDOWyټ `?\`PWwâPDϟ& jv}Cnwgc?;XAPC ^7E ͷ2]'2Og J+DjalT-AIHj-?#f2_ͯHbLǕ8jǃв0;Ǖ3|P˗*>`d:Z\?Z˟GVqi͚[w{ŵy>MvyJ2X"$UgIuJ⟟D3̷379wc}<:҄~bKY߷ZQMoে8jD%Ǖ|d_fA)u쎫/򤰅 $&* +&CLxz:_n uf@D z+0I@454a;Ȧs]E/秲=iFŕ!f?!qڹG{a'gh˧.VKu}<:Iv'9@}{)9z 3A"y z ٿ׃'c\9>g?`Ԡe6y3IKAOST=!M4'\4ݯˠAGv3=5AIV$?{f2B=0, [a^;>ZɉVn晛`tt%|E]}֡QƆ[q!)|y 5x}c[Ktd$%ԕ/N X7AHZ  J|ܿ%2} #*=*;5~HWޙti fG-4 } Dx~{9qr#ԵRQ4NbmTvSt. }hkÝ|#?U22%+ 5۽'PESٶw?o:LpÍ1Y5Ry5L/Ur.]?#wq #/3>nPgo mua,Ps{ %4(k{qUߛ[dБDmkh5&3o=ym#%<̉&H_Cô)%%!sa3f m,9-J?o ̖e2=:@x+7Bp:FH y*ܹ_A_#Ɯ`?(o)01^24f.ܸ؃lMe[7c{j^0NOp3ѓ'ǞqY)1Q+Lnj5NDpY7<G7gUypB;yK;SS_Rн=|kx=^2LI˨JftQV;]ClhDf~t%F4GmJLmY"~}uݻq ?. 
09iIi+{d+#KL gtMp5O rZ3CL&sss+s;\s`-ɷDDQd B}2A]pr15Z652O&䶛mW>kFڢ$~͍HEp\VWmJ*Fm)# ]!ƇqPE˚_S!@g1<\1 "NƆZRCV+Wjzi"5>6!3)xlBgQơPŕlpp!-SϺ`g0U]1\ "}̘-2D(qR_}! ãv.&EH=Ln+jo'H+C\j "8)ŹH(0[ETX55ehDKWяtRbK-7usI@H f4""w{sH}y)yYyнB _S`AjDQ(?}b*p> 6͉Ji+Mǡ|z젽5ZD![`щ%IR JasuÌ ΐ(Px^x~5 ?oo|x9NŻ|np.g>͐ö>N$ rT,a/p"ֵ5lx>cB}゚ύ p7K25%{sCώI:w-Σ6.I ""]oh%=tv'/ym l޹hX2cg8A[c9Na}ܨc5C !><{YLlQFmd{QPtԃs/I %@SXx>N@D1iי5$(S ]KrHIc_rH bBf !ίj=:G?8!xx9nD BZz%s8y紼i7mϯre"op #_tN֩B9biv= 93%[ZE5 )oInCqiB [\;Iۗn{'8j<3bL*A@Bg9tk^Aޣ*:\I >V; '(aQ"a^d\-'3)SKH@ bҮI-8*q8w!>"B-lFPe"U!-9>?xy|<+RnV w潝;]Bm%:yxNm.h^?&f%{2ZݘɳZ>э2?։f$ sPyuYoSYEʣCr+V$ CnlhG2<؇׏5oOhеONPncIGѣol#2y&|,RhRRTe>04mhkU||ǬTިg{p ig[j_n$+$"EVX`7K6ZS5D<@)6L_˥%oݦ;ƈ$D\x'E*F5op/Z<̧ݼg[ ~;Yr%ɛvšvpˏ2)H3pm#}TTDb4轍Oï+bw(ϔ yZ5CZ2{wm7#I~O㸸񤮉G8dͦgھ^&vkΑ#/K}NO3b‚)hgܔ0) XJJ.^T5Mf/Z\8Ω*T5=S䬞_p]ZMڇOr>vZ~glO~8‘'1*ejxח=6@F>Ш 5d/~%'ZٶSv҆k~.A4Lg8t\NÏ?QG? v vK mզI×WnZ^]>/ZYG;yO 峓G[BDO34ICXQtYWq4̣-L\'=Oeo<Дu#66_]^\W(~ݏ:.m>*]sv.+GJyA_lp+9$hX') )w|}oZ:Ѽz)9B؉-e+I>bFgOt5UD⣜^=ݱ&fHF$LIr!j(A5_qQdB0cHH>E2" Nr5#52IR& ٧1l\nyFWdK"P* NoQM92GyXplFVD`T0P~7|X3gLJ D"O'ɡÇH%%)GyDGD͕X]ʻdܯ`QX\jzyO (_oq1׊T/R"&#gЌ-&h8 :tؘXNvS_L%F')Lih ֗'FmXiɥcDΥ*NQGֱyr24GR3p vr>.,c)\cc(֗_yjQ{1)I>sխߊfM̴? 
3&+l4le=kol26σKljU*,[8-Ehxt=D^uN2W՘h>n`!&p$$+S<9On" AյI@ޱDuRDrqNA`1Q\0 \_z:eƛ2QֽlC>Gǣ3UCa(b &sN+F /2NMW[9g.ajii3>ٹuvk1q^)Qb(˞,S$eF3 U{w14{ Ιte%s(<6"43oK/R5(iyvMB+1Rs3SpMqַ_yEp[S̡0#w% z'3 OaH#Q,ݑN'ڪ>>=ʧ*>Pww54MY5d %-EZnjF"r|"pUS6>__+2(WxRv2ZGՃɉD0/r{XXZe;qC,:Ds\ gw٬Kۋ6mj8*>qCdTϲ25Irf+zS&\mځ(h.dV>Mo!F'=mDجܾ@gD~n`\ b'^OZVɡM&n V+vfN 7݁*-XXכCLt:CzVÉj){Gv6־Zn٤N;Vlg9z!{kl%bZl8Xgc46o[wC@)ӂ)l#5Dղ qmkX87շ!ܺ| )cOǵ$O_eY=am%bXmRak6ٝs7Mu.YZ.!mN;[1NGzR(|UW:glTX˖mlvqؗ!FHIUtJS݆z.kZ|WZ&ARחo&"Fp:7o8߭ܗkrDc* 'C /]՗d'56{֊jz i ac<[l^[cee-ҶVU:OkfGDD:D)(/۴Ƶ}sݹ(WZnr]IfJb]I18!xח0ox[W\V^m0avff ̶,$˶$Y}?8InɶӮq]E>:oѵW%r\1IA"mdPHBmڞж}lz#n==(khEMAA]'a7c%eJ f0ydVl=-嚣̙Œe9rUL3RS[V1qDf/6m   B۟Hg9Jm{SfY1 6:;@X0{VKo3oC/Or> ' cĜDhAA$:?ڂ-h\P|ppD.;O$#qzt▩0>۶ [PDhAAmj͟NʄTmZǀImbdҎXo3:)c2x(/;A6AAAJfXG)nwU2AJ7svu _K/זw>dκ \صJT^Zŧ}FMAA$Dh+<͇RאIo0f,&&?s3-FMN7v yu锪雟0})KvDhAA^;e rJ#9`1RTU,Kg.G~I3cJd)HJMnn1֮0DhAA´֗cDcf eeu"1뤠by;|QA; 7SdIdEeςY6{ CFo:_Q$B  Bٵ#Iޡ",sMpV-·F0u4LO2ui1l`NaҤI& K0Xm"Jf +/䵲z@mR!" )L<##+|ˈ& s7=:(?2¬:rbbQ (32l0v:QNZOmm BBWOD)m&k.K[GU{?X&VO?Ro.A<{hjs703^unlA"e->1?Lx8LK(#'n&zJ{Z%ĶY#dԡL^o>fLIϻ{_da;t_A6A7'E=_埇xi&QOαrhA6ϭOE ,Hwqw`9%-qAϮjsc:BHX,lPO1Y[Hy{(d7V @sU4O#!/ůS50Aïgäl֩l(|} Bf#CLƒCJ߁-> ꩣ?i/_Kd英c(" ʞb7|۳ھf/)T5klwڼ]&@ WLY{JiuǐbqI:).# ( L8,8 K2HD(*C/^ɀB , K57("=LUC[)Iw~$ XJ\ @!4/ ݸ,'_ _mo^C-={7Sb׾7 ACDlv%gz N*(Ǹ3̡[.ZۂtTWs߬|Jmsv.bq^L8ry"1v-mu~2'BrɒbfAaYeT\X\8D?O+- h~Ѩ |](z ψI @SFx<(Ibw`>Y@,L~s΢ " 76u7tY1rnRw>^fʍp2S|:Rv^hGqzXMJy=Eǿ3鰙 K,_mܗɩ\(PxȒs1wc1Z'Yp`TbT;Y/S ]^Lb mC[j:sÊ~6^' oUkrx:YKNYp4ؘ1Uſs )dKRj;#ļlX[ʶgt,dW .Q^<ܽn.zGK+ qZ˞<' S_Ȅz~:^ӓr؜i# ܝ6SO rDVNUL1P>WΚk]B 9p&?Yoc܉R^OSjԶBߍP3~c&֯+<ZJ2nC[,G6M]OVW 1uͮb =%kyefN@!cœlT@e1f}6_6ҕHKᝏ7w!" 
76"D"&~˕}m04SiyRg{@ ULlGxo]31L)b5U|opCԣmeB=--#L syyC#ZYjw`^+xl6gxjxd)hsZN9m ޝɿM.b~Nsśswr1l8iǩٚsF&u eoLffu"xup&^U\+'oChlA,&7TУ Z PM][CVn;ݔl m ZU ز!#ʝlH.zظm9 t!L;SI9d7p85'JόYVr[h5b/_Xp}n MMeH摅%wP[^k㋨pzo6!]Yd̼1#'7/ mRL3dqlL" ߙYJ̓ZnCge ?e'#_nk_QQ8-ܻ;kq; %6As6g݇~+4p LT-?.@b"32:a֫1H`w2tYvL˹oLր$)4dbǖF 45k]įt1xu%.@|wDNԵL淩,:{6;eX+cI3- \t%w6Z$Z˚,tSoߘ fft gCD?.^p;<{XIoBUs+ڨVd5)cJ5L(`FM*te \:?hϼE$ah^ m3xoN0Of[f'Kmwh6.%f%gn Z~הWAMU;p`wgW063tv7$Ψ$ɘk] CܬR;XÓi9<͓|g xlgFn"qAZPdb8B/Ydw,bDQb;4rAM%w4m/ѐrVwFgS S3x(κ*f0z˵Ж1S&fVzg"=0BBsi5=6Sź /)լ/<^Qqk8KPmod4ɔ9L9g'bzS Z;,KM<>K c5Hم$-hrQ$tbGn.S1P (1^?E*YPK yJ'h#^I+xv^6xip*<>jGsL;vo|`L> ]Xt=2\ LLXU2`E?[I<5[}8B4&w>;h m--|2JàSVS1OY_i0m+0Vٰ@SǠE<;[}IZ~OXBA6u+H=Yk,pPKFVCހ?g"BW(H=+q;C=o V賩7&RJ$4ʸ6-kC{a}dF1> fC-4?)a&k\[ko|b 0sh D,lx;rXںJ7GvjxpQ]Wk6s'YpE9VƁK-;WG-XTˋ48ۉ G9 ƣ)YlqEh+)sg"Re~I?QJS@$4ticMz HG] xpB hygjc(#6w)#x{q-,B`iqDwМZAQeرr?R4L  *)7^EEz#qIp4NIH8D&o쌐 C~'A6GoB{ Hd2_DSg#BgԳm{?(67ߌKg_mvRqed\Vb3Ȣ]&~1UÅ-iUnB <2FCue>R΀ZCunˤeu+쮾"'fI3 7j~r.{/ףXɯGkxy{#t5,¬yz~8! };sϠt\XFewrP ^}&/-:bJu 聯u蟦YΧ[ؿ893 9Tƒc5̻lGp1}f?J3pB-sWWcrƹz3?br6[8t*~2=N+eqQ7wg ?L/B}fض* &,ө4#ؠǓ9\YKc rqO7FǦu\>UIY 8ՆptoH!^"hqnn*YeD-@= -ADh7hooFPk-fV(iht8˙7a ))9dRQ\LZH QSɮw98a۶o`pO釼;$Oیs:tc"1A/p1ce(w,`v绹Afj%x[$2hc)Xv \&)Bی/Ye操elf>V;#koI'-JjC^XZ}%C KxbdMn2ymG*Wo'Q5g[w?{r][l,ZY yyq!#6UQAqrf؅ks.en)JU#WRa2quY-a9L/|% tZ"_n!Sh14gi:u"pW]ˋ xzA1K<(Gb^\\绿ps#VpVZ~guQ!,e^Ǖj;Ǐ"^Y\K DĠ7Rk0sMM]%43r[5]8SShua,eF"m8*+ݵ | mJ#eؔlڴ62sPƥȲq#X Fv,@46+t_zx $1`n:YZwGdz@HWCw EezBW煣"2}1Lr뵤L $;O!/?"+7Hc2pz?K1nJ?WsPe׮,'q(Ů")_6<zٷ mMwcLJ'퐊b>z5Y7m/’ 9Z,˸M$rGa{,1U1qp?.fngK/2lfB ~HYl}͐)[@m;2mhAՃ B_=0R\O/çYhx]D2D a\r;:MFm^ e669HH4R&U@P&!llc-z m(oF쭧 ֫=4`(IF^C sx B[BZZF92'I:IGcxN;0EۜYP\ffގ u^%lꞳKFs[Ax,-:0Z'D[i=Vq\ߧ2h+<o:TIeBYV~{Ñ@Pp\T8/ogقN<֪kdh\Rscs dʁk1zȸ,3]׆2X3GЗ"kvr^oeg7gvy \-7ĝ㳹b܍H @<"d>P)0!Oh0HZa?|N[<mm*(d1B; =U`sD6&_aS:E;%3^P¬8\ĽԼ7UϋSt;ZůWTf' _jxǨxs-X3;h5}gX5O/̸4-{Tx3&+ï0^o09ӕ^.Vt=/M5o2ah{lGD{v&fi;(3ph5v85.9Ds3t<;9&kg%fvY_J9:^ŽrѴFQ%!o wq ]f)wihxnȒd&yc``=1`f6OUD4p5OLoRjB2U+y8Y6^ ɨ rߏQdL?g1`2`TH&wIVslVx[ 
StœNpD=}XuN&Hsxya/-**LKɅcCV'2w.@9@ H\*z|QJTAfto|@0޽\Π9_R.yau 3Rx(jǑV3KisP<+9RϦpRW./T ;i8]޳{ǖ`2ב-V:Q%6.Q1J;tH&ONeNw JM('A2{ Gl%F$Vu$m-山7ǀ+5fBVb.:ͅ\23oh%(Yexb r˴YD*~9(Ǧvyv|әRI+.)Xjjx5mNߝggEAW}Ʋi^1Z€wGed=٨Csd ^hֲD-O,Ib,ylcG\'kyy^ a KxU Bv(Y-udel4)]Nޟf @fᵩٜ7y)Q¯Tr=uͤH2pq-WPDh13m;wXz Q~h+;ϡCx%4ڢ1V/di+U+ޱzwf8A{8U,9MY8QJGN+n ms[9;qR3b8]sɹ2Zú7(AM7z8QϾ&v偙82=Bl kO//#UbR53:!jjɶ0S W1YON,U<4"gjlغ+u.PyGeyB'h2ʝzm8;tXd5 \TAGW=< -@^_F|J;j[VsyәRuc^oR}97}y YZ7@f':Q?Gh{x<~ݽo-{jӌToYU$a*IY<DrۧGe3傓PGe2T+oOs;Fg0sF\VFCg͉?Hsgϖ}?Mzm'd$yWW@f0='Hwer> }:K"s|1=PGNCku6}p\eT5.7 ,i9REh6!j5vcwtv1jJ&t]{<\ 552IY .O !r SE)煅e4u:=ܔǏfPDhxc;A rI[ǰ)j]coZ˴n8ob"f,gٚpWϫb Jm3Xjhc7ڢ|X 0mw3RÔ5,\]ܵF5nfC9|-DhK8%Yܮc5鳦3z>0eW2b֞%f"TEYL=l):l0}?r2mwc3yzJ6gh(- \ܑgCT:I˳Ij7\x{eE^KBr,GǨYމ @!<4Kx3Uw]K*Qbt-ڑF-:ƯT c^G3lƭד4tX`?fm wM=,@ h:\è<1Q?}j?i;Ar`OU|>H,ABF)9, ߈N=Ũ-uew?T971u7J?=A8>T͐E9<6&_Su|dȢX[90;;f<89)R݄>XFv *,;qEkm"<+ 6x DɅR'z 'ܴs紑W&WC:8ic|as`rЖq$ !د[ԽoQ'UU6WDe:\\2:ɳP{ )mKtb쮂 綹kp& sgX4HYy-,DREITcyV.Vv N.V{b1J뜜gA;9} W#7^rĎ'qY悡j GZPո %ZZPrx(cqvNr؉B]b#@iմ Jodvgak)rІ%*)r=HV D|+'($H0 zCQlמO 8_х7,!_5ǓY~DnV={vyx z]L-X,ݞ˰-D(ۦgXřU}iu<0TESVZypbVN-{CuRQ[!i*yy}y{ _L4r>OFwg3;c1jvzDhZ!+3M(jt-8(H ƌIœ,N: .0dg MLI[my+A?qIIpWd[|wrx+JzIBgޭ/tҳFR~ҵ=k Snۮ;&+Jo |夛~frlgֳ!+_|ssVc\PkiՄ'qI)҇~w~F%Ahb_T+J跤,C]uL]Rȫ+xwm%~䀓e׮(ᵵFf-f,[[o3bc)sVPt-u!㘑,.eyZ0#en+;]À- (NPVNLFfm࡬Lof1[-mUf=IZq-u3Fhkp`;ǐ)[ Be'dV[B3 B 7S-DdoAPBQ D"]z É%xL&&(D"2D0CQEer;83&)_WX%tuMeňJ 7mF?3wGQ\~푞Vf&Dz+&2Mf+D>b܍/LeWCQDG"kU"T " <"y- G%bݩ㡅|NϢFq˾lZaff ;QT70h_V;)kN/="yqcdi6s_` 䅙]:y8kHFAMM+JyrGA}UO4k rfmw^8*.EQHXPŌ@A_w:R\FV@Ir({$[H2<)X v;#R3x{*u; mfSMVd݅m)c=)NP[840wQbh*_c"WzBB[nwϣ h _:EA@! dn(sP8.s,ɽc{S2yg"hnR$mQn#6\㍴{cDLX;]( &B B[ ($sǡ '33. 9ȑb~=Q}uR)E*fdvtf0 N磱h4qO%?+T//(|?Y'˩ 6.O(BY c43uA6yvjs:fZ.e3ST9&7Q5w`S)ێx1Y?MaSf+Q8~&x`᛫(ong\}NQ$,>ZMڪ\^ZFyU3g?USs_%?yG`*N+#ڹ©#krr+U 'ZQI0h qAo3A6PdHDmorr %Ău9dnI5(J ":Iu<\œxsg KT$oC$ttfJ&,>˪׾}xa1\}iyw{5GN2(Mdml^X^f][qjfvB<ȶ]:~<}g|ȿ$kP3B>;9jօ-6~三raZqd5O䥍uRk\O3cm>$b#2P͓KJsUg}6Y<%J{S43cIj>H$&mMnMKս^HŏǓ1%b(;oJ d [efj~<*W,WYȫXtOypZ>4-X--vK"`-! 
mqk;Ob֥kg_B{P$_c!ډ7@.6ʞ 7,,}i9 KR0HEeEfrL]8fT85'B[6O 8ZF6x<9U~"qRثw77csy':iW,;aJ>Y:~6@'Na ~V9u M&;p Q^AcdS0wǼg°6HRn>)3cЏH^{[E>o~Џy 2ߧLs%>y3fBga|4| N_餩4Ô 6n(m/>Y7}4+;gѧ(.Z@ꈾ@ V;X緿e[f.qm#rg12A6AM;IGG)9ny@{#OS+u6ũn m'J'51Vr2a<>Z݀ @Ëxmk=u'+LZTyeIONa[Κft?='_xf] 6_o7AP(4"j1.F2PbVOBvs-r$㔑d"~3/'j@9diKJ%]]n?N0=0 %EFhGܜs&mOWcpIV>gxyg:NĽajϔw#U<>ז^}*Y9lRuo7PPm<=;'_f`/wFSPh=ƀ+5u< a2dl<]H "6pRWYIEw&KBWb9gw,`(DS-ߊ>3V7+K0iY\BJ+@ƮI#P ً'n  Ry_~ot>P, t]3VSR^SO5? B q.9va{P\-67:V_e Yá2?p3%j%m\Z<Z6rn y3Xq )D]@#QZrп?fMBL7& UY$aPHk~x4Bj95DaJ&-6"+$eKTciydG1 Np}$6糼V,VNOaĘ$ΚGx A6Aěsl q55hPts%ƙm(N8_ŃxcS=^O ށvUכ@Ź&?5JRZh۾)O#c]ۀmj^\XJ%;2j>T93j6bL~:HVWs0܉l3?b LԦ\ɳvMkl6O<[Gef:H5HUEz׀yMh pI{kQ2%eqЖ@/g@e6-O,(Vɰ|Yz3W2ywGd虳9}6㩱XG٬u?Pe1KUݡ5;;9AcIY8t=5rd׶ w1:)qEcR~0Z]SboF- > GeݩKL. [fZnښiiijB40%? [P|U<ջO2Xנb՜8wG"As+cG껬>UHBmkq%WI;Y&H0yjyt":@]&s6.i)6p*'/ca|20(rH=C^͏9r2'e@T].6.L\yiho%W mfPWH-P<ǖSyy49|~|jV& dXΞ3Otb?~dɫ}'pY6,Mᩧ~͞F/l9~@mGE՝e%,^sH R齛Y0> G_юpCICT4k1n;ipG6o_˼ Xu36o'q`̛Ϣ$&& Fe| lHLzH$]Q$YB,ƐB,%]{DB,ʼnFzcq7qM=(2k{GQ=scz\1Q{Fw6N9MG_A\e.Kj_,"  7rdf L!Zk/"5m   B[GGMMMx<t:]o+p]]/ me]h3NQPU©yqVn$nBu8wLu8|1":J}{VVq1 Ψ:DZ%❜9N<"#I2jWt"rq=tػkOhGF6AAAԄjcر$%%%jasrpݿ3i馵:C2,̥ pMRN9M 5;b3\:wRn Υ#(*ŋZ}4y"\QlHS*^6u 8b_&  "f\n7,תjm`H>4现mvҵeT8qHDRe 3dC O1zVx2J4~An <-?ZXN{Xi.jYQٳ5\]wf1p3W*ȻUPP@Yӳu0gq %3I⟇ٸ/cNǢ,ԉu E"vDm!nP#1*4zc՚x/2!-xp&oWS频cWHhB.;c4<:7ba.x7y{_ m ~M&N]+:zha5^ 1,g;}tZxbT&2ٶF+K *pք F(*v.gR_;R7Ym hwj4?J͚|7jL4 r17* N6v'O· RHDFAo3q\B[PNDN&ZLȄl)̪y*ZbOIԼƐ^]x<J5<7-Vt?u#_]!7Шt~2[[eg"ݎc4/mYd!Z.6A\X'l>Qή唘<՛yLfZc`0#Ҙ^#sq)$4q"ߢ翍x'DqV%АY<KG~P m BDq\ж\oIXU{JyJE^N/F3h_#1qV)آ(@#3y{بoV%JeS||(!%N8ZSGfrFk Vs|-cR^?g݊,_^7Jح%]\]+MxKQd.Ilf(Ts;#\9cK+n9*pe hm,*_\WsP5Q2򓱅%Yb 5m2a<]nֺZJ.= Z^PC\&A s7J72x}~ OH78+i &"]a61:?*7f7qPmaO9]BxoF7||NGodE0&px|)9YMZm$ںj*a1m>%Lv#7j~DXEaN7Qfn#pg|v)`S dx\&cʩO)3(5O0r\, {k}T4QY2{,&>:#v5/ϲb$A6_͂Vo1o|c8@f{5{_c:‚!tz}7s*X΅3Ovd(MAA*bϜŌYy0v!1[XNYG{ ^{F\c4oz -&63?F&|3ksh,<ϯ֒V9ekgOmh2.~#FfO=s6Y貓7%!J+ =HCܻz q[%U=%טpAfs%>6VӨ.SZS͡u<K*'Y6gob'RTPHDDMA>,_I-בddj )/^RaҌT;eS޼\ i+9s1=@%1%@CI*-=h?AMmaEM41/_ 
ICAk?WI6DSio?g=sqm?ֽMSWYގbRDggl!:_%RZG62S"/!gϞBzh❶s9zlFuї]ɌtN.8yrkvHCƙppq֯[&J2=:"8,kNN=G]5~ۈ ә9k&f/e#NZX}<$?_ ә5kӧa?kKPwmo mB[Y4TώSYƚEs9Zosx^b/&a?޺t܃օ,c=猙̘9zoOPک 7/؄uLBb_qǁEײ~bfYL:k\ ʒ93i?v紇*Pp0viֽȫ"muo3Lz 19/eYYk`F(c8duL2#TuDhIx9r<9yb&^{g Q8ms]~Nk5zam>NN$2 }.=`3/s3I``V8沜7T htդr=T/9} tgx)O" ]I^}dbw[+@1;-Lh`2&NYh9uv 6J峙d mwӫlz|a`LKM\z}:a^Pw)Sq Fe{] \ d3=ϷqI6=}gT!M=a[0iZޟ,n홹0A=07ےN\ {fia#'ɹƵi4`vփ]H}r]9w5}~a o/kRv:ᬚ9$]eYzF?\L4eryiyT$G`;G/\dg;\k\<ɚV#& '!6ɓ[0]eY8 [UC߰b4Fi62 {I[S'L$4& $ KgA X# ZsX5+(.ӤpkeYZ-|nC~i ,҇4u97Dp=%èɵhw1>m߲KF?E BZ2Oz{37f2P0kڹ賀IA|3i;1K z7/dGMZ yv'SGo6LqU9E_O#:D?ڢbvhiYKEn&oIIɢw *2Tthh*!59B*J VkW֌Qi)C=0,,7$$f7Ȓlk[Sse ue$&&6)5YAMA?Za{Wu [x}-3'c7Ǖ!5l5WRU7UMe=9dbK\W[kΝC^h3saZ-|h3[il{*vId`ϕ 5sn}N/)Q#X[њ-f:;3)ymwUɛTuGX Аzu^Ξs8<|d=b@_v9 h pdT쿵Jr9k/eW-j6sg{F>3Q9ukж6=ʔY+<{j:[3!w~fǍRIM_wmz {=ϣ mK ;bB[LJ9ý%ϛò5ľHNMg}-XBOn=I] mŴbՔ8Y67/ eЖRkhAMj^ݖ췦z3?I WT;IrMfr*q !IϟvīI|F/3ۜĬeus W1yV0Lm'30u4kvP򞞄{d^ǜ߰*<ܘ mlg6B\zF>jQain k~h[yyt9dFe2 v²]6ewh+,?vzS X1ME,bSyD:^a&Hqb3|=e E=?~13 y}6e1Fy׃G/oTvnfÆ 8o܈n6aI^m\xX83Cq.}Vvr!bfuyYEcsGp9o2d-okF,|`I/={ ns> Ȳ;pY޶zg/.{ı\HNY?+Yщ1VsZsR#}v'GG۳^љ]'1b`">uhHSLqM" ?{"M"4RuzBTv?0E7 D\of= =9k#齘 JWKC'r3H.oTܬ҃J Hy;# ]LuI"򨆍6\%wV2+'kAq|vg>gRzoE1k*'+);qm)_,Pm⼎SX| ^ ֦}\ I|ǥ>3*O^i8_愧2e_9z-D_eF %_n (;e|awZ!,;Sͣu^ǽh, d ^"CقA`lG@kg40/|d1я0<0hm2Y $LOʇF$ = Xkw1 (5bۇ:_;nffatY'+ 07[mk-y?0~\O~ާ~dУhg6iZd?u)ݧH ~ق0ȇ(:2^hfDh~DhӍ/n>TU6Kd0{o63#/EvpK}Ih7PFDSM/&ӄ"cE ]C OeR;Q79u$7ڲv&8f6n as&2fL&d䛌t NKg// l5 3^N^jedO4ړ7(l9Ô GdbKA73bF9eMm-mMrKn0=жdJ0Kp{܉\e/!Cdw&j  mϊmFٙG= zG~- v]NNi-&$[m-IYL=CMO~S̐,c:ClJofΡ<eS.4P~+dD&^>v'ͮ?y$7>hmINb ;VBMI;i3PZinH|<fh=-}PV_ܢ!z.7 0 6ha\eA{ͅi,]òSCbDԏQ_Uw& CԨXpIf[(AAM9bd^k¬$(z%)?;medkSxot]h%{3bڴK"2sڷ?r`FTn'Fiofj:5#ewKBFdlr3T&_ etxJR4?(dʑ2(TW2cg913u,UAۨ—ܹ"Q=:&"a 4R[mm,]L:Z{fu'6%]ϥéL24iw-0>>jJ)撓K|a7 ݭo}Ik!  
Bs"B2Ū2kG֑ 8hAVO!M?_{TNXx>$̼VnGp&t9f.r$}Bw&ǟ6RшSpsN2\ίT} mЖDŽC`Ѳo_#iUݓ93D٣9T 5S[ZܤF 3t=' T4sz;= 07h=@m 4nבZ_D>nC hvL1kοm+%[e#5Ka‰w"/TSBOzAADh}(F4ZFtF(EQ>5~<8ݫLߕ]X6+0v-Yp';PԡU9Lۗ˼E_)2;Fh+h`Yx6f3=>ZhOgձ*dl^Wz="x#sCM2 8h@@teL8E䨍|AQЙ|~ۺ,kb9r>-o+IF6{Rʌ9WD~E6VfҞ\֞,dωB"BҠ$˳aqk^3H&^U7aؗ˔~m |Q>|`ۓ$QVbxOGp=KGPHpG6Ab"GAEA?&h|C;kVp9$!YiZ j`{H[7 :JWUc5.Kf|:.{5kp6 s?a7Edz89;Fbqmiyŋs :;uxZ5e@ Dn '(#x~[6Ƀ rn,qr:_7WY\B=GM,f:;dXAAwڤ>gE9ݧq4bd4,㱤ez<>mNKsk ;#k(kę3)n馩(܌k$ݽҋL@w}3*.m  OmYr)03\0wܠ!K0iUa9 )ki~ ##_` zFGyu5gh|b ?.DJ7]7s<>K׈ GVaKs9 㻵`5{8Ӆ av7V",ЭDz͙O8vG1Yl]FDocumDmUq|κAAYG8\g66ɃnX=s+\< хjF#7a^^[ظљ](@eU._Ar>1xk>_2GF8L:MydK@0N+x@XL`[|\XƑN βru`n:縯t?8n\KOۜ\a]'::kWXCZ~] ŕyH"!  ?kKfjj+koIViI#.Zh,)li,~v|N?:HV'"32Ec]j dJm}3}emuYSF8} _.aQ?3Y7(#cޟR-gb[;l}ݹ-K'[ƛ>r+w †3<)i~+i+kD勣S,,QJAA-<!LLz-d( meldLvnE;At-i-ԞL[* i~!9MC.2fŽlӻШ;8x4aل+rz|zXj&3fIqj2b $g0@{aGŜ=y0Mk7k Mxb"zUΊL IՏs5 ;Y}T󉉇o&a3Y/}w/JHnS[z.$nR.%tDbBBϨsPn_ oLBM AA~3j ,]'fZ)%7.m]ߙNģF뙴; okyty5|[wð=i.ƞ $4s+e sxo ьt|:L|b|bk Nru+oK1[L$ױ/U sLKPQftBwčI@ QsyFEfb3*灙uhI}U7AHm!q)dq-TԲ`9,]}'}miCۮLVT`#l[>oud. 5;B[͞[1LTݳQwhc_ 򘸧 i'D6Mwn Tc4xWń|nFrF Qyd Kqflz盐E62Tbʴ$]fZl NVV 9GUOGj5Zlt7Jz9К_LhSЍ1a4u:2_0tL&  #kiͅjfx'j%NYŃ"b{hhgծ"zưt s;?6u̎$gY/=8ɯgH!: ULn&'3tۗQd ]: 3eo.#Xʶv>M$3;nlVNn5Ұ?L~흶j,ڟWaֵU$b1W9|ͣa_Rh[ܰ]7Thjm$q/F0KS}=-=d>Pj8ʠրalu[;}ޓZz{0P, tofHH5șp/'HFF541j7cbI|L}Q__GK{f03i_&ьg&u7$=F[B  0SOam? 
#FFF%$a[P@Qc"+hF 1% $F tX>0F7<饦u bG>HF(륰q~f`i֣1H wkc`HOczmBR>ӎ Fd8bu6 #@;4FE 5c.ݾzF)磻QKϰaQ7Fy^==n@^J=HyuA=3Vwj)kπYF6_gW&u( Tb\*:G鮎ʼnuֲaK(eh{8⍫k֮sF]k_)6-{HHAzq&APz0^V[ D,,e8oa]wtUO\d:RCC M8lQY/~5vʺN4:+Og}^FGU2>[]\Yvr-'Ç;⺁m@RtN::54 uc[58(1պkz>ehAAYVP~8!+|a{X=,O#uӏYWV=(_ԵZGCm!sЦ丆u[amlq\E5‰7pX{:*4OChk pJ h+wN@*a:ܦ~_6ݧ }Ij6!{QW[ɍDDC:\xA[W>r46ֶz/&h\/n~N\SN]#(" $'溁 h+-wAA0ǨQn/u- mot`c^.\<ǩqY1koiogYx@5,^wlqwyO^%Z:0ver6>NlqBݨYH:{{ίo#2`s #k]{*Na4 H(;MNl2@hS!uv)jl;O" z8Nؾ$f`]<lX sXʋfmLK\x+ YVPn1[A,Ya3by̕qqq{$':hY6itW7bυr)@N8^ƍ߽4j$lzp;B[kW_ @WBWzW]\M]]eɜ;}Wqgچ>yӰjmS6s!_u?UyXXz'Vk&[PH-l@g   fnk^h]Qxyn!ˉaj:8{>^~{yq( #f3Wmv%^>iwϱu7;vw/>RFX~Jڍg$0Mܽwo||}ѱQT.Ç|BS4 m'ξbp̈$If6p6&z"ۢ @6ىƠQW@[- Ъ(Nkhjjah vڛ鋝xQKfFini@яoc֡ f̘m]]hf;Z, _`۳ ( \ ^¬G)iC3j[K u556Շ`cih`ҡnu_, hhY[G13_0jhlxۻϞ  'RmAEVPd A(:IVQN7^hzddoDV@f>'[Z&>Rll,҇mXlGFu Wdh>kPۀ~t NdX۶WYB2lYF1,`_O̘>b6YǼ&&\ku$ڱ13م6i3혶2ʮ1Kך#G}Ze!Gnp8ąISB@c1v2mvS2c NKFcPs2&OfڴiL>7vrp~kg \L: iL]q ;7eS5g/sNbƿ#=k`y [/`d;LSjtR3.C}Y+8z;.M~tpFZyx1Sj=;/ȋ&,@S/,1Zh lߴiS=;YK) 68ݝ0kJ.<)dĨ   B Ch_)N&uSdL&#fܗӺL4뼸p%eZɌYxB*s.L1*.VMgvMG]=)ԕes1ԑ8y)Y} |63,Y*6-q m:gF= ##g1gUanaL_ċLTY$uI/0-{7[oƭIwȼvvER[}˒]75AC}-3:2l]ΔΤTku5˔%[ t΄lbD;]/B&s1sVT239 3sCՈ   mm#=94kq}8r7J1-4?gou+ahkyHp'VEQp г&dY,fӗm&/3L.d`Hc9@]};w0жqB}2l IbVr:sob(UM ZZs|vlqrfD1"!+*!Gh3c6h\Xt߇ٿ"ɶ:<V,Q&ғƞ"|<?Z fNc .IcT?"#  $i-a ^yṼdTc`yl 9^)ȴv,߰p a)ԃ2=%<}| '9Scϛ( B0pŏfW5Vo7ݡmú\GrO7H HC<ɜqJ#&wtpiB̺~2{r̝cN*ĢX>6-F2mtlIL|υzb1۟魤dR72u|"FϠYl?]nn,111ܾsq<   B m(W¨O,c<8ԉvDܩM+ >b8ж}<}R(M>I Ʉ"Q8Kī6JnZX6l^eWKxR8^4‰@n*?"v<CWr=łh:3&˘{> `i#g)KhlL\m m2 m~?ڞђse%;ٴGR$gMgOQŵ}!A߇!WU:i.=(4#f~m6T Zڀ|<bAEfV:/d7,BFFMe%yGZBf[Fb^iQ쾝gE1[{@M;;۩3 >aTu*3aΜL:=(j@x4ɴT{*M931tumٹ)vf;mr:òL]4ꜻ̜#k2u^v2Yشv vl?o7kG>Ϡ2m޲ܭq,X=KV  ~;2 L2:Y%g0aϗsN#&lLrp9WuQzVV0ŭ׍'Upjrk.mYy!LAB٪.]_"n^tUP>wUlPNеjF豎 0~F'ud mz3]xߧfYi=g-26YBj jfxOg! 
R_MEY%u PZ**hTw1:6FWK=-P$;ikk{H,Y餾j:3YPattt0f$=] Z_[j lk6RUSCcsZ݇=d6N[{: d*jfL;@ͻ3`2XdQ>[r.ƌB頾\ds/  /US}Ӹ]6@LTޖͩ%oKcj_/2w=?XNKY "@NJ.iKzT!Q^LߗǕ̴^{><*6+"'3?_+رL&,4w6٭.fFHW3<9+oØdEӎL؛ϥ+ELNJ[)i ).0;" "SYmz{ ޽by>y|ޏYw}E"Uz~sA?<12JXx_׻)dlja +XvfmO7c.WC[fJӖ\_mϢOd:TfәQSMAYMcuL2xz9ɺTmX$# gtRW˙syud,~ݘV,RwʮdB`uZLb5rN%ۄl\o!fMBSc#V܊V'!G"h{S{FQwbb6Qq.F AA-Gnlʳ*{cڝFtn-!yUb `ծlruJ߄ѯnnyڲ/r] #,H!e#$G=d*;-dWՃD"~Y@ma+){98S׮"nHK|C|J6m:l$-y*޼yKvQ%:YF]uG"ۘf,g"/-)9fN|WډwuEHZB<)*&l$3u*޾yCVY=FYOMK8qA6cf,#d&&!Zum  " D.i&8ȲgQyLO'f1U֠ezMRYs8x1ԩUH`bShVOgJ+@sg* RI'WOds::7L7tbޖ"{JEJ1nPIX}InO.i=h[N*?lڊ'"B[ >ή: CWq66q?؟MkVp6.Q oa;ٺa ֆ$>_-8>8=ݍڍJ'JITOck _`+۶zfCY`69;6g=.cddA JB<]Ij襯>u+pw=Os88{+Hԁ̙: G}5r0‚69m(n&h&Dg ;ߋs`1ɵ?l@)΋NWv ivf7zȯE4a@KB F Zt2 d6b2\Om#NIVhI"(dyݤ6iAnYWq?n1(A!esͺwd&AZ2"y;rU+9iv_ƾ "i,"#+׹TÈv;ǘ9kAnpYq=q;N* J;="Qmݹ_2iIZ/>>E5yqt++=w|hcoJpp {d:2jxun n:8PO;n ,:}2^Yj1-uܰeX2k1fXɃ)jZM}}= ?&h&hףVCIYz,?^6~Gݗ54O} +_c},ִ]gcd[Ou>݃q|?A6ms-akfນ<=6~/wc#p΀`i.KhՍ\|Eov]+EPdLHyk?크Y'R.|Ĥr÷s@N?N6 KV8qI"/ogц`22p^oH~xp_팥8Ghv$u(LwU*&|AΝ98> Ȓ'WcmR$y}{9,wc=z>U 7v <=#m  ' B`60lDl2b00M_Td ]dmf@X$b0YǵL&[MY6ք,+i鋦\ۚf,Yrf3$ => Ei3ZCm(Ȓg6ha M$ۻa1:{Q~dIO[G&?ζt $ m]C(d@c'IQ3Ke҆YQ #44ZZZi.!E e)F{: ] :M/}Z~dΆ.Ftf> iD_'}}4vTE5u֦ᣱ^HNhF6' 1-=Cgw5h1 &d^>~ذn-^~k_j̈ƒr~,C'px$:?YBتlnOX %4f$nOzzՕ)h+cٲy3T#2 s~IP2Cy1 Y;~ _uj.3a? dNK"sW;xՎEJ`zh62#27`fK_0p~BU~,)mma21ӚxvkӎaT橈OZ5Obj~!5w:4 P_fx|_ٝ_d9La!@x +]u F:;MN )-Q6"oӨnj:WYx KWxR IIyuKy7n%[2r1 D|b2e=LKM9*yQ`߂*9#CjN*2yJc/YHMcadd _IdXGom,wDjen 4wf k(o#mo3Ut7V*[ŵmݿ빻^@1HwFۧsWvQ5c5كE2#Oy2>#FձQvNfDkױ| f1Ʊ @,3 ãh¨N4utzFgWl-25`'oV-|_}p[&Lp U {ڄwd8ލ&;4=X B,mc ;3D'.NI+4TQɺD {3DJy-4spNOK?[G*XXa$hYvV'EEs^ <@umӹvV_arKq<Ǭh!j*Gm1W5j)D=@4BW0k",N P!72GH6fimSavWgz141e&{5{xbM/2"W%-]9TRǺ~R`{0gMNs]2dS0F: ш ZjhbK,fIXWO;vN@!^NzEVak[9;c|JĪhj!FG/NOfqѣ~fCM_;<:B<0#`w4}MlcKA}}[T0] 1mOp02muXe#֓cD}XG=N!Dq6u 3# wktchh$EϾRy?bS5zW ^,=:Q<sC+"cC04هx8cF8.?A5Ntl+/#Nklpe~a0˯^GaLR s,| jej#!X`=; 0~vZb`Ⴛ-_}f)gci#.q9쭴ho~NX1}%! r=u9.q9_M]tF< 5/-0%$#hD@C='ҫjOo/ n𤠖z,L\X?M[q%w ݄9! 
'boܲrVtO;SLlݰjG$7,ۄX O~!-^`o/AB<3.^T/LTd7Iody[[Aoa8.:K+xHn߰9V0tN?S|ڧX12T; PPXg1>AxYIJXNo^8悾#1qW5ۛ`?okʨXIzxchMIdKmZbhO`?f8T<im+ۂݥI'D VfSjo8y:pCMg9EXr#ہ[|=e+M0NCi~_)$ENFch`̓eV:bm}&/s(#A O_m `Vrp >z ZRDZdk"~ a`#IP1L[į29ܗ ;`OBzLcu_ǎw)OOfeaelH4!Zw+OvQA1+vN/DK ţ. lF7C0št?7b5FDHx5qXf> j^Gg۱h%Zo+o[۴:~Bx4bd#p+v+6}`okfv/"Z5Hf7|D_dDQd1"n˨y׭,.P=$\1M'jukJGpK @Fuvk)Bԝix݌L܌ eb|1cK,Ak /Ƨ{pչIN>OɜCq0R1-̾{'?P+bo73CS[bU/bb[s ۡ*ҿM]aϙews`Gch, {8W9XĘ-ZƗ p7"tҐcrbRPUrԨXx!S\Յ19C ڊ~]o}+46%@ŠPo7lo螌PiaJTQ2X36,imO0m;ՈC{4g~MI<㝹Ddb$Ɨ޻mMs(&6t/jNݢȭe%6.PϏXoڙ#Q#os~ adH˴Y:ɽFqrclp/g? ;=we'=ʀ  D:ZzNd}ݕ}'+ҫ?8+=#.s8HtdKLaǛqw\ eq/bkp8_,$MV5< #1t w'/yV;ʁ8JQio ^<,Ld)y;k޴ƿW]|1xV- U>fM*%2 P0>܈CيTl`ȿC;}pM-~֖aHh*3SqO\Z)aahI+Kk8?e \˜ge{0&~LaWrH3h¯H.ɯ: R,pQ=|)\ʴ] ¦m=+vLbw}'fdQAXiNR8RџwFIKN$0$+k8$#֔Z>g -Ӧc}n2yXx5INqqtaa}ެՍr M#ܔp0=PQw8i~k8~%~Vt!>^/rG+Ԗf߸IKX~kBe*1yVd@Mlb }}Rb5^'ˊON0Jabe%D "'tA+.?R+˙YG4%akeH`R);2%ZP) s'QU2vv lmgL 7ylDhqjF\L,ԅZJC#5)?.fhH+;*giӆtpWKZw%ǕJ*2H.hCyIya&1x9 kAغD!QCB};x{P^/x$ WO\jo#چM ٮ&ɍqf/ ;}s6Ob|+\@MQ!H2qIT*E;E gH(ce#?|y9.cc$&𶰣z7{FjL1AsneƘrѵq̴uG,ψ ΘkNtizm%΁/슩1k@ ~XNi$^:?D@8ϒX@e7/)}J|iy?y/KIoqswA #[!ȿfրÝtj7\Rϟ>diy6+;H)iG?eO1#7ɻ˂EjĴx6xNbވuBAzutAdkӜ$> RHɌqN)y%"Fcڈ{F3mp~V́Zl Up;}>l'kր4>-wR_d9NV6ZXP-81NvܙVv$P3JlM}S%051ݓ~|mmѷ!~ۄ[ TK 7x5ږj7񤱻 S0isFF7<78xr6wtdZ SܲtftGT?9eڬX&F{YL-,Q`gX9Ǔo76gX7w1yfM; $Aoki'v>(F˴-gNn]/s!B299G"5썽>3m=x:0-D܇p2_32 xK0L5x%ntKѮf&&Y(fqԔ?cSLNNRSDZGr%ώ>T9nʔ0oB;sqrfSJc ϥ!'tY( !u5(k=!ߑ_hL#V=C+4P =P^Bhyc#ꘚBfnfnbꂧX6u+7"CX9R< b8l?,Ƃv.61cj먩<9 ᑕO~gTt'^V^tcm@PZ9sKT9y8<`PRc:̟=ɊbiFe[=t0SS/L!}{PmlK[dA! &[0%U-SMigU+#hip~* pW|H6p24!E^xziQ4U8&U m_'"Dɉ2_2%ݡ4չۮ%.Zfa?-9uԟm7ǧe3ygJ8g|ZIhCPiR s'=Sܴk&eF(ΟbarV3[K39}!mO Ю́Y۹߰@_}ael?Ew!Xz6Wro'h!qCs) j߷&hN!S6A peڮpihHSgtwHD>}y?Tޥ;^%/ki|zT2Vǭ fgyK7 hcF/7nUDqKN1Ãw3OJ珕?ݼIF-Fl[!&%6kr`zKē[ 5>V7?T3٘!Lm+P*?nZi ,fkF/].g /_SBPkb_hH޻R8Hs6INyi.e=34azW3 :L[s~qYv.ysy*(d[< wM hxV:gT|xGAA!ͽ}^}t_>)y~ܾiÛJwt9M?ZRUP[|fM/CӋ/ѵD No05x?%c;>$aɎi|?ar'nˮ ¹c6r=oz`m883m9FdrPέU rx9ØܾEtz{ ܳSh<+j#. 
X^uӅ;# =\q eY8}Mh֖J-} qeFq`F÷hxtEfEsC+z&W% iپv͊Jd*.AIɃ>,`k8sP{8[ѿyr&QtnVjVz$QO WG;WS91TfCcG'90ҿPz?27ݤ}0sY\ ۶.+J9``DUs^F02/kjj>*ЂP;Q7>"7[fBxb'I͉zGPZZ@#5|W6^ԏА{YgMLݗgqonz5e i~-#{T4S=GHfJѹͽT~w68h,|QnBȝf>B%TfBMz!va[( 9$  l؅ pGsu.z"HW}mdK)z_H?.F%QSUFlߤ}b?l2~a,t^[Ġ^62OVT-!фuywEU}etEɪd'wK6sӁfʞ ưNGb ۑwR&}2LF. 0 CoGt7а4oyuxäqFqMdfo<깝: m-O) h5T\Kk/4.\md`~Q1ϝV heŢ5m篁]N"X?$%?~j_X`C$y;w;M*!~!~$w< y2ΓMxD$qwPNE6'JrRسZ<*\aD@H}L X8U2wk`١ >L`E G~Z d<'-W^f]%%3U-$#yڼTƒ~"(GR1]Jf3y9#fFtTOQO5E5 &88YLr…911D9*F/|>/DO]*`wᄆŐG_vFyS^t0Q󗅜A-ݧIKC: *p"тlg(h]Tibv9D)h,.)ǑtN"0!L?]At k) LʸɎw<*,%ªif{,G ͐$=Y"%U!=0"5}-SfN؟nntaD'bV,G?K'IX)&M؝Eb$aB횾)ĸQ1jBBCC &(SCNP&ҩC.Co$o+x Cb Sp9uS~4aʨf<p vR:˝eUpr ˔ab*I^*j^eOR3.X}B3ؔszq͘diLdǃ? | JlsC -^18}X zGHѻ% +*5i2F>j@ARMK(xj۸R?W2mW2mdrV";"J9=MJua"+4 *ST*C)\˵)2!pMi|B2JF.;=P.-ApJ $`T9TJO P/T_e4_  wguKvZsO6)j3uӬ<4%G ]Z*^aeaM~2<3}\fm͟WLGU?9ȕj8[cej:]xMYg - _L:T._^-muz*LÅT4ץg53SEm!.'\m^ZkK8~d?YCDv2Tڲl LZNSg27sI*ۺ1kUrP =͜8x>T|N+Y'8r,[SW1[]x<`Vg`CM!B)m 6sv4wvv2tJ֕D*Ì9@с;Tʯ<\{Klo)ر\̹$NsI 8}p?ΦBrr2%Wfp1.\L$^fE֥M!B)mAkk466iVnchh_aSA1lQAyadw) 1*~-u:z fv'_[:W(XnTŃmޞz}]>TnsRڄB!Ҧ*z>6h (|0TT5q@J[ArŦ[ə23=vB߇xߟFIJ[_DiR;7Ve|ci-;K+xxmfn6!B'|]Sckc!l(BHi⟳xhUkk۴k~2^v/J7B09)mN 6/1fx|FJ+Jy`u=.iC3_^ڤEƷrx?\=RڄB|-<_ FQ}\`<\3\RϏS˫1lJ[ġ=KOwkf9U󾶸}/-w-qV5M!ϫ (Tj~߮*7hy&vg nKm/,+7늙h`"U (=R^]<Õ YVk_PAqiBJb7rtȪ].m6_ޅ=̕Oi_ -uvQ9ؽ-Z _Ƿ(vr䂎-es雚+݋yb_'SֲHiBqs*(~p(Wq!åt(('Up{UT%V/~Z+E_Qrg~'.!hPs?!ރi! ˥%˫3VKV\j klTiF?#`3W}/PbC{xGhk>?pb~>o7P}U|.t;B Z@Q=,.{cjgi=WU!~F;07<8l[Y#%B)maNn g̛4rXzx~K}vN{GVY̋$X9YaaE|y;~۞/VQ3 \^hU?JO5?Õv'A}Jxlu+;KZaDJ;sts:`[|2?&opeѪo>wp`6h1iֹ%Z-_^VOր X #5۩4yiӮ)kyxF7m><[ev0L# ΫtG+;9@AN/&o'7*^O4h4q;#s@Zn;7sgdٍJ{|EەB! 
V20J;Fȏd΢8KWƅ$"-P[1¦rm )Ҷ8WK<9f: 99՘#KβJU|j޻mۍ|$'#ǯz(BQTwz^͗PaFmT:PUзv*TK^<^7QI(T bfΦ:>*]RwӅYc0)!ES[CŇ&€ɥ­@wF|*yUIu58o1ea㡺̐;T89]ũ];sC x)v%B)m5)I~,*NL^KVifEsbs$q{qbv%Pt{yYig/R\ފ۞i[^·٨JFOq9T UVV.uRip w[RoOqrB+)0)`Y`AZ[,ƞlLN;p6~OB pT+m(3ܽzi[67-} Y-d^fX+49HMiHWn_X[ñ5U]\4:y*\REM鰳h/o{3r>N| vP9֪:=b~B!Mgԗy1Lƴ)b,\}1N sX}Gmw-&3Twp,$+N2JZz* X~V/;;Lq/ VRkjU=yΖ6o3K*yub;q^Z ]Cbf;S69w-M!+m}%T#8N-voАγ/!K{~##;s&0e> vq/,|"P}j.=( MTǣ~}O>_aǴ7Zq1qك*>yh7Лo x]Wf_T'vt]o[Jm3,** Я=2k<>L8Ǐ˫`s!TڎUq6CU[%PU Ue-˖<ЯYUtM]<-?3B!MiS7btp!+=&~@݅mKbf`ymaU1`Q__GCc+f*_}mASGhk sߏgiT~÷d*vث4"s#*'VP5[ONl&?y||n!?N'Zq)Ssxr}9oSrټy|R;竱42{ :ŦL5rAϴ{+2OGh+ P (Q9} 7&3}ul^X̂+y1*D,\;|b^?ggqOD&?_W#<&oh6G6WݯWVHpZ?ca6!Bﴩ~>tPG͍sEѢ"uJWY|t_Ȭ|>2=GcZ|lfKuC'fhvG50 |x1tNh>t-9ߝ3~uC{ =s~Qޣ](RڄB!nv )mC3O ϟݴkn7y{J\!wE !B)mBJ-&DwX_-ΛM!P%U/S&ĭ/mh<oLъswiM!Pm$fjVa:盰:@q<͔BJekoڲSi2Oy9ac9~EfB!~6'v;?W33JU268̟r۝!M[\[|56c3k?Ggf9Tq3 !#͠WRvf7ij&(bͥ.USyrY)j 7WTRd!?}'jY,)"!݄ٱ--fb|%h"6!W Y||FV$nI$ڳ,CF|&BJ]beZ3S&VcVÊlrMny!*G,IBHi1cKRv31pz>Bow7}V;* {әJ Mm5uN_$ѫc Tz;ۨ#nX3+=g< hkw{F 14[yF'/FP$`d޺{]QڄB!|> vF|*52UauBNPs01`sc ]00pQ~39Z,䥶p\j>Ҧ=~"/QUEul3/ņy\40Pil`MDKG3}k6!n+>j.fqL6EqQ];c'p6m"xxyls.UUQs9<NN2w3qMx VQlr2P˚9aLcFc!9ӧs$4^cU4l)mB!THKT]]Rr9eR^d;:^!:"Ǽ SXr9Kcsbk |<=$,9g D²9No _z}*hom(~::;6&F2i[H?#^}<"XL4,7-iYSߠ\=EDN_s0{4&3la3?ct]Ys&Bi6VN/)}< BIi$bX""?? l[>ƯϾ|L^xq6>TUb< k\*4КuW&Pߘs/&M@ޥD>GU^/_!h*c#T.7e ,:@gU\s{^Aݼ2k6e {ϕ7`ïOf!M!B7m}lS;w63gLc򌹜,jS}!&Cbw$ѡ"v*=Tfb뱫xE;MfOy̛Y9a! 
xclD2+l;3@M:!km`STf gvDNfx+X4uzSL1.c%+6bxرdS~6Yӧ1pN-]|g\23>ާ$;!D7$1!!|oob;yOіSTXZWP|mA6AAA?\;IJIo)O* OIg$U)U d)O./]#Fʢ*lPA!IqMI2*V.ǙHJfdēIdE.DI$Sߞ{[7͐J ɷWTAA A6AAAMMAADhAAA6AAAMMUd $YJ*' UET>EJ%PTndRDIrI **(I!LYsUɓH$QI%IJ|&w/=NI>/%"μx,N^Vx4L(8B@$ r6I8 @zC>DAɜ-SSR:5 $vDZ-!^gK#3Ft8.QFr^]>D1$dPrcrayE̔7٤e'*J5Q5Css r.k.5(o$H z*q<Ζ[>4Z:c4|B&T,:1 zJޔkn~*9_EbkyTrꚭd~oƉu$JLR]QJL xAح~So|r/]/8K֧|X/,-sܡfxVo&ЍA6A\⌆O~ϟtt5Wa4ocL!1{z~+ 71LV{-`wƉs1D.iAod%HTb;?ôƠ}xOL,F[;Y5e2:D^o AOu ٝcx>%SH}V9Y2ˮ߮Y6BIfqYDK'&#e|f߸9=eJvRx|,l'\6R<)i2n f99#&ЖTYEGF395OwK5M5lfpȳELzpS\Gkim/|0ۈazwɹZ*SZm]g :C&=e5@ P*sm||%F.႑.# &]hoR6+JZ_71=MQICv7-et<(mciP8Υ,etL9~xX7Dy FjkhдZsX4/i-3f`g!Fk4f|N CCu&|Z~A(uEPLR[a zA_ZhOKُ)Ȅqh'V1V2!J^ډu}ϙ:nhӼֶ kx:UOɽ)']zĂTwΆזk\KYE^WOpl)ob|Cwcwz&L>2-ZPh$Ho mrѺ oܮ"'lRa4[[h8TFwbPfe{0іͽV^]L8x%(逑Қ\#tj{/016P1{ ׿B6ǿz$-sF<|aK駏q.m}o0bi1hAP[?4IpVblǿ'?.r?sîUxbcn:?]&O~⦸/8ŋYZk~6N_]dw>de;~hkV{tMv2N4lʫ+ "Ei,װqv&o, =vI)`/HyC I匚gTw0?;F9]*v, ɈlHR{sCTk/CTT`_۠sH%:(m')IDy؊w_8byDs2e=3{ llOS]ZvJB ORUT?6by )Ʒɜg/gUrl㌍ssmֈ@'ߣӱL*&]%[oC[oHlqi%xfs6p|DiM+ I"l@'MD>vwYs]5U!qI=U*_VF\T>JrHiǔ#xQtGQ"w/P4KvZuTUP4õԙj-3. 
,9њjhX&4ȫ0gmɧ=ē 9&4@U"1QEHlxnK,cc-m1n)s>a`rv^K^v9\FOi:ulqj_a cmD*/nс$kڙ5>p1;{fvȟOS\@+m0Lo?ߣ'h{FtP]F^ղ ҧ'77\-zy^7¾ᵵ U.r*w ޏ|{LN&}|w~ls~ṏhF&{[?pкc9]B>0k]y&+a^g?]hk.#2+˛kd*Go&xd=CQq?^c.nYA6A#4ulUA7Oe6%{xfO oCs,c|vc1R,-bd3 @<kaXfPyYiw2؊ȅ{8wK]ɧrM\`(}ƒO2r@:C]k=|a>^(95Sy#y!K dkY ml+DnޔS%C(Q7͐Jlbq*M2@QͥnBaaѺ88ȀY_C:+:jM m؁vxs :Sh_>>X,s'VߌB,DF0ߏsΫ:$#~Zfd/|DOGqd6@}11j m.gx9ĵxKFh_O+ і<~U( U~g.«׵M̌nidW MӷxVBV5KVB1ν< z+X*%4yhm}T.3TDq!z\}?k}>;/?fb;-oq 5O8A:9f[ FoQ^aX2ǵI6 9m?ah;^IFxke`C>-n5񴪍ڧ<duaOEv}dYmeE+ L/62@uTj,Lo IOiewc} J*HrE>o J']0XfOC\ŔGZ/}Hs S:ÀHC<|:(/G2 sVt|.d0 mkehs///s/Ǟ,rE| #e+)~=S@VOE$298F)3%sG_0fYcc_3m(ցadvmֲvzA,EFHXNAca\d&-:]mF %ǔ!0hsIpVGu;oEHS.p [ ?rKewsy,V/I,ng;hжٍҦǾŸms3Z4,.8'[I&)gv@NG [>:ZipIHRY03J*/33\ȳ0jb824Ƭk)2-̣.0t5J[ggnjhiҷc\pg˨Cv2r906[+Cq(\I3l'q' P v暜 cjǸ{aP4aY8Afpw oU^ۆoUxxCFuz򮓭iZ&9OQoIT;MM:tZMbhh58J(D7&iдyʵ%՝D# 9)+tv5TUbmʂƶ>' m46MS~%[md/PQ Zihha7*q0<ȝ#6?[n-Bk4j[Z7xM7(a ;tK۵hioiǿ1 viVh4tG&,LL._@  ߰*e/VF I^R;Y?W-Y"O~1Jm6nP$]>8Z;$Gc`-ۨ=vhPwg/k/y3.>v^n!\oRor[:\!E7dEyOT߹ Untyw\zɲ~]Ufa~r{jawE._XkJA] smIShJY>.{ f\ͽ^]"7yOGJVN2}td 7<|Q77BAgX?r8HQ_1y|*[9 &QT 0@PI\y DvR9lw&ާltƩc"q 3l,f{~C) `ڧbt*>4*_7mm $#{le찶q{P9?d:qxp _BˢAJEVpIU HWdE-Wsrz/UVTS8v58%]%&&|*gv'_>  B B   ЖIR,L1AAAB(`ii{ǹCpbscP7$J,&K]m..'xl z rU]$@β2G{ >/#g =N:j \N6h4CI3jluoq.=gx47wiYgܳ|@o0ޑ`ًN#ki^okhh6ɽ,dimh;XI؜i׷r젺! 0c-[x=i(KA:qq9G72lɉ{ ˅=N,/B  %BrMQc6CO7[,lL:(,s0f:[; AJh2F:#un",I[$)YQXngaejn7":;62^$˵ 9vL?RKOnC 449I + R6Ecτ E*{Q>im7MY46[%2)|cvHL4 Iʲ$-l2t݂i~*Dgt20xNiL.G6%!&  |v||xmg|VPfff "&̣FGvk|/ٳ[g X|((l.Xi6[DjKVk#X,8=N9w^s-}Eiv<}{vN' :UoiEP;"U'&epw34‡$iq!<GTWWSUUw]#col6?ߥOyy}ԳyWwm((ϾLbv۷S=FG9y)jQG^Vv>BC$_*jo0o-#RUagYL!/EMۊR >v8N稌τ!n)ۖSׁ?6eI{Ot5>>Sʌn !\7o&oU3aiccc|///VXX7Ȓ1-jp27^>y};x%Z&?ugyxU y[9Xryg8h~*nsf j7sm/*6FqYN t~m'P s m9#_[/ﳳp+yy]£GwI6|_h}oy^=>OP:Oa~^n1z0pegټi^hM%;11! 
"=g!憛8q*{^ PǬFXj>.ď@U}oAZriPw$gL8L]JDO&16JćA\jEq X{$DGq'9)qDĭ}_{Jʡ=yxxrqɿvhӜJg/;,*N5GgrOkG[@ #8[aew*X~~ٔox^f)4*S.L;q &gwf쬬wtS3+v#7Y}T&zْ) gqw'G,͌q/e\^;̌pt^>jejou2Ќ-mbo1f$ң#/ kgܝqKŽ;H(J۾}p-\..JBkko[[Sq|{*˖,#<} LN͠Piѫe v1!%yani+v6XXQ|=0ͥx qsu'{[ Vy4r=E[Z"n䞨F'߱.t\\pvt3s63 m 7(>Qye&y7_˄x;cieVfXڄq誻Av8;/ kl߄6 *zd(?dy*e+Xk'?`vk/`].\\\152%4e#ƴj&k'O\]\pK4ɛy~8&biaG>傹hIgӛ/^ȯG~ϛgWhjM"3x:ķ^&$&o c1b l<9hǓ8˴рUӽʋA$Rh[dH*4Fa69 :UTO^mC QŞw=DE(}h9Mܒg$+54~α15$dqɿzhAew8]L Yڄʹ"KWk׮RZZʵ+X뺓$h%^YGk WȚ\5OUWQ*S<ϫ|jC`N;E͝2j[xq&CS'\/لIE(jhbC ٽ{vÞ`eA^lĐ_-5.CP%p ʄ|6?Ds0FCXax^HG9މR)cW T*2l/cVGoؿ6Õ+XnHxWpZgYqt3#a76AVC`)8S1\?ÒU<|1":m6|x8ܦ0B#]KRa_6~Vd7ۂ8$6W3tZU C'!0Gɸwpѡ>;G\XF[- l#IMŪLNEQ196L[Kar5i'x^+ qv<ũHy)kVj5:>:J@v0xzeKSiDXefNN/PkX^xD캭 kb6*!Z}HzD.&pbbqwu% l3C( Ϗ}pvbmO | m|-&I$"&&&8q{p^`o #}J'&jo|a&VW(fnȆm|K&/ uu| m1[OC>Nc/<=`[w⋏-eqh3sfO㵊7w傝 N.x ? 1ʑ ϥtK".\&Ce^/綸b-3*گL,@S%3JƁ- @om9x-zzhۗ-F^c^q11% 'o=@+Jq>A~H2f4zЌ[+w@2Tp&A mhGE"t 3z43T]9Hd~~GgpA;Cف4\lmEܼ9%Α3Ɍ? Nb\+M`vYjwph$o@- +'wva~KIY r="A=˳k D&p;z$?\h<," t;^8Fa?T|IA@\#ɅG8ʙϩ9׎HjtegfqN im[/!dO g0ɐhڷMl-05_zI t;4 0"2m;&^CNT;Ux.qlhmmC Wu &xmz%wS5\r8(''qF 7i+*_չ0fwR}&42;D%5}h3wh-1޾~4}o(?wPWVy{~,ɐ ˗/322"{mpǼNu!{S$gǥ^fɽ@ˉ1K}BWv;nʉ݀c)>,YܩkUz;#3"Xġ 8&x%pTEC;#1#,>u⃅98 ̰'4N> =T]d_#Z,Kaݺt8aaW-<8W,,*܍011/kC3O`z))^1?%;ˆ|akbH>z>JV0\} 3}4ZFPF@]t.C]J8bppvBDk`ejU1r <.܂"寓7=,5<$r5.5"o'ъ荇izG'7Slq% u/ԇ{د"[2C+ޠgt0'R:˳Gi[@j9ϑJ=mNy=DVIRB A0Je;}מ!-=?chEՠj*/]v6z`BBqxj)oJWfk1OoBYe@LG(Rpč tXDxv,[S~:zzwa{2w",AruD^ =0t&(++y Vy`wKl  %6& qp5acax%3c#v9;ˋBNLk&۟j; $$celJ`:j]l07"0&@\YlkB61*;b#_ぅF>>vjt\&Wv$%%!7bFݿɁYx9bb BNhsIgB;Bv'A }>m~};~ mH{3/_]+$) ScRKЖ튍{ MZ'21uڈ%K,9=zQۍlflggz\mVaĐNW?`!NK (= t<>%\@b~8=~5+ߺΞ9ycdΑIFځ|}OGz5KfO6mVVJ~RhHZ MOؘN\\q%13_8rIZB"I8A ~΋v~m~={rX7jyWuN*wںxʓ!t"@bbI)U4z): 1`1a%*fL# s%O@ٳ&znGHRbY۹!E&kҼd|@'i1?nR_(=P H$߽ET!u#7xbccvE4sT^=BJR q%eP|1rI>6}o!c @̖89ҁz-;6䓞OmVRwSǥx9jݗsukSVLXDZؕƽ^חBFf_Kzj* x7AIݽS$fk4C\3C1_ U<}_ʛF9!{Pch 7`am_H'DaoeJ7BG|zLW4#$<Og1x^1A$&Ƌ_دЪ{RB[Shq[-7XĘQA#c#530/Lm9Z52ŏ /ne H>1^lcjJ>HC m_3;;BDR8 
h5ӄ*VVAZ%޼JAvo!-jFu1T}N-q=55ZRB4kTJZoWcVktn#O"_׉}QT07;+`q?Vm?_ӪQit|KNKWt|*_[ԕv|ݿZ-}7AeJ8cJFfL>Ϳo]!E}~qY<6~}t"e#A܃$n_J!??v;K*Wz:_熉!aefT[lO!=#E~d:znoҖ0Lb˗E\+s#[{s#]J0G1I8 @O/aY_؃F渺bdh`Ghå,_ ]!ΎXZZa茍* ,}!Zڟ뇙ތUF8`na7+b-6fo ffΣ @ObXmlfƘrE;?0)I$RhH$D2\C;ᯙTcGzcsZ5=׮9OpUN>%p;kk}ƒ(XiY)[б@6ȝ{izF:v ]44Q..oƺNC ,oK9SوN}{ A߯?A=wͧHFž8zjjJ*RXA[VT"A2yrD mMRh 266&H+ChDžKE*R=5H<(M4crZ=ATKE*R?oYڨH'hbC.n>oDjwn?#fikAC"&H$D""6E/[RC=[唞O=gxPPMTID mD"H/_".qJmh=˝5ZGvrhY. &2ܽWѫguv &$s;tEmAvF"k)F[8c#11dlû!p`6Ң"s! d$Jɭ(壔bafJJA1Jcm|,1 $D"d2sssRQJohC3H\غ*x҇F}e%8P^K8&S^Qͥ&nO gSu|H+g=3-/"GO5)e՝Θ{w>lгqa=^H'/Wq~MOK~РPFuH&H$D"63<[FE`Ǎ<;cx*sysF_^%o^&/'dB09%/hhN*pri=eC Vx64bRkhTT$.t5q1tko͞=\*KWLHM"H$D"BLc?AʏrgE@c^Ecibؼi{wf> Od۽h}(O%åT&?n5Cn)P^"[Q!1;&>: E5wJLRZ#6D"H$D m #pV@l\S~<ѣPP~4!19qϤ[d/1P !ǖr@>%w+xu(&*0N%XrR^PLq эI9u\x9dI]˦ۑBD"H$D"6A7Kmx;ّh({MgM Αt)\?UTىYDhať.eanBR:"} \{)%{~A^dKBP zz0*#؞gX")kMBU -d8ᑼ+ #X9-IM"H$D"ӦUHmu u/hD'hC%glZV9KˇwlxK[_1<9p2UG9- (AG47O<zFx%u oFi`FcnoꩯooKRhH$D"H&zt:_'/A@f{~a[AWű8W /ݷ'N/E,Rhӫflj?NW2#:ÌOLE"QB^\RĿid3 19+(*lөQ|hąEQe}&:Mz~APh]sZj5h:aVF71>4ČD"H-}ZHMKsE֦'LDL"O1<6l,E-iyzu)Dx3xz w^!;CM $awn'O oV3HI$$8D6ߤoFgZ0e!62Cgnq@iP)`I\xюcon!AGa+7z>9pQfa'65Lwq !ֿ~ADm> 'D"6F mwĺcáSgՄB@ 3jjZfgG6@@@!!֙"vifd|ϋ2`K->zk#1Y QAHR 5ziRdʅVc*5Jټs (sLMM1=3: ZJ"ٖrMPϽe2"^~&'rI=XҘ[б@r4Δ,"y/cU`hHUS .Ffzf/TrbESn̍x96F'𕞛zFkC闕$n?JeXOl(}&naWM,D^V<6K1In<* mRj`<2bN_xj,8j@(sy4Jdr:N#*yj9q9rj3y\B@4SS+x+ʜR HH&BK\-wimfǨ?ABjkedDij9?5vغPn$Eql{gw{{;Q?oX:q>.`.rv MHŃo켜؃eȁֺs.6ʋXܚC˸td+>xLR;L"@V/m&)*exgVZBDŽo5k/\ϥ\/|}p 1`lH/ʋ4`[\=s WQ~|WSx q3 7$`fhiRnVM;1n{vjn˒6Aý=,YC|ӨQ(h2G?^t'1͌wJP lwuos"/3r]u6 wak3'opI)+W+Oc>z_ngr+R|Gw ͷPNq(u&E"}a\(}}Lό1=o}q&C/jvaf:UrzUb~:<FGFϣ031\B 6ZP_+w5Ɔ&D,_nLL:2Sp2w'Ue9=E֢[ן [k?>5z>%'V vW 4K6oH'sk7LNaO{C*N6-r=O܁ZgnسԸ V.3pY-Je3+M8\nm.ˈ?Th{qmx˹X[J|zNV3%3LG=?8h5`袋 v~q؜-e|ma{%8najFh+[H 9{)IXX|;7cgnW`)(d^YY^4i}5?"ٌ+n%"ȕ;4>*c|‚1_MCB_~Yc|17̻k&"=0tBm{>6Y.uBHZ9:}x0֢a { scǼJC߇r^Ewh5JСzZƨ|{v |6#8 ONД ԜL'08$*K6QrbD÷\/L%4П _3R5p;%uz!sHs88/ CXǃ:.3:vaO>µL_IMGB[D&qU8mH6{l}ٿ'+l,Ɂ)غE|0z3X,!D,|Kt#lI_mo+7%tR9o` 
N!Y=; ؐ͆1>yX`{ZD.׉e;::r!8:軉w+|73߬]< goJ}R sp<t m*N$m"b㵹xʼnymgr^(3 5O"s0EL29>J%o+y8znq{[zF[]'/nn'qCobtUhgOe/n``t&GUK޼@h}46󉖞F_=Ord& X_|U$9Z韔zzikDGinmetNR__O4VFwG764 D_yڃJ"a.n%g]LN 8M{k^5cw%vTN>:;:h|Q&zqL)@Kwk#Ϫ}/r:߽Y=}Í$EGsGF[;#3/.wt7~=}5 vA53̫g<}茊EyDאA'h@ճJ%B؇د^Kp eo&<pΓ'ZbڭPz`6ygg156 1\eg](`鵟6C1p@I 냽v4?s³8y7&9fjwX] vgVl̰uA:ss#|0uj]_67g#cS1q ccdyb޴R3$J>DbWό " óH$?(% 1\ڵ1kXʒC]} S#"Rرw/b 3t_ u?aʵ*4=w2fr .>o<DEDpF5wK@٣JNo_j#^ no o|!Êm'"+h%<͌1X?#jŏޣ[trBXaͅڶ g[3=yXObޙܯdk+`Wb !LPuuL`R) ZL-lûc҄]e4܎7'~ med`+űo?SZCԒ]gosKYD?,q\JWk-~؈#<( ".YlB GGRx#OZBaRb]|>)d$FNcDGnVbllJAE !ݎoK [6J[̷G1c8舥;[ ə\-Xk'/rOT+3Sk֐᪏_$YCP'IkO-09<۟wjP56Z;[t7dgR{(m7ޝgʘd VF026>+e{qq!5.k_'ϝ ֑≵56{FrG1[EMppt&!9@“v14=MEDƁU)=' I x{`ғx.p\}xCAJzP ?$)6Qغ6H\{!NjwXGX|*Fo{FaNr(oJRf:a>x9BC"6VN͕Cbck~hG%\qss7z/hY/FL>@69|Qcʍ3(jAGW bǙd~~ *R\B@mL 322RY8OJ[:fq|Rqc  cLƼ\I,YFEs}Ýl:PjFz*wB^l#Mrɻֈ Vy|$Ĝ#JticCrq4 &1 @J {6,#iz[;[vm3A>L<0+*?IKvy;zǥ\ɷC4yL?xLw&#:lK%ϛtN^fQh;@Aj rC !=BD$!߄fPOeJ5QǡWqP.N3I{ڂI=Fu)=IhH:wDrHWg[Iظ8iJP3HǫqGdZSN>uvlʝGduvp0iOb.~iBܑ:[`aNpB>Oq[vcoU=/2vB.˗{s!.b m2NfӅP%Mto<`:>n̪PUA+82Ph_p0I񡨺{J8&iɷ8{>3$ɤg\H'Uuhn~XMB[ -n~Tb@ m-H$^%S#0?3üRO"swk1-]P'ҧT_ȆmU4܃{ S'JN$i)&Nټ,/,\|ؐ/k,3h ˆrPw.ڙ~Qf4z;ut<ŪK0MSg6+~;}g5i4 yji Ƈ۶WG5|P L /ܫ U<<ʒ%=˞QXJґ*fYQ3W2nL.<^';n$]@;BVY}`3:Bv'9y-8zP92QxgQN8y$5PwNX"wWÇ{:s/}jEէ0w kïI$RhH$D 9x6_חs1uzTy;z{'AvFv¡wG;,O?@E>m# mg$$&ƥ3w]';%(D112IHH )m o7IͥiBmC'd[ x1 TkmdR3^IFj*;OVp9|+7OdDvF2R9VL/٭c|=Ml^GfF:A3 kȤh; w&7ِڵ98m2-x kZghЂ0˕dm*O4 \eD $ON 4bô$RhBD"H$VZt5Zj zXeRBVbϪ_m#)Q()J4:%赨/~?T'+ ^ _CTM|^/Cѱ@@V\T*5:^lZa m.=&/5^v_&~,ݪFt9ǢR~[RsVU_޳뾬%T (#@@Viߍ/H߅D mD"H$97Np I&H$D"zEM"6$D"H$)}^F޽{mZZZj,GR<х9NB.G6?L"/ B\y^|6RE@@V-*=Q'%|,C&8@V/Q)nc%N3>>c x<*^: sl?}^ܗB\u<`7B.C6/[x*QQkRhw&H$D"BȈSz>Ţ"UL?Uw 7+7/a]aJyHBLhh!ߝɣ#뾛9sKoyn#q.*ȒX#ZCBH 3EqWS)As $*>os,7dBEQ#uƔkZqqRs dK]fڔuB_)v&b((2pJ {>!H )Kκ"p,#+*ȷF<Ԯ›l&WP(^"^IVHD~;}ur\.rφ6@lb۶S˧ONן[f-Bj, z5~aM̩G:pA @+\4*3)m?%!iaOz Jml&uvˎcögAGٺ0iUs=>oz\n '׈,đ\߳rU;-=- lRƘ.ҲsB^#]0 HOhL Aáz y'һwn+Nhc`XlCΛ|7=2!]sr>)wWOÜOH;f)ÿ#{R݋6.}Ӝ`tKUf 
nsn&8lnkmpwvmLd~,)xI$>Br3y6fGȩ,I%cWDG[xMe H&HL_ͻT"4&f3}m45w~^Fs{?"vA!t:c#3 Mw#A<k˴45ȁZ`g “TbߙKeHfS PUL|f~O"JDjLp f(><̖ ػ0?׏)nhY`,+r^#V4"{s\p3✱3 j~'3q{z/!bXj+cۚLWlǧ&=u2׈8³> \oZ9uբe]QQ+&lnuxyQ!9qw[e<%NY˦b#R9 ST5uy>ncR"'y幉HkNyN@Ab2=$0|=d7L6wE(&ɈId~hH$I뀹'V' I(Tdv T38oh zvu:4a>[bmxkю]pvcuy9`ijgh[cee'V:ҔuI=Shλ ^ӣgLlfhUOwi8\xo^ډf{ks:1Ϸ2 bᎱ#œ{L?崉"1ŒO/Α=>j q:ϟհ=Q#%]_~wDs m N04Te=ƵD=J`d]EjqluIpEzH}BOxĵkdQS,8-j ܍bf[pϖ{5 SWLkDD?AqB4׮^'6m% $=۔!bcAv1 V:(ψ'n %mEV+-qb=Nx>qQu'+)Mzk$69~uOZCCj'k ]{(~MAUv7;] 6lʅn q86|9ffg#HLcZ*zSxVFJFS3LN36>ŶL(諊{ $YN% n e802e9%"ߡ0MSSBYv3M|Sj{D 2Ý>33*-<ͨf}sGdWXg5cba4xAo360ށbgƲ\bƐQX.If8ڤox޺ ш[FͷWoA& ImCXfOB8eq}rB͹r@ iey~HOlbJ[|41Y3I!.\Ik*2n52`[zƤնqs#RYZ['>:, Ě*6VҘ$/YCeH6Jq1;'DWmSlBɉvn4T9B]e"-oNMqF#ҧ|<|{lf!wlt#jY`vjJ}nۡgnG.֘HKZrQ4f:/DІϝ- ܜ!xV7xx-csWbib6.G8u8v֘+7obcf]4raxh[uF\eWM/N8slt q!{p/gιP3 [U8'NZGCkkȍ bL6s~akS}+W._I M ~\f[7TĄq[xP3D>9EHMm19g19˳n4%fYgdjM67 Q}K]66-bC-J7Q ;r AVVX^^yc,--87h֩cE!ӂ6U+vxx>e o/.Lb+-5o֏%fvnfY=/6mnbu.^fE SiZq9<ll|J=q3H$HO@1M\O*%;ܕw[ u1(([c04uц'NၻZ'tXE݇%su}-<=puȐ[O v:MBE-[% 5 h*y1z&\G&"$Q#2Nn^,F,8呿$`|֎1n'ZX%ٟ6ADPOlM'-)Q\UNQWamOA%//7l8sTy{=^i<=̼ޭO{{]]=FAQ8!/JFyF~/9܇B|}_}W} oChP5uըU&<<Ѽy^ycrs|^ / oQ<?EFxomYΒ^9+sx;P<ͳwθ=}Q81#xЇwj {8qe4Ùxn_׏@ܡ.'k׮(!UΝ>sxC3;"'!đ1M; }/~1ehqw}7چwN}RͰ9Vxp L`mQ~&Ga{5˛ᒫFdM怺g_ƽ|d*(/wĄJ;hk./G)E- 4*kI.eO}U#%%m~̀r_z6f\%JFs1 cU0LͯMwg)ɭ@;5}5f{NXH.R]S$Տ?A({+$F2];.\@@"]|H`2\YTFLawt8m`7nyP5+{D۟7~ڟ(4T&]n9pz qWG;b|<*Ɨl-X\z+2nxAg}ChuJqttgQiI⼧+~!#u,Y0Wٓp<\7<=qrv•8f&pebUNopH疕-wkFă1Kp/8/[S>SkoFvEDRHM$8/ޞ9Z=%/WM@u%A)" ?}Cr<Kɮ`:SoDw5!juAQj\ )D#8퉧-1,m+m'.Uqd J.kqHu3kGpF_X=BZ:*l09 Om'\l(QP0S'q^Sll33R5 yXɆOa܎De!Sv3W?笶E3tfajd[Op}1ZANƤa[]޸;sFr{d#"k(79&w.khA݊ Y7tL~ ۩K©3bL6ֈ>!<ʈCiyŏsͨY!;YsL{ܻ{ c&!LihG>q eDFM3|Q8 ?\mg3S8Ʋtqn)j^Yh~w3knʕز9@MN?1ޢ8l좣"{+szmsg`*i] F34ˣKN @D NA"̂ `(wbj0,m^T߈z{m%of[0<1ںvtmɧǹȏr27ixUoE v;Qζamˉ:w"cޗRiz GjGX^^6/vT_q o5/U0p&  bp^F[}s[6C $ :PVKss9z'Iayy"Ry{dd6"_D Oݚ[}>_4 sg̯pJ &ZQG@ٝ9X`zs `M֨rgaom1nֲu}XN,Ϸn`C[1n<)fw:ĕʃ ,“⬁=4ۆB5o\-U/V`ge.V!%Kcl /Cm{K螵zBDDQdw;1scCfT{m;bb]Fen$7qU 
#W(X,>E\=!e);1ON2B5svYT7ytSt`OJ$A.)w7X[&lSqa^P r+!DAEcc=ʃj^'j</oo>+eM!0[m _ ObQ }(fۈŁ 2VjgX.yo|| F_њ(N<ͽ^:p֏96yPd["ZL0>|ibds= <UI|׈%,ξ Xy)ލ-:,{_#-3@{N90+\s6" =" 3#V3[Us{"5!vF'Tˍl=[V0{SK2{Aq%#3 SzѬ9^ LLŧxN!&L+i6R0qHRJ,f_qFIE" IO4?ɥV6V> I7:B|m?cq5PnG`9kS\3?W1`u3.g4'iXF:q;dt#Y:g1v qd' j +mF&ɸn}iIP˾ț6Bg31mGq!k ^ ʣ_jn21Wƙn=&Q;mD)~o)dTΕ:[h K[Cߜڝ_K)^LfoM:U.<7Jy#'ymNaaFL7>7#5JI=8 ^#]$ܿ.\O o+hD$hʽncivVd&6CU$%%M7Tt6TQ7%ؔOCm)U=^ ]察L7x ZQiD^oil z*:Q Q]>"fMcc y%4XNjb7OPNRbͬtՕSPNJI'3=\\䥧Kyuit-q1KYN)dԱ~Qd{qdZ^\ĮJu[6w!=4wsHY=611&vPnVV>avǩg_kDG{JKoaѶ Vv`W!^"?XEլ¶y"戬j5J(Qdewe}5&Y?xkmp;_oS9YFwhHhD .[> >+~Ro_&wKT+hd˄jJxr*CyP26 S<W"Wfgc'I)uyZ}ɧ%aAh}1&Y`k(FVβHsi:Q94``oG+ߊ6a[K^E[ڠ D =w8~S>wzJA.^gqʾb([mј?3,ϵaimJޜף7pmN۱T3OYDEòv6jrhspw:zfN8 3NER(ؓ "H$?_hDQxAŗQRרyX:K7pxZb<֋ oc56jADymEԯvZ#PkkXph~_RIhٙ#xa_E9G|Y9 |𔿝 <0+ cLohO#8_"3#^mr7d6_D.=i= 2wtHjS\LqE YKyk =E!g1Ϫcbv }c u&7 ZN+1^Kbl3ZǤz>ymd`Eٌl\D"H$*im)~H&M^L;~ʼn#O2;Gb2wNhN ^[LR>}Bj^3r Vg e;F-"KmdT9 e%m\goeױРR_MJJ YL""_OZZ![^%\= /l. S˦LX3y}TA4 D\3XA#Jmi!WFRVST;VgfG%8M jrHLN$-D-Ul^`[s+/S5ƁFA_m!iXWDR=1sκFdNTH$D"y)|IhH־ʿ_C` u.OQ_WEhedjAF "o!!ooAVkq}:<|z3^ܒyF່_Ѩ_<\_#/oaxq›^n-.?BxZsJ]GnE# H$D ED=rbc-_@S?ݿǵpV>ϝ9Brb) D"H$h^APUZqhSʸP_16??exi?GŇT*5rD"H$RihKfp Cr!t']~L#Ofd4["6~X[ZanfA4Mm =rm\ $xeߕ H$D"!EVwQ|9lHh֛115n02{:At^+Tuڶv'hkj]erxmboDUwVag06~S#cL/)=# r[fkq&h{Ų-gcyMXW fNo-15Z4;2v֘`vfe6;DKs}(RmDQd`U 8J)C-H$D22JI 2]JF_WJz}ζ12P 9 ΑYG[y${$Ri:8wXf +S N[!sK;laĆ\-wH,6dmKɖX< 7cΙ@xTQ~|HG\}"$xyohr%(FP^,wMppAȉXqgks,^f:#χ,yIl);q#[<1tmvL-qT u쨥h(5Jd=6e(T D"H$?H"5|vo?U]w4s*b] BRK3y?rNGإ*IhCT_)Lls$v*k?vp:y\R=7l}X\]g9gZ 6&oJ hoVYcX=cdr;/b}sS,>R S*Ę^P-u[ ~"aa!\{8A'~ch4Eȑ s4>4G|aż @ɍ NDV]OP;W M<2l=1IgI7JTJB"H$)Ža$5>͈` 5[=S\e-1]7NBwqHQhD@TZԏ6vۺUf;W{umԮ)SK|ZɻvDo }k9<|bآ)~N?"N/L}/ڸ;ku:1qh pdV`9[k', >Xz!BTS'݃= @ESO 9U-屜vzȲV v7 jG?c[͡ LL줽R9w%9"R-ϒ,Դx|?"IVp)keICEQ6 UWmrb2]T}b@K x5D"CIhۧii2Gح֫i@+k/-gStucNj6!~Mj^ ! 
䪯Ѹ-WQ6Yp KhvqZKt"]g4 ma_(V L4_={ȶv'jwjh{MlG *IhSagp [Pj:{h($tctJҳi ΘPG=/Κ12>хtԥbqﱳZ-=XDϨ\whvwѿf[08u ;C-=GOw7>VTlNz)u%)Xk}f;Ҝ̚vSpax>wi)Y4TRy}P{IG ˤhSo>H[F|д.7@yVُ@U3*{P7Ɉ#~Wi[CyIPQ3n~(Ayp\R H}ջWƊC+=XDu)U} eኋ EQKޓ۸8:|=^_#?1Û tŝ[qlޠ>5'/':{;m'ulˍ 5(&}9H*F)"E۟jh@@n6WbI=S}CܸYE[0%AX^IfFJnp ]u#Ž9r8<,Ik# g>;_2b0Y6!=[U4ń`^`VzZ{Fvr4g'y[]?KCXixA=K':r6F p03!~[@}΀G< Y-=wx$("'Fk]XYs3&gan`I7xSm/ܠ6ff܈ˣ,;SCRZFyvɊG0ܝ]眱t%-'`;}L2BI{1fWK.aaor5onV[ fqO1ui| -Rǚ4M8!< F3m]t)RBL0 @ڍ-=_G)}d9|)؞C܉u oH$)>C)CՐo񪧴&>kЎE5?kbjGjujRiuqH8~@WIn柇@XƯQ΄Uy~qe5oXo7+痺XAϵU;&Uo؄o흋}(w79Zoݧ׿}6F9TEB |vw,G,Ӵ*õ,)9ޥ`-$RA!#zQ(T<'U+<'R(ZYWrR@RJ^TiШ(Шh,K܂e<*R J4s9@~pRڼ Qq8ԂHC=fs߃DB\~8TDR!PP)prnPnv]SMJ9DU8EPd,e4wɹ9/B0v"e ݐR+I` ni j&U1 {ƆRZA|k`c7;fE$nbxZ\`exr0e5(!kB`FQתqvpo_ԏR) ZSѱئJEi⥭jL?5zp ZEYD24 soI :mu-TkyPJEtSPVhcݍ75WQm$M=+cpE$DGuU|~/Ows{1A- n} .&9v[W1CM@h{m0材V<5> ^76>u{$ٝe|=vjZWULv.K kW##ΕچZ~XΗTATtsZt[9UmUEݢ\9'U1)maood ?sPC!Eۣg JXюhۍlsd=026F: ^PVi,'##3|М{ӳqLXh(4Ŗ\/Yq+DÕ4zCa2Mعsr!?N[fC$=<,%a 7]Zw`>M1X_煥|uٟg̹v\tB}-Hh`kK~n9su5M1ΞZ NօifXoUjc1u50 V|H.-.bBQ! }gpuoC-[X"u 1 /9kr5/m Wa-;߾յ\֗릇 Y:Q=JO&.&&(q;K;0DooOrTǖ&<\i%%Ȃ:9rH$D"M"EF԰:9/N16sJFQXcnC(v^K/N HSK'SKF@g;utM ,L1:[tϣphg~uEhoi-o7=KC}=-ݬ):ESãl*tQDj[2=Ds}ͭm̮!w1x@W{;u /q(:nI&/=ᓟ ֏:@=inTMV|l+SR|wRgp4_qsu{",12`;7Y9>S1CfTur9ܽ=K.!O@"H$fֺx\DR#Fp6g,ޠ0&p7#nC6O\9iq E{73/<`g g *}#-LȞgo}ܪG=K(oUֆ\@"ED"H$?2Q1)WgyEcn6r?О/0!6=  uconV8q8&7($ h޼0L=mde0SgNPF(d:N7RJ!Ž<<=ƧFɎ/ ]Ifo=n1. 
{Άt Ξ#~ꧾڝ$ Hh_d-N(N9HK6_z28Vj3y@O-vgLidsƔ/'M&۟mAϊ}*ch{59gp'"2WYShvήD:h;Q*SB9(On}/խmJ3mS8 LL^b5z\mLq G̝\h[yqz\j 9ԉO?':{ bbiNx 0̪LJN~yatG\`sK-7fk7r mcHhH$D%b~Δ i_ss;`Tj[y:#U+\.چdh&k􎒇too@kfFɌA#:~7 q/LY|4?`2 ]5!+7Z"f'1@ dsFVëjpH'߈ouI uY8fEAVZⰵ>cLn( 30̽1JkCY r #;igud*5z/N ># 5ᬫKE{ &4lǕ#qn|mMSU$bpw`$e=ZJ |I&Nn-^^9cowUcTHn%293J[C&TNph.;yp4gBr}/zƹnrȌf ֆ bfKѶ| XC4֡.jWx }CTg00D6D"H$d&WqOq!9oGdo:P<ݘQqH1e33RiEֶVqmz #,;~8DB\!Ix|2nvxޥ(Gy,g E%6vɸN^3yR3 \y\]nph(Gjs08ܖ@yW\^!)KSO%)m*1~&3o/NӝzT-8hӴ0K=O.ݺC%1kދhN[ˋt{{gyOD=ؔ˙.A.Jjx8I]TLvyC]]lyZ7fg=O:\ lCǒ  %wFr}ZXXBYi %UL,=' =\ r@nL!"M"H$p":՝=fz:C>6ܯm0DW =;sjԧEpq /F%~{[CcU 39eszJq xcO;2R?!><(YXA7Yޥ5N45gAz۫ܽJDn,RxsD=ĜENipP58l3&VM.QCSCZoZu]ZF7]]}\8}#g>/lμ~{]93Vjh4D@Jiz2=SL ul\H&H$MQr#cܱ15%0}`An!)}߹Ӌv2;ww4WFw)68kl>_1ޜ9#ݱ26NjrE/`dn5vZ=b=9lܰ557<5(mͱLߔKEs1cLpƟ-D:Ѿ6XXZϐ+C,4h3m rp [C̮sۏzcCv^zqr "6`'g3\p5i;QmqK qLCڃxތ3. NӘ̧:NĦqoNt'8bG\ e[`:9iX%0$RI$D" 7%;MNLD9tB-]TlnTȑxNF{n4V˶{ldW&C)pH<@@<`}mvd ݭMX_z1j%ْ}e(WV Jωe{,-\`" }hXZZacwVWVXbow}Adw_ hm{k{MedUylwپ} /c_%ː,@-" vY\64-J wRq@Sq>dffAjJ:K4>pJyA))USNYm+Sd@jv k /'#9ʛlQ[Cb3KX!ro*vV)j`[[DOO&!!rHhH$DKEDR/cLj8?ʼ6Ԛ yDd()@m* Zu >/r yF|}DvO 9A9\.[47}Fͺp$ />\_"E۟7AF#H$D`kqqjU6TT-V6 ym))I&)%O@)Oe.v|'iMjgmuTzّi39#,m],BjjW#tJJK],+!IVy#"bqr4vzɡ6d H$D"M"E[oe_%vߟG8E3?Lg3|&/Mj+8A VwsXܞ#͙~Q.zR9vKΜ9xw/~vx9|]ί~w0'~4damTv*VI$D"H&M↏%|_r7`@k$? J<+{Fjmq(TLdpr+d , U瀵~7i[CՁo>e.`Aq0~gz8guga#s+n?Me'XEjGWxA+KT=~'^C3uʞ*4p4˗OMࢇ#^4M ߚ"%,>zq{exq^ϓH!_~y+OؑQGG[l~/)b{Ԃ4gjRUp2݋̡Y$f06VQ/=X"+ _8?bt㬉 //9d;~GbcO6/Ґ|?\;h_컟ww>~t?~  m3]߼{Q@d939 -p2~wtu3;5G}Y[䊗138n~E W?9_<{O(dvmGA_c>Q4]⣏?) 
o;~Oyp˗=3&< 'y.}r~;O;E{!G_YfrSD"H$M6mjs~˟+E{K9_4L'1cNyO>DVRy~*>wz5iI$61F&Z1ͻDFkqW?%ڬm kCsP*FՇ<,h ?̗\>%ixT3F'~ȝ64}r綴N@eC>;@kG5VuKҊj,> _ZWg~rB:W<`EE3,Q;$?#=%>nA;<yd3hag•v ӟgen߸ó:ΥiD"H$2)$R kDZ/>ZjH}|&]e066k>~m/lN~km-y-l ס /;->}mv9#_-|pI^'Hw~^Q >O3[r(;G08\/5OiIMc9Occi#N.N$6h/S=lkzGͩ粥P#0FWGg!Y}|6->u:@6ވ?&qB18y>;͓H$D"M"EP&ξDލ;jgɝVGïs"99?}l/[hs<3$17E_s .ڞ7%Q ?ohkѦMpW?"]ԫxrx5븩"I 2~cΓ"j*(-)qhw~/-ha GOD7?1aC+=bIGO#V_xGJ{K nixa>?OMGucLpLm#':g&T .0\_Eۃ&d~7݌ebiP-~- ]$E/ɵ6ܱME !_626dH$D"pI&M /D{%^MG%6'@X<`৿洎?_~w|5U>\\"34:gxQ-Ν9 ѽBg-_K>#~7}jӼ=8Bɿn}ɻlDX)~@Uoٻ8]CkEWs|_}??)?ÙlMp ;|L˖Uj0 `y'1.Qy/>5}\Oc[%r0|?Oy^f Aγ._c(o>എ.>O9SZht(x?za7C9ѯ9rZe榸tuw$D"Hh>X# 61g)D^dL 1=OM082|nںgdx-v7CT4**A`cfζ6bpd]FF^v}AfWQL %`{y!FgΈw]ޤfyjaFFF^!Wv/IK{h{L -`si)"9`z|zQm&FloY^ŊFYC.cdnFdfy;+xx5Qgt.Fǧb槆iooM 21*J $D"Hhw)/!9}gVA[{*v(lbG{I%`va6h)OJBxT6?v65ା! |H$D"HhLb(22I9o2>R'cr6ۋĢ7)HjǗ|w9) l-2D"H$mN&ܭ\煮(>OW_VL)`<7Ka ji>5i3ɃDS`kMd v)EW ʎOabockeև`G_/͵pK*WyI6M'+GٚɓrblgX[x59(VI&{ĦOM=_  T [3̈́amc廩hhH$D'~߾j 3Jm.=gzf!p0s `Z.#\ 6QTeH k=W#r2(icd@\vC=ѵgj7ѱ$9+c|mtpʣR@wMkt2(y |t?=5y$C#E8i>f欅; c ,TcԾj:{ɊY6<㹹::[aik$%%uںK.89CvI pyTwF}ȕ$&&r]=o r6r%mW3z[9V^YT%8=Ǹe.ǎIY1|HzPo⬧CASAgt&ږvbnm8s<-j ]nt#/'QmD"H~JZƹ^4mDs9L0{)m?q-Oq@/ r?}_ID< yש ?E}(9@1D~~?C8ҕZ;vft*.84_+C*~p#Mkdb|I7X,!MwZ]< t;1q{W9pb0A\M~A>^ڔn*n^pV Wp}X{ zGHp`.OQCN8?dih rv8 yc~׈tK[,%KϏsތ{9-b>5Q7crr6$3-0W=WxgJvϩyёl ( óY\ՃZ6D"H$k[^TP_9Mh+q/wS-hV#KMX\gCM"#[yB A)QA[i/_^l7_ =<^@C޳Aռ *9kWW" U4lO/{ZʛC-68o:IT$2 A2*V(uy` ,`w~+9}Y*Ӛ8rkSxV߲ |zy7P=&~T"!,y5i!Q\l,%˃3x_n6+WQ8D^(7nw=N[A]lHX;eC;Vp;H"q01zK~&5\,\bæ{d$[&ҳOP̯DŜWG4[UbI|"S; 쯬aWc Y}sjmP׼_0m -SJj.7p#cvNuxǎxNŽ:4*>Wo5_ \I/?obYP[7]fok}{зKu don4Wbr_Dħ5=$7j\#!Tv"n5c:ҋǹã%.cP`HѦUψ)޶ռԯAdsi]~( 4q: )P1?9򶂷;LOϡjkiPfoV;XqZem_٩I*@de~T;+LO d uPH@ *ۮ5c#qjchrͭ 6w֙[F 3}4498RRCg]LL1J#'UFKkj.#=466;:BH&H$0ڶ&0k݋qWzT(hD+HI(VxmMn_I$D"MEii; aeO ?yun>bj]=n]磻#,Ma z5Ol([٧#l+E$Ry(auGmD"H$un"O|ҌjY%ir|#ǃkOmrV'0ck_MSA7ơSs1k UkJ]k*U Uu+J_iC6ӳ5E=k]o[8RϏ+ZJv@Nt?uC+_TS5Gֽ>%a+kZXK > k=J'',z>?+^'v765sj"cX'8XG7㳻t Rн؊՞Ʌ}cv,2-{hcfrDu 
WeaNƾZM*f6)l%c=^Ri_{}V]SUs"4IIC;4R00/C%- X\hg|Gr=w5.23S[v-S>͎Lw@+."+{,yN`sqe{יT"nlaQÓ=rH&M"H$Dn1|VqhH&H$D"7v!:̊m)~HKIP)T?j4H$D"Hh Mcv|}bwuD@h>Cʟ^~1xbwzb0V Fw/N;ǝ^kJO c|/H$D"HhHѦҨY1=jwgl=ΑT<3] fV, ]VTjd=vww;@9y4?{۩lmajFcK>xZ$)W v;^azy.gw?CV5B~ {~]* .ay95u'[@%Kr@AWGr$D"H$R JJF@Vh ]AMЈLLsvuaUKt)Jb=gR=v@GE,3Ŋ:]c<M>{-L/nPͼ:J"p>Rhc? vN89;c}P*Kҹ`oYmz+"996FFo#/.,nA aav!Gؔ)m@4s蟘AH$D"HhLbOX SBRې{}vdK-['M:|p6K ?- cO3&'J4#14Hu 05Onc[緩yH@XY 6L$D"H$Ra),(#93$UOts::uxwf wu'mA.8 RL,M\奚/䰱Ht+"< (Px7c\OHy|s:hbL6;Jh©s>.q#YBb a~?p);Q^>ބ\:$G䂳;wP|]'$$rs\prO )9Xv.N8;zT1(>ٝ56Cy1LG *K3n.Dݽ-;[zgsH$D"HhÈxcdnяA/079nG&,0'F1tĺԋe$ J Iy\Oo19=.^S xlMXsw-&9gO2Hg>vhM&~Vq[q݌uN?'\fWI"H$D"1>3!uLHl`>~y\zw&1О;;XKjq}llm011z{rvh(']\LqgF6W,(#Pd+MؘރkfO%#]iYL17ekk]nK??v\ι 'D"H$D=Ȅa@A) \S1fKH!>>߄lk[v&Ĥ`eBשJ117"o|BRBڸ }7[80&*hR4LS_O' uMIG5:>G?³ʊjd#_s{H$D!j4՚Oůȷ#  {AbqDQ@@D"Ej Gc3"PlLG:u^dK$;s7*|Eh.jޤZ,̐IoRrHtvddEsȕ}m,`Vv>4n _^GIG-鳏WyG>?G[D"H$xxx}5<KJjp ".Fӿ*=Q1dt mHkU%wPwqgW}| KXQ]b=rn>$T# 'l;=}i:oa{+K-i@>ن՗c +MPq&xYHkE\Ta%FVYaNߪ')A`fc sNB9N]9KQO)rD"H$?$ŗе ֶnr5oB h'Gs*AyN<`}ҌRfwD= 2se{(ͧ㮵6 "PxߐdO?W"Z@dk\5t}oJ} @p {~noɻED5 O9.*SxOdWJʝ(hPՈD7RkD^Rfk(Tj`hĖoÿ14"AD!\F́BZB-jJD 66wQh{#ޖZDQ#H$HaqхJ[;yzh9R7Mueiiq ?qʣ7Wm9sNGO_<\+A%v0g:kJ\u*`uoCw J[yEZZXZ},KٓMsѝ vp}Zƛ>r5 :&Ϲ'Od[=9}KsΜ:MPT.r/U "Jb0>sg|/F`oy Wktt>ϣAd9SѷjG)XW ד$6F"ED"H$??",Ѷp#++4"RˍN|=B("._ؓ`g 6ڧ'+0d|Wc'gyjm'ϺɟGbu7Hճy`3ftr }Gʮ%^W}-Czn1|84@oNWR=89-Ef126|#=8s̖F:ʱm\1btuȬ`lsDSsLS4Ӯ1L6-La$RvWFOB"E(7ɱ1n0ﶳ8oS34 f'ǘߖ(TD73Σ^Ѩը 4.?D{L-lz D"cG 3#L-mE؇-"YQ'BW7V`z֖5r] f4 d`F\ ku>GlI wOcl\ 9Ow}[;8Y9JږxE9Ky8pn4 tq`j䧷n).cxEb W#S]n.xxeN=nF H$ɟkf8gO@P4Ȧ;q1vqNHk՜f^Je A)i\s2vdB/f4bfMz>4x-ܻ}#ȫB %yYf\hkȡ*2PI$)ڮoڝԙy5=.'(bf3_0>K"`pۖua&}%hU=Tze8Տ)4Y ٜiI8>M,T]>=r]= 2f9ؚ^72oiunϑ|ٖ/n137Iqf423LY{Ƴ%G"ED@lԟ1۠-Ȉ)9epO)9 ߺLmhiG 5OpB?:̵;wmAhj| \23TMTdK1=YCr ~vܹO @%D*(7|x _t)\3%3p S؟ͺj6% :'q A!( Ȃ.hsx׭51zlU9n䊂D%M)9r\>%"ty\ ,lo2hkgO/9[r?ǎs(I=._N 4`9Ξnv Dֲ:ϏaǥK^|!ŭך1=G0/mX!AҮءoϺVohv$;jNZu'C]f0䅃̿8GBF+1=MYig ^ 0?JTi΋'7 fT~F|^ TGcԦn*)I,9㻈9͉l 7Ո~NDj 
=᜙c;jD&7͍H掛5!(59Aa#9!2DΙc$7͡<!]AG^4Vixӛ۫oTLB)ewoQ(vYE~1oJś:+27vQ(v5vwERAD"ED>Bcsw1pºfo`uXlPAE>p=Qhԋ[PZ-,y%w<\ˆInxBG#2kK3hvhxpF/sQ> 7)6 ]61wWf?Ctb8;+8_(s#98₭=D0z[ԤBӧ >/.4aeF.ωyUFr%ES<d%zfV䄭yʚ0pmE7匛ub~GEmn$Pt,c>@" yvSPx˚I04{\&-0o{z7yNC]R I rN3m螳|8 %pQ9-{3ڜm9PS< t7ecpG$DҚt/HkF@"D"EPMWW7h0t` vGq75„ ⨅LfO DEu%;9T7쨘0艍]d"hW2Djnzt0Qt#+,n_ 0t\B_8ՂiQO]kΧ܄yZR5ТhpՕF[Ky\x3M1BYy7kK c|8Sމ)# V8iq:I 2Vr"b@srD;OkX^YeiDZtUcɰ uEF2ѳdzWMMR7R߈T]PSt2C~\K!"h)ob7iy\qQ#"r'̭&!ѦmE '1 AmU=1 :6x#ڬT1z;Xr7D",O002Z@"E"Eb˳6K?~!jD Xv-׉G*٩8O₣yݓU<._izb0uF&Ms7К4#x}}`P m _a{~טz)w#2:N-?Μ~b2,c~e L`gs3Gv׉Gȇ4^++ױzt2O18M/)p਑3<)ې˯?1^Q/gW+Sm;2uk uӿ  .cxTΧӞI= *(r|)Ki5a`@}J|)3pw㏹V֕6.![ &+9sFx-R<,| j;G )I]ՏD}mO%.'K{'v9W ]kJ@A9hѲH"H$m"[kxITZßQJo%^cmc?.<Ó4"Of()gs_+c6as~nڜ 34ϧfh:f6X_g6uY$'$]Hcy wPZX@~f"O$S4QЃ9musr j#3#- "Gͼm}ܜ23HOK#9)މEֲd0&TtUW1rvw(L'..z6`qԂj∏Kg.9=#9]%2R KvV LmpHPV[Hӧħ2*C\MՋ(LK%11fJRh\ckr['m{l<%/mw| 7Z&8.r~{o:c[̬ DD>C O hZU"ۄ6DoAFe3)77w弄m"1>K"H$mg_~Ӛqq0x` ]N./by>=,Ͳ'yVP33+t3¦L ẫsehCńQ&6x 678@ u38L\䚙jX`c~N&[,!Zbxt5zpstwv7fh`xj5ꣳ{])Y짣ٍhvqݙa~4f3?Q}@Ka">n8S>lGW=[?:D"H$RMij9> m@ٜd+3p4YN>NVF1= 9rg{p1&yz% hͺ46wĩ8{\ 86U^3K؜̉94 yZ5W9`s\+3=9ǣrcCz4Зg8Ba<=I8d[N~q?Lj,':r7G _}+]p4FĀ3gxV؆>9_řǰ-O12gZI"H$r}~Jr D/a gsk׎(la@ *(kCPc5OEɷp:QNxǖ1?S׈2$t{hx1T7AVǸDC#>1 JVp< mz oa`:D&J/R˴ @CyWrQ5::BYYYa}}F&1>>fxxO@63.L SóS|g+vqn>F+NwP !:i/Fw~{sxZ}QC-WJ` 㜵t;-/Rf(94k8Ozh;22Z ^'>{\8{WR[C]ttJ6H#H$DV,m@ V'S?$p3Ntaq,m0Kr+ +l.M*ω{duu]D~;\Nrr2CCC6twwZ,,,pEPappɟm h|Y= -$ 7RYSEUMqbX^8(jn68q%a8y8.co_Lωԥ^~.EѮX68|K?t%~L)V۵r=JJK*32o |Iˮ&] Fj6r^ rf2#1v* (hbll^rg}m8RH$D"(f[^R.kÝ)F1ĈNJK^G> _?ţ! h=f88`SglhB)_#Zt15܌j0֘èguu=cCX^^>|uӧܻwz@&krrt޽KNNsss_9TWWٳgs*_$m#BUk?Ǐ|ZnJm0ʼnXa|sǜ0sP8_9sjSͻ" +|~ܘFsY*䈇 8Ef"4ܵ#0ڜ ; T9֞h#6d]-f&ɸG]+oamt}CcX[a}mXˊQnXaJ w cr3#iʼnJ".Xp $ۢRH$D"ȉ8:;섍Y>& )dmm8$Oq3M[x>.erQQ[[]!$k1pkaM?i $%%%DGGpJMM= kw9|ua}tk_sGhlldww׾_y(+++9? 
mܾ|m^X缣 {m/igFbq7(7G9dU /\fQ*/R␰OIMm챳wZ\% sr/,ѡt.%U)e[K# 9O®ҰRX`)k ;Z_▏=NNzxS5VW׮g"r=C,v舳+o!ђna3nAtLm"}Չ98gD2K 5K=x䈽{0 ED"H$[VhZ(.\=ogzQ4JV.ycu)4d |c˘)ᴎ;+ϽB2Xsjpy\oS*`e~~e555hkkC155uiR4 a%''1àfIh"8ڽmD"H@ӇQ2ۆI∱U =F'Q>7~-== ۋ+nӼ " ~@oD6D6D"H$:Wq6u3zv-8VnkEf6ɉvBcK[TdQ %8HǼp3ؑ#:fWRũϳj|[?-U00&&&xaiiZ[['''^{aZYY333ޖz}:sz;;;HhHhH$D"Yx$ jb# ǥ7v&wl<lY2B_}* ZY#4 5 5)<أ9)>\: 6Qh^lmm=VWW_e7<<[.zp^/)M"6D"H$\<YaaO+U!H&|M"H$D7ǑL004$A1{j$)$MI$D"hؗ';@@"M"FDR $D"H$RI$R M(Q{4sdD"H$mɟe * SD{m6GoW'}#S&s3KhQ8;;@dkm}eve6bh|5ޅх(yb>|_a| qnVx42:Kw(;7savzD"H$D.m{KMX42K2_⒫%Fm6?{ȴ:713u7Sͱ}{W\\]1>_){N ch[VaA^<ɝ+Μ:ugm)i兺@/@C-Y<'7aH$D2Ҍuqrq S8ųGtpzZ{nPr x~_ Nж*|m e5;oao`{.E~F P"M"E[Whߥ# -wFRАqۙ-0:4ECQ*7.zqt4PG]j 5)X:dNCo>.8;:=sZĕVRQQAiA9 3;1IeN2W}9}ևՃ.[R?oڦV7~k=!Y7) 7fr?LD"^3)1jD20@u/,/<-CORrh\9r3gdq D] /l 7ibGτm)ږe+\7|ZWRwuYʿr{ 㧹\OwK~>ѷihԲj\+8}\G-rۄwuQ6.Sᱏ;aR}c0hJ[y+}~ڈCP ˧g(G#eD.G0D"H$%pZ5TiFH ֛a8qr[H;gx" %}Qkzӣ_y- ѵ!"y!&8%v<>ңLý9GX9fYK7:DJ0;c}l/2uz5x}3 R N&8w qgk^fױ gxR?|c&Z_z2Nm,`%N?u fz >k]IiMt =8 UD:piN9CFok. l(8?S>axrԉmhk8p=V*BAYm }1!{ ,# ݌ҹ] b=o3!EDa{47K!o%i-O. 
{c )o'ebhL?([''G\oп"G ޷+JEh\xPo3kj P}?7w?R XA$!י"H$Hu 9;.6;9&>N ~ƁEƇ[4IY1af_dNYUr+=cn 9{̄~̍ a W##yA4L`gZCU+T3s gZߤh| zkքψi<<~s/v> tTsk241 9ãLm( =|t{1J5/K^?*:D"HD*oYm8ƐZLܡ4\l9$ 5l`Lq)>lnn~k CDۚl[mǠcٿ⟆'/PH$D"(lo*޴oӔuOYs^0'U7[>G\n ^d= S<,[`"feҰ0~5>3%=;+&Kj)x/1K`eppp =CCA${ޤ9`fdfZں[)ƗPk8t@/_>&Wx,=/Z@7 C/mfd|QfyNcKh䛌BGO+{qf7m148B-pH155@`uvV[;B4O7׏5Q F^8?$D"HDFH+G[449xY%c>!EF>}rjq|"{*E-yEt咜ڀ *NI~R@WY2WB/q)MNܛ%-)clb=ᗹ£ZD5M\$b*9xA7ObLkr)nggԔ|d",mroTFx'O((, 398:Y4l2ZJJNo6`6hʢ}AT_KGWř4mWIvI'$Tq ũ-2OnM'_`~ s'q"KJ4;vj)$D"H$ߟmSS-`mm051mJ^Z#I);ry8Ϟ4)d>~B>k=rwTƖxë%QXRI';9DZLDz˿y^:Id䒑Ge{ d;|'O -&*߈šnG.xmdV'ۤ$)}0:7EV3; t6X{ ˤh?4R-{LON-W&$SM "j&hdqwQk K"+F@!N1 &Q>oD"H$)Fb~ať%"kwd0- |}Ǵ-!r3H̯a#-\fOJOa,O;, '8W|r`wGw$D"ΤhfuuVVW|wD)O.J&gT7wPãtƩKcd m$\QLb|25Uf%I sS/ S^,hM6Nm0Vc&*/9s:-%WTP]SMmMsLv6NIU rF;2/fwT鮯 ##&_a^2+ev]Ʒj&+#vd*+YU@+Vu4{0=Hei-;*5S-dgf[cDz*# v5rwLMu%- SAnI#Cͭ!=St4VE/MAv~k2B{}Mc2JadmaҼ,rJjXU2Fan6陹O!}z ;+@s[}4xD"H$)VVVgvvoѱGATaMOϰ l./0<0[2@"2 dequV1;;ڶɶ7X=}}jPDv#|{xp ~.k_)m}|1=ul׸hljsbsnaOPjrC}160ׁ3&-ukMQ89ap N6L(xITҚ~#ǴqԧeC&';L>9 <,rh3G+ey';w r8sRKaxY\PMɘZgj2"ip{QҾb ]]\/jđϾNZ-]/OK%_xкf'#m-,|1>6]AKGjs#3Ѳr߉/H aoE3*Fӽfܜl),JA=r@vE)rH"H$QH/):;;~ctuu1 mAf>dw{RnڙXʄ*E򨠍 c04@\$͌ީhsp%#0"-|'aZcԂ2t>:NdZ3r8bY&:6OO <|*Th*_3ztA') Tw:j>#"Wm AN5=D[cq8w4a&'yXD%kt _;@Ιz$ϠqPi4tU=1<:o>fry2lϘS2d<Օ6{{fG9ou5c5_D"Q1X[&Y[`_-ޤ=%&2V[LqC'{Vg(k@)^M mZ˙_gc(ϟxgEGDm[8{|z g }Lo+Xv0V޷YMڕy9V^^X-h4E-({ml/?w,DˆInx/;Ddזgv?̅yk>pׄ4N&27iɺf{ J6k[fd_}tR+m9(38gi-zFcRK3,)7|1>#YUBVnnH)Ym$[Z8s"xp>.`/8khV8SX3jxVzg8t)X@Uyn&W8+jH+䉟xNAa57x76.ahH`CZVavm𜆺T?n[x 6tl=H i0qHbigǜsڔONYP5dhsf@AAVAYbĮ_D"\421EGa&9K*r0LR޻/7}Bh0oFIM5Na̍ pi[dHhHLMɓ0G }j$N0lhqw7S[:g XZDT/mY$mE$S=8bZ`Q;Q9 -Ѷچ #{X[G ږx8_wUá,LuyҼ r{ϓd6qŚ#l/7atl (biiRbc oF5ۨeeelN c:?eLq1@ZS9\&Ikqi4j<i&^alf,0&)ioFH*. 
L@DZ!NX>BXɗD~+ڬB|ތF.[!.1Χ̩"!!6ֶxH|ޘqzpȠ.:_sP2fhoTlŝ6~$dwK8:rI>JM㼧 NnGP8ę>?Bg<ʗ_~EC> >r>cI IsY#u\ ʼn+E)* e"RoyNEe&y(SOW<|nCPPLA^A{]y|wBprQbvOm0L} `˕Z4SxڞquN>hѪ[x_ zG(lu=e KCڦ<+-} ɕ"{Ԧ7@&d$ ы{i$3vh FuOV4jS.s>)ED7>n |կ>#$ y_I{b0uFV&Ms7І4x~ӥ l U$gͽY2>`u܋}lޜqj6'w cj{ ?}Ỷ5bd@QQ6:F]+@䕕X^u8hrh2oq؍GϞl)ΧF=N =E7LǁD]uc02g`bLvQmc i 唁)5Cr9404·+JY,T_CVVy`duz3^v3:\}K6|L6)x>`{$c^M.&ZH,,"2ؒ[y}t$yb{Xе<4F4H$gtKaV FzF mu݋3g o"ǎ8Es1.'R3ko;.|)҆T1pwvwsfg쎽Ii:`{5 <}br I䗁N}ϴ ym N&\$== |6s;A"Ӓk%c̴s"=Prfʓ$Lmu%FNq5țͱ@6C1ycIQv,F\Mf<'z&<)# .h< G؁T|l)*մ&bdmäd1Ɋm)ڶxⶒW4fā\ZƾBw9nqR8i܉&i6m$eRn ,40h2IU7E<^Dyф؞i$152SߐTJI!/oѶD9Xy7q&7¯sUB/]{Q&/.^(:w w, ߙ -% Wy~/rn2ՒXR->;AVރB j )ot1+c]~!^Fp j8#/>AAA_C*,ɉi,99ͽ+zG Y4%zKWnP> @[f9_wK @E[A-+,7Q:wo͇d"b2@1Ijb*2 mݱp?|D"*p5ȶ&I{ Uؘ9K{=>=֎G<+ouMyNdf{;(n33|wʸ(yΩytp5/i9koJ?:Ɲ:&d)VzL/nP6Gfz>8K/LݙD7,qgU`5FnT\p6qS"FsۑcG)D䕕4O'm|̎zY-;ݿ)n'(/}:VRbfƔ/xIf'>#]۝?99)$R9H$D_j:zSqhka*Z^@P02:B0-Mݬo)@T2?> 06"fLJhk~X\]`jqorF{:(m7h uP[[237so1::R uSQYM CC#lռ64j޴2"I&PzeqO e:Z).-M& Jfڨgdb[({j@J_W+uLNp-UT4Lk] ,Y6I&3{B_-mN6MI^6##3)j~{H&H$D"]ț'4J&j$>Ѧ@ψS14ç͕^/ !,$@/G;Kahu89@"ED"H$;ɖ)x•0dTFgmN&f\+mkc\ ɇ) ,.ƃ:" R#aybr^Xq ccDf"}5xEln~'l.~}\W!);.h@K"*xb= ϝYI'없u-9Q±#:K";&hS1rV 18՘Lr]=\.ᅍ*,Y~+ZKc8<}B`fMxZ5"fJns 4OnJD"H$D"M3 WQ(s zV2Jc1³蚄*S>]%4f7zp:ϵ<3CKϏ%"A}w:rʱвfZ|\/F;)ؘi8|ia88͝x|NDN7"ؚE%)0i|rgQĖ˝jI\O\0 E_ D"H$"TD5h4J|q j$FJ%* *5?"h yiFz8z܌ A:5\⸡;7<)ϊ.xz;/k > N^$ޘ/>xn'x+g.dαO5@agNj]b%KMWڻ:q9saH MZM]$ LFi3Sc2<媨o^KZ} " Pq攉{mDs2hSO]Ci&)2If,+\ E +6YId@WYGC|cE=YQ2l=ekT6oa.0aͣ(1_a\Ikw9vM<]kڑa~B.s Hb?{J/log4WŚSN>~LuM|inNjZ\S9IǾ6nIïy~o?M:ssMwoSѶo7CMK<]@$qgu`M%U-<:#v+ߛY˧ؼgyeo'z c$[ <ƥs= '7ښVYd')dX8Qϭsg_g}vvncN xûm&]^~UboS .1LpCQ3ykO-0p=ʳ_5͘6~Yd'%3!5b:n_¶}v6omg8#AD -ϙ}|nQ3T'uI#CQXS ?  
S %p$d&(A_{~• 0&i F+h{cM=KjL8+k,=9I!_] cDMYgxrK}4f/j/:p$te |nRxg5Lf&A70A{c'>1Y M6l8&aK\UHq LǒhL2C`$Z);| Nl{u82YA>+עg =nvqdz(ns?OjbF3/7=\OYPlZb"-:_?3~ϯGΫ1%yZ~v|6n/`R5 l# c3t&\c59yf]=pN#92Vb/,5Rvѣ6Po!4hC3, Zٹo,5pqtXѦ(wt6R=pmӄf1{F[[8nK1)Wl;4<#?j6L5,#u6^ҊNgͽ'qRoj읿Ǭa/G6AD[#;MJ fG)B e7¨SK5/3p'm b΁~Tnjj4[]8nвb d'T3cEL8YeW]dePe 68;\3Mxbh^=w1OO=Ġ19k,w2ˀ;U*b=?[B/,G6r3ߔXqH(Kj]ǃ;d8wcg<\[7kZ,6җPK# $VO1\ Ef#Mj.͒\4l2u?]/o[pFU KtEzzG\ίG( hv lsZ׍s:.tFP"aVٮn}i2֘s|$hDAg:c+M|ƮdH '\ywkͬCt{c}w2|}73p'5lI#lh`^*KǏP&7\jZyC'z xol0Y:6j,:׺Y>1n.[/I7H{D  6[K~XjE h;bnY_Vs{-yԟab_krN:{9BەWV6U #I n3p{e6-9skA'Go{ܽĭ󵔔;*L4ZIJmj5*}p":nZ<9כm)7! s,[׈ѓ'b[7Uz3KF]suXR$5{{:!tsr/iȄFyq[cg|$hDI) h]M' K&.ַsR -i`*yEr2 ܖ6]N4gj=gčRDγ%D\@:ˎ7<=WYXTlNUF^ľF>jxKWk% Gǰc]zZU'hv'2he6F g[Ϗhmmdit^nob=%=8 |m;^n} :[;,ObrηM9g'<_᪐[ ԫȋKuMXqf%/.$QY{{ G {ǓH$ y XPj[LYY _4H4C.IHd (2LKym4!Q(L]_&JLɦ%2 ˄cq|$(2DAf$f &pʼnd& . L:_eidDZBRJP+3++(;g,! xq)2Wm6'M|s͙"xF9L6 %o 29ܣ1FY \M9_B@$k,I<+sU!cx,6q=^&hb,ިe.5fj\ݫl9Տ^52shk!0!d-ܹ wM6~Ǝ7nT<ˎIDfYbj)*_}f':ձv,* AiS[-<9m}qZ,;螌FTlcb˅ߊJr#qE&Ɋ:͈1 |wU=IGpt/X%m6O@nb~ZZ:h ;w-0G& ߶Є_nbAzԽ|c@kxCVq-=hAAR'?[ς&~6Om69O> AϞN^Οhof5paz#l5&qpf2N׿e9?]w,mY,:5Cy S] ,j槧5LMkYRcJ~H_çrp|=L>V'ZcΫz!]Ie8֖NGLFwVQmj4\p:=E=tn3597<7=&4߂xm;v'^ikwwtvgG^V7p{\/de2s5lЄ :^:P0/ӟTLA7=7JAF&h"` wV[8RP@z)^oޒF^b+FۻH\gE͘GDۜx4p\=I@I AA&hAAm MAAD "AA#MD  6AD   hm  1696Fww7~dYE+L o+_K TX$TyOL.$LSv~|.C< [(a2S{(oLo#Qm  D(CCCd2Rd[:& Nۻxzwx1ӕ*rL%8]vVէ(-=G9y:-cyxSee9ZJ[o?Oq69IgᓧqT:!;C{{**kZk8Sʀc‰ғ,=ƙz=Im  ‡@D[1&={`d,xweT?HiE5UU)=|3v}&Kϓ`R.t{D[TyU(#\:|~?WyzpL'k+@M9*ڹJG\^MRVrSG8vFM< sLX}r>*Or$gTv<}fՓ 4hn]5eH"hAA>"zz{ '=rs|s8ehh]2plI 7pA'OL.ChH&໶H$ hARHh.! 
tE؇qty F s\>yɤtyg \K p`p)#^9SхGL4sP,˻~=%fKHyG]¹>:!$YaB:k0, x;^<G hAådlMKܵkmt$&(ySQ*>[ˌ5ܿ6\<ē[U˛!l^fd:$Sp-/pUAdk(LR1/kƕ(^"]NZb3^tS;ğLQp򭗵ܷײ@B\Yhi8Ô<kC;SsF~5bOɼB=soyU57,2rI#5ŗԉ$q&M& jq'5sH~{Y?R8Lqy-y&MܴJ"] d\.f,g%6-ڊVԱ1̔ {٠ 1!m:V0?xO.nd0TuʌU6&9|ui9 Ͻ;He$מ>Bq2 d\jSs_3m,d\7Y e!9TQPY{LŪ nȄj5 a@BU/ic4Ǚ6AD  Bk;XDr(h6>nZnp:; ( __VϞ-ʕu]"e!.g ,h[J3K7Ȁ{E[]jR7PEfG%lߩg>L0u62(e9WϽ(nm}d%AD cKAx ˆ7HFFQWw3z~335^mR+/)^̥Z3u,=܋pLj١wE>KkBc<}Q>D "AA҃|^w.pn/XtP>5r]T|og55fn(n%Ѷ5FI} nnL)9k͜:cW{۞^mg/?#V?|2ۦ'SNΫ{NgBmvqD$@A󖅧9 tLŧzՅ4CY|tK3'8aJVB sgП|e}ZJO|_W#xq)2CC=6>s$l\OrC[=H]1;\/ jo[YC^,^Vk]缤%:I54J~fnm[ZyNZ̍Y{oJ.ΘÀC;z?(\^stzTN|4A\ &yu9Fs_A>vcY'lytOC=:|\hFxh%ok|X|V=PHwi6}K~#<`m&  mA1^;ənzdy FRL*Gä \=]x\!:fx4 KX:tOO3j?6Oi BF-~g.a(̈́Ę.\# HDFR1\z^2@&` #A:uZ] 2=]ףьD"vwQdsinѴ8#g߬a;F>1NKLP&cB!K %c.LE=v:>%NI{&5!w6w6.\"3mO=}ϋ}щ9lӬ/08j6ڄB!h31>dؘjZ$Y=*MʪeQIrH( #ä&:* g;{v!v+.c`kE=w8:y` Pkf$Y7zq;؀]찴׋[1?_"bpf.kP{?/>~]<:W[8ꈹ ށrqX4BBBB< јﲠ㰰g$o|FƦ1oU+3,,^B7GB9-,\ݪD57`8hfFLV)kjDB!mͽ1'fىqMS<#x}Å'M-<7(Ч_%B|%Vt /0p>H@F>S33]b.W w):>=af&Ht&f#kj9>.F76QL-*[]ǵ O8F%qw3VG<]Lȃ+t?I.gM|Ƞߠe{9Ӿ6,Y<-!ϛbKuxE3n[a,p7Om#^r=)el1)oᶤ`ܘR`Ak2 _t!Uc|8Q )`<nX!8Nի~TZk !B!Fjdp҃Y,ۋڹu ;-+-y{3ehv1(i`CUnP_z騹 xg;> _7^̚آ!TʼnQfǪp;̨xua;ڞԑeKfY0W2jIiandd>u(a.E[(Յ8:ͷ{򰮁hwO0]9f攌THvap^M!B?hӏhiE@u^5P_NjVDbkG eWϰ{6MRy6kS fVTq,Ȃӥhx30E鏘tqo)@zʔ(\RKX#E$}/ t/&R=q8E{_?S1aΜ~>h%ޑ[v5ȃĭ xrds1X:Shfu j3brJgdS!|ςA)|=.4-ͣ蛙Zw#vtP5ƺ_@u=ѫvq̇'n!ʌ70ڪOFcl-#y=ԛ|M[DB!fZbT(nㅛ1H ח㧈$9K\qBcfn8@7'2DKysF6F^riAV>fD`}p#<ֻدq*O}(h*YGqf W>u9#TR`ѩ\S0G'.%GkR+OO'49A,$lOS "U6FI &,%6N'Л ooO\S7#~p;8J5Gx{K-B!B3L&+&ZQDY_zre>DBohd3OB!mbp7\\ƳWhmB!OD0hU,ͱ !&B!1$&$ڄB!hBM!B6!$B|[GaA1ϟ3J*jXVЬ.PĜ43>oEJok#K:>4UOG? E_\d=(cra-SY]Bt(^Q)x&~v"F0w̰Ju]SZ?=M4NcODm Be6wh dSH:j+r11O.xSߊz VNSv'Wc-w?̜$Oby7lS}!<7lIHLC>q:[?SNe\Ŭ:ϲ.Bş$6!$@!FR\ Mx-^TwS[Sҵ 9It1 @AΰEnMR! 
`ؠn&d\DžS\)iqQ$f: nc{ tQ{&{3N0r/3j+͡gj%I8؝D..ۈyLx^$&Ί`gިez['|_IF~tsk% \yPJBCUBk#p-& , "x,Q'o'4jƙ$J)ɥqM>{%=Í$2,+Z56ȿHP^)+sܿKi=džue7G"A9z6!$B3nƞczvzbߚ&b=6˦7?ONjU/O/Um<&܂hRp5]d߸Mj+_+v3;,r6'HCYVn\}CTuN󡺋>|Nf]&/%oܨ&сu6_{#FvV\n~IE=#,@T1vV>Q\~[3KNd5]lхK;;SDEk TNq?)+[]C-Q kYxx5 }_vŹqb ]J~oV8I]>ͭ.}Ja+U:~,^r|M=ύW31;§)j`XDDB!T|֎N:::AlNvlGÈXy_K_c8?+5#Dy9%*(˾7y|Dw]\6X8zs8(B(xk/};w!Ɗgzr4;j .~ř~=h$ׇj^ o\a0o^ف2v=pk+}svJeycA%]ckz=R'cicٷ=l9]CQ >>8EB sc13 ::HB"pRsh-ACyVWEM o6x;Gp !d 6mi]ۣmn5)P>-2ix&DCh[j{ϰ L5 ,1x ^>>x8aH紊C=3DS:zbV 198hq7mB!sf Wv\i2J`s8f`Wb*3GWfp`FFw!&>F٧ܨlN3~YP>\Z^doK^8 ;*?iL3&'+h[&zS>%2YtN$0 D'i8amI͔qZ{Q̎ 16>NMQ|W߃J7騯$?1 |le /(zʖQyYr1 )(.cxv-F=m?FY+7 vzDH3 3:@g gk?E{}oCw#lSa kb^ӭPfr7vmVFV'X@6GGG͵=m볣TPPT̚DB!)Iqp&:>:B7/&,j)ޱ'HΙҡ%Vz/~IĹBLH+4* ckiIbF&|ܨx@66D )ΗܒY4-kK+dh5|jEYUu I>ʷ_>_%g*'c[$9r rdmmmXٓY9{\r9$xgQ}(坅2}sgps bLrqpT@ik`CX'i0=8[1]?^nOnZ~ą 8[s<͏z 'Ϟh'?'eWYv/> edQ{< +a¾dzc fbv8Dp!8{Q9Dnec(jawba4_=_~<=cBx?ϵt>}MHu[?:c(٘>l|Bvڛq%iٸr4(N&VLjqvLGh]hzf "" @Wa:޶{әf-= ZtNE_[;s3Mv΁=YɍS|kH`\f]!RP*o*qHT,8W6fdEU$smB!0C&>\[&v,NQSZDIY}e1bd^c5F4QRTD}S+=o_XX`n %E3<:o_P\XLSko;^3Cc=𲦌jFٱY?~e ue?+ YM[7 =u(GW.b=nfƆ{y 0f~h/# ƶ[QQCu3v98nyIQa!l&*yAWG+^0TQfԼgd~jjڙ]8MϠ_{1%Ѧh{]*1a\/3?̨`Kd>fbOhV5̽yWTΣjx<̏2:C3x삍A"N\cteO7ZtYODrSg~v]^'.GE&:$nd־|+^ Q bceMj(8lCz Jb 04qއuL&kǾ]4 ͣժ΍%b}HB@mj ~+([j4 lTdW=-&B[vg Ԣ1616&ğtxϾ`w‚6%-YXǼHdVu0VW1f#: vl(`c^AM=sBI_{##!!X}Eؕ>+9YK8;G!$?!9ܚl[嘛''GxǴDr wXkέܾ~'+;*;r#ʊfe[LPġA>%*EE0 |^dmr(WmB!@u9u5#6t-6d=E}= ud\>M7 ah GNA3S2iesf e)V{Vc)3(Mh=q\-!F o'YG,9W ES-`qDSh 쳗wmlY|=g60-ٱ2RˮTtvq=ڕGG*SkFKϫ\ѧjuc!onhfrz-ٙR'!B!f*#$VyO}L"l,I|W?'6t%'wwǏ`9QY3j=Xaa5wsp1#rcm%8|,f8~"Ν9>D&V20VgEog&hw{ K.HR_|1>d\㈗=v<(%|'VTuRFPOuqe?1x6ɪG3Ho8uW2+opOqˡ8S\Kvԗ=Μa_E!B6enpƹu4r$()gqe&Itge8bcHty[ 311#~%#EHHQuT̉c$J;q־^I߼Ѻ<*GWJu{Yn'O$7.d(>(w/&s8qǹ'32N̳cmgO'.>, 4=XNȵ52^;Ǔ9Q β{L8Bܑ8U5yEB!m 5x{y7WL?>0AB!BH 0wx"o0dz4݂ZmO !ddz^?mBH!b.dAƹs˳^4F~ ~FVx^>oQǯcb- *A~z/ F$Dm B#%lrr_Oa40Ll[K1`K5g FF QBWhku>ʯyb>0U aAڊB H6!$@!FN|ֈ!l(g9tG'/4 
UO1f=h&q69{kMliAaߠ62OsD\)cd=IyXd0oWPAv۷o?j4ݚip[K$&$ڄhB!);Bنa'JÅN*rbǚz8yNOmY69b.8}v~tN.03HOw_;,Yl3cfښE۱1T?«Yhdk)fvs 7*IeQ܋LW,w؁Qǎjrss)--ܹs DmB!>v ;6I#, JJ ^aG0L:3&r0L&mY !B̏BPx<ѡXxwr!$ڄ6!BM5ԿbzEmB EB!mBH !B!&$ڄB!hBM!B6!$ڄBl&* Ǹ\zV`G3򇶹:F~79F^XZ67՘a D[JFo⇌76Mc2Ө1Mo#&ѠcSPjpfs̠E968+DmB!ݙh{O/Ǘ&~'FGO8Ϗ8PˍΑGhTtݳ*P;@XJJ=?vw;'Ne; A:6y,n[`.>i .N4都yp*v(.ϕ!vt5>͏Uv6]LtlS+:Hu&B= ܱ7 nI !&BߍSfpBb۩u΅MCC(0abea5mF֖X~my#L,be١l0;m&;Q89efa bhxu=;L5&GA\Gj6MYCC(fxǤcnj1ִ*fVNi7XQ0j7YSQo,Cgy*LY3FVt,LfB-l-y:o`%SlQn5Qg0l_N^chB8e7qѕhG1#zwTdbm*r<#*[Ks2,=[S:m%9 Sq+Bn0avv XZ:mn+  3v04&6!$ڶ !#)'_vlbl Y[comUm#\q!n Qy2v@ ;lŁܭfU|“Q|AQPmy>_xt&_oǨؘJJ8f{ٽ߆swPxG3D8;agO.aXssCfv][2+]I\8p݌xG>|_yKEyA,™k;OJynUtCYA+bEL-clɖ8: Vn Q;u89Q8(сVd֎f 7*POދ"]iZ4f8ؙ&*44is!.\^I+gv}I::q=cPt#?#>܋qZFzf{g5gahBM!sp\ñgv;50?3H;6<,`.Q yG;(fix.3_?DvWnUF:OH߀vs$w+n : 9s%₝toQ!/" '" }v)x8 贛R}lEI8G^o.`N9Iw{J殧d\ihBM!`7"4,`"cN8Abۗ |־ecs^ -RCTJ&3jlmaNyPRAY/!QOQ&5a`ޜ s8Çٻϖܦk TLK x%?bmDX }*)ȫ̼z݁pFWL|_h®t ѰmgO p?>Z:Sԗp&ޏݻr)!QKMs͡S5_S3[B# %*!EL×Bss m/qsq KdSR3R OaMo$AWYl דwiL`\z*ݛHZQ?_EZ`ݍ+&<]c[͹~N`v}kMc<03xfDmB!#e)$^ǖǛCș+T4֑7&OCIM7߬ӇQ!(mawp.':-:?m9MV ~8\Fi͊ƊxgpKmK-W 3bQc܂ᗘU%`n֕i5$fc\H3Px92Jγ# 'HL<#7wzK^B`yqQOIA4Kce83'&8~++i+bO% 1ގA=F $%&x"$}׀/ڟz1lAHJ>x82eK?S;mpmzU[%ڄhB!)=L`|.:>6ޕ/wPޥ5W]]P=n_a 7PNR}}!ӂvKq_[R=&Ƒip{m^}/ HLt~Vu;B*eؙ]=o8Sr>ux[X[cd!9̩adIzhƲҋΥUNpzc7fyĨbY%C\8roIJ/SoH Vj GkFx~ |a_<®R_.<6J3{}eؽ+% ; CvK3o—UH'ڶƝͳ3XP9YhBM!`n"IC MXx018cQhw!Ϥdx^za lkx^Ye;μEK|xvn+7øMRT-5j/Z&=m{L`B+ƭ r7WaK9~^ '2J;>^84踞~;&!-/qs7W7NF1TGTt z>p4qQ(xzuh s'Epݜ^JՍmF)w9r5=یk\9GJ5?Eⵗ|Hx1_gJf\=(uofX|LiiO፷ IYљ$ڄ&BaԃADG l#]T :Lläft(/=nɠEkw`Dk2}&|;Z~*tz8nYCMH !BadVb0Ӎ`͡TZhmB!::KLzXUkBM?hB!BM6!B!$ڄDB!mBH !B!&$ڄB!t:JM=?ddqv7[D{*J((kX6zrNC 344rv SuaI/- J6&~촂əev͎1C6jQL(& #o[)+xNmV &Dh{F!áCdubཷeY8{R4x4e>qvsG[ bghB;^܊ __?|}|p! 
v"=l?MS7g2)Qb3e l=}D2#j/K[&Vzo@0s838}=BMH}!Bvs;V4T:htnזNTN1SX:nq$9&Ltph'5Ljl,LpvL&04[ N.cabʫ^02~g1񠠲QF흽MNV4hהWRT\F$F`ef:_6k)~ՙA*JK(.f`|Fh/ ۴B\ȯj`X̃M0_rtxdVV4uHq32"4~͢Я\'1mnʒbJ˫[XcfJy7(;(-*uIM6!B(&;w1k`څfi.Ex}!AATLPM%7+/(L[F>2et5d|l}?femE%Oτ²Td-`O88TW-fǡ8vH՞G§' N.iMe/wQԠօv\p '&]{|i[V$1۵;zރxcNJ-xoq/{;|ba T\c3'K&xq9 K[b"aemJ>]QW.5S_|&j ׂ[{H|R8w=cj_n{|#lL,0=5;_(aN]18xAskr^N0G VNgYZMm̾$4</B` @ol+4mBhS*9B! LAr<w z[qIgI?%FL%n'><PeppD`\vr!![3npFF-_m-`Wby*Uh&_SChN%ﱸ[[gb}վ|̃PR6aeK㄂;aFocw7ĆqhBZPP3Yzjp:>O[nZ(SY!EH=~o1E`X~-:6rj.*fxA2~[x{MoJ.>zy,q ye YNy{ZJB^I/[ ܹr+Z^=#?m+s=Ϲx(7sTec;yr?i/gI8NG(&B!BM!B!$ڄB!BH !B!DDB!BH !B!{年 m `[$K{.w˽;L !$@ @)>I|_kKќ)3̜?U3W444DGG%ϊCᱵzf(c׬ + k\?×e|?֎5hsqqa?h?VhXa + +1=jw͍y?׎5hf ^/3G7YaVXaָQoVRxa>w!E`4mLXorcVXaVX!lfo_cK64{Y+Osm ,2rQ9Fi0$D6rAt:i*/^%ǒs$E۬e + +d`mWYamγ\?w&B~BN0fpcKK դIѦrAQjQ6#$oR(//OᵹrBCC%_H *gVXaVX*ɛ2v2MƢemsm?4<|r ~G,ݻŋٹs'&I %)<-m]`NcuY_3.iCZDFF}lyUVV~{n޼O?۷fttT^/~:?§9rTx|뗰ܥ#6ikzS0siMùj)?ٮ]zL|iyJ,e]gHipsU&/o "W^?c; ?xCUn~ᅦ迯.\Gs] 2:pYO%l˟8fg7|;1xJN2XweNvr' (/C6Q|ٸrqm){vqi{y+ssIS~ȹ3_<N˗pq]c9hd\5zC\|<q{{at4>'~7"#BdO_W/£bOC' /aLso2>3)Egٓ8D_cE?cd:`{Epq#61 <7< l77£ |tRGYR=''… (M B={hAtqt4Z2oƏ\+/JJBmuݣ Zg?Tןoj:K?/w⃭-jq^E: u:Ⱦʺt/ˋO{+9wWz^Z-i١T¨3HDDGQn~~>K.^tnݒNp u swo_W<E@Z8e̖ZV8Gs9[l^,a}^H ю}N9C? #z;1)twMwo]dDɛ7R( 8Oscc5쎳w#vCpr󧦫QGH /AFGFh#5Gqֹltv#E=܀Oɑ FFE?&&iو7) tFBni  ^Mm 1ru8JpǦHD5ߠNʵ\nɓ9,yqݣʼ\|GG^Ί5ntQ-sv*k-Vi/,_w{c ,0*c7^YG=5dS_GKriQCir,:dJqfޖSXd|X9"X4"96^Hڗ}rj Mcd8[j#+! 
iL"$)dܝ Du8%SRAo-Zl9qkG 6bs%=ʌrh76ǡ|hѹRZGh0͗thZ}Rb\J0Ld3Gic^_^?ȳ{lo9NM9Es 1\od,cfA6Ή ?p)N>ϕؽ~~5v*5 |Jvht@{Nkw-ђUơ5*~ c"etKU>wZ+Wg sաX(hWOD|'Oedͫzw;Y2.Uco/:<9).vWn46gr5tg/rx"|\={_,vAƌ"->Қ1}rF7-0Tkpe9|>y9|=qШI{0h]B`G2oWsmRo9<')شݵnO?$mvX˕w4VRTlx?WcjXq ۷ocËOᇝ"ÃmFbʇذyXߓ’wNp xul/" Kذmck,AQ\XNjne۶-lx959u'zy"X5lz0$ӲHywkGGnY(_ok"mjeɫܼt5b ĵ%R MiTtvLM[رu#%ثqfjF1Mj^ްD^|o|Χ;WD}r6 >nkk{~*zU/~2MhfGC[[4 NGhz 6Q]VBYu#Mmty?9Q`m6^6TLvi#f=ȨdxSM[G_ Sexaj>#syAox"ݤRR!/6H:FI"*GaxFuBm𥶽sF"*{Dzu'# d@@^Q1ZZ0eRZHsS q8:h*EPm&ϝ W'Bon q] f34aJ >AF/LU}4eF@EB]>AtF{su&:[i &ȝ&Fzmw Ef7WsumD1QXYOkk1I+ JK2'!|#7+HQቹ4Xk&:'9*tsN<%JZ1ڭa\8dsE--͔$,~w󊠬AD^FumՉZH Š0bj̩:;*!89s~"IE,[<omiIJ8yb VIS0}XB_GI1)\@GE"~QIgF+;7 ~Re&>6$%I,[)A!4 6֕ cv48N=&7nj2-8TF'ǡIذ Ÿ]]OV‚룣fi"i[ʊEd%Kc_m> *ssZ>!T5JԐNZ~-Vn졌E| Jv>jDg$How2iPSBG_Ljaz^?~(m$$):h0sϘ,%8&N5u-v.&ٲ}۷mf20^Ae_LLAKW2y^ ;D$V8z1qDRѻ۶UĽYħ5*ֽu/bymf6O(s$DU+dLECM8[+N_DϮo&H0r^\WV-8y'җbJ'8^geʦa _g9gnѡh];%&w{~k?u!>{)Iu=6-fOwor9_&pEm>?\ݽǛW[p3|u7EOh{۷%n1_tcer3; .uy:x>- \ޡF}=gexhco9v ?޹ξg; 2~lHX,iM[ִZDi};BZ;ۉv;j[)I `7$ o-Z=C4V(Ve_r+ʊ&kO)brb&H D3mSj7'7/dq1YAHQQZ#a t#Ή 78@Kez-ۚƃFpz:I sg.V,[BCeM}4ft SF&-Kh2gBAƶ!V,!!,vQGG})e=,k'2,KkMP'-:R Ʈn2O+b%pڄ8+ɣ22YtMT 0L:Έ|.dHUlJ!4уts;3pİ0trJdR@Wy&ZJ `lEԴ[oLPG :+)hc*UL_AEG(iQT?Xw=j&Ky(#>rngw0:(bxA4,*/3˩fq̅yW3U&B\!FOfM#դf0<:IIb<}L a634Ak}1%tזRPK[iai&܌ 'hɊ #!QWQ ]sZ qs7j$ZNxb!EtFRMTSOcM:{Ht<3m]M6B̂+A$36Kabb d;,Q#s chl :i)FS?=N)"Hm#7~igU s`<}$f381LY1+3׉k{z1_~b9-?-X=N>E>e01ɕ79;s v5JY^=_|'{|Gl"?͆x)hL7pgL qM݇OoS7"b{e#E5)||{;w}wbہ9`Q.*Ŧđ7_&H-wϛEɸrnh s2gK Вa|ƾ#'|&FkVr{Wn`oroD~Z|Mޜg?!_5C:𖏹y o^W_b{Kb<=\ШlI^dvZH\'7qNz?瓽oooxoV~u/[qz>#RM96[3WHh4ir4 EK/$Eی:_Xה?7|o1g q[?G!}ܸu6,'ϟ5;sN8wEzH#|"zSU}(.cp!#XҸ;SDz~>޵&h.'w zgGiG;jGU:vrr`ߦ`c,sj-&]ǯpU5n zp2F?5fVW 3\6 ׹r#_67D}OYom/ey5?Ko㹵;mv&3W;|&2|ˑ\nv. 
ytHYMUy%v5㣵NB@` $㠕b'{kٙnRѩlM.wa/.1hlgbFitfs zQn^+&7;F{: ZG7S]cj'QFDa8Q@4s_\<1L6CBĆxK`z-#''>6A_]zH:Fѷ7Lۋ_HZjm#1cT-@KK(1G3Հ^/ n#S,c=d9*BDkS5BfV 핅UXAN^ թ|0ELJjX맼Z1~okTʫicD?xRQDzfh61Oiz0*!s+i9c4|F^|YPC:ʌmI_W8kJMᙷ?\A2!.5~w#_ի*+K2YW Yoruu%ܻs&wH9ˍǙjjxMQp^ܻ0!`ǨN+;R Ays֓(ơO˃rL2RIC+/R)]K.qƍ/{QDXXXm%*]'riV2IVM~6tlHyz׉k [cȟڝٻVD[iK>rK^ WwW'A`A^Z.@,W#N^m#i^O`k?#?ezVikZNMlQ-TU_EM}+mu4wM;a'Li ct%967̓dEQM_44.ԶZV KKo3nz{AGT}DCvZ tcyR=@9;;1fF۪p}"aRZ@7izTZ{@X<1Livc,Y4GL u@jZEzhC#}ǜ䍭ރFR<7;{J dLE%hZwF%8LoGg7Fsi+M%֪\4 l0y- %$& J 8BԝJEC{`G^-H?=ZjHmXg跎Tܔ:,6F3^n6UI4 Uk*骈FA[_+^NxHQyc#ERNb\:}] dt_Hkc Mɡ,Єbz1PX7HPRٹ1Hsi-b s 3kfRyG-c›2gb22s)ݣc4ǓYKyJ4Yu4TgU).\4-XXc2E[*}o'{ /!#4UOoI}qY^"cFY=)^nbO7sT{+cۖMlZϱ3W?\7?CCӟѳ¯oc{qQ&Әgc+W}09ɳPZm:uR'/"jkcd`^ǿs:G'X@bֱGE\!ELrr'<ׅwAێ|чy~uSWh"8xy.{ ;+ ϊu?wl>/[rs1xain;Y1ݾΖeEW|z;WR͑npF:6qS3xd_ü/å/g'(Ż/tb,~>-[rQߥh[b|O~Qɏ?sK;,wMC.Α_bÒaFYh1E,|yE$o^ζ/p~&9x*?걥lw~b"=mkvv,_'ʆVsCYӛ_̏|R=ՌHnyLZa*E(Էt\YDaq%4XN%s$.. G3J[Obt ͽt2;4&W@Q!Tv \A%~0olmbGYTj<Xr!$"#cI EtbRKX4M[; >a7'efǓ[^B|H*Vy.IN bPstyT)TFd07Q vTHw=!Ѵ`N,!+>"]2IiNyU,^+d :xZW;]#.8#4-WpNFCh^Xk9Qy uラJ[-t))l]Œf14xkUʘ.dWY^_.<%hS#uYllv]"Y5Cc,7Sy<3ABdM *ݛ@SeD6KrD0Iy5L5fY`O7M6c Nx`U@[!!TwACsE:OZ=Ŋ6]4e ?HbN U%֊4.7d|z=  7țᖸjfI {R?r<>H;_LB;8;6>;Ʋ?ƕle5^fgʷ?^f0ޑ:]#_@VBax$uerz2oX$/ W%s16><&MYU&b6 ~#R\y=_~{hgՆܽ)>ʦ Nmҙ3\"}j҇uwWRϹb΢Mi&GHH6lPv, 2,C]rbbBNغ}skEZ7Ʈ]Oı eǑɟ"4ݧ}Ǎ-wy7/?Qύ[ܰ0B| Ep mIqJoQW>?é=/Ņ;;_:y 7N?ɇ4 *#޸ƅKs8z^۰ C|ݯv<<!ivXaju47@YA::;ԲJ|]PF]}fZ}v!VMx9;(B!TN[[ U%ySPQKSS#5%fYqAiO|6T'hjiij 79݃xvZRrojj*;%'уls%--JSpF@JkidG1%cu%ByT44 ~GDÄ-Zh$B"EhFu+a4r ¤ ᡟ4WIiMEAu[} /G'J_}c1xRRPL+~FZi]P-b.(IBidEv>2b*2j`&0WgWbcobѲ!aKRLDyzrE$` 8hl4ՑF/wo~ KIApFLT)ck L>o&WYޑQ=x`hN EIĜw ,)OJB=W(/j_ SqnTfm2}T`lfS /|~ |UIm75/P ]C+_b}l*zUT ah:?Oc߉+lLSn(kcPK_MWn0<L9~v`|}2%R{Q30>`5~/9?\+M5qViF̛3[iɎ,ibǞ`vʄ7&Ĥ6)iх]ZH>Ųxƛ?Wj?e@E`wǁ"}"zBT^jh+sQ.A'P;Ԛ5Nݽ(( /5 [bKIM"qwh Jf,#bL!m).nR yY:۷o۰RX 'wʘ(-8/T̥$3B%s)`\8#ݕ,]73QLFRǣNbVϻ1S]VҶmG^|ԁ8In罪k$BԳ(}- 玴PPVeeYXq |qBi;?u>m];N&X7>ofWPܟkaČzNyXfbRsYBݓm|ۯHA[ 7}HOI[_/Nm?IMWOq^BZ(2Q>:q`WUnxʢ66*J{+&%?vuror}ݗ#o7WcY{D\i3޸ 
#v7eyhs.sWXŋ6fyX`~u+nse`e쳈+wwvs_ŪmZ%uEp\=ja\6maFvw2{'ګ]Š[N<_.DDǛ)}9,'F`Y'NR^wӹ,X]S[㩛th%? soYE7KM$|&˼#ÇVVFa>\7|ƕ2rkǴ12ѐv!|X6 p~s9 yg1P6ֹu|7O9Wo_~f?YW%\]ۉ[1n4m礭}rvZ ˿=[ٲe;+.$ 񜋟OL6%5ӆg: LVX&s}Mrj'hcx'6^) ػns];sh[^q/pm0貯8cq!L}%ME=6}e7<͝ٷNvx 'i^b{'(W?;7!~/Ӄlfԟ %@[3<׽]xK/uy{XE"pKwu/az 痫qX6u ޒ$I$=Z~ ڒ~Օ&חYMGg*E'^ľ0x,p͆ޮ|f1v8|;rH~2ڈ?bwC1:c=QF G}G.0B{|P|\UUj3eˎ[ٲeNm9-K ^&f@lm?_p<gph3-Xqyts @`{b$1l )(b['I$I ԯ1^pKwmreh7y͛ٷ+;ޱ]>.9Y$3\ilmFh#mlF #a+&eˎ)[lٲeY6/~6ٷڗiV/2 NfKfVP0$;V~Ŝbo"`{$1lChs ˥=I$IghSjxw$+<ʴ'mݫv_p<gpvsTK83Lr#Н͡\~]mvkn%NO[6 m\}d6-[le˖lSm >VYm*팱5 w̽1{sF1 >൓.SIOzv*cζ=({.I$I$I$ ~) wJ8 Hx${) Aq5HjQ@7FGIˎonlԢdQ7#(8KGWv9>X__#+9#0e|)"!$xa(澦K:a5$z[ubjzƅq_ʫQiٱpeUlʻ0yͳ"KtH9IUa秸Ǻ;HҶ}a:abGeH-ni9p~ʷN-2􉔠1DD%G!/ΘNʥujYQd+&ٲ|rΉy?_O1dhĚ*yCw??1WɏLKex> #N:WѮppoJ,@u540EĔ::ʻQSTFݥ1WɞԶpЪXK٢žȅCۃ_mk᫊t) tRNS뿽аu*IDATx1k#Go%!5, k=nA&hҤ"KI[?F\]'\ԥOg)B@VFe2oߌZ%@ ]ffߩߍѫ/3bVaX4 0ZP{o5 x۟_RY0 ?R=0 s*jaFqR wK;bpe(UaL0I1X#!R aU5D;T%`qFNdT3#Tש1ָE;۔ MJeqYOF~6lPDIQ/PlKTFRcU۬Tgg\.NFYkr~v>١(󻻻nj#‰ Uz^A,QyDgX2zL!`ns_& v4wBږ0Lh4cR))dpy }3:],WF¿fs:+$j$e"MHi\RGЂRMJiFqkڄRG'% iRIǯb>V+5}S6|\\ϱ!EؗQ/ˣ(;"OaQrL$vW:py"N.\֟H0T(̵Rz;RgUuB3c}ѐ˧ DtxID4x|ģ]AMEM[qGc_Wq0=|RMRiY9 ZQ : -]3zic8N&Q'錙qta(fR$&(ڇN}b֢6HM ͚++% ^^eC}9dRԞ|!!d2XGyAB~AhT0 p3BU.Uunt}#<5Zê#q 88PzT_ DkiJ V R4Xf0aE+kVQ>ICxcǿ43M鹡ސPK !"+r{5RmL`nA.ܘ3lu, )?5t"ϣsZ7p:qpvK$|(=ȾIQuO0^S%YEJܒd|⻕g~hT.\86֨c' W1}G7v!n/}lw+hp?mg ?˖*b~j)EejE휵y.LYsSb<gh6ᘢ88R-1>E9KTRU+l.M@2\rRw/V7O.&ߤ2L<_h\corXf-5[_[}9- C"Qzś O_Nðkvkl(\X}8 7/ D.X.;V D"yM/`fhh}f!5''J1|pQT XzzB1]j]yɒG)(zp򹰱jx{%QHKﲕqK'g?_i3,9o_&,s^?;ǓtyJNNs!=pVr'R3@*&1|@-[)Ѓ^:DPJ(ۃL1%OP=+oNVa5WcQR|^:l !|TQֿjN%vbg˅V8[#ZC*YV$m")U ƚBy [I@"<}=pJq-F9[5K@.4\'j>VOdFmzoԕ~b V,` LU=0⫌EDQ*UnD4j+9}!K)>+,]bU#a0L$VzD\3ll43 J]+7ETRr#=Hbc*!B"1R1tQQ׎ Hޭ=6UmeD!@2B%)t*Œf,d7i)s:x 88fJAJdf2j%$[ ܢ9Atj@g0~N !"T"h0}]:( r[!NPi)*A^UHtrhkH h]Jyo=0vh7EM`ł{*K8%@¾ #)RC &87S3!YLAM@}[|F?t]nvZ{,S 2UYé2lrw{ϼDžQxk.ٯjn\f2G Q=M?L.[j]gYE!k~ g$s]7Lo$ۼZ}p[qn?\۟~{ 9֝tWtd\˾7L cNZ["ðnEcɄU-5LeaBrrY׸ƻ/Ļʕ~{UoVqC-V!c(:Anj{va/r 474 ]> l.O51-xwM5ѠzаvA7n[o,`B%_dgA9DC&$c[맨 
ȟ v{y]æǦ&I!◄q#CAjܖ~ZADE:GCE"Q wH\Vv-YWYv|k G)rpZ!j5ŀJw3Q`|!b5F&4(V@' pU*URS}(5T !9[}.з ŭ PK812=87Y53_Y<)vN%PRM Q&Q硂J Ni餡ЬX!yGfjwM߽5wihw5_tZVнf e|nۄ -2W`Bdo~fb/SVjc?^_y|cBiHͮSo. mvFբ-}ق|U#Hs2݋9dgrZjyAb}RwymwKR{?'{gEz}UUe㏶4v=]6jҦUXr1Tt+bw=35 fdpX`/ki>}ⓕ%5>MXkcN)@I)T>Ƈ,d&g8Vx6))V^bMvۭ>B Dᬃ!E5Mg\NqR 92+c;р&y%*Z;M`WQw$ܪ PFRUb. 5Tr ߉ZYԒZTrQ(@U+w?~--ϧ)9E:ٶLPĝԡS^CBX!r^f6c5E?m":ٯأ;iG3ED 2 iquy &ECڢ(kh uH2=(PrD*ḂP932IE oes1jv6Bwքo\~8=u잭x춭/S`꩹P/6"'m]U dޝCuuӦ!C[ө 67{\*i5j7抢(^4sb!!TUc|KkD_\hw,bA7l,& !'<<F~{@SBа v -*<0ei6O_-dƴ[jY%keNY8ч)|z,! Q"'v'bfj6X3(ZuM^=dǬ_?zz&?7=|[0.g佾Fgv!e;?z<Ե^1e _ =<7zsE;r6~ܼ )Ռp'q+@x#wU T3LqA*H@T<JaO罝cFF7\9= 5;q.VML]v^. u@O ;7p[?̾o9͡wgN$ļQ9{Tw~@@C]o;w냃~{H_R^ڳqvkC]o^$W[x{m6+|b I7OHu@Fbh xm["A.%qSl)lX=K=OOD\H+%ewx9hLC˧Iy”'Xs8>Vf6Iz'}f4 Ffg>iF$fJFc0B3ZTsS[[[o[3L~vGo?+O zflooCg5lIFӣo({7nhww~ꍹw_=uho*쑮oOm9L t:Sݟ9P(j+C ?Q#sj8^2)ϊڔlf" 7WlJa֧H ik"aEN ҩ!(a&Kѩ=&' ٤&p 蘬fl"fšx94I|ċHA C/=!OHa$+Hm\HO~x]}ܻ^/}H}ps;;Zo|} RX2R~>:k/l=973]e6秺j..62;DH;m5TP3q68UBccȶ﵍~D[ a.s/={maBjrpTL<'@EN91"X΃xMs,IpI.RtA=>W G+ Ir$k^(Њ``:@bD>D33tt= k-/廯]ܽ̓8/!]jdrG/xxт9_<5쐋Wgt}sj(QEk7Fry9&if"-1LRc2khF9%S\.$#Rgco~ K0i.uu-//Ѣ@LC)9!"E@:KOڊXbB\,W/x#~qLrvҨ)!P[(# GH7+/]z@_} ^=H~(t  oҽ!ERC]?=8h|ġ.@JQGĥ,,\l a Bj)o8mJx AeeAmjwr-҃R'$6gE*&tJ|p83`5*ۙɻ~.EDKKcS`&!E= —XH,NN$ 3 DԌm%U3J\eHb/ Q[z.سS7{-v/Nu9HWgo?kĥ{_;~{\(ruOv,>ꕻ0џ@zAy5Rs +Mj).eaK] \G#ZRd&\R$t$q*a5A"xX!R\WfJ^ .hHǃK@B/Hw&8i2ե&@j9Nm5~[4ʲ",#c@0d .C14 LPOgtEQi@He_؊JԱˣ'cWϜp.шkW$OO]y=#=>~ɻiC^_>lAD漏WLRj͒Mao(-%(H$X0Ţ%ԋm)@JHAcnhH )IEv\ ec$T+q qz 5(ȓӋR\KXÉ.RRE|`>Vb!Z")/Hxx|IlK)JVzQ[v/4t_is)K}PV.?ԉ1~°+4|7~uEr)lAg#].E>\y> ]SRkli-OB HX^Ly|)C 9q E8iWurR0e`!-*b$Ә8kO?ȧwBjqzdey> awN™8 ^~״/oUU-y& ~\**;}"-HdzQ]/F``ЋuRbLx˲a!:BE*QџKTUUyd7 ?.đ&/qC"Y؅ ~Gw!H>G(=ѣG&,-DT9Tl|\1)ZZB6 )*UbG}jhnUUU1cBֵ[_jJ+qMس*Х=:< :S_ܙ+ Q/sfR_w%rZ m2+䐨PK0-mwUUUW|eu$a0 M XPR0F+\ss$ey뛥ՐQ1Kvv$5 gH~}T8-}r˶s@3i³i/Ƃ> XȧH!e{[//տo˺.ZDDju UP_@a.șYGlB#>obqJ%!A&ZXJu9o: d/JI,0C++!a B*+N mWlG):AFp(o_<.4qvsvs^n6\β;.Y!_ulf {A6e&yغ:RYzHg!5#2Q%_hK^>N:T\hW+(^L3w\h+2CۜwDK@ԍk@x6r4!T$^M 3%(Q.vѭ(2v쭺nn 
>ob/lUipӌ1폿_GbXB2CZ 9RJKkWHUbw7Y!q=*iŚm ~d'7?e"vX*w{,8\][3E}+S|[ki XɄ@>l[6iy7C  Q$Lks$1vO-H(gT>y8)dw*w*dbq-lZy) slũC-r,C梔E?l?ŷEq9\-4CLmܳwAl9,J .x{H"j(d/EtQ! 0(A8L 6&M.`' k=-&L?>C2ZA<\+d_+O%3q]ϋpQ(ğ~^+ (+kQbIHA@3Ͽ~gi -ER 9m$6~HlH $$II@RI@R@R )@R )H H $uS,?I-+Uv]^JCc#lbHU~ -vWFW2ށ:?n*5HEKѼ0tgHRXROTn#]*bߕ: &󙨔En"0u݊$_3T6mTa>B']E5J$ݭ Z0^%NZ%UO>(uM§W Ar;SFS[r=Q71W 6XFߵ髨-]ORMAy9XN>g7jpB[,*JB,wA2 i %MWl)7gRZ0U걎Z=(%ї>i9X,smYnr#Iby++%f8o#C4*JOR jR,w-ci)# k"ct~ojTDC,Ȗ Rt/8.\] CPLS_3T셸xj׳B[G*m}JRc1nJڱgBiOi_9l01ߞSU)WG93mp|;#::UZRj1xa%5ŻzVC0 1Pƍ#h9Ktr)5`C!󹗒!!EJ1ND5Lɏ>xTc|bbddbJ1tL` y.`\T[ >WsrVT[)&VQ A` `utt (, x(@9*Y)j>H)椬k5R*5'Yʿ.DRjReӅGe&뼕_jrMVV \ -FRA bzJR]O4iOYZJ 0F$țqJMɯjRpfOT{y3|q9fh58DjI@,MhuH"XJC3a)ЎxNat>t){V <#9-jTdq%R14@SI`Iͭg&`͓|%^-~|g#/Ji $foxdT*Ŕ=sughqfNs{ho3HS39a%Ҥ-Ɉ#O@Ϙ#Am䷎VRF4jJeTt=:?T :!),Hk jj G ҄Q4@NΌ} kIR_Ъ^.*nӷ;whPUqlլQH4ێ0_oa4cw訫~a>%~:㦉#˷>VRYg6U&=gfAӃ"b&.6u>ѣPzgiso ų5_}d⎱*kq&.A9,*j\j*.}=;Ś}R+ɤAÙݳR_j~k53*eX+:W٢?^~ dzݕu;YՌR׎-OVRb.1'?~, -g6=(?&m D0]yue)2n#k GFo- rZg$x`>4ы3:#Gmldty H})pxH=nȾk4ihpA", u!&.#>8-S+-SF~49]Ф$[$Qh'#0d2RKS q[gu">#]CoՂ,K3)d>!ÂF]A |]iɏ`٪w^>lJ`#&SK6Q3|ӘJ>Dz**DUC Lm;YXBhri5J ,59K2%ϗcȥynz%,:,τC.}/lI%ٌH0Nz*Ď.批p'д[g0wYe\[:FpjG@cЫ(y٫A3;62ĭH/aa%Z,ʅ!].uWr-в7}KUCDRI-/5+H:et}iPR#2\򖑊#ՖW>@==/=QxK8DwŒ6*d]7%D;R5*j!+)yNFpgb,X‘ dszC1KҴ&8x/,&ǫRqmh{k-V UZ_'yyNWu5%XxWZvLPP7 M^Ri4skTUbј.i7e䳋i.Bê ~@5\/ Ŋ챳[)hd}|5QQB͵rjqD\2y&NRcԕ6oAANx&TBLkՕR]:zEXXﲽuHe=CqkZcmTGH͸I /DdwwD ~7 p^5`5 J"k;*lz1wfZ1.J)aǪGbR- ll>LzBl7Ei'lNTE¢o1)#y7e:;_:3*:tg{1O%q*^ Ȱ nh﫶DgOUºKyV5Bu%z>fCK^x68:{ݔybQmuS}Q1;կɴDlS[Ep3o!yة Rk~Tf@|03 ~GN]8;:{Ol=k^sq-[G.ˍ0i65z`HKiֶamf6dzbX! 
vw`C"5&,ќ`3ZIK:pjź'ܨZ>ӵX;$RR.T\vx WbHy;6!ہ.Ȯ|,TXZPzz^E;U6ҪyaWTEx~QTjsMfE/ъGWc^3P[4 zZ]o:xqiկn=I_UT51tc.6(ʮ} )M D1Թ |Ks񯔞#Es ;4EƭIͯ8Tv;VN~; JHU0=s]C ,cv1 5@(֘}ְ7гenn)ȓ%) RuZ9f宭g~;!"9B|#rW4E͋9ȸJ¯G5mE/kE}:9 6uQ 6DCf6]I:8 s =0ApNJRI n vj6v2`^* z/]_ͪ09'i&MXx/rTKR4&tj{ǚbI<|mKիWL]sR$c!B8r7 RGXt8,G Y2OeRE}W~dB"?0}T# U= [5Ft@pW5-OȾs pH0d%mb;`EJV׸?M (Tijëd:hDܟY4!g23})!a)-7v'3V[!lQyGj;p*'<؈"yE x0/H2⒨C}2,/|#z!|<2EfUT}VgxFjzz49cH73Ao6) VV*9\_AO2v-CD4CH9WKWA;g2Z:WOlR8gǢo38A;kUA)"E^~Iލ/R-J'9u3Kn͆X$g;h+UuhB\9%sMH"Q%lܮ<@xm筜q4ټRf/H %{s ܡk%R~3Y'TKbŬ"?=qK_6LcH$}D&G+zv9**[$87:cv47HᰗdڽX1 _E6}?fyѸSWuoŐUB(R/\@J\d tX2M~sIy5܂< ߹ͧonC}߱sG'\!LU VՂ]@gKXɆf B 7fGmxCt_rSJY$zv:5{.n+)8jJ!R~p;v߯+<]$-k㵳+Z %Z7k_ M qetBkj[6ֶG-Q&xm0.CP+=H9FK F fʌz_!D1ݢ01 \o/8GEY;t d (kzag 87D2L^tƯzሠ&kvwכ'aאG":mA.U62 2YKhơz-҆x5}bW)$zK^7#La;:r -L4!0gh$Jol^hMǨj-^hoʭ6\r'S.`A|h(k(eV96ɶG\!1RFCßm? #,*-NysGJkҨrhjj0ѣYT`Y x ;hZR h[*#5AC 2E-_ HJ@ZϣӒN%%)p}(x5B^d&,,I1iP?IHw)'c#5a4OT(a $8ĎuƟEIBNRbA㤓RSRT@I<>tG#{0tJY AZj/UKP2) -|ݤQ돞J&NJKY?z.Z'6d %ԿNC"l=?&օaQ69mrz¿sy\G 5Bocq0tpL㏦fxF Qo]j&^#b$/Q6ђRϤ)?KsIKwQ/iB_voGJdp)-4+| HuHs=xEO[I9Jw0W*^H ˕.~,à-8pBOV?&윶?)w;zX> _HK)Y])!㥬_¡o1wĪ?qrm"?)Û?)w(J+=$-?x#E;H( հA7k)i$R <I6"dRxjROYVj٢֧2RoVON]$12 )(a٩ J4q>B9~&;&, OHImR}fqXӬY8wxyLJZS${K|?G{Jбd#_40dsVģDu,&Ģ,atdd"FUTsDp[+{-\]IH.-5zv}U_-r6eC':]tʹY$IQGQɰE֝#i'Cuvi~y*(z5s.*%x~ǶEߚ>nV<Rc;TC7jg| :"Ayюq!%1 sߘ@HL_ܠ[Z0tmEg %z)E}h3+yq=hW&T `}AHRYphSfM7ںΡNoCC`ٜK"yOHU Lݬ.{jt*z? "D߫o^~~q~Ay iq\ᘝw@l+-B]2Lo3)WIR.<)dZ z)`:[2=(Fs%zUKflI˨1G ^8 b!-k];>:vP)4KEԬ 񈇾7?@jElƺ\7NF;"UtR)k<'%ۮ64 '5\-Huoyaqޅ A,5Lc1CiXX ^ 4HETDx6JGH ƃCHM KYPjY}lp@.fdg gm@!6r-r'!/q2feb= үEpXb'ԷfX^pgN)kAj6U?tؑw?PcAU!KU-Rc"0RpW-?m! 
"^HBܩOFKT!eR ǎ{'7x`\zyxF$8> %(O;-mo,'8CH8AԹvA2~kxodE6e8cH:#,>RH2rZ 5{#"3mJB*=%dW K&LԚ7p~)ONݔo@{Tc)y4lƖNl)[(%<@jx߫EwBrcgTGjxftu.RQ)H{:mWvkM*pn}(#zN6n| wp@ ,5@2 T=@%&,z)Kv=Ϭ JIOzjh%y3O KFtl 1t=p6 E鈊q-b=]ZMTH0AH"EA:SJ])+Y1>?%"#4!Q*IX&/j]gb띱 jYKeYգ,*:V+*e;>fjXB>YpB*XHh肑]#k": yaYj 5cV$Wf T6K5LZ {=w܀jAOLw@AkPSYkj=SG8g4PC C8f{zB#@tLs h4B ~!/ FZ _ZULh%(tou4R([.Q3@nͩ̀Xg iD!8jDi6*tV Ѥ;$U*|XG4ٯ̬w -(S쾳BRK ݑ@*CChp4*&HUH4R@d,t2]?] q+[~S$@K͑Pg!UTR%f+G$Mr(]$NmV+13GNNPBŒ(NVu 1# G6]BKE(:ЮEihnW׎^~=e$kGnsGSGop/uִ;Ev9,bz])|@jV)HR4Z+U.E{" IApqBVT'1Mc\Q*Q\BuZYŲ6#))ճ_/nSSEL9>Rq5P!Kʆ0 R2.$:,I";HL#u^~d]r%Z)[JͲ2:ۥdY,aԛ|2λdS)ʨr)z.:ie̒3td](to:icӴ2z_.;.DRkFƻ%ޟ2^0>adAGcw̭r!^/sӱ_ Aڶ+y 5(nR]Όq + R yOgvM.kԮSD4|s:moώNk:G3PR7=!v<*MN72<(3(jTIu%ONˍ G1TXI"k,fY"|PYU ֌s:>{jsa~5wh2 [{&s̝{;t+MJ6WGz֮\ ֯1>:ܦzz cPuݒpiQ$t[Frԩ3cf*j6վr:ȭ:~|aEp'(X@ Euc<.M$3HAK(?6Qo̝AI &zYQ3d; <~S]d*T0TWݛMl"EZJN5C?SO cTx`O]W'޷o9ri ᧓y0~μo5P"bR< \: uG|i|e4ޅւDa uP1Ro\X4<;3H,=) 6 ) HSH-'tW:,^T<!9|Ѝ&$<<ұ),bj&T@fo"udY)O>T*B* 8t/ F`9`T EJe+Ja!&!ხG0HI !jFLH@$4YHER [L/BjQHe۶nʰt\Nuleo"u^Y< __."۽ u+#|G &P0Cy*tg R44l#T,PqϛH]M#zċkexOM_"Wz5 4Ϩ(zQcUTJGj\ڿw`)Ep|` 5h.nsHxo"+?TU[i]ls4oNLLW3e=!u!EEnKe$4e8t9e+sI7<㿣>;dn9mwө-RG%_,:uf.kJQdt"`W Rh};zN@HH=V"e}o_EȺ}X |RÓ%#E-ָ Fuoh~V摭.F4@G} 6fOrfBLk QS$na(v ʎ UjiZKV釂-alT6*()Xg%։/% jFgW:*GF& sYK-iNVx{8P9L@&[!h*ONSpm%R-P_Y(,Dubf]`Z@ծQx홅*))w V{1mpQS 5\(7ugu8~p?)tzɗGђG*nGކKGk{ƂuoCe~ivpQ876K;l͛$FzA H??6mNOs,(?l}vZy%N+g^/H |n|D mT=P27^>{tp/vH5w@p8KCZ$BoY iE_ei v!5Pxj2]^TB*UӋGxo?G_xk2zb{ɓ'.S9?\ #uIWF-K7'a;tX+9!#c'(c! 
#+1]7Vf/|CkoGV[}<za I$_Hl7+ctHaaav}7:ab+7XC"guᅅ2B!ċF7g XMg0 B+ խnify8 -D}ō>h=FjC>Lp_ )g&R3߿m.Mv L5ܶB34R"%Znb0Rq:⒕u<\.2a1R/GZ{jkN9U)ɡ6VEqcBjN8W?0gz`o H# aqb x}S}ZP"sQ4XPa5~}d.H}KoDK(.}1,Dß:$=Ti!4WlYT sCRd8p,hcBfz?>8j]vQ1p޽y u \{9kS` &?~246~ #%MdMLF9I"Iu)廕YJpR(ע9{tF_| ;M*}R@#E ]rg6,['*hEܬp Ē<03eh&pS\v.DzO[>yM[ar=V!HQ6cǝf,t)&{w cpNF WQCpYܛ]_Nݍx`{Sר|ttb^`V}^ǂ6Lv m[]ʌkW{<Rm+OH ̨ݶs,mwpo`~geޕe:rF"D:Z#'Ii["s_x1$/5)˵KGqpLh .)ڌkFԙ U:gU%]Fئ^Z8=E;ȡȦjO"JZ Etbm=EW-3 /\Ka#yǹQp cY _&[X,w g}hmPYjA\|$EZ*%2㢇,ʙkKȿ -eۋWT\EIm.9v"ǰm䥋 \HQYwC!\w^//齛6AaS3=CCpՆ;ʮM=2ca~Ju\/s~2:}0R?D1tķO&6jEXX^ކdⅇ`,F #Haaa0RXX),,F #zua0RXX),,F #(C~/.bw31⮕uD\!Tqn_ H6 #%'1t -J|}T;R7H-WAʷIX)b׻*μߚ:VJv]0[C| +صvYәQmP.sgc(ö jJ [GOxoEV`ad$p.7$+Z!:ZD&TםΌRr8UBa<;%6}$P:~ATc#dbǵBPBЙ/Pj^5s0BCn F-+\(eK’<7]׏{7_xy8 W- 9>k/-:19^|ڥ9$ә0?$XEl0a_8<@Skn0MzzwDeO ,&~!y]YttYjQQяqŠPT՚iIMF*_ގ\n6sk?sBK}:֖'OGg:;_1[-kTfLzQ\3uZ}TCK/>CqA AAO#(+ҚE l,0O^taaj!VFZG";mݙ?!JN'.)2Csby'?k XZzC$Ayӱ XKRfd7t.DmYٱ*5"P1!h||t1?Q1Ѩ.adXDaaTMT*og t׻$jxBntGa:vO9L'P&X #E*xY&md],,u'BD᎖(YEE -#f.iW/ j/,JP4Vx580ż94FRh0( | %H(z2o1 OkK>LI%5jÿfs#JAlF0:Z<)x?WV0UƜgn߯Am&<+S"soל0vBK4VЙ6Z窡čE>]UNEcc*eTj2scRq3/5҃g;i7YXZ%3"(ߜ~H{~+apVXjK+j :֛Y: 3SPќCsCr ,0HiY͍N<#wJqa*l˒Y ʳR-U,0*nϗQ K-,:ꆸƄVP͍VsEA&Wvȩ;rFi%Pʮ )IP(aڲRoHwureR.Ltبhi[a,lyӧt̝>] h8rQ{4ݧ}[ɇޗʋS[y8rQ{۟TO}ɧnֿpx]R=G9<҉ٹ?̴ 9r.Odh\cS09rt,w~gG9{! 9rTRpҸ!Ri%[fo*!1fcVZ6ՌidS+yIv~S7cluJFL G|\Ks~G 7u꯴>W8Nߓ(xJK ͧw FimZq~GG1 lqC E\uOΚoҞ; SZK[c3Ps~o}L{H\^J~u~ iqV@B<䤐<odWw~PO0] p j,%4fR~,cr4:R|Kr|].<W~#8ƻtNF|k}1Zw_{(bhDbÍUѸnLadWw|;\VGFdP.a~,[AXVɯ| Gia2שkBSwJ_jb8-R0y@xyĻ4Ҥ27Ku}z=2HyR?0_e>GZH$KLӬ}O"OQ MWdyoN2ۮ"/b>Ȟ,W\V)kTpԅUvjqT4x3g _Xwow8-i|[>~yPh[g70̶ Oif׽`࢔L r_r'4OJ#ur\6RF@Zp<y`LߝDӤԣ[ÿsZ֧>x:~6{cfosf9bvc˟:/ridd%88-\+g#|ī,8ҼI8vS]!EG [r ΧmdtvIY"c3Ӂ~ϓyv}X%28#^"c)scԜ< R79O_=7wg /`~gfH?^zow{(.[anxr9+3d57&v!}?t)q_ }|{Cv. 
CǧsOriۼw3 12q G|\K3 ˈ'_ς i(gc=C~tppA'AVkY8cS,,-!΢ȇd$iY5o(N zGKI  ltwFvIAl;-f i.fm`9u2K%:48uzAF n;%1p@">Gbg?D+Ka{KSB3A}\ho}^ M~M/~WdT_PP,;(H^>DovmCm)7łvょIOji'"'ƣ3g*Q^v3 -ֳ\쀮EÃca t98j!3M^;n˶NdsB Hz\g,8[l鬒s,z-Ap7ٳp YV[_v-2rfag|ۍΑǹ~g卉Ƕm6xW+Ouz|xp WDݡHF=Lp4]$O?zv<8`&9_l0Kv^Hs ca3<9gkV F&>N; bo}#D3l6G^h`ߨ3v:5=FWcKMM6{n .oO@i\8 6C?:f{P3o6ƾq%oF >SA[|Ez=8 eEg`%=&&<( m[{̶mZ`[YsbB/?JH@\aEg"8eL"?_{y.WjG Ik>#Kʟeϰgh&z?i{KFbSYk.ggq:vkgmW^ ]oQtWCBg;z9ףfF Q/1i?Byf?ö3&'vyB.{JG>fSZ|n}o3d*?m_*/fqM>'w e㒧8dok;"/#Zݶїu!Mr0s˒#׬&HcAaK(s3>28`࿐8B#m8,_9‚hZ {,a[9OT;3p`6f }6) |YgƿYJ~[Z>x7>-s{iy]8 \w~)!csk9: z> >#T֬3rgt ω#J>1GvVmBcfacN2ox},u{!ֽ9e2]iu;lo-أ` @sB| Tًt} +>9;p)$f8̞oƬ郟_(xLvvl^} SĶwT+epTz@9L`x`j4~|\p|)~96E'` (s(x0}W [x{]6`1iԍkV<4i^w3rXޜRv:xuǣO_Ί@#\E+K'l9ږxA͢Hگ0TKG?%#~7摆k3^赕4 PxK j2`n9fp3wFNثl1Sm#715星&58eOF}\Xr/UP&Ҷ2n? )qy n 88ǵ،{RMKto۫i_OƞN'ğ& 7`%ft^?'Ӭ="ouwݫ`̎1~ߞM9ïjVO2^Mz r<R|9z*| ı?*.[)wjFmdD~= (d"^_}~-amxشMg{0b;2[npk,o;cho C2Z +k1qquTcCCSJ} y,~^% ;^;7m Z˛9RWA_s樓W|R;_g6t<-9P]Ǖqj;1sH4^1哂:w c,*Ҕq;_Qhd3־;_9xl୔FHWʡ;?$8Fgהk!(G=D^Zrw+0@(YiD~ī4EV|-kfLw+Vc( F<`LYa;E}_G4JՈWE'o7L[Qt,=|R_axx;[ǽIV>paPD5 (ma/j7/Ync}m]ˬ+UNW݆!<1uҕfye!^Z~ʾa׽|\k?>?6z  |Q\o<` UbՇſBt%-m'݂~58p.>X+xʼnK#R_+ؘ@5o3~Ӊ;O샵ǗrᯃOeA%]cSWY2Tp,7m-~WQ^ykN҃f6fp]LM]^ q:F g&yQi?PΣ=8bz|⋓d fwgtɔС_; {CH#'ɐ'NAXkYwۨc@Ǡ1}@a8F!ՑpYF8&:c? 
ɇ ؖG=\w//YuVoC<(oJ>tvd睿I.أ#=㲿f7ZF?hޔS,Z?*s Mf'_~83} >P﷕%=iL|4-{ێ׳~ G&˄FZKC˭:7Ρê-3~!>Eg çW|Gl<)o=mh3YyWLy??S; :Ĝ@`kA,wV; N;FpG ,8f@ѕ^Rl+.wz_Ia;C~61ΡΡḋThBp2Yi{R zugřFA ]AȨGU F,ћ`(/ Y?P\┯}!j>FI5q3E:;In6RG,a2B^Z%5uYUǗg4A`w&fi1}BC{/3  k_%uR6Fq ps?yp!WcH10[Ćb4e3[B p)l톣xVtchNDgы2}O8NL*0lA̎_7:f^?ݜe|Km:|nbbz̈>͊m mfAuV63~ǿ5j 97`4'~νM&8DF7*3ʑoNyqpkX`FҺYa3&5O==; wawp:={;h- wVS}l'J58F9nNwsjs̮0>-mV-`pof֎ww2]CT,6 N{[W fW!4`uBoHm?Zq}~Pnx88#wNCy9ۿ;G:yf2:sOW akih=#{p1/9jVoI#1=`zpNqFi6p;h1 1/2fH)G-/_$*\ƖU?U><;;Kwу;Y7fקT}tv 3F6] h,QYA3cg,cNt̖n_:౮>-zH@lh0υ5Q^-7Z=xG[vAmbAG'M8:/ 4끰N1D^I3k=38s!F Vv*O$˫ !3j._ޅU =(i;=q>Ǔ|HS=w'Ҿqc> p2/xcPG+OJ?33Z)+lzn@Q9YܿǶ@-KplUYR0yKȏr~&q0&k= C3?Qk_GZ J;_9ƴڷSw~smd^Y $c;eW#]VX`p[Nzy`ew 0T~2Jw͍?28O1 |=ү1 NI8_Qeuy Π,⬡_e#(qf1>W4ȄeW`Lp e>w~qe5fBxJK[}~k[#9e} OJB<Zl|AL>J;4rUDZZ|~/IZw͍_ ϡ#j0i|ø\>W~382P~aɫX΃ࡍ2amIUkUx~lFpz9-r&/gUqUp:N/34YwJ_\11xgYE/g+/SMmcz~d@25Yx,YOU~,wo^ll ߘ9F )#9Oiiq(y}a\W cWdAK;_$<Ԇ4e5XƒhlS:J^ʎ_gn;*9PP?ǿl7fOS.<[._N_I7-{snl4 o#8h%.e0ye#b!=dGnh$U/~#%8n3CYPaK۔)a{I/'~v(XF7Ah$3o7ݫ/ecQ꥜KYcEn!_vM_>I#, V+@ z0c)-d(Jw('8.]^|_ҎV{yoȗ[[S,B>B|] W7`6ч47{Q9F!E3!Y[RT}FT=Ŧ=ujM3e:cE<qESb)یѾ=umq^2o~(NqUuh)& up<--=N |> PGkM}Uuq ij\=CFZ-ߦ {/78sUm@. !R>qWS%dž^8@Y~+,]j'KT:c \ Y|tzuʵ'Yk#{kmҭ۠> k#G[53pA׃u+[l_xfr)xJK+O}-u{H+ƫ>ĥ 6YTu/0xpjmd!K{&.]Azq(Ppmۦ_~?28T5+O:J\&-&O2ǘK??/U(̂جz? r!!@YH۟X6:jCMB ~`k0sSL5 LlxW~i f!3ǸepyC-/a{6%(Sj#g:[e]:y%u34̢@<Ƀ1 uddy@ǂ3ΰGA-nCҗ X <0.7>`yjY7`[,;AmDZ7~fc˃S1xo}i06$P%mnnv7\/W7W#p00e'[{|)^~軵JW68]]5%>! e/{aʞ0,Se }xf8oRy%,Ґ - ݝ3=L-K Hύؓiy#a7LZzf ,??yGoy ڻe/W7ׂ_:'i{lp56h( xG!u@+T>V88l'g| g@^0Q[EAH̠ ?SG7e/-2-]\/)w閇nFt0Ngp9tO_VwืU-e-78?R\1 E 'XĘIuNBysB~'8X &ZX5Tc"(- [/Oh1u/A%0,5{rσpgTb. t8.gXF~4h(ݕ3 / G daC g<9&YZe~֘`WXj`#wҷؓK- Qp 8 l{฿h1ȈYV7Xۆ/a_*¾ч}U#)}7$sXU |C=59ooLjƶ 9.!?+8^+}*{=Eh`ߎ3~G@4fN7rtR|0;Q},FK^=.5C%j38!*OJAwo|Mg]OKv8 {b& C-I_׹f ufg7t&M>_k,Xߌ_ \(-o8mƵyG|ϱ%pZu]jh4F;O{,FK~hEA([oOY <@3[cj"%b #}Xuߠ~8=dE5=.  
_JmVnA^Gs?\ߌ /} *+cIϺ/iOo4wF4 [C9 _r1$#[{z}-fO$Sd茀~~-+w E 6ooG9NKFUr=VTc$UgI^k 2X-x?O9GFiS?~O6o9>(L2kMxI;{zeur{%=5ii!$/s~dQf+g1VP4]._O*O/o;V pī8;F<ZvgZ|Kyw+zpVf˳?2VY^뜽%G:*Gw~{(8ڕׄkdO+oO.OPZw~kb2zkYC+Ԙ&iW:eu\7:d,Yu~6k3Ha8LSǯcW36Bw¯ǧ. ➖f3fqyX,:vg!;%WO]h ⱌx88[y=lMW6# >g=6*QZ|ظc;EՃ(xTLpbیW\ygow~"coy񧵴:Y{Zr|OBr~sW/7xb*_0FRyW4~#8 F~kc\aOO KVqz>=x+ucs~?ژI۷wo=^(ʔ-8ÿ?s#8N2fěYYCWkʫX*QޞY_CZwJǏ_ysj&8Jtҍ'x q~Sw?|#;wCOly*W2U< J IyI\r~rCܾ)w?skOj·єGU5GW(|t~"Cړ{tS7_&li:}"qȑJ!zO>s[֟ݧѩ$lD~>qȑJ!RCa!HIENDB`docker-1.10.3/docs/installation/images/linux_docker_host.svg000066400000000000000000002175131267010174400242200ustar00rootroot00000000000000 image/svg+xml docker-1.10.3/docs/installation/images/mac-page-finished.png000066400000000000000000004655061267010174400237320ustar00rootroot00000000000000PNG  IHDRl iCCPICC ProfileHWXS[R -)7Az;FHC ؑEׂ@".lIN9gsfe Y\a3>!I'l9#(oDn.vU96HɜvGa.Nכ+w+ !Ad1Nb 1NbKMt7>,09q|f;Ƒ@lo؍@ 񤬬S!6N!NLbci.!ryg9deЅ&  'cvCH ?9,b%8{1& `xÚ/ e2bd΄MxIxJ"<&$t`8ʅ93O$QFbf lpC]!gx07;hZ<ߏ9LFY$95gϖ 0v;]ZFNbMe'06[[F%r7lOМ9;aM:QCZZzSSW-CmZCu\T}*3'LdO,xh= TT#RcNCZ͚5ZZZNhkӵݴyOj`0=rf;sPGC'@GCCgXH7F@^EQ/Eo^ޠ~:{2i |0423\nhH((ߨ18۸ $dI)jjgfZiz 57m5D4?zmsyyy"ĢdɉM>7e.VJVAVVVoM֕7lh6~6ml^ۚrmlB۵}wwwHrpQ1qy'bOι·v1wp|]Sz]u]Y;\ݘnInݺuY=<8=yx{|e%:{wS/կok! 
8`]@@v`m`C `jpTpEaHs(>AA?1aTԈSFZE.<E7}W11X鱵|J'/KhJ$%&N;m㴾vӋߚa4c 3gf<>K~k$BR\ޤ/pV5k(90yK ۛrKR\SJSOOsO+Ky*xҷϨɌˬ"g%e+3fϝ%0 7f s9MsYd,EԓWqNs^g:og~-YtABυ;!-[\o=K)K3^),(-x,nYsf_++^| |oEJW~+_,,)+V::eu5Ukkkos_T4w} mBmٶMMM!M7"fWe-+|zʣ6m%>omGCauNμOw:owZï֡v5uh}}70?Q_r|{j;x#[ҏ7 kkvi>5-:-U9A9QxbdɡVASm?}}j{Ǚ3=}[.8_8vb%K .bwh}GUMN]SN\sv7o\vV̭;ùn _òG4۾xOQ{_>y)i3gϭw@_mye_{-|=f[5lߵ E =z~CG{>9~:99_H_ʿ|mHȈ%dIlhJ oj%Q/ ;;D f !RT=jc3F%'Z o1##o5 5U822ud.H.{X]|W_˧kxQv_@IDATx]`Td{%tE@ @{g "XPi6Ă*w{ߙݻYg4ܹSΜ{̬@8,!^םl}݋I@'?9W^)U*%3gĉK"HD8y'"ҩw?fi*SjG i(t&Bi(6P/~*Ph4F_@R\85^FZF5,a j䑲F '>w9#܁*WU~mȑ->=ZDDDREe!?!* 4 QqF"4#D(R1x7' @1kgN\$K9AF]'I9@QRxV>F@#BX+Ys80LVшSOpgCh3 <J^=ޛF^CQeyWU(jt((wηlXaIjE&HlfB(?7hY–.O)G<~.~y:S0,3FY11Bh2a!Ϯn'ԣ-Lh4i&/GZ{5~)f Y*,d)N55I້70Gr`ٟ@a@LLj .̅ҋK;8]Kt:9?/RWlTTUx\,,H R)bec(|(WJ5$ [#Ph~'⃰At#V o>e^s ~8PV.()X k׬ERj:5kD0o8c;S+úѲ]GSm)^ܶ y*gWhެ Dry7nC0;2`*Uk<#6?<7Cf[Ja6@ێauhج%o[((0kx0a# By$1Q|&.$[aucֶ;0A&>N"MIX4US8,J_ kr̬"R`]LGj Gq#McZD7`(˸ֈtrw+šxEKɟ,111>'7S.@4սfa"j&ـ8DTQp:PZV§}0XeC|&DJ(MM[؜5t?-2;HưiG;C>OfbSO: 52y0Z}[a漟 eqUM(4oMި!KQ*F>:tGBRl88<7/L/(W4jI|rճ^6Iş b=V?59 .UǞ lMX`TOA̯ rL\C!-VIg]-,&-R\[#tF7#9:wʐ51Pw#tƸ0H; )+&'@$A;4hʗq#Lfe ^b~+|2;d9 _.DQi9Cca~\#jq(>|z֕iKKP.~wU`M籨8 ~ whػ~)WPR=z45_g)r4VϦ10$NUUVIJ>-;9zYx ~*ۢ V-|Y;7c!|\7%+?i͕q+0d`cXލ`>S\0}Lc0\[XP0eSOzU9=uLPq Yo\Bϋ|)?Vq қK1 d&hakcYXS $䃓8,V"T * b#c (څ*~9F `AaV7SGT' EjY,.LQQʢxJ-^DQ‚Q2ΕAAB4[y9Xd=p"6?-Y)Tte)ދ-D#ЫS+y)>; Z6CQSx#S1p@oy1{X3gy"XL|7ԞBD\~ ^w2k)nj6n 5(u%ޙ>]DݰuJ̟5G=/7 ѧ!v^~xqhl.w?c C׎xhޮN;6'3="7'K|m;o>0@ZpQ3~oɧDFd|h!&-Da*s0ٙ*`&^m nZA,ۺut22%c_b)HH3a֬[qQho\UYΝw10e<|Kк֪è~j(k'~Ù.Y$fyB*Aԇgf2"aQyLn@ݐS<J6_ё0 ?_I?ue? -RQ"ӥk0^ }nh&B$[(*N #fa /,XF>3+Fppz t%dT`-DY^=JUS \mU->oXc:3F16'Vo| eHPk^ɭA4;u< `Ũi.]ڣ[omDhK0? ))KҔ46? 
e\/N+iǏ8VX-;✳DÆ3P\?L-S ,qs"y8|geKF_|,Gq֘ڞ6p$d8bu1,,nP3'*XVlmȑ - :-R٧Jo@&v0ULƂ U.ׯJi~?'9VƆ}%4k,\DzZc%5}X[ňF|ԤqRE+}'Rh`b$Ҥ9N:yGP\_xV)6Ѯ.!|!e>4q& (P<ڤ5{2qSF ᗍQXX ;UKxx*I!$:% ǜnNc)aɲyjvnX(pZQQoK(o2Y--KW1ensI[nS6NǭRfW/Mv%I KP_o*msQِTmLZN P@[^9ResY#cЈeI|eıKߓТIC;yڧ=}N9aރphNoXfPW[Xh##( h>P2ܐUyD $lUh“paP.B'$x㥞ԲQ  ,;?"<&`'6k *N.OjJ}xT}]z¼.Ӊ=\jS o4s1j9%zT#~jr8?'5Lu:QTHJyCTG"Pmy;v!OT-Ih _~6;2F{mU/M3xF xJeX{͵h<3gb5W;j0x85Әnjwya cԜ6W2 BTfm|03fHYhؤ0!<2_zڙAV~[y\ nHRVQQ<~=)T2[i#,9j a,Ԭq" xWS}cJr&.cQ\ !jܖ5 š4hEn&'rQߎڢye65&qPSRϙEH֠mA$j9_]"-'*Nl O%MV!1ye|ޠ>ʨѲD'wnJHHNM_-eY:N?_c+ھ_}=F˕_6 U+G 2^7OlVVVq;N;< ە׌г ٳ~_կ<5K |2mpV.ec1e4 Y'oቛ6'N:u,$d6aƅE:o<9<1=jȱgz)$E\:}mxl,[ ^}K)Fg'ハ>ɪn5k4C?f^sotŸoi3Ӓl8N [ 1Gՠ-y#=yT4X4eƍU&Kqi$D=ȯD-[ F91𫮴x:SXBD{0G5_E/2@!ʨY$$-YPhJ1"ur'gr0"™خfM4qdVinko?\v}:>^u;.e~h@[!ܨ ]3"*!)8x_W𐾛wF=@?\wkLh'Ӏ_TU{4io߹k7m\%vp6>>%hԘZz~|2F HX\j 3Ʃ\Q~DQhA鈇ۀwBIYhڴ#Jm)/{_&Ji֬icعgn̕ps(xy ;W E/AiB hkw/mǒz&D4mΩJf4h˶YrƳ ^4LNn:iG4jlO!6EQqh޴)y\ڸHɫWbYVcΑ^gNv)˕W#9Ytn`UG#!`gíprV<`pl }DSݝh  zOiekAʧ”Fб_^Q`"YxmH܆V#'5 (Ј50#zwuH`PF"8@Jxb‹LdApAyxHOMhgNodk.ǜ6S6~$`e;R4k} 5JcR>F@#? `BO#3IF3jy3pfag BT|Aڣsys~-_F@#8LcL_3XM뮊 }77{rA7<|d4BfF%3d$#HfY@r4+`м-J/<+O]SxӒ(H<`8쇺k+eHrˠ`?U '@P|7hӚF@@ :hY<: 4^ᇔoD'k-pK>`p#̃FZFz),; 2 A՟:%)StP#>PeQ:(;Z^#am|T@G?^Q~j4Fߎ@Z*KG2T^#`hӌp\s5iנw0??/|w{#IF@#h4CEk"%&򧢴h4F@#i4F@#hY4F@#h2Z`˭yh4F@#@F@#h4:xi4F@#hMF@#h4u-i4F@#F@#h8Z` h4F@#64F@#qVHh4F@ lh4F@#h7fO#h4th4F@#P[o ͞F@#h4@#GGTntwt6bak0m2}#r{ʶ_W :~uޣ +ۄ{WU+f_?r]د}\nS]QF%77 :j>_#@]マ}%D 3>/'n T\;"t~)oj#{/XlN; }~QI+"<|Ŋ`wH ǂ?@D˾tl,^TV:a;f]~wm̼G.ϙQ|UeÏ~8GNYU] og!q%(Om# .}=w9>]KgΆ-=/Yv=6>^ h:"[g:X͒F(5MK}ycO]aÏbނ01q2>DHM`\t$_űnqlSZVT%.%ez+[n7?%180G8( ;X?DŽW VM G#7 Po`@+;5/'g 5L5(,m\~bР8qԣqc1` bs/Ƭb~,-D vS(a٬70܋p.@ ݀r/8;}6'@c yɕ**^_' 7*7G10U*hjGQ+1Ѥ?x{۾6JK1p=ORKŔoǴ/7R <0ePe?O;> Ju1Yb6|f} L Z=8(D`767?W(ŘxtGcA. 
#/r)oǺs]b% ؏=tK5W"Q81=Na݊p2W_q >q(3~Ȱx湧q)u4w>ֈdSO+.8Sa, zXQen^.8TEWނViN:R(}W۟ Gt,2|J[7uiv %0±}1ˮŵl̝,'c^~(+]W]f|KA.v?GcKnbތ^ӽȫm{-Fi {VmW]7)w9 neQm$< >q>] 37Ґ?mލ_cW#w\t {}6ױg__>u۳`:DԙKGoVx2c`5y6~߁(T}_v|qV밳؎_WgM{߬BV;7 ogDŸQ+k#,\Uy]zw_ɭSwawko~gx+xy8pHi7~:7O웟 0gy?sچ2hn5tM&> 2&_=OzuhDijOSWbWV"¼x8}xg#]})/\Yv 7]?S^ͬ9xhҫ(uSX!}'7DӄJ|<{8q9_3?].Evp\m• \_xSa"Wpec5!eL*X 2KoG@YwS1p33quoV˱r!>~ vgyѰr/&(X̧gqia7]u1),{Zw񣌽pr;na_ΦV&O|LаZ7awiTҕ} S#W P(WSbw4#MB6pvm)`ldm)e;q#ѵeCjąviEYKt3ϠWKoOz6gF{Ң|װ`ngYӡoSG"V=婽1jh@Hu@+J#ǭ?w-{ Q)MjR1|P<:pBMyn?e]?qGVs|tJx|80?P@ಯ0u(CDx2bf; {s_мil:_5NߎƥIhDMb<4ǃJZYqAK1ԋEXY +[[W|%;'qJ653g}Vœ݅x];7୙ˆ!p+#O>07&NiqB)+!'t熷#u8a#nZOu;? 69]''^txwGk䉃=г0O qg#%ʎ_YpVX|p-.otni1>V7.=qF܃(paנmAmތ'&jFld0—ܛx=0_bQʃϞi_G[}\t1cO-N.?  ~@c942?#l/ɶ*E e;=FsmYݥT]=D ^ L-8‹'[^<1>5k K;@]Ai0Uxi뒖RbP^QA'#v!.Xh\/'7HU!_Sɦ|2lѨ]7c 57w?".?g " 7r r eUUO⓱Cw#.6&XX_4"6܋Jb"лUL)zc&MV Y^:8G6h"VbATZȈ5fANVu ~/IдS1i6.m¶6h7VG;9i*bGm>Ǣ{wirTR`m7f/K[W⡘i8sIHHsk#1O~C(l`} ΁ ֑VuڂS)E PF[2i94N=kPZ>c !T.ó0滰6/ fq>Vs]R fe_MNmC LYCmQuNP&nYx*7Rw^~ m۶,غUE`B*{`u€v_;ՠ"xɕSZؑpBm7EWY~“k߅|N cqyiO˩ywf/Da]LĢ0Qѱ)<萃2S?mhǖ.?11ظw+ܴjNrA'>/тu5h8/ƝĭµrUQB{f =Iȍn i:A^q(Ma\< D9n,y)*\y$iZL}Yyhs Zq+&}[)2݄ѡO*ǪW%~ytF?7'|p9qs0'm7*wňέq?dG4FB Yh,f'riÖݸZ0} O$y|2EkpZyќmtErQ#pHNo'B?ޛU^U9XJx2qhz@, +" UpvV"AQg#PTڻu ꯍTuzbV㘽hӕa曏nt 52~ š0a52O%Zktj }y'~qN<A~Nj6Яg_Ӳ~Ʌ]\xg472 ɧFcQVU{ft8[z+,G-[Ҽy^۳>ԉ/gO|A"5VNVOp=C),@ea^xqзC]hW3>ElFHOkC4Q)=[c?kCN:ycMh,Q<<| b5ZuB)cfm&)'"kNbP_gN~k4mFͩ=qrmw~̜bdm< TR!g<6m.Nŭނw' ̙'9Uٵwu , 1)0Ww,>e dmX; #7bm#gP17 7^N=~wd nqTe5G^8ιyka/m뚭UIM䠑~jK1g"=) Ew'g)Mj\M##5l=Ŀ>bƏs%,YW{`hfxaU@IDAT0~=w …sIJn;n1St.BRnjbR[b„qsDaW?q|߬*Ĩ1ur8̿i;p#&9 m=w&KM a3#G73۲ION'n;[5K^{^Cz W'ix:^ xŒ:')ȜkI YAC|m;Gh";(.EKVǟ.mbpz^h8s{[qۍgnƏ>ŭ2Gg/V[FS_JF<M\xʶ`pK:1R%;kq^R}6 Ѳٸ3go7> ~M[|xZ╇&ĭvViؿ;h[yrd `84L`?,ơKt$1nX .qCξ=Zµ}jVCpRɸT$ #Q/ްu ҖI'}%e'4;g<פP;wp/|kB&_:;eGlɉe jю؏L~lՒ [nܲ t4 Zy+mft@6yYZo۶+wUmdy_έ`MJV"Z;$[X8F֍i&`.ضNnlמ^}ncךQ3uVgFBel%q*/Š+QPZFўF˗ĴVخ ֝KlaKhf8+VU6lt-Z *ܴM2P?.BxD9ڶn-FVF 3;#OO"Xk7‘N œX~5)ddG9Vl؆6]CS ׬nF-ڴPt ɽavټZhݩZ%Ҙ[8U5V + igFpÎ۩LAz$.4/ 
+֮MWAF}fظ5۴}۰̆-:b#}aS̞p e4LGzT5WE##p]ߏ܃E~2=^;Sm2~o%WPSPW:._VBJ5qr%geAkw0p]&f<-wZ{dV p\^B-BJM.ebgf᜜E|#5+^FPlDKf'qOMIpWq{{W1[1g iCF @>1 hOZggo{_.6xX0-DK]"/Fv}}Cʓ6> }+ޠ@(q-d-X_u"I-l#aWy }Yƌ>e!ת=~K=/G_-?g~DN> jFA@o=RFdu^ BtS zȻNSBG0,j8%U'{M޽3.B9N8ypRw' g+a><%y.5/ɩj>E)S ՔL8T:'BvpPdK\5?Ru̓)Ævjw*ɜW36}Sj$W[fU_0V#je\3H^aO%}f9h8}j]}kJ FSF!}k0= .W|An#u _ְeP4+JS4Y KiV)GLni"ZV[EP:b[3Kdk[th.M# WN[9FMk̀h]E@,qh4F@#h7fM#h4 Du?W"p tkԕh4:d@JJJz?Q-(ju u 4i‹SyvF@#(//Gvv^pH5\[]h̓F@#( m T5:5hD4F@#P'-Li4F@#F@ lXhF@#h4:dh4F@#T#j,O#h4@D@ luY4SF@#h[5ڧh4FN"a͢;طmB.1g_ϒσkW@a2uFԄ#uUbLMfQ*|+ߺ ^נ1 QG,OGj4?-Ls'!sUͧO= X#0xQ?1:~X]u Yr n?aJD%8q9(OeK>F:CivNj6Qoڱ/&= F<{kSQeW^q[HiITtņ:F@#!hQ7 0G0:쇑Ca1Kz1i;Zmk0hx[R{ |5=Ia 41<4pb朙ز{\8n6 I{(sc䅗R`kIw݅ wS2ԋ%h4?-j2ؤvܘ;/!%2{ prKX|xE&mqg ןNŒ{ЭhԿ=Zv gRRΝwJ*Шy+2L zHff]7LGaisN?57Iضnvg.POnjGiY֬XKfq9m WiLϾJP?=Q FXU~ud׭#>9 :"5,ܦ Oѹj84MR?ok>5?GZٿ?ڴi_}}6Ljbяpzό`]E;ѳc7,u,)1m86,{Nô oq9pP )&9h}(gc>Τ|L8?&}?72h߶ʋ[y1dz4圻rJ?o{F(D|ƶı)܅h en7zt<.EyJ6ޢ:@4F @II vڅFG# lx(Xt@njF5*Zm*KApX;a~CG_i.܉zQ kcMeaT߶:Ɣ7P+:Vo %K*aYUJU?'7DmyVnp냮zP()Aާbwm-$b"6. 7=m߆Lj}[- QzRq'sC\t>>E#h|蟏._9pLj0ŻO 7E8oƶÝ܄΍1yGx$7ʹۖTYSVF]rwY6.:qVK;s1 ug+X~+֬Ym[6Yż:q5 :tFLd!z͙]x侹xk3Q[0W½+]Pk4:v؈3h4? ci:$) awXۉ.-ٸy5p'5W[JTxEGߊB3㈀F9Ŋ(5>cF͏1*˞_Rm^3b"9,W2{&N~Q(ÞL,{ףؙ3X34a>s&♷?Q+:v뎘8j|XѦpsr1md0h4_kp֥q3|<|/GMx@m0v@8DhO_:OWnP^/('k+6Vs^z:un}{,1 0G">{|L%: ~50U~X>y/W0v-f;<;#L9]ض~^]9>qwݽ8wmqhq(]}f!"ym؝gfg\9wR4+I|ˠ)go0{%yKF@F@F@F"%w.#}#Р0[0 Xu,?Cbaljܤxyn/pm8i9vOX׍,f(iPC%>_M4?^U^htu ZuOl:D:9H_VY΢քv/U̞ \wߍn4߬k®Egq~8x- )g8ʹkS`z7σ`BXGdd$RɘnhhSSSIpjХ KxEr AiFTO%}B0H$U"qfzDsdkAWK A01.) 
-XXÈɺE oCs[;0HH4$&07ykd^ŜȲE&,RP$̄)Lڝ0~!io:j")m)ĩ#<4Q0e+sS D&օ 5-=䶱zFt NDF@F@F@F@F@FB 6!H$вY[[ޞ)z(|R'aQ?k> X?}$ά#flF(_UƘktV@!H8 vBQ7diC8+uX/a$U`¿0y&*Ro-*JѦM IwѦp u< H`dlZF)a<~$69EVxj§uݷ9k=n!!Y +{cӖ83OEo(Q #!A#"db$ ANA߲/(icQi~ԩKf_R8LJK2B T4&NMKGnp,hI߳V`_V2 sm%an0Bk:ojxIH|Z&{N=+MF\%JOW=LՐ}|\F@F@F@F@F@FG 6(OEhۗu5 HZO |gB(-&N1": 'NDƈ$2E<qԤi@>T( 32BNC@Bte@SGV В|6n8v/3Y@AS[v94/Ǿ5܇R)AEC](Gdddddd~6r\`˝;GAIB2S)EOT2Z "26&O>AD*FçhdQ CuǏEn!xLZxz-B$#5o_<3S j!%H<pd >OTZUNm]G#9!{i T Rdd <OBBHhXa8u!o:88+?WV30vUB}ퟂ\GF@F@F@F@F@FE G}؄ &ŋ˗ T@#'KKK(QB)HIFZ2H!AΌ Q6M4y B'=4ĜMHBIӧ̔љsgb蟓Q #E 3́` S^9cph" So]}6Sp:L[ zkӈٸC~mƈ&oѲ3h-\fa:k\ "Nm1W+,mPTmtaB~גJ|,&g+E%1flL#9aͪф/z&om Dƫ!>+B7 S#!!$aQqWHBTIɳFShh7^yMy E~u4R{M0BVB1r*t7]7s= ;cR.Q$QNtEGci(b~{2Y vnO|qn28;:B4iڣǨ$ԩ&MŹ"Z낤T gfPt:N;Epb+՝P^{D$$cNݢ=*FiW"OZ''')SNX0 WWW8Y1SouѸ~M˷zv0jw@^ZEwD9i!9oߠU2SG(UnnPŀofؓcPt*zKa\_^LĶ]'ж0T)~cHτ 0H8sgb0{.^$4:7Np]揇B֟4mEt &6nMIkfT(7Dj_Qb0ߗ8w8NY-+g!uѶS_7SOc$^&@A/ub>̢o,\ Czb4N;bpaY} ,3QFeq9Z3wOƉ~xy@F{v8@m9edd~|y2_@Zb7ƹElߺ 0s8.YvA97'h3СL`~(qFLe]RS&-ǫHq0;L< dܸc[%\x3W1!R(''[9oxF0S ]vA.ѓe! cMih׹?£aރ0/RA6 IHN6-Q&]u%kH2k)DTnkh#>:eC>UT].?%hW< ؼ=KXb:菈Di =5*ǜW?@!# # #="1|9| "kB, pX܋IUخP߇tYqܸU.@׆n5F >~mz)K? yW,dmԮW&4Aϟ>Cju`gFMHPTEmq?zG< ڴ,U Ep"U,‘EsK#DV$OQ7T:*r> m%&D!Q(Slrs." 
Aup ;jB8}WƢ3P(\K2-ny u:ex~ t_ƕ q]+|51$vT&JPA |5cdfsb¤=,4$Al# iL#`$gleQK{V#Pj43iii d=iko8^M[MſC$&7Ak#8*^8tttm?ı4͠VH_ut%x|S"Iy ddd> .gjطf Io!cRQz\35.sGb #]d$E`ɲ (]9<敄cTP$'".QAc']lw-;w' R}jLS{7aǾx }3+Q۴INdt?v*}Ti_&UFa2+_xj~-ddd>o.LtByEm8t!/ގϞӧt zuh&E`l6p`,<43nH¸.m1sydYZ"vl؀[O}AC%߷oY:5h֦7)Bc Lz,͋fm+u~XӻY&Ѫ@ Lדx;ܶ0&B2SLiijגSzr}p|<hrfX07^#Rq n22l@l(=)i?:4!QҚ-I LTFn|LC ܹy'CbO)3W52Gwvz8؄ϟ#B(b"F~i.)HϪզ;߇M07IcĒ(0팮1Aؓ:w\|/iZb\m[u /fX M@޵=X?h:Nu2 &sY:F  4B%b:<=0F1T¸[Q,5QnۏaCF7C VbEhV$.l݀as !x@/ aU3|UCB64G#`X \>j.Ĉ#KgJoeO#yVS}ӱZ\CТpLzjvb~B⩍S\{)p;s46sG5CrbwwcE/]:do Cy55u^+Q-qJGʓ˴\>/+ 97#^$tF^k%ڗ(2WtͱǏoD4t Q*{8N@|@w}_~nݺ+?qW=-,?ouVӥ?\+b">-'HH1J{j%=zPGG(3Ïz?7Apc#>mr- |B0ZǏK$*qAAAxM͎ROTR)ED<}kcФ8}(4()'w-a/Nٍ41mTq@0$T0"(`!ߛ7hx;zEKx>n21/㧰caq_iS;e1,Z'U||BHfO!{uK&fݯ?3_h9,v,{غtv :}Pc'I2+Aem;ѶDDC h ѿ~Ӱp _46|P6,={g_:4i^zGy{~3 zBc(Ф%.h( իƎ?ZG)#3 ߟĄ0ajܗ!#'JMĞ \cx:CdL4lS`<~F&PDʹ I(>|v\yCmBPd"T6J"<Ǡma6["5$1/+7bʠL DƓsKO:OQ, }]#)_y-%sUӦ xXBixVkE&"\HֲŘqP~iy?qOW ׶OѧKCDyahX7 1 ?w/tТe3E"W^4ӇM82d\)N B%KegDFF¥B 8ő]pYxŚcѩIyAԼѭ,J% ot *5|I~rlIoЫMG;;x],<GvSRJ] >?!&O4.Ďd!I'`N}bsWΟD:ӱ=}[1!:^owf~ k/o%)xݦJ( ~Ox.11Sr/piY{C\SO&?' |bΦCRE@*0QG 7y߈ک!#]߇ؙOyśoA ()8o :+I;ODX|}ܤ:|IR+늉[41bB\tGbfܼ 3إ˗!cs8RX2<!HH" :,$u\wAH,3>{\m{+Tz:kQ$ß0e5fȠfveu=}=;w}{|4d43j<4̒.2s[T?lK9DW@&Y\>vSV hoR=8ݺIo}Me^$E'Qٻ9n䡪U$HC2Er (`r/5S&E/=C~`>uZju[ŨP!^C1B@U$w JnS XI&$,S3mc> VaGf}=WDC|DNl[<70{d8 6FX̛4ݻ$jw[MX7?$=={ DhM7F )9/1WV$qM/"XR_2m8K9aEn>ʱ)^? 
鏸~)*Lq3b~k][{V~mnnpuvDs+eHǁ j_Kєni~ӆuA Tpw{iWxVm[L_9+w]KG{T4UҲJ{Az8F=EۆR.NhOf$PG֖033mAIH?k\_*Y\ipS\ٽ#f/GJb4FwknZ*`3MѰpvDj8u9.\si-Ff}z:046ˊ%lˈ$<7:9أЙI@4-_ZWS*r(ZW6-k4?x0Tt Mq' ɡ0oVk5I׬ l9.5(^ժWV@DS3cU0*zDy=yeyYnT;1;k!Mjֵv#@ l" DR("%h#Lv k@R(RuƱq>cCz} 5: X/@IDAT7pAt[ ~HF _+̑~<ڌa3֢صk7WViD?XBF?B%7m:G/&VcR8hub{B#ti]/]a0%lM\04| =rY30cKЗ|Wܦ\涰-Y ;h~;D,:eob?GK5:q6"U\"jџTiψSۑG_(6M_6M̯|:?~vLm3G`841/EӮ3^D &h?u1vm{DZllkk{{U4~};gGΘ;c!1{'\~Oayo6[ IzcQ69^Aܤ+1}@{=+0`&C1Ĉ ӑ:xy6j0L/<޷ SA ]Zw?r~/qIt9dj2'x+j+TSՔjzZ$#!ׯ)9|%3^!~OI^ꇞ Ҳ2jVX jØ1bxYod4<W]FZԲ^9uj'ckz'.)Y+'L7O>'cn ?nEY~"ZT[|9CJj|+Jذ`/뎓xt&.< LܻWql E↼&fZ)ؽi)ݗyWH'OHMh =kz8r jŴYkɓ`N%lZ0'w.•WἏj"]@ӮmvP<ǾSz yIDY{ȩSթ:"=pvqk'Of 5tԗJ>K.)2QA"Z---aHKE(Ǿұ>tX7n`%hۼ9cߙUƹksѕ}p Uмek~byvveb4j)ydy7gFnSyc(s!NFȗ O՗~4cAAuDK 8Rhp,_ t1jx{ gW1f(LJS)j)pv4$,'L P3}giD+9fZ͚`ꋅ+6"lRACʲ_uVǰi1!mm ^2U(579:gf0yhJI}_ Ow&8r#z1˱0~Jשz14)PƩ$6UQz{1ؖ*sM1,Q(➡C-SQzlUfF04}>?TEb`mmGPE2y1O̞?if[j㡞+^41O[YihAȧ j<4`2 : B ZODׁ#ѷ0s_ *A+-E0wp&RSٸ&Yahh|ƸG4|3ۤA&~5z/WΝ(= SU*4] y80tM7čG}cX‹\_b6nD_҈DD'PxdJ"РFȱ;%u9.$iْR5wb4Hazش>^I[vXp6O-æ'/}e4)"ϽQ$o.FKV3Eak|U>VYW/_Tuh6! 1!&Mh: ZI`}HeZRySc_ڄ#Rn89yC-"I=W/:צ`2ѧ&s<HUGKP Щ\GτBv1jyt:k6O@0tQ8} u`]X8I†QE֩Tߟ_؇NgK1sBwENt޳]OY.G9˰eP^CF^Wb#h2h9ӔP>q! 
7 37СFlնBLSZnE8H&#]*ip?2')oKgh#Շ~v =kHUoX@uqݿm$H p+Z*x?Mx<l 7Hcx $'];w|Ս'8-+bsh7pFjڄ?6e:_x>V5i L (nVqM]4G;6ךĖ~ x: ,u  #9}&F#[eVlN-c˷Ima<\JhZNXp 7R઼ K:TsO=Ot-t:S"haL'Ѧ(wBtZx[xκv/"^Z?ŽJ#%kJ+?ocަ$DB* cYt!odsF5]O^?31f:C*#F &o/J|* ~Yf iu_f̛' )rV> 90+ /pӹT]rf_4 Ph1i^B8_/DUZz(TD >HCnԂ"2` O<P6v7Gs&0CV8b(:&38Oa Sɕ$ve-BhSև;O ($f6y`aOj*+2`5%bw [ӧIHQ], S622B.j(zTB'sz^ηXocqm3::Wx|aZS꣛h۠"_Aj A}ꂧ࢟6tWТ}ڠ4v-{1`:-YAgdI԰_L?vE\ K*|*b̟}2i~w0q Z$WkgѵkOho UʏB͑WoڷlAZ蚥3ʍΝkAQ/v,_f8y-́ȗ9KL{KBK1{w, mFEnTx!&B[S8dTgx\kD: 1FL B hn}"䷶8 (׊^hٌ={X>c.?CVeqejfb9x^k|k,A=]0q#{ QW5铦 \F>NOo[,:T/}"ztS|Dj*]4Dn PhQVqѯONU ̤R r;LƦ5Jxt_U+T"qZ]Ԧ%RjjzXb5lh޵3vh##4W)4;H }&^m^|SSgp4ceo/uog_t#nCWWm[CI/I tELa;&ՑG[TmԯWMwc1zʖ7qu)PZX|: Z!.mD5%DRhYH0*p1\~+`(`(R]wG颶b8<]ߙ[FB#C/#JZSH *tQ~mnz j9xE2Q!K Vքs6v Fօ>c4_۠Nw]d$ /_ ", ָx B FNIŭoS]NBQ|1\ ʵ[Ͻ.V/!\E8R0!QSUn87½7X-FN0Iz#ѩ]kOU cAZF $^t9s6 lݰ;FxTWIb.uת 8Bʍ+9_ĵ/%|޼&P^pDͪXڕD<(u9-|l/R*24!ﵤKy;v euyOOæ-DeiNf`kGL| kS8yL[u*3k"nV % ڠ@ZuM_jW*-M!Bڷ+ /2q7wxEj QNUAmq.5C|Y}: !/ hP.^]qx1 Y}.68.\Re`aiPQZn#f" Q NR)>?Wç?Ֆ1 ?A܆±=k 4t.wq+yIB]Ȩt vtZYg9w>soPͨj8=yٵ6&o0bHW| ?>\]F@FšU45M(9#|šGVui-=CTu:̹YP K ڐzUFĊ#*] waOaDrt<I9p`nNFFc/ƟWI411ðhkܸ^# l9g!pJ&>ߍc䑽' wǹfNxf5Ö͢C}|2< $ϟ>|aTIT!ȟ`C"O'#A)yR2}COc7/_$Ъ!t'3uI<&j sSՋkYJ|L|H3BRgUgɂSD T>SLTh%p0mdZ8?61׮GA+/L4#\ 219eFB|a٪Oj Lq'l/LK:/]gQSRȡI|y RzΛ;۩hT <HS$"00 Tg0mG[Q*zEFgTׯ^чL%fхX(U YX\Q##?2(Տˬ/>Hco詞l~ ob)x"$Kf`A!9d̅H/~;`sfiؼi9쫵Оqd6Ǒ4!L8SszNvr<+HӢO 7OO899`{dS)''\QFcW)jW@r∦O3Y렊Dm%Ƕ w7j ]H;u+݃۞q!|Mܥz=FQOh.T#0af`iFrpup`Cs[Ն:؈ypUڣti7xTLܮ#mN T٫'&(S.Ԫs.pweAon~J( '{;l&~~RJFM((o@E{xJaʝ@s kVj /n%{LN(=j]*q*9՜ UʕFzQyȄ4>< ']OBՀgUQz{jfUs(^'ãZ=rV9G$|n_eQVu`Č.;ؗg]e,#C1Zp%ժUd^Jv|RZO5fPĄ`P{cs%=8Q IrMsۤMxרˢVx7@&ug^3l8/ ufBQmH C.bOf4ќi',%'6GҥYNΘt⮅owr*凮 @(@c ]P *uq>_4iVII[fuN!MxՖd×@$G`hptv'4 F|qM5_'d{:\~Z΂ݫLb=}Du4g9bbhکzo[ytq$eSFL\^f)EQ*?74FJة׬ YL*茫Yڶ/-şvR[NxCL*j]4׋һ k:ctT/ňIb)q6= r\3Mc[us>6fV jg7Wↅ@kpRŋK)RS=C!BuwsgI~+ϓ̝+ysV-FCXo'=ƵGJyiC^ ~*LM!(Vsds-CzҠ([EkHH{}냰wT>ќJGЋS3 /U8qq9 z;?=kgҖiL̽'61057ï V69ڶbMʍܻOBL\6ĉ Wp<>b<S.Ev7%VCi'm^&QY.:#խ1eprY}DFTj+bG)2zqqd8Ê3`hu 
|'espДq^'3e.`z8ʚK|YQ!92Fc1wx0ZeeB=qi#W53Xu36=ꇅexOuj 8N޻Z!ꆢ%}C#\B紲p!]G!O=[vAE7;9^ QhQ`L .]G[FAG{`lQ&MF Ա}.J"&%B(U o@J.HDC_ǣ|he |3< q%!'Eղm *PcAPA*Lߞ7ê`9+TG ;v ԡC-e$CF(l%0(Q8R4OM*[׃pTTUɨ]]E@?]$81:R6|F8ѵL ϗ',~v#0ÈBx'lU8{,̑x-ūC@7f 06B6g;ql':%.\yab w\ccg'0cD{QFhB֎H6ŋ!Mrn{= )J£C66Yv|b XaG<.b'4 c,Q:B ͭ aظ{4$-<,b%* '< %SE*3f.R={ܝJQvg/ իj`6d5[)߶J:7ݏG_QFW,#EaJM\ދr_EFLƮ-oOR"LLqi3$P?,LPI kǧϣ s("NHb,^.0W =<9G@覩ϘL,k׸hW tKMQ3ovKOY_-yIngmGd^S L !_r%7E<#q)d? Η>lmn9&mڠx3d̄1g2z_)!YDi4%aݚ"DZ:Q ^j䱴p71n sc1rRHNd ] ecmk{18-&(ZdX"9EKd >c^ȮdR;wLSG5>A/KM5VM.Gf? m5)OPO 'Y VY2O>3|]EOÀ־ G L`)cٻGQ5'` X;ƣoR#^Nڲp,L޸s{+=ZRp]^=Wy9xC5-hYg?p'xoK$%=M{dI>v&kld =; {iּvys2FH. >(ҸȘft#-cgr,77'ܸy7K7-A~Q ~{4iw@=ˣIB0qF\s2IAsoflCq$m)N#LקN-|* ?ߵe?å{'oHʆqaB[j Wx ߃+ڋM?)*V/AGՋ<%\BB &TRcpmM%?s܍>lY:5?tBKA[+^QBv={v݇#H|p6/,cYB))\ ,anefbҥ|-MIa j $!.KeKlS]̩.K\ X7Ct ļ~;Ϣu^(`ŋנTPX c47Ȩ8ZXʔ*% C&Pp^ɂc?Xu;& 냡wɷ2pQoGF|>djPQ3683v[cRhFS+پt GAkGW!Ū$~S ΚDŬnrt3F,3 fO[.K5- 9TϬA@EEGp[gB- !Yk0z6Z3f٠$o{. |k1ogF+'gx c>E]EFaǠ}4sP)ɟdf@Zu&76Zxlwߺ2ڻZHQZV(~}:#aD7? DvNI*,*zhUONOSMi  uTu6$7lذ xYZjNThOL4ѣU5啒EXaH}k] }PL_߷rh'곺ԖON٨}7HD=a] >|̔Ơ_cuc1*)кE/WL~qÞ gi^ a$S>G]xi-"[MˣDH8r"]{fsZ“1BNXzBfS#Y8!*,%C͞9gNͫ<<-qyy ^5t|kwa0e_* z|U4`)3B_tzpqǸ0]p0&Zr#ԭQ&J4APicX"qiDטYh*g!{nAa] -BO7Tj&:so8}1%,)P,Mχ#3|b*M([2zLLg= '֫U/Af:QxC'BC%VeoPYFtPoMCj}&J@'.ph 1#Y<ʬ4;0V+zך 56F<>gΜET L][UgL]&nُH}wnT]ۃifecNؼ.^Ѻtm( aըR$/3q}H'߮? [kiR&(w3[Ӊ&pp6YBN^zӾ:L3Ø+ж'CV*h{3SV1Br&*SLgj82] 5@5x2N]71e[ֶy.!J+$]FC|2sL3\4o "%P;LTkИY.߁ܢT?ˢNH DQrQ(,PjYԬ݀/#8{,^Ơ߰C 斤0Dyԅ%]jJH`gc>,Tѕt<`(隇P,˓u*7g:tqȇx)*{ᄑP#Ο>OPi,=w.-UD:/qZЙ@2^'u:=OlR렂Ompƃ[7!rGxb;7:;ثʵ!">% Wbcc n%*bl`cc#)"*!.}zS]qz DB%3\!P5 >Faģ9㜤}ElB_lmfʞR*ތW?fpvznANb k⇉k0e-*ljZN2[80ǒG ؕвys;GX<Θd%Xljr,Cz$ǂYdQ(1UO(tTzՙQ&V̋DU(BR,GR*1EI01'[>ptv5c.}㢉 \j+Y>~ʭb!!! 
ҭQ^uBNm~[K-B,akƌp,0n[K[.,E]K!^KHN:q :ѬL??T{vp4mkVr]qiŘhDkD???UjLBٺnD)m9'RdWY3K#* ߆!@l=.FJl3oG -Y\PBslZ<tNZPz ܂F0@tʶr=G mP@jX,E'v3-c;\4FVGXpU!N&tW A]!%$B!rĥkХ4KҘ?JxQc-̤;G9~o5nY K}Ȍǀ|s?U4C $biX.Cð?t.+%S Wx@3W%FБ(Q $|&:U&O8f{VFYu eMu(kĿ/Z,^6Jqh/Qքjk5=£zeal_kw5Iv LX՞={&YkOrIhϟ?K3,Ei)G`?*q/f솻W EԅQu\vkg‹m 1TD~ZXvbNJ8r/ 6[}֍[&6oB>i?R)ذl+ Ti%4SY`Yڽʈ`MKV 8o0z Μ<^Θ4/\pifoދ]u7I?it :%3?C4')B.ؼ݄|G& ֕KؽWD؊aAM?RWA'i LP*]G_* s{ģ[њ/wߑ@>3G\D|DmpBCpT-Ѻ_>G(z.E=g̰u/ĭp/[#>` ob/-Ѐ gЩn9d,*^X?2Tglݼ Uzubھp|T3ảp17̑3SQ`ȪPʛmHR(Zȉ81%,XV'oDq3ЩEyxcS;(% 0Hߢ& zڲ~EE?ARpg|tPmnuVItg"70^P(_Q'+i 9Ě⛦O*l<аn1cDFN_on.ӕ,mQl}뢥 YO*loXe?9iSĶ kx/gݢ"fVLfr YmGzYr# '1_5?B~8Fgl[?{ʱx59E$41| G8Xʏp,KGPpH(XWm w.F%B~h>N: $N: ' 1]qRlE.{P$љfXq/y|24x@P@IDAT&B cDT3r֮"v9[>\>ݦp>669򻂁GYЧM~F8B @)j6cSg.%櫗hU͝9?-e%3Z>ǔfm tIC3Gl߰RE}fAeH̪HDA窏Z Saai'sKm`}(h%!B_C$1Ű]j؜:rmr2BLjƬf;o ѐTg-pi_P7-\k4AѼ}4bePb3qAD2 bT 1ŋ0^ 3d, 7#9GBMm:+n8: J/7‡JkP m; ז}BH {B|;{)cLI[c&)kjbGDνH$M<1,l`mi`bAK)SJMAcH"ڈ;M*TQGP!ۂ%PVO05$q#T←DJoM]ǡnOѼ\~:yT'-+QS+#1nd~~w1qTd)R dV쥫Jl(qX4("Rt埑mӸpZȌp%; _l>;ػ6| GƢ)meҡ#زf12}k맰o&үcEEȕS7cuttI E{AeEC%ϝN70~5!Yhr+Ph'5̸]I;'fn:#{(U[5 ځ=cДUziYf0cRK(r3,x4r(ZZ>'ZaEjrڝX=&6sGYoGcX24p(j 3"m'bEh] g@H4kDD74`B$ 2ջ)۶Ě3wvRwY(Z5ER&h<sZsOð3P&ҵNFLʭ+b# 0sNDhKx kGf`%c#IGב~!xYמlG$BDPgř؇%MxxRmH(9C8oo_/A^{뜎?~ޅFs>cC 6yxt䱅%fbi9'_5i9 -QX{jʕM"-$(Pڻr ['EUy}-ܦ%7{?<=6eżjJ+mFJW⽹K[=OwaHeƻ)\(GBXjjfִ~'ՃG1Jfad%G#0F涰1[ Ux TaФ9 ֭"X|8Ȱ$Wt6u: $5IoaK$rdd H<nep!scD>\iȲG[pw+l5{Uq5/R)AM`pCNɏUr861}vLH6%-ahV@Ir򟑢t^x}W%õO&b]ާcfl7Za;i*# 'ɓC?&+QGFۤNF61)ai355&F<@ŶUDjZ;1NJSۚ[_B/Pъ d"Ŷ5:81~l} -ygJ0捓O(:*}+]PN{8k֮EXOQ8܅o6lh*oT?inNE'A |QhyF](M"_/\XrsxܿIB+nBˤ"wิE(gO[r+9dگ'Tr 8}\xZtaTLBv/A6IQ:8?Q7s}/Jp~+)}G(Vŝ(^~j]KڗH) «̋™q4¶S.)kǮ" UV1wm ' ccNx N΄B^ ylm`mCj4P&2[ZAa`6? 
#\j\VKX%Fe0\X=Pa}rn^F4(5-e+&]~20j2⋟q1ԏF=h떑/R:z5o]#emtjZإѻu77­YZ:5 3JKF !5*_`ٚpG1gw䱠7t3^9 v^'>8sM|:&Ms$_'mI@'PeׂБ u>wfrQ1VlJ4+_LMLo2גSƫ$*cL k>(&B!ʫIe*xKKbT$#/c)0&8W9]`!( Q,$btD;ڵԇ g /ܚVvux*PZƸ((4-:0W, Z# pkna6qNˑLƘW0eV9&-X$+s93WM qjcöܡ3;/@BXKπW0·<7n"@sLUf-JqÄqPV؛G0W 3UՄIzxjѸ3_=FT '}WG7T+U5 -N uKȱ$R∏ǢE q/_#:pG,řf0Q' Wmu㙁Z= WoGUJb8z zx$c>U^:Y#4^I*8Bl;|nZ^م-'Qq5㛗0I8q'Fx|Vme4kP7=vg@jB4Y%7ȋ<_C\d6. '^c9PʔE2eʕ-k3N1g6 / 挆 "cLH0b8[侍Xy;Ν!^r^y]+y.sbסoJhd!` xyCt!짮v ]:8>v 63|hX߇2 Uu·>&GХ]ąۏ29еYeɒTVȣn 2!݇M]ENJ}0ro6A3`Nd^hԨ TK dcЦQEDSU Ʉɉys;K!c̻w3Aͪ$S!xbw] ܭmsffBeySn^…5[+&*T wHQX8FJhܬ삨l۰H4e>4qmr,@̆} cK.'_*xB\TbQ:K۲iM}y=E=f-^5j0V7,:~ W&ٕ *2jK:.\oߡA>TÂpKZ]{bFmHUSr*Sbqd2mXP. QKWFPJ <}| F1hޥpFl9j׮dK` u_tI%w)lKgJWQ'A"~ߟ%7FפN: $.+KA:I@'tI@'t} ߗnN: $N: $K@@7tI@'tI%}$P!$805[G ,ŀ呅j!{*U+$7'Uvjf^ڪ do/JR9]8qFd0 ҵtd*#p͈R$%rOO4-ڼt[Ҁٗ{L_Qy(! ɬ^hPI\pAjF$qohPEF6ߎ3;#ը\"ۗ[r{Il B\otU&p|]o U~ w}/[b) <͗W6 5*x{ݽ_[]: oH¾UP N+.]F iH.nfg=M]B^o,+f׸Ic;9%ߏ}p; HK\?LT &PaZ7C3!G{S4%?hkZNl,3FdETxRb^%>Wc:5wW<2XM%n49Gx BkI1jӈ6 TXOÒ陋UXx5MVo4u=~^I0H,>8vc_;f_WAO˷ɾw}Ah__C]C_D:틈QG \# M/}{觾¾9JV[8p55ݫDӧ:z1c &w8s=Nx -lڨ۸Jj!XK`#E 6}%m Nޟzp|*wdqAf6~:Ro֡=83Sx&FYIj TC:tYc#,i "-*M)4OoBH%uT(%{ YIz|*OK2%궕x\^%.zRLd?9i$ǐfr$ǙH9j -v2l@igATʀcz6gy)hD38HM<9ˉ9ۈL&LJ2Fmؤv[j~Dî)0˲P_ЊHYj&Z(x9 1)-vm^kĵ$Gˁe,#k$FG8%B׼.ŀ4%JH!M`INE@@brj)<.} Q ҦEE ?Px}= ̔T2ۤ#88 9$DH㏎K>c_CK/ Cd,/\%3% ~ϞZ2%{HGuTȒ%6WOoc-2S@Ɯ $"~~{~yHx kGFMIE[rTk fT!L :.^7 FJ͞#119{|)[L%4(jq1M [u $uZ<!YW)Yr.W_fyKI|ط{ 5mޏ$ct"]GY_c 6hR%`ʝ<@`X`\ HѥC0ef$b`HM.qFtW@hm';MD%%7gXBv[,,jYF|7M{eۡJ4Qf#3ѨTBLr}*QK&zLd%]SDJ ېt./BC Zすǿ7Ֆ'*}^24*aY`U1 G+m0v- swm4m{?QIN] (FnU *9PVb@&UJLڕ ؼ2葉fC-[.f`f%j\9?#F+QlN| q`ڞg>28rĮξN=PY=LٓJ-GWU=-G*Qe * |ؿ9(Q]~ B{DCl]/TP 4k2cxiZYJyD"FPz%vrt+^m'UX:75 sj, EJPBx{W믥-ԓjO-\|.vH"E[elkUE= C4ߍʗG k8i.9UU$o;thV Q=Qdym\%0jni^܇*{jUQTel=q]:TDߢnʨע3^F&ѥ(G@Xuw*m+aHm[#0.Z^@oS޽@%hEF*PI3ԭ^ Zu$݆5!(Nso,gfbƨЍfm~ABm[gP\94mP+Vwɢ̔(%ˡrr(Un<}'7DxU/ԧZ \ ֩(Gyt#V|χ(#\}'R50З7QUcceJF,"yE['3@YzH}I虡C6xx #w@-h·dHee<9)DP%wdNaf IKea s1qx]D8{.[tǀ95r禞Х;sl)LqcFEy%ޙpp'ILJ+I }(Q W᧚rū|%G+N!BׂLV 5NLM>oV.TfFѲ 
!Peb5:5TP2Lnrl@DXC9~ K3?8XmdXK@i5kµM T]P$˪J is.݅e^ QQ? 픟z*츫⩚MdtRAn)CT)1>-"2eL^,U0*±z4T'E)cZRee\9&z*+0j$ٙׄFNaZ#^iP|Up zr ]@Sp}L ~s~xviŦCw:ۧ[wFIƩФPy#W]0Kz{t-sVl_1Ľ('=uֹ+?vŋ4+n[xNXJKGxgM:*:ff{پ.F /޷օ?Kv]Ø5rr[&Ҁqt\zuK=(#zwA Oñ]ue?O]$ZBBC񈩥 J;ym1rjȄd$ܹ|WSPd> qyٱd">cI4h"'k\)X=V,\\GY셋м*6mp{fǂױ|QܹzAe8K"=9n=G't7|!ܭiQl4l"ikұy,v _ύQJ"2{4;!@n=X̛9W0ו 2B7L&x9'7~%n  smBVH^_l}g >=x vL/zgڌƹ] .7`.?ڑD\|nR@^BT "/GWo^ TNpsRI ӺBmOqzqn0x߻}q_UCMwDg=0+ʈW }Qz**'xH^-LEpݟj'{U\EeY/anA%J5aѪݙ|Vп:U2,ݤFmZv}xPă7Զa-*O\yEBunG:t}2Lםp4ݧiiƱYpaUK&`QFk ۏ!tO%wswBWc}Cެ,6l_M,/_G4ZPJt;,ܥIsY-yyЂ]dr-Pf At- z^c8Po(ZFj!">F涘h NZ!) (>׳mr{<$b,sѬ+i+hn0zH𞹌;C0j|xXz֯+:50B`H.\ 'U[m]8gbܿc7WiO(\'C(Q!iG!&M)mC'뙧,jbܱ(N"dxm~L_8оEp1ȒHB^bx,L §ݸ{7}aw*pp4j\.g_ׄ@$iZ]dh_ЧS=*D*>|sA-%I>#>M|ȨDwSXRc,[ Uy ֭Y Zt:l VONRimG*Eʻ/Ș+d3X1tyӈ=/֕Mjl^31I8yG"&6sGzhM/6`Ѭ7܆BQYEڥWToaEؽ~!~u1k]S-KI\FO> Bm@ڦi!?9Mweة+9XIuC^FPreZRADXJ6Y=0Ah Q NPG۴=&>23VO%*/ZКJIr U(jJg p,7SLe`Qbiq$hj&1ά<%\PQ%9jTXru{$>6.Y0m9Uc"X)FLqj+G*ߩ >3ȣF9~f%eϙB롶ؙpkZSREO}qgu1ҜvggS+0X2kXy#Sw/ٴc'T=$L z3+TV|Zzs}u>oޜeaQq""&JF\<|!`2gfv I} dJ޿_ CK]sI/Ծ!AmHg|mpڪ0'5Wɴ),-Q1{3x<8Xz4G(}vf]3.Gh9n, iWXYQY?!Q#!ǡ1_Uárm 8XZケsdoo.;MǜP=G~tz޲ѹ꿬UNO8Akx4 M{B?7&`hAT2R$s篐EYOz;Q!G$deOJ\l4"cĿh z!0chJUȘ>q1mY*hEI ]>zařA܁?3.rc{,_ٗΠHa Q<|0) $i~%B^l,O]$} :Fz DPG~" mӎG!՛7G~>x}İkp3{_ _zrqѽa^7.-⛚붌7qi;vIW:٣{Yk-vQ؋_bvRrK[́ T։Y{32R*L(- 89Z8_a#<ن @؉Eh;ݎ,辤,,.(ֿyTJV"s8n@ӌՊʅ4oe U#X9RQTږ(3AjByƖ TԆqsrxMW,֩{q5l]OR*'3X,eTןP!Ast!o@Q_Y.,c96L,9 NN t\*mH\S 11OpIfF|܁o$FVbvޅMKf \]`e][Q֮4>E\. kV8 14թѸtFv'\YGתG˯l㽳 Lԙssˇy]EjZ ,00~~~TT4WfkkˀP/I ӞLđ^3|ϽPB;0'v;]5u挩scv;u.P,B%^.w.0)9{ l)xD*b lĮE0g~XElؽ{,;eo~r <|^@#&YS\܋gRBmYEҳƷ+Cj&3޸2`ƨw* Ia  -0zfDwHA~aGo,!!TmkaX󪔠[{ý-^޿:?m/aî tjXV/Q py ݑkeQ0oSSMORf?{ ] a|ll<դ׎6v^ahd |)V,YPPI*yI6o h ЫmmܰPfr^ PD$56&[QL+Aڤ]J*L"fiB(; BrM5IH!=IGc5.."iZy>M^r*HݦE3-^`fFB>oMmpM7V=R 1SʳhVU'DkPI,^~=GsN1}\+Uc$BU"GN~/_ cm14W1QRS7ur!) IDP'[PՉ$DXU~. 
T%cFVd!W)M/A&WHނejG wr51:jS!WKvPKI4/T&0+s<4yzԱ#I$LMgj'2oKTq>:]q)LA ey$ ئ q#(li5^pqvm = Y_rZϧ'ƫd=F6XP8)ʥ*zjC" M],;E{@#xZH_˞_iӽ k#[AC~R6e*DV.Tؾe3KŻ%-FBy=-eѢh5?؛޿*Bq#}=6ވG%ǜ#f'>ŏ~amTL&г9~ fFi{kK3fTӐuEa5VӼK4bMBiUV.ZZJEkD7i5Id[Kf7zN`OQ4A[fCVq/q=p vo0s1|KL2]M/^ 44T Al1N#p"?D8e+2KŐMq~$Nu?>GQ8_ šmZlݰ 1a2m6ڷAΊFJ3|Ytj鉈{dq*3]ZPKpQ+8©TiGmX4"#3kzP 1! MaKo}`iJuAB}ooTV<AF\ذv13C)-KYK  i4aWb*+{hU] $ ژa"|ބHM|(nt,!B"yVb^ϊEM"i"'<\&Cs@ D6T jRH$lJ"i^TP>{A,jf3ԷujQ}wR&ΜѽCM@Fi6db]}<}7B¡'mXK0͗(u`*[&~)0uEjeQQ XD(xus'>ZDqI9eB FQ bH^M}Hqc}ybpbQs;Wrq郰9ynXzM|I+"!6ׯ2p{|<tqP -@1 cp1!a0V,n8szQEG^[bآ=P ~ȗ|cx7j }d-j?_҅oza-).\Y*yV>evm%&ݝD4T [GeRZ"Y(v DPB$I,dX36v3st1@x: f%`Ifڢ*Dz 6&{qyraHqcZP1y~d"I`>b$ۮ+vB[ih.Î_wBc) ^|ظS3IdĐ G0I #C&τit >P_bMZqa\짜0&-</}&Vү!=-dX5wɂk{H?g̻&9lbnHLʆDNy1<9`ˆk^N<^Uw˧7Q2&jDn3A Pqrz[2 nu-o_ a1&c5 I7\F2^*4 6׶TĊ>j[%qX?8V7?ƶƷ%I>kg煑;bGdV &O!XwN޳s ɘNf^a3-*YM^ja3w4G&dvCJИw=LsΟuˌ6uh=M%W5mc23.GV.`gE*f?GojRRqV0rP0C$vKDI힧q_RXFDd*7%5o,K'QcJu;9ޡtaj"f"ؒ(׎&(FXaa 12046u*[sE@YO!D4#Ǭ{pncH e=jc1h8qRjfc|XrwCq~?8ף(UUͤ0z"lJm2];P\;iG{`4[೑sU=JJL CM@x6ϟƖ35-F XVƕk'6لwo#|k.>+G#x3#"Q+GAeTKNߤrLǐIغy @nKܶ8r)7o[h 3g.$AF9̓-j9o[?L;&8u).xDy2TjkWܰ}zC]9) (0B4Y缼|S2УKgͣ )sG#" 2ewx|dST)!?k=JCI5VhѴ!DܪC pʹׅohR,nݼaa]$rsDZs8j/7ǣ]Gaho?K7 N6PIСKWjR#6{5{oe] fQr~֮"qNLJa]΁ 6A& `DB_#Z4-޼wG#(((@ff]W8RCws88DߏGp8G;8G#p~> G#p88dgwp8G#x%l"Fr@ETdLnhhD t:? P9G#C[ Xxlܶn!>-aS|1?- Qp0G#p8 a LI1:9Ko-'0-)2{\>sEm`k@(d:K..,.6%Z8P* JuTIvVm}6R6V= błEiu er?֎=:B±YxVD461ƦEG#p8I Q: oGXgw0[/vcʸxd3FOp3l ִElZX" y ddeT>}skT\YCsKTpC=1T|e )}$JDGQOsTv IABշ1J'U0sٲ03ԃR@zZ"CYqD8 İ!,--2'AfJL ymr8G#a" #kWsOD@!QVJ!69XLBSIT* F~eg:l+ %[lpff݋C!wGhT~xrC&uuϳ`Jw:u댠MލLТHl\:йXl]`-~026E㶝a׈@ ]w_l߳]tNȔ,H(1V舭y:,C4QΜ4ɅU&8NYyX,q,#p8wtԠQt*/MW&kl`*.uǫԹϱlhx5Om fjOyeBlM$ʤmY Uz&^ɷrҰgfXU~!V$DNtD? 
¶&Hpn%a(m,AxIxd|;w>$b?McG۫{qCA嚙̸8vБ=@BWUnJcVSPJ"^8G#DJؘd~ AI2 H[R"3) 5-{ 8w>DMo8p0;1].kt$Fؾw'pϷ3buvUI-\xy 8SmܴMrs."ȞnR D֝,=+,Y Y(] >ZM(hň ˦ fo3nK?MG:~h^ 8ɷnN#p8OwJYdDLZ4H> 6 ;Wq?& [}g"I,¥[ /v0I+932$R@BɴH%iK#}ȔJ8꓎8 ),(Qu> 0%2ZX OŦ_O![.'4S˳E4wlEɯ5Df%hۡ~qbyIQƒInߋaCaؙ!^.UwUNQ LHsʨ!5#yp/OZ)~Z/9|"*9=KEM{.S6 ۋ* ʂ9~9Θ gdI>Y8"M0~@ ZMБ=04Qb<;RmB*߷Hu~s3aB wp8aW`~pJQYц|͠xE100!n@#SmtARP^XFe\+HX¥'jw@zSAJ]ͺ$?­;Z> (SKڈ EAgu=PRYE9ɈO3T2 qG%Edt,y+Z圝Wk{xz6A;y^cw*VCrLM"iX LP#F$t,[)1!PmfoE֣#!Ǧy%;d'G]}C (6 }Y"nޅv0ۺ&8G'lISA@gǷ(hDvhRhs;XFg?oQOIUÅ}a#Y v_/ڻ]PAA!X| DCyZDpqe{ vr  a2Ȓ:2"s;|73n6Z~AQK|4 2E n D7tnS$biS ?ۉ=!>|G~E\ 8u^}Fʟ;cVJu"?luAXڭuMq+>g!֎HXbr0v.-Pg4?HN1 00 jXEjx:-\(=#| HȧxPy Q.ˆQQ C1iTeI˼IY3"B6x(T*c/ ˓B-R\>LHm.6zX4튟f,vo:,S+550rPAN)E#؅7A 1jpp3i2΅Lf*6A'so #=WZPBF dͭf0mBVzMt #a^^A-ҫ&+ J!X!W.è+4qdu mˑ/-ip1 /4mOqn`nͽ K \m(TEi&G#:|2?% +&,iƦ'(ٸb$G5L')XlYǏCM[Sa) f.އW.GlX VZU Ib/^t&DzX(֭ۄ]̃eNΞ!5!Ixa7v<8Z`*ѽ[ga904dRHbt9Hw!,eq|?sہjVƐLJtO=[x!x:oZwr=RЁ?ÕshMvT:7HƾiJ­!-W۔ ꔳM"l$R"#zjCW~]Z·m'DysY!R '5k][ӧM)H4$|J/B%qx5|p-QFB$Nx*+ѮS7StG~2&@ߡz@fs[pSywSKL$H䌆R3w;T.[JvZӶp54!ōl:ԧKՕJT󂝞70r*ej_W8ǂ]?yp"Br'ܴ$Ku#Cv;M6SGj禧!´@c 3 D&%׊ J^JFOy\o 1(ݓ& ;OL.Y 6VQq1>|؋ݻwc̈BuѪ+̝=d׬[H5V"TB)ZV$a^(6O*YV^DE?+Veկ53"_ QJnѱ^Κ8y) V8aX,B@ld:^cNY%⹓0q^dۡnW0雈R5ona<ѰncxUtvy8ݺcÕtǷ\9BeoƃSϱRps(Sƽ{1gldypwJȾNbU.Vr\&OZSN !u>whW"e)w1iB<ĚŨ,Rڣ]cwH t8n].d;vAd$2|M} c3F }\jUk 3JKHG#C Dt85{T@NR&ꍖm:`׉:LA P0So4l:|5a*:;*,m|9d ~9xBQjv$[}ɴQhK;*KjS kJ5qVnt'ƶãLd؟)I -UG7]*4ÅajWme6D,n=k ]`aH[=jV1sSвlDw@O{%|Ӂ\NZ_YqxJkӪEf 4jOEF5jy]QӖ\rx= G@(N#Ļ@B#??~E$*ڶy*vĀ^~Hq4Vϱ1VMJFWTѠUw CM?áSi*߻7|QL%v Jj \cA,YipV*)j _W`EP>D ajSGN#GWڍ1/QƚDPo(CWm\TOϿ^ҕ?2salfM0jx7TLJa͞z;ڭ:|4dXi=ynZ$fhվqb|2~cݾ&Ɔ1sQMtl{uJ|#p{J?|V.հU tc,"{cG !C5}[]ƂZ}su/L#5 Eޗqp8!?MGG#+p+o#p8a{!G#p88}>G#p8?爏#p8ON> O#p8NsGp8''lp8'l9#p8GGOp8GGr8G## '~s8G## 9G#9G#x?G|G#|}%.?\IׇFR>(Rcϱޫ' ?i8q=ݻvMU]V~J6an| f=Wp1ԳAa _Am=?y, yƇFDjj J>[OuR\\Hs2pG utyxtȤR.5Ҡ IBģ0<'=}AAxKc%G#Wh`l` ^ BX==L:ɧmTh," 7^!>H*EX%zJ U iDX:FUnWf3ܻ{ѱ 7䌜Jy)x~8j؛igׯ]èBY_%:_yԤ7NYR"dHC\ĸhܻܢBb\ <@N^>r@%iJpe]E|1h8fsŊ"urd @1I nQyLRGa!}lYMRŋ((H[DтDDL 
|7^Ypmݱ6y TJCǎڢ)DHZdB.)4R+ 2'SRgTȍ˶0;Rb#q =A\C#SBR1p^2^q\Ac55"zjz6/pY\ \雰 6:c/I(NE焝HHF/JŋgqݢT ϵPC,ѧ낤|RZ@DݡϢE>e{h,'Y \!4WOn|"Ѐ R =}#!sh"ԯB&ٰH-|~HaSvCxD]ގF<Ьys| UQv}8>IH-?:ifB$.!1ZzQ;ۣehت7ŸtիP-3>kSݾ%vƣgNAÆp4xr h= 2I!Р1wh7iZ⯫Zԩ@QaQp;O%^F!_͋&7  0kNڲ{OÀfjD>Ng '_izQhmZznNPN=n^PV-zf7= $.TUb( ĩh /㻶gr@h;N?yt~Y 0ГiZ35p$Doh Z6G-p)"KDאozj$pIM5HcӾ ƌEbΝqv#D? Epp0F"?_J aP*b$2{(nǤ 'Frzb2#\*ٍ>ѹ AۺQВOm$0ˊGzX傰iZ SdPܛ^? {j S |Z㫑_޺K=y렠VNuW#aU!uw(Ja#wBDe8j620o'C"2zbɜ>HYt8v~/1]zwbfܿ~* I,ezjEZ%"<6a%ȥ'm곧1OïKLD [۠Ct@/@J]lKgO|MLĻ1FMY?e 7j"hNQP#'׾5mHZEA>}Jq0{wZ4\bt% jVu$ݻOE>h/ ,X0qhWҌܕ ݆Pw㻥;1{nݼ ߎ¦p~$yNvM429o|W 1wQ7OļU1on @7JQɡgK!$^زMC硓0eP7ܽlpV("#W}S>|~t+z æAsD5:Ýq",_ Fxxe <N>H!x|$=%4ۢ ٮH8p+>(0F6ac`nj~o>Tlg:-kbWC`ceCǠ .\:sz @g3~g}ػbChXZ93cѺЈ)zm_j{g7|=KZ葺CbPQcw`񫰴4ƭ@HRu9 N@Ç^˾(NEPsGk3V辝Ar*H$$%*njYXaY[ zHܹgrZWga8 \;v`/kp:<6D{AMZ0 +W630lPgw&&041 Iul c4^zqzK`拯vLgK RǽL̄Ѩ<|@7j05q`'yn!N4>3 eKífs?;'U57c1zzF7 G'\<;!Ĵ`% SCtNFADo,b +J ] KCSS)/;wad 4 Ӗ=hk&z C̵6vY;}PGNЭe}~[2?q ~݈$cf6жy};r 8uYLR, >:DͻRWh7.^u喨S*'-%#$TwsBFިLR-8{w]kC1y$\ 'u1L=j$4;µ0id\ IZ-[Hz*rL+a!03C};5Mѻ0/GJ&nݘ37Ll@#bØj7~|χ`0?N^Z:Y| 2ȖiƦD8J0sQ/KjG = \^3lQZ8} meF "E/L ypru!<͚'TY02S)ѷBm3+?"tywp߈A} ٲ$oH ͠Gj+jh0ٿ\4"}"K1Ő-| \Oꛢ]$3t#7/apnc $GAC"& -*hDڔpt* S@OO+:wXتHqj9Z.];X[4vs"d˗shaRϭz8 6bg56 4> "RĦ0GLt = Gj-A07 iACҩqbTT*kR.;}2L b 0I6n:#+#*a ܴ\ z Gbhq_z5^硫FX «ZL8`zA8$5w%)P]u;"vF~2+ƎŶ[pFݲt)mS#DwkȘ9p,y{]8lIMW_-J2ܴr)SԞ2Uѫv-*YWԙuу]gA깕e} }ӐYD6Ƽh""X Göp^.kߖ-D֭0~Ax nw1e ЮU+LZ^;_9|$)Υ6CR"p}QݛB%yFq/6,wSQFMT8~=3]+בgaWh Lv/׮ x2ɾ[. 1ffV$%jXr֬Yz$(PRöZ*R$z× Sq.GS+iZGWh?$GG!{N˩Xz%V/-_`°1pod3 Lc:fsS_>?Q. 
")2Ia^+4+m>ݺx̰5jCMڢA=+ϫlpi>o]$EVݸ=T~cᚓtM}`"Ǩo~$vɩZ#Xj4\kJR-Ѝp8%7n(W_"5L05|z (8H{qohQ^hE+!1Kq,Da=L6e}/PKB-QF)Sf¦gp-TUXq\5|Y}4Gh֗/&3.=%\riG"M!CD.VT&V?/IAcs0P`ˡ}NhGiHB{7vFK41$%F%1K4vc 4Az(7{,Q=vwʛon}1H9=_DI3[Vm`b N)ͧMh@HCŸ´8r%n\pp 츒K 2:0md3< H3}8Tan-;=M\\Q^ ?{T[y{9oϥU+=Y%Mzed#uFHth5t0uD?\w"8 Kh]Ѿ7tW+%w`ܨ{Gfr<بM9dȭdF.4F :4-e3_ [G:ظb3~5w|7'avJ)$"m'ZlY|2i 4zat&U_E~TZr!VƖ&k ƥ~8y`4f45OSХG@w|uj>Z#NW'?X`!dop {a@ 0 <3hC|ܣ1hv GkSX]emk43S⤀ʱR`ٴZ~!=ڶD8s0|:(|]2ά$''[[)4QgcShӱVXR4=0ˡ0rz_C~L(>PԶD7so&c! `Xt˾ K^~:d3HϦ8=tU1tJq!-ǐbivbѺ@IDAT?ֈ&;+Qdu6.0uC&ޘ$|4xV{k Ъ>&,܁~ߎDu=I$Ћ@T4BfpxD[J2/G܆(6/-dW[5df6i[3hi =ԇLoiPн`APվM| ߃ѱU{tWEGt rC4FJ"tVT:uI d~훖50wSkGV}O%7@"t㌝v`Ȱb$j#FN\ .kQB1!.=к[Y]Ť̠:У,|ڳ=B>Ƕ"og_JrL(>0&vGg}E uRzZV%?yOP;v*#MK(%IHadfA+!jjQAfT!ƣzM7Զ|=*QpiPFLBS?rl#qaެ`cn@گ@$d>ȌJ͖ &hOS0G=/$N&DFN#ԗMIs'Sԭ Vig4Ŧc\chtSea/ P˖lN'e*}!ԍȘ7$LV+AlULAGsuEFudV0#b6RV\BKL]F3/yZvu>AԮ%HG0Q́Մ|kÆ4~^5i3ճilz[Nhִ9,ԠJT'ha A"j iЃ2jԀ3ZЗl~B@;17ZxQl,Ѳט3a(ti7bG0yAKG>~H2ƘllХtĽ(&3td3HhjeƁiuVY"5 qC ;»ڵG 1-o~6 M:jDUBϕ>>VdK;7F4YЪVдdMo$.}x֫OW{1{?a.[4>,lrl*76U9sKsZ> 怺>>fBm |ɶ_D6͑8)k D.-ZlX9 [>Npww:]jmYLliz1iCsW#ѥC3x{ЦM`K.yP˯fL "Nо?.VE O' UsFQ>ε|`#Mt:iJ@=@i1#X5Ĭ{bhX wE#9K=a7^7~ez~F}MuUB[񙗵ׂ@a!  
*)))_g \G?FM%qssrWZ]趙h&D C E^2:x0,a^Nc۱~H$M<Ʈo]oPhM8 ?o؉cWUD^'k/&,oY\4G##,˧G2iK &4)<&y{ F9?a>a%qsd`S> z`mznae*5s8*"r*8~A2d%Ej&Sp/%A!(d976drA_%+7CcG YE] qAk&->fjp&OpsЉrr5C@=x͚m8wKVɓ9!Ut\D?LA=&p] JCfrN>Ԝx6sRr`V :Zt -0g-g$< 9i_G>DnЧ=(pwc̆xЮ)wIn6lO:蘡i?tȗU +m1gg# Bא_#}[*ϝĵ0qBp k>Xjݓ̔D݉?v$Q,P'O".%{bjurK{5}6vРgZO_q8oN&wEyC5 g4@S?폹CТu!)kn@#[Z>_ rvYH?ާM?޿CU!'&*O^&'vrgĠވ EȄq0iifC//3&3wnƒ_tXvmˠWnOU`Y:򒓑% ) jܩ˸m;@Y>v2Vn?~=7޲'.ahu_^Q|Aݰ#%9/(+5|qjx;Y);_Ei3S7h=ڵB-?6EJn?ަ=\NՇ9lư()Hwǒ]aNfӦ|}\nk=pڷmk!3075sz/: M]!?3  $Eg~H5_M;L=>aC``,Z˦`r9l=V@?fSy8o]k0}^bqH"ô'=شa-<;OD-]9o2.$詓8z.G51vZg`Ÿ{ 'e:C#YŢ911}do8ybc8ypj(bp4'b6j6-n̦~ fEOG!MQ'_HS f"|Xz… a=EQf|f&awAK[+L51jl84#nAQN3дV5䯃/_B.=Py`,UN"$, Sh{\ ab,XZA{M勑vu7..Bswm&԰8DYc媕1{.~7}WʍI_6nva` w}Go o+p Ni3nE bA z{3=:c<t ="wа⭇W6@ͰO< KFnPa_;ݢc(WD$GF+ K>l;5uu]Sf_7H&(PGNi`)1o4 aNr[6mࢩMt`e>f&Þjʦhsxߺf>rIkDVhB }ȢTqIXmABiRX+54ӷ6ܪSm1D޳s}#1ѦQqLVږڕ6~I0P)ut ?唧ե~%ihk3O8z,@R#0`@ӆhAԷihԎa']n9;'Wc0vC=F2=Yِpw h.  UPBӓ.܂@ʼn_w}V 2Yyғ2lf077 M&ġ =:B1m7%>?$60՘2 6ߌnLym6W1pI8a{'o o oB Ա7£w"ʽ s4s4qex͎<מ&dXe.a}cX5"&P!:*VjBX19aiT 8NмLCZDbe%ػa jy 1 k#z酟%gNcZ냑b~"9AT)P?eT*+ "Wd=UNPeSں04VGcb+;^ J,q\ Ŵ+Ean;>R}`akR];_٣ޥ=8| ?/)d=֔CBž)_I#i#qeMpnkOVԏ;W9 _Ἱ׉@pϾZ-h1Au~$fa]Mw>~p  hmW%qvҔ#b p9ĥ9b)l#gBobڒfS.K9f~M֤uФyJ_ŹW0dWC@dkiZؼz>t\Bk0Mu0l8j-YMǕC9 N,GᘹOUWL~23pr" Vo z6Fv^ܽsVWyOF{5NS> `K0Ow F4Qn=,5峇ٿ/zCVn ߊN""6FY?cȗߣƥ2C1bPC_cs&R_`&xnPÒ 1A` ַǹGɣ㗉c}' -x8Me3F+ivǥɦ.Ξ\loF CS@~_Dĝ$,7 SwBԮƵk8 |kNvV5Uj|X$n Cy,n&A0Ұq_) йG?4vQll߈c &:Mӆ@@tz  t\Ypm >*V݈Y 1kPfqji(&mGݶ8(ph\}=+G'Nu^:\б6eOԱ1‰K׺'qģkd֩zIӊzۏ 釛'VB.l\ЫMC/$KNNo-:ͺ"//\'ͣֆDҎo6-VDS6εбc'8W]2bݲ.DZu uk7|Az4B֡~8⒋ѪSgzؗaGɤq^u: lauK"8lдja}aCˢ,l\!`JDe}g<~usu+m@Pxyy<իV>j)=#~uiz\+k'ܴn .EDخ!kvk7Ŵڷ}>P]}lJGM#𦶦M9.?G7нsOkBP@'!u2$ޕcX ȫ%wp!Jz[݈"'l H &Phi5vVNg;װ77#5l;^#p^ 7a~^G#p8qyG#p^ N^ ?^#p8G71#p8! 
۫Ks8G#xp!p8G#x58a{5xiG#p8oN8ļG#p8'l/p8G# Wp8G9G#q8a{ 8G#~4G#p87'lob^G#p8WC@ՊBjjP @uMmh= 1%EE(*.LMO梐ҵuLz*x CPQ:TR_ AS[ZoW%AFϵxZ{װ'77"O;Ip04APPP'0 OEbҟY#'a akQ Z ??uxza]6f-U|bz0lbz#x<&29>B!n_I:tԡP=*2$eD$&߆{zIϊ ^Qf _oؓsp9o]RNBaQ!IkCNapi^<G#pҐ@t*gļӥ!KbW4ԱN!H-=ѽ.ڴ p(3q!DO@M?N> u"\ CVG{\>sq) jc~!ܽѲ?JDfrN<; 3oFEcϞ,VC`e#1;NuakO*z%zz˦(Rg>DǤ 7Omg;V1FA8'CI:jyipi@B5hRP@븼q'KZ I\ s;Wj *Jӧp3T>ozhқP;vEBF*S`bOԥ+/TwF fi4ؿn=|7oTTwYT%\ EJv!l\jfC(5.rWn>D Zhպ CPڝXy"H)\Z(ƭHLvzcQ˷6D(=#ar2 v@H]?͛wq-ڟYWw7D9Lj!- zq\l]kh S~L— 2 5ٻ@Fpm5HIFn.^b5u V]|6|5xFM_G 6ނLK]^WU-=Udʄ&& 9$0'ŧmTU:#iׅuuLKKAM& 6#}5<*BbA}|_Q) H(b7A!l7I0P}k=8*IuLS]wsGEh=BȐ"Z6IxR 47@.t p! 56ee4m]CEMh7hPHI>!%O>h(ȴW/OQF`Q.hTOzDD ++KzZU}޾e2Yه]bFcT1Uj_}vhӦ Zn&@bPQ/20moKs(cF<|` R!aGU'(+TߪTYIa#'4DzEbSѹy=Rc;^Ur3n`۷iؼ{Er:y¢ad?NG`]{<,  葁m+0}n;yK0ter̟:bǺxizԹKqҕ+^H>٩tQ6{ #tN ? ܉܋Gv (pn磑{5$b ˗ #x4j)mPM;wBA84aK@Wp&50i'P'{77M(a1\I*ha}{7| ޽w龇 nVǬ>P\",m*߆F='N1n'Y`˦a0aaC9c`Mi_Jrۢ9H%mc58~z#/£9&͘S3'V#"&w.įkP>臂L%* [0#O?4 >֐d3td Ҙb߱yX1o*֟=;n-#O]pn@C[ߊ|D]: Ӿ芕k^Ʒ#~)yH; 6HO^^v -MM cX1b[RR^⇥޼GQlV╧k%*JOc01' Vuqɑh`кx,3E>AXz_Z*$s1{C -?Nĝ8rrr )~5󀶅-LueH < HMz}'nҨIs㛞MJZ!TT=qWMh  ӱwEc!! 
V^bqY˓*">daε(l/ [RXc%hn8?`?``mcmҲ(PPDv=5XS;>}{ZY"|3[/ď⋔$Ԩ榬Q^tNMTiJRp=:8*.gAAh UKŏ:ſ{*e> -ŋ,@~'"J[:U ]9+AJ}4ll|Z³꥔N]MR<F,rI1i匔QLg|^ٗx&Ad['<' fӭA qj&[y ÌW"lǺ.*^W"} įvA~Γ up~Y8G@p0EMh {6ðC؅ #ҦE;*M75i 6mE\UKq<#Bֵ<<#ᬄhcbq^1OrLx|?L6 G > Yu7~Mz~ 'b $j4u`H{id}7mP|*Jy-~IPh)ɏh ,J%0 o`z&GP[ rnX8NSrv;!0eX0Mi;hZ_r1o@Z~4 BUOld~"0wRJ0~X7>##fɾV)?*Joi =Qj1{NG5}"LaDeȲ=YcяPD>ꖮLp 獅KOMBvnQԙ8r}``ώDSʗ_vi,50ӡ)~7VmSj*PvDzȭ{QeƤ2 Y2 aʨp>3Ybh:uC6RU\j[MMeY+7YM3zbR B{b7Ҡ~%* ˜^J _JCGF2=BΡW:5&ƨF@#._C!:>k>"62+~*.M-#T_񜵥OeX^NDE!M=y2|j3PٕB=uw2E\>U ~cp(J 5xL 7OƴbY'`ƨOխ/1'ؾЋGl!3uEӶL4i-44I*dxp7%[͢n܌cɰ Čs>~,4W,N5!V{=Q>í|U"QK#+˩݃(`44G}TAga[w>{漚ԣqOBX<1np NѨI xzƜYSI;~P',A+o1쇅grc-m4Bڽ09M\J֥.ZW>#W-ü۱k|j[G %bس7XswO$4.g`Uae8c~ " r#GC_i؊>*HB1} 2P@GR;)d0( FDM H+6 {d$f/UҰYˡEF&͑xTyަϟ;'ڈTQNeQOT?57atJ`yYKEQzLc?"@R,b S ++1bƍŋe?QK %r@5}}Qz1 堕*B2ȵӛH@CW]8,+`gQI<+Z)oŲ忛ᢴ *|VlkQTP8Ҹ_rhH"5vl9ӞEQBB?$L?[!(,mT^A:@I.iQ5QӗH}x756w.vR1ٮ#MLd4<@^Pv|dL%tm/e[ Ff Q=g֟]+)CS"~,"<.^ {ӎhv}uWP-I!Meߵbc!5tK#i{ЌWYŦo,|<;g.}#Ъ7h^Y,; Q@ X((=ȣrJ0YuECX7lN& ,d8|9iNC_[t^3 ^-?N[0ca|9ifڭ9i y.YZB͠NhWW^Q\Tvq.CZJ[ٯ]9xLKVփMp U4bgl1ZN cX(*bZ]/WWQ˦O9= u|9Gt o_b^ǃ%G }W$<~n C;fVF4nÆ%-#>#R 4 0LN~` td42SlaP|QR>+bB_D9EC &mЪo"e ~,FWm~RRpi\&d&~HI^ A'C>>DiD`䫈~t(] MDMPF|QR !'HT蜑avdȩA,)(5HfPMKFl^J͜Xln ȦO=CC8 `ԧib6-bNV,*:#6P!b-X`-4n6,CHmd׌)U>SC6NfdOh`n}}eރa~]9o<]A߶!܉4W4EP#71 `F/>tVXx!F "O6L\<wFq Sڇu1fp:CHIѯC.k̿(/LHi |б.%S5p ]}׶6::q;LzhUms\zr 4D{PKI;l{鄕*}<6D]!ÀQ?V|EǸ&ezCvchPyXb^f<ڸ.M`G_D>`l iZ˾S Vq *CWMkz!}E>t A+IC&=QV"ռ/6DM&K!lg ~I-ܸM=tI7. &}ѷekpVx!a!Xw8u5mmی䇍- |kC_s )~[}u wb4zO#زlڇ"S8}9 QD_Gq42F6v.BB:APwl̫;E`]\5}ş'8XOLk? aN6,1wBj頺>[b#شa3Ѐ7zjla=#'$!Ó_A8ZFc-DˀR~0[Um={=u-oc3BA_pl(&I#|b$@a\}){i#8bHwbNX<+hD5zu!HhجiD`h)?#f*Beղ\R<;V %DQ$e`m(K#I2X)2``I>;)c.Đ XըW^4?FL.ƏҘEimEͧ>ʉl^!rv44ŬVrSA1^)8?1cmPLgzVnF/a$ {;l۵h[$T? 
h&B777hJbJPd_˺bc)'J4q62]_.ރG'Yۺ ß;ϒ#Fd6WJ+Cq˪OVҍGAs쯘^²|<#x3dgg?6kp%tCΞE>-#oI^m.MWē]I* 䵈Jj2ʧ)5 Ɍ)ox!?WI Q$-´MDT 8检4oLViҒi"]̓V3BE"jxd?emʺY< -#.eit#6 Ie/Kcy5 ,]G$ʺ #Dużı_)dQ8], s#mW4FdAidÒ mZhzOi-VV( %VGuIHK{/bɐ f,5<1#_O/t&f < <Ap4mESB8b#'?Ht0)*dh|3cr&b[ٟK;u4S4SAAUSzDʵy^\0 ,<#x5Hr)ܴ duln)"!Т]`I %iy2UbG 6"HNp 6My'=}z;D 1w,(]~lX+#%qH]1BBRJRk_Z`A*%]IDX]md9\>usB:¦JINA_Aai#TV+ĺ*(3؇*է*& h׆qIzh:'k+}5l/F9MZ !jhHdIlɈ>7$`#p8@F$@>e4s,NlmZVRR [%N6=[a;[Y ٘MU%T-WU$yiн2!2.mF):@U)%=.D$´=+2dR]oEl?+3iJ'ƴml75H&JC U$jXI ;4֐DT`IGLI(FaW#rm uD1<8G#x)D@AXi* lf?[K+#CM]2k҂jYeA4$o @IDAT+~SmdKZ< .S=4 EBTVFl&p8Ga#9̵DiјsYװA\ҶP4MNL)שm}.~3aAτ&6i#eMD|8(yp8GaLæIKɎl7c3 ،֙ l%i[<@mϓҽ/49~[e`SLjA>O7kF*Kir"FL|јA9#cQV. l ע7}(H[>iR&C""YIlmL2K{ѵYL)D!Od>ĥ$*y(+/z*/b ͂.iQ%jvsv߉ڣ66%*[1?V3p8G# "P5F-/'Ǭt+ hDN:z4l҉S)1rbm!^֖Ʋ8V}`KEvʖ|,Hs1([RH)= luSKKDH>ԤiD7$v&,^Kt-Ρ,-#ŋb鬲<6ǜ4k6iZD:lYUڋaG8G#xy)YhiUJQYIHqkhrMYQyu"e#cvN4*: "y1GbK`#3"a`mR26r45<,+M=Hæk2l~vdyJ*;g3W#LuIymTI.* ! Jܹ0w~ER*pdebh&-:7r8G#r#aŕr-2%'F<*rFF)cZYsQ2TGIR}9 s)+ Ec^0'LjCM֋2;;דxG#p8/@ lKDkLi`j},K!KZJ*Q?u %vą o"]̉-#WTBFUY*YSP""vBB4lGG#p@$JO_ 2$'w4ēq)"a4=̏_?1ErVz1'b"gٴ@!ֈΦ.*A:[VJW>*ĝ Hf@:H9C.ӦVclE E$xhA|1>l =,ϠY 3E%(&͠۞{g +)%QM_4pӰj46]lo EP{,J}Q!r>fCAV"֬ڂ}kN7ń73x+Ko` I?WhWNŢU!O + {;TްiF.&ntI^*Vۊm{;<`_iijujǾ{?/iq 㟏DmfLƞjX6{^J*#qBNjvpCQV- ?i:zWaMǒS`ܬ{G!ePS\5gѫ7#lIg`eh<`-p3~@>%[WX5[ʿeL4A]{>I؊21v3>Ғc´)h^\Mkģ/&jHi֢8 4(2FM`DC`>hPg䃇C@I9"g4uyG- CLEFhkº5LMo-xӎ2"*iي#7'YHM@8 ;QoY14P{ЍW#M8mZIHv, "l/kX7~_E#׆ݠX?޽b``f :SAQ!ax ;Wwv/Azbc` :TmbKs}1<+a Ҏ޼,X:ڔ%9lW MoI19 [W8Yɸ} 36Q6{T()B'5#np2<N#K.C-:4VjYԞT&P3MR{蹺 %<\퐕k!'0~B3QޘGo!hnz0R5=co]5(刉Ma~yYfj}5ΜDHW j _ Z'4i[F]aikh H RD!!=t_5.&*Ħآk9H."Qg$3l؀궦(NCbV1erܾ}VpᗑWZamj +MCxMdnb<QgR 5,tQ LIH܊TXM|feC$a_z `fŢ_R=wh {9Mx5EGɨfeE3I!2".]TX3]؞$GEnG߇ zϽn`c`FQ D AAB TLĤ[@:G1F. 
/={W'ssfxN;,;kN[aN*,1 (đ}9B On.qQd R8*y6mQ3G{ dݺpŵ7!zԃJqf*Xk̥Q+)-_C:OkUnjCTPe5] bĵ UFݚ .ߝ@8Y ujOZ$^6N^pu¹SG G_4_wjiq>bO 7oԬXBFV%8{)n^+{+Dxojܝn\K-ҮzZ!"jՁs4.UVٜ|2bb9X:NzpV[utST\˒Dž+`yzj49Ԭλ9WC?\%g%ԙwG;uqUok%#19$J{(Iߨ%"&S>/xT^pEo=dP@h$t?b2dy@-&,rsA.-ѭA-NpjP?hXgII)"KBQq1z錛7qBCᑜj3/K)I %?MC1"y섲T{Cq`z#g|OiOc¯KЕ~|L~]7yc1xۈesFշV)9fL~Lc2o]kuŕd  /7`UBNZ{|a ;_0oD&4+7)*Y <ۯVJ obxo0cGPjQTbDXwsg_IŰ1祡xR^œ~m8J8Kr`?,_.){W~c@ol~yGCA.w<|8ۛ('Y*ԯSEX?m߹=$)}p5c羁mkRrI[u!Z ^saaEJ)jx\==pNЧ_Ou{;̮oѱ3лxpaOGn#>qNAQrc[мX|s̝:J 1qӨa-}tVFܙshT|9o]^xیtFN9DWijjlݴ6YH8E:9X"Z-o`wS<=n16/}GIJ'b"3])FnyI%YлyCtfɣWpYTVǣkxMJz E RQhO5U ԸTQ 'OY]aPD\},4:kzm=1}3Bƒw"zOX7oa0V7VNsPx n/1ep;|jZ/?{l$ѥ95|2' l|1 5<,>7{a2@AOW Kl\m:~aX ,n¥$ IYL@ obqpW?YHʽ>k4p/ ^mͤT|rrșvGNVt/ޟ͗]e5?N9n`ү1u  IAsD'Ja5‚{  =>zK(#O뎕6C9CJ}Ŝ D_NS{Q7ߧPI`_"1 ># uM\0Nx$~n8Zn0rgKpr1.-1vk P\r EW_|/%fTGџ:[#'˃fu'?s'"ot_DΜ…T'Io:cΥ]µw O36"}08Ӱz+aXR6fSO1'kcs'fHW8v/n&j=sGGNtZ0_lM{NJ%LP>Dr# kbǡ=\oç"l rѴ3&=; Nq!ڻ5ob+osFzz:|c߶hJ v=FFa7oVDLzw%0u/6*y9E|1%N O$N9[H%t)‷?{3"\Y%݌;Xy?ΐޔ Ҳ[¥8{&OwgixԤbNrJDQ"-+O-MoGq24f>^̬m5G=;uPJNDqsӞh[GNFB|Ye?tGG`FpS+B!bjcq`GcVp#Yd!=8|,op}Gq(ud^Cc%& #S- (AUF@k;silmQ3zꛥ2kEUrPPMDA)>:h9OEֶ:v)~] %XiAv}6M·_chaF#^T,6Ca2Em%QO-qñmKtk;[{Anî̓ҥ9p ~짽 {6ꉝOƒܓUEWQMǡ h\j²/|^,9+E-F*bH&jVuóO g(,vcxyPlo];>OEBw"L[;^F5BHO6lW/gXaͱGa : GQHr[ UU7ꏟ?Nq7 L rbq\/ Mp'q:Vcv=;C'e)%3 /2+[8iJvvHG=oc9Vϴthٷ F,sGjXE R"%z US4"s3~K |)c!IOg4< xU D[ϖ .e`ʜꐛnzawv'^xyxx9n^Ű`oV-k֋ml)a˹y1Skq䧭 ߶2wv}l8"C.`{p8:K*JZQϗ\jX"?5PˠP/}k4KˆwxO샵URvI)Eu;ch*n k޽pZ!xU'ݤ;ի8rN; :!5 +:.dU]qKPc&v1a7.7qQq{N]ы+ nxΘuzb8Vѩ'!=n*ۓ ߯D l>q<+D;(?~Jh#sA#1f=uw1 ,BQ 7sB>.y^: ? P\M@vOͪa͏]9T@>90W!b3fADtttҳ*5ЬAX'b=_tZXd-3$9lrWshǶvhOMQ#b$q*;'V'RL'3I*NiN*%n! [:{/ďXEc-=^W1|??N@hX:eR⍲hY#BJe)"G#43gFg9$f]sR9ݫiWDl޶ڶS+EQei W33F¦$#$%j960Ԇ J6* QNEp (DJ2^4 r\XG|4ulW֊˕xoxQL7m{aO\1_| Ht^P!* 2]Ԣ$e=(M=%+dˍ< )}c$mT9OÍAQ\G&)RH)ۂ%YqS1cSýr4 W< yrܜRī-l ɂ.*^4\#ǎ*jf O]Ye.F"wQ^*YХ tmCyk-TBPq;Y`|;toGO5@N v\[eOSFnn/Lz-)$0wClD_;?/ &? 
O%p=+,7IIJweBɋOŽgd%!N;ZtzuJf~cCt(ݾ8}~ ϝuQB](up?~=,h#cS0g, c+nڱuuw6ee/2w]O17Ru}#{s=F)^ڗ=e68%É-;(28%OhP ס [L$WRc¸Es3f9` ש.z2@%mm 8ձ+ Qw+75Z6֒>Ǜ;9mV6H83Rl|V̛*XSy9_Jn?fut$c͕X{YO Ĺ1(Yvk+>yB;p|(fXW^OZ&Ç<VW)LUrgGWbB A,EB:šz'5@0LR)/\DFESa&~ڸ_x>udOhXN6TW+Oq $Dzk> *{58wvvOno:_RܼwW(pL22o%;WkLa⺈YcS ess$z{lP*LO(:WcNeO5k64rXxW {I#=+i]N1{,̚$_8l>*f}h LZ_mཀྵRt?6NpsMT5^z3YC+nc޼y?5=FIY 9dDYqa7ZL~=٤f]:nX`+ʝ5'R.FD&EiL #_PW[ l2yN 1.q?q" dbXhѪn=-M7ZrҤᝢa6k>LI<*Pq֘›CK1Wl6or]zoQ]ɚ?34kzmBu4h 5եW>ܭp2 [S0[Q]oN6zW!Ocj٤"ڻq ܫ$2ՁYף/hExX RxõG@oLEC#ԼzBhb.wNZ~(в?6Ymz4xlA 64+ԚQ(0 =;/]D)' 󪂩9#pf-Jo *51}DWL|y0odz?Y/ "9s)}*;ZR'Ga=iyO /RZRWV|.oM*KQ@*UXP|DM>ѹYC޵ kbvoQwD@L>'*(ySL$yn.h٦qm|4e9z$X(ʭC0vDxn ZN9.و!TKkM-H7*"Ѿß{bvª^m+zc7RzR%%,QzxoDS3Ltitxg|Z-&x;!rɜ\<ɸ;o,|]Z@65G"N:zII:#t(Q[]H|kFu]BWXc|B ~S^UzS4BaZ2 TJ#MtMQujۢr >Kk^w,]1'1z%:4/׍:F-y|^^.=|E='-E/}~.rXqQkoVú/WqaLъ VÆ S;U`o3\DʮА;|;6q>f+I@|3|J؋Z. hз#*JűcA+*G`^amhKJ5sC'p5f2 &!˸"DS#-ۘ5HY^'Svƶ|0nԄ~(.ӥQ]kkK\>W0r&Q!{0u[fQiZTk1^4)t1Ϡ[8~0|I`hL<."GaLfǏv?Stx㹝;a'fݜ|P~IލUr-PBS$︫_Kkmѡ-VWmlj3NPW@Q!Lh$7-&ETp0oBskNv<n[8',<Nõt8ۇR,۵j+eY_/C;777^<О:|+qGqM;uތOQaKp* bwF68qe"W_BPP0FGQѼ.!8}`MX^$i^N$:>M#(cIwzuCv (ŔRjyPYʑͰ`T k*5D'@z rH:w {iiU8g0> "FGJֺ iXGh"\Rց#9**E,u5EMZyV.p}CиY4- ٬=VGvFC# $7N~~mfiL]]?e@fmjq#>d}dYM*U:+atC^C(EBxWm*sAPFu ԉbP #ݺÝdoo/7;}Q-Fb| M3[ž5qYI+ABzx!ACPY ZQukIV-8fO*GDttsx/CѮCtYtaڋQyg_H;u7v}V%^UnӶOt8TŒЅa\PZ ӈ% V6V6]sT ҼY}zrxш~%K=>\uH |SMC7e8D asthѐ:Zb[en:Á.sB(P+ML޻''S`x"ɽbƘ;.8.U:ʑS;rj֩*1ªR_&Qoabߔ'>~]><ݨg>ׂ:}ԷפeGT D&T s^Q\'(wZ>i.=jI^Z1y <9s@=Bᫌ&cΔJߣ~AbVFӖMKW?M=98Ug>Ֆy7@!]򀿊̤ Qk *55'SYpE.r+_(9z~ lzgKSP;7ŷivXХ9M| >>9EA~4\ԽQt2~?T/eA/KdZsS ݟXڕ55̕ ^eʜTx2-KOPW-dTl2Y^C:D``M2@GVn}yo{2rp.t(F`WVӗ& e0Ǥ)31rTmHy {c)<°%?SgRހhByX+￑ũgFc"E.#1i, 1ƃ7[^* r|-O>ѳ #ТY_jΚVN+yJ,)"5#8# 7 gHG=?pNxZ_mqTq7{uy 8jzZSbG1߾bj|n}^t$"ꖞiZbQ-%ʢf(ۘ/GžA./+"=?Ĺ(wH[OH>Cyp KWY~Ǜ8pJ-GiCYZ{?/=w|U<)K*=N%=ӀJr돇M:(`SD:N\%Cw{^%O= +G څn0C~ZTC_Dj/#PtDH7vM@z8`$Itm5)r$bXz.x_f8[))t]Xwu"zCB7 XbOwCO@KӤQ3K'Cp-AsG=jU!W$>;V;2~Iŧ'CyRW>rY r~X>#glOf F.:t=xB"X7w:#1YOwA8rI&2 χw_Exze^a*sy|ǡ*F:NE br:O1]`Z'R,#LףS=BaJ& (}6# 
#nOM/;[pn4xsoN4B=s*Wm};46Z{)"g:URM XxS_M H.%#4giA&?J&^T~FwGtFAJnVJg 5kUņMpU+{9mv,CgasyK< oMݤTmq> U. fmUf XُK'%S1kx?,"U };yH,s?cAQ UǓ˗n3dθ|9u,e @x3\ߴ Ţ'rRnF7aQ/>AnFs-Z3aSe&?FEz,e ?9F:{=RNh1{wUJzaJ0@v3^p at^ǘt Eaj̑J~Ci۰5[9zy~8b9d8uKbڠcߖpVaz^UKh[R҈A/fQ/<!otx#-燙cb^g*HUU)>`]kx~C)N?M8P4NF!oCbqY~#*T=tDv Ŗs!lW'[WΎe)H9ZJ̗$4-da "[pl{Mr @ qc8DޗW քj3t"XACZdhP&Zg9c@IDATڵ\Kg4 \|0"j]C(Π,$%L^rǦ}G00zb cW>#Z 1հ 6~~^_#łܩT}jUWmOg_)'q4IU;رX,y/mL0]63;x/Ĉ c Ggp> 8;}]-us6|n\|;3 C"))Qj@?5Isp9V:15X7 0O_J‹xf2Wc;TXV?\h6-6C%;A n`f5ZТZ?ЅȾNd_ l~C y$xC5`1A7MwQzchi (L$KШ٪âx2}ia+ K43"*{sJ{U('7,A%]DX*+^$G C3Gz"2Ѱ|x((`kNxy(b_@$%%}>>CP=\;  [=hi 9}~!hx"r8 aݷuy@+qB"UhE7Uy&~UkOޛk?Ųٱ8y,\k) x|[ѽ%Ck)zFc2v:=3ϚuAQ5W_yJ[+ 9X#箢Qe Λ!G| !@yp5ލ"8cu1=CFrrǾα~L&{5!`/*0m"-WFNЭ!jVX҈SdFaG%=cu|>Hz3rz2)#HWb'UgͰ}ke[veJз"2 [G}ZX(|tK?ikF_`-Ycj<9͚ѱ2UUaMnziY.a*A54QD! #d8(02FW˅m@GT\͒2\H@XwOjb e_$[Oq ~i5jk鲀e_Gۿ:B*0nNv~Q󑒜I=$_FMŵL8;)*YDF&SP_|dh*`ʙ,I(&3T wϒ|lUtfx)cعJ)#kWҬ:7 wB?oP0%߆ qBSsSNOW9f$sj[<;y67j13uhB$`YAUxS2lHǩ3UHu SCX =k'7rsi%*!9եer>aT;AʼX?aKiCj#I~Ĩv%C7UdlCHPvYE.0o S5P_.E}:{{*;aȗqN]F)*ϗ %ſDyYCyZk<z;T1 Uɂ畦(eyL0Q_Kl ׅdƆ̀ldQqBs$3\1VG ] ƸK6, r'ɋ\RH#6ٱҺE!4|h)"ή`DYDgM**uU%^NT0*wEH?G~K*QUW TBWw߲9 f[P<~1ڝaX\hT%'%bvE +p<9>uiJpmg#H{I@${W"42[D@džFPtW3cauϞ0zl+>:~KlbڿpxV_а1aRvQrV;1DU,CJѩ9_Mrayn-,7H$||ze-ܝĊZ"<"#Ă/5EG]plk4iub˾&fGN)QV/PT͂G:wK'p%:VA@`;Yyн=;A`XE'7J>A6q)3ĝu,b|&Y!.hۼ96+ѴKhV3krsQ~ hH^0S']EŔg~@Yn^J7P~kܠ; Fj m8ƘxD rۚFcן E$1Ŕo}@zkS»i ]՘Y)WJ  >ezнA׫zz@ 祦x\Kw1@jجë7kMoÚ67AUs&U堼LE8n@a̋һ&;[+ܲʴ*etס3̋MdoLK[֕`K1A VyzЄ}gk{A%:D M<ũݨ׷aD=bWGh rNIu8Yꩢ^NUREDcC ?Z&|z}&`$4 q/'闉& +(`6~VqYPHENAe-Wi8s"D|PΕ}"j(eQBTժ(wNzqדԘz$ "[y z聨NX) (HEt1yR hW[RjAc1W`OQ}Y; +[&yI]7zֈ HVhߢ 5;͘mf?VMFf!W_ٻFz;WtdMQ~hhE' !8jPDDPL-`D+Z?eIE[`[v,ЮKw9"nC _gۻ[3+XBOiclS“1p p늈5|iB&*A.|*]meū6hԈ1C-)kӽL9@Ff(*T 3bPe]Kw^+Dc _#E *YPOBۍ 3*֯*-2 UuF}:Ƀ>mپv UN'^fH(/#@* #wgC-kp Aadmײ9ZR/X(x"j W$* N`(?d=ÃIIb+(mৢT w'qd;sFZ Рr)(`Bbn3339VeUQrz,\KzY)YWwI"k2fu3ѵy0N]=a*hb"r,lCs+.zK.ۭ@Ѹ.!ϩ Ve, vEU,/sNз@֍iͦsQQ]ľci8DSdž4օ(FYHD NHDM\` )K9qbQ1?էN?A&UWw偲/ׯ_GXXAЌO$/ $J>`Δى\SmzPRҒbI.q-˙;LKJ r 
GB.UsPQ֐sQ49!ӔMQjg)_C1^SՉ9IE 1%?DgE{a&hۥU+>\!W7YLtJ(=R(d!w$WBC _ڐr;.ص:vx4mRWf4 8gըFn*0&ZPZz=IΪ)s蕼|=e;W勞loV4<(&x08iW~ h*vfDʈo-kdJQmn 51X""ٮ*Ƞ2z3Rs5.{I>s3`d8f܄ Nsqdyܒѿ\9V)5e6vtBwPmMNŪWQISX :QBj^ U IhGĩb+pV6Nl!:.o&"V)@@\-Eb P'ݩ.և%& (`Cl%Cm(+bC!61XUzB3>D(PiAƘ6ePƜNTDw hcD KYZِ+DALo1qʟm^@M4A Rn{-?uy?:Dih()GjieE@>^gt;G(Svy{|P-&:aF1Q#A%ff `ŠMƃ%eP?-V^L77S閄·=棇 '2Lg8|"uq9 YcL0QD) fe: (5,Yt\ "Cgbgz70Y+%KkR& 5Bn;l]hqEPF`B 6.f*~ -Tl!Riik*r*0֖̔ZqvVIE):*,+t!fdc ħ^ĐeԐs )5|WKM;qSq;вC$r3J̢G#(Bğ"@٢,9(Tj PN5]Xh%$Y#a{=.:;ٸᓠ7A1-U*#{K&.ЊvM[Hd& (`wS@. ^^K,mߢ%9VtJFgF\é@/aԴ$:^'=,jU,Sʒ@@)?m|_zgi8NzxӞQN;+-S(o2Q + ,E: ]lGo [nlEJ~`YB hD<+`/#+w2I+/ҏ.X1^H|C+v4kN@%? p[~ڙzQUG7NP-kB wp*#CnJE|,G-6S$<=;BG섓h, g)A3>*o=/gЌϔL0QDL0Qdgf(଄ࢄLb4)379!"єS<>sǛ:?"=fEQ;Yp7r.]$(!*Y=Eue|;K% ,?zgu[9wuRE?pwEB RL#7W7%L0QDL麟T3pw#\Hc͘XC#,3ȕZl/tl`OQcIa~i\ ; WȌLO!.01s#JwjA #Oq2"] ~\Z` x2P猈CK,p(T#oi 5-JrґERZZ&i~DtV: }'Q;4j!44ȜMw&9VwjRRpZsߎ"I;7#`@++$sdu7Z1Ť"F-Ã` cPJ#2 )Fʳ".v[D+ ̊I\]_S>%7[Z 5_J-8H˓2^1?AhP!YjJ& (`& (p2)3df -"0 ҲQ@`<%K8Η9q3Ng- ahMdaSĮΚq`EuH&'tfzɕR@QO`|*3c9O-Ϫo@in!܌ ?Y7aI*;60) `SKKP{əse |늑\fwg7osGprVtit Rm Ԙl^L ~:qUZ 75%ViYgy 2oG:<5Qc -壬l$$W9T"HI^{×K T-9Kgٶyh֤}kt][ +c&.ԦOL0QD @]LF1KZ/Z+#-DA6d3+~3؋Rk~"^MBaб7S"/?J"+$I^%zpp4k [KQ¡øj=®%V'֔̏HMgJ0BR7((zyrPe8yWz48UEa^zATB 5P{t*%{hLۯ5sFdTKJ>gbkX$9F]b-ZçJrN-ΡJ(:BҥtxΖp9+YLKWuT".%dz4+V>bT_ILkxTk 7q\8djƓOc\%LT (@6&Xil0u6mh``@³/R.gP5X Ďג:=gxiqo#:YzRRy0RV+9$ L603suR-ʯLPcl_bYw!>9>0)sbE XN¦j )\$)B;bmѱ[@(*Ɋ-zqKJK,\ͦ 7Vٹźث;f1jꘖ{"Eoagz7pa̽chnp{vW_>Gw3`)M;-m  d@j05IݻNg_jM>9eggkgΨϿJҦwm~J[Q'NF/t' ,r/&OƵ @'7]x_h:wv#0vjKYAS/>^1ba OXmzȥ/!-R`'ʉTVޫ*_/?߬HZQ䢃A@@;? ) z&0 Y'}qAZR8aؠzCwV=)ITff: %JC.ZM筥?c#Ա7ð7tQgZTXØ1hq NDu{3]/^x"E Ԗ(TDɤ0kb H<:jPH0x\:S+*u/(4I>|XO=l辕KMk̏?IV{ifUUjuzXgnDjJXDEH"`kbF'enqQɀX9A#:f,F07DCF2фAT J7O?ݩ ":~M|P3\RvLtDj V\VǺ[uWxY*OE=\=ޭ!LbSRՌ&Nihŧ(īgvEbs.6v;Q2*UUu3jP2uH8YNMOM  "Mq5oWvzx"Ee^Wv2tTH&Z' 7?yVAQHuw 5AHڲ4yg~Ӛ$l/>,P'Ш}ޫ?oCؖ&8҂#NQC-EX:kEcusNS- 8 B?'^EvՅ0?Ԡ5He l@b# ~˺=]/W|[{v M;2UĆlwX \EE_޾Fb? 
ҠژĔxyh-^x"E@4QzGÚL#<&skđ=\HAVΏ}TN&U֯~***r#hoW_r|C+ -6>4kUZ \t(8Dhg+JH]Sa/+ԏZ2hXWӇR~@ q'o:ЩKPlz ("L@7B,H  Zѡ"q j>"4LLW2m1Q.ҥFsPǏ+mM,T:膛z7FՊn'6@H9R!cئn:G_:^|=iBnKr#&`fkvÚ Xxp`/\'b7::-ku}tbTcW7³E[E/fZiuHL89Td(ѯ8meitZYTga^Ԧ+68kEJҌ[#<0E~_hWMc}1t'?Ty)26>إ  R\pdMuj8ź"`w[ ҟ[ ]TXCHC>/!I=BVz(YM`@ÏuH 黬xTěWnWNAFk1u畚 c,[Սo`[wf+/.eDDZ|MGRJ)X2cNb9^f{G7@z1Pə%0u(NltD&,5f3 /^.ͻʁ/^x"lL2`419rL -c|P{S]s8u];u1*Ć5:.R--z'8&nݺuz!ب&\<,\zn99~M4ߪl^;(f%4dPajB@?k~<Z[,9 x">֕k,/^A.9_uk[HϦ攸x8 uBE4]]ڲf;ؐbSfwxcC$u:2e5vQDhm|Y]:6ggxT )zPΎM17Dѝ®;3CӤJXc8@}"E/zF(}l7,DĀ Twߣ28L:-,޽o_CNcZw)DZi8,yΝ=-7"ͣq 544o|C9mʂ񚀽YF1h)>Ru{*㊱ A,Ap'6-,D4$b96[D茪7K@{FQXXq0xf6o)LXd:,O *#mdRx-}E{~vE- S0@Mg.ъz>1[ͮpu]5gWiJ= nM)yJN2wkkDQi)WI5[<=ꅽO^x"E}[ l7<C/3G 'IRt֗ܺmܰt9aW+`(l<+T](bbH{Cg׾uq꡾g1TcoLw@'j6 ݋450hs~־k(:+mvk,\`+W҄W/sx"v bF;1_6F*Y /#\8NSPpbMp8%> IjG%v b&o m~j Cݲս.,`;^j8zFqK if6A39=ً/^"pKͰI [ MmiJZ5dR^4 olv =e1qf!ꭲsrOz:ӋI[fF_$K]tFx YQ߸9l;پn:.;t@f9c"<~zzk#)ڱuK)hu@%'J~0”9דbФ8_rqj(9#O ERVwئ9YR}ve{ƼM9}Qi`5>J@$PZ VHՋ/^#p.Q92R jIi^uZ[`u, 9CZҊ[I7lC SgtgP3JAf"5e0$P]xaym%kL`rk9΍߾Xk q>Uk9H @1Y>;@&v].x^- CcFz/PI~:zUYI?{L91,Yى!kr:NY*0Pg< YLnX,4lH&gjV[ F5L[gt7CIdxp`gK*%[Q^ kd$nDc*&\zE/^~ ؄̟1Bx-?iVػH]WNhzawo( z6 9lsHpuNMO"azj[bHd!B$=B8[|VeϗD@ V>_sfş1ޗ\l66`M0aldo6C`o9T=m+?Dgg! ,H:kp_>KҝRA4;G22+{440 !1XŎ+H|qv oþ6A Lk~K9Uߣ{I:pl;4#+ӥ j8Hj8w 2\ "@}6=[C8Oqtcι4mؘ:!ąշE#3Psb~@6QEs@:W &CA#!ab@uĎsŢmC!} )ݨW+`BtM"%紐}2;lIͤL\y1 [zzc2Կw3~[g$Y0Yh~E?ȣz|2K?U֮]ӯU׹kX7B\ 4yAիTZǨy<,`3A$@hZQx6=YIiS@"`kt,NRaSY`)E[JWlY O#Hq KKJXZN9FB}ajƕKCPױX K͑^R?lyk,FG*7)Xr?9kNxk:@ F93wnIi8,<@&.je$TlE [Ejl 0w>@Zh? 
'vNkN]R< J`P/jp=C1,>Efoq}buHݳ]>bă59%]'V?\GfVyt(8)XUU{>Ș՚U.l`83=Z"@`8 uRVS}?=HXޢ#ZZ9S׹wt᥯|jn W;Xw^N-^x"E5"j@)v/ͅ{RpK4W:VVV2]U8t9֬&n?RժBrWy#~_hfni˖}ͰȹL+%5< `R.OX:Wߢ˝j07͓lbp?u1~zO~L9uI pvnHgvE/^D?9ԅ°GSx]M?* LeVR*.T `B`?v#@fyҋ0cJHg܅jii:Riu\C\?55OҌcLN4a]4vu}jN)fe-,*7k7Q6Ѳ` "+5Kj?&[zB~#MQF~D=ݭ;~>{"Eu w(2=JYSx⁹nfWj" ,u])ߛzd=nٳJEߪ\Q x0Gv:1~ՂLНw@4o^FDuz&v@5|*T|q72Ή:(v?.)}!M5.4DL a=~,Bcڵ)5k5%#%4]4KFA(l#+T]Y'>?ګb#~~N{zPҦvSAX쀾cǰ~ꝺԞ5+##w̹mS 0.;8@{\.,$6-J *:su)]{RjR'BEHΐb>E*,鹟 [x"E"͵(RUںe3zŗe^1 T+T? ']XZtMV{Z켥BVV+3;C:}X\ҡq c׮&`'VfnTV6 ٩8K۩2GW^Q@\ۇg7it,G[7}@UWv**jR~đ#03MMMklz姧fs3F y(ҫSNCve`z/^x"p(z_ٳN!/6uygtytu\Fsa2| ~Z5}R KjWD [m"rڽgWԜ%vC*fc6g V-4/Ou?:-]L+-mj&SAݱ583dOq?wB+*rqctA_U.U;@7FkŪZt zAdʴ{gVTszI{A<ï!K*;#իWGkU3N$z9X?źNuiX_# :0 /^x"E"{sЉSSM:TJ@F:uV&̤p= 9Rx0gPMu}*w54:@)E0搇)'\/?םy=юʍǡwU d,U)w_&[mc>%dW b Z5 OНUTmOtrzuZO2gh╄y%*(,ذo!W+k a*sAΣ\5u+gy䠹{$ H6&@IDAT18ҥ4epl,˭CVNӨܳ+Zט``lo^x"E_+V 0`k:Z| 22ALQRmt 0BotVlvEC ߧ6 t+'?YΞsM YEivQof+WIݔjZMaHNPm9(R5am-ӱ rJD$ 0fj+3[(14waAϟRi:gܮ•\zMNQڍ1aHo;sDgr= WTu0Ț1[:RE/p#8pa Glx##XS!~͞6-M0K2=C-;DzL=[Lܫ*\ϳ_ {NJ/^xx?D_Wrn!cGvK jcjWVvô v[ȼbSM?Ҡ t#/Fhw(;t&Fe*-*Sfe}Z$0Œb@X`zo*k&k@giQOOco͕n~jؔP1ꬎmlrZ)MsRyF!}<91B4m lم +SA6#x4݇ G8YQAh\}]]mbrF)dKA/53iq>1~\6fݪ}=<'-f "JOK'ZM˜.6\U*)3193vo7xkx"E}Uk,F4Ԑ4Wziݭ\%'ޅi9jjjҡC'TUFaZ82㍑`ѱ,L{`Wg/O9Sr5W*FBK4ANdRm&H]o'yjVxqBqag()Ը>&I_/F̼(ލ%Lb} Hmno7?î۳2NPZݿgx= 6{t1galUWuwI±<汲 b nm5sdŋ/^"0OVj||ܕk 8X"5^׫/?7i&lM;*,)iujkBYQtiF/I&ǃtV5)Q߹s;ݣq=|Hr5 9?@O9^_9t ZS06)e 6w@khf0~O9u HUbXy,4>U+`K[R Xv\[o4AyC;8(PnOCz:[Y9~N3.+ؼ4$Y@#`ر-j۸^/R!4.Xi(QZ4:H:=pIE5U✲ 5jlv25G5o`l?}\^x"p;"p{UϝFyرI6mؔBԊu5)/7ڵjHT&Jq@bYiM>CL(vBf{U\OZ֮)s[[O}{@w1c~}Rwܱc0di1ßnQǺ,om\(cڽq/9zݻvu@pmJbE/jhXmĥkV%3k&rFG UHqT)_)CCG4H삲UJO+AWU~rKH1雱+tótghVt~ZM*1gj^Qҳ:p[wR~ٰ+G]J` 1OP]fت'۠JC&[x"E-+rGj˖mG?7"ltv"Κ*Rh%eQMx""yY㠫 Ug{vܵ =F [uBƇ& "{n:@5Q0}/S?r[R Y;c-\#;p}}-۷Og3;KŚ5kj*zlr#܆^R~Qwԕ}x %l 89fa5Jb;::4TX4^C}*T%` qkBEa?2Ic"3jyfqWH[A )Rd[xVY9S$K13z$mP+ςd2~kf(,Svn.]jDNfI&)gŊޏwyRo/^.-6;= @o9TФr\/K +DA3m/)-r:Gݭ]{6R+p%L^T|ݚLWcHmZH@t)R3eh|Xƴ LgyFG.bTWW+يyk 
蟧o,TfMzՕ_2p3Psظ#]=}̥MJ@T}XVVpt0),\wgZh%ݹ`G0pXfV.BGe*,PyM?Gi3>X -kƾ뎎NbD@خO^x"Ӌl.16L(1Y!T ҋR4Tٳ]GYBfmj>}nu*YV0>OS.US"1 p^{M iBmٺQ}5:|Lit"ڞ: +@ !YaU+ 1xWZM۰uJ"o`"pOi@԰4҈?ML1"`5y5ja㣽J)ɂƁ^gaX[{m5tb>?GiPМ %gɸ4_np\ CV{՘1NYL`n8&R\'DÉiPHY"hmMڧ(Hc_+ ;avQ[d];7靷O1KZv=woxW/^xxG;8J(*Ԅ.xY޵\|]c3H}:~[yz;K/vt¤ U ͫK?vܢW5@mkgPifgTbX T62ƒYo3&Àa0 .ŗO*4`ŧ~XqtR@f5,ߪ]=1~Z\uO Ļ,v/6f@)u^Ln !pHJ2 fl^o0j&M3\|.X2D$@4)D4ø&mT+j;}@k*99iERc붵zv2kԽ˭{x"E{&|0Va S=:1> )c|J|p/^j< zV>=3=tϧUu)/b&yti7lڠ_U,s1m7ݬٿ믾wF I:-kzSIeN:obp ٌIO+%=(]m35 _pk2l٢R:l͟X^Ԙ]@0.+/c/_)h[V[o3(?/UWȟVѡn*Uˇ%48J7+^Ґ ' 9>+FҬal`5k&b *.\y>gխQ[GȘ-mG%ɹ&n<5kԼef&#k 0Xg< 4"㐕Λݷ΋/^"lR7Z\#4 )ςGFbA)YڲaM).Gj]yIeҤmw7ng |j P>O֣?V(TrZvPK7@w7UUUIg].'V^3iy)X HH\bJа.<Y}԰ǻT?-?ݐԩ魓 X:]WqRWwxg{R2rYݡ:.0=5t;zuWs &f\ʵ[XI'4!< k7O ߝA%1=u.FcrhXNvw%%;r4475M90Q!TJ>c-g\EcilucІ5do"E/Ć>?W)̌ 喔’̫Y%P40KzJ!el\it}֟?r}BL5P[W`"/8P7 Rޭn}z]x-ؚdUFëNsD?7)[ڱ0~$LF"ɥ^n\(#fg(@2ZtlmFcl,@ M9ԥ_)?٫'.蹷;5S+OB!~ZxH7xotߦt68@y45}.~3㴾hS~Qo垯ا(P~q@+ VkQ -$xpź<y[X\ie㖓KJiS}Y& dA$.!M7W랻7JBc6nʘcty甐W h30b fMvAsiRyQG^x"p["pb̯~?+̥jra:V"JM3IU8}hAՂeطM tgH煶ˊ COUAa~z+(R5U|l J]Uu.~@(G#Am+3&Ч^`a _ B0 83D@ZuslTQlv;PDZ`$K*եaPcs4 s:?-5+ O{>؝YY s7,VO6ڀWPդj !-se@-ƐVd%Cf*j\(5sӤS [ۥE4{GܳL )E0&J51ĮRAG ;DžmYWʊ"\KD6ɋŪU4Oh| KnҚ;,g6鐔=l7 ҋ/^nsn`}*f~gt̺[Ng;֯yض g K8@f{H} #agT݉!]|-lWUXG=R=pdiB 34=qzoF\yV֖67t&2Mlm1tjғ1 hSfA?TUo[pQ7/2PT֯ Jkś*Zoi?[] 1sG_О+z5 7k6jszAW^WmyVT304s该Z˜Vh"&B_W #EDzraA,y]tES489Xڸ,XM ;$Hc>nYyiSQOUn l⟠&d<ҔC>2nq mM#gk .M({z"E#o=vU:ghʦ˳qLΣ'zԱTD5L-t[?7(-q,aBx ØUcUUuw֩gFmO=X/kZe'uџi֭hql&@,^5M kT\\N6 v&Mx4XK3H"),:#7!D\47+X ߣ,Z+F -!gr9t۞q]g4{%m{IҐNƫwc E:88lJ5e\`) F'kqYs ZZgtڛ u TnVl_jSS_@3t%5.;dHȱLܩ1$>dq?iTtaOHB|k.b8:ZgqtRO69sIG;"]ٮ?-=xR55URSV PM1p!lV¸bIשӗ? 
KM)8Ol-\]:UEbo1Cݜ2jni~-{"E/#X=5󤶲*N {R۵W[kEhw@a~7tߣO1ZKF,B[v=ju*TWJQ:sgUUG'Rg&Ţ3'N9,k*96x^smUB&kɕXMxtDj0ҧE4֥Bai.".''w+H F0b঻O՗\L&V\}41 iԞ$@9pmqNjƞE3+pc\@ggJHtOEW+*J?}/KtԔ538OCPyb2VǏ{0_Lȏoe:0$H9 ^/x MLĵc Dc@~˻/^x}^A׫I~’*VfJUTUA/&ڛU 6Dڌ®szcT|) L.TҤY|z,f&m >-ůVms2ygꃏQ|Rc;Nɔ7ݱV ­&Wmd$䈰Q;GRȱ0%OL8Wc&yAC87u`9سx5X=h X7j suBJW!0&Я8=ήNDܗC][5 2>22*-˄M;/^#BqjCF￯I7uwT=b|)͓^BpUzу*L2-MjBt,,]B,L@+Sa ws6ՁURxǫ0ߵk.ѽz,@$vNeưq Ȟ-ө}_RvU!.\R=󚪊F=233Sx͵kFj[+Y\RDmɊ3,, H=SZ\fIkhhL=j<QgOVZp@oWS焦K&oHG|R\5et<(|i?xEoބJ^1'rFI @݌$}|SUHzNNN4^.@'U}"E/#?6U lfݽ!u4\Obge.RZ>vS)4ߴ]s%4اDvBQe1!hz"T_VedSvM9s(r @!ޟmT hKpQⷴ b1D8X*IGw:8}:nS)L>AZ`_RFCކ^QD[c~ZmLR63_jT1AtҮX#/^x"O/?o݀!ꋦQҏ`G4G/0aR;_? VYM"eº3pO ń3 9 o tGwvHڽYG!կ"i6Q\OvlUEƴu>ʫVЀP8rm7Hl`?"+tz5>&\?vY_QW~1}Dƻ57kO };4HZ±g\XXp˄SRV9ƒX\F/*J TUQ‚|lQ^^ljFGGEa&cL?4@Z2n-FЪIa^@xLMn_:*+C%Tw_0/jSȊyA'NR~+Xŋ/^"pۄsuJCo'`3$@1tE>4'd!#3;=㧎`OLtj.Z'5&YN| xdZHS?{'rq:qEaAJZWWO~#:rUծ* W/zzz0r%Dru+*0D7ػw[:*51Յ>e €] ߗ g!~:C{\`íG>[OCRt!Xk?ð ”M"<4h$};ՇYƌ+`no`.vqUzQ^][2i3&1|I%@a-sӪ!ԠRcL9t0 #/AC|sw|,US 0Y1c/ˍYw_@OE a5m]KŽ G+5/nYCw|_ ~UqJ-uk܋Kڡv)waMwH ڃsvoyX 8HYе.P0hSwXTjVUM96nppVFX8셜n\wgZjN[V[WVRccHY\ utcshR+o s jo}EO9:f]|3O8N(a2F"}zE&؄]?Wxާ <;MbDWNFς Y~G~mˀ^mqV ^tvq @Yd+޷Pp#-|4i)9d{D[fprLnwr;#-v/{|-}&Ftɗir\.@]-v/غb :;q r_`Tvaqi'X;cզ+Z*tJөjl13 qXQaxR/x>`nM.H]thGQ%[AG7ᅠ UɃ{42<X(WY)șN-^x"E=k酑XcF@-1LsLL15 Ÿ?߯X3HźCݰ1ϖ~c-] (abwap /:@Ă1>\#d=cH{&s6~`bO!ONa!tkr7sa0ەW^]b^z mٶEmbV7oiR px;;E@Um>dOKS<2;Qm,vSPfަUVf'iA˂41u \.]wtg ^\B2{I0w yA%2)4Lݺk ^ $\_վ20d%f( f;H9m{2 c@ɖ:{~Xoց76۾@wW>qVg`ɟ,Щa(îw}9srU|?.bivEk(d]dXgHط=;;7 x> r }/8.-` }!F)S{0["~Ԭ7XgrףB͊0؄~a_R#% [5He/=ۧVip$EK{/^xcFJu3?kA]FiB`B7&]\ٺIIE/>SZY6s3{Yͣn&Wn9}d drw } .v7آXʶGFw3VYtglܭtpuy}l16䃗ᲤFc+c7B^{ M!<:ހ"`cvל;w!6r_]݀v\c AX9V׭qtwRksml~Vԭ%ުǴaݣƐ2hOlnЉkg&f X+t& H 6jkXS3ڼn,j&Z^ZW` UI:vWka^L3ˁ~-ao6k>FW-rnq\˵D?|͏z^,}a<5_3;DdkwXz7 ~Kei'#EnHMڷmVnuty~+NӍ{; [9džVPq!oјqҫg^Tщ Xǀ/gsޜL2| B]j}5ܞqw[rIx"E?rf*VtUWl W2鎎9NGcN0mIC7 TLm?r,n$ t; ˮC]4ϹBm!28Dݼ-e20e d5}Bwݹѹ/X7qtշgiYRyYU ~7˸,MĒóf7W %xan5wpx"EXlxQ( +1hfc}j&Yr`-*(tQG3V~؃;RƪM1V_%B!+묱hvr {% 鳿ڌl?č 
|#:w/%=L7h: ,>-gk8JK̕ƛo}"E/#pas ,=+$gYN{ӭ? {޳o 6HQ@ʊL[(vDY]خr9N$ŪlH9,W%ʥr#.Y?q"(Ѣ(Rf0LL9~,3=,=oλߗ$I^9PUqyˌ+u4Eo˓vBe{(礕vgSMF_v Z2 ݓ ɚ*~>wq^SӳV5-(B%'o^JL蒢zuw[i1 ǝ'˸++]^#&%m6muD8K8L(ۅTƙ0$ͲL-{??pC$}Obkx2o5I XZd%pIU?-%A(.^@oiųT~E"`؞H$N9g}Z NyI]BiH 4YY Jr_UFO0a[ֆ(}Z+:ؘqP;\alZFM`{k{>O,-Sq3? zϯHIw)iԡ߭&mjm6$? T[Җ]X,E/U$lr_8GF.5Jjj֖VJX/jRӣW cؕ<Ŭ-!miue9`BM_^ = [WMI}ѹoS! V)KۛuK,y:@o$Ugۺ%K,E"0Mİ{.\jnD>cߡz.Z[ZdODGM4g>N'Aڳ;q_A D!XU\g5b쾍Z {hctbې혣dgÑ ^"Rр7J%}N2gZ>7IiOF,cJM@zndX,͌H ~|lG^{щ0UI߇{}7뽔>If h;T(qWqbQ.40TٴI`h2wwefFm_G?OG,:1J$嚚Φ0*F%;&VPMjDP2$#*(ځ;+aEɮ[,fDQ撲I,/S NP=d' F/T}=actedjں C svImFȶwdIxwo'8ØjQ}GNE XRīdibus#gsc^5;ӽ]Z,Ea!0GtAfm'ȕbj0nknC; |_(ȽMC)4i\xH68< i 8ɏwNOc|MR :N]1 ?[i˵#1#as99sf0zFog!I#è-W} E"Gt:4=[lb^N oj§yoS6Cw * q3D*%PAzmúU=r$c#{,^V^0$ pҁ,+݃x /:AqJHlp1:s:oFJe6yf#8MB) յ,|9E"`X6:6D;4C&l ~Nt哭4K3\#D[R:VDҔDY8HO.Q) Y. W9}ƣ_9ktJh2j'Nތ{ǃIN\`*$ic35քIxUq;wwI4z0xv>//:b)吴,E`3 aPq :qy[[xQSA1<@3cG*g4<~⌇EDth f($q+7jQIDATzRAJV$)vKKs3"ֺLFxo`ϱ "<Ҩ`+qAeIR%EF]dI"܋iے,E! aD/;+kg7R%CWeuWTM5ph@TȦ &Ifdj"Ys,sW+Mk F4ƽt,P/vQV'y=<\#_4JG(3PR 2UǼ+=ޗ{@L"ݜlB!lX,V@¦P!~><w{SيV2NF *p 5˴)A[?*83Bi1e %--!"r"1:Qafhэ? tY1Gh3ƚ+I?EPh Y7S 3$7,Rk╚&E"`lr lNM8 դ;CTY ! 
s"%b5]dMrs-xhM#`b{_] 5yIwpHG;U>Eut1I L3t}# "zqAYNMn]C@bsJJ/˕{]X,f@`Q¦؏a(l]&q@Ÿa!!TyI״m䒝N.XҺ$YT)s8ySdJKH)}$PE?0ggm8sOYLH!=oQ-SU{5 ![8DeciTVvE"`l6$lA΍p&(RIZrP5["lAwЙn+jJ<$dn| x80FKPR`)CXufI^~."Ӯ4һpME XǮL[8|Pweom3il$h둖%l /hm[@q%[[m>[B32O:6iNvfV?8Λ%ٗFbڏft0/J6vuWJ)#caYU6w7C6ϸmqE`" ,v-{k1ydOQq?ajd~)Lh~Dú棸NdE i3#yr?ZV# 3P<л y$k AC+_5.6%ܲ"`)"LlaN>F@Mq£t/GaZ."YֹnOpv٬z4~g3>.5 &Yېy "c>TP<ێ38d-'ђnW0SMiG¦b_FE"dMvi؏VVV̒+G|ܜ ,ȏf8?3xGCJQo|( hHg<#9M%FQV 3S%~iyZ>8 wM R6KȚ܏M0G/'"Fs77|$#|pL| ќ( 0\elMD2"YӹifCr?C테]W0tauԭe${ν[)> o.]r窾Ղ,nBE<'>j(VrO~DǞ%$k{U¨yLJ4-R5ͯ5?NHhL,Rѵ|۶*9A3s'L[ٱ-5d-mZ} ía<;-~Gо !Έ/N}V*[ׯbt^_Mmh*L .4^Q_6$3'˺ӵCVj/~{*i"ǣ}-%>s%I<>9Ps Ů%l|ҲhoH77qc?c0gVRBp[}v߆fX C@\t -8Htb}6.:vt?,JJ7\(mK'P~ׯ]ȓ 料q9Ia::EN'P%nb.U qWU\an?}*l 1xL&jw%f/> oA$ &]mSUIR-$x:O/v,WCB8I^%lFԖ7Be0/OU-Mt<@ g(GB]+ۖ4!=f-@^pN'buf9|!R555(+܅ bx :5I#Y))+Cu̿XϦq*&Cؼ^N* 30MI^ 8=5i4D"?K% 8|TVX)'+]F022FF{Z[r mTR$!דO54҈yGQWޣ1/ZKU|S~+IjSeϼFꋻX-O./5.{#a[ѯYR]ړ*Ƴban?j3Tf(gwʱڍ)W$MRtz-nZ,+##3#J8KdDq@?Up)u$QmnD(҄A"a蹅K׻0Ak} dc;xS}.iiIZ:H0Ei}+0R;7/v{wCc8| T`vj.\D(6#k314Tҹͯ.CxiS>$IYR$zF?OpF:j5ǎ*?=7I(W k^<[ _'p>>{v/Rͻ|bVǸ8O[ K:t%wݏydMqxY A]li~Q:iή pSz\%:uކoJчFn6E"`C̦<<$QEɩ(x4^۱aN[TVDc d)2ZIVVYjCnd6><諭6vC-1KSnEc(iDm5 R??if$q%Pҷka40_j_j|k6C uECp5,HBi$4UEXg3}\2$vQ {[CD~2~/s3J㾼KKr/I#'sA~Ճ7z4 \?j5L3˺~ʗ**_r&+cJGzkn13lM 0qoF^C.)$ÇR$\^mSZOMQ@6E"`A2t;ET]V*v4cnGyU$[j XJGgrѡ|eakim56l*gN8 [%pw D(I*0jIҗ/ꔽ<IIJ2"ӃƺZTG[;-ʪ$Urc (bJ*pմ'QOv5z?Q $i'VI(Uel&b$<'@}V@IoןvYOqU*Ytgܪ1ڱ݆dX1)RspJC='LT M[1/vQM97v<6aPP:LũSr -PRZA ow09OIk ` `:FJY44Lb72e#'g1C펃kpsC)hMJv4 Y\tH+hh,KӸٍ'/-Yh%>GX y'U3|q?~7mHX Ǒ_>E՟M% =xP}WH:#h"Gmv7(}4y~"z1?9>Z)G◢D);p$1\3LE%l, &=%P;PWPJڴ)Y+tfh. r Wb -n?Ivw+8w vlE}`?e{ac6y ^ڍUP:tҡr4'"i|xH=P92/7^!h6N,at2ft ^+T{^”~nqr'#*7"+Ej.Dwf0ӟ|· +ҘaT/yoQK}u͛Oчo|ٴ4\?y-i|vj/S~ WتdG!NA=E!__x7O{^7;i&II ZTTfef|4[,5^ې_#ӦH毶殻Hi|@q{]Z,! 
;|;p4ǥ^|q؟ $n*,ϔM5H]R<Ɋ(nNl+x5/r?,*hæ)zQԀ}|K~iEKg_ iEjE"yq@D"g(BYSaZ2g6?{2DY dR]IT*lؽ>p,g,5gff%]wBT;:Wu| cwrM3Ai$FT6'΅5ҡ.a*}9vd**JM Nq\KKC4Q&8*{:]yhS&K2fB箔o6PSdkk4-y7d+`$vzfח^q;'q٥E"`#fDz f_3b&\.2a-rKo"z suy/yϒP?;njU%!{!4yru^ڞ[ޗiM0A"'2m,#uڳ%U?#^Uk&lj `ijjؓ*!~5 ыa*oT/7/lV2(l慣m%iuE@}A"@(D7B1nqhʁ=h,3LjSjJ鱑SVȻ\YN.U!_PBN_6 GZ*E@/h֚Ttf9j:;W_x(Z1W]X6"kr1115ܸEcF/[0_Gm]l'e`Nl}``%%%YIǕJb1gֽlIó :J"aFeNڢJ< <<Cc2q!H,am6Ip1\LZ'ZӀV~)>ZګY, >kQ*S\|> ;FMӡVSc;}WRt&D-a 6C@$Mg%k/npgwpvF-g3ҹyg7,mR}g?V~t?wbI2ΤIeleWՠ Y-Y4IQ(MZn>J΅a N&@_yjO[p7%lRzyo,E`!ZjCT2z܁na%ñ]fmw}pŮI|@zmg}ZSLI"lYB$ɖb1R5pIU,=TQR_MK9%CܸT96 zI3l\qRz\FaPX7$eSz뭷/ZA6Aajs]"L_6g2:č+Yb)I#mY_ؔ'nYϙ,)JUr:3-8 ;gkX4l-,i-b"` |i3upF:~6mAn3>6 'I'~ 3DG{wiX-9{ޣDQq%"tT໵',W -5}@{ $THtEKE""`I]nCHfK"k`n[8N4w˙b)JKͼ6K G؞a~XҶfcχ{ץVCf|ۯFHXV(U |qԤ.~;XG9>rߍr*x^f3J>|" '5%SI_4 ڶ"`@.iH5=mFȜ={ͯ~x#SoS=^n_V(o陳IZSeڎv5{yMKI/g@/ʵ-끀^zO'?B\IENDB`docker-1.10.3/docs/installation/images/mac-page-two.png000066400000000000000000004223301267010174400227360ustar00rootroot00000000000000PNG  IHDRl iCCPICC ProfileHWXS[R -)7Az;FHC ؑEׂ@".lIN9gsfe Y\a3>!I'l9#(oDn.vU96HɜvGa.Nכ+w+ !Ad1Nb 1NbKMt7>,09q|f;Ƒ@lo؍@ 񤬬S!6N!NLbci.!ryg9deЅ&  'cvCH ?9,b%8{1& `xÚ/ e2bd΄MxIxJ"<&$t`8ʅ93O$QFbf lpC]!gx07;hZ<ߏ9LFY$95gϖ 0v;]ZFNbMe'06[[F%r7lOМ9;aM:QCZZzSSW-CmZCu\T}*3'LdO,xh= TT#RcNCZ͚5ZZZNhkӵݴyOj`0=rf;sPGC'@GCCgXH7F@^EQ/Eo^ޠ~:{2i |0423\nhH((ߨ18۸ $dI)jjgfZiz 57m5D4?zmsyyy"ĢdɉM>7e.VJVAVVVoM֕7lh6~6ml^ۚrmlB۵}wwwHrpQ1qy'bOι·v1wp|]Sz]u]Y;\ݘnInݺuY=<8=yx{|e%:{wS/կok! 
8`]@@v`m`C `jpTpEaHs(>AA?1aTԈSFZE.<E7}W11X鱵|J'/KhJ$%&N;m㴾vӋߚa4c 3gf<>K~k$BR\ޤ/pV5k(90yK ۛrKR\SJSOOsO+Ky*xҷϨɌˬ"g%e+3fϝ%0 7f s9MsYd,EԓWqNs^g:og~-YtABυ;!-[\o=K)K3^),(-x,nYsf_++^| |oEJW~+_,,)+V::eu5Ukkkos_T4w} mBmٶMMM!M7"fWe-+|zʣ6m%>omGCauNμOw:owZï֡v5uh}}70?Q_r|{j;x#[ҏ7 kkvi>5-:-U9A9QxbdɡVASm?}}j{Ǚ3=}[.8_8vb%K .bwh}GUMN]SN\sv7o\vV̭;ùn _òG4۾xOQ{_>y)i3gϭw@_mye_{-|=f[5lߵ E =z~CG{>9~:99_H_ʿ|mHȈ%dIlhJ oj%Q/ ;;D f !RT=jc3F%'Z o1##o5 5U822ud.H.{X]|W_˧kxQv_@IDATx]`T7$JDE)"J`g{ ]ybW" Q.J%lv"q63wʙ3߽3s3s-5UW]h&MzZ-Va,SL"F ⍋ߟf{% 3H26:LI._bt-7}4qF]F5F@#wC<_+^_S~m;`:WL6Veij@0)[<9qdgg/>cH@/f\UUJ`1*ǀgK"HB4'q? 5ϠMqP}BLC ^ _ QZ_4#NF}h4ک?q`̽L4 5o:ЗF9,q oy5H] |ɣhtM׊}ڠo.*gU 'Z{A^}!!!^ 8B@UUhTҌBh(F S")g\3gq3g\O$K=~F[ œ}rLuf#ltH#h4/přeoڂF?9y ͈3h4Y#յo70rT'SxCQ^Q']kM⛜!5qFf3*D NXO#+kU zƵ3L!dI6痀!@)XteqFEZxZCRLh4ị'{}9a×hsؗ^,'h䩯 pcJStMff Am@vNKt'#F5` ' $g˩RwOhQcXe֟Gn <'qPPOO;E 5/:Lq*bχ90p17ai y8WѬ\݀5=pnvduF0ϳS?1Uoym\\ :ׁs/T]3\h4>SF:5"Q ;•{.~^ {>x@L#8wN<ְP愡pn=o1;988TGt需ذC0V7%Ũ&o"mv"&: uK2#H7G ƍ1n΢ߌzasUνVor#,C i3%Mj;NZ|rJ~XWS#}aBg=j5;QK=2rG9g=t-8:^#;8G L!{j Ƽ{L;,qRN"eQK|elD~7|ۼ]HN4OF?y82 i8LF=SQ8aم9TUQba oAAK˼Ƶ pUVVrs#qLV&:‡r."|xF:B#;`?qV #7Q`[%l,W&eEI(tFҙJ_qV_~ElHIi?o H?_ci8{ah۱ Â6f7vnۄ̼by LiB"Z$C.w:w*ˋ~6!A'Q ʱv/prkQf-R& Dح();СkWX\gSR ޿oW#dC`l?*GuJnrkWYfDwJLz8hXgD_NIX4US0 ~J5.NFVϘGݓѤS881p[-Bj6` (k!i#_]֜EGG+ZBO~2aDDDl4mA d̾uP7ShNao@@4Myh=1;rDue%Lb%l݄t_{\K) Lt N}!ڴN{ay)`Dџ$Q CAqyݟJ0uJv YV> 7ޞLI~H,,ߴ-'&Rmɧᒡ!62Cϛ S.+gS?íS,VOYv49]#‹E& ;ӊ ۳7 lɬG0'cvQW TA!-V}Wވ-,.-~IZ#tF7#z))]pFG1~aw(R/H M7h~ #Nf^5#ň,܁hj֔&z5 :h(" P>wj?367bu] #šQǂ0D9"sh_}f \KJJfM5ўKXrE+\&xhJ:qԉ߂}!iegM!ɂT/"v7c )73x~EE%FWd R4%Ԛ!,4Ko4k`s*B55b1⿜lI>脖Hw*//SU\e;$aIcSv^~ᄮA.'K_ Jr &NCp?.R,VWUs-ˬ"بy,/+Ral2W7,IX4c'QN,xo?K*.!a1~Vgy1^y?l\v~ U"rtVQQ袀d|A jL+Kְ\{PfGYI.»v-AUuoP}Ϲe|5.Q==:>ƉE?&6{x>8KNrBxpei_bov! 
=uK&XLN!gHۍ+W"oP4 bRj*Ğtn_Q0&.sf/ yC[uxSY|~9:gzmWcWsQTZN<8Y8,st=?>$%+͹ChC7ׇH3<3h 3}_X83v#D3AAÁ b]Uy+Sa\V ;7X.c& %`H& Z P82Hi 'YA$eQi U6DFzIoQ U|37Ts>M;@A½[n姮ʏ;jEY,NLQQʢp6RT֤4DW‚ϕAAB4[yXx=s>w V.^y*Ne)NYsqrAyB*~i)jRڦwި3&4չa<\ye bw83_޺<uҫ)5ȵLj _PF-{j|4q*N(-]0 |6=vBx h @3"*6}OL,GgՋ¡l 6xĬ/&cMrhńGwM0L y6 x(?;S/LŻSk?^RmݺAqX[L011wsxL6 ڊ/bݷsNt ?&OBb5u2rJ.U kh5 5דavst?Ykө+."n#V?@AibG\ԚWri%/DN㒱z"TE4CQť'νl4n4jTQ7p~m8YtV/qNy튫aA6thŅX'j`5< 8,E?)[2(~f-^gŕ+,_ j酘|Z/lݲ +~u&ڪ-;b[("X-u=H7bҮ8{u+b-v'N*B߲˖6UU58LZr6u<+}\ު͛EֵzZth3_;64lcB;@F@P?4S8SM,UXɥb#yjҸTj"ˣ+ϼ'tn('Hqr+ r.~q^^1qMоcW ȍh{hc&FcWO+چ<$RPh IOflla3!ˮɩY:4OlUעlwԶpqWl5 z%Qa,Bh/ wV΍KN+* 0ჷ#|掣]`=kߡ-"Ŵ{ȸڿ8SQY j՚qըY)ZsU GrR'r .Z.|bᎏ硞ԲQ,;35Y'\OZ'M5'̛2أʨ%&DT.C}嵯`0͚t 16򨇰A A9 wga HLV-[b5:u`,/plǤsѶ}[=3":6]Ҹ~NF:\P9~~8ljQU໮o r4caa>essJR’ qg,|F=20g ķLh%Nkh\ج96/T*J-"4ٗRhEfH/' ]mr@VDH5IʷRJbp & /=Smϟ,DV(/&Z2.!~(,xi6>#_2j1^Yc-nghfV޼ s&4,;3PZ_"ܻW ش+ ڴiXV>/\ P(f߫@QY۴GvmPի~VN‹tʊ dsjmƶlZ'|HYu&к};lXc/ j M 9>?|-vgqLٳŌ9{3QjѢ9?]4gH-EԆH@TΙu[vݣ~K{dE mɾf1p¯eo6Rh׼uk5WQC9 1kl8)Wj%$m[wqqղH+|$X$7·_ŽJ#-[sJXJGFq':O?EznkϏ;3`KK?5†ocƵFWGJ|X~tMu3jԵM*SNz0YSQe i=ORBB\8, 0_Wku:W}% .W!cz_#Zˑ/H]RS 鯛֮+ܽ(KU\W^~gcF8~س' ?|#c}X^^y \ yrTu:‹/o/g㦫GvId"(.] 
/^&t/.qc^Apdgq mス[ٗsM4</W,2ve0"5Gv ++0;pl˯ˈ 9v8k u9_`]i]h/W^{S>7\1M=`T_iڮ| bx3Ϲ6\@>(f$7a zfZR̗fЩy7ƨ:ougc2/*rFlܸQ]`)NFfIwnB~[""r`.[]i>pu&78kSqBkl9Ѯ2A+eԬN%-PjhJ5"qr&gr0"™خfM4qdV7Whk/;}1Aj>Q-Фso=,4-nԁ{ƍ"E M6m#xHggd"_K}|=\rTg 8F2bArrsx]i#D$'R8%踦hΏ痠y|ڗ:F HX\j 3ߴ"( |eݻv Pl %Pɲ_q~S_h/$'%kcӄ y&i)-`ݻi3W!/H Fn7ά 5N顭mFZmboRpJtgy]YDT Z&P%#hleKGFVG< 7CbBSsvtH%н{)ơg#,2 Z$bKi?_BdT#֕1ҍt_Y!^\.elegHlk .St@#p0?g͹xrV<`pl t7Tg1޿K3_>k^O~>&uHhT?]/3i@4f$fBC|@9.RP%*Z'u)iV fA n>j ɳ$*_X0K&6JrX/Y' *Loء?UgCG^}{{m`}Ÿginv0lmqi6ƃKg̵rm _ިiצ\)5Ùefx?buR!d1h\[֯FxFf!*M /2iH_ҽur"M/5%99+8ƺs^J% F# ݱ"]'jC>F@#pr0†o7ךTQs!)hrFR=_0;@X^ gK_n`Fux3z)7<dzVLNrcy JF_9_A1 K!JX9qmOLuͤk?O@F|gn?g6hb4/}Ϋ%=KSh4C;y y4瘝kS~s/wt}~K!͐t ie _l*̟QV|i]SxӒO<2<,++uHre?ݛϟ?/=6 p i}8#^F!!ߜ~9s[ _FA_oī_ʑ,`<if@R7=)SrmPk}!PuP6:TY#c}|2&~ k^k4FxG p V %ӡ䊃(˃Q7/iFx3/yuu/--4ymo3h4F@#}HaQ#~*J;F@#h4hN3h4F@# jh4F@#А[C;7F@#h4D@ l1h4F@#[A=F@#h4Z`πF@#h4 i4F@#~4F@#4pofO#h43h4F#~4{F@#hF@#h4 -5h4F@#6 h4F@#hh ͞F@#h4@#wBj#nQͶX,x\9b8<( q9q4 ߂nEuU5<+pVWx9;VaҡBE;S^h.f;*M}۶b!;!Vy3`μEXd1,^?rII U `Ƽ>Uiٜ  :I#8zI ;5;- ޅ#7pW"uJpAX۸ևPf%_]oB]Lzķ q= 7QcW4|-Zތ. \xKJچc-|[J[3=;EN*}yly):( C,Wsr,kDhǯs?qp=_d`dǂ*@~4o7{Y.@^15B!:z)D1 쏓@exQ]Am"i_VHXڰtxOPAhXlq2./GxO<_͡7w>yw,tn0ҋk?w߃3& 2#nnQ6AFOE@w?n]ٱ@@&˝k`]c8Ԗ(//CyY"[2]ޏ N}W jJ13?aHX1<g}6z~%\mnܿqӍ!UzW_͗~߯__|c8A;r"Y]xgݸƻqsI틻ޜ-v /= %O,4||, 9Uvn[[qiSg,4a1wJrC:Xo Ĭ qЫpaG^%}7aپ6 b|;m&J-?a)״5:%Y1y\x^Qf|= N:+()RjǏoŠhPw|-K~>~#PflĽG Uj wK.=ݍpځ31n!aطz|^A0ifnm_T看 Vyn nWqs$8 N-Wma)H0m,l\3ypEM@VYhƢ5o.y1N#8'h_c70dϞ[4=Z|Χ}G IB[9߱p92u2~l v\yk>8'3~o>.蕊w݂D=>}Ϲ:6*@|GzF >~lJ+k ;hrbƌOs?pqTYlή@Φxpw೩03+a||~GqUH|O&~Ŕ7?@6Yk9͛4r-,#_@R:|0lH/ՖXt>Q6d. 
ϠcLjGǓq=PEZAt>CZa5 ,3;IyݩFRҜm9nv&LĸA٘ oǧ_ĈGFcCfNz0J=r^ZXa%7s^BLbڝ}+?CEzuXŃQ/2  Wjz˽s^sR?]&o{=xwlf">=f&|Y,6Tn{o&|M>vf-!߲ asj'8*7!3/ŷegf#ߜcA5Gn>r:q nJ-@ė_ϱ-@-BmfmؙQHrTmn5="sҁ+x]ι)MЪ} yB;]Rnn`\}6Xjjг{{l[u*>1nBpex8̂bi ѮiTƤ(zt~SQ"6ԊDG>F)],|VkMrHlZvŴ!(\0* p5wm8N޻aqɰ`076Ӯ_`Lw'hA08rڢv練_UCNٻؘUbE$6o-,.qbpq{ȅxE݄_ )s}GWơ2ƒUhCEY0p&}:h':둝_CM?zR nl~ܡ0伳⣯p)dګqO-\w |h ~p|lbvR=N臗ߞYT4`Aaxp<S)Mۀj:8+ŪD>,;K۵}=fTmS9VCA;F 5lG׿4UCXDf͔VU( ;Қ !6](0> griƆexG%P+ZrkѴc(\7^}&̣!] NNhKQ(+- rVO؝]-$O=MmCrB{Arlڴ1¹Mù `dM+.9\ܡXɕ(8iإ7ƌzY?}Kƙ}O%B9%hXxtؘ|cjUhwďBQ.mBpI\r4iΎpdʃ2Sf\P|PܠP!vt&bh"L/ϵ%y%aꔩؽu^W^q-T)F!( w_>.<vU(A8?v2[Ȥnx-%Nbd8!v nVFT#D(\܌8S+ io]pѿ{?^ n*9mpb{'-cd>>w>| w`ZYO(̓]Dy5-ZXJ4ڙ1m]RRkҤBO> b|2m64P!B0I( "P.E[(dma}YA>w `qd VɮA9#88Ma=SFN;3Ѿ2dD3bW K駄|j.oq߰`ҫxvՄ?1ȯn݄̐Vֻ:}B 1 !xMRfji\/BdrSDpl͸GGH9xf]+BBq5`Â"4t3gy7ظr8xWwu[HhB<7-IA˲7'Ǫ9`v4,6tH.iHDWebW~1Bl$,E>um\v⦻e0}҇8Sc<},\DXƎ mZWbՌ[)rK< ?+,؂"ihWB(t68DVAyj+ m.j:1}Cx|xdӖ_}2ώ}]S"("aupVӱ/8!y|v|2w)Zw:]|RCPR 8ac`N0ejƄwpF 4~@nE?$ƛ*.vE^` S;ngLj\8xh=xϋUT'SuF>]4mJ)UUPs0ǸmXrhSzQh܇~ݡܻ3 Q]$7!}I۪}/D+f5u:νj$5 Ahƨc8"qV^*fM7?Q{TV7/~ϼ6-QN xܳuҹ$ь/ ύz۲PO= CcPxwխQfCs[g({8lvbg(-[mF|$\s5XՍ{ Nk.E9-?e œ9s"S}{o*=dubBxhlKDع/܀1O[U8:NkNj#s~yHTpup[MAeq1 hUh9b ͜FsyhI{truMz ,rr= ѮՀTR&t ,%iN Yd=?ϝGoLA9q]y}. 
c*>_Y_7;>~5\mUɸ<%T >d'":`{!*ȅB8Nd{YFWm7e>\'mt.D'!}:775{(뱪Ɏsx9hE[$q{Oܡp#pw&[=zn}kinHCgdw&.)ʿF<8﹎BSH#しT2O~'JDMb;0o/$Q'g^| jȿI`#8˜f68,v &&z?qMxp՗,uX#8Xrsss9h&ڵ4~;/IFe,QѸ{?ctVMⴓ!l8@q^^ ;w;T7F8Ǘcw f`.#v<#Yܹ}+($uꄸpٖkL.'"n]ddOD;[5Aa>dP6N\Bܲ=ڥ"<ɘZh[㣰wT"Se6!~)6SÓܶ=b"x)~YBS}nD(sD$nٶ}tj3늰(VԪ V,_Hib6Ѿ}*m˱avlQao/Kۆ@ނѹ}2\UmhӾ-B فr mZ& $b_=(u}j+.E[P܅ڱk*pll]m@~v-#]5[vI=L)-ɋ7M;)Z* :I~ju\JAmn ݺw2E9خo%X.u9g%n`"xdށj;mQ'yųcovHIzN eE#:1l55c؎-hx H`NBv8B4%9Whdsi@IDAT)+4'>%6NRqP"']cW-G"K5HK4,r諟],].eMb!šy#z'R/MJ)ZjC6j{ds{D&36s!ȣ2+1/30 >&)ʷO UheCCk'P¢q'N|u0K']UyLzG%#؜@[C<}4yqgIppȢ}F#c|b؄߿1%&&%\F%?F@##SX~Æ hf͚$,챱RS%5$a!yRtK5F@v|N l4!=cW7c]F@#h̅Z` =&) Œ%K՚۶mÐ!CZ,Of\*dgg!##C-fgߧG EIw,/AvN~ݒ<>?/+Fw _(,,<`IYgQON;}0666h!܅ȃh"##fD0;5Wۺu_!mǎMV5v~H-H2|u-ã_F~s+_~_,w ۳q:%> sه7~^ޛ5xz~u`7 X,̞)y|TW ծ@%E:KٯOy)wlύFzw2qT!#3ߖeYYWz3z%ġi"ZK^4o\4}!6|6m8bi[V` (x+:$FEPS]Ɠ=;vVXT%t@ME6s^?c80>Ndn_]i MvƏbkZ7D&NMEGcn䖭{'Dj蹘|kc2[J0wUV} ~Ճ1zݪ͂Z oO?Ob px郹vXQ Ý?!Sg<ؑF߉Kl Kƨg_C'kdxj4No_P @PIF)^z_}bpzoºk"pqIx۱6=or.֬"QK~yto>M9N6 øt@:| M+vk^7/s$[~'_zEP8htocMBNa 7^y_M9׵CN| H9xgXs.i/azCп3=oZlޔ1s]#f) 7QlV'ᢐ*F`S G6h ZD\b =mVsL#jc߾rC_~uV;0e%װ֮.P-Z8Ͻ1QMS$ĉ^xNo:,L8˝nL0Q/gE);y9;š~Y[nQ+.@yƽlSdN}~Gvūc`X߮f\TPw="\>o8C(xEZT=D$C܋^e{b\9 Uwϖ=,رv_SG}7 ^} ݂^__W^zA8{M}ٙؔYZ˄i̞1ϩM1u\He6_s^ ʶ-@xBƒ鳱5ǂoq"0A+|_Oͷށ {sbٱޜ ZömBmScI`TU=p/"&mŒMk#8`RӶvhۣj_+g&^=!O݁Vn^z4*^p=6lk(-x{С]t0 _ّq)3RBg]ZTWV0xO[wNMBpVxu`5DàΩ-$^u;w耄\6?V(x/nJGN!X866ԔŲСY{:{"b?=wºa̙pYȸTވ4OkkKI -]*B7{jrsb9Hs F^w??c~W+5@g]t.;4vv\pOu">|>&.z9q- >Bm,<8~_pQѩŸSѭm^|}UtRκ!Ah`5S-cTqt<8w_rbG,ٵѻBF&~-v &l&\[/: E6矁5]csGi؜C|ǂPJ]DP|m*PZRD\.qyZ\y烘w55v"s(LK& (- :A^ŮO\hInJEڶq[qB1ht{Y> k:|hJ#^7XA]Nz\Wk Geu _Q u?uc^ zz سe=NL=݊sZMy74 un p-: atFs#YFf6X};۳Ջfrh,?jWҩx)sPCqj+qxΛe ClIk (k [E ֐x9cc"`tj\X{0t 8lj]Μ%pțǃnν e$\} ˰ys}xGp/b` CLFm02Tzm<}˸R MKl]5{JccKq?ŸOCBjTD[&CXDb}@AիSE%T`4_XfiqnMcz˨*(E3.Dڼ:"c-}#A`Ћ0aH߆nĠ{ \h _YkwQwFm7u9*\'sӥmr (4}rʙTyѪq%yHiф+pG{lݗ<{ }-dscDyy9{#Fތ;}}Y9zpD=;dL5"ڵ ]tQGk*sWj%Deظe'=6$}6GXT&lpZQAyhvdcWn:BQ^ܓ[DRahѶ-j?’Le 6[O'ɞm둑WбMKw`EQ.ʜ3T|hvfL#mhֽ 
-;tBE^:戍Q*Dr$|r2PDBl"o7vffR;R{yʧj||N"NEmT1@Pw#E+d9kF i2:MfR۞;s;m/l&=Uزa=rѬEkNYO(ܴ=pF!>v Xr^c"юsaF;#&́|V-b7 \I|I.܋M; ]:򏯿=ۑB{.Ӡ8vv?ᆋcy9&9;D`+((hBȱpkCsEHiܸ9iq$ W`"X[vAbL?MK#82dSw㷴ƪ=Za:hYm<~qqnFxD@-ޝXh=Z`:())AUnF@#hʎJEZ`;*j y;6xF@#h,dp'j ꚦF" i4FDh=ZgE]_Y9{ QXk|0sjie]sdK^3h4-Ku)fnpr8? P,&.ۡ#N7{ce%&aQu=>Z[h.zIYB@>ifD&{u~34rYlsj%=14hG/]mkxg*:wݡS܊[q+(^ܡݳs7KЖ;ϳw;r3GsO |q#DжMk4k]C$2ɰeT,~QnX,yI:_Dx~z0x= cw^Bb?? X1`P߱"LiQ;~*$~RUeMx44 Ѡ`|ӥO]jimG}Z(fоmsvBԅ6f(s3x,n>.(uha<=NDzJݰe $÷5KD(^\|JF昿~ӳwyOKOUiQ 03Z%AVFuGmYIz jƈ1ob`痿Ĵ%cO$&U#}t^ Ƙ2w1֬^VUpl:zRG{$֮lu tӾ5[?&2N3*G Q:ԭ/1|0ɾ|COE jUt%ӰdɿL LMhDsӛEjS X վ1Ŕݰz7ڦ%Bo[!ZgAnoǥ'yZ~q^/|Z5nn7 \8?_Dx&l xt"hO>JDLs32=~ju!J4Mc0z@;Xr<{ƅG~2 RpyE%ohgceWr&FAGO*ybi]5y_=C -ZwDbd%a}.(icBߠ~ew\⸶rrG+xg1"GAXt̬ѬUpUyO\$ʼ?K]mP4?~ ZRnt?4uJu%3'HݼbRφy |pw:_F(_"~>u |'o 9vH)aq2CV[KCiWWFХIE9ugϥCBaJ .2RNP-.60(-PSlTci[ ٚJN)%9E2B*$(DbaOJ9,M!Lg#Q U7Lx 鱑~S)\ǭ O9eMhw'<\s 7 5ZBlV3a#-Eԩfxy=BHbUX94.UyH;oBѢ%h7DZ1D- ѩD,G]Ϛ l `vۨAS\Aj b;vTz "9ؘTd(BfZRS#͍.74Ds=T&_Bd+Ə 9^ݷisOaZ+I{F3 FTc :jHWO!t`<*seg]1i tO20ș{2RQ \5D8XWz071Ȼ%[бwo> ~$$A[fJuVL EˢlbeEvU0l-݀ÇݏOsI0k{.~7<<ӧO!5|0BE?}:tޭ[׫Γ 6 f@D777,`#=m%i¼4rͫ ]AIBwgRL]vDQ{+/_ og~dpg%dvՒX$|o^_*76.[Q&5cߘ6=\ˣRYwztN :G.aA\qdq^9t ߶EHG!ULY ,e] TFn8 GPX)@ x,un\DяmLG&:%l?@B2ntyG"%>~:e`cBXM-]3uinxx~K)FV:[С"@BN~ pt*'0k:CgCX6 =vB]C !`cH! 
x jV{ (e~cJ قgA3;e¾V21z aιH)&U MTڷ{1[Or8+ 'Tܾ7XxҖ+(NBr4C'>;R6 =5Dx 6()Ah[&]BŊ\zYRK WjrTy_zÆ˲ϝ|R?<݇k- VMI$Y5Rw=$֍Sh{*lM.LwBC06,Kv^ĔyKQ\Tڥ+_/B1HzfXɣ0c3Hyv #.GN0y̷w&DSesq /KR6QGE 0˶Ä>,\Y3Ly|?k6䋧᦯%/Lc?h,h`߲Ya Vĕc;0m$JڜtLk$h7x6kv-_ۣYEjU Ÿ|e?6|U)wO&TyFqXL) %1S1hF*9fepr>҉kvcȁ҆Mx9tcTw˷u,8kx!C饐˧&'رdža8ۮOOFfgI{-k,\ ^Mo, ҍK0~-cCjz>m6Ɛl(IâAMISSwڜ%dX!?wuhrR/})+VL»½ѨVy42Pcw!f4*Wq~1id|7aʕ(1[QSYC(ѥ$T#G։=<Qݜgl!MT'0u faإR$,Moi &3oSk'#.*jvƸc{6J՛6|cHZwZ05 j_҃`=84~.?~k=LLm7Xf!39f5ѩY5} j,Z#BcL ˶̈́!Ghl*bTv13/2Cd` zX=tsUF͹H;ĩ{rei:4=qTЮթTZ9+2jTe#B=a$d7(k8hWc@[Id UeI87yI\-kG_J&ӝU1u'ߠOI;KhRWv/E> uŀ׼?PܸvΦaĶ0s\v6  s:vu$IaíJ~Ɛ:RO{ 85@elLW~nOUH50 VuAreV7ZMP5)ZP։2!^@!cMDԯjUE*տCnJV\):Y53qO.1eoUm+,+ AHjMf9SRvJ^x88,m ]R%ali wҵ̴$Gޑ|e/aonzKHM.ZP#0 v\<~΅ZB"N9"Ci[+c!SRڇB:8^k\}zX>ǚP>uKX *EgW'BbʤR_ QHTM6\%죶 Z*¬T}470tJRibnKK{ R8r. ;ˏRi >7/_IuY:S^QBLU+z(߼^#3 ;T4G0T"]9)E{CeC.Wb?'.0)j7NqfdPo [K3Eay62Gbnx#ݒ6g "EI5,()-^CM%˖&9I|ń⵷/n-&9g/ є|fF~ q0rD.N o 1MRGo DWM4SF'W2bϗ;8;;?3\d R{sB(*M3lEe X#v_SLؾР0rrg,K(pv.V'PLjS0AZc_ɝԨ6rdaC1'MΉ,mayiYmA݊ͫkR*Z3̘ϭh'4u:AS[g姠$) ɴAZy`uyv0 wt#WPk([ǢKQ2\EyuFe+W}?Sof[UɟȖ(ateGV[7{( 3j^LMKy'|v$B%*l6Qyƻ OIa,x4>_FZt"bQ jң?MM!sA׈K6wAaT%S軓APDMѣ[:_zSCMJN&dђ9pUZ%lןug UNTT_rh}ѠHV?K>vKo?U* (派S0 aش} .>M}{VuJ_uREQ;;A4jIZTv=Q(ké) ޒ xщLh.PJ Ү<G̝= BquzZN8M#]Ԣ 55WMO9$\?^#J)8=QzhLʹNjWhVhղ_ {/")%,h4a* !ȐJ~UPQ@EY #i1iib4{N[кNPm{T#&bŊ۷ T$'++++)*uXNX MBZ%"i)[:R'Š'$wʥ2ڔQ=tR2M]Zdxt qRŪ3 A~6s~2N+ +--3%^_'6( iN n$gi q~H2ᦖ9^pK#ɻdFcϖB?bWDY~~0u'T|_N7UTPQ@E:aDwTM0n"TN|◈8F1@ IX\@x]F ׆/$nYdw 0O.X >^Q(Ob@ e T$ᄠ)EU Fi8/I#8/o뼗3OJ7[$KvlΕț`ؔ5Oz槈꿊* (_a*܏D,n$.@ .M05ʕD5>,\"lG%%zN}Sn{rJ?^BIl[?;|o9ߺlZh6vÏ{<]p=86F#p^yKq_}4X ڎ#IVh3xKVUNJTYXqTsVo^/O_⏊aTUM5`'s0 [렪m9$2DZI@ܗWoޢt?~\z䗧 ^R;di | u Ģ h\<~?*藤 Kr: M| J1>XWG>rvku?+V/GunmPg8UD֥Zhs}tRt1 4Vhڪ 3$8Zb}6{qveg~ŕ"Jk7EnHOQ+.9 ҍФ$z(vx=v(l^ S<ϠZ -,&&'Oo PqsT,@ 3 EͫxFm]mZ5D_>a3pP0E|yf.{ģW0G߲m NDڝxx +$1{`젎b0)5#?=p7R0{AXLjV)3PNTc?yPqѹʍ1 C`1Uv!~a CNE@,U|haazvfܗQQ7L4|((~ !$R7;LB)T jLZtkcxWg"*wJMԨ fMLďfArw|߾diy@2.(X5kE` vDJE=EE>!= حF1ym2P ^>nc עQF<Ċ i D":Z6/q ѹtP؏h^!ݟpRC 
):NG8s`#7koci}KWqlbXҡPTnCŽdD'1BQ0|٥Z / |^cnU.V7n<ѵ ?>z]my^6krxDpP0/$հB~_/URQ@EOPcHU-p)eQA&'I gW#ö*KJ|Ad=kM=]FPT p!ǯ'v`뺭W(oO\DjA^q"-)5CqCTl CJVrH1JCa , fsg!"4 F6N(/OcU ! hP } $#nЀ T_>ިhĬ+Cmwx>'r?\GY2k"8[Ia^R|bMPbEI wz2B<֠ ̫N0if D} f֤dQ VaH"Q=v 2Bnѡ) Ky4#fW̚Hyt@ICT.馸df 4Q=ɹw(J%]sKG:!r" %#+ *2;"o _1o.M`%jjSmØeᲫT%I n̟&/nJUHbaO܂TN]dL#y ;Dj /F3@^a_~3&qUӗfxS&3w|% l\k54Mн~5,}wH4y%隚%vltFisĬ .kw`,x" 3\bކGI.&|97W]B`ȌdB)t">hXh"$WʷToKJ\zv$Cl{.?y/;._skIx'|iPMz%ʕ#=5A#7N0z?fԦ8WuMs:΅m~N@0nZWh%c-R?/$SO!F\9tpIMVDW`._-ȫL"' W슴-]1qkeJCb#aRDr5RH.t;UDx#QEn[?J0ƞu`B)8$  m Ɉ@!-A "m!Wd<鑙IF1 C)ASEвNlIYU_ $# " 9z$e陈'/&2R>8{'QB=ڿe%!"\K)7|x=#(źw1:ۄc)pcߓu8psKNf k/~!݋f;(CXm/O|(1lj]p' i\x)ݺgr'k'waOT6_!FLaw0o>X8: p]y(]oDZkCخV_Hu֯[q=RD iȈH$ѴH0Fﺠ|1GGiQ qУ [xx8bh&R'֬^1uXw87 ߼\X!TU~JMP* Fa!_3.ڰB5>޸6 "u`c+3V*U(Q@ aoÑq61X5<̂TK-R_A ƺe)7""q'}b ib.C`ɴ q~ ĸ~]aC Cݹ-Fa MDQ9"NRaT fR=8dr@bF AĜi]$A[t)QR܍B|C$ BewhF)̭`! UprL4 ?_7"5*EA,(VJă@ۧLj<~OĞ3 %1g`2shTwDŕmm0=TK gc^ݓ1s֕ǻY/ .6RNAOXԡoas7;W/Cˋ=~}HR5%ݵ1u8 mZ%# .U1yy.S=0u*[1Pi!<4%jwiF n">:%ĸ>cmM7x*W< C#8 MKܨ}}FNAV= ZG'гi]XX!~szGhh굋w8.SZԖw\9}?z{Nl6mF-%=ڎ51mWL~s[;lԴ&|.ihB0ʤ5wnSOGfl5R㏞q@m+g-Pю/KTKշ?`ER$Ղd9M?9D}Ҟ_el0& ysʹ# +K7 NV01'Og7tMd'u]7?K1'"ҵLa {;us lL7>{$L74ԡ'!TwqA(xDJl-z ײ`^XOI E77x#^8`FRdb┑И$ZK$dY)]!{.$0H9졛b D@H8 n As-ס{\;vBШxXnzN QHR',DS$G!G|ၾ 9)QU,3 !l%ɥJ O@ Py;ICumXn@|D <_AU(W Wv1w_!% 8qJV%Sc#̥;8n"!3|#m#3,o/OhX9h!R (FQ(hoCmRaHIDHX윜qzԦT"&󢭥 Rby07҂+/ \Rar'9Ǡ_P(,l`ImF$/ 1T9D:3RPJ<5/b6M#$4KYsOEY|hkIYTW$⣌3Fi0Jd^֣-L$IA؈ܢ<# 8tZ85|)MJh+"Hs~x;sWʞT* (OCxw}¦-WeDTAXetm48׭ï_a+xKPj8c1dB顦`дt Qf j€[LO'FKFni$ݨи*+6? 
1%FТ"9q\6 MThR^r^_RͭQas?߭Kݣp7~J̇OҎ8B ?%e1̽ZZj* p.򪲳+V7Ŏ7'ځ^@A=5Tl:?coI#$Y[h͙8ql8W%n^9:m&$AR+5m.E;@-]S.#JŬݰƉzt'(ލ/%>Lmw*i"H:}qw`:hki!15ٍ@E)M:a y2hSN\_0{ƫ ת S$̛0 G-G*9`$ʓ!ڪ8%1VoG+ `[]SбuG;XшUr)d$AEH\vINCBBcTzg{GETPQ@E_E_H7PݻxCFg7_.֯v}"(FG[8uIӧ-:KfW* (* | RYR5 KLZhh(^cdGiW%+m&k"=: I)Nb̼5|,\<#<`|(Ģ%sゅW%vyǂ ELb2CjH bilr4:WwG$8k>i/Ж~{Cc1oGXX:QL2Re4`~4(m rL0t 'Jo'O!Lf}`NJ{p?-~\Fx St">OCY:%KWq@Dc x~OT=@ETPQ@EaKI!z=WS9(, 2ldZB^BYKbKur3Pc&aj@2_x9D%S@l\fA"YR1b?Ѧmy)Dh6fL453fY,'?=Sm]Pٚ{#;hNQmD^FTkh9z}I{s.!):_dF#&:"'=kjOTHfjDh CjiXv􊐘@A@$3%C.'Mn D~ #79!5ZLmŦe"V0ә(<ɉϳeęCja/HHFq%܋1M+G_迍]"zS#|)d\8Irjk W X>R96]zI~, B-zM/IuPOrZxnY~F cF~nڲ6؆cܠWwN` pii߱3{ÖRQIih0ze~w>~҆D [B'2@. A*]q#Qq L+N`"[?}HJtQԕR 2uy"]™4V$S"朢U<ײ(!4q(EMpXl>~Tcxb¹x%RY FXE.lJ\gʝi9X:)QcPEc 1 {j&ya&+tqxLOsFoBhM5'~ĝ9g-Q*KKBKϚ/oTLG2;#~[NѱC#{3zȍ!t=JDs3skvkT˘j́ _ 8o3͙|AߡN#o0+_z>^zΡE4[{ŴM>}r)'/:8Uu"o:'\"Dn7x"ፌ7|蚚Ò M)tIOC:vhצ d{q'oE*d33ʛhq8-] U K,8s4𫲅;.?bff&]+CH]JO=2+8 GSrT饠L a.N1hӰ lⷻѡYJy/ C6d3PJyݽ½tzR%hFRaU-\ڨVh~Ԅ䴠ch2pt\h^KD#?tYape`񊅘8|?IR;?ڿy 2,+c0ϑ:pRB߸䎍3o_•[T7!xd .ƵٓJMhӬ!tѭˈA(4[6NhݶɈ/Bƀ'ܦZ3<۸p.b.ڡ ZjL)|S#65ee*J3p6$%=c!xP3L9utI#%VL 8۽PҳCRs {?p%Wn`vJ{4'LIWP; ݅G/gwU卧=Vlah3ǎ}^xKLnIZZrNTuϯc @ժh,aL&I?/n>~ ]sJX=KO{}bMza/Ӿpi9{?bbOv75& }"<%ŽNPMO([.N]$[Qed*ZH[vpCqSާ+°65@R\Sy gg;WFم3Vbl:?icL)qwpX/cQ*fG ($W5q׹z5mbfd:gny# CK "+Rzw\=¢b'$ /|1^;^=âH)4]~;6pmgc,S=Fv?=Jso>A%mͲ9x#l:"YzHEp6OlDճMբu)@0ݛRN , v@fuz,6lىb1Kl&xqW:c}f-%cc'Xbj&$2o\"~"}zZK8g+ Fa~^ا>_PH+E0lxx:ytr]'U _a6X 5 uړ?湈x^ԝK$ep,YV~aV6GY5tQԠx>J„E9J̑C!_0f}H6ʄ $ #uPD~ZٰBS2A5, SRۢԊIgm~dqM َ@ځTqyA0m25-.WI.Y8e,>Dj/# XI u}C:]n=/=!%h>`"ꖱ2WR"vf6|r@Ě"x$2ۏq/U0՜lgXagW1k~ĥQ{:Jho\uaqc&h_€LIږrԠ%0gU##..U+c CIm__m6w)d0E#4O,-J )2I#:wC]tl\rQ9؃#`\xEju@Έ)*z:L 3hkNf/uh$u~o°˙#!# R~ g@E8H^[V0=/7 I?nq9uy930sjKI  `j#]>sS~)Sƍ_i ٛEۥtlB%–[P x* fH̚ή|mԤW~H#-\<ɬ е-Uk7HC_n.9KISQYOR\8ӓ|b)_ɲ2fD?i[^0kR1L2^!@]9!{N/(1@ PFvA~n 'M\ʹĬfp4(+1Opd YJ̚([l-7WW ,⇲kH̚eۍDzXajkn īWgТo\JEF6B"rQԗMvT(VQ")bd)av&.B5$fM,UkV<N DLݚWx?ҀdU;sFA#͋D-2kumѥNUx O$I=hI8׉{C$fMp(N}nGf'qd+2w5QK}-RDBBI{e:aTD?g#(|1vkS[ 2Օ 
I@b/`g6B!S|kYGfeZMp')LT0yvc.%F`O}b\ߌa7 0]c@caG JYcȾP$SuK۽@N NCt9lW8! lǧJyiogLcPdُ߲ V r̞3*c5u0_;fhPJLMEB C {m֝(iX ]=~u!?Kܾ1PAb$LJq%O_q04MHOST8p497(E5LIBP|&*jq -{ph9҄B /D8<5)NXӮ? b$O7/Yx$؟X&`0؀KzUhZ;ښZ.OG6VB˜J d&GKިda𱰠]!z%2˟T<[1 \N_X?ddj.iAӔ@(trgh<45ԡMt&:a`2K=bc)ź6Б_Us>: ߄Is\m,?sI\ xHbSƭuc+1# ;:4J&. =.NG=ɰɻlWQ ĭ,#fK6cDA_ȩ}^Xyf@^q/>eg+qJs*tD0lDsjUr'%y.,@>ƀ0Q]HPR0p `5J%$4|TQ鋠@yMgS}{tFQ cTTnęG U#[\)uBjQdݾ'QBm7~"DȑlҾFm 2z0knۣFIS\> R5No_WSF{ nB ;Ѽ}6:t1i66Ƒc-׶1ĺsXpTˢ%QIff쉝ě68w5UP|qT\^Oa ̜`ߥ7Y}KFLסi7wE2xt ,Pd1<\苔ӱm8~!7 UuN܇cݒ9_euf6 -[W@rpQ"ke0d`PAC8} tqtrL d̓~X4l*锘QBn?zBQD53&7!4 Iv2lT bVRVEUL]qI)x}v^]PTEJSGVbd̟X>ԬKwN(d&+ ެAuݼ'hʂvʜNPH'.Dq 0ip#c2#1kL if>5+JҵXxbО/ ѿ}#D"yuE|X Q, ]WA0%-Lr&Rڭ7=qYq&@ːGSH_b@H֪dt4KL3R ¢d̪A/e"žw-̴j$kDci1DL[on0UC+:WT-d%V56OKWt2ZE &[rIejҘNst1mF ޘ MNG-n09ݗ"ͻcqD \WB)%QBf}bxRQ3]1a^kU`_]'15q1 OdS J9.Y*071"TQZ24 ;3N"nKC9F.ܽ-L5Ÿ: &K4J!G\BG-{HǮUVmDuڬC:a, hJζhJGc1G[⃸Ѵ칽%t{5auE߷0mh/,6bL^Ff(dk!ʤ/4<&fTmS̖K)?]T`]((2 7/ߢ1P#K+zD[2f4޷+8r'ё"Lёրܮ+&2J?ҏǰ%EfM<=/N0(;=➇\83>8׏}٧22<$Yؙl(0$1̠EfR$~z'd'L+[LSqbܚ4=S(%2)AR1sm=C?҅S36sI(lAqt{_ІSI'~:M?aeBE-Lc$"p"ZgT F.vyƖ(luCͻ7Ǹp bwh ו|bȠ_^JCѴ:c0 A oF:)}M(ʆf3V`p IcIZdH3,0+!]LG/`ч4 (Xb k<tI3˪}&*Wf芮PpsPk(T1h2M>S1۲c+n޽IFU |Up) $lyR})A 0P |K |+ [.o 0P `(@*  0P `(?w0 '_-Sp(@ 0g1PY=qU.g>AFNxн2tEV(@ 0P | fTUո)2P]ďzDN"M˫d0r6|VDFCS8QX.{D?UUz&$ǬHZ}L,#2 STń-}1aB*05A9|]?qr]GIW[cEGE#3`:@Hǚɿ!B)(:"Vg4Y\_c-FE>_"J=<**Bye J-67:)N?Dag +˔1mr>- s~\co ,Km{VYV O#d8(k@9Dش~+Sѩ`)aH6ƀ>=a 9,?%o{N['`? J Gj>SbuǀιgIE e l!8Ak>Y_9>&+)>˜x\0&Pta3lS3#21zEۯ]p beiMڽA/.cXy5>]۱;%`i,b 2 9An7E;}t#5z >2Gtp$ѐ֪haƝ(mO9pI>?ſF>_Grz]1sϗDðI`&Nx;Vb lr}u.Hi'0{ }sI8m*|=CiAv2z[Y8Gh~ 6.~VSp*xϝV? 
DYB+ej^&pJ,1OyB&6)NSaژg)^8k;e)r;$2W9'ddzSbvRj0R~e ٮ"tsϊ;4xVHT4, * 6he$HqFf]`"1lB(mgkxKǛW:'r"IJ@M]P t4Bd{ m "!p+ON;yR鞐~Or@SLJ5!qҖX{;)# 5/df$/BE[cԐ~;~ l-[7/$__ACaU%tOvO~p] s,_Lb'?KG@'eܾİ߹)vm?aT0`D$gm)sPTY<}v%%U/p|@*{=FBXcO'YJMA~>fʶGڍؼ`&nPK09P4%ғw Xvw>(ά,Ғ-Ʈ@E /nÜànjZ[~0b+Y њ3MF^B2oQ%MF#6:i)4{/2agt0JR\U6ńaR3 K =-qq GbHccQd'1Pԁ% 1 }9CDJ% uJLR&s6y#?T@(K F7"uM3K^C&mL E@L Aaq04B`\c[tmS A8r,#by?SDGmw$ݰUiU.N]8#3+lK-RCgJ9W qu`eV\UjCK6m9XٗEǎMGTw]౟ woۗHS1B-<9j2зeİw Ax SktЁ)4k8w8XڢU62}CyĆcxt<2U TQ kT@zBΟcS-ܿJ(S>sVt/n]A(iO|'^z-۶;>)OSٟbmPޏS!:ThJCPhf×P3A=P\CwS[F]U&o^,N> 3~rn֘>3|<3;%ܞ\ƞ%~ꥐF^{v5CνP>O_^8EKs/̈́Ɨk&؄!IS mXPRwI2l#%d%a0beGHe25҄hڸTU8{^C_o]0kwue8.)/aKL ʪtukIh_j6j:x28bתQpGЦϠ4/~ڸ"~c֨Y.~huApd> V# ųƍxua {zu+cט;g8'<^߿ܠEEu1yf2K A"۱\jBƍѴe;<D0rێ3*o ;uL{2~("C( ..uplڋmC-Qv-ԯ_5O*8luG:H4m`4?u!#HsnSG**cXJdC/g4LA憖m;@O)q}u]ƱIIXťbtnX2RRqV#SYv\{<䷎,C=2 ^O1|@/憬0i7`ܥG ,.wHHg>dJp}qR(qܿq ,&u=c_LA<! U 4Z;OO'#$ŐݱSDlY> 㦬ӥ !a CFB8VL>\gs]̥ː+!jw`Vm=*!OElJ<, ׍{ޏϡ CJRxoZWzt2S0g"v|GIyyq%c8 6n ~bفkRuNn}~u۶5cfdd:{D\}²?+zZd8 z}z֯-a̕HB4#=uRpb+v"iR|CzMxxP3yѐ[f`.9a1BSq{RfM]G^~Tj1/]5˜*<>J:mܸ_GvAf-̃v'g#ZN^^;Mا5I :-Pp|oxyiOxԩoW f͓\)OI(@lr"dRFe LǣkUQ Vw[ Le.\gBusЇR;Az DQx~tfضb._5/f,WR2#,a型Xկzlu{AצD:|}T`Aq+՛w߂5sF~n]!!񣑷CEvǻweփb} Djj&7]aVbU.ُKN""\Wp3V@Rj}BB|Mq^#~/ŤqQDނC۶FN x9qx|h}/BY2 #\T1b$ꢍ]b4LMi`LJmDP}W"ZwSOja](m:oBSPXHؔ3qzl^}.M% Ӱ`L+˨زgjڃDXW72d,?y_*W8Dz)y6Lsu ;ɰhhD=(0L*^ݺGd*4Ŏ-PTI M]Ӈbh$bcV1+b^yJ \&/A:Be4Be{xG"up臶(aOfKt=GR5~wsKRٰzuke;s^xCT~#UyL7PGDb&Ұ^*tu"$F!;=4x*I̚Φl tǛH DP̚Xj=5^RV,Z"֤f 4>3eY00ɎB-(h˿ #_ fM}XZD}V>̆cM89H׵L`ȞXc h׭+X)OB%كBwu5}Py#tG'5~hY4Kɻ(D=EM%YR3`dfdf&}BEԌ oP9 mw!gPr[ĬM,(1hk*P>0&4K4,Jhޢ)ۄ^"$(P%c(X аA7h 023PĬˢRe;xAMEbB3%6I|d(XS*p}^H22Ae{P3DҎ8gin%fM+Y-]Ieaena ;2al)?$@ۺ̙kw8?m}ck WN*!?=%%zj2DG2  GF:SD?OKlm2He)RVCL%d2']@td8ikc]: $ùL m8%5Q (hۡC$A^!EԞ4{:*$WtMS< *:aD38anI,G^2*7rQwsN}ͅ;h46w7F su7J7s@S?,7\9q1TJ сn݇ĥ6RJHKJ@t31 l>[IVAe3}*3uuPݰ<ېȦC4⒵è{>cihG=<~[W7_߼j0LqC`2CfB0CC##-2J%)T\禮hdgE4x+荓K\8cAhS^OjBIUϓ[bK7 +W N.Ʈ9j;ap?y}O+5>J)(_ke@G/(96xoM8)@xu&^ӞsK: ?usq4 VCF6}`#F?eP* ap1u66tNz#TF%5œ \#p5mHH 
RCN;g*әFf0ڔdϡ1I(m/JÍjLK9o\Oc. /˜isܑNhTTjȜQR}#~XXtAA~:?~o5R*hۣuӐdX-j u3J/… /{bgo;WjFp>kUp~4N8 ^LCacY8H­C[?THIMc%ʴKԙuz_^ן{{twgcʌyx:~^g^b<=J%"yL.4+.&uGƙMU<ܽcJ5T qJD qՔGgd)AV0%^w<`W/Nd'y:4LK[FBW Y䊙ZOR~|e룶K:hh1XHFax m[)|yB5ؤ/b_0uE-QrM\:'H$nGvNҨR,\Ǝy(H¥Û1q+휸s婪z2y6 b`P3Gfp&tLl`e#VPcq^UF\;7# <=+{? ZA”Aqb#kڇ%IK *j?go䩋I6&EFj5~[_"v{];v[(R}{;db=vB/ܐOJʠiЖ%ǒsC"%D<ܐ@w[Ӻz+#xeɩid>u (z6:=Dr/wOe~UCݚPˈKOG9 r~2>. 9ӨQ+wU:^""2Pg/X4"*uc䤗!m_EdvhFU}agՈIb\oŢPqm=[KuL^*D<.@.(^J&c/+bt2V1T-32AkXjgnډpk8rbrJø$`_w,Kz2q4~ b-(pdh_x+.|~X[yn=gժB#TahؽLygNV=_o߽ō'lmoK-|9שWL 3+IY8(o i@b ]zO(=Hm9碸9U -KHS,7ٯN}A.:G0fv&CGŬ-jЪa]8PHЌz,9/=wOh:F<|!M=cz a'@# u7ĻX+$j7&? &.Evp`ƊKD*dFL] pl.a[M@1]xJL5*T, 0&:PA#u >P@p,_s,G㪎rb#O0>>>(P_ON-6u2p'!V$>Dm-}6,eQ]y ;+] ;!iohd m5~ Y< 45v/oIǐ&Vr"BIbOFQowd, WN adj/J'tZHͦ!'=##?39 "adaMB}g4t`kgN6k! c CMDQ=dm-]ck:!qn+1DFLgX(TdUTyq@026ѳF/ǐºap<'MȦ^dDL,7LF0h q! H3c rwlbN=Ro֓hz,B̪SmdJ>7R1}P@ߏ/O:۰u(D{pD&G`꾣0H#M6m ?ݬ2&h ]ubC=#s*՜~eL gӇf(V(QQʐQ*u{qA/z c=`GMGĄ78jgmJl^9/O7w0dmҸ>hO+gÂM"#3j߈XtZҼB>dzs1đhĦ+(uF8f t-BXY,$юͽD<2$X*M=le $BFs>ccc  (2HQ=J (.'\"?+oUx(Q*(s2!b|Kϊ?n9(Y7 i ,yQ]*ދ%[re[ݸ`0` H %BB@xHn6{mْ{sW#Gݙ;39s|Hv1cQ}+IB Ýɍf8zuot|}'N J$ZVfCaY,%Ockp s]OP,KI@K]nr]TQk Wȵrt䄷*e^{xmkSyCJɩB`tБܱeq-( .=k_V>}D@Or#=MW):u:W^yNX;z/**Jq祋=fMXޖ\-5?!,JL>+C,8zwkq͍7OϿ>7`}?. b@ !ks＀{5P# =y?O]2g}*m]?Vk_waM@c^|c#~jTmwbϚ<ư}J^ŃO^NgmD˟ wl|PPPxa{eEԧ~۷ ~b_3xCƍرc9z8H5ˉ6k[xz <@܌:Rke/⃍9]/yؖSM88N p7~?$룸qxC䯤C"|"_D`T;ّ_Xt͌0[|{;0 ESE-]gc#70>>޼e|b# f6|~+{0 qL|IOQY9,`hU^ 3]9e-Z1[ZZ'_ز!?&W-}1\yQG1tRQS|/& Kx*27yZ[o܃n*ݍ%i[.XT{+qy^v.;+\* (%vxEx 36sFܯ߅MKq΄Jl_АfK^W}H n/+g]x)fMg|r<80]1N{TdcxTAPx"N>OXv-?6v)J󫐖kH^ȳ>,c)#Z'_49[>W݀wxKůy ~X05D輾QH "g##py ~lؼ%\pXUn8GPaWp7oc/}hZxq NTڌ^W? 
<0!55஭ 6***|=>}/\u{*L;'O2T imS]zcEˋ͐i]hL3{%n櫸7#fb\z̞k#r5lOc㷐[o\ؿKz?v+8CNC$_%p S`ܿ {<%w>]׆pO4VEN5yQ셇)9g]]`Sy&Ƅ-CEK7E`?}7|ֲpWOo[Z"o!Kmxru b᷿/o`dB{# xg1&%_xe5MHgu+ZPI>F)iQA&oE= Ə<+Őg#A&sR(aN6T6fa2c4Tև[/= y_۫s N@ǕݿBu p!H@/Fז`AQgOϾ3V{WNes|l[n4D `m_.18xQGru?Ox 0+_傥`T[={~9l*㌹_i?rA\rh*SsP׀;<'q͕]Xa|8-*=;n%(ތo,^⎯֣3}_xyJ>g~ Sk;/N.,!!p/>!ɓ1m Ś쏏xw/6TMJH$_WQ/=oD1sDI Oŕg]p0MIʂ }18,ﰌ6.+ksOy%6nFqa"-`p! /ވ5ɛ9c>&I %W;^Ȯms[!N$k|SD,,ߜxs/<&p<^sl__`\,GTwzlnrKO_W?Ž/;ϜOoMi ;lj*[@IDAT߽O~Wd_"'{kY}v;zC49\&SzIH(Y|j`K@oȯ:=?Oqp/S 3o[>K `ew܇ X+P8j(oN LߒTqmmsoMh)I;5ذv#yw5cȯ}eOLC}=WF)BǥlQUQO _.r F#?xw^&ul(}F<&iPYo<'_Y :r2T8PT6 O)xIQx鯏!8\rL]t4,GX,?o}²z ?$weý'8hm;}tZgXz}ѵ݀$L;<0pz<?e.W[~ srR3s.`1ŧچ&[oƈỦ_?)_µ_Ɯxɳ7'qӟlo'~w3uUi2lě<= qEg!u\}ރW#{selHxU5!1s.Vq=+SGb s8헜@TDŝ?5̝h>Tay7{o2t|x1#~4Kno<1xgM 7m „Iøk͉e^6$xO~-?4,6]z:xf \xmvEGQ [4R9\{-L2#4;7G6s!RߣVQQyN@* ȟỸ9w EgO\q4qcc(DH~%CM`l{k+gͥމyr9PW!dDEU9Xl\pR"[#m)/@p"©ĝ+DzWcܯ`djG!L\"Pfmm-t#"2e̼7]k7T;ܵhjqfyi_ń (|z?-#_On+9i}8\T7CCX7ƆZ449O+Qme}P(#}hLVWR R~k3jX6cXTs2>}bXz<#a55 2/xKBVȎ˟B56+ n;J3 sL@?,f݀1[~*(9$%DžiU#ýזR:G٪ymiE#0Gx~i9){K1/^syK@kAEM=%'qͼN7{ġk+*sE0jT$srr0|pt3|҂F9ȍJv;DF`;]EuһPOQbؔ ,? ޲۳s9V/?)`ӰuJsoh,8lӊWC0b?i&`)/܏{l3G8o/Vn2Es*L9qZ|b37Pzd̛6K,Ð0yD +fZNPo6m=i:rik%'I-6td MJ{@ 6 7YɯN$_wy8Z_?(%'7?]>  q»"|pegf~|IP 3!h8Ī"$Zhj<2mI2G &L2QzpVo܃ʱlyӳPSxތ=y0ml7BqO(;ׯ@1}_p.R°g*Ť㍘?ޙزe휽`W9bnԔHLN|—qpp5kPRӿ:۵iv_MGClhin0=Ds(v94OT(n'ކ:mK (@ Xz-"O.]9bņw?3zɭ@W'T@mJh˘ wHSLB  ;pq@ *4kʋdzn_,ڵy=6jG,o\17ؾcm\1f.k03-+edR8nimcAHsgv;lRѧ[t]Y86HbH7ȹ:+q@w QfnU PJ@ | p@R_IZĚFOF厓8xB3ǭ8_l]\5|2IZ7>7Qa(:>W4г e,l'$oeL"4-&߱TbAVTTa.zUvt?}2YuSOE (%F8dU^oGPxC~~2[ӏ~no, LQWPXN W ͪ|ؑaJzMjoItCb?򹂍Zkså͈8Y}}7ҳ|}T` l`(H!9Mwu~ 5U{,ci%86P#~d|?pI{Ap{wCI1]:y<1y)m*$'*'•+?]s#cÎxA\va(LrNǃA>b0rvl@mqI(-+Ž|8j.MY*g9pv2KPW&8 #'|*D_"5DUVVE5} 31hbb"G'ur@^(%B`@zȑ M`MC Z]K>ЩIW8  Wsc}Ih0h!yoK%syh:3leNl)P[OP(lT([YՆ.c9ӃHٌOZ"C!N&~ c5‹E9 AB 3֣f$NYix_WE}s;;~|ds_bLo )))Zq9. 
b},,$fi:,:f; k~q(޵Q.S 'UΑ0^xʥm@b19ǓX5)w1+s/`P)zȕ|Ex,] ;ଉH((QUo6hZGuxݵx?9ppμUcʐasS+jC39$}(: :R+BeUBHx4 (>*h&,Pk,Hڷ^AqP\q_Z~\(1ӦM0+[^khoC$͚6 cS0ڱzv+s1:l)ⅳ}7wo+‹~z1m(?_xCO/9B˨h4qo34w;*7}HӢܼإl{]>䳈 K]!"7-kYuՔFVYU:~% VBDVjߣκ)P@a9Ӧc҄qu؁&ye:Z|ܽVCYY|C0vguc+l"8I%h#1BYQ)炶֎7\KeoQ&UÇF\ϸ=RP, vm8W oZ ]Xhرd0jɁ<.qⒸ)ƈ6*2#ro+b?fT680$- -ؽs7Z򮩭vA[p$҆ FKM)v9U]Gv;m!n&\k5xͥh#wGY‰)eD&F | U1vtd %obsض0!%:2W K>Z(.}Ҫ(UxOPRO75=8T\ݥwpI \rDiƄiqc 05+W,ź]:f j_("Ǝ5WBVb8Đ'8>a$lܼ"L;U%% !a:$մȅJ(C0T>5Vѯ_%iᣥy0c%"(CC@ދd$q1LdC2/QKj\Y0/Z$ƞeu*qv N 56|f(|C̐x bހZ_',l@MD N., EjB JKL2%wȲbG'"}P\%EŴRqB)51(ۦ9шm?qqhBE\%gZrq-JLu6#*ԖeLr(78cv =4ё}O!fI)cċ.;KWTߚ훯{vlA-]M?lgIyٻo*e跈L2/d{_N)f/3fƣ{Ƃ#2q鄞N{I.~D Jf/&7މ=\lg>,y `@޾f͞zl?9 Q[C)Cc0o8#!;{ WoNW]֧zyAHJ >|Mع,Q^|8_JqT`lZ8lG r8Wnh)mbisc3gӷ"ֹ-OIP\RHGR8W=2Z؛\xcfR"XeŦVN5Hb=</23>_8ۋ L尡*ٵs;se,z7q:A}@AP,O砬=8H9+{v3hb}Ś`=\$ERtÅRC5EXcJNgKvlۂ" SP]R-[wD"oݻym&_coRX79&9-M1y[e#x(؛â[?:ap/6:m,JDd K[}w֯L#9s/a2>aƕxQg}ޕS.:G?\-& ̚`| MǂY㏐yri3Pv5V2.\sc洱lTj1hU MUukφՌUƨ[hpO~MV..Z1jٺ-8 SND=B 9CvA(^s1G1 qfGj| ֛!O(4<r#9m|6 ;PPTYFtp`)3 kD*("|yXB%:DK\ɮi!N C%CDԕ$)' nZ(Zh1lD W& $O.4q*-fy^.jF,FE o_%O\cܯpZQ_b簢(>BH,ߴvTZPw8/ޓy(t[i&UB=fR1Y>K"BiaKB#vξ=AjlSpX:''7Z8dOV4 RFM.۾-y,Бi`ָ ~9UǺq7b\F 8?,\} 1x/c̞u^Qic'xC@8a_;{DIC+b1g,4`GKh Å&`ZuoEծbľM |`۱7s'Av;⇎Cr#xh7I8wm&>57.WR/ڎ `3҆Bz"s*9'"B&'&b8TD2۷af$KnFT\k7Co'.D_ o-ܭ+=}lC4!CCjps846s> J˫9 뢨 ZxGL\tx LqcX=g1\c5IpYRĹhȑ'vyB= CeOlj">aػђ֊x#֤]qMRTb(0q,sNe4)띓3yRN5 S;-זT|ၭx W]TﴌKzYg#n> 5NbBGM_[s-85QZ$tm^x"1!3vŴaέż+lՆQ o`02\W޻p󍗁N#<%Ram@ | Eл$ZP*8!=zfH.$045+>l4ӂf%g+D16m! &O'aipYMhTD>罣654w|ɜʿs`mZ_[[ߺ1v6g֭ibfjk۱W}+X)K鞟1q! 0|~IOig_ЅKw#% FVF [ZgxWʾrv%%& blw$V88sݍ !H[7WrСC̼*cTX8*OёՁ6sDpHzU2)b}uZe( 8GN\HCaC9F4Nd x>@ R m%rk8ٞnlJLj0\\9l"@]VQ.S(-GEXB3ULll"䜹*l9lMt܀j ,8NkX+肅 </y\mط71BQh0OvNa{X[΅t+mI򸀀 :/9'ӱ՜+n=d^Aa-uLKC9Wr 5 qލbhGܴ@P<m܆Mg`it xhZlۗ,-Uk<>?)t5Wa:eOÇZ˪ɑ2}Ro vuYɡňsS}u1>^=yZ DE&Gj +9o.-HZroyKM>7gVoCDbF }u"艠u| /q!%bGobL EB]0\+F9dUC.p.u(yv#;LZ\<$+c}U7>)$ kR> `-8GM̥^K 3`7n6ދDVZj$R\b쇋H. 
8 b~8D W 126]_LV16 I1^CYM,>Eb ZS~ s48+9lw;'αE Q8hu"Bsã5vYy4ÚqsZ($D4/5 L0 A&ʶy(jA ;:F%a1 C1b [s5W{84B7ɡ\92b5Lx^P`sE傏X Dd}cJd U_ P~f,O|} /O/:YbP)Ukj\85oXι#ڭߑ '{n{Qocً9哦&.j =tV{s5kι8\pExwr|X4t  م.oz ˖Qw$2Rp>4lU߲9␅r'&0Ly^%l(YVk[C8'sqQ6 X6 7݁t C;6Gzқ܎ |#aĊ"H,NV GG$* c^;X3} o9 Epx n>nЬJw+,;̩A&%7 5Ӧ/21X偲cl']I=2;cے$DfX2D1UR7\m?pbM/b;-rd_E$_io?n&ΕBomV'Catb%qU-go<.PZe{["úv TMOiṈ0#HuklybmhlA #u3ߟ/-C&cQ_@A|>z מf^ g8K{S߳ u 1twho0Xԛk; "ӰO>av5Tc-}\I2v$\_Bמ3<ׁtb(fёFoID?WMJ@ (|3hL mn; `8i:Ƶ%NycN/)N8u,z7jh[J@ (%zHsnQG!9A(?-97@p 'hq6M%PJ"pZdBB.w+@7+B,yDLmW (%PA`@Wdk>Av~-FPaV~N1KbY}wپ͸6mXo^w,hw^>O\Ģ]~n9v|ے;Wyy馬FWofZڷPJ@ (%| `tYN" r)|qjFUx:f,W _l_AM 'CĸTL5tI?Y6. ef:6V.Wpz:mKpx%-teՕ\~8:'= 3G/昤.6_ D#RW+7S!$y]7PS `N VĞ$tPJ@ (%rh HCq0k=iěJ TI>B1qilF$YE>|) YHFr7cͧ`0?ZmsBq74=%EFI0aYpɨj"Bk$ĆR#P%t|n?w"Y%PJL/:5IbQġd# Xː8dN!o ,p96HfFf 6 1sHգn//1iŘi !!̃6n&."CsQǀd2\[OX".X| N<EPs/XgX0[PPp;]ÓKOVSfܙKkC&x$w,L PO (%R86gN}S>o2Dz$B0}2l QUU}cǶH‚ގl(XM,fB IA=@8?xN3ַ '%PJ@ t%з?rգ*JHGAB`$Fzaj=Z.+:M[F@ ZP[[74a& ltW =ʛ be#_v$oiA.f`X䂋Jᙙh.BNq] VI (%PG&pBDEՠ&.$$GM^H$~V] ^0^ 9see%0*#wlD`ӵFRB,eA!#P|v'դ2IsT&(XL)cl3r86 L$`׮ɡ`3^*4WFZR PJ@ (%pD1,Yd ,[&tѧY}CYD17+B!QOD/@828oͻAa.:•yj2ʂb5{#ĺ9^ǹ'ŽH(>?^KΖj{ɘ3f1ᶎ?:yH.&%P"PK999>|xϑЉja퇈-fch7Qظ\Rld #Y@)fV=n5% ŗyIJ&Nmb3b&*71BDcx|Wⶼ=`H=PJ@ (%pt&ؤNsbH(⳷=oZu}/yoYw;5[anj9S0(6VμV}UJ@ (%@oTqðcgFUDEPe1+%?lgw\%-KjRJ@ (%q\By4PJ@ (%`kmI (%P"_شPJ@ (%G@XkKJ@ (%l¦PJ@ (%?*Z[RJ@ (%@`6-PJ@ (P?ڒPJ@ (%E@[i!%PJ@ 6֖PJ@ (%/*M )%PJTPJ@ (~P/lZH (%P#%%PJ@ ~aBJ@ (%lc-)%PJ_T RJ@ (%`kmI (%P"_شPJ@ (%G@XkKJ@ (%l¦PJ@ (%?*Z[RJ@ (%@`6-PJ@ (P?ڒPJ@ (%E@[i!%PJ@ 6֖PJ@ (%/*M )%PJTPJ@ (~P/lZH (%P#%%PJ@ ~aBJ@ (%lc-)%PJ_T RJ@ (%`kmI (%P"_شPJ@ (%G@XkKJ@ (%l¦PJ@ (%?*Z[RJ@ (%@`6-PJ@ (P?ڒPJ@ (%E@[i!%PJ@ 6֖PJ@ (%/*M )%PJTPJ@ (~P/lZH (%P#%%PJ@ ~aBJ@ (%lc-)%PJ_T RJ@ (%`kmI (%P"_شPJ@ (%G@XkKJ@ (%l¦PJ@ (%?*Z[RJ@ (%@`6-PJ@ (P?ڒPJ@ (%E@[i!%PJ@ 6֖PJ@ (%/*M )%PJTPJ@ (~P/lZH (%P#%%PJ@ ~aBJ@ (%lc-)%PJ_T RJ@ (%`kmI (%P"_شPJ@ (%G@XkKJ@ (%l¦PJ@ (%?*Z[RJ@ (%@`6-PJ@ (P?ڒPJ@ (%E@[i!%PJ@ 6֖PJ@ (%/A*9l?+y<ȟ XSߌtPJ@ (SI/,aԄֶ6PА&6!*|I{%PR& 
GqQ6n؀Bx#>FfĄay,JSKJ55(-)Akk %611qqdQךPJ@ glK,uXIg[K dkV| ;ES3F/8}$%%!88A2Z܄4‚l^Mma!ZCs8s1$-p lL>Cr4PJ@ (%_lM>0(wȀ8 a2e?ď,1 ?9{ ho@-Dᡡhs86 ضqaI8ПdHKdZ[e}=jKdzyyyp͛&s#:k_6V}g46..11Ct_Vշ>}PJ@ |6lNΏHp򯥶-| i'%7l-sM}~m?Zd;|@ZRPOkQlt4i9 56s"n-m# EY&ElwFp[w!y-1gRPɂI^n?o2"TؾA^ڻ"o:fǗj}-kgG6Y"Z֢N7m.7(΂` kǍ=;vH̙71qfYRGXo{iv)'-9[V>kRJ@ (%G܀u+ּغ1啈jnE :v_#"oFa'eIql{ʏ@qCQ%f4ۅ {?6Z,i+~:?Ah+3pت@IDAT< m]\lCeꖃ5o.Q&D|9(fC+U "(|DX]'ZYR?+i|6BEĸG摉 b1&벶K~n2 d@nr  #S6$ǠD&B=?#i9LG(b)*$`;aԗ]3lפPJģcl"䦻a*4? 9T ;r( ]TM9|"DoWI"Dgs*`Q Ɉb m -HiM Q'X”A*y/uo۲]9@L*E>Xu0{>#%~l^מm[Jkq!}o^d>3\l*3)A') 8\Qy55h&7{C#4s-MV2oq-kiq*`~9N`(tMh, [Ē[`BqjsX3٬ߢPJ@n؉N$dAVɲ1!vZ>jk^wXRWywld򊖮7T#k!GA!v*Pg/"!2ԬbzSy8\)>$Wm^}ep:$ɍcwSgI|j'#[ۤ1K@H'*e}ڕ}"9)wMmvi! W(.FpswDqc54K8KAȋi˧"dDY$?":hЪMICbܮL5d=H;VVPJh68Zok,b5XUYCljV15)0L볖vyfv02eVY{bZ6oCDI)m>Ň ޣ;O-dz?+W]t.kuD޼l8~8̎و.D%$%:W{6dsS<97^Oqb;")(8'~׼ _gnխ9l"hKy6E qfX=E^ (%86ĵCv8Xڄ܊-k[8 0i52U;Okɟ<;GNPY dTĻ py3}ù̗UPJ@ (%pMqIXla!yd׽ܛyÖ!DΣ ]VȄwnUqXN$!P&sчt {.%Xx`1iRJ@ (%OAh2\X쨞[7o>-J%P5s2 J^{ٶH?mMj}GlWIVޣiT?n|9] v 孺:G_8ђy"hE,r)W sނoޣ6PJ@ (CoI1)Cz7]-գVMFh&:=I߼Ң'O$yom7s*&O>,O=ѱupi=f!5 Le,j0Mf)E$?{f|6j%=wVwkg4o4]P>&/5ݪY#1,8V,wHUm?gvv.w&Av=k1c-v#OX"" uvv.DA==ܷ^Ͻ2YFr -y+eq&W!~~HT i"8g8G(dfM,ȷwt/q}%i$1m% T]u$BmEbg!KlyQe.;O h l[9!HVy K>+O۳L߱薩Y/3is0ly>> ~$ƵI9Hh&\o{"ܿ,E京Am!s'x]B=Jym/f/Dmy'q?-YR :i) W$eb?7OG,"JN! N-Ln] X5c*z nT" 0H"yNf\yD5y}:>*LH~=ÌUs_'eGBwIuz$,"ZlS 8H)If9'侻pO~TB'-lˑ|rF&uJ%G Ij,x5_MܹxYnV4RDVuyB4R`4#hW&Fw.b:/#✎8Ыc6}s'yV5ev :, %s2?DIBY I%7 ј~=;%m6Tc #x&tY.NAx[>yL':p)jQbӽ+\VR{jKVQ:;Q ktv?)1 ɂom•]=;cXxw{l1.wFW>+AAy<՘b4{;BlB*dugPvK|-߷\tJŰ#1|XYzml3SѽgO{mM58<õکJ'Y@Fo\veRi&Z'M9ڕ{B>٪2#"ۃQNVQyHU]N뎜"mT8( pWLZ kZ.^#R IT(}~\yՉ^ wjP Eg19t^h! ]ش? 
.8JI:Tn՗^-pY6WnKxshiY5t x828Gx.+q.y>vk`po7\z{S߀ {8Le[3k/T?v6e,[99,A#@aʔ.s/0+Gej"HGȤkIWϭK#m|<O*- ,|HN(]4 mnfaxrpդQ0vimBIe=2r;sa_WaǮ8] įX)NFXn :07C-e h`LHY"$lOAķW0qΨ(g wp`) ĺQYA^AA/,ip[P^݂L..@smZL9WsR>Zq)غab3;c ,Cqu#s ѥPV|6 11/ކ7a̤?o% ^b3lnzT{y+\y$O܏ƮT${q0-qr?r+Y䝘ŧ w> A`k|>DAhbW Of}sOZێޭҏ# @ٝI#w=S֘qInڥKQCA.Lӌ*88Yvrzv-; ^uFAF"z5Uc |g  sbF1&Ώ`MDiEᦡT[Z^d]uGj"Xl5[ Tck\:de co6&fegG^˦Dc6w} f,|;cIx8/y>:x|S\xk)">_ >@g=3gל'^Dy&pa4kB>X-Q$/VY+SO ƧgLi,Tc?kB<7pY7c 5?pNlJRx^RS,djnp GGZ¯?=`e[U`Z}]J2(GLg`}K.3Z'?i8rױ&)g#1.q VW㺻\p4^{^Fc=x;`iށ[/9.B^ Uc/㹇=rR ?kD$VpPaO'HN+&囗/A n拏 '6y-?]䢡5uf(exH5ࣇ+ޫOO܃nM &`Mk mߞ{߼:zmg`KEnpY锇q~evXM q7sX xJOcG0A>9@ ƿSVbL.vHJ?^6*&P O_ gQ\rYc,\}I5/@+)~w ]Z|;e47<2zw*9 hM06Att>[&0 r=>477SW_}5Z,{}FFOxl||,Cu0a׈xޜxb)ᘜ\u扨Cv{(G W{첑[]~<[8g|G~Ͽ: \urt"9toĮ[c7`ٌpM퍔h߅~xͩHA%nb|N5Qi_M `Eࣹadxkn?@IIer?0ᢿbhe  )ԡN450*@ c7ոIU~ewꇇ;VTo;Dq+8~ٱ;+"?WS/oM_ n|d+ΛҝcNoY;!>k  {w@Yy%L!q~˜n#" k.;`@,jOlmO+@ BQy;9',? xޗ<εU,uKYrFO~ yxK~o<O;cO8>'\+AE OΝ;/>nYƵNŜٳWb90:9 ̝=ן<_./;f|5p*&?t?ܤ2|ڋqƤ3qƩ' o@.ð.IxQ}>=1%Ի Pf&{nz]̜9Z497Rl*JSR ḌM&xhughia칸x졇~z{w0wLLJN;L7a-8vf\b%'s< gs6sf,ݎGRgo^ t }7_b™#o)8 ,K rr?2w=4Ajʚk1۹>~9f}0r,ߴ -F1`7|GCx'kr HDz)&w f>zO{>Q_UgןGX >7Ofb8yDv57qCQ=[DoʓMˏ܉])Cp'}>.3w=3 >i-&XWi1!rT'NCÄ`3yEwٸQØ0$|]aCPMZ9:r3 g7X<\]z }V9:Kl+y:67}f~$qLV9;$DMl8l$PM&1M)$,i׾/(RB{jk!US8ȸ)C7QF/\ᜀ9V:\A9PxT. . zSG8lu(L-9W݌bxG1{I)|8S}aAys^xM~!.F?z_^,PNОs}/JzL{|,Se *P݅<4(DqB lܴ2@7Dc7\~1sGņ(KBz8vR9~"8$?g":b9FOA`^L{ϘVl޺%xS(: HnLj:݆COi.~fԗ͸Q/<-܎He;pBGOh1>Rf9``wb☇ER;[<v)x'qs054u> ^ [VGm 9흌o_ h]z*FQ؈:/5(YA4&zG?1)3O[:1H$~8;ŸC981+q1]U ?MK]a.ioʼnn 8%0DV8l<uM.$H`4-^C~5u#/偦NHL]V5"đGNJg6DYx?Y+?٭H;{[&}NW nrWl=~F[EIG{5Q$@K+K%\qr.R:k0ܞE.DnX2wzЁNNZ9}HkƱI))_' .YM]IGv~"xBrr*?IC!k 9򹽨2<΋蕰ۨeGkU;bdZvù\83? 
M8a(D _# ڳ @P>~*W8Ñ=_|tt5MU\~I-?~+Kwð`KdZZ.ƌKVH]wkoA<>(.F9 (݅s.'Nۂ5;wg߂;k޽qW(q<>@m1sW c.Lw!_N:R|t˔-kQvjSuYE"~l%9t*-˹~brlZ$.:5} =B|!M^>}s ͺ\tLJc!mm D|uNcE%hKyᅬn0Sg˜J+@t+C\0ะ'z1k7r 38?w1cؔ*No"4阽=dlws pÁp\V ʋiC?1 D&Y7%QF--rP|t!Gh@6<j D&*NbuSR|=\\eDᚲpcڵX+{H.*B*ig[O.6oXG"r ذCFCl'>~TVaW#ow>Ջpī,D!WGLWeNW-~PRYc@x<-hrSc3'tӌ COd;a,XnCF=mS~6׀bcYO!BoD[ŪLhUX]w30y 'mmƴWCfɅ>?Y:X,,z Ŧ >vzqm T_oϧ!CB&`SG*n,{߸Xa};]|1TEql|BzZ>3g̢?iܜ 0׭^%*YĆ NW8pi|Q=.-Eȓ€W٧LdT/bc2- `>ne,tr,/~pB.%_{^DZM`8{87}\ʏjdfA/Q1 ɑQ|#Ǚ>V68:n`Z0۪Hi&q\8gf)Kd6i-FR\,Ȥo2yjXDg*ȓE8p4$!'_͛ozZ9'Rbȵڊ.Ek(mKop_+:Oc';b}Y`'|tX\wdBбX|hF8Gx)qOԗuȈ^ʋRKϯ;E썧śڜcOo8m=܎ UUU2?dqжbr7$-_Rז|{dQ GD}2dqӏS@h,|Sxb\tBc'芋1ԣꋜld㈡3|ن˶)I20ɡ7_9'\q ୼7lKbjn@Wj'mE\rRwhLdS@^#:>Mݲf*蓕;5]FCu-ܜt-!zaTuA_<碂|=_l4/6|d D$q7Nб򅭢(࡭'pz"q ]@c;>G [("=kN| 714S? .NI>.HK}i^3>'&nX.RͲiߨ~ݏ)&q%;M/=I8\~U]Ѯ8 %|E;\-ͭ%"//,ZD}2-iF`%/>}{иYqw2ѭn^cn'sɻ*.1&i0~(ќ c,yo|l>EȨcXS79yˎ%-@)ϱ"䒤2w_!yk\}㟐R)s=  'P|(hSR |SX|_͋jQ4\'Py544Jԯ2Y9c1a{9GsPݣ>֌f;FpFks =JJâ3%'AD[~y|0! |j⿊_f[<6"lЀQt N5A( @QʢYZ!: |ı L),$82䚝^cF`͚ 9=[ Y*"Ð7 0nT 3 Ǝt&aXSb/u*IHhbWc"JY,$8L0LB2c18T(bQ%͎8NR־Җ"022X~s1L8I[ |Zkh= #"E&I?.y&7jvKdTy?h/+dh4"?j ʹp!?ŊJZIE+:e`Mn!nC'mV^OHP}6+ɇ|KsU?Ԃj\#ELD8%QgßAưcm8y)q^ْǏ/='@  @k!GdX~ QXtYe+NeFh*mLl\rpJdCj2qc[igl듺&Uf>3#?MdaUqmONfju]/}SŽGJjM;"lIJSx,@$!>v~+(AN*iKK߅.p9znPX)|³.1IwſQ E&rSb=thN==eåJ\PW[p$soPa-Fs "EqayK !@c30둞_ថJ6$˘ZH2##Gy) \.lقFj!;Ey[vhy P!pQ6iXlN._~'t=b*6kM8BFL DN_Ut^XX Zx`uMyyV􅍣xeܧxHHݧfH&F~&|YpD:QVOPO*k5XC- Pk$pB53ŤPY&i@OeiGԂrr3pBlO~y|y'N`,1wE(z2-j#v$IĂz) ɾp#Ȥi`GfVkh7`+*,_*%:bYd!ZF3zFηcI1Hbjb-JAjR'P"M6֢ݩkacY"ܖ$kk0a޻'k6d$s7QWkk~^FyO+++QQQĔfi#Ha;YDX!rMXh{gaD8O{FZC4ᤫXROh&w(DT! 
)!mlS&r'֡#}:PJ*rb}^aZD:J%ѳ12xדN?V\\UV ߂j?Bz h`Y-|\(Pqw~rb|$rq%z -DM'Q8NމtW nVTf1$Ґ@62Ie*4$@Mٌ)7@S>.skQ끿TCfӻI!ZG+:ՓNÔ~umYY~0'z)S@&KM18=sGQJygzҀAgqT>v19s&*F+uѽCYMylabjCЃ+b%24.P.61R8.q5"Mv#cň6z Te( j`kq ?q$I1k)MvE^F;".=E`Ctٗ0G~kN-']th^'}Rp@M( Ǝ12W5jZLH0Lf<LBwK;)cώLy6 Z~Caf|Plf%)SzK 7-\T'.H^)p-`*%_?^ Dt7exeA9}5י6=1Vb qք"Ԓ$a/ig7!&$ȹC񄯿SG=y)LZ " ؂bȤ5&GroRYƟ+o/).:%%ԉW$jT7:Q4U %ڌܯnykOħ1RB7paO7ZZ?s>xt ?@jgC, +)pP@$M8( j ӑv6Y +`8ƙ!D~;[Cbԫ?1 __6a-4@0$Ɯ5?㛵pF;26@X 0uT20ea9ǥE0S;(r񗘔Z=ҴHvk2\̬ ?n_)Vg?/ϪXIuo?:t )3Q@;kqnB# CŬ,ۑJxъ_t.[lˌߏ%jQI0Cȟ͘ -?m./*;`ku_<'ʵ:s1zeǨAz8fij91ݷ>l.b^9SJ7=)S@N*v롮݀M[Ync1'ʬI`aHY9`%Pef"-n9y;t;;g En~VN:t  [!C-)EY[+~.b = jn<ץ6Oٷzj6*hJjR'c؊6i)ꬭ1`lݜNyHHAKÑ@Q|6|47QYQuŌEs7S(eIN:t *)n.$D~-D \f N@֒3d7$A߁=hh&ԑ@C,rI-r_"KF_p^O6ڱjy! FA=2XΚ'i@IDATGNg@zF:v/8_47c}~v,\93tLe iʾ8=)S@N_9 "tPJ@1 DHFjsĤ`: I dd#!E $8i_z"l]p҈勓fX3$∉ &M-sV3PK[V+K㑝Eش}[⨁m:pt )S@) lJ11Ne ;&dG%ɺaqD~7"ǣ=OtpK䋢^ekBoo-.5E#Rƞ=Rq5}Zٴhj]y1ߏ\5 4B%d?3~BbIxM8! &khPZ~S1m][PBO:t )S@OQ8l\-*sHT={]2Q(*` Ϲ|G%P+EdD< &F~WZSVJ3E]]\)uYy<^4mp&UaK-1AXt?'rӂ_FAdk&h9xF)zoVr46+֔%pؚqɃq쉃bfL>wlfU7Hz)S@N:Cv}T:'pz{I?33sŴ V8 ixAx|ǾΤ=~VEp[h"wM` X?V16*=͌I򲾀ϧ,)Ɔ 6xչd$ ZWtvlta0t0Khm*v$q59m E4U$E<]MC1HNUl1НG)o$1'90rDo܉2kDYĮ':t )S`h7`MEnYnj">[F` "HM8$j&'ǬlC#4R[mCr2E=ИɩhNJD#7@)s#Άj6T IV$ g}пYl\9 ~iɈ%#0d)IPx**0b$@rlQTMj^i̪~⪃$_8AY[B4LQڈBP䛎M G\ :w# aηtƺYt )S@N}R'sah"q(Ua D<ɉ Q=^1'Aq<<:5`jC-9` v\CǪBTFIf<'Hŧ挅CAV"c~Z|\AM #J$Hc_9-?M{c.uKpSA(E)6z*ϡ.m )Y[Bi选bP;QY<H 9fba&σHLKjQT- ln nIH/ ڎiBiq,Z q'B>|:t )S[E9,pCfP([e)+ "CS($R&^;Ӷ9 DNvU6M(/@yؙSHnY*Y IOPF`VH 'dZd~ - Wӂ9D-"y@*tX[ZвPF PkGacWiE  LdcN,$b05\{MrB[m]-]^U;sB4!DR}2AYt+Pa)fB]^@AByiങ81IYJ ,l2sE6Њ_o'p=)S@Nm)`V 2(iC۳/JhW 5&:N}#%xmAcF%H9{NG.k~cEZ:^<|4tg9fxPr"N<+%Q!wTT*H" Մ.7(֝XA'i J~](g&?& ثoB}}iA\?Ho#O^H dW&uֶbԟ* !SYOXi5q'/kB w$Ʃdۣ+5=6}c UJy+0֍+0nT':8Rt˨E_.DVF"~U:t )V3Xy7xQԌFZg|&[HCQ"su(RoK:xwVnaq4w[3AۍQ#g09рm!8 Yhr7r c:MFBO:t )S@Ko#Ҳ FEie4ua"0Yy_ ,"Hd$LOi˿VYۿ"$z`vfs$$ 'o"hE'R\gP)Y8;0BsF'g@W" 84Q]Bnd?Bq $WKCk\"9n7MDc$| M7n$9U#Ͷn݉߮MNE֠.9($GOB-ĨQi!6mAl!ƾkWYLR*u |6D'dz4mnQ\ފT6Q[m[K΁ÑB#QRzƇf \A{fYw 
[47[#=5z)S@N:P\L#\|B&Y&KK(yB,9QEp07E&A}2% 0}"0fd+D#RRj])'I)S@N# EI.jKFZ E$nϟdbi*!'nc۔w6d  Ũh":gI ʽrNu1<&DG괈D!\僆!S+DC bh %՚*9R,(duyW;`xi&rNFQ^VV% 5F+E:!QmFL)AtȰb ҀC}&UU`RPĹ%^'$$@!.SoW,Vğ&H2Ѝ oa5݌d'ba*'2-2VN:t RITNd䢩0KjF=tbDfyBNF9x}P$`ӟCRd83`߷aGaü댄NΖ ޒb УWO|e1;VCD(&[8vpdDpdGr3G6hŒl=UO5-:4@T,%cv:mu:է ,Lync&x=67*u=!*HoY=x VnbT'> Au{Wg8u1Z1PH.B:abgRe5XiV+ƚbjhX+D7 FQ zEwYSvv6.Q!)ǀ("!ǕQ1!?:t )S@ !YgFXޓv iF:4RjO| Ԓ._ /Vzf:PsvAwA)6^.-\Lng}2z|d{QkŘsGٔ7oo4AX(CB0'N*B'֢a MʧZ,HKtKE< VDƭtC\%hy)4@Rl$81yn A&@4n@QbLI@6X]r:[ tэ.Ae$.ʵNWt,}wņ[b,~YSnK)f| p&Bސt qkKE`%,퓭JVW՚V3mcWE=Š)|ypN+Z<t )S@N{SCt\ RJd,Pη݂agNŠK)* oD׮]`4i233U˶oۆ]H*̛;F_f[MCl.ztM>B ~eQIx*i)c EԫR"3H(ELxN&ǰJihZopkg^ʶO lG-Bl8 rH0<Ѡ(6xU ψ&@QBи[p:t )l$:[[luss>8/ӌtw (U c-C!A]dMaoW`@~vZL{u$oڊDrɽ c/">R{ʞ=^.Dh9}).\8V _܈OQwOqT,:Ǒ'E@F7"$ecœ$\Dr~_nR8;6 >{o$Ne#ȓmWYa9kZ_]=]׳N$w XVxqHt1EN5)"W`%xmS,mON:t MR6YOeHHIY$j`CJ׎ۻӥ8RTy0(%bcHSg$P#lF jUFtyN:t P6!`#9lF*o"%hAL֑NS)n@v9Q"tƸURr λ(ݹ% 6ފLu G!/U@$i%WJZxtVpB87)ľi`N%=$u[!rhaά? QT\<މ!)V(T;ԙ !nieu Z+`t4A|4@L|*lC B מj#*53OkcF79VՐ L(bl Z_:t )k!lE+馂IBfdҷu =n$*w60BeLOFW.D!rM⠺ SF1S:9kt?is]-)艂\-(Vdbɕ?BN:t Q}6Z·˸EQ!ӈ.,}'E xƮte?Vum,GDtZF'$bXKk9ol1B:wB D>ޣf!Dmgy,Yɕ $3TSS $\7<2V 02V~ 4u2 zӫU=1ri(S'U}'~~淰0ShknŚ=Nƅ/outttt>l  4dT͸0: Sf)AY`kQ(LS8H66B 䢣h[",]:!bxR" ӧX#ÆlD]uE#:ӫS. r̎2ofLXDQiaI 譩Y r,e4% H4@{YS~.La. FHJ>" 3, Nd yڅ~T5mD{Pnl4H' .l\%`CW1NXJV(Bݠm hijKBx΍K'v5=V?3Ù w_7 3 \'TL()rv%b~0tYCF`8LMg[Y ˗JZ9m{G|vvA M[_5.ЋMSv^7O;ԃN4Ņ8׏I0;IJ&E2q P*%GF_3=iF˖:cfWBb'$I phGM}Z':F`^{$^-ݔtKP ysU\rH~ѣ _H>H@&A{Xlj(D ?#&e啀EK|{;,dBG˥DiQp 2[Nct EV]ƴ!FB+Z%dhe!(K?lL$zaa_ޚT: w 1>S!EJu3|`>XuzӋE0emS]׭d#DQbL`?jN rwstvcl4=׏~p L~I(XvjN35YuT }HKKS,[:ӡ /b.:M3ltO@ ۥJs`0%GGwn@ZM sPya:8o8'Z/9ص!(gY`N]Y(&'fFvc% ehy;AF.5'гލ5H /<";(**CrZJQ)0NfM;k#L֟G# \nN&/ƄQ 6@d<ǙC](T@XL\>fdyvlߌg|wC} uEAV!M'0ӏR&Obv>R~mC1FXj\AW7 -b ?~ħwXsf.%˝+ǔ9}|b}.j6֧wXNqn)wl"r \7|:Hqx Xjj.Pg=C}),*JՕhnco/QRZ,]C&mhDnA!-@_/zGKP[$_/'89

n;v^P.? H87Q #-w* HRJEH"bY̐;k8>v/c}\ro>4RSE˗/ç?8R\weEd=]33NJ.^Wb,zJ |]6gR#M sڂPEhNUL3z}bP_~ ~~lC`3 cAa9z &`>1ZSQ"$5gķ&d8 Y':ZY?ydWޗ/t"70DE/{z6'8Ŏdl09؏^6@O'RRS<}4;E[$c#'D]/iP;FHbhUH+ Ӿq y˯Cr;8 UsEpf$.e^UD".n#dD|(patQW NLzQBEFѫ:$0m{dSȦM [*vaÓѩ ch`f2a;m؁$zz0Zi4)"l Kt*$gƏ l xnՁ~'6ZX,fEHr0Z& 54#ޙWFٜ ;VG@G@G@GC˔,G4(K!i.GHݭti9jnnƑ#TRavx[8tɜН XZ'M9f- qȥkYY%;%'" H9Dƒ E+` ;.;9#ۭpp sxɈOO~I1g I^&H'Vdg0MnlI,Uݞ%%*-t:w GʖaM䫅am> Gs6ts?[7xbK~iev EnjqSnYذajճ^NdyUV@nOWjDY G/%hN=.f@sqgaꕊqP W^C8 yqV`[pGm x7l21;?& <ătjr{"Xr5Bd"g"k!:+P}bDp#HV (4uc{IK9gn' (KJZr649}C3HnxFsC95"L}._9N> x"!96c@j1cTԢA,a:3r#LםCc]_0?+MؽnZ`)nXϑ.܇ѩ}h{@q!.~#$z3Uͺz\K# YuO/~f3Zy9hl/!ndAjkP`I-~_!L뙂\7:dfcp._"6WʹobeX Sr?YeFvN#/.ӣ ]:A `۵CY3`S`YLw{SH028_G܂|$j-Gp+V]4J N8E6R!l\PȄԌqi0&䔵 Q RGHq#'YY;`v= 20j'ۻkx"pyw!gkRa6mZ5kbh…kź:ڵ,@)OK|Lx+/Y;L'{!\{dNf>\Yq8C c}df/V%2I)+ݢu}^y #wN`r&_}5;vL]۷㾻o}2 &⪛ H!]z,Wf-_F5!,ȨBRi%D&(H4H2Y3#B7(\ yLki9AYCqP`Oi4ctZ陚 GNbky2 FU 1=,%",6T:+񡱱gO3iS2zENmn7Uɏ? 0ƮB0ІUɜt`媕(]T7ӟ>RXfC-6ަ0n^Xi7KYp.6R}sO=79e%ri'"CjnzǽWl׏H6.)ĉ׾5ttt(o꯱XSsds|Q1䂬[LLX3\6U1 ,=6JC#ف;֕]ٙ8QPEFI ,3=~6QyNgH X]mbJBcrl(L 2`dzBEm*dz53ݼMż&/KI\Yn TUarr˕̌m Attttn#wF)~NiJ! 7͌n1`l|Un+Vz(/Lִa=}:O# JN`x`6!v*>]dNJ6azE^?$H45*$Q)H! 
S~76dIdB8u2ϠfYrr2Z[[i^Efr2!Ҫ^3'{Q= "d$.jD[ދ-°͡8J!s5D 6ԒYKŽO|ě 3Po<6 ܣd,) ҘR{10u>6+g&ڳ,Y:C,ɘ*jC3vEt[±8WɴͻصW4{MdLYB,HIIR-O,ԗ8}R={ݭ6\%sd(!c`W"pHpP?o=@& fcxoHMNb',~kֱ!@v،2BÕ=q^8La!K$iE%4HjHGcӈ&?͸]#@c*4vScv ~ʺIL6\#0d VcF{~NIL`nDp6"*m²ű,hoЛt[wzࠎ)8S%ٵe04D.XZ)x-k3<&,]тWzms }'ɮ`5Cݕ`xv>@-42e8|0ӕNd I)b m4@feSB8FBZ#HN\0lr 9V3dZ13vct9`Zi;} HJKHW'/>Dɐ@qM$$7 L l!Bf%/^2k7mlKKc˰f*>yL 7#*9ף]b71͋7,7OI̊+\^#####pD|ߣO cSck!:~ gpGDCg?~^D#w~k7I/\2MdrX32tv ~嚕XvP\E/6^bG7uv*/ 1h`e75]eRttu#Pfʼ+ֵRNgrbY{tMdJCJJb;/wzsw[٬`gbn1՜dV2S~)#@bB ֭\8 &.ɬ R]7Ӥ[P\f' L0=褊lßaO`ƍbq3 R̞ [wRYdul0M7xͪ.FX",(\lAufvCYӭbR麸L3KO3aLߊt_h&W(,&dǩ{#g/}Ǝ=6ݺE8P3o\ Aj(_+v'O_ 1gc.^)R1|WWTZfZ RĦX)'fWԌ,hev,gr>)s-y {i[H묆:8Livϻ\G@G@G@G֍) |E]|2OďjX44V/FSQXdcA}Y*ק!C=SMħP1E#1EHCF:n*ei/:{qGc2 ;JRë55j׮Q5#JƦܔxv)dC*ɉ_6I0PjnJL= [b-5&it`'cB5`¯Q$ݏNK܂r'I!^$ճy{MuV*@lj$:|$r,Vwȃ΁ LR473+mS'Q)\$ZA !d4d-_{E gg#ݼʴjKdQ-LUlwVS-6ؒqm)ܺ,SS.6,bzUgkSBFN6*dܬtcL!A%({ĪsՃwxY:lG>_7/}X\Rt2Eٵ7Ӥ#"D'0'# 8[6>o h>b5\G NdxÔpQ-+ ,Us&]RVVVSΡrb,P3sbؔ)+b ݛda?F P&rG3k/FwusnDiۖeblv(^Y%MBc f0j2REɟZɑӏlJʕkz'^z2#цbYY6Ju qe^;FW ٪ʻ LO)Ji3L 'N߃ ;?GԽL$8sSF\|+[J5i>l~vU*\:.nyrzDgNב5EIi)[ ) 1=_zG1!nFQQ)lDx_"p>\N{_ JXNnvU.]ocՊ%p dɂ#7klk+L4zo j8EpjxȠ/R@ShSaNlo|WpU3XTO"$KzUn`Z̈dzO<>[ŦlzHUXYGPR"Zڈe M7& ( "iIDP?]L"9(ݰbS_,ELiɭ% šQXz2<XM8<V珶E] k[)AN\ėK 4J= 0JGk4 iY@i,TĂN7@O_iщ4DȰQ,yͮRjE.XC)AbI! jt')bŢI}HZ;%# 3.rR ()V LOOemhkoreag?wQZu ŮYMJB,mAC|"`n?qq"5a銵Hex^9i@@矞Ac+kjDJ-8"=@ xֱ1IA2a!ÃdJiUU嫖*45'Q,?Xi8FᏵazjqTNF1)a)rs *[FƻiiL%>H(JM ptS#)>2le{$&DVa" (֊EɶD[E(;`MAOkf[^Æ`2 fzQ3O ΨlSC:I\in^>|dZ )r;>ajk"ln2ZRw7"bI;x ֽve敩4!"+nj4Ӕ5<_ ʯiOF95O5<ē.ndhƦ^@$w5E?dN04L&#E5%ı+ʼnGFԺ-K2 #Kk"%9{N" 74L5MODŽ%E13Ԝ%)of4<è\16Lv,R˞"%(tEzKVI)T]Smq*&Qf,@"iEab=I>0,jQoKɗQl06EY<Ԃyg %ذ~b/C"+'=yXGO۴sLA2=>e D,Yn #"6 l||!3KvMpM3ua#uƸ6]%vr~#FkcC݌U:kb%,.LѻN=i_ qh7Msxoe $7lԥ eiW\I܌E@NH=?d2%a\klru4j^؎V: >,QvG!N {\TwZ}tSU!N\+MY/HG@P7I0@@5!" 0z79wa+gɸ̳HK&<ɒZ1?J7sC<3(*XFَ$UW\T{I:ܬP&@M^9sgT*wbJ_cqү:::::b-(-+ý~^bj+r2*ؑön#F:۰_h. YZ Q^Am]u8TYqn.-r !RSՊ\N:Q :u5s|(+^v;;SI"x&_bڶy^"t0=\WFjj@gډzC2SIFxQp4w7"q-|-ؼi Y$`D z{/?~ ޠr/@ <30X{&S! 
#@MkP%.Y* ?;ٸ~ʂtcQQ>qq-> Q%t蔋6X_ OԴ 'O;i"_wrFP=ZG0醙T–I? >@ & X  %NQiwH#####~EdJTXӘO1BK*Y5QTR#o_ߘlE)INtB{~!{ [bڔ L`,inҵ |)֐Y؀3Z۱aJkrv#@{5)5dzpKˉ6 ] d)p.AA+Ya.fx,?zDžK\tڵݫcdI+jA:nM萚> ~ R:%+)dӉ]1r 54bgc8sj6IIW{Q^i/F^pY1=]#G5;>E֍9׉Ŕe!0&j5^Iu/{L ю>ֵ*#xQz۶ Ifkz{wE:.q:2p8WGGiOR ;G'gzmrdl/[0U9J@$ jkF3%Kgβ8~4&gDEw=I2uY{y֩šVI+^۶ݫuDdlP- 8O1=93{p;~3OS\nsKF= fz4h.ŵ@k{K#j+WEm2ZJe1,LinA!N0I vէbZף{.2m0{A6qX㓕(e9C43,lCIRhjBۉetm&4-()i4ԟCQt}K?܂0dh qv*yPf-op48?{BvYhf#*|5  LHvBQC&aBOzt#%!lgdXHNJ9o2AՍ19>ڨj@YR(W_"аcs!8ė띌OM0Jgm2uQbM|;7 AR  *(uk(挬,]Wqȣd>Mt]F1Y<(:(rM1ȚdU6H}4?XR/窫`cOHb8ټM&O@si%xƞ=X&Q'>Ŵ8V&+W(*)x `4bA2U_u20C"`6(>dbN$C pѶo/-'#LamǑ8,3e6B=["Y5.,JF0= 30!ͩ:ـm xH ?z*ȣ4$~U/51mmAq"im\لGJ-:ZdsAb@a5M. Jq̚uC>ﻓ Imt" WɞH 껬5Jq[7:& &BȒ]~ U;>\qqVt̝R/X^NPݷΊJvJh-ss埝De:d)))ANNr0dUOx;piH(Yb7z(rG?E}_2O@Q1=ӈ v ^r"v"YUڷϴb" T!쯕R SJ#a $DNC5S~?cR(j$qcKz Gr vvj {^)y>*'?brZP-/&WS8=]؋eJJǫo9bsEұ}8~u`㖝JVJt]q]Yrĸd%Z6/m1` =czxx `I->Xх{U =GSMܵPV-"Lmx|>$N*sr|@0wc+B4:ɮǨ؃~y_uG9ݿ7d1iid8NA<gZeuӍx5EXFFB y?VuPڄ0si/f B/-nvTaAy.EG@G@G@G@GvyMdTV~aAk^mk7?hszq C%(@92͑%/d4K'nm?h"(L/ ٌ P5RcYi^$v #%Ȯkowc@YmDZ`:#\ն?A90ƙm]IEu?GU\? DÛ}t JqHd-x-԰r+Kw26֠A:.ٹL uG^1k1e-l̉ctYoWugNdge>%PJZhE!(3qQb466NҲ ҍZ'H$c+Y9*jă(ntZ9[ RsNRt b8ilqbt̪NNKTt?kXOC<}]YEk1d e8PƄTLGؘ`)3EֽTIst-LAqzhn'`ONOW IO.@6x^.j_ 0$%Q ǗLYd! lZ@JD+*Ưe,'~R'`XɟةˍìcI719*~?f9hYkhj+v-mkY.]6砱*U}Fa# %V ˔Ns\:CF6֩DCr= юS?oԁZKaWJ!.MҍnTr,#2d|dZ%~σt<0! 
,X}=Gr:e IK}bk[NڇM"`-3`!R )yBZo6cPWbݴE/:::::|8rFV耵fL_%R>L'Y j] #%@DvU 6'Aqd^<Kza)kmĖyoV.Xm//;v.w sWEKV_sy{{ ɚsd!L)rfѹa-RG!糢4x`s!5{RR{ f&F=܃l־idz['q"9$,ti@O/Ϟ$g*a8]2%wHC;jv܃khEG@G@G@G@G֋@_X3e;J QzgdNb,89N򽏵I#oNҚ PqBV[/|?kv]kosr|# gxJP|-J  LɶfYLХu I_*(+Yt,ii.-  fBr.zA4hU1H۫ۊD])n{Т"a!M+&~e<7l,p;r8OBM2nZHVi&~ &(bË@*IrqgI"{ 7X^,<ڗ5~IYٝ9Ɖ K9ɲHhHyOo/h.6@¨9ƱV /ib&8cE7)zBȢ$ݙT4ynK׮%:W߈C' _$X[Q(;]L`oC:5^b0.]DT,Y6FӫEy9v gǪIz1DLhd(D?TQU0kȤ,.$MOia!Ee=kKVrE߇pCF@s!)K%&L034C}6i>KG{V-W KN~ % XMF6-~Je_֮t1&O:kwr΢vz*&J::::: kbЋl ӀnUω b^QQTU:Be'LfR5m\0Q fT-@J@/]`zzkQtw੒RϏ0gN*|꣏b2gUmd$ WZ'Zsa3q6XmL'_4nM=<{Bl6Ț# V㩡F&R L{Y*/tiRQ>~L];'k# exJ7%/xOq&Bٱ)%eMO!ЧfuwziLDA\#K7aqp؆! pRrz/ 'IPMaP{.da*3G(K(gA!] "fW ZYVskK)˝ !Y*v ~,AworU 1^Y|  N6d~_M9v=/ HpqSMYO(ǣ`eHz#p~&O1G<_ZTDkOPgY;d{+iĝ"ۋM^n&(; UWԨa ؼi3f=>LkfzٙH+Fiv)GFVERI'2#d46?JhJg.,0xf Z FUL~2YN0z2UЀz?Ma1AqfRb>,aĬ&X]}EEE4\W|Hc$``BaL74 `jlc/SjbFy6cfiX-M hx>( ׮ZMn3P R %*qQN.V.YX r ҳRc~;)0˭!dYP~ -@&}uFFqПN|jjyZdffz| \eR-}:(ƌ-_1gƍw7hhNn[:6Mmvz F6ddg;Pcdռkp2HLTMVN0'uC7׺Cpg9VxϋRU; BHOU\U:::::M{whM4D"VD)"e풗CQ{KҪbe^' M,T̓1&&7 ppaOG?5: -SVf9quǑ(iw0߀xRI´Sg, )ʹ_mQ篎[RF#S+X'18U=Z$w::::B"d8"MUE =,&#I1n܂zGZ(.:8g0O (j0ulywr+\:ypKV2V]LxɁDqEyDɪTP|4$>} & 1+KR:I1ݘ;e'Y BVjpw[####w6Nz2j&+zBhkH)^J9$,^RW #unb/B˵#E@0l$RkX.}lI tHϠ_=h]Fyc5[cVa'P Dkib#LsJ-#####p;DM&BI0h1*CA[,AchOJ2k6KB,N"ɞ 1m=[yU| ' F?rwFdvt2=:Y,Y悫Z)LV SO >Q5!2,ԓ=t  {#&GPݜ%V Y|Hn-PK{ T쒇C@XER]Bj[-Wlr!'EN"nz~~ Em2Wz85=e"|XΧnYY5G`OF 6` ǮN?|g찇᪋9}a2z" gO^t$LwL\ bN^ѭ>q~q_]:5K7ן VykλZ J0Z&?>Cy67 p\a8Q8MelS7B/;->b&aeWʄ!ÙH[􉿧C/hC;cfinC8W,׫67 I Xn݌ܘ} 1,r=z^ӟ$YWm;aF$DN ^Ly/um#ʠiDBOrE@& jHN8"߱Hos/fw>-- IkGOqZ \.8k dXMp/1ʟI,y[zDž>o.qk>l-BJìo[T@j]hhD>oosLP0)tؤ53Sˤ~˝el ˑ.r&4Xq3+3"ϿKBw(Tak]_r#1 TjqqaK . 
# :Fp*gVg % #ks==h :IɊZ `f0zpF@~iG_X"lW[$,L(]O.l}><亶3'3$~WN]pFl,KYmbY|26\OC~s8|E]<^f000;hfL?7M'aW|Zb ŕ]&Ma?x1m,= R&je:s3{UCW'o%a8Hvʱc'; W9rOTERþEI 3}/{?_H7o0ٜ7B9 c'5,]On-7zh{c?%Gb[e+1҉|6$m  ډԾG[c4%;+}='~ =(>_#w$kǟx?_?W]{ ;Ҏ|_;x^n?U̾I=1Ƅ0ױO3ױ7YqӀ!®8\/иm%DT3n12*K0=2v:l௛7CIJm}/԰-t4ۏ?@ʢ}k +x:u8/W@CqD{c{G=v\ڛ.?^Am-5uW#~ݿ~rʛGmK+Hҟ֋s5\=%3owVG`"pUV?|OW/7DTG%~m_?ra7Zj1L\}=K2Q=#hP=Yʿ## P)NVMHFZU"#EF]+ͬ{H3Byo^5YVgX߈N|;zp?,wlDEq%L&p1te+`b/ΞFjAϠoyŸk$آ-S81 Πp xF /qq"/c"f}:] 5D/ܻ>sb%f//v*ELBֺY2o.%gbOt6*³ef$8}z&ppD-6_Kćǎo::{&;{MWII5l̲$Mlc7j,ذ`cϹTx4wޙ3\}J8o ֍8z R/?%RڵE@gσ]{o,_09 ۴C* CƠ_ "ׂpe1+ڷk Cj}{}w7;x "кv˸t%Id`cOOT5T$=9' s v􀃝57\RQdj+'xt[6Zf@Tw [zwf#Y\ #wNpn6(/L:7L۷EݚvHNi!a䘣a3w4O~()1oKٟ'9XEn"3>)Q8y= kqRZ嚈M;q U}|H j >6b2d;da1m])CШ߇pwǔǩ[pm;hѵN\fb[b.o^ `^6ݏ#F {O?ðfB\C+`O_`ChYDhLe!E@NXN7_K:pp؈qM#Y(z i9i4f4DfjD&,,&$L6IDAT.Jވ& ΰ6Q*5GCIF{p7eYKFDOuL b!@pG\NM##U5V H|((-Ŗ&ikcƍ߷,_.s='siEhgԩA+7 $] =;!=:={ EЍ\J2췬kWE*XX,=I#or߷Kڈ GI#s"SڤR_B>Ly{8DBFZ f,ݛaǯSpۊNj!6>tn]iPhaԗ+Hz8;Ͱ}4ibĪ-1wzLbm`ȌW1z|e&b=|D؞~^>2vT8zpťA$ݦQV_{PsV9ܻgjvwwOW>*#1\Aߠ]ơGѠ%ɏM?O-gI]8~bH,ZZ |(=FoեXLy{ _WEזG>^2a֬"ee%km{`셭ۖO5L3 #'uм9nߏ`"~ mGgH@#qz l~WThUӂR{:CM}ϠTQ+֓]V jň=(QJ%k\3+LC ^:9tFt@7ſƝױaA9*fQWD4jEן"1 1G"k\~1|Xp4zw; s!2U #)oːձM`GHdiO%n)#C__\&ɈMHs 7'cotW;w,XFt%Ηދl>ֵdb_7ҧ<#"Ջ1s|D$Ν?L) qzVRyڹێ/Ɩiُ~#u1tp~n/Wlo c +Ju(%:7"j]qooK&N66a#'w "o•{362ȩwRF:M AUHl] OMkH1Cj'nT4D}LLHB6$M)PrU1 )tCA a9'4MM``oTDjMlڸbo"$'DV=I"gy,aK'Db(MĈA}pn,0"US+3\ƍj~Ѵr$l֙,2w@Ώ1f$~ltL(%ī5JA}"C^?JOg\8tkR"Ki{ճ]a:N؂߷UC +Z]6 vm8NՒOT*#uxfEA@ 4#FN0oo`8jٚt f 7HR?0\sDo^1ui&{04aJp&KЙV1ٿbw=ܿzNq,z a0j\-70cV _H5L5ϔ xgL'&Tf59 ǡ'2k(&,\LW[#=rv5`mJ IGŲy0!bW/aMh@ ֮\;h뇞^dۧ&)iN{g~H7U @Hxgoޒ "%!k,3ȐZAbahf@W;q)*) n[Ӆ@de>r <2G}ɣIJMS Rqعвn|t ]mx&^#[Gnu:T(wSu8ԮMQ?;d5{U#-mLp9z Zk]My[ΨWm#~y HBk$.x{oT'{7 JO{uMjZ1gpqzo[aafH+Gv9|! 
M]w[ZvQ0tO@ݕ7ѩKO[}-`?~,-2q#mK}OӮ zwrGԍ =,}馘oЮ%%hf "$E g.}hL=8jubU h)Ѹ}&nݺ7¦v盩َ\o=.豓o'i- ڹ91_E+?z"]^PΜ$덡˷:٠p,B SRRRmcg:qFium&kGH(:={)b?l4#C3 ϟsSt18BX9noFc^PE"٢z4ƃ[HO//AdLLC[5z*  -*qN PLx˘|Ѻ eD( UI#ȔD2&| [ráÎAZ |{6P&4ydWFY뚘x "mגmmm%Iob#C4hРjӈ$%mvVͰD[;=2*HdL _p.1Ο_ YNom;~'5Cí蹲f%l*R 3W ؙ-$!T S給=/F8iȍ,:uWKSQ<λp ʏ,IxqPd$ \}AI2XBR`u ]Zp?S"/ 3!+^]7JLXhLAciWQW`󚒐5~~~iLM'!+4 O52hɐ]>&~+'*REFF߹H.VT4Iamb&uTG$JCNS@vDї\'g?|=!l|`F&H}Q8*'@UA@f*W&fj>Mˑ21ʄ6,C@Trlhqu.I"J茀EHOeiJqf_LŅ@@ (p/59?y.Ȕ}4eWDI[9WT)&411Se LblA$lz씗E@F #/]oڱOrܜ?RWEz?z9EWˑ,JREa7g%!1JRGq@@ Jc111۶m[='>>>8TEIENDB`docker-1.10.3/docs/installation/images/mac-password-prompt.png000066400000000000000000000742671267010174400244100ustar00rootroot00000000000000PNG  IHDRvJ iCCPICC ProfileHPSǿ{o: -: E @A:I]Wp-`YZ }Ae],`A]{v޼7̹'|<ae2RqD=.>%lD V\Tw'j} 'ĕp2P>vH,Cˤ BYM6 Oq'M9Q(@f|4N:dm\(=8)l.(Ȝf͓j&k|9O2)@"Jg񿕑. #tSAgUi!r&fw2SdȂ&L32ͲhifgHYQ,ΌI#yPy,`MsnJT4g bO$-2d&W"='3fHfzgzD'Dž|G^S.HRMs*;8|N|@H2 \b2A:: "C[.7S#Sto,!f;9M5vs& K} ut&f=.8b` j@ c`=pn `N:O:2l@8*p [O@70c! D !&CP%B|Hɠ*ʠP5 t݀:GP4L`= 3ao8|8 ΅p)\K-> G( 4F/$ ɈY %HR4!]B>cp*Ƹa0& Sc`bz0ØX VkuŲqX>v6[==Îp8sq-}:\3׋Zx+;> K=c;>'`O $QB?aL4%È\bq0xG#$wR)TJ%]%=%WPP0RpQX PXPpBBg*ْK^D+G ŌEIH)[)Ք˔OTEE"Wqbb7JD%S%o%JJ%Jn+ )͔}ʫ˕*w)PUTT2TU2W5SWRKET_*zzڧScR Վ QQ_^~^h,Z:m$틆OcF:^<:_ZZiZ;ic-h/ޯ}U{HGMMSsR.kBn^Hoe!}~~A+:ݛN/_ 1=3&3[M L晬41ylJ4e6m5h05d`6dSsyy= "bE%lhbYny rX꜅2K8bV5:ۺƺdžfjަl wn6;U`vMv-9(kαÛCGu{unqwߝ^k(<'O0(9;}í?3>}ȷJaewUDՕj꣺G5cu;Xk]{VWxxK/Nl9<XISg/_ǖ}-f{"=itzW<)?IxWO J/z2];',S럒$;O"i|&;6)>!SRu . ㄣ@IDATx} `Tյd$ȣAE|5b5Ղk ?"*">/oZRi^ˣl-EZ7 &$3d09kf?W={vߵks{Ca! 
/]^ԡκ@G˲5%Z7?ES:D}% -E٥vGG'w\{N@.Xi@Gfd4u>syoƏ/7"##HkܨS O_M?JթCkw, OUPi'ig]~}J.x_u\A@,t{}v__uWR3O].7moeZtmr5\GG~e}!C*\JJErZaN+vx!ufMy pouUW,| Pخ:\A@ht[\~]q"~q:}9-Wwh8N﫝mGmaS yԏ⭴Mp;rjWebY3{bvʣ 0|> STMT8 Y_L"U:\A@@픇cTU<_[2'{fϯb|˶--z[nsZG5cU«"*\6pŵP.XzPlY2i0 %ۮϕQ7|v?SG@ tԎyCKek=Mey~>NkջO_4 Z45Q]S*#̫UYW%ؖjJ}7y{˻|{>~!yZy {z  pdkl헭Օ}YLޟ U 2C~#7Vp̲-0#,9J/PZ upzE<,Ob%,X q^aTrZ76&GtޠI& t9nNJg߫~_e,[Nnl+ʠ01ntW\jkiFh"SpZ=|+CT6k}R,ȞN=ծ0A3n-5vSK_u;ɑ6V^,R>0;mZsJy:\Ѻga5u5pJ+#+g#ptpc8WE>ti8j}.L t$63*}@竖asF V uLJցT-jX-̻}c|tU T~;hͲYei2Yu᫃`"$PgPJl| ZJ|Al^խ v@mi@p\pY7r+@"}lTjϸ-m=LP\="73j:yǂ֡p-W]M0w&gZ_ U\yWgA=Rv-Lc zYAA؇<%WA@:̟2ṿ<;]߳<nM~{PgoY}8ӧ@]&]{z%j ##4 q>?Wƫ&P硃I/+پ[Fkt^ǙDž#Up߮4KV)m+`]m?& 0uUǔoϦ0+-]Jk30{[V  СFQAFN=)ޙqVm6ث+o[w$䭃.ۺksi}(:98KA{fTb,̶ Jo5ֽa3?YJ9[A `J  tUA@F@.`$ @WE@Ȯ>9[A `J  tUA@F@.`$ @WE@Ȯ>9[A `J  tUA@F@.`$ @WE@Ȯ>9[A `J  tUA@F@.`$ @WE@Ȯ>9[A ` „J*QU]ү*?BDEF @G!Ȯ e娬ADDKp $jTV1Vťs ._GYR A@ "}vq| Ba AkO:)֡֬Uhhp?W =b\FF`o")/E,u~*ʓ#?ލ؀Jm?>'Ƿd;.ѳ+.zrM@^񨮮E 3[ ]VkĪQ[Ø4 G0"@$=N=_\ӧQV{rr wٷ> Vl;.+!;gHy#{dWo=I]$-mВ'8i@nA]~G=X7x]vP2RA_J&<6xEU?B)Ǭ47\lo]8캟:s1ݺ!i47Po…Kpb)+*Û555:aDr4W4tDCszu-W]u%N(S7:a8PvQ.3&:1{tY88x(LU'e cJ2b5Ӑk#UUO{pvkzXރz=47Q"L"sK'sg;)78w5~k)0%Yfb,z۟RvIH?}`5jf@S#1m'3j d$ {\_=VJ^z97tJ&p;d,cpE*`"] 2>Sq ;ƞ.^LAPSgG ={ei'N: D^UUҲjѳG\9rPx5&ciYN=Qp^%άJshx4bQЗzx0?⒞?EwxP3']v!yUgz\_ïYxԿCe`ap8q*==7<%4ܨs+3W$<.4Z X,ٌgd{dN 5?Jq3v0")FF IųO"Ң]\}&h8gǫ0,J<YEw_1z+fG1KaO[fA%ce+ƌ5xgTדTWQ+"mRWs;Dјk݋k\Y ӆ3D1ja2UqƏ?Hk:筷蜻J(ĝ*GUoCVø=hDU6ސBn';viiS3( KWشl7T)p"#$BA=h3 c.`hT6-vwĉ\r5|JR 0|Y3HA qԧ84/],©}lnt?AEvN ׳gOUڲhQI I(ё7+݂Wwx~uWoإZ,+/y幉D+giCP85I$=E="5%Hsb|"-9\~wRdx L_c=ZxƶP_ACm 佉n=îQߪako')g˧NYX<Q/1.p2ċbՏ<89oY'0ÐT>6><ȱG mF^[v 0Š:ȘX﻽^2TJ@r %!l'111-Ɩ\ሥ- U4ZA[h,*4WN\-.F] hEzGGa̜ױ"6i22iFR|>nhRЯ¬w.XL9cf.:d95Xk~J;VܘY+{X07;m*OnhͶz/W)jwzsI[4s24, -vQn7*ӢL b)J0#{I3CpĎmƓ1cXU`+wWlܶFfNrp" +6o{6{> =9 UGem7>SM74T6͔[FOM ?&ߟ!w?+ |AZ ^ %ex/* ?['O[47nIc>HcaPwo%pzJ°C`=sXh97$v,LU#IMPP TS a4?P9/\,W ->t .^ .zGsvqcqYBOԅpV?-fyJ{bmUUZLs^Qe kdXyM@W2-ٻ-xN伿 og.R_%0k7y}_Z-XIJ{f. 
nÊmGľBt\2Dl|jP|jiXU@!+Ql; JE>߷SL[.Ė ^&/woa FѮ|IU4Ke wjvOez5(;AN[\4|m') !1F):8'l֝~uC8Ǐƈ!mZk?vHE8^C7(V:K.)c ^6G_h3hkF88m7](.h>t?ak3=PPO5{YU[x\ gVӰfhhD>j6W8P| G1cqL14l(k 0bl{ax5bCݻ{b1+6XG(B vvnp\>" bmM|P,ٺ0KFt _uLϱs":Ocs= v֘zzi=7NcO  c՘c ҫWT2s4H.,{v2>(=kp#g љCRW5KN>鬼馁24<,޾*/(r r?c-10'ccc[ 9d6'cY6ĺC=6Xd47EHp +יM+7R4o=y;7$kaV2vtdYz߾э9XpaV3{xzcvw ߒ=ȔL^,QA$vtLSᵷSƽJ]Xw:}QW`(nhӸP0V32j!<3N8)&Ok\] K לzkpRmXF<74-FgGwS4dR2t4J[2T2[@[Λ7C9fdn Чz:btʐﷱ"l^@l Rݏ(2/tꠡe:=l]28al RQڌeXI:MƦԓ[AޕIl]zZknuTɧTYy; SV)8!5(>attߦ _~kɨ$.(k8C(W=BG^~8jK-ي/o@\}Xv ־ fN(_àd+Z׮;Hʈ'F<UteTF<]9_f=Ufb9 Hˑ}ܽʵ6ФxX,kLgeu|MNX.Yl]u6a5`#TX9&\kIg+5H qcjʬk#s#Q߼8d^Jl5{WhAO_7YO6Eق˥dh&"jhV=: ~y߆b=s]|8[*YXwi2:ZP_Y-@!C);qeGRDFG[|wSIG,2CLP(*9C#F'ұbtm/h@ 4Y$ϟԌ5{%΁WM2.r^A _(IP͍dXB?3Z hXkF2& sZ٭dF 6vbr`CSo1z09Kgc>qTCw @سsn~aE(@=;:2,GPBxqgvø}7}3uoмZV9 s9U&_K~gWyzv%P[Yq>a` mK`FG0ꦥ-d920lI)ML`َB̠jhhTq~xA=8Yp]@#teNv5x$*PύCDb?:a@mILz<-mS-R" w܊qeC'L BvLllDZE9hR6\kzqx"7YESQP-vF]MO S{4j-xA@N!;&pt\t&i0&J9XBbErDx&?ަJG*L29Zp$bZ9``#.zhl(bLL\ݯ7P$ǧDꍖJޔ OMnA@ .SW H<0䞞@Zb:4a; ?|̘i)?y5yW?zvٽ,¹sPZR.O6W ޚ>w^a)\> ЪMT8y|J*d(C)DASzv5I)ȩ3kiNzXt|9ʩy\Wm8Q_҅2sSJ H}}}@T4cg4fҍ{nnZs:y{l7 2Dc92H @FCa0W`c򹘊xhQ/6[~䔎l9 qV^ U(Ɍ}t}PC{zPBSgJC''f DG#\K˄x&-@0!)dVc!q2ÕDvdFl6䮣=vte7"6Յ _SE٩FG%iW]ᨠ` ˣ<<"""ՐNQaX?yƞ  t &PPZɶn1!yPh=2D2FW>HxҲS^{{igV}I Rz*iKg 3{s'+FKJIN%ʩgWA WbcŲwxEIA@#9dǽ7P'ƤGe @4ռMXaG7UTE 1N?=S#yVA C=t 2G<_7=ӊ c!WA@N!;&*^YKQiXPc2(]tzu4z*ɮ6pƁH";+J.bynܣcr8-G(z / /%=)Y(<$L eRIA@ !;@ cܜq&y%\9N$g8"#W߷s1WsUM0:*L%sc8{,Ⱥu98ZI Ox(hT^AN{舰:GZ5J{Ps.XTL" t 1}I޿&ԪL 37138Obj820iN6nfŋ QG<BLd`>ʋWb*uUp^[cyo_yufŤ( T&+00㕗rYnZRLU8 ' t 1s0ɚ'2\C6 EzQ˴D{XLhpv+^)bO5Xq3oIpz踱'SO%A@';&%vܫb&* 0bB6's6ϩ.!x;&ݜ(5'8IY86wO4F&|?~;}{Gq)-4QD쭉   Nv\Jn δ/TJ)vG7|(' ѯoo:DzNDPLj?QH<\,m(CqtODžuN6˦O*]^RdЧC-IDAT8#@"d=;w ZD6:ګN< ܝ|qSZdy視HwֱerWͩI'|s#gwG?ϜUhsuS+km;ի7YJ(C( [  =!N⌎">߻Γ*HDE[x`Г޿â5Dxť u%}1rW"„c<āS@h5<@A@N!;"'`_h'/{:oy*(0e7WىK[cqo.< m.X9"PA  '" 9BvmA@6퉈> m]C*A@ !~"X>[ ]^bt,|sW*tN +Nxs[z_|Jǫo(x$= @IXrfc H}‰gd\{@G+M/Bv+I^k2$VtA@v[qKcxeqW6@SY0w-oxvmӨ{/Vd9=)كMO=(n2$4ɣfk\/_\}#QdܶYٛon 
'+fckOwmLv3"\"n lj#4̶lƽx{ޏ_7pFɒ9~Ql6o'A d[Vdç_li?w5)<1RbR&~Ӗ,?=*Z~/PDWY)ӱngQLdq^փ>`}zf ,O7 0%7lyeӳտ.?mFw+Ԫf#);ciq N{_W~ol\T: xco=sn1XIg0 M]{cDb(W?U&>(jdz./jB<~JǯblGEŜş~ Vz#ҍ7NcUA3_ ^_*fs^O :Ejf/gbX,بۢ+BvA0Secڢ]Xy>ڱSo~0w1nׯz ;mƒQ+1y߹KW3eb0k|_#,o%fڭ۰@\,6Hw2چ]rl|N*KBfymZ6Y aҴ݁K_Әnp#?ײ6x9fcK aяuNٖvcM4Kho9ܛϮ#~7)?9wdǯn[]ELGاOM=<=FD$O[U33ph>]C|8݋aKϒc|ަx ypslߍ?d~~kɩ xbL$~>lm-~:JCqWmXy?ސѣl`E&i4KlhՏ-wbݥxȘx-z?]Kr>p5㒐4a2O>-1o}ܔ>=S(χ8etɟ,#Y0#B 3 cR uhB70,t:$JB#  k&sWM݆鷑2rvn&\a(" DQ} O{Hj'*rz_fe㍟߇뮾.y k'o2~Eyv'~9?ziE"VQV?SQK]'"lu3X_r99 y+6'Ԧ〹=זsoA -LAL1zgI`M QʄX31qSCd!Keu8F=&b Qo*Ņ_FegAB؍6uK= 8baݮ1_83vp Z^LkIB~ |QA:r7GWnw̾ŕiɄ>uUj|%8MC_iCMX+uD; | aאM8ek BϹ  *Tʸ{bgipv!+>|=n9UZfOBQ{0,\ 'yK{K0*E5N? ˩$gw>97[- psGÔ1f-I! ^*L[CSɿCq[< ,w2տDrpE$ٌ;,B=q+D> }G=`B@1;i@,~s' iX03gЬFo⋿<7C E@L*ww-0X%"|s)&N=jsSR'ބ9F_Yju=ӹ)מI6w&֮F>zRJ\,9ȝ._9| 4fT MFCzO+M$w<RGbA4h]Gb,#> oBE3CҌ'J|>2DWsqAlYCKuvqݍgǼgi$E# i?(ˏK=5jsL%waƘxu?,,Z(rNeqR,z'0uZ59M.E+/y0ԳKsW-6[$OL LB͍NG& *#47i|"`TcurWў72ױIi?d2T0fPH0?d$Kf\K>Ư=]- Ȝ]P<QBA=kOtE @P dAA@hO]- =;"(%|q! CY.ҿ  nջWhBv_קkUn ߵm//%n“_w)k=:q Fˍ>(0.]QQQP(J @ Я_?%09uT"}*AGVt};8)K: '% ٵ9+V}BCCK:a)]"2g5ѹv0UVV*d`A@.Y.GvQBFIcؿ»TG]! <[aMysrrrpq5i-\iJV[ߴ~0kߍض.A@>FNv6FL:&At"L;[&`Ҭ>!lye$ -@@zv-D?Ф8~&_ -.Z^[.7QZ˦rЪJ8JTAM]Jಉ8SOOazBO\AOWn-bPՄ=-@mK8Hy6 -ڹ) si =!Y[V}%##kaw!VLu۷ӕ[b7_ yh+D{L\XśrA}k\[!(qd7y sd=v| WCւx≠ʾWߢ{N;^II? 6/@Cx7 =q9MR-@5ݖ *uɓw>VM?K1~zڵ c-,ܱzpDsվPA@@ǡڨ8$y pUdSaM pބk7coBCť(). =E&<r 6 3$1T \T͟zD΢mHX^AdtH9~xV&V|ݟo{7Qb/m0ߦc%ܦ$cܘhp%0!Q>4wsL+w078c)WpBs!sv:X 3Bv psWyPҷ㕌@uI)X=7[7?dwMq^zqK7э }Bn_ Vwsug#mٝ0zeshXn; #71L#~X i-IXn8}#T*~,NOxYb"A.@^}I^ԇO!fdN HΘ)I˱t vd~'=Ddž1`22?6mR_׸w0j XHv  ۆ^oVgc3SN~2mSw *ֺo/6{"48"{c5f:\A@hkBh8̰֒E^;!V\XMih[bcm}Mgxسk3I!\ =;m5L7R{vҳk9vAf΋Lz[6߻7۽+qqq஻ ՂfWī̦3)Tl*  ٵ9+{iLp<4pTUUW^gWa:tcfj-  оٵ/*][&烞촅r3{r} X$'%TNظ.::111*-  ZZȣ L_ǤヱJ"jQRiF=E'A@h?h 'cXmb`B^&> fջ&ȑAhv@zvH~0/lʪw/$LTϮA@ G@~.A@Z]1 A]? QOAٵC  A@Z]1 A]? QOAٵC  A@Z]1 A]? QOAٵC  A@Z]1 A]? QOAٵC  A@Z]1 A]? QOAٵC  A@Z]1 A]? QOAٵC  A@Z]1 A]? 
QOAٵC  A@Z"D 4@]]QZZrTUU,d\hh("""X#$$u{6;OLκz jԧ7 mq:uAZwQK$`r;sjѳgK^-QXXЧOE~͑u( CTxú/howm*-$UadBb ܣ^ѣ/*LOTOW^͒B.l!D8f?:X uG* (++5_رch.)]4P~ Gk0`IsMuVBzvm "PQQ޽{70k6KC10:7͟kv]Ps.#kc'dƀ8A/~exhͳ1EuXn.ܘ4Yk79Pv㉵a^ uX7@J r1JQƎC1˸vWVRcs׸MY=s#>AB& n}>uLĐLrU@lh* mJVL r~|_FZaj8O_w 14?{ń[kܸ>fg-pmc3@.i 5n>jڵ?؟KeJp_i(BI {cX=C<ò?_k+No9/9Ң]U}@0*JMoZnQ1lB\v%1*q2iƒi ϧ3 e`J\Z9ҡ^\W"=0jdg㮡qFݷޚ5! "9i^:0ZO|(ăZIgz`" '@.߹1~vVY IʩG{NjuVF]GhM/,ec` ׁz9ЋĄy5&X v$& 1Kڵgq{c nr1=8'28aijU('d gR\}B†n̿Y~h&~fJBvc=sJ=Q"K{IqR!YD'-o$è%fr*5UewY_'wV:&gF$%GLh$m>cd'zr)t\\c a Gzp/O9ǂ|.2$)ִC!ˇ:@gH0/߷yKhHAYzLsO-.?jk9~%eϴUʎe0F^=C"=#ӣsce/ZusbN0L֨t V}bfJhx]oj56y-bI5ҢdiDU" bũ )@g竐-<_Q[kWc1{gVN91Y莧T#-"cʜnR2OhFG߿7a?wkc0@qI%(=x]OZ?̍iK|#m)~iυ HhcVsiSU{Еnt|?H/r.ū ;/%ȥyNnD2m;zܮIJrܹ%O/ǐwYt+wypz oKZЎ͌y> mم Vxam,E@:ُӪEz||e(z '08`:<7Qe[~ ,/̝Ȏ,jvd)sPPR}Lty,tFD\6Fv?K>ڊJ~8[]6+𖬳k'H!f;@c9~kpY0$,)9C#zB~ ʱxrJa=.wa2Cc|^#|cF8n{en$!I3dzrLd/u6o0Ӎ<^nu.|B5frTqK; {L&V,7D?j:0u`YlMME=H#!v+WS1n]NY1 kQyQ3`T,Z4/W3xGY+P1a ƍx:nje2fb Y&POdSPi3I[HZlt{1MOLoh6",Ώ$Ԭ~)pǒ؄Lmjǒ\Odpۉ!T^mk%naچ/N-~r*D,1²m?Q9w<Lj72r#-mFDwէřg'ߦb! 'UJp\2)Iz~t!ZJ3sE.҈>E DzM~c!E!UbNx BSqlAΑ%H#:tC)))EwRE%%%"5Э[6p]?v`gE<\|vŷT*H&bzAsET"SBkii)C~o΅T<lMNN6= C؏5|'Ux_Y0cyWF.F.j/v! 'lOWrsxFI]˓-ɍPo47L c6W"|=yJ("xxa#VE@ry"(#d>APE P )"]E@P(G@./(">JvcE@P%(@j"(@(مjPE@ry"(#d>APE P )"]E@P(G@./(">NըE@PE :sn޼yctV)"(# %lD V\Tw'j} 'ĕp2P>vH,Cˤ BYM6 Oq'M9Q(@f|4N:dm\(=8)l.(Ȝf͓j&k|9O2)@"Jg񿕑. #tSAgUi!r&fw2SdȂ&L32ͲhifgHYQ,ΌI#yPy,`MsnJT4g bO$-2d&W"='3fHfzgzD'Dž|G^S.HRMs*;8|N|@H2 \b2A:: "C[.7S#Sto,!f;9M5vs& K} ut&f=.8b` j@ c`=pn `N:O:2l@8*p [O@70c! 
D !&CP%B|Hɠ*ʠP5 t݀:GP4L`= 3ao8|8 ΅p)\K-> G( 4F/$ ɈY %HR4!]B>cp*Ƹa0& Sc`bz0ØX VkuŲqX>v6[==Îp8sq-}:\3׋Zx+;> K=c;>'`O $QB?aL4%È\bq0xG#$wR)TJ%]%=%WPP0RpQX PXPpBBg*ْK^D+G ŌEIH)[)Ք˔OTEE"Wqbb7JD%S%o%JJ%Jn+ )͔}ʫ˕*w)PUTT2TU2W5SWRKET_*zzڧScR Վ QQ_^~^h,Z:m$틆OcF:^<:_ZZiZ;ic-h/ޯ}U{HGMMSsR.kBn^Hoe!}~~A+:ݛN/_ 1=3&3[M L晬41ylJ4e6m5h05d`6dSsyy= "bE%lhbYny rX꜅2K8bV5:ۺƺdžfjަl wn6;U`vMv-9(kαÛCGu{unqwߝ^k(<'O0(9;}í?3>}ȷJaewUDՕj꣺G5cu;Xk]{VWxxK/Nl9<XISg/_ǖ}-f{"=itzW<)?IxWO J/z2];',S럒$;O"i|&;6)>!SRu . ㄣ@IDATx `wIReGʢJ ^E*xł, AY^@d Z7@A@AʾuIڦL6mi}GK'slϙL& Pmbԩ#88(uv.(@ P(PB|ԩtEguL*U?bj(@ P(@) &`0ZhgϞG֭_F% P(@ P( yԎU:y>5ڠAC# v%>_?QLhzE$J^^lX>#>ysjVQi^d;7Kl޾[:KBHڨn*zu2G|2_.نGv-v/3ѲnXNQg?_~ѓ)dzǼ9x]K>?܏vTU ptfg8&gaU9w 1JhLIpRg>NT,.H9fѾI}6ř4o(@ff&2,8!yl:s"i2R긋DZeQtt}\@l3> &>jfI}½M!mxľ=V]^a۞T)&!v\M'/k܆r3-⮕@0Ѿmg.__<ʘHJ* _'dpftxGj\aU5q+u[l6ko䥎\/ A(`_>(ʓrewD+v'zhtƍ1V=7cw_&Z jmv{5`t~b&::D"`QF\c Q-@QLWYuj&DftS8c/G@ I!WX˳O|BlΛ$e-&Q86|/?GоS37?H56^^Wy۰wbQmxرcP8gvk#h<|K'tlirb_^5gCh9s&_=\͜ ,Eڤny%0񲧕%^fdCFVD TRłԳ}z[s@|U<Ϛ4iz-;ݸf-PKf;-9P|-sGp]Sx/Q{;x=]\K"뉁~ԯLy ɟxay6۹$>9 zrdu_/@D M:FMkw$Ne~Iq{Z,!=d5!:-l%Gt%l]%>77qp]DEOOc3L+>٫߭CioQ#fMX۵lZ p#qk[Xlgq | ֞!ZD,r lpx`,qy9DNJ,10:cǫvHi&qޜ2C?bۋ`Nv:mϭfhr,ܻMsVN v],-fM śEo 7'Arfý!~[OHh5Գ} -[Gn֡mcu.Csh|?!nOcQ'蒎[=:*V܅g'^ xcBl7ƌnk Q,}5[w.Poo-`v#-5nzѳH9hG$<:QaV=Lp Ͼ5 !3 =Km-$nh+Z,[.>Կ-b~xv|s؈>)vE6w`1 /x2|飵)CgFIѳH?f,u}&linC۷`| v\>][L|o55 ='xn-cD/ΟPű7^E cdC5㧐ۭ_\5G-E,tK펯ॎfer> tW$_x 0o[iB^B*ճK}W8Fᕗ^Cꆬܷj&$.t A-.ɗ %cF@~,. 
u4E .8)&vh4 1VKڍB>#1{%tA%tt,j j9~eSB}+iK,9i1u6YVoҵ3+NX/x]'w;12}k~t&U\SBPN5־Ubw#)bb"'ڦnn=ћND:uB_X.ciV06v@]T%.&gYkH wFMn9#Nҿ)!6Ts.HATQtnصGA^Yim=.wY8 lo2Rc&];C 2j 1s\DAp߸qFߨ/ܻG,Ot] lO Afbo=hq {4h닟5ҹV%qQA7v~Q+Q49ĝRdU]c9V-҇PoY|&n݇*R' Q.6fw3:fD.f|u3qr$on;?yەum2ү}hB[BŬKfeEQw#֪X9]|U@5G=^ra윕CNޑuCTZN~/yE{=-sfdcu# Fr]~YK& ENF P@3h"ujnZ c}YwiO%j?^k գz~vF򴟼Q.oRSӠ} SD{L['h&7} *O.Q ȓ⎨|S683TnAg۾om7sU#fě`Y.8mBl ~SEN6k|6#c-pS!^<ǦOwf,_/eD4oW7F#\ws_l0b.XŻN:"0qwM91h ,- gΦ{bVüADۍ-\r.o7,sbeU`r GYKRG~z'L3}~;D[ vcfK8i}b ?fnk%9IH܊̞M9dTCHۏSQ?w,ey6ߕ|ןCF0﫪x|h&?8ĉq~JLv\ۆ6~ĄwaU }uMWٿm e^-u97Ya!m3R5RGz9(mi'۔m`\6c_"sgf-_vެ}R]$bbg|=65T;+scxH~ٗ߁=_Ok$o$lNzVE P4u8g7LNhwk>PdII8H̞}WZ5Хo4~*67ԍB2R/_NCK\w)[V8mq^Iy=73~1?E݉gF嬦Q s\%V*mѫROuؽA dv߲6kTEo1[?u1ŸFV;"= ֢327x8c}OLU\tU]{S .`Oa-Q]OŊ-Ü_69ە*.>f^kDDD(8%)gYl?WAh~. j g\*k B"n#ݶysr3huW;_K0}pqemƬ5rߒ]c**r9j1~*u ɉHx=2B\+r?=6"\|Y {1N\hz1js| G54!1Q2i0E^ͬk9(@}/qDP}}5jG!\erl&p}TBw1Df>8vt9jK[!\err ;{.Y+RXtslO߫ݭQD.F9svufK //3d M *5C~B MO{tB#9zZDcpםdbNL,ͯGa9WgaDNs\ ͗mE}xPVq.JVG`~h(.Dn FOk+sUXGmsOsl xu*cc/Š9@Zѡ]nM[DG#n/*@ݖOяaY.xzįz1e̍xad_|<{^Scbi~}_е2|4gm_f%d\O}?:]֕]q=f㿯;l޵E+9god-nkÔi*+Cɮ/`> gu;R3AB#׎t<>rhDsqGol֜yIausCq1e60oA\-ro]}Ͻ6 e%Uqd Pʞ={°m+v ?j&p3]/+RF9i~S\5Z{dֈR[/{SMr:k^UcݘV^ \~ %œ/r-B\Z;ZjC &{h9Ba^JN? 
G_~/; c~ljB7;ҭlCdSl;D ^]-[ F=|GMe}{״]CYq`>@|NEi4e݅ F TjdAŽ_lj#?-q4G P #ڵZa(Pa8 7v՜/f )@ P@_1@@ P(@ PxrF6(@ P@`V)(@ P*@e*B;F P(@ PfjM^=e)@ P(@ oh_>6mddd %%qpp21 ߧ"*(@ P(@ TɄ 2|,H~m }~~2hPU_Tiv(@ P(A+CR(@ PY`Xff6S&g\?rm… ^ (@ P(@ gʔS(@ PAgϞHKKCzz@ȟ\ Z.>(@ P(y#׺{mAs:(@ P(@Xz5}ͽ:h\(@ P(PwqVh-׬Y6^☋(@ P(@ gΞ5S(@ PD8Vf7)@ P(@-'h7INNbh9/As:(@ P(@b l6 ]wku( M_V--)@ P(@ `K^'##CIIIfMwu]](@ P) ڝ]f& U߮u #b3;(@ P(@wDDDh3erFOkUϨ6(@ P@0@+tVI P(@ P4O*F P(@ P :(@ P(I'n(@ P(P UR(@ P$͓ Q(@ P(he*)@ P(@ x`I(@ P(@ 2@g(@ P< 0@m(@ P@ZJ P(@ PyR6 P(@ Pe Y%(@ P(@O _EQO4 Πesp$^N_; &K޵W18(#vR(@+}yU4.F(ecSR5*akՅe)@ P*/qL}9>sMnWUq[hK^F }9xW6RU>WR(@ P /qGn-D{Y$qrDNZX;CB|衐[_ F P*/qcWy[ޥ+0;6ӑS%]IMETdTRv(@ w^XG(@ P*/qPR(@ P,K=p+(@ P(@R`V䬐(@ Pgh](@ P@ 0@+urVH P(@ P4.zk!+ i= ;Vjᦡ[U|zi r۠O/ͭR(@ P\ 0@+WQ>ӹUA'oԲWܚ[U|zi r۠O/ͭR(@ P\ {pmczEyoH;`P{U%nΜMm^p{QQtosE]oԜw':i{rv(@ P=h~E\& LׯaȊHydK޵.uZ]f/iނ3RQʼp l/iJy1(@ P@0@+rXkU4.F(ecSR57akՅGDZuׇܣڞl[<_'cn(@ P( ^XF`{5E]Uef^¢k,=&{I52vU24m70t{hPv)g*X(@ o\|m)#6h3M~~>#zNziz˧VeW1z ]e֮6g`q(@ P(NK1K].c摖㷮LtP/MzJL*HZU̞=#:iv P(@ ZYn P(@ P( ;! P(@ P 0@#vS(@ PnH! P(@ P 0@#vS(@ PnH! 
P(@ P 0@#C^Wvzvxí?zin[˧ z܊ȷO/-_An饹U P(@ ,gsNAe?^Ҳw򰢗O/CQٛeaE/^7K.+(@ P(xeGW3 Ei0hP[&xI> ν,ӿZoԜCû5 P(@?{P?( a0d?y,)@ PB8zqlϻ\S*.sty &%m@^*g\_nϞ2 Ҷ\B/iw} S(@w\]Bwu{^zxfgDOpvA/MzX?z ]e䮒3xL q(@ P(NK1eQFØy%Ƈ+8Kk^>4'״ >G={#>*(@ PR3heϺ)@ P(@N83hp`(@ P(@*M P(@ \v .!e(@ P(@*M P(@ \v JeHDe0qH/2i´G(@ P@xz,a´Ł#Mo¨Aw ٺ5AtEHͫ, EVUh_04Ӥn,y<Jvcdb(@ P @nt?:,x%dH23I*2B|I4ƈוs)q&'.(@ PZ3h%0|F¡0zeeAmU>dZvwqNͲPi)-3z[EDq= )5gPDq٭oi{r(@ P 2X 6Ϸƻ6 sdXOOT~ JdZv83n}1bd[e쬠5%qS "DȵZ0{)uy"W|@ P(P%՚b-N!̛#0fbNw{ʒ$s25n{&'0/8+A\M P)p|JsDDE~<'䢪9缓4OZ>oAŒGWMKcp3b8u2UxﺴzRlB|k‚ٯA9\ږk^%O9VbĈ)@ P 8P~,EUX}z|5quriWWےgNVW)@ P$K tǶuO\+PA⮊{7*g8@GJ P@%%`PW̞MbpVFpd1l;cXƘumrP!QH;}JE:>T78+e(@ PT8V*̬ wũUD$RsJ ݉i{r(@ P@ =Ie׺?v*ZU_\x]1&9{D}k_L|k٢$rjοV:["b1 dc--;[xۂ?KC\6!Pf4|As|m^}Y}/zzf90)ݞ7@)kL[yKܵd鹔1u|L P /P/Z$&UkxDbYK6a[|Ib`+ Y4emѫ2K%gg*nz%9\[}=!na5a`4(;Kr =GuFq#rnzkϧ)KIm(@ Pp4@ˏC\~[Eliy娾t~/k>}/v}z._˛ud|(¼>?NmKY!ˢ@acۺ'HK[_ٳ}(@ P '@7 OQJ[v [{JVVC]acaL׉(@ P< TϠgV(@ PA#~S(@ P/q(@ P*~/Ķ ILx?ɖWNojU>ZWZ&/W޳(@ P`ϡ 7k)֟喃zDT}sd weWt/mM3CUĴٜ=>S(p 0@N~Byp 6Xsyk@/ f3CoiceبJZ/3a̚(@ P`VC;+2 W*c4|>an>~)!}`4)ioYy8t!]"'-ݏ(@ PQ "6ܷb,~wư>1?MV1!#OvuINuyژEYbk`׶ؓ; -}:&vgsܑ(@ T%՚E忥cמּ0-8-ٵA"mR[G<4-11H< P(P"k"""(&UƤd0@m(@ P|(NϠl(@ P-ߢ,(@ P |c6 P(@ P`o" se~JbԖ_Y9aĕbʺ!(@ P 0@+% GǢ<7\ot~wyY"(@ P\<(\ }wbz^L}/O9S[ԥrR̄Q*}wG͠(@ P(? X̌[b?+0bd[!aS[ʐUGb@xi-Q(@ \f À^\Rқ5[)Om)k ??f=8i9{'RC P.[hdhNYfhhhQ}Jl2}Oh]ڡUKߧ<ܺj!sVCPFv<ŊvFG!$ TU܇=ۡGSEB[t酸)qCnTro1brl[*I|RQx-2scG(@xТnͪV3_ Z߄ ^a/>FO8>o͗_3[9.yЀQn?ʩ> Q7 93ny+7!H|6/r9e]ֱy*ܸsg}`>|1F+: ߊ7<~l΃Ay[qQ{I%Ҫ8etOnsUVaAˠ2UyYn^}/mKm82~z-2 ˢ+}yWO-"k؏ܢ\t|!(\ɍ (@ P\ o~}H\BxvN[h=mUޅ4#D>o.l_|D1c^2wgXvnwW]Qm]1&e`t_M{Î.wRno6k0%h%]2#,Bnbj4qAiˬ/R[Zqp]ZjUEX7嫴!ٚC xɇPKR"i2|Jrz_ۋ(_ũޏ3㗚Qݒ(>E=v S>8iG{@G"! 
JUoV[({BYxV}AV^)@ PO WpjޢWo}^]wC>3~K.0>5:t/46c"`ʴ5ւ/qK{%LQ]Β=øцLgXYϘ(OȚ zLJ,yA]\EtKڹҼAY$XL wC/p4M_td?ޚzxc!(}`n-hw鐘sqԖH̟$533}\i >+G*e:Ko[Y )nuD$lXismhz}kՏ6$q5zJ왹屑aGL\94QE/[VeYnǧt^EȒM@K,GbgNcfȷ5.9sZ /c+{ABg/^ͲW+{c2lXcG?eͨج4>/zwv_vsWclQȻo>&naA끷JA┩^Iuc[O^n~|2S•:2=õהޝ*I;xqΞM~1u̺9IWaM P('4W]+~]ѷ GQPs v3fXdm b?nT]bB Lgj+Uz\5}[O q[uEV7<6^^(dg1;H}QX>?6{&2'nmeƎa?g̹t+pIDbc;ߧT&j7!ڕǝmw7O! {56.NB!ftOg/Ķ ILx?ɖWNorz1T<?5bf0f''~}2OĴ+>mA[%4빣|SV\iuLvr,/ǺNjz[Ֆ7mG쪡r|%Pe6Vq *tNQx(R7|B;gU]W~\f.*t]2g\zÈ.r' 9jQl쭻uQp1̶2q#ÿ'bm>, вB PVVĺ96抍'l]n9 R6qh{ 7'B?k7HA2e}Qm hq4,.\_ak81$/ڴ- ޾ߓ!Q߈nNz=jׯ]ԵA|Gk2o:3clŬv\ܫn} ;[)MEl@59`O^tgvl۟x/ӭg .Hcϋst h*=M ~ͼ:/ Udz"w|oz~41w۴ ܛty?^ezz_AczMkӠxm9k/mx棙448d@z'-܌̂\<}kc[X.7Ldo N?7hL]>:er򄄝OS9p(P-Scm5e-zĄoiPMoIعW䫁;:/S}g`9EGD(>ֵdnvyj yc֭Uw ־ }JLVC0#WO xw+˃ dU ~Pw#*+`h./^|6_N[EH4Xw(@ @3ht)vϞSfi r|3WmuuÕiOk:KiꗦXdoE-RRCgfI:ytS{qǚbEnv@沴2S_9R:zbAQ* ƺC֏iqY[cZu8˽pJ%~$ ԖD\3C!dٷ[kN7ʘ$KMt \L9g=t{~u!o)R,e]\Yr1Df Y]5͜R[a "bOUꍃv3ԠYA|3[]Ut"߆]dgoYߑYt/>6>LU<}t?q|9gNy;\ uϸw!G:˰f?^rJw_r,D'Ve{\-1nb7G7/M=~s K:Nj(}0Yyc]Z8f+Ȏ6//{͒bv5 9v ,*m`_W,O׽ɶWims<{veAfJ6 P@yP;P)+v]k"al:Rz*Y]R^{cHlOxa$~Y!)'Ejfϐy/6g;瞎M)Dz$"C 4W3'0VE_Jz<*,CsEYO$>;_?mP@/@;u(YQN Pw(ϦMd\TUELzdP?=zާ\}5n[;kcF $릙[ԔL+cY3Et P(@K8Z8Uן?GP`p ǨQ-w^L%gRu)@ P.r]aEس{ O V~(@ P[ 廑l(@ P(@ 22H P(@ ThbH P(@ P20@ >R(@ PBZ&6(@ P  *((@ P@`V!(@ P*0#(@ P(P!Uab#)@ P(@ 22H P(@ ThbH P(@ P20@ >R(@ PBZ&6(@ P G5yoiN P(@ P0A+(@ P(P ^(L~>%6Ÿ-(^1U竊ߪ*r@ P(@ P/ql}wWȗ?_S~\53ӏsi[]n}ߚo㽱MΕs1ӵi{c*(@ P(@<4 ?֟UfJ<_~Xا-嘱2AAqJm>_ZΘ w7j[׍57o0iJXCAe2(@ P@I xA5fˆ~ 7 |Ick/H̟7={#W3SOz,m6g3G0W[I{MOTCY}jP.aI4gnYaw8S㖿=` Rxgիɕ(@ P(@ V`iz}1$6keN5Rp˚ÞWz̶uwr`av3B5mwv-p&L2; }Ml+TXmtM2cejg(@ P uM&<ۻغd?0 !H$:RQSqHDB@@h"S7⯑i\#84 6,ܘ:)뺮y[[{Jn>/zϽǽlwhvy|7Θt!±KoL*+JNH꧟]L93̾M_CYJ/I_Gw_}~dVoEi*S[( @@}}}4B48e"@r􇰅Ybs5,7!}*yO c44l&mDUE @pT*cxv]]]^zCۓa\[/L~vbR"@qtIc!F?Y8$ M!4͢ 8 @ػw3 /~%ZIENDB`docker-1.10.3/docs/installation/images/mac-welcome-page.png000066400000000000000000004574461267010174400236000ustar00rootroot00000000000000PNG  IHDRl iCCPICC ProfileHWXS[R -)7Az;FHC 
ؑEׂ@".lIN9gsfe Y\a3>!I'l9#(oDn.vU96HɜvGa.Nכ+w+ !Ad1Nb 1NbKMt7>,09q|f;Ƒ@lo؍@ 񤬬S!6N!NLbci.!ryg9deЅ&  'cvCH ?9,b%8{1& `xÚ/ e2bd΄MxIxJ"<&$t`8ʅ93O$QFbf lpC]!gx07;hZ<ߏ9LFY$95gϖ 0v;]ZFNbMe'06[[F%r7lOМ9;aM:QCZZzSSW-CmZCu\T}*3'LdO,xh= TT#RcNCZ͚5ZZZNhkӵݴyOj`0=rf;sPGC'@GCCgXH7F@^EQ/Eo^ޠ~:{2i |0423\nhH((ߨ18۸ $dI)jjgfZiz 57m5D4?zmsyyy"ĢdɉM>7e.VJVAVVVoM֕7lh6~6ml^ۚrmlB۵}wwwHrpQ1qy'bOι·v1wp|]Sz]u]Y;\ݘnInݺuY=<8=yx{|e%:{wS/կok! 8`]@@v`m`C `jpTpEaHs(>AA?1aTԈSFZE.<E7}W11X鱵|J'/KhJ$%&N;m㴾vӋߚa4c 3gf<>K~k$BR\ޤ/pV5k(90yK ۛrKR\SJSOOsO+Ky*xҷϨɌˬ"g%e+3fϝ%0 7f s9MsYd,EԓWqNs^g:og~-YtABυ;!-[\o=K)K3^),(-x,nYsf_++^| |oEJW~+_,,)+V::eu5Ukkkos_T4w} mBmٶMMM!M7"fWe-+|zʣ6m%>omGCauNμOw:owZï֡v5uh}}70?Q_r|{j;x#[ҏ7 kkvi>5-:-U9A9QxbdɡVASm?}}j{Ǚ3=}[.8_8vb%K .bwh}GUMN]SN\sv7o\vV̭;ùn _òG4۾xOQ{_>y)i3gϭw@_mye_{-|=f[5lߵ E =z~CG{>9~:99_H_ʿ|mHȈ%dIlhJ oj%Q/ ;;D f !RT=jc3F%'Z o1##o5 5U822ud.H.{X]|W_˧kxQv_@IDATx]U^f]vPFQ 쟁݊(v'(" ݝvwwXܿrN9M{<@~W_}111&r11g=Ѻ7NwM@+/L'_?橗aMeytgk2髣a ҙS!P(5e5u_{g5߆z)r)Ϙ.tIr4Or4<3cDTK *) B࿅VOjxŒ2;[UF=NKky>QxaN<݆]qSL `" ye)< E%e)Ҙ&V+/@M |WEEDZQYP~50oBZs^ 7@Q &o7!X9Qhj&c` mHAMGeZ]pqJLCiy)hErxʂʸ9['V|+ECsUx^<|z^@pTd’ `OA .2|8JL)* &o2ؾm;bIژ&8MSnjo?:f ji)83:wG^1v܋m; "$Zo}Lhv=H.P83?iFCN杲|}w@xP$lAEy nنJ3n#98?ʳn&hޤ?D--ŮЦcG,g5iȿU>UZ&;} &;yBA8y4Oѧ4_ljK-0^N޺{1jtzޝH/RY' y $,*ɉ)R`F*9:ZfV"R1]L 3S<!HS&A=<͞9xtrw+Z%5t-'`i_Y-VUioq"|61NhJsQ'gDE#1**PT\g&0EFH{ %g#F76lR`(Ŭ>ѼY=~>qAq#‡IZLN,;x\4< ԌkΝɽ'߁ףe; yoqӘFb|! 
#vbݓ$"mÙ=p鈋ȩ=Æ )_og~;7Ax(|rU4+lp%?7[RbmDSJUdzM ?E_-fBZӦL+oBz-,ΗKĻ`VU:]!pig:}52du];UN: oy'!eyxxi{:mO25Eh3\n5==|ԬišLnW3EazLgX9}7'z?f^a A(Mw0S?VH&BM&šhJJ/_ DVNLiE!p:89#;#tThPCELMfU\~XdX@ $Z~3HW\dkM>b/cE E]XPB !AzYdžx-kjDc23O8Zbn éXhOٶӶDrX4U7?WwBpU.kg`ZWcڌ17^u6_ȶjEy9J=ނɛ*ǒbn:ʺ,s|yuR͝E3W%2ЍgVD +))=($յ"ZP%-g a%lG `M'Lɶ 19ְ5H0 TEQA`{ g[TR(aܒf\o;k/}"~KS$dU$d8YɺoǂO1mz5/ޛVϣ5(tOMæ1-F|_#.eAlݴ۠8qdwsHN8iY8 f,3M6ajqd<2dBhPCdׯG5K`j*4AcrA?(UǶ=ď@cаԮ/V 1?\ ù:͘"t VV Hͩco׶ue')-0_+K ü֦XHh- |!J˛tCRz6 EÈ#Ф~RhlٰkϦ Aa;p(ztmU^ϲ}9:Y |ۢ56_VcKpNH=_M{ ĀsjƐG!7 \w UW.xy=oC4¼↑/\W+z R.N<` 6kmP-Pr"_eOnʆ)ۘCd&ha.;scQXHi' ,E$mQ UI:oQ 2/Tc:)M9@AK7SGDYk#Bژ3*i3eEiQ-ׁfKen֚GsM ! ,u'7..O*Τe*Hpf!~FKl߰?0 MZ4A}Pp1׷ꜘ=[8+ZAf,;?-^3S.̓΋4E0cBnt {٤h, ,ED(; 2Ұf/hަz>¡^ :|Vg`=vp4UKa#{Ѽn& F\2?;S9ħ|=x^,ۿf%6KDFއb1k ,۰M#bBLʺ:u/c֌Q?s2 Ъ&U^#TP8ּCO39\*'&xSyI1Fcxzd ɨr_MHytzۆV$ve? -RU^FK5Wg$MڅxI68 7 k:ydYӨBE;yI"4 fG}Q7ٱ,ES-#!(=W5]'cuȑa lr1ּ[n9:Jymԋnk<.:wnrnr6A ty9SIi2CzƧkcve@ mWI;-!EG\}YЦU3ea 25ǚ߶۹p8] +llhA\eu>`|raPJ߾"24g8 У5aV5nG߆aVuD­od, koG1Z@Wh$<[@`i$y-))ZƇs_Zź-ul8g` %-iquxl۱#;bwտ~\^eYnECqW?;oՂGFlTRZ(8g's3$NO Q}2^ d':C:M>5=9ޫ4l+Hf 6h57'C͜lZǓFr\V`sF$\h1ApY5fv饩ޫkhAyJ)V+m{`WDlF&MSK+0R>L嚱z% IVJ,]WO-ԡҴ#:b 0eA1x0>P\_;k iWYZ>2EKIFo~I| 4n6jIͤ͞8CzqV塔nֲ<[A!$N˒ 6jng?shިOu) 3;λW ˬ O_1q^>2 lZ* eyP& ZB4ڧ\Щ w|_Q(8ccy'lFq No ͚l˓"W7ǵCO7e:G;BCC5ZԄ7yN9A=Î_=;c_5Lu:qMCdE! BPm„lNPYV-UGX|K FQ^<`>2xycrl]VЈp [AjPB1N"ssr(5B6]n$zUÙy_][@,"4nb56+GETjk~ r5c^^<Dͦ o恅` qeJEskS:iOWVY¦SB"QquKsYVC;CϲOa:#jMp`l:4@,sr @uW/#Vƕ&,J;wlrwIcc3|k~yz-ym0폃N#漺iͯyHPO]_^x-;ΦڶhٌҋppG#;T^kaN;A-c#Xz+Ц)Y@9)zx)}1 s$ MBsz q : 8J_\F[u(I捛4JEnYi)2x 56cɓ=KBs{Nn][a \&_֡iApZ ٜ*l|~!ތ-6^GqIYEhߦvpJ5R Q+؊% ǎ}GӣZ)DN}d &%[x!&Pzُؗ&ͫ߬˩\R{y`*)WVi6XGRyqoH+|4 >QhT84/ϫ^5n3*ŘkUjᮻAј9}:R 9 Iq-SƩןqfȓŗSQiCfv>.H9/Z's@m Rn||%V/]_:=#-h<9ʇ=]Gzz9:-FNcQ/ݻwk2YӞzb?NάL/[Z!Hl-D\ȯsщš"_D=N z_sfϹplE@ PL͚#lҒJ#B\!w|&wH #"hDW K:kOccC)D;lgm78SD\ :\Eߵ"}ui彳ЏB$DFNpdSsqq,%5 e~Ur'$kv'Ў$(/-ҚAhԨ>HU"Q8:-ڈ8~j2F HX,jǩ\WB( tm#GPX\J~07n`&Pɶ_AN6__8iԠ6_Uz_A>)/-9E4IkҸB=@BNy6n0e;)pPEj?x.ڦ&%v, QQW nS&'ڕՊDF 9UɌ&Z? 
mRτغܬ]jjAp 411Bl4y7_~a Bki]lsi"k"^_^Ue5D /p/n\[iɼum-_9୊(:5NΪtuO=^hjw1ܻ3{w4|eyy)0Mp_vܩP`"ymH܆0V#'5 /)Ps503ix*PTp qaCW:p[D(6kJ˟Npd7P4~j뚥;86LqḿhLo|&i\W/kcLʐ0$i._ӵi@4f$fBCƬ6渥+JA-Q:d,ßO3S0 rf꼺ˣ<{yD|R|z|,WY' {jބ;_!FH;efp/l>eyxɫ o8#iM;wP`#QAy4BՄ7!&8yD!eړwI4ԄLnzF':yzi1UmfK" fP_\xh?o/BV9Ҵ? sJP(]_aխ0-1d48=W0;_>tgKWn`_zp'tS:!Ӑ-<ϚFF4yXP19OF1 K._s:X}*(w/O:-z,XoRZ]|^ʣkysn- _B@!8MO_Y nHozyLI?=rLx=#ie{3zlZzz^yJ˜F@1- 3Ou~~+O/RkNywxhO:mcZߟzW'=\= @MB58kj7#k=dxV<e=\A3GKO?ekGPs:ÞLZaNćΔ![ UiheQ*;Y^=au|<*?=w\T( ]L.N&WHyZ5Mw7GXCSoo^wFw^'߭y'r B@!P($taTR^jOE)P( B@!Pc~KrS( B@!kfS( B@!(&M!P( P B@!P(j8J` S( B@!6 B@!PpVHP( B@ l( B@!(7bO!P( TP( B@!PP[ o ŞB@!P( %>P( B#@=B@!P(J`S}@!P( @ G@ l5{ B@!P( [ L.'**+յ`1PYQ˥,جWty3 6T?|ȧ8nϊ BBoZa%^U>Mw8N?¯cw8~/Y|ր vO}X-5-L$-zZ~UY t|kfx󕕕BX95.r(JمK6.GL '*[)?"pQ<+A@?NO',Ԣo?w7~vZ5&p7E9:GZDTP`{ǰpA"'٣ㄮSr4y~NO[]5cB c& !crZ,Ep ; | W,R,z4BNnN<8|:}&,Xȿ1~}8<3'irFlB>+$DM3͘X5ӾF|$λ'>k;/X a{aܸq1{.?,r_k#oz{) VMR ;Wccpv?Ч޾۞jݫ"y_W|qz;}VZxb̵ *<+Ix)mH܉Ǟ~[ˬ8<U+B߁wIB!~G8nݍ  /K7%ڀRlڴ uCf\t-(EJj*U[x\A)H"3; ة ж\m- EJm;U_d P4{N{)WX`V0=5rLO/^4e@u^q5JEiiH<-AjD]B2nA1hVIWYIvNB1yMc(d#=3.jg"9HaqйҟB0ᳯ%ir\ܵ6V,ؒH!l?R[3awɶYB,p 3gdQ3dfdN=hLٕiزۨy,+*аv8vTVڵJ] K*:͕3w>~ K/AL%p,tmr6ʧiw|?jl#kP5ܾgi f,DjZ: }0)fYOY._y8Y ExVeR(g*mdK 1sT#6[y9vZl^9ԌC)$md݆Kj%نZ}mZ}\?2tV:3"m1a2.{2i !B_ U@%Wg@hPV}b@32Y Q^rlQз_g~ï5s[,>}Hg/OZlq㰡Z|j^~ r ~뵩4Іmeǽb;5sف?k0輾X/ Vfp?ޞ ?3_}:;fD/)RDks[ڐEyk|b6>y'oͣFcOz)ũwX+Ͽ>yE-c@nP50IF`N3~Ăi&aG1c\pVc<0f,۝37,_| Fݎ!ϠRγ]36ko Φb÷aռ/0Aes 9ns,n|j) tuX}#neVQ~3&Wwnw3ΑhlD#:!&d[5qh|=.lN7e1xOW⒥?`F"bPEfm:D ~1jw x-j]ث3F!v}7 ѹE$T&LD熑(mޘSQ# L^>5ns'ٽgH' ==biEPxZH+~ܝ'Ϡn,&^oWM͓"^1qQd*xPSMW~ -r قIJrhw6n?phaeۓ6ol^Op>JJa͹Q=|^?s7a֜yh{uxN k]pN|L q(NvaEPZ.|ƍh\6Q)ӐrDž0qk0gʇÄ P@VE#9g7f†e?@mjP5IJͺvCPP|:;M)jEFZ>^5:;EέPʅۯMS"쫴>=`QqvBQ|y<hAybI FLjvIb'Z0YiRDET+QEWr0 ;%]:\P3ʊY1,bq,v.Nu5hu#CPJ[ sG™9H \2 rDգ-^WitOh,+ q(:6Eq~nc7Ĺ]hBا6idCF0MPR\ж@t9"vjE6A6 _aN+7kۛ=(tWF!"!ۙfD8=Y{f4AIi)`t!!-3Y}i?nQ:I9‚ѣU+|]? }0 [Q)#-VЄvmeEH*@=n7 fY+,l9.([4F~7+. 
e#&ʉ=[Ƭ )ÙWs>w 1xlG_9˱ "ĂWi<>`*:># 0~Gǰժ\9 6y`SS8T^U񿃀T=P;=|·imx})`8FכoEPDS9wxց(*) U+~C~yOTlFq5YC) ejL dS`J> q;6p*BhA@^sK"d;Z8iCȖj1I {#0!Fc!jV9i Dhx؇2hkeZA w5RjFb$5Ө{ _ݝblXф;w }v-ڣ@8w6֥g",dJ)KFPNᶬ8iİKDڒlS|6 3^ch&MK0Gp'IK!806<$|6TRlܸu[)gH і`[yڍQ/(E-.-Y+2a/#<<8cW~ _Vn[`r!9/Ψ0j.N.LkD? ʆ_Ј 'G@4FQ;:bV4t>béiлW/'¶.aq8o{,5 hѬ |IyxENj>xW?-Ix1qm!b槯۟^|?UL]=\ 5`ՠB ixLߎ?EL& 㝗oW(^cUNB1/ơCGP xv4LMW>gzVr?9triK%+?0cc`X0+$ػv.{Yd(0V1sr߿;o=>ạ]QřhV}QϽDbPϖQ$xGXjhVן@xoaC^fcǑL4oG`9JeZ:O?|?+w$Csq#KٮPNIM;,"Ԅy>B_Elr8MڝdLv!j7D|dyyLHDH)K( un Mcn{!΁vI^hБFÊOdžѣqy$6WXd> y+3Z}9>E+BȮkoFMgȍ?A |. lw`5jLtq;+pUzy򬨧J.ncD6Y6 &"~'|ӈ.8v#t_ z<;FmKUSK_Mr^1U"3<n s0nN(>]ћi6g0 {:}{yxbC8.o+U퍚-vkLlO'y0x>z )ۈ]ppd:h׾ i8o߹ m _a?(q mp4IWAqP\۞x{p/Ǽa(B>~7"cDr SKKJBvނ*iN6UUb^QM! ѵS{Xi^v=M["2qI;;L3жqDe%tsyVUI!.tQ C&qa\KKF*5-[6Ӯڨ(>4mՒ\Vnewb`B _tvooQA!})uv g;nBRZkEs.ƟF!ĴCHJEmF{$Z+4EA@&y%6M۠)ц-*Y*V8{/J3ظzШAfP^b垸̤ض *mfmЮu#n19v)%ߍ]vvp˰E.h Z1iϟ +ƎPAK4IFGin܌4҇GYguDpP\sвMszU%¬8\ԂӱY7Ņ;!&O=FfR* mUڱҵ3)Q)HO<q2qh6;ń۷n 'ڱiY?;uA=44nucB?:!n340zN)#)hAPj?[xGheEOFVmlla;VoP["Hda߁XcоUSaӖ(*'۠Im܎&5ڵlq2'\[rtN>g[낦 jN @pd=W$+/ B+:zB\&;5Q5r re$N)[aD:ovݣ03Uj vI4,J~a@vUeAG=e;M-R=_U)u.AsiO7oĜ?/!oZ޲*+6-#bO':~͍_˗cArZ]5ʮˏI3M.Άr$n?^+{٧h'm}ݟ?)W9%[XO!P( 'r B@!P(5%Q) B@!B@!P( jx) B@!P(MB@!P(5uqn o C ==]@PN!P(reRPPԩ]g? IE E-kԨG~Q9B@!P= --O/L l:`M@@4kJ` xP(!DiW8eWhAS#*P( _(F6bJ!P( @J`B B@!PHV#E1P( B %Ua| B@!P(j$J`͢R( B@!PتP>B@!P(5%fQL) B@!B@ lUX(B@!P( jd( B࿄~Rnw9:=֨uU?MU3Eq!PK XѴeo !6M1{wnĎ=Ga147Bjڏ;iR9qڨ4os6wmq 6ź5RV[m ZP?-GCF :;mFE#4\_mFzF|c/RKGFӇNm}%qoXOxph,  kM :.lY?\e!17N f[tyCzzj;/c8ظ{ Đqyta~,aOE}o鹥^|9|=I)ڋ`|v]:0S_ZNO= uCÒ$' ?|;{ɤ4≨>9OY7{Ͽ9 y%7۰ЭMco<~s !,2#aM†?MZްGA5¢?fBw `2[r |iA.m^]^L`f :fbFg5sӔe94$īy0-j`/l5,;քa?o2rlI۶I\xh]<"=F{r<DeE)ۆ.LR~a+H1 dr Gf-Ѩ^I)-GRR2Ja~Fz|'Lb=u=mrM͠FŠ1hҸ "A!JsT_X4M GCVA%ZkjpsΧ,#9Gpmx1fP'ײ`r9ؿ 25Z-[6Mቧ4Ǒ4$FS4B s2D]!:"e? B#jk>&0Rmq4&t'd=OOǽc|5,:=ge)ߏtTװ(׺u[r~BQv% A34>^,UT-Rvpf@4S$ݶJЩT=HNNBN^!-"2 ue;^ģ/"7fԉG*§K޺oZƝOˏݍ@w^~ո11c&7Ug.&< XV гqgyKF¨. 
KAJ[;l7HN 968<^|I 7]kk`;0[-'aL|-joxv SX-J8Rh\3yv8iU: t$ [U?L_}G-&k⌮QN:7= jOAY9RCGb=w{z>6}SqӁƘWhl[g^zWAVnV Zݷ\0G??^+eKSeQXqh^-O ^6q  APpp%0/u??Wܽ=N!><q%]X0#mfdJlҳuѭxqV˺-& y!;/JA6/= G ڕ7}}8C'yݺuѣxХy`?x0yZmz`Δ,Ӹ $y/|]DclIL4_7N)=EťpQ` GL\3rur3o}%&h]z{uSm_*{zo?{Ȣ>v蓏ؖ֒YK^MQYVEiƹ䗗/Ì[Z()#cj4jqic ' LxйѱT O!(-5:i.vWcʭͅ=~T:8iɉVofN݂/?l:O^ s݂0kc- 2 =L"GXuTN.2fC9 拙[D&WV]e'33R6 ]]`.<ҪBp$>O.&n_Zr"N8kcԁ̭0!^C&XSd0n쇱o Gq)yOIj,AZ_̃rV%lϘF&~&JLE=uc8s'e|[pynѨ~u#_|\*bߌt07NFH:jknп/)9&e+Hd5\)Z+b(f&Zؤ;wޝ$ֈy'fՙ BI2պzvaB*7beDBWtUbGSkkz&ѻ:SE?ێ&Zŀ~aki,MqS?BVST@Y8D wr-0}imb^6WZe'8V82n5P$nZ )X|X#6gv]S]uCk?w~'6CS>=ai鱗1~,ڠn&"THEǎmJbS#UCd1  F$ng*Ⱥ }ifѧg)!6=$NmY/jkjwWv,܌uSh!}n:BqH9zAh5]c:on`)v9dh27 WW}qQQ½ F¦HHo^#D5"WƀV-ʼ<0-W#C`ZiaGqEdUlE@|į?lAAS)k _wBy[27r &\cq%X9LIydzʉx 7_GS cvPcz^F 7!$ݽճ&@[ǟG[|M)K4kOLБz8z(K{牻B3z铇1rr3p>GxF kw?@Y#'.RXD/j7\pSGNzny(?C8z,@4T"3^fb_xUmʥz (hIB[ړ9Rn&7%NEsCJ.,^ ?N Hgrl9 v-] TS@#"ɯDBLΨ,RNo)#&  [ʏ#d1H)ut7ւ̊6FBcPLtBkᄵV"QEPdqL)vWt4ayTu6Sc#Gb[ wc \0Cw*ar>>/.t_vܒKKIQPRrHI^N[Yi; ZDraԈ7OE >p }]k5'/]&54`ZG!/6@\WUXmlnXV kEߌ;Of^7WVFnKAm")#c9VC54֔hGú,TtX& X"jUՕ b8uPa[Iͽ]qk%qL668+(4[~SoGODF[>* ѧ~ 0կTFQ Y7PT,ra$i1Pm9<[!Ȋ$ |p{āQĨB QEbGK6S]ٳ#90D,ȧϯz(36r5|?5Mr&5E-~XE;v 2OK)sk 61UU=8;,soC[@s"JCPe'*:fdJYywt%:k->cxvM"?׀Kd!)"H!0ߠd%t܏zM!,.r*8-(PYeK%KO9m"YʪJ=:vj/+Yظ8,7 wuUZY,!W}6-Z`?l8p SLe4XKX<}_baaK(6n$Tsybybܑ;d A>6-}]3u~vagLeMN}[P'CC'|A>Jb/&']]['`p%|1a8w7*w{^MuE.V,XbA N N^hb]{jy.vzZSޟu%7"!PBϦ%_SoAvC`'NgKv]T92lb(+ 3g/xaPq7N-T8v-BY]5s!YAy 8f-aM}çIȅ1ZR޲,Km{w5~ңx5u1ZJž?a(e_@_SA\]S#o0r˓<=;L@2F17- 妃aϑ 'Y(V W]sf#1_48ltjcgR )$->Pb+ƀr4B)%xْ4qƌoS3Y/ɕ5+U^ZJDV#11 5t=;RD5$d*ᐙN:74GU;G:D9&'At.(!%^)l 6s&!NXF& b}jgǦ)cm!"IKSeFE#0tVb:75MxnyY)㑐_b򸭇P0%#P\M8oy9·yr:MUϫ*h꣪8i9Х~XBJܹ:z,F7#ҮT /-$߄/?LK4 0v$~y]ʳru <sI1Ÿ }~!Sp藛!p;>mPx )#o5>[ r嗥q4[Gb/gOEfwܙ:DTؿR d7]]TTߌ7XJ3bis sYFɜq-,1Q89+\eA"FL!Y8|ZtZOaV DR-D\)o m` cŹykyq9;S{Z:B:~.3)³mN8|x3>z#Nݓqq}|/smF+ 7K`Xnrl};ҽC9 }0bp4qA #dD74M]iN DQ֠y|L OZ]t&Fl-#LD~x s};%+!(N~7!}rpl &qn'w8wCpZj zj^J~{݉EK{7#FsG+dW"\1'|NeEcᆥ fu[5? man(". 
dggs΄gspcӵ/|0J6PsrS8_FVqKs tuq=}9#CҚk{*mY6331ks-: ͵usvƍ֗Of7M}gggeqVvѳG1\AA}8gmiq^m8 s}m>E]89q]82 999ss?qM9}CQc5۹GV8 YXr ~9}͍{{".LV~6kȖL-]rBse%䜚:s <\9CCށ|#_w_T_VVz夆BS4ď%6Zn=ʨ]IU!7otpd0uKjTJ9 XsQB]:M:s yU25G i>/xNZ->VZ]4sN-\8sPF[90@H{]K }^`7h7bߞ_ʵ,ZۓǶwM).V#~s3bRfGs,䁣{E{8UHmkAgftTBGA+'3!EC8cXMH4rjb|姸"dmm &?1ҊI4:grȰ]~O9T8t{1UE\ui UvH,hKkq EvRVctgVcXO$%dں wN^pp6l*X{z>J(Gޓ|7e=c2ihSߡ8v8F+nD`]HP!7~;t S(Z/I]&+N1X/ga0:kqN^d' &uT=9Xƞ [Kj:CH7rSKhz ]'>ɦV:f4k0Ypx_T?@.Țʟ.xpW>rM{^ЮhuI:{Z t}aZ!xdXxtn_Yzג0hx5|Z#,nG{bn.&^}fO0]k~7!'6wߎgg'4ȿebskڢz/K` ij*rsʬ(Al\"Zdook]jq< BY7ڴނiQuOb311AAm!:ґÒh룙ScC2?($1> 76tԕ!g$+Kg2 s+[x" E:qe3 y:ċ,ar "E> wBX7vz؎rB0f`{"*SI9.y/iktI׮ /F6SJ}VVUY" ŅУ /Ŝ6rH:k:{@4^2hqB]Ch?b@-M lFv :b > y^',ųB&ϠtB%ڔ9v`HxY{sѬږ6\WIsr1Q+ZkZ+ \<|\45""~KJ`vGFF1zCQ E?g5&"tu "(]u"zg_Ok[ctٿgl]eH4ҷ , Y9u"p3pkxb aEؤu1X3g%QSy_VNΊ#NSa&]8)k>#v_:[RC%qzFV;DMQ!SWJ Уƀ8|y ]bfmH&49TW]J.F62`9rcny*Ȕ9 !D 1 b@Ā ^)mF.Zz;q~3M T{1{ &Q!$!s&=Z5HlfwThB">w vc҄){ <%Z#g,À`/$E`Z"\3)KM}Ш&}Vb$VhwG M|nJx8YcǦ:KdXc1}Kb-}&ݛġ DN;KtPEVdD'MYY5 |hBH#1 b@Ā RF1Zj1Gpr#FG8M Sױs OH!RttE摲uFn,mHJ"ʧ[+sdBBbNGFDpɨ\]: 2Ffpuk]uR&q*IQD$oeeC8"GaHnХ DAi%aC'EeŠt(L HMjK) 'O8) #AYKKIi9*%/3%BD\%G ;7/HՍe8\ޒd&'$:;B<]//ƅ}YN,"b@Ā" .WN)Z "QF70_% ~f>2'{f@oubKϦ8̀-(uRF>t*0W(N`(24fXf%d&kӓ#j ;1AM@IDATSv1$UP4Ƶc)/O~kP#:V[qL4~oH}bE 1culTMK@SHTʽ0SaFH)`h!Ty]U1*W YT29fX*U $ʫ/3PUOY.#kWMb1 b@Ā"}x:l>=1 b@Ā"D  -1 b@Ā"D 4`k=b"D 1 bǀH@l"D 1 bA [E 1 b@Ā<Db D 1 b@Ā b@$D(b@Ā"D 1 l;[ b@Ā"D h" zD"D 1 b@?`߁"D 1 b@@ #&1 b@Ā"y Wljt{xN"X],W`}BjlgcX9ֶWmxD"D 1 b@ĀwE/+A5?FnQ: sKk84R)0ҁCyY9m--0wp߂-DEe ut,DŤ:ԟnS0$<% &6PҁCc6: T&Afr4R 9x4w~ ?rt}YE%uNp b@Ā"D : J=aUOzY9)c "%().FVZ\:Sg.!j*\,..KL#py܋L!(i%__zn1-1CW.E43'e?s%R{UQZ(WryS*کlSjjCCpMh@|*9F @n2JdD$H:~)"WHc}VVSGEAΝ:R"^Rix+b@Ā"`qjjjP]]s$V^8i]T(\r k{a芒"WBWz:!x"U/G5q )Ľ(G%K4 ܱ l *[ iUKC}eǷSe%(&5 @[C5U]+ J Fн) nj+[]UjHGC#hk,>VSjG GQM4'!!-٭=yGͫF틸Y-`OBFXUW#>PRW'\L<*!~Qgj%cԱvWULR5KEE[UÑ 1 b@Ā".^ V^^RZ#H쨯/D(pD$1ў ]ᑒi((񂹮Bnql 8'QBs/x6wBY^:]lPFK@Xi.Z}*E-qUظn\RN$(-G}pi ߖ r awq/<U_w@alEFfh΋"qC=#skx\/sydx"09˲||f"D 1P3gΗ/]UUU(&% Pc 
n#ܞX>yN͜D<&䠙KSdǭDxC * EQcDCxZ1:t '0FVz: m·Ushq#"*,ip @kTd"*) MAR ء:oKC L5Jqڡ}p;8Y 1* ypk&:g䠹?5#@j?{iaasv_R< G1HLL@rr ظ8ڈ Fz$wV e46n 4Igy{=rV tiu9BomsoƚEP/$7 DŠD9<{i54yiɂEv:l]{ۑF{1h`/p_WS k U(tmmŰ2{*Jp]YA3d&FbXX(+/]Gl" 4"+&4h/zChdL-mC:7q{eD>E\\,^X{K{#Z'^8O.umcׯ\FBV zBB}9z R5}=χI^exxe1.ӁN0Q j$cAZ]TXE尶kDrI8R$HCQӦ=󇮦&,Wz[[!k#>Xۻfhڬ q5G,ܠFoU9Ijܘ8}SfcZy>&03SWցhlgk' !n&R@¦_??Zf(-($.m7 .΍ .Cû8}8N@*dQ}ejhUP5)R _LN)񱀾)-۴3;)KG?p?|:# ?d[\.5i ql=+)EVv>yztű?=hF"wPhX*)"}1Lis8Nčj*ph&»yiU9q7!!.5m>.I'5k1d7VK'g%ѢLNv&]H]5H+HL_Cc3~cRE%3bpgm4a/T!t( A:B{_Q™ Lޤu{q2`vZ K+M!m|tc@ia>J*6ofJjnՕHʂ!M߹feC%qi`#{UwTCOVn> L,x *.Zru=~^oAC.*5m #njj:jHޖcZ|86C&̈́I)X(.E^A h̘++*)![Ne_\CK}_>RaO?`]G)LgE5lhޔEKMcԂ,;#^.e}q㿟0y.DjvpSovl.ŤNoPJmp[4RRFE"FJK5*r!qiwods²CSk4Z^\Bb& 6:6quL0{Vi%c;owgK;)JƣDZ45v$RBV- &#r2Х:tѶs G:{f< (e.Y-o.O9A5@,`D5ZԑZqVEϧK`Ll3-aiiIq0K?/2z{瀏S\%&"~Oˉ KZuGAоnfDV掃u8y9.8irɴ]F|V!?dߛfvN}*Ȍ۫% ԔbTx睾6.Oj/tBn]0{ɟf ҇I@z\| f#9 n o*߶3q*Hl t jpw'n#(ʌ [1H.pa> cᩭnǩ8s#==ѳo_[ hAA!6s,\cNIPFO'J67cMV?F>^0(F^ޅ=;qtl|=k4ZFЪuWp(\HK6=o<|Zzx&th Y]"7oCVX3\JR^_xwS`j?_at۵?{yukB77@aF4}KcI+QFJ6>:R_-m0m\#JЁL:UV lv8U>?-<ۑb~$P]ZƍXH(hx tw^8y#ʆYS`_t}޾]q.$AM73ޠ+KtnClfΚزr~%áPH~N:˓&Li™h <9 .to#:wo5>r $'zmZi#œ16b ˰WhCfPP;zȡxd,W^}9m#+ ̜> #c9D8}?|ܡ#:uyGM];m~ߑbbzhzAFym?@|OK]:>μUi_ߠ1퉹k@ܭo]O*˲?q!g0O`wT. mХK'ѷ1bʧ(y4n}ЫooƼ{~zɾikk+?F0$^/G]A2d.G8{8poA;&=8$3%4`47CO!R}Œ8fyTB;<%9cCYҖQiO/%#+3*4cB@!wBHϭi{Z$$QRmqob }, iT3'&J3qm$`E;\B"ғr>rɠQRe>ji6Yȱ&THul ~Z1 nxWȨDf'cp5, -C\2ê$Qqĕ吟g`oȏ8ͻM|g|gANXlLm환8ZtMpv?".y1ԉTcѸL}},lG7`ޒ_;p\8Cۺ ūJspygftxbq -LNeY"$ьq# ;]J-`;^aݬs;l;9sN݁ BhMVxpvu7F|3Oa7b{8x'5eO-Y SLYL|xYch`Nj.sCgrL2Ŏ0wd/زD$\8q ܄{~ġeb"c 0+âϿ;3ڕذd=DW IX?otG|p *͜1iKV 'C HXJ^"hz ߁SU%&89i8zO wIf~ETj>47Ӥ}(oDpM -ܯ0js{ Soҩ%js2|8}ZuX1c?m`Ae>!!Sp5" mkḚd &rU1}Sݾ}y951.^5Vrcfƫ"AщX8m .aZǁb͚E`8x7Κ3c&IsQrt-]1`5:tMAlj}8v8`צX0KUHNMť3goMmq_QDeV]RXqꒇX]VpAYU%EV~);S푎6/stRh0Dxp44{7wؒhL˴tmEiEfUͭae@:. 
ٓ誕Syw{*1񫾙5VOIbHu]մdu+D%Pjnu#i7oƍKKJ֣5GSR\d7Rci"D$ee1ֹ9I,z"NdqۄDm#q<#Ǵ ,hK.@Ȋk35Dky֗"m;o-bSsՍXeĔD453C`L8j)_'8-[U$B>b~⏄'q 1-6;a6SBR 47&Y\;S݉#q JdEB "YXg'?ƶ3w(ID v8J>>ޝB6q GgSKַ]-p~I r&wM夈;8W}@3h!ݎB8[OZ6O2<.Z{$ip8Ф:q>h;vĒf%_¾&cjE >ƾ3 {& bĞ*F\nAoa7rqAl1~ⳏaSHIO=I3 !R0Mf#H* cna_+' k9OH\KqfdI;N%gLZSM&1ƙN#qp5? Q@D-uyH=cvjKOQz$.^ '/+E⋙oaʆ 0j p )~n }b5<]+ׯк%~پ.$>0|8s*ő W]qTodÊkhQU˖J+m$ ge2iĞ`LM#C{uoM]W!5(Ff`)ёaz&%9c˶ hLDh7b\ v$w+l*!Mn_/F#y.,/۾Ӹ8 lSgfxLX#.gL]k3j1o?8;{K4gYawo#}q8:Mm8ڎƥS?-_m q$6g6X`";Ç9"&%@8:  mC%i(]>O$*%ΗQXXD Gi024 :ػe콇 DD GLM+GfC͜ =LmR˭((*NTPޛܗWpUZs{sI#7q"gwEb8m|h3$I[+{w ֊L'Xm[ĸfe5fC5H ~*ٵS Hk0ĊmiD['vdEL-FRz֌6ƍ{ @#'v!iY}SVߚ)k7͌XUI k{WfBT!m[ju ;iC;FԃT *_N[Gvfb>ގu_D%>.Ka1DAm~<&,-]UsQ}7X Vl،^]˖`Qn w}'sYQnMhOI;#N|~~^8GL[/?"m^Pq˲w}p=!_aky IN܏'M4@aa/sJ2/Em^ӎr4+ w. {jqbpn  nVTO105:ji4I&/VTÅ&M3%8ȍ .P?TLR=qMxz6sJ RɓR~~\8+1-x 8_58))5U{Բ;6`? VXh+6d)9:/̤tKoS"_@iSj~Z2M⡎0 K_u[9%W`ll >!wsm1^ +$ݚ&l|%ǢpMDŽjj7q0=BѣŠ6ҺԲ6Zb^צ=u}WĨ#ZU&HJQ|V}Ӆq2Ġ#T vNnY**UjVjhÛpTR2N1_#CW .,vo.$9[FpuS\gXW1rUznE"6(K'ņX?n_I/^f af]A>^Oq_+_ì4MY1qKn&5`aJ[52!Wͬ6(}pppӔ 9:\Njf-N'81-]Ɋ}(YJjy311=1 ֞N6ӢKC`{M)ڈf-ܼE u[hGFһR؇ \EZ4.j,2W@+pbɉ.D&812=5 %1V?RLjړ\T΅3Py-Ci#p%3-Ƭ/ Vo4R,~/gaȮu8x꒺ڍHZsyNKTWIiT,{ b"R]ɉ& lR&\ 50uxl7;A[<_ckGXTr =&cO]_XRf9*LP+[О2 ڣ: N["ͱk>Ǧ͛ǯэ6(Ue=Cy %K#%kox0 be6~F4h« c` !ʊJRs< 'a-KtR[Y]H߇`mLpXd# &CjMJbʸK5Mɬ.<ae6VήӌCJ|L6ݮ+77 +jS{׼Ҹ)֏߬wl#!jh8qa6Fop ?>~)rB5vZDN 'KzNѪypE?N\E1鉡mSj?*B(r~(uPs[3 'Sp5:4 *?OPSAG_zy㺈b- <2"p~<]@iwV"[SjL!(!#{4V{UGRcob߷#/Eg[85n U"n徼ڴ;|b} 3`\O09rcb1SmDoҲ͵m+\{1]Zj|SZbv5 ͵#>«תyMTGEJ:xǿ׼¼'(w*>A楚Bs?ZgOC<{i;LuSl{ t2Oɏ`lDQEܩz,l'0㍔Ӗ߫S#_kh8۳GSЖ냡ښ` rӷ%og16/S.߫ 9 ʼHٵg>hu4=0gC;}c&BaL~y2Nƥ8{1>cXuTKoMo>a̘2ixm:D-3gSZڌZA7|wzd KK3c5/ \xVKA 7ǣX!-YaB''xs`O_"G ̗~qǠ~ EEIh%ߝte(4eէb<>v[T F RLԉa8>*{!uנ:2%~^"rNq5&H6dT2}jcÎ#$,A/1 W[29v*XruX5EM^>|<5ďHw)I1] K ю :PO5C0,v^6<o̚ʍh_&\{yr۴Zʔ#΍94pƴ1fPw3[fZ:;1Qݪ_޳b<4>!G#DăMYx66M`[Pp cir.\AHXZҥ]0L7oGP{Ь?z yDաyk !m2aֶI,Q-:9 i@Ai:QZ>p*cUw)Dz}/o/'.簧1])f<>#L"5B'<0 k߱LX@;{ԨZ /*Zw:p6Pc ~%Q\P 0ttlDQ!Ua f,vT:omZ- 2 h}Q,jĢcppk 
REDof*cx0`0w.4߁#а'Y̑z%Ը`±ij>GtC4t\2|^a}6vl߉+嘲M,: u(LpD_ٚ y0fKOP?r/kо/s~=xsϧ?=+vtG@X7rz~T=b F?ooval0d$Pch?]GO2"l߶ 4`"mpLFEѤ hDDj+^a@mO;CTah1p>GWҼDg(Rbhhj1 nc5.\J$#*AM mEhs[~="@T7p3D=8BQaE#?Io$=z3ow8$2j,8yr&FVX ggIU( 4ꏪSRnmg,yy6m`JOoSXyk[PwYxxӞw4D gIi }0*XHʘk18@LxgL% r N 'z܋p Z2&&lѓvJ-ҫ+M KPKRD݇+ux \;ҳ=:4ø^N%3mk=N;7b8H7Q* `֗sդ2vD|xe~t$M\jRN]isѴTݵW>z2_&BcaLѩr OH/|@8zӖ&1Ч3\'p^1B!sp_zvxj C0}həa_:`K~Գ@8==-pcC؝۹Ku#񊸞7~TVUՍ.w?| Zp[ }7mmxW1 F!m?$`j0ĴKt OV~UA){}e!FM \;-h/%L@w"}=,=0| K-0zWq#TP&:y$ ޸ DDݦeCȋP/0ow[9\SJ}iFOOF2tG \5P؊ "bebAPtλؙ EjagL:>Wnk>)KęXxZS}IDSfz†"}[&$jۃzjDS)8-)G4nYX RSSixS-ά(wL)&s5u]Ѣ7pGûQ@'^WOכ1kmn.rz*5~M_1k¬z.ThwP TZ {Y&51Ĭ =ìI4#;띂K[ֻߏe eo a3LQVHkD~P"9F@7vAN:+EƷsr?&7) SEYܯL\hk4Tax40}dt hp 2d '(pȴy/S,чJLؕQNj2$cN4dnNI>ppVẹD!LtLJFHJb߹y1X^56WU;cQGLJSeX_%y8l5 ѣgO$՞n_4P]E3T_}m93<+5yxbh|~m ig fIヲRW#N݉Yzཅ-CUoY%hå?5ַ̧٘Z| Czq2'msiĆU/aLWƘ&$]o#'Rvlj8qzl?T S@L@t{mʫ23h Df`FqW}2u%H8~UWx̀Jp"!N 9foV~>$t+k!!.Oꏲd|f:0%f#`$va_Бo+73M?6XM'Nj7.';1j,NMy!>$ Tpkʲ1q%r'kᆞ 2d (<I 3l8O^` &eNa SbfFe:A+B|KƉ'AAAdz׹;n:v-×~AN!x{xjOcDQmf!Vw8Kh-Ҿ\[&>Cݑ;>57Ꚋ Ǝ=i0fuE 錎aCq!T&Bz &q6=1g^죞j+%~."bD X:s#VcwS͖6B N:߻#t!M ve:)tbvA0Nd*L2gӕvmD; 28ѽt6?go)26sdΌ@Y(?՗>7;(= fN dW(+ːD閖#FތR\HOD렮p1ơ=1@:1*Lc8 "hmJ^_ j*ʜqCG)8xAQfVAdoiak_. ~ ?&{_p۫uAiJr j YU<ֲ)ː#{6X8 # #T1jB=hELf 6l+) }[Kqh> Mtw[71J#FX XxL^z \oqի?C+ XϚTVa5jvdГp)+(܆wVaf;+ q t͘d[S"Eę!س3v$7)U٧o ("ҊZZ&j`J΁ Km*׊"\PeK݉tP½PUQ){güI} e1:tcHɠ3?2&-riUk5d TJ R7"6.F:G8$aDR:EO>2lxhP]T*Y݁˾3xP}]`hV w8 0r>Kf0yNO]D 23W}I5ԳDN1)F s޼ΫrrZ*0i36~U R: GLB ܳ aΞLs)+h<'ESURcG U8+gԤ\<[0f"ҳퟠu` AСC~yt̡;ՎcJ-tT3oWD#Jj]=8_'Sh42  LȻ ܪ*;MC7T Es,/Ə⫍р-FDtB8We^&S6fBSG=Mk^tph} FJ|+#B%ݻrӅ|;F9DQR&biꨶ8d OIvD R (7iTtΪ'濹gϜ3g'@OF,Z<"iEJŕ{:AæNr?Rr~0EL \?q i픸r)iWT&X<*]fKHsd,ga3q0Ek*JpyTV7@CY3A :vԎs 6Adд5*p9r%'^V?:B'տ"/bUu5jhWURgXJT jȘ ]QsplL!ql8jTփ9%۵}GzV5L.JmKܙѫȮǕs3p´SҕL5Թ/N\FX3UhA^J"VYZ)pp.4stT!().Fi82yI6MdsYQ^B}#!tDME,pww;)=L9UaSՒ.W29 miq&WDۤJ."q3;@A A} VTTӈC{A4rc<#Jj Q0*4*adm! 
Xyx}SP-G<+cmh(`n:c'9F>t,M`KAxhv:;a}|,t,}}X&a+G{2 1 O<<-j*g؏ݺc`h/Dj4*IBC _Hmtc?|~64%4{f^M^;湡闳NW2d ;֛LiTw%h5%@(*222[ lE+ D=ǒʨ^i@ RD$]1p񇣑OC ]S  BR,[E?e$c{dz ;l};:Dr.GxT 5`cA?̵q} CVn~]"?W;&C)ki˯:x{ {AdpAGѦ:bi+u0"&r' SR.hX^1OAAM+)5!Љ jQ!<RM[[`is6333%1ב<v')@߾}G1c0~x|wXv\n<or Zc4R&8N %?%)ۍq;Bx),'4z8)+=2anNUSMSI&N0?8v(vf ;105#]*KQZ@מ}h`]qy²MnDJ4prCRiE9*g=4P^Z 2=BY.t8&A.1bi>ޝ|Q]cbq4;7 &U.w2v(+Õ)p)sNi `X2\d - {t 5pdxq.6**/J۠c̉ }8}=⪡aKQsa JfF05C>U-vТZޭ=z86(-ahJ75v46@A/B@pXRZ;k+) ,͌QK՚P}V#Fډw#i 2v$swT ;s-J-ѕ9K5i:< NOC\P|96Fzƌ-Kt=4¨2 UZh` 5s3CJFi3"q?In[) SS@0g| \\\8_'\oR2ljqSȏ4%a0BWڜ8yǩm$ddj@075Th?FK. 1FBWr.P9Nd)}N%@h uti_}cx5mՒ `T Z6sg@9]mijh Ӏ1Ur:rwʼn(.$r%Ø n!Ҡ HJeLG*RN8ʇ^7͠Z5!N4ޟ1M!S@L!Q M.268& oU$9e$xd`#k /VUNh+N}/[Kx֖ѣжFXDg ;.IĮt"dY8z$6m; Ȱ%؆:!kn%yX@Q#}PUio.l%- аϮKoc=zҺRȄt<3R*r :*(n2ҕ)BnY=D_[&S@L2d 1n]q[9_8=uК S2k wLǧ&LLקZ"$"SCExM*Tg2py&2̓U G ɘ04^ IW謑0h7VXJ/&()24O!k(=~,JrbI?8n=c+{8Zt*%mov=Q󳑐# ڎvy<*JjJx,S38[\_L2d s65S&t@dx`Rq>m&ٙ5]d`de=sR !%a  K_:{Ҷڴv&s-[HLMM[SɓFBJ(u+p`dxLhbXJ}mXZ"//']AwcXJ^j+`tšt&$} WG!MM|͟i ZpusVo*ޯZdR5)aܹ%^]`a$‹!8#'N`{%O!CICxʛc!_) S@L22JXp.R) `n42l5Շڔj)P8l İL# %W"6U"Y 0+cF)cCd&2 OFe$t*hKfbdB2V06E5%mz7N3Xmi FQ Jgd ckc 91Q_WA۰:bՠuO؈E1D:i fRNHnŴA*bi@HNAl 2L-+) S@b;9ʰޗp`m>ڄĬ=K[Ws$}2C_[) S@Faߞ؟+l$Iu]wK>) S@L2q ,q$e:d_\SL2d /Ix) S@=E4 =̿2{d ) SnS3gĐ!C_q.w ݤ [L2u T5 'Nڵk_gs׮]7(pÓ/R@~@fqaBFӘ5֣/ϜS@0\l~M::s *dzeȜ NjP_UqhcՅHb:2GQKd!-뉯mַ /kNH +`x9Y6?jP_ShvTcc110D[gcwaoxg]m:Ļs>zS{oqߺXK'[W;F]}0ߴvMyoHh_F|r d6[)k.%TF%ztb^g[sos!')X+b9 ǎǜbF?}7e *;AEO ],^ QX~!hR|!)(x@Y۔V 0cYs]b=g_[Yc10qd<<QV@pRH;.̇\󳑕WS k8ZIA;ȪJz V(FVn `ce.Q615Y#tkn>*zEmB~BɃ 䪮ҡg77k%9ɇ5Z9Hih"﯂KmMdgN.iۛAdiDyi14F0S!j+PY#Ci ,]km/f^Alh_XS 6c+.w3$>djXyU +%FMJ`[x rPZYK;T|Rz:(1̚Rn v$ }C.հsnJG~q,laki!PRlEeuz>5习lUY J+ai ⪪T#01y3nur% Zz||H + 3_B.nw4v)M|sӂj!DG7㋐n]?+}1b}1f5c>p--'ЯW BD.:9m>_ ~>>xu%%BLO&"937> }"4]CB'_U B{#8\ D9nn7`N\.^w6.쌾O@.Km~:bœKWws#p y5|L5p.ѳhK/ov9U>h֌0.G_x93h;w #$=F]2|UcW`_/B'QDݟGȈa' B ]Soک=BBqxO럓r }C{{@AfW6^_= EuX$?\ƈ=ҵ sAjEpvR?>/~EC34aoŧ Y 9vG8YYMBzȾA- 
sY٘5y(]&uxy`AW#<63'6bbߛ@t]GSY|~|wFxhuGEn(ƈP_y3LEa[޷!4Lذ@r9u+^ѽ_aߙ4\ =*= '̙4 qnSUS_>9&!E["_Bug\0qʳ9Y;v yKp: .y2~-ŊO٘0t} '[/%՞$h/ėg(&27UeQގBީ͘t R`ҔgY'/g f0eFys}f="O#LY PY+t:~z>K^:%.'`BWGD{!AʑXVۊ8nC4$iO+1E_`["10b lyeD?#Z6iWrIg g"y,n#6 G$\I܆-obbDk̝5h!1,5e*$~*<)FYn:Zn3r/f끼 ´cx}b?z?3 {7_lcط{$~[b3Z[bҢw0;pD6&-;_~EVa<2i.B'-A:@\tpЫŞ 1tLZa^=Zݛc'd>|V-3zԭ͔Ր%jaV| c5(mЌc8F>pvDƖ37E`%i=H}v14w(1ڍ5XwID7J W ! Gms&hR:39Q]t̠0#Gӷɡ'%Za t?B+8Ś%t^V}%|gfIj*QO.J__ؼELta>/LuHH<Kp6ʭ *ڡPlO֖!Yظy=D.NV{T@|/hQ-?FhI 01RZt:rc+4 41vP:28ڈ⢪ m.voA2])AqsM (פٟTIc%~W[OX#(hdږKjpµj'71vjl +2k#/*gTQiҾq.TUnD͛RSed`_nފ7]%/!pb}"1cB*JRvL}r6m[[+i66IusZ(O5pxѩ57'&lhDž:=VNǹW|QVV-K+ڞ xbOF fUM2TbXVG#ml4HS[E j`?>0;tv`gjWØU߄5Qꢲ:6|'}{ߋF\$x搜󵆁ܽ=Yͬ)[zp,*hҞ̘WY4};N؀\]2ZkOW`ߩ7->El[O ^UK{RwY7i.a{-6d 5 ]+cRND;%.R}.\()TLRF'cq 6FZxȼW\<aX#Lj5E=3"ʑ4) %%p0t&7gNC[_ ?Zvjv식bp|(IHvHMG!qj@Aq)w܁Z| ˊ)$ SziZJzb!)iOQQ)jCԡ J+MMjN0j\pJʩV2`:ziU2DfnT'Pn)gaQ TЈ]uMt(/."z龑^s;3 𝭨] Uцi7.rTT+1 p| 5Np8W6{ɩR :'kE2ދF+jZ;bjXcȞ mqذ=>WSQ0r chiE\m<>v( "~&|}(;? eyWwijķbD7LP, ŗ{2ɦU^V#ȱrԉQ*/Z3zeX9m݂ 2`$>;^|7L _ L/Nnt1 ? ;N{ }2EU kSC=%"bP霞֯^ C!m+t)4!6^ؽim բ-}cEy9=S↵ZȀߣ뤌MhOMxVb.R#e6q _njŨp|p6(e|G]:xlLxa{AxN6y<Ln)VנˀhoaǺ/-Kf=s`'|_FJzy_+1f=}&OXzJxwJc3hW?Lm8W橫6 !5ul<'[xwa,GJ4g@O ק =%-ւJD!󸺈"C`jF },ϠԴw%#C;m xL;Ns|Vyj I.6y\LN muzC m5"q"R|<=5HW60R42s)r.Þ}QHNHEOm 4Ve!2(]ĕθ8!j>Y8o/ϜGZj*n>lQR{##z%;te' PXj( ?+ބ4 ]]YaawTĂR^p*&q^N=z/#x4 V}6*i.!he]; :d} AmA =# 21*3o|û241{r 8=F6Z\kBC`CY`7iFqDǢ#] 8G-sVދnIP,ӻ@]$~[Yo'w% gxJC\ɧ4nmǍ[-ږ ‡ H/jS?!PH>X׳v՟"?O`^6r+5ГT@U*%̹+F{0vEHu胣0wѸUJF٥õLGSz$;xce$cvBӆA%BB{ë}' .f?NvFaGKV mT+M;sO@0P(xu F@G[ap6.#䁩`l_BBnx4*`sF?F䯽r :vϿ3||0o/\>C `kV={ɗs1tl̛>ച c! 
m:mDf(^ѻG7 8xfm:~n rC͍`W3C~dq01 qT5WpҀ2dV >aQp%!zsPVka p3 %f]1qL~ W]K>:ܑwDziSÇL\<}i\*/`$v/oS@>R6;I;@0h"=E0zzz4TbEHj{V4r"ę]>u:Ґ:jcgo(\-0qmQAuJquD8dU*ܥ3iT|woF)Y.'ҫ1t hac?0랸'DI $ EJ)Zn-@Kg:(?*` /.5̮?_o3>NƊxt/\>7#m{`VmV>39r?X\\+AuJ9ɈՄ1.@=Z!KmI+ØY d.1 K) A~C$ OJL \7%%dͦ9KE40ࢗtHˡ ؚ@rNʅm9MMv q1$\ N acFV+=23 ] GAӨ-\My蒀V"kZ9@?'،:l&ȕ sc s PWXљOX8,:S"[«7|a]Bضh2s`K$ 02!≢0u p1s15'V]bgL:Hxʇ_yxxx>15jq#FFDןcXMx/\`35ND kI`6l89SXsb7'V*d'Gx ,-Zsv,RL$#({hk!)2_$p$GQYL6E2ɪ(KHh4n^e'"HLҗPDFfҫHȩ4(ʊp M?####?D@AQ2%΀kGƄw Ҕc̡up?9L%xxySBћZei?k' .!1EKB|Y #3zw3[#ZUCsN8f[@LZG7XhKԸj@U,H+e<<<<<<_!kL,ϣ|Q+@J*o-5WHljǞ| ),y""TݪJ8THAjb2q26L|r ^@t" VP$ۚ#0pc*1h]TLm ma9>J0[|>WgزFCcKܹbK~z근g9k[|iX4vF*ϭ6/'ΣgEV̈́NJϬϲS﮷/`8_W|>IM﮼n|v~jt}~ Y̏T{&Χjm}~䎽gmW Z2Z/ ^`?[^` /:d/ ^`?[^` /:d/ ^`?[^` /:d/ eAϖG߅(w>@,cJD40Gf͡_q8Ǥslsp*jZ`,^nqr;jjxr6$6hd[}Rܹ };Զ2^5l_% P{a)XR,];A+^O[M Ǡ("oRcKۖ:s "*zeuJ? \Y;N=_7taοh+ӧyfJVM9-nApl5Iab>r}HX\:Wa.9/J} !1kLANa< No*K s9 Դ *۰$eAV:  -]}PN^A^&R3 SV),)z)`fiIytY^!.‚ #,I7>@kZtg6.]_$ !1>H s?EHihԐ;^**APue9$ē`fwn^ģ< ̚8ZEo#1\.5/JKlGRm,egrkYUY9H"ZjN).&MTK %5M0 IIaNGSn@fRR%@K[lRS.dʧε;rkC/<&Bqd DmZ,筚&Ԡfmbixq^g# xj]<;Ctr.73ס }|ܠ>n: ((q9oT ND-ѱ #Qx4VpWc"Jp=<Ц@DZAzI9t5.$X5FKb u(ʉ~t;;Ъl/uVO^f*BCC{Bt ('ĵUq >޸8DEECXѳ÷'J$]\@qT$#+VΟtNao#bY)1s&bYjEkG7kpG^$pٓq^$k6n_m>Nrfpk >-̽-'Ӣ1yx7xuFnͼ:9_v^?\6ɻ?KW l,Y<$糔n+(pIx@IDATJ(HAv>VJ+ܲzcC0m(ڽǸr 2^%; #B* 'błeNȅr#0_Kc^1(ص8Z>{oB~s/!_>GdeBAE͛fjQBa{L Ű[ ŁUso  %%yD>s[ap5:7/VΟʕC[т_L6+̵ ~0iY=:a\]nR$m1g%kwVJ ut$3~sCt;f|,n0npw6IYEb̉j§ N&"9 _tyQK ,@Q/({_)"nAxHl뀍3 ;ÉS$)#?5a]Xc cp nݷOA 4ޤf5<<N&Q# "4ѱ(k[{ԮeO2Uwb?@ b'A> Ah![c^?.@- 2_Z`^X]-H6~7fJ84y c-Eμ6jdoZ؛aKkk;/B" ce!n̠mm֠NҦ" QJڶ5SAԓˈ=qEHaTB>c3D5d|+_ȏ(URԤ:0Ӯ6Zzc;]g‰}$92Bo&Nݸz*\> &09/B\v pf2LX`Dk6!/>,p[H{vm:`ve~?:'1tv6Glb4vR<54Htpyڰ2^W*~; d87~uyb $^:[6$*cyЫ=}N>k6vzd~ wO<]!+/'\yxxa񹰭i+MU䤧{Ѳq=恚V[63;T/yݑNYBB;.GֈidEanvL!#;6NձؓJbr[/&Ord^8Yb`vč ?(,҂%]ؙWFjCIi@UEki!+9dµvAv8UJP@н#1  HJ@պ\]7Ҏ<.&yDu !g"A!H AW"nX1EyY䐛lz~Ia>C*DŴiɉHII|Obd!9%L"->#s̠dds ؚHG[@8Ip=dXa-| ާ,_VzuRXKZl G^ja<< ,i:4M,aY%Ixս&>*y!Y2VDZoJyLY‘[`ol++hBln1wh/;}lXi2(op#85;q֋ 
7-Yl]`kCK+:J`ۓo+M|uO]CB *dD%:YI٣`LDૣH$+t#HfJT0e>﬇zml2#oxp7nqzb.Jӭ7J"}_1mu+֥{qZa qa:ه#ֵKؿg=,5!%_E鏮\z΃ ;uȿ,6@ kUQФ8ۅV=D{Ƙ>xq,&ِfhs! ri:(aʕwZ3P!ɍ}d6 򩍷 ),c^v#{w!ix4Іl I1"Akqk;x};`ywCP#ވ 3b椡Hz3brx;9x8t=}3QWOCd۶Ņc{q i gb]8S:tbe$X7㧡O;W$ƅes4Mq1V$ĄI^ؖ-NZzlo6UƑe2VD4`ZOt Cڅ];9g.4L)8~ - #wgS}n' g^)#,3=: 9<˜tLS+#sIQb#)*mi#ˍ6-͡L>yYѕ{3iB V҅BB[fhH[MDmh<:?1v(-@hp4Ɠ\\Dm`K֑ᐨ\!O%&N,4mX 2Eۚ#,4B\\QىH'gc[;kΌ WM _D6}SQQuWaMf[Sg%ç*QŌ1sc((ksHx8yÆU `MQDкuk2@Q_ƭ1vX"00fvXeD5cxy4stAW6MOBxX *?u)V/%Zh>M_ ̾&ѷ#b#U0,jaYǒ6R"(jӭTƍtqj٫ |5MТSPTDnPOBDL \uGnm")Fܽ7my3rqDl ?6M p0wEMH@ѣ[6>u]st +cm mh>o_ux,J40z QA*.^G:N2T"Zía݊9/lPh97~$©qsk@I 9mzUn#!"s,\cnHAݦ-a-XӸl8y(+jz̻s_yxB@[gO&-0=( ciȓCA^8'$#++ aVBMSh9 opQw-څKh١W]\;݌iSJ9ߎ>Po|xn>C.(Nh׹3,HVH nˎoc(]eCn쀌DHl 3& ڹRBp 4k ȄL;ѯ΋ҕ S+oi `ܤ$8::ԇ÷/! %Mebl~8TfoAA111@;[U: {?&Y䘜S kkZ#2E `D|8[wƓ" C2h^`ξeEŅxB27G\U~#gp5]h0&&ӎBU8ScG4?{xv kbGϊ'ؘS&e}f%P줃Z54X(Ld[$BK?v#. >FھӞ B<"usti - Ućdfal?զe@-#Ƅ -,aF2xxx>-:fv!*0O;S7!PMy_=JihNa+!Pf`dCM)<@ht<翖/_HN-ORTFEEnn^x,h9d~L*U}N 5`k#"_8KM0rʦT x߹,%IJ#_7[7Y_xxxdBP`#6&(Q;et3c%%045cl'O_ѽ;;̋JhҢ%EVۺH ( tT"C JeIPeb[S"(͌5 ;_&6 gݱ,ǚ6x& W.'pm􌠚7ohIT%Z`f*31Ȓj>g-_yE@XMJT*A_{yYD}^.-=9?k!uQݼRU梷K~GߠF4gW~_s(vnjpxDZ~Daqx:zʈ"K<I>iAU"F+cf,%^|6 " ZD˙W$J 5)⒴_&)@Dtɂ6xט+,݋N"W=R4SD[YF+ cE [LǔRCMJs(U"V*-5*ٸtt\ؑKI/dm- 6F!,<vNqv$4k0iǎ+OtR%YoњlKXvFzcJV?y#l!Uk|sZ߃[0(%Qc{Q\lQIQA\#D}^yP\SQ?7[?CWxu=|_K Ä 0pmb&#6Dof?#B :( [< {„=9JKŎKSUCLoP"eY{V>R(^=a5y[1"u5H}cZ:wsE/:Q1o-TBc;K.#"9E^QoDC1d"J'-G"ҁXb@7e ~ARjt aKt8̷9y +d!{S@aN:R2Ks@(6@UQBi0@ 5TPUl98wׄ+JFJ"2s+OE1)ʆROiFI*#,C$E˫jQ mb}'9yDxF5*5) o M-`Z~yzq6DwqNkpjE{RT&!,s|()K((>2B<ٙOLjȪ'27/?jȢX@T) %?B"?76$JIm + tAW ʾ9BOp8}TM Ja1ǰ^# HYPQwUmS=C>WmS5IwZ(q 誑pPq71IFӖ-acL̢7?a͢>y^-y3 5 `ҎHa7\ƄYN%E0'q~G^ݑ")3xQSERiظx%cCOCC1n>9E%0½)TΛgF.Ǎ;q]+mA ӱk|hs1X}THjc~?4"͵(+}|HB ƪB ,oڀr2!M8G]Y } 14  ^G%wmW"16s5~ͤ900DCjBq~_EC- 3c3ܶ!LDS@)aggC$ٳgH̃C]8ש ((QƆdEx͚4rFA~޻7ur#;5Irqj+װ%YMH&ez`%4f0Ԗm*r3hLK.UBF6UZ8<7nľml$xPQӿ5cƙSaLL^%Cv3"+fޝ#2BQj܈l8v}!̎AgVh:v V~=xY*_h,U"lヺ k_OF>Աf8YSUg97ĬG#7dd\j}б9 V?-"08dļFW7t? |fN к7Hc&\1VM.n]ĪL X[cwgp\uHP|og,A]ܹ~ NV;P? 
R2]<b9;A)bOPc'Mgbג8$ &-wlVwsIP։LdfV*PSѫ4V' ybZ+6Y'`7S1[sM\uX5{4)3&q\ضLܾw;ĵ^ {}p)"JұEOJE״f#\r 7En-< O'[:O q`oSL=墠 FQ=`bj3B$QIM™A(3y4kHz vqF$MT&ԔeZ>;I~08:8a`wq9w>Ev%|Vzq< }F8k7nJq>0\qTD=!09]jXQm@IЕ$(tSrZ䣩)jRjm@_iŒض0|VO#6UP 4B#Y z; j4P DS=RbHNhSA^k~L R|aQ \7:*P0 )Ք^-lVP456sԣ"s9B&̄R7RJvL" IP$12lE=v",*͹;ӭl'Bv<\>y4~[]5jbD׶8$$h6Č]j:Ƒ+4Waa>8>лcK!-«HG31R[KSZHO۫kA?݋Fu*Kb/EC)KIqqӄ*e`o m5.;0FnF&|yPĽ"sD1bupuvo[^ſ%'#/GG(Z݇@V [cL?NQ7ȍǞ)zf ZxEUWz}a4Z((,G=F|^;C0h<`L4/^\?oqkg/^!_} ٍq&s(*/q|ܳևOQp6`{FqO_Ai-7-q=Y&|0O#N`+w`WI"!,# 쏶ͭIL>Ùv' QRnPL`JhT "Gr%x8bD(fEYCCo֠YYp# _x%(̞={ѿd.4x>АdIMAQ5J6ө(] vDiA/v'Iã'k`1+Ʉ۩#WaB2zzz>5<0vx_Php-M]KMYaLRӪUOԱ4Řa=-(V&hZVy E V³i`!h߲B^!5Oa,ļ8-e"hi(^Ry4' )&CMʲB Cc8vB;&{щhض?~e1LD1GnHHƌ=C]$楅",2Xe+:P=ܡG_"$4Re=Y)ZQʹMRTт"7Fvo]:X' %KVdBpvJh9K6̊DH*B"HQf=6!M͆WaӗCFnhF"m=S4,i3мCB }uĄltдicS4jʏlgkܻuϨY+O,\LC]L"4IڶvphP7r#R/J.m㧟~@MSzU4E|]b&G&p1l4DZ6]jM]8dVȡl40itBA&j7 a= (h ;Ŵ%мD\QɿCQD)g#@1k 5)W/K|v ffo__qsi|px>w兵/Vks_a<<ٙ(̤25l_7_?gD@zFk|?lφ07oS~L9^2Eiݭ%t8BZXGXy^͏0+D`DTаU; gA "9w޿"u3hXwu/^`R?<睘ǝ# ~| Ɛ %R_ 7QdHL˂ jZ w"`e/PR*U"9-N|҇Q1K K;[++RQ!,5ܽnQ\ؾö1 ELx(`jiK=X~nԴ6#ٔcBDhokeε+$e"[MANvЬT:8Y9R\"P}ڔp6).B^u2_R$}l.>VJi^=~Wj- lBQ>8^WUMLa Xpm'$rae /_.SBVr<ݩae-ueD,@8aln  C!q* 3)eІeM%?7 h~:-{@[{[Dx<`!{Iـc21ɳг?%@I2n92ߨH)$Y:-UCw+ O b!&/{71~aF43H LCJDDvy5j@N '-ODQ6L+*R>вIdDw`ra:kƆy0xH A.3a&Mǘp-8RzfX ?o߆[aӴ88Zu#dut";Ҭ6wGI` IEj*_5 y>7վ-tjY+}ݔP fc`ڄȡTT='Qr-A'3yzN\JA0VuKW!8!)!#!CtGX:?}a,gQ(ڶVP\1A[4ߥèmZ*"BX%4md_}UZ?-mfbb0j/ zm1sT {bh4ZiqHñkp/H|r \ܝ Bφ*y7NiQ>?xas6up$S \]Уx[ 6-7'D>'|YDdJLQRJАQ ?-[c]t&1_R5m< 0vr= L NG۱~4™ %DJab(B}RRzh`몕p''n% yUZ! H8r&.-M5'NZxD2wV.u =}]̘6 _IPL`A(i6ptA}G/@ɼ9OL ȵqDtjd7aT˜ǘ?k ig/B H?q&(A@db7wÒE_Zz%/E«[9wFi>w_1-,HŤe2n޾c8|KA^þǥ۠5DbepPɅ}Ε r+Ǯ\? $=?eB\8mڏŪ`_ѯG7SL0͆w{aYH,W>b2/o74]`d\3f| Q*)ÉIfiaEh\MP{ $ܹطo^? ʞ K[& Ѵ75``w"U; ;M}@Ix>#85}F8g/nJ>&jxs@)9XTTߗ$cQNStS#*ɫ%r_BH:k6A lskg&S珑NARӧj(Q$TQ҂Dh۠w tk6=fCTRK(*ÀW z)%+"=i q))"97LXMyD0nE "PB}-^ySEMɠX $X@". 
;X̼½ j2k.8Tٺ{6u) G_#"-W|;n44 e[VXG{4ԍ(I)VLH6C&GH[ c#,޳.ʧ//Eہ"%AW+)fS75ԱȇcBZtI2VO-Ζa&`擈J͡Z01F m;{;7D M)-kcI41jNٖ5W0 5WAI m{B"\q!s!x !tLKfŚ OdTGjFj#mZa}7e̝ں$E4hjTu0 u%O&ѪYox>WaF@=a`o^A]ʢB)^`ۦw]b*2U[ * [n;ۉm;6]cKj顨0Bʛ+|U4 )*+"е"a4WGq* < J̍Eys\q!LܠdPV 93'(=\ 㛰chP Igǫׯ{wo䡝5ʹ.hA||~r%^(LhTZ>n_@d%-/U6NH\?͙ݧ#!PP) !3 !D:v8z6ɬ5}; ȍҰ 0j #?!==+\\RgØ8pGW7kn"9 -@P@Ҫ5(`ϫѵh4wkA}{ / Sw'D+ Ćеc,86|wwww)(ݥ wOgC֖_8m|lvf>}fvW|..A"+M>]Q)EI"*JގHc5`It{n+k{p*Uu"H5GF߸8QauhQ6ӧ,^?o *ciؖ:峳ebr4b/Mnk&TZsql3yn_S3Lg@IDAT5)ވfN-L*i׼~lL nuPL|wU (AŖN"0C#SrJ5 K+hЪsfϠ`EGZ Di,>kkAМKϟ6GsQ/QxtBh^";"/2//r (FtTRVBi|uYƍjJx q8MkEP~V9W"|$H"VQYqڨ'9+,]!vyMZbgOEpfhުؚpUO174 '! J:BJ&ҵ7g+S)Jq:-vVVv:Zi%ɝQ]S ҍ8Y82n\:7ż\ q|eʃD'Ҽf܃HqH6&Gt)QtVfA|v%x#&1e-GdF5Ikm,X3IOt.lMmruP!3rA1ތ)SƑ)EX. UMfdB^̛8@BΈLj #o̢ףXdrP19?KWy:F>i!d[\Q(E4Z!΢T5F ^" O WT 2ȝ;LRȗ#@ݒ}Uvjo녽lՑP^M)jo:d^a̱;˽qW&@@%8IUTbsd!MFiE`Qk>z%9vsp\H/N+v6WoSڿsJ9J"JؾL5xLls">;+EըA7$`\ӄZTUGUT`fyNn39U^P_Wg@_Ew`Ͼǂ,)ƫIo~tlY67gXJ\)E!o^J8΋Sa |=nLD תɚoArE\9q@;H|f&^Nd;"})6Q9!IbY:RVum}iJ(O5ȯuOZuUT5ۗ"SDЧnz# Z7OyK\COY/gNJ)yN3) =[`{KkhZ aoЩsGFƺXrh@ xWuPRWu/ Ԓ U%Ҁы$axuPwWσSaX+nmH:h1A>'{.{iW,H6ۻE[A{aU/Iс׵=GMd~^v5;d٪쿖2tAĨW؃}PAPPKP _ډ7@@Xb&MwZ`Dzd_M"F7KpãMܽNj7id%<$H\Kn/=p%*V$YqJR/oג)iǹ c;k-?w$t?."N0lZN?t|3ct0ɿ-qQa5t- $Ptcw.JYw)m3!6`"Bx]lΪMHХmIM*ANw !6Зp-nO$Hm }f\?KL5K %{FKޫ/b_6yygQ1_=Ð_ahl"b6r:3GV_b*@+pLsC6"ТEwG ؾ~9-ܶ[J4}3"dѲT,)}(W4hfJo£xLO<9(kxn/YVa|Nr>Tj= beF<ӏ d]!hjJNM'>oIe"$EaXn<cB˸0>1f*l%m_?_&[KQbC9 f)Y/'nzӶ}۴7e2/ƀ)sҳe άG1N#^Vn7[h3"HWtoZ#8w+Ѣ@%I"s0 G*Mw1O]qiST|@CC Z4Ǐ͛0lʦ߫گ)''&/Zңuui㸳o=od=<{y됾brt8!l=t}˧rq՗ؤ"[%^lvn[G]w#|0qsZhL*} PNoJPH׫3ҧHΜlD0b*\#pKHmɗ67)]y%zO]ÎkypI-R.B$@WԌ)EA.Y3US9OxQ)1;-g,; "(sIҼ%1f*J5"PB,\][.{!Bۯ0crnmJZ&1"$l93Val47 )trcWz&sg͊B8"ꭝj$z1v>zŨ͚Yp9!o緣煸`W*F|/d7^'i(*y[h4l㸾6y:W='[l*ׂO%SbJ̕1~ũ@M*9#mk/|Ej8>q<9qKq mxn[ߎ3J}Xh!K Jlu)Hšբ""E*/I"PTQQQb#"p⅋(U3OdYZGd!jljSVFQ.^QjQjANDsU Y#{K)+L)qqD3ek$os< c(;'[+-'vGG""]_Uͣm4Uc7R?2;|Jʴ}COtDtd|;QeF.&3o@lm޻tӟǶY66O8o>>T-'.SO؈4**8 gE$C&:2Y2A Ftk՟x~8gLػF:vȘnmݰ N$ئ$~/ ->cAre_q!/ <N"ŌzHV4rЦnu4Kgu:NtNkD[#zM*3RxʵKhR-Uк 
ؗƏuI+$)j7F<@.&kY.>VnΑaalHnۤ'S:ĕ[A͟fӤAS.VY.͹ָ4wUrD4Qoؙ!M62k7ݿĠ6ң4'y\fgZ?7OQP>AEBE߆^ $9bYE(?ʵ@dϑE [ qpIOJhа& I`+̚ YSH1rfp;J}c3Gҥ\ao!ZvJuOPJe/6hdW`argH H &TT՚wgX%EIƌȞ3-;cڔ dwV2PZ]3/-ĨsiS(Qd~|?U[28ZJ78R$e̘܅KS`ꐳH%vi^$<17.N|m۫1!d,\sgQ8Ak-BƵ@Zc,i C9s6ʆ=( %~ sɢ%JHW\*!AlEjU!Ӡn-x d+50$C+SPavp o‚qFd!Ĭ=3N _\ũ^6RP芙54 Wڜ3Ƒ^l-SLi2YP\9llȐm'YϞzc!/-Ch\)[Q*voRG >>P-N xWX%W!A@Q;wn$ԀZT{G)T5xQFԐjQPP3U.QՆϐW"/1$[Zu**u:tTT]Q=zתOE@EJG a!1Ԣ"""F@%l3";E_F&$HL+ IX9ff}6ztnϝOP^(dG<0qG/Oei߮<$)Q)Y>O^=ϩ>4k?߹ĥC%܇&m;}?ët_X ܙ W-IYgK:K%sxVDio(ABYZOE@Eoj7?Aw~N^];a&lfto۔Rjqܑ4UUy,˗feK.PEU8̢ɓ%X󼰍>æwky͚syZ?i=sf>v*O]Ybē[^W'\-8/Ͼ8N=JE@Eo*a[uR*KC iYNySG yΉzCcbIB߅P0|Klғ?wvIWJc"yC7XX;+GfiH&8R $%")0Ft+ \Ry2ix =Aӹh0d 0%菞qȔ*IJ= xO"tE'9CND*Q5)PH"W|aZz=%I$O=$8[k(o !46YOˇ9v.o"guX֯ s~Y=t%Hd*.e JsK^?}p(׋dHML^>[KxOόY6al"P%{deܘxBxdN%yI`cE#}%9sP2&I,x{x>FrQ _N-NqQċ4)*'^`㔁]>׵OE@E"o n7.*PrsE|%w[>^EËs4 w+-%ٶy=ٳnX[6 n3}i1KdܽCGlJ\D$;b =Av#$;#^ks†1Uvބ*4,' x.)#~-ũUd7$L#'0kPUT!JvE Z4OVM>y7]{)y,|2__5[p#+?l*FÝEڳz"\_` L6-4"X/ 6m,1 J<~-K8b*+?HKOl6uI} Fȑr~A13"B\t14%TDm%]=;Dț;4+fÜEO)H.]#zX֌̩+h[x*4~9-,߼1l\w wՓFq7!/=z;N_>Ά@6hfO֣H~QʹLZr;̓ mK2w/ !I$~,}~ \!jq=wӇq!6iS6wI$ o50rYƙùvnnB"}..ӓeam˿]JRoz?myp[OLQ|A*c9dێzH$?r>`Dl0w}>SE@E Jؾ jry>HJDϢJWՃIoc6%ݦ)4۳=Vz+xJz$cG4fzx.ܻj?IT7вg,2gXd1 9I1,'zdqP(ZrNG1[YYNB,gΟxz^D{dƊxh7h,r}p̤s$\IώZ&Vo'k#}jbֶ֘إcܻQC z @OW8 XYehċxi]SYѰH$q 3,v;Kk=GR>mu Qi#k3Y&**Ύ>]ZlGLR5L*(DT#' =?4"|ٺ&#mc<7.\~i^>)z^2uH.P֪sfȐ[YQmO*X8&R]LWA,[Ӏ'~Aɘ"id,u_ޕUd*m;% NlMkD uu,Hgд02C_lݢ$麤20HPHḀMHJF~fK2vm4X:F/oN#2QvɃ=U6[,g{ׇAܻq^ڑކ$tV*e,!k"Ez$I0IZwHOYE!I>4lLn8mZPH#.BjXؼ{@a NV͙8{)f+kx\z| 2(%v >DJhbas !.5 (`M}pL+P=skMbŪJ-;P({f aLs^֤odLl9/p)LF$=$ql%شe `cx5X vC'\ܨx:#v둳,[8&3+jaEB$Wȋd7C&s3i h|TW/ QOmHemiţ{5c "I[}xrn>fʝlX^m *Rȸ$|)[8\w}my[3u=P68w_b$Bp>,fZTati$*;)6l" {D=:vՍY maƮއ%"Iiذْ8]OKD38p/TRҧ'#;VH #w'iN%L B\b #q^ڿ7W X16ܧ4rP,#l"S؊=\b'ʔ!Fa"Y(m8e;VS'5-uRWT$WlyX>")ω97싍OGۡ|8d3,e(:rYބ2gBf-2cK.)/T qop$~/W${nJ~ʑ┑ڿWE@E"7| vw|^$;w]Q9@yɜ%+9sE>L D&׬Ov[]Qc>:HӲR^t L_IG'RYWtDbSR)QY`I:kخ={Le`>9w3\٢ 'uȚf-OC``M^_1n[*.L=u#E&V-YLӠViY{DȂL PP>r2ZHBQӺSwJvK"ctiuz59էZuR\} 
Vv,]#,m)V%h3fמ scۊ<ŊR0o6gCEzk\eJ笁K,[JUb6f+XY,cfU Qdd}#K'*O_zO07#kv<> {j̝3N<:Ϭݾpj4 O^|@||<FH^ʢ#7O_ԾT3Ϗܹs.($ko~=xn\xMU8|o 5|9T(oooҧO/JbHӪ"FNȚZNa1)Ej ꖊwA@}A'{o@@ S#y&dY#$mSBy֬AE[!Jؾj***(3EOxFauwB@ VQPPPPPPZTȩTTTTTTT*aN@è|-*aZv******* }'aTTTTTTT}-rj;Jؾ0*******_Jؾ9wB@%l huE@%l_NE@E@E@E@E@E@E;!:"ENm"""""""P wZFE@E@E@E@E@E@EkP "SPPPPPPN;G *ЗuV|R,^̂ YqCbzHX5w?#. bED'aL`ǚu{FkHLL$Y٪i;5ibCٴp9~!iؼj%>_5DuM{HOgKW~$AtW\_p<~Wvm%%ӓď@\Cڑ7Xx£9;O;&Z/*ax}Ā>=r3Ǐ юJ< fQ8v?q9,"$A?I1AL:wMs+AswI ,e^aV͙˳_3b?m{vr&.Hdk& ̕?m^;\޻GN}\ouqkwHw߶#hoI Wıa ؉sy :| 6&F|p7@@lbQվP~%f}r]~?yɩ9|W}]&R}+CЀIS$ ltv];[ r"xK ?Y>O c#c tލz< YșمĸH<'K=>)GD`.ysf"! tLm2/@u كcw>NuW7Jesӟ&9 n?uڮˀ1/^b혉bsct̥|X* y䩬Ȍ\yacaF$]o`mk'^> [' 5Ī={pD|t8X$ }S+#BC2nMąKxi8Z[#B#bpʘ\Yi:::ZoJAq˒,.oCD?c ב>~i3z2tHkY9O<<;o>,MoɆrYiHM .0k|8LubE%Q!c:‚ЈS(OO0Nk"f"kvTo_B3^OڂHt x-iҹ3ΖFt"24_[lBvM7e9hІL"oדOɖNd= rp%Nc^T&FF$%oR%R7ZXI`MO_bf@9SUƔݾM@X&g>y') hbǏ|R)_>!6p,Lup|=ZjWƛpeP7 ߬k u*h`n$$DTކLڄ+F9o)ֺ1wn"aq3 +[!rMKH^=:^: ~dΖ; ~ y \̣Wq X#_q2X<ɖ'/q= CtԨZ[<)3] }“{GE[}[*V\#.]5gDcdMYHN]e+W'wm*/6&Mٍ4x 7-:jwٚ-BmȞ;WRnBL$ާpTj{0!6WO?BSz7)6:t l,11^^⌍tX4 ~d&kVxp!Ѣ(庌t.l\>P]_M9+̡})u]2 Iݴr!PQWxhEr,.EsjLVmƑ n<_>E 1uZ!'Bz"7JD%*Du(n/#,{AJծ;.2`T 25țۑ9eWכt`Dgԁ(V&"&Gz2oؿh!tq aJ)bcʓCYed%Zܵ;?gϡE:m=kk AY бn?E<s^LZL$^HE.^Cȹpڭ$Jš!H-5<[y (ҨY]᪛NdQ3u mkl?=3FS AJ)IJltvC'!7 3) fBkjNܯNj,vQȞޑZ.J80}'l65j뢗Ѱ*znF:5T.~M]ꕶ֤7\Ak~ ^Խ(~/ ՚pyVLF9r34䦙*Wi 3{pJ.EsH;'hܰ)aϚT݈{/>QK]^ͦ`Tʙ*-ߦ`Ԇ:"ضVХF>̛46?6a ~m)ZMڍ`IT\siؤV>gן~߽A'U&.^αX"Hc稟G.Psƽtv;OXdң2}&E;uV <4r?8g)7-cŎSK+5LYkwFTl>pu~ҐsgU ! 
}|O 䖗V&lWa^=]Y3'҃}.chmۍLop,1QO9HOqȯx•iS !۟dyq0g/}p=1oB>hRR3 Svuoz p?B ֺFձ+s+†atr ` /kۦ8Z~0VFm6{}2zQlQ"Duf-J@IDAT0v+[X$eꕌ1"4ncU㱐Ӗp+7ϞL4uqHON?S>l= 8|4]2b:A>W* \bh km.ͬsh٤MDە |pv,soU$Qѻ*"~ €AcTi͘ 0Ui~םm|e(w'Ϝ$m |"-.9@ܨ .'tS4^W)@|}E2Yd/jާ,3ʽ.ݻc'Rjܖ mXC_Ԡ66xO9NۉR-y-'GT)ZyH3+KD210'Z0cRB"R?*Ҳ0KQ&7l 4S5NRčxԭǣɻbsgh"/u(אOG)TLRYeZN[齏mKc6?n`[ ]g 0eKi8eOZGr&TԐͻvKjYf~7i`zZtITup} | hY#m:7cߨK<&,~}ۈ]\_LxPQj1m 1M k^}#c,ὃСp2:(ׁ,taCɼ<r=IQorp,Xbsd΁8~\c;Snhb"/2ۓ U#C!Wͤ{EYډz>V[&.-b'!'GB\oqv FOl2ib)FMeΜY gOcnf,˔<|r/9}ӧL$qB4LLCc𞝏vPQ*dL 9ޟjBzbAJR*ˬ8>^-4v3b=x<5; p""Yr⛽PCf̜ٳp6 DQW;.eȬ lX}>%RCMbߨSfK 1"yxqIlѼ9sQ:/x4tH*ؽ顫F+Fb#gw_^!4#HQe;/o:;iF~JwPЄiV}t.":qo]xoqQ^B>,iוڤfd<%O{XUu7F !a3A@PkUYVejVu@EAY,^{wYDk{}ΚO!%-aԔP?>H3zF/=•x*^=ib@)g\|.Vi<c .*MǞcwo]"ƃ/>K:?J-/ SOA]ЍNqRyƼO">{V~*h+gιL0' ǯHZ?8ޏ<^OPdϾ&‚/{2'jiA/`ogӱŧIMBâk-W^JR֠a2)E- Ͻ<1aLwDBCly3&Nyƚ}0F9s _z:<Fp<{ VwXjk?O]q'qX8k V_ycqكm2p?i8-6lBꂘJ"ᬩGhT/{&Cݷo%"=.|?jù.B|U[7w.f>ek`5ĩcW^_M5(o%x)2(-?Wߌy$Q|96{f;^_tzXĩGWS]{#ޢ y}E~ZTMm涜^)16:pʢ˰pxd'B~~.ȑD -mXp$=O8bȰ|gr6łk?LꇐA%QRZZJ1{.3RC$:CթCEjV.x,?H51ltLM߀n{qe'S*dDŽf濙+ŏΘ~7 aX:?sE20=]SZz]kY6D w܏sOS]G򑘐nr1xP7[c2=f퇤H7sa6t0B[%q5u"ôToÏo }dniI0fb$}$.)sNFZ*Qz"w5WBcR1{Tǩ^|Y\Nu8ԣ=LŃShG;:߭`Hs:? 66)FEN䡃GUU")u?RVVֆϦo[06hL,-Uz|:QZk1~̫y7[ ym~|8e?wqƏmg)A7pu(@wZӏpy?KNZZje% FZ|c~F?P]ؘo0O9pCU7p%egs䡋`׮]HKK;<#{j3+Rn\hPlƓXG5Rƿ`̝~t+zy .eO;.A4e} .mJvĹ~3q9-3]bs?AdԴ? n/?vL>b;%k$I "i2 (*,Ċ)5)A@rz?E9ٔ$oj&Ւ+)a|Fe|.Vԃç|~W'q%X|6k@8쭏Ӹu3E oF5#!Pt|W}RkOk[;ꀿƸ,uk{ߕKyya+#X /]14G|[P'*#zyt{s3‚sFG!Ձ6ƕ0m:ћmT7fVzFE:Fx#`Umt|O(u1DEFndx. 
0>?ȩ<ŪAR֒x6`^l>gRWE 5H:x I=WP꾔tawhXU2D^8yqgmuK:G_Oݻwsϸf|K04oz7Ҏɋ[6ӏ`(ʅ؊>2*Jͩc#p2F@#;DؚiUM,'ggNJls"g^ճ}9d .4HlчQ–+P`5ESZC27%dH~ F~үPBE \%ቤʙyA7 2g^ٔ8<gV2̕fdyr)O[TQs*x`5s _.]86 AcWJۨ& 0Nuu#Ƹ=Vn\u {g1܇2&Zi3m~;K=yov䵭+:i4F@#plaXl+,}mFC\(`(JuBXFH(%>ԑ'>0VFxI?tߖz^)h{q4?1u쬙tCpRalwEZʰ%XǭmW=(I?h|z3H!b,r Q7Ȏz.D\6-Y)E[&JPz4lO rEDbVMW$N(/IF"%"I|6mH G"ll_>֐q)6$OB]ɵh-c WM4AZ?U-T9 'scZ 7޷qYMgRT-A] jHTWJIY[~.=2Iڐd۶aW!DKY9 Rg3SVX+W#dBTH?RѶe$luix6k-Evt(.)9#,?R5붝66gmڌrn":CJ'\9a&se% lO66VFq$-G~I7I)F@#h N &Uex 1P M` 'I[96boHlS\*vHxʻb U2ƽJ3Cf[湵 &IܔVEѺ-N_PKe3-iUR>(\kDՍ}tn}լ`I#h4cCKM8yxdKTc (ЛS<)BRk6kT@h&"Qu%uTWZ(C\$"cIGM%Hn5bԮoӄ0wŊ+%^'z#{h4F.6Yhf&e.,3`ԫMmqiQJ HԌ߿OJI(x;fE͙#qJ#Np!DHh4Q!%&pV죸[Df%X?%mIJIjkb~\NFA&|QvdB 堫ì0=DU6 h4o6v.%IpYI5ҶPɤL<<|xe8b}sėa vtϠIV gSҟ!)9ٷg+Vo gD (N>c%~o-oߋLd;ƛ~  ~rIX ;Pu^Ս03&VD7BlB,JvoJzxz+l.1yP n{wf"QFXq7n<"*)݅H!^*ͻKB#3F|blUn=0zhDusu F@#WXdsSD˝hUZ2PE*#$ĉ5jQ %)B$@ʐH( c2rc$&1PϘond.e>TJT֑IR풔dJ|,HVZ6KP#Ҷu{#;/ަq]I>z*\%OeNl=8'c*;c,cf`ٜ7PO$E+Rj6,uEy._t'Fm}W/  \[HNUz_u;cHe6 ӿBjXɏ~y3 DEs2̿V[ܣ;gؿuzY<<`ɗ"&25ܙb 6\<8ϿVq#0G7]~ZU z:DoS*1ftOpr~/T'זb4z^|sL**\ w&mN;Vg7+9nYW_L^I-sh{˷q .W'\yTJ̌US5vફoD_ _ c^ח%lqwOx/aJ]V0V5\yMsu!]w3v=p_݅ 3/>-<;xo|>!|$lHo+)iVĈo4{0Իq˰e#$}ěJj>ziE򧟎G<֟ pS#А÷&?MF }HKޱ&$ƹ<UOHI<um>gUl۳YVe̳oN< Bږz˗"Ė܋TQewCۛ_|[ \|pr>_r>~oȈq*YF뱱ڃıcO%al8|3\0Yl 琩df|F枉+;e}X*cS\L?y>\0 gu EVEYc0zp;S k])I=?*L{>ɧ 浑D.?ۢkJ'~/Yh4@$lޚ%ʔ;I&TD'hZ8Lt>hѵѾ,dAԳwݮQޢlN cC7M^' "|ncMo3F](g q#)%V$|fpcFOI%u.F{UyжT3הjIUoWwdI_|Gd;A~Sa466$Q5$ )it*lRT_Qp8P|I$uvxs!5,gIT_6h~ŃYҢNVߠpbh"DaC3d_,/5 )-/}dJHU|׼Ș~70xKm=bFx!кEH₢~էgͨ+#яYڮE'/Y؉JZl`^y-fgRy\𻒤%u\UWY7_^CQ?]$I∦^Zh@bTM|G.miHzVj Ɋ'p1k׮Ex Օغ@hdjN a]*{ Oiض;Xț6a}{Q6}')"vcE'Eq1L]\vXn&1RQnl=p3# kⳇ<zj>1h)}x{oW2=㽵0sq޴kT54sq VM̆qjYY$Rze)4x<7r&D9Ϳb@xjk1+0}xO黐/ ID>G_< U^h4o,9|lUƐtVs4=L{܄H!ByxSMV`+MY*~nh m=y.S#qYI~:j&5OY.iӴ?#构8޷jjjOUQi-^gc=9F٨ =w>TT w:7T=ѣo C,J C>7SS1d`daG {LNHLqɍ₹?/I`.IL>m"#$ͩt[v/3 )1KMXh4'UEoHxMRVVJ*'DG4pnڊ}\ȅ|riJ9QPB6{kV'x;"G$|ឝ| eHRhgc&G][m2IԂ!:6a|8H씠T`D34 !#\iuR8XeXJh >k4F8*"EFDog5J,j3chWŸ9j k5> 85#F"jM\-8O+]vNɴK'QD>(d# Vd泌}]VH~}0UIكяdfd 
@IDATI#h4G˄M[YnCBq$C$lB؎ւMhZdh1$ T!iyc1dD1 z#&.ztfQvQ ^9*$kpG!k?$i8,lԠ:y4@#13aon;7==GM&L?4F@#8zL̦5ro)?|AP9C"clg(ӱP{Y iR##%)RB(=r6FUBgR5wXA<@iFJ־$ 㽢iB΅SW[G~*+BPSM5qs/XfȄ8! ԕؓѡ.>x>m!.|LZ Rer"U@#ag"9 1 ^uM#աK ũ獅5&7*Hh4@g rlM์j%j:s1>~3LT m.m%ЋGUEO]h<R3R.!l,(6\2L ҇am56zPrMz]) ]s.;aͪx yfPJj^Vad-Z5?,Vpn>w )1k+ %le:4o85_n>~OcQUAZI#h4 efq:2Nsǐ!iA:$3LJmD8ə끷Iԇ8PKZ= *_0+e$o6F@#h4DS¦ V5CJ5l؂$(ن,VJdEWDIR!j\2뵣dD%%`cYZ6sPƨ(I JIV t;] 9( q,GO޸7D4 NjT 8]##Ux8vB({p'ߐL9Ti}< QԠY k+P)a̝B7]]FVtQ!Ʀ"5xı̎^s*l0:?H:i4F@#"!\ 2')inhx oؾ r:\.>j'U~T3eœ;ߔur9Y:I !f)S`!lZH3wkR"F%jbG.ʓЁHю-@CRM*tB5s/Ν/b_Y#2m"שRv^nIDId¤ljԣ4'H*mp& !;mN@,biɔIHfb@pӞ-(Ugddx֕S2PSAX~GE^"-(J8[8`Jse:i4F@#OoUTˋ(T枡JII"(ޖ">Ȁ/Z+  5t5<T56QObn9j*;& H$`B`N%H8 0 )S$MCB\46)! fm"PRH1 Sg~?ԵY K5iϐZ" d~6oM0ަDjB7Jѧg"w%\ فуӝ(M!ܫXpd>"SX]5Qܟ*x4=w.GqTzle2 ]ZIҋ (,J]Q:D&PHEdg eǻPꥺIӀB}"6VR PXiCvX d5"9K:i4F@#Ђ*B).ET-E|l#SGFJI%='$| $"F6bX{;lo_yq=a5Ylؔڕ^"YBudy*Кz$8hO\R)ExShBg`Kr\[Cd'Q|H˂0 cW4fؾd:Ւ0;8MA]<$& cSPśV`͋b}3f8NEڌI؎jۺP4FE|Dd$P"i JzqY_XT 7w * jF/jFح DjmhhRR,STƾE4K]R.^z3cءFcrDP[_wH1R6jh4An ' ᕧphQZ[cDXC#H찑 B$y_(,<"Jf$BJ̒pIt"ЫiIh<4Ď0_Ke˱wA!MH -_26^H]mjB CMÆDj!0T1O%;@(Ɠ\H ie*zHvSz舌čWF;c0p$+L_ ꩚>m$9%U#f;wŇÇ>tSWI EDGE1Ba۶ $g3t qjfZh 6)l uu^˛PQUZu˰kg!Wo !&3Dk{6Xga}IfcǮ$%D#_'F@#hZgQ=l$IV!V C,Ar¥[&DYž^؞z{ɳ  0* aiDmQ$\27D4UcHcan.6A i %hbᑨ ud>\9kG$I6:EeKI5ɘʴ)) Ljfo/??a'6Y}a=bI%Rb+6]A xN]`=UwU#w6%NDQXǺe$B:I|{%UU_F91ʊjJPS۠bw#6G _wPTw,Ytƴ4 t(!6>6a`2sLh4=y8"AE :alkC vkdbQi*>g2~(e"UFtn|ǧQΘ^?OpUW!66gLNj_}OYT ݆ǧ ĭ]yBĝȹ-kANdb2)RW9"lQAN-fK؆O{1l6K9wS WݯfwưXb;+^EFfz 7ϯoC }r r(59c0F+2 jl-ڜvJHI꛰ɎH٩n_O0Rr7#P#m7p5(# GDtNhdh )tR,\PH}= [,ECgDq m"),!C= Ǜ{Ajjqդ `kb"N)U2Hܠ0ˈTNI(FTMXRg2NPS pC(sH{U6zc' 4SWͮD)u@ԇ`gn`n axs6 }RXdW1jsU_BMU)U!pNaϤfW`=X QA,dCQ qPJ=*&>T+ѫ?l]Q"6ͯ" ĕQ!"1vl Hh4b+Cp-FXS= .鈡BX@K2GDCl _2.+y2>9$crE/_cvg).߁TZ%4rwɚ+́FNF@#㌆@s.'aKb.vJ|# Ћx&>4YB?ZGN; )))jdwBq>32,cz>61Vc܉zz a`D׸फ़-Pݬ<*wݳJQu);x/1Է-RN6emSHЄИ³em-8uN"H^%p @7@Y"lHYl _L}:U÷m-bRJHҶj3dJ5ؽ|9"rQ IvIo~+-BNh2St MT͒J!yT@e̽#^"Ų8pis6ro4e`(  *֌-0*q/'#6T1 ڭI;!p<1C괼Fl Fv؞!1E8sIp6buXzӣ 鷮O U6l|TU;BH[l o4F@#E6Bi" 
D$@$?;#=n9/$L>JPTX잍)NIҹ~5`T&`ƍxm'b(:@鍗DEBhI͢⣪NyJt10 h+W|(S0?DCJ&E}(ADT''QI5: e VG8JdyQFS*r2L)IpcE(4R&os )zpm$!hMpJɱy+NP=R> 'Q9kk_ OXBݲB3EAkI )*W)A~jfm֎F@#hW&˽ +,q8I\0DV[T.U H$dQ 3/g(=wo=YE{wlڊydt5U7=Qզ8*Be\tS4q`Ҧ[P":1g25&t A[ %;!*:]JJwq/=q*$jiHDCѶ=}h4Ǖ lMYi,陋igD(BB=4*O.">rÓ YHIƄ)1^hosοE{7!6i9.oDA$Q?{'u]y99 Ť@*Y-cwgvvvy=Ac[#Y(QL" h49j@0Hh|O쮪xOxϽUK\^ak<1X``12gm%[Ee۸.^5PYaaWv[/)wUOCpvn_BFZ4RnL&g.89<ד㔙8!S0R d+F*;4;3׎6h!KQ;21'9U( yaq +6~x<N،uLcSAr~))uj]埋,Cu&y;pV Iĭte ˍN6TƦǕ͆у%rLaH̑$; ǘxa; #+L;kRo-e;wJS-hkXQRr*b5[uW+y}k~ \:')ROs̽*-T{[HV`%*;>2ML+'4E$L abkC6OX5%H7=V8!ʂ>=J."wvÆQΪneE 20QM}}x<<݁`2?B%sYdb$tVR :YBml}cɚ69[uFwufbL Ý ϫr]J]8kZY{\c:yzU(JW|RX]zUU4TY;[_[x<wwĆ)6c;q盅bYC4Œf3]X~#n{G؏1Kt*#3X_F*4>9A1V6őu9X*6" uknKJ gDLom'APUi:#n|/='9ޔDS2,YyXсBS kf2Hk(pS8*P#W|E4o*=ʅXm+Ta5y^Z[pQϿl٬7ߡcv+// EMīNw:>MѸޥi^8z^ItHLJ'ob(0ʘ ðfG7Mr4x%f͞(ˇ)l?>̌LZ_A)1o+#XG鎐կ#~DܾRnSfNh{Ժu+U~EVMz\,QYʢjVd%}DS=TXDW{b.1ι~u~?͐uEmـqN+\Apn[G#|H،A#h jiEf!hS%Ojf1<-AZ!HkcL6f'E(:l*4IGe+TvyVjmU0VxpBuvvFe,-ќ"C@~ax!rƔ KPשX0 ^TlDy i4V(XF! xs1-":[e2ʾ\qGFh07E8j84߮zM%!f)(+Hrm. 
kAiu;42!')`.9E%?`S5vWoRA۴9X_֣ߤ?^ғO@Ӄ5%5CGV?#\GVvv|F+$ +8UWf;?"sVkTᬌr݊!io]"4t܀M%vP +G"<ESaJ ,.ĮQ +RީӊkC=)_?p#u֨vT)/+ Ghؾ|Pr)XİvE-Y zp3jY i%ڂ?7W-[W~&Īy9gkhUp?auQ@ 0۸8m3O~]P\8[_K=|>‚\P:E9vZ6fp)^]q<G#p `'6A=l?#Qu6DD&䢪rZ*eH3GyB5/, *[oB8N9[G{5o02B @{qN3ƏpFXB M%.q*S}ԏZNi$x:\"kYzm*"im>Cԋf!jOɟU[wӶmڵkRtmkudOӺ-+]kPn}fQ[kv*u!jgUBҐVKDPXsMN]8&_cTdmt"2 j6ܼ W~=G#F HyS‚6~RV7+Ay\qJK4E8iTztjkɓJõ߲\Ru`2ԜFQVcRwGny'F3i&L!39 Xœ<#,'MMa/<'WP˖_b JNaI"B'ubj`rTXi=&ZBcܵ0!09kXR?HĎ$BT|, #I#uHVMU#kF'?-SW8].~gB\.jU07gF!%а~՚]:{!MkW$ꡇ7¾oYjH`]Qv9XHش8mN5v:{)?n^뿩[Eiɩ%:My+թ(;9?<G#onSY6m)$SO?mQe*Zyf䧙-!?9²* ۢh"8C,Li%~`E9:z贲r2GN龏ܯ\'TxviB &!l3SS[.%O]/wɮ:} :H#Ff $X_դc+/Ēm[[*GvmaMb1C/TTeBkDmszjlt5C,QQQavc~#u%ԡY?-ȃRw?o*۪Ze榹ush0:4wS$$pAWvΕp1q <"pGG,QGM#x<k#'};nV9_i-}BOKJ#4Ic媙VTG]k^pM.^GZkxZwܵ%}_Ӟݯ~9KPΕ}ڜ>2:ITD||Z[,{#D`rή ǥ.~e, )<¤ߴ83lOpVTjjnnӡ—Bq8[Um?դTR^ݥΦ=5jn,*:__wO<~GE*d eG0#T|"*mcrjxCDTw?9 Zi;}KSIQK<G#0Bg__RUU;bm!).2 23ȳ"7A?,ד:O/iaz*s۵ Z kkKcm:ήS:eo"JrP3{b\ݒDBLZJr܂9y%dl[@BXpЎR3I~* R j T8rTUMuE`*VaNI؞$C,\mv*F.*Rcj:6El[.hF^S-kuf˜uO8'9žq/.ރ:$lMB δPU-9T: ߼0 E28r aG#x<F 4 ҃sHɥUؼ2ɯUvqkވq~ԬXt8^~NS(sSU~=:o(>)cs_G~\fN^|exkI\Ƿ$xNׯ'9R,{sl=5>A] #V@/|kGR(3}7;~Gۚ\PYXh$XX B@SuTyYJ PC47O3t]߭aQ@"#A W*AXӬ-<oxTXD_{f&ZK2=M"-',=[vU {r!?w'!Wx<P!>_)yELINu'+4ף. s<>v3)]P = wM4%ҩ-ߴICPab~Pr5hQNj~PN.&~HI5FL c6Q,2fTq1([< 8{qS46y,TLi4:cR*aӼ4Q OO;ME-vac ө ޑazhHnq cn4X'[*xiY9k/Lȶ7V>Qh@[UFzFUxBWT:͙]!K=GC@hͪuc<԰0WFYP}߭Ғ<$݂i9jnn־}GT]V sZ2L;SY2tΎrp>L[V_kkWSAh=F#qAD,f߄۽u|ggd=5Vdi\s3LƧs2j$qRԚ>٫[dȕ7(6φWCKZQ7=+\~ծ=wj^iAmS&":ܣ[oxSK~,jrbNpxPiTעBرC4/IV4*(Bx<G#ӌ@G/[Dծ4>HBmݼ)"X?3mw?{|LU5ua. >QuпO)XׯݠU_GѩQD59 i"5`uiyEkgn4!Ն3j^}=Twjph)n tvt(! rn ƣCcF盎=Px4n#x< D\:<&Š_G*-)V=L EsKxk b֤ZrX6ziZISen9+Ai76{*J@Hn\wQ\טߧJ|tmmjbD|BA̠= liGTMUA:0.YʆrEF3EH7ԌM=m֯*$TI.Ŕ7N Y[U 45,D:26K`E5MTOYz&*+mw H(/>}ݐs:pN_;yB:Wlں4,Y8&ڡCG [qB:(\ p8N9#*=S4iרp;hGyFyZS F\YԸ:qnkkoQ]p4|ZM_MBx\~0V-To+ hnOɨؼґɧ~FpG;uwoS7_PX а*ڨ =֭^-B._[!@(jBÊ#ZJ(/;TA2 gRFRHQ? 
딗SJȜ PIJ;"wQzr#Vh0LU&352rhn]٩!W|D`>Q4_=V?gl+aSD?<G#aC K_ǮA=ڶm0ϋ PGb#lTv`ΚDUJi#=/^ (5Ab`qNc ]y&[uLǑ ua{nzHeQ8G/s?p[r1 ߳FSfUxN,Q/{N'3;Iڵj*7dCx@IDATŏ sȠ Mڴ0%[f,fk5V(aƒ#C:uP[i!- JURg83J.Ɵ&`!2>5ڧVrϜV?ݨiF(^Hx%/ö%{; &wm1GHTjFs٨sLߊ 󕓗g9"lmb.M~{< EF)lq9_NL"zM('YPrzKD >,Ӈ2SsS:uLR:mBCO?J*LAk^ZV!rL?rnk϶BWO@4_v ܃Dm-&BPpn!z.3ehlHGuu9$Nַ{n2 !MWF܈mł蟯D_gnBZRK1"Ï7#5 ս\4zikDJG~+epo͍@B': qAr%a=bC-٢k'owxj>O¯jʢi JS3Jkjk7n٨[7__U,rqN;6o!4U5AvNBiY\BdLSJ)wFVx=q#D5l3&01Ԍ\ Mk T?<+__3m6Qak9볲H p} 25֯g>Xږ_kb.K*Oӆ5 hdKi|S P 35EefשS A2VaE@뉠l#y֑0+65dB"*)Zڭ^Vmzu_Gݳ}sd5uYK!-+3 2_#a DQRzpvj絮/x< P**snUi%U:@-‚lԏl$m+ Ԓ2:xnJ _5 pL葟W+Vw?+%=^;ɥ'DڻeUWWQi @Ujtz+! ]' 2c*OiaS S Q AH^9ڢP ]몕nYꎁ?{R[M EvVEIe _sMn]1q^>5lHyS\n;+)d|kB횇--"UhQ0?`ȪV\` ,:QƒQR\Ԋ <4-YKb$ʹr%R 8 ԻYl`*`G#x>ą rÚ뭰J|WcwRitjUZ΁hx|N o6T#`$ThFd܏#gFנI#nn hRq\n[W]7DC-:?<$p]rvttZty@/eRv^){Yr v=}4tGoVP,I˼jX>Yo~}k]mj-Q+8~mIlP` *5'*\*0TU]FFș",ԲN-Q34el:eH%Z Q9OЮBs!SPBDžEm[_bm̱%UC4|9I]w'O6ԫ=kz< FF\%L[ZY^MB:ͺ[vN~M6iԶ3'J9#lf[ZGFi#MC݄¨NlmQ 21&^Ee#5*.tNm$,*ޓ3:yEw97 Qu^B+LkZtD=)t &. .yXf43Vf*̀˕k cca?nb!UTOWVM8,8bVo`Nn^bZs>u)ݵk֬'ga5;k`dX<ӧSwZ̃㿖JTժGTx5_yiQ¢ٹcSaOF6)'r5(-|⼶o0! Ãx5%UII.̷ip4i.KG3Pj*1+[dB|ZAk{d&)& +劑ABmKYiԩ\&ݰl`g ͞{F;z0d(S X?}_t_+< ]bJd^hT~f}`i AKU3xM:͂N(/;b_G[ζwFӄ'!t.;eHБ^X&GuXo}& N]Od؉$&ƒvIT2T7ځQ`I3Lbۍx<4sA dd$rs:w?loԫTu?UԫӚ%-1&<7Hl x<@iˆ9MMa#+Q5DX- f9KrMh hOܬ3酽OcnL$:ULnvST,NUsUs,[q9fuFUZw˷yYAL[XX*#6,ne|./puoe`sSz텿׽;cz^#|fadFcS!$<7iYl7{ڤ..M<9d=`QLUmJ7%g`@~Y&iMw B zIyKI~EBf5qpAMEq *7=Aǎu>*i60L YxTRj] {i挔QW{Φ\(wth軗 x<й꾇>z}>Ih+{:T]i Jݤ SM}r|7u#b'YnyvP8\8R+11 v!(!NXA8Ԁ~N9TNϲj3ؽF+GkVhf ¶TJ%arm,I<6_vb^mp*O-4i ^cdQp<3~]y *'x51rիg4VYa^S2mT=3Kqrڒ2eCL= fm`ZM=Kw Tb;~ j\ =ZQYmꅰź~al]"C''][33gt 'tqq;M+t\ܱQVNT'! 
ѷ2]Xr$b^t_@5fYE@K03V^/,G#8BkmT݆ 8qT_F2iI*BXj*kԼU -AQc"mm˗tg>ɞ~z'aSv]<Аn')^,es_49d 4k;SCjijՎ7@<]İ[:idN[6SKK#pkNJ`WTd1:mT5 a{oXHO^i{mBj5~ɗ^F%ޞkXЪ5͢rGԋՇuJ:ۈFE'FtΛ 0+9}#!8!Ȩƍ&iZY]Jc9 /pZ6񬛛8~,u UtvdՍ\œ<:sAee[^͊Kļˎܗ0v dkRO9g S 3Mw#x>SI?Lk_5ծ&W[}=|ye/Ymm*=b~C=)-^BpUKzGɃ*1qZ.ȂoV3;gQfIgT?^Ytwf~ Psy~RR98!E_i2jZ2>V-uysuh2BY}8/EYQL[Sz[mLVLg gc d*iW+x<q듒lՀapҏҎhd)_*aVCH m7뾏}ZbLˌuoo+tw*AQЀ`&#T41"gu_ $ܱU_=AhO5xMQl"Gţ8HvܮrMiik=W_QE Vs NY;Icbd"WjlX-~O1emX/M_g%2孹3ll{%‚N=#^fk=E[6j \lf%H L4g`-pZ00> $QU\EʧEEErȅ.@1 13%"cK<Bk֮СAȥ9cB 4Lc؊<,@ #4?7F#Tcf:3:r켪j1-Ya#x<78Ω~Uvrhd>pFiL5!|DM;)Np5"3;5. {91YPdF֘d+  6> OAȁ[TxQW _?'*0OO]uww;a3J鎛=?RYUnuFݸ +z[sJKjMQ/9rP.'LQL_<7顇\AP·K [%ϐ֋?"܍ǰ-?XDPzP&0 W:)$uL[w|~ZWY+,U1j nbbT/=]nۡiLr휞7TDc=fsrQOy9Jh Z/!?ӡNUj݃7iqJߎWiE~7~I7qXBklsn3T..r+F4b=uy~x<G@h֝$ĕG(k5 E6-B& m[oҝ}V6!5o6^5Ys1cD~-?ܭkЀ#!4j޺sý/`٥XKbwZfYޱ>5jTYZF`q-.jڡF6^I"4k>0OcvV n,H4׽OyBrZyuq_;rZb׬FYDtjJMp:vq]x8o2HZj%::5 jɝ'#8>uѳ{:cqîۏ[!4aNk 2#q\ʞYzOy asS)N(@ҿvMYU(Z(t WyúKD֦y"N鰒9fÚџo8Y77>(s[R'EAmz Υ=w\KJ'Q_|@ݥQgx.ok0`PdNpy{;_ۇ ƮdžHnh$ԲPy&KQ6nQ8M!l?{TY^a@8@F3ĔD$S yy.\Da)x ʂ%-]5Z[y?5U/}V؉BFi.Ǜ={ '+}~IPUU:?˟K(w28[n"rR,_#x<.K wy;P0rjՅ0Y3bR^`"] E <Ĉ6&4>eOinYղMG08%&al(i0 bLc[UbFٟ_dvQ@EA- D!w?Ox|/#MVjl2tkFrIi3cö1Q.eFQ[f\ˍ:׶2 v'Trl?hīu}#U|biv%NnQԻ蒩rgFmKԷ;7 `=o@Tb8.- Q Fa۰s$NQڃو S<_hǹ6u8OC~֊8>[aCB-:V9e~KilZWamG#-3Qpst-X]ƙЍ0IzH.TlՋoPS.^Y6w3uq6+L'#AiWnu( e 6drw`Gmc[عŸ}l_ڵ[Xh7_zyouc_2w:;]lF\g~:6bKL$%tu8,!gc .bwABun%Ȑ]#Ɛ?q_G܁ }F Y1ڍYɍnE6v9@EGiuժlgIg{g? 
QFfOZ>6nzRN[ηȓ5w/*V ̅L{V`脴ނ6h-щ*my+-ͼvvG# ˌfp`ו9/c**$Brb5cI(/3Lˑzm.rkiKm, ..r_]ˋ2{s|ݼݫzŸoֿ{vȖ,pՈu vF܈ف2]رU͠.8O7 ݘY_gcj>zJ}Mi#z:U|fh@ co[g*S8!.C[u{yNwr~'G#'F`Y_4LũJ XrU8&ݑQG2u }?On MA,q7[Ou;wqwrZv ySJU}l +A`!sȗ)[7d$u+RͮUwM'߫}W[F!KH>ifP If7Z}Yrd?د[slG#x\$lx(*)h&={o`YM+-4Z\XBfՍ_;Bc4*CZ~YUֵƢ!'+'Gh`MB[1qJ0_:CJOCq f@2 |~|tڳƇ"d,it&G#x\R$1Kŏ-a ikh-f^5)F>!aiyHhel-f35}j)nQۿ'[- jց'_T7iUR7IJx1ټyrHo/>;K!Xׯ$Jj[]x<G!p;  L$v3Q%wvufLΦX$r)Yͦ  p6Q/' n@E3KDzVfE scߪv]-LLCޣ_?jȚZ/o~_G Tl cjeނJmg[ ]„W*Ksg;rto G##U*{v5jf❄GӱX(-)|rF f0MULvg;u萪N7n$.׺|ۅYg0ڵV~44mb [#\<; AP 1 ^u*+@U 2(Ed\1`9ro;{4m9d, kHg?<G#~FLf=>9cZgV%Je}@{ߋ6J~MV%8KPh#3)>^o6lc}kSo:Ee[`JזWPQ[Y[;o̼PL嚚RyQr芑[64A[YhO=2?aU|־ DGvD (G#~D Me3#!-Yf:Y*CY;EuqY%y:tQ~&q ccͤޏ}Y%i{ߘϪQeeGF8t0u.3f*FP^?tR9 /K:k I#vsݖFLgC6ffW$~.ӿz<GBamY8Mf`Dm2,.5kUz =]|\~1%EŰ1Y[}kC:cՙ.џN<N7[P^k&I Ñ%+lY㭞Ab[V̙>I[w@iV;{G#!B ff' HrKFUî!VFZ#n.\Ȝ Xqե#ˏy#8K^ k"ojl܉λ/,^P&_RUm6TU}4 Z3?˾"\+\Kf|Vyy%n|6G#x~xa#5<wtiޡQyY6Lt͓+T"%wi~%eaX2OeH ,\:ɖefZȲsSNK(Hg>UZC%@׌}᠊V+EP$ T)m,O= IG#xa1ϪUgOVRi 9C2y0J bH}YHO+kjZ^xﻑ2#:dc۫[a}oʖ7J@YṀv⥤XYYYs$#<U)Ջ jD<}q1;ؗP$.lBCW9EГw<G76&Z]\:_ڣf֪R0#]3DrH]Ji_F;lܬ9M׃؄loa6rԌxZ q_8ؕ `1A|<"RKO73TUjm٫mw'hgiV +@fheVW k #f,ΡfE 2G#|xaZAt.dyJ!m#wa:!}jQZHU^1ämLt(@Èdd^].!FcwǾ>\Uq(~UEѰ*Y£c?jX,gRRR%TD$?(y;|.p.g)b]RWl|G#xXu(: AOj¤!BVZ4I0B\%B z5;d͔4w~54p2Z,Fڮ!it e-v8 iɃ-u UmJ} W>aHI;syh-/_U*~.ƿx<k6;qGsC Yں͚w[[!&z\+ BhM]~=Z\cMSSLIԣ9Tĺ(=>[D5gu3a!3dۨמ;#dW=F- zuMP=G6K"^-p,? 
^^Sp#ufjQT<F %m =yaA33JK` -:Y&qÓ8 z9lQ"f] 1J|gyva##?w.'ն8xw"Ȝg(ޒ]}~w"U+EݝomυzYR,'Vy,E8RfjKs-`naݷa'ɱW}`(ǼnF_ _G2Y3d6Kk2˯d:x#R(~,vC;|Ea0gq%_ʥf-+)F 5GsJ4ay'hmˏi-*x>(i>IXIُO$~3ۛvrssMS{;Fa Czw2?^c]D Sɘo22̄Je'=B~zcOB'Ce1c}RSrv7bN=E ڍpȚ #\x{aϔ[uqJ(aiv1s[c 7;@rjg Y[YUO/.jhPZA7s` B-/A+yl~[fNbNICAQ Ʉ'Fҳa䕔cQ ݐ̄+&96N^l}xDS A'f雑:j8qՆu<=־G'SgmLvW94dmt;@xj]ma Wfs&*uxɚ-j ۩3v"`P$;p1*JFƱ#PIV {oIr:iXQ}m[PwGFf Ls͆!2ϡB4 t *UnBŴ/=?mCFF ^ WvoG7_I׏r-{.1#umF`:H*㴐|* PsJ8\*TN'Zyl[bbĎ1ݶX.0H=8~8<\f%]Bs#Ӱ>N_:PRа':;fD~:O` ۸)b@aEI^03X3U2hcCN0Gڊ^B5m ',L\i(B6 5I C[N:\w{#s܇$+nرekA&Wm܈9a!j!dm rphu$¶ذ~=֬+ dšN|]î=HI WcmI>ǰ7aRYkQr$_َC|_,(\0Ƞ%lMߩyr V~o< /R03w w[f"xAL'\U{E"0+tPVZ1͈ۅQL`~1fjvUb8F/\+&˰&-<|bR#۰l`IDAT6ScV[S2To:X~$҉\jmxQUYnQS|XdZ%ӗ1v ub\em\w͝p-75nMXu]'qeh>yYwG 7+ޏ>v?)<+t[8{DͥؽiէQ@XD[1zKK݃u7o2Uk#>`w{~*˖>)u2߳F؜?Z:3hwû6b44`ov C?AR`1߈ nrbj:KKFdw-hWA&U"%- 艒O)>ڈG7W U1EYg,$PTSHKAU3ҁѶj !cA! Z!'d)\hFS[V_rQن7Ecs+#V@A4:E9~#k1BLsrX&I~cOWV tMx-/SJ,ȓ| nI:O}>,) ` s@6mئF)5ՃnFٹ鿻GعJJ5Ө?5nh<Ы|?kV8>vo[ʧ57Ҭ6}i ӎPT{zW"UYmH>I>eWb8ND)4&VGlWZĹQuF"](X1tPFNu"XSv4 I5vz_%[LJ MpKjQ@$ XFҾC$jAF0 $yҳpeu䕜r}}KR7s1*`??0m.s3*v.Y^@v΃10qhN)-\S2ÀْӁWY_Ev U;wPAC 8EoEy;:-șqT_q;W8*m/8Q )SۀCy{5 $MQ,7OB=@_,: mA=ZvJ1¥+ךA L48rQL`AҹhId!#;EEƆM<޵vim58MC{*Pƛxz0m]OW͚/KM!!$ _0Z}fXN~zqp-..(=*/?6=G6zEK2)}wp %% R:γu#ez c knܫC&JaT4՝V6-َx[(Zj40Gf!Kvy' i4 YMjh}ɚE ۵gFX,Bǎ+0]uh1IҼȰ6h.R8P@." oY9V( $3sP' P*%]n GN6sa2\Duo XC$c\5:oG7EKoBc} ΣIV-ߣǹf2+ɨبp$h*HcOOֶwZXkЍem_6m2kc ;yoiq UYo,#`$`4فmhbGEN%k]ؔ~f,oO< rl#$b-*qY ޻*% ԈzJȔ(+ݏ_=rngP[}^?$AC8: 5!jc!%tTgF-Ս\XEk{`YA. uo,%*sd&&JgAi1AjFw>xc >~٧ [/!#r'gn[~qdզSY=^!:L\+xpS7K2sJd4PzM)ޓe[܌H+R}5ǧ>goA#xߝ8d9)_ftUB>6Y>$B|yx8y[]X3{Lꂡ$_F5)7]k6tkE"(.cFFiq'O;ݴ!k}@Ns=>?~9SP$,@Zf*+-0 P,L)fqtwwҘWr.EC>舴"9RSH!,7bVcي Е U.dE'.FpE{H A$I Է"(T&gKj;ڑJWKWj.mU 7\p`a96\^NN/^| աn—\kTSU p }kK>n^w\td.ǵo4uekXmXVE׉*lc*oD^ W}G W_w_wSU}N'xFOOy'ҧyrg*(Zvp^>utS Pt^dQI:R)vw 󷳃N${E"0W9ZpiDB&3y ޤh(gA5M\kerLD{! 
!>lMZʀ^P]m9^>kC-Nr-7:҂px{{2d$ bc]13ǵCRYdǺgqwACc=OY%*RQ)^s g2W[T+ ZdTk1Vr~$N5rE@ #W3VTؗUc䩎,)zx?%w*3^爢)̗䥫⭘ Fs>^?ef4–:wr֘J&BלntYfVzz%icc,=>ځ+y6M ISi8i9Jp(uMЛ1y;F?af B\]*qSFX "ғ}m:7 H]b.^6n(--8 lwuuΊtʄm8'4H-2>WF3m1gHOM뇢lXoHDh̴BI/f%mCC<ݤcfZ:Jz☳-JM-a zIzHl&K$G(ϐAN 5Z@RvP,I~ךc'd~HC\QX1X6gq]5Y&II"ٯT9IF#ުyIDpHgQJؤucM8o{E"0?8@bZND6|Ʈ=3 aӠ,ZP qIi1]ls I<|@"}dMu1d"H¦{n᷀NK^6-W'c;!nE"`$sɝ`۲TJ8$Ms!f= ='Yw!bz %| RaYݷX,|F`N6@ {C#)qșiiDt4|SZ!{IRd7 uqs~[,E pkr؏>InUO7*TI>=7^#Dqu& `wFd6SIeF4SꡇQT}lE"`8J*%¦0$I^ )LV!fh0v ե撊Tu93čSum;3тzG͋tW US%r}9iX,QhS$R1PڎȝɡL*1Qivҵ3q#3(`0xi*{XOqHE"`H" as. ~VRNN]TGItF8;uؔK5jIJDžfNdX,$A`N-:8JJxq}{A@m69nntE:  K.y-X, w`QvG-YU|{bSCb>>.:6,E" (l(jtQ+%Cl#$cz%Ms8΍'=E"(Tbn߾[B?n78%mIqrTY@?/ ccJ\rʫ},E"0Y0bp[㎟ ghztV}"N$TQq 7lDƽIENDB`docker-1.10.3/docs/installation/images/mac_docker_host.svg000066400000000000000000002337151267010174400236230ustar00rootroot00000000000000 image/svg+xml docker-1.10.3/docs/installation/images/my-docker-vm.png000066400000000000000000004134311267010174400227710ustar00rootroot00000000000000PNG  IHDR| iCCPICC ProfileHWXS[R -)7Az;FHC ؑEׂ@".lIN9gsfe Y\a3>!I'l9#(oDn.vU96HɜvGa.Nכ+w+ !Ad1Nb 1NbKMt7>,09q|f;Ƒ@lo؍@ 񤬬S!6N!NLbci.!ryg9deЅ&  'cvCH ?9,b%8{1& `xÚ/ e2bd΄MxIxJ"<&$t`8ʅ93O$QFbf lpC]!gx07;hZ<ߏ9LFY$95gϖ 0v;]ZFNbMe'06[[F%r7lOМ9;aM:QCZZzSSW-CmZCu\T}*3'LdO,xh= TT#RcNCZ͚5ZZZNhkӵݴyOj`0=rf;sPGC'@GCCgXH7F@^EQ/Eo^ޠ~:{2i |0423\nhH((ߨ18۸ $dI)jjgfZiz 57m5D4?zmsyyy"ĢdɉM>7e.VJVAVVVoM֕7lh6~6ml^ۚrmlB۵}wwwHrpQ1qy'bOι·v1wp|]Sz]u]Y;\ݘnInݺuY=<8=yx{|e%:{wS/կok! 8`]@@v`m`C `jpTpEaHs(>AA?1aTԈSFZE.<E7}W11X鱵|J'/KhJ$%&N;m㴾vӋߚa4c 3gf<>K~k$BR\ޤ/pV5k(90yK ۛrKR\SJSOOsO+Ky*xҷϨɌˬ"g%e+3fϝ%0 7f s9MsYd,EԓWqNs^g:og~-YtABυ;!-[\o=K)K3^),(-x,nYsf_++^| |oEJW~+_,,)+V::eu5Ukkkos_T4w} mBmٶMMM!M7"fWe-+|zʣ6m%>omGCauNμOw:owZï֡v5uh}}70?Q_r|{j;x#[ҏ7 kkvi>5-:-U9A9QxbdɡVASm?}}j{Ǚ3=}[.8_8vb%K .bwh}GUMN]SN\sv7o\vV̭;ùn _òG4۾xOQ{_>y)i3gϭw@_mye_{-|=f[5lߵ E =z~CG{>9~:99_H_ʿ|mHȈ%dIlhJ oj%Q/ ;;D f !RT=jc3F%'Z o1##o5 5U822ud.H.{X]|W_˧kxQv_@IDATx`\ՙ=~jf4eْ%^@6!em~d7ٞJI64H!! 
B``\ݒ%^i|odYmK+{kb|߿K_g/^.^H$ ܮB@!P( B@!p#`0@jˏ_EOk?~'33sn4E<?P( B@!PEh4l6cppwixя~t?4//, B@!P( 9X eeekv~B!( B@!P((**Gd܁_4!*o׊0뛰jB@!P( BF@܄; O&0X`0χH$L&l6aϧB>^~' <JoߩwjBE@ (hCjZ]5j<'$j/Y!מ &kRa,F~LT&DǝҶcq$qU B`0 LMh_ H|{vv6RRR&|MZ"hLԗ:MfM!>9 ݍ1>RiG>|:Y3؏`܄LM?y}cattuƶeg'$ ЍB$> LBfgO"~  Q Bݕy7Ev^}0;i9}1 Y҄rNƔ5Mc`%EՊv$xeqcg{Z&ד]noo8Cu>ǜåA7w*ntOr:ۛŶ Pe%ZHX:ގƆZNb{8%;'nrn7H` q_,<q ѳ~'j׉I؍jB@!8+y;EhH̿Z>͡q5UOHd;w;B@!pf^7a {_իcيUXaT7pDHa8jʈ(IQڟ|>dž[~BX79O\LD!BcIbAݒYEY)b .̇nծcà/0z{iiI$떿9Ŭ?pN>F!d/[4Z$ݳn{`8>UhI#}m(NŊ%+`uXeu(δa9'KOeqjx"}'m2#pov$_[7C t$ǑljPI{4Eq#>'FCL{]jc B!`>٥Ň_RD.H.E,'r$@6M'$G!8Ӄ>w@nX݅ owbai66DXg\3QȳdɰoQ5l &s`VA9G9''PDFN,tY@NZeIWxq,uAe ϱZSa5ه0zzBX<3TGA7smV0Y?')l77.T!PCפ iB 4;C~\6uUr1%T@̈́Z4' H9X8 E#BnR08ىGӡNkD0AAzVͯESrn=\VwK-(Ha1n%xx>҅*,زhV$ `4`t (-(`߁j%a3MbμYX|rtُw6Nj B^f*z{47&{hc[3R5k˰8Q\:$-ށnxcf.Ȥ ;Z`lDcL1Ѯgli JJQZ6.Z%i٘SRhÁ&J(ĊK$pb,[ eEYnmB aH ǡmmmhoo0BYH! z{~_x t yt5[W7 #6Z_Kg?pHkS.JҾb *N:Q2w%tTנ`tyX΂} 4R>@H>#>4t$V$NV’PeG[k+hA0R,'q\נXxP[__'c?aO'ڦP( Z;VD߮lDE\z0R߉ڮ)SD XI@%MB ~ϦrRXq bX|%s(pP;Q_>OKavQܱQ@h.5W#%=e$`,S7ыܮ3YLl  "vh<yE~ss'5~:=HEj#p ]ȴM+BvNDK#nP @}+峑_Es=j8.mVq9擸M!<yH5&q("!%$l F"fg'Ê'{bkעgzl 4P`;fຕZ O>Ay>tKywҐ[مZFiYڭY ' ("Y]\2 %EU(ge&3]Xc)/Yt7~7M>9Tӭc>+g)Ԯap]N.DE9琴ӈXHmֺV^%4n0zE3"lZR$,^Y%"A,jvSC2aZjcܼDcMӈ+-8swrq t򏍱?& EhY CNbAIԅPws:A Nq 3!F}mjhM2^.+^ބ)S݁+F1<܋=kI$H ;٬Hxň* B`& pR" y=&ˋUlzNVg>_jP(4DڻY^LA+ " u]#;<7.4D;N-Bd\HsOSr?tnfZHG;^7fэE/$jά ,]PF <~0d%B?a'т,V/-"1=0h[B7F D@ӯzHMMլ|eZ|]| H.crdcx8 ! n&0(m] P\b{6GQAiJ4?NjݳCkR(IVc1M Z6fiFCpJZ~jn33~ڳb/~ ڭ-$xAO4YpmiXi'\Eg| M=tE*дѬ8efbCbNB@͹ bfa qSK+-*ڂ ɥF݂4Eh<үK(36A2d{1ŊL1}i[ou M0;誓c&5b ֬\@+7Fq3>NWTGNQA)--K4;s2Wd"d x&o B}j#WOG cO[@"e4YftrC>TqVifO B!pRda2Oƺ@/$A~'{2T'!I LH + E%Dm^484)0_dɺ(a7Na9~߶Wn3g~y k3@x^9am[uqc,\ja27ځz'}BA ">gV.Z E81XA6}Dzg!~K$(}HC95ѯR+SPTHEß,r-?RӕoQޚ(`6sfP-.zVn&jc&NÌXIIT*TJ\c f*. 
ݲl(/}0LKK1cX2K3]8ڵȰ3-tjfVbGxe wvH՜fFC@HLTxP؊,ڐ>#3~8iGF ZtYt-syT":@MZh0C}y/$;fJc,D>F{6C}P(g I,,nڥd0<.d.)]OɢS*n&^|DlJ"V "~:ZdKyMjEpˠ6wf\ʁO-S+mӄ Ѹn |^H1x\ i`A%]ן)!`6P܌4֡j%SE*+ K& r'wƉ $q6cgO^LO`aqg_ryx4B>$=k2;f$bv8o }N_Hgv?qn\h&x6@!mIєCbgyH q nrl<$n7ǕMGkI?FqXpfgT QkV<70y~3)Ykx$/1٠tq:GB GP34 tQXVe;tKۏ! 5r%Hד{QVFrP("%͵"T^ݗ>ыr>}]DmV G@Gj^NGB~-c]ّ|Hl ?sd|Ѷ#B(\}\'[d0z$aؓ|N/.. $+LHcFm~}rnLڟ>Zv?rNM#.Om=v}Ǎ~?p-C1c%:vc [i݄1ih h!.\y@k8lymHJ!Pe&MvjE cvMLoRP) 4ldsK!Ȅ@k8)[2B@!P(&X$#|m d\-(B@!0s8iijB@!P(Ċ"go,ԕ A@QբP(N%B@!p QR( B@!P(0x ٣>) B@!GIF>j^1UE#aIwThlv_NwtGD>i ̟ΜwB@!P( wB*dl$| b[Y:)8Kl2QP1jf ;6ji=WgbC iw0KfA hD IrSNv+'S:ss_K\{'GIP( BF`ʉh?9&,.JO1%% (̖&Ę݃X5+%ٜ]gPYSڤERJEC÷Üb(xwEs5-'{>t1ڴ?A$/mPG)ۮd`= ttfMO Dv-fD}#mF8ЬFOGK;Bx$5" Eȁ,>@K'$ ^{ \i丌e菾gC+ B@!1#~)fè&#3ń;o)Ǿz 5}1/'9 }lY>{a껽*9A ]NUF1oC1+аy4 Lhɠ I0qA!nB"lvTmy&BdW !:KN^{8n'^Ǡ7_p)zc,8?=uMB@!P(da,,LL03lF|ʎ& E=<-OcÎ0k|mHI5cUt#g]aj[pm9^|ad-D܅Tf!:B1La\@@Rӂ 0b(XI3 #l ŭ[̞]u osX6* B@!08-" BaqV aztRF^>Q2!HtfqPc`^C^ہKܦh>r.l/ {7 90) jounJ v(NvC FL"j i3D6 [;u]B@!P(7:KPӴ@0 ”vRF|OyMO2 Qnb KXs=(sCD`[;Vrҭ ݏd:i L%H˝PĂ%e0",$@ra%1`'bb ă^ {*c]23tZTjNj+X,^W,t u2 Yv;ɥxd^VL$9 WPS/l{9[P.1r y#1ÃUsV`vttU7 XB@!8ЕTTg@cU;Wp&c$S{!ZaqZ0'dž +o>Ն6B\D ~L ~0Хf?H4N9;ۯ)@VZާq]j:#j~/pƌ\877t؏PTT+݆f`immZ%}qq1ޮXf?0p,''G;^҉h@, RǙD_>TQ01ڽW˛/*Ԩ ṛBix׆%xu&<HB* @Ҫ?JLcEsŕHZaH'ֻ{2}8&cIӉ/HՉ8>Kwў{Ye6ls:y6DB$[P.A' #9mTfaS]9Q;(^-jd'ءS3SP Dp`o2VlnxQT` x!B(j\02R,Xy!chc'm%@-XM\|/^e@}"q>>9V Д8!B tp▽bW%#AgniAGHVE䎑{K'5uk=PX?#H``! q&27Qii&_p"YedNu%%lWlq B@!0soaH=yR`-++ݫUø{P'*,HL"_DNJRĵKIғÈD L,|`R˞[~;<'>m}T B@!p,"[`7je\au봵l{WK/᪫㎭y}8FSF9g!?73)+Lt"%)TNjJrD[ESK a8-n.>LMz! 
Fae=*-|[\Ƥc%㞓=hgltns-dv\,ndeǿ <ÛNDyXgăW!`Luz܁daMI ,$~u4a$-f+;bh"Îsͧ+hdM1-\79GÈP,n'H4qUEiu~Hqhu;@u" 7"7,"KozY˱zR%R,Xj-p g{vp3^/KPFM2ҧ(gw?pي(n?S?UB@!PL/";M\$CKvv:[,Nx7y_[\D8N,XL)TR6 `"M\_xĈ*#cZZ[P8gr&U|׊Mj/.y7:;+7I09"p.;ZֲLGѕB4R yK֭[pB""Bҗmm= vZ1Ӂ+3\BR$ɋrx(/(G5[;JG뒓+5-iz 7Mc!;唈\7X<7)I=O5`6vk&Hׄ]"$[tV|.)MdS]&{ZCo <zk7Xlu~y@ZvzVkͫ`ԧxgR;4Ҟ!7 !HS o#PDrD~>ſ_C!++Ksݑ`~!3wȱzR틆E~r- 1SDN!!؄S*i։[&aZ>1pЈO-&.[sgvh},U!P(%}d<(--՞" }H*RyMޝ]vr~̖p,w@.2)(z?@#rErI2.(gzI ;QX w ݔZ;=WmM~jM םxl)8aؾ}߮YD0AKD^s'  f+1h_^Q )اŚ>\K/ pCPR1ֳVsJ)¢s@ v" P+$g"h` `^l*DM< wu`ubɢypXM$&8\Db"Ĵ(3$CÌ'9ˑBv \MXlmIF yC>S/?m4 E'ǻI3F`т{t1}Șq:iPWno.;7v*2qĒr;7a 9o"|3ͧe1(1jQ"TYYj? >¹K@͎N!V-ƢE|ȏdeE9$@42I)6 GKi IwT7D_ӷ1$G9ct-qaXZoՊH}V(:ڹshvwtthB+quBd^VV͙#06l89/RgJSe0t[ԍ>qޝXm0'袨Ju933wM(uښ`(xZۍW8Vrw45gpr>| hl IȁSwR%g|_܇?9N_NtWmot*3LT|}˖-VEO[=Z[`{q nkdp/-9"+AO{=jU"-}=]zRҁ"-ĜeK2Gg5P䊳UH+.Ǣ\v4M]Y(!#ՌC 09|uhoiE~RE唉kԎ[52~$D: $˟5 fa^e7/-#*^(*_|adፄU(w0)MZ亯M$Ody 0cP Y,tD9nl>.3M"\oߺݣg1{ʏZZ!C@L11dL!?x?Ԃs&.Pm;O W\r'6uDy p6̚%`xY!WV Bo;w4d 4J[J2 lj {k'|Rs-((}ojybRS졖sѓ I1HցHa4(XgpbZ(f(",):X2u8Yȑc(LGqXNy +D.eN>3&j.184H+&46` S|.k`P1'r=6HyiXeR)ӻ燓qfljlFvfЛ![ 9E12 r pjk:̛ eOORsB Wzl$|BLKF- ]CS&a=׺1( OA{NfgBDC<61Lj03{ULd=ق_ )#'  ǰG8LƢ+8N :%E /5 05]$.Ȳ;5(ufMKxd,]TcOȀ[_?O.?h >HuHo)Bm9c풵xn*pg,yj BN?.,^Y!P [ЅVq#b1(,,DNNf2 ­b1>""q[|cOa,P4 &QAY޿BNiM*'>+}t/ @ZNY2;y6Wgg܎/RB'e 70B~uAhB 3AKni!UOw =AzvH7#"Q: c;?`V[\ c Gc MXjg!K%3h($uG"9.e=?^sR&{qohy092b2>{rxL"rl k(7 c(_De+):)`(-ոlv(gEaN~.'>Ep0.V]~Z T_LFFJ#x/lJ!׎ߥNӁkuDCf.]*Uƒy'K $@ b3^>떭æ)p) &'-$|awHAI@Q(w?=/rZkqqRZZ+/Eby~Z%IBI39(JdfjK YErx&G< C}L?{OwEBn_@t[) ;<@ar)-L{n-z衇4l$H39V`M\(?oCa8`|ْaJ_BެXD ]vʒDW fepvCs Xh2$nl}c'^:JBv;"z aN~1̿0uGЁ*dTXqؾ1r>F+8)(ʚO5c ts\]%=HWrcܝl!pz+-,&òj7 zl{?1o"L!&q0?R#vp)6$$B|rYr#ya?uٯÕy/|1c12p<>-S3H, 4!m)"3S(H@_< p(06Q "YF .ˀhEXW@WQ|"B_ OމfM""K0|T9v56o IA6 09 f"b0pE'\[T9dFZtDJdK2=$@%BFȀ`kHU={Nne$e"见 8p;oˑ9J2TZLYKܣVjDfP^D֍2$AZk0fW `&+Bexއm&Ȓ*Q$?4|V1a.Nj_FKFvL1~~Ć˙.B`>/:L"ĢK-Qa =~pȔ_Qr.tzQ;|!ο[ @!NR7m= Pt2]Q\:dCn&LF@IDAT"N372Gm@0Ӯ"$فµw\AƮ,ouuB@!8{l"{dE{LLZ%۝H}!c%X7p]?N,~k"l|t#E 
TR`5™ƘsR`sڨeN8>\Cl;ԕ2YJYxQ~aܢ,iiKEuk|;'.AI?Tmٲ&M#Y"rիWkA;`?Lx8xH?\3Hކ);8ς@IjT 1uу+*.Ŕ9Y&>?W NmI>TH.l'%w/% T ϧH('L 1\9-Q6崈7g[yO|=^d-wEۚMOl3FO?zE@:{Pyg *V`RX9gh}ߍK.΅&)?1BKoǪ('.vq0|'CoօkZgW18 5+}1HumA(L~冔1Ief, --/ù>=b D+:^7V("{;D[K-n!+Vo2o յޒ*Sޏ7twą>;RtaXV]9(ZѪF3Jd{-> V&g@,*l@'*'\߄9wv*2MpVݹAtEsM`[$PJX@Ob.ƒx1Ĉ~#' FRj %k%@Z!P&+NťdEbZ%c/i%U̵S[[S^^+ЂuxЅW]8Ɵ3O;~例i/vV)lj\pEH-:6~N$Bdۏ֑:Uqx̙n"%tNp0#.ʲ%VF{xc5"V݄(M[2 AKe RVՇu8nEL4n!. 9[436gh11ǰlU_k>΀Ls= |hB͊  ^bQʗ"?/9do͖,Yi oZ930kA|rH4.?^uKLv\ZR'90M܁buik.iQ阩B@!x A|::(.boQ xw;:M&9X;֑ם)nii0;~Y,Œs2L͊g0_X9W[w@>V] ⵃEhZ`f.';;e ȟylFwURNq:j3A3Ͱ-q q18 32܀? Y9 9HX2 3f@%0ScO2 4:^G%$@B8ch*&ڦP(gJ+**4hWZʊ/ARX4rn! cd9y2>m"0"X3}, 6yG6s~(ơ.|bl_DEIZdD|7Z}mdTsdS_Db&+cH{ɗT8L &7Ҵw+͈IڵK\G#s rO\F\J8 B\F@ݲ{d;c rB=G}VLrJ\.NuN+Xx pv53WYI-d+Cxo\<_Kz} (NP`G'7vkO*t4k)'E)Z J/Gvn-3h-?NMفN t,qPg( @@(ɂ'yg$B'g/gzH~6蚂%8]/c,x,? b\ǩmZ/y$V M4Bjݯڏho:U!P(fVj&EP=Xx"jSGwo%HDi\Ncϙ"`!K\!ՋK59}6ht8KkqA Rey$xE߱F)RWAEBfj9Vdƨ6H@05]ӯSSh-&f5M~8Y 5ЭTaޟ֨ B@!pwH(pMrjqŝα8M" Y#xۜo&T| \\ ӗ~ !7}w"V!nz8LL9tY3t_JO#D̟ bAI $$0݇Zcd:k+YXx!#8؉=5-X|\~DtPXӃe+M-$m邓.=eh8vkC9ttQ* !)zUB`ZN74 &Go;>pFθF,m_Ab6 3q;13٘Uk#)#\ ɺ#E:gj `{)|~ـO|_ϵAaeK;ml'G6&>I jǞESR4?C3Q3ls)HնsYO}KF<^xukQ[}>%cx{I6&!yL_!vvI[%Z߮+Je-1VZ.< ҍJНb`FЍWy KxuoԣwᄍAecǜٝYB3JX}{VG!P(f2_,jQCMMA s!Ճcsbt`J͗]:[7\w͇qmt=,Q/}E/?eEh݃.t% 3%iׅ_|Ҫ}עB~C(IcۋsVk7[n]UO`>aCH؜8_r9U̢`~ XZ^xJ66rCKĂ994aNQGA܄\V36Wស܍' ~-6nX4]k@f 3@W jj4% 00ȶ-H0kq<-}SwwKrP}B)i 9K!-6M[^y+sDގw_=^ R-8s3Aάy抋`x[{ )*5\ /FmZ 1Vݵx`}=iyA!P( LFfmX׸GQ* 3O``>s O X`\m.Zkc "75nLd죿Ɩj Q.t= 2Rqw/#_t$2n/[ x،8{Ts"*6\r>~Ay_~_?{}7znvy w}4Ybmz nhfo=Mݷ"/Nt|}+l 0$~)# ]n xafΥ f?Csxqkȟٽ߄輲u?^=+MxPnر .؀ N1U&/_}>؜Crb:*qO~wU.D7s/}^Nh=G6 2h&n#8`^ptcwwᓟx3W157E!P( 1kP>wxÖNجfiZn݃;oOKᒵŮ=U>t&N\5T߿jSEN–{V%o| \^z遧-l?~}^9߀e7#~?/~m̚wa\xHH$"!_>l~y^}Q:p@ux}lP|y0 ܏ְqOK}^#njn4]&X-򗿌zXcAb0d/W +S#ߌ~|Э8ո@G  GIp^?r-6? 
-7W<;|^ouJ?F@}ϿyzaMMS܉g@ 6= Lظvs`U`Bgdq"̱,t)wxF,ϡ{?_>0Z/,ÊBb##;ꑳ` .p3 Pb\<ۂOP&6 Vo==Y˵,6( B@!82"iaxk |Lkvq\j杘[kn_]N|_G;EkoD)ܡm_O}Wb0yfz׾w.ZoO=xe7R]n# ,Ўb4-q'%"Xn(jwW˜V^|Sʝӷ})%sހ@0$Li W ൰MlYY Uc/'(^|{n_ >28'~k.|Gj3DMݽ9X B@!PL)q L;||? 1fAwq^|u ~?]xfhY]~TXb.Z<.'ۉ' |7G?|_ĐWV,*Ig;[^Ox9 xz pO+hj2e$DH% Q0@|{w_<f9qhw'^_;:嚞dC;3O?w'+փ GO!ߍM[vbӤu# +֬D౧6%2u밹`LD Z}´Mc`*Vū\v)<ͷ}wGX|F0x,#'Ƚ֥kFiZb>OѴ WY\Wb}xmUVYgqzQLl@1&&"(t(l0b .l,!L=:߉sѶE#ӵkwYpWzP( Bh,MqeE@hߴRլ-zm>= X|=r,<ruI:ZƐ΍amD.EUkfo(L9$^wn!x6"=uDX᝷ߥ1p_D!=a]yn`;<|tF`Jq>6-;wB-yb=ϤE#oyᕧ4K=cȽO`T6*:B ,8'#[;aԨbCdrǾ%zV'Ns&47BɪR8w>GvMgO G2 J k;уi|!<{>A aci~T$NGG 4u{IJ,RYb]܄ũr@wr 1cڭ0u D е"5r@w[`cvҠ9qV+ B@!G (f@|hVkq1W| 1.MmǘOy(v\hlACF`m/`Eu|;SoJx!{ĘR^ԃkZH]d9f1~Tkc{bnM!"ᨤP( B5aj^+5J(ZSie$piQӲ vֆ2'F"JEPWW{:Vw{w~}B@!P(u Ե_ȎD.w_: B@!P(\;A:.c[v##OPܟ͛j>Dgcq1r72AR }!ƳZ5פؗuppИ~Q :p@NI₈m>D%g"`jjB,ٳAA *P3ЉvYhذ&ݓq޶mk.J$OBB%4ɁlȻjg~]O^J dh]{Ѿ}mb#N:'ZF@ ]AW+ѠQQ\\{cIJ<&\!P28DbT$.>>bTR(/2d 3s. X<y-ЧV*.`a4Աe^9ڗP1slF]e7̙3СCH J[nE޽5#WK&j^V !hE̙h8HHHo*A$MV9.e&p޽ڵVꇨȸ憻P5UX2xlܸ ,G}ZӧO׸3gKpίjg0X 9}B@!P5B`jL@ucOv]`ǜ]2k2pB|j*NT]ٗ!.K-1PJfEd#sDezw#9XTXjvSoDT 2ޣOk_Xs"3P05La)%RtUuFcS1-v6dzxkv@*} $J>sL6 AAAn+́r$HsLCBhхek'I!Lxկ);-@`DSxؕ2^HXHv"Nmcfdl,+jafB#C#vd 9!e'bي8} & @GaQ{6sPlCQkmRBC ˜K|,8;;s?M @$vu>ɡ#!' nP5 s|^MabݶCB6jMס7*.yI,Iů?D-z+p ʾ"uwB"K@''Z˗7JF@/\lnv~YO~ۂ{.{N2DUGHE d) PTGi{G瘱{t`R1/? MF=U7 m ดuN[!Z0_a/_0By68#\|PR[j9]" `-%k~7B0o&zqZw(Rtr;r\Sމښ.BIC1P3H/9vR|锚[pnq Sb2~"qc+5/U2>֨*B:!دؼ1X5k֌H Xf {p$-‰'n:}ݰl|);u rbғH d`8#{`Ypl X`&4vFq^|W/ciAxEb1R`Bcֽn[SGWKw<:rSc3ཥp%Ӟ{?ހ .~xhcyC~٬h~.{q;Vs-HwɍBn]u-MVEd*!K~) 4KbG L'_7٘N-Ъ&=rH?|)鰲q1c$ F0,=mWMvFӮcGjkޭ+֛bϱ3CXE!x)p*:<Rs~ZO|]1ekؾ l}G-}IU0p2R/&Юfq;J$z:+5gw,~yui|w q/?"o@0mƫ0=c' жj,kSÒQxdؽm-ҶgD\2 Ns=>q4^2ɏ?{ gůGcwQcATR T)iZ TD zҤIE٬] 7 N6kq?}D=?!Y[VAC>zf+ YOA$DYʝDmZIϾ 3Fxضn=bmEG7bF +7ETb~\'mo~^W߾>| $'_e>Oxg1N? 6Ui"cר`=xŧa[5oG/>bHr6&\;yg1iʳK*?ɩ/`Аnxa}:b˄k-M{.aXtn= amANzNu>Qk_t?~C KgdVmAVN6ޟ<sEWhQw) =h%p7RbB?CE&)wO?Kb ytnY=s9pK3ÓfzȞgQjB$g}z$k6#v7r(y%!. 
}{wG#Xp6zf:4od;|Cd(;ϏѮ-OWoi^ORm^P7㘔/ntǞ["Vw4 :TŞEU) 'cH081'q AqJ;W(Ɉeq;W f =QS=+( B@~P`bkag=z,U~hX _FïĘ>!ذ}5˯;CU>!.07-AL *)2UO${hkk>@[51cvK5rP$Xp)A%h0zÜXZ9̑A!aI6 r PjiJV龉Sq\6Fae#Z7C\b6ev'}ѡ?dޏY IǞFԖet QjIv+0(MMhgsX|ϸ[ lĹxDaɪm8RD&,g}Fzuo#X9EK<-XJ-008;tz&ٷ {L}4yY|f7.لiԺXKd-:ĩ`ߤ7&#մ=}&wݪWg2?u ou B0E&'{+&0P5N.OyZy&mG[pW29NL,(%[Y&kx^xrm"8U=0&VDZ3(ٳq)h{z!َŽa.0v;x򑁰2-V 虃{¡ ~]h75?!jPS 녀F/MF&]R-mŵ^s/J86ͨp&KdݗJRwaxu褀>wMH9FML'q8z B82|wm>xhj)`0z4?9.Lg `wt" ~;>lǁMyئ̓۱~8r掁ѹf5\Q`JF >.a 8bN01> FP.JH/>#g`(HtM@0"~*0Q1bO@⁖5試)ML|NG<=~oF-Էq_׾ryǎl‚q(  6Xq|`9CEu9X_AZ^uɤ0qӎgox!p%r-f ܇R=u?x‚qgQO ?c„ZYmI3H<'[ȱv ,2Y'R2vj{ $˾~.IFBTV5R\Q cҹZꚕȨ( b&s %OLL}[&h'Aj:o "5ִĎ/$s-fc |]I9w'pUbD snaЀ`&]qd-LџAk\0,]@y#q7obwtlSx.Ц;6+p5{z:_ݹHdZ("+'oo"#FW#h`D ghHF9d`B$qaʁ]QGs09ԅ9ou1\Ixh#ތY&+7'gc΢M!pDX%`Χ녫/ݫ$ :/NB㎣p=jCewm1|`7)xdDl>G_oE<)XVLF6N[:`F@pRڤ) /`|Y"JL:˖'4ڟ3~WpP RVv؁$lڰ}b[DɆ{R>¹_Ë BRL4EǢQ.8Vw'+ &=g4FxScM&accciTs1 4+B1ҁf/$R;x5 "U31>A-fL; zoC׮!'ML)Lx=;y뇙HQKK9e1.$ShT,kIxaQD1ЈIڄ̯lts0QLzeN71s|ңrqX\:rTI!PW0N+NV|r0#$86oF@@6*lsQ+MNN64ۻF>EMjP)ǮܒKaynF/ܱ* I#[W@wG ]GaIGPW-' ŰvBNPRpF#ez-Фf7qͩhǩ6Ӿ _Ϟ W󶻰{X(<ӻ[{ZЅ27 UR%T%#`ƁM @ei3ӈڡO3Gif8$s葃 ,wt_\\W5LG.-LGw&#$%# -؇~Õln.k};EǓFTOGzio$>L}'DQpVa>=tD1 {7p |:G'̊돋p;p% #G wgbcb08|׃f]кV߃y '՛Mサ=Hɤ7xURJ_c=2Y߇N$Ȃ/E0Q\h%(@V*Eв' 3_[V~[ Y"' u]|g No/^'ΦNoV2gG׎`1s!^u}a8g} Owy!2+   Mn焓ĦGݡJ 2{x9rD@g%D{FhssJj^X IPA%I|D YD`nR^7K~f4n `nsV8pQѯpы<=9`@yXO s;]QѱoX~6ؼ)c$Ht܎cB_@[ N; v,Cf Cv)*eq$)^.,Y9(%1!M!ՄpP}$@Fڂ}Vkz| _1q.T ؐšk{.>NW6x:AӣЭ4~[L}tl\M(4 &S 5n]93M>7jzVعKt [wA.fnH$cO`ˆ}H)O`ϐə˂ފ>5 $ffP3k3i l_)wR\KJ"72BzQhWᬷW2F"lBS|}$رCs!{ݦ'RM{ k}blg{w_tL[Ow?A7qh=&XѰe'LVJcB&+ݴxc9kcڻ"h,Q R2#wYbãaB&P@%2>1"E\ԩB `\/%}cܣKN幬׽{wM (?:~%uhU|t鬒)R0d"c룩 ki郐֋1Oi`@IDAT]}0qZ.  
лlJAB0!R_O¸IfRLV+'hC j-#:7Z57j4\Lu wmqxo?'aݺl2(D@]: 2T}Y{?; 1L~7z(G3$FSB@=iL0 aC]ǩq&4nX=~(8X;0<.nӥ3OOzA*՚ˏ7ۤ"=po \HH+k2^v8s L㼞Fi#CPLYOi{>@=H" Χ6FqJOѨ($Ji-V~3 ߯F޷c}qtW5!طݛE楟Ǒf`(WahVE&4K8;wo8Sb JQDnz2 ٠`bc ǝ+xգ:Q]v냀Νt"m?ID4xq{UIܹHxz9WzG@ˮ`SF"LLipLbtbbQPj߀`z9 t <|BWNFXɂphKyE5}hO.<5%)g 'V'{.Y 8τc??7g(,pWBSc]܌j>j4]6n~OҤU5W扃j䝚 yA mymUܓN =b>H"5V 9Ϝęs$)S/~"R2 1gcQDFdKX6Is?:A08y" 隓4~4dS/!>T ekdѱGm\DJ :0wꮇd_Y8m—oGa5I&+qªQWP >uęOW3(A؁*AE" ZP!Y_1 kPPДII 8Ha#L&`:# LA-Tmg8akDQ/ 䒛Ξy-^Atrv)t Z ./'.\r…#'v\LF#Kvrq*auo$%!ҙ.\'hLrtCnrzIwt|1 -kGW4%^K]5dqn@ yM&&/O=*| sMELp&}(P@eaA2vyO='OP6WLU9/|[$Lԡ+ 33Έ57oSoJ}ήOoxisoP/T Q7d0]u+a8uh+*l3kC  AG#L™nc(#/89RhxQ!_b_[JK'<{3% N&)F+p!`\[GHH_ӅNvd% .8}Iv=IKW!~e/S"$$tY]QJ]B"B@eĀ32Ub# 6d`< dzYW*^.ǘB9E\.U_=T :@MlM[-#`r#`AݭAۨpȊ~ i[mxR\i#|gWz=G f&_GF"Y8Cf7.!sBEc /WWyIx DtA)ZN>WU(=2E*Pڠ*>U2d:Fr' lXL!-&P7X<Pu˃@pJYN&REְQq݆Җ@r^fB*ΟL;FamO4}q')*!J^"OVTM}_לCs/ MP۲U~@]F@6dB]|uϪm 2^5TDQ~ϯU2 "1R%uu 8vtE{1!Ssd2oAАp&xЭH?3\7ÆòACX1䷣%}X[!jĉ{d&Ef`X[w,pUeHζݕg0*"w ˒4. ݐz5Š^`Ifwcxi`1ӢkAd0lxI`ߐk_ؖ_j>uje#1pZ\$]WR0kA5d$4[z)HIBv"=|^v u"t3J/A SL@S8Mہca;'oBZY["^Rj3JŦiNc@6m(f &pe$L0giN^ bd[_ݷ/^{~%CPZo~e` nH-;gpf2xӯ14?q#<~߲co!F%֎xW0Ӡ}.|{|J4klQٝku(&Yҙ黋_|hf|>."W}ug` ּO/}HK-=`xrnS+\gċ=JvS(!NGG T5Yk؏C$PLt (17/Odn38^nd`iADCR9 GY3U7*.Ζ8FѲg8kKȎ}c1N'b%ŕz1&Կ7KdZz ͏?C@2u2n>܉1}2~6%Cpop9J9'h'Xt'|B5 __LT BMMRVn!2L.2 ߰c[J JTLX2/I?F׉n̔Ed*b LJ 5M=_` 0 "yGFމG݁)M(ڻ%6a:cvf,п] nŨmS)>( K7=Հl]ñhm xt8Yb}Sü%a V=΀ZBB;Rwr>UR TH$$yIr1: FV jAyr3T(XP( B@!kTK9#(4-L:mHG)-x-P_B)}(i5^8'ʲ "ȿ&k)I&S`cd8546ID /"RbfI=BXiͨ ޶'f8RBgݽo٢US}yⷍ;k>O@lDרm[s`.5{22zb$ʖ}zTt8Wطn=[5z<4vN-.ox00|XB7UC=uZi"DY$=JZ`pI=?nRxs8qßDcdx0[)(MB>6݌] P,]"{7F`OɹMw&PJg&ĝE63^RbWon/{JkgӒ+6 j[TR( B@!"P} m>Ú?Ձh@.@?T `a llPSJF((ߖ+ڣ>3ۼ= .XG="+$ĥυKql:4@<2OŠ{-,-*)1lS%%=ڵ?M"Q{)InTa4l4w,+4ocS ui? V/ ;N#> `NՠF C5t]]xrx~=^CG ϟs?v֘, &: Υ 3- +q#|=W3"KqZ5uP( B@!PR" |g>?1A$P֌p](z.< -vaM`';4dJ1כ"a#ZzQ0Eшtw ;~IumWݚs;+чb)5@%ƕ7p6U*K|l62w<367ư 6l[ÈpXht8{t3Z4L,/1g}y& @AtEz!Uaku3oo$(K@yT83-NVk6 W8KPgu ,p`ryYh2.k@@}/.~-D C@'x*.H֏7U3$2eT 2v0шL-EKlti@MW!Q *_o 'VQ] S! 
!lRa_`2&W)m`g`6ʮײ^]!Agcb+Y?-_J[H5&ɳ1vNFGS\-u=/O B娘 6Eъ1ȹ|(-{=U%5\?1~i jMQA!uw_"D @‘:iINHɣM`K5 9LȠo^W!W7g2~SO%z6#B࿍ -m_zC@F0IIXG:|&kD4턡:kZ -coވ1.IYh # ξ+bNx}ӲVM˱7;v_z O?d5eCqE{SĒ!5;-1}0 ˙c?p#?4 nY8=k)-. A}йe(݅?lr6˜ݛHpY ~^'nw]/*,n]"1fHWHT p/L@\ذe1jH۷+텍;]uLN|n#_Ժ2B?NLNX{%,\w#*;#,>$N7>ا*\X*nEԤ8w8uT=fBۧ jxǔ: H⥲GG,?]~{"&>6 ~bcLԥvhft%0΢xbh{Ԁ0.KZ@'ԣݗ_~?5>}wLtYqոfѯ?5Roj{ ,ry&ߓvZUb`JBM=>xE0} lKpȁu)t funaq@مCX:ap6`/a)ױxc(I8\0 u.jb8sM8179GwV"ȷ C0lY;oxe{4kRm\߄}Ӻ̺\*tb<# 6srr"iiiQgt逞3a3BGggg81k.$[_c! biy;_ߌuP)ğQɘ <K'J1Y}yć}:f$Bp^7lX=V޺i0ُp s 7lL}^: =f"ůkϰ`1YiӸsXOKص?~r~#;ǡU28ۊ1=#/8v鏕k#l,S'#'6.0x]8e;?g7} |&"o/UDR)> g/w]]/H,;Fu _/Z^Mg7`ᶍ> }xeeթJ]\Jx!pss;ٽJ Q7 +" Dlmlj6StAjk\*N!#0t萻0yxIk5?Lob歘*DߊpAMBƌ6= #`$C7ơS8sOiK1~Z49qhݨ-flk?͵=`;L>Ic$CA߈oFv(uok܇R3 Ysĭy2&c=(4i)iaJ2}^oxnhd\fzE:(Fwm~sd G8}ojJ.эL$;.-B :YT?OwuF@S ׇ0LPo4@ɓ'q9MB À>x=o߫z[!E>Ӛ&x4g?ypϠA8P₾]ې1ܻ*EF@68uMQڠn0 #jtE@ד[u(iUϵxkY)"t7QI)[;Kf<9_pIllYa~7E@ :I(>^2 C@0y":j%0ƓS -/_p,+_[ h֊wp#xF5?2w3gjotE"Ms_GTTU*ؐ. aRRRW)J A@CdMItf@\kVZkʥ"`Uy]^Z4m,u:ÌϾĆqԇbuH/tZ9 Ƞq~vJyOO7c׉w{x'l" } S?o@b2;_ghKLҲBZ\fgEU3 ޢ,17& NʲZ" S&MZ B@!pm1'D bbb4u<1(+y׶JKS@ 1riXzjwxLY3^n3r3V<Vk͑i[<GGa~%񺴜kBF*{fxx8^xZf`˰gFUf8wu*u-n6M{c}#:o47? zĢOCgՇpiދviQC;EX1UxnN<#zMgaScѩGk_-30xEE=[cb"Ѹ,Ip; |bJ,s$&%!!Q{ 5P/(ؒ1&69€˘\w{dTէP(DgW2y0&uB%8;F}J7:EN K:gVm;mo>w{Y'Μ:qHCNjnf{ߧ^~1G0/lzs Fvѱa\lޚ,L{ҷ3PVg.1?Ңq@$Lw#=:C6]ѭY0NߋhV=X3ꍬH?px0u[ƀâ 1v .~8yCf3Z7M+`?odRFC1paaf&\nwKi[>][CڙĬݻ/mxW_̙Ӟ;wc`VaХ"J 1U)W2N@<jDkuއ5C2Ũ_׿e4J$C===5Cz^uT(Ȅ![76f@gXdd>]}ÏJ ͛h7GF mNz#"ae_veAd&}ET z*d͎f={A!m . 
ե+Vqϟ6=F^ʐ*!4{GGG4iDDR i\!P(Rt@ AoDTW=WFƝNW ̷bPgB@!P!<#06$A%B"$pUd  H#P;Tw B@!F@'u&@])}ѣGqwh̕ *cܹԬ$\TT1LB@!PqtC?5[v2˦+kКcN@*~Ab1(I|q95y,/#떹ײEX4|aw&fYυѓNWڃ0*)" c1T~l?sU%#y(6oHDNLij/)C_e9yKr2ȓH>+ 7_{{h]Ӹyp8[Yp/n9w?^O3z8څភ<[.JӜq2% _DTW ;o}Ȥ4nxKP_a.a^iº?ٻ (.]%ݽHCS R\C@pqB"w\ _Jhg`s{{>{Fcoxl-:Ο~"prLXz '.텥p/mBg01G!PVw*e05-~h0+۬5 ػcxW|F)3p"<,NΟ?#x+8s0GNˆߓ;C*EOɵ"IyaT }h§t?Q8/oç[?X5셃[FL[Wa_+F.nA߯E(ޣ>҂\E#)M/ :u ;}7=ܰBq31K,6" 8*~FiP &AΩx)iXVxBbq_iӡB<1"ۏɃA &9?,+nC=a[|LD&+G#(S`9JSw^#,ЧL"^zP\IYE%j;,P9<+A~u$_ĥ[abl\6hg8Cf~]8v;&rڴ%Z'gQ$gýrMLDv8rܡn&`H~v{\@Rz6 ʭOΰHڵ)߱2n:ᆺ:T!?H ~g/Am0$ػw^m}E|L>+ʭfk}~q ||XK>-F߻QdQn+gp%dHd+]#?@Y)-F>}nkױ ȫZmo-ee Ʃֶ$D ]5ƍZP е['Xиq==l >Fpx(u諂>VmMud'b#![jU(N!L㛦ml'tD렆1v;ϚQٙ#{R`^m&[Qy?[4,o |q 8vTg`%;7p3_"-37?-^]^҈AR 0Eέqi]&?vfj)ƩТ͛7 ӧOPD.]XTx,=Qc%ssJqqXko񌼫Sg,3豽NA`|茦]xt4[O=t5gpe|߾C-ذ;Enyn[^Ss Ocqhs ^$$#?Yz k߉HNGVѦPܺ-Eo}pe&d$mf l]2 ZkCǖlV}{bi8G`t20,SWl]{-E<}tD!s#;s9?wOb3^Ԫېv]Z۩OCUqkbxt w;FO1{ 뇶d)A%/ZytpeBgVukwpZmnD@,O㘢]ܼ^G{5!aQʒhBY8JQ4rIPMEM_a{R$22=q~6X |k FӐW9op8/V &dعs'-[Ta2dȫș@bVEL\b Rc$geC9 T鐷ϋ@#J=#J_ @&-0bH 6ş7ށQ"SQn37 `+v8VFTb2Mf9~W}f"(<zDz9OƉUHx:u{<#-+a߾7_֓hQdTTUhfǂq>\<:iq \p 5 iNQyJ5wm Λӭ3)JN:ظq`IfYXi/_炧@JJBnt 4;AD'r ˈ2F&Ѿȏ %LR NjS>53=Y$M:*7WBJ%Xd4<:70HM!yPdS%5%YP(M=l=$2$A]G,qPb)bc㡤cjߍؘxLOf֫42tt*5)>̌!C=hTIR8MMAF,,{$dLNY_GSL*)/OJXȍcX|a< Ǒ0~EtTߋ|qE( Zz+˅GM%|df ΣbFoޅiņشYy|+/ u.}Fy1rGG= lFNDBJ ӊB|m%e$y!z:z0!{ML7lM@?aa`Wx  <|Sޡ-D4TȡP*^6--"k3\%"wAm$eɌ[chU=Cf\;| $ٮ w?y8P P. >@hbOCP%KT]3nankOʶ+L< Oʻ|R5hdLMP#E񓝝{p< T-:#E0ҢIL%rRbлu3ݦ/#U}}(iIvk [ XMBl&_t~.^1s՝'^+Dld0gϞ :^otݨz_5а=^=Ӂځ͚K H @vmp)˯,D [RfA=G#<Sh`¿. 
Fn}BEcܹ0aׯ/lcCTQS'oh>GOhBR")7A+9]=?YA:2751| 41z{<| E?[6M*u$S טs?l#~Ž}Gpfj>aJ&4Li &cOAPKHYzZʏwyBɅ2p:/ %-H/q ȋʐ}vyzO UToP?DU&źp~"N^' GҔO!5ʫ4"'M˙kzm >:]&!T3ElE.EALT-uғ8!e)>R7r8ҟgI' ijjb eu~ITh_Tlmm1矅d%$%@9&D:{9 4Y3/{Nдդ'];D&8=i &5s<_*NګL)_IS map*I*!;xSTxG?`7-P,Ƕw'8WA$[< /"[nFY:KAuҰl*bg܏a=Vzpť&P6:|.(I-]dH g)F|l b֮B7EU@t0thrbKhPFA8qh5ҟ^,& -bGU[2&v)ĐPKN"e5ۯk_;~j@.a6a& +e8-z%x+4Cz@KۖOB`Ԣy܉жr>4s쉦Qzae1s,: fP6Fs`w$h79bz>P/E˧/3;\8]ZwbD*>⡿`E$+SB4JLJT&d)RfxvF³I/÷G86CޟHoM8 :9g ҢmT󱃮ybu=L3 B &c ֛g':,[ߎD»Q"ϥ>yo.Vhx8ah6}=w0i"vxDA mna-FM:^wKDHa߰U3ٻ;-6=X`A~+PD,{Muc1lN tĤSp8w*1/mpp,_G,pكDчj‘9&Y ́NFRB¡Y%Pӂ X:dDPCw  y¤8盯5e`lٱ)4kV1e)F~-)+Ul?>pD8|V<:Mm`Jl] b "%ULy>+zGBqtV089W4k1# )=Tɚv0&O-gj6$_<'.!=3lf),I r򇐥S94DM{ng>E-IF~FJLʄTO~*s8kaC]ȒwII [ ѣFXS gJ@`uPZ{C;߮ ,H5_!8tޕ1#۹ZC R +  /G#Rͱcvo 95> ͪP4 h99 ^WnrT0tͱfAآ(ը{%xj]i8M{!0+5u#FiS\2!Mq}0=bUѨ]OhV-4iFK6 dWe=,ÛQ9L=tMNMM10r$8R&]U2 T,EP-G-QK4.vǞnjb_ 4Gcl7A|Z6todD#P IDe, ={îv a&'㤉96Ah z%j`""X`#ۃRUE,aMh`Ӿ%j5ol(rDz#}!|=2 Xo:YJ- u"'z탪 J\iû Ƕ5K2#[.MjSoG3 @^ya*m d ,9ͱD$((2 S:6uh,D=MAH0o\\ ~uT:x _[Є{!.'wsػU8W-͇`ـg0NE}k*Wv2m@IJ2•;dlJ]Ago y&(U`i'עMp͇9ѸQs֝Xѩ!6 8\ڻm}'BR ㋎|!r#wZ[H Gozt8mmE} M)2q6= f|_>F.B=p`][;nRo, C% ! 
?..(B쬜30rݺ\: m0'^Yk,u2 ) npp٤.K6gsu ?3GK p?eS6WջN) {s=Y2 >:Z@R dT؇e;_Qڇ,D%ü$`A`ZNuH8[--mѴ]+,9:}v ФNU؛ ..Pjs 2؅fp9/@~8@T'a|&e ;ȷ(&R6CCZJ;' 7jjyjȎ*@5";!:`˭X8oCseChvG-Rq- ^سt ΆSdARShp|,46N^^Mk.^# w8QPߧ\-/5U-JA1|TE`'JJѿU-4FZR]b&QV>2cdžv=GFM|,|G# x].|¥H(qj/ɲEYnT͕ fѮy+ȒBk@qرz!vP1'wԡ*4Fy d<9@{X=o:ubCjuڔ7v_~ ~ȉP1Jo;#LHV!OC+D ^BGwp8W __AAPu3nUB\{:,$6ǩ;CƆrSpP"”о"@J]b+pU5u1P$$R> ZQjR9.y,MLUaٞS!{2zLY] 1-|˜BJ$CP0ukDyi`a.L^iĢ#9#&o6sڔ#!QU`cgOuґS#S^={.3 N%Y0,gLT'л_atL> M-age*%?7 }r/PȣӠm~):=%XX?::;BCCh^l_1e*yX1T~#ja ~(G#p6ޣҬ \U"a]zMD>MotYf| -0h5dRJJK}u[u( # d:@ |` bŰsxK&fЦ{Dh5XH[Y+VW ,M #WϣCzΦgDVH.HdK;g\GAYGS y:pqxZ:ܲ-[Q&aO(>7[xp8NX^T?9bu="4+AfA3ToR*C2=0:yBiOW&%dQ"{cN1,6yT x(k֡f*c#{=[tiꞎG#P* /(o}|2ۋ:e/z[˥,tʮ-7x(er(L*'gB%E;G#W`K*0r.w/@'-b !ٴr۴~Ŋ0754g7`ճ>i OF(D,FJ#ΒrZK(}f&U̥/P~1 W)}|V0bQ_Wsi#Istm-F؎Xh.~۾ iת3~=^/ZA3NCB€αgx8 A9^k \ .]w@~fiR7Æ]ˠDr8D )^3i{s[+hꇔHJF&F0ՁD) H>N>Չg0 dJ{,$f!f6(#e#Y88ƠOK-%ej.NXDH]9@Gƥx\=a>Sfo,0fD_[fK#i|&RPv&TPh7,Ʃ4/RjQa_1v>-ߤ0||{D,Q&B|T w*RJS6TQ>\cʬ𬬽 MP+<cEt8*IiCYZQCHl_ *!)+&ex\|LtaueD!1RR:gxFk ͷұc.K!W8Q+Jhײ)@oOh|߽yeąSݸAҳppvC.Pٵ8B"ulw?7كcNg|ߪ-E@?FˡWLh0wt3a\&z&VjΦctQP'to6*AeܽK&~UmoMPerػPCCux>Gjf ܅ACC#,_[,E0puԲ_DA/JʔF*T1XJګ$-ݏ㛠hj$RB(h AqY x2ªf;9P3XzTeг"dʸXSxfb 1j<"ͱqPl\QyۮCLг1wѼAMl:~C8dј~'LAe Tx78 ?@B u2X7!IQAhR%85e a-s.Fȣϊ q T`"K#-crTQ iiKkY&-DFۅJ%,<{s^8rhhZt&o4O"ZQŲ  G#o!^__Xo⥣pTcggki * B)dC#!QjV}aC-M)@ ݈hEQ,UNQSp5])IϡL21\g7#[ 3cOrSڹD%,f * dR'ѵ%*yJo][/e*K^kW>=& UHvQD6NwwTRձḭƣ֭9\bL\ڿ=Pjxza>!pwuEe̻Z-l9|LK!pqNmbUK@#!~߾[lџKۢA++O1?@B :8F60b'9YT- yqER-i])_`G8yJN*i`VɎ(?/HL•P]31{U BJt#Mծ[@dWL%« MLK"zڐPG*UN#E r׭db#3-ZcHT^ih&JBUpN5P\> 'VSMJO&cu(c^ݼq=nzU3ѡ;/g  tݠM>@r?4и &MZ2 ÑK`5t=Aaȶpo:E6m9ٌ? q.Sq,6赖_ a:7M9}I PzChkkUԭSN*a\q6 7G!`n sD²\I!qdʬQƷضgIOi xr]z􁓅C^\|2؆_? 
aT՞>-{!w@%*ڛ&"g&Jzx)ۘW8G#EFAKl=73y еJJ3Q.2Y}J ~/}`nc}] DylXVa`J o~{MU1HI$*qc_Cߕ&&uYRH?!׷RH pẙkCuHȣ`Hv[ŭ=ȥXEGjY7V)uG/t p-E:md(C(5Z>G81l-`Уbk#֒PswGh?k"9V`M >?`ޤRRGO8T!e'"}3غ;QMP%F9 '_BԵpS\|8TUzIXz=[5+.ލ5M; JԴf|ShfС  HbɆ}DYÇP&ǂwXzw2u+mD=HѮ(HL* ;'$Z 6a2;| Zٜ&iV⥫pg/oG#(}/DP$r^FAT:< 98LE@ ( iij  ;p-iTlX}Sp6HNAˈsGj2"Ѱ6q\Zhd ':P>; @h@T,ژ?m4f`y-t}!o*Pjx1;ycyVtG-;6¨_h^/D ;p4)Ig:qf~[Fm'OG8tU< /kȻWJ5͙z=&F̈́WԀy:,`ѡGE@J6(Q펷(*2x |ށ? %9:d<-Cs{Jۻbs_=FW}آwv1w>;vu)Fh7 ې (ShbfАTCtI"@o K=C)% B݂b0šSU3< rn aBbyt-{FvoQë"):@ V. 5KA(kU0htiޘҟ*C"5Ĝu[K ` ())(vP~{0#`pf":P>F^xFkh[ϼ닕W-ddQ1߮5k`[dK:zBY8Fl\jJ2OzLļy3+A7!*O0l!6]^`m- QS: 1g@o=Ϯ?C/|*Ԉ?HH(K]nbF No~=G࿊B}۽_ RW Y)TPI!o)yD!yC -) 3WfB`Awc zH ,>0ڐr_K u;` }OfC ?a٫:06qJ:8%-!6q">TİгCX(R3$00j9{N IqR@SWU`g5;[FRhh2Sҩ)L 4XPtD#woh7`"£)`P3 k5/I/e@G`]n^jIQiڗAU?e*y~~1Tz d,KeYh?:,/190WDsk~&F݊IB!Bo(TN)䨠TN7)M%$V(&W 23+?>4 ^3iu¡`P(WΎv"?Mo}`PjJwz-]}ƖşiCMKkbZ&L!)ڋ|)ؽbkkW GG/5~2g.K|00|ёNFqApwwg ~5$alو Ŕj hU \2&-ByXV]FD!U3r[jCM ţ[x 91.ގ`DSN~5J''U4I.vׂSTVo߅Q̩7o8ܺun}xp8;/oVD9`)=)! ' H_Rbt S+ퟚ`dI&ϟ& w"uAE$T MK\.(mRʓMRʇt|ug;ҥT[|}uRfߍQ%sKhUOyF8<33Szq%?pީTV.Edu_1QmHg?ggrS.z5EL|gB8*`? FbnDDD$\]]`llOGE@r $W˓0 <-ڐт2E|al^*G"$!&y+œ+#ߝ#xw*lw===ayP#(0тy._ (x_d)L#)nCE;"afVJ@Y8_UkCp `QS%))Iq,D1 yP͡TSOcQX= |n_.b:QI<},߼q8(>8e&0YʒIT s޹I9R"~[`r0!LKK'zA.&ϵY 0xD4amOJ& 3 t8?`4>  cEYɝ9@ "P  <т2Iս9R` CAU`ĖA45)HLJސF3)Ml~b26H}JBp+;ށ2q"|Q|E)ɸw 8::Ĉ*{mCd$˞<^Ob10X*XG&"ohh(dXѓZrE㿦;`?SY VD,22 Sxm#x?_=|WSbbH {{[!PZLhWg,3!Q Y) W6X DP FR@YC+e|$/(vv$(ZKJR*{p1 >ocEBb*(ILS/YP(K<L`.QJ :~׃--ͅ?D͚[f~= )Gl!}p> HLQd^%CBՏX1q[}%xYQJ?dD߼W :҈uMPZ%Q@X7mmm5LMMk(da0ESG:f~\ByXqF?:@G'|e?0>\@i0 ļ,&Ki*"Fjn$IK}Fv>N~W*ftFu)`<#(qkaкD mI (IaB>K,h:lOLYP\,TL8gHtPH0=>EFzl-F.##hӡi}8~A5ZAKJI(g1%\lHxJ?ɝ:F,6adz{CQ2$nܺ*xL-;U:0oR,x5LsDq1R اxUВC&kb'2Kìlq5YK>O+?D&XlS&!>jG"NEI৲Q 7L1`.TXs:Tv&Ozx tAxX(Q9_bg. 
ȓ/#?I_ɪdX!z_E{4Jm MA}qW!wLy5Fj)x d46.~ }ҟBei~a~haCڑ)ER؜TAnܠ(rh0;'5#2Q@:;7)_` 0hYo"~+LeJߧ7Wo"NE@CCH 8z(<(ggg:n8bУ`IIIY(@_̏PtHft&{QHdF8ll5+GHޠKGJ֮ϝUжYMkBphlZ:P#!>< 9PQӄU $S4QoX-MB[Dbד'AzN.Ԉg/ʗ 6>jZ"]BtT,RbH/Fb0Ɣ 0>3A _)LA` ۟7#\yp8;>::vՊ8q[2G='U%a ˊ,|&-c0K?1  ~t xm*Jy w*DZa] F( i o00`G 5tEH~_'67Iia7놟'O; +5w8̟8 IM0lxl襰ptrY;3sL$xpN|G+2Q|ea_s1#r34Fp>w+ ՟'<9 JB9h"ƌoE=7\ 9ِ/WJ҈w 3Fˬ"ӽZ@vrz"ހne"AeU&S.Ukjh+WOh?Hx.e$$ˆS"ㅂÂ|_dE{؇rJC@~/OFm{fi>}umS?艘MX.k8p6lY\cGRDDBAW=Kyȍ{(H="K,*wCrBػ ({qq XCP(E[ %EJϛ͆)T>3&;;ّ+ދ$(,X]ʓODw6%jZ6-/T RHA21)ǜs"!I R\\aNnzII)(XT!qpbi%X:n$W4(c!_cׄ =88,]j ;xp\ML-a,#敘#cr-Mt]<<`%\b\ ;񇀭/34&pwh01!7 E #K]`oc)-R8Z31-9pgS3ۿ0B"XXXS /YkȰ`DR.ӃflA}okmkx8uσ`_i,Kpc?}L 4nNWly:DO[8tuMToDtNwFvmQ4w .؎j\o[sh_P!#?"@¿_߷+%s٭G@rV. ^^^8< .,0Phh(]&)8-NA"MUaL4SQ!Ehކeîje1K)=2 %EcQH#8Υk3Gaa*~0ZTq'GtFCbp Uuŧ#GbsB\%6ŊKZ{"Oɖ( c̄Q/,G7f|cͬyܶ:&쏃PvK-MBx l;>';-A17 \Bel9Zrk#$Z/ E1ȁF (kG){F8(X?';/B"п^SAs~)Tm6NFպW玞D$bӴy3,\ϤoMJ-.8"SQH#9rxS )OMFz#JPWQ,-f۩`GO`Uڏ~5.wiJC 4H2^`5㜶U[bڔXv-L(^ÿZ/;6B!+9-jxa{X~7yZ" q/B $/caereˠcjR܀8wy^N4f!deOVz9Bx!L>]Z.nmTf& 8jIXx8ό> N& jݲpSREP5"hH䅷܃.ф~x۱,jT`Pd:W!> #D.D] ف549Q^rܢq$bMwK 8brVu *6h 8gvT|M|:ks'7"ʼмb1$$iѶϧ8}瀤Ta Q3iVc`꼷A:]n7p._aݸ yg`!W( &[ Hȟ4kM%/# #*.(R @Rb.`A3bc)RU'/Q aCXD>* )I>$ @X\ԜW},|IƯụU6>2zOzس769(ѦI-7 Ie0G}1.Y":+"QGnh!쇀&\XW= 8$hI} E@a20Qb{SG%w\ߤ>Hi^jME Xq-!j$X;-DJ9[8yZW}P0tR^ Cm1ݸLQw_5Q1?QN)S{FϸW%Ɛ[P4 eA45v41{8̪ӦHm/"lDV {@im@?rG[#Zǒ)ヹ?0'H.N#0atJHGSg912CRjrhSX3*&TV7_A¿n"/6S8t^0xy=>-Ă0\D1͐_10`^K .vj>l(` K'Q H%?{BX)'XCTqѰ%$J=(C 2\G@sz}w)oE.R7^ec̩,T/ƣ(ꔁY_ԮXQAFl N[3a ar,BTS(#a)Z:@Āhk+=T eN:c ->د}K}Phz?|=ZGcϊp`50G4ݚ@Ԇv.ݞ?hԧ54'vhZq 1S}_ {!%Ku;-1}b:m"Wڟ?G>|=k֋X3Zn+z0TA!z'-Ǎ>x+M%A>ǻqCK^,; .E˖@3CÑMtjLn߀0aj6-'"1u:) ^XG*2d=k_ c6?:x?rV zOD e@#/()|36/ۛ}H ~N\$B B$yNBnl.\43Iwr!Β\{6e8yeڏ6|R{TGGcXX) ylo+KJhk QN7mչX]iNۏ] #CiJA>Z>.=`C4Q `Lg^Q aLudjiǚ.Hf4*J;;RR+yQa09v`$V*19]" n7#ȵ9.Xx,.hظt4RSE ":W.:Ӊ c.[4%fܨ3͐_dBP ̐'+'Ðw`x"@<_71tJ|Q2E4RŇ$%PI HfLO"6 58l]!&CS:8fYq243Ҧ "[!LEJY凌Z+Z _ kb3#^W]93 In;,9>E"6l>,ѽgA=vy UN cGBABYR "6@Y Vǁ6}w7}?KF[+vߡ/e*5~X X5 pΔ"̈́) 
@<=mK)8cAWQ!FGD0i&h"$eyE@?Ho0oVBض0BZ-] -[,{wSxIu W.銥fx޼s$9"xǙkg-%=+`=CQ9-k%]F@F_Dċb6S~/MƝѭ㰉}xa-LNMP8\imC =޵%N$A4ijeP,C*Jp^g!𭝡' ғ~ 1̑oN}^G3g'(PFi 0BR #,ʿbCyq@Ui62 {㭊8A Wy6Sz EDG0< FX90XVxAѭ(ԡ:LMBj- ,T"Fh|jSWơNޯ]}#ɒlX"*\љi@׻_%H lv6aQQx箤C9\G_aQ+"|XyǏ Q|223oUı%_6 Jmy=H11_TxOPZvf"*HL`!sk,On$ <ҙvaΗ9neX MAj.N)/ff14fu^fۘs_zeDp=Gv-`O;[N2;Pc%lt+Wå7Q a?t 5fn),߼)4v._=G/d^߾rv|kgg|Ң9}139@V`mxS(+i(a= ¿B DOAs~WWM?@!FHmJS1=6E"CK+7`mgTB8#85)O.<H-L)XZh9LM`AE iއѧA^\$Tz}#ZiRHQ3kRLSmYP }f*sgNf Oԯ\T]~g}5]?,ƥd%? 3_Al+ЪeMVLoҕ)XǴSKOb_Z_ a7wU"Vn# iiW?- `čsiqBB@(B9JP!#w! *#,*&;iX70}f) K܏43@\xRݓĸ[w{궵@zqL61rGr(,\<ҍ !>B#R⵨խ5ܽ]qQ$盇B}JEuƠWhZىXT-4$)';:ž)FEC\B"LY7Lor"3+)Dr9ieJo F<9qV%#]aɂhDP-hM@dL,X/ A1ae,Xib1zxp*yŅoז0?T @~%f8LG~FuL39Id^G@ u?TG"=^\6,,i7k@| \/&# # # # VE@UUE>ʇB2S R#yVP=\uB? ) ax"vִZadA3Gl"61Uђ4SW' E]!2e4#*kdZ sHEBu4`ERR=1qԸ8>iX h蒂ѺF-ow~S]2c}R;L?.#K{||#7(5sbkS ͕~,֯+qY(yPKF:edAtQL&7 `N)[Xx]pJd,c K1ſdOnїx"  g+ʢq"@Zi&94-Oа#< YܚL|ǞB1Sj]zN: jŒx#[&_@M{=k;Pj͛W*!,E )&3,D7]0~X7lw~1u@ѢAۺT_{|O/y_Ñ0i|Yqf9v~qWjF/8M?DzmdPoDA3cr\G7LPRmWIc"L Wl'C«j7p{>Rvosu;XB vuwuƾ_{DIs3 }~QHN ?aBco`!Ѣr ; <"`ߺ2ޤ $QWxF% PwشL% J*7pP?eK%梳XŽo_(c(e&# # # # # #C.Q.zՐ>!m *4QR7R5Rv x{ҁ)H^ކ,c3@0LpQ#y&uCaje ,hIH)ٰ VOG$<;8- mJ)kdU㛏ѷhxܯB݁r{~>;bZXqٽㄾ xq WHSaT^6jӢ*8z`Pޢ塻k槳MLzB4q+&,4i_4Պ*ѐ OYŸ@UNZBWp}% y CO+w@୊zb{#ߓIt0KwA ' " ?6$1%T<Ȝn|ce $ Y|>+c[T8Yלi ,DfG#7 Q1]?ǖ9 a}(['yFX*I]y#Rf@j!O%ߢ5j??.Aӕ۰WCAA__!c{LL,̻P(VLZ} $ձH{88׽EԬ#bkzu+J[:"O~qPfLq;SiҤFY!L8`f =dkO,z&,b0GD]Mh%σBBp|)&H)RJBɒ%Q/orI9Hx;n8~]k% a&ed +I ¼6vlل]@B tҨPʕȇ_GVmq3J#)/ wqhgHldQM*y#D} {}4oa-^ ÿaNlb 2kφ41.nf~+6i-3PK@)^'%K@Ao4i n?Oo" S TGC˴)T` E@(& ǩ`[2_/#r!9.cA_\݇Q(Ҳ;{£;/o;:;<-PdYc38䁽]#4Ƃk4Z@eWoDn>Uj35* ?E`V ӆ3elq'_LiĂP3_ao}_ [1 ao>p">kWΪb=g[nQL%1ivF._-l\0yh'*M7Bab7a-z#l݊#LHÒtE宓0}3Nu_C'`q^>&GK[2!*6̘W,pt)x Ο9Svwq)nTuɞ-fݷz` ޥ13tZHF7~$a]3u`_c_O&Hcݗ-k7aP/IlX襑--5 . 
ʰKaƹo?M~( H=@N?=%i8i )3P Jx 0ҋA~k}?&|+e1-*M\E~Rx-aD"{;f]'XL/gcJXIQ(O ]31 vkHXf4nn?ooCL˧#obՏNAH&عhsDuoe wC>3t|5 aMԊm͖<@șhw"`F%#8+@d6#[t =&y+vbZLߘ%s'y濃@N\4JWgZI+Ec>MyN3VFG*{B˗W>_DR,dm7v m߽n0"g:@ߦ5t 7zyol"⺺{K-Yt3TEey5@\п8E)t*e25۠[z4 y6bic;Kqr߼(,?#HOf|āQ7K aϮݤX A.cPQT3[ڋ,ؽ:tb^W/PSoxx~>b|M}$f~ʕ!7= /zG&# # # ##NBv$B M$ԒHR2*H|rMF^ SR ܦn JeDGQPW#Yӗޅ?Ŵ>8J,5rWO`Nak%ggR#TVupO|Y$Sm cd^*/3Y[(iE^U feV݉MAo }[ҖԵay!H޴Fbyf*l0J~=xiF0bPɃpI <==^eʔAŊQXA͏3b") ض2z H% ұbr9 *ߤD6 3PagѸ0#nQUC 40oGl<~RѦ@(:fJqTGŘ 禉`[:c\PH8ӬQ4) ?DZ-PRa*|x'_aTWx҃H;Qzȷl-6ozaQjU"&~:op5(>-~7 k-s^@\)k!hW ~؋.O_Na 4b&e+DFS7|9)وr '\=?lb Ѫj!f&hl /wvO'dq,B`J{B5~cu,~;~?n\%z|a3 _@eS'35nߦm\ѭa[XǴ-xke(RܧU]]#gR:cnݪu//!VEbعk: RoƄ8H!DK0=I8s1Zc,XdS)ɰGAY2[FE83$z1>2*1P>ɣŢbFؑS#0?3ݙ¯/ң3X:GdX$` p4|A%оmlZVº;Q`pg Ycf w ٳy{"<OCih(VAL=jؘO:B`?Lkt=2OM^("yZ̦Qh3>*Vm`m\evvQqrc2kr{xL"+l3Lgda^8QCG$[úT* 59+㊰{yHV%CSYњE3.ޏxVM/$".h( =K#BI_g~vn?~222%ު(Y  }$1ިEGbTB&銵6ԈtfzP:Dѿ Ԧޑ܃R2Me/w+WJ+Ti>AȭxuPFuIJ_iât(^8& 0((jCӭGWo9 v!]+LnJ4ff LZnjܘ4exi$"f2b=,6&;`g4X,"H)b9R3sDA0}8^Bͱk㌕HU\JҾgר;No AkO OEr|8""ѣUcLY |c"Pۭh54-au+Cqw*t膂wЊx̦eb>eCJGiD,eJx" F#)M.=v2U {{)R?_黑\|wXNe!<*ք=p󭇯G{.5(X9jAsGҦ YLc ۗL QBzpjׯ0+~Y*ߎvټ E~p8{򥡉^UXF~ݙ>Z3eI^)Z&>LOF D?EiRyS%`Pݐ2222C@8 ~pC ;CA>cA_jQwQSoя_V>Q@-woX= _%371ֽ"&)\QN}z;wonԛYyn!G[XaA޽L[B`KLˎѭem&}-s'#_eE޺ uo$m*9C/`~߬Cr &Xk<U#̚;>N~lǂYP;# ςiGS6H缔>tХK#i|%wĔbɰxR@颅Xٻ<:/ye(7w]ë60d(ƖjKq <m}J^_1fr`CP~1f1cp~蓱]|})黍W),Y4%l%N\#A\~̛Ae@M%@\֮z5J˴K򧌀P'yDQV,9;;W#j/*IÐx@bb"BCC IC%6K0 8 h sDpE #\"xh낋 obs"=(f€H<~!ZxlNO'zOeQ^5Apu_S3BQBB1cU1;@=B $AW|-[wCX=tޫ~N;]CRz77WIjûX| @L[ܷJRr3@Zed>L_q(n^3mi~1>0$៟%B".H<4/Wfrrr|TNn!w&>Űhlyddddd2Jx?;:P-!gdjPV4yu*2<#L6oya$R鲉1-Ba!¿_VX*r=hǒ-ztp`?G;9(i_lg;IP30Ν@ U23ʛ#P(/P dEGA}(4'6)1ׇL "g¼#\¥(B7G@(/?{z`fVe"~?<<DGG_ЛG):@Gy̋phwQ9㇡rtOK0nPH!yP~kĬaJPDP>6 p˻|4τ槖O#@@RtL gmD\|2poҕTCkЦ 8:\Nñ\/rf A[kΛɑJ t wO8kMJFXDn,2 B ,f;V1>uqbݢ'kQѴTW^.>g S`Ό5Zu \%ѮGOA u=T/T؏}zwj#gn[vۅ;{`ʊeJؔ@+RYdkBХi4:zq222#''X/OX2&kf>ɬZ LBXH"X)Y6tncz"+q 8E.a=Zc|)P+33H)ʕ++TH`}=rBJ#?aWH.& 3wNy!тCv=bU½p(CmŧoSEi٭ 'cڰ8 ƺ@IDATnX2k8}aw<9̙8j:Q=Ee5NCOTJ ;% 
}e.̯ՅtgI?8f3&.Wx~GESxƊFpCR3FW 6#o@Iҥ_Òq74yc`)ߞubPt:w0a7oزh #зkcCg!ʹ8ٌn:հqf.]f/BuEԺmX8zHw[G@[MV3"uB8V.g݇z|vmjEKYw <5p 0>yn0^VN_<1zv.c7Gt2Id,א={EMq9jWپ'w`0RQb_c2X().]CҜ'`݊ehYF-_y :.vxwHZ ̚#ضWxmIUaP.ZM3{bᜅ/RţDUĘCm_NdcD^^ |ڥc5i}Eؐ@,YF6:2F'Bl="`tbѵ!h^d琣Fem;C腃Lj:V^t \T)]C]:VXϲ&\ wqdbl=pƖhԴ>oF;d}f=hP"V4BoݼNj8 P`r <Ѵ+L)X?WFG@Pxv-I0s;N5^X?Pϴi8N5#SHǐޏ/e ///cP2`("b$PRqO #:(YVQxkpto݆{K _,Fڍ&]zFI{a?{!99HJIIBRʫَғ_ޫh4pz`zwA5c+b: 7.%Z5tOEGp9Oz`)iΨи/NSPG E0SD>8X1qZz'0k|oH 5@h왊I'jqѼ*>o{ t0uE|X=SяjDcTkbcp?lY(0: )H~o?>ꡚ5ă$[ A:4t{®p3hX mCG01ιBgHGtP7hSx/) tLFAݠԼ#\m&tÞ K0g>CТ8,1,BN}kġ2E{*&O,Dw{|=vS&**ni4 Zzuu ʔ߮Ń<|"]vf[`MM axotlX!2"y4ifa ,BONKFGR~  +J˗</b ›-(35"U$b,Bzٓ( Uh]sy`чϲäSi\LÉl2G՝k@f9j O2n BW R]hHتp_~2n]%~0&m&[j_ PJhFXw1qf9bqrTZQ/J/9UFN^Kq=F̰zFdy2S`ӯѧJgnCS!뎠WZdzU*7hrA- F 7LE@< 4ۇ H\cw-u;@^ͺ0x#yd(c/7Գɰ˻>E▻`۶=W=v-)/FlY0h;c**SC@ ET@BJ }ܝq7BPD<{5K?襳 C`u\ڙ 2%$㌙5.Kc.pI@~yxx($'ODIT^p*sPB#)]&7//g8e26QwBtNjz4"Oɔ ^yk+wݫe;dvm[7ýPI9Z.JFȺ!jLluS+lE8eиQ8倩A2E8XRJ/ ?{N\E3ѝuh<▌[L$;-j]$T&KVBO%%%.Fɷi4ލ\ >mJz[[3)뉹c6Į0Ú륤P8t9$T9'_4F6RHNL{52Ôoε.ovGNPiCtjQğQ=/݁THr7@\@S>Ҷ:Ծd*0TZHԊBX*|LS2`to<_ꖵgz]; A 3^zoI W75Ç}6Ə#FѣG R[AVԲD]s@el5Fd"1$"zNSxqh/] $.sΛmbXkRGe9M?Yy^g)A$K׎"=kC[ږ}F9gӇ34k2ٔ60}~j٬Y磝?{_&Fae! -ҙK%8l)n`.0Q4/}R$qŊCO@9rP!L ߫)D$݈{ȓBZ8k!^IB"0cJS%֡/ΙE.X<~ᩨR87ۥ $},V>ۇ=EGa~?NFHITV,0̛\m k/BxX$R3Ts@ }q?ߧ)GSne8s[ZFAQ=1.z ovFx|0m9Sj ]q4#%϶2~Y(XHK6qH² >]sƣF8q6_C3OѠ|aMqm1]w8u:.3pԚtV\OK”QC|ޫ+ >GHFݥݫB ^C@"'ϟ`jJQj& {d?5`-rK4SеcKE 6*`yv['O(gBh'ajt P_ƞk1xl BpژH c3%5[O"Ĥ|ƅ80 4h A C"-}S>/5QACua~xQ$/D™3g J}.IJbLH~n^ ldFO y)X*Oj OqwܽEAÌ҆1Մ"H&/̈ n!3xe-.ҖiȐ0P.&BD^>'Ҙϔ"rKTܽ~vB!d6iTk Drl)`K!RFv$ >! T {| 7g <;z;lā6!`@˧?yOH)D2bD*OW|9NxwW 0i8~&DtCm虦,#̱|V4RpAN/iCI##PAxX$a}h[F)(haq!X<k.a={*ӷj^zC5 吽&~;ɈM"FOu@e2_6ᘽeLR.#::N $SwVw>1WI≩7]]JU,S#p`VƜ%AAƈ@Z$?Ut ۏ_Ǐs‰kYmC66gßIQI(Z>wKS! 
Yp]WR)пY5SKnZi gkeidXyT²p$ɉ^Z)%?g&Xl1~7{kjꂷ54 GIz] aX|BQe8*hkL|U}4j ;Wg4T([dm' U< *u^>DI(VyUJÌХKP y)y]ƃ40\dtDz%mLK7?}_KssAt"'4{ Xr5]š={cy>ߌŮ#qq,rm>mص'nފ뷢T/C[6mJsZ;4 #&zio,O=`h:wL#šwͲ] W/ws?C{ꇠ]D=1w:lӶ^JF:bB )BI(K[!H,[XY"A\QREu #yh(g!!J`.UB@p*^a4ZwkNs} S-DLWlb=3@Otv|0j[ h3`-YoSw>|Cc;c'BA/mI'#%(E{ \n] &@J9f%s aHNՆy`o;wp9ES O|տmNjxS=MQ&S<5@O%Hd{O<D_~` L@%ħ0#-W^4o SG;  G>E7TP g~݊z0SƢn}*.>L" y+7S+l˄L5ÛFF/~0aX< [ʘiU}o}x/h;S n4#9Cu;4۰qD孎9}a[~ҷQ7'Yy߶i"1{ %SCPu[,&!pIr4m|Jl"زJDZmo"F}E☩(Ѭ?i4nω>/qPMՒPCۿ:LނÎ%^OJa}DO2WH\ϐ xбb{889Ðdɛ $/j$/te@ƕm@ o{2D;=ŮL,J_loV, #97U A@!@`^NSI=P,Y+]= 0RB&Q(ņfF5P? 6Ύ_%S&UbnĺE QA`Rn}r/ltPVǣIO  u& |ҳcMA!>wvÓƖ !CJW<. vhg$i.(rN {gwϿ/΅pbG`J$Q*/@uذ<:mKE78! <}7 |9yZ*ٸ*s1d XIȲ*̔KkA 6!_UYpS p'M oH#r)&{'89 !p1S<kp#?:$&3-NMsbٳ04QWغfg-i A@AD%"swsE\nuOc]mʐ?ߐJ|,KakI$5b El?HE4.%ٽLJW防72}l>օō` 7D&0ǥc:o66v4.4$F0 WV9FLNQ0Em:_މ`pþM>2*ىX3gx;p4$g5;4^xnq$qbgիƒ-X BJ'n Gw- (Bs. `hlgC 6΅XK4h NC Qd:\* utQO Ec{"S]l rOKa= db)ܔ 0&ۋā dgbψYƔ2Q\M%߹5ڭ;bڭJD2,U5ܦ‘YXG`QUʗ]H"\ëbPRmhρC(ټW"ʚbsaﱦ^TkKVM3@y={h(QyEBтsB> d =De5߷,JR_1MhL sXE|H'XΪx"tQuQXA\zRu: 1ՂC5/ǃanfJW#٦"T1 y<#fI;x)]/-:%LRpmWT$ j?} n'+1> x9eL$]؝^dH2}F]@ 42^4 Xp߃}Jp/0" ?^ŗLج4.1/0`Vv0H:>%IBm NeK{YS3D⨑T^7Lt1~fS`0֕T~w\ ƀNanB{ 1zmoJmt:^#ONQQDРSJPL ٤j6i3!n" rܪJT##4!V#Wِ$YО~cx[Na0$"Uٔ^1X3.d,j%ຉ([jA&u+2xhED<"͹HHDdCTFB>v^8v y`VIӧ).YWߦvY 4O b"\I1I7p]L 2f srܑ@)l$i _ m \4c,RIe{eiLJI`0+[јV%L-7}YCCDI̬⩫ FTߠ蓈QۖksHk/KoLd_!*`DQ6ɗ.Ü$ *斺Z/kg$lo84Jk3b4; Jg@G@A]KoxDZs$q04_wd.\W/1_n} 嫄ɓ88Q531Yq2Z 4hx9T󅥄{ 81N7iԫX`"s!Mtt*B Y yU*ՓUbs,iV-EE VFפ3u?}^g=u߰v\#aF~'f\}Qǧ8XXk? 
>`xB@.R UKF"0yy@}Hlh""&ݹx.lƵB%ѯ'l*aE gLٻpaHkS!;њ A@ \Rݤg_bzu(Fa1k$ےSZ!x=\D=`xyr;Ñu0 ?GMEDx:dW w_VyJZ5: wo".\߂u\$hۅ`ycgd(BJKJ$~_}Ɵזv݇@M%UѼv~qA}1f(׮/".B.QsOsM]' HdL 'f*cYA; A@ B C*ZEL+`Gwa뛝-Jlx@e#O(&643w``1rX-@ H ֑tE|3R)!iTq3#.g4@pE3 gB(X.A y[d_}γ0BN7a^&9 >5Jzc}]_BDT29~,kӒ+qk A@̄DtA2Q&!㮜B)*Ps?86E[A``F0O5epf.2p3O+9NS^W 7&eH'D@%D\ vL.EsV|91 a71?Q jV^cʓWkfq~ |)h˜q?^)k@ Un.%Upr󄅥5uQ;_ )?'%'6҉&(Apb;'!H 82HBTd8BKC" gM9چv>CR`8A9)H>I Դ*+FPLt@\ 8m|QFO6QՒ @@L;z!3!@lDp"IʎHhʫ(DVϣF>tg#^Gr܊8>d+9(ۦkOH+82m=8n 1*c˔Ax1=ċ& ]NtmnhYViȥb@&L9m1–?#0'W*HѥS}2|ͦs$Sz;Iȩh@ƔNKk2'~A%ywO# iOZ.??'S X D9ɍHTI#A<%;a`6maGgD٠ ]!n: ;oZW)kV؇Sq HYcsHX\)|_ #!jӁ[(xnDLZ/|3";|ؾ!;Ku'EWWz R[0/vvȘD8e'sdGxKwxzZPe^Xˑ#H<}q:g d֌5yQq3F5>X/D5(DH"P!҉  4AhJJ5K3kP! wzuíX;LFG@X&`IUQa;U %zfNBЩ0wF~_Gﺅ;fF! kUV&EwwwǮ]fi4@O?II7AH<catB@X֡~=*Pd $F?v/AxE}`]G,(lnDR!q(\ eG/Zڕ65QY =(!c)Ip3۵EG=I4ȉ{ (t UY㡽/U;f-c5 0n'>*o_43/ 5JeÏ==B Tj bPZ3r|r,7@LCLajGROq̼ /S#&%,RPս<> ȾB0m9iJH1j,I؀Ͽ\g<.h'07ţR'УH1y7 $0fH,(\(N.HN<~j\>O슍A-$l؁ϺuAs|ڜ! ߵԹֿk ^{v_\fBԡ: HJNJ1o,}I7![3.T)eT+)&c*O7 vzl}?vnn?+ Ղ؞Aݤ%@(냄(j ՠRp!~"(* Y M᭡(s RpS8ؿe -jH PT:ـD"Rsc#p1 4U~NK~; #^1kDGl\#vhH#ʼc#pN8y-tqxm~moc̝(6)~[qE¤W t(P^x="A΅Pp}e/6@ljYx0RY?#06#.I+reۍԘ3磠-<^X8&xhպ .!Q ,-| sfBK3;GǸKR|DA:I1ȩN*AR lv((9Qa5IZ2aUhP߰AK;b(X6iʳ\e2ʝ @gqTm=mQ=;HԐ)t*1x=P)"Cdהz^Dud*0tD}_:|+'o kٲue䑧<>T>5D ~>ɹ4hg!w;w=sK){I^lDqxEOĝlAڕdExl<4Ѝ za\zN]:7Nݍs#:(a1,¸BĊI%6..Ր%/7M)g4"e&G05TuS" 4 48wS؋O&'dŀNzc*G8;REDwIP|REdL,J9wAŠ ?-gH?O>HcaV(upS{dvA@eZ%lZ^h= ީ B@:'cI2H1[ CR'€!o.&6FA Wԭ'C 2 )T)ڮCqvꆨԷ:|+xFѴ; ;\:e@Yͻ-9Ecѣ 9ƈI|=m9;H`^%#e&TW(ɱ5#^z ,A׋ƊD(F0M9 (U[P42pL7n;ˈK(mBy= Qn$Pǒ>gwD]]V$%:wu3.+;X?BPҬV {gPy^F$.IW@ֽ#3''h۹ Mpe.PBPDCS"2qA3l<YM)ČD5ȘA]]`f?QX>g8Ѝ`+W`@K{g 7uyg DG(u.6*!{!|<=GG~W]ҹT6' o{w=DDDˋAFu@!dp\xHlWML%ވK[ւ1=M`lpZ+~ɛB#:P*7$TTغXK'60D?11[ݔ_г|A܋!(, w.Hx$@gg|X ?ué\w7KNpa$g>]Yik;GgPE*JT`=$L3lj,`mS$ U;SLT6:3;r9j} պ]h-118s!W6ﮥpp$x ȴw |z Cph$~?(1 z$"JA``0~Ryaa G0gnjC"ˡJ r&DeL [K!F򿋣QjE?SB>@ Uݜ2iD ٲq-#A?I@H889a@DV$qⷀ R$ӛ\6ow2: tgV/CwB(;jf$JLșpθ6.(,67D3QMΤ999!::NB}ϟWK9IYeZS111 /$2B!@ DD!m[CA'w?%V/] oz(;а8a:UC .xeI%EŲ yv 
Uˠ1l!.kqr|"]ŊmGQW}u8 N >׻8:_p+fMxpV/!wT9Cڠ6TI3's1*PW $ kɸ E2R??m?i˦Yy ̞2!Iv_w ( # A\Qa KTa$_#ލ݄ JDN)!D`EZr$_`m)I.Kjm;%g\5V1MNEGeħy#9)%v! "еPU:vbL9 x(jWĭf%G)_01}t)!buZd -%PBKi=Ϸhn^61n1Z9'LRb [C Jo:1@ۺWk¼x;+v3jX]пuEf(uąc8|LеP/[.bhYOROX8k?ɀ[ 菋;bՎsؕG"}KrQ`)WΙu{JT~gƞknͺ@{@|(&&E+3Ɯ>3@1trg=pZ$0~'v2ft\ik0'LqͷV#,'6,Ģ=0|w+qø/ 1: ſq kEIɔ>ueEj.B$pdJev(P0?])\|(W2Lп{KwSa׼ѸbvZiK!N#fViEmtfWR+;T*酟uBgU+bڅb S PVV36i:3V%Zue1q7& 70)8Ξx/҉S2mnJ^$ 錓pEg ]> Ա ‚6ۏK+x!KKFp$ڷиACleS`aX8←6{㏐:'OѧNGŐ"@\5{ oY3-{Y E?]4?;ItlCV-(5ͅMS`%jF> l^Ib릍^ϕq/E$˞ mNgǁ=a[Jmn[8F m l^ =aތ^(?,gѴ' > ?}1.Ga.XLhs%_9°`rҰv<8юUO%5C)~3wCT::WEͼ3u$֥݀1,efKWǸzc&ckctǰ_A5owLd=ḮS>2Gl Ɨ] +DʜNGArbn{t34ȅ[o0 ~7 V7{:܀2m^lG83 e`ޒuzWpNe̤ؠ6SAd{pV炤@*!J Tݝ62 R/yd_T@=W g\; 5c ޫU˖!&%SGb\>`?T5q3\Sk73y-#3ҶJƑ!=BMc<i q $W'gxE GArvI`i @QX6w.蕈ƁP#A3a3Ѹ9.n?5"߬r> GEur9|FdiDA#xPu鸥X&؝™'ikve ѽ~ Ȩc붯A^g+Bq!V[gZؖyuFq!~n;" ]:vզ&¶9DC+tT-',C؏|^1s)>,TEctkn'cBؼs%g/ `Նݳ Voچ{#WZOGF1 |5/[SV)Sd!}p ֆHCLC0Arˡ~PއͧzxX`:u.o|HX~H$lA\=TkBH2qDFgqM]_Eer"i r,I#0hJH@KQg/҉ BZ AC@==cyxAVmQ$Fݺ | !.Cc1|uD\sT^ޝ0jɔQ372HEx|["Yg鋣Wt!, 'v4j! uRl\'n(UmƱ⦌PY@5FnuEH`nfD" Np`"2]6'"P] #p.Hu7adOFAbhMڒL]\ݿwbMu X|Z4>^:F4|jZ\}lT6"BGIۮeRl}k)'s[V=em\C–Fσ:جߌuk#. rF?r֮p}n:Pkػ.]Z~$wMu㳑? Lۚ%j+fgܱ#͑ /Bþq۵ i p9|!8}"W֭3 L;غ=ܳĖI@%gkb.z],# GFXAP)]6& 6T^/ā%q++E!Q֧݇2&D}RE:a(D-A`6 Ü_WzHԯ?C_$XɫJޔb!8.1K6>AՔ%)KBt ,r\3븛J#zKsǏ(^ġڶzOsp(3}n㄃%6lLnؽ LZǡk863C}j[ۤ{,LT ;7aѽJ?MShT~ڛQEвAi&~ @D?.: ;LYd$#nK囶B/~GOԯ Ǚ`@ESd^3&j#~54:ȃnILEݔWM'+m/sע{>t"ݺ$9J$U.S )%W>*ArR 6mGy w+6͸53 / Rvz3^VEFWRDE+v 🃀^{>ү,yU|&%EB@TTz}24i^bϥ̞8붧S"@zA+Y{Bç~uv=R|?s%*}:wf.~YaN8LcY>0VƈF]/AzQzu>1gs#\AtTѕ1Y2vNipGUq]D] ï.Dx 8ls*bj(KLz%UG8z?j)#бM݌a<]z!>9 ujűu'ao]4#HbܧDd iraQwNBmnNRCF-o+T*Ǹ  @XLDozodkŔv xو| mTEF̕IxSS}1xz|0($jw_GCPfq6}QTW^6ڸ2W'pZ&CGȑ7LH"S>B݊%Pr>o숕ʣOi}ڲE5")\&)}R/90Wa#"*үJZ ux.|nwܠvA@>AAA1H(EDč7o_TSBY.j\zkn(+tyZMѬjIطm`$U)F"`v9C3tq[!>1Xx%A9( ׮ę+adf-ڢLA_ . +AQ6;.ڍ\7Q25%*SE z)v}CzTh%;!Fth xj1e+lE&dv/KP^:wBLb *bbjSi+]!U,yp<:%СA$Faժz'.MGA|Xَ]rŭƯhVYR->|LA ȇ>ׯ1.4[ ǁQ\)#CxƥhIcxk!IB6 |$zZ dj/Jϟ8?~ժUPeu< O\x (]KJkվFA@ ?0ao7np+W4(߷o߁L5Bu! 
A@7 c3k͛덶_lLܹsǸڜ5Cܹ Ts;߿3i;j A@wBXXX@N<'Oߑi AM@ҥ@a$x3pZ A@ :ēr1!ܹs_6 p\%)o")CDCZ4h r&>=N"΅qݳZB8xx1_oe5C!9Z5S@E7/a05%#NRIvN4F蝫8q/FHX\sZ-OJB QO wsdyQQS͑G"> =ls˒nL)s6b.՜<$3P# 3À'ut}>":^> f,ة9rd̀D6]1H0-i C@x~3wM;x X,4BM@QkC p(WLXY(c2O)> knX_,E6Of\[i% UL] v֌ %nCx!Z}23 ײ"V o/S{S?AߦV2V #B`Q鑐0oV _TH@>?1 ,+c~隶 o9w!,wjU%?|K@tU`ŏCOa w wv2OyVU#J> `<:%+/_Fi/]猆 , ;}pհ {\ִ B̋Qx 433CΜpS܃JޛL!&A@AHoD\2ZQ](kŐo7zquqp{g].`1w)ZJ)(6<>>eoy3/ x}Q'C ;υ Xl[8'W o^Oi7 )j19mj摠/PDb1 Man+w?" [wCQcm Y^j :6kwUD#qv"XR@z}]:6EdIUK B*~ L߄_OɮMذm"vg݇c^#(E8{5-=4iUӻ(0+ŊSKM |{)̇GGLoߢtKY;9Km^W# &4C^`HJ+#a4ܹwoߖ^n"*6ŐG˷Yv!DZ|4ω3@խ1myz]|}1o 3݇AjE2gylt#)3Vc̢JXiv ͂v:m;#ɱ$umV",ŰlAvq@&/f߁^E%N"O`daTF!dp4n&P &..ۇ(bm1~! %"@fM»lL&-aHF MsZ؛ [vÍQ߿4Q߽iԘf3I}@nYk &F4ӬMCu}pbԇ ecZNg:vJ UF8sWֆxu-m .S~݉oFAuP$Ifo?VCm|ǦfEҲ]B}ѦNUUc3ܿ'=ëHN\a9q=6m:Cϣei셓fg+pKkkBk L 0wgL" ZcqOT}l y,xud Մ>,ГÂv~Y"ͨ w2>Бe+q~cןsp:$őSWA_IRt^%\1Ҙ:e&OI ѹqul^Aaq'!ZCgZ='[M^lNl]W4S2S/O;LgYil'>p ŀ6uR?ii*9weC=2s'\0o{O67j?ڶbR6(b}u[٘,@RO0&7ܹU&B254 `.4-VE&m X?w`0<GF֚fedKM*OQez2GJLLJ ^Α_|UEs`iaF.< }ZF!ۗ@kI%G/$UL :w͋[o ] STRˠ10g7'rKtdZ#Cϟ&z'xhSb0GgOpYaNE6.3in~EoMpEZeHIyeYf[WO% +m拧©\]xj)-5j@Eڶj-MՑ#׿g 2&ukz `L"`.Y">nY<ށ'#2P=u &rף.\ #^ۗ!)L]ڍʙ (LIT+ CC{3'.x~g>r/i[,Y݃k [ƾQ?#2pYRn5(K3Z5g2歄Y /\iuvXCh P~cJTy}.`ܢp"m88De:]*Ƣ Q>_h?0&}2rMn 07gCCC3aNu1B޽G&*TyCM|s&6ʖ {!9>R7Ls\87s6n ¹f(76^sab6x ijR㞠Uӎh7l6uޝޝ8qZ\xQ-TE Dy:].j8i2C/MNNd`L 0u*?g'cBI+PNqm еy=1Nm4+ް|~G@."_ Wabi RGNL 0&+`!`L v8G&@QL''h{{%`D?@ٶ[倽aE @!rk%%%)_Ґx,L 0OFO+bL ͭ^dwr¥`L&^>ǽgL 0&`DW>bL ?&>!Wz)y'wFpʝ=_P'Qpr/K'@3Et|?zUn=BAoj'՝U>`FWލbL |4kt/Jȷ߾r2(^ K.mG8yYW. ׾,ˬo8Ɛvl)O)oK-#&#\BHm|o.W`LXx+"t(xU3p>sCqe( a/tfԔ/-[ 2Nf::st'oνV toZ;#%Bq"S[f ch#`хw``|Fnj cFsF K+`X ׏|oɸy(bC_ibjT#\0e b ֬lt(E&k*4ɟ x5 |)L6yo&1s@I5>b%g S Z]T=p+.oJO>d<ݿx`V[ #ruEɒ%S ݎœuN 6_8q7fF4gCJ`lN$%@ٕPhoDƍP< sSS&Gv >܌Lݛ[ @؇ 走+VG(WVdr%}7g6J׮] [afJO2]LFykм(; 3q,bF&q@L2bmZ4|f[!$kbULDX%7dz3 W3&oWJáZSh ߴeJ:p I_vkEA \vF? BnZPl4-ݍ'Q7p){V`Tz% ,h/&#E)h졊 EZ5\virne0wtĪ  %_UR\ֹAj"1ʕQݐ^8~ h2`ՒIשFNEKHqQ)W{g`_LOկo7ݛfi1 TJVD@KH^þ' 'sл8v?;\. 
<L{bsrZՈ{rZvF>k\uБ`gխ\ze@j܆*԰@OesG.͘dнTrx: `Y`E +*| |fd29Lg񯬻2֤جE`ȿ.dPh@L.[ck2 (\رn 24iPE9r9PRRt]б.IU,X on1΍M*`k5A@pƎhؼE6 ƅc}@aMkQT @Xc?/? !6IP![wA@Y>jN҆g E.}4^ VM⹑1h XPWɊ(հ#xrpD|yi2 9[kk7K4_G{ibw%Dmǝ˗^06Gf&\wG򮰲J: 1&XEM[`G@---P(ƏK3&] XXX]o dsdcB@4ʗ/n $Ϗ_󻌝0N߽"W 0'L -m~fw 0͛$_NL 0!P2L 022k)ܹsφ=.1&)ܿ۷HI '&'ȇrL Wll)+m$J qF0le`A@4q&IAlllS{L JW&''^7ŋ"wz>AsǙL@t*9 # +W~`Lc"1,ȆD@'YYn߾ q߀{@TRCdlu`A5 N]C{rI|dŇL |0V>dٗ($ Cf¤d5@Xfg1 bpc`)۬d0#&>+ǏK3lM M!s ^7; " {~ dOO *qE@|f111Ejg.0&xɇv055nqbf>qbE@v $%% DEH6%b^`L +bl7\ B`'X9&O%?P,[I}`_q_4Ws(Syt+_wG}!*S4 >Lė胜`L 0&M\rMRrɔV> <@eɮО7L 0&7IA$.ƒ+s;7pm VD= Jisi+ oט`L 0OO@MWH//wc[bEc ry& Oho\ PT~o666RPl0\`L 0:P@~EHL [g BCPhaꏌYgG;.I@| 7n܄o ): 0&w Ay撜͛gErL% nfauFAŊ?q^3v\f4\*iug&-*Z9e5MLJcSR.58ѵsg.cQ-Q*NFCuHEo&oI@IDAT0& OC8AbEqI&-h3hll"ֿi$CV}5"k)/?H44AR)_.4V|M)MD**KDxŪhE9AdP< HWT׌Fؽy 6<;Cn4ƬH\?3&\&&&;p1րl~e?|0:.F+ 4YNEa"ʁ\&y2S1 ]wcmS+)a!M HjC:GIj\$6o܆O[bŘsB͋8u)R\n&H/Y{T*d5kpN tߋa>Xq2 K@]1l ¢iBig%`u(LQޕ7qD0Ìą`t`1h`lG MuJuƖeS`Ͱb-ۉ^uˑfE@ 0&7S%V>I 2 J.kIАw! Id2#hOJ5ͳ(ɄFDϧ3dJZA Րwb@ϴرk DYϟѯuE̙0O‘+]i3*x =ʨ6s-fMw6SWnڍ5ϡ{-;gנȹXf3Mgٸqz/n@8t-fy+Zb|hKL 0&7S%V>I d/ v(!!?Q zINiW5P5ODBv2Oo~V RiKdcn.nzn[ ]cT Z|W ec~,}?`Hސ%5Ó7u?犀VB^x34zp/@Oa$Ts]qӜDF@(ÂT`L 0HmgVއeٚv){*&TmU-PV#%cn5`iiD+b4M(MZiӮnNB`d';s H'GBcvÞ˰m `#?-YAA 36]W%B w5\GLd"MC_Gp89œV;ÒBRAGWѰImiɭiZ4@`L?")gESP:@6 ʯo{8]r{%MC}_|()䁇m%21&YxQ ySڡ:$j y1@Knl} d}rVۊ`9n5r{quq$lzԆK^Zi裱{vƥ{Og=} _z I4)[.:¥{pj_ :T)`L xoty@@,#RzEQmewQ|W`bL~[' w@*/N4SVGZ-0EX.^BcB)]H|ZFރ*',zKX:͌OiZZ Io*^S4h\ WUkNVFP* '&`_ V; 4H9ׯ#<<\!@OB{y4#w"66VVVI ,(1L[UxP׿=*dqߐLL,Pޫ*BU"$"_(f3z%, 劀*{c_D"""fffpttM遲D3{H@2%PL$&)ڵk/_>Iwrrӂ=|PjժIH VKB!4%=^T&f0%zL#&Y=YT,,aFfLF$C=LܜlI,L|#1{,\q'FIߎU/w7s|`L [^t %Ks񆵵~~olqKL_Wx][tiDGGKǏ!"^2!ɓMm:VzT$UU/\N`S3CDJ 93&ނ1\kk+)*)ڰhnXXYKʈVJI 4'7wd~ݺ\N('&`ٝwf"6 3e|o]=H/>tDWVʬm(ިTs姘ю#AO)efb5UZ8ʇd"eP2eC&`ْ˿IV>e1QO[*|059-)%:BMr% )FޒL- qbL 0&5`Ek0&`J0ߜ7L! 
>-g;`L"&/B`EE |D/?&&&(w 0&WL@'>EbESP:L@{3&`47iO{4?=Z 0&`L 0OJO+cL 0&`V>ĽdL 0&`+'W`L 0&>|{`L 0&>)V>)N 0&`L |}qLЩqJ󢐋 u&DNXXAobE ՙ߆8 sx|yg (Zl͔/TN|k>rTzxG`m ʲԄgN.W>Λ<:ݺdJ.7BA/FУ$W(\rڿT?2&;| 0ϗ> ?wF=~BKA NUwڣwCjB ~V#0wtWK_wע\u>U.k|0Tڵ{e8ĩ_V=< k;*xWO ܈HB zhղ%Zh:>([S{~} "aL@iV[(>>% w֘8k5^Uf N#QzOq 'w^dmF*nN(("dG᷏?gΞżq=b(,{nˌڣSqZz(F)a',Kq>u5*0|%*@x V/3/@a}u{N}8Tv#=~0NBcQ˷:VAMEiz.5s Ej>^f-A iS"YҌnc06%͌L=(\aa a}$"&N6ŎnRU'`ٌ15/C'x{WA}a  ٛl[6|QJU>E=>#;иA=o>ȤX:e(~AWѼY3q%cH_Q|T0-gXw+\:tE!c3G'BF`c[ \,dzϓ6KwBan⻽S\9sɐ;:{C 6tZ-`o %&%p*F9gt;8%RnŦ!3%бuC_;;X\/ab6s,ZwH5J1qV7ut&0d8{4<7+*l]en8l"hOJ=¯bb]5ͫ0`>o.]Aw,Ξ=}%x7ꀼ6v &F7 a7J@n6m;`吵xwdlp21LM͠VJ_b/YNPzIb2mw;@&aN ѱf!3b\REf`t^e+P t=,I@[4xz7@Jx .=c#G4W'?J[G2T|[Il{N|nkZH #];U<MI"]Ү ֭#Šp<'%@LʌꉎdSk$}/]h["&ʦ6"z V/n܂~pg (L32_d o{uL_  {rC'rC±)u'D$r)4k V ZaJ#GX#p\HΟ-1XuAZF~gL 0w \0/M hV4o] T^\sAMbRZDDfVPۣ_P2$ $D}u)HHQU$ NYzNжF ,;(lÖ(lN/*&)u:L|4 au_+ɭ""r4[,#f]g0k\I5-5#fvolA 4!lm@mMfcϣ e>ih#z4̟C`k@2#{llr)ἳ3qw}P `L  _a_.-۴ƵǼ ۲#%)Y@-mrÿ|^,57B oFˆc WbW.իۧvb2mtT 2WĒ2פ`Ixln_UeеN\7 KRsNF /y8q OórETItC)AN%B:G˹ó+ ؂oڵK?i8.o +ӽ"Mڄ㧣z3-KHU*RTn$ VpPPe4Rr t==J'MEj)U䁆]_}()HUg22)RDBi `ԙ9Jȣw~-+!59ɤ&ܫb?Bڭ u-ȧW3Eʖh[U@ExlZ*^'*HB: hۺW/L+ʺyѵ84,Us3&ޗf2޷4gL'PDDD 077';~ ܽ{e||GttlP7m4 v)pN<{|lKFs@T單߽| G_FPU2etrww,'HWEWg!*I?(%yV+<{|{Gl_iSHz<*C޸'zņQ~{(bHSUm!@rL~>JC=ypr=\ǖWQԷ0;v%*VC6x 3[G֪ίnNxzPʑG_Gac@<]?v:Ԭ Y2?BN*YMkߙ`_<ݻ%rtqzX[[Y >MfHE$ +YsL l4l*NĂqqUs,Ò*`_O\KaL |08$hsc_X `\ 0&V0&- X*c.s֪lySL 0&Xxod\ 0١[PNL 0& <L `L 0&xW"8BIrRU) 8,wC+]_q:U4QY<&[ڡLٲT~Lxx 2)V(=>eٳ\Qrg>UdX\ VK, eK{=3]%ʖ4}/_J"oWƝ+=>'F#I"ʕ+o -`L!s/y$LDG:1C;K)h^KQuм̛lM2"SiI8t6f023{kmIQ\{`| ET'U-֑u=Ӳ 8i+~ǩʺ޷p| Wp1Gē{PŊMUmdQϣO(,aomE&7QtnWM H\C@u\<,ۡ#IBȠ ˞ߟ8s%wФqc E<[a($6Q(`bjey nQmJ42ˎI&@6&]60w 0O@ꊦ5aް2a$g5qo@d\ jã'Ki[7}еwi-aGa> n5wxxt 0, )MS'`p&=s'i*m]xx f A^?1xkƳ$ƿah޹'V΋Y߸E!7zEXn ܸYZ)`7Zch{4) r<{z¬'VyIE0` 8s4*1o;L\|ƭ5+мI'\ XBH ƕ+iP3~c1ed_2¸yU%hבּ~|RC\4V>1}I4j]Z;g)`lS! 
[χL5 QaÖҌa)@04qէqhݸ ގFtQmc,ЭW/L#c^| 0/@6&@pĉŋG Ϟ="##WLLpyر4İkBn'+!Px1H"BC ΑOY8*YPB3BsSay ŅKO7!X {¥Ϯ-8Ui⅖>~]$l.hS}0~qACmj´|UZC=(XX =yè>¾3G JۡcBy]F 'aSB|Fxr0w!5U- n-T]_`\F}az Tk+6P9Rcֻ{G|J[=3 - , u{¤_ eJrrPνhIA,Z%j4phx^8, k!,Cm 3⮊juz^[T*. e[ !OM- IϻQ% 1тJ6´޾=~A4,?RFУie\A+;bX_[/ *x0VA(9~e0I2B~cBIЄkۄrc>%=T.'+F|#:Wn< w*El.?,(])ϫQ 5$Xb_MiM;`L?%k^ƍBTT$T*&fgh4J"_~ǣbo$ P64Nk|H۷aqD=NB>dBʂjO/E CV, 2Maln-8ȊD[p9 jbX(Uz %(MLqan\&KJș?c#FiPQÚ(Z`LHZmW k{gRDe[9u[Ljg1r#*9 4z :p1̚ w3(Y˦b2Ɍ HGQ:{Rak:%q c7ӳ3i0}T7=F>8%$Щgw?QЬ->] ><,` jd?RѤwh|&:6CB(]ߍ osLƬQO諒 n CN4޽P=߻mA; צP}C?:#wA 0t'U!SEr>MaÃWVn&B/F0Б0[-"-LGA( [5L݃z2 I:c8bIEYC˜6VfڔIg aj*G|RlZ{DLm4 &t$E,m4vơdN &>5"t(蔵0*62XE>cGGfL2 @\d(NrʃKc䳇8.G"q6 rF&9Cѧys'[7Iq\, z#LNbl\FT1{Ĩh@ s=(Y4j Qtj? KF|Sp+MqQ,hW č j<`P,u-WܺQB=TkW3F48XSQBF-ҴO{1H} FN eΒX~?HcYh$^M¤B8h8)l1a`KēX;`^EPQ H5)&4]cʺFS7fl&&F+hcC@ADH0pPԘ'\?z=tι{x?h;;xLUWgkZяG wyBOW 2X_BWSط:19c}"@@_Q$#D*BiMa=v3 Z C~QHr5kn ܵmڄ {.Ȍ7i?}lXy"_,Wι]UJԶ<,Nf̮DDL,bc14e<Ux<89'QnCd;?]9'7> ȝxkbW/u`oo;?XÉSx{UJ%[aj#r i | ab`̕ڒLwb0n{cV\ pEi*vpJ>#WPTݭv([m6($>2>v9!OvE^{w%e^F!)#̞3?g;u 7Ps+0 5VaӦM8QX;tKʦ&._Dڎ5hq`QucW$="`Γ_'Wya}t"* P4NV.-v~aAÌsb}f/ X ,91ӷ3 +>6@$*D='?up"~%2}mǘ=;T;caaޖhQ_`=oL1`4;[GTvYZCE=7fcn&Cgb9Je{+S^ar31̏b23{)gmCCk6ebf}<aϙ]oOd 33Ƽ\lKK:Yg`Nf O6&ֆ$f-J4?8eEƞ$&.HiK̀}DV\Nż]13,$poMz9ƃ~|w1Z3;l< dF/F̤ 3`gV3ҷ3O'+u( ͬeūY'y|h6EuKX/NW5lG1+C3Fd6LS 31c?޽Z /z$mĎ x,œyL`?83fߋC뱝@[CB!,bS–3 cC]\34V};1l;gnjrvYOw"iؼŠa17vi;ټLP00߁5afEf"${HF빭C@kk+.]j( DtQႂBw /kup0% ~v빜u*?)F FY}rל&s?xؘHQ{>g*Zbն Y 0T#2~<\׭.9h;VΈ gFk=' [1 ѰTHpIhTYgc>Exsܱ#P3C@}CjFyr43+ =c!\\SzI3ѢY _ĆQk25qN< 9bBG>{ 2A9 (O':i f38ݮXmeHUvfIV)݆r/(.O}!|U e9؟u *b#aA6Vc>^CFr1=)G- Gfػya|LXXg:6Ý4JKBJ(7~<?MlA(,k;Fzڀ  JS!Dqvg8::.XXX7j*گ1zJ>Mh1"p ;H4nJ 0<c#D?e\xǷ@A{멓 "@w5}$:9LD"/]C_"9 D"@ 2nN"@ D}%@Aڳ$md+J1Wƪ2ʿssrG NS>^dƽf?C9gP֎. ' Ʒ_/&+NLNxg'=F_2-_SBC|ũÇ0c89^X6:[k3%ui3nt) DN'pbj OlTLyr ,? xs1%<= S&unpuP,~~xu9ϰxӷ_;7}_.x^*usIǼya9CHD ~vN[N5^? 
7`#N\WfoE@}zG7bʤ(lKr1)a ԶxNx`H9]d޿<|܀]j#h$pkTz"} ֽy݂R7TirMX:D=&>exx㊾ t6JLȚ2WIm'mxc$4~^^=5j52 9v|ȽQP6"co"w֋p; IS)p#j4\|{&nG ,~Ȍ|D8ˬ0Vd0 K ]&lQ ds[Ԝ„45wp3ڣP@Jaf#`|hn, XViLL uTa&y`=ذ0p7IQ"qw w;SL FyD ްa, 5>Hٲ C=OlǍcD"@^u' C~1L_bаrV3|;wrܥC"1@[S5NXS/|,W]8 kyq(h5Ztp٤smՕ5=C4mDBTb|tɵ钠X!04J߿J@J829q- ~:qMEx{ca$7AIXגr^905ԘskLTav 榆vBG9̚Ï07PQ EH b27:qcf D'2_I*"pSFO#/vшuu0jn_Uc˕8rɄWc7֤moF 7*<͋(ȗ;ZР;\M5ضerWԮO˼p_z }bÿ`oy1t;^@wwmC-LLa+:!Tzr]0@t"wQi&##*<+^AZHy%EŰN߿@@ &|>T'gpQ턁;Vmgq#'(YoLwRw~NYT7=o?;M3@uMX_}NޗBGKa =Q@x* G)nVkxBo_"@nWn9Df̜Sɛ._%(ᾇ`=k}b&bq"iuwWY"@1njEddwm;6%`"o-EM@xj-|7¢6Up`8\y _IDATϱ+y4#sQkUr#ٻveuLǚo/X4\S)P"Hѥm=<?w0,>0&2y9`\F\W_[vTuQRJWhmGmm .#e̜(b)ΪWF`98.\(*:\)oLEE;X&V`%B5_܀1\">_21(Du؇~f|OGiJL ѷ?S*e^%m"@_  w0v9rssYII a⧡ai$4l#b,lMϽ/c PMbLυbVV؀=;uo0Gs6ԗ>;wR6 43fN^ǝI 32Mϓl[kKOcsB\F'Uiؼ ÙX+pq`fl4Ғ pXKҊć$}O&Xqm xF!3>ɯ^89MqSlV33g!AƚNUj~+빞6La:1+sfnbyvg3CM ޘw‚}YvM uaf,(8 qqaNVLf>|3 (g~g7?*l1%NZYvI#v4gO` K\LLjq,B3R7f3BF1W~ʌYԛD;ٙ3V :::ZOgg'>kT*IǬ!IhmmťKP]] ./& **.=DNfa0Eԩ3 T D$ x^nM! IE D"@~LP!D"@ D/^S } J D"@vd=H'D"@ A ۀF"@ DFBQ D"@99:K D"@ePD"@ D7HwHؖ Ũ"@ D"ddBFᴴ4x{{fI:"@ D" jݻD(F^ "@ D"wNr%$$АfI4"@ D>L@܁# )) 0n8d2qVbA"@ D/܁cArr2 B3$ D"@@_$ AfoQ`iiI@_|9Hf"@ D% ܃#!66;Fߗ$#D"@[t1))) @\\\LJb܀/4@2B D"@I@ ܁RSSő[`oo_!+((XөD"@ }h-Q (q_0riȚ5k 4hFBB"+"@ D5BRJ'@ ޳!3WW^xiKKQfff⮑J D"@8/eA>/·0QT*?{,gEIENDB`docker-1.10.3/docs/installation/images/newsite_view.png000066400000000000000000000516331267010174400231710ustar00rootroot00000000000000PNG  IHDRϤSbIDATxڱj@Y;W+ HAPEîFB _B;c~?p_*gu:n0l6UUz@wy{ I݇aYy;29ɷD6| O݋@kI;^rVhu)+_ $1f< eDS <9V{ǓP98QLe35;R-HeJҶR։J"7)U;SDW;躺⴩};*BGB{Nд $ꖞ `!I%:餫M{]-8}!҆zIUHuwpmʸ.mwwok,l6̣#F?jczgc?уoc0u?_~:)$l\YyJ0 H^+_YY~*)$HkÇϟmeQ]Q(|dd"C4ʊL3jd ;:ձ0.$fC.UV (#FR)P搻,&(Jx'*9ei6 ۆ31bĈ:'🻞ǥnoa6c1b_xĉgϞ%O?Kni$ y;˼ogܬHL.\Un+Ha*j*YY-ɩ΂G)G@@`V*[fU:ZC5D(TحHug --0jqV"{8Ir8J^46Mg߬ի7o޼{ #F;wnE_?u'?y<˼OAgj>̙L'3|h:yD2"`[n 1b̙3O>O^D2/K<˼RCe !˗@VeK\Nڻ4uOƓO %,NKYe>hb7=fB(,\d2"S21&I ʈVծ2\eKiPCmۆ<767ͮ&CQڱԷb ;OP: }UL(4c{-YBW!QacebAM;j å" (qL©>5p8ZfGV-}}=poMPN|sh1s z:mWa 
D@Q)$S&w72"G%*u0`w]l=׉Fuz$7},rqƘhK9-vlk<6vrl61F#TD"3R#mT@&$y$%,n뺙5kj\G`\_,13}_KʭVO%z >>p+wQ\>ȦR6?eMm^!MT-UɆT8YwaHK X0BHB̵{FWMB>}G_n |1=Nx=}Lø͕\cg*i{TjmkE9Rjp!Qޙrp 4&t+z%xQl[iǥ7@ORlܬT߹S=xHϵ";87}D{ez24%D3k -~i~}b-rSʡy~J>jkoChkn5* 1עJjwLd x@ 0>1TC(O|#iI3I11b&ШIoQ7tHNLFU%T<ׇ_a(L#o$;:g;ֱw谞mŅʍOCADn\b8E@"% 6lI+`+=܎Urpy^8<¿B \a-J`3MAU(5U(blTaH铫([&F╾Z3j!PM3_0c.,~h3-ΏFnNE]5BM[TߞgZ Li~~h3ٹL7߄h+P4c13n$ О%Ώ ?oL1KJ*5M^;(Vho Gj(_Q~Ӓ Ϥ5au a.0ְV.=ӱ5zEL5ʥo$l2tjVs8t/--qZmھޥe@MЛX\yv$v{.7bq#13 2xN5D4FԀqX,`ÇBaNΟ*jiiY]Im8s29_WWksOW_hr`p:*c;a ^~j)JcWpݿU-Vz1ʌǩSEѶ ƍW=rKZt1)aV}K @.O.K8sbvj(_eL~q񯅧9fY;BlݵU`8sezEy^'ŷjhM E-G,3Aen!ͧ?r+thp#y-htݷ5[mQ;Tw/hwWEWWсxFi֒moҭva\` 4Ϗ/kRJ KG_O|k KPk"WxlվVF8~zjC?᧻?ƦƮU| lyjNQO,-/#;SMy⎒áxǢFt<H$l D<3{㱘!ŏF^ŋ_CJ*uDN㹘\Eeǵ VĦ/v}Uz Z|⥼:w*'}+La(iK4x^ê= ҿ;}.=}칳'N~<$}\?]i`m}cΌJ|N*|>^ ݼ>[Y&[-Uj>1B]L'Ј73PM lE`JW.ʮfٹW[^o 6U 7lKi]+.?{nQhasbY˕[MaMb{k]f3(`[ӳSdF{{;E@zĀ`^C}Yz_$TqSaFݺUg7{Ҏ3ʹ|-WA۽3 k5}R|X p*?ڗ"lyLf,ֱjf<\kbsҢ桛Mz+B!R ._]G/{gܻPBmc;x^9iӖ!ai.6hsF9$hFOU|Qj< hiQsV{[kvZfUjKtLiSS2G݂U\ #Dy H)n] v/ DfJ7L,X*=<"Z`QSƪ [S'5,bIũ v1 3sɃp/fٴ@TMe7=dzZT݋s.Nfe^P%YFtXaV-eEk K+` pRpnls VXHsb&[ȕSRc("ig4ADE8тu->rjo~˻a8< o{+{7J֗bٶg3cZ܋Yf4|$K}/]nכLp \dARjąQMvn`f(S%)%{ mnŦ )PcilF:E.sFcG&Dݬl ͩ}lc<ٳg7q3MӼ<8eYx% xxGP?;8{'3x X~5"CTYS@F7"F1K2iWkl]JC:ɮi]eN=x؈ /]pϨ-{5j&fl7 ڃjܤIN.L qp"6h!6~AGpG? 
rwwՃSi +QU6X.kLQEUP9BXikbGHZbyvv#ӮaHٮ:$Xcw3M7gF6;OƛZ7=8888xE,N]5"=vt4lQͤI+?2"b( 75qfJF D!X`\@dgў2%篟_7_(8̸̤Kwm'*bUK*VPn.`(a5|:3VMe78Vwj (˒ɱcRɮ3G|l|t//۳7a#W+ҽɾ- x ܻaAmz}0(w듻U{Պ]+]I?.E㐫WJ n6lkq){x 7)2{!–]v1[j"L0+΂{SRg~!kg3d7HVjfWiU_V)Mn&\WE nBW:.foAۦrl%zK "rJݮJתP>hg3 ~%ȪHЖLG0Ki-MΖ.[-gQ|p$*Tq!ՍjyηZe(vQ۪G+s+UI3vF;~>wǜk[- y8ha ݲ%"e_.ɒIKL:ʞ -t(R^t i+;sZʾJPy43s`Eb"244}%s~QBp7CCvڃc2)`w.4ו498hbrۺ(-{?zOM\5HQtF3Ukqѷ'R@ܹwB|_^߫ݐWyk,k F8bӪHEx&#`ݬp)9,F7KkKW:mE\FS;sTa ]fJO8nӖ󲚧DvnnoN]&O3ɶЁr# [,{";r 8W&p2ŹꔀlY6 kߕw?wV_ֿ!56q( VE Eb#r=tE@S +g88tDMU\~1L>1i)F$9CT&-ShA`,A:Ue e^/|߾>R?#F(|}zp{ן:7<.B-gj$U"L< TAde(*AbWM!aKLCGGIQސ}wʕQ8蓣 52}俞{}}}[%F1 /| 9.9DBAEiXU8F Ir +CPBUO)uc &t4`kGR3 ͢=Y%|.j}z9o?y@o#Fbp?ߐWsk8IjJ+ eh8J[)iV%Z3XF8N5ale)-BB FD;eW H9u!.Z%H6bĈ+?ݐ777w_Qeٞ^^KuUִ+U9Ք9)Ni  i:)NC:("( 2D"L1ɨ`AZJx! ߪčs9g=ӽ~g+?)R3td[wRu9I̜ܔiwnIOw--?,Xzm'߻{7 ҟȇupOKTIuE& ̢m"%+j>һBX63 ٦0E>O> 8?s#&:fuGk׮nii!H)TPPw<2! np?_&00@ OII!pC*%Jf2spXc2Re<v4aϱQ O^ ;T|g^H}lGyZ?s˗/7noXAƵh|AY49={mj\ Qv VAC؉3$i!{";e9߉͈pbquPڲu˯G* pv8 j$a%xé$ s`, jivQaaRUCG RA8XDLS=887Ο?ysll=zȯ~+v, EKh9=/xG***IR|SVxՙ% _vmÇ,6VVVzaFEE=$ˁc#t^7qW|~U6LQ=QN^H |ow>䍹wJNť9hޣ:M[p᧟~hu;v W($$DidofsTL_eP|T/7ҁsvxZҥKP$ɼ|Rɓz 32ςX#_C+NrgPL~yOJO-6|gӿ'%isȘ/y-BvX3qӚ&ƺuPgmZLzõ0 -\ēCUymUm]&gkuo{魷badwIib;?FLK-[,&&[YiA#Qҡ),M氓&l١fr:~lN;vK/t0 % ]No v:. t• v[6oQ\/,(G ;AAA,"ᅣUM1 ӡ6}'_G8"i&ߞVM|gZrC7KZJ/5|D J5'L$CX{Dҋ>CywBIekR`xP|ƴ;PsH7zTcB_՞ z)tbK>·hN2Yv>Zbuksrr@CaUd%9[[]qEVBqAzq s&ο:R*=z~O xr%;DڼgӮVKLؾ:xm@Џ>sv\6l_ux%ښw?tkai?<<17k{ 땸23gމԤDSCHSJuJdvJ'mr Jh^] X9\ߺ~*gw؝(W.PlY /dܤQ%0UsQI<. 
I|i OJȋ[1)[m?qDJb:w˱嶦dn~1,`cΘ 5fȋX1LGFEGGh~V wTZ_+G"}o/?R\2QK H3QPNAsBS!`^!9Gm_Dx2roErlÆb}ٳlcN||[|i2>+M ƜycKTӲ\}Ci?È)Lysz_sNuect"j|A?RUĺaAgYe^ssq{Mkv v/\S#?ӆ) [ŵ3x]*mlLmt<'̞5sjH8ҿ6.v4A'c>%)%19119)%9)1%1)1㠙wnߩ2UKu岡(g쀤:aiCeɯ}9}svϟ,s D9 )(K(f y?ݫ>s?%5xOʔ]kdPxL2G+9~vE}~ct7kfQ9./p:fKEMMQJEey' :oMW?@>n <,+"pzbm:5tʹ}g: X kV %mf\"XLϻztBBL>k¢~9U]f N1laٿX_~YA !UTXk=䣖Vrώ80K!=0&!Dm \U TIVu]|uw :d-+kLv1MW9[v0wXu7p㧣c~/mp<{؟| 44x:8xTIxxMm)oG״1:8!ܹsG]o⚟FM$P>l";/lJFuAA Ԙxb3Q9RhB<{]w"^ %)3ܫk_AeUeY1++Aͅ<.bX :ӓld`_6b*,1'``rC$31=>&& z:-ko8@_-YZYֆ.`^>M7hC˸z|OCL'OWG[\?+*]ZFzY,V1P~Rk?4eL5걁{k (" #d^PY7]ƗF= d}*%JDH~o|RPvAG&fWHW'8lVrTɻi60zH@,8q{HO*`0"HcMpW^ukKriN{f=&ƾ#heޞ%k*n/^Z}L7no}?mXi?`iIw Cɜ[U IWS) %\Ja6#p)L\zܽ5 3wδ,\4di<8|9참W\ ^.*e•'xjUDYmb7TA@z'/.>Rc/1߯2Uݿ_]/6T?``N6+i&~:tJpÜқ-fjI)L-nuh!$3ՎbrO?w. go3'G,ZRg*QSOظ |?E5x_22x=n<zqGZ [ B FMT2hh2:(0rrcN m+$3x/. 9BGㅏ5RQw0cRU~Ϊ;!.Q=$csC76@m I!шT<ݜڔڤ%tS)Ӫkr %ͺ5VHE_lܶkfSӴ{OJn Z fB~XiˊT`MHv F7fOO]aIE ^kIk#kX9}4g즤e,w3UpirDVx2L0AwjOgCL0#&&K!$2HWđ~?{*rhO/<_]]SRVV]Yח^Xi [bt'\LOXήHY{I RviH<5ziV' YlǸ}=FC+̙>?Fݵˉ6)nz~ n޾GjUĝSߟkpcm@XP{/j¹VAhݜ˗3VӅs[>Ϟ"e+t/4=nO^&4PhPoefVz:az{)~6m*_!^+yǥQ lk ō"/f]\VCk=d.<36կU7oiͺL4p?_nt?.PkZ8|2y,@Z c;d%Y+䲊f@>:\{#${‘K2i2"78Ye,EmSo$I٩Fs9u2˻_yXl0՘xv)d\(M4 +@EmW+V|5GN4vjLUx*b'>z6ZUv}R#/ӧ=|R}=:߼WWd>r1,dD~n{0K<6.-g y ~S-u}fKŐ^ 5(o](w2'@\cvod~1%92b޺-86jH@nȣkU L}o|R!m2TflK6߼y!S%gZjf.De2RO ںbs22T|4|+:Âi|/ÏJH+i4ws6޼5Gb/u$׎jZ$?V5TffO\]xKy>o:V.w9iOSBs |-t`MN,$$dI^NٳIfd[XZQ"aH!]|U|X#bQl0d2|8OȐY(@Ab"HWTD̍ᜳǷy;!!w_W kׯ36qFS49bfAS9=|R ]symڸ!~ pY^yX;Nq ~>&8dV]Su\oe"kjQA~6Jssv̕+MCիk w]M'$67/#c ._.jj~xd٘=:v]YKKL/Z'|Z:fGm%~;G>z٤ޞIRPW,@AdDKp{˗DWOSӢN-N"Vx:Y/_bV)i\gQ5UWf䅅\@~hw.XfEG#bŧun :u=z̻ǟ8ŵ7 ex5M5S{4D37f^o;q o|Pk6hW3ysw3gsg!OK-d tJDjPJ==ُ_l9o7142+4Z1\dcWu|lCTm,pɂz$n(-?@ _-a3A~uEeTD&45.9v-t۲ى5ko[[u9px:#"&tC r#Zs¥ddd##>v:j/5(zp']|a"5q.ޕ{wK{|x+~Q|{}VPHoZ_;fx<7Sws"XS W*1<|]3;:,p.i7 &H)RW?#^sȮoY?+3aj+/o.O*y o B7 jt_رk>\ / Iܝ՝jioll(_6ę; XY5p> M{<2=ɥ;% pmWҭug[pE> _Fvf_ݰקf óא hs&{wRCv`!OKV+69&c$ xh]|I~A.Jfn,2C[bأ#-ʹ ZUj^R[]S-oy2W$ 
|B䌊d3%Kx=-ODp?VQNɱC48є IމTO}{exnG&xO]vlEc {TLilFf065G>z̈́-Whƣ3tceM;痄NLohܫnhwQ8Jq+6%Rzkبk@z@~ٹi{L]kgMkLy~0|ȏSN=kUߣ >ٷVjbQ_Z5`M֤҄fbhsΙ{R}pRخBCX8{=w"2;3=R-Lu 3&?RbL OuH,\)<FFCZ 2wgUrxħ&rLr䨆;ͪ+ـ;m>Ηď__nM]fVٓu0 +Vl:naZq;dVWWΞ}?93^]R'tȺjŹ9~ќBp:ߓmٽk}+޹s{~J*l濾y+oT5wmҁ}wl_k^=}m}m_ #W~ `H+@k n~7,0*$>F(Xԇ s_'@xɆ Xb-L2rGqK4ՃuH3[ZZ_lGMԳ6w|zkNM=r=ξv]fw>XZNþ6)y0kf`3LxO:gP z@ 0։PtL>`̘ê9CJAw(R";F`1]i\n|bŊvڷZ<tvhlom/x2;/I(Llp 8D|uduf@*ݝR@-$?TmGvmG{{3.4Bq7pt|twB*4C/Wuf jku.0ALPzKd9'PUct@݉AQjHc>UЇh;ڎho/xj-bt3'ݣF0# [fD~Kۈ% 86 _#Ma$xeP"G7(;kH!vcDco3@7黗? )#U9{ )$ׄ-h4>ZU㑈y9?hH` 92br!D){Xo"w a f``|V40MѪZ R(}Z!G*{n[AgJzuX0HTRݻ@a_o}f %nf,ťDm;k䃮Ԏ|!bb)7M5N $)74ˇ\[ʼnR=m#BެZ"zh ! t5_3"b_t\(4𸒻B^R;~V2S$Zud/گͱ&nRH3^ 0ާBυlq/ͼc;_m۩ָkٚO_#퉐Z+~7Anغk?=*zJܖf6'u*X>;^wKFvmJ*X V=wwbae>?!ཹ|=os̭e&Κ>DؘC K|;x&Vnuzf>pf5NR= [)R+S|a]9{ͺۙĦO9eKjg&)gՋv1"zHoWUe< %?OmVa~zix:)EamMYc{nab=Zsyd<^;~r,f4aj ׫H MJAK?G7{Cu?8>7vm  rNGjf_iK8%س7"~?6o{u ,8j< x@ x@< x< x@<< x@@< x@ x@< x< x@<< x@< x@@< x ǂ α~IENDB`docker-1.10.3/docs/installation/images/ocean_click_api.png000066400000000000000000001104071267010174400235370ustar00rootroot00000000000000PNG  IHDRɤIDATx\[6bvg5Jk۶wcv2(=5 M &9F2̮}ژ3f̸1ѭȵ9Ҙ:]vn7ܵkM$""""b5z_wݵ[طoۤ_M63))NDDDDīXcmNV/kxoޱCܸq=^?v =(x15pzzjb:YnV?cx#*JNN/~Xӭ[6c-RSS[VWF{7KIIivCf:Z==^'k]xLAk_?wt=3=Awwp}ŸYVO:װ{J]Pggg76o֭?>|Æ ׏ADDDDLտO?tWZgٲex{:Z=V_ۊުrV.>v˖-=:˗/fҥ/͟? `w=L1߷{e֬Y/̜9ӧ?~:Z=vjۦܮ*2׈ .45[VW:SNQTkh . 0;kuz;r)ZŕaoM51w{eQ}6-?wx{Vmjqxąw^]Vgķ&֘ݦ6Ehq@ M=QWovݩ'ӻąC{ԇlUڿP Ļ%.(omo2 ĵ@ ._ t [SwF~5v[w 7"[Pm nQ;d ϟKjYY}1.sH_Qw~~]pH'b/vIKMii>}:q;Yr~}{~˖q*k~F`KeǬ[TTT(r ZVtznnFZuNY>1}7w?\^F"úyhge-OړδSmOmnŨU4wjv\r򎞱Vp\_SRm)yP0ndILٹ{ǫ|}꼕W?[n8c*l#ⲛ/2nhzhEkBۊWرR;[Zi1J2u ii&SROKȶ~e< &oڜ)۰^sufse[}m۶um۶m۶۞897[۳&y~/;k zvoԮ=veXsd~,;Eϋ'JNn-/RJ4RS,i)lO!Lw(vJaoL;v Ob#&555pؠVF@@N_.溠@m!eee7p?999jouryͨIusqJLL }XFFiiiڻwCa_|f ٵ ݒ_7Q[[߅k75X@?/"ݓsI_F˭B؜QW30[Nyu۔ {SlfuUG3j ԰aÔ$VRzzZSp< =ѻwo9o 5f8p@H}v L4 Lr!mݺUMFԭ[7[N_~YF^zi8<\WZx'Mo߾;vF*))Im%\ߌ_|iC;zCƤg:H諷$ pow >?j cM;w%%pNV? 
Z.iR>ɯj։BE)(x^* | K, yeGkDӐ\ơ.V###eAzii)Dy"2!C0INNTUUӺu,ԃo酸v 'J1kFg\9Fߊ )>>^Dtsvvu ccM"*557PK/iŊOXܿ9˜8G<17M#s^"O09aS!.&LN^KY~98zӧOW^:\v-Z |38˖-œbxqh;{Glz)9:p61v}ŗ6oSiE5B0GCJv'u{\0kޕ< [X]۱`Ɨ27ty#goր.]gϞqV?,A EtD4m4}CkÆ #7͛GT2( @0P'ШSN##GЮ6mڤSr3f,@0QC"mĉhΜ9… 5sLLpZ4w\GgpøV\d\$ƍ5k,{=z@/|3& ='b>87z!cs^V/mz.Jม) MBO ]4pM ~E}'ĉڷoc%o!c,^͜veyAx}ѿӧcb(]g"qk Xi+ ~K7J\UVϗ "H'3z 7Qi .`}[p'e ߔ4HKoPFo1u2"&;Ա6Y_˘_.ņ<~{z|ڤjP>w 3e6hO0Voy軝ׇe6b}ιEYπnP=3܇fWc,|^ݚk®(imL95DDc-ͮ*pVo Ř"<\@8ݻUy ҂ m^j=\ 8a.,.=oր8p)!M߀RǎCQ'Ox3^tqՎ;Tz "9|0Dn29#Ÿp[3f`N(˜QSLaN9 <`:`cn28.4SΝ;G?EҥK)# .tN]<]~`3`r^ J扛% t8:w,3ϫW p=a{k7Ѝ?tM10?9c!ׄމcKõ.9}hO19 w& D{{d>] isU>a`T0K WO#X2}+sznQM{b}t~>CGҵ:NoMJ;![qJ +NӾ>a _~NJXD;6Ҏ]%S};s.ZݱʼRn o5wɺ,_awo^K89|N9sF:tжm.ps=Ǻ<~qo]p4b^kMTj4Zz Tgyx|C{͑{>QVo"%\E? I(-@ E.x7c"#mm yE3ʘy8Fg ̿{Ay{xá uljjP԰!A|dC($W}c,} M8-@FF#awDW*qoj{''O1_p!8%6s",c3)׋sï_|N"|[\hrRaI;3dUn4~"ޜ0P/2gjIy-<7'TA ; z*A`-t^ʎF8|۹gC"o0YD?2R_0ϻZZteY [K֕ }ՀZ_wYKl E1I~|cޓ<ݤn>-}~{p3a} 'taa.|ϭ7ƈo̮hl]G2V,](?,̬VbqPYV&Qwdl[b*i!4 AD.motAs^7 3>72OlDIAg'ly ]9^p.sM~R_zqHxk/i'%Yz/{mBcS ǗZtԋz1N7:^quNy+ t0W /3Say<}5}*׉h4Q-Z<&EvNՍ>k0WZ>:t'4~ԑϩ9g`Câ К( kנm)hQ|~0|+k"򬿽Ko*z=3Ba%1yZ||x!D*SkɾKwk7nn';60@ (ɢ nŐXhpЀ;m@fa00 `" X 6Uw9W `.ۥ#CxDv?"sPb69Dѹ'M>ӡRt8PK:98P\2N9f,sM7tw6,N|[*-l]7>el7w?N-N's3L: }7 N ǰ-_tmJ=8W\8F^p8s3w΁zp_|snRD ł:SeQ9\zDSˎ (|R:;g9rH7zXٵF.K[9eu -MqA"2&yߤ!w L ^] I yk{Lw'Ŏ  \_֟rZT|JXd۸dT;ӇY5E,Jyۜ3Nr G?b#DPNI;G0~[I%]NLEJ9)J-фD5XWVOpj ]~=vL>D4y۔wB4@H^gDʜgM;?B 2W472fnn<1?cI!bQp;0ǹ&9 `'K[vUMc膀"5dr_> Gl}NXϯ<6~v,;e2 WS1VW34h&Ky_&9v} [`ϸ4g`barX3ʏv?{n_d >n58ECm} Zj&(Ku |N2$8B,; + 3y$/|enfffffffn3l76]lY,iE+%[vw-vᅫ7?n X/p|A v{?o!ZE ,@Ӎe~G):1'bt|()S!dgE$2WqqB"?+b=|J)|̈d>~_}s5cW<**wW){=`vUj /$&;8jnª J ;[3l\I1c~kE8mI~cN=ؽ!p|!0#+NqDsg\Փ C3Byߢ+\ ] v ݴfi"vL4^|-A.s_s?W_憯nWTp0a|Q_=TЂcȯu.ou aGVsk\8}i6)/)8E Ɓ?nTub[F#6ݷaXFL\axwCH}mk ):.V?t܉ n6a~5Xtw4cOn3V#{̩gǼ+8OQmǍ_.ŸVbZ)hſ1(s1^ 7p"jiރkr.@زN"ll0;G7Ba6AàkȫqBrXv_{ѧbɧwG}vTjf  !Y\į(cx.ўnB[[{1Z\Ѡ8g!ف c;iErÅVH|K| !ī4a܍k6 ԑ\< !$Bwvc%};7 qw/D|?rn}p_'cX<&sxm<džcd<N!$-lC8g#Z1A?7>g|\gwA!kVwR8~zH0=Ʒw 
NR@^u\>:ϙk^Zs<떀c1 !ķBp5} ;c… pZ{qOU_dD S, gnM0*aYN Mpϛ?Q۳'̼ :VkZ/Zyf^;ǀc1عN8/BBt"zql6wAVx@p$a#+0=UBBϟuDŽ.#Ǭ_c_*<Q#@!$!\pIٳ:g:{, /"b³df?\_O`q;;00ǎcȱrl_{ 2!CB>};wM+{G\Lv]7/#1qB8c5ǜcT;F}KG~]iB!$!l6ɓ=s2:{5ϚE/RG~֮"0\Kpp澭0*J!$jOcܼy'6ųQxyOI2KxA#-BH| !ěv^/Q z3h lRY ivJ !$`=T %|C<-QrÅB:wlX-8 .dL_ʞkup'fDB{ !Dn86IFu9+"Bo!D?2n_M.ͯb"88p  J !$!fE s&k9!Hyc>lVQ:B[!bxJ^2B8ᗷ5n! I !--U`SO-^[ڒ==~?KWB!-ӄ>.%+ZpxU`sjޱ]0MpBa6ýpj(b-У :]'mTׂL!o!=kqbȻu57xpnq灏up\"-B*s's1׬sxv:::TEB[!̢s]LX]+wƌgD +{D8} Dq{RGۭ1q9Ȩ;W|_;)w2رc(%afWٴNf᭵E .Dےs̕?)5aoˮLҎtd $P oS~g!͊߃l;NFc>znѩM2-E ^L4 dԝmr>} f(Iȍwe?f. 7ng80,o@ g|#0[OFKK ݋Όr 8ydZrUL=eTϝ;Wj)i^1ki0l3+f[jЀʌ 2zl"L&B f_6=Jv!Iy:zםmf@* weE]]?.6^w@0 'Oh/X@UVa͚5ŋ1sL޽ Et]Gcc#Ν?7oƍۋ3gΐBi1k,}ٲedz?ύr MafsIIK(L qUˡ# RP zLhc0anC 6aǐ՝r-jPJPߕRJ|g׿3 }=[ T4YoCh"\v N"3| m> ieg̘!soz/艄a``GCC$<|PnL=VSS̙K>_v={oTim:7otSQ7Aoٷ`v˵.we?W [lĕ+WS0 +Ak`,w^uVX @)C"Hp8(ҹxѣBf6$$!y޿?r1<<۷3}ت,{oTqw1G理%>?0eZ%Yʵ90a Q__SN/0, y @ph&,Fv֭[(HЀbQ/^|%wGFsNAw^Ļ>ӡg(uS?{Ǒqr] {QKӞ|X:CKAG g^p朣lHD-DErH\7U $Y]?``S]կ^'9󥹛g޽粬'ݿrJ+Kn066)y jկq Լ;!69R遏;yr˶_\ORG}4T* s='BON>Dztg @СCarrzUE{;ZxW'7mjy[rvm-%Vo9>|;w { ŋM-P\k'6fٳ͏z*ҨXKVn<:n)|uWx׿nJo#9Aj>uGoNs@DE!QJ:]vǀBP5|E=e4hYLuUwdiAеDՕN Ug@}+>Ale[t8v>l&ԇxlaϋ _ }3/D???>>g<͘S6l';vx0cno9|3}qkhIbȽ{EkRCMs]R*27ݻ~ E/mmgO}*syQjK5wWJ+o<|K_ڴpD!.GɱԼ 8",8 P+(:(? 
Ʋ@\p rn\; R9&q?$"ʀ'0:5G>,<(( [X#.kJӿ;i{r9i[Pw~:ήeW>- ~H|,AQ?Cꝍ^kAvMWbF2+; Vhgc|맴\N3v'Dz}n|rݸ;Iiw/Q ߥVw-o囅o,ZFhhw *@$ ygJ333!_l^.DD  *7)ĥڤ\M N<Z %-o`um4[4V @$ȼRvyo mߤ=Vi@k8'NRK͸v<<BÑUq:9a,_+#,E,a9qªS@?v\仴J+[|HMNSˈ#5x_EZ*5wg4yEP8eQ}0O瞑?tpYT+{d'yV}Ht#ߢyFMF8|Ɖf&yS:ceKz@8-EN 07!ΐ(oh{HVi Bs΁o>e{[p.Z#TCJ[ דo||]|VZi%| 큿M62bIb!}] UKd@C&9;>*Yi}{l42f~0L;;q`o~h Bll%+sNɥ8"1|LKƨS)¬N0n~X=r^|g2R'n]7e7O.୒{j%|x7g{?bSNJ+=IV7Ʌ -Ѓr]އ$[nɕ+6PDNE vʓE QuR_QuQ"w,$E)r[4/L;#kq?dnaf\9a$?/ Cv=s.|V欄ohRmo/o/=5n ]H9IFvB fQԙlfnnԋ6GQlD^IL6pw*ő9GZ; n.Z7Go$ AB>%_q?ooߜ8V1צ wYF<m0,TZm-Ԇ XklM=V@Ptt2,4 ;wXLMsah ca1zU^Z~%,0E;Vc2NpXLz]:7}+3ٺa\ڶ|f,K8{S!^ IvX},9_{wixpd{eiV7&-2mo!*z|1N@(ixKP9ǼM|$LCyswl܃eD|IgRqĺoe2XD/b/-"oU"v [3N69Hv'qT 0k6{ߑF\4Hp҅FgM5ay)u厞͙/5d#췓p^=2" } Aq$g| ZVW<خ/$|q-Ow7gRN++Jăݒxj+=]+IQvl#r m$"9ne@Tx3MR1{noE?l䤛EoDm8mc`,epY1M|f{w4AfNW31:} $P~LDJ3R C o.^ v(z,qTjg7ި\ ;9닌WQ/JZ$Rd]t~XD4$Ω6-WVV/?:q}gh:ƿGkgSr7 N,YiVw/A/=s;oi#]Wh Aao<8EQobd(tt'HJō7d91L;|$1㤸" {D_u dm 剷uUE}IAcE}t䈾?9;?kI?@\  I%Rمom^'V}ey^NwHeo4W o]B99SDXlߵz<7-[|}zҨ "٢wIiV·>6z[l:\ZiZP`"@&o;o Y/ڹZ[y&\k?m|֝NIs3Þ]8V'w]Zi%|oIyA A'0?< f,o,ߩ($( đ(:`Ѩr,u*P&`nj?Shj hJ6C DAטkmk:) uM7)?\g@{ ؽrâo (qP $rl8ܠGmgݛVTD0{o㡮gi|^B͖V =shomPOYmSU4 xVO*:8#V)}+*n|SNsFׯU8w]6< *s(JvOF@⺜J+jSoK6r&'\Y=)Q;{ێ*{HLVLX?uRDj|$: Ps>b,pJGF2]V {J?p*YMl쥰fFs2|9V?^M$&鿧/W_L4C#JT~hN>wjధ³nNVZi%|oXFb{?v|:Kj>MPů2 |z~LsNTDKRW2suh'_q7޹•ٙp--&mғoIk]&2zH񠝭9!|_BWGΦzƚ#Ra2xu]NF]{@X:: nYc9ruٵ0s\L:]y?ĽLpO˥VZ bMA ´Hu[u`Ɲy0:Lt_cLLGs MXTVGkkk?o7•Ze]w*^ڵ3_êe5Mg]_kz^ܥz]/+sΟ?΂9?.!gf-+e 330Yp}5hT2z}fg[}޿5^kckP{Gv~&W!8ͥ@rQ$21L,c,Mw<c qDt/j@q <( [x2+^)Hd#8x/›NG|&vsPcŷ[o%jf'j{nG=+K(/?JXQ%qB5W9a5bAIMܒ3ˉYb70X|6p 9t4 IwA'B'=;uWǺes1Mk"ƒ1w5)Ѡ`/6|ĬME,  NgŘr:͏1`C| (cyb 1El%cX]Q,5a;%V-U, xQ;m ""U z0߼ZmUz!dlk}N]M\I$JM8;0cN) h{YH?T6O1CCɘtӂݱ&MiJ,Ħ1ŷ15A={֯DRQ=vM1 1^*e&PTTD ExX|c,iG ġ?{N٨W'T3oZSƤbIG:M ݜ|L{ hmL~cm1i)tXz/a"|5R7oC  D:Nݴ5(gQfBs1h,1&iw^-3[_ tAթe9ք|t\+5*kKKK`[F{oocIpIEEN:GjӦMZ>uANb 뷬Vˉ ,7><'aN%&.VKEk.31ckNFOVs&NЇ]OD]rL~/iܧ;= ? 
=zhM2!%bJ7X|[|cF4Hb ͜4QD:t GuP5ة+zaÇ]ܠnӰTy Xt+ C Ke6ƘtDEK8UYYN%#L?z8w-'VGvroʏҒ@wr#e\^I1B3Ď;c,1I7d҃Z .l| J \2f )ԑL.R5Kxc 7>_ {kn8P 1Bc'b Fb1ocyJg/]~MnܸQt4iJU68o>ՅcT U|?}_-IԄ56]}pw܃s Yn|=1@,qۢcm1ϻZW^U)?pժU4{l7NEEE2P3u(죓AwU zѢ1S(׭Y":r4}}Xmzz>X#keͬ` lHa[lE[kǝ+|!ħk|n1#ᵵuΝ;'j>]v6t#JqZ /꧟gwTU\YTvro68gn̑2gX kbmOd l }5?(١$)=>#|>ė6X|cLp >r^/[f-Z3g69Ròp;ژaW)yjfOPmխ[kU{R'x5}{DM?~ΊDIG"VqWuuep-܃{qO3xٔ0P[HV;\Mwj-6|O >W C|i16 qϱ9qwYU^^𣦘ZlΝa-*--~!5 hr {뷠'^3kc5^lQ6Æb[l9|cm1Έat?C?~4}jl… '!;N&IVwHVt-4/hm_¾:AO]"5sc̕93w07(Paԩl$6fbKlm18myķ1X+lr劲a2k޽ڰa\͟? NJQI/"3Pڐۣ~>6%{MбfǥZt/+k\õ|r=7Y{KߓЯf /cd`Da[`(gfķ@|oI| @|oķ [|-7o ķ@|o 7o-@|/-zGQV:^p~[|H*j¹,P`3L33s3q;lF% bU3qRoNY1& gdO҆!c}~S._""2GG LU,m"41o(3u [Yw^)>یTƂ-"2͕DT{(cC;MbSU!쩎`~e1NV{!""=sDD2 ?_4 i3!41ʃȫ !Bnenno#^־[Q|""-""R?v;US<+şW *mǜ-X|~d =""-"r '2EҸQXvT{Xuȉ_|UŽpzj/]v㋃_ @G*=HDDD[DIdg(h0\7^ ~nK*0kU3Nl8b.\vO ""-"r`exѱ6IeLk`'כ{=O^*ӝxoW#^\v5xukzI^ ·Ȍ,ӝ0M놎4- ?8>>Ё{zkC P"˴ׄs  *K(k EDD[Dd1Xs gpR }agt 6o O6l,z~4˗ohD/ϟ=LiX@G]Uo:ov1ôVs_4e"0WD xzyJدM蓱+_Bc==?'lݗA_wǟ>vf`d5a' ?]f$L({)|̨ $ 'iX֍^ZɷVdt5󏴏M rs;zl{avƝ~rXG#"-"" =1|n9Eaz^˗irVፚzV᧓au'"-""eq3xsw+ۆ5씓l.9Ux'OHIؤ 7Yv<` o"" ""Z1G?nۻ=m<|{goM_uq?C)<l]\7Lb2,:pmFW<[ rto`k 5KNooHT'Xyx3o;|r27ny5T0w~^~*_VVa6Ը/{)lB8a3ϾqOt!#eD\J`JDDEDܾrsK2_^׷m ٳgJ;=d7~e1U#T_}ۂ'հ*KڶzF-qv͖kUI ;[G1go\DD'"#mx|I9w)oV`\/'ƬCxg÷ݳ= -mN% inSN<.|Pt5'‡Ĭbf?|ҟtlooh+ OYe~yKNT{VJ,?Ήi {jXrw`9`>8 zbWZm_όnolt%<{930"" ""lx/..pn)H$>omV=эizu%woG3OKo mϾ(}JqŬO+ឰ1VsƯ07ooQjŏgoJq܃twzȎ~Ǫ72BFR_ýŽK"ee%NHD[QW19]; QV9W?rɥF&Hx{ϐ_󽫧-|U:eU5" &9fOEŗS܋X~1i(3ODD[D䖑X$مqٍk9P[{x|Xa_z-%K'p۬aY%6^%L5tɘ+oo [ W9p׺#70aNo;^|Ol_VTqjqy~i>~%&֎^ ӅA+83 "-"2Ҝ:O ml_3G .;فd/YfOfrb{Eay4,<Ӄ_~]3t݃D9Z/:L# AFDD[[DdAuCwÂJN;X-+2VEˑiy^ľ^~\Q:[r*w2~pANsx` oםsbN&"n30: s5lIL,e`YW[֟sb^|eky1I!Oᣃxg_bPZ{}<{kV|~ąg67o=X;a tr"t 3|HLLV/@6÷i1q5MEDD[DfZM@Pg vP4Mu8W{0yE>ߌo<+kWY~go uV؇zv.½ߔsN!WGW˗D<^ C =qR58扰 z p<7玥,x pYΘǛ"-pASjP /5t5O-)gn YT߯pkjze{ɲ }XvуuXW0}w_wc5Uí-4P cwQ]?$Y8W%w^)]RG:}n]ȱيϾ`T1pOusvxn%c#b0>/>+j/j#xZ#"-"r&k8 ޮ !m0\#2NZfᅧvDKgL?߆Wv4-ۚqΩË+oA{yxQ|?_ViW7~2{84F |`` +[Il/le ƾzww 
"=DD,&A;NQK^z&.{9]#_xk<|=xzl- `c%c\D= y9ik8'^DD{0X{"F6pۇ&T79I.,ãK*y2;dȒ^SK ԣ/]Nv&LζFLv:Q jde*1\4[`&혙|sLIGlSgP„(V=V^x7;d 96{hZQ,˩r/nD`0g(|LUz7£xpA9 ڣGԆQc_MIrjKqkm5c,$ҁ~JCDD[[D ooԄeߣj8ð"3]-.yfRsÇ9Z8凟pJKn~7uY }[,erb··+Cw̸f6嘽5gNU!=/lQm'F~e ]|> pM4Cz25 "-"VR {Jq|9J xþmv\ pƒˍ>T;# 0`[{ zn g~oRd0drNn|rC=Y\qM}Wk[Rww5PqN;R Xw5b Feoo2L^?n`pn_TWy%E'.sK=sK)o ˧հ2rqT?ϪjvYy`g}v]\g77`a'_BK@0QO8ǟ#r517YQV7o ZcZ/eֺ"dt'>̠j>!>I> -dgHYÐ}2:%lj-}؉ }^0.ak9^s):%I8~֚v5e%Lu/(|+|=ë|=ȴaq^k j]a\n@i/}J񫹥ۚZ4c]{; `ť~l,qIξ[Roxlsp%5Z~(ڽqSjŭQ-2 K"-"2Eg 3 2aaF1/C r=B~/9A;q퍖+N)wxƶuemĕf[?3& I[(|=E $fVU-e۞6,8͋yhɷ87Q5gU?Xև6O $j-)-FDD[Dd+v_pz{6XP5-xooGVqqLPf>A?5JO(h8vd1!"-"29i:NHMXf{ؕi^|`~9SL>:؉U9phYɾn^<C}_zZD⋙p1E[·Ȍ,^^^ g샞O(j D;ҁoJ9j%y&m1u%O#01=?4@oؘnooB4j8i8a2=sGp΋n9K]\u{=!}DDEDnPG8e;:ngEc~yT_n_YFKo+"GyԳ0A=k׽B!.>77[,b<+G@9q;3 9o/r^}T͛37t^.ZKع]խm+u qo2D?\@o@@"|Zs\QX}43Tvžj9EpC7r4=oIQde?G}Vj$~j$&]-=a |\[xs_r$>\U}\ї_~?[}Tɺw~nܸ!è |w@@V\͛7 *..yZm޼Y0ɶ|/''Kjĉhٲe&( 2Dib>ٳG*++SQQ>S-XOpj;whŊJOO7 |622UVo :t^~.l5c x:y$N8'Y'NݻU^^+wn0j^o;|?xwD9]tkٺ95vn(Ξ=kL;9r!f^zXjhh~rsseԘZF@xAYa%/UTT QcT8777*;;>JǃOuejΝjL.ԋ]b7E,smr-ϟ_' dvkllx=N%!7zK.] 
Ç]㏉-Dz왎;FN>=c,[n<?jʹr^.}]Iݻs233CmXᴿk.E>|пDFF |5h 3F9:g |X=F !f^^_@oD<umF 9$lז-[Cqiذaڿ z"Ξ=[iii,Oڵk" &mB@!n*`M ȑ#5zh;й/\Pآ<c:vX-JII!wh@D5e璔D<]8rE0:FB#36^ʈ!)%^ I3H!3㏛kA eQB)x*kbݬÇ7zi\;~t]w5cǦk5{={ʷ5`\Zc~h裏>0Nk0EǛKd[`R(ػ7^;גD؛OT?Y;HeI##ᅮ6os*!ceQ% $N֎`~i~d\eH%agӧOhYY~%(i%(dE$G#(CM%~o| ^\qPTƄ~a_T{@ź80w:'c >ں>'q 9p=Yԃʄϵ2dO9yl>CrgΜ3J(VzR[!ƍ[ouZKY}o/ĕ ,xk3C`[ɰZiOIҋw}wyH$Ko6u-kxu$X*Jde3FFI$ "Y{>0^"J}3n?k}.g~/` :\\)@@e;{Q| |'7&ެD(g M 2D>dEV$2$8Gjb&'J?G :_}# ATue&)TnȽ!ӼM(Q0Gj(?ȯt>цj߼ dReƷod۸ɷl,37@՚ @I!)JWYL>͊.!VP||+ ȌSb}VH_6'db)ʂ<@'TNRf2|[[cW{"@K7_;J&$;"dMZl'#I|&D8.w?Y]16&dC A;6' >$(_]#,2DTX*#'o~ *K1Fe/2dZ;ɱ1q C~7矯&Q3]]19s 9A{n׾~ck 9 (1?V)sMP8+G@2o>K`T; yr Ν; Ȟ pܾpo]m߿ sJ,}l>Fm:y߾}SH= kaLqKC`$/(0sqH bRozImH4!%C!# n'Qytj y%+>ا%A&@ʦ渏@{BƠ-L$]& 䟸,3`2Z_,U/}P?y%f3:ƦEX#F{dS }sAǐ^l49TH}/}`N;PȜ; )Fr]cث`]78aa^]X_-Hk]Ř̗l1NvdxO@F]^0o9J& M߾q@ҽnVAel0"kF#"<#j]J̭uªf[ t8+0WۈC!j~ uβN0f_ He?YozQDyčID$ww|GDDDD$wDDDDD|'|'wDDDDD|GDDDD$wƟs>rK03dffcfsH*is^C8Oڽqjjw߅hZURT*.J-9f̘QFň#bĉqٸ^;w.N:~8vXq5=sѶoѶmСC3ŋ1nܸ8qfoZ!T*J߭Rn{;v?<nfׯ_su_c^3gѣU]zuY&~^xɓ'w4_OH]s=@l|Ov:}Q;z[ZRU*|Oe˖Ɉ̙3O>+WҥK/xx |Mdb֭ѶIɎ#>|x,Z([mo1`ظq#xu>Θ0a-ƌ1cʍ7o?0RO?tܹ3iӦMѶWƬYo˗ocʔ)PE֮])>Rs?~9ұϘ1wXj>"{nhlfر1iҤ_|+V2dHAO6-ӱxОϞ#GDjÆ q]w~̝4ԩS)>!H1֞={±Ϝ .oϹ>'OƂ ˜=xt-XdW^ ٳý`lc?>%K~&*JRrRzѽ{1& ` ի&>hF *M_ Д vy@աCx뭷ÇԩS>TYicѼy֭ ׳TW@ѷoxwc?aW^p!qR d䁤6ǏW~Le 4'xs=I|g64(.@Ak _/]{ G?aS@Х=οݺu (t1@@ ={FfGp`m+_߼{/Mp2'eᓶϹ`KΝ=Gy\γW(JRT 4m2 >7fQ8Kjf`FJ?e* ܀ ā>y.} @ʆfP[E{ n/- OVG}?xckty$N\4J]:/utO @1s C#r'h2I9>#/3F{d`l%-ms~Z Z7FKRT*2K,+(Y_ O [Y7e;%ݧO_5.T!#*Pb># [TI 'j |7 6@v!߽+xIeׁD$,S0/m=Ia>ٖuK?]ٞz߯5{4om_2tP%;Al?۽F06OZH{%n_nS|%;p_+~smܧV* KU}ig et.[`:tDR2 &o)?hDzZЪ/Pf=kduOϲuƍe'2cr;d{YXЪ_> Q`Lrk#-(# -?>;| mZ[gw]jg쫐?g#z\VZo Np?#z_B|iWs6"ÿ7ڌo҃-<[MOkuN34塢nh~eAy@<-h|J^vѼ'6%w'~/1n{Yn˔}^VȤנbggǯck{5yM^GX`79ƗOO!Qs:۠_@|P[P6'mC"Twv_ºmD_MKJ_m~!4)?fUXeunVp_+jq:F%ߎґ߄n;#kΨԴzYOX۷, <[OO[8 {.n}6541b̟+4ў-dcD7-*,x qͶ^`o7#k{6;6|SǼ]ӷtՍjP{Z=4bIcS ZifcD+|l.Ł>ao)ٜ( {|k}:ߌK3d<-5cnV[7d7ڊo'C=b:_5Sj,o+~>쫠ok̷m~mX]_n1^{fۮ1k<1|a]eV?F ~ަ#Rn {!#s{v: 
oUĿ+1@|P]~mLU[OX?ϗޗXn}Lǖ|mu_}-23;5&~ͳ׬oPQ¶7 |:SZoܧU77oo"77@|o7 f@|d|0oSA*j>UW>G TI7uu/VY|EY] *+zu /\[D AEI.e+-8^UgM 7[|-@|o ķ [| 7o7o  @|om%gffff^=/۶ݻҭw,˲Ot陙[ o7 @|QkR 8; ܸ0z۟^&W0 rÜ%QD~Q8H5z,ŨT*EQV_f|!_r}+"""""Rrg,d~/B<޳$yC|%ŏ !"""""޿MNN~w.;fGs/^ﺈEDDD)Z[t,ٽMMMxÇrFEDDDD7z3:'bioo."""""KFGGwﹹq'}?~^kku):v#'{ }Mgm!cJa澙Kj !"""""XfΎ6XJ]2zgggF?ﳄ}6*>6ooyw{wMOiFOv07'rgٕOٿ Ĺj#Hpaot3LGt5LDwrKiAaf%orff$wץ0[XJ#(iS"A􍠪Sp)+a{8~@DDDDe_bйpxV9afzoQבdf)T*;FV?m'-fʅܡPh!3%BːH$lƍ<6pOQy LӴ~r9IVC2=y[VY\C\RoPf1b@/sx#wRFHꈈFV 6q̥rX?T.'vh4k y[6F,+R*Jㄧ{Al|]W8M^r;nz/\ S\y˅Mǧz`((6ыTX*هnc[}XZ7V+w׆/r"""" bL7VqXW>˨ǽLl'<ՃMoqa;j(Q5j(=xJ@"""" ʅ)])bڧ֖DXMwָ҉= !"Qc[}X튯Ӎ-n\0,-S.© ;(*™//`hXjMΡ.|e?W8x=aZ7v=X.*rI+dbxݟØ0>b;{XJ'rADDD, E+1^WA'{˺*&({'bΏ3QF4SN5ifL"[6P3LQ#"b`X.f,Fefff޷3333 l89ƙ1qvM}_*MjuTZ&ιu&E$}mV+KSu'9\K=կfؓͧ7'zQukfݘE1[iqǛZou:RdYʆSN11-j3|͚5Keee_\lmۦfakѢE|Fg2>W?ζ555.޽;Sڸq#չhs޼yZrjjjqiܹ8qƏٳgd߿_48%%EӧO׈#DzV]]>ks_w3\Y<1co?`t<{ fffE_%՞  ONby$PjBe+&oU h2MxHhMۖި_ [knS]^i:7YYMn3,I=Rf}~뭷dR9~ww}W\o߾];v<#$Ν;!'qЃ Xj9 @]x mVr^ j׮ƌѣ{iݺuuEK. iƹȑ#5|5JzRjjg&y 6/|6<{jL;]PΝ!Saa6l 0PzwI՞={Dy׮]JJJD~)pBsrrرC".<ӦMC δ4ݻW..ʖc'D Ttt4+bD 3gruڵhgDM2EC ѣGԤB0WuD;&""BiZPUUU"/ Qƴ>QKBsB ♀ڻA mڴhyG.lEôT Jn(VB]zmף~ xL4ߓ`/I&] J/V|}_{_e#TDeO(BLv+! ?ȅ{g?Czꩧ /^?'xc \|;V۷Xe=RυWi~# 'f~uRV?|ڛ$)9rakz=C6TU6U˘ J!;kgiPM5`[\ 24 o2THC4tP͙3Gcǎ^}@`ҤIhq}K h$SN`mڴI @ Y8t !% }yLr2" 01wޢ38PƍQ\\,(s,R02^x݉:@X6|[su@/_RrAC/ u2QRRuM|! ($rA7Q׎$PW<./ hK~y1OdzF֭=?esERW<7fy摋02Vu&[צL-x.{} vmAF6:+mwۿ2u~&b1v#c荼k_. 
\Tb0^z Hu o-^V!')@S$# /3@{p rL`B q˄";\ !AIXܜ <֐%; IE₀q +t-x)sSQIP8V^ ţNy7Ć}G.kȤ1"s.H3e9PODlJ O((Kfynh+eKVP!Oat r!7P<5}g⢾hGMB.~QyG.6 `ٜy^WICSA_2A%JQmm.FS0zJ??Q5z*{~eT\hGeVL{rö6-EXmпɹZU\K8Nyf&w^?zJބz:.Z2uZҰji&h]l17RLlŶR=<=jM+5&Z,&Ct ώYkֆZDlj@ 4jOs$$.E\DŽmTLhPfI9C?}_wuB ,ܾ&VR( `GO.M`kT @#=24' x~ K/hᡍA[5Bqʚ4T AF(O-$zs+P9!, QӠqž++^Px>gyEU_~T k6X\շm{3w"V>Cl^EWW,~9jAK`# m,S.g F<#nFs#ט]>(_ւCzkI2BNxV ]wꁩq|!U{D7R{;!LYerV,="Qo/(LaAEv|1}uڙuBaVaR4m.JQ˷/Uix]1.4vXu\mI?iġ($ ?Zb}:dlI"#27e镇s ݡ7\`WhFg&Y zlfhzʎw3cbNkETn&7aV kRm *pӠ-cg/2R @TlɅ\0DJyٹ`yg𶰏_&+1, b-;R W\Ar} lʳ!NY2"#_e`(2cDbY1!nػ'.1+bF)6׶Lw'}nu:k7C#WtQw 7g(q7m՘qaą+yio!uUcZ; j_zVkc"HޝUG3`~W4 T: .ą(z3e8 .ą@\q!.ą@\쾒ƅpąQ־ͽ҆Tw)m쉼@q2ɧohڦ8nͳqyɛ-%A+'2*JAya1>>..6Gc{}=A>]Jt^b2xqk . .q . .q . .q . .qt0<<~1R訸NkZb<00ju1d\FVIF#Ș,cNsΘl8 . q@\ q .qLhx ᄌ{~ /}QSZ>x7c||<b?xg㎋ . ++΋s9'9昸/pwČ|bŊغuk .C8c̙QTbhh('T׭[W\ݸ馛ȸ[r<"x=.fL2%x: qj)ߞ}7RyurCGN=ԸKbǎEp^o+WcӦMӕk%Kbڵ]|Xxqo^:\s}ƍQvAʵUVŶmFJE~y4Nsn?83⡇ 31cƌbk֬ENu6|FA6mZqb`wo˖-?oj@~pui}nݮTpn?&Sΰ&9'B|'N CVAʍy8QF!ieEoŐ XZuGfehZ~urY=-) 2::*lVjҧ+8$_9ܔ9I\Vb(H{7,Wp Q*|ߝL\|M!kL7 V p4 d]G5Cv} C"OG"E 'MT={vcN>]ܹ6ZӫYfU~鰁ƍܹskݺu%WYpamH[Դijga+T[lzSL8M.?3/͛M$Q_V^={&LR_h1Lmܸ9s:$eҥѧڰ&DR`fC IJ8W8Dx3m<9 $'3̱Ue0.r9nE.,gBVQ_{ޥg w_ xs2+>MSnASwYwIzrGI #"*ީ½?]hsE-!w )k{@HrCn&5 @VoڴuC?,7TcǎEDuRt#}F-@m102}'b^ÇF ƒȏw|(9 Sﰐ~̘1L$ޑ"(1<`9o6٘e}aMi]rvˢmYqedwq9}ryo8PC7r!iz &s=<EwBz3ҁ2)Bt8rAvH$Hryc1Clьgyz%!3mgV\)rXV3<|/dz!nOʵ%WdG\vɪ~ Tj H$I.G,19ypsp97i̛3&"68P g (NNqsHOF."yQ(Ir(klM<#"^ cV7S{n+ra RFꊔDdn# "!=K6I.I.\Ga˗̙̙3Aw^oI.x㍮kDF.X vFoP"ȅ$.rU^WD"SjNtcG>&"'[niNS9uI.!m)~{|î¬r!cٲe" /;Gڵq\ߥ'}{~a'$[Gg + e?rÖ!ӽ/_\3PH$$D"H C. r!Zi^N4g |MfO裏{\ I񾖑 N:27V3*-#2֮]KΖꫯj9.\(MMZc;ږqRF$20;7|ݺW_KPg0wH$$D"֭[`eGwO ?<8Tv؁ȱ(BAۄnr+D yKArj,{.k]ψBGDij& *{w0 a~vo ؝)k,)ӝM2e7'DrøX(19i|ANK"nC\  .ą@\ qY2&aG9`7kkqAր 謻p2_>|4x[ jhEÇ>n 4q+q @\ .Z @\@\- .q .qEq @\ @\e . 
.qѺeY֪ZgPU͝:?xǫ㋝qd;B0sœ,333333333eXffvn?Ҍ撴$4|UjH^e{%#.4qq 2DeuǬY4{lEQehɺy sF[nɓ'>}^ziРAZf URvڥv܎;vZھ}^53Hk5hLׇUb~z*#I7CiE/DFIhWo0_G$޶m[/ц šCԠA}2000xމG 䓐ܹFK.Bd9RjҘ1cH$TN:#XLsMeѢE2xuYRKCE5Yj-`,Qsoec'200 'N?=|P+Vd7o.x@VZ\o^+WV͚5 9sD3gرc6ݼyeM2E^3fƍK:f;I oTR%;wNf͚Rd2UVI&ek ɓQV^I"ܔE<'o>7NÆ ۶muKbh\$D>xbj#rgK,AQF2 gP]rEOmOaAO .=t𲋸PVK\tiQ倧I%1 u9'"X \CZ~,Xv}+#.֭[G*k׮QԪU+joСCu=u5 |M=g?JG\~#Eu R$Q߾}0,L˗/WΝUXXD}`" 2\zIΝ˽y. s ŎC"% ;3js=֨¨杊zhGIkݶJ'uFJUW-jѸ}YgsBu cqN>c0⢔Hkܸ1DC0׺{C^zqaB8D  \ӧOȑ#x"IB ??_C@ Wٶ C ݸqC]v… e``E`&P @:uHG3/&FnNE˖-Kuؑ"@\A ͙3'bPI.3jX\  Tc˜ WN\z?! \V !B6G)q]i')$ # 3Is{| /g~j m={-&5oL7+Ja( ΄(k!(h͊ ]R j&) i,LI;hB x0Y0#)7)/ VO愴X!ꎕSqDr0 7#H3nz_NZYliq(ԯ|Zs\r${"k\ zz {}!,>&S I܋9WG{A2VRbEA "Ip8bnB*Up8V,xϬ#X9v/&ng{2%LV8qѿ\:A 8}3000(O\0i՞h$q2_SAdRi5N*V*;j5 C[n#,}f  Wpf ,5Dqa;-UK{u9#'IST⊐b":K$AÍ@T7Ux̋U#1 NmO)=fVZbn[w#a_ְNL$|XlЛZF\X@|=gprڄxpG,by"gL:7;;;+J2G3:YG~````5(Wg| j55l=GP̅Qr\0JukR0~JJM<ǒ̒`cPx#ڗ.'^n;Hy*bD*Px5g|p;`'\:޻>^ {RV~6KUy\t߶=DɁQop3vlZƶm[d۶;}1*R7{ 1eoMkʅA>":. ^g?BeF^g^ʅ׭fdd( )( Fl .54\xS[8VLLLTTRJLLēEB\8KϹ@xP.\P.Gfjw#@) *S?%JxgrP.r'իWիbcc5jPݺuUhQuM{Vɒ% z(.cǎ9sFUTqȹegOxJB1~xxѢE iFlٲ9S'ߏNJJrr"Çkʔ):p,?׎;$5k,}Wr} ~]0N* &.=j͚5jܸ oXbرN: *hƌz< e=)o^ƍSӦMqFeկ__. ,Pu yɔ'Gշ~1cSڵkkĈڰaRRR\jK(~Ef4p@]vMP|7zLUXQ`PPH@@J,_\\.\<5Jއgy岲b m۶MW^Ujռ<* rBаaC R8wޕ 4ղeKySZ2Sׯ̙3l2auAXݻW.c~͝;ד /{3\l6Νӓ'O!B)艎r%Q^IEFTR%yJOLx22ᢐ={ŋܹ.\UVM.# ֭[ȑ#,ZHڵ O.6.;s!B!Q.)NNxi{2=^&OKuI}K/oW^,}y_FN&MԵkW?_rDц~LeO%|'rͷNm^/`dͷq(Q@\ @\QrA(-ZrOrP.z"E {T y"@8{~G*11| ^TjUm޼Yϟ?W(ݻw累_|OAx>A*Tкuo@x3.]xO+/_^ׯWP.~e,Vz BUkOAG(5b7 ,=ic8j /԰ŹziG-|ހ H$D"r񳃙1DE{?E8TC\peT*-W%:?{2pa8`fq<<&8q g+ f*T _8E<@pՙ.)Z9>˺$be 3bxlITGk5h>?.H$D"BYO)P?<Qy(P\x\@QBD?ơGxIiaR?" 
嫘C U4ӭq|Nǝ9 [aGVi;W-b.l[2h6A19eY :O^+X`:T <$WD]8p;t[ v8hcr*eC@/sU-n%=1ܝbVwWhh3Bq}**Tz 8ҵ(Lc H$D"BhߣEAB!~V"Jq_Dg/FhqShSj/s-# [r0rXtD&IPx"wm?(M v4;Xg4%U6 vj.'MF@hI&G>*|\G-L8DႤ0*X^E!68+.{ ULa`BEt(6 ()J>#)Q,oRpWE&<0bX&.K 㫍:1"?l1DA Al#.nʡhʀ̲(EM4!1<ڈmmn~kB&Ke)lqD"H$R.\Dwn@M$T*+/1POz)DwaVX :BU1* Yu#.I`k^.":'L(d`WQtdi\ܘP$Rs\*fcq@iN㚌4H\,vFQ p&"|NCBҰw HΏrѣڄ8$:Q!`UirAQӨa#J6w(kq90y[s㋣$k|ZG:t [ XQal$( ۔`-r(brFsS8⊴%Y ]*僄cc4 KuW)<5]|'tkLTaX]刵EH4xEbᧀD"ayyyGVVl۶ <` ک䠡s~Zl`\!> oذUF@,ڵkEg楾bZiشiϟ4[ غu+RRRC@$ƍ=,..pAO9 ??$|K `8L`SNns@"M9o0{\ Å3+~ni:wqhy!:)G!ǡˠl)Xdo!tSO=Z3Fwy˖-CQQm88\|g())϶6xjBeYXn~a#s$$$୷Bcc/oeddF8]zH^~eN۶QXXHWXA~PVV}4iɛHW|!m$r޼yX~=MW!0GŽH nˍe* 6H: 1k4:c [;y O7`ӣ9Tbk-K`މƒ4TR7P8w:˜U_RO/D"ȍӆ믿@B SLߌ/7vsttt`ڴio)j " R{&VDh;hL0/i>]i3o߾`cBQsx pS]S1 lٲ%:d=`>5{VAAM"kA%. `j8'O&sN iY Y .F-" @!_QǧW|g~ #Giun< #@, 1L|ΈN45M7(7ģAM>ThioSF/Jy&Ik՛ĄĎh|yOgoo.:OTxzE({J_(pH4l;7*\3Y9ڏpE߫qf͚5@G!yRIS\m@j,©5BDBRxC!LѣGYyfEQ0cŗ"o*r0nVmF 8'B!FO n l X] "HqN;Q b1WM.H2pqqq3äHCė8_bL$Q .8Z'a@Z!)iyLq>p`5%C:Vno)h2Fe-^cH+ zOdCχ5Lȅ+՘ jͫ9g(p  ~uI^w\ Ec9Jy'nZTkN1 6-TT9G߬oNbW)={TYuvctSf͚5kV„bݤI pYet#ۓk_+H$L!C}6< \@0R\n 5VyݴufX&i-{I@ŭ)]q;YC#&5fƏ7=qI9#\0πd|ԃ(x/v|+qH?H9oR?Q@?7#xϞw`{r9VJ-й*FP<: z}SJ*4DP+/TIcsj0=*w[+ E4::^߫ =Ef"PYn@ҖwP@YRNQ(];d"J+?K@|jb>VY~ "aeM/ oreo3k֬Y#IB#=>H QH;i+#nQ"QKO9mѬ#Dm) " p4Z#Rt 瑺ȬL-+\˹0>V;vd޷%: #J+..OXgoP.]!Qk1D; Ib`R0? =ۼ-6g*P ;O@YXSk ɗZoojPoͮzOwCe)Ämݨd?^)g7t{u=YSה'" s|H}Hm<6}=5zEmOj?jˏ>_?:ᷮ_y÷ܶv+ݗZ~c99>rx GH *4J:Cz2q?=/.xNZmaΜh4gˆ@!fxx2Mn4Z\X5o|A\t_|uSP=_-X^ZK^_ǽuܻ=l\UM/Nf}"Y\,bY;Xk%VUNXEQdVVE֜!vObj VVX\ĘrVW(ǂZe/읹/:;ʱu~=ן,Y,!xqV+Ij%39q'7IJǂ^Go7oV$*cX$ʺ?bht)^AQ0gvMߍǁ&O~u[_X[οּjqZy",$:bGLECbYe!>B!rB$!bIt-,l@D2e\3Eܫ8ӡ'@po muc˒zވ#&PO\MiGN87PX$F\DȻWD<0_1fkҜď$X/E"TI}g<ġ[}"ȞOg'<} 3%<Fqxg1T7_KVJ'2a}QxQ޿pxN-nR˻՚i"..XSW<[rߛ?~U12+8LVB qd]s~GYX],D 1k;#Kh0+x BKRvXA΋"zB;!i562O wGHF1Cƭ iw-?+K\Y=!R{@6m}J2k` V-Ԟ yll._)u qŞ4Fhq߉ ´{_o{9?x0OGM<o~V/S%,waneUBbQE2! Vt$8`fFts՚'DN7ky9A ,.=!Wzaik\t$G XK|D@{$2C6a@CDh o"$Z{@ ,.G[GȔ9WB? 
#a~&&BՑ:kӯ9ƫb/a OIh4Fbucn_3?wMy!uBg1u/?9vOcojyi#F a-H=HY`p  } o2*)s\,"A`G( H0V!f|Tv^0>bOs1 2o *T9 :!q?H֏GԹGE\9oUEQ%"cqaMu3cxNLz~NFh4Z\';ᏸuxˋI`%&.럪cՌ=E=" /B90 dN$,H, #Bvp'v$uL")% ƍgaXDG E ^Di!65 J2|` 1s M긎vb"/r ^}g왙2ǜ8kk.訙nCqa!bɽ5{9"ڈ*\m4F b5x1K)٭?fysg=zCV>yPyKk/SGT{Ir9A] HH8THHt# 0"8 :+8!<^ 9#MTEx`XדLH|H8=ro=N~qD!cZ=A5cQr *ŵGHP8߳nsuI `syQKa#0Ͻ]D9 )Ѷh4j@F$DS0#9wN8((gs38>cfkE qȓ<]l638'. .q'.q@#8wwwwww a[Cѻht WJ`5ZS┮w<!.V  _~ӧVqB)VeS(+,ЇUZ]1t] t~mK<\xݻ):V*dl1OW,M`f3ڻu.XQ[Bnj?ΫrOϩr1P㘙 U-…OVC/c񹋢(RNQ [O |̢.|ӆZ:M)\L `fǽ%-|(1 իɝC*m,oL}*ij~ZfP^1;h1RLj]h~UC+^a t$.+ٰ/m@DDDDDޠ;:$nKfD קj/Ͼ=(;QOՖIֶڶm3tڶwz3sWu..}&`aaaB1^ׯj}4e>GOh,|W"P,CRV܇]==fy@7^?d2@P<yĢ DT}͘=dDĪ4TJ3I2T?2=9-1Y3_*aXm3q{PF[lV3yvPכu童qGgl MA]+;^)՚o&4"`FʗD by>B,LNŻ]dT6vƖ}. RBL/^]uóEZM~(CvP8=Ze}ʕ]K>3* R2&DJI{BW=m!ۼ^]ۡ"rJcmKuf*2u}:uKPf5")D:٫#+ҽV?SָM1uDz2ÞԬG>u,V e弹SD #RQkMM,.t}0^:`㰞Mޢ= Czj,¢ {HaD 2Zu8laf䘙1UI2,,e=u*Yu?VM@s|OݳZ_Fe't7K+j3F: V {ᵹ 8p( * *)-)AH gkłcyAJs}YM5/ʽO PoRi 8QYziJ^@g&DhжtyU+/ZnprkwJ|E4 ߤv}A.K!:ʕ+uq/ĉ?%z5gT]]rssl2<]bC4uT%%%]Rfwִi4eq|wn>*++HQ[n՘1c4aڵKוrǂ *Ra-5mi[p-^gERN/PDnrB֥diӴ-MP˃ӽX`?9$KGSKU[)O Ss$WƸdUTU]պsrT=SUMrJ/o::msC Նr-gϞZbW!f}ᇚ={cc$I&:y>O@(11Q]t}#4k,9sFW\.ƍ$rDO8FuUxalْ%@iܹ͛SNmB_={(!`h=4NAQ]uOHҡFW@|7Hߊh9invECʹs|uM=<@+)u"w%FSz<siKӣs|`w&!W'hOBǯ,x )7M(ԖD%T鱙j%;Bk0ӎ{I 322}vQ?|w.ꔲ'hٽPz҂  P7Nܮ];Q>A֭ )>OeOmImߩ;!uB=etRqvIxOoov9R)p5*V'MRHyR|g=(O^Idq5SOf03>Z6`ʪ(H+ܝQ{Ϲ]>\`ZU SmSb!eժUzgU~}}GbA_Y96=ѻ3WP,&6-~וW?2&L 7P9PH eMO55WO)Q낁KwL.Gʴ>ί R==Xqbnn_yMnX޻bMr`#ť:$ dFu  Ϝ9S|cD2ڀ0g̘! ݻkѢE€?}ڷo/< sa3z0 " 68pӎ5J3gҤI~PH!c2L0(/kj"#}BUM(C P C uբH i <|@EEEh'uǎo a0h ^JJ(s )⦼FE~{XRH+:۶m(G pH(h3ydӧ /L HxV/I=nL]qD}S/QQ}N/I¡N½TPAHiW@uM9n׌³ؾ]L/]?Dx\~hp[pݟ8Ύw7-?=c2FKaay/mۍ#[D<1X_3e- BW ߘ۷4pk BYE>M(R29:Re* .>t*kR6%u"%Td%VVBZ}UBK7S2K>!Ӹ#=:ϋzt Ek`^T4:luT\Akyb|J9_E:aÆa 1ZܡCb! 
-H0萌s1B1[c] / F-F5^rF )MÇ1R-yF O [ J+ʌ4b;Q&=zG^᱾#)3ʔi\ÁV'S8xw( 4nI/Q`}]6nhHA+ ?A]|!f'w3mР'vsQ'xh䈵:ZPAuBJxKۢ9ho}F0?)KxO#;j:\c4-}?7g 5lACwR;J>ꖽst0mqUj]qor(w#:ꅔogIW:#kqTVy)+RxN?^ 4๪⹍G g"0KAB5a BWձRƳhXxLo͉KG4+,Uŀ  <Rz0!%I)Um.Eוk^<9B,rkRbutxΓBs;#qH10002cpc; E < F+=i`Z 4g.<) k0J6mׂ5  ǓD `lqB^ ]<)LD# | { 8b6 `)ڐB9@Zeɱ@C =nr-#ExXc9([U8NLc- B9NHjWGLgA`i{6m9U6dff/k!^H *  +ڏ 4yZDvsΰ NiWYzdo/Sy=^Lz oo̓Z,Ӄc{y.&t,+ ZxyoզnM7Xdd?Xx=Ha ʩ(i~9[1,Ir,VNSY1J0Ko{jT.CHHsIu̓]|Ƴ'̆ IoŢg'F_3 Xx` ^?Wږl95+4Lu)` ҂"k1`[e> TW^r oztF.ia{#gMm rCY^kjAPzb8'*yS7 aJ 'lАy]1 _]X{ܼ̾**h=S`V7pyFh+36HI1Hx<oA я#QRJ p4o%NgQjs\߳sf}) x"<꛻MVY/gC8RxR0haxR_Q,u٠c跶[ O";_ /gy_x.}aXrmQ ?'LD}U5 c;gArɋ'\h p^硟Fr8<"!K(dűc|4ϒOJ )"Ȭ뵆x$(%d)&;+?Gf]_JMbQCU+7ح2 +u!5g?nQ_ qX)?VkTn3HR.E(IP<=Ih =H ?u+|RIT%\l`{avzѻj2i cO <3Hʩj{Ȗs=WO&cWyԌ϶~( ;T" aI"m(D2&)͐f4wKʗ>G҆z?-uGA r`=a{cŞb(>(>_8WMՄ rp8|Ro)y6fHU̴C_}6Q]rVޝ6{&ub眧;H^YINje}l g[[k>0*v -T6$l'fP)lk[7m,!n4[HLT5pt/)6Vz(ta!IJO=>a ?ʱwV}?*\(36Z|<;LRav;^!r2WYS|Nb6=vBik]֏@bD't8BTY!sp0q\<D<O{%`p8DIB4Er3{U8țNyT!X Lr'B{)uCFˬ+~LiNQeVuZTlx5EiYi!R]ȎLXMk\mkI"@UƆ?9|XJ̛q(BHrjO)<]}AEq8$ZRcVOy / Kbߎ~)3TʲSΰ)z? 
B _xTA&{2;,;Q }e7[`?A]7 ȯ%vʞ0Ș|Y~.:̴JIp8$SEIq6T%1) lW* (ѱ"8ߪi9T aW/?JzTVoߧӎȷ1+ƹp8')m ܞ[i՝1Q"&/ؼ(̠ }Fv 0:T,ݑCGfRiQζ$']-9˺̼4^ RL?nU՛?)"&RD "DH&R FU"RvQ|mߦؽ#S<1c6E3LD ?fqxr)?i'x{t&.gcT3ބJ׋,H']"R~=WQGi5K'V@@@t2RD RD RD R:)")")")'TUyt:z-R`e8l6(~ff)RA,K(StIYh:n]Z"2"@)"D H) RD "R@i/m@yG9,6=2b)Ķm۶m:3ؾ_s[033>tuu|X[[un`kkA!"R.Jܽ{?~?~}6~xyyAtt4^|f>444ȑ!"R CPPfgg|po3q͛7hiii(U!8ߐ"e?BHH!%**~MM }Ǐ@Bɓs )B!!eeelP[[N,--y333骫?zzz4},r>etX8LGkvnaaM~q8 )R"##qє_u^3!8B Oa kgNq>>SXXg9994^sB 7"D\\]d.G(**hBCCa`;6M E!n?ٻo& %{dcR~?c(T,Wp8잞:iՍ:YH ?(} ?~vW%c wp1q>Aߌ=HRJ'sr yѦvD mr|I[ov0'a2#E.dooB&@}󈌱}ϙSH~e>ޘ/Iiqy=EAD`2/m0)߱cG{~sfm&͛795 Y}vsfͱc9o=3֮]ykSɯn͚֭5kڶdxGŋJÌm/)0޸P(ERiXvJ0/"*GrN=>Q@޽s |~As9YAcȇI5 = )+}rO;?rDX!ytI EoGoxtӅ!/IkH2r_H>=DH߈ ER|Ͳe<-FRgϞ!63Ӟ03 )oa`4I^ׯ_3bS90")pÈdlٲƀ[ȦMȺkIe 1|Yk˜BP$e!쮀t bY;`ȊyX%F-N.d]4$CҢ@f"bl b⳹9Y%G|{Yс6U֕>6q ߆gI%K>*o2{B")m~?3,d+R`FO 7NJABm-NYk~zQ$E&Ecp i7r ER0 .SYO9Yre&%S22U3KLDR/hShkH[Odgn޼|9 BV/:oKBb1U&#)IIP ͙裴)gJ&fI!*yBD].Y6Ǚn4 dRBz3HRA*˸BHJ֠,X%%#) Ǩ2./V5A3ns!)J7M[awo/h;1I!(D{Q Ba{Ry;T?!cݧlHܟ5a<1-@˖7v9+HY~T`CF?txuRE_C>(_N55 3VTtﻈ}#m=|B")mfbŊ H VOLY{]=|l!>ᛣPN$Y%R!DD8|ᅵwW)W$G:Ⱦ+ KJO"ŝ_722.LKb?^ϵy9)J#b5B!"Ł@!V'#"DD5D 1)xy!pc^DU_Δ"lct/Rʽ*6!B))p)N7~D6.^U,ҽ NLSrttTgbݢ)R-!BHHa d+!VЋH-CX9\KKqʞ2aD MuwK4P)Z%eyXC2C!" 
!DD'⢝9lE,ܻw/_ЫHB4u JzQi(o/D۷o]i9D>x^b jU61zyZG(9n,i4?QUH}I!D"R"RL)$Vο;ѴmI?{S봯jrrvk8cccMRcBuǫe뾎˗/(Kþ&KXӧE맑L{Ò..rk׮Usss\NMࠚ \2#?"RBEDJD a~~fff.p4Iؙq} M9bx'~fsAT4w& sI}C Z]]ma]\/n-K㔨,OO檜_gBeB)!8 nKt/@NRZ}#1Q<*w'쨻j"ED £H@"R)"6jL!y6F y$xJOOcr9{"(XDk-8cL^΁"RD RD RD RD j"@"@"@"@&RD RD RD RD j"@"@"@"@@@@@u]qeYyη> ;;dɖE2/8m۶m۶m=U(5 kȋ̼'͊X,f&bQ$7>yoy~1çFw͛?OjӦyn֬֯_/` D:v(Xrj׮uiϞ=?~ʕ+Bʾ}TbEUTI;wQuU=Kzz6l(+/fp=Owh!6k~6ˡQo@T[)iȥsi1_ FBrzk׮Չ'zj1B7o.ˤID/ ڕ9shС8v%},X@eʔQ5DՒ-ZІ bIq`17rHm۶M$}Ν:uݻ'j=zV:` v)[V n\bXu!^pڵkgAjjL">iԨ$gkb˗/kUJX%u>"أv݉`x#RH9v옘L([jEk׮F%Iw*Ut)aW'L } :֭lٲ:thBlܸQS HˆM A~i4j(q/ $nDaҥڵk*E 11%b.ó 4p]N<)jժ"pƆ蠭AFo^֭11cpN-[kBp(0ǝꅱGၻA"^Y8y{±ׯclg  2)IukɈa,q'\"zh\ ŒBZҢK!A3_ukAk?Ceңn{|Z֠T\,&$pUZnj5k.|s7EC*-G !.!f^ĩԘ~;ߥr1pT~gG`0"ňlBR gHbՋ䆣D)N5^J(!#&w dO={m$_\زe-R`"ߡzI[ěb0LUPDbq F!VM6 qA̙38wEbBc֚"=--!p ϴ*}vq++ mo?T~iK'GjͧbH.-B ]Q>򅓺Èhc z8J'”n8`R@R ĥQ}wrbrY܏v/%$qUXZQ5n 9P͵[0DW3Vž{Gp,w0rX`8pREaf8 šaXp a033 ߓjMOv=W]?'}x1dC3IU ;AE G>o<B1PRcE7\|ǀۃ"^p}3v\1~=?;]sJْٛv(5%84_η7~S[sǵR~D {쏝v!J)k BB!BT)j kK  B!BHBڗDB!HQHB!))*)B!B" yc[ID∏Rl7%/x8O[hDB!$RD׿EHs~%lgff&ocADpH:u{p`<_"#77=GǑKMĸ'ע? :tu5O|c'կ4͛7HB!׃D „ :(#bq@x |WTr"2朋 ol8~P~}Exw,Y֯_o Bk׮!wʲ_|:uDV>6&gϞ s5p`y^x K4hP>|8|bŊVgѢE{5X x8F0 4pW)ofxÜ9s |gvLp1^b3fLxw$vo`=XxB֭"pXM&"B!n )p2b&װ 'N ={9UV?ϡ{#& oiTq_o(   b뮻ž={3ԧ]c@5HBhYֶ᫯27$~1O><5ă@|&ʕ+&z^}հuV&8>… i`)B!BHy'%%| QFB0aI1cXLșx>B R0n r "!ib\ŋӹFD4~HΝ;P@fkU9v옹3!dl8pBG4DY_7k,Ͻp6\㰵3W^Hc2dDB!$R8'8,6'Mt,֌r^1&G:DLjwX2_hok3pZje"eȑ&Xp&LRHa6̮Mɓ'ZEhk DT K8,.\aA|}QgA[}91wGq"VX'ǜOԖk)ߖ6=E=_₈ٴiS'MEEEEEEEEER /~_G.%͗bOOB /6R0Z !Z '>R!zpoVm@zo/=̬NGU?_׽z)@Hb" "@?O-Ca7يrPHY,1 E۽oo@b^σ7}ߏl7pkv;iTDPϯ_jZ12 Rը2PaTD HD pH13313313R233370sjK&;93h Y3/oW,vF߻ Z^Xup*!jα! 1J$D"H|vu$Tp"|?' 19%"p/+8a5xfMOTmQOuK ߔخ<W&pik Uޮ1!bWfGbQۧ9.0xU ΎpVd=p׬000MCQ\HR",&9]j )+jIvD A*@)a7(-6@\?YFrGR+gTMaDKťxV%3)VnyBTL>h wKU#&BvֲA>^σD"H$DJgYW/r’RB"wКFuN$ GC//Bݡ&ܷ;q Lo#8e,:],MiZϦH!)3,0W, I(+AٔJ! 
NPX6ר<6 \?EMTƄ]:DEAp&ϯ ؤˁL`#(;౬0IN82䨤pVhy,V$)D0FA[MI(W I*/$礱!x$`QkDO,H$D"H|6D_.Zs549o9sXm,+vD쇶=PqPڕS3xX-3񂐊DŽ8k5U.x:N¼bEa6-DbsL anyA1Zi[w RqMҵ@sŜ"K<8/gWnt.ʠ"6NcTSp0,hP].97!) ").eʠDˣQkMqO|_]n6>$JQ2S uS9,hSaɡv菦]7Yu7D"H$))p<v"4'Ŭ)E= (9֞|la}}ZTs%ʯ;uel&l œ sљhzxjag)x`n0rg,.SqaAb!TEO! d+Lُ8/ui‚BYyJqϬJZ]4%U!q]#Wx"':qjJ+ 9[9ag;,-+3933'(,. AA"mvRXA"&ƠrVI ֍SXw]%(p H>,Baa!ϟu],[ F1vX7H%b̙=z4~Ԏm ڠ1cpYKK R6'O&w!r5k@UU$)//>|M,^^477cHRYY,ACC>:>\IA㠸1b/_P(Kmm-JJJq1MCB_\'y5=ڮcn O:3> s񓢧qSevkI_}tI2PqI|@vmFŵ_yΘ^{/2y81G˽Pׯw?qȣ:^b$|0xi=,Ig7h[zF,m ni}ݦ(as:mK߇H$W=܃gy櫯_֭r t߿?LBj*lڴob=C=X8REAv <wu|>jjjsϱ]^x tś7/MC'A[Xx 7bذa޽;!sI~-[=|~_|C 8_ ,Dž~>?$;msdgyB ,k HI#8|$2wA8²JDŽW8{('%ψjFOG{)~l=(=(l!+vk#D"z%AR@7K.AY1MDw(--{rJ^z_)3ŐŢZ$p={D[[ҡZL0K,A8p ur$ى?00i1ȣ>zuVteggc׮]ϝ;gߜsNy &e) %4h_{AD!']q_M!>. 'rUr&«BY6 }aW q9|IQQ C(}VH)2) gF*B =R,dA pem%\;@WJ@.O(s{T{u։s=7B8#\eB|fnyK$pʄM0!<#wnyʬx|F9\!C ѓ1n?`e8`Ep\0g G@S.yi aV. -fSM^eeM 8Bqƛ:eQ(4ǭѱ=Hs(ˋ7Z~y>Huꁱ bEG_o!}CHXw^SCL1ճI;*7f|٩!W\F˘&H)!FF7p 8(.}DRgahH$Zkd,r$PÄÇV[mC"낧B;d !zHQAƉ OPzo_@$Q=G+a$r8uJcrʺ*미3]Dh_Ǝp0欿I0ff =LڷEJG[9*֫Z_0j;y%"( ΂pY[U_ZT;T gs;befk;bkCxW{%\lc!+*A E/O6]41ebpĘqN9U$?:"ey'*k=+\wv׹.9)?z[,\DJAAA L G(#ZA,!+diM ԉc:gHa1ceኀĐ&3Bl7#ȞA*RKxv!€t?5 \8ýxDf]+ QvmQr" K$HXhsw¶A0XX!hWuHbJPDh"A~DHYq,Rl$ʹnh3nC{F{-^56#઄5KŃBmҳj]C OKcA!^H59״u(ٚC6 =O8Zhګb_Ÿ~i c}[0__'^Na-:1U}Z]~U8)tGts/u!ȟݻRBGwC'AoLa0i|fM?{&81 Dmٻ'ʻoGμ=S.:%F}-\%з$&lZ,7+twƴCvϼ.F|%Y ا_*2w_̽곷-s]8-?*' Rev+"l8(%m%l42g̲ma.#xϑD$)=C?9:<=#O.nJRa=ĖnJEPXds ѯ_?ʺAbfpߌ!4:;;#]'bpɀ`p-cOBؐqAI ˓!'q݌Yu{o7Qgs5/LӖ& >#J*}].mX^xVqE'/z=6kRH~G{'Ϣhn\5D\>oX Ox#6-_xC9Xڻ8Ϙ6C4J =:a=%qQaٗwEAAAA/>96#l10 M1n  we͈0 {jMQN pC?'ȞNtWl? 
A"jUO %T[{., ~ ¶Ϧf xor)Ê;5>8DJ(|nw_c 䖨Sy/ _v6R\Gc_n>uޯcħ^)H+!^H?]3Y=_͔Tzig]7mou>0myt>[ص"(Cڄ'4B|ϑS4r* }^D6.(("E;QyqLd^Z?w_k(f3TqU(1b\ĈϼTe+3y5}A`&IܮDiƏM>mvP?v-DL%xVfX,:UV;6^iG(f@0`yfޚ% L_խ0Oqށ6&}fj.UTyMhs>3if"!Mof߄8~yf W+Ln+@"R^XS<+DŽWʸ5ӫ.=)\wNL?x3i\@໣ǟ}%1Gc!Yp폚&r#'Cp<\?lgN:DA>'"f1dy*C`Ƚ|T˭|iӦHC%FB-D%\,"A W.K^?(Sw>~wD|'!/SH}WNyqgLO%(}>>}!7);FE< ,7*&}4šǶo 7}eFX-E]=fXtΏ|0F|r_wKzl` fh>Dϊ!-W1y `L5H7HfSf9?.r.Jp&\H6<9$81&'] DŊ `g~~r‚$Z{Ėtnfp=Fܼ]aA_҃SBkC鼫$!t)aafѦ:yp Hy\ ?WE4Nޤ3)߉6T 6{N _*h@lO3;v1Cecq0#ʰWY}DYCG:13lۀ-{䘻7l?A >'nğcC,p#ٞW =\:(MJ&BpK]ꩍNoUH!,q.h+ϴA_O5mhFCuN y5P{yX$H_F bX‡!hz!-F;D%N !Iyګ?xXYN| QHp+$V_O0h7E8mR&Uy#]BVSE<Xw]Swc7Ta^/V\Ƙ/Ưs13~LЁ\Yu6"3;"FpUfbp( fNb "0Hp#y((W%A,"F^:1٤v iQ!CZ_wyw Rmq'3!r>A P?38|M s.G㠨|Ii6r8-ޱ7ONW[=9NQ]pS>Lxa?wI<"w@%B{SXALmP8bh"c[c颅/:dyNH{#f=L&Ca >s:[a6A' PFZHCZ̏#rwPI³ ܩ]\l$;gu(#eqwuKm' \^Gx;A I3a}e!x ZѤ!,"R_ ͊M^ 6+?pdkf?-ՏX$|S~]L(Z<f΄y?^D33KeKs_+e$R`Eaљ络 =rB6@@@@w#RD RD RD Rn>|〗:7N{n%⨚@QwS\^Q/ݻw4>߷o߾,Oz?l꒿,0Nk޽ox8;Sި;:KNQ-ٳ{m p uGQ#'Eʮ]1h|}6aС@QwuH=R͛7o_|zKSި;:K6I(k{>8ꍺCꑺ>YlܸO%_Ǐ_?+NŌϯO7|ۅMkꅺ~#ꉩ+3ꍺCKņ )Uo~qlwzh؃WzBꅺ~#ꉺ3ꍺCꑺ>r?jN=Le욮nfsWʧ'C"ffffffa;Ψ7Gdчje5To׆M_>rȵN'Bⷘ^:+:ި;V.#kPTKׄu8رcO|ifz/ze?-H9JׂͱR:3oǎ133333[j7-QWsgP0J%,Ƌn'07V-} N1ZIENDB`docker-1.10.3/docs/installation/images/ocean_gen_token.png000066400000000000000000001234631267010174400236000ustar00rootroot00000000000000PNG  IHDR3(LqIDATxp m۶mFiڶm^c<^ٙ=_hz/NG~$ ;fKj_vMmΜ91c1toxw 둬6EffDjjjLmժU{1c1T61Rݡ1NO131Fnݺ}+**_TTt@jeeeNt1c1j 5F7j4#B&u$&уXXXx|pͿȑG1c1dX,v:C͡PE$jTԨYRA3戌>HA~m'o;%ꯟ61c1ƘBmO8) Qj5DV~&3CFR$/ht9_?Ɍ1c1218_?wZDM6Q t̨ltF礩z*@kn\PPp|G~~c1c)En7fgg_[o]k]o?[M6QUtʙE VFSRROpH}ЙKA꫏.^K>dɒ1c16McѢEϛ7s?רI&jo_&}YpKu^2e-FQY.j)fT< h4zDVQ]0C1:̋`8: sGfnMF9F͢vQäc&g_>w ;=fTU,jI]p߉%:]oOmS*j1uo#2wx2Fߓ)\jE]r;+vbk9l{{?xbfi3|b7`rN0}7j7~?KTf|=lJִ@F3 1C ڷڠ`Ye|ch֑^l?/b ?ŭlmIOZm-Hۂx-j4@0by*ߦ}Ju6V{jwYJG-4a~Ymh)Un0b33Gb*⯇DaV#sWٯ Z mhkҵmEkXͬ+i]Ͱ]AtVFbF!CDGTn_To֬ޟV㤯!f1Èbf$Z:Я;}6_(^BX^goۻmk[5v&mp0}j@0bfBw@%m؞yc۶m۶ƴUmڨs=8Uϻ"vTfbe˕{W.Ui̡L6 <ȯk=Sm \XU yMWg 3X]- 
k͚5Zt/_Ç+##CԤ={(==]ofׯ_a?a_^WF۷O(9ҺuT__dMMMŋ:v[RR>!xCS5hzU4ܐšpA=ԩǾ: 8\M4bֽҤ@[aa6mڤٳg(::P-@BD LTO;WwNY~sf8f֭2 ~LfҦdM55I;Zx6E% joKài6'M?M-LK/kٲe5kzHVWWQFʕ+z3Є TP@9߀]_:wYUΝUYY73kК>}zwڵ3g[xx8:tGx&ӧͩ7gkSL%7+jZ=FߟS1EpU괷V4u7ns| ~?,0[7Ozowp6&2] M 鸡o7ֿҍ 3 $ʼnAm5`EQa&3)%cL)=$N[ʆkE77mRXV< ®ۈvJqEbݡx^\=eDv b z((:"[n>pW*;;[XNNFNVp6oެa3F5۾}9"5bx׮]ٳ6lؠLc 8t]JMME^I';wQ^<87tc,S[l`ǵ.СC*++3ηw^Q)))':ׯu;3|:uJԸ:zU") 0܉':CCR5Fb2EhspNٳ8ͥnHO\)lSF j[5h3'^[U/0C{ۀ;uBd]ʚrؿ^0"9]rV^^Nе6=pݻ! YnAꭏJk*HMP }z!?n}[￞c;l+cvOylHi,ns@㟖8]:YXWjT 5Dz|>nF7GC ^ГO>A/~i{>a&3<2t_:/1% <,1WǙz|i3r[d @LBTE .Dd"b4tPkƌ2d#lҤIlʕ+Dy60sM<BDNw`C;v,ZbfU߾}ɧVZ脦MF:(6 Ѭŋ3%|L9"ԫW/L0D;DSfΜ(DkĈڶm@p D9+y*f!:tDZ)Q=zx~/X82ՠAM:UÇ'S$Q ;y$ XQ^( qRl*ejI_զj}E>3OSfɓ~;@~&I-'Msj*o}}ܣ)Kr>had[vn|avFX0i*Y눾p,)A\_.Pns#s>q~Gwr/8͛7O> o8CV7:"3AFiF$Mdc>NRLi.UqOJj-M=(ViF.S3!ٗ "F2"qh0adFcWp@l f`"C H7y`;漼<ʇt;AD5E'իWm΅!Q/_F!<2rgDuD ѐ^D(uF>71/Nt k >o ȥIFԇ>Fݵ+_ /-kVzYsߖ{Z/O˼ztC~k`^xqљ 0>0GhD!|q7M"=Ч0<o 1 ! YnEϘk׈m;Q]4(/}~<բ%:oJ}^q|>銨&zն6(o쨌eǎ-<u0pc5b+`Fӥ ,>,eڹVkUe}ɢ(K.((Vk# ҰTq~%Z^c14䌝;]YZ]~E7㍖F+yN2ѡ`͆.# qoҥ jo#lwY ÿ?>\.Ƶy=#:uJz}?݂̘'ү`A4Ė{ 2U1\C"_-s&ã*9KuDJh׎xI{5PbԦ2$U`խ l Dz\ڊ+ Q66ζ5WKwg7:^_'MUA 3W^ ihRGeqIK,J+q#X_%O,:П3 !I^&`0{3ئy w'r6p&A P7®?]JN^v0عO*,*4e Ŵ ʶ[s5 7>XnTjc)igq9'뽖'"= fB0[٤Gyo 3Ƞwxv5o˅VS^eW4fضRJ.%:`@Ą)%DFJIS:NFa!L8?@q <8A 9`0*!I"q 뙃I޹&GpNA;m^PG|3EyC_Ըs#4YOԇO0q]CG| ȏ+&:@Pn;@H??QHپ`<ʍNNӝ}wu7/f@dZ@8EWrZ"mfy r3g|@Q꘶ Rn@Kwu 8 #pXy |79mp~ssm۶G϶1m۶mۈ\1v:락˨Vf~3!%%K3w0Cw TQJ7Vy/8c_M~d!Ia+v(*]PWc;MLO706o 4CՊ5rfT8X [ yU$|n0De 3.@ex+G 0ѻDNJT,m\9R@lЂmp+Qt ?O1O_ahRf&aRdpb3( C;_R჉Yx{\&Qt-aYj %CT ^$X\x" ':%E5Ԕ>bh>/rEbAL{ScCYoE:#%%%dwj_ker@4\:pڽ5ΥSa,nQfA&58<Dx?)X'ѵ7M+ƉK|~8򘀁[]Ryy]/e8.z6syB7WvM|U-0Xݫ @fnT$ l@- JECQ:4DWf.>3*\ia̐B*·shʈkMтLl A]G0=6YG!Ǧ')GDo3.PjtA#uokCad#Z N[HBnYaHtI#x5n \())) 3b:AϖN4}9ѻ5xN7לrw4d`r(c'ev|7٤c7NgՆHeU9?lr"J+C\/Gņlg|xi9Av..fME /<\xI9Ϣ $2ӯ q/KÎ$|~qi&FIH(1ۗ; 2-J_R6wOv1刳a`qr J-V̉/bRcaG5&@O)ص4*4;4nvhA{ q0.:q.ܔ)eff033sffuHg>mر9󓬴녗D7]I$ÌÌajl$". 
Ik&`3%6o䯗FRѻ@Chb$IaƇa0_iQ$I 3> 3I$fa0#I$Ìa0c$IR9c 3MfÌ$Iwb ;ȿ/[E6oLxcjÌ$I6󶭎V6ǪXxرcú0#I$II$I2H$I*0#I$II$I2H$I2H$IaF$I 3$I1H$IaF$I 3$I 3$Id$I$Ì$Ib 3$Id$I$Ì$I$Ì$I$f$I$0#I$Ì$I$f$I$0#I$0#I$II$I2H$I*0#I$II$I2H$I2H$IaF$I 3$I 3I$II$I2H$I2f$I$f$I$0{x饗G[ob1r駟"k׮Ps[l/n?>͋ǨQbݺuQ~-}޽{z7.>Xtidٴin#>$IEť^x`x㍱vW{/3gƐ!C V^s̉m۶e&MW^y%oqUWE] ѣG<r : 1v8㤓N=xhӦM\pq 7_/c=6?|~=\jUs9ѷoرcGH$f89}m۶Ua:a„8蠃SN O\sM̝;7RW_![oK. O˗yq[n*SN5kp\yT8ҿ#ҿVaׯaF$fmƍ0:2c>@a 3+V: +jcƌ +~H_3 !N>x"EnSOG~i 4(N;4UfoטxoG}?Rvŋp}_fuXpa`Dt kwĈ; w/}a^ufub-tcx?^裏+W-_^zfM$f|P;`B9)bs}0I!Lر#þ2-'0wuCsu/RKXjH8.^=o~6eʔ ~gssN_~9D`wyM^wIk aeFB 圸\׽{ ^ys?yoޟ{?Bpx=@x/r]Wc$x]Mk[00MLϞ= 3&I 3|O.LWpry!K, oh~? 3|MQܹsg N"CW+:Rm<9SO=5=fLP1QΚ5+X8 -pEQ?<3M7DkfF#eh׮eUl,(t2lV(}Pv]w~j:׃U8f 9{sb:L8'0/sIb;>گJ}j'u >ޛN  ?27>)5I;$-KYs_  {駃{EGa\tBjP8^czrG^[B#U5pW2!H `;`$I2P0( ͵ [rBQ֜ PMCA % ÷>Eh`hos0N] Qm<{ҽ[桸 tdGs ѱBxxE)s0-ʱp_k> sfh 8"+U΁l ҒHz?PBCދ:GW|s $I&NCk(fF(} 1We}[yW4b`վĵ}W~MI$0SY%I$f 3$I 3,7̼aI1H$IFK7dusUXE"I$0O'J ;k]=pGҦq@kᄊm۶m&cDc۞RwUwU{dqι5b1 31 bb 3@ 3@̜ 1 3 f? P*T(E0 0 c?eYF@$}aaޒ./ {+b?>fi,gx񴵻']sw5>;jkCOow3>fJ)bN] E,%<l.l<C^꾥.ld6?M^]]PՊc%{}[ǝc{ 0- 5*__{wu%w܃N7㇙IZ vnֿeaހsf:= ~dvMew%EIe/ˋsw¾8u9=+Hf$i^;OXw;l]G.`բǮ'3ZkZ mIc1b 1C̀1C̀1C̀ f f3 f3 13 1b 1b @3L$*m!s[bzɲa&i9h͚5JTUbQ---U=?~\suqEf}bt. 
.tQSb \.jd.kp ?3913lٳ:;;5gEQ䦲jnnֆ y=nΝP13::6c̶mj]AZttt'{M{?ۡog5k,N۳deȲu33 1CXX `۫͛oBɑK6̜3.X Ü<|ޒel5F#wɀ0[f$_|Eϐ6}D?P|[oaHx[?:<6}RFIIIII fH0j)> `8 f1gO!^Tvրg)F?zÒ'<CZg}9ou`&3 f%^z;Ky[6mj֭[~jժ橧 u]׌iiaQS_S{nO>VN9_lksg@(aO`Fw˼<Dqgynq+X3ksM7}@FΣ>:_s^< (oݗb͹瞛˃MY ʉl(U}!skka5#̃VxOy\[oյr0 ]v "$%% o5F (|r`y<_ f$%`FN %Ny ;BxC(f9.qL߻wo`>DžbQ`#<~4Baz%S&  e!3 2oPT0̰:dRn=sOrM.,st'r|y1e0coSW]uUoX7`s#QakK&d^h (' g4`W74#d#@j7>{zz9x%xLX(Xt-?pXGE'E\( Hv-bM@ %%% a,^};aá3)`{rF*q8^;eO>J.|~<|ģ/x$8oaf\+2"ǏQ HVx4rT-+MO{[7Gy9{=%I0PRexj+*[MP.1К<1fM,FBZ?Oe&:hh0񏌌`=<f5 }5sg<γRuRC#Zx _ 4ƙ,Ak``S3)sAzɀ+fܒ(1 !))i Yd~xGE!fxLg$31 +xU{!)W5 < ooD !}_Q` H#!?3 (@Yث9@ ̀g&2*;b !++x.p%I04`ņz"`&3KE9O/,75nB%XŜR,@ D۝ (pmkŽ-#_!UCbzBe_ؗP ~$N0^Ux(7ŵJtdfEbqΞGH!T?BfyG`hm'ύNJZ(0(PDYz](puo3a9%#0S,<=aڼݳuvp(3ӧ0$%%pD 0~w}<$x ʻ i@0CV-*,?ś|1 0CQP³(`F{@=| h`PZ<b >ms` 0㚸?s$ˬk'@3@ f9HJ03ӄQ1;EÍ?!t.4KN].PM*Q5+í 1_BVEY(ڛa: # Oa6A-Vxk) ]'-A&SٟӢxF*isҧ6سfutZikFi,`urքy7@ UB7)7Qka?tCqC݌SRR[G.Í(>Bn@Ml 0PtƯ";2! &rf o%q0/|X{<ν] .<=@Y;dMmyzuf! 6%M f a8}&aK8f:(m,"tFy[1!̬ɠvY,3(W0|Bg#H_@%g 8&P܍e kmxT0r޸ \cyޔZu>3>_^z˜8E(gj L)m$-D(͹W2GQfef yDWD9!W %r"sm'B 3͓J)ąIȑ3)ʌQfZ-Ңl}&"2H],R' EbSLi: cOԆHף6!"2Ga"BYQfDDDo>/[/~?PfYk0 Nä4&۩?})3"""'u?d&3*_|&Jssm(3"ʌ(3 Q&4E'sreFQfDQf6 ?7++{og({(/-%cfc+eFQfDDD16;Ebwc34uEsƺ_ʌ2#̈2c; h2&#E|[/oz7PfeFDD$5jku1Jd}y&l$/+ƈ3Ssx2̈(3"""tkem?(xEąuhb!`GLig[0FeFD&4z%?Z[ #:Z9g',eFDDDU5Q-HKbP>y싦nS'HZI2̈(3""" rRyxv%},'e_|YoBe,{gGei6`ef1Lffffff?333d&jf8TǏ5ѡVuVޛ7K{|>/sի2nܸa_~U类/Afopw"S{BJ#&x/ΙW"(%/ñƑúr)K;t6'?/z(֏WХy 隣 !v3H f(JXmwn|eg! L4hҥ0awﮖ-[jʔ)1clxb8qB:~,X:h $q֭%͛:vݻw5l0&q:]} uWQ=R&'+S6_l4ٟv"fue~$ kz]Lx?v}$l59: ɏ9&N2{_,O0x4!xw',mT=|^n-m7B2!q6jիILǾ!5hToASNudB>g,$$hGbGz({昻`s};g̚5KG70qv~(c?TC?(WXL9_*-l?C%-3Nvq~5wΣ;+[nĥ/NRNy.}+Ǥ]74f{@l=cA1r~(axhEQWُQM>n>R$]4CξT[W0*;U%-nr"^g奴pjƫ:m\K)5>';n4lJ~>+.DXw}=$Oق0R06Oz?+ǓzvQ3sztCH"D}5֚ #,YlBNAyS9Qz)* oT w2*wR7(q?.Ur|%ΐ7{{(>b+֧[);EZ΋3Жs8>>&_)$3! 
6nܨ#F֭[V5jUn._,pM>`}6m8 @3gjE8y&O aOGmVgk .l@tPO<(-7K.iر$_'Np~a%̛7bfmDhv ɳeE}ŊDnMMpɒ%v,oͶ]F-`ĺ5n8lSWRN&MmkoN9hõvR\4`ݾ}СZn;v4:Ć8CzYs5kB?1~IAСC5rHq8lL]o )3gιn?A)]׮]_kݺusa_( I,KFNhDYߛ(Aj)qGs8iHZ, ϧt6=EKN$VHClbͱb%1wzB Տ1chmlVGC*x ʸhwPC /න1`B]^u9-pM^z!e(Ϫ@gT-ԗk/cHͯr|HW:'5[q}:vM|<*f*݊ʶ{յ'Ujj%LW|w_3Q _9sxUHt*$3! 6 @=tan$j$<5'! 6$~$Ē$]k+ _2Nf-lmL`:M<^4$c 츙!7D#U@XObZ2/֖ٝ+izWGu˴=oCj׬L??m72 Ȍi/?럖FTgV5.*(S(/>{R\kClQ`!3m :r?2dٚ(jew!i7Z{w&ET $>H qڒ)J^b*Wz6C(sⰲN){r7w[GʿxB[\9g-p.}}76MlӇbz[r׫TL%C2#$3$\ݷo_4699OS_(!i;[{I1 @۷o'QAbΓmthC2MBO_OIldW^~߰_0&0n?'%wĀ?H 1@!?*!Anժ=J$$lP" PPD]G\`Lƍ2s43Kh%b `y mÑt`QO6Kb\6&rm _Ei~(5@V rOdύ"r$/>[!;lָ 2켕mン:TV\JCf(*ԞZp> "ICfV9gSVRA,:Rmq!Cd޽ser5kg؁$Qosnwcrl^'J~0Wg,1*?/iLE}M<t,$3!>\JP%@.V(֯ÕX<[-9~*^>m؀OK͗I -Qod&$3!B2Cʲ蛿g}g%(U=/|߾}dơ&$$A}%e?82C_ݩHfH8(iSs,XbR'(OTd%x 32&f·L@$ #OP\P. Hc/CDasBy&A%L0`%- rc5Ƶ$-/ԯ1iz#>\g k;16 %kXfv)]0ߗG]_'3fՐe ee}ĆU?/D( lõZlÉڜUX2Lʖ1אoLT3f^霬ڂz?,%3>{T'3fx4xW*3eDHfB4ugWrĢ֭$Ikr(s򈒫ٵ96_4$8f[بĎŌd&DHfxLy31ZASorH$$n'i&%w"Iw d%2C {Oߨ3}cIu3R,ʀHI)DU!f؀̰V' 8AfP H}0~gPchC 0s 1vQ[%ܘ) $vT;g>HQCI}y`QG_6Hf(Ճ 툽+bN8@i`Sƅ2?bIr͖=ε; ^o7:;9OqJG*'Rv*䳓Y>efN]٣HQ7Y_"0!0h+.PKiUG 3l{[Nz/KH>_3 }R& >\~gc@V 6vmED5usKPZzJPfc}lm1D؈TpMT$'l UK%V(;+`/9IElj?b5QljuHfB2NfvEX/$$iC !t F(}FrIRJp "\8:'Җϴ;dOIQM 4OI*IX$3!: D12vOYWzX{VX('A2} PD\9X-wÿ v+}T~XAx~0VGޘF>#1?Μwڡ !H E%񡱘3n>G Tڗ;٨ ^oRAAu1;:!6bᅔjQD:jN⓬Y& wxɬ::vb'I̬I8 k"AN,q`G)- yKf+HO߼ }7!V_?s L[g߯gkp6gHR\= Φ!7lHao4#^5 C b=SIs)8T`!͜P?RvI O!~1#v0ڵECf8SH&B2_B2S$- $\-Ν;+Wo>o=*w.^my'wsOdMb^`X$X' XN Na"C`$LԒJUKwu]Nq]ڜ1؏HtU ,4xmX C󡭹`V6;ېqIf  Z rQkzOg+0%6]y'У3p1Ƃg!^(':(ai`QiBZh#jN#4͖̀ ! 
]*#53@OK1*Fy@JM%~uiQJxܗ$}(“nPOk$sNքU#Dq T*{+4$3 AA|6C/I)=r;@w%g^fr7Pe$P/h@f5-#{B0G+jŻ~@@c E=8ro4ł9YcS2s亵vD]ǣX[[YGC2CA |턖?8=' _f(45PB4J,$;]p$h(y#@lu-AO (Y}*5Ywh=!G"Х|(2G/E$#ðXs*DH^CO*z!!Hf (WXJoށcV40n'424d˞4̣u;UÓh %OxJsY4fQȊ,J/o5Φuᑛ^kw_ ifͯ[ mH%Q9Wb}}1df/r _6K B<[;+;xZ;M00GeYn0c1Xz(eITB"uYUmxH~-ÈTkf\#ڒn]z7&,"!Hfx1|BP(g( hG^d O/fc܍e&F$ }r}1}aR: $2 %pi]("!WjJk2캵Ѧc͛7iZ3m&!Hflb8`R(  xAʍN6), K7ss5Q5عF!)(tmKqE:u xtjd HfHf( Bl벓}8c`1*dfɀ:ZN̠ΓR2f%X #:xN0:n RpTȊfjuk=miv.>}= A2C2CP(Jr?{x&-3< v. ;IpáZ*cB.[0̨`-8 ;ar#ژhk@;N</Ν; A2C2CP(Jq8*k-zM%"ژ_űcpY$۪d !P( w\A4=CM=YE1q/^o_=T3ddBP(Kn5{g8nhc}ى|{.l&!?$3 B A#3Ɖ'pndP( BkfZ3o8w;ڢdoP( B콎2sQ9sn"!Z BP(`o N3vG;$3́dBP(?fVG(8F$x 5we-x53Ls P( p DF:Nf4@I8%ؕ/ @2CP(J9ïXKFeĵD(8l09Pf }A"K 84Y£`Q蝲lu^C4!](_1B%e@bTeGT=@/ 0bO,'jB '5s˟]C .:rdRw0u @'šQOj.`{a:z,X քg%)HnJW+?W :,$-RMBzF"sx ^Y2]ݨ`귡FS^,mDi6 $ }+'P4,^#ڒ\+󇎟Qw̱cpY$I0ؐ$3$3 B{!;yY^bł43̘_0>d&LgH{\yAr%w͡0oZWtYOQQ~X2 D"J=B,D4@8 )yޣYKR^*_uцD[^&Fe)f'Oŋ| A2C2CP( ln!4SQ#JHTGSJ' *x%0"%341B BWCQ,(X'3}ZnIQoQ54ipsa9;d:> #܏BO@Ę-RܲҨ1_-A!q΃wTӸ/4ɧ$3$3 BUn\ǽkT]n5ͪj(QQ ײ#1Pj,3:>dFʎ\R ׮rŤ4J|KP+"pU.Q^C(Ge >5ʣ_#ڎցێ`0SN!#N7cKfBB2C2CP(.p{#*#IQ7dG}$v̫1ٹBU2(JAxQb,ilBtȢ]f q]~Ə84 dF]/lXH{u~7LQȌh+l{v557t?ˢ״QddBP(y w9ٙ-lmb@ZZKY8D{ٱtm1o"34-G5ATV` oT ks+( y`*3BĬKr02oz) >#_œf+ jJ>/^kdDmȆ#GӃh47o8P/!;8lVe~Xe5NduP(@zc#DaZDsP#jIMȎJܡKFXR nWh2ϫ]D 'ԚZ_=Xn9Z+`}]c!1caqc( \#v_kEV=e>qnҘi\o7OJ +q/v5]мÀ,5X4lei4x^có`1y9r`-8 g3Y:{xCX}s{-L{h M!W^D]xҿw}W^ڊy A&@0n-†dtipvᷜ#0cu*_vr{U_EN:^UVui|n&{24*I~5luw]F>c7Ab&pqY{wc'{?o|b = d4r*vzps{mM_{qL3{S}waMkD|Z3>2&ۯjVg?,9MKC6 ڂhmU:xP-[0wwwǼr VWW:}.3A8v{z~ bXƱn\7ɩ#\1mX6k|}~Tzrd1pocjK:H%fa~!'\^q뱵zF[Oo `c3/W(lEoW8ND6ML͸t;4WV0/(3[Q×7Y-Ce]_ϻpG 읅sI .ywC1Mȋ]yOf&3-=Ͻs償 EƸA:sQH́8ի׶rj+*)p؞vX|/cd͚P[f6%wioY{'ʶou3obr/ZE;{Z(tVYCZevXZZu+vXۻ|=imƫ놈VH>LjQպ*.nۇ'H:ѥ߻n 9pu`[Yvc ~"6 ]}‖SE=`&ׇaJwLZ 1F<8RY&pR=yEa᧘`~G@q@ c_?@ ۃR٦w4PނbOG:Êf g}o.,aNT$u;u/E`Z9'QJfD&!!֮]klpI1d&hA O\fdfYÑF+ivNhбj;p lGYƜr l@0liG,;+lIBā={Sn/I*OX+Wrrrm[x`--[g9oEE!Ȳ1v+j+Wf5|{ {^+-`CEji\k8,6[CVvߺL۾LOAb `{(=袢ޕW-ۜiN\͛;RRSΜ9ql4ݎ4u vV2jݺ 
{|e8Eb7CzXEl#w(+/wg<OMKμb Q|aYfi)"g,++Ǒܼ|INAdy*o ƅm6of7mv3|vQ,5'6ov2#9^,<{jdQqSO H=?<Ʌimvj[kd4J,PQ؊v+YKGsc=Tj-_fV\s̚5wK61};JʮA7/Hwv_jWN)e4cO٭F߃\]# >~?Bf+Ph8pCr{w`9V/;*5[1wg~oKgόIII\4 H$/-h̦m6cLѩ/uzKĩlRr;nϞܔ œ2 GTw jNmSԙ!]~S z|^ x,Y\J 4ީ p$"?^n>$)C񓧬t?7ZqI 8ߒڙ  {mHDv(c=yRw4*}U-hcƩ1~G͛7666Ff4ԄF#My S8]/ Gf8s[1Y@4cHY+U`B)77/RZPuQp8'3^-F@Ҙw|~ӶT֎.TA@ӧIq@ph(+~DIYӏ/pi;4ҥM;J6|v&{P5P#H HsƳ5ߥ靥Z4:wfүx"nx&N)+( #SPC}STR=#3UODې Oy;m6d`" Qx8Q/Y8R0wٯ޲GœQH)cMf—׃1//")i5`.n&.  %͍6Y{HeUSH#.;;׏ŒQP&- s(("F/ط~nO@$Bjf+l3J1u4&ΔYYZy׳~ȑꯣ&HLAKeӋPqٿ0]2/dCQ 73_9ynIF ipbKQbRNhMZ 01֬̒bpƼ 8.@Ac $M@ ! 2ØjoݹEz@/$U$N!INbѹrNʛz7ߥѰ-}@h\B0kNQSOĎtAǓ^ɌX~dvYwo$4.8=:A3tLJj}haTBl@:,[| bbJd_Ayo8hA^`jRNAOɥm,#NH$#@K-Őjx2?IWD_UlBbcM[/!F&5O5J,O9Kx2!{2ÿyuѠ^SDžb!Sǖd3," 6OPp5|'_)Cm ?_UUe7nܰ1 ڗ2x#Z,? R#N?Gׅ"z1иkKbOiM6klkyhZz. q@\6FQXKB]TGI;{`Bj6&9Hl(F+e{قogNUb<9x+ommuie>, 3A[@f Gtb,Мs p?iywBm<~z dBdҖ.̙BJZG\ ~\RЃ< 㔙*E0&`N|Z ~=4-R&k@$[7hq~7_d3r}G] k pzZFj%}/~VvP1 W^Aڥ#N<IDD:_ISlUj'u*I(Ԃ.RwEh>MKb AΑڅ_ wI?>$3;z(jˆM?OX˯ >=ȗP{ӗS`= #cfe43`lfZӋ_KRF%R4eW{{ )>4?ԔW6; =qv6p4?d] ߫YωϬoJܹs08>~2d=Go c_v݋9޿,{{-|M.ݏyw/{jg ޻ό/ibe(dFmJfsGOݜ￞s }g?;y{_@e1#BI IWoPRSXTt t8Ln/ܥ3Xښ}JM`V&3cccd())R7n388&1Jf>|2P(>DdjһB0_/רoۑ_Pt4Mt4l@0abvfO|ae4@Fw!}-}eьAJ #cBJFtvv2'f-I5%3Jf '59c5OVdQ 5.bFN3/Ɓ4LI£X^І}A7Q0ѧf8$[D`l0[l/^rGahhMb̨)Q( AjX\EHXЅHlΞ$O&-=ʼn hbqҌoF%(&`.gBT3gyVg~Sfh[M`RRRlHisI>̽{XIXIb̨=ɌBP(Zy Ml؀%],Ħ [l$!1! v1dD/đ? 
)RbxzwfO›oaX {W{x)Y郻F.eFa߲ ' K3fΝhjjseF/^mƃ*QSS2P( *6,$6/^6WVjS\9$%"->f4:piQ30U fO܅SkMu؃^QD_<!4 y198\kqM=p/bxe|^#FftOL7qs^RSSEygg~Q_v IQ`fgg1::4T2dFP( %6,|ciBͷߋjcسO6ܸy7YٹHIMGbR 2QffD+F~1B#a{޸G+_$Z|(7cQlHtrnn.[neދ(/ñcD}aw^<~%mT`166dFMMɌBP(LKq!7?C{q^@Ch۵յؾ 7nFAa1sDaUdߟ+, f QBzN8g/70i/} ^>È)y198\kɚ F >d&L3*Lˏh#MVP1*.yyye(++C}}=vލC… ʛy}y왐qx)QSS2P( b܂(7q{[ g$Y9$CeñsqN5ה{lhGs~!+'NVރ3υbb^𰄌͹UŲ̏)yP( B  Ԭ8 S#ys}- YWwIс؉8r$9/>P6@{Շm$Cͭolfii5{x/p,\ss 56pO]?ϽX__ ^IT\lBUų̌)@P( dsrX"opy9 ]u9:=e(4d5 q󎄷MQ~O5{x/p,\ss 56pOF?!d`.pX8YAATLcO 9##nUU 3fia$;Yz|]z}]~u# c.˜$I2faH$3 3$Iƌ1{3$Iƌ1ƌ$I1c4cF$ɘ1f$I2fcF$3ƌ11s{qRI$G1/n8ʠY>M$IGcƘGȓG^$Iz#h٘=0fcccƘc1f0f0fcc1f0fcc1f0fccc0fc`` ctt4:*B\<dt:Fi@~?}>9777VVVZM?sss?}tLMM]X|}|9xhh(xVI1} yݹcc?߿_}v`^ˣGʍu###;?x Z>fϟ?~|Ny7>07?.u||c鼬|.}!ӧe Yܽ{7<1Jyyf{MO_?ߦ٘8d?=yD8m|=3 Ho6a56}IG93:0?u#oħq2y>+lYsx{" ;F&U,Ӽr*8A96ItėƗ\ۓ}q63O0Ə}!7;'2&펾^%\y+cpwO&t;@`k|/!O|L6xP۬[ctGh$v6ЛNXo ,ʓN4sF5ש?֒w#+GAwvx0x.}!F4FGȤpMrۏэ/Oe⎧\d.bM߰a63y3Xΰ-晳63i?kffkGxW6}~6ֳoy؅ÖyG0['/^̈#]V^4<{:ߴi73ٴS1ꫯ`@IqS\ Dn"u8 Znlx(xC%*X!_n B 5;i9lKovi')B TԔ>(h;zRaN Z P<)x?h#`XCɡ;b|u&;E1 M/?U3ӫtA3*֜ |E{57_NѱF#'`Z]s@{ڃl_2f'3~d3{(wzH#K5*wΟbf>Q{j%ڈM]?/i֨ ~5AP{K.<_&d?Б>I}SC|DtO氕N/?z?_٫Ac5Slk5Oc#CXә@;W#yj-ď5k.كjSڴi73ٴFPp!MW ElW4]S8P(x 8,kf2Bp?ϋ͌1(aYE);'{-LeHf_D?k_Β1A 80ؠ:?3tETyx&5R|*>jq+'H]mf4sŌl)F  ZsFO+ѓk% 5Dk3C׈C9 9'?sL@c ~53?1l}`Xyz'n"Ŋ >lfV[}'יS#ć \ٻ 8>rH9V_uFL"&OL믿ƃr@Gw a3ܡ_OQjf4_ZÁ,d?jܚI'i91tݞޫ3r52W̝[tc'{binfv3i73/.PŃk*|͉F@S@ Xf|@`T8v2ԁ B)H6^]tA?9FdD :V@M@ {!Yw;LeSw%ɋZ; \# folA|'],FOB/2fƼIJCV SkG^Dg9O f~~wM\\d;Y3{sr_{j]}m63FfR#1 ʵolYC.b7>t/ԙ?^su>4z)=Ym.W}iQM>>/v"_ ߴi73?n~[zkPpWz5E1U*d֙ST|{eAao3|-`|wJ` 0th騈_;2鍯":,;;_@ fcDGuy'uT`K>/^7]lB~fm."G 1DwzJ9'K,l4A?_ү*b~|#o[["Mw<_{&S:Rg}6ʉО/^o~`<_W5\h7~ye/=J|T:Zcύ>b+O3 Mq]}a߷7ЉH(-fm3>^|:s5klt/'GsTGX+kߟb|0lg^~q!9QoWY ݦM̦f&zTdmpnfv(Әk7KQsti;&̿+|6\?ߓ3?~◝%:hV@xq~{h_teǷә4uiZ7Mׯ߁̆,MHcu2IiC&c|vTgZ,7L!e Y4FZc۶m= 
e1L.-k!7ic&;]rNiTP$o%pVXqِ7i1iOd.{5æ~CkcIZd;*WfIENDB`docker-1.10.3/docs/installation/images/ocean_save_token.png000066400000000000000000001445001267010174400237600ustar00rootroot00000000000000PNG  IHDR({"IDATx$YFkm۶mm{fֶm6zj?ۅWoّ|s"NӞ$g򖷜{$]3̅bGFF.-pc5nTq$СC].V,/a×\2vǎJl#n 7F2Vr\/aGɞ={./[*.w+篘dWN pS-XC%đr/pM syAxjBCCCڽ{uÇ_7<~t:}n7 pS-n 7 |a_.C 7ݷo-B&|m×1[vn íp{$#Ey *.?0;w;ܾoԭfp;!n íp{A"nI\,^/㱋Q%}=!"""""bRpKDMq7[pD-r!IʣKjFdn-&q],8a7tsʽ _h\Tr^AjiuuUfSg n 7 qK~v$8a~9TJ_:`C@cQɉdT*g n 7 2PTnge'G􌯍Q.Raz+?PG (^V5h8NIDOʘ~RXցŚMNjZ]ͮ7?M~Fk2_5}G|k/S(H(ɲtoRԹ?+pXo~_mn2F=}vH`V3n}{Ӻ{M?W(H(/!'|/jRfS!P:ݞb<]vfQ.jvɊ@@ P@!PWz!P2~vIۡ4[c>[?krD~I(H(bz~1{('!2<%v;^֝ߗ޾WƘkN,+%}qf723M(H]6y={V.\h ׹Aֈdži/Byy%P\QjG.j94Ze1K WWjUy/#DTRY,++>B 8կb4xpX"U-|EĐ!C4c BguM[l㄰L+**REE,<(o޽[[%%AC sjʔ)n~‘@c„ F/lN"GXʐkuγOY ey2p/ h߿_o:wh(kÆ z u|ݺup5-_\>x-Xyo]rJZ/AqkzlO"ez:8O+_fu:WR@bСZhغuz QЙ3g ڸq֯_P\\F %`G'Nի!?L |%$I&iܸqZt)d[ȨtB~+ O#yf͚5KV›uiDh!!!B̌l<#eի(C!}+=*Ώ3!]ବ,Ah:t| SddձcGAy6 T0 G@F q1B (H!-S!n\7o#$qӦM%(*2))I<ua;ZD oݹsĚD; rߵkF '"mŊ1b Ègʏ<ɑXEQ7m۶_ ! U=ܧ>{KJt IkOK.Vj ѯ?({E/P[MR1Yoivɢ iv@GNHHӴ)L?vR{.\IY5>)v>rHStsd*fPmfiK f'a ߍ{$J/&ߘYԖkL~}܈jYy#Z"jrDJruGP|#謾[s݇h1Ew`}x>elW֥i6NKNMoOsרik6SgpX@!{? 
< `\XCFi%I>$mH cB͈=ry(B}FyF K~ 4D3AiCl!c+?ό8 r1Zͽ̙C@KDs@ 7uGDWuh8E`xRD/餍])s-o DF9yFLiZIslB w$SN4"P t&ںukt6㡠sa;jB91FHaڏ!VBlPyaAl!ٹs% Sla/<&ؤ 1хgO-5(WalM~:qC -[RԻ#PY )P ;`C=#@SAyÅ+Pjө3#P?vὨSIjQ'#HxPrvoQ%)<Ǎ ƫSkn"( u 4%r~3ڈ(0#-tgL?!:} =3e)jc s=P& XʬonZ Qte>CFl© ~Zp`mc=d^j :~,\]Ƴbxw/~DUʚ+ŕ:i?Rt4 JLOKX Rh&D2 YtW+*K ^H5d7?g$ddx io1SJۅ3!^YN=:=(!eJ⏲?ŋ}F/7vG8d Btqgzw%T5%ٯ{㿢[Kՠ@EΘFؠ3FP3(}b+bPWl@tFzN.؈h'V#b0PEsE;ū 95>6f4lu!/<(YIpcL8~ÐS7ƳNr_t#(Xxaeo #PL0e<(}1uI:yx@a-ӿ>ߤ`8hR95^,f5FF|7rJH{R2;)Rz#m^N[F'5i< ,2?<WetN&6c/hə{(:obo**QiU|0^#9iƫroFn@X8:# <+Ȟ%uHPǎDC4 PB4!m̈ K<3[烆ȳyq@­ ڗ("yE2I' فu"vm >`'S(eApY 0006ĢxADgJ9 (“ xxq- !wqSPz_X+'e^ֿ{u6㲆*We ~Hr^SI_3R ʐ7װ^OO~zBv!-Y/)ġα+Y/@?h7…Hb?S5-fUtfzW*w3qC{gm5⠃q4QyT\[@ar/Ly+a*56wJ;uݥګnD򢳊Q;5,:]v=[{qsσX܂G3 MB,5"?S+S{ŸFA#viDf(Cv1|Mׯ>fR0Xo]%7f1.^2J5P^Xbu~y\Z}qL~3< juQ|^<4N@0RV|eB 3 mQ6xtΏQ{׵F?'QQxG]p /uk #˾~m/a W/OIpYJ[EW$AI'M/DkTm)`{u>ůfgDHs"u˔Ks;3T]6$4H+ٔ+[41֡XQl_.A/SG$T(:e\mN䅵(z#aև]9azȘѠmcxAF .\l]e`v? ].paE9Uh_{XqcSlKcZ{!OG 1›Yo U`s"KJԮkiO= 34bWoOU͉Zq.Ͼ=LiE,7#X… .\p j*%pJGNu!ݯ4RxUМY{"[XeHMj6(i@G'X0&]KK4zo6\,0XG<:nۂEcp:S6q… .\\zPv_P[WR+ epm )2e9ьY1Q΍KKƊ^vlEl->3=fuԶ۰lFm#mZ嚾{XNuO$Ew~.|(QTZE9Wr+NBiu#P E P@(|"PX[qAhNՅ8]uuuјf`oW)iL{T;uTߊǑN*JJJ/VWWI...)7.ƄE_ޞ^/c%QP[RUPP(&I \@@@@ @@@@ @@@@@<{@GƉ'>ACCC\r%Ν;䟲ؽ{wlذ!o/^gqᨨ\եߓ]ǎcҏ:u*r8ydܼy3r P6m;vΝ;ǁ"zc֬Y,i=sLZaIO4 F۷>}D.]{?ijPZZC vŧ~SԖǼyۘ;SNѭ[8p`L:5n_q?~k?%Ǔɓ'G-Yxq… cܸqqƍ;Ѵv! E̖ԭ[,X 7oW4餮k׮.-D0^~hٲeݻ7?#Zjm۶$x0aB7,_?bŊӿ}iݺu|wD+?;(9HާWӢ5{L[!PRL@ӧghiN ѣcΝT,Z( ӧOJA>V_}S='ƀf͚9P Immm(wTG@ԇ?x*(yVy2r(zhru- *J|1;cdm~ NSʓqZ+;r5L@{r%"sb85bK/^o/BxdVCLż)D2Mz7/;m7_|1Ѷ)b ( ASO=uj@V**bx q<,< /ozL{g_tE}MwZY^PMl~0 J0<0 ޯZkK UwӞ={tݣ>*xoMf[&'xtM-bp9#Ah_\9cޅ/'n{~Oǜ@}g' ̱@ߙ ;9Q9B##DqB DZB`obpiA@9+#؟o&pj/4p b7aa0`c6BOF"vd3\> `9CSVFf%""""bSME+j!b|RY@9ټ@!G a^ ]XI0*r `2NĔ]o"LzC!&Qql%G 5!h(Y,lB>}|'4E2eJ5!<'Eِ?gV$P """(ʏd/! 
)ZfW$k BY*l[ZޑD}?x3?C lB~pJ=dMi1YL2n=O!@neyoVB@,(h+o~'Dػw-P@%y2)&l˟ADiR%Y`n̶C\I(T d3IY*صM;ae@VDݴ )f~fj]Ӕz߉es$y"uJDDD$PFe#V>!%DV AUh~16tEFHبq['FAlK@ͧa.*ʼ߅P !l᲌l7$[kbj;NȴXy@xsY\n'&`mI^22 VŎZCVckmk,_1sP (N ;3{@,kc,fB̌0W22JmsD%r2Q /$ .#/NVD-{1_j# 8޿°?̇[-+P[#+YbF`| Ye9(6|\m_C(:SL(';V>X&2w>pGӱ[A(Z;ky(_@E. #U2ʜʀi'/rfI(Vɵi6Fc e՟c.`7%I6\dWd̯ʵ P)?Z^? =`EV'og16wbʪ9t32!pdg a>h'$Pd@l*]?_&Ηx(ٽ{ 7b\'`a0﮾j;))p=̜NX| 62J+S"k^;]Ș ‰&z 3"0r|VhGY[T},ƴؔ@/BP0od`/{>SOM{Dlڼw?kB7D5qeab#ɘ%P""""@ouYX7Tdyi%AVn~/‚@9bP34F<9hYq.C(EkJ|t ͟ry5+q7Kme}lwvi3i=Z&ivKck\`\@}7ñ} 2zFJ%"""""@]w@ڵk P믏6$3#jV@HDDDDDD%"""""" JDDDDD$P""""""(@HlHDDDDDD$P""""""@JDDDDDD%"""""( HDDDDDD$P""""" JDDDDD_# `.H8?Ͻ3333333c )>cfd hfw*仠uTCڑAgUY*0 0 cY  Lz?= Mp.wJrohCAۏJYHBبQH7W {ن1z_ ~بo72-澁R\>'~d,ӞcnnveL+aZk& 3C}g|^y.@fLwvTXK}gTWmܪѫE{d(ϦzG/@99˲+/_=O;{CRypok˅ww@O_J>~Yuz_w d?O~t<~Zu> Xq7gwP?_hv$LȸՑ_e+etE*J98}]\Cb}pbJs8@鹞4nFokZhpnu'+d(G$kz kp*WӕB/VPG#wU%*@ˡqP0Fnh뫅^OO;|#3[\1ss):X>(POSӹZdɽRRAlV 2Ӹ*@Z7Ut+bFV>bH=Cyuק4.^b$kzt?kV&>4O s_'~\L֝f݇B(ʅ\i54&ryc Lz.Eer3:fznjXM}-F_7Ņui[vw3޶wivɼuZ 2c aW3 />r)ֆJhM>jd>/TiDmgW"i[6zv0~|Ѯ+XFDmFMǧnCn/VJYĸ}2SB 1;YN5V-eq?tc|j_ pڇ@@e P M  @ P( P6-( P@!P@@|@@ypqkddDIvZMׯ_*ob1ؘ]@yXɓ'Քei@۶ms4(J{a}C g+C333]yVV411q辩ߴVcPZu~\ÇYzԔ=]&''ݿYiLnh܍EBqd .2333333330.33p vEY$s ~Jb骿M73Lt䑇FI 7o^z5{` /b$,ZYxqq7:_~i_| sE;;uc̙G}^[`A[/P~[Șzc$(D"HY+y$AIҰ$l~x뭷?_|EK/5(XNH ҁt̟? Eȵ,&3fh=1ͫ ᅴ-KzP$DbuG<$A ,oMљLIP{=DԘ^{=0/^AEJ AB_tHH$IPHa@ x{$XpNm=(yt2/@A{c5zqe};$g so6h7ni[l3Âqf7o{&H$A# JxߠG! a4stޫF꾉 OuN$IPH%Ś.̙32h#>EOh߭\|w! 
VZdf8^ /ԡ^3&֣wSO}&߻ۺ|婏yv^[f:oق;ϳSGFk"oc"g9~mDb a3%$(IPVI$ZV;Pr)«Bqh A\iQh)HM|]:x=X jmz2޹~3|j3ušW_@"h&n6q{ᇛ o\''P  rqw]W{@Wލ.xFO ;b^uꩧ}~#(_"X9?H 9$(9nZ$(ĪIP(߈Az;8*KoEoBBl(HpWg Dn_f$xiXY@fC(u0SMIPx}(֋PԵrBPxv7@N8S޾5> "߁ b[8x<Oףoʶp&ʾ(Xx1# 5nr!(#yG] 7\y>Hŀk$:H_\#kS$➺쮼JsYs~IPIP奞1%^xM1T &eRXG l;Biu][23w^Xr@r+i A*Va_ȉ(>P%źk(m`N!uBTQ< y`$)߉'|X["KzHHIaT8dų,AQg: A|vFS_ɁjMg\N$(# :A D"1uLv2y,YoGXZcABE943nHg\#LK+/eVͻyahtߜ(%/$%#:[C>2E(G]'i6WOv*5uT^$(ū_bSܱb AwuGh 6e$9*AEY)yJPWxr_C 6Iw_Qכ߭a+ E@9!(]@T;҃"kƌ֡AAT& F%(BmDH$ B7/㜝{Jp]yfG}R ezfظ,5G e=a0RJ+vSP} .Iw>  u B&IH$ ur駓o(9p\aN$'!Q!Rddbro\Fzy!B~^Ľ~GuaċGiYS E~;Yv2Իz^^cWKi`lc2RxH?w" J"H$V)$AB#^=4Mvoܿ\!_Ԙ,OHy CW&㼎hů axTH 7(W Mo6H2H$D ,|{98P\.r܋9\(.r\((.rv|GI wd,x|E˯.ׯv/x9N楚|~)WI{y%S[OLP"Vܑ7)k1{t\.}#GhԨQ9r/~>\*kwwwݽ{WK.sŸ{Rbql! s$m%@XHI?bu#',qGr\((P}:cx<%K?V>}h>|X͚5##tmiӦ1fesl&M˗/+G*|k֬~r?9C .$Lr/p=!t5 ̙3 !G:u"wTȍ+cUciu`ߗTcV_u^˼Oiȁ~x\Vaz؃:QC%|NƝZzWϵBP/sJY?taNQJ֫Ŋ?ev&KS(@5@ vj~-=r0j&Hk*hz2)M?Q_]~}f՞tFJi)v5(.(0xB,Iڵ+Lu놱P0~aiĈy&b0b(1jC}q EgYҥ `es1B=z(xr&S;vԺuKƈ 70E`Æ hܸqڶm/399s攀7M={$/B#NhСG^D1n %7o_| r}35 "=~1VΝ_OƇ[G>_9ƛv@_G9wCO'X… smQ!71W**6@q-ruVb#&ྡྷZ\q cOlڸq#**= p%@{mƌ@;pMBJXfGF-Wǘ f[^,Ō_ d 4ښP,[jQ~gSUX0mVunrAN?38oPX J9!gYm0(!9Vr"U?N3㘷 [$3U4i}x$;&v2~[Z @H=" ]~VguUTVƴ"CP\. 
?&2qgL1q6+iLx8IFxJͱpY~=㊉{:C@ Oa!P`bUP&koLlL:fc \џ^zaX1ߜ91Pq qJq>9ba<&;u#K<]DGWZUgDn&@TF[ŀ2zkrZobtm680 jU>Ud rҌP\.VPxҏ1 3~bt1y2d&/svD*S88 y\HT5 3(pD>N{JTa- &N5G5)IAOmBm;F< ( ̴8xh!X0W :p\4Fu޸q# ;QGAqhn#Տ&`C~;KOe|/>>Gs}h 帐?8W-PPcgAaa}9@A|ô(djL偙Uf[$5B|/+(_{j:+h≴+ SÄߖ)cz[@zc [0Hqs>02Р* _ ǖ־j)7PtY$CQ9\.W#L|P0`# JSn}&i^؇1r=/`c<cS{<TvI&H00,F(P քߩ 6* P0A\@pxj^n9BS&b՜sHT";  C0vb 62&ƑƑ1#{r_r\{c}mDW\$6b%}),g FNrX%)*I9} H8H&& 1,”DeJ}.?YAfO_})~j03&zJ-j(L?iے*U y'v2B}v'u`&,p} &Bw`U'<-TS$ h ۷UH>\&Yl&0֫0U㺄>.kYTO;/ 4HdeXrPP\P U\0H2( n6Ǔi6s O1c0z ޕ1/y 7 i;!:E'wb|0UL;+62t&rb*ܓ?_;x}0TL @k6,-^^g'Vbhqoվ5EiXۭm=W X^3S*GgVIK볶NM^"O@S~{r\((.5TOwaT!0a]w_SO*T3xqh3 3/3naffoszFߐsVTwչm0.i:T6;d̴"`bb"}\+R/)3::q!)S; k%]<~W["W[ZXkltDAQP E.ŠĬ4`v"21ڲ󭮼9Z" """5}EBkNt'Ք"iM5 ‡$M*MMM1YfEDDEA5jEػwo+^XNG<#k.(,bG***xB_ee%Flt|mm- ֣466kMMMq~X+iUU\k{uuuNsM,("" J R[ " J&A5,#Nb f?c0pΝ;_ ɥ8yd a㣏>Zp}&&&sŸĘ_sQ姟~]v?544ĶmREg2/3ʋcĉqرcGTWW#,(x_`ٓ_~C8߿Jv>gB|lCR6((%sRݗ\Q:Z)r͵ ((lw}g[or Oӓk6y8uT /}''~a{q]w%7x#*‹/[l7|3%+ؖI?}p>|IEPHةL D#}ϩ tCb.ƙj<"(TOw!^Ăg@Ucm d-"y$6ld(훵6eeQPHLZAp$!yG(^{50+m` 2n|Cb`Q "&px2)UqyLBP.w@1hEOΔ:0>|ycAx z8TL&'1~ܟ|u>2 /'WR6((Ӻrc3YT5b|4rM0 " ꠊ<gjQJ* a_D`!=2LaBR$|cCY` zdFMqwc@<>"@5X,;(""uAaZ̿Ӻ*{YPj]?Y kNiuS((COe,[U|K1EEŤ$ػ *(;"CYMӈb/@@("P(V(\oW#ҵ bM (!P^R~M6ڵd).-LvoGlys@(!PR|& (@AN) [s P %duu5;wnymm- P %N^XXȗ/_^@SWVVãG(< ȑ#(U>|x> P ֠Ět=|09sf\m?J)~Y@A P@@@@(@  @(&(E P( P@A0A@(@ PMc1uYJ]c1fn{@A P@@@@(@  @(&(E P( P@ALH) {6}iv3vMmv_}:_z .3333333.^ff ó 89I:Vs2UujƶnVllpi?d!SX0tЗuVGmyZoalK6a@ǐY ߨr^9,Ob+~:{ B&?Z{ ˾;@xww9\{gI ߜ^Yol{?A Y+'\/{l!W_Ϭd{Sq+-wܾ{SuO1te@Z5'~gt|s0HK2Xcj٧=_\ nHOP~ESST~}[w_IR~m,OKR|.Q⛯UKti2ֱ2/qבqȃ\G9(| #'FLJĄؘ0y;19).4oݵkܹr<9NcN։mqvha6t#묓e0N8GVDȵ6:T .p:W6=?Z+~|8(`hsH~VoWǔ_}zlo!B.@=z2< '|%FwΦ~~ޱc_78҈6x/E;iԔv5Yr*T5QMc̕xܐf;PiʄIiF3'(9AS\t[T?HʫH;p C2|}wO"(&_١FNs222"0:, \G{!oxxa OАl۶t<s`'9M(lM ݁usuc\)p3ۦђOuB@r~Wߵ?k ->;L{]s#ρ[NgUO;:s22PΡwmUℑLRGQ~'.Ik r[j,oo.d,6e֊Zw%o79~uA u&Hjoe4 krYTn$j3'(9A*5.H%Տޕx]r? 
`߿7trU1:Gt@appЁ;Zd`:nbN/MԦ`OV1y;?( ~hkQ~wǿ*ˡK#qW*__"+7UŹ6e:=2To,RoʌwD~=#ÖG2Wjcw#9Q%~0c#yjHN&Tj] *њ zvlEoSdݡ#Fi8A=ʪGȼD$B)W_=P{X68ـWDuᵻ~C[D `W4i2nA|#N4 9X|hluŜ苒u9cC,?\ 9׽WjsU,.$\fQYK,fj dfFE aX"ufJѶTzhé~I5d쬍 HTy벜>^OdµR4+r TA29@t#$YI0AV75;?.oaoNPryKq̟ӎµJ{aTf*aB@/=oGvJ鵵 Eg32 uZ zxMp h&>hWHo999".h`(zm&pսN9?pP}9[^: `S-QD\R}p;D>DN?+N CmA 4<x,$=CՑ-GvlCY&hNڨO;S0+'(`z{jJ$?12pzSp*/+ΨaI!K fDkPe#)VE0D*Q#V]EdT_a͈=ὢ VRԐ ixo*H>=[&&"(,=My0Jd 2CpkTnʛU9hI$ݓuLw\nbuYhX" v}c|Qk2dꨕֈLE6~bJe9ה5:߰#>~Y svG0"xQ3%y!0RiG:I Bo;`}AbGW?H 5a$KWv;pŁNMQ}?V5룓OY%nX%psşO#ɷ/ٹ@m(UHJUCp~b_ViS-(n3bJ-:!(NPtJ_&AxyeGPf?4}32[j@[QN\|=ʵ$@oXzOMy% R$}Sx?HK RĆL&(׼S0]smDj]k;ll&(X`c:?^v ϛ煶4Z:xN>E>G|rt/J-4 $#D6y 9@F=ynS^d bgpmPtsSP[x$SmO(Ђ}^oGˆ2=i:,G[W(%|<~\Z/#v:|yu'?&/H?gl,Țj45T[A'CV ?6D׋!L-6fxvwA+~"?(z Fh\4#Wb{f\4V0eiilZ6nsZN {Dz%Y*ܼ#(iq}s9A |c=I|5(KQdON~?@Lr|ǘˌ[GֵX)y锵QuY@gXՋo5cSD : "9\w8_Io h]!O٩~dgl8_{pI}&ڶ'}M?/:zA?J~9QGmԝH'-`}6rm llEF]򄝴Yɻ5KD0鶏YsUY;uHXqHG ~g*Ĵbpk˅,N} HMojvU8m^laA95 CީFk$*c{1pC~NkU~)"')Ȓ*܆.[ݞywk<`dy]lHw^貳2IyrI{QfUd _짊mlFn(FCϳ^ JPSRZ̟})6$O@Ҿsq;J$2=w|pP.~s;g_ÿ҆6%MDxAmc 5}S2&m3<4; ޑ'm7wMĜw[ ]R@^Sk`zgw6j l[oز|yo}r'|SO) j;EfRo:GPk?~+˰ J\ët mbAqc E7n.-Tc[Evˊlɴd<YYDwwL|My[5b: -P 0qݚ``.1 w?b&X&J#|i/O0v?F>U?>}J5LD '2EpuiFƜa-Aʌ,Jw+3|8ߜwοX[™7fHF3*.6r=N5ġC0A> $? 
eIk1-Ȋ,P8\_q{wA:B?-TR7hְ.GPܹCGGVAA6m[ΛD̆DY]/ 9.\,A|Ӟ`0$L$x{ =Zx,cmE!> GEEV^+Wmڵ%%G  5gb/_8sēFLXMf6{}^HNI-_"3h|?q@۳OHrh'bٲeXt)gYY6mڄ={# >b?Y$)TD@y;$]zgъBkJ!x`ShtmϝBr X?o_arhbɒ%(..'aڊSN:2k/ ݕ׾qPВ"@ycuN9n5qDM] g{`;HJDM-}p>/f&---0a04sr" B`A|?CyJq@y0)jR&Vwњum`zZRW0)& w M"LrAAH'e%100x׼2MVfq*>׳_~NR<&Vƍ]arI& nߐ]`}sK/VB$z/@oRϞ?&;'&"LAhjuX_8WBQuϝD}wS1S*"R)L>rpDA!Me.]>?zXB`pp<+"P^s L 6t7_Eނy)t 3syI}}+LS0Awd>>ھ K{\w-k$ބt7|]E&@&MMM8qD AA˪,{f`X&76ƍPU5EL/-Rt*gmWAaRRRV0ٽ{+LܹDAX]{f__+E,,Ș~#J KΥi;K* $% S% =na&_SV!'y~qq|'qϦ@b.j &AOgJc??<`/X9_ʦ՜?#0Ih)t֝l$$BV|]x "ho,| Z}Ɨ=9 EPAAҦ>Xq<3㵭LR?{`!_888_zڌ$=d׺ B䖆8E  )@= <F,܃43qԆ2DK0BU:F; ԹVa $y: %P|4w l YاDK+`܊-0ac]5Fw'b壿~sxAX{DVA圧"  ㌫}R0۷[ Aq׊8œ6~Ʒ9F+.T@?k`vw!zoPwbMfTp'zdvJW) aM5c" Ҡ-!a@w;`vs!|JŸ8hjV[>V;׆` gUPPHc:Y*o3Urў_71p8* 558~8C^NatǏ3PfApL%hllč7Ċ"  ].ʚb0M)T¬uzp]>߫5aOh{vAkE׼Yfh|< !|pDN Fy0hX+ZME0y0uz3ODvk~0Gθ/-Duu5:;;ѣ)/AAֱ#P+?4s !u,u- /q,sQ, *&,h+wG4@5&P+ V }O?W ׻q#lڕ+J_8}bq,PDgAp9cW֭[UU+  L(q[a} uFQ~$亾Ҳj"H%R C:Q FƄtvwCN?8z# c"fjhwA ^<-11=r1^7x: .^[kKKA+JWW"dAX<")6pSїY\g;U [;#P٣IWzi&z i%tcjQ1ø~c]m\ T 9pS|bP2i}HiQ}> m\@$ уDEhQ5ŅhiiT  HM.]{e= OƲwpݹeB(zw9'R:$oO3\zf90nNr3bDuҡKnK*1(}~-4|.{CK}W-Z4Ay7(}Yl'K{}G]l'YS@ի+[+A֠t?2ۛ&Mro;EQRtÐ9o޼XH P^R`ٳ3jժTݾ.j)6(3+V(>9s_ Ƌ\jJtf 6lPlPpxYn],~x@cW|dݶQ &!rlBNeǶmJ_1t/$F)]]3c8jƌנQȕ4( 7|$]~љMWkxijڧl PX%=Y2N%Ftk6Ǘ|o^f9q䇧ub.#~a?}crڇMƍG2 ~xˣ*ā>gMƙ+k?v9 8C>e}ؚkHo<9~G#<"oI?UMH_qo|MH>X!ы__~e:Ycdl.(Gf{*.p|_hߒ+5i E.Y:ܓ䑝̯Ii]9=[}z#] WZ[e_uO帜xuC[.r7^zIA8P w@̙3IEzCW\, s@FaV 59@"%b& + 5;P]9kͣ?=3~r"~2.H W ^M-|* I֛5Q|O'<'; +ѝ '$ӘYl}b[Ϭ_i;?bE'j4`X5vxL':*pcإ%~ H7X줃-^o V(&!ؕ f@_7`1g ;<%wmX >X# ďP $< @ɵߘO9yŁ.!抱!4vȲG|Ҝ[r>uX@ D&2!~dGY=5\c~KYP u~$s|t2^ cHHL/AlHnnvjؔn9>'b`&WC|-J6K E͋%kn"o}7]Vmr:{zFge0[m>tF]WP~CMލhRnXfuŁ_a-! "+D9 HP`i B5K:|WXmPSn4Q;mGٕIU r; 4n#po؋إQC[ ;> (. 8@xWq!3X 5H>y7ܨ)g#g DA =`SeK ˱qr1# 5'lPR9+Ybf_0>z %7TrFb]9=D :[rDö4l:;P8DWbcuxC? #]|̗_ ZQnJC$r3{4*N -bøx <3cpl6(|AaCr|`_A=6X^7(l\W _u.[?gˠu:iǂBY# ڿo7.z+w?W*hpse`rҡOUwJF^һxX]j9:h0w rQۘg̽oC)m{Hgw=~mS^,hgQ6 dn-CWoAN~lw^8!. 
HNGB#)/jܳƽKQyfAUt( I#c&(pV.7 yEC0ŧ*)l^$?ﮌ>e6vQm7ȄlWL`PO9 'n,g[(3 >P7_ƛk,喊wU$VNrTSo/w?c|cQ'r<9f?m3orc %@,8񫸹[r G Jn-ۼU Oy?9aM&ƹ* JYi<)}INƊIL쭉N 8HHwӬ?W r82G'WKćr)XQ)ƹMk/(7c$=}7'įIVjckE޲l: S=Yie]OօX;Mi^n}b?J!^ĝwi/}iTB;fel[֬3{`Ok׷xi!v~?X:Omjo0ߞ?D]W_hkϱw4(Q[܃| ;xlsN{8_ oß$v{H^sy٠yQpA1΢PTR,eF<4Ky #2`#h(+`x +n`L3 Ǯ<',![腟m@@ЀV:0p\Eo@&90ǿu𦋿t)W#[>s @3T~`#@'2o#JNv6e7_WSC8`*gtm x#rGer5D4/d%>Kc 6<>9LDt>;@Exx(>0Sel\\͈oBirb]}I֨O3xэ7ֶK  @Heȁz4Ģ&$G om4)?`7H'vZ+i܎ ]̒#O$r>t@b(FTBjkc3itx4#a8,qvf8yC8ď b,P?2rGg;WtUD!MssrZ:] hs]*voЃ@z(@"E{#M}ANcN7i½FPwO#v\uq AX~ Z5dJ55,kBO# "Mjݤ5i.1y#=JFA.?PP B]+|8 "RW{, &B& ](A&<@,A OrDDaxW'w 2a!sBV)P A>C9Bj= "^8rCmMEF&sd?Wn)lٲ===8p9\DAچLJAPaϑ>saZP(G{< J5c^9 1^Ɠ9&}cqV(t~cO so5![_J9uYG_](e`i3w#Ak\Cn{OS׋Pr %HzZBMnyBݓa`aH?ʆ眛%%!pI'kPv_*c2e7doe;  ˜?(j:=S>.L tzo~RAAޭO3fUX-xfK3nF@U-xiB("uI0fyFXpѫh'Aia ֵiЪLG( TKqTܸLN}r= ˜3Y+z<󛄹pxfӰ2IX6b2S[O·(V?\/JӨuoeNI4o nd-V猪qV(Ave6G}+;ox8FnV~RAsu77߼-»Յ>B lm  _;m6&Z9A =Յ)dtPy_嬖Ჺ,#j8T)֙QuZz=,P.<'g_79)`vnK.P߷AvzrPޡ*>d<#m0Р JOMF?@!( Wײ?^TK'EA"@/"(yу xMTz~3mXAb-,[&NG#z=IJ%y?a78Zx+crw! ;i{¥z-LwɾMAAB ʫDpeCi@a/~ > '5BTCi.'K ~ RT 1~v;! 
CJ $:'m۶A4:0 b~~wltKr//V"mM$Nȸ 144)ܹsG*w RfX<\f8w Q(pQwo8^fB^y9r܃dOAiئ0Ƨ`fqT,anߋ!^%J##[}y*Gkg}ߓDAwK/QmVP~dA쨉wsN8pJ ݈8A)(jۯ)%crmD!̡AA俋 ] #C$NRT/1;Ԥi0 F@18b(Hp8;h zk$B*y{ |Dw;Bk]Jq(_|A]="p!OY]Ҿy)oB۰U v؁;Eĉ QTp-,|V)'*t^UBѳ`#]Nk&3/X;6Dg0fqpZpkFCCojΌy ;TrַLW{<<~% :9~8م4cAyty"oWJ`^ھe6p/g] AnȾH,过p PbT F u$_c,."I筣 {#So#쯉nkåKCͱ {F ST=1W!Xg ټMek #%_|_dd,N֮@aB%t~Ӿ NAADݻwqE|GNKBXKTߙUS,Z\B%c`E UÄkqBe,Hz(G;3^{wg 33;Clj2qر^UiI[n%uWw/gzuNwfHpRmu=ZK'@|Ep$8_{o~i U~*?T.W?dѩ rEykHBP(]Iy\tT=yl-OB};6 /ABj4p8UPPE!^ڜ@4e̙(wJ n~CpI1i:( Bsx)zYdLT3Y/Ǥf~p_ @?:mwSdp͕JC9?~!eO,]9!~Qi].o^<ŗ|+9G;d)yD"Äj#F&QOM BB] ܹsΉ |٘G'eybMM@=ӏ d*Yd¹,ޡu84(o۶Mϟoܡ.'S}cI4uƊ%Tߑ=1qGarC~ ( B!_r]S=N&O,eyoܑێhdi6V{%!g=Yꦽ;l JQQz~gz9J4Ç'aB13'{yr $arQކy?_r t.yO "Ϥ̔^f:tm#6mڴiӦ$'idcpѢE6ej}0#±&TÇ79c,+"$[ %M8\֍̃0/cGNfP(tOG#Cz~'_rEŽd1Ê 9dlGxr˞tH>IUƪEZ],-kkuziۻCڏ4JWwG<{PfN:/31.!j]M1$}-j.߁x/x?xOxߠ+ BC }s۷O֮]kO2/,,4xLVfep;wK'THKRi;\ `?^XI)NfE6t=rD"gܑ?z>_!<_ #Kn/J\Hw<C{ť2ۓ9^D$~MsfԲfH%S'7GW/P( Jmo߾]eܹ2~x$//ܧ+ˊ7UNI}L7NSy!DlMCK"s69xnj#eeek Nqc‹d7C&1:V)$'I$D;)?ZN{ !_W~xx~~qqy0r7SFxG0\ff'7GWgwGɬJ?hAXJ۷o ϟ?j*\ڵKm&6mlQ* V\aϟOԉ"=9=ާ_gc\ġy1?ik|+ BPJS|_n*E8qQ\S`wpuAĀ:8kq H`J j%4z ώe`̟uX'eݬ~ >~{PF ӈ{<~t`0`Nqe_ N7HH"+?{s<{O?G8Ǹ<~U( By,62}ʋ/LXugCuAZZszpB!Fu6Dѷ*h¢0 Q+"1b9GZF0?0?%`̟ue:@Q( BPt 29YCFV!, %ӜTWxb9GZFXà `Vm, 0Z ]x2b;Jut )^Kaϟ. 
.k{ȌhKݙ+ =EFzxW@@(@(@(@(@(V(((((8k PjwfoMo[,H{A~(ꏮ?>?Yfs-Hs=Z\&YJ|S;u]񕕕_XZ#͑h r]k݋@C*N&k;R_2bi4G# Il *ka@9Kk9iȦ@9ylvU}}}~:1bi4G# I,СC~}?z>瀵C-/XZ#͑HE$imkfnIL&'[,H{A"iɶ4|e fcczya&`b/r[vK+iDki4G# OJl[ZZڑC))\rرa1/Hմz=];P_nAf?gfffffVH3i4E"Hs= i4I$xm-伯^a)[r^Xxvm㩞<1o2YZ!͐vh P"mHk9iH$m8RZ\anMiy$}=㙙 i4DZ"MHc58I@ɶ|)9Ēamׯns.œ 333333뽐vHC%i4FZ#㤵IMGRrWTM,a?~xfffffffHC(I[0Is.ɇPIŌcd?Yq5FameD\LIENDB`docker-1.10.3/docs/installation/images/ocean_token_create.png000066400000000000000000001574621267010174400243000ustar00rootroot00000000000000PNG  IHDR HxIDATx$iE׶Y۶7۶m[mmcm['blTwq3{o|< 0C{jw;oT t23;uܹeff..]zFիDDDDDtye{3 C4$(;|gsΕ#%&&z""b􈈈aVWvs22΅aKAW/Or… /{I+\]"""""b׳/W~WWWWWW˂@XXAAjAӸ0--]ݐrwsFFF'Keue<ߘ~}TWWv B$\,2TrssU)A]*{Ad{2~[輝B$1hA{&tŋ׉8r޼yw,Xn~CDDDDRY3=xm٣f͚5tڴizNnEPsHNN>;XvB_' >}#SN}j/L8 &ꏯ!""""btdǍL||qqq[oTWWWpqOW7l5 .t'iAۋT :Ю!euuuuVRHIhzk# JBțEj"hA[ :"Jn鑒$2=A0px޿K0euuu-o\ޠK$5lKr^KIp{>ͧ7pxAx\;@_uuu=Jn٬i?S958$nn^=~[gfJKKY~~m߾[XXhGcp)/ wW9I]@=C%򧉉kYoҵU Zm쏻¨g{١PYYi 466ZII544Z]P^NGI ݺ A`? ZA6$<-NhIXW\g؋YjO}}Õ?kni P>EV?u%A[|&g6w96';,mff+j P@IXRo} yU}샕kkڛ,aTy~ko-Rb [$P?(F2KیlRm;Fklʺf[ھ:nLǪ(hA|g:{y_cnBvCJ b B`Ci/ ·=>nr&VZMCKw_g۟o1W٠yܷR$PJ푿i*@\v,&+k VUlr?Y܇ l|^9>j9h sJm%( HI$[Unpr-im ZMx/yvr?)Kc>|=~Yh$ %6_l!! ,YW??L5RZJ\eeXL; :e _wwww#x$wK KCSnC{9\xՇVwW]DƔr QmS-//O%ry)((:i‡FG>>(mrssi^),,t[G_wK{pny?5m19{;v}c~);55Om,'N͛eF.*k.innYd_^Zeƌvk&7 Gt 6z%&LpReҥlӧOott$m~.]: I#H¦r- >& ݐ)s+MD]7wJ(Ս#:MtkRyt~k䡪YξF#}$Ç1cuLF7ZHyADmM8I=O)))KD!G>}̃Ni`>h_i#&s|zMMIC9p@H|gh򢢢 ~@ {S_#u۶m2rH)--E_~= ={[ע9s戎s1z.o R&!OηĴh"Hddd+xp...N/SNu f ?~\:uꄣ4>>^fΜRf+}С t7.s H3EF71X9-xA# ]WIɸ3$2ZƟ.kq3\0DL/) ˏȼ2p#%T>Cw{πetV/F>K?H>ӣݒ<.\(\~YrJҥ(>v%GeHzY.4pQz'ȕ.+Ӥ˪4Zb;WO2p:Mf 8<+VPׯ^@(pcǎ 4Yfɸq;!!\yU:4i{Ro\dرD8pDDDj\hѣNfϞmy]˃ݵx,!Zjk/޴i*$@FFdڵ}[N8nk׮me˖QiӦtHZZL2rY`rٲe }i͕͛t z=r+ o6Fr@H0`-b7uֹNj$snϹцH ߿ߍ8{ &6t07!6IfnŜsFLJlp~+lw`dGOm#vzP'dyϯ#G$XIYW'gUKOz+Cw!{uuo,VST)H q׸'JH+kRQvIx=p 3gw-r( ~$Tࡹ WX'-A B jy?0(i9>SpPRR"oL=xDw4Fq6njaÆ=@&@ 4tZs6 ?>^jʃ4 I@xk<ፅ`8n>ۺu+m#ZC=BPp pGΐ6! 
؈1/WtN?4cXkΝ;9 =sDK?_ɛ61f} [0^+G/[> O<񤣑 |AKl2JJ&_Fo5 C4`_6UV!eF>уs^=\] "$*Vf&Ed̐*pR@X[z^Q.G~+ceܐioh (\9F\ˤ`oϕ,@ɚ#+*O _\:Ypպ՞oEږoYġ4V|ꩧs5Y}ul](7Ǻo!l/@ 7hlA#k7L!v\zJƶ9I8h X|>M@H4`@i@ dRb@PF!I摦l~St $xvә'BĀAH޽;Q t&6Aw߼' {'O `Ů!@Bܸ;}Fub_:9=lj)E.wB;'Cmf@D"OpIQꌽ)7^L[/1ڋ`" _{yn݈A~hGz ?AD|-z9Sh `oر'xHi]VU}$ *(E_~rGt?5 @vV']3eҙ]Q 'b{_@(˵ˑZ, 7ɏ_dmUܣ@Y32kx.#Lleex)h$ #LcuTb锻JhI'^%52DIi{9 Q 1hh+}(!-DNg& Dy[̠vxmZĄ~`L`OHv1!hOM,2VbzLyAANv(z]QHkvT'B*1[$`F#!aéz]L*%iD!P0D#/K\IF^S"gvޚTy&og^k䲴PIl(o' v7b3{ucϰ'|$B& y5rhypn X4!AV,ݩ>1/|j[s0yw[[HHԭJeTIquS@A1`/7P8X$.0Q``xCt5B`Hlx ( pP>l(ds!._m$:SǖV@l Y>m +zc+ڀ \ KO}&$ mLm'"cqRg8 /@Ctt˵t r`'!$@ ߐ # pp`;k$6Fl >:d;5p}Cʅ _D11?Q9}vЏ9D;&l}Ŗ&v ?o't5 %/8!v@>;gCB3 (U/d%/*Vod.Z/7M|% 1=hm%DV yM`"q?$uwܝ9K~8Jrv=XXvmՉJ\vC7;0xIMhx]/yQJd~Ӧ8I 9 s?H' 5Mrkx+'cש,^n<>:k߇c҆ N; <.u"p=eo x X,\ipDlq#|x9f4Kw@a3l(XˎGQ7Hαh"8bZdvD2zB%{Bh `clEMIzlg'ǵ؟2/',COnH*Ā 'HGt$0> 3o }ijdm(9Fu6QO섮 ؃h{p`lC76ӟ/m11!x ;xGH}?D:Y Kqz{JTelU0~[4% ʔP7MQp9$(Çn֯Ub{fϕ2ʃYsI% HBt}47,EX#K++!6/&~0}h|pfS&S|L,)=V)I2uo;U(EՕdI.Ҥ ۡ@A)p1yq];@ic!< ǽ0[.= viԓ@(j0DגFDYQ ^B.w'+P2QUnIz<->~N( +/je\:I4mh^YvP^8<@6)xZ DmG2l'c{h@D鹵Uq20\OWwY[C]I(Y@7dKJ6jSJgh2G$J@~/. 
z~֧wA|\%(+s]# 5fSt / #Mv:hS# ϴXb [#EDylAٖ#egOvT*`ٱ3iE9#N _z-2GI9rZi_T'e  (w0>ֶ AQ൲I{oBq~s8 <'x$=|m8车 -@-&_OYmW' 65gG̲FYq⌄+ Oʆ2uPF+Y&C2P{-)NggD2-V~Ӗ~L.:PXLٝ/;eʞ|1[oȖ&L9)w;xz'x'IvLL<4|_Ȩ$dk ݐ%}s”<>SNW X) 57R%UqӔ)ҲG6ƔɶDKdsoTb!taU;7jKLܑ{'x'x$<_$ D+%L{ )SjΞou=@SF>pT\/;}q`5YB3V4<:?-~2UJJ65 KwYq@׋:vNurЃH{p:ޫ%33@)l D˿5xYkȯw(5sngDH   "A$DH8'Dꙛݗ7}̅}%RE6\lZE@ű{ ZӛbLnpl-|rr2m4DB53q}iyssqnm17<6KUeBV(6|2XV@$" HD @$" HD PmHD @$" HD @$"@$" HD dB,EEUWe Doo/gpp0z/ZVcr[s5/O?ڪ률("sύJ+Ey"c=zkĆn]w]x]{_^xa^2ꫯ=P>ҞknG%X"_~% z ?<Ѝ7Gyd_O<1=v5ׄ >Xy{^"?X|c饗-R@GdoF$ iƦnW_};ck|w-"N?Xq㦛n#<?죘 "zIEi=c"j/|qp^{-[n8S_Q뮥6a HXXPPPPPDB +Bqg[z& H@[&>99ٶ=׶#HvA^u~*stt\\)Ni[ć]?GBovm8۶0x圈O>H>3be7x#;1us_ci?vk\y&ka3cHyo = wv-((X(QPDB ̜OJFd{<0,JfmSO7|3r {l|Q;BupRv|oB3Q).xnqE؜p l ExsAƧ~:gXwu=QpoK3v2e~A1]w2G+6D+}dM&.jMԮ1~|C?8k"#aǢ.c_W)=ky6l3d}B[Gm[[+#[מFW^ye6v8餓Jqo?\c7Ye::Ih *{L-5hOl  H("YV$N7 "o G;gyf(Ux2SDfhC0To?:* ; /sD4NB=i1#uOƳ> /*(dƉ>|rd"H9ĮNERvr E}Ćqd $O>*瞡O0!%\2 f@=2j͊H/>Cw_HXB. G+EݧO<noyϚsd/}ֈu(8҇\ȘVѯ>y~AY ֠ulZ7֨b 8a:;묳îEϺ?dg5nOA6?⭷{ #`DAAAA E$2'͗Z7")E.$(;ͳ!,WCf;ͤHxwxޜh#2UD]tF4*y ܻ{/ҪէJ(;\|3<0^N]䌐 6{8ƺ aiUPsHgy1c=" H7L9Ћt7UV g}`Wnzw̭!U" rwߝck҆/)x"h%l\pb\rZR"At]]ل 3 k'qsY۟"ȉcEAAAA E$+`^dEx0 C p|dGSPPPP"@?0O*։HGۥ'_H,qNdEx?t7J$Je__(17{ mtf)Dĸ Gs+;N"g\ N H@&n-(5S|]'N^Z?tƩSt۱;^yHIӲKYc{խ!AVgCG9 B66GHLvI#oR:¼DR$ 1RPPPP"@bayɮo%< ^~UUhH!aȹ|S%_9<" i"K>H̋bn%:Ϙnl@"U"sZ_%MDd@x.yR;71$%cEB3 'HDH "A8׮@gK #tZ{]B{ǜ!}%f5vj\ Íi b) N@D_[z1-tmݽ]?ϽnDB;!TPӼxx^_l~7Cc16d;dtT|d4c<5 [O'LD0>u*'E$D#)y+,"y95TDK.$E3@xT"Hb @7՘]DK r*FTBFy %} hJ$HՑe|5&<dQ8,*^U/$'E6r"ۚӯӜtGvۮE?Ou_}q"D|6̦H =u{ hN7$c"Iz"ѱ y3ۻ $gC$HzexPsPlv@p_0C筙[M#/0Y_X~h‰O qǡ͋?VKX_ o2ֽ?vc(o(6i0vc0 Ɓx3 H("9} CHGG4"[r]BuzO3 $K[^jHDq^Q캮EBm3 fb^JB@uണXsbuc9g~ ɷn^ Al{ 0dzv$Uzwm?ȕ5]Pވ(GyN3%jC@DO6d"e}~S8E{PPpñ;<~3/^j"W ݣ"'UYWGc{xRzt#M)7;>;'oly@M;y]C_?Ld`ܡD⣟b$<.}y4vJ!"zֳcF{:1ޏqRd0J}a'W%?8~P?bԧF┧btAD)#6Er6O VN" H("P,G ܞU>e8٬L@ ' ;}Ez}ͽs)CӴc77m 5oҢH$xGxTTK]=<ҍQ~~:(|K%w)m^ցz*;/bZDv8D^ꁸјO7/7아a\2Hy!O Dlg^ȢH¾ooSGDމOW77R<}18prA"KcԧŪW7Sq7G< &JW?bDکD)t!|K`I-sI_hJE$9Hꗶ F$^zZK^hzN]j BC79 @2RK([ 
I~ȟI̗9%km"!-ɨ+~D1Q a_*]ސ 2߼4Su]5\BޱW=B5Ql5 >~HȄ*&a=W '$e/Tc gk\*吲(FdM +R4"I2yMh1?埗<ߣ4D~j'>o75ҟG3\;9I 'q!q̣E¦)0[q=b `iNm)=7BWLCJ\hxHH:7ȻblcεD>*:fKbbKjKɾұH("sKi 9!+wر/_Bjd9NZAPLy# 9 xđdfOc6v$_ [9".=Ir={6RaOh#- 0ko3"ՓeZ?ǶB4(*f'K3 Q)z>@*ӢTS3a Jb_$ܚcu uR(Et0NdxpPgw)W+3o"mQl jk$D"ƺiŚ]8"?;oO~qUtȰN"c~` 0E$v1ɛތx(տ󃺃87<|OEHX:yǷLD"1y {/k" J)$ ICDc}@|ovc{v#FR8s_'9W\#u^P䈸Fhg4˞GDFkڷ&ڒ>3?95a]1;d3.UrM^v0lx>u;| Kkoc i~WP@$Kߑ*DJ㟌'/Xl{O7$b?/Hw~!y,`D~ߤ%A09n{{,xAFߐ$yi*!hW IK_kSjϕI~JX!}=7SĄȑ&Z֘Bf9S\HЧ%. Vb}]#G"aߞ[$FH("s:",µNl{J #<Pɷt">M k Lh&I2^Tl%-Cz o4r"Uہ}sfC/Nn7qNm/#~癗^xSDO9@ʓuo z!:0~vq=r_Z`oƺ/(hS""˻.eT Dj#T^OǾ G_0~`<D%cQ? i9jD)6MJ]QT\}GR^Y$A-Oxxߢv0TD^vH#~VL0\Xl#o7w7ۤ(ьDA2H~ hz Bdh:"`ȲhIOoCGNgi5@Bs1N֟T5jC  E&0ffG_@0Ƹ<>t_ h n_K޻R& HD$e I㵡5 HD @$" HD @$ HD @$" "Hwe)g<ܢ:WOb"łbA,4()_ 6ly(*d^r 1Du眙93s}ATWV{ٻ[alh-+_ ׊,8΁3ǜipObg~c<^<d,e,u`]X'!BH$!#".'­-^m5߃gg1o™=$8&ԛ6E3ZueYw!BH$!׃P kQyp|FL33{潳 BQK4ΒRC|G?k 9Лomp;ý6~=j8lPM6#~G]8zi# 131sy e̋y2oX&fXo}lG?aFQ)))))q)D$x!II׮CYsf \زvAΝNC=F:3{𞾉ғpxmȶdTRRRRr:J$~`PI[ay; zc1 y Ӎ`/'ܑ;B! - dJK8[ۍQ`߰WCB,=)^oSĂBH$H$nd DV#xDr>c T W!B"A"AH$ O F6*@ "a/'yK9.BH$H$8sC2mS)}l 307UbA!$$Dி][`xۅȣNi}>ю@ ՐB"A"AH$dW\q~v$Ɲ2qCpBH$H$?3o=v"@I/7"A8?{ 0FF+N" !Ias XݏlpýPhl*!DDH{ohi\Ǩ0}Z)xJ*EWiHr88&,ccM:5   B"A0`ɣ.Wy T. *2,GԬgcc&Vaw,G[[ +AB! $#>DC!*KO Ѷz}URzr9Vwn ρ^P˥ߎ`~cy^h},B~5/-wݏsxwY-ݦF+#1sy[n߾ ](!DDH͖lh"I psȂX-eE+s*7KJags€]Wo;',f[vb0 pev1X]7˩DFf_~eUX:X1;ٍ7nt&PB! %W0[h08E7ƅ@"c)!/3~+@" <+d! (hxyo8Gbۉc!Hz|"YW?8 w1!5 .b?U|B"AH$H$d82&ȓ $Xe DûߧsVJ\J2BP$Pt0#FYz,ˊnlQ$ >W(;]7]!lk!s<=իQQQ!#!DHHН~weH:W5J' pgヤ]Ĩ -8S$T4X D`Oh  OBIbUDYVQú\nɛlp,yll޼9ݎ4 H  b@˜݁v{0rŝ05Qg'8P?g@جo±d3Rr @!$DD$SxdKiSpBܹmC!$DDD#j<£.X2Ug̛;6m™3g%!$DDX6P @PsSb\NĜ9sf|rB! 
F3=% c_}/!8qu."!Qw~\;:.wM8 W,/p-B'4ޱM,B"h3l^ύBH$>hHgS DSL?!-- _}հ"?.mmm`wg://tJOOǽ{F=xa@'7n֓X/K,[ q9 b0,2K,a=D fyHFBUYHGm~&nW4,o ̢ĬBlxбwxmqI!au}@>9v(.aR;kĵ&c5Y"ccl MO3݌(T \K !$DwD]?hooz4Ւ,ˡM?gϞmep> t&".@k׮~3̛7"*[1E@CaC7!HX`gL2/QLgM( x_L>pe͎I}z'HhW/`02'X |S$8 .Kv ;cEM4^-ȱe!DHNHR,o%QUhp$Ӝf^+@7tIYEH t5bٌ70EDX6 zNꫯ~@wΖP}o؏G3_Ʀ|D'Z*oLQ# %P[[\o3v[p3Q$p p,XFGۦ8 8g<ǔ=}!$DDpE!A`G}D"P@X@}Ah1 ?np+uo)0L@12X$0&a$<-"&7c,HY4i݈>(x_E|1hq$֛3&L;}\61̲!xF;29l7Ͽ:i'wW0du#=#.*.w]}vn!gq7o~${guL\Il[)̙9!pc+ !DHH )LlB94CC?. Ҩ&tSJg0q5 /ƒзS?VDphRP*E,Qp7HҨtb…d4S8]7q6R˥V8ORc1 #JX A$\=A8`yoL/[c(6(.k$rxΰ"*Δ koiʴW1#=݌A3J6 s ^!!!$DDwD2 GO:<>n4Z,~8C<g9>rx]x^xGʃq%b3*|x-E8铭rf |q91*C@vD4/Gv{GokKKB@l/E9 ǐ B"A"A%L0~b^\L  bldoŴ)Xn-Zt]ccFA1HH  uV '9}P0c2<{J~؁D؇{'mvR>Խ10}Xᘑ@Bu$$D;Yӕľ{7>X[  0oFɁ`_wFdeKIp17` FbA!GH$H$х܋q 0bf.GzQ`oGMD܁) MY!$$# f 4wWڰaegWћX ɚ\wx?mwRl{b>bߝKC!}ξXH !HHHoN˰rl9 =O LΥshBwÝ6tm6j^4ˊE J[e߲s=ǀ܋B"A"AH$ˬfx܀ܿk/8>R'<]0,HgN_A!؆k٦lۧ;l첥`f1q3>%}>f_ezBH$ DDXdsw]pggnUovҬ>+ ux0``+w>c߱ٗS-}-q   B"A<3XFyD}}=*++cp},Z[f׸)בm\v۾x%M6Qۄm6I !_(N4w\  }>c߱ٗ q\!DHH >\3Kc\;v |3M#te&\p3mq LGш$uy҃Ps_ p̰M1R|Pګ`r0€+egg }>c߱_8B! Cɍ8^҂3'NvƍAŋ#kn&JGiz j^Ix2P8̚:lByMݜ3.X U~GwQNȺΎtsfۆmĶg5בp#JPls=}>a߰W3-g:zBH$+,O)ar4D[[[{p{Vq$DLG׀ 7ہgLgOS\9p\epQ&_oٻ!﫪ag~c<^<d,eL'oD|:bm4zu&RZZl d3J6f[}>b_i*))D]"!ّHHpx NJd>~\FݻϟO~foʙ 7gώ4Mt hHӯOFSxs2 ӧOqQ@w"FgK,+vZl޼mi 1ۚmζg/'a)))3J"!YHH\ME.cZ?W>n߼Yiw܇IƗp.a1[&am2[ c~k86Ӧ9/OlցuaX7֑ueMWB%w](֬Yaoc޽ QMM bz*ئl[-+!x |A"H$H$! CefY{(K~z?3pu&~fMc{ȞeiX|[*^&tK;l)x6 TKp8UTag~c<^<d,cQ zᘮBNǫWgOm6\qǀC%U60B8H $?Epp-ouI"tz|Ӿl2[EEE`ڰaw~~>70Ƹ)&x=uh[Ҡk2Y6NȺr3 Ut:t8C@A( у8KX  B"a\! 8={J !+M{ my ߲=z ea}q|8p0ٹs'V0?]wK4y7331sy e̋y2oX&fX։ucY׋/BWwc `Eh@!$3  b ~aYȽ.mp׿ۖb={?x laOҰx@[[]x.\@ÜDɓ'矣|g!{Ç&~wS1RoLp8e½1wK;3'y B!RHػw/REW˓5~C0 c߾}8tиM(ghG\R V-?2. pŃf  B"A"盻S$ 5əs4sc }Xh !Kwq\n (vEc.IanƍX?wy3S%<>Jۙ t ^Cѕyk.BU`lYv}4+5@q'z<20<Fa(Xbܣ wƎ]{56o, B!\H$pbrU ӨFqq1֭[Rk֬{lܸ>h?>JJJ֮]kyXbgX7EcעkSpp恺:s L<۶mzlڴ ;v`~zngTVVpwa֕R W. 
AyyyB|>B>8g=l ׯd<(.#|p} xX?7@N81\zSgN(8"^10bcg29`QYDs1^IEc,F ~D8cb[[p!DHHh̙xgL߸sBמe˖Yb"ͼl~OAa4igL eC?Lq8W e1gCY`CQD(_poxnT#  7s=1BH$H$H$H$ALOOeS$0b0kisC/CNHxC|)@/g"80H`{qV~U]] d,Z34g$(̶fðG3'OHx -hM|AZˑ"pZa@y ?hG$_:LLx>9!DDDD 3 Fjj*ݍ(eF8Ӈr5"C=}t? g8 444k@-?ST YB.Q4yo0 `I t!b֭[)Xo|Ce2pa ɒ%KxHO7 F\|ȣ.D5\^OoY0#8B! 0|ܵ*y-Ѐ!N#+>4d̕y z\sAu K,4O?5g(d(~bHrBD)g3<5Șn2&rPO޴pm WctU!DHHxY)q|g4+mpS4Z!^\˳61kPnY1!$DDw^&}9k c<;5` Q}~qmم'ހVv"H  |ǹ[bƶa L\Drمº\T HHH BpRS0AɵC-w 0Y<Г%0-bA! ${ʰsp]!_dH|ɓxB&0   B"Ah(5Rٽ.xRÍx%GۡB"AH$H$HNܿ`]e`| Zm]YhtahVa " N$E"A"70B赺ѵ{](oY %Ep]B؏|^(F;g2B |fD1gk\%DB h$ p XI֏Jg#^IIŸ;DBr" `L@B_[б3S' S)8`KxD$TWWI-|)c`uͯpOvA?X ",UΟckќWrGu'$3MJswB! ߁-Fs6$z<^~v =~ !|ĉt"s^$aPEwj]OB.~WX3l>pv" gZ2FZ 6bBH$H$5$6\\??1b!˲_Zl+g̷}4U7B>ShGˏawa,B`*,E8<ܓ<WUU fM$R?X+oo] ٱ|+P wIDZj4΁zsSDR xYDxܹ7SBH$H$$9?_ }9h$Yz_D.}qB@"'~ovxKYF\l8(3_Mm/EѦlGӕ*F]+e(E (.͏h[M_s xx=M‘ҽuf  B"A$;Ho@y"z/oYpf76YOZSl7P݌7E UeU.I n ~O`xsA*BxR9oB6yD~V,HY3 B! rxiqI$܅y\L"HNc=!vG100%}Q8,vևB{5w°cZY>E<7fKK((@y;5߃#cjb\\8GB"A"AH$$$xĄ^* d"!jk{_%Bb"\h}nzW?:AAzn;;0ݟ^23', Yzhw ]7IlEdޮX|l"(76*i\e#ó?+urmY b˖-8u?~x!DDHɈ#MOPضQLf=b%xFHH+@ziS$"LC ]# U\;`AWm_Uc" 0xa0,'Gw?(⹣h0o.Xx:],q\ѣBH$H$!#sE㿳S,ơkY&­- /[]05nx3Bdc|,lJq" !*,=aVPd!h =;:rOwR'a-5b >|wo"BH$H$&|3-k5|mٽxw#_>F0ȳsvd ";kG>|Dwn8fN}]+LO hӦM8tnݺө8  B"A(>Bl]>:&P$cfBdϞ8'Y1݋ҦY܋Qnn.JJJp P N B"A">|t=|>|XWe0o>|zƍ;]]0zЏ8ϟǁ;q=3uP`2GDrnogg1B˴X=Ƚ(++ k֬Aii)Μ9g{A ! $$)S'? ctyf`xpE/S*{}6m~3\_ ȟYGgݸ~:JҬ0T,lGM |~_MAaC܋6n܈J\zøfyG/cvuGe~[V!k.X wRVftw;st8fB7qyg>̏2X.g=Xkm ֦Z\<ϟ?+W|^qm'?.9rᮇ5Dk-Kw;̅sq3FgbXO֗fy)DB# /^ssرchnnh |3H "Px_t34,O<`T"ӧ-?}hkk@X6b4ա'Oݻw-.===̓g/x?q`V/@x_lW 13öo?c#-[ha Sk' I{yÈ 0,s8.9^p{m4ng;=.BH$H$$ |8[4ic8k8q"?UBcXn݊o2e ϧZ[[a74$8kN~r_oKtz7hpRp`(hhXCoooX뉿/H/?X;(|>a{o^dE"@X aI`$"] 6і@2Ls_hK=F33S͙#G+onni'~3lo >\*!~\CDgޝ?O&07 CUӈ4yOQhaq@|/J}RO)XV3H~n[<1Wp(|#ENV E/UE)uγ%_'JXJ&C{Kq^O?s Żqb7OV$XX`Íy<H/$YG&kNB2zʕxG 藿%ƍ2x1n kג_Z~_Czȓ@hΝzWpRpǏ({@b8€u??s <ÏHg>!?xeu`rQ~U/'?*rW/Tm3\{=5ȓ!90ݻw31 %Je3IΚPm:o)]j9d=T'i[4!Sz.ψoWƗq V$X `i䲘K;|1'ޟ h6a X X!Rc"Fx iq}vNBWʈ  =䞾[z ANz⧍H! 
d rcD;X;v,De.BQmߪ)KB\>f#$NZĽ;XQeѽW^<Ю-$vI*TZHF6E1NK!8cCn>,5U7C>r@/ސS\/3GP^<e,H"$G-z7[.XVK܈grO?1}gbag544tZR2xzZX3D9 %ijj2e }uEKsrK5#DL8^D!^ibcqf'N:lLEBzP CW-_뒰JgvxhNqQ0D yY\~=zeCp+ n@Z+"TwBuoERr&3>Ÿ1~ݎqI+$s,,~"`aE SN ϐ^dvڅt)'| =܈i[bC0o1\aHq9-\;I< įa?X*9&X3M3$/@6Xp#$l!F{_= x$~_p1b<2 y"6 H{$ҎJb 꺺P0D'^+BmX-z2'v96tVZ V$X8B [@1[l ҍŐ0FH; $X z >qdr8'|>VyS&1bNR$}Ѷm۸㜌!%R!X79R18,\O}ϙ@e$Dׅ7p%! H,Ʊ% 2p,,w}z ŧFg9J#<|' JKϺ]ƕ^a8c.Ϝ 8arqGYX`aE nI@Y ?!񘰣XVr?  6Bt*G]X_<j@L3}!8l . . oD #aBe&sa֬Yn}"nS [4SQhg [GzBp߈*2$yB#PϚHB<у -JIo2bΘufuI)[daE V$XL  ZaN|.1<#~XMfUԁBT"7p1on޼J" aCO(֪C 9k9S8LxIg tJ)MWeUX:jY7yC+Vf#sGRc3@ /U c9=/T [)r\X >p qsVN:V67Y V$X`r)b{.C-Nɝ={6?|AjݤqLH`,⨀E89"T D\چH!B8ߌ lGBFC?qHiH qٮ;WN߿vHmǚ6伾v>!^8\WX(#NkhJGcPg5.^X\3cƚ$!S1#Tܸ&fxE "ors/~·nZ`E V$Xp,B2BT,}|08VoN(vND( V?01ra1a0l/$#}~F;m@iOIJ'E#JȁtoDٰҳWA36-eކi7\> aBs \ "5ONA1$C8kx`?4a7,G\֭[aڄ/ℐd  dk߬!#r x,x{|>1ڠ-?ms8D^B""xKț!\15qm\#5]OA1glKxs;~8aHOa<诜0 +H"‚R(ťZ l,Jq6D'!<{q=rlj&88' 4N Bvc ڂ6\$Du1B.dw2&(gʆ"{L+Tӹھ};f`NÉ,jރ"SXX0_7ehOU" +,,6CR-G 5"~#Ƭޛ+‚ժf;*fQET\7 5:!|{ɰ" +,, O(ITA8$"2бb‚y>n,O#KV(" +,," ;$ޒt-C̉ ">_/ ̧obĠ]S +HH$hw[!w [0R2td_NcXo~z2)[Xh0*lvuTw2H"fRu#=>r }8 sX4Zq.~Ө}o7c*X M?;ulh)Ey2sxAYb(X C V$XK~kDxTn75SoX>ԁ>̯]igYXm.i}ɏzYH>iXK¬:~8Sl CV$XL͈.{T74/)rŒ\o@̹oƏkPh2"Š+B$rV$=<刄V$Xͪ&NT\SU} .+QJo=%_Y+g73ޚ;[7oC  +H"x,p"ch"Yl/˖DXK9ύ8---ęO{ Eh#{1 9*LH j"Eeat466r:- cWP޽{8k8|̕>2@w奅3zKX!Qhs]_HhP &Hvs')P#}==b5#GV$X`E XT#ξaC ~9sFTV3 _/ jDym555!>p/ʙ=%qñ,է%/GX;믿߃^$<%UtdDŠ+ܹs4i+YSN'<'o(ʾ,؆( +Wk̙MIĞM6x݆ t5kK=DEMz֟*+0|*qBem+AJS@* ֝ThkgRW`Mw|ݻwwAa>@pp }g6m澦pBA y`p@?f~صk0`͚5ڸq,X@ۮ`ݹs'x @pe˖a X)gƵwo4)P( eh[s<dP@8R'r X9쇘╀ڌ Bf0{Ov~u«5^->R$.gևhs}s 'Otzƒ{)gn%K\1j*~1ܯ{Ot .M΅ & ;q 𠬬LCB1F0@&Osgkǎb!&N\!%jW6*\[%BW]$-%>J^rr ߍNm"x *Rg |,B*m^)Jd?EWb|eB'`^KB$wF曙{=#E#}Ԋ+l"",#%(fsa@f F=V>|n< h! 
V|3$ lu%l2cK(fZ:=G@Lqx ?؀rM\?@ pH~ű_ m!FF -)H?"!JA8;c:0C@ HψO?Ty|H!Spm<4`_׊'bƣDKO?1oMǎ+{H mW$4NEi+nuw OpKF+mt9vJy#iD#*P_.DB,"fD;1<{^)T+,H"!](lV}Np#ۆi CX!ވ B!\$i6^9EmC; &DK-a.$hA،H?}C9W'R-8'ӎ>rގʧ}   /F8H̜U`lE^Ǐ?ĝ١]r#,aix/jkk P ~) g#&"@I={6kb<Dފx?iRTU&~li[x}GVB ͵okOBcDҗ_/2RHxx*#g$T{"H$Dԯ`1ܛ[qEݱ"! +nĊk#A9Li+0q-X 7Bp]<xJH0FƃDc%$a3 ~G!W`Q~Xb&I;s<2aܸq#<y ` 1#`4 ~„hG)" DxM _r/L^^Ǽ|wo~pA; WA xĜ}r͍ UnDc_4*~&3qѯTE[Dy uOܷEUþދmWWI UnբLR@R$hk_{jHF0?p#2fy,^r ͜@Б`X,))q+rHh6F @i0y'  E0H׀%8fx~&$x C}OW*O ~*UVWW۬}o˘Б梙OME#)/!`-4d,5tEHH0Ƙ!%^՝ң2u{;J' nt*(J v :=~ج[y`,,1ߕ^jرMǥ2!^2v1b\O SA![τψ?/XHX$X$cz\g],1j9\k ?#G 1-;Y$S,,1ưX'_Xdɬ57J k= N!ͩEHH0Ƙ6ب7oOWZEE3FNBbkɌ+vD1?/,CX4Xw+Aii)qA|' qc``1CaV?\kYgR΄#I&=rFq~jes/繾;hrwq0i$剙kc``1CmV_ÇkӦMZ`6LocuY -q_;cok`>~9.qA|x"X$X$cPUH_rE}];vhɒ%*-*TNӅ1۳ǤjڲZ(b_"?71Z%E[Qqq1Wx@ ""cLPVBBA :t?#ݻWԧSRPP9RڗЏ(99㼜pOѸf}M(PVEJ7v~.)H0  णC---"d;O=qb#Gh׮]bKŋk֬Ye.Ķ8'K `.0g'[vAz:k#k(y<=o~mp7 5F!f Îb_쌽;, ,EE1Ƙ$Զ:uh={'ܫW-5EEEIOS*Nji2Iz:47_zJb#ocTӦhD ErϞ`-Az`'ݰvĞbgݱ?~.%2  c` !!ޝ޽{vΟ?cǎL˵}vl2͟?N7M5}t47#,Xh}(ougzbT37kɂX$X$ceJ$  ƵUsOg] @SyNѣ:t TQQ}QgH2[_6lӃy8q^u׽u 1c1fzEwŏU fm۶mߨvm۶m[ۘs~>UC`Hp"!&x&Z,qsat8p$H4ؔjjnёGoI/z442 ?18<C#V7?y_5F3Ț>g H`D=񄶻W˜حeOZy?6p/US՚~UT톳1]~ZK9#`r7 S# DBHN[_ЫDto`Sl EWS!;K&TR9y_㸒]jy"ެ.s pP?/v80@Xo*O6v>'oCYNp$OKkH`D#t='o-n5*]*w8JuS{"@$H`D#pb)? 
G~ͷ eaFUt$/Ȋ"aM"H 8U}pw(&k:"*۰YU;}K " i<pcQ*]wWP(ۨUUs+ " Vg@lrS&T[LejדU?Nyz@@$ HB0_Sl\9w %OPT ջs벒@$ HB`7U7fse6|Tjh " PLxSGŸ"H R}Su6w)gRCS [kױo^ׁH F$npv ؇廢_ -%;HD?ٶm۶mm۶m۾K8}ξ+z^nl99m Te%(K>*T 8w⸺s P0L   6 L&a l >Q+XQoR Ьɘ3FpJYQlޓOO-ʃ>Ou̓ǫ*TH+ ~y&d^db -`NdLX?G抴 9<òPZ{e]͑1;%Vفi2 l$0H0LbiϣĤ|:fQ24 k;j李}xH&;xwzkDt Tм85˧OC٘>y"[joi'8G04\V7Oα%aهHeX\Pf_!+xv>/%19_Ccɛy>L& ; L&|b^or qpg3_řTG1sck9=s4|3(L%Ylbi?0t4Wƿ OIb3;*{cg|a,OK(H '0srwJWPZz;)Շ{y.>Ρ$:Jc̪.$4tA{YlL 8 l$L&!^5k16MHiG5J#'C:MU@z?&F|5%w i-0,ɠ촚Z'ݢxkM[4NⳃA)T),  >r0ty}qw W Aޡ|Kzlis}zkdqnMG@n'dP\ Adsy5#iܣ:EqN<@o 32/Mic:jGxtł->漃ŏeiQ+Z%( ƒ i[LkBGA$Y@iz]opPHi<1SS /J$ wO`Ai]GmB0*`sI]e`2$0H(a`2Τ.` 9F׳ou~Dw4s%P^+I;ۿ91L?p%!aIH+ @, i&d0qBi=FwLBˍ ]Z> ǔ ^{%#H$xژfajCqIكVH/blЄp)!a &A " &I̯SX5"JNr?kE-9ڔ^^o_ՒafBgUK3$0.3[A~́U4س@ z[+U6%ǜfl0{SiǪE75qq]_#ZE\1̫.ۏGYiEJE% a2 l$0H0LҠ,핅 U|-HJm('/$Iϯ8S##HJ]t$IcS܌#HH00##H00##H00# FF`$`$ FF`$`$ FF`$`$ FF`$`$  F`$`$ FF`$`$ F<*J">{u# Çٳ[ioNT(\yizzoO<.{퍢T*1ck y8>nܸϟO ^ksNM?X9s{{{L-Ο8[uh1b^wtttDqff&NΜ9c?ONJs:g}:Scccs}9q;w.>}:͛7|6^G#!Np8q"߻w/.5qaػwovAmmm\.GXq#et᯽ϸQsrtRڵkWv8Hs,|Z߿=n%lIt:jo+j~F\"~fLMaOeyUw "Zj@=kkkGT]ԮT?OӤZ'''tjЦ6W%_/ yeka &Fr&]!hA4EAx||?֦8gG^#jZ PTal Ax}}-^W[GGGZ6Rd s4귷 ]^,'DxtHVϡ ໹U mIf~ͭsrF&{Le/HY8'oYdTpwwv!$d:oQrzh4!ݻ\ջ&_4R0C,OpsMu-46ǣso|Ekva .0641f#-8##|ѻ-'dܝV9Cܯ[6sxž𰵽MhѓطNb_^쇃$.·Sx38_|?1.%xbe= \hx1KWӭ|ft Gt݋'6{#$`bji|TY=%Qddx$tͰ?5ɋS2/̧X>K`?r.){bGt3?k_\I>ҖX||_dR򤏊i/ NI'gW\k4 U16=lA=)C' 0>_M&rn ^45dc]cxů٭?#rfg?xb=<Ї]Ixِ5<]dp%#>\%=* 8aπS]K5-h6$8Љ^?r#+#Mghd]؂/^b)hohB[@6t8:t 1t:Ȋ|Ȗ\Kw{?3 |o୦y5h` y*R9te-3XU̟[OPNs_%7OkN@M>,rv|$ AL|@ T$I0G2ߘs* $TdQCdH>E ؑP%?k΀"I>h(- >h$^rp\r3&L P,c+xFd$IfrxI[EЅ K:k+ζ4hNʮ)޾$qrÇCy2?׭/5#ɴd|##'|_v5/H(٥+#]h`K.n]:n\f>zIZ5dG.(F|yXK{nI&A!ۑd%`&TIJF$!F]`dK>MDQP1j*%*e9W5p*b; -"@ӋMS2'K9i@wL ?,})GM%@nit)=~!~?:vɝ=]IvCaWjKN5dx$''6e.=W) ާI_n_:GxcIh ؚc AWdɗ]:]CV 4&-wM/R;2@3TRs% ?d{IHl|>>lġ 'a9ö9ݞ!{޽{צ{&IhAmlM9k7}T!_c3dk[swSdG|&!]&y]|gI&)Y*7A]0SpK|ƺEP,$v#L82qxiTQt]W$ N`mGɭ])_\^A{$|ch$dX_00e}xCvr2 ,F|%{>LoWa&!wF%lɚ MiǏuߕ?6?͔^,ɱ[uGoptݭKzX|k])Lh6 
xV%kgflgÎn2G+J9^rɾ"N\_*6g>§u lMS7IhE| [{M6 {!pDx.If%RIQh!I1IuB[IZ}%"\%i5Kb~ DW;&JV`p+0 ֖$mx Jy@qq^I^ %dÃݢ` s&&8ߕMze#M7=&<&CɎߍeO$kxpv&إ?vk9˄nX O|dJA=[̻8ZA/[ mr>{}akɣ[z+>ԨeG'zNۛsWsr8P*3XJϞɝU>}L>9'W{ \|(5Tnc3tu O'?߻jt-tyіL4*8Q9l6,U/x>2gkGøM6 $/󅅅B$-$lQ6 vE/󅅅~qgcЂ*W.\mI&aaaaaaaaaamIXXXXXXXX&amI&aamIX&aaaaaaaaam>&\^Ph}]W j~\`{ /Fmm^&xp{~oqo;3o[~omv;~] _ri'37' y8G9/ pLC/)Xu-Q͕`O%MXÚ,2XOK()NR6,J2D,z~~k^.$,tD;z: Wr`Y:!y*lA赆DN+*a?OsytEE7$r;QZqHdͰc֡{4tkyMlP%Y^|y͸fwx≮Ш]Du2q|Y[z,%{gpeLyhqj\ȘuE5OS s4:,YAE': w-]6gA/=0wŅ4cCŧ/+:${yyUFA!Fc x{I(搏O{f7τd̃eݴˊ4S2% U\v!'_C}3]$3=;_ƈlؓDCc흋d/[JI?/bYC D,k>;_F_|v'D_ϔ+^@16/DOƧUn{HيD~s7'O>x)9ߺxbt|E])ۍI&A4v s&A!!Q_$8%}$+&yxs&OMFhvD6'dK茆Y\ b\)lg>` C6D>=GjF>sŶ~ɕ66;O!Io~3 {4 ӗv/&y|"9f+vğgS hzI`9) >k$ddئdJӶiȜ^q鋍n}zObikp@$[%l5 Z3?ٚd*lrg N7orƓG6&aav#A 7AD/Hn+D0S IRz_[K 9HfvEOI¸X"P%9kHn8z:g2v?:wOdO>Ŗ[+$Ǵ/47PANA8LSth:M%8nS;pS8rg*៻'1/=?왾Ud-\v÷Ț}M'bj⍮@^g. ߚ]|unblyrEz1hl$#?-rEMَzy57oC\49aO} sȄ}]?2&9m=6G9\ڦ=f@7yy#s1z'm\6 $v$2& ?II( òrIMap=l=AyTT0). =:'zhS4 deZKdY c%ṻNeŐ9Ery Ưh?YJz$>{0/u/~dbEr`?C'S(>NGGwt;lC|EF4E+${2GsjG{I[2AϟhF@Wn-P扻gI\s:? 
YLotH@G|i^]>a~LdEK!X2Fτbo )kYk7d|7g9X W7jMVɒN[޶?c!2=4_=sO:mj~~۳%u8N'Gv/M(;G cC\;YM]4ݑO@g\FǤ 2/7$InY&cXo`ZL1c[dHOM3_]ӵ $-nGn3btW؎zy7W!!AH@H!!@H!!@HXdj'>v-[~mvh;s{O_e.v?mny9uoIut{@H@HzYM=Hf~yef&Pnh EY|zPg&hk' !wI;3|rm^]oy3v@pZPa,uѹ+777?o3KkYI[G7fYIuY&k~YƲ<3Sg \gK!){*0=[>sLc>  $4|oUW<-E׿oeeQ:Rb`*g%ǏzU!/8@zcho߾=JpS^p:y7Ԩc*Ԯ6 wش=w|sF o?:o\ԾM B* lW8W.w`ooo)ܷ?4[PX^zj iPZֺxР ]\So~8Y >tl 5:>ܨ"u TXXj۰N:W}n6k~JCng: i{^"@0`X8HG}حBΰf>]z|b.~A7Pz G~*++gZw,  ?jLi_QW~WQpj{Tڧ0վj.Oix!!AH!!AH!!AH!!AH@H!!@H!aՄ!!AH!!AH!!AH!!AH!AH@H!!@H!!@H!!@H԰'<;OH9 ǚQ;k۾wmc3J16X@0?}8 S4+sE(Q&&&nD@8lH/[9 QIP~JH?U)RJXQ0GXd#̅>pB\p->?y1/7|CPydDXP" 7<5:<xz=-F>svs9sN֚5kZ @@@@@D8AգZH GZPWWG٦ @@@@@DJG@{P: @@@@@ Nt $'Þ bA@@@@D@^P kO2%V:2I     `C`OxkxG能{/[jm5(O?=     ­bx] uW\Q2Zj˩Q8ox#PaF;t?ۮ54{LL2Ջ8d0*zR8)xQGW_-77|   jO(lgWohc WP"+"u2i/.> I}ͬxĀ V*քHV?#TdijL/mڴ7Z)++K; D-%/V:B @wB_vc5i@bn%g(ZQShf3j^F\O3i(K\fە-%?~y4p %NgcjCAL}7 @#0~?8t%CV8U=.tl޼~{xRJpT.-M^?.z̅U|܀GWШl[+64,4FxVbhX5#kfQrn6]|քTD;DȊ)rq 畄 'MST[DDL ^ P9đcs2Ѷ ʩ\?0<P|h,kXLof5ξp1 J)j{"38Uzj6JKd[֫\6EUTٹT@ϚVET-U!Sh[62UIR.]O'G}L}H{~@Sm{ZZSs6ڲ~mjD@x hf;W>z=VsF`<y0\L.W.cv<' EVsqwtVwiI  I&yCV1>#޷)$ uu/۩GTRRBݺuӓ    X3Y^ؗcci2~4}ǂoB>#İKI.>;$IE˾T06ly1+7cj|>b_@K =  )@ k͚5aEF6CGhǎw,>_СCN B@vZˊٱT  I& z畎C{쥯$fSӛСCk.u}GڵktH@@ jʼJdzA@ڎg[n򢋆#@@% ZMq5pnmK(  d6@@@@  BU_t@@@@@ t$8FJG{{/$$Gs     @hoO$pݽ%Y4     vU:ԩ9     l Ux     +(DyWP:\ @@@@@ VP:b%     bT:VҭGwc .~h>_IUW*C4䎦ynj'RYa>^T]R?9L TVCdW/KTM**cU/¾ \vJq˃\     $WGEIUs436!7{fjՀY9.4Yu$]Nܹ64V^L"슟҄)#We7UYosr˻`     $W P>s5buf }ξ!Wh>%ǰs8ں7|*|ۿpxPSHF6 : _+4p勢3\ 9Bl!cP\y֫AS*o\ĦV08WYk֬i-..۴䕏K7M!):wo|[1(+r @@@@@ @@*8"H rGLe$V<    N q G$@Xc>pa.]d8t@@@@@ ;wt-GrWβK]@@2@DJGkkkfR@BsI~|n$ew@@2vg@JGLk<u zvQ[NY\й䫟NH[Zg)|R.(Ig4'($\URsDu GY ב GDH@@@@ Dt:4P]IM.Eup3Ci@~_б14yV9MƧØlyF8َXe/?I>A@8(LjΥ_GhfR4HAs%eeQWNB1*"-^̳Ҍ,*ԴS,*ƲR6؎m6)uY3PVG%xiP6SeQ\IFo3On*޼{"1 +KdvMg OLLVFf3Wnk߫yy7Z٢-(s3eezxc fG*yZZvqMT2Gݮm?h?"R:`9mOן[ά<͕24A3j!T0`62^z "f@1?lPZD Dc%J:3*4ed> 6~msqLeDȧ|P5ַȟ֕JS=>z|:Msr~7(.8c$q5č@DJ+C-~>fEݻϤUeޝf'B F٤h捵QcXW)d @2ߦmC 
>4Vʕ8FA7([K˲²Ds>/@ous/qXWFw똥5h,/Zzƻ-Kzi!sȡJ0ςU13jnf|Z[]Pֱ#o󘜹ʯxbw _XExXU3etRVrUy[q Dk[AZ gorWʚsXғerFsP"aJnaF9Z:kӧmǂ9K^_z   ~A?0?I˂ Z2rͼw]p xr/uwћ3T}:WT2ORLVͧ;%ʨX@OLC&gJnKpV&2x_Jzx fnɉ nع~IH.{DN/P͏'ҒZ&rri0q 9P=4ENVwD^RaKҭ}20ԙȽ_Mڧ{?lQs|x*g]-ի&m37;PNWRdɶ#n V\d#Җ̚]3Vu9siY  O "zsHOٳ' )h“#46hi\N~2q='7iyT`=~K|T..z;FФcZjݏ_WV>!'OƓqni*Dqy F;*m.<^C@5Mw-47]ݓ~lc,wgϫqc;hrc`Kyy;}wUve=6>`\Xjq||r7mƈ~^j<~m(C " !{h'+7z~ڽTI~@DzBխ!osʩ[DbaX ͨ&Z%Pp:oU/JXWP;̢4ż4JGM3};G7sWWуww&ˢ'ɺ2AKj5 6/ M ݷb4!F\>~Q"?{LSW/^ c?^-M#cd5t _ȘcǯM[Ndv$RYe~uP*_iZUKKOz]o5˚ŏ%Pd6D\=M~ޖnI@% y*6&Z* doO3O&v{'ȊP~ @@ZfMkqqm}> 0@-[.B|/3{IWtE>c48vK狳zQm(β8z߯n?S]>Mgz$oā@ؼy3۟v]WWGt`Y<}B*HVM}pK3km 龦_Lx9"rD8a5ߥ@@ޞx7 w-͡H⣷Q՘b˺-2zh4j>)KSTYnB@H|ajdֽȡ;t8B~h_"R:} xǝhI~|n$ew@@2@XG&98;ً#9@f(((͑Q=Hٽ-斈4X <+(x     +(DyWaݿ?uŵ$/'2`#tQ7A H(( ŋA@@@@t` $E     P:0@@@@@ L>1kB-sm۶{H999~ź6 @Z∕i餓NAt k׮t2@ @,̫tP q *(B^hGtCP:%r     >C鈄@; `8ŇC#!7zY@@@@ @HG5]?Z   FJG=ȓV>:aA@@@ At$,|Fhh| !'%ޣiϼdM<N<^aPpȧ±eׯ_t$@KKKԵ-0C4;WEٍ48zT>8Fr\p:S* SI+vqTMK spK3]tC/N@@@W+aD>)JF *hvJ+Hn4$`U8x=    @h'r*`U4$E4U_->RACxmd9:HMP:\Zhݏ#.ʅPtK2@@<*ʢ,嵖y:B-U<|<^1wD&/rf7I( ]}ȣ~-pOA@@ )R^pZx л?Uy/]{IԹi۹S!ҸN|˂G!qnNn-tM5r*:ȫ$3s̴T/=2@HyC̟v!+}_Ib̼1GQncȷ/cޮ| `4W?EH |sqoO.&reK]h\I{L[6 AI4n  iJ 1{Cw?:w8.ڎvI*NW8f@qgy0r4ru2ꩤo;u54;ˡ)1YTιF/_QT@ FIw᝱f%\IpoP}PgܗL{jB}b"3Σos+URQxv%~eB]WA8_H~ZiÊcHT'Е)2쎵:ꬓ.äB?hF4d?.[4yu ;fj`Ng.G?Spgȥk{MվtKb،1}ly -KUINM^ pc(~-u:RZ{+eԧKG2*ӎ/Hv:͐r>rd8֯tCc#H ppfTP/.ǷWq>ݽb:?<Cy\*Hk+ eQ[A׵_Pxĭ1xΎiR&b=Wu.OY̞N噚ƍ*i:=^#/1rRkY~8rjF)ȏf~w|+} f ;B;*okRZ=cIۆ嘔q$?wg<>]6G7C̲935ɔae*+;\V:vCz,Z=d<m8xmA0LXLizyeeT樔Ru()kWJha+O"X0$jND̵@b1Va&J'~N+ļi_B1KmѴŕWQs&S6"Θhې7nqy`*76}̛6ͤMM~~9vUkH]Hd0SZ!^@/|]@B!Nӵ#w?QoJcEeӌb:Frss=hfe R@~:n[ڟHޚ􄚅U0at]U58VJ,4ϛCUeiqXx994u.U61u)FH ۸!ؚȊqN3RoL:41|еբ,aGME$FO0tyc,ɵgj3y^bJ|6yT}|-~z΄ `zƺ>y۝d!es:n[ƹ!k|3٣hB8:&(.m)߮l{iggboQT<]v]jI)t*Fó31Z"RM) )h#yo+C/Z yi@8MB,JI3g;bljt,ILIVp3z6 iW'd?`eKC&dI&DCK@ѱPIM%M<>ӐNCJ$.">:L1$ٜfW4~03:iyիWx鄠dM![7㼞&bM^ME9t9/O)~1b`"[Ç*L;w8RϭTS-cs«4n+οETMayO#oSZ*`\zbN*7)b vQfY 䓕ٚXIY)I) 2Jӛ$يKp"W[I:Po'ֶ'Σ؈I`Ҕ,~)-tÛk/[5f,ROj 
~zF-([6qyf:vtb}:&,xR?B͓ǦX(/4 uZ(a1!1f邇 h%3Q޴1KT2NVNrk1 5yņL߆H9_,i'ܪ>ePi6g@g.u0S_HwZ[>3Ya3 FY/aӳ_7,)J'ʆ5]x/'|H8};d;܆>s=d壻GJElW3ֿ)M>R" -/Nϧ+۸prʧa<pNdwC;rs K]|pHO+i!ĒLJԌ h?xR#Zq'z87WR=m`4Qf6.`ceա6 MȦle<2&nV]Ϯn3kbݦq4YO/~Mg`51Ftrηl<5s8֔M41>p-ђI!w7UUwDԜՊ$]5qQPG䁗vOx+!JH51q~^6~ ) #O,dMXu86iBp_Z( KzsxD4u+7Ek\vp1z+#r̢3WTTCna 9JSx@oMĞ4<c jRi/,obKC~' i`'@BooGrK!9;E{-tr!jVdڋ}QS`WNee+cUE#-׺Ӻ*y[ڏ"9 ]oNۚxZFj%-f#ieT̗>xOolj}DzRKr#4u>zUZ0? Qϒ\涼Wb*f[8ˑ )4{3NF6?5iiA&gh2mdYdE6KUn2[à#/wYs*U)9`ȫ9lvdm *T\[[cΨVu$>D3ty|g!ɴu=@1M)5ٞ7ic)=*̅Emry K/[&3hzEڧoy|uS2K4M9ø5ߴ*H [88W7TJV9Akk 8wNR%fZ%[p/l4úQN\Tܩْ.7 )e,cgHӠqf&9,'o=U[5ݶr1~6ܠ|ؕ eN{vrV&{{mSA;R&hmk)o*Y۷Zs\PlY;>7Ct;ؖ_'u{\구AnbU6գncz$^ sg]!; Y~>[0bq'(>D0Ƣθ3M?!q 7ٗc*+f:q=d]B*r 1<ոGl+;؁\N@@@@@ $Uټ~xvoj䝡ALZ9Rg͋{!PcaaqQj -!$U}v G@zHa4zy$u;֧CNWҷGY%   iB >A{t 9O)ib@DZܚ*GfHa U:R.zyG&_ǣ0ayP:LLp    @ J)bH.iFrhCҟ^۩!k@f8"patmByfzsal:S޵qR.}Mp     ttxR(yXJq^hkTM"@*|]EMϢ't{%DFR@^I34hDO7ڭpe  mH`Tqn&jr4w(?5p=jMT H,Kف4qj)ޔM@TeT/zS/C "mn~7_<0&L2aJG MX d(5ܚ^V!z_s Xz;׳feD XI_mPozzeC@]7`Z+E557J:YA@@4|+o`Q4W):ԛ6vMꊪ$A1R JG=q 0bF{F $UK6Ҩ4CVMjoX)O>*%G6Җ?R\⟣" \%әjjX*mNuB#H`ԻW@-'E;7e#g^tݠg7;l]@HG@ v ?{rslVu&󈽭p5@=,H˟  qCoti :gѳipWZFϒPrS   жRov"I$tD A<ЖhŎ1>6GR+h2۱jl/7a9x䜤F@@@ yEfIpE ym 4fJed҆ރLU:|J92Rl^݊m$\LrA@@ !z6==Sx-٪@Ui 0x#ݼ[8IH#L6B*8v*]8fГa(oB&hQ\)fUTlKYbT9V:8q2mSs8h'Yye_68Qoۜ%1mүZfMkqqm5>zM]t o۶N:=.@  ڵrrrL]۰{yS!&77CKF)X9;w|>̫l @@@@@ ^2      @JK#     ~ >{@@@@B ╎#@B@IDATHF@b"P:ڼ糘@a'0c!m/H@ @*㣻.umS8'+rIx 0>L|2G*loo]”Q8~KF%鈤mWL멬8mGөZ39E40*䖞,LCAK=-StEJ/0$\@ poW75oӤ-YAhO#-sKCK׭Jy#Z0g;OiīRЉ4W$%/>ol*>/~O_iIh)l¥g000a|hiSEJ.KIIYB7\Ї.ܡ0>3G*loovp:>NwC.\ٔ=Ƴ*jit;sLXlJ_@ElxU]JuU½TzP#8Db[pv֏M:-dGP[zdfzFyp+B#b$JrT FފЬJV:h;dV,*ͪ $U,+ tJRok\uT&b_B*Csd ׾GsǜN۴}4H#_}ݐՍyYp늮|簘`_eJJRE9g'\d.A L}~S/*~NGҕ7~=GeE"!pJV;页COqˊ4H[ S:;φxuXX0m|j/iLaB4-[*FfT$ f$VOldsSQĘeWvS9#膥21CkwòhJYr t~.tn.&|\qU_8L?+a>4,=A!]uf/^^7y,iWv朓wfȩ]i@ch=.?ĮŽO]8xGFxT:@ ґR4 LD ): H`hx~7|^z!ZiW*LʱvI!u'7>-C IĊ{i_ҵOv%p4Ne {e.>?2@HyUb@G գhd>.ݭl< fuKWèet>4s#چY1tŧq7C0ܵ>2 USK7-{@G1dD+Ѩ~iP47:>nXn.&Z"R.|vkۥ.8qtGGfQ|*lKF 
[esT޻9tA*-*NC3tQcW9=T?nwliwd<~zԄ_n W|ng)Y!<Ĥjۇi;t=۾N[t+VDˡӪ{(1#^3w5_C'Lf.hڦh@@ Nt*cub2Ӕc1z-Në^ш3'\U.UlY~T|UiJ#Sr(02(߉yl' ]_dq{m7ϣbc@۵TnkcVTz%yԖ1ի%~/2q _J4䔮!ct{N?'2rMnEn+6LbS[N{wG[ḥ;W'7%XXi]5ĐO=9QUɵ,фZ]q !:˭݂fYxbӋrt~J_NP`y%E'9^|'V!}:ra$#tc[V fѡɤp\`-mW9y*c9h@KiRNjBQS}!YR2*%֎5jmPQP38 Y>)U?\>@mnحn Ԗ=4ު4̬Z|1 W6\=x f] bV y1: l,% 30w/+S>J* fx>'ݿfެ!6vQMxWE@쳙:z-\/eMX) (_|e%&UG=MϢ*Oմ)>~b\ ^X<ͶjZCVNjKYBmvl5g -+М5Ab& e1w216 ^Pck<%rܹ#Iagw>!ømﰂPݕ.7'ہ9]V7"U6r/`wɍh@Uc/nMA#}ӞN;KkY0ԟ~ +}ߕڛ%}  pD<*A'oNXLujTZ̲TvҬ/s 9 gӖBbf|ԫU`%8s<`|%j--,oU"#6}n!\[xA3T?h`z;9亳OVfN׺N3r>eڎ-f]ӿmYe5kIB8:w{[!]gP/Ң,Gu*'~E^LWB YQu\_ˎZ%ںbWIsbdfҩcݻ^~6K;i؎lf`ۼׯD?}؛&S6 Ɵy"NSv$8w/&QP;;BsUTT OM*}- {g ͪ$6rؑʚ]d/R$_!/]X")T[AyܒO9M,Q͏Ax@InNoӌb5l+;) 'gf]^Aʁ=:amx)יWd)&%?d jm]ӄG׳#ӻґf0&rmUmqeRŻԖ*'n9CC|-vt>ֺ<=hs\%_1^)ӭlnK7?:CML=Qy@Dݑj+d<q'zqޗR"&GY$GW|KVܔ]"ז J}j e(/|M۲M'(+FGl9ZNYDMlUVkpXIhmؗ¡1:OI1MLtc8cQTHsD+{X3A"! ?B?}oJIےݙRv4w +l_ff yk&*F#rqݿr&Q:TNS}8$sIe=7+:Zs;/mE'CE_yOhYmAz~| t ([IXEY:XQZ,+ hC=awV*+uL̮9іYN{Q0?;wR˦ "Q8E5o+D cv'ÝRA@]r yb?^lմQ?"!;E2!4U,u^з =Ļ%*԰oV:_{vouz8X˹"U8Sם]Xɴ'|:ڎ=ZBtLfٷ?+v'vMߎȧ9|z#o-|e }Jp^H,FV'g]C|]h}߲`1s0_>}w Y9xo'۵g'B]36cz^8=-F9N;Wn}$Hc?]t~_3_]L1OgdXxb#^$Q*tKOVW t7W".r ` gbN4a%NrTO:Gq-L.;wL~w~V~[ VC۰\>PWn~?ik򐱗)!#/S(   e9axO91Yi7s x1 3qBά١_o}lo^}΃W}ɮr9vh=4/]WdlŇ>gj+dl=Jl_Mv|%ɿw =tt WC?TDٸm FۼO(񤉺@@ #7pՒ t^߮45&JuSBI 7e'Rcm^Ymyy]xmT3ҼQ:Uv<&QAm|2opvH״ydw/7tStrzY3 $++9NZD1}XyggwU/D^rllDPdґ "7?mAc{DORr/pKE:!'ѲMSwrWIfWl.VY98ݧ1]tǥ)N̢ulV28`3?ߥ.rm[^JG?! G_۠ec޶E 1]N!_~d{#ҥÑ.^ٟA&6!<ͷBy"qoU>^'WAV7O+&(H0/[iAsgG~ mG@V9}zYTn$ Sx%/`cHWbiFq |6_ج-/}$%Dz3UR@”[] ^ 6}n!\[xA3M'OnIEY/wn%f|;txe*a~ ag%NN?#yLN:ev" 1Փx٥?iuQ8] OIX@@ JלsR`gyyv.(7jMeZvOt2{)7 |h]YِA& M.LZ,rlr`#9 DE`g_*| OlѼj4muGhF& S~?ʿ㔮"J)/2y? 
Q @HGd2ʗ)N' !k},@}N/ʫclD!go|ՉqHoO&}{#}$pNz#]C&?n}j  F>wׄh-$+ަ\{vb6!yV"" L&0g 8a A+O즽px< tzyǼDD8:wG|ሪ]'@h׏dvgr2oH$'+JE|3ڵ#]ŧ"@$ |\T̨liO'9ʎ @ yN:OǞȶ )Rvqɍ}ޝjʐS֧K*~C"O3]"Sh抷,ڲsz6w+E $Rs2AT6ܝT(Y!'?9D];I==*Dn>!9̠u_ѤsO6gpE?_jsl.AމChC  ـx 1>ƟA} ]!wÞK[¼-mH v?8$PTǪ_߱_90R)[4gdڻ8ê-hY^6yg5DEOiF2kHh!/ԯC>׼M'w?B{ (vć9.O):d{]=jI^V8Dґh¨ڈSҖyFxnVV7WFϢ;~:,S&e+HõNRj7cfw@@@ @h'A ;e l!~UW7]ޟ N'5ÿ~p# ! D`1<֋   P:Au@ߍjޗŲnv;:W2 ҷ =T)>nDzye';nj[?{K:xF]1 @#@T& ~|a~cRNԯneYݰ*FgեU9'e̛׏Cm? \d㥛/   tP: @rv/kjX?Q8<&ҕ*JO'>X;ǻ7P?99%a`ݻaĹێ1C 4x#xfA鈞J@Jܛʟj{/ Ӡіj-}Zͫ";>Ǧc mCFnc*sWߢG&.#H<,  ׾GC+Q7@@tP:t &pـt㞔=R/>%*Y+ R/Iws=ėcdni*&  H aU\@orl9!ظ;©] W"qmy6VniAj]X&'1zDw7K/o줭~N+9t7}IA>HK S:DՑs\qtٿҼWДsQw4l"Cqʐ|B # )z}HJMK򤰶JxBSKYM-օdT"Q"&:Чkzyta1mU[ѲMOF[Et%4isvUt{b@@ Z)i^- hî酭m7MN:6^zgbXڴOK"^hm*OYYTkd!a(R)TU ]h7)DE*0 }j%r:SMtW!1x62lWNޝ;~尒=@&H)/P׎=霓.yP=_> Ggѩ]ߵ͗>%GEUTZnT;^SEE)7WQd BRtY V^%eW޴ ¤`q#u{P{~rn~M!+ ?;]~Ύ(累XB y@@2@̫$RGց.WFw>Omļ!E7>[Gf94' i4ϣbda%D77QS}MXl6=/l ӕ}~܅VpfeQu-^=;>c' ZCh$\,_ϥ|YeӚ| FtQ Ta\Xym 9<wa&`e|DRJW|IGށ #(GEom$g]H53$l>8 !H#{'Yf,YYd`bar 6!NJHHHf- d0!i( GC`>n7H{vUhԭ{mFuB%yE\])ޣ?wO!d0A:e(%~~` *46|6KX]ӳaOuv_T=EJHX[.oFpSsͲ|~Vٶ:m;;Tρ7ow!*çvmoB٦~Wsk-J|bu*F?R/vF)6FۃK(ݟ Duʪq\{JR.~?O2'|so 0t D_wԳzgl?.V@~{w+my]!2䂀 s@z X<[=ycЪtxVUlXEW,vЮ%|]pU] EEb#ҿ"$'t0j=JخͶTfk۟?!?D~[j!nyeU6#rCrR؟Kއ?B>2'1'0$ Y@VY:&XMÏ'-~9Q7tM8`LPѬNh%.ޮ [rCFu kJ0cUbƹCy|mpzO-bNڽgذK>VXrnU?? 
=Eغ.q ˮ^?~>l\ ۺlYKVc'V\d݂ "UOڼװ~s:6,"Wnj˝ +;~rPG;>KeзxT6 RY>ׁ^r/!,^EE*HT*޺sZ,ܕrNўW\|Z¶ujӿEԩ`llf*}f+si.$h]r063M GG)>Rnv+[[HϿxK+?A@ņ@V)v8tUqJHTlpPѮ"ű)#R!|?H!JZXq`H+Jq)(9?tY> -V(zK;-9OL3ӳ8c=K66#_߽E g3~+v9A@ Yt}o`n5;U},/Hen?G-& PZ*UP '$VVx!hQ۾Kmw8{Ae$'Cd<_j)I!uv`7Rkᐾ?n,'#i+SG/  h .4 Vt͵@故4\f$&HѨ6&*(;xzV-&wyBM?KnЬzWbWI"v%# z`/o%%~j>NTgLO~"#r\O"Bݺ"SV~\ Mom-l`Yqح)(  d16񐔎5X ँ\{rz"e@ 'cafd5˃h(qrmX_^4@ չ\~-o;K[T.bZQTW`eдJ "T7E RÊ8X|  @bssYtB&rӝ8i Q AzL+*'AvD0?>6wLVMqY>E-}]D BѪ6.5İHD.>ǰBaasۮ OaӬxkhSA@A Htm5k|:b)ذrqjL-jrjKqy/2OObG "Q:C֎*th?9tPSۏc D}.v5jǂ|=('n;5;s8N^;FM`ZruߎZuʑ5/b#ݚH  %w:75JG"qpPHaK|UYODZ[~ ?/T?(ڊJwZ cEGSLv6"m#PV9kGTG ӧB3zToWq9GhN# @n#tnn/kUNV A.:w~L7^V-)ɔuD~͞ؤrq2ҩ\ǓP:   d5Yt8,l(X^ُ;1:vMs8Ӫ{Y\+.G ,N% (̬&MxsyM$  @zU")v(qݯ>[M\%KExl]YVu>r&sp%'~^ 'ٙ<9BE9S +)  @."5J$=-}1p#+0+t㯪\9@SVM?I(Kɖ5; vA@A@;w-?xu~eGr 51 W .!EdBuK}:8 ¾}Fѫ=|9XU& H8OȿOMЬ&J  B`z&/wdP}xg.&''1==R,gf7٣tCUQTE=&\S?cqL>SrxVhiDǚOANA@A@,L?ϯ<_q ܿxآds*20&tBS8c#Y9 lĀrn?Ì   :'ۼp }ܹsGG YtBxi֣>Z+c=%|qg+t*?SU7QrѬ>^x,* (r" E*>CEET?}?~c{c(N0 &)׿R[~fSiV cOUXD\#`MՇ7EҫXx"JR/ B@`7'{;7vq{p]ܼyS<|Pؐ!k:^Fg"Daؤ+tDR2e=tO9A@A`A!x|{ g)]TtRyZK/|!^!%@Gھv {U?~7Dbsxe:_[9L"bqsb#ǂ  ;#߸t},&J ocҥXb Rh~Pϵ.?wC%iyP=w{0+(ΓzQV.YCG꾟|:JaaGSɰ#u  ٍφ?u3T| -5D)pqFn^yy9.^.LFyߠs?n /oxH=/ǃ(:O{sO3tygX}P2XA@A@@`hPQ|FB G޽__) j]۶mCII n݊UV.\644ǣwʵd}1-k}sAE6@__sc_|@;wJv8PI  1< rx>e4oT5+K,uQ\\;v`Æ X|yӫR}QJCWCR'8ws9@7Z\7ސ3OYԐ3hl7 Զ֠,Ss)&lC%hG_E-Zk||{S֘{KO<㝇 ۈj?Z*XT)c U% 5+caQ!RPoϹFghuOĝ:89FtszK)p,[ ׯWa`:ğ#.*R ~0hAw@XTV>BWN;a;HhE+4^fepR}xwB yH,ߧHF[ @ R B>6B0wvڏ"i":$pPB o0}jxQVVO~xDHgԧ#uHWA@Q?FfO(--U*pHĪnU-Cz !SJxc)j lX!L}&7:%eL# %,Ē'>*j[[n|mFS#RSQގݻ#k-GF,k7Qx[)Y=p..?Zm-_ߣLrɍ!zcS i<9%hjkѲk!L4~?SCxTcل×Q)LR0AP>:lz<nBICoO%Q XT~(F/oB]-řNeo0͊ۥ$ ^[XzN".&4K"'xM^ E_V`Gv Jٌchll$z 9sFu7j{<=U %z'V rU-nEC י' G\S0J~T )SOQ_Sc80wK/"U/ 쮎X:f-ɪ5pmq&E&2?1LDkSC( Ty$u肟/eI[Qʉ>Tӛ/)ރ fde{uj'EqO+ݎ/^'嵶v4Omh4I㳐 O_;ZܑCLe5h O.jlP D<^z%;8Ks,wt7w1uOŧSm^)ќ#Or*<BJd.zUj˒т !A=Q;=tޚ;c)Iszq^uk7D71 [z|f5EɶqIКs^{S-(h1ʠMt+u(E. 
^'AaU%֬t}p=p]V.8]+,ΉV\yO?닢zUxA +(/JO[}}vB#'36SR 'X‹`t_XٜMH cڗ>}$:V.W`ˮCz!_"Ah{] z!DF",~\ؙ];JG6{=@spe%&U 'R*.hi# ©2xPrXoiLD%J۳GJ =(R88?O|*:Gb G#a7B6;TNX4Ku"|'{氷HoLF1H{.}E YWb': q[hF5)<(xH[{%_m $.?yVoy0mX;*ݡ^ۓM=qϨ#yy HR~{ح5r[ԽNS2RS6zpّ|vtDϲPJǦExףGywg#7&X788SReYfQo3o^y   ]ŏIB^lN6mY^D *  @V 095S?_>N}[Y7k׮UV V8؏&VR2(U @";$gђ"rU5k{!JG&P  FS0~l* ʻQ7('c#QqD*lHUO7Ùtd Y+ De쮡)po6uL'e  M`6}Uu\?̳ʕ+`qοzjoC@П?efA@D ЍvUS4{(֑NDII݇VU:%Mُ4e[tMhy#ITTp4͛7+eG ANU:x㍌.>[ki_Ja!x"wBO}q #76Q]dg P щ'βe=W:o7XN3S(=?|o7{9l߾]);v`G cJ+_|1V^*ù9ƽt)ei:;Wl Jq #wNzh|M;D7G\6T4_%$|Y9z 9TSVvލHm |ѷSQۊĶOob<",+vz>ocLAcM]Pv{_?G(]䍏Z7֯_HUlXd_ D 樤`A:4M`=ujC&fɤt]d0hPvø"AֵЪߗANz 0 Dc@՝7YJ?; I0\]'r|c J:?Bk+Ob&D(?F!jVHi MF+QZ𷷠;_ެQ{X1gKS8X9֏f]V-սER3;` R+1ӛ&ہs!m'QyrY OTVVX,A[F1&ִ Mb+? ]\DHDZFtO+$!ED,4aD @vvhXVVrD% za=|&&;ݽ߂ki([3FUZZOضmpxv%tduArWjЇ#`7Z!媂WoxXP[L^sزZߚ;lXӍay][x'Jbg p!e 9 7#ѻ5L+631MK6Z(6^CYHeF`JnϐFbo8au1CA` 06>o_ݏ71ʿ /( nx]}h7=}D 4-ԃMb˲oU7s8ߖnZA` pq7>൱Co"/w+/Sܹ3a\C:=2H>ʉŁŽ{|!~oqKM;jL$SQrmS7 Gڋ&R|Nv\&9\< -cE*V6YRu=®=Lz-އ hhDҀ^{Ixi|hҲ4ka ]}s+={֕"B齴`Yw}V憟)OF ;t+Tjȏ)Q}/LǪqF:4O^ `^^юvÑ_Qtd!T.ƞS"!MC":}gTSvK8_cZvV?,Mj?C¢d&sr̼|Wtg{~"ރ%$.K6u/NZ#a%:ilN\KWe?#tZAfЪHM~V,3 @ wW᏾q{qz:#!S#S_Gqtץ1Bת1lօןRFDQ1'_U6Jбk_)zVQ1LlptmFI {5'GD]PliA/3ϴ!Wi|)sA2(ss0L! ھsO'fu=\逋2Pol߾]cݺuMBđ|.Mq}"D.kL}_\f +E'Zv1%ʥ$ѳB*iA'ϢM5#'Q0K\}FN+'=)@p`fo#X!Kl`U?ZA@tM?u;Ïb Π ЃT_V8؊֭[)I vP *.A,qd@ $OWiљ"C&42'F:y4LS!uLI*鋉yF :bvP"ThUM 8J(zΆ#ot*ο+Vroc\Hن !y \b_699WR7$:UߪfHc!pӐGP4EK7ȼID<3"r,㫝70,:-Ð+p߆9SNp/ׅ UUU;RN~4?u|t{D.ߘ oZ 6mRʆuCyx83`Kf!NH_A@)[OT|&7&,#-#> (|Ujjxݻ#85)dW6~co#MX7n?jC;L:A5Tɨ5Di:VS P=}r}5H0%g270)%TG*EZ߮QfX4LЌtIyNBȷpJYpQna:~?]EErf ɟ(b=F_Ŗr~o7%TŧCZ> #?~0p/yȿm\7sAw#.ov#r`\ґxIu|3IOA@l _ @6R2B=}4 YtHqq=(Y"hT껁#OKD>e8±tRnp;vs!]=scd?yE+S}qfrWlm*r(jVEHĉ.3z.I۬!bDS?4r. iA 9?LLx;Gb SKؽ{R:֯_/i: CX::~PtFzo6뗡46ks&i E^*"׭QjnUk: op?V8w}8cՒ#u…`Uy)CCCx<*Ms̪7x_|՘}J?1^{هNhL޽\ +'?IHId J|  #eY^Zβ5A@Xz4F.ko[.俱}vER! 
J C@{Oq 5(<0-yDB l@w1PeѮ]Nx9 ѮXS4S0j=s(hwhK~uϧE9 Ckt_M@71Y"̩֚W?gEKZY;VGڗy+?!z3R8oCѺ%*b]?ßFYFzz+Ȱ 6 `qV@BuF@glEX0r4#)S0M M|R=8[=8iԷ)4:1~1JPB2yܔE Kf_V Acy Tb dJpu^F&,L/˵HMh'J:Q:=p#Ѳl[?G/#:aS=4$ i^ZS[5ŽSOQx݇'7,/v $@Ɣ^{ oF+W)@"Pf/97QmBh"T7x כh-'Vo#FzQ9:`P:]pTclV>J7-T&#c<|9!Zb27{2nỎr(́۰\o}5cPGERdnv4XXEn|Y Zu^\: BǁS)@uGxtq'cq6@8v2ySCZa' 1%\+=9%i&1e/?Th1̵_]Y+m[;Xq3enXT뽊0r>Ls=+v{0ANg\^AT$KoHIt7'dKЯ{i<ދ(j!7l0r> FWp*cժU! vw:0t !"CA@X0( XQr~#hd>imM.jELM#j"E!0ꚹxu9W1}L:uيއ.֜볓Vn~}9AI9WW!sg/1F{<؈ %›]!iSLbEv–-[Dሁ4Z:f,) (oĈr58R͉&gY5q65*8OG{FRu;B 3hUҎi$V7rjEXY9 F.]S؂?|ҥKN 7a\.\VUҦ )sڵkmZJX\8Hd*+S[ Q?fl۶M) ._\S()C(A@N2\8FԜ"7[xl =Li9k1ՋuPN/rg:UJ$#  sO.?t B+ inpqQ8*<2@B>A@A@"~@nDxK>ijg0C!tsX:mKA@~gWGx&q39;KeCA@$H#4+|_qWCr dtd\- @LN_OvXSO ;[!8#yQQIKGjhA ԝaXTp4gO_zJ_z3k^17LJ |<&ܷ:^^?Xl"M;wZes[^ݿ'ݣ\ԣFn G-PQNŋڄ"K~bR욓9W,a똵XsJ[3{se|3g[pݻ<)xi,y+Xi^.ŒGcm,ɛV NJ1::>'&&fe)6ґF0E UЃ%?N mq*oRL,?Q~YۤcXJJڧS! QH|==8qȌc2} ߞ@5{q)0(NL>ɹ*b+sl{ "jT< nӱr~]C/RB \>½zik]w/Pp(Mdҁ‘n}cWQ8K r!qWX2?sظq#<oߎ;vUV) .#E4tda/×iJ*X@NFTa^bn,={qi(\|^ϑ(DBli ?xD( M`9(%=qN+Vz=Io}(5DJ,JLn{oh(s3-hllvactT,R[BT)%z4.m؄6(4C gn[^o7Q~L43t#`,-?ujSYruZيh(Ӫ-Ң;XPRMcS}UeːDNNW_[~x[( >BLO-]Te(Tm|p* g/..',++Cyy9> _/W^K/^xAbD@3Ma8) 7γJT\9 Nܹt6J~RS6x_cpzʼz$#KΡd:N^18 V%=#vY4jU1If2ߊn6W1D?-iE~́Ŭ&V@ۏMDkuc>k["X9F>$5C~5¥/<>zP 0԰R#r}mAg֦? .4#9zCChgtcJ:p(=UJY9[?:U^T0u! 
뗁tܻwOEb%Oa[;b?.7??_&xm[jґ~2Z^B; ?`GkI:+%vIp0wL{kL)!J=0ʜtͲVE!~sOe7;_Eˁ Fs؃ݶN꡾OokR;p !JVF?*@{*|g Z)?S.(ݫˊ}:9ZOQ(%ޙ)*Ǒeӣ=eɐCr\O)W$'zt'V28[1 ˆTJFra(&XRV V0̟E B@t!)r,D 7m?D)8o8, 3מL }xueGy>{9lI;S0uigAц4 V=SpXAv^8[ D-HhGm62B`A6fVuȶkʂIDt1[TVR:UXI` I/+V C`P.̊Pu5DN:t(2D)ۡY08)$ごV'dӰ(63A95D5J.Z4#(Z]3G1.$ʏcȠc2aQoEM)V24#@B̝6![ov1qCP?b48}-Vz%-FUUHqJI-* Ƨ c]aE3TzqN7ѩ6WE \O&Z:5amGcF MqlXnrfߋ;wⓟ$n݊M6)?[< A#T9w2fg;f,!Ԕ,?;$Ǫ^{|1%$<߼ ؓ[tIxq"1U|xL9Ԇ}6mP/pB>od:l!dj; JSus떱: +M+ R _"+95sSnE;ɑWA RT_DlTT]I/eu i{OUר-/dD, 5^ GrUMt*Iy[frG_oeo-A:סܵ M{:E*$B@/iNP(g&סwH-L.D$B.\tR8Ӌ~˕GG~?'8ҹ۝o;pВ^(HBArPA җ[D R Q\TQ+G6ǐRy?_q#fPtoT)clCSxS+pQ.v7wQbtPS"sbO|X"9>lա>whdW9A@H w}78~>bI`}CUp065ѫTw?= FQJM69Nשmؼu705FQjDZtl0ec) bqN($+6Бў ],قraژ y}c`*@A kyc-(|;:g+ 4fJ V3N"R9*Aɳ9 4<Ҿ( y?)TqNKa!9}(  ?ϹYiV%2A@HW:R@t,}Ue阞O2AEg)ցSZOiP!K g7TɌ"L$sy>eVYkх8(쫿<yH*0Y!_ןb۰ h9fyڼ*?gH?~*~9 IT_l(I @+Kn}J0*KGXd5 $+b4f'kS|fУ"*A|T _UQN=@+~F̶rpbZ4 !yڵ)i(ULzb1iN61c=P*d #JǍw!Ut<';Tҝ()$7ӊAپ~z~:'I;a0eg~~~D\(?(B, =ak>)͝V*嗲6SӅ~/ZA@9t,{Xz=n阹 br~n>Y4ZʼnL|B;sTI`i *ʑ|nh2   K~QTtX:6lg11J٢Ch, umXXojQ9?R(EA@ň@ƒfV=N:>7+6x./YB;7HΨGLv{0yotJ9A@A@Ҁ/)"r13) WN:>S?3Ug٢B#bc'OJtґA Xt;83II5{*OEd3T vT0 fk i>su:0s[ jshv939t `yٯ`*L>I-ad-qW Mo ňxA@t4A"0܆>6 )fgY8"Щ VruHK 0-!)Y 0,j] te[;>ބIGA@A@P T+Y*غqҜNJ~" !e6IXp(7ԣw  ;[ PzK-rDXd[߈ {Avո.K%2N ,RvӇxd𸱑aQ +rM9ؒM*@g JÖ [T(ѓS9G_xnX:m#je i6a@ 9F_8qG#v1Tcqo}K9v}oA8 pМƇȏë9S5(U!r2tsj@< B ůI>Lvw^!ٹ !pBJ <֮];gg"n(?_wasSKWG_:Ex_`1%oXdfW,h}4w˫}pq#:#JTȊ(9_l.7LM^{ֶHAjv?VRA@A@.R}AIIzzzuX2t-L%df|,[&#+'p[9.)oV&o+~[MyK^P NFdM~rJh'Jvmɨ|mdI+A@A@A 딎IR: WKb%*2~߯0u(]`n~+_Pw|9JHMA@A`!uJ+Yc]tLP+.KnHZQ02|E~Fjm-d@Š#c82gV!_Zc7C.F6wCmK.j3\E!;ڴ0ܟwF_NgZ`V(Iʐ  '0ee (NKT+c]9<_Bq@7`iy֓\=96)Ԫ䤫DSF\4sdIH=LvlL1 ;ήME!E NIv?D(*dkjmDgCd  BG`ޕ$lqXn#QVcrqwݿ%ǘyPFrL17WݟVѯT YIf}9 sxo%|}:Hr%-$K:+#8R׫-W8]<5YEʧ  BA`^# 0[GֆxeEk(Ā\$JVV<2T84~a`-*[jdHi8i#󺥍c(EUCBph)r  9@|&秱/!jbP"gP/0s Nwc>T? 
ɒ*Qz-j"^cܡ͔vE!+R1v!MI!@j( Dd*jW]To}kd$aCEy%(' Y)#b%S֗|VQflrj=8 A(\O0 CiަO`zєO~܊eя Al&RjAMVIqS׈{OӸ.h;]M=*eҸsZzIl; Y7mVa3%1$ @0E gѮoMi2sr&*XZq p~f3Cōѫ8jԺQ4*V4ا#/KGo_S=ҕy#L>S9@ PX/U%[ˋY ُ@n> a atET<ܘ-IJUzQiBzU(O䦆p.Ĺ&GM1Y5H祈X~Qr$,VMQk6 c!U |O*KoANJemcW vgkkL 2W!SL8FBN3'@ a8a0S zp}[]6H[Zj|W#C)މR BjA7zGZEJh wy:)OG^2GLk~w_\xl?oq/k|rHY}K<yc:OjY&DӲbt!e֙uRݯ.gZVCo|M}H*_szk2x~)LbFq%voD[~E-fZhf}B~vQ$G?й8^ PKeˍ&'+ћ Ro)GjBQyDGt(iAg /GpCXF% A yS:8jW[1vjhaTHn8}<6#oJSlo'zX`T8.Ӻ>p(z{G͗L)\A`"0p~oPS ˛)Q*kݯQBM*kW9ׯvV͖p75qjE"^ kè_NR:؂qH?dJIoK\ 7*%dnA@szAQ<%ĞEybpv 5 `կ#՞| 7`g?È ܑ#eQLŠWmPc=M=PA`q"JB#h肑EcƊ5raHrz8A7Q]C詏^$˽Mq ĽbԗZ}:vᨫ2@NEZ: Ӷ_=SON'_}EZ qSwo^5KV(o Mx_aox[o?Sv,_;4?/r TT@e>rޏv^T3ى :p$60btv ,bo1ћX8 A߈SEҭ(GC8Iq*W6=c+JT9tkouUf5Ulo"85տ(p/,{5Uvj\Iou)mrY‘ʡUS`Kw[Y\T' 8{Qblh7=6F)# JJ pc 8E}cKJK82], Gmశ!Ũ?R/@V%ĭ.1#QNA3KSw)NWgbǯ (d+XbHmXRqs<^=X}_3b^}on`:;l/.%B}Q1H"{URM7Y)dM.+UT EZ%hRKդ4&1Xp ^=Ϲ\_j0{$s<9_Ks=V-ǗI-rC#,Wc#63zM{տ;nmN@&/gh}ܨY5?֧W/il,>=V:}BGu'N 5%C֚ځr`@ b\j5#~K -PG"u)ٛTꚶ 3  =0P+P\|nꈒ Vlv刖>fFrR=qx  0ґ!- 0:8FK"u)ٝ'׬>qT  @ƞ={zKJJnooc#   `>@ zESSs  C*@14  |@@@`H:@@@   Aǐ8  t@@@! R^G@@>   0CK   @g@@TcHyi@@:   ?CڵK{/>yd͚5KEEE86m"  HkqmٲE۷oUYY6ఁ @l BVҘ1ćEGՙgI9`A G  @ڂnYFwy^|E;_la-Z+V8 J6nܨ~eYI]`q NkΝ7ko~&ُuX^ߩV-YDSLI  0b2Îp؀ckdi{,7FK/&NϜK֜;}\-ҝVwuOT__/[@@xt9#v"Ŗ##>ˍx5ь41SUDܧ]mߦW|UOw RnLo9*  (rajl۶͙ :Lfvvi#{ԓ9J;~ۨct/ٺ.(OIAog<ǵ};- w4Vmvh*Ig  `R:vء+WjܸqwtֳwWijX5 4o WIZjۦqY7ʙ:EWi3d]6#ΝI4s4ٺ 86_Rw,P}~"Ouѷ}G?8%ֆw~45hT۔ rR bR-  -(rohF3l;,Wޥj 1J5}\9ojGC5J˿^ɿ[fys载M;U{!kKhut_RԖh@n[Δ1bh칱  _ >#hDul}Nҋ^U9U 1`˻;ԕiLUUt`{S.yڶTfULCǵ } w`V6mqLkW96&}_e]>$3?%Y&DՄ y*}})(6ؙoT;^TOM<-  @@A5~Cwûݭdvb/ мŷlWv 7W啪Ej)6ؽ]f"9\~4`IUi:ZLڪB]PmoF:VlM3>'q6BMM鵧Gz37NZfl4#c+{t~d7Uf{C5:+Y5#WVwrQsqk2(ȑP i*Ԭ'3<9L2iS.ĽG ]8ۀf)4|ߩsC`w lN-|""fز9ԣjfۛk>;68,Ҿyzž7A\Qݹ)63Sa.X)O>iF8zu:]>}AnfK](q`+XW HN^2OOа['ܹ iS g(vF=̛{g(gS*,F~֕Fj~}Px=;O? 
&z BEb7^!ͨ[ f9OO5" )gֱc"u#uoI~TChr3F#TaѨ -+jm(uG{6L>ƌ 8{U+@%]UIG7Wp|Rϵ3qMSCeϙ۷Rc T_=iCI˲u.N[ysya_M<58C ηq7K[/Jd껌o0u*J_Nͺ&%mӵ62}ͯ@@a raꫯ8#v[߶Ԓ?O XMkX^-Lz]oQV;Lۿ~kNs62:6V5Sl+n"st, qܮ '|S]5wʫt"F=3g{֍IF1*T}@昿gwDHl/!B[1/-P{yZNIۡ})MbIF: RBce`s5G՛;{BbRS.Cf"ǧ, 2)w .$btv[JU^#3K{|u z'4n[LUxÿ_L3_,8{B~핪3tjTM[ѪF:©X T/Ժ4SP _yS'涹=WvE9Q~^̍hb'Bv]g!3;4۝{vimx$= ( ,۲9gG7 gǷS&;HO_ L5g玄Rtm  0R2[RR5ys+Zfws\7nܨ1cI3%}}Cuuuތfjδ\͚>K'J1:K9Y28.׿~/\SLIfʌ|X@[ 555p@{vVUWdl}#`DM+oc]zY;v7F˭1#7<9*  Cۑ ;baG:lTeeƎ۴-cz˩g볤W Q6X|}Q3gckwz0M^@@pte'Xt{19w4iΜ9j޽z裏:峳ݾzlH|w\ڈ%5@@@ڂ{6s3l@k.'õ >nNnnSl 5Xo9>zEW B@!zΙ]2t: ̌Yf|^-QrޕrH]T B@!P\=xxx}FHNN>33ac4R|9iغ1lޅNԕDEģMVrHmWKu))dҴ{Es3l0Ӳ1Ӂ0#68 `0tsl<<,r+)[ Q/C (ӣs⊛lEP@PPL3=rYJKH}f1l8:k #ۨdR }!P p8DlrkB@ u/EDL`ybRGl_=˗:ji{;Kv]^_,^<10D>)^F%.G Vʆ=0_<U,o>2m)ɄQfv6Xp0뎣霆󪎌aQ3 z9'?j$:;Wza/^%9b$'`mlV^xƚ42i>oB@1Vdx//^xj WE+3S̏wi0JC|`v"}8Q-ry=ti -Oƽ?gk!E[;4T-;u_!. %fU 4^r"^ fz.knr:J/DG!pu"PJFJu37'4R{vjNq-Kg(.%GvG dGǞS _|xoxGSI;*h&W AtnZ̑`"^'Mu#\ x%R!fz6M$W6G=[E JCu%*Lq7`[0ZѣXN;M.qn!h3#{\}0W&>c,[E5náuaB# sRוA5ա+ƜJӋqܵ8[[U\Nwv١1br'GO\IߙB$Qt3$]un}b;:O|1ib⿝ņ{ bhjom|Jm+14a8]=K:Q_I zrC#frBe2nc㡊Lё{ ڍb",9|EճzT^ÊN Xq- cCEpI's* %xDSCAq{~bZn"_u&/[ !Q~~Z((PQs8$Z3Y nMy.񫘵,zwO<S; p%]Yqتˡ9Ogo<=k.CXh KX ylZAҖy|+fp4])RC̙@~X<|M, ,LQk*װ"LH  3MCL}qWd#UMF<VWIp}O/z% K>4 \rI}<h[,XଉO6ד( ؚߘL܍ Y6haۻΚx{J{>'4nHo_!TyYβ˘9; <*ME: #G1<–Cl{x}FzAMnV <3(Y` !!G5>1ĸCFl7]~#:I; 3vسvu^Na;[С&!,?Uж5\žxrV4E4PYf+=yCDj[Lط\p J&ڄ\BE5yk$h v$}tC cjbn񣸶?B%A5%v.tΡTz_#pwк3A=w$‚iL8H5PEaZmI˜RhJ\{AYU35"39݇?ӈdKvA2}O\x6b\D:hR#Vadn&Ru]~!g%X>%/"-`%xDijX,:]D2lc͚F|h!QZ ~ ikK,,qѰfdc7}r#== "va61!j2cղZ;goבڟJj?#o^G1=_V`wXz}5$&o,w&3![FEiړ_]?w:vȫz.QQQh?`))+/)jcU%)o+q]".Nzy],ƯwVBg{"Jq"c3uVL0ߎn<\7 ڽ[YKV0r)'&Y'V^fʩ*YwĊIFs ).;qwɔMv9 fXN2Ή|fy7$sDoٻ[[`:Dɞ,-^N^n6Evp'navMp4bL+x<ĄP/bXƎJgz;*-o|(/o, -[3>do`wo#`ݜ𷩢.Sϸj*]Џ.TW(.J'3n6IV2jlO6 +$_v-SN+*JJXtwKew738i \.<^cRUԔS i*mlRK@3l>Ka>:W_B"8LbhMOռR˂znjmuˇk6Q?W*7~+l6LI"@$xcԟݓ3qr-Y uTa۲Gh䖠RZ..ǂ┝.>,BG:_n'OS˳MoDyIhrIǎ F͍sSK||m̰tpÂwPU/kǴPEnR<ik WqPOƕrXVS)h)^TԟQna_ q߮`tC %o,b_I䢅j3RسR. 
*`zlJe&D&&7`:t~)WQyTHnP=Bvi7%.gW| TQ]yEaxNiVX PA$:NvNWB%bR!Zԛ=wie5\BBb8\d}yڟ Il޿[k;(ꔣ2iy]<7]|챫؛Ɠ?o=WtN-ĭCjdyVar]FD2}C4YL98e*[hHY;0HIm]g^#/ah&u NSS[^F}8gL`@gYeU.+;atJW/dPeɈGsa`fM4,٘]pg gϲ|Y!8}6sbGMj9ϱ+/!ZR:&:3`H*%fQ+!-œA=h ~Z#'&Rx)˾eh4)i= [kn];X]{쫅He&CVV=p2՗òܸ!DLyq^ S{aɦ|{=']`Ebmw/,*(ԅ%b)~!*NջXNLwǂ57cD:3ar͖?)E`r$xަu4OW΢ o96m!#61Q4:/e+N1Jt<dOickDaM&ceoc/.q`V ?>R nΝxs6˱RZp4#cZ+xq7^u MCψAr^o8AHtz[\z̔Lmya?pd {=ό>!@ɋ㵏e+l ߻^8gz-û|\ָifO 0U$B@!L8 &$hGBmиFꚵ*m#Nzb] uYYlj4SWH.narD&[>WgI;qj/kכ˿M;{Eyk~)r( z3APEVV׳_luj#h:*G(a`vr-kh#ƕ)'W xPˑ*fH}dz[r#i A7SG0ql^ʎ:*7\pUMYf}%E1Ǚ2$7DBxb| ⷰ~k,A[[3a ︑ T,7OL;^bƴ!d:h0䂓Yɟ^nfLI,W;;3387mU,n=O5O0=ܕ_j*f f3[Ƕ`?kn!FS)vavy/ B@!8W;daqZ~,/>2jKqbzn67("uuPnp7Y\ÒVzMJg'1N CL1tAB62f"W7M^4l_gƳn6OY֑NvZLʢzoYZ¡Ѹ yg|{[35rc72nrG_-\](Ԑ+M3?e+}4B@!P(.?n)N4JnCJyubλ׈_sd|ٿ7y叙WeT~@l&-gڭlO8BuE1EUc-I.q! (F 寄ʺFEFۿq4.nn@GO*D-M])Y˶$:Bcw&O낯 "DHt?±:o nkd6lX{ϯ͵7]ǘ`.#HG#)!iW RfN^C0ٲa䭷F7Q~5EV(ڷlrkO4B@!P(.[ulqDfUNSKVTWQBqx,Rw䓘MzC(Mej)/!Oyv ;R,KeԸP"}p>D I_B!RuGaiqѯkG&ʜV;' ))vp0.PXK~#;6Edge`S (zv}3%$edI'2>7G{L.(~ّ.z#a+gCb e*7j]~"W|߭t_zRX߈͈B@!P(jE|Dnk(ժ|?}ȈLz)rS[N/IJwE=kc R, ϕ[18chi3?afn&>.pf^<>z6RS B@!p2o58o[!Fz,4 (N縍8V9K)Z&1lu<1ʒuz_){Jyq NDc7EEqh#KD?ijб?}?C~cvcCl|H%u< 띺?D!F7h<*o x;`yo;šs3}rRf#}SOY~6[KCZ0C[)4x(16**>}+a˨t;#?<~-ݢW95@t`B@!P(m]OWC<^DžU߼{PWPP`L^wtcwT4\'OB4":kP\Ļ\A˅Rظz.\t&/ÉEɹvuy\mIPum3[l 8J#d$gy=gto+m.4 q[ktât| R\=hD#)BclSievVU rsاuNDm$ ?o"U_D~AvlM(BβyeL6< @K9EGo &ş"-qcE*l G6GmlpH#'}$];w!JY [=H]|]SYN-> <0{9<9f|;׈զCtTc謈T]U7*,b}s_t3~]UQ="+Fz1§2!YKes}r֝r8ocS3uJ{Z^_q`uy| ^=~m"=_۳vQd -ny7B ݿݞ{F7!R}XyMwH5za҃,c\ǣ)/w7s!R%O͂N FVOh.fՄ g"Rm$ qW9A^ *gܥc6u {zqץZ*rDYq "_fnFGG=p Fm 1:/W$.//xim*"uŪHAķ*gڝ4)ͧH`%g=a|&;G|Jj(*6;m9_Ss!B4X]BPzyygf;7/mAuxVɁ2VW-rXqhh+Z 5ȬB}1~rV^lZrI,Z* r4Lsbؚ1IZB[ wcEPGm}=ulxқp7E;AA:E)٠5a4z9hQ䯖ÆUd$KG/u)3%yZN8FVK-uR^mF$Hdh^M̨+*ZG٢uZ&_'#ҋ#q$m먔6aDa3K=֖,͸J2"ӚY[xk}+i[c%ns~%jҨa۬ՕrZ@] '4҉uPKUm#:M6GeC:F 'YIxK,ӕ[SymiO#j6&8l3vxzU"=f~[$&đ:Iڲ6;izq᤽n+m"a7bv48uuڟnktr~m(MƃyaЂ^[Zz{cH3&ژ$  %I\N<_#u[/c\Ϥzz Z}H[d-GciubS_Msh',$!W6&86ay쎵YN3H?ӭT˘*eުO]fя֡^VI 
Nkܝdݪ.~~՞CwGD#tP#(k|m:ˈT< P&cP[Fyҷ}hiK#mVzqiҼɋ Wʹ8Rʃ$'x (')=kgٺ͑DqcqneꮾY<-\Hrr;WmC_e-/.wcylK}|Hd|| o,jr!T"iZ_åЊtD]{nIn,’:nx1޻x_}L'ִy/őhĴ׮!mK!څa9w2HbܹmgC,7˶ S};5"_/xwUkwu )%7  gd '5k;+vqdKkxz'2۴W4 ^}ݗHg>[<{qĩ\Gw ⊱t%x=z'[_zuKHڎOq&sc?A4Hk#yWUod[IOqu/2t߬:nn6+\{,c'$i< 7'_bRm!K}_ ~oBU=y/?>nTPW yruv/fN>q.w俳s*};z<&ڙ>E;̑Y<{,M̡H{7O^dȗq!vԭ<f!I⏷2^@4|onNdžl /<;:[!SxG6Bg04|fc5n51$m}i{|t/GrfgcC Ξ__r6xGP&9n*}i߾;wq.)xx[lȫhC1 E[xk}/{a w_d6?'x=<DZĜ5>h%e p W6tw|&kG: L%ԕd4[B?xt.4Pg_ۋG~M.?`lW'5}B]PwJaqN<"o7OOrAm/k1I:a |vJbÂ46o݃JXl}Qrʫ 狀FʅL 07+T3WBui+5c~˃6n w}heֳ144wϚHD$cxlZ?ɜ rӼL҈_Ǐs]6w.O}&LbW&[gA{O=Ɲ_|rHI/"IbѲ| M79P^Ş9+_?Do>l{yN8吹?t$z +JݝqS.4ʗ}dqh9`c2wʤD)m\(dlFC7+Yh)-+T ?D^8† ʶ8w*l;fa^!}Y"/LL_~'8HI:9\|aknz>jkuyIw&yӥ/R!QUOnG_ysfTSMgB6v^0%cN tcw3֦eoegR,ebH=UGҷx|1aHsVٻ_y YQ 1oi!s1!a,7$mogR*~MAH n;KyoU&}NG]Ĕʠdž=݃}MbTV-Mu8,bu6ޛo䅎J4cL/rJ"'幞ЊjW̰PF;r?oq/ٲy[NE\6om,Gv/[p̦؉gckxmzx꣣U-lhJߜ敘d)/Ǝ/-oALtyklz eGrE'gCRrQcBN4!H1XhᛜznǾ*v{kIaAg.ϚHdgVP>XbVNZw`{Glx8..068"6( BO;]҉d#yV ::4R[,0tquAF`ah "9yI,+‚Z6`ʤ!s,jE:1p<%~PX+Ml>@ӈ# bmN`λ} eXW76px++jU&}w$vd(EŇj5/-bG:ZC?Sop0ˋ~.q6K~ qf#;϶*xP{G?wJc4c"m\ľ;E:F`I P-NX<*pc!?n\rRO#RTޮnSg aDRSI>Zh֨c8-BՃöY8/3Ϝau{8bqm>3P@z1(zuKܽGK/zu[|*226k1)- lJx?#žm>DŽ0a+$Vd!D?!NM/у?:,8,b1ʴS_G^#q:}#C\r%t-;Ež@oѠ bx'9A-Aݡsӄqܗnٔm!a :xT)XJyE9;s̠P|ES= [cpG/C1vdWJV|f=Ps8VPe^'2(JȊ>[s "LV?ɌzZ!33zm8@IDAT&fA[$o`$2ts!D0@Kgb4:_2M.['*7 mɾ)4ظHpe!M̨(\|yJ(cQqN{=+NHD^/D/Lub$Y@\,L2nhlqOw׳*tϠղT׃t2A!;roFYKOX_Md+"lsDm`Ӫ~a2 !?X$9j-Dns?xw`AǬ .m<ԜEܦB Z{%Z5Piwߩ4%Mjbl7e /%l.@,ϗ0W!!_*k@1~DEuw"# 4?h#oF~Z=т#zkg2 z}lfLfD#"-J9q-0b,CXNaW»"#?nOM=xʪAt 46RXC{X||=Q?h ~dqYfz_|ӬJK)ZہhbV;~\k &C<" LA  .ڻv(LD82;a${-h>>>x5RZ$E4Q\@ڃ!tMi.ɾ-ZE@o>wNHZ x 90궱xPoKS1wcL:qK⯖``oWԙ8@ivh@d g $;{̢9s3Kv)B~hv;FU%U`oY|{-͚<>Cz:镾B//֤YgZj,2sƟ SUKCPKynq>IfE0= 6桵W܄>"m,D<܎뽚hF=x˪\kq%o~D X'2x'C;0W)=O}el&Ht# _icI]H'Jk,uj}trA2car fIIZ z77zG:xJ΁BM<[V{O\"$m-90:"jZ'e*s_lLCwMh2-H!΂F"wo Wn[ɒR^K"TWQm6˘a(.C-]Ef!D(u*tZFȊwd'KU>O惡qvj 1(>U: D[ smڐ\j\Z\ƨ%ą{@TѕY-#Q<{UoeѓmV:u>XA84 Bͼ"4T8.1X\5[GnJuc^Ƶ<Ma0/uQx gbdv2K(G:oy 
S:㔇mjv$86S3[:C6Fy;z!t=,{Nj׳bYZ4g?iv67+'_ՊTQ<:`(Z_گ)Ncpt4]%~N+ĩWf 8@~~%5Oe0"HHdXDR ؀p_?<ä\[B6,ΠI4+¤CٜJO,,k憐k>dwhz|-ٚCl]]^hWK[S?/3Hn@R:P,}E%=kAPXCL$t 7"$GWVB}hˋWEz#gҏ *«8O;H 9lb, v;#Ned5c )*D/D4{3ɓ[Ƥ}im,@HV~^.عQPJFQ*~vw /d%HYaNj1'n۟Hir{G*).,HKm,($G9,zc n}Ѓn!2i(iC7m%rqqIh{k%2Y*V.Ü1h{iLME1ߵ+S(qR&gmIl5O9TsDO3ip1 N]Ia>,ejlpEN~n*}Mؙr|˜wdϳCʨw'PLJsGJjxTҪ>W\Md( -4m*yj0q4⨗뺳6-,; Ù<{Զ!+lqoҜ.'@O:GRܵ[0 nbB'c-fNǭ5me,r\wqxAHB #wy $%2ܺDTHhfo:_qyEyl},tX{GQ@{%vbzɗܔrSnM5DcL޻ ,e ((FM4.;9phfm_Fg`jz )7vn{ɻʄ8RXٳlj/]]&[bhF|.Y%+BzKQ*`rZߝK8PZm\T?d5&_Dz"2r&>l5huq )A U%˕&Fja%5HXE㦦p"ioE~yS %[ \)gp:_n:p ."W7a ddЬHŨpzkσGxSZjjLl6ox*'_n1AN]1v:5FeMƝ`]uo a扰64(h6cb1&J!GiNn8"S1_T-)>]=rO`pۄ_P7c¨'yo2~wކ?_lgRMc A4G`$ [{}>L- R\dQgC|U}mb4M(\c{h9R)@<'hOMk%SXFJ'HIsclvmtĴ vE?D(X;Xq'1$gI8|a#$ dȆc4B4a2UiG9T4MOCz.U`ӡ`?4tKځeFu@yu|%ޞ$HI$iJ^BF̂s,ُ5UT+jE*O WJd?fJRfr^-Hug% }. BW]UP!p><ŝyڿao|뷌cD/㺈aA*bn\m϶a[}6Û'`bY0_>7{‰lIJ`g!Du\*}{>Q 菧~.1pQki{O NY8![-UX-{`{Řo=/!H6PC!xynhew`|D{ #N]=i -w|^o'NlvfOx>:$RsG>;-FWa>^D! ݉ŝ:'-e3D<: MLwf@suO܏cav"=r$"⧶{U$O;F-1κw &sFyY*Ꭹ)⨱/8a$Zw=G>ևf;XKV~}̛P'~z8Ƅnj&xPA`6%pѤ7?²ťP}FJ0Ćcm,$;޽/_<Gx+>>e~*+ufG2l2Αޘ}@nA$5 JHq:w[[3::7ܨ|WB6`#LkA"Kok4>)5,4 Ƙ>VOG~UmHŸkՔT16 )?wW҂Kθ8Vtvm="T1кe*ʜpף%'^> 咥o6,`cT8J ͦP1ϬZ( DФbQ8Ӛ>ZTHeLIm8QmUZ\ -zhf~gpJd֑E[Ҏ ڳ(-F U Pgq|eD6Ҭhk1#}oA[s50=r'm϶3pV3-YP* VJ 4+&F)K;b%řckษPI%S V8Na2ȽkC#Ysw߶vpq5 iCI?6kY6`u݆g8 ƿ؛ p@= =!BVu jOka49MhԖqYu+w5Kj)ښdTt̯cWMBbknmoSEfo )Q_E[ڝ~Z8AC]JjG}c1Js%LdU icbK!`_Ҝ)s&ݘmγ`vVu&}C|OIXa::SCi]lG껑xf%na,V^]p>ws. Rωb܍1C\L͚Jv,چz>= yKrWhVĻG1i(ƆYS酩r3~Zv㉷Uge/_#H=`9@g I-w$sCqJn0|vٚX)r,9Fo )j)<.mƳ ()aB*~U^c!c}xU"+-*T99,𢓹ە#DI0#[ (HoH QR\a; VXrgM !DI0z}qo⣘^TZ:щ=.y#uI$? 
kʀ m)pV勣~Ƹ0EvvgsnbU @h.FA@JFkA%HIA8WRcޓ"K5%&MxD P)<.8[KÇ >\Ƽ/4]iOh.NA@z$ 1:%3NA9ϥxBt$@]&&J:rk I\  "PK!N$j:5KJ`50KzK  /Hmhc6ƀp嬿1NK:,rK/^ҊA@A#Pmw?KgxR"=/aS)umg.b*~6#A@A@$ 4V]yxҙR9ҩ%Jan.se,A@t.f@Z ƫ) E]A@A%)2A@uA@KJ@RLA@&BZ[UA@.)!H]R"3A@A@AjjmQWA@A@ uIqA@Aj" E]A@A%)2A@uA@KJ@RLA@&BZ[UA@.)!H]R"3A@A@j4]MuuA@sPjzo?VB&4ehjjoVZeCyyo^$A@Aj#퍀T^\\ YiiATxPA@h233MAT2577_DA@A@葀$#Io%S^/|~NA@J΋H\  @ 3  % "  3!HE|* y A꼈  L@R=s   p^B:/"q  =T\ħ  ΋H\  @ 3  % "  3!HE|* y A꼈  L@R=s   p^罢^RN7Ga55\uiYs]gn,Hi"* vY5EI&fSom6`tA@ ۿڶ& ף\S+':_9crq1]]Q?8[\\gNpNjUpN 9&)'p8 %|EsN~w I R- h)4N+\*Iߏ?U82!l\Y&8KK)Evf9'`QSE^J3?٩w}f}׌XZZ~xLAi9F~]L:4ko 'XQYl-^>lZTV4IowF4m㎗|F]rS8v/V}~ uf,ν8WPRތV5\ykK6qݭطW ~)=m4?jQE~?/}ϥ1iU` C|ubK- 6ax K痢}tr􏁧/J"g.F[%HQoE^,suVxR  Amd}sKƶ⸛I CSo:y*aos~;} ښ*g7_GZe}%?_SC+)8K}㍸nYX6砕$XOnNL9g:^;zvfw'Ou?wvOEJK*LS)jN^#%huXPƗǺJDXr|]2ʻ?BCm&2vgW^-:{-^z/ՠkfV|_.=\{_Q# *C:[W÷?\{]t.\doJc*_i=IV^wΘyqxay6+c2qN*8-h12hy-d~>VZ/$j'XLm ϬSKE+db%DWye=(_oŵʙ^UɵX-o|8]IfuQ^ Ge.0-`g^4孃2v=~w8}<ثuQZQ7 M/kpc]!zSӾ`>TKbk}7jeA^_";ցwKkj_PA Nr0f:&PXeyz UۂO\`[TlF#ҎAFTZ|1NɦXR) PV`gjjQg)Q9>.zR&L[WX3b K/按Azo.6Gx#?eXaY_!|XX;] ;s^xڌ?plzl^ -%¯?;[ջQn[x< }Z]7.YS;0u@W/@5-LN*55%ݢ@} >i Rヒ'xMLy8ڣO=LViCi #nS#N}aόE%dmߞ7̻+nlW 6!tD̺t>֗HC*/?VǦѸ0NLbV~h عb7v[a>DBKj5r(%Ҹqg]KZ AHHr ,˕u?fQW"KƢ0ePc>t 1=߈8jo̤JM^16Կ]@lrx S_ԵIX ޝ(Ůn~pܔeU9c0pmt#iG!qn1GFyapۃCw7,9WZ>7B]#po8v}[kX~n.OʘhU[_aDx `C]F߿GRS1kpDlKWC|T(ɍG<錕bC){Ec^?.'{cv27a@ ekWiYNjbX:x&x蛛1"[V#-8(bs5x0]]Z/Cn r`>=Y(pG 9fP~+ E; 7.uXWa1Y o>63=Y@UrU&Oiɬ6Ȍ8(P^r{]1tD۳!KAT7D`#<0&TXו˅S1t$)+#g~"Ӆ"/+Dqn?>'NFTĞ_#;~peKX߅C&oơ#ɰ6u_ؕ ÓY/ƶ<@cfx]}8Mu+ʼnM`v=V;G2T@F' 﫼yc?R`=d*`-FqU;sk V@KaN`VDCMף9 ЖdujoOi>D;hqcH+OkaBxЖpGp xW`P'NƂ 6_v"RS肸w`\k Ui>8v4EjKjHڋr"F0J4gmG+wakhDb`itupsya Ak1sn-<dd{Ɯիaju,,5~);]5ؒ{B>d0\okуu/GIQ)>Jo`KBj|YmqrF;'alA.-M0*<ȵoҩb7Ұop bfP06/FсM(G0=HCRr~yc~=<(hj4loS9|E,!ӆb},-Tc̄jK*rwM,'S%UMzP^7V&3 aIYv ֺj=LFIsS6A* _#Ie 9jːn+ѣ#n.H /a w嫱/%3l9ƷauSlPyvnnB_pSyU4OW&4МݑX  ;N$> %'Q]@si[4("2b.QmF` +Ofע8T-3WDZ3}d]3/%^x);@po;]J#!- $3'+lŁ-m }8:w(z9d-M;y6Cv_ 
mwAU#x܆P0ۡʼj7yaҬ)a&M̈́"`V**_pq_eYa09-|hm(w!k7)Hw&r#Jv`/؞Q}j. C7y\97BT~gjP_R{Og3/Y=C~QmpmK9,bhJu!Ϫ֭ s.=]laӐ&ڣ>pSÎO81f8 JV4qV_?6 !ԙLmq1Bh+imP\X\%c@ 5Px)C!ՎDuU*7A!u=r9)PtD_hq>΅?ciO&Z:K+?3q̒`_\%dAVfM= R҃V'4GQnŗoG+mВS@O"%| dkQs]{[v2F@dQ{b1kغP:n L sdg45Phv) R8sPP@$7IGoþ-rN4?YG7#bH4Rtԭ1H?!{| ;C:|(Mfј0.ƹjP}[>=l{E7nYvÄM_DzFA\ 2x 4DZ9hSЖ}SaÍ@ TɹF We![!j$=LS Z(< ם֜ eTJZ>ZpYq1<ꑡ ¸s?ǾAS0)Ht)7g߮EW`-K_Sx)ٶJKm5sGcxS3 qp*A ^㆐."d#E-Pڇ8pp.\\Z3ɹRUuކhACLy.ڕ~ rYXjz9VTz 0mY+waq Õ~E6!1a\jzbHwN}7 |;򓊠i3w'EC˩64R.b4dUq5.M4cqess[|{a̴4:{@: ZZ)4#O>S;g)oh€Nf^Em 3-(uW ҷ=:@h<LFQum ikt {*ճmT-NGC M wnꇮpv&'5hȡ2+ ?d%Zf횖S?ƄHD:+lp7=S eGn9&J'xRa}uThkАxol)Ž}_ЩdR~& GQV}}xŬe7q[0ޚ&jҐٚi.𔄨$=Kh!ap ?uzȪă 0Bl؋i{tiVkhyNʭB/] I ΂]ܫƒ?5;u\6< .B&2 ^\I2 Ҝ/ Z:>μԳ֢—W<Ɩ=?/݂k o:3:mj$Bɜ$v9/`avLa̡ wp;r>_x !j#m/ɷ’*hdk0 ZnPIaDk@ʖ& R)տֆ Tiȣ OE Tk9\mF!3^ .=#kd脎ߴru:,lp[C\y~}ꍝֶj%#Gx$B=MNpGN]o|CRLd!diQGٿ5FU4wAGR "f݋As+Q_n+֭=vPd+W 5aU-*MGڗmխ})ܙ۠h,C;YmD\f@ ϰuԙ!_Rz#:-;z$ۛ oy7Xٱ?gjZ"5gnS26&h]7+9u +;]IotDir`UXB!V݃J Z=oXԒmN0V7c}^;rl9 ^ Rx/ #p-)|o$=~5;dFwz|hJnp^G Hjm>lMwV#ޓ]/QoVK=1S^)k6c>[85o¼6دdVt;_xRT^6j} ME=vV:sю,U e%Q<333Jf '7Z6 Nq\ ë̠CLГf҆fM`n*y~#`/_7X|ckuM|_ صnNp2 Rþf5Voic aCpǢֶ`Ǝ#^[=eI8onnj HAK#4rjђU\ˡ nt*BuHw8{_EsNrmrOO8v2Q[\H= }--ƃ*kCIg2XAmBHc#^Y*1'Olף>OmL2tm0MEVZIE dPcN|,h7'S̜mGu;Ζ~ߜ`T*,%n47* :s͇},gwAyvl>z5IE&YFLE?,~RyBpi5(D>Ci[Y#*5&)ٻ:"qN P|0 P,])۷X-o{ Vf/B: ?ﬖzs>d\<Xy\sKUH?pu4! a ii\ɠ E|jaw K^C1r NOCJn{Px9ywfpb >^h71jkiNjѼ}8́~M4;#ĖۻVjY@8ԬEK+! 
Ac1lxܫ{w\Y}u _(.2H=UpѥzIQ`"w #slv_M KU*kDX=:h"3F~j)$ز14k66W#w$-M4SA^˹x< iz7՚0{>2l/vN,&VoeYGۜw$A 1Ũ}GJv 9C1ً nJØw ^݇a'KrW@]+c֞_6ᆗpldᩨ%~oUO|wi t/m֗?7 x5/6043',Cv+#mנ;~{,KLî u7Use0nl2giы\;< eg'Ȥ=+!ގݯ<)anǴDD¼-`8ѷUX'~&@IDATJ7gMC"b Q oGWƬ*1xd 95Nti%JQSkfa 7éjbH8  $?w0oSW@5k7|!<|% '@w%O'02n[ύ3\ic=p3>[/nB(܉9cvITME?ij&0wz8ۑQPr/˷-DF _تb [P"`F+FmOl ͑#wgg_%ӜJD|$Ӕ7&=x7_xz)9l<ݑ8u::V4ASݯxyi\V>{~P4 CK'`}qmo_졠'hh<,Bﰈam^_JIM7_>0 ?fOu9w%/8'bLRpJ"0bW|Q3&,ʮoLNY#!\x)i/3uĹ [%^Sj;;`83/N޳ģ @dF;y3u8y뽘=j ]E"0XrfR{AmZ)v2:'Cf ;ݽhbC44%3C+wT!D1Dk kLo0iR\ aJNl_HG~bDN +SGj Wrtp &:+cKJ( $Ņђ=)rݑ_RMG"TRLc;}NX{|#Ν[ac}wo7wpUFq9hfs)05F6kiB]0&L`jfb{NB9#]a%E%٩V+xzYB_zG8a1=d;W ԣh0N>bncڛ3Jdk`'4su95 Gx0ʽdzL(BqAނs4Z1R3=XˉK}O݋3*Vܽ#E W7Fٶ!.LDSʯ0;SjCXmp^X>??9#36'3МܽVL?:LnNW :jQ\Tf:}Stz0n#,ؙ4[,C,Eߘ:棰ʾ꼣:IEV'S>0tm^wǐqӇ=|aJGQ+ilۜo J613bUVYc?: x3?u”ic1G:ٝ`˧&z $A~O"m$p1Os5[*+WXdjuWp,:zxA_?5;+=L׎IGYF Ԛ 3:0^YkE& 妟\Țí󶜓ʨ580Xdl;$FݛO_qq$iam 8Xv6:.RcÐd:Jf,Mؽy#[m޿1֌%MBIbp:AY@.a?CFI#2cMZlw35ݛR,'|̈́.q"k?U(-Fn5]sn+D?$)l ǔIے;OrS2ɢ3qc(GvtZTh\E;7rQi9q:_7g`:\CMQd)tK2b }*}uF/1Oiq%*KabP9ߛsu?7WT~|0@6Ct(7_tؒ`O ^.HT1 F/OpDCH@ml¨VO;@-H幭U.D2.{m EAwdP\LsIʩ&8:v]9%z%& - թbܜz cC~']痛_A@2sqYf|b,1U֞  'mzckx@"+VȊsO.ņ]zf#kbo [*⫟x%EIA@ g?{+ioK5gAͣ+6&M5F0#D"q$s{ qŶ( @/%)H?XGݠP;dU[Mx)K6DZ-욻0T:׬. GįGjdـrh^θ/YcPZ^(/`6`HH}DázŘ?~|L3?֜#c#"&?ጜI^z>  +A(_xt9gtHCQ]evA)> cXQe%cDJpDA@z^-HUÑh]ɫ<-583H|C;{k u(+aT~?HzTه7!bljh8 -ϷRVb19m,B.Vpq]:E[I.)HY6iALX fDt 4Aea#ϖ=}&}1n:Y\x]v|!1^+cWŒ5G08`x4o~GsKP+jMƝDZ5uf >sΠCrbEYۥ ?~I&()1AkY*N;Ach<}"=huHg@r ilAfϨ aBVA7ՂT$kF>U8u v :Ȓk>HG~I ^6FNRqbR)3&2J+sF:NTlۥGeQ^X K} [ چWdN7R5?Qm#5*Ԍ(^ڔnaE: 2wVobPx OS5M ɮHpNW[S\J8)AYA4/Fl_{۶c/G1bHBp]1zyS֖Ezުfm)#v‹dNp˗"U Ey5V ”5_y] BW R~w BT'T`{Ead86@F _W?ܭ7![N!nPܵEc"8=cS] ,5SP0N2&`gԍI;*؂o/+i"c7c\ [xK @#Ы-ډ_>JCr7nZs׺Q#U=]s 2Պ6dVO.~7&djVc ;ltPۡ90rkθOc%"#Sp:ڼvk1Y8Qg[VMR7A G@ ĥI6:/~`!RNPu:MnMkv˓'<0n?(+QS'0TQFiqX.o/,FEiAtp=orq ܇P^:s mhȻi2Qa@ |{ټ*C" AaBɠŠ1uTKc2(#.mZ`k 2 Jg?8>0_IpUt73ҔQ3xa9sl&aTƧ_=~ǃ;j y ۖx\3xb<08E|xv:"16=Y` }:<!AWo}GVX)o@e4Bjބb[]5g  5^-H)HJ! $J$8E;}cÂ5sۙC?!FF?1QFg/jN >Q:v>>? Hݐ! 
YI1̂+}]::{Ua&4fKak 0P;Hl TF(Z1Sf@ LMZ44P2#s0zrIߴ' L11GZ姶I/jn\t?`mDO6 #GˑJlaxQ0he(0R7*2r3X 6:K7xzZ)2mz U&oxY`c \=1BiEV`zZ,3Q{4gl|7p ;;;~TE@PE4X֮mҳ!; .Kժ )E@PE&/uTsw q;UE@P =fjJS]-U0xѡh5HI%xdNcvo#ӱuZE)'Wg7QlO+t&3̚Cdl[|y{gwS^mAlL-T7h tC'Gߊ"(JGF4Yփ\=hW//eM#SLi<>6R~KoҎ|cܘOCc:ۖ|ģ/H83h_?oQtsxSE@P Y NWcL&<1mŗwz 7g*aL6r=[w5Zش;[v;xFBYMs\?LՇ"(@ǾvzGDtMgi.ڛ4_u; ǥt/>>}XI3).CUk/R^߁;!G2O_/;_8b\II0+>]hIʸ721{X}27L.5Þm^e .J÷j%TJo<>0Īe7OWHn -8H4~;{)bOIoFe|Yʤlr+$ʛ\| s+b[sصջ`͚Cj c}[çZȪXMK_?;PZy}C| ]6mbȵF0𚩤y̮ ?&GE@VX;=X!um'{3vYBh2@g}sUuj=5}hkơ:CqqmvoZ_zX9uV-oET# e;WW2HSP*Ej< sFz⹥ޱ HtL1ېDPZ(^Ky1>cH#4zcr,/0*6 ~ms_{IQ^NWv:qK{"^ Dy!D4d]uYWiFb#m?,>tԖy_ﰻ" (v-,<2#"^җG1uey3w#CS3t*"V\͛qrr+ %8<]{޹wbcNV)*lfɇxx"F=V*2s0:vwX,S\[I;>jKzLR=w2:~jS/*Xv0f./VKYIO|Z'$a`XkټMY׆tze;Y1c9svd;K)zD g;)"Ď;da ]ڄv߾}OVO'1ɗdbXAK)˾DD!ȓ#Xq[:h<%}UhC14g!mn]hTs+2iCX/%ZSCy&y!P냟۳OH'LD Gwi{TԞ7~lI'#4$^on, s(zH)\0ģ ٙGik{Wˣo;2Od.'C38ԭ["뚨;ZyI&0fݦBih|.JcƂvzI` "k]Wi:/9f5i4)4搽7dQ[@cvX}[.Zʋ-:΋2P2]~4hr>|}d+*rԾ("IHH`pI>g|F4{ɜWm{\| /(q<{jn{фYVRƍItxuΌ1n2.ʨ@oY xgL[Â$c$!V$)PΓ7O0_48"\(2yk1Kõ샽~wz/7RO쇮#${eib Q|XSXXhuq/_PLcS3L/X2z6wBz'%"Ӧ$1-볨j)'eNx;Bҝ!qx"u&SuKY(Y>ux:r=I!E`UPT@UXZ3ROOYQA|r |+ؗ_G;7ahcCWT'iP>lM/G".'=[GRZ(F{3S 5;8$Dl֒Ͼ)(oUtt"0a0[lQ8ɲW' Z2*$憳.έ_=W\=$\~vgZ ,?M4ו}"V*MrxMRSǣ"(犀M ٴ]}⦦&"QGEQlS:gK<ގ[tQQz۳ -޶J>3 *"(>R'T wrNɫkE@PE@8 JH,u"("( d%?[BtZن[ zLI@עܱU9 ?yz$DG}<̢@ y~RQBѵ'QWE@8GNvfKv{CZ+#T⳷R~?vL7aEuWgiw~ч< w~_gdCqnG=TQu"(Pӥтf[kɒUyf CǬu8;9l{!&){PE{)ZB ,ʪ,d`gl/OULQ7eCfo>,}׎T6iGo>t18kn~u/n]z \12"6Zkl m[o&j GKcOiKd,~445aqi\ԝ"Г TTT~f:::ӵ'vlJH}?tRa|LfP'UӋI<1ww7w\"{1o[)#_jJkX<1i| nR\sh oB7m{xەw*/2K9Pw5g :3laOR1:!OomN&N*lbWXz'JOK@/R&͒ͮ;ſ|0`:)O@#&g2(g@[[ I8+Fc7LT| >7q猱InhnRdqCxQv/IN踌 VPB%r#W/p{6b.:PɋguiDh \J.+ή>`I5w.vQoäJ2DKYѪ9Q%vPZ+Dxol*Dz XrmlCht dKW20cw"dNʈ~Aّf!YՖuiNgȾ1&һɩIjhϷU։-^}04G?U:H;BSzq,M}o/k;0z:;"LY#HO6 K*d5jRKj ##Ni72wu5^rO#=^)Sh-_ƍwzyNkZ}&[>+h,%1#4EVv42f4'ʼn6ڰnb/ڭCX [,ߛA#clibZZxǀHs\D=ltHs.XF^=hf*7 3׌}ь+}mdII}E@PNRI{E8u(.R-:R^_el. 
)g0z;50؊Vyv0euv2̡֕<=IWEйe>KǶ6-~g&%"3Nݴ_bCN+mtm-DjexibɳRW'}~%RrCs:>}= Z):*h0 'f[euRmr#^qR[f2ݰrRS>1N6FkM._,gՆTԔYhW>ba#Z=1)cԇo>-hX~h챺q--]F;R|el ]̥יRm93˿Q"@tc;ĶϾt\1wPv%v.ڪ#_#Uڨܾ h 80R?io&kďF={DLFxro jr ǕݍTR[v ×Yh}0n_J_Ţ-{RXfu={QVK{bh[o}ϊ"(?H@ dk5 E,$[wjE" ;:w7JCZ$BDSm!vI[cg{:|G%2a^YAmuٌoeח3 \-Rߩ~6HT*3nT8o+wpwT &qVFY3~>3wy/\L1 .pD6jY)[";ȳ3^qwMcP)s+%iqLɔחK:Kx=_}q? nD]ډ Iuik:aS*7J]M>/}oHY(Oe.Wc7<;@*;?-{jT'lFd +ؼ~"4 x]锍;[d,5Ԗnox`LoC|FF_]+9)hN>|ߗi㚚<&0k+;7 g׫_gJʾ+A_̜2C6^ɜOpP>#1|l#QqTD40oKn״U qx;!vPLJ]0B4k"qwF'3Fw7gnwy/4Vޖp"4k_.La|.j>+Ǐg\beoR)G u9MCM`] F9KG'+_/;C4x n/Yq+yaȒGU$vobe#oG;2J( pD3{~hmTΡ޿CΑqh+XMu/ &BD94o;f}qf JIkt2:>OKIC@%1!I|3 <|T2ZA͡n$ S":( (!ub.u6,Tp:'TSߊ֐ ӽ" cwLq ~/zO}dzۋf;$"+5N@G-y8&¯<--K{yw ? 2:!…a|zθk}4h?r *ny`v#`[ ki޷uMٲ<<V0vd>GJ+8ØaG)?\!<8̕FW4z#ntZ" A[fNctJSd玫_2W(yJ&u Bk\8:\îw$c+uV}'kw}V(+C{E*I22Z+{Ԝm2,XkQ2fMtdJpr0Y"(h`o„oU9_,/Q "! (7Yq.B$JIju :͒J,!=E򗼎^:Ⲕ h5Km*bbqzQJx Wm ]K=-UltLcp|] e!赲<^u7eE3KX}X|(+tzW*cj!*>@߲Rl{#n.3Uo%7Ct5Q!a&s?N[/% cTkrifO#l۶v12ʫC8!fS~!.TϢAtYRa Qr<5]I "#|)I{ɳ:uAwMd5 ?D$Qs0z)ڡ!iP&[Ap+ C)øxb䩙i4K >8 c mn7xBah.jh" r%,BnH{>*@'LgS?}:Ne/;;OOwlul1ȬʉZcc#B8 gv,Ʀf6i h3~7O${OttT5Ktݥ"NbWmt>{!q}2_~PE@P~y"6-o߾3.iA@boUԢ[899UrXE ,ӹǦbkY;oǻl }ii8E@PEBJD_B ï,Ŕ-ClZo*cԙ$RYHۻƠW2hBSƛvR]Hyk NhMH'fR3ql_|ۊs[(BNNxGqZ`F`l^>h/6b8V~} IZwۡ*5J0@IDATnݒ{?&.)B?"(")*5i(cgզ6jqh±ɵ"t<}u)l BϴXâvD}K -b뇄Lhz܊J6cCx?tx"q}V_|D46P]GW'.9@B/:\6npd jZ4\PE@8eN/yfM&=A>!ֲܽZI▗ hAٖfdA$INFD-iki).Pl57֓qrtb1DMl>&E4˩+]봢s40\r 8 ClחR7VSהK;w¯^0>.ߋCPE@P΄@Rw8Ѷq+Q'<tx5a:rMe:'a 9"A$D2fni"oG8$R'6;$,-t%3}=~OdnSE@P1>8 ; DO}H4hn>#fE@PE@8:ǀ1P){HyT8 4s#uRARlLf{)`PQ_Q.E@P.|JHgZLe"n~HÏ3;R1򾒜37uxvRtv¾pyr!Ѵ+՝"("pPBT]s6Yßq@$h*O;FߣǎmbBV 9l /[H|lu,4Mܸ7xqǟ't%D~guNPE@(!uEPH MjGroᯗ]9US\auȴőW\ch,-4RR|p:*K5R MR)$;'lӉVPE@0 (!u:Mg^j= tYawkwE_䜮9}٭[-BEc*'7[T2 BqN)n;ِqXD}" hC涕|vpL`{.fw+5a70a-[dUu+aWO#hַw}K]rjl߸ߟ!+g%d7SA ?'? ;X2w54I ݗ[.5)*'L'I0&܂ c_"($%N籙0c3Gx!:{j"^" ڝɺ >~[~[2Bua)W7Ȟ*&\Ig_ډ7 O,?^m7*Ӵk,6(Qjnmf*Ȍ3'RR:ꨫAvlȣBUp,O1BGGM4TfPRKei8)]=VlV׽N&ޓp0bGSiyYlf$}oV^5E@PE&Wkڭ:ֲ8sp2CA|pQjmV,Mn{Y_%/om$.bov V,(.G%N竧UE@8? 
(!u:q<ΕN5${ӱtk ˸$YEKNބ^9~[7i:^.N[Jٓvs惿~YTj:1&@BPEB"pEq! Dh-Ǟ/?76REZqN 7]9{観pM ^G0mZg |fѣsD|g("p!BJX=` :|^+ VNsgWʊ=vU&/N,\A`'i%ReqYNcvN++X丹y9wL_vvr;֮k!7Huwg拜?:娵alc;طn%_k'b"k2q@s:Xm9;l>)mVNJr-񵝳vٔ~remL:;0w)߾O;ь7\5/xwY9J< N 6a'_mcp b'im/-zUmo!Z`1عƬ[n1BsiAwZm6Oo>?~w?JonSy_1u9E殭!,c&}\=7y\5 Yz6Hf. +av6bIe­p-2혱^0<lh#BC̼\߀l-Po $q\wt~ՑH2i;wc`j>]lE2nX4mot)K2nHx3\mG߅ECPE@L!e 'gx4.^7>NջYϿrĮf7cwHi6=SH_`l9_^ Z1\,6!PX5g+ݟ^qd@i\ l_CU!<½p1USUȞ\3wb'c9q nGh5ʠ-9kϺXWt ۗI`lÄcdz[YRؗQ32p,9& ǒlMa}z9q9֟ABp0Cfaմi@Oc,"XA~JofOBk0QvSJɜ g{/RG32vʚ2˥X3Nmsh6Q"IJ4bŃlpm3$ɱxQjEGEԉD+b6#gq Wrq M4w,4QaH"h0DL6G(5f7_¼ٶ\kPZX(8d&BEWPU9 cG^V M$Z 8rJn|5/]A%(2.PA=|7Ocwj]Ey>摻2f[\BQ{BAE@PEBIj XcL=5dIZg0-$:?Z|g=#2"J:!ՔK7pY[5GTJԿ*Czϭ}i >p>_=UU%3oZ8IVL6/|(V'!&eȓv˟)UAĮÊ"(E@SXXhu 46vTjj+VK8AiioY/F2V`:",/%n9.yF Tʴ\C+rW~L9TԶ*yUx5PWuXDҹJ˓O42=u擃dp RGfnʆo呛c 5Yb>c j3nvDIk\zI|=xM 7>1:xHUv/@I|g[-R@jCrk80=Wm=a "("|eH}{vFzs<("(N@Rk!r=UXcDX('⾺ޓ5^N(J c렣܊N'_i>9X'orJCžJ׬:SE@`Jy|In[Zhݪy4M}WOG9X2>+r=]#+"on;+_|9JM/"eF^I|~D&˗d_ :\cuHXf.2OwtaSۜUӸן}W9ۊ"(]#uϾ";W%?1e7eqWηï&}^|~X}r0?%mC5dngj~eJhڊZ:q")tόNz>U?jw; Fq#JSܾ,j).?[>04y/.>LuWPE@8eḨGjƙkgoСoNcCx/-?%ύ;ο^R!0nԉVL϶GSd t#D#5\M`c=K:/viqq 5C^}Lm=}7Kv"TE8 7HNz("|!u!m غz0~Ϯ;PtLl]MG~}q GJ⦰) skNs $y?ƭYw3{33̉ѻ | fΏ'#߷%s=IsZ3(g0eܼ\<1`cwᢇ1`C֍Dj,!Eo9׷q ZtpLf W0Q><@}S)Yݏ䥿eQdrlo6=;j>Hy rPql6!+hU5k. 
m(siNdoٲ֋A}DthO; l Lզ *Oؓ<I ԉƹs{3Ӧ;XCZ|-Cr︃ li,ĬI9TQןIL{c 8Mo=M{x-rO#ۇqL^2aE85"ISuK{u[Q)"+xz.("UP1R_џ yAJ ‹;A{s ;xu_K3_!@rٺ{dK AӞKںC:@h7`Si$۩-acaKOQ%QBaζNIXROogi=f ")Vf6} ,{ ` X}fٙ^Mݠ'1ty~1*%svqyd;-z}twݹcžj8ڴӜm/!bI 킋^Ы;ށxUx淊1NE󙔈`๥ڒ㒃/2pK59䉫;=y|(r)Î7ֈ끃qwZih_}Cmܱk9#ٕ/5yOl'vVI{k)6Zm&BDAYd9i}bm"(H}'f4I\.UԶar|.]8Ȧulذ;q%s%LO-of{)VN̽ #j uk!+sY KT]=DC7-Kx>8YPE[E@R_qIܑߚ^ʠ+8wg+>:DFM 4,p5\dF(x*YJP\F]Z"MvX$'1#FO GVҴKaDiwz5P[M.ټ^ǐa , it(|-X:y!\H5ue谡: "zNH<&č" Fu۟K{rƱ,j,BWNIm3hP1K #'k5N΃Q \o{w=j$YчGڔAqO73BHVŁOQ]NtS:Nt8$Dl`lòqg"(7~cf">MmtA"~Zߋqco$s5aJ[GXSbK7_)fmJ\svOlYAuA-$GՆ>qRCsoī%֋3% OS*X~b]X[yHM+[YGqvc88SGLD Nw092}[5hlZb<Ci^J %a(J4b*Xt*˵Yx 5ȋXT.UQE@v7b0yz"+T~1c}JF&>g YǴbS6/hApw?wFPR-R]WE@6mX:j:_W׬`!YL+yxpC 'n{<&9y<<)7 'Wvl77xYoV#H?ᆾ7 Q~N5{qO:T,Bl&/OMb9]/U,HJK1iOk6CN rE˕?Hzx'5?OnD}7:mpГ5lpM*wW;:іWyEPm h&O^ɻv@5mQh| K8y+E68r,IO(WTН0k5sʇ?ȳ՘h_yN{S~*1Lv=3uUPEB쿎ߐ ~caK7[ "^dlqѵ\M.9/!ثX *MNN!K\S0>QyRJȇL 96*}۴?3' .60p{kC!R abHDKnnƀܬDN!Xz =XrH/9'E/Br˭69 ٗ`!??;؄BkԈA+X{"QwGQL>Km@_1S^L$-&H@EcdwfuBk!('é:-K@K\_:T-$ً`}x_n˵!۠%)Xi`ev4@G?ӝk[$dJdvxm|PRsWE@&Xuߤ5}sQ%lM"y^~f;%r0x3/Z+ۡԓx8swn]&0ctLjgUdE8g`vJt̠_r7P$di rj=xY2?5yFGvZ-\YrnP4yc&2fa*Nvy^D~AB:+Bg44M¨γ UMPEC@yY ZWGM$mʆwV XT"{۝E/"kvgQ)Rxuع5VܹOƄ+^gdTs앤e3ub. 
-ZL\l;i),䄬TmdMmF!K&S#F+QкY"(eGB+WacvWlؕB2?컊ɐp"(:Sna$LOG)jU5paj*\Aٷ3W~ŻagHQ~Tol*ehFykbCr{6s,UI{m&TKx29X8,JȘd"gk$bؖ1<ZŐs%dxbD[ړCeKsF\D2jjq.e09=xmYɀhR误y׉RkDuX[#e~Ge:R/XMN#AP#rEܧKMcF"V?~\K,=,(97=&Y`Qň@۩g(dDG댾lH,⨵oƕ?{([q\jR|Xtd~N-EosQ7"(_eH}N L\ iveW]7XAIڞ~7&ZZVP@yj)M7K'0w#f̓b>z?Duѣ<ˆBװlyMtj*[0FL'eY8Oze;-t 5v0eț5di:!I\nѱj> 4fs]Ҩ_~(@'^:9?N7EI|h~x9{>,!q&V^]KwXe؎[ ߊBH`?[E4VX۹j /xh-ͶE~.1yƏ$J,u?o~=լXs7q."Vϵ)G:>9,l8?JE\gp W}jBmvb{g\E|}cNa~;oNGzA B[c%V>*>Gv| (ݷ9c4譳*"2WV]v~~j_:^\QBQOwV?cye+MTNz)>1;MbԄ0^b>XՇ><<"$'ҬN>WP8eH}U_U5Gem$i\|}{n `K\N\xd?˯oEW)ْl-,X$:T<0E/g/Qǜ gjqK8M5`/~tuKQ6Zc^vɖ_=#^RG&T.{~ʊ-K/mitDXA:FA[?p eSWE@P,6:ͽԅ'"ff[{Ccc_wI̔ 8_rut(;!ď`ѹv&Fv^+?g[H2S|Z03opmf^Xd^ހ$5-bތ(6zdЉ`5DxV3 2eM?`k8:"qzj{;7~ӝ}fPN9j%.J/2Ih\&'f{t}w'lcgat܀!tkȫ(%tآc?BX4(kh{2'O6rO;szNZ[Q'7d.2QQh"?ѶT'3+18hX Rn)03w.Ygssz("5Pԗ[Am8]t$r'"nÚi')J( ꩱI$}IMpf>4M$рoQ$2ѝj]dlz> Ɍ]C( vZa0+Lf֝biCc)=ٕ"s>N07>[赻^b%s:bqVl廐]z /4_~JՑ'~Dy5/QE{٠-.̉+z%]7%d[b*k37uUWH\2*lo$=6;֍Dz+16Eh]8JD H>Ba;c턮R\8{1;ǴmE@P2.eo =YDVzz$8Β&- cÞy#']U43H2SM{xob{{؈7E|Mddžtv+5Kf[Xq4Dtc(Yd7O6iQ$#'/q@n:+n BMp~QC%`[<_Z WbFr5lg;!s!\S좨!]:죙8΍GXz؋$ 'sa+lggByPs .rRN<~ )naZ^} NsqD n1&֯x֏tTR>aIϫp)WR7nvuزA/v0P;lkUE@Pcvߤ1lpr%9,$\$Y1'7RϵΥLX~0;tȀI:r?dW^J7.b-z2B&s7S 8F2i-n&gce.SK<]D*6~8Ez`;+58 r-@gr9\ F .=@IDAT2{rP>CJ{l&ǷsIcDBpiׯcR]/o505b2!QbH ڜtx QbHD \WE@OPCgt}ܾTgI|;_#GmU؝ʡ'U8KqtDEQ7(N%YR؉To]ݒΦQv4(eHΈ]U~WI)-h)"Dzw{lH?%MT,l5]RgPXYwWNan -~Zy|>OaY|2Ex`!v'zG߽[^ WN pw7 d/{21-ɐ=qwn`c0'66~|hIg-~=Qڵ<'y{{.U2.'CbgxmQ*! j)ּ?!d$.a|fzkE?%fGT?Z("5Pԅ4v Rn楐a#I4(Im0[|H] DGq5:.eOp#jd #'pv?͉z_G붐;K3~نkC*we3! 
30ɞ\sIҿFr6|}e=u '|5'(OnI_9LƮ #H4Suh+Σx*gQ4G :[ !Ὤʬc$iV $ӨYr ;$`n.+|=H≕X>mz8R\)uoxrhE^f| w\xjF4>]4$πmc1sxdq"^V$MRz("iʐ4=TO?O$5v gIJs$&]H8K̐]_ՕF4 T]f;DI 2@y'JXa듭V*3 *h4'S@,I銋?n$H :3" x.H$cxH[l%K@N.:I}K|p#Ghp&2F ڍfc54;( >؃SN:Nwj+{h&гz*zG0G!ZPmK]Q8=򷡻bgc.Ю>ۚpͨ$bnje$vRymC]N̙rLMU$9~LJAW8uO=K*-%4;ELYn>><;("|2>M>k;Ǔzq߳sVWE@P8eH]΄6p4)hP{ 3>#1 I FO&^y$SEK^ٹsٿ=W3@wOx4"W`nPv8+ kL¦1BIX/kizWxga)U 霜z]iT{ftõg{;8VNGA̩IYJ94%ЮO4懇-P\c9i>4n%P:sFx5 X!n⥳7a64뵵=angg2;0O=RGx8!ɳI1Ιn{=p?v@q"bAޕ8wwrɁ#E{Eᡟࡓ_Q7=Ip>{r"(w\|iYhu2iBh Vb`@62\1h%檯. , 5F]lM.8=;9"(kf7K6nAxx0N!f.TPԙȨk"(w O>1ε;'E@PE@P!uz("(Pԅh*:%bgFWE@Pԩ}>z%oSwGt-!J`^$5Kwwd֓t# JS$]ni_Ȃ,"~ۅ{[ Ncl7ᇛġ[O("(_//{N{g'>zt5rJ駱x_?VCfsIm#JS M6W!T tHrs5v,cOwz-/5j("("*q_i1Gȯ~=qI~8vRs_ m&#V)"(! 93n>$'4:{ َh=1lY~[7۠KcSMSY|sO(v޻Mr1o m;D0>m/󷵧-RO}rrz7<B ALM%t/~ȶC4X| M."VŖ?Fiq Lt >$35Y]}RE@P!u_[3/$ʕ'IMq=>`i<ʁ:^m"$.զdǑa8MbƊ /yz*)iBo܂4)hhv $ʓ9CGf#gH~b<K]1FM&CڰsGe 6h^ɸ;ډX{E@PE@<ʐ<:gksX,ot?-E}DGH(׌,*aӘv%L33|fEWoQGVoSp4YH|:fQ.+*?vZ%J$5. 9lՆa.8enxf/$n^noLM\r{h4=01FRJՈ"-(ߴWW] TX vxm1{r.]'ٷ??x*13*0/ NW}B ݕIQ"=IOK2Oq>Vި }Z"(w2. 
$yrd&2Kg`PrI]W̧ {DבKƖJ}on`SJ~G) : b ,9$WH`7$۠sv:菓$8Kً6/xd"("AYZNgޣˌ[86qC[N}kIvz )f]ꫩw73vVw y =o5U4e61 `+Vr`6Zj()>SI6qgzzu3~a,w)"(Pρs[z=60Qk8(?ōƚ_uⷔEٸ*zwzGHC{Ր/7yzH?:(;Mѫ=t:n*"(ʐP=),(Q~x^sZB\ww?!i{켙|ݘJ:v%~8Uq}fW}v 7>n:E][~L]G)qPGI ^ztĐXB\wcLFK"Nqɍ%݄By"(wh|JiY%LBjQEPE@P.Vj XKfkɗ+mܴ`<={Fq`HUUKvEE@PE@PcTw쁫*"(WG@H}u,UOIăīij,*"(O@R_?c5¿~Ͻڄ?+NBEUQE@Pvʐ:_]%y?:yƭ)>|sf>x/]'uN/wl&RX<]r5YsȟZN[;iFil$yaDxoFS^3c :(>#n\C Og,ıLq}!ڳ9t =Z.Y+xwq$R yk<9UXD*DyQ NIxw[H"sG3 yR;,M̪j8O[6hc> a%g^̸i)SŐ2O>Oζ=;2ȫhzG$rU \:LVvRWRC)<Ċ1x9,<&0# &E0S,%&YM]ڷQ*Nb/KHdst+- OWpqh~Sű5 ɚG1}X$N1IGEb{4uޫʮ|Kcbb5Ft.ԙ=5Yy-3?jSڷ?<9bBĸTzxq3K~nBZef-fJ2K |32zN<X2Sޢ@yJԲG|9] ǂ?jI{i .۹5~Â0=Eoaw7i:<E@PE WV#a)sv/Y0?#o\0)/&7X 8e:nqW7#{5#]^ e$%~'q1)))L~i'{\1~b8|:7b5l-ɕOS}0>~A kγHJEDOb|$mvq'8v<ܼ2=QOC‚H qI%B#Vk6`"7ǺoD~G7RO#E"kᢑvk;D]Gvmrkg̵۷k郴ki~5L;š#鑅IMƨ)UixEleELU܄кn1a[Ϩص7Ń+P|ë*"(_ ;Wn^>kbkQHM^ZuC謓d+ 'k~毦ym8Cbn08x墥 0FETl4S2>fj;yBQ#&5tk,8BzXƤWRXMbo=d6Bʹ-^B:73tOUO_"hfULrg&DD>Q#M=nV=?3»iju~r"('/.IBWe'jVڋAѕ6d$cςMWɏ鬧,:[zr׳]k]B)gJ;?796xNctܻWiC&3q&D<?g1tb|6>tm`wg,I6Fs$!ϻ jͬ [ٳ&}]vZF_Eh_{&"C<]7;u6&~b'+8utHS Y۳Z.c7e\7PC!ZާV+5WdLP ~gbG.PWE@P.͝wG;; hͩN__?~NOH z8GICrڿb{z4^$MF*[1?XL}K๣7}XO^ʛK{sWރ DQTWVmնlVjm֍ N({F2 Iw?ϹI0@$$$d}u{$!Iyk GryE(-.C}E1kMz]ʽ?( q,в{r p:pB0ܻvz}>}}10RG! "\(Pc~WHmؔ޽96\}:f4l}tɪa׃i!75?vTeZڿy%8)JREx(P^} W! @h4Bi5|GoUӆ6'KċTDGhz^(//nGd28ЮǦ$GywoacEQW7ft^M2B@!\Gn,7x5 }tQ=MB@!гYuDi5[01B@!KwpB@! z=RB@! G@9[/[ ^_|+p!Շ_.BxJKKh"lذuuu*zj|"S7B@! 8PsŎ;77B ֭ 7?EoZ! @~Pְ._֭[|rRn*z!{_! HII]wgZ1{=Rq#(與}OJ۶R3! 'E@Yp- ::cΜ9 Zзkߗ[^.B=%fΜddggcԩHMM={ِP! I>|8n*o'^oI@ToiIB@N&0f̘NΡ%/S{=ͤB@! @7! B4C! yDH6 ! ݄nR ! BjwUKB@twb !p+1%A! h;%Ng/++AB@!p* ԩ-y ! ^՜R! BT!u*iK^B@! @"+|NEJKKOV- OR'BGz8p|>aaa e3B JH)tUB@@@Ol/)B@V hC24V+BRJWWWȿSHDGG \$B;0L5UTT@h4"88;GWHNTWWRyJ@ƒB ! 8QPz:USSC6T:@R~sBVPP8|B@K@{%%%v 0jOBBJ+1 P8B@nH ??#,P7*99ٿb]ֱիB:@jZO*,!cIJjB@! %%J9{!!!~nPW^jOV~S*! ԸR%s!EDD_RRB@!Z'S{"B@E"bKB@! BC0J"B@! @_$ B/Y! \mq'A! z}i/둗_FJ B@Aq7~Bʇ>vm#uB@!У q̇&Уk B@ ]A]B@! zRB@! @W!%O! BW!+Q*!B@tR]A]B@! zRB@! @W!%O! BW!+Q*!B@tR]A]B@! zRB@! @W!%O! 
BW!+Q*!B@t񠾾+<B@!m xV˦ݻFB@! KҐy*kYYY1bq#I! B@5VYcfk5DB@! @_"PSSju55v[(B@!З|V+ZE$B@!2R-sB@! hVI! B@L@T\B@! Z% BUDA! -!29*B@V jDB@! @DHE ! BU"ZE$B@!2R-sB@! hVI! B@L@T\B@! Z% BUDA! -Z>,G݇@ii)Ng)P/)ILL f3Ұ\na1nxۜlUU|\p|aaa <~voN;%B@Z~!eZOmƽ47χJE -[3轴ƧZ; {%JJJPPP)'=rT433Sm,BHLLl%>XrQ'xؚ#ɇg!WFDD`'\u}߉`C@u'GE'[MG;l3IJ)B@tCbꆍ"EBer2m#-(-okH{tNMH,RWBȠQ$%! :X:%h+!!؆F'zZΎ@YI1vFħ".&Nλxbuyض -8)iu|$ERd"@GV)Oj* 8U*02QQQkhP;VKW4 팷-qTK;,W9(š:)c,$E3hp} t#$Ćra~Yÿug-8橘sUsRmѩQW| LfM-ngƜ~mwJ]{-.8@,GfO:|>/<.@j06X΋Ca/7]nz\mhS]c_ {á%xK7a/ _?|v7ƳmҞ> 1{ Ο+Z(H|>74_k:ɳlN|dq¹q.xj2ġ' #:m}ejlEۤsX6TW:! @/%P]k0 j77:i3~?=6jg+x^y߁P=YwfC_`K߅Ϧc ETgOufzb>߅!e'"/+Ǻ^FF /N{b;":$/@̻JYL@Z,U = Q~Ѥrflkw:z!($`)Zo|Ч|Ǽ ؐSiH?}&)ظʉ/W\IcTB{UufHC4ʩ~4v|w.%t0qqoBC^o<39>Z d0.y~LG Q~p=gXhm >Ϸ1Ӛf9]jV.! 9-@ ++OΧajmQfFgb̠$H\~ǧjĤA>=i:X/$_zּX~1qxk,ۍB Mn 1aPm=Jr/3܇bqSA$ۭS;d#VFeiHM MP4 a~~!>1A@}N K bvbⵏˆA([37Nnگ[ڂ8 :*^ v/@#B5W&P}} !VG31 li4cfn5PhnZXj(>G1A}P8eK Y^ *bŇpw>^!Q8IЎj7HOm ̈͞s *\>U{2CϟQSEhaH; #5,87 \WUB2Y?{`֠ݚR6x>$ !{AJaHN,@a SqU==ը)+>`.CjlH,RgQgYa ;# M*lH 2D 3W᫼lb)C\gpݕ݌U/M+:rG4 5>mٵ+oK @`HG^9>d變Gs]_!p1iGN7H4aC#֘A$ r"3+ 6*@G#<55RcR}i x 2@-tTV6$խ^58")(HG!c}PM[7Yo@y1y=02)qlza6yAÒ1`ȄPR/i%5NTUnU,|3ޘ @p<<CE._NGk!vD`~=)FLCTv"s?T ,52ТkНip}/Ve̽o.M}o݁:^Znb13^{ 8x on1T+ίwt37f{RIwx{ЊjJŠؾy2Sbooz(/3:xFď\oy7m*ܦmsנ3:``'`#C،r&ѳ}f̤BĎO7jcPlƛXԎȆܩ?zv(lvldMWرnݩV|n=?!7C8piCo~Ͻ4[5?Vs<4S-WiLوM #;30!t&ޟXEG`IډRИ֖KKVݵ ݁J8ԀٜS˦<`G^*&v?:NG^lo!ʫ<d^XMdz>*_ڭ)ߐﱔrMKoaſ^n¾?t_OX{eÑ0e_h?̲QkC0(p˰x{cy_lu^ñ}Z]L,-,RߚAর7K҃ǵ\YѮiץ-_dćρ]^¸~m//ۖ ܐfcs#X𡆝>\sqmMi@շq}Ёa>iNjΩ}Upm.D[&̺݅ѧyN%ўjFk5b& ?\ye=^I*4,̌/p\H<`o6ae.L8݃]+:}B\K<|v.WGo#;?pfFO[G"qJƵ7܎FuەCzxoj֨ᯃ?zyD՗W.LP ^0w͖s)͋YO_ࡿR?=H3o/67Xե?=[WdY7c'*f;b/;y}`Gzmx;f$.OxYxi|v6x\w3f]W?</6M} p/^ ,RZ 7Ϸon=9rsB:GVeĎ :\im5cc5)C&ڄ5{u\r1Fft7<\)CÚRN# r :^?4GtvuFxxgqHwsw>:GS884hbGqU^DЪqҩί h~/g݆$0܀KÅA~Hfـ6IDAT <4&鸫ø:/Q4<:^>ił*~.I<&bߥΦ(GuGkg=h0&̹ S)MD` ZqOHJ]"7iZq!_~Z!~ XSIqw9w ָ9c@<ӈ(Nlo5;Zp<w3nxE~ =n0#wR V8"3Çh\8j\dp>0%30D$.Em#d dO @uzH:2諚aNmB'$ӌ֍ӧ04Vz#-~bƲ̘DLeY~{L`Nc}M,XZC>?۸eHR5Ya~`'Fp~Ai`,7]&ȈX tb ޽Sё8})gkϚ@ u^[b_١Y>ӂK /POϋ 
s"kM?Wҥ# 7Oq#JSXm5Q anQ}%]8g6v6c6_H:Ym@I EWş?JzO+6ۃQg;q:lz+^Zb!p/Nt5۵DMsԭ|@Ç뚔1W0 njvfcGטu+h50U)Pz%}؟q#3\# Y9ċ{_J ߰` HÀb޹}w5Tfȉ447mu&>dONƝFF2ŞÈ.]kĴ|,+V*8D0/BV 8߁CH3ܾMf,|ߌB!tt1aV#MX•ϘD-ص Y@see0|8NXܤabrߨ!&d Xc\d֌?2ns`(r6߹ 67?́ ý?v_ރt@,+A1xi5R x QCuن@FIE ext̼;5/K҆uTͣ7P|MA[G7K?Gj*cH<$%ݘސ6ijcwA[\ZMu=TZB!UBvS A1Kc`*7Ty܊Ўh$=2g©hAOLHVq.IǺhߵa F%>J 2 -I )ym#ƌ kn2a/FTlft3x:suO9^5RŠq:*ޛ& xxE7EV¨a[<R{0cWRbd;UmA,3#FKZX#)hk1m6طeXh d[Xy> N$'B2p ÿDėB@|? )ʏp)QKi ~p #]V.'-o.,kA '.bJ ׊EN32c ͱ4˅\ -w}҉s|@k _3c#N2q ݼkiS^K>ЃK %3RzZxZw4S.5ZY74/x5 X1捿Z\7Vi~2Oe?wb_6}l0 =GD~O(r :M4c?fF7*8Hr]ֵ.h80nA2,"4ÄZ6F+~Mî(=A !\lufZ*7[qB PHC+[+6,U¯V?RZJ>eVhYS@qqy~Z gheIHqZWKGMx؁.vaSK3ބy2-s\>}g, >{قN\S'&>FgBZj7uN\4mBKpu5NJ˟5"BŴɉW(vmre#f!`ǭt3/"/7Fϟ@:mڔő&e(*ŭlM|NK( CtgRc7o.ঐ҄yXE6q \2p5i5k1Z6B@t )* 2=?59ًܫcS\i> U\ƈ7yy -\eE&2΢{~'ĆI M)?x'x0ymQLPbBI y^3J-ȿEN!Riƺ0 tVZ}IOP s:U:GzpbBȀf-OVD]6 n,9etul88j#8ubA N*fn@c޹z9SWtQOQkS=F_e_5bwJZ53q{:r);J[ 39SM֩ Z6Li(cDO~ٷqpEqb eY5>ϩYPB[:VE!C6 sΔw.ANC~jٿG2.>6>,cReiKbҨn%ժ|BU֘bϴodZ"c&hHܿl?9%טRU?fϦ\PN9E42Qn |Ă>88%0Rt 9PsO꯲lIU<%i5j))P/8'T['[Dj)m/ :eS B;o[T&*pJKbu^54ޟW {8Zj8urx<b8u`k8j4V잳ݥ:[ruE뙴PCO܆]AnrﬡeхLU zg'▼a &TTvYy5ũ\Zb.`X?7ؿ׋L3:^]Bv FߖSFUHM"i$;贈Ĕذs-VYB`L/nӺʇkJ ?oͩ*E KR3Tg{1֢/S2"r&.j:q/=f"tQ<+ YdzJT[N'UsJ >0[P~4V>Kt :Sp:)ܡ>9pf+ީa8qH}td`|y=5k*hNC77A֨i{=ڇJ vs%wBߖKF둉'DZt4)wh+DbE6OIhƃ ґCN>.`Decc ?N˕M_+52=NkY/l0"(ZKM3`>'np`o.@h:Cۄpr:5i95)-ftrESs6pw,CN:Я7⓭nܘEtvmĩ=>Y}"oNװP*eMg}{ҟoNoż[lirbG:'&8ե]\QAᠦ1B9z'.8\F-;-|J:[^C\Q6urEߋwYnpj4G%Sjò}T>~΂Ƶ--~>X$i>E7IVo[aB.=E<" 5i>a8sA>(Nj-t6!iŎp8+hV! |&,:Ul=jt:l; $3>l}9^; 7`ǃosꍦ@o&qE%w7ܘ>ɂ n$ XMUd,@HK&=Ӗ&^l-"@`ܷwYح& Dg g\@o- |N[7LWZf0ڹO-xȹ?x̃YeVP[_r{.`#}u3 _1t:lXcɬ_SAOX>bῺ~i&Li o+wUkmȖUwV[s>5ӌ% =9XTɼ54p c[v!ܓp =\&-- .%`v%Żv>(;%pBB*V\BGrI@8ЂAʥw$Oq@箚.̛4WqS.X8;MXlH `Bgn"B$t;},d[c@u8=H .9F>ԀYn\~*id>S_TZJR:$go$Lu7(*C ۴+,7ҩL1fbtQ 7RG0N\V8+13X;:>m7GPavUgM?\7VTu(A 8{%g05Xu~IZI)@:-G(/~ B@!nɳ8p­2R}ڀZL !Ѕ.[ Nhj/ _nWvB@! cEB@! @{j5F! $ BJB@! I@T;eB@! !%}@! $ B2! B@> B@v!NprB@! 4Dd[#!]A!  (}sJB@! 8f;p>^B@! @ TB@! h BJzB@! I@T;eB@! !%}@! $ B2! B@h B@! 
m$PUU|t]`4a0N>AIA! @~_]d5IENDB`docker-1.10.3/docs/installation/images/win-welcome.png000066400000000000000000002676771267010174400227300ustar00rootroot00000000000000PNG  IHDR^*sRGB@IDATx|TU3d2{oBH7ATeu]]˺uUPbE:"!BI{sg @ef{}}w |i/> Xr3\3g3;k^hRK Y)4Pt@`)9̄DIvR!O! :5@P`NkN/BOOkC/"r"Dx t0 1~ (Qmt}*)/pr .*#4ǘ + To*4 Tv]vcP/)4]/v݌uk|:_O, 9DT$^ohU@"*^'Gh`&Ahff*/@K0x/]Q377"#R!G`]˫dWSY)Mk+4TY륝tqgۥ}mx S;a텮+̡XP͌ vY6 ug>l,_?Q;VհP~jhTY[:rwk n7 }giiٷ XسGcj]MT讅^d3X}~QHκb>}n-]LD3=; z⤤"q+%IiQ>@.vottLK4SZv/66-?c^ol8? J18*?,nہ>hbC' p'ph>k/EW W=z+ p4;544^Çxv["qaee;;^#\nHĊ-%I I$2HM=cnW=\ʊjZ.W8N~[T }Zrrd2iV 3R]o`jF|W< BsHJJ0'!ӦMfs' p'!9E:Y;-N$Xuu-=[] FTT8/^ p' p@ Q ª2Akޗ޳gBWCC3ُo8v(K0aX@vJ8{*|/' p' p.6=AN%f?<R5הt}*^1$:>>^(/"aKOzlȑ.8N8ۑo?i4gzzcŧ_<2Nw"ILɈ1#׫ZoGмΜ' p' <^_Da? :1ΌXJP8,rFK0"??˛t"qS`mR/(?8NnL*sG>'rxM [iؚ: [@[+S?E~H!T{4LĆFD]0MyW(c^-N8N7M*..s|n(ǂFϥ @e9 vs!ȋ;wMUS>9 eE߇N]ROz}=Ԫ;= KnB6'pT:W‚w!Yq١ցրTBk1@FƦwԪFIb`zo,hG3~gBux6 ^M9{r|sUBӚ4Ca?i)<5FeW`G- [4I^)1c@_~'pS(p%|\g΢u訪BoAG~9ŗ)"+-'pkhBظ#c[Qi[+}8K^A ԥ1=z(K߃o_b ݓ+R\ɯ}-v!EZSIR?_ 31i?-v,4Ewvyo@gBKZ89s!G_߆5CoI0ݥ^-8Ϟ!UqKxޜZQVԈ&UMyנ!Б R9C:9KыHkmm-;gxCw})6<5+~K$!f[V7#ӖRkmSq@w FG%~e.K#\"xIJn!zxi=:R׀  "3 ,4: ,/jI]#yIa߅xZzg0Qo'a1/!{Y<"8fBM^AIӆ!r,Rvb`DE BАX8;7֞:wމĽ9$xPZԝ^o 0"?t8|{3;4}MjlB58%10{0dw9Ҁ&Qm3k"رa*\BQ'CKZZb1fAI8n:L}xbj\ :lIR#[XPNƓ} /82p0IapC vu*Ԝ\oy#㾑0z-DKQ#5:ZK[r"ui{S!IM\? :֔~<>VAU[ >נHuQCj ppXފ4[EFO 6!SV]]`g+&2%16qL"z#`#O7bS!t$U"H.x"cAd(T64t`,Ey~lڜ ڋHD}S.c3T>Eqs ]p7P}۳P߬A~FOZSH$Q%훰U&jGg8[Rs]"%"u?N +5[Jv$#qww6uڑ~sF8kڪQsf֭F]`ŐM:?X,B,,S{!h$!_тܣ Gڏ- weA곐vPNXY50,$IߍՉup4|/G27sm۶&hѢ"x"+Z RAdmYyn-a U(<҃v= w[fZ^,  .vl5NΎܘFF IG~?l>44۰M3󆗽Ƿ`$ޔ -1?Gi6<}cG"M萣,R&?>>vdE.+Ra&D b C^M8UO-$Uu.1udLY98%#IqM2F  M3bi{}gN{cL9/ >4MBOOՙڋnߏYxوs'$A^돱13}5y &CٜvC3유,JGȐŸH!!eGVKܸ)r6fNƖ2? OLDyM]'z11}:m9&]-@Lί)i33Py3iEa5k`F5B^<-pP ԉ`/ 4]NћL'p+ǘ܍롌~gN(lʓ8tH;hs`XE0y`sqlVףLL ey8܃Xg f5I= 48Q?( AYPseޘl>- ;dFTȑ. 
o!3 Ra޸o9K x}'B!H4M({U'z=6P;E:ެE8uN~恔v=H0k3Bn*h!8[% XὍCpCϔ T D??~SH.wSOgN;eK[j"DN3xfaDs%$@\kn1^vִG(EuluwJvtKUלK(Փ1Ȯ6i N'"x1΢ h[3|c G7m?].o5z-c{U5ըhD3|KG9z Yh&QER u.N<$ A]-Jp / JHF]t~C!OOm_{zXYf5֑V׺U0k "V>*JhF(6ʡzЃ/ l48m2ˣ> 2WfNظIp]WMZ-ɜu;e#14|Mj.g;bd#KAIC$r5&⤭#czߥC٢FMZN"9JX0E {0M' \':lEVFBwc(ΐ' Ld8qN]8'+Ey=iaK>g6Ej~5a'RRCfئ +ii3.@o['2B+%ZIUPӤ2աΡATNQ&zBQ̽Md 1͒ Z( $r5ͭcA deBO6 ?' ef4?bMIEEHLYtfy!u^=[:k?@]@fraA"I3'3AsQͨ?\D?6|REi(.!8˨|Mr:W_PH2 = =HEf= Yv|Auq 7&9=EJK&YhGUYdW0j~OЌ샻hW;pǘ8r'QҪ!PP<8SEѲ ?- 8y1.}%ܼ?;C!ˊtǢ+SHEؒ)Em;D,~ Z%ŇmS:iV4Kw,DV4sPƒ@-jFu +B!aN8 4|,?/LqpXjRzXoȧY4 TxEa|Gx9}e{N-3ƂMX 4z#$x8xf1:OLC$G$03],/ǎc(*b,оSSbR: 1CI%-Ss  w8o v~ ×4W=-njUpf>G& EpfI@r F$cyBR:_^3y1 ;?kQ-^ȰIۋHՇfJ<ƒfg8y%%$a'bX0Z3ih?$R+<l쀂]+.xOC/v))XA]}AAWEy9BCCg{ҤZW;E^q]-? pK߂۾O|$\C;)8#PE t]":p8N8NdggCD4 >+ZWp, @z' p'  AԪKޒ\F' |l~ibWK hțD' >,%4Z2{5cƶ ƷOyM9['vkV׊84s oJs' pMO/7HEuzо p' pG@. fT_K[?/Ջ p' pMG`+ы6离B@' p'p`/V&tq' pMGsu>>3 p' p+++X[_ijT*HjPᜪs'0'uay8N.$ #yiMbɧv"{a/N7O= fJ~8N_ZmV"w-k o(Gi䵒1avo].i)CEqΔ 3 aR΢ERX}3}\̄0|RH?W?Iq%Cڀ:[x;t *tTAHW m@w9z@]UOO{{^~h&z:9 tK%}]س1z@'yF,]n[z;~CvŸn:Ywvf )hkFMu : Npteο糷W%$x qxs~ , ,~Ӭ,u} ^}FO@ _oLc;W` q07c||s32ƿ(x`x.~{c8:tG%a`Ѓɹ,BV@`ٿ2k(E-Կ硪E` N}aه pϺg"e wHEY'iF^|s&vxp.lHS(xӕTN&G1O9[ 7 jTz54KCx 4*|`g /өh+=mx`HT(BCॕIHoKVobd ,h?N5_-֯wsR)!+5obçn$6h/y<SBMp ǿ+<3>eyt릻$@$O8Fa\0UH5MF^\ u;:OtNKIZM+Hf(Ai oa([ԀUJVBV[O.PmPתD3AOCɲ&5A!EmJ6xs+G8y#(|2mbRw8ow B( i -XwZ,$ @P `>p]E7).xݤ# r?iPXD J+Q4,2t4TMw= _D^=*F nD<0Fٕ?#S͊%!arFT}oDre` sqyc&m}| @oEvgzxu]y4:$܃Aa==%eM-$Zӿç %.Td:G1ƆRq>lr#NhnL{ ~ۨMdMmbVD} HJmf8L 1P^ 8*xuT]b`K.Qʎj a$߆egp&_ &8t܅BNFqp |rLM(Q+݀GpTmPW$cYsTuh)ƚPg_sPxgEY~f? 
HjiPc(cY܃j9Щh: / ƐPfcI"1yw= N'9S,l|p OI@{x O'`H e{ŧ3rN!H3>XujQ Nͨ/AQlІ$Z86.,yj)V<Ǔ(b1(jF5Ǿ7V@C8fi)^zN Xr +mIKHATs0I+nP7/#yt*F(-jl^~'1̫%~c.C"مhѺKuRYOC\N.FቡsgcQyץ5E(V Ɛ ӌ೷WagF>'}Du'}Gwa5 +xE%,zd{t)dU'zԈꄕxflT!rߖ(43>BPVFڒ?^&Qs[G+oLF PxؑفaK^C[xffg[1cs9ޅ>[ɂ(?>zslص(4%yL 4栆JU̽%az sHoB‹#pbc 1:t r f1CPs~Kw * ,~y,}`&Ɖ3#߿vم"+l"i^6X C?Z(7O5{{*6z<3p,_w]|f!(|g!Ϣ>g|\};[`LQo\xe_QNG7m-N :, kބz -ؑ6Vע]ފCFp@A ys&kN6=*>~ { ^Ko: a@M. sP݋U+SVG='#.$]&3 槰=H?UAfq:wDBļnNV@a)Kg"fyPXl!f6f=:<Oz73 /T_% c"x5Tߑ HX ^}F|1ԡ0 E#"C;<;S߀Q@u+#ܧN$ *B &h4n{ɪ'(a+X-ۻ!.pQOZ!6tNIĽiΧB*!<@3mح2 c0T\]GZzb֬XnN.ڊץ!'Wd)ޚA 6 ,FIIMjΫ *[jP,)cȶUd@\q 0,]gmߦV(ɨ^cb?#$ UQ$ ѮgX~+|*|˥\KF{*AQ. q1<bRB%9d@;bߍ n$2*P^)&xPMF4F&w% ]ecdd W/^Z>N ̘Y9Rhd1FUwʝm¸DGJ٣uTpVtWrJCRq.}yF[9K 46K>Ǻ0l=0*v<3lbYYIL7IkCwyU$ RF946] Ciޝ+~M2jĺ;N.b֕H?4kƠ3D$(t [  T.`L o^\,ΫM`,/o`I/"1Pn8NF%9̭2(bJzpV0f!"Z6X F(\ 84d:FI7\Lo~݋dٵ\)H+jl&M {8Gzu{ZΆbLcAm`.r#i:nXע:"`萋z00$Xҕ2i.,-`GZP[@jO3]q 5VUFm\ahD*'̌Be!XQwS4Ӊr%ȧ]߂9Ũ,Oú`c٥JHO}r[17QÜL+ߕΰ"ժ5׉퍓 J(QރЊpٽ4]xT~#֖$́f*v|M.<#?]uGΞcH!tan/!3k+ dyIcFaĒ, ۛsPXK/z{ >d+$OO)TMDCACFxi%/ .np%-[(ѓ&B[XF-!sqf?,v6+429’{~>~pn Up%{VVvל~N cB@e\n&j- d*dy2N.zxh)a>X3u%ȿs]Yۅrv3l&/[!|@vzl&ԭB9'ȆŁ $zjdOiP%;)a1N$ =?h Ridi>NtS8SOFFeֶ ٨knL~Oۊ,ͯ'ZBVn`#x@5-ieɸq1G+tƨ 5lm($667 %H{'#xƶ' d^`RP;W}**܃ byOX'GbLO-ωWF`<-fшTC*cpH‚)!#ԜSH/zBdBdcw%,ZhQ?BU@bB%\yfLP}z(8g/dӬvSiBuQWBe#BIrc Ӑb̌:SHkE ȶ~> $x]}GQUD۹üEq`m¹4PE=T߽Y}/ҊĜ1iACs Zۘ: BhLg{R}m{hQ5|ѣ6N&dl1{gAoXʋ=f[ɍH Z/m{N3Z)!č|OgD즇IYh9^SNcݺ]4+ ~'Yfګ]ƌ;A5C:RZ[6P4y 7IR C=gOBXm*qR&#-=4a1eXLGvvFsO!\oЌ0f7խЖON N֭Ԓq>v:B=c&_((&f(yi"M+5G #}0u7.fqSB_${Qȡ$dzƌAd0B% 5~GJ$cDBD0PE"Lm<*;LhgCM 뷝D;¦>g_}9M;ڄMgX}߷OyM9_#`k S͞b "ĩ[=oWZ[ #CU2efBio r[&Sy qk洀 BჍ0($F\  JsHűUgQù_4 Y){6zˊݍ4|7c汰tSinGD0yc<-}G!JĹԝX.wdtt>DZ*rڠHw{jU5|n&zTm0f-y.#jO֞cS @hMق?u4Oj4 „f`#a,Sfb_e(~?0'aŅߞ#ab;?P;lןfE tL&8'Tf`|}s6 `/3pq:|Ͷgi"_ć=c/z $c>'[ /OθL<ֺt,yv>8ں~wMè)T.-cmFmzd%D ‡ƬcJ >yo(ɨs` "ÁҟKGatըٴ%#Ka.1aKF 1j=髙5v%{(aC-Slh5o#ad|D߽ ڽb`+}of8`YȬ)xX]ߦg>w^4Wk0nk5?v3;n iטWV#~(~ƿܕ3|=ϟ%ꓺC\zbbWw2;t1?UrBw1y _lDRq\*^y0b6f:|l/ba鍼7G"ĵoI?2@ 
H%u[%;8NJ42*_@IlE(2<QoDGk!f$tͭaK\ơK3ӐPv@9Zvf%o4HhCizT~dfњ ma"V !2sZ^Bn/4OASudӒ+dkE3w 9B7DJx ՕA24Q{kgiE~tX%FZM!їڐwgSyhi1Yd'4h%qCN ~kHH(Y/fZ"t2 \lGY*ݥ(W9Po-?[2ꙧl r{qX2y͐4rО5lPbI^{gJmdsa1h&<ӵ8&'T8Tlb%m-g.!Z|FE3*Y#Zidz^HЄ5IdN Z͏#W*v}fjļ2voSdZ u4h{oaF7k73Pv}dql`AgqRD<Ȓͮ ~N1)[ـh蹙atc_ݣ>Nc]̋)Rc8NeirZ oPUUe4qrr4x}}=dpW@|''zK8Netޔ%8N8sJ.%HfH p' p'pC pi]ohn$O( :$3Gr9nc]6uៜ'pu=%~А#~|?O'#}.7 ĹSd+ڎc> 5T)mz_Y ŘkȲQțP6?/ N\L0}nKk"O\H:l V(|E mu O ?v/x`a_av 9Šh7s'0`Dr ' hM+IO}{J{ vܱx>F2/Tc(\{{-hk,RGs9O%`v kLy?qtq =٧S9Boي ȴM `0倵2e-)^'( 8N&% ^7źhQ[:ScxT0LK-00&@cY= VߎJ{' XL :YiP cTuFuIێF!:43{#Z^O08&a.0q+S6p#jZm#-]2"w'`v/|N 6T-#La+yEi# naBjΡdd}K9̨ ;>"-X$>l,/M/R]O$"pjO#53e cP4G"YUFJ;;pNv elAnkgPżX)ИWDs pQv3Zke~y8B@Vi0̔C[9zplW>R-^׬EP4נ$_p<퀦8Z$x5"/yy~\ppj{i4K Y_Ÿ cX)ԣ1Q,}"KX|Tl6 "Ղ Xg`ׇS슅齗!$@z=)*!7̜{=s }Iܚ p`ooA"NXgZ( k[~&KU=sLY@3l-=i>ſ|cE vx:v0hH&yOg](L͔Oղg+߽][xhN W=9w]ޞvbx*r?X9z^%sۗq$6gn"9S4Pӗ?jQ*k!rtpiv!);W^\J^a1*CQ,ЮYE],|P- `N~%m艅V[q\5*>ĸ垏+Y W05/iD/Ky"k%Sݳ&3UnGG<|ks!ն" ƶ!Ef=)"p ȐXD?m"6ό96mGZ wqp=]rMIƘ`:o?+:?KZٹPn=kFFc(1Wsxuۺ-24xW^\-'hYWQ^O~Nvv[g5x-JWZ,v\ϖ1k_Jᴌߝ2빏7o;D:l,M쉃,r%~Ï3ژrp-lӝPٛ#qs؛xiJ$11O-I+e!`YtC3;c,B'mT6N Ϯ"p p@{f^xk֧2aZ~4ytb O?)3~z14h7Xب(ԛ*Q>h.6[N-vz4(wp "LE"Eyj>gD炃k(DiىdRXJl} Q`ӂNNdQN$R|\8?yw@ \#Mok6C_Jw,s %J:̧ackAua־e+j rp#\gW( v㫤(=3.!3rɺ^-~[߳i@DaoL#;87s;.-%R5?~/Љ5^t:mFEkqriAA p6){[do|["0BEN"h;c\I?h-hTZ"7Û-.pX O.H_;E+`M ɩ]9zVg-f棈gLH_Uý^A&QڨgGQc4ȄFR1*r!Љ}%^G" 8Ξ kH!+חϒ tr"CgLg<0S\_,Qg'WĨ+G'/vlްA3(>|mpБ9Ў֮Gq&EjVmZA :Cqu%TeƻbOEw\7a'_c̛ s̸N8ڑ%ËunE@8 ƢI\j}n P{ { YFÍ=k96heluݛ@AgMGddAiA,J 8iS8*Q,l?ꩫ+# jCpx{jۅVzQdaE%)R;+cy:"hVxԯ*J3waW|,j0&ݟ~cBhy═XDJl{EJC| r M셇fe*2\yTIP.>& 26.Sol>UcEa= y흅K$L\&W*Ct#D|B '_IĶM2/C^DŽeD'_A x^iRsY""2[9ir&#F-C#ƚv8 `?0 `Aw$m qe.%Z}nΎ :~_@K<(ʒH9`s)۸ᫍ3J\Hlіp8Q*=D\M- %55_Q-+ ג E:F*!K(vVy]:CP.$UUU쬴w M.RB@mR6>/;̡**++? 
@"K7IƎɐp[($nDO߮gvst^z'"^B99+H:4װlu|*M0A/Gczː=6M$%mj*)KI'ɢ8YbScZʯʔ%~[2B(q.ҍ_zLʶY5y!33 +* YPW{EIQIIIbTVg4iW7ܴe`(ˍo ;+{ye:k[̲4J !^:(TgX,.WgFwx IΫ*E;bٔU @MM k|jY5~;<\8P*6Dd=|qPJe ׉cr}>.!9NpIot`wk,u6䗹Z# eٝpͲօ' +Lt avdMM H兹&}X<< 'uLdW2 }ٕ:Q,Gu{șk_mA!=س]`iZ9=׮L&*%r߫f;2勗z66OoLI~\>ev779-C6+q7j;% 'W0晥YFO=q4 ^0`owFI w[^u"M3e}s@<>773ajD|%"p -GTNEN"{Ӫ$˩Y[lulKD8*[I5Q~:sK8Q{:YjŢ{ZuRO C؊NqtEdqS !2l*KjROnX3rqP?&6;70:ձk,b=AY2ܨMtMv(MnmPh42 3m;6cWY}KBLf+l%?ֽhit2RNZf;8X0&ee$ȪC@$"YbGzv!8EN0TR|8&հCyy*N'tKU%-ާwrՎ{tfeTD2UӶMx[Uv  '(;I?y{#qURE@^f\@ڄb"("("nuCWE w pFAp_wn(J*"p+3SW(gh>>hT^*)"("5 a,!}d95`V*"("5ΗҶTOPE@PEP9QE@PE@)^("("p(V("("/E@PE@8Gu@bE@PE@P"("#J:GU1"("(KE@PE@Px#ЪE@PE@P"("(爀RhU"("(JR}@PE@PsD@)^*FPE@Px>("(9"sZ("(RTPE@PEP9QE@PE@)^("("pQ9E@8Te'ō:)眛! :-H9\2&Aw:Y}Gس/|:5d-#oD#ܺ2-t$?onVm2w0ڎxI禿 ERHߊ_HСCl޼%8\C|J$DҔ㩶ܬS8g ;ڍj_U[LQFXrh8wdWڴDm̛c l@$Z2S_SHҚ%7SYw@:[=^/_>guŅ@, JJFEG@S Յ$ 'WG;">Uxl$Z/}_uTdz﨡l[ "AQꊩL]S ̈[S'ePkUYzM*iK_z9')NdJF]kCQ,5S+|mukqtB__XN:[#xqw4`Eqe+y4'˽%;N'kSce;GhWCaS3RVS6x?s;vQ=G!f` E}Eo\|/o&"Af򬓾؆]=5q5uO/}g[?075}T-%W- 'wgmjΦik֞!NN؟LMe9u:iW6"#rUKrX"%I{X(6Z0 JBr;:)3Ccn5DP#,yڏȲر+zt! 
fÄ˺ZG/"Sdn(Z7y9dR}3:9qdSnk%_7vkٴKns>6a2\q< Lҿ%[G?[ޤyŽm#{3z }=&^b=/:-ё0D+O~w3x b܃r˸v$}ty4daCe8O8Ffܝsv<í̾jFG7oMLdۭI81wOi%t5a,Ve v=݅+ӟo2~F=v-CߐX-mviJf?7aڱtEn UPϮ [;W^"3QˣbmL"9d:wgLJ8Z( y}*sɝ/{& |o",u>̕Vmdo/͇۟:6f2gsxKLwcwh4~hgP&c'*iؓI&WZ~C~L4w|-m=p"~=M6榗/~^>_LNj /cp_QDQܛXD*,!vr~k&CoB:Y%{q-7'M7e~o`Z__bm 1-损I/ͦ_s-SYvcrҤ~gx,vJte)kӛ_*J#ӷg]ҧ}3Y.?.}M>o"_P_ Q;sL ƜToƌpHva޾5TbΜ7S(6RX(*"jXO\/׬l+ٱ^Ͱ۪Zi[_CE h֩_K nҾu\eQk˿Ǻ3?ao6`gFŦԸR֑￟+ Lxr]v&ٌnh?:K>͵Lq&/׷?>.87]Qm qL#c ϑFn*u/R2I| +2Oέ8c*6\o\*wyxL['ҝfbދpf7|8 ټ{_ʻg}ɤͳo#rؚRiR;_,W ?`N~\îE2˞YӋVL~,̟y nK$i 5 m\~{ST`Ohٻby/3Ĺ3w/Vb[_o?u@Xt'-VxȇϷ+# lBmA\ x}9Sb*[,Apuo xV)=}l;JV'D6vbiuva| Kp5n- >b)32F6 9"Jәh:EBPi6EHuOnI^BnEeDOZr$D\'jUJ1bs+35t)=˾燯9y94P=޿%vD()ۘ~]gHot{|gO~Y (y;[/UɨO1'UxmRDfobunY+VΚuzv7|-gz\Rmjw'[` A/1'` $vk6w7۾ûE;Fsq>[rHͲ|UYAIBJY';& Gw~.e&d1&FJ/;5CC87hӱ+Z"Nl9[B%JX.v- úP0]T`p\25!UԖt e6t"J ֡bySޤ KFWD ^MӴL|dxޔMJ;΁9K@?zE.^eU =F?#/I+=l.M.'t>Dw.ȰUsᩰ@zI/kp)ٵW񺁌'D?SHۘDAD6wr܃aC3s>ͧЂqlܓMY}ïGnd,UqYD#6ݼ+48 ]'CU{b:Q ;rAbרkiJ+\5=2sXr|q7*R걩?l7(9n4-?7;3Kogl'Cue"?EY"=H/"uoL7O/Z̆(91]IgTSlC[ƎNqXDz73ȵ[D3D;fwRepǣ[OZDuqlrC*qezY@Z{z G:RLb[\[ԦJ}Ohy'UAqRNP[!o0moJW<4JtTmГS s59M-X:فT'Q!&DDKPAkD4Bw]kX 6}y`6Av~5SuT '+Y6B0B]eŽ3zys$A5WRgjJ$*jqa_)J j)z !vC}ӝ`G\ҥ)^TDK8iO+4 KlՂ@ok_`gPj 3zEނl{Y**ˍFĊEVAмL)Rbmh՚KxupB8՛9zt3 uQ21AWM3! =,G0M[FWs( ׀HZiY'st9.I'a[uˏn(h!:-Aa&a,kSQ >qjCo4"77dRFy~ٍV]ͨurc$ĭk-tIE!7uM&M8hj#πp"4%^*22& 3i/g,LPs/Ѿ)=@]p_gRm%E^i罅uj?FXq !D-;N1Pߙ/\B4D4PשDߊߍ\m{K7l)?KV{H0. 
pA^v(^F@?Jz} FF$ϙ;oN0pj񾭫Ld疣,f1Äq+$OEBޫgJٻye$)}dI򎮪TIX]DiLQI 0&` ɵؘݛ&xG0gwY,l2;gcd̥*LZ(ͧ0C纫(زyo,e}SkjC]6Ei{Ȧ׃֮Rc髂'{hO u.>2X,:B֏K(]yC2m27T>7]mq$;O, .2 #P`q\:Wj(5]xȱ, @tb}D}&-at~[Wx7}o!U->ooW䐟Frf̤5xXZXo$7_GUKù܏ &}NP CSxvYynE)Ot1m(bjmUK@^m`Dgc+SI:BmFȏA,fǣDo><'7Ol2Qhg@)K;A$PK;_e`#xrL-O(NqQzaS ):Z^+{0,̛+ㆻ1D!a0DNI E<$셍0uL)GY?i|[~M xt;ᕗN^~b.?cn';>E>s7_~ 2${K Ť%D6;]im{fńWyA` |v(.:"/dz}&*[:kW!gL^/VsBnWbS]?_ÿFe]S+-ngʻ;|JS_*q<"s.fr|ijwtepR}Ly8DZ4?0'y{^^ |p*OD`!-?/QLړ-6?)H7DIݹ k'+>Sޛ'r;ׯ{>XN‹Hy|8OY avML~בT]7ugа7mI[+MHk=KMW:|czr_zwlhw:#rٵJy֮uWܞ\>H<qdNS-dn-''úy;pSwP%&Y%tu|(3R]i%V>x[z͵]$F1I̜ʐ}/!s5e2P{beuGOEVHe6@?'Qz$RU =r^,62,!#ɠڬZudв8=I| e.+Hb})52,&2K(6'U',q7sI_/kC<ħJgY|D:q]2$# DoRZXO e!JBnK )++ %#KcTiJ?KJF[! v2;K*YU-ã2g/yenZ/$ޖ SH膓?558[Kڶ9Ҿ%TH%i%Q`񖠪hN(!֑hX1qrrqzj1ʋ+O8M۹ႛH'sMUn"xf'J1M^V&*#geהdbt2 A8H{Y_8dF2y\ C%S:@X4<+1J$nğsYo}k(6vtc^єVGD+*i%w~˙fc;2l >I-hv1 bh|Ҿ&kJ,4 nn8 *<Ʉ᪕T-_'Īa}s&''}mY2xWZ\N5FDsc1XltiJ=]d/f!\dx M&uz{>x:bsZDϊC~y+LN.ri*g$^N ڍZoL[!K?oVߦ.b>/((@)^'-E7 )7sW'("X*xfK^w@ػ/Җ2ʫdk*dM€vm"$7ۜ(}=]X'Šq_4õ[Ixv|,_,fɷ t Nt~4#B{+"pu!wp[LGw:a>RM֦ٴnt lZ[{2w nSGХh?|2m#+=Ѻ(g3u@IDATvq"$?="f~Fu"J#lhY+,5p*MdټugSY@qrVW}fa'ёJ U-J"(gG@)^gK!G+yĕ=ſZk%M%*׆Kr̽wFtwHcHftcn%{`"[֙w2G(kL}bk ~ZEhʌ=1ֲ\CY(_Jȕ5RO'!)E?E@P:"pYb(p[C+L7q1&bQ|b*eݩ ­E$11h" r*MpހUwbe4ꎐκ爵L $~d ]u5 R~Ru"(#QQ1YL Y&,,5`Eg 44Xd)H5dDKsxHQ~d_R{}YcђJ-fvbɟȯyg6|?kVƓWA;lWgdž|:/GP?9UeJ2YL};oJk+ٶv?a4: wOO9~t~s]% < hᅓ(pETk塮 ?ܭjQZi*wvθkWJN\SS'/3G= rzl1֣s]i6&nAQ:*pDu^a(ˢ/޾xP#RU g/|\p$+_'_U1w7oyQIEJ9P_*_e"9'w?Z}J.T䨯L&)X, 5VP*Jʉ\KKe«;Z_}kxbJ=@O88i߼f꤬Bj=#rG?k.FbŢi_/1V]?{ 6UWPUk)VTP-m(:n&v/Ude[<rƮΈ$,{}@4y [X$|yy8*jp#?5;."iâJjd0Ss7RQVTHy>82PSIeir6&{?nRYS&,T 3Owg5b.7Yfa54cq ׉ftI6<)j(Ya.%&֞W*Duy19Mp?<͘[iE-E'/7- ??O<ܼ_<\Au>Nł\*R+wp'D;VMs+,?#[K^Wo}xz<NB*ܵ{M=n<]~ԯu٪y+Wy/1m-ʜ%}:Ϗ9&_IЪ$ sy 9TLkmd}[]Xo>Ws_8Iq|<“k%WdܨػtKլ|i 1ӒojֽɇY;7Eb߿^:R[DW+:e>Bg).o)_B=DO_3|13``I( 1}m4fgd"Cf囮u0I뷊ex7GHSeZ)J Ҝ1辏io+CyϿFf%Ȧ?eô-Gc[9yŰcc5Ǧr}Ec]Mzu(KxmպD6[φ<(v7D^ٲzG 0dv?߀(OLa1rY>?|DEL,?sah &lٷ;CpK`g]-.JDv7Qv0A|^#o%;GX |ID b5ڃu?ÊMtϺQKTKyKqKVDmJkGVSrȐ֬9H p#fh&)$۠-AKNBl IH˜l= 
A+][.hia?'M#63"ͯoPkxZ?s9,ҋPI1#> { PҒCL-$ [)Z壸zHè۬Gn&-A[`C56k?}xӮmdI /{~pSGj,]rOk+0I4e6 ~46`>7ɏmO# :QV3ca@t"uV,z~)V1\ ע 7M'W{/@r {< lnZ? k~\fo@ʑȘqR;/ RE.x]D%ZpVO8z9jΡ4@׆(M@(xJyMX Pү-{׳EH>]ejua7!Z2f?elclc[Tv8~ خ~韧Bsp0 ˜rxQâa4'E"ӎOݙw LHO/QJ:m1")lJ3t dGc<ܼ8h BT^uVXՌWI+cW*c"{n.zؼE&&2_ͯ*̆4Z:D/KF87>۾D$HDıfb%TK=3j H3h" ڞKڤEHzd]A@}5=\{yZdFNy5Ձؼ]\CkG ΢X!QrZ4;/Zpn(C0\-m&xKg䘃09l_2F㨳h imk9GsDI~UnÞz GgD5rĠXxE6=l1je&ptAQv :v斣A;$L׮ᨿ2+ekP,0%@ O%vbρZ^3-l~m_VT}E(4chEl Ⱥ=Pe7#>ö*>#0#DmJEO+AB{p̜)2.c`䜱lW8²Z闆?;{d-neJ9BRR?d:pJzMJk.zx VOMaf4J$a7(DSTLп 7fDO(%Jp#~ܲ^^QQ= EB`F}c *<:ב=xeZ6Co|*jck~s/^lr"v>ax Gk'Y3/t ?\1{myD30s{\raTkce!`+Fe8Ѐd>~4Bxr#nD v6?9Hd$M3 MOˏ|w%gx?27=y]Q^da2 s~~=۳0OaCCiڡzg7ϕyq j?;̺g )1B?ʞsϥxE۶"Tlgߞ4hڹu)M{;n>mitNNA/q)%'a]n 4~›Q /TTT醓-/MK ?K9E|e2trqoXw2_e u:nnfTbWWJÛ=zYIvHKg[6u7wxب 9bw'<Ҳ :u4,8O0pZqTqJ|3T*.<6oJ%WoLl5L#$wǡ (A5v ](\DE#=<TyNRاN*vИNP҉^8P{>Sd9{aYF +89t fVl53w>p΃M'#q|{9_́e]їk2Ed*I~hc,Ҫ8Shi1σ(gfK?s̅c“>~tΏG9]|{#&ηmRKep3qԾ$ڐRm~w.ǁ'zl!΃ɹ3մNϏkW!SU+V7A2-d]k [эr =޳lc&5 }ۘNtӉԣ|1uSkmtCDk*}A).*C 浦 k$^$ 2ϽYPw{q;kg D{YI?xu_)wL WWW$IqCcoDzCyݨ774Wd}!&qP 5#S]BtwMIÇ3_">lmu2'x'T!B4SLEkC߁5{s2]__cXb,} 9#!(pSAuox ^)S~'B(OjzbB/(?x-mi߸ݍt3|}wʹ0Х>i 3a#gPѶ$VeD^S`+wv{_Pe"c^E~ޓX^skjZyީ㕣o~Nj'K$ )N L,  pKjڿ>ˎ"ۓ(ck.r6}_? {2H$ HT$]F\{p63'oL# H$pBRx (ھo2Vp"1mkCC<ϗ➙_7`fl (-|Vus|apqMq֫Ț>G9MҋU˪14 €vΣ5p٬Upof+ںp g;=X?o;ϖ"1VoΩ=Wb\ޔ`J{3M! H# ;:O;`s=I'j5?¾ PM;P+|/ۂ_a-p_si|Bb\.C򈱈)k## _,DI6\c;q%@;Iʜ1<+E;2/{ŨEmo P57Mv+[׫h״" $ H@U-YIE A5b;B\+^,hݶvPQlF U=(nO&9"Su9Y_XE'/w󌟆YU:Rz7Q~CrcK{\$I@/ -  Mxt~*X:MxW͸toפuN crF!.jdFշ&s4x+,؉V'-L\7mpyൗ`v,yuϱUu:<]P7B-ܓAV[a$S#Аk/E݄1zb*YeQ؞4ZR1n!5mGKM9Jw?PhSYx|g:ꊱ5ێ4Da &L膥XwC1ԧbyK((G0!6=E%!Qa0V_#MAjQ׮V4Ƥ##\ߕM.Eey+nD]Pd6 ue1ũ?>kPSDk_D"a!P"eYckPZc1Uft*#C`mm@ynؽe#LHFEO]){d*@Yv*[X<""Eva4P;)d7!6{+A}($ dL 畢g))DQ. 
gDhQ,wnjXK[/\#|*57A_Ezle1day}k CdL"nx= "*,шAz:ЅZ4k@zvC j&ں7uEŠ_T ^`̦ŪNX#rPv @sm% [8( 㦨GGerPUь7.n(2"̰EKBXЅɫ_ďyεre3<1Am\ƇE:L|:.EʷaSLJtԧPȸ#wӸq$ Ad\:_6GzT84}bԸ3Zo4Re<&ueǓ_5hGqcpE<%7hkAMxRLz|V]$nrLuƗGmxw1}ᘐ x8w xgv$w~|嘭^{gƟߛ!aG ?bB,_66]?Nw[XM,Xvp__ .m />Y55@u~O0(7jO 4 m_q=2N5 \ow5Og-wH,sF=wWk)?gGq׸&Un36ZBynW]〛 7 7BlQ ،Xƿqޱi7>zXzq#ufguU#Io/&<:\zw'^p/nH ~r'֍|o&c'+OŒQ-^uص-5T~`4êDmt̘!@ۘu㑟}NQXLnpEq˞۰H<6(d$ǃx!>sO!yU >~s/-fd" <}ճ^gy(ޒX\-~ DaT2lcw]1O9[aߚH}ӘhCp߫Ykgx~D/̃AhX"Ăikڏ}[6O|a@IDAT_[5B.T,3w* 69 1?0QܑXosh܆go=_ao'6cگ3rs9= /a$B| װ ,Va 6pſ׮H&,x5뵥O}^ b,W"㝤ԕdzS 1c<Mb!,Ǭ}R=-*؈~ n}sL[mqc0~~;T& =x.KK[ZGfŨH`q,;ıxnçBx,Ƒ{v_!umI9G;>rN$R]GͺMxܴk?&R Y*M ExL@)'o_EX_ W[RPF\f 2Xրu8D5K{ ŸPy'pl_O˫QӉ'n05̃=-g.A"a3!,ƆԿW~s0;C\۱[bQ [rA96-܅uO<~-Oäh UML8~:@q70!'[}ogAe%'xvG|GzR )Kpdm]{۴UIJOmŸnݘZ(}HZFwv|،܎s$͟_~gM0ʮwk#\uxӧY?NƯ礪Jڵ<710 F| 6u@eE˼A6)bٞē؜lŠ]UvHZX5:r1B19-K-ը6Da;Ά m(l3g"(6%^֏X I.Ѐq5O_{,ͽ\ O'ڟ+pٟ`t-y.ųy +.]h>KXU  uٖ4Z=VfO{(LL!-ԓbbTR0ҝlH^J}X[k`CN 0(]ǿ_wيvN]B(&zAM*FhD>>:؂˲hCx| ,c~|@{T̙.dQ40j1(=O9C3L@mbvCfY #`,5hx:__(##C}sr[4[8¨DMuV?|jG6Zk¹Oy0hS_yzn!_M1&2`vZzFzZ :W< !dD@@h2R쳭J'V8 ? 
+W#GZ ?jc j+ ȣhi +/7peVl3We_U_}g}NQEۺ<~ ~6 lcBʓYSQ;/ W9)ampӻ+G!|G9C'%lY=jd`W#SphHd9z"ۮ̈~-w SThңcj5űꖡ02Ԗtl{ڣ3c -hZĹbcuOP0]S\",Z_bAX]Ё]s+#<ه4WU5hpw CoU98СZrfuifXK!H$da qG(OAA:񷠶EmPb|C0$ %ބ;vW6 IH`tqٷ;Xp3bF].B7dK'')>3T;*S9U4s.IV!OtMo$" ס)F 2M9P<(.r_(zȐ٘6#)~t)][OnވSg+ |(mv@Ò۽# K*4 5_wpcK"P\\Z\i- iRFOhMGP<,ʠ_R$pSjPs> >= r"B@ LzO Ƙax-r-ϫ҄sW) DˡXt 2!eqV='_^M?¬j681r/b:6cf2<2`!B@8< 7tr!ӮJ0ސS,"+Urd>]G]XLHؠU W+hՙXm;W`Q{v{rPkGS0L:L>\"|'qΰ Va);i3G q((\V͑2}A63D?)˱ӏQy,U۱ ;%,+ƥK B?RI)$ױ%UQC(=?R?#ԗt_ 2 7Sx H$a%%Aė Q.9\8GEQoGw,p9UϑŇ >?#/G8 >×OGfѮ[m`CZQ <K^{Qͨ][yW#9)TgK#/C20{XRv!f]kd 1 .$_v-&rUxHNg *>ꀦUXa3c"G!tޯۊ߯"3Ea">4 [/LD}%<#zm+ɑnXGp7`i|f9%uTgaT$Qv` ^M 7MD^,yw8GPPL`/--- b I`vO 2>Y/sKizlذ;>b<` SQRlERF:F _yWoKK(B_HT>,vCRbb'zláL:7 twxrI7k*&CW>ŗ{\iX4X-J+%|HDs8g3r-2Z"'`vZ[!FuEGPz n" ;9'bw+ZtgQ;m6`9"Xrk#s,LHiĚ_-8.>.ZPӓ_159E*B9$ H$ ^'$wK$ H$pHuH|$I@$I@8)NH$I@$ILLH$ H$pRx- H$ H" י")$I@$I: [$I@$3E@ 3ER# H$ H Hu@r$ H$ Hg^gG$I@$nI@$I@lGr\|hښX8VxUv 2ZDd%$I@$)NiDP[ Rms=uzQ CjϠz9jB FFG_4Zuܢ w[F$I@(Fx]Nh(&N44Huڤ]0Z|[RV3 y:р$g=2)@WT TPh `oF+(ogG-[s;ZBX!Նb OEE0ц*?-?tOc4r$ H$0 xz`kiDp])*`h  E_L*b;" uS!4Qѩ067^rmvTBGː-H"+g,>Gñb辣Kdu&K"$h;USx9ZFr ێkz-в)΁ݍ@A^w:> -E .{O)PåNajbz. H$ 26!1g;ҎlDlE6J !V|p**m0S̵›1)pӊ܄)/FֈXS0 E:MbYYg"o%bAEYǏoU~S0ؤ)Ste;funfz!|)jRXz" d4RFFʌ[$\N/_9E^JK9!55e0ԇDí7vCI-I$ H^E0n9,uh "AQyc1"?.XW'\\9:*BS+-A_[R}E fӵ.o[:X]QmSv💯H#ئ"5Vmb86ߝVErlOC}Z&f3v+ʡ5lZRMUSobWgZxyi]-J W}H  Ǯ!&ˠA$I@*-* ݄:;j!0USq!Ċ#DH"^vwa<;Ao2vvjcha.-CkQH'Xk=+˨WΨ=v+YU+WDNQK]Q#phj@kq^:֋B WvGD_DYw~+.n:wܟ^2H$ H^j u%7bHk@Cs Iֲ|ltCmCUQu[WA$=֕EQ _8(9khi}8K.-Sˮ Ĕ4ۺZb]h]DaM5 @7PLd$I@K7ٜmsn cbu q~;ׅpi]M!me9TYsRC]Eb>GKBSσs[eCz=IwԾ}ioo/3_۷"v2b߸4oq㽉bK.?~gbcZ%IG.K$ DZx.+C. 
Z=O@/a@8QXgD/2oֳw;oAkM.=wn6QX_`ٺr÷ߌ;A2%P2bn0b("<|+Q{s V>erO+uCW.fd$I@*/Z9QS+lY'JbD8fԥ; ~b#'o{Z~߲3om9Kht=ŲmZ ,k],7su5KG',Wq$ H$0 x%|x 0' ")KQX\}[O(#'+]IN&z8}}=gm=}m} WbZ7WC^$ݟ:SI$ H?x_xQt؍%OAd(}m}cF_1CeznڮK4,!]"⯜#]I@$I` Kq/D'T a%H\ķK:S*bӭ@8Bb/K[~|c=[$I@Xj7H':׎Dc$!NƷ'bŤ\nojT FK/q;GJo!L3*o`JmjFL$I/: :h"JU=*E1O%2 ۅJX)*]NUo4/`LI:4Ҵu:lk͍ (zDfcbzukY I@$o&0B|8%AkfqDP -PE'pXC` j`sr Va@kk ZPS*457"ضڂ)|SDI$I ]$:-^'Fh4l]6/׊BL$NBHD",~L&kSLRȗs *jO_tNLW]E5Pv8 GrPІfNh) $ H^N /]&8'ebD1OiΆ ig"(@8| a%DdXZtc}qy/D2N^7&c,s*84QNCyZ2 f#@Z Wl?/qd$I@/ӷlf_I8zun%2V߉l$3OݰV|p3hs6JSJC%sA.`,Za7eDL>PmQLU5.k_ 1O$I`˥*d&O\ExQA(In|ʷo;YHu ,  q()!dfҵVͥKW3{vwdct;[UK|D0B0X(-ac]neC\f~/$ H$0 x$SDIC|94'2G峾9U(2id*nFSu(qO*ȠoU1 :<f _.b%;.# 2H$ ޙc+qi1ų$NZBIQŶD7j>+uv.aw D\*]qWyVЫŝ&M"l9U,Ex BRb Ν4lvD쀶r$ H$C&0-^^NSpUsLuK M^bAlATH6T i&IyPAJWjcUK$*5Yi`Td빬~4Ջi<FkE/C~z v{LqY7P][U@=݄g^ hUs/$8?<G#-@[O,،O?dp I#O>>=q D Sèb8 m8?9˪\:?gg=G#MD[O,wCI7/H~[]S~.پe=QR$t}iFDsH>x,wY0<$VAm 4!ՓTj} U4eٽJq d1z<@#'^f шF[!O9rj\ŝOЋ25![]|80騂Y+A*zU%HWf3D[ 3N}sMoT*NXTbFǴ.f,^Xx<o2zE@jnO&rY=[]STd4(ԡb7dT,bfDo(|ayG[Gb[^:ZF_R&pOs t5Zʌ  B7FSwvFy~x<GxY]3bv$sOUFTlK\p6;\%Qjx08Hܦ<Ը9iuZ@lslhp4>a *PkR33jlxDVh}]'%۵!~Yvϫ\&tD+J*y_y#G#Ѓ|'W 5~1y y`'Y聎pCre-=HK*oDt< ݬ+H.֩Q/D8 ȹH<ڤJ5zIs`NZ_AZVֹ/h9yWg#^M)*k\!NIkMual(KHS4A~5?<G# @A"qn\i-ƴ <8e U|jwٔY- 9;!<8Jİ1@Q A>V{kt@g 3c\!UC:+s>6+kÉP12an¹+ t,!>6{wQ܊vpdG*y٥FpVҺ<'*׊,xUѹw4Z`(UF-OyYaĭ#' rI+S6LJ =\ُ>ݝ-Ni_ćɑbn5t v. k$Wh-T9 ExR{n\!/-t*,bUHwLx<F'j/damjLFJ2u,ci-5FX X)B8*l! a>8 i0^;Fl=,ܗU>9rI0+Kː=쪾2wTMk"()69Aݰt&`dӒM2e27W'g:xŏie%TrA:AEQ"_9RE_jmUj3P9!_6!ʏ \57OWZ>B`g e\Bhf6HVoRkPXF&v GU=tQ2t_\7r<%x<E7D+rOul廕:l0!)=F:\"Lƅ c8Bv*VyG \X=H"x OTJՃ?dĸDHW6SӹS3 m)/f+8\a0 1uD9a#^ )@2b)9ir_>9)R/}B\!K'L.f_Zrz&a9pc8*'>sߙ#< Rk1K_YzQ{5*aS_oXr\`\U\R?N\$KCZݾ#JƤŽi(^\G#iXa"T$g"C^Vnu*n+0cSW!1|G,Ȃq{8]QNXQ%yX^o%>cں=7>P&R/4Z{ԏoEu//32;oizsUO ^,@ "g{uyjpq+kcUuukEr@scSFL U-h5ynkW1Y$ExWq. oJH2#eP Tu;s9uvHƏcSZ>\e]{mPXmѾ)&P&+n$Ur%ٍa%PhU({7?LxQ c8! 
x<^F'W3u^+福M݈ٚ۳I82I6oJjUJ by\`%B[;,ALȻe?>ܟ{AvP↓ ׵I3/iпۿҲ| }U9sk1ŽZ j搣 ]>Y|h^ [?0-pʪs{81gNN:1XܬmJƒxb(s'>sP9% IjsY%Ql\ ZH7lyuLLJMWªhLAYNK{\-+Pm'>T¢Îeց`}<0BHムF~x<G S9A/BjB\"U9ū:%|R^tmQFj:F5a7wcf_ip%|v)M;=ou+e}{/irrRiP(TT>* X'*<8|8kVfRyPЫ'T}bċT ƱG3bNM:ñCZATOhRr4dĎ #Bŭ5\4`2NQþo BsVsF!MwNͲbYN8&MMa.N LQssQfW严qx@x<G7 ՆxX!lƢvF-j-^/\T*MeGQzܠOE8 mzs9e$ίҭVyyos> qlpʤ0##<gqr3^!AdUxhYwkĂ&U<ε!j*%$}wPsB Wf|VkSbw6%7Z??%Ny3tU*$Z I49G#uzxMy¹=Hȑ}VmJa4[m#3V&ՃNxd!'ߦlZHi=^&*s3y`В:L΄31LjssSw?@mс(!p$XvYGL |taEb {i 1լkCP1`ڗ 7x >3ŪUUmfR+zrf@dT;#Xԫ)PPjݤCWBf1njKWpZT5+eP3\pZ܃nU*JY,GMG# >\=XD FBF@تl{mdÑ2Yכ]~FpAKGj(yM8o#wLO=aTd*АH^% "vD%Na=m.j;VȾƇw*8xe&냏ƕ7}`ju}ül_p߭~wkr̃ {Dm3 ΛVDžM A:;eO3-PX&8RLQw7><>Nę?JX4lBP>l\Fza[x%2v, v=G#2(L PsHJ1iamS=bǮ!IgG!6^HF-ZͨGGD<*adЭN 9L`rvv* 78ӭrP>=L|s}ȇt:[bvcGMϾ8}!JoL+PAɜ !L up>b5?b1S]TnȨ(9\T]Th$IB6v5=ZmBMvsdX^%r0v})hN>rbi@P\QJ[L؀i# ?Q`oJCS䳥u []rE?OO jy94?'΍Klky+ BϝL޸Xdލ)ק1(X.`BM!F3S5z *4+/գ>ͭiuTMx<C(eZgtN!.s3wp#6XyO*6m?!cm*7WJf$Kz`uQKo*CWE8oB 11%T) AQ Z*iS/U QMiZan̑6;T+6!Y1EK& WJUa {r_!bW2l*{ϪT ^kX(ޝ;v9rNun‚g|=޸\mֵ;Lߨ&F4Пb|*FQM9vs6Bƕiؽ[tKOO-FN7Ok?<G# @ga"V`I6eʇ9[3PVw # eUQ̨4_W?d$M/P&J=F^эE%+!;іVnIQU鲰*Ea034dJ#w hQ%FzHA0a3.hߒ%9$ 수1g@ {&_ծ’ K<-dj='![89AƺfyN@ǾzQ b'fhmjξ{t,k+OBx)6 VS jl|Sp:9һm׷A=qF7nzՏQ԰ JWTKF:0MSR~}RDUϜԅs'Q!ukιq#x<zjrՍ3!B1R`6BiV()B A Jͽ `5mh&ƹ̹fP `QHP߭-Bw_UduPYZX105)I.L\$Ʋ&32eMCi>K xѱ/nykcdGs̾oY!=k7BȌQ!B-k:W';}eعrGEZe WLF/~["#&N„0l3nBl~qib4-U#/1N:rkk%e Fs3Ӛ>E$Cށ-nx<7"^׿z7 !s'ʯpJ'IH\Ǵ=`[/$s Jb7Fi܏eĐjGVB.S8!AڳWH?\nѦv=r;!5Y_šx`_K\s  a0&p|ϝPdBױ|jX@+H6Yr6/BCWUpV[w]ZnƔ@[[YTnO54ql) K\[7! UjΎ3q@DA,n絽5PPc\w(! ?T=F1Blz:r6E#x<zxu D)ASЛєNoԿ9ih(=WJ[*tg<{ˇ>BքNNW| /;r$,o,MO=}*G&Z5cFR8!Djo~M~q|P%͵m wʫ982? 
[ZUM> ͹K*^8?ƱXT~RT㥨&ʥ+:uvF/ĥCrd5v]4PϿgvQ'8>x` S-9(k(7Eqw" eBGV" (N>(Y]]%gkkN%\-ܸKBAQ|vޟǛ+cS^Hm6l;2 5a 5*ц66wVDrSiD!|RS)Hݢ6s{'A[<G#p@ϭZ*g +iDHIf[Di@!~:314Ņf5 *VT& ӰDcW=„5 $0- 9׽[ QMOO;ƈWJ N5!៥x<=@/êQhKc]Ii|{6vBıc wȶu|ZQmBuB&jw %|m`Z)'j6̙Ս&:xU-7$s/npTV:tZW>Xo`y u_GT~}W`$3@X̽U[T!K}ϨiVkM jU3$[Y*|oyX`Y;& O.]!ZTvpDTɮ.+> Z 5#إ TZG#Ezx}*IX㺄jw`Db]mGEz8]BL[{TǸt/G^XF{Jo+꣕]JM.D\00o/rǂ4n߿z/:a ݴ dT X>&Y~AvOI4GAB 6:9O-:9۩7]}B-(ڭ ]ۮu2gN8Ga[8;y`#4K1\֢v [{9B/67GF\%sʯCwp2Mq\ZZ%VcEjZgu ' d4VbPG#=GZ4^`1NI[ h#կmk54=qܼCvyXqm9r"!k6A9~g2.C@r hUʵ-_~)P"$0e^2riajKunnyrm]|i]O9fё<)rfcArJǖ~Y3lrs/ C@{Z#GJCD U& ;B ^֨,!n1$y+ ܿ$'%56TXYA]>/E5j‚vZ\&HH138c;Eka X'W"Cg EϩXoiX.cK._b~x<GGUZQ0D\]n*.:/k]-F `1 k mHүE6D8S1:78 *`e!^XDir'"aPbя֧fko~0Y4e ʵTnu]`շ/qI?mTf3^U Js*n-Si<9sU0-/jd''lm:6RD臔xmzP, Ibϒ<#alъh__lik[RqsNӒ ƙŭK:U58yWc9u6|R}A*W:\e ӄVQ>G#=G Utߚ]${),XBhx{6u<,,t)9{@ *NCa4wEyKTCC^TT- gN*|$kwC{oGiMĚ:>|BEH{.^wv 3WaBi0/ -ֽUXI('+=+Dg¦:Vљ!W qt rT kUUg!aHUp_!tD'+nP0 `Hbe5,jVqvEtk3Ҫ FƝk~D>]M[5˴MV׋FCu{MNS|6jz,=x<$^ u)C\VM %҅\T3%@CԆX̒7thu8/ $LU/ճ~T&jdFt*m`F\(AWX"K\8b33=a W*.95s / PN`TC:{Ο?{{TbsxtӠ1׵~W*җDɃPLEAj3YJ5m{JIo#ڂbw!P]Fxd2 >59;;WEA y8gB/~:3@&9vV-ge*m$7ǫO<͵Q;g?Ί$?v:ڝ=Ե kx<׋@OĈޅYGPVzAt6@ʂ䵎yZS~3Y*.$ shg.$l%U*pWn8#F)rSfNMui T <%Lg|j7ajky*X1 ڞ8ҟ-rIqMoi,} MTYt[Sew!2!1͒f"ameXycjE0Jm;ɋ[.k.vuՊ͛*- HkmuiAHP8Z1T*$Hsf9a r\s>*Nen0\lNaprNot©qw=T->GE 4*厥8 -@Kk_#xVzx#4Je4-4yU-a3اԠ$l/=z ʹ (#>)~TZ$wƽ/9_ SVWGEd:V1_n\@gΞkKkY~hAGH#V` 8 DB8?׊Yi:V)1k߸/m((Cr^tWV\7V*F0T<&`6x̣&i)QFJ͛i(R-XvSk_}+t9Fq߈B #Zb؄0, Vc?ʆW Ks: o]RJ4IӧkqiHS|w -_ՙs'IˁXx<$^m ^ ge ՞`ݭCvS#T?SyxwH6je5o]Uge KU]t,lZˊ& |DW: []D!x\W/] -tfgH¢R8I<}~Iᬿa)SfXj#!Y&leQ!\Ḑ#Qe Y $hQY8R̜ L)ڦp)JXU!Z4ʨ~^'oM!Sy`p Yݺ4؆ cZCjpqFM g΀7IUc u„wDVCiQyM=yHʌ~$?' 
SÐ6kCy# ed*F(-F.d 8g/xyφGs|A}["zPpD5^0|.^HO~H'=Kocc媂6tZ%믿laeZ8X, ]XmF'0xcmm:(L`QoEZs[ 'Sl16Kk V>x!HY`p>X۷guvQۛ(sg;vXm>B<ȲdBsH5U]ó,E]x<GKD'W2I-۟坦/kgq?IBewMKi%7$7h|״櫪CO>8g^|YzT38JI혯V!$!S\A}qDrŅeST!{D'j089s2)NvPe^#Y9 w|,ElگC>Z$yUYԩ,sXZ0#PTFYIF''ǝ3ϸIJ,3̴\a a%ѬzcsIYPOX6ҳhh^8P@@ ~5(}+50u=QN%dP3 ?=J.&zfЕ}/⤿MGV4^9fu\^D+_[G#2I 0N&#I&Uo B$jod%ԖΆJT CK,wI3uAϼ]^L4Qu-`}u([Qݒm1љPӱSڢɶ^[:15BJ60^ Q #nRU y0F[=HxϷ9wO;@w*SqtJoޛ(xV/1Wӝw_W cOBiOM9J%0ImZlan,8zE<71_ 51#}[ՇW-7EJr9 6^NQ2,k44)s+s A86u(/ْ#IEf]WenrOPUaa RM8wl#]FM`tsZ(**2)Abe^9R®BY\zK:uڼv޹} Lʅo܈h:\՟ %ԵS5L5Q?ڿx<q,ʞ4qFdiKߧVvH[ P1R)bէP@ulڨQyD|mP;j?LFi ?kBBP-h\ag/..'# VW%զ~ԴԴ^t5{zD:p(7L$-kxcب:K oQ2e"ʡ(BfNk[jH3wTaodbٱ UMA09k5dǷD}#VPCEs9_L|_2-#e0dkh3;E* a!?8b;vjrtCҲi;y}pyu[Y,/9EqO95])U 6] ƹ(nUG#'Px<@O/WqF~WWkgEkOFAAM=&l1u+zK[ʢ* s,L"8?=͜4c?.2;ojc#Ō {'TcX$Bپh,=Dv!C5#~$7_H^^]; 2c[ {0཯^ *7ʮdw!fDЬhE7ca:gLid϶ك<(&@}Bocb[tųN2J4E5 XxY}~a) l] NzyOk.(hC׾t_\cWP 6WDq#V|ѧڹ̀k.rPk#`TpBٳ9כkKAB; & *[e$h-\x<W@O/#]zU/m*!MXB0X1'S*@$vh)TȺ2vaЫWL:[XE j0OH/)۩›>en#k! 8rbJjZ/4g|By )`_/r*QIwӑ8zI-}!}#ar< Ʈ nߔ&9^g߶kq;;u-\φs`ǽ#4W"j n*c5B|80w]9GZ|4.uBD-,U]WSiq3!Q`kUWU? (久Vd.fAK(1M?qFOpANLFwgto|FTB ȶ!PCp#]^gG#|&)[_.* yK$=@ȉ=7 tz.Ύ aSS|!Z2x=FɡbW1ᴨj$:bm46`fdMʞ"gKu߳cckr>1:ɾ}|`9=I6~oMq#qbq/.( :oƱuت0O’#vI$Շj; 7h9o,8;v/y^|=tiZ5o^REtBKv Qܶ@J]G#* eWӸOc-e1w >!fdlzt9uʜ|&)=$:Īm9In! L БA&X #{vÇ}Neszׇ/g!`؜ܼ>%Rm;$Fw|qv;awt>:80uɃ{0d;?#Ae>c#~eyFTgc9AOw\|5\;uDU"Ǡ5kU>?zYMξ2XgTQ1xm.sK;g? M7#0*݆ dx<!^V>T(-ۢn$l?:y[Љ˂xW/![QrjmjaIc`Gw~ ;xG{4>T>3m')Jr,O\-\لPS/رXvUmTFHtgd\z: /^pk^ȝksm/^MNLXq߫gumƀWmɺ8G#* e=ܽfEq|@Y`0!e(Q:*rZq;G#j ej$ 3b>G>4nt.ڤ|qȖJQQgA ۄ%,*rpS|.ѐкE@޲DS@ >Ͼ_BΨ;*=Er9~?seՒYeWcmLT$ח{=G!zx٬(2t ZN-ܟ4,=*Hxq:%T3t}Q$ Ǣ4|fqw Ktd则~d8S!v{N2r}-eICЫ'|kN:A⒮ߩOэMq7Wl0t,c ?{jXN K+zqMOGA/x<W@.Igx[tK˪,kϯʗ-+gwee}k'r97sN񻾿%훲u _6_M̛fs!^^`}jooaU*WIrhWo^U_nar1ܿ4*W>th*avd~5-<x<@/3| _}*(6U=cWxHNp%yX^*Md2ڢuYe :6=! 
a(Ou952KГ1JE% l}$#nsATe߿gԟN sbYC=4Vx<4XNŰ%V# {|;Cg7֢ %*r^a }<ST0;={C[w9- AOFjir:@Ws3دn-hMM~gZ;SlsM/vEvl{ɑ{ffVa-KHaE p`.&x<W@OUP#ȌlQ4l(&KFsMEuZbm*:Tv2-4xÒ9^VCnBDUݚ_3 U_G#>=A & թ<{lءv= lqw}v6Jiz 4PU2m뙡qPmP?5'6} ӣ1Iq5KD{oxq>P$ZGn|_W9_[UɖHԯ]S.jqΧF3+Z7ޔnwG~x<GCpg"Pښ{t<TqW}!˓i-miAys]+(b'c})qJ xu>LOTE2j(74HV@7QNO;ey8ݛo*b8R>ˆOpjPW?ąƆu{lN6RKϫٗL;{<GEg֗Ym<:U~kXA?+oh*euVWA9uR8M19H:|<4o#`Ae:"dh_,a*vEɤ&>~]ew:OzQ%6op}_*}}`>g3t kjP{[ORZ=b«]_y<W@/kX$r+k G$a+puf֬pT=aj=ۇeDˮ?tu1MC8OW^PHs8N1Ƽ$살<[ȃass絷7O/ƨfZ$2hjFGc|(\!^lb~aon/G3JkB$NKRsx<"^M#^().`m1G[`ߥu Vo1G&HS;sZ;,U G &-6^Ո-BG#xzYX-8 @e~| ǞwbO"|' YVʱY6b~66MCoHO$G#}eӴz aBAkO]dp\OwґT/դX}_^˞:g#xz !^r6#8?&n :XT{ l:^թ$ e'j~1s޴8+CDG#dzx1 Emga5v.Qp#Xejw䤂cjnljtP [|)F+ߑLG# BW ve!GSA~)SUDY/$=zP~['N+24:Wi* G]'=z'<w"^`US.ec[wH,s`g8򮎳B6Coq7d=Gw-x(`%!|3i9B*ڊrT}UE/SN1kNG#kzN`nn<2O[:mQ_2j'tXVVkj&vح:]5,dm~;4CwG#Z)e!9xfdE;w6tE%ϚkVlwC#!^-7opG#xfzxFY0 u~D ^k;2v0Fu$@n$Զ-F6~~x<Gw)e-Jq{u&B_z~#gBFsR(>rs7bcxJm*[x<fh_eZiQ _4C>6q䴎hy^_;c{<Go<εlQ%[.,ے*ލ 5@(@!\47IH!\\`:۸$[]>{fw,[+i%x99:~;g:@ /vN))5:RKOۻ$!O>Vͩ/[L,%B Fxᡐ 45JIhm+5djWuNӾB@@/pjP[)4jj(*.!KH4.zo|(`XK}SA5yɱB@H*p&#yEz$=h[۹ZV⨍MѾG%2͠akIB@!8Jx`^}|HRǑ~UOrʷ Ӗh%z-KMYT;SL j,iH! E `7^[ 9xRj>)rOV&]UJkrTkelLz:V|U(ZNr+| ! Hzyy9 :ӎ,^V.VϩS/=IDATf;U6d`PY4C5@ Ym'4L .8QCB@D"|>LsV#U O_VmgPĻUe4;zHGCBL|S p (!"l99urE! @ p8w-Nϯ^Lƶ7}68P7KFjp>4i)Qe;~^iYqE]oS\:ݡų-7@tZ!%Ȕs OF[r{B@ć@Ki)Ss iIi(0'^QDt=N0jiE>U 1/KG ^51Jp^@C$B@v 8‹p@w"Pc>k|Ğ$ #{SYr&`)TBr5t|\O gJB@!HFxwMʀ/"TӴ>SN6ۑUhZYz4Tt $# 'D L(tGYz^U-?X=ŘR1Gv^='k|J[O5te[̣<~C^yÈ5+lkX#rPB-Uת7iWUǍϩ{U[EGQꚺ/:X(U*+N}־yB;],*$}tכti5! &X‹SI]"v6?t\=l\`k{]o([ƪ\lT0-ßC9)MǸJtҨ5TnQ jtuMeond$JtOQ$mu>rA6'ϛkY&0BRK[[ܱΛUrէx 45]M! C «o1ؙ?.ʞLJt5tCܔ@2>S`&".9tI6W V'nbdXS7S=1+RIuY&eSf?h PsRaSQ iө_X3r*S2U2 B@! @B />)󇢴G8)TعcUB@u*(@yey",la'{wZ:bޙ耟L )VڵN?O/S\p`P玗"\pU Dr D>:+ b jBR4WX *Q JD4U꺽deS#*ڢr/[J)T#MN)e /f/S(3Hn26'jlhն^J]v u腒! F ᄗARMK:"(똩aڥ԰y53\CP!r"ZES (T"J- 4J`P(QcZ"7pNk*J*h?VѭbN'\u6,TZI+Q i>F)f(딊,|VD=uLE!g'вl{|tմ83VB@D$p«!;n[(%_XTBYRq<O|]!  O XjJtYC-(ji%B@tnu?W+2mCg䒹B@:nrWCQ ơB@>^q)έlHK"iĔ8B@! 
X-:eI+uʪKB ],B@" wU! DxuF* ! g-ޣ-3SU! ^VVzS|u KB@!p9999Se|B@!)n@गwK%B@!ЁĹaKVB@! @& «s^! @":d%B@tn":wK텀B@$ «aKVB@! @& «s^! @":d%B@tn":wK텀B@$ «aKVB@! @&AyPKIs b8*%B@6Poͦ"5n'l6Q U1`Z\K! ZIo߾(,,l !0vfB@! L*++cnP/Ֆ B@!p@MMHxA$'B@!B*-c$B@!v"PRB@! @LDxńI" ! BDx B@ DB@! @ j;CIA! 1&$B@vB@! b" +&LI! m' « %! B@D@WL$B@! N@WJ B@! 0I$! B@$$! ,6ձ\xy9~[lUU[|pzHII9}V^oO+mB@HNӁFg~ykFU«%%%&ڥ TW?R^k7^F1ϰUx_?GkM~gTo'P PȀt~>vW/ uO,??=Pc#Q-ubH#m'vq'g@r6fp/Y*V+K_ ?+?UVr{pocʕXm Oڇ؋:;*i͏(d!K C[~ g̽sqewAA S0 ʡ_@9kdM}#^ Ldfþށ +/aϱݛa{(57jO_Gg/Q4~%X*̚ӐDj!.ؚqm@F:+^o0109}|A\^ޮFԭv$LLJݍ W}&\[x #]oBL@յ=-^QFjB%:vL0h3cؿÍMTv3< o=1~8돡>x~ E,,p36d! |mL8HZ}0e$ s7*>(73+O;7ڷ1ۨ"L!b)x4P@:| C~oЈtfN1%%{0|^[(|Q w-so-ȼ8jn>+1// AlFyE﯃>.ςT/+^,! @$Q8\6n7!v[R1z9!S#JTs,ͽ3}Z1.K(49 y+{ >^s/s dPMq VV}क़(:S}UpboJv#3Hw@;J?Ñ=3Fb/cjZ W_Fv);Y|3fxb*ls#KtWQ* ƧpyV[#$E ! @4wܕbAuZ*g7a+EYZ5*ჴ^z|q41SeoD:ȇvOApb rx ۏ Zv6e^r2Rҫ` 'Kb t/HEfD^(A^ZV7j6l.&UҊ,Ǿp|n E]v!4.Ӑ%$ބk* A6PY!gQH7Ѹsajj5x|@jcG5tOO´?k4dވ#P*[WHpuX.~2z1m|?4z.H, rT#Sm*q!SS૯AEԕg~ ^<N*ܼhk%bU5Ɩ Ӑ'=]R! aO1hu^K`sl|A*fnvmhsef{Ԡ#+Bm*N`l$t7.0n$s뿊oӑs5 f\0_A9ԙ`ЏO2T~K#۟ÒcԕkJccm74OS^gra:?E־]kpu[p~!ЎڱzvrJZ? 
%xL 8ق7DkCn aq_4)!`7`[:kg7jrS{UxLi0#dkcl?.}8Ѵ"+Aw.rg㡇֡]?3<yUqpc06➄Wט2F }H/dB.23$y5"?8uQGe;(͸ /a> Br E=pQEN%8v ze0/:Xd`بj,3Vx?0Z/F:^{ƁVL/cC$;vHĴK|0 rGF!>/+#Ɔ5R귑 b=f 8pR3 .CkXd,(e3P8ɏ3(ZL ubgp0hJI!.Nat`f btկ0ZJK8Df6 Sz#j}Hgz1Aֺ";_1t|S;WkxXsr7XkFuޫ8j{8#9i_h~r`刼ݎè $!C_J~8b&Ĝlg 6;* AGa[HRp "N[l5-:6,$u~nGl LH YrD,!\xp;;cɆ0z0yDYnG4C¥]щ;;,c 7g' l׉rQ4[8%rVt}SzPnV8:Rsr񪕷#>UNOktNjkt<ppPL5l)1ҊBwN^h[IH y;T^,j !V >OʚEJ ph ec va':6+E> zEg>cgn#׳E'>cG]x!vUR^lxnF \a vc/ra;|z@ӫ3gbN,ysvǀpgf&6vgaǻcl3RL={+0Ѡ,.wxknB??a4ktlC 1YvBq>pڔ3wgqehŤR9>sjR |PmTO?/1SnB,l?ו /Eݢę4lrc}:hDNN8|Ḱ 9ÂpEs~g|LzKq1!h>6lx) px/EӨ vd1 m%/vUdxr(z+=>h!TIvdn+6|i|uv}SrCJ2uŗ,ul7%G7ϗmQ}yzh3{bpeRXȶ0yI Zpى|{8#m;_@jiaA G!#8(shB\#K_tg-t2Z5|:Ft,}Í~-aɳN Z@f旛qW8񰆤~>Mzq#W^* !:>z΁iRhqV/cDK9h-)\_\B1\sh 5a7>g=eT\fS{QGpY l+s:VB1E?ir]_c n_lJxkLdmbez;$Yo.u`^z {\G }*ةpVbэ> Xg=z0apª<DFy:Y]Ch>ЇiICkZ{[71S+7G4Q$w6-A ܛ;mBKj:ܹۅ`^ --wc7i,ud]Z9i|6\:0~UAtRu+&>)n)Cd{=t\}^i M}A>'|#S}bǦuib[ѱC:m{^^ loEn;ExB :FxѴE^RI{ |[1:.|ԋ[a-hٱq+_ڠSKDJ(&pz>'f!M=d> ljC!&*C>51iJs.ֹ?A)*Fprɀl?Z>_J[dJw]CºzLڈG4vH1LȢ(V /RtĕqNtЪUj?c6|IVL?:?OZ WH³r)OVB ZTM SA/gI~CvoN335e@?.i)Aq\;/l*#(Bv 5 jZ {1 X B>c?cy4ާ+d;ʥ%TV{ź|rU& wiRKv zD4K^Åq/a$FU Za5fe(Kwk})mLh?=Dŀ<"5l ?3K'щȾkԞC[1ᜊr1SGL"R؁B+վkoh I{o7ތ,!6CYX{1hZ=h Jn|ty0,A{z(zo5 O.jqO56}L{>-IF!7>O0Z.}B%NLӇwō{k >!'+o}#qB dȈ o?iG[둗NlX_EAӞ=\y}r]AۣaV~/bo-GDOk(*ekm0hq)ucj`)fsiXKNv:+%[9,#J<4Cg\7DoE R}Hjg1֨OR謰!{.;U+rnO&se!Ogjު˟\ZK?p<^ʵhg6q68ٛ"$ {8\;Z_6; ݹ1F="n!tt ѲIxe:e1 >;Lb8az!Zzoczv=|60#/G_1:2=n/)Ԣ,2;5|ˋhj|ƛ硄mB_|x9۝u#8T!ƒ$+QyplD,C6NXkûl9aۈC%B2 Br%ַuSZ͡7nlrz/(‹CoV3tJW*VpM6FOğJd ;g]0Ϗǿ}ӉSXrp!ןW%gf{EC|Ŵ3 y݁ޟ7R(2L#4Ž>I HŻ,cԏ@1SCgs5O8 9Sf[㏿KƣpDNIt. :J/d< xu5) #~N`*VO7;u] 76ʆ'}<NZJl,Jtqfi-R"\DS0Þ*6UNSt̫ۢaC(v,m%B8}|ҵu+1SRII+X܅W;edwq9E~gd+9^\PS٩ `6}T6Ji鈞K'4 ʥ`(% 9`9tb{)}T~),WhC!/8&p -jd/<gT/\u\2BIHlNq!.@;Qdk+zhe@%T&q֓17z0LM%bEdZvd34A\:{7vts"c3e#ffP$rI+EWCiUsv j/&BS[.:Pcn"wo:s;mҾ3pzƙll -\kT0ه/8NοOG~] $:@hh"<ɞ<c 6{Ŝ1;hJϠ}.F:)fN= ~Ky(iW!=l 4pSa\@㽳яRs f @f3d="3?ԘOD B@! E#A! B#ʒB@! H@<B@!  ":d#B@^ ! B ВB@! Dx3 B@" «@K6B@! h~VD! B@DPH2RhtU@B@! 
@$vx+%%j B@!:i[B@! M.^1B@! h5^F'7 ! B ve /]+%B@! @Sxl6(%Í \dG! q'n7/++Cmmm3B@!Й TUUF.]/%233zM1 #!rB!((Vh)B@!~IMMѮ1@]IENDB`docker-1.10.3/docs/installation/images/win_docker_host.svg000066400000000000000000002410731267010174400236540ustar00rootroot00000000000000 image/svg+xml docker-1.10.3/docs/installation/images/win_ver.png000066400000000000000000001422101267010174400221220ustar00rootroot00000000000000PNG  IHDR1(dsRGB@IDATx |Ul,a%"U -UF@[jfJ;ZiQh+i- 3V6v*V#R*X "[Ys}o=9wMrI,{psXzuD@D@D@D@D@D@D&|H[oMVV?PTTL$;eנE@D@D@D@D@DD?I@"C^v ZD@D@D@D@D@ H$sYM;5hM.kn\KDÇpС`^hoGCCuO]q%kGD@D@D@D@D@D 2ky|3 +|X$&@|kPי'7f!++|B}gn-툀X^x!!?V(@8Ks&ᶹSo~|x1Xi4~n4~b:QWW*VjGD ,]Ҵ믿ǎK3+G?퍔oĉQRRx[3CX)@8tG[wDwnqtYEFӀ"zݖCa&  j"`~7~~b)QTTZ#}; __pGk-aS޽֭kҼ ˜ǣ9Zs҉_;SBIs;PAL'!i&lpKFl OW58tړ8}4,g8UAָCZg˗~D~~iesZ}sgOHF?cMݻk.0ߝf̘Ν;Oi߇@{"3փa[c1E{g/ˌ=@jqx)S&݌9_>T[0.UW'Obk\D!Ean vm17h:o7ēeN]&LpBmS 6|XTJOY"1Z8!CPUUE[?(//w퓙13ا^*kߏm0m6پ6[wYadx?Aem,g_'ax]N,ϱ߿C'k_Dc흆 Z/=c)O.GEשMi=:uꄳu8Ϣ?~_>98p vBMϜ1.E%vC^zQpTz={N7ވA0{fcyW ۗvСNb<㎝?45k8! .׾59 N</ˎxf{O~a쓂^ayz$&tKQ/;9r$~a2x=A>j߻75yvUc.w-]^zuH" "R" Q{ԮVݷy|LحBmOs 0Wnly_N1X5 @'9x $28ws?rvƍÄ=%6aQZmDd, ۷Êazl  dl X w"ʊnW]uUV,3sLGAsEJ~L؎k׮7yW^yeh<ʈO~_~R_<λˤj?+0?+/B}v62V_8^Cas0#OR鄥J$28wPTVV~L̫tRd=u΁"p{޽u׉hmO ~@+x2䶋X(JKK~ m)6(,܉y e#wl^(.z2Oc@dxG?QCm>QFGNa'c$6k){Lh{_q3Ɋh}D#Ѯ)tVD@DnS(/1?W]pzEE@mQ,!w.!4y=Hrch##oш=SQů2C njQ1vR0:ӧOw[eŋ;jknnnT{϶L) 9ˏmE7y[Hb}bǨ@nʅjlٳNı͜ģ8| g:dVۃ&J" " mC H\fȐ& +&}L xv܉SN??:K _W_6R" -B"bW^ ['?" t/¹{\Ԅs dmb*b e>WxS$}Ѱ9ȉk8<V =gቜjCMy^}tsN^nT0 O㎔b5n|[oyzJD2%y`H6;NmE@D@Z@Lطo_g^ 紸n\lV8/y+Wt~Թi_D}Rcb]ˆFqNpNKC]>}S0ls 9 HxLFj PPtbŊo…Ta\haSl޼yaK㜬= f=`2)/ָ ٳs^'p<Б;w-X=F틀.իW7 ~]>\ YfO0qߥH/@{qDjiw/QDӗ@k$},s\- ݃d@ȳLG$R,P b]l9k5A fiQ@z`.c%2\qРA jWmG kzH fi{$" " " " " "ДMIlEgD@D@D@D@D@DH찗^$2$;E@D@D@D@D@D)ĦLtFD@D@D@D@D@:,I,))`4phFqhQE"k;6bee[{ィ+4*e@" رF+" " " " " Q H$FţLX$;hE@D@D@D@D@D *Ĩx)" " " " " DbǺD%Qk+3l۶-lA" " " "` 3Ƶo0P'迫XZ\$={ӧCt @NNN\*wpׇDN:^wR}WN@# ki)hO=6h ֬Y\mkp„ я~={fcy8z(9P BadC{b1np*(" " " " " A-"w؁~\pnV5~ؤP'N8{:sܹsڕÇqEk׮Mʴ52qk?1vPy툀t )x'p#???" 
/r# /_:\}G8~8DףGE=rvDֆһOnnWBVVp~}`2-툀@!Rx/ E;#Gtg?u蕌=k_~i ŋ/F{)zM7[%r_Jjٽ0k]0,=ԙY8q"?rGՠ tXW?;~ \+ nٲIĉ3τ-6-=fh$CL? 1Ǜ({={xqNJTuYdt5-Oqm}>ԞƸӄC2UWW37sGsmYfh;=80d#o ID?Vמw{Kxy 2Ed[k3HN87XbEZߙs~:Z*gطo_\]p!_W!sm"fۿ[KdOXSz)t>繶J)wvV1e7QOr>\X/QXrbls'OtČ z q۷;zs& ߽Lk[hXLV(r_~6l0%(FeAkO: ,g8UE=LַBs -Z_;m4apc oc'8ΝfbeVR~:srk⥥8'5+b vǃߍܦT0Hq$zT^D |Mws SIQ=쳎p{1{lǑ`3-R}{ɓA?mLZ\6N:_L& R:ڄ#5"L$n޼#FpBGy뭷S)^yb6 !l+ t9鐼ڔP572&݌9_>T[0鶮!?Tcz ).],Xd+Gu]gr{;PIIQ[ {z鵷pdƯ{ wZ0o #SA[cǎ9bj=zt[Mo!mc UW]Fn+ͶK/9Nq>73-RD"Ht'@smȩr^MX^^W#=!$hRe4 E.dz$8IoYA;[ eısw9r+No;#T_b(2R ~gdmmCs=eOdħR6ٰ_˘_ &YoLkvy,3rԮm~c`cmX}l}̫͜=OO[?3^xǯr"  Dt>zkp"}ֽ{w8‡#>ŪޢÆ s1\vm~ɣwa0'|2Z}&l[*߰6k߼)۠}ke=v[mwv#|4^^m[n,Om#nFM |= =LlHƐ*<xېS_UB"/Vj|bnBEy#R?l ED}yHF:9mۭBmOsqӊCـa7WOYwlMbA֛׊@%_Bao&}]\r%N9+Bw;8dzdՊ?9.~ߝz6aχ%wAn=( +a/m>Ͻk:tc+r6&r`8 Oڱ7%}m9N~xY&(zMq5DĭFy=E@'qF3 9]MTZJ^n=C?~4qP7q,ˏ}˱Ǹ;Ś7cŋ;mSw M>Ʒm!8sLny]Jrh6r\DbgR"~my}L 'n41-/R_QdT<~_NpPnBey# #?:[nu8Xϛ!'C 1Sex:~yvlv;?)x!˜u"HXζGAܔ{ΰ3g:Bi?"ӃPm4gvG9p)dzDvpSlİ`zi#z^"}E{:=i:/"aɵ$.cu>oD!N'tP0  A{,4<,cVKob9-dÌvly:("-RLŇY`]vje hn4^~6~l{n~}ws|KѐvAD*)y( ?©!zS.]Ba c)zBF5jo*˱ ?e@n2_H Çaˁip |VNDڏ8x2e Pv>?Ύ/{ϛxٻr?ɴמdƯ:" `ݲd2k!#X&V9Ւn{O|boV EGD%詴[Tɿ\ )Y^~:>?b5Eȹ+;(LJ<}?q.w~u/BW V*O~" ~QqR c90V{*Mʕn3o4Hc4׋|a<ۿq4~ε{o:'"<w8p全20-bzw5E Sk=\orڔ_"[߯9UMEM6Vldy zI/m!Yv>߰5x 7pq RIќeGi"*Z/-c, Ym+cC_ZrMou>\|Bzaǎc5Mɛ-Jwzя~44+9r*6^^㵋qc>$C9/*Z{Lw˧:N ud(e䶁BXD{FQho @&Q,}ny=x_04x9s嚊1%˕;w}MG865_loϛy\h|tv%<\m[sqϐV`h\+_#R}qXeb>m`('O}*i7U]#S 2,8cƌ9ۚEY!h&1d[omR^TT,nX<'h#0obLM\ԛ( q?myl m݈v2+|_tEN̼mv4sj U箪x0dWcઢr9ܞP:qO'[5XD\! f1rbr" e^gO޾mC"n"P ~ _p"-1#^h_겔y 7,TOZ8G^F.@d} ל-J ;u>=zB@d-&mmɼ^}RvTQD@D@D@D Q"]JʋBEi_^ƣqtlmӱF6Wviի@HL!SD@D@D@D@D@D H$P" " " " " "F$bhkm}Կ42ED@D@D@D@D@ښDb[_/" " " " " iD@IL!SD@D@D@D@D@D Ǐ*Ҭ|yOE@D@D@D@D@D}Hl_SfPi$N൲J hXpa[@"5i/Ow;т\ꚅ}HXpyXtp[U[tds*C?BB"E$P8pP^khNbkV?" 
" " " " CL@$W/|rBM)(Y'4wDHŢ" " " " " "Z}vG`^"9ZcBM96y~[+֯L<$㡤2" " " " " "LEj1 ЛHhS: ^a=v/E@D@D@D@D@D /})WׂsMnAhѢPu8v$〤"" " " " " "R*7<$&ۏvl;ɒS=Hp8]tk^a=NaDh$IƼ>5 EKFλK$Q9H1.L㗸)W7KM9TT;" " " " " "JDǾ6);T" " " " " "Q pg[1O0o"-ڬ$[(8אBADx<ֆ4N3ao:\1k0x~!lMʄ2Hvo *" " " " " #M6tI0rȘuwNe2x.}{GsLQ Phã;I*0`^w]$" " " " " HShU]f&V}@l#lڕx5,@^/srb2 mHD@D@D@D@D H$C0*"#&2D ;-;#1ވwcH3Eyh,4^Jjuq% Mz6݉lKd7Xz9ʏ/d>UtgϞ޽;:u ň!" "Ў doKzsG B^5"2h9a9 4؍M6l֙gAbі-qmFmE`%Fެ|}a ?Wd 5gXMN?vvTa_EMf9b=xҥKYYYq" " "d^!??w,%%%;voN0WoŢo!޾`DV5 SnE}"~v; |YϐJ W,m{"8,G[K>ƍÄ pE!''h;, \D@ڐ }ȑطoFFQQדkN@"PQWv` FVzNxϼ~NZdY̓;Qk9}4N:g*4q!" "H$!W1sF$6x_`!_YVr^}Ѻ!xamRCk}vXEsi=agn=~'=qi0D@D@ҟ@.\dQ׍ĭ۱|;Q/ I%~~s?70LH~2,{87;}O oxZ< / qcL*@mV, ں4<}&?p~@: cϒC҈ H~ӧ98.6\EfsNUј EtxM66k0ʏ}N S8i<1x^|ey(L܏|4 % ǟHhp\hB2gދ=j&t^;" " iM ~imM`0\\ᄜځk/M)Kςk]e2Nv8UZtfm-xs~-u'RSFi.{Ig[8H*l xhQr,jC>]ϟ1>rvze2L njhH:ӧ#*3a59 2˶6 =mMxd)ɡzi'_ݘ^Mu(BQb\uRG0ӗb`s< :;ϟ@Ȧ(p"\] 8vG 5MvD@D@҈MbhJ $BNb*#f ?YaB< ϛXfVmX@L{,z3)W` ]mMW67M;gݲӜ P47^L!}74h~FZ)^[G 7^ح(14:04+}ƈMHjhwtﰶG0cGB`'"3O9N0m6{"9>;?xzv)TCi|SGf"ID/|dhzt>}ǠAY2˹Wޓ蹮M,v'p͐=69 h3HXlb|6! &թO`Wb#ocjCIsM点8^Qt?Ht!9ݖ~`Uz(K3&dr~`3x;KvڎDٶ@5<9{1LBjkq> 26:υbLҁF(VgJdg%O &dQ{+{VD 8Cc4$@_  5iDTW8߈rW4't mecb4y*8};qM,&v\lh\xçqDSdTkѮk˹ދdwLT^D@D ](4]h 3%9]>{5 _Wf u|f!T:zOͧ UsL-^bV t߽ݼb$r9nlجa$,^ƞl9EfuPcϦnφy7R,@u "vc1$=aٶH }<9Æex޼0h̺j'L dו˝GqhV-4h3pdr}|Y4)b=Vys}"Q ό4vvZbGK\83٩" " C c `رy:)" " %h x}q4$XYǶ 0`ʤ84st邞=sƻ|#{t9ItD fHC6lH*NطoF]zEEE'1i(" ",cgCX :dbBJ0CvYYYjV+CV:04Iի1aWED@D@D @@"Qw@8| oa u<9̬=L!as3 MXܿ.|H%VG" " 퀀Db;;#g|a(ވí>:2Ϟp*[qعsgVWZpaGeY9&SE@D@D X/" "4'8|0V87D +}͈ ^qxysK90Q͎00e9Or-XE@D@D @@"Qw@ ?UF흣K,LR C9<xU[rΡ}Annh *$O@"1yv)" "!pˆÿn8ba&!+w<5G8ԩ [sHqj8Ps1t(" " IHL48y+7*IT2d\̚N~9␞Ck=HTT;" " sʍ8}6i iar!?\{NXUJ9[n^ӐE@D@ZDbVO" "nTqNhidndqA8ׯ8o߾@D@D@ҝDb_!'" iDoEij8sXsh=rAn9琞C泬@HlyAD@y5&e MU{84,GQsn!)cD@D#pֈb#Ɖ$qX;šsH;"QEd@;$ /$" %@qʖcxΈcU7W}Y'w:u8asȹV2_q@JE#GrJTTT/CnfgI*V[" " -C/c7d2Y@y![ZϡspSÖjUD@D@!Hqyf!//y1Ebee% 1rH7 +JHhYu XqC8Ry6Ϊ#8^qR2T0q̪!" " AY"p8y$&M/TN>_E^@O>N4~x\z饭16!" 
"@o=esxxⰦ„F29gCNj=6GƸ.$,K/!??ie6r01aPr 8Ur?A}}[$ Ea쁮3 X)0cB5"zM*jc<\~4" "R&m'x;xۧ"x3irrr|švYF%$,X[[^zs%Ö>N##:\vjܺ뾆~UPآ8YF=3bP\~!qGp zQ)h ҈rHB9L9: +msHq +0`Cz97ampե$IY"}G 7EN >}9ǪU]hCG}wODV ĖɯaMiFKw }|5,-{RхwxCHMm9C8b<&4* +VRyG" " "V21?w{{P$"34*vnĉh7dww҈`W !\a><:sm>0O_'rٞeu]uiˇo|y 2}d{pZ{ٮl.e-hBlmS/ +`` 3[L?;g?I`SY%?YG@l8S}/#qd+FfF z{x /b|Ƅ Eʆ,LӵkWdggNtL{%J:8ujMQY9x&N8ݴ@IDATPҥK (1,D=pboLVVߘisSuME$6aZ[eǍaA3FVvakO&Cۖ"Ckf4wa˘o~k{X1;W ^xunldPC湊Ei3rVqm\Zt]''wۃ݇N'dnٓܿJ YGi"JJq(/2 6 /8L @hV)Wƣ}outGn3̓jj3);:gb2DŽ̀0c(vo[+0^b6$u&ynwA$ܻ|`xF6)53qKy p(4bQ:ɼ` 32y\zc3 Qjͯm?]h`\P8h7b *L`YlO0tK1g(\bL̛93|Lu.{ ѣGI%G ƽHd G!OBi7GSxcߦo-AՄ.3b&bԄcc} ;ɰD8 a 0:Sl 1n"o)1GS(x< ω1%UΜmN%OCG~d2lB qYp!=^q~sÄq5Xw% 9~g&lh4@ܮڻwĂIw^'$)>mePT؍M8?ھGжw| M۷gLG<0CJA7} -Y@j 1Fth\',!7Qu4>rWpWw_4̈́:KP~]wݰڟGnl_NI;g=h;b3<(1O'QHqyp-rxz }-xQ Qb<7"3{?k[o GM||WbJ9‹f_tdܓXE̾7Qk Pw:\t/r J9sFs H-FSjH`Ǝ댉BqʔqޘOƉ'j*P 24sǖk;c-B5W{ngLT<4Sݞh6{1b7^ 6D6t8~9H=r8D\tJD@%P~ t;6yռqC;+2o ?zuZe>8˱xwotPo 6\2B! +␞Cz%ΉH$ pYc|xpE fw扭>dN?sHqh=^x# ØCD@D@D@ D" "~TrCVc6šy|r88(KUZqy<VD" "bo=MeI>8 +ran$SB_@%Rx\μ΋ѽ{KX#06U 榌]qXX.HS8dXas%Hqyf!// J/l7nv'5uxRCYd\}N;~ash߭(q,," " "CY"p8y$&M/TN>_E^@O>K/1ED@D%EqYrz t9s9s)ݞCj$" " " $,K/!??YZȩ_ۇA1CxTh^ G-aXbuw2L\ku/P5R2W#dW\tEcC:\r#0M.] 
jDD@D@D fDIWeMrzx}l)9c̪Fv.9>Z4cLAi ttZD@bxr:~6FIJd8z)qB!p8j"_nVi3&"" 4K$fdd=Uxks;7WQu;oI$:{5,m%C2ƠGƼ/; +Ůame0]?ٴuYãӭ=-Ӥ}s{9)<q}fq6^l\ 4t ѣGq 0jCID } 3G{Iؼt}DI/D@DKYsX[]Z*:Kx&3gjq6)PR@gy(_~UNΜBh|aGMlS|nBTG(ՁNwzp1lݺ6m;q), J 1Fg.9ebU҇:-pߕǺvƾLEq2 ,kFxV`ْYXP'л׷o_ >yqm^^0/Ƙ1c0j(9)7l0 2yA0p@'FdX~~W^t OvD(|߬ttgBpép!ȅb6B-@2V^ݐ[cǎͳ'-7OOWc7X} ]`///R)$,6 Maj'8/mW4%m , Gt&-Rܟ%79d?gcC=&ay}ΜBq;(45964-3efpNC*g\ř)O}Gؿu_ /ew]^+Wo@l&hWRl2 ٜ#ц$hZ2n<7f3ѷX4/覛:[m8 \ǃY܇YsZp<}Q:gz q&3ضmb*@e~$wB>vNeK6 G4g# Fq^\ GL`v.>Klu#j ߂мQ6-} b,[+焄+SEQLץ+3k 1xnŲ%A1ΧtUH] =o'ǼoI$"0)$Z\lnڊkureP97#wNgwf8pY-u+Yd A0!fb(*q܆G%Aߠfơh}(f@f9ΫϦLy]T(~d=Hl/WRs8`\x D&Ӟw.kj_G{Ӓ:#_D!&SƝvodyw;a_"=\EAD@D@DOP9?I^o;qa}ǵ;M-8xy?DD9xd<-.z\hhhy睇oݻwo4 hc;޼z ?;km=KH8ܼy3ʐ=z8"9r$ƍ~'v:ίݸM汭q]mKm"ygJ"Vs[q.,Ha~z^YM$ 9^^;xM2ic`㵜`ʙl 9->b}#ExRվ vJ8& r6e(/{ƒM8.wM#W}Ä13sY=8l'xvF.K+zxonx ,uGj^z M+S]׍zc9:0Д pԸD)O܃)yˎb2L\k-ەZh7&[[c[9η^W_rĕ'[-`t#mQ~< _~2YOkMwb}&>"ť[k۟[ScE鋕8~yC~# o aoLe#0je)V՝xމ8Fzщ붷fČ ô~q Ob-2sNbF*vCvFQ3]-f6qUW$6$ۄ0v$ !dHD? h }u2 1z5zѽ [[gGbu0~0ArŸZXql'o}g9%9g[:_ }UP&M hߙ%u );fONuc`wN#Pu>P%ngסd=t[Yۏo=8zrQӷ_.s.&YݶrA@E|)g% \.Lf%.>K_߻9/^ik]((} v=&I~Ȟ~fRiŤiBgL܇8xĖRLjd[2}oկu]Lĺ8y\z<^;gtbe"vx?nC敭*k&&l}5I/ Ə$LZ$Gd%Ux7o"=~-QY ɪo> wx Uw|iطKsM+-Ǎ\mg3-v$[rCrAJ=}6|lyUﺔ)R{\ɿ9CL ϮRqnqޮ\Bڽ1=po=U,RrP7̮qd؎kz1_],8u}ۯIO譧TEվz0P ЋA"r.&˙w^|Jއ :t5xg|G lWBӵυն#K@M1`M?Gzxmb;v ˖tV'BuRyPY `9Ď +۰V([n1lBg ck"$5>W x1V?GJ(YLwgz>M7Ko)ޜAVT3qPdo ː}AJx18 ?CaK?0t ?y.^ujzేpcZ\y+[3"cli幨ӆ]|˵e8n&s,ͺº+?b_S%n,olBM8",M؞m2Z"( T#cƑ0=3bMG'Y,]0mq-.u<:Z.EO~I7,A{5zse'c`M&AW;88އd–|VQ?9|VL_~#-4]JlKٷI`A2 "⽨vbBc5\nH-s?E/^ >#9x{HO!hΛO&>p83hubΨ*jfɮjjaYNcQ{cFՓo;Pӕ c7 ׬'s&uLXj;}4a4/I=0>jΞB"&Mdi cL hj`uF3G$~@s*F?њGzӋwcSY!ٍ!}PȪY"mmg߱ՀOA8i`Oi_X;q׳P`CX9Uh VG︲un%B3p냁RV"fWSWpg 35HΝo|c?zPzE֡W~9_xm!/6scSeo:Dq),dq9avrdk2闛ghm77ζDД$zlKno[Dr%M5t#v]>oK/X8 ˒D_ WU ag+oc}q<q466Fnlpұu7g8bnn[:{1n} {aMݙ{2[{6ɲ)^h:{cKz*B;.&DƱtդ2\VNU2i_r$ةb]j]]K]nuhrbD2=v=$ucP99rte;!?WKQk^xt3ہF96>nַZN]1L_ɱ2PwUYUq~u=Qw\A=g1_mW@<ɶ8em rr_qEX7Bp^Γeʌ&,?YǞ,f-&4Aqs-1#v ɸخ,-uU$; 
87Ak<^O(=u6M?я⪫c=pDwsO`͟wm"_uNha"@KоAZ"{Mv5̺aB0Aܤ2v/OM 9 },V%r/AuKOdkVlRY7̊yOH9R詋/N0j9jN!CP [Z˦@9>us}"9ۅf}@ɘEN01_mRek jrA P)c'bd&"SVNΧbkW5@Dxl0D^KŘF{/$/z籥YoЏr.Ve+[OwSY3Ƕ>sh@^W_}"9*|{|#رc*:%@vԑH(9@U[6BICu/#gS.r>*l/kC64%?_ YF%ص4 <)**[1~òȢڙ"4&ú˖X]WF,v?#>UYGIaD*Ym!QFM%K_JծLX92Ǿe}Gڡ Lc 6lvSzSF@-2Vc3Qh+W6Pdv,:GOXjYDrSjEzmg3`l+2Ͻic&8|V:]IEg_hzQ~AHNTyk֚%! ͛7_TdG8ot/p fᒤ 0}8Gѹn"sC&l!$vdDdjTS$8qtVޘ>A%Bc8)K 7S\WRY%<~.u1뜬prAd,oP;CcL=h$elюjjtqĐŅ\y_]"ƕγ.DD"җ29g7юǖ<-b ߵ&eׄ~u:0W j_:JG_\}/a)Gl*:,w@ۖ?,2QtQdZ"m+mY2klQY::eYv,#N d##kMSqz__Z\.n3|@9'n~:K{myL,9N0`Dsp(ؿnO?\caMjeZD_x/yvLHg&7?^ʖ?&v:ruߕ-|֮wVl߳=jj$y2Gt=qDQ]MMM?pkN  ӄ@ʸNSҍ 0]54⧣-h+$2_+9+KT= u,an2pi4]SnQ܋ǂ̖fbIee_z*'- 'L( != QaK 0`>+{x~F}*+$2t>Wq}H(dqd6F_@5<~R?wQ{3\֏`,I֎j" $qzA@(kWh :{pMG݌Zj 3 x:btkeZ}̶<JZJ1DA˘+[lvohlylM4s'y_{[HS- Kl `]"cbԷ)'uIs'N/2|=z0&!5!O  Hl=17`[>JɌiWMZ2lgeF vD$%DL|ʽ <[McKS}[M`El~q[5GAr&drR/XOHb%Ί$ @Ll77IA\7bKLuX>M”>cdkaR@:n\; &f!nIԞ{ Uն4|rtT&^Q5ҭ[2[D_ QM֑:.]$Υٖ  F-:blkYVL3e{d`o]\%E :#u`S5%,{{Ky&70޶4e[gylLm ^K Ou× \Vx]/?JqW !Zo[=O]6_KBKDMM >(ԧ>Z|b͋I& ʶаV?~K;zE?SL8&'Mf'M.<}D-]S3>j~Ȳe-X>||# I]o:L2Y$Z'&[nſۿc.bpSx#"F@{  aK8BaTkR!MgM=A:05S' jg'j)-vQׂ[j48peṯ_(g0ږKszhDO´$2) )QWi=GmXrve"=Ӊ4"ayʂE2_{}{|%H%/->zWJ;_1_Za0DUuW6a#" /Q0+(:1iR/LB8y?CоXȊìؒ1 *3<6MYʁR\U4w5o&8L1:38yg Rx1ί~ Z( n|b*B "   P ED"܁wMY<7~;4&܍or]Nl\ 5`:|jmrx8;,z9{w$[bV5@{YraIoу7S("Ž֛LN1xMn]>;S}SXz(@LCfCrn Лd=x`jlxj諟_+]fHvŚ)5+UA@f6?4z =Z^FŨtRc?ƒ%K"u鸚~N; 2t˽o!]7A䴲-ie8۵=)RBę|l!¨ALꋗV r>-y,o;N0+?dW7Ƚ GؾvΪ"sL>C0:dN׍#p0[GBaL׮쒽q1C5̯L36r 5`}%2Y~{x/G*LL 9/q"K>O18""G0 ,_2:M Z BġCf D/܍md)dɣli +E͋pS7 :{=5^IEXY$7WN"# ?D/ zXڍuWYf}YS)Ϡ1"Ϝr#>R$7SBz^7V!]%.@MEJJt@Zu9O9&89LU:]g"nA@GhL9b$[!Y ub $5V "/k^u='"%o.\r8 ,Zg:#j]N6o\/ǶNhF]L4mಬoW=ۿFEf{㸽5]dt8kG# G/ t} d1ֻ9{܇oKL{ 3kƼ4f[sԾC'\A1 Z0rӮTOV)<-]7nxɱ;k}TGi~)yyI\?W6qUcw|vGD}y])oF7v)=̌]^slUЮIe܏=>DGa{z36M,n*zqdxG:LznWk!5Dk~Yy;4y2t(uMKzR'@2|ϝô@L ?Aݢ_A$  Pwygll':1//}j:^>ӟ/v[2wwߝ溒xレn{"Μ(}M?>';M<mMﻠq}nwW )jsdnP妸c7 "hM!Es2E8/!jd~#bKG7KΒN#uPC[O#)'RຜMge/vvs>F?{</!??A%  PmIc`yf⋊,>7 !+} =ٽugk@+"aO븈4\%\e!,~Aڳ Bt js$F]/ȼ 
%M}P_nҦ`"\z|lvcEd}S'[c!p|1`T+;z* EΦ8l$ShG '? бFu飏dY$-L9z>|4Ƚ  X|_*r1]*Ѫ5]>eZ;/!A/*}~%dj~4RvYwɕU5$;:Vku\]tkPy%TN;칺e3yv J}]rL:؂kMR֦A:ĵ4(?ߑ[^1-/U/1`|6uEm!+wn 1zʕ#:i6d,/h, XAzp>Iy]}I-  Hz_8RX}LA@A@A@NP]r d9%CA@A@A`! $qM(,  @X>lE   3!3nDaA@A@A@ʇa+A@A@A@qIqS&    P>$[,  C@H⌛2QXA@A@! $|؊dA@A@A@fBgܔ‚   !V$    08LA@A@A|I,"YA@A@7e   C`^D)y#c5hZ.R}x䎧5$WA@A@@e@vOє%ΕqDH4`(݇8YA@A` PWY%_ j4:WE}#T ƓqDkqI}A@A@@eZWƚ<ոe+㽻1 5(ߘ}܁k3nfߋ]piG6lJ@j #u5ւV*  ʴ$_鵷߄ _wqaWSh/›nz{3O_|{Y}ێSPM*&Ȳ,{&¨oZ5Ƚ P 2ҎM3zh5-Zߋ eӚd+b#1jӍBI$Zr-lWȫQ A@A@JD2- ʊGDzK^Q!;KÊD sjd ! KnZ05B55jdb*SDhZkB[g IoP$ҙF!|U]I VH"hN1VIj䯡kjF\[YV1ўRE܍dT  b*$)ڗp}D9p1CD7_f&>d: OW#"i(qɾ bh:H7& ԉq'Y;ԡgk;Tfzr0N{JA@A@T&IGLPe{^z\Hl&S?K6*.^oN } ,7܄YڑoD"O, V(7qk] bGOrCFF:Ď \觘_TկMn?)9  g?M9T&I$bEC| Tp1=K{)|-krs`9-RuZrS3/Jʊj&}`uN !wS7I^^U0R&e4ZZ!vvU3c2˫TA@A@@eD u ) Tt8f q)k)8OyekYrJ@*q-jbGVAy}晊\PF4{)'վT:h;  \G O}qHRb)63_p$S8$[l`6Y^A@A`Z8qDQMMM~ixxz'(MDnA@A@J(RiB= БHgFA@A`n P{2JA >J A@A@A bITA@A@A`n $qn̳RA@A@HITA@A@A`n $qn̳RA@A@HITA@A@A`n $qn̳RA@A@HITA@A@A`n $qn̳RA@A@HITA@A@A`n 0҇f|8zץU?W nܸ2~SoC;ָh#~ %"s)A@A@fOu{k>=- lb|,[Hu>:PM؈ܷDY7UlŽMEXmypG׹wNF(,9   %>2y,ȈI"Ẍ́q|d t]RRT4I0p-w'pcZdAC{LBHdh+RA@A@B`ޮçT&fn1#cv&?-'VDk.eq0i ޓ8<4]+{=ccf}Ӿ-r؆r,B vds[}S0{ _&A̖:w>$Ogj"Wy" Dk"DIA "VZ_Irj&ZhY5T]1tE}FϬrܩϩ%KXϴ+\@8de3SܻPJ&E0J;t@o}vCǠznExISVZsjb wP-Mt>t0I,chfYe&czXĺ\&)KWFCM G'=;5|lyr0!u6Omf@2̑K, /$#{ >ܙ!~t1lr'B=44R=@&X @/G]-huItXfh:=ji4 aGDݪ0F[ShLjR3 `6ҋ}679}Bֿ m.ɣSyYv7gT tMaU;4aAc |CcHJ09e$yF}9N,U㑹b\G&5"ۈM;Enqx]֝dX7֑ueѮX踚~ W]l\mqO + |I&78κˈlDV}vU%-CnaDydeU P :22Rs @s:EU[듟D @G/uw !}"$G{5Eׄ_:}P~LaM|:6י+hb|&ڀgU@GDDjS;j_4ķɌɪB0侦`>7AI;@"N0IbhڰtlLY:ҝcMf}gqOg?@oݛ.٢yW׺9Ͱ2, Qp\^Z[͸.VK6v~y٧* FR' ( U JΗi5d6QtqU\׏A_ 3lS1M-3)#_ZH7^׸}XN>GFm(j\WvlR4XLaE܇b땛[^H:ŝ(ڍ}ylBz-ۯ]>n~OF>F}k1xp`]3$6:u5_;d{?"=Ȉu 7t,p,1߿8-9ˬ#EP16چݿނ9: }6S["Țw,|#Iz:#U@Eɧq5TOmIv9%5 Bs)Ʌ4F+S`7Wެ~~FW~j]z9t:;zƓ&_ Du{-C^z|boiHgHvSmʪrl S ЯfPp\6&wr/!:/Z0e;ѻya,ݘ%343iؚX +ZKgF&"l9 @E܄a~sD8A5G8Pk.=3ݨo>gd_ɔѺ';'dZQmI-Fk4hHvS ~WIa%3Cɬ"`+ZT+ŗFn$rx,R0j{&dC{UZ4YlčSxr^%j_򶲵G3 s Xyvnѻ=}xn"21B&cS־MNqb-:]l~A 
cTMS/FҔ[+bf4}tEh6K-G̭Ӡ%ը7*T&{3ը^_ufFnWIФ{7n}~OF%Mpm%c8jo`ϕ^G3E\5{R(/9۠F: jhDNfHż)\*5B֙&[k(1hCe[}ژqߓq׊rL)@R ~^mXC#hH6k IϘoOY4ڝ6s$&vt/N)"8GkcG5=UR9*cg?$$q.κYȋo݇uMqs̹m̼;R얦\đ,̫T8M$QC~f:@j nDڟ(M r}>T]K[Uho@P~aM6їhGƩ_:h,W 0 >876cAyTGe8?7.ݬ˦&r~Ok8}pb Pf߈ў:ԎmrkA1߿ [jˏfO5gāi6GYu+ Xk;Vr6\ lx'RV^S"Y  [ rnmbw]{! î#׭gb}Ab(xeg2p9vĉOMM0ĒXXA@A@ppvIk)i}=<`ΑlŹUHb1i+  @8huJG@%W8]M03-tV9OQHL[  LiN.&fO ".5dIv>S.w݄kp6'!yvel  ,I۰t؋Ȫ|&X׶s@=cY ÍD2A@A@هoTޒl?lc/H3^ Gh?'c9[D{YA@A@e 2<+ʹnY!ۑjj/ (NJ($ўmyA@A@fD e 43w.>P5#=fS8fS"  pӿ)g#6.gEd]FJ,GP)WzT~00!0   Pj+USE5=U.p\h٢8[ B+aU  @ *iבsut6"[&6m/Q;x[G`68V$Iqk3wS,}[b]fT \tSh(  l@k:<^KQM%2i۾ck?̊$.Ƕ$@`)ūV6anܸ2S$7")@A@G2/ 2k)g#Ns%u0ƨH/6b5CښO\>W֚S(I&Z,`G=z'y[RHJ~GgǙ@QٖVSw>9dXKt_53^O|[ʼn^x%{YD,9υwg(%;#VXt' A CxDZr5ə6b1mg %CBsQ3;JYM{G5}Y㚪5t[:eVz]_;aWL m].ܝ:7Ǫo *S>IA@!%RʃMH5ГJ#7ZDwe%ݦ:'>ЗB&*ߐM8CmO#0c ʷbU=fC*S2ՃQK]3'8rZ6̏;TӁ*QtY;svp,j4TK X3t:i*ʁlf$X `\D"U+/EcALp|"G}rz%DDĮ؆)'-[vRc!N·Oݳ.#8 }7p?MnwW7=؁R=ǖKI1&~Vw9RU C`bмU~b1Nt4ۍ}mO~r-D,3}TeWM^T.yFDS:z\݃-٦v ϕ TBfHk_.se^ND;bT< kiF";Wδajٿsf[$ȏޗȄsFCۯZf?l09uYr%kh#nv f*sx95}5$p}ݑ<[Ǟ!WX tcԲ#_I4q>Lazl,l= Hj|en~{3m${#Z{\]+ך亍ګaK|lUɸ V3#wܧ!g&2t02H im%Qr'kTU%L3z8p8rt_,5cC8[%F5d&dtyEOEzk8M/5ZʌÑ91>LUj12BP~"LlH 2DSgP:6CrG1B]R2u|?nT3̘Ps-^*IqkOy "1f>;pq5CL<bIޗllpH+?3Wjg12Y4ɢ  ɻaӮEr.Vғq.zv3n{rkMTFOyskʸ;h‘F{Pͮ}"ץul˺Dҋ.8DrWJڊ"Af3؄3Ƒ+\^KGh =NrR`?Z:,yrI7Aa  j ט؀~d6ۘemt4 K{$XD*ju5dtR|/0Kvk͸́Yv,-+Yq9w9UFgXƵ3VpDMvq,gZMk-ŮSDʫW,(LKLmp9}۰%Ɏ?}q7@exCn5s@[kȂ]9A k@=k!^k7~y>^f;}v˷3xvܻ {=.FV&/{'QV?S \uݑRy0'@5],IdJh]Um-)oyo&+S82 TvUvwkKbp4;]nzY˴wjnlLsm .'KHm9ey꺲6yɨb:M$61HO0%AW&xY{&~<2˭wg5i5%c3^Zh,"j+#vb,hAJL77 jRJz֫=z13 .(ʤ6tYٜyOR:-y#mmKYyg# ]#Uy [掶z3UOD&{{d=2oζnm+Q9[⺓ev][6s7ҋƍg&g]f{o,SdʲuFؤ˔MMÈvS{}^ۉHQdH#N9#,v7yI ct Q7uMOլ- xbRYM>N0Dc;ELY.n%eA?ƒV؍ryD:g\w 5)x&ldH܀~*L»P]݊U`5fuҫS3esZo5&g/#yL%﹤|VT ƟAi1RL4 >q44~ćIf@+DEuhra=eS(yyʥXL$IlQ7*ֈF \B:UQ4ͼDטc :FMTRr(H4&ԋo]ORТRP_qjN;pWHd׽A0\9 26rPljQl>n_vڀcm W۔τɟ@ g K0a%Mlr$qf Au 6;b*۞󸏾 ֎+ʅ#TXϚ2}+mUFbD}6P;d`sDF(賊2%%W`(fut6b OSwc K PaB9yTIÖKrC 1??a/(}v튥p,D%|t}=IT 
W),A@(9H8]+c!ԅjWP垖9xT"*(k)"PRMV((u&KBO\V`ụ/Z!aY <|ՋpgҠ䓪8L|H#l)8n߰kl wĉhSSS䥓vc^41RKA vG=[QU;{\Ud͵. ZD- VD4AO3YKڂt tDÊq "cSRXA.]o>hBƚ/D>C?׭Xsp-ն1&*= IA` 1S8u479 DQR 0C^KӝV|`RERc3|TG uq̄ Ha˭DSl nZФ F sVq,7"_Aô-;WB&ϾhbugBpМ8)8HojGc:ʅ$N҇   P"hl={ӑ6,]Ꝯn׮6zm($Gu+Y >]ܚBs1A@A@Ab:|"jbp.]y(k-:eR}Iu.JNB+yvD7A@A@AB}dJ-NʥoȠQڈU+Ίχ+THn o׳$ڈȳ   P"Bsx\Mrw-8iOoU,Id2娏hdܣ:^8mG0,q=18.H))U !?ݴP䤝   0&ϢV VU. l,#Rols$[e٢/}āuݗˤNk$A@A@/!R'1LG8ϰ6ƍ3_ZFvm,7JbZآE O%ƭMWS n((IA@A@A4#(X5WӰMz=@@8y̻"X Eua}xx^'#ȜWdB9N?B%uA@A@ӄ vYBN8B#8i{0T3AR-agcx90J41Be I,9i'DH$O+3q sE"l^'z둨mTJ5NUE}P!!sY d*&Qm~~˶V`Nڌ-K#8#]gNHQQjʣªꈳiPOI5>w%[`K%Rϊ*~im_4xlXr˯əzKWMYWaUI}̈w_Ն2Xյ1']MϨ'Nô]F &$2ҎM;zOմ(5F:e=5ɖFcZ):j]\J]2\7@[~on=tʌke% !91tk7fӭaX'6DT8]/tz^%10҂& hŸu-`dWVrM8;.?~}fݽcqVלcwGfW e.su5psJq} [KkƜc}:RW/$mG#s+}85u+CUqR浒;7g2V$hZKNYvxehރ]r◧gוO~$if DMq&RT Ǥ31L>}1"Q\V30kSB'L6K2U}%SA@A@A`!P]لQ~xx<9CPC<    tL CA@A@R $( A@A@A@f BgD0A@A@A@(BKA@A@A` $qL CA@A@R rSI   D-IENDB`docker-1.10.3/docs/installation/images/windows-boot2docker-cmd.png000066400000000000000000001110741267010174400251230ustar00rootroot00000000000000PNG  IHDRCjfv sRGBgAMA a pHYsodIDATx^=lK4pK)j\lB5J$(j{d DM!HD-p+! 
lŊXkf̝Fh4_h4FڒhTr4Fhi[*jKVQh4Fm-YF%Gh4d5Fh4ZږڒhTr4Fhi[*jKVQh4F;dqm-YF%Gh4GV%,Σ-Or-YF%Gh4Gr#*9it%nWr h4F{ZUڒh*jJF^;o_/6N϶9]nI¿!5|h߹XF;Z*8iKVd>;T]ujM抌kTI<9ooq,j]_B5Tr4ڽK*ZUڒh*NC%G}t4.*J63@|!h?Ilf 8{@[JFV_&uCq~i|Kg|0R^܏U|%Rn?h2YY]׳й rc4P|9mG'X,C\ՠ^scq;y:moKVQ Tr46TTre{{-z{20ĬƾCkVڤ}(")Y]>-g]=g63vi*eŏu o_⟉~%Tz}CL\[*jKVFVZhm'[i)q~~:~+_춒6M\)Y֮9f%cc"X(V<{ "Mܚ]۪P\&mM+v.2gNgbrgfRQ}U[J"N%G}l;ʼnMsWlf;75Jn 8ouV-U ,5Ğ3iRf%ϕl/m$GuMD&,S|Jz;th-W%=PhF]돶bGa۪s4`D|Os٦nxͳSҜZ)" ?Z_ЙFTT_ՖF{ 2hnyŰ6_BF$[ݮA!-Xzp+to7-hZ>Ὶ@7 7 =YͥQK8}'XݿYgfRQ}U[J Tr47iտ'm{25)&J#1Юdy|\9{Tf-ikKKj_Tr$%`Éel5 &8~3gՠh-W%Ѩh+6ݘ'uc "fU|c W-yY?FAO;.vZ˨rz=b)_L_ZVK&`.`s1˯ѥ%aZA#gvv-i45]Iqv+WrhnWr4mr0-5/F{F%GѮh4ڏoTr4FhWmTr4FhWmTr4FhWmTr4FhWmTr4FhWmTr4FhWmTr4FhWmi%>Ym mIF 2F~D[ю*R'?wB%P\~_?o_o@2D}~/ݯn?YN]\B.,dRm'#?r=>x ߿|p)wצ6Z؝`TBdݎ}; FMwwSZYs7H=ahO=YiˍUV}%Qws\quܼʝj'W)N>?TrxJ)O}ޗuVr#h(nEGesR~Aܔ"ة { >[YYgT6aUOsHb7o]<>x?^IŘmA}mE&x-Z6lwn/N_K4}el@q%W%=Y'[0;kQ͘ͷNs\P=K::[ӑhrpt~xif犝~_-c5Pu9._;j#k.B>8]K4be_땵/C@;*9/ӒJNC'nX Y꛽a7El` )VtGcD!c/K6߶Wc1SQ=D:bqulu(ZK/"?+YncxȘY5s?3MTm$Iwm0S +#kT.\~"Ba%'e\{Ң'\ivWN/9 vV;hx[~?%Ӷ:D:C r"5Y˜ձQbQ TO#IJ]bfU߫ YdR-K>3íBڲ˼ fZr}ob\+:jx;*Ɏ8W塾60^ʇ]E٧y7.Z &O'{/nG<ϖX6pל;ҐI 1Ոu]R!~!]:Wcrpit+b W3>N>=;ٛ? Tr|+\;ڊwUr|''_*.}(i,G;F;3ғX"ͳ~t>Y{ObL>K"m_a(͎JGڒE*o*,|>tlT)L}#.HL ew<_;X>ZvwTrZe?.M*92wT҂C]T˝QR[4Yʩ~j[Te&R]7ǞAP>OҢ'՜@Q-VJ ;p\زFg%gվq7:\"jfSܮLz<ϬJ#ƕFyWr4^8Hj=Vʐ<TZ _U$-&组9K%|v%wrBe^֟\Xf/dZ ^GjœKx *%a

`w=KW'"[q+N`%m*/Qޓ12%%RgN>+V@Oz9Gg,^EWrcW3J،q{Cj~5"؝h FTv2:Wl5C7oe߿~2˖gU Jt$V1@O:G0q8Ow8وazRy3+5f_W:X(?:Iz 8Bǭ@;̓\_ 9$Ʀ[t 1vg{Ps9TOړ6ގ`)C^RN vf=uu8\_ˈqଊICz` aOqe`N0\D|Hu2Xw]PdCB ӼM@A1ζW{ao3˃;Ҥh'ۛr3m$5v? XY,0?nd{DaŞnY czT?EG!ۃGa ˧WrKOt;Cv4{źvH9d=5ޢgdG8'TGڋ:5u[ dͯ ! YmzKj]O?pr[LNg1 ҹ!VJtRnva+}`GIu٬ M(&/tt>af?8%|mhInz}7}G:GA>X#J>)aZvTrL9/{E]ȱ{܋hӽIE'Oߛ̸]d F%=:)ڙѩD;9F aFձ|/=rWšGFm'>>Otx8,c-N(}˴hys~ ꚏMogjC%ݱ,g/֙\{uE JVe,Xn–ZNjAgO1`,i}`}D8_{ wnWH?Y?0Oj}˴hy{1tK۝n-l,p!~p%A@C*VTrJP\*9B%pU ܗ`kG#A+NC%N 6'C%h-fWw=;㡒tJWjC%[fWrRJ?Ǯ٨Ŵk@; XpeٽvHi Ӽ@%pU U*TrWJ? +9#[8=z\JT@;Õo,PgVrR+ 5]8ƴ)Z?CVJM{1'H^GIq6ulhE۬PGv8 |j%hT:xoۣT@;*Mqվ|tYm:R\*9Rִ4Oj!5KJ=ͫ`*r )F᠒ 2+d0E$ J+Nh%aL(j5b>b%(K,*9R4N U*TrWJP\ׯ_ Yd 6jGW# KbyYE7דy|Jnݜ{_^ӐA$i'|RDC4\UOze|pߺ=}p_wxe] Ť=>&A,9&. TCc<֣< ӼW߿RN ۡ V' sQ2G̪~fΞ:5B>z"S}%eItɆ,ċ FVgϪO>JҰw˾lHo;rL؟vvQɫ:H¹tG[yJNk]m1X{[ı{)C}nn}, hgeLӠJ@g[B#~лS ҄ ]DgnP?ۙ,RyC*TYbtlT|Lˀ(%9Bn[ =n8z>ƢcX(yǯhyO7}SUE fN:~+eNEW蠄+:>tya{ O>)N|Ӿtueyȇr"lYG1 1.sz˾JYOBןE8hME =zӟgyshyO%O }YWJ{za(!Ufz2ϲؤu@6Ӏgdž]~z~,4qt5q1N295oMLR}KqCEP,ĽW_N<\LN[$+q>Sm}B|KqCh-!Zg#BMSJQ.up(3Ox*ж4VrQ^2d#41͞{A+v=sKQC|:NwuEs3Cg:WcAćvf?p ,dva87z+"}Y uءa PV7͝%|oJA ٺckHdzԮ>Ya UTyN>ڗiTLki^ 0z.*Z>W~<{Igy_G*9WWr5XxuiiO%pi UyJ2vr>ѿXx~}rd^-d>_7굿W4$pvewZ05M2\gj>y׵4Vr=o/yh&o\j>/ۡCa2A Y7ϾV÷*EjN;gqj51Tr)L^w5U{.JW<#tlp ݚn*eunp7sо8وazu du 3+eIytٜ3!>q: ٢#mWLc T}X}{\^yLGŤG46bvIfW| vZM}k'nqˮN*dln H=TU+#=, n#O+NJNt}iOdat0CYsP sS_HgAYy $,9:}?ِwxa~YUiGC)_B|C\n M}YӮ׎7*}\7yUH"='K"chg'JNS]U1~y^Vpl`FaP}KBk~n*Y.4h!ÖHnT"4!3w?%Y[f/Dy6v&aseDF,˥x U!l}>F>]2[t=,d:2`"# >rib=$–Bϳ3N7Լp+}Xdl ~s^p yZvG+zӧUK9?]Ͳ{),GLxyh#%L+NJϟYAqaO(!Ufz2ϲؤu@6Ӏgdž]~z~,4qt5q1N295oMLR}KqCEP,ĽW_N<\LN[$+q>Sm}B|KqCh-!Zg#As26V܇ AVJ>՝Ox18왿/abMI@>9ħ|WZD;:?tU:֭nA|hjA2ZہNBfƹE_vMGjPEadfK\S#yji^$y. 
Z>W~|: >\+ϾJ\}L( g#L+N}*9KC%pUWr{?}T<;.o\'jyyZ9FWb!;H.:O|߆hk)̫;u{Rz=亿\1ȻhyyKJ6a!$j̩ܵ>+$_*O56} \w\\;S z=xI_/O/z;p7f/ߴ>r[Ar"8iM85x{Mq9tnN=OJEH+I:6]nM p̺Y\7`ցSh_OAl0=Eqt2g:݄|ԙ땲T}X}{\^yLGŤG46bvIfW| vZM}k'nqˮN*dl7!Ec}=#U+NJNt@1!|W' sQ2G̪~fΞ:5B>z"S}%eItɆ,ċ FVgϪO>JҰw˾lHo;rL؟vvQɫ:ƽy]lgn4~8^߭#,"X^ a[86o0#wύ׊%5?7 i4U aKP~z~aQdҟz-<g;2`"#RJSlx#.[JPR2I\04T aKm'޹7OXf(4Vr OrۃZI@Go{El©S pe/?wiB##IoZzT,4pY.[ ?2~:C{>ƥwQoCx=tYh\'->w %v*9t1Ӛg~7!%L+NJϟYX5{|Q5Bw%}3͠deI(mϖ ջ1n=Y>i#Dkbdv%ssjߚT;I#T0{q쓡X8{vxǹIfW|U-T0Ѻ[d?B.=jWsi7Ff{TjK[te5cQp4Vr]Оn'| 6{ EX9!5Tz7ϡ.Ee8)Յ΁_u[?{fyЃLjvم=O1e)6cV-@Z4wV0CfH"~+-d떮gqnlE._"eQ~d< +ȶ.%2]Ghy{177سWɸJ ۜWrʳtG|zd޷ͷ?8Q uu ]K[\q8ܿ3SeI=clp6ju#Nr6b(nEu|3m|iSdYmʑNse9ʴz:>h֏;Gl0=)&`. ~ט*pj̞{SI'ȿ0nud: w8׊Ex#hyz%aq'ڝ2' ً|tUI h #=/Ɓ*ɼzk%|_Wrfۓ|j:M%nsDn$'Qͥd5{W|.=.spUv rj:YnN:rg3Py#4ϮM237^qAWHw>1՞/HLurY>{Ny<6Au|gquI{ga+#{%*_0^1 kz֐o0Aey5pi>kq+f+CBqJ2r3%=-3Z^pߑ&^g H8;b/jg|^=1xJοѤľ ڧ ]>''~bjG?ю(H8t|nŭdj=;Ϝ$w6d:a:8<# 0R(3,?|J˔9+УY_Ƕf)OM0SӰj)N[FIG+NJN(/ʹEy4̈PǾl.XyK 'yKzvG 8M=6j=m#Ņم0[O 9s蟮gήsq<}>d$ 6,7S=~|}6n(ۅkzgQ:1ӵ,|:4^R( xsMdyF%yൠ*TrWJ઼R%~X_ҿ'h 7ų&ļ_cH <^Cpvr?G+'̰e'p?tt'?h?܃a[v~'? ss}QJo fUzCߠϊ O႕zw'=ՙCEHVfOt<}D2{騸M툊N1(QbGlIA,uPJCk+cxh'oϢ}Pgz`BJY r4`dM5$O+HQډLHi3?Yq<3]dyz;+~Ka9cgS\sO?5Zv'Wr1?a !>B_zjO`3=9efIMjn8y:^sGq*`-nUQ!8O03=!wY>g2v&Cjmٴ{dj7Lv!АLO X7vL*y. 
so[ 3\L Uߜs=窳uh6[Niԇ9@;W''L 3lUN#cUh^D,{-S:ۨ#tL晷LO-`g!yY\2Bw[L촯c2%Nn3"i>vϦ+#LjS^ ÌQdcYDŘu@u4897sӸf%74dZubCs\b4{-J/C}nd9/h_ QV|Lauҟ7g4_# b~Tq#D&tv{?O5d6`3[;^ ]kqog3 y>GDP7g p=:p[Y3,SkC?*njhr#!!p[~r[4P54V(OHOx[q!آr8qC8oP%ĪĊ)Uכji)kUָaDYǏr"U\ iu쐁KifNf:Vݛ?w[Z9S MKh $-G9V1kN>{IR^EGßU'Xq,#E9ވg~hc^c1>>4dni^{{2%84ӽ,G; u\]n[_IN;QLͬϴ;Di?dqacgc*% '=D]MrwehOꎕ> l$_#ԓ@5ܜMuQn=̧ ֧ڒdv8Կ#ۤuЍaxv^U(Aal#}1vXgr1YY\OƞXD:>v!o+Nx%W;'6:`a"ʿ Xǔq_9ʭy:q}>GvGsan{q6yCn)#G:dqO QjJpkcai2{Ұyw4w!^ǐv5Ma4q,O2J1rOGN#R{ ` zL] SÉM);YMC:j>,hiܓ=<.}]덅o%k-G>>^'_w:|W{u,%+ ;?L}sy}r\ko'їBvh%y_&~ŐGhyJP\oRǿ19d(b?wîO^'WO_Mr~}s#OO+NJ }}/W]v_uu7n\uq}_\O^<}9Å>y7~+7/a^Skg͞n7C!^Jg8 O|]=z o{ܼ'.0DPeIi)*3!c \Z/u!uY׭k'93`'a܀_{ۤZqȿof#jNG*ˌ.wcÙzbqerרx@{"F:]_vNŒ,Akȃ q7 OI.yS^I~лS P$:5 =Hm 7YהWED3O B+NskK zKu6}o0 Q7Jg}xfNf:Vݛ]v 3.;INTwu ҄c{ӎFyÌnYl:GsXǧӌ}D?[gh+"g>eI~лBSdJH`}7Yה7!BChyv%|y|7Q왎_ݐRNn{|QG C<{e2q> liH cB:z\eD3=x޻JnaxI,&8Xm Q#<9&)9$n?sM}׆!lsN=R}Kp}9ki^ 偯̇<6. NV'sx$2R5ʰ@=~!!?ﵺc_z~5i'f ֹKg]Dg,wyUI1 9 62^GǞCuF.O_-. nH/!`/d 9z~A>t4̐8l>\|uu!޸LFo< @;*뢯U~eeJ4ZJn-XE@%wȻq#[i+Ns+9JP\*9$[2;WrwlYޚrkG qO{wN)yk,Okoqvy|R!ͱB6e,B}aRC_!0O^MWxzuu@ W HƆ-Xk%g=6fIX1[53*,OsWqvHa^*YsK.kxX˥ɦ埒䩕Vt[Ϭ[9ɺ߭ }%Э{vW46MGrݫ9,5([߮6VݟgEvP;X6[H\^K Lی6(贖/#Q~bn˴Wrdy*s9.Bnk2*bpmuڃ6|dQv oqfY$/X̹{ q5=_hoT1dk94L:&$B Xż:9]gץ[rɲu]%mC]o*w)nT ,- iG;b_)}WMY)jۊ}d=6Bhd^v:2,}ڨkӵʋH.u\cLD-vu=p,Wh tun~cKH*JVd$4nv?RH}1rply2ӣ]ٖž1h'.Uoۙ 6e"T/_uy) :vL0%r =2isyۮe| 9Y*r[ !8ږYRJ 5'Kcy1ξD2p=gW:lid:.̂56ea/ YXJ(ۄ^MguCm%g"nb ŗ M9ezlv#UGS-1>OaY}A3-6\9.6Ffd5tӬz+lvmG]ވXWu9\ʢ'ٺߪݢlc6)ra_'.1]\"5z=΋JKtG^?ڷO_WrB: ڻ/ft2VC,,b95/Rv}%mR1_dvo|Vr@%wUii~P*9B%pU 8~wU>{/_-Sz<>(?_]eXhjm#}c:{r~5ŽOoiqvPl~۱H4 /cqBOIx/0Oz_2sT|^JwzZG{aRiTrZ: zSIXY/ј $\ZDaV}͊*6qHO]DŽ@c/.XQJԕ,]vvNOab(5J+s7[7b=0J˰@vlY\W{gց)\tݢyy3+ſd?w÷w=m/=h21ݻ5Ͽț'%r3/R[ [*ǽRgut*4Y<5&F,Rrdy r_\_eeI#T7 BEj9[./Au"~TWuޫ?RI{aχkZ|J22U. 69|J.;o῍(۾*MӼ EdxTŘ}LW.KjܾDuVXg7oқcR;RRЩ,4]iCGfy uybloB{ .>~&^2eNu}@?R?e˞kEXVsi|֗0G{;qlo/Ħ)OԷ8{Sإ*ՠo}LXaS[VKøwW: `K` "%S/3! 
w[f/܄uIgKz @.˥ypݕmgssbCɚ͹9$lķ8t݊ oSN7/qD2'Y櫡 5-Y/QBEm~Ve哃P'M\<}3Xb^fn=Q<:h["p:%=u}r}Άt{NܼG+"o\c$.}J˞⸉ahxt5VcXMTߘ}yEyW>//1/G52CdHVuԵF5Ըzfl}|zֻ:!)Ӵ4C2v3.yjѽ1uM}WħW2V>ůA{k_Cw÷N~z^-jp*TrWeQXp/_v U*TrWWrX_Eo^sQw$`H?7dgiq8^Yero:pإuPGE'B. wJF2V^F"CnrZſVrYS_TUEN0)-_|,=XiHIw Tr-n 0ekf%U6C =J֔nUBUc;+, ofXg+9?jf&] o÷ @˸nBPdlW-&Ms@Zm"UɄ%=Ŝ]Ccyf:B+"*Ψ*hϖ ʝeMi =nQZUx`jI%l%%LMr)=\E:vID_~:Q {+9x%'HzɰKUj AOUZ*VE|_t*'3PWg;K%WNbh`Vr°ݬjPsJnZ- ?mm+9[ Բ#DN~+9BcuA=6UĪǭNKϙJ삔)cyf:`^P[UrR1Ȟݚ/\fW^=("Kk!jC"5z=΋]N-WrFu+)D_:?YܪO=ߎoJP]k;=|>*9B%pU U*TrWJP\*9B%pU U*TrWJP\*9B%pU+Ur#Aj~:>Kίr7L~3}o0/'S"M{>% <^pdcz}-ߘs*|B@ٰ Yr u{y]g1@6VŏNci.[Xc@W=MH6*>fef͸\ya%W}T1ٸÙ*fuYuj?~ #b{I5X}%c. TϷޣ\x@;͓+V1c7a !>_zjO`3=9efIMjn8y:^sxa=Bȝ@1Y\;flT'ȮWDb=-4bfNoFV?c:#;EIr=u t<#͛M1%GPwR-=٦$;. f.Vؽy*_>#g2BíIsq>_ܲ@;W''L 3lUN#cIv`o;13!##jYSȄW{#pb86*`IeٽL_?Ր<})Lܜ G"[M+NАQCkٓSIs\b4{-J/C}nd9/h_ QV|Lq~\jē b/~y12G6v?=,ʉ$,֯g eh=>К(7,y1%_>Sl`dwϭn•,n=DfoC :p޼1 !^+N \ASjUH8EOrdykolQR9!G7bztbŜU3nM,9$z4ӈWW}l+yL49=_Ĕղ#*=Ցhl`ґÉ:ey߇gydceؽe)# ӼD%W?pIqձ65= K,Sg "JDkފ>`Dag:bJ3 sr呣B$n ztc7{?q܏@;\' >fŷP Y:̟߿ ܚG%;S sܫ{\sr"ED;SZsqwI~ߞ"t(Yb֕HrPzWcF_$*y>S>]!;)Z/9q%1S;.N)vȚQeX{^?ՐznKZz@;{{y1oXR6O?.㪕Z7YJV>h<9^3*[}I+^!$y󁗅 !OT+NUTrי/eS&sr|}ң;ϹJӻ?EbU~-ϸEc{޿+:ۯF@;ͻ*r|IΆ}CfDJUһw̟skvbHNW򬛩[fQ4j:Eq#o=K+NC% /R[5cwq=#f~pݲaϼ ,sא R~Ete-Q;n./ W0oyX4Q5Ϣt=;VhVXF56:2{g]J[ݔϨ:[Xeݚ꩗ 00;npKNn'3Ro3WaqEߑ'VfbB%SѝdgF:Sj'"9FTݷKT\GU3Ӻfk.ʌ= - <;9}ѝl扊t`2Ў1^hdj8:$j}C{Ld[u7'ia@3sUC'7u@ ߇]oe^㶜QN_;l!fv;:>=,ufk"=</%ppGdyZ<[vgWrw.fp;vP8f^pMo7427iϽ }/e@Z7Ɇ,ċ sw#'S.D4i{ܞ5F"[;ڱY7C] ֻfUq+o|J ^Yo#YK0:+]ެ#L(.sK+NJn5cSI{!90џ8YvV{ e ۅyP?ۙ_R)6zvE?mOvB6sאn9)=e?g_ҿ\L'P$ f~ Zr%]︺G0:{D0iL{u8 \ji^;$CkhiJ=un!:w 3j59ɇ0NLviYs& k|w#={uWq>ӻ~MnS}S=2zf2q#觳RpnNY+G,N #Z .a -c`zDE?mOVB5zȐFHdSkOP0*XO0sO; g͢h}t=M`}Gdyp|v%W;g>'ΌYQML2I^4'onM?3|b+.~QkRƹc _q6cV-@Z4"3/z{7^gOsBUg0JfR­yyYb. 
P{<㺕5J+Tpuv[uP$qAO5B7 5tW\Xgq\S^ue<1x4ONnKҮ^X #(uHޭ0<B6&CU}TFl⸦: 7x Yi TX9M[X j5v{ױ~Sp ڰz<}7q֩G}vQjo)Zɵ|vzK"B,pL$n?mp0CSk}OR>N<)BV=< *9B%pU U*߲{"/6?5rc27ffOU/ u?6QTCl9++ؚJYy/(7cI yl;'RZu{]Xp=󪟞E/FU'",3 8pe,\h݊MSWs Bki^ӿt?N -z9=zҳNO!,&y#5H0jo_6i7qo~ۼbb:|1 NL\32i\Ξqu ̞긎,!{n/7[xړU,r`ԇ l``HIȪ"iGEi"hjft'Y@rJZVNC#,É&˳y%Fkͮt޳r;WqЅߍ<04*=4OVYǥܰ(YOx~L6re5Ž_ڲ ٪dd(,1mXxyd_mi^ơ^^d~mOq2[3=8M߮|51<-.XaL45 &r.e"F2{e,]*lVFSk^}+e5-欆PqeY$+eXKhyJ'5y5eP^Vw4Sۙjq=V^L$LZf66ٞ4O@M fvj4Nswׅl7w<爣,[nM+)=.ņL +hy=:*9[\oWtE~bEC%[v*9JP\oY$nPHjЫc;/L_,sb?>z6Oz_7Ur[Y~/]eKLFGSh; 䬂w/qe_i2}۱^-5f-EtvO?#'eM=D_+Oa^4w\Ƨ#ܗj芖%yVj= Ӽl%{䙯^=YYr7fc5}P{ɗw V5xavmr\O?%'.D<<5TrqwЍR]OTo>UF~kG;,2"m'1YEFTK>vUVдD/"ũar1sZSD7yq}]{.JW<#tlp ݚn*eunp7sо8وazu du 3+eIytThi^;9ֲ@;}<gt!{,1ڇ9ʭ.䦾Pk`q {a1ޫgۈE2'] 1im'7u@ ߇շ/^;I>SLc(rs4/[)>M )Z|BV6>ҕah@g1i>ؙo$)q7 vEf:մdPs鮨LnǸi _/Nce=H|_eIduYsu-~!83b%|ٳӸR꿄4l,.ݲ'қ\?/]oTnZv۷~7ś݃V43X}ړm4P,Z 9J%-cb vL 5I83, ?hLӠJ@g[B#~лS ҄ ]DgnP?ۙ,RyC*TYbtlT|Lˀ(%9Bn[ =nO7F7nTXG\/XΫ8XBFofjZK9xU_8Fn7q).t:dA-Vgg>/?wiB##IoZzT,4pY.[ ?2~:C{>ƥwQoCx=tYh\'->w4= Ӽx%{'O7/c?2:y:fwhOӔI':Ps}P_,VeFnbiUfmmY`ᶕ}|l凎!wNA']3S$+QS֤I$է1 ܋[d A{(c]>N285:׎h!ħ1M";=jWsiTni^+Ic('f{u3RvPOog>GRcc낥=m_?2ƞvȒtx?XEwuEs3Cg:Ol>z~ >t5\@'\g! 
{ƹc _RHm[i.a܇͐E|;V2Z-]("]^;E&ӣv5|便0Z>W~yP\oWthdmi^*9B%pUe% Oe?mֽ*vOZu;=uNe0曳ZT>F1tBܢLʈbz/#H ]<لZvtmS>~}&N-z9=zҳN4o<)&ynR]B`,nU`?T>L<x6ߨƚTw}X7Q PC$, o~,=Dǎe>UwT0@;K''E[rIg |p(3FQt'#j¥WHIQ{6ߓJtfi|IPw+foB)Lե!u/B|#Ih'0u4ܢZvXLʊ7¡hg2ұ;9M-Ԭ#$HRɈ%0-h[>BOgX{RNd@=*9~>넣Jb} g>YG\/h6qЅߍ<0 Xӆv@ےcI%:3(hsR "X+N╜쫺2ᨒ(2MdJx8ړ4%jR#c`Zq(y>T81ASHQ%I/ B J|KAQ>3EP4ڝDErܛ[|>5SLdii^;9]Og@%pU]%~WyZup 4Tr/U*TrWJP\*9B%pU U*TrWJP\*9B%pU U*TrWJP\*9B%pU U*TrWJP\*9B%pU U*/]z !7gGowlW+uhNݩ |3]+)Z)B4%f*9v{Fh,yN"f#eL@g8TuuY .Fn"/V+Z*"CR6=֩)d$ê2Qpb?!QVd# b.ƒtb:|1 NL\32i\Ξqu ̞긎, JN /o}hk(2;tl ՜kxCOۋvt"*#v;F[XtX8pbh~cޕXLD?-iDK-Y~B4MgN|A[vwTrNj6G XcXۦmY6#NF'HxUIsWѥ1 3`7kF#dFT2{n:xt+ ʜ&T@{Z2V@L"`If oVJ~='@A7Y6Շ vs)y蹌|̦w4JN::}3EEqs)õc7g%4DMke55_cbL.HpWrccLDhGy2:8ԩ287/;[v+9UmL1M9`LXsfA.Uht<aFn)s2~.Nu0Z5y~ƻΥ ZCflғ+[EbMhjثr}޹>ٜ =n,dbV,+:[v+9EOH}g;@A`Vޣ>3OtWbEy&Qb9 |uJVS4nf[iN7w'Z{]LM͆p3^uO+9GezuoZNq)6̐`j V]~[vgdww\SwJV.#•*#*9"+NC%P\*9B%pU U*TrWJP\*9B%pU U*TrWJP\*9ҕܯT<;.o\/ɻ;~};@YCl>nN[9 P;@v;%"!v>05M2\/"T+N\ry#{EOr{+ZrVufc |i%r2 Iߟ߿ d_$=}|sqGO]7&yRykb2>xRܙj |z%'O[?upOtSD:{jεr[^tev[;:a i;>/r76*]K^Dt vգE85Z{Mq9'ӽ&ɋj|UM=OJEH+I:6]nM p̺Y\7`ցSh_OAl0=Eqt2g:݄|ԙ땲ZvwTrzgwyyNxzj %lc!UYۣmY6#NF'H8%VuDZmVA#M6Ea%d'7t 4jT=,{=tመLGCœdvVTzrS_}5׽0qՏԋI۳hm "̮촶: CN]$UT)&1>Zv+ѓџz>#5=4FI3zN̡#%R1\LG ,گi{KWXg^%=O sS_2qAYy $,9:}?ِwxa~YUiGC)_B|C\n M}YӮ׎7*}\7yUOAVJwq0NK1bsw2v5MӮ#]Q*iɝ%Rqawղfy+~@q܆4 dt%D(?[?հ(M xeOItV Q I\0r)%9B;qH?[)O-]O%G()$l Ȉ\yC*X6|8r4ׇPLtg-8[Ojcf7KΈTCMKAu~)/2_hąNZJ33Og~л4O!ˑ$Y7A-=^Gj?]|,-†}D?С=;ר{T,4pY.[hTZ!a+Np%W)=b8޾SΉUtxhؓ4j2s&.G,!Xn$xMYW5gʥN[eٖff o3}|l凎!wNA']3S$+QS֤I$է1 ܋[d A{(c]>N285:׎h!ħ1M"?O#jWsi|ܴ4WrByv_ЍGa)^uThwE#UGpS8g*X11xt6܇VSK֍=\. 
^|WZD;:?tcSwgCLW8zItu2i=,T:z(X}H i>^Ϸc%㠅l1΍-ESl2=jWO,#u+NJq$^}olSJUq @6VJ%*TrWJP\*9B%pU U*TrWJP\*9B%pU U*TrW+_ow;k'?\Q,J2Z_ͮ>ס:=:w2vbtIgG S,{7kSp2)#m{RHlr@;'Vre6";\.>h#]']:ӷr<0;dL--ߓu4MK߇Otsm}ּ"=v5Z9uJۑotE00TF`Mҏ'ر̎{p~iiQ鞚7GF%ڲVlΪ2v4lZGНPsOR?p0d*zOX{R# nr3Jn߳ϐu^<\uB;%ͨ k>ؙCŐ`jlOSsX{R]ܕw-wCVJwp0fE1M9`/#R4\Ʈ桙uDнKZ 9& 4-'3~,=Dr2CC[ b+NJ-xoʥzmQR_٬p ]H%BgDf@0aP$X{RNc\\͵|+dki}|P:"޽g[ l]pCÞU0q9`V\J|'4NL£!s9+󩕜tǥ'ݢoV{]n[7]F)Gm}xog>8JH&Mv'QoQ\"濤%a0UԲ\ ZvwUrs~\E~rXy?Zlέ@; K@%pU U*TrWJP\*9B%pU U*TrWJP\*9B%pUKWrvr/_ ןeyRJ;/U)yN&gΧWrRu|1E[P%E[ kڬ.-Zi9gWr|''_*Θ}%7'˿;9-}1}JnK">sp̧t^ɷk[aJ9cՆ(ʬa8[mݞ4sԵ::6S=K~ Z ^k찒+p @V?T}vSUjVqu!+*]Dpv'T@;ͻ*ɪ bxTr*9x Tr_<*9B%pU UJIߊ" Pm?Cb}{%&fjf[{oW`f'^bR;]e ];jٟSSFW\_۵؋^GGu+ 5Y1j?J,l+Nx%W缎~+=.HP ڳ8fj%;F}yev'|^ˢzk˥skr]n?R^~S;?Gϲvkq:y{8q.t p.qF\x Yɥ띳+m| <!qvrE̺9B[҇ Ҽk -jW\gv3;vtI_Y{\C둏Lg)=d׷_zG>[tc7[uY^\ZAf2 GIw |3qh, N]L{fơQLիq=n؅߮ױbjӀGЋU:kDž^jv ;C>1zs3c9O,nG\:v.[ۛf߬_v(Ɗ2jK{fOab]ƻ} ܏[ρ.`]q2W\nuJAϹE]I|]_z<.<YVɕz[`v䕶͒)\=5rV`v[1j[|&r/J`Quz5t m<[\e gױ[b:ӀwWrYy뵏>+Wq`+̝!&MoZv+9A/d\Vܽ0:˝X1ꅪדAƻ%;9V|u~J\/K~K`\nͮPz vQR/Zt #~j 􊚆z\B 0Urs_֚6Ur|*ʹbO1/Y9]? 4XJn߶{ =m\PQ'6]H 3fbR摇 eeS9^mI+¶c.hPJ7OuO/w+0Ȗ ӼlkOuvnS ?3/d > ϫ&G-m%6쇭@;s̽tqS֣u܂ ri7k稗80/KTrt{9q{s*`'gl㷷v}/u ǽkh^Nkoz憯\q2֋`+euW -hygB%pU U*TrWJP\*9B%pU U*TrWJP\*9B%pU@%pUKWrvN(^RL`Ҏ:ݕa_igIVd P NS}_ٗiI% !UɥE!OX |5?ZGY;J#ƕb~&{%'[=Vt_*"~.a%'HW;맫ʾ;,̶Rd\Cqhf_i,;TF;{+ǡz\UJFh4 mFK+9Fh4ڋ7*9Fh6*9Fh6*9Fh6*9Fh6*9Fh6*9Fh6*9Fhh4Fhk?mt3p$IENDB`docker-1.10.3/docs/installation/images/windows-boot2docker-powershell.png000066400000000000000000001120711267010174400265420ustar00rootroot00000000000000PNG  IHDR?KsRGBgAMA a pHYsodIDATx^mMwץK,r[U@K-Kv RFSZ.}Kߣ ^ 1$;J$b$lBZcv=gwk5`^S@~=`j@DՀ^/^4F7_ Uzhn&:x|-LT up'|?Sat0Q5Py)0y=?-1Q5_M7uㄿ[MEDC'@DئדXMOϿ|_?/?ޟK'.IvSssy2=g.mDؠ?M???G~ ?Ӌu./W72k/ c2⫿"k}o-pVT륳oⷪbs/GKcr#xXO=ÿ~/ gO?O?|o5u`|j NBSKSr1.x9Qzpi׳XK0Q56;gRG_?_?{7c;./ R/Hi lz=k 09ev3V28dt&LDؠշo?'sO_N1=|o;~.nNzoxБY?YzeWe_tܧ}УߦUh8UKY~< ;JJ?SatzF>ZcjlG{o>cw7?W?~_~mgK.PR^YIed{JgoC$3U7R[ oFZ^bd? _-,y'pgkaj? 
~ww_tMg~g~}sW_Ο[L]je_,SBGlmP Zk 9Pm;tE[}ϐ7OW槹]My0Zk}o;~[{gwɿ諷-O#.l EdOBY.4&u'#m}I-YRjz?Ek\|8 ^\V_/?w||?%>Oco4u&k몳k Z,RCy2*Jiѩzau~ ~w |-LTz_7=];g~ſ??/ߟ':9,oTe(vcT@EEu7L>m w>ѯ]1.s:hY7hP6FܵC)Kv3n9L<N>+oȃ%z77fK<{>#SGfs CU;5~*I٨ѿ|Jvj|DvΩ\d'c4WSz0<:|P Ċ):{59Ċ(]X׋8 ⵕ=1~ߕLa$ KPT6鯵L>'.oGvqؿߒ`:V-Nճét3w +Tz!*jL%B%?;i3v?S)~g&te+w|=?n~yD|VW'";v˕:my/+,m~ʨolkwHVbv~ái* Pts%c*V{a$pg0~8SkgR|orl?o%^;e*{r,D_(>"Ofv^3KK@{Qc!3R^b s\Uky}y3U&\@;&ծ7oUuv33[uUK8 H3oGkr>ɸjJ/a{+To1(zhPXU1Jq&F4cMGL>{-h4P>-w9jp0nEŘtƽ0g$X^ @XmkIuCϾvtIy<-s gӥ:nYr*+eHȼUe"I˓zgz>GSSw*f:ۊtF߮gyf `ꚡ[kd!Nzs/Txm NS^z=_y;=&UGn HɛLIRisMzqQ7y:@9@,h,߄[ڨwh=5WMEZ1_\,䍆*z}M>59opO!1ߨt<3s)t|us1?63fx݋͏ʃ׊?qf<\^ oC!m3YE ےYH*y3'?ϟsʧs$|=yzݩ^wڡ=%)_IC3M݉^LPQ/F?lwL]dtQG?:^Nu:ŮGS'bk0K$G'Իcwݹ-[A7s*?uzՋ=h,ͫ&~Tᰤ_Oq~B;ACemz]Nu$Oǭ^C?+i:ԗ±ah7I st(QDԮ~&]嫟Z)cyhLK|_ݕyYeVD:EmV[}m"ŵ/{W|lk+ٕys" Wic@C ۟:iw1owU:$#3X.p9)5]Ɵ IшV,ױWu +)Hwo.HKG}~PpMٮ=_'w,V>Ke2܊TF"]C8J==Pρ5ԅ1bl{?Weg/BIFE .ٓ^' ژK̆=fTo!U,1<0YQ+?I]v*mJ?]?Mg{8^X`?yƧ׹z]]rq>Dp<;uy^yzs3zY [o=MNzPǟ? D@}SsO"z}9O'i'Ib>>,=<uv 8|>s ͥXQ|MT{Ф2Ugnj?tA3ϧ8^UmQtVxO#&ﲻBoTЈxX"'ziT6G6?hqCUy?0/7R8g_?: _|3DߖYG&9'+ g7Ag`U<3;6A~Fz}e0f"K녜$b<~p.I||Dӻ.Iy8kgLy?UǾBMKskD!?-]Ѷ KOQDFLy_/*A-,$Byk0{{nq9\rH|?8 ~Ϛ@ꤪuW# FV׳8 93{sMJ&yGULP12Qz=ꯉә#2FObZ <~1­v.ץ91xD`OS DnwQW*gi\eiw-JqtT)N?k-'Qxy`x0T<<DbuyϠ_6K:T~Ÿ/Qtua Κ\Au >ig|&<u :z~aolVgdĿ78п(e@F^B?O<Fp.ȕ:Y up9׏G8<~^`@_5xu :Wo QIԟ-cб<:ވ ޼^z͉;4Gab=/ϙJх\ásE$KEOIE`7\BY \#}ޏa^W>~E%.ߝ\^{z]eWgsq{A5o*ϔgI=HpiEL dK} s ^G(~ z.tz}?ْ֮ܚuupU< z~Ώxx_L'p"{s%9U20^eo@7o?!xέKB;|Y9F720^'fo\Z0lri;Xh˼? 
Xݣ/ۣtl:;.qkWq&p(m=My; ?!ks:wz=)]ql R~x8,їwf"Bu9?9x3;qi1u:f[,ukGu/vV#Do+|UxX[^V-iyGƣ'7*[j4yW{CxYqՔpy%.=HkR͡qx4'{7]kg|x!j)]}uߋUfw'\AW^iqOm]?oc(cO¨J?o0*ek\WH;b!Tq ^Ԅ;Ǭ%axfC'cjOyKUfWCnbW 'Ni)S,B?m8o|vg|+1^)cQN>9եF}Wkjp}]q+q ̷ɉ Wz]1`^r3zUuBtCm]5sӑŹ:U20^eo@7obc<%iioqpJ.ߺ|S(e^gz~fiw~Oo G(]ř_۹-.6TmKty6sM~x8,їwf"Bu9?9x;qi{tDͪX׎^؜dKc~ p˷wn*Za<,I-lc/p+cm4D<#_ y[cQƿɅ=竽!x8}f~zjJ8ɼPEa&vM9:$qkslo/1^#A-# >{[<Εyiw/~=mtЕWdZ@[iX1XǓ0馼FJ;S ڷfUn# wƏYK- $ oyMͮŮN>s=SY.9?~ 9q3Vc"RDT|sKy>?|/x\C}x#鼾S_:=i.Jf I{aG5-S(n~~ vq&Ր[!8h+!Ά:Ο6.F)@iވ^{ӵ¼[%r!{}e}׽V==Lߠ~ zqtr8 iڎҘvOh[CCoUڿ-1~::bs-_8!Sf#cRXdO83%c 8ltv-?]Ih4oxPLmY۷Id?-A0zwi)OӻMLYÔw+m'&/!8cU]O}Kz=3Ajvȃ3 cm"XC}Ȋ 5\ HTqWݿ}xKrNR%jI^m%Oߠ5:-Յ#mgV1~)!9ZDR=R ]DiʯދUnOG#pqy~FXɃ ]Ƥ4ג%):'t.kh+ 矵6 ?|L*#1^ ۍ*NQ+ jZs{&gR4Z,`.zM;gVTAa^[  pXN[TQUgci[ו[<|{\=)c>^WS>͒Fg}]q+q ̷ɕ 'z]qL܌^gUݟ]dksalnoqN7iy<s-PZl}gqDF\-.[{ Ha{(pQ {w&wg2K-+6T;ǥzRvJok!J<'Z8ۂr0mި0K.9_M qe/3ӃWS±O敄* h~P5kR͡qx4'{7]kg|x!j)]}乼]6C$8:y ϯMR"NxO+{ZQ:Qx7HUʆ>A%lo# wƏYK- $ oy'N֛ͮŮN>s=SY.9?~ 9q3Vc"RDT|sKr!N·kGR|i>zz^g Hram7$>wQމ}( üE~ 6lAhe`\,3Aܚ< )F[^^q6qqq4JJcFl#6ηJ,3 Qw{SlS;l͕b#=QLb\.i}@G,}?;(Ӧv>{nWivt8N0xEUpߍ,:M1NȏuH}b=v̌'oleQwm䧣ztI'Ѽ-75BUrR0%z8ӥvWKczzwiOӻu"RSWj͘0V*3 0[B坍tx[DQ r5V'Cth+*OȊ 5\ HTqWݿ}xKrNR%jIy^'Q}vUݯ>5^Zp*߯r&~vg|cX1NQ+ j!mӎtR,8W`z]ʽs䭴P)o˫y!AB˩L*>*@lw2_OC#i;|΃9:{'Vo/九:\uG[^*IJop/u :z~^`@_/u :z~^{j} ~b~G~qxx,[w^8$\Z޿"=nS$s *ٚl|H\?3q^9/W;_=us˟J')?ocqT֬gSE9Uwp+ ڼV1?E";#xvCOoZQ8?0N>5;\G8PZdlxL>O7^DuL(^v;9E[kt6/}iv}>6e?zwե]7a䝦n\Ȯ_xvz[~{<>GhJ oa~ ?ܼxt.b qNd#˄#'W9︋8߇C ִ.- SyFS&" y KF~"^/FM7閯N_MEv?KLӋ7~DYF{I>S#ĿLό a3.Y+7Fnn:o'~8yܻg_5xzKc]O k\gDsߒ=&U(˯eƏd9n7<qWޟ6j7Asnb#6ΪcV5[~O'b<z2YypGlF\?Qlw?wMϫDnWWǏln`?_kj5O&?tiOe;tdE 0~[h/6'Rm,ܰJk]7Z 8Ə*Ƚe؏^OG )KFc=ݿj흃SxPyva=&u8UZr<|7^jkw8KZc=6޲rOUr|G/1JX*`G[b]?>GQv9򐍝pf`w$Fuď\u: ?,xLGQO>S_c3>Gxn;C~rb{y <@@:=:gz}j1TP^ZGdW? 
2dOI~~^\|TK!3ݏ,z>ϷRA>.X̃=>b19sIyK%| ~p؍^uJ1r"jqRqzn'%B?S,5]~c` aϩdfIxzj$$y~yia;3˙-M9&"{㩝:&O}tU{ц~ qN-81:E$p$mz)ߡtv_~%j~m>F#, ܴ3ɧaE~晙ӒƥXh\yy_`owp0?ྸaZeP^?~v*74p]*,\}) ݨd.;3&;$yX{]| oxjujo8cg?U#5+NѸW[0 ݘM* bqL?ྸEZl⺬ !7 ӎL8qa,3?"`Ƙ՞LSIN,CJ槇[VϽx ηdO}&z0 &- ڍtd/Kݞ!}1YPWP c4Șmnh]yzFbb ǧЏm7õnChدCGFIi9Oeȃ~=|B<F2ܬ^AԼ-,'w: jEgoz+dzŌ%/Qef }@[a/܅F.e3ʤV"{ҜRca,ډ *~kVLs1jÞs|i?O.`ުi^u#%vg\gn~Q1ʏv<.>I:_? =` Ȫ'T;:.mu:o8&"yI;CNa c`t4;Z7έu34Mg'CMv]n}<8뼀V8Y^d?V/@(S χ!? I82%G?57/v\]gk'&Lc,iS XdVt:u]:}?EލgWŖfN_%0U[u\w,Ǚw/~)o)Ȟz8d'y,Sݺ_0?ܯ'م^oM Oƥz^uKnx>siK$?Q~8&5Wy;/ȘJz[]V_)986<91Py<4페+t ]57 /|_1]i0/z0 ?Z15 ?f8uG' "UIF|sZ04<4페CW^wPnz^ސ|_6m~U={ҸqF|_9wu2C#n1_FDzPyo&cUdb&d2k ^~5Lb\3ig8Io۟N/ ;?ߚ ^{LԹ͹Z~Une>؏^OYݨ<}?}t~, ;O։s^s<5b͞+VQ?w2: Ok4fcuk{]*K̐x\DbsUGЯ-2N~FgS>Ώ-"N";S2]0?[w_$^OFl&'~R1չY'aŮWn+aإQ 'd^̫OYN:3 aQ|e^`@=6Q_pԧ>2n߰gl]MLuP!Ht$'ރ+Λ7&; aX{tjN&d}]|ׅ'N_ojAi)Zɱ<|}ta@&^ ڻS}>ku&w}S|UΥz5IcѩޜG\l9'"GlNr}+MN?pmtP:v>Zv~-?'8~JH*;ʯ;]^D ؛GƏuUI@|6՛U`Q^aqɴܮѣnfayӊ׵ c\wSyJȹ/upۼ~s:e>?.V0{⫫|,oKž҉󠾺a|?W#DT~ z"x~e0O4I.ڿ50N#Ry,|>]EUTd/ZP䫇C'jGx"Quږ!&Iɛ(M;P=WRL\ ٙ^K=6aF"|r':Ι}K&6+M=ezݏ~u. #>hWjQ:/,.vB]cɛOi0JMsE -z=̧?|fYS_H|WoE#.V2!}:Uф̧3gV2uyD x0'ˆQ9ܜGvz\z=:knupU6,do[9\I N7u29. ,N%H__|x.^KꭔȟWxvQ61i7&jWӦ.IֿI,;cﻴ/ i'Րۉ]řCNܖI{vA~Bp{z}>>~C.R>n.7J=GǨrUֿR43RxO.^w0q>rŞT|sg.ߖr~ +sqzvȪ6wYit[#7y9٥l4?fWe}nr3(nݔ6??or9եF}'ZoTnD 7d7R,,>!w,m}15jWKi۪yg8*qHVe季2}D~ 6El1M7Oł8jȭWeq "?V,C {UO 5.F)@iވ^{ӵ¼[I? 
JղGF9 :$`t6@}Pfnjbؤ[e Cs;zF "2..Wk1~::bsP7DqWCnG:>RȞ^qfKR7q6N2;6y[lwh*9c):My-&-J1|Nv6 5ޜv׿`H?wS*wbbTAR[ma^y(?1?,98i|ę W) F[>VP{*2b8R><~lo6uG*咜d](Qsrwz!hnk1`TNˮ .\flwSG PD؜[S-7-HW% my!Q2a8 /`s_ג[y~Z]c(}6FU1糗Iec{:?ktK[)jzACN$\Һ\0+f^+]){ٟ^LU4]OɛO]aTOv/c$f7٫,uo{-3"<鳓Ve!Ο^)*'?Ea'g:N̏ȳR݋SL"5Z3T< 6wɛӮc_g|xC!ֽ0OސFԮvH۶&foDJ{  Q[<|u :z~^`@_/u :z~^`@_/럼|^xzm:|o?Sq* ~d2~0C .<%dA6L2/עm6塽v3 Lj(a<qN q%\?_elX@dED.dleZFdC+8^+j7=u[V\O01ԏ)2#iY_\ɔ8%M<-Nr[aˊu${tSCJ~~bQl_:O~Q}%QQ~ky?F$p-8d e Q936p푟dwRL( ;iniq׽y!~NMߴ:If;0ڲ^ԊKK*-5BQ^+Dqj oGYg@/26Wk0j2a<_u{MH-1:z>g&47zclz0O6į2ܴ^/0*hu.L#*_kaqFDdoKX^U+b›mFnvDy͓?f(3?(?ވvы%8 A~8:Oa;~_Q#g#gм-,k8SŵG~89TEsclɱz=/,fvH+i][Vz u\#$qFGdoˤ<_sEvN8[<)I͓q y^Gaf^O}{n]3Fio=~#:%gAs7Ѭ|87t~fJrFGv<^tb{Xq7x:_?6`%Q_uuR\~\3 yg8n+܏8[gM#fNzNv?BXgnyݰY7'3~߾G2W;[G]Z_j0H~vN98R b@k틑v^Wy_Wyءׯ3z}/u.5zn(%QćwkGm$Xƹ-_BGsz]} sgrN5GyP}fy|^ ':ezt.c9|'T{*?7uzNåp&wYS>O?#+Ϗ^Ov{܊5]_7-}藻OWTs!o~7`.{!ި7>=H+1]p=7R$l-r\Ro3au-+xtඥ}nq;B{0$?6 Qlqi6:tu+hⶱh%%2S\Bw5ٗ֐F'_;-nWT.&T'~L8N;X2 Vg4yP!'a>톖`\Uߪ/F/j.?hD0&y++a<"} eOJ\+8Z:M*K祉.QET9z=wkv>I?񫡌C8Oy% K{YR*usUx6Qs=γiRuz=\­/CfP~Vo5~DZ^ǙlLasƉ:,bD3G=< Gi qO 빎^?QIg^SK:[C2qz LW)W-uZugBԠ}KNHWZ]O FݱA~iNCB­'}s";RYe%W(t|TLO/th?կDR\l7+dL%?%ϗá\E Oi114r+3^1$+9Ǟua7^&u[؍fYK$%J79~ %w2u61^yП3^g]UͯFѣO z #ž@R̽]Wg+t ]\|Ů2h*8?Ge=_oL7Ml7>1Y燺j a e+hc'yQo_'$V8_.J{T`95C.g^z}O+"gG C`!QhROۿ{D,I(o~sNh?mɸ(I٦ E"U*Y T]S1ʇ;kb`kԯ*5Pp$Y@؂IW^ru:qY7'=;U?no]k)F)W_WeF2)]?M]I;qNuZ~JplR#Z.3V RlMxѿsfMh {Sv,oz:iϴ8+lZr+?BƈSODXޑD S^+ф?W4^C,ZWM6]6ƔG䧷" 3LHNGTsusI>i40^bFȃZFJ=/;iB§2 nĂHRku}WFBŒ[o *Z߱sE|ݙOv.򜨩QSe7qRi(8\ [m&U=26d(bnI_1釧,r?jUy‰'Ú\ai}KΏ6rm7nGq1(U'RT'͊O,DV^/,Gż <>ͬ7gi=k],kծδUk qQ8oKؙ#P:A@M;)Lgrqn`u\nLaL*|e"?V,C {UO 5.F)@iވ^{ӵ¼[Ic ײGF]ny/ P]˥:Ɛzz Z=*Xl76qsJ$ZLmoBeR:&~zkRZuctt<"*/BY\ c7e8="?!Eڍ33^2пvJab%F8ߌUKؠwi9 kn{[Ǚ8Қ>1~[#7iedX]s hV]!Mj7~v;@7¼ MBp$;[cB坍9̧@Է)1g3Is A r5N-g@4ڊEJSm/#SI=_vk#\w1^.9JA֥C| cox{ZZUiu2৔Wv {CbֿC5'#[ ]dhίދ^$Pxdc30 ~FXC0ouފ9ïkI ?].1Yy`Ǥ21loNQ+ jzzZh >{x_jN&d\) z]qһ)'z㡟s Ηv? 
ͣY٣z¶$jօ#$NۯpzTzd\O}k#T·aqꮈ9Wز*%޿#?^.'O^yCPz>_m^$^;mE?'>O)eS5ju^?~$`oJ?ҹCn9QՕFg)uy&{zw>HlW3Vō'Mތ",oZqq?W"C6oMpӦ7aޟ1Ɨ'"j{s>u :z~^`@_/u :z~^`@_^^x`Wn4%>k?yg_y0#;U1"/|YZ1:|N1\=ԴoSv'M?N#'?gfpnju+!|Oo%7VǷW|9kƂ:c^כ8OK!euۯUaдC '?Z+FvQ61iE^~tKE(8}JIFwM ^3W%N#:͙ږ!&.?")O7Q`yIv{ X:zjާݽ-f[< z86Y]u)A~dyCN?q_B^O j({͆CPuPvx,d/ϙK6^JF\yX(oCo4[8%%M1ím `,]X׋2P0_9Kn9S~ CW㧪(q7 Sq v+mu_&3'e&VWzM2c) NaJR㷑q:.a\1üGse{mE6p?Nss?m$zZ۫Z,)V:] J&'|=؍Ho?;c=_v~];gS+@C1~v~㜎Z~J(-c*X.yz]p4[zt(r:'`:6^^?MX =}7`{;Ϲ?DJh٩⿕:9ԗgWgrO|᮶1Ny{,Εߍ7Ɂ^`\\=׻!di}LIuY+> Gt(\+si3ƤuLvثOߓ^U*j"4golLρ\ c48=p(Ӓ|Qx7TPHz$u=*^ͫ^ 8rCu(oN99 #M%?]ɒkT#U*oI):EM_|6.R{S֝[v1DPHz4r]#~"%ō[ $mf `.+;IRV v^b5ʮXnwfw[=a2{r5o;~.EypTcg>mLB)6^ W?_9)Ϙa n w}OoSbζg怯8j#ߝZ:΀ȏic"#+&ں_GʧǏ{_wG:|l1~J; 3)]Boi7;{DŞPxoxM5Vg#{ *~b?,}8y&+y[<~u-Iב=᧫uYE[9^7]ǾZ$C{uH˜Zʷ\5nty|>-{'VopY^^e6o:popI^^`9%o 3g^<<nsŝg} 1dzx\\bWImeyZҗOu^t>_2?Mli~'?ZnkvOȤZ]qgfnj3&XEfy.tn"֓ Qg&(;:z6RHy5 `z8H9q#*~3(oN99 #AC ʔ:uۡrO=bJ3X1t<({z]JFRN!9(Ũ[ $mf>EJbgǧp:_Y4?B<㿡죶Y׷[?G?X8bސ1u כ(u2ȈnU 8TOk"1~?K6T,SP9q^4(B!V^"?g!I!*U:j3An&5JCbT;%TyPTW]kQudOԟ[[ Vub3]R#wSN:RMJr JE|~LzΪ=ԑC?~9]u_,W}QMz8ڻu}>b@/k?AfꄅqR5K0鶤(o|jaqꮈ9WL`[V9{xdg44$ݟT<,ӳXn%|8] J |ÎeZ[֝}?'ڑ ?R^Χku|{gOzXchW-\ĐdF}5[TpA|?x5˦ : z~ g Q۟rUʋPyq\Yϲ(5O^JI'8^.!>*EEqX!?Gl\U:*ҥ3Egj•hWvtEE)@*_wDWׄW0'>Ws4k;cD*RsVOMOwAɏ͍#u~v0MxO~~=Ikaa=3#nVsE- T,]^>c2~`?zNRA6,íO}R~glIr=H[IG=uҾX/CG׹bad;_'X`TY7i.0oc TP3uHr6VV}vK.^ ^^su ::z]K)~cm*/EпBj$qDn)doƾՠ~nl}gyb0E.7#YO1N3QG3:~Km}Jo[SaG󘮯k>0WzI~Ps*|uϷm-2Y8$G *A眡]ǬwNrVrԼ~:\ʵo4G]ގG~}ߏ8a'7lOƑ{v]pE٣W+|8^Ku~o5ӂZy(m{6/z*.ͶYw0424n_ZbŝG&e5^Gw :/c| %P|lOƑ* Ӡ_up_S1SQ3|8^+ugQ}Kiµe:%[gsi^Vd 7g:~`\>@ 2q\׿s>Ȅk}'n:eחq,:=rLnOub.8 2zZ1~QLq7r^,uoż,mjWLۄ%f c`7NjO?|^ }掗da{iINÔh7χGfF˳I'ns,Hoi^k*8㥹i!Z*";.ȭDY{nG~0'N}hSGI /a}į;zf_5^ViЮN6FRoi} g&+%>C'/pj~kqLm`2^pE%qY}F~|PU~k/Yqo`~0'Ǭ6} U `Sb13bu:` u :zryN/_V?PZnTno+ H_E[:z] E7 _z"R߼^ɵ ͯ{7z{vntRe5Rin+ԊM1k|zz}B=R!7ɵzUs(s/5Kr.KRڋn"ʟ۬%^ '^l~*? 
/{e~x'1uz8t#?3#Z|?4  syn#D@_//Fp7o?qqnEjh.~C_)[:zt#޿?=y5,K]Ӎ8b7u:ysq^i]uC> :| pv͕G3sA~V2\>61lrUίPҙX8v/qؙ(G_HMW%<v˞z}HYuv+<~׹bvuG쓗;{u:z~^`Uo@p!F.y^Bz=g^EUpoZ'\W"^BDz}:z~^`\Z΍"u :z~^`@__*[IENDB`docker-1.10.3/docs/installation/images/windows-boot2docker-start.png000066400000000000000000002240121267010174400255120ustar00rootroot00000000000000PNG  IHDRY]PLTEinig ʪZ++*R U Ԁ+ԀUPU+dUTV  + ̪Y `!d$fɡTkL&+K! ;K~7ц!O$(')'ϚO\)G$`- \w!<+?ϔv'Tn[f*mgJp)v3h|hJv]=6Ub~'WwՀ4}w&}C(Ƃ-J1 i(]S&kii(CI\XUS" iLXy؜j*0-yΫD Zϭ!~rUm (u7@@lpH EVn?;Vy,*/}U72ԀUiЂ^ t$IDATx\n-!mi@3&tiS&k-m c f`'eV9ŊRBt?z߭xykn||^W2# ꟏=p\RyLaX&}[-ͮ o1&,4XJ v߼E7>:n]B(2aKT0"Vmkzܛ;Lz6G931Wא>k8_75;sQ׈n=|ƪQxd [tǣ~ޡ?3Ow1;kHqQzT =eYh 8ӉyZc@sy̰}r(by}u5oNt1¼wu?|oXچ@U%+>{_KmLױTKhQv[͜0 [DnQެDy/b%vBj7GNR3) 1o9f#da `IB:ĵrSRE2Ι(e"V`*޵AHe>ʈ!)&:"GDGM ZA@0/oAУVGtFeCĎZh76;BćaL>qgS6ֺ <a"8sR@"Q=댶 M`,b,h\UcQ/mT(H, u@ (R:dXM *lwd~yJf7Mc tH ~p:i/[1oऒIGu Fvw dBeRѸ@ "87Q{-wFTaCCǭ3H$}a `;#v kW{ΞL 謪R *#յd^$`%QYN8D;.vRF)Rē,2FɃ) zKҹ"KD5eh|oEbb#jJm-=W8څ,^4g՛_d)+wO dM`:,SC:Z$L ~!Q$+;(3gys=d%+WQBcсT $:fjs(+Se5 hW,];'1PsI gȞrc,`"à5Y 㝑82ܒ(n $ 34nIRJr.@rK1I ߵx@ճ Xb: ƭu(Z_: j$9$AkS JO2Yu`Xq ȅ)\[ U5F/ۨ>Bb&Bid o {0E Yu]Rĝ'SjytrdFEd>=EIaeRcCγ [;Cub~X1c슨y/hڛ7dac +|Djc1a9C,~"Z?xCQ+(ч1 "rXUf@*3^Jg4sm*t@1ʜ;Y%vXNǙoYшWs_dA1nĂ+--z<`5"JH @C(y+Zt- 6#Aћ[A(Ñ"z-1DC,uqDrN tPs9*'9b,ʙ2S*Мm ܠʺw;2h텬br}ߣĉH@ZfHN$`<љЂ 5G_'`DϷCR]vݫve~ Yh1ax>ᷧ~߱^jda laΫL'7xәdBlQN|ʑ< +fs%!??>%Y2+Iԁ/uh1 4OJo[xG^Ouԃ,۽}qwBh\*a28+Zwdm_iH)]3TƏX\EgUOrQj\ Y+Ț$vVvK{хfME HVZmvRt9^emUe$Ҫ) %@(-0._S1)`UFs0z3sd00gndQA3S,>\85{F `IDWvqIwbjL86x6(@9f?YP;c.lXO|_[)}Gğax*F&}Z+Mefڦ<Ãx">ꆥDDVMxY5wn%ǣlY3)D_L9N zTpCuEI4RV'tD=ܦ;¾YVfZoEf%Am6bHH,grE`=c)54&bT#+q*N;a㟺C]Dʩ:]Mޡ8,z#}R- $^BDV;,0 43RݥZ>)|>dA<,AThQߌrblp@`%2AH2' y)3Vg?%* r|I n7=V2So%kziҼlpfx6*L +hg:Gee"Hc5HHo>߹Y_&CnOzhu| \ F8+#ű'ݘc3.% ~Em$\*Z_h>`L,CD*n.Mf/Sw6dldQou|QJ}HkAg =)liHH8q"ĕ (Q`V0 /# H7HY!. 
'c+OfsԂζZy!>b̼DTIm5ՁN)3Y0yuʴfQx'A +fCa% "`I =l_N cY\D|GrִYy,+TX+ZJ׺~]ǶէZɬ!N/&tU{/k"Wd89+rj}u>j՝_Lo r{lYUEfRr2^#o剺{[jTQn"9,Zd6ZM~#kQP, )ZCWȲ~,uYQCS1NwK.QE#k]jVgV05^kxꮧ:TT_h[!lSGH$M˛Ck#+>0C\hSAX(-J w礲h:K^>׆C.@Sx֦%R)Ib;l$Ё,CA>Ϗ|LdyeU6fi^۷JO֒@3ԠM\M@Bg0ABP2[ö L޽^P޻fu ]hZI8:(B6İ$0W&_]GaW5d24.j),Y\ql"}=vm+U.ԘDz&]6MvbQeezr۶Jc9L zoI/-˴[u)YCǃd1Nv_G0Y1 zdWs<^mEɏB4t$!Xz(22@hN66qʑa*B2xN$euV\JTutsao NLɀM+x2 3Ҟ"6d BI/!?ĞȲ}%jo8 5]RyFf}A^Zr,VGru r[v~Ls%!I2۞H1iw&# 1w=&3%Nmu 6(Aՠu`;wMNEKreyLH|(u#IY@?!M`F$R]{I_u1$3žtK #ݜ1$Z尫uqJ1]Q,%wJZ]Xvm D6dyR:$ΒPFқLʉ*8`fה04请[@Rad̤KYDG953I>8a0T4gt"?e,5=I9YEYɶ:do"Y~Ldh[bQEa)7ӬK2h[0x!@n'RY,+3UJ ,?ץՂ֍,>UkOL^|/rK* 0P8N9 b#@p2y?&-t¹'2}1YaY$^"}v!ή&pУrCә(b4.2.#Bc1I\YN=.LH+oBV 7_km>:3\Y @i"NAdeWr\߆bJy d` .II |_٠EZrZ+w}3Ur>i*q.̖ /Bd05LLkuc[ଜ8k_28ã*8o*`|@;wvVv=vӏF.v|2]wZ ~>|3z}-yD&6Yd,s{>Ypgx왲{#"ܛuvJbnj[ pmTd7:H/0~C~֩`8HQYWj2s)3\kw3zFRGS傃,dLLH!NDiYsX.eGd}W(rLjJH%N2L25U N1X3VBDVva[B 5^€8 #+gM,ߐU  lx*FzgU2vL]"?DP =lSXn.:UMkVmp5'!SByHmzE{W^ZYQZ$WZiAic&@UZc)9d}314U< i0ֺ!s?i>4zp 髱:by<&ȺZ#]>hpg:4 ׸׬_ξ3>ߟ7B\~)$1Tomz3~0+R>݌8A[;+`luՂQپH9TN '븭[QE8D'ضNnƎm3B5R|e %^YuA`#w͕[nJXՠtȆmVE Nxjhi _a7CJg ! $:gFB2OvJ#ƃIpToazlA^<[;hȖ:k!pŴ!Eđko&lPF;uyk $CDo8n3˯tzװ7$ٟ8EQOX32̵Ri_o @1(#l&3 `Od%2rѬGh;5g/L-9=^`Sc_,}-Dָ;6ia`y]SrPύ,lި ^L@Ӕ( >R";WQcer|2AVT1}H_ dѹ뉬0oOd)3g wul2)o9n*"!_ "ӝg+T9Cx!SM0kCr'R.Pyl4k8%KxNEƱz`XMUluԈY {YAdjDЗb Ogg^m=h`el)^ҽ=@\EWIӅv'cd6w ƿE:JRB#kG2㗑 㠗]ܽwbM٪2}?"Dyߗ, p"ԏlh=Gt0QvIX'=̔Ք'@VL`{Қ1*ޜ!-f[PXV9Xju]?JjP_ILoʅz4=RJYg)z{Zxb,s]6꙲[ K3 ԱJ3TV#y<`sRV>F @΃; )h0bd\ϓxpr Kp !$ u,2w"B3bYRRYQ dr9,r{19}.<듳ܯ2,aik-bЁ,nrymwU6_;wip!O$u [XU/UENŃŬrw?QtQ /Vt_:A_~YXcodFu%sY̜nnɡd[K#iHɮ M[Iz0y`|!dԼ\9ڔ_gkrֳ~? 
aA](y9P ll,s7}e#`!,^}KvOK Fu U䎷n3F*>03.g;~ Ela8BE8dH ^BǢMVP9KKoCs+Y֧Bk935DT=:dX?AgeN׼p>:7~N#;^6 ͡wlV>cwc: 8sS~]jy U,FB =d@z'h6BBM_Yo-cukuz@F?K_6z RmKejbIH0VKLwXYfgԠndza*J\ȷWdϧ~:?>z.)ʒˋs˽ػ,lhu :oTK68J⪟ ]Mvdײ^uSU\Yu)'mZ߂6{Bkcݒ2Wh^#I6UY47TR= cӣTzr +]AWxM p[*A~{E %S;fP丙Ռ,VNpuw,1kY fڎsݏM7BdGX5"@`+-)65u ]WUYJŊ QfZ;ZfYK|" d=S[lʿW)Pm$+,-o u469~r%_kGaglwC899#*_oGno[1l؋m0&Ft=vYvoCSVcRYMEOm߇Xp|li:&'{,o_{Ȫw9L^w`Wl-zݞyJ@= sGg$.<Tq^bqQie2eXsЕJq%$sS9eLWd)`X@N0רsQ`iZiR#`/3HW=㌻M]cɖOoo\MJYzlKZ1 2*c(6s:g({|Z.;&}8=Px Wtemyީf n2@jH븊ו_+܈$*4&ז}|%=.屪u*%7j1EOfE KC\CnP7jueqvCTCż˔JA>jě5'VWEV jR˜Pm֜\y)aWl& *LԔ e|u*5WlZP~n,WCUmM196!mOs43zs#H^;ƵR:{FJF B1t?I yPp"|)0#mGBǜ~aA@L#ҩh S(t0Y1XG<@&*pF1w=F3?n:  MƢo5E!QYڳ$|TqoPr$1迟K4ܒCk|a8 Ӊ2N`l; 䉣ZkBsrY'?f3640b|!nIcqÒE`4Fv `QFq\鸃w{Tʫ5 kɢ~Xjh(ƍ3vm$5Mѱ7 f v$ˎu#G;lAӠpcaBܵH1nF{ chWsRUv?^J z2Ch&I| y'~+2h=nԲD=AdGЫ~;pvU\e[U)-Z IUѯȇjd4ƣ|K<00$ V[\ `Hݯ\&N-oJ 30/[\UB=c!+_}j@$7\>h8a1/DK/؎fQ7ej¡V!TkFT* UvU#Yʸ|\1&t}]/'V$IRKHN.4YjUkGOTg!e{j%MʯY6F P\9ZmIו$fRA-q,/$Y,JZYF,u]%k-rXbx`% KWwUH`id?4YeYFCфaŒ+R?~}Iۍs$d$)Eq<%:ȯتwW C/|׶7} Y &Kx*" ^_Xq'6z@kT,Kf EnA9f wೠq2 H\VIK^J(ML RdoV(DUer0,Ƃc5wIGq/Y1˗|EWU@K{v|֣+y;^,xKT}d\R[6s"02\燿nGoջ˓axjj)*e~~DzjG?RaW^+.h}*VTJAu['NgQ!<0# +<_bkeEW,bhΗq*h} 0˫k9g|dHZSdkdk`R*9\ɸW ٧4Pv^B\5p_Ļ>c 7^,RʐFK'Y :ACz/ 띫s;ʲvֻڻCϐu_d@$J 녤~,nLksDn'ermKEZx Q%=)Ch*!Bi>d l0 IXo.h\,x($/T3)"G?4>B,wra^~ ~*aĿDG!{QiM,*PMdoNo$Yʗcd5iC`u(:2dY:HY""%*SP:Cd`//P7;nlSMESPQb1tuG,MD(1$ J(Fi%tz!dU,YQ QIdgpuӠ ׽$ .R3KjJYBF` eFU`%j V8Q} =_!KF0R#+9I͐ud8*G!;.tq ߥw^n Y䌀+!l$ODP5d[g IU;v`u .T&G?"&~7Q! lBďP X"50CC\R5|7%{;YS/b"z>akc#fY7N8oP˲ 4(mDvFbɥ/yR-tB]֎`<ռY~,0rE& ^{0,ЛP6+Y0:tdb43?7!U. 
M(~t0YJ \NV B6@vt't`W^!.~<0P2Y7+TT`(Û=JL!Dݩ d^ ՞8@T*BDd53]R2l۸Z,!iՃ Y,C, 4{ɁdΑ'c*hD5r{YNӼ2QҔ7D$&%{᤻"OU, d5@KՓdq%MKRS΢FV,Y.~hO0{4d91[D%ie[v?ϾGFatGd F/#'Km=t|K_Xh0"5 zXQ,/԰W&7GzPZP,ojz: ,O_5ϺmDdϲlY'͒b!{l4xY$(@VFw# Y_Yl_F]JΎS@C,;1W9gK_b g Y5A?O,^dc2,XНX)%?CR/ɢFc*BE>KN&A^ۨG<鳊Z^ 䳸<-CѬQwˁ\,WlM~!/ـf$nkX,zNV_[um,&!C Ɠm3y }8 MӏǬOʐe4klp?_v ʝ9,Nc.JeS7[$Z|'vsm?,L$b9:Y}_ݚxTךT)D`P:A"Ӈ2rqH,ɂ@ *uy?ϧ{oZqOX)>S%ӸxA`u~||Ij-[`<{Ǖ em)w0‡Ƨ IA> YZjd>7H^'g#Udk,Y{7 V<;LfdUZYqW?k3K+er#ڡ,ʸF) DYeP)\ci/C q;Hknrh~0iMqs[Ŋ%d`kP7t=Vv *ˏYD`Yd|Ud$Q wE)%RO r~> Wjnp)ŋG;嵊:l81kv5s*籉q:C{B34Lh?,ג3B֮I,l6A51pbiir8-Dh^^w0%'+(yr(\HTACBW 9us:SU% RHzEj7<i&'A}S_0ҵӪXdŊ%ߦJB*k!",vyi\Xj9E`Kϋ\]<آt1knH'?[x\Ԏu\Pg@VT(j?q}Cc;dQ{3XsNG ]4 FȢ2Nv&+( B̖Gׅ,3lp%ʭ$K#߳kE?ɺ̇uM}vԱȺG K|xbŅ͋Zx"10s\󲺣gNdz. 1)o@_z)WqdJ~6V,h^DY޻ҽ33 }d'ľ v۪GYZ)q-tNhWj)87=u7g7@FA|;2t 2 WX}Nx?U9UAV Ɋ%B JRO55au}{{KKA~:$&";wEVpo%5ԗO?3B}G<N,1/sonN~9wxqd<,r<{9X YmZ'iu۹b Y dizQM3'y p\,0fŊeYm.ˢӕGW/Yl|LYs :h6hFn;N'Y8ㅎSNfN@.`r9#eᱛcV#|D\%hG+2 O6Zd_mՒC\4F ,?>2GZ\?yߟĢ,Hkg}4şu CMg0,0Ӊ.NfO9*| bV}[4e1>+П\}+RqDVLF9fD2˹'+%`(J d%,D3:OMH $Y6h± d0Y JVYYSVz/:5,gYO4do|];zN@76\Tw k`C1FNˑG?WbGVsV(Q3}8Rm9ՒIxonzzz3ݰmxQTEW\~Yu DaUn?(Ԏ,%l?+rB)J."SsY%Ž%1fk '[8'Z9w&@z*? o%W*\rBG 8^a)6I ;I?_j ~,,hcs}QKVl)&W2e֗en:giCߤS麿/r8"GlADf(%%w!'SQ(+\g}?B־#'G]zeda55ٱfv,IYvֶfzaGٜYI\kD)ޟҙ|ԟO,gi1S#Oy8LWu|~YLFFO 7Qd&w!B[<#!Cfu|/H(YH /qr8EXqJNo+9pHmmn~:;xl~mwla-)`og!^g,Joẇ^gKϝSyߟh&Ş!L,Y.Hq$foRlHH{9|z1 {[,-I< __T]\Glosj{0uwk7? YܝY?z2r-Ϻ־ZG9 'J)U/{9~[#[B@LVA‹QT5&d;񥝼G9dXn6Y=Ϯ-l-nnm ͅ[YEܔ_67f{T;2{k&llnYR,y hagy2g ПUoSLb_I|?K)}0+D`<@Yy_,Gwny,|&wT8R.2NWIV,MsG@Эm嘬?^)aza_K# k av˯4B ]g!;+G?(ӨQx)7%3F+T]꧕]桝6|\h<#c1Krs]I&+LT.YGYYW+=KGZA8 +*YJWŨg).{:Oܐ sY{I zk%7::^+V,m6YԖxJŝEJ Rk1G2>/?W0bb^ +Miڭ+[ۅ/{~s=AQltEmvd GQVPRKvB/6)U| <ͷ(Q4vXS"e'c57jCn69/(\YY(TS\P&']Q=2  ݠLnZ_U#=p4uˁflea+xd%aa|̉ f#ۂLhEMH8p \o곭{;cv׏:Pu[gfn$ Vr7lzϟ? .'_DigA?{H1E^:+rPJn5K:7{ń+/e. 
$ ͬ%լHg-qh]qB&%)R_LZCϕ=YI" d&̊Sa8e, m$BE` eB4 W5/}Df`b5+-OS)HiCn\4b.^Yݦ^Ƭ$Z1؈j~]\xb%-yS`c\2wݡlV``EEmΈ}IgâƑԕK?z땯.F/Q138I=,D}SiQ}_ն^DhqbrAhN9u{Yj{5IڜX(ƆQ~h$]IU3)$ }/OwҬ$jg4bSOD?;uoF,/"bN`Y$iVWho)F ?ۂI݊ ,fa,Ί̝d)ggg pB:^c1Y5g5` bsCBZ FrȠ4B0+6ݬy4>i|$u fGP!J2,m VVl8q5Tł0k2E9}ojI,t??>`!;9,tE7m6痃IN/Ge&R2llVfDFgΆU -*ղ͒#MH8 ͛PnQ/ڬ$8]P>X E`!)wGADQBà)ˇ9랤YN8FI8pG( qbIs83YICH5R>8agE7h 0eXo4' DmaElzuttt|, *@\YdR>+9h S*1,Ǒn!J!YF!nQZYIU1d0*G,**.b2R^[:ɛ7(@;y&aA*LT6hǬMe#D󊁥& a*YaS0֮Šdi8؎`0ՀC+D!JҬEi6}xkpEY"D@2MQ ܬ$͢8}+>+P);XPd <䌳(Ҭ$ z5 bppG0kTE)3y?+IϬF)Y#YrMDL>Lm4;h*H+4qmvBThfk $iZ[5 i>8Hg}VF)C8I[[3'1O5d&cah 2 N+F>Ռ)N^,C͠m͢G}twܻ5ߝG}w/_oqHY*hp b!x-Z(4GS5+1}Ӭ+wk'?}fk#YLOQ\,UD ۊjRv4X$!QPkonZ/۬Gӗ`փO I}JҬ`߬yq3@ >YM2wUB/2YzâS zzE9Sgv7_gnPw_Q/ ڥ}/o>ɗoaIr5X>+0Q0 aO8! % b4[u;{0Nzz5s~{fl lj^\_w}˻񏽺YI%nZ~.lq,Wa2sITFSY?|Q&bGݴP QnnMc߰&|J`7S_Cͼ4r{/U HM {pHMnGeb"coFd@7ug\v;R6]^g^Ybuw'BwwK@~ݠ*(Q LIwFnFÀ|hpo47u{Ngv>q|}b8X¬ȧYgKJYQ5 5sHfN/4k[]DLCi7t.ku3cto|nVLu_`փ'zWyu%, bYS" 5Lf u3.^,xKfŔݓ/>ó{٬yI]5[y߬:s`dםs=LLjf 27`v&`}h^"^3^~{f aS7 >KWdaaZ5GYG5:/`Ah6 ,"SlJ> gm÷oV3ͪ$} J*J.3 )$Ϣ7Exf%iP19R,@UYwYeKAK.}"NX^)dq7 3H#1o3ȑGg0̪lW#̊fX55. g e ̜T3 H[(:IZ{*nݬ㓣7޽q1n'MPKDe$ s6#ϡv:z{!wãڒۻaɫFY R*0ؽ?4 畲=޾L̚?/fEiw>1v ! .,\ϪԴ:P9jL!_ov%+h֢H#enIdK$Ka}k0f-R`*kPKR [niZYI3R:1+4uS&rpot2>+iY}M ɨ@0o" @cJyO_w~t$iVaƍKfQB IJC0J9ZY$g0z6Dr7$so)'sEƬ,"sb8:lҬ7kkyGOA66ha00PPYA0}nYMu"-;6 YԵPC$bRʄ84Kxs͊lVdyS @Dc$HMg0d8ifMVKU<< Ej.EiVd5\켇$M@;}(rwtDDJ % 黦> YiVOX5k⢔y!Lo4d5YM͂sMcq. G2> "k0K4k'2^5T1G Lslӑf%iVh,fQF4wм%B7,Ӭ$^fne)*Y̟ Dpk^7JҬi"8V7 [А. o ZY$+QUʪ" ?f(lD*>IN6'Y7jl6[ʺe%!%apM*af%im j]0^YQ2'HM#aЏ:h4aXtrc! 1 ![w!vg1~df$ mLzQ)Va?qGeC6BV#Aa%˲4F0_@Cl$Oxv}[tIΩ{Ͻ?9u9ZcI]1nkO= sXc{du;0*'.A}%]:gCLDm՝:ݿeԔ@0)%#@bٗV *2}x2CvN7VNO$6XZ=2af1Ho=mZwY0'Y3YF?|[L 3Kd`ege_$;;}xfb{o4~t2VOݓ9.~gDǟ?hs ;+ӹ!/? 
7Vmbb]^>:Ybbeey#'b5%R8g:#R_n,鳠,I }YS% 볦YT+~ZkBM wBKst;z_6 ~WgY,K_=;Gsh!˽3߼Ys۰s{uny63gk }Y[X6Aab ̫H\B( ׈Hdc Yґ͈Uz>#R1@B}T¡v&:'˒lN\?^+ _-YAoKo-dYq2w;:\ays6s{͟ H̔iEWc vT=&}$-tP+:ޤVL^a!,Yi~Uw"S QzdJȚ7YbgnEƫ\{^<="9Mfh61>@3|ee˿̸;ǁ_ ם Zy`U,aI||k({*P|_> mMVRS?-y'di/Mޘ}Te$r{Owߘ!oNz)3t' μx9n2:?n{xu!ɸzȒT_ O} e}ݻ,M)Y%QdKȚdu}ž˓S YvzQ+ _\;Fե=4ǓEw}_x}u#zm ٞ:AS}fw7{8mhCe}*4%dUecO,o&x ũG}5r}~J_K pIWglw)g.lhO Se;i?_?{ 7do8W~dY=;>|WOg1UY$4N%SWc Z/+}ZZڗuT}~0ߟeˬU3r'"-q/ W Vk3laɚOYO#ZoR"YY=4cd9QqBk]wodz$ |',IW+h_yXl5uV7 PnG)ͅRwudLuPMYlS8)1%dq >} tXLlduj,%݌ܩ0ۓEɏ 0fM g GX%+dhƨť,]05],?ؖq+S-)Y#ݝڊUe6Ql삌()&}VejXgfiJ)2J'Ѧ\ԑgykx4_:cX؃ڢue0 5,G؆EoQ*Yw& ךK_XDF2]KwjsBVdd݃D֪WgUoK={#3G K)WRžϬm%S΃o{.Y|9m>qG0dܙUG("dhUB1Z)-we*7{l!AC,`y`ioTC䍤nk/5s=ՠ?~U<8=}wt4d߿ɽ- WIP=Ye;ȣ0ډ _tJq?] 1gJP-CP"ۙYI#arz/"udf">'G%F+֔,+9Kc\ЧqUc6vSYC&x,a)4\6 즦罱S!X4wUORzc2?҉) \dy}tSL9'ZyΊρ/P.Ŷ'~6#5,VRbTmOC;Y99C2ċ(d2eKЋ-lQY/G-b>.t`LC< !)| n'JV)tSL2) :Rg5>[̮Ru!Qi+}V1qzY&Y@Y^!}= ,YYYu)?|Avw޷P/ 彲|Ɨ_9$u`Տ2cU}x,qxHMups bpkYSOʯ!KjTge=dOQi6> %dT5U6ny9b yquy[ 9[C]7dR|Iœ[>{vf6/O"'gq‚3zdY߼EKtfVϾO=+u`K IE]TWeDy}A}?>[œ* P/x8P0ջ͏/=4_2iFP_ahvn֒U\p}>]듳 RgG]dKˎټyrKqt TxUo9U\疠Z5%eݫAey532dLR~nxoxPkG2bQ<=>Oh:8q{z?AZ*jXudYi Gs k:&ch}VJBd/Q7s:a8{' .Z7uZ2=|Wf.,b]tVR( !ݟd2g?N|)ķ9,H c?/y?+=xi CdЏ@^s7F8}2E?r129O,~뽱н!eƣ~%:Vy&KҔX/zjd F>{揟+d7RgsSJ_^M)6DRXtVBu_u#yPDY^g2~t|>X|*/*^ؼ9'Mi=qeR>i9CgJ]ݼu`}|}k&ReHݙa<>.˅gx;'xGԗnjQ63Oh)= Ҵ—CgSb ؕfE5,oA>kPu蛿)YgؠgjB\H_ )M=WG'W'6~LuVۚV1uX]"9&Q}VqHn/\(p$C%mo&_PJzFV̱͟u^,%it'aч'h1<=wKÏERkup,v"h Y˒w,:+GEF%o䠳r/#tXawQ[Pa2YK zh'EnrDŽF-A#]]{AfXrC\Ώ>È18'}<.cx,B&&$3BJ!ߞyudՙ0I}?zEuX'뫻ϪkE"]co8jj[H}RQo՝U#њ5x\}j~MbHsEl=Y#jgy+g}l_j>~x562lX]EObt&y UED5=q m/%k{#izpd鳬H*:J :~V[mR^ ]JYTmro7gLL}N܎RZGUKvB3ܵω_0=KxaFMq!K6da)YZU:$5^]W0Sd[e19wWZU]O`VK}UAU"W"j3,*g,腹FEd>+}K}֌dр YXP>x$(8v,[혬4d}q%E*_}V곪Y<{jPY.}dfE;DU:}ϊ2G&KUS1RFuI•+Kd>Hgox`,3ϚX8ݥ2ILgq>gqϊY?D<Ɣ,9gI~}6z-%hF[-"Kt:=;dd*Yo2YSV^EϺ> >kq{\WgM>6矬ymƣEm|}^=򵧟! 
T YRu+#AeA}Vg[*}2AebOgqZ*Y0܉>˕,z@,fz1qg Xui[,=赞ڲ}S=YſZ,ƣ*b}bO־^Z > F> ==T2Y>QbOmT/Hi/bV}KTő>YW"}~Yc}',YLgx}>b}=Jx2sۈ,=5d5z- wLcɆӉ.Sud ~cT{gQ>A%}V>ɪ>oJ}UAU$z(+z(Yliru@a>,0,Yi\q< {H}VtuꞫiTO>ˊ>k떈N惕{x@ņTu,6}}͏gB5Ubq:]9S鳶,,?8}VzkՓ@]GM:j(ibP#hGvYtO;e^DAU,Tbq_!H >˪2,ATN0d|x,kY-j{ݧ>՟.%3ֆqj)Yq]}V*ʘH;D7jVfj"X(1ă,=Wgeey p]<.{W};NYLYgW#}>fd f`H'Fv] .*R +Mj..9K4ӒxF ^EinqGX'm 4!kjRO(o14}gvאydoj<섬 ۇGXGgiA+ ]Vlcm!O:jՠH{Qt풑Q6 kYڎk}cSj0SMQUT4[6kyU+s" 9Jk'dnˏ{ZG: kYztd_Ji>BĎ8JH]ϳ>+C htY1YO(K;ppGXѽGd)_e,sojp7YYc]Y,Ϛ:\8# =<*q{?Ƴ>+CLJ,UcUk `ȶ@hOdrCP:%Ws*/j=u}1Ł>3>)[{fg0GXn֟JuJ``_`%&s ž,t)Y@0A)6X[G|WdsD iDV?L뵆ﳘzQwo-l:U&o&:ْFnTRzT;=ȇ{i>6j7r Yq?xP4U%[+Uu[g}~d+X/ꯨ=/'ƯڜyB#LIxg' tdYWg5H3+'"2pOpxB7}Vc, bho:NXAWixպmd~uɃ.`],[Y}#bD;P^OVݡ d5}VcO>fyWC,}=gET XD 6,Z֧//te]LnկT$:R Uϳ%F3 dR}dƂ>Kڏ}+zL[j)UJ"K> rkSs<_ { 15اP%d`JjI,^~$+Ji {1}Ò̦L Y"XcB8 h d=niURRT=Vn`v1a/omZ9 Sݡu! G¶Rvcj}d1 g3 6>d}Uq[+ɄXIvJ֘A2\6o;XcF{쩧~Q 5R}Og4h?&dbEFɂ/o,"z/ϢhXzTt:Ywu'#%_yb3r](]7ZtYS볔0% ,o-ܤI'q@Ʋ,}V Taci,}Wi9R˪>rN&ck볔,CydMh,moZJMS҃UlYS勶Nk;펊K10& ]ZׂEdg곸ZREXd~ J^PY_kUZ,%KS?4G܎[풵AV?JgnE'\%dvu Agg)Y,օ_ūFUY*},JɊ)9\U]jڎk@ήXCVIK1>ں7YBuqYz?~JcWs5JHs4M՝QJƋf?Ւ^ +&Oڎ#ʸݲ,_[(Yg 9=Yg ;Ϫ*U}ܤd%J=p=tzU}J: &@N5jwoό@܃ Ҟt\mA~M+Y\곬i}~Ϫ?^~] ЭOj3h}9vɠ* q% sȊfE= xY)Y꧇NLx]-Q4U.t|GF84GNTcȮCZOv4_vwo WYX@ioՓS}(m<9$Y ,%KTqu](EBEh9 hPNEKMGd+nGj;m _PDII]},]'}"}@Z:Xc)Y^0x'*)[?@W<$YƘA\賘?qdmm5cJBUYdkt }*kY%.vkʛOexr V+;m%I「DF}bX?g5ּ}FJѰ'K2 Y:ږC> [{O}VcEʶaJ4cŎ`7'LȏS,ӯ\yXk|7Y^iZ41v|1;MȊK8]R)ʣ cm$(t_ Ft@y*XYNUg2?' 8ǜvj2QZ0e)W3ĬT}ηDDaioeZra||%,+rw-a z('4Ot)1!YL#K*.HL1r̞T Gb^^Y~?*mK菴X{s9>[#e$6zdhe|ewI|k3B*Wh|C2RUCH{2K%I:hM?DڍzO+{vgw---RAY6cʨ@9SνZffr299CH15/{8zh֮gsʖX],hzt=GeW03Qَ%v3qi|GC|I!Ƿd퇅9ꥮOsEdaSh+! 
}gYV:p|lL܃dn䧓u4?.!7L÷]^?:7_ZY?E(].OfTc<$K'm-u zu!^Y^n1Fzrvzy65sc􌙒vO9WLodi| ~@*1ώ#> Y+o_|^tー@%K/V 9ڟu"pS2O/} KYYgՖדƷ" 1YZIʯ 5TM-d4{9/yN~~}Wܚynӳm9dj 7dy^'WKY賈8_}agڥ^uoޢ%:k3gߧ}.ܦލ˽3߼cH3Q8hSbFߧ/#g "}Z/ǷXƗ;p^ANΏ/z5N߮|`1@P8WC֣ Y_OÓU2Y9Z LQ~+|U\YEohoTqg^>dEdfq!Ƽw2Cl;vl0ygI3+i/&P}8I-I ͠.rf_Lyy˼w@ܹN;~o96kG2Ǚ^Q// Ɉ6o%KDDjMֽ`\?\OE#QUMw:trj~>XB#rٷG7dyFc%1=S보^Z6f*a3[Y)YloW 84?t~_o~_ojZ.ev de?=G $e2̤y%FJ2ŸL`54_/w[K]w!%d%}{tS684?#YN_8޽r=ry(ʔؙ/7t=HC}Ϧؼ9'M}?p}^sgޞshnlޚݷf2c i| @N|N_w4/qoa7% CfM;Kϫ)YVGM:g=e*4Vk/}iQb2Y賴^My-Y ogq>K}5E|۳ N6/\%#WAr?~G8STɴ?g⣏>[C5iOcON z4 " .arXYځsM? CE>k<!u'Ncʓ}c|y5\ig^IV}93ytҚl${eeIq|kr3.c}G.m/]1w>U3c \Cmg̻IOIl B\)!f=C6+ f s- >lar_AY.^ԓ"7,J'@T Y٭#0/ȾAW͠ CjƱB7- $z$mC- $] O' /o" r~)j rDӜ[y.gϧ,tu 1\05 (N\"=fY[\c$oWҙ1!!efn2S2~&Oo|*vBo>ˬorq:ZB6@u*ۋ2d E^9Zo>15oEP^fܬȺUbfolVZxJ"oYL3(-XrVW%Yibd7M$fd2=f)3YYhH9ha}c20ob̺"ȖR)s,ޱYOu["en6kfQ+fEzM&}f֘/0k>~fen115Mifq.5рYvσ o׬ح&e^YIx&ӘLQϺ҂fYt*?ՒY@ e"d,i fYm!=1.s] fe66Iĥle0K"}jV uڬ:ř̦Qa BArhA; ň#>YE|JW_jYb9l4X-̰<ᱣlV!/4 0Y9fe6o1),>q1yY,lVfh zM;[i/4!rO@ ]UYKڕ["ϊlB6_9D.WU[BQ}Yr 2ѡwsl4$;* "&bTB=`(B >egl}!/^dn-&aeW+ȡ#!o4SsfoCrTʙ/^ʬ}>|plqGz#+[9Zӏs}Og-B)źEAUnˉY~U3G6ˉ {U],'-Oڏf0sTq Ovnxլdy_cw[ӵRӘGN~:# 9VX9.EÄZ-aRK9`VDS,1 "axz;Ǭļ58 adlX9Vv>NmAPJl)0{H/9#*vgf8!.0+ r;[/hwzY[7K&`J 㑾Kcf wW>~?lyoa7ɣ/ڢ327aa XUY(>3Ȭa `Yq-ȬI?lG3p̢XF6"Of}уxL߿QzʍfyrjMF0TkNX]\ 1:؛h:#.jyPM͊$|f} q myYųd~߄7abn 9TjuZeLQ-x@mVP u_ʦa>}o骘g~y  m~qA?ƿ}~2fmQ-65Pݴ r[|{cKO*6Kʠ{ e{S0>iěزY.ĥ`BǣVU٬&㬆Z[wfVgFveQS\X}^YKUH2 -bF$! 
Y ^tVUXXlE't!gedhDAZ,Q G,óOr tܰ`j}J̫=U#k<\ɼ1Ϗ¬:X2275if1+6MB2*T< h~a2Z8D1nMGӟܬTgYP57+'˟/q$37F'i[3Liy,:/D-(|e?|uVskf&w4ΒdCO1ys^iƠR kfjÄ}/|4 aVgbu-XbYИY&V&E~DzY)[S4"R8\o`NQ'Y?kfao~W}|\+K&v]wؚGpjd #K'  8;T?jjο|=99X&6??;篞3"σwJr2xwCHYYŎL\86lգ\M?5kbR{hVxeaѬaؠs`FWd2CzuQ]>Ѓj5c3`B4#zY#k=aQUg2VY+N1rj0{coTZ#d~a- 3[g2sG kU٩feQ/1ˌwv,]Ö-Mcƪޗq?/h7,0okVZu07[.f6ET5ٹPE-Ze];䘕duSpH0ZA0`]=3@-͢&UL*<9nAV&YMMѢWeLO]pidԲmj>5)Z38-u곐ruR|/xB_6+sy=.qh"6+km:oVܥ' sRzr٬JۡJ24/S6kaV;Z0Wڬ[ݶBhOc볢1!.1xye2/達E?q"[^YE t.yYW%eͺt>e.O6GJY1"Lj끩Yٌ 9af볠J\{V:GмL6'?Bɞ{`M4~uq53YOЕd6{U[`i,3֔ rtUT{&mWɬoyݻz&c̳wEwhÀCFg9O,kb5!f rvg_!3lqlVB{X}faȢ%$ȭ{qhVfy/PW,3 lkXa0ffU DJF2k:ĬG#Wh f+hVhmg2R=ƣ QFJ~%EʩgB8;k9A3S]̈۽h^2Li&ʃ 8"k_N~[+,P|F1~ ,8BO!wgp& w䒐8ƗK[=UwKiMiïϷ~5<AJ2Yq ?ź,":({E^1KmsLn!DN @d\,tAN"z f-c-ǹ8reU%^YV$ե.ƳEY'hʐ!ML1@V1 /L\ɥ'pŗVV.uJ"h|PE}-D$*.)37Y qb-eY=  /ԛѷt'x8߮tbuWYzY{8@t>Uy|l-`B/cXO11q/5,6|41ظL11l9VVeE +X XtNZu[-$*!G0x5 _U YӐ%P>mL'O[T{ZdϪ]7[\3YN{ +ÈK=B) ^sWd6S%2d-1 %̀(S SA,Dkyl) b = ^֜d]5m'.e~UPƇY_cmtп>ۂWc8o}twGlfsXl̥̃#ȊV4ĈۉTA*1UbVY$᤯^m\j4DaۏaNhe%%P ,LGmWhܑ' %>OUJ8@}z;o;"uws vsf=Mݳ? g%{EV%,fZD r<^6 ˚Q3IH]Y{'u)ccok4{gNTzѸpBUUMp dE PL%AGE}߫E.UY)IG8IG|Tde[MdrkaExiR{MX;i--r]n̮)k0g{!0ge(Gǒ|$YK 8!@G6|UndiS=gӴueYO9~Xr,H E,JDV4ƀ̤d]q$J~\$I dqQ. m%YYIPc}ʣpDc!HÑV, Y`?_7ul k Ϡ@cf,Y'6HPgrNd%}n,Z [0J,N:o\mԺ,++YyM'. Bڨd2}[Y9sZ,^8ԛϚr6UGg<"4iA2ykdYY\wbPf*p5 f1{Y;eXe?UdqLc_TG}|??t|ҥGxr]8ǷE,++7I+k+wQ/gUgړ=ueUV9VC o>qGNn^,&_`Ht 2>O>3fYYq7?qs,M EUikkiY~7Bp&_"Xm7/6s6?!(o"yJ:~xUȲ:G)z[Y{Hײ,lŚ;߀+ɦi}QDYZwn_hB]zXOG7G6~weeE+k]h 5XD$* ARI,;+J?뗐5{̚??=Ŭt1Y.§5D֜U&(mZ}TS9#j=0]ZN/9,+AR}BV5Pd ZcޗsaJڽsObT5OU%n>^,zKcѸ{+'±xѸAA,+,'˗jYkF [!^EЍϚ.D)g\6\ؓò$C6 ^@d=2,cihb/?kq̕ -YVw%K*%}܊#+ЅJ0+ 77Nu6P&.zUح`"0kCd=p)J+*I sk+K" 8I*VxtႜH)6.b <((1q%Ô?+|Y8D )n_{K&}1bIWR<ݵ"EpN:*\%/bh!{Ynb++Ēx~LO%.8s)bA6/ UQ )-`vtɦ*vStlY(BJ(#*pps@" )rCʥ7sBTI"RYG0J>֔`FXehk!V9|< wk*57|b\oVkɻB9tҕ;GNt0|γ?JGʳ[}<:he:T.AM%.. d)CNJw'#hTbj^i_Lb&\ p,by0q*s{n3~e3NQm4~ơ@{wpZ[&(u&Ip& ? 
pK#[!+RLa ]2V8 #3Ye"36Ѣ954´ "8Ea)ezy`gk[:uPx \D t?/*VQ(VnlDd,%>0 eN847O?Oޕ1 "d0ԑ€_r Gn]#o,_LO=Ξk tS=$IX @|:KbBUk01B\ྌ$&*N=$ Ӛg,Ls}toW}"}b\+p_{˿Hqh|?ꩧƻG7(v7GϑOotFk,+oDA*Q lȒ@L)gU\i0芕"4u1s'#B±F kG;O./׻f$үƥddž/vbW/` r`edLcߡOf;Q_2W4X1 / t IUsaͮ0G4b4tYGC1vR'mхp<[}V:͚3DVdYR%I E,"!)HKLê>=]@{wZ:V,5đwEBno<O*TDJ MiXeŇur,7" dH}<6h=t|#cϑJ|Cd!I)b$Q*YrBG(_%HdY&Ik$֖H\Y1HI~BO*DÎyY,c+1o|_eM߯Mh]/utzja  rOhیG_7})+ςtt"{C]z"$L[?X.,}ydh;8YEZӐH_gD:WJp,RW?@Y]ˢiAXYB=$ҿsLDuecVZ;vz^DVYYY 2]\+,LW%4MՉf G6jFX]"my>ӈ^QKݦXǒ2,œQ؏_+ު嬩M <,λ>;W3$&;c溄N/kfYY %+EqK8Y\vw cnj ~G~,AU˝/0g\}VL֔)c"'X_M/vSVU#f茩bnw1dE=`!Oqi2YbrNYΚr2צN'JgM~{1?u*ju\SLU5D8Dł74CxĦ#K/bgd]DduUu,_. ]}*%SJHnG~1Th)}~]V`"D0ǬW7Σ;4)cyodʊ@ ԥT2"YlTTT$ӦT+i80f/*z, 0ќm^2YEE٫q$ZlUCe2^5Ի}JD,j/:$8:E;K>bz.TÝ"EKL@ԺFB&ׅAg/.enYgUYE/BК%T2+cfSq "ӓU[E{D6a6[e>ko7nFCi' :ہntRz3}LdIBV^P dL͡ A_$ЕY]lZ-EEJT[(QY' Ks޽ok8F": y=YY_ ]GomEEEJc*zR2TɎYU #|,gq1YSG!.֤/*:Y 0\p15ۛmJm}.hyxaXhA:<ͫoz.ȢAdMb B_QMUNiv}c2`=Yb([b3[+lbA.ɚڿ>wL**k7yi(M[wK\X-#D+;qRW rq\|V&&%dMbWt YjlH6NBo}("x+s@V+/֘[3NW!+:Qnwkjq>1_[@gCp$ >mpi;]EwkAq!\ظ:pz1 E $;Jg竻; ֥imR7XeY$ދu}˧wuӖ3b|Ut Y.AQ\fJ|h1 {L*>9تXw_gVK&yA/PDANmЅ~rtC-.uEYE/Z)9?,a9uMmoo[=jRUu1ň:º.1E+cUZ'X X! 
Ń|e較둊V[UdV}ߙiEDv$'Ypmda.Kn@)ܧOkl{z_"', z(t؜BkpA) z@wD#YBI}`OIV~ydLd`OO60r, ^ cZ|{?VQ!;Z0w2YVb e!L:1KOp>H'U87טay}1-9ֻ+0+5ޞňnNw96,XUYGdYic1 oC(/IG $ Pwi%T; zrkg-?obb}!UOrTz*rT_w!Bo@{%K$&Zy*i_mFd=o?+**Y&oKz!(!*Am}[g1̀,HfFV?,7?Zok6z̎RTr3:ʤ;s$?TFMϺzU}Z"cy*'uȾ# YTf;!Y^+Yl5g!Gc}Q?_~;&g{,ǖ~-_,-*bLb-i%9/IKru sQzc5jT7YP;~Çw؎̟Ax_Fҧ Yǘdr!^ݬmݐ,ɵ2H +7zt6zX7?Z}lI}Zx**d!.Y<ڳ:6 {r-XZzu 5/%6+"k*--ɢO j**d% Z#|id9+{Ɗ[Q dPE_}?g}bѯ3x|v*C*b &}\j1K^ui"YbYo*I}lIcO;@G0gM%\3!_}Vdc;#ZIב,jSMHnH|u+Yy]h.)ڊBUZeReOZ6%׺٨cҪNuk|nܷ˜B-kR7 {:5;B֋VQK$qOW^`ХZ.w~ٯnr^( pw~Xڵ/;.:˶cզZ*M~\b[1YdP 1zoƾӲ_-*ri]֑^ (M&9FE٫vnZQ߯dE^yd=^pR,#EEFIQNEuH.%[\X*YKX tjHruۑUdMU d)OVQq>8#znC,זC>CUN [.,׸Sd!r@7E&'dqT}V _C>**>hLw5rxw|^|yzi #kXwCaa)c*>-n}r_.J$PϿgຟs x_0E Փq|mjdi-V֫YHb9޻M1LO֘,~[=ov4.c_ ~yj?$kc/\/Vfbysv7 $bڬVMڀq̲T ߭&'IgWz)dW |HtoJc|ofHȉeMՇz:f9Y~#(>ZEE &1w8cIEz}Uooi볹o z=k`߬~iFiMOd~158F%Y/e0u5nTQjmA6'i"Y\k+vސgfŽ{j>QSE[~?1cuHd ~6**R& SnqƮ|1Ki$Gp|I ^7Rdji!hl>l$/`(>(c{·=G{CY.,si eMOa:KKYom?uX?T WbAP|m]tMcos9Kڋ\Ƭ5lSN,ԱuP͢Y[=s&j=unͪ*HJe{#9PƎwYrXVmK|s+7&kYVuܾYl뇫olr(0 ˰{U*6Rנ+n ]Yˉ sgƼĤ-@q4|lbljֱ.@M[WY]r..,IPͪla =saW Qaj٦,bvB2I͒C5fU6MX5kZ(%^I:e>bF1ahй@vǖ}}M,300 v^mB A:!gլbTRu njV34Rv` 5FY @ >K>hq5ib8^f}110>c^;UjVeşu;rijP/fYY"IƧ|BSl~jVUvVþ|#lچĬqt\r8݋S-VrY }5Vak̢{%b0}'CڴYŔ?_̲ta@8g-3O%Nqduh,(yfU*UlnbhcL":qs=~"6XgU*Rs*GwJe, 8`+ !Bo͛,b)DZ"P8~eo[ړ)n9*լw<\KjY`91.tl}?Pl#K~Me*UeR+Nֿq5Yt}<avKFGݽ.ɻo;fU*z\dZ9-k.^Z-`.rއz|}/ϟ>}TZ;\HH<"_,+`VrhŎc'rb}K F矄݊4mT~2FiX#]VU©$fqJ093+P J}SAW=iYNY4Yh1D+fyoT-I Sn3ߘ3?"/W5Pi6 MP_T8q"@eQX̵#( e7Br'|t٤_"vPEϟ_>}~2syg=BHUͪ|uOi|u~^sjS|FO)AԪrŬ*=@ q6(J}~yQ f,_?x]c^9eAIE *SMv"R,*Zxm#g<x(uJ}* ƙN$1~Xt '5l W,^ l8s`r>una-幘xW Z!lU*?*M'~ ʣO1$3!rʇm̍Y?7|ݶU$Š8Q4jphM !xߵDǟW8[Tj?k*Tp)w= Z N>z9x|qk~ohGkK-LʈP,%iܫrfYXNYfNdȳL|7Ԡ5,?P4KӱӕY`1l]0˝"22 DY_׵-ja[JlƂ]Hse=_mǖ1{t|}ݥ J#zfq(/:uVjIx- >[5fm,B,^,2Aze/[1į)41j^̺Yw:J?vW#bM8ZqcQZ4_j02&3ptfmJ/XĬ)qƮf_ в73=i**NFܨFuʭЗ%mکճhv=7|fm*I G\V}G}׍h *. 
?0'b\MS@vXX8G U`Ey vrV+?mRR3޸]MSIMx|Dio RqIÑt$HĴ 2Yv[Rq)LSʞ)eW1!#}?tYQ%V8ᮚa*· c&x7u╄-10K<6á뇁BX_լf)լMSao r.0l4mQZp(tih.ݛ7̲Yۦ2x t^ Ɋ>bVG:7vS#7YcSY'! O>C vO7^]cVdլMSqL'ǜUrsNw;#Ph疌/GڊQh. !VL%` s&Df˹#Xշb]W 8K:opTD vEL)cOQ9MWfofap^U$[Ryo'H%6<ǠSq)LtˣwDbT{Ir*N<"&)x37,0PA:5 Rfmj4YfaF_4:̎u9dvs5R͒?!f| y9;8پr^|y8侘%V]5kT0! Ʉ)h8Jsij:0}竘%7D,SY[uT$q.Xq%==-f^ecs4^) (ΣsuY2 >]% rH$E̺`#aF~1ZY[dX8(by^Yg3 ]kP8u!QCCxfmNb1Mb1b@9 XBcV34^e\*Sq9,#Ra6~n -64M|0g ,k?R/? q?|zE)JʅV*AzԖ'rs>RZ/7}x(͐|Z*kCiR(R݇K5-Y}! hR 4;M?V>vaiwRQ~!4AYZG?寐߄(Vh(VpK[$ >|)|+ rP|?;b4&N;Yݝ1s%1 SM0k=l o"Sb A #\.dn4-mJ->iiv!$F.ieD()r}P.^幑T^10u*@'v H yAe@J5a vl,:9+ ;ɀq6;?>[Goi|2;א5_ derXxN Jd WW$|И7fk/<fxppIc[/琲L;sk#K15r?9 ez"~@h\UhN(ӋMYU1b`ggnEQhHИPoJ6xI#"PJEMO|siw!i IIX .E,`kbUVguduUgtVYc(w}e& At5 @  ,=I/L&ɗ$,]%4[8fQd_sމ7۱?fxwKN$h.t %; i\t܎Ѭb3?OwuGɐ?-EB]AF noUp ZjՔ^RG=GH-}U$ N@$ݻH_݆ȤmAiAB$Gh9tfYx7h9b` Eaej+@d!=I\M=2Y 6+7 g0Q%YEL/k-J';@ @pDqtp1k:aXwId=&K'f,IBQ?JAxsQF Urf̷j*±O-eXɺ=7s=8`H>}b#BҼj,ao&oIR! 4DR 0_s|rD9l-Ĥ9|8y&"8eM?lN!Ϡ "5;!eӞ_W㋅r ҕ\|s4+AL)$%pjF-, ZbrըU׉7Lt>$*§ad3*>P=~@ 6T#7M(B KD >\bO \@hAR!Y: CΈgƃS bt w٭VtF̕\ Y\U=޻Ƀ̩―&iS$9^tr%-2%Y~+?wEZRiK.4GxbЊ:$`: ;\llEHmgHҤA@ȍh3 ^fR>4 %Dbn1.$ f [ q T%ٌccXU$ZjjUͬjҠiLu i yAD4@fa @S6Lb# |bc0 РM RzJM.kWx^Gcz~~ oy9|d@cj z<<~?FS&׻,-٢P;p^idnpmˉkx㿷@+!4) ;]nzs)n:WzFxS3\mjm|A8(Xrڣ |nj` @VҨnG L< %k-9usQ<"Pkڑpd ZR7rTM5^Wm(Ep YM0ataASu&Y(dU+A|0` s梑:L>kGY5PheX x^+D]. i ,E<{];DK0 x5![{)hLYAS%dY{K*SVPz*t$=J)\T=ј >[.s{-r\_o% NOj?նOb=?^H/4&,>\,7VP,kRK2k,wXGUMө_Gl"AVo0p)h\ɚdMoN6 PEj9 o@Jv$G3Žޠ0NrR%iq~^ KR~ٝ>FR_)#_/;ɮ[+ΈkE~o*bzVDf":qEU/xR1eQ~/Gb?vmMB 3԰ud1d! s#Pnx67ڥjd - 29p F% \nYhUop;Y3޾N3| dNb-4e##TJ]ڔVWR.c,QкvJrl;CqF1҃oÛ'[w-O_'xF_E1n6c/_|sL4Jg+,@:V^q0 U,( sz12L x:"XS\й럲d}CvAEn5IgHn> Nr.ͱ8 %o_CBnw[SyNGL*EVPx2[=-? 
x#5Bis9a|`ɺhMk`"$~@a<80 ^(OH2/萬J7M, pদ2+ p4I6Ugp@s y(C*)32>pUdE'Ϝ > BFr$ $;,lnv_'rcX &x 0Jt@RΠy: ~6hRCZ JW1$EDOa|n=++K7OBu15^/P!M2&)t+{cSh] /gp?FA tzk&xJ'Dp^@= ?uVóaחQ??LTL5__ >yeAԚBnؚ$|B%a&Air+ѪVL[3PWKlGzF@F|GdIҡ63ep\|Ĺ>']HwiϷG¯]8Ƨx{Lg kSŋd*Oj# xq ЀnZK:A`Lt50M̈́.ķ%0걁5ȃ ?cU QguF>4UڃK ,.b]3k)p0oU, 4g_Rݯ& JhX)UL\?6,*\wQ푬j6s\HsYlBV*v9 А[cuy11 '^ ?A^@t<v?_voDR9oGrqvV : 뿹:cFl?'4LW8fmxFˑ#ц6b`gt`'+wҽU7߽9綱oijVVS#X[n図+E hUe͒,)`Г8q ol WR,707,]1 5#Z ޼ĪlM$xD5ρdOZ#YT%w۾'$|izƅ:߃<؁VT9z[MWѠPI2CXp$e'&3~&hI%+dL4;jq, NI*4Q|搋eFzridd0C| A,G,iъ08&EcJi @`d*gۆ?C;8yb'%ID+,{ᨇG@ѦYXAq'YF/V ,dߐe^W*ޓy+j~xhd,kn8tט榱aa`xsU4,MhVN93$^N<5UNx_&:ɾ׉bupJi ^+s~R p.}%,JTkO9F~H|u3ec_>JSdVf>f]@zimuȉY+`4$^Xeq:JmS|5PUˊ ]p!YnZjK[H JVe-F1&9'1>8dyfTI{18ZFg3/eaۘ [z/`[F؏o Y3R/PB@?ðeCܯ=Yg;?:`j lCQ2I$tVtɇf!A< q*a0\BI?X>Պ4>&>MlUIVňcyM5]X[ 茏U@LiG$u (Kr.M?']IpwmaNC<@9JxwgV-)a*Cķc(_T\g!DPXm|gkN K0$嵏g: QA ǩP,Xؠ%9Ts]@CEMѰʌwWhsfXuU BV.Lo⫒UcRgGb ʸlr qĕiko C Add+fYKѬ T8 -N\dDU45kD. #\6 ڰ1:{,DK#P% *9xs Np& N Y w59ʈ{]qRIN/Y!`hYV4jaKx% @.88`қJм n-ntk0Np106tH@N(EU?m6X+`t Jj1ɒF|mkEYjT+WMlĄh Юg!YRi (eU&+%!OW&7=Eܛz+d dE Ԥ'i`e{Q D׊W9JVi`eoiJm>і0E/ߧ3RəYT1/c<'r 885Y,C7@|B^Yx\X{ѵWXfjz=qedҢ n Wu-h{¤pmd^Ѳ}8 Pt ؈gVƟ& )ˉLC S1xp3»2E#K@|=^lț7ܘ Bq$"#LɒDRݿfgu6Ak& y|ܸz47їFJ뽼1)T vyŤvQ"6lQt͆,}*SCQ+QYC +\sQA{"Չ!99L9S6fRBC1|^^۸N` gkԪ x0*DehP9]?W X?#W0ՙNq&q$Y-lFNS^:>6@w;Å]#WhB+KL#' RN߲`3lk=$I* u(PLrG9^n,$I?,1ZD 3M>[Bdd,B>Ǽp&ޠ=xKy;`ĚKL z8YST$GR}&VБ/?A  )qu.dGvq d~/nypq.8@CSj갌#o`UtXãXf;satrՍ4*,%>5Mj7=jbHei|qL-p]\ hmeg~* yAUX="Y Q)VV kRߧH9N~C[J_ƒ:O6:9IO.UfMd(3Sϱ8_ӘqlTruo3)Y--QM>-NJ{e'i1FFzqٷDxX<MQ,|+ `5=m,ES>BI|MrwJݓE֓1; (|hً\roUѮC%Iw A ͙`y<%4O}|9'$R pyZT` V#AK-Aj 1u)`,j\4I2C+x@ܺ ӗУP)Zٞ'm-"ɪR-t\%#3<(Rx̹ =m3my}Suvj˷T8TO$^=H3]t*tytɜ?4LG Q#YX}W)~CpJi2&|#6^EBTHlD eW@K8pD+@_D,CL2x+U!4l@hc@_+腬o_d6<#PՄ*GTmldldv'T\#1@UX_vUXd)c7U\ hF4&X]O3}vߙ>^ˬbkKS kgveQ2:lqء)t >#kNcwZ+.M5ٺ;~td#YnMϕMm3V56hԙy^bn:У%B&Yfj0r;5C92`VTIUpCQfAIS`NɄEQ^Sta{ܲrʹPRx9A 70,(dNfm"UX, [-#hc2!jsY 3C+%(8YH;Xy3F YT<} 3v5\YgYFCje_Yꭊ,Rf[^P*\J`, ^3AW`A7+3IFoE@cP)^eYYa 3wdLV Nj78@J99ct0 #XjͻmɎ^ 
b'Yj $9aƬ~YUdb* nu 2bnFE"A W^.[P3 Vlanb4>Mg69Óȭ'ݩH_Fn 8=xR{$ -.swѹ&!i\ID6;eUUq*G{",߱6tٵ ^ZO8o?E<7r@Q!,,4j Ȫ cCv\OY1RoB9f˞!K/(t %lբˤff4).P=~{o(5m.O\YYib%YBj YA :Oyܯ ] %rgx㰃iM^J!I9U﫼@Ysvj.'=œDeat'|7ӵ8 xHO!.eav!FtN8em,b>:7Ǒ@埛ڞ1Rbѥ\}ȸ\ 2|ǦY<^?17\thYz5K?bK]jhC\eɬWѭWNh6ܔ[tIH1n 0kI,U2;L_B@K]?fDNIN7:`wuLȫZZ\Q#c*r&P1@e)aL9Ahf*B2nJKԸ4nOO$% 7MKQ~/7\X OʓAmG$rx@@JNLJ‚fy`1K=Y\ ^qk@0H??>=>=s@I2V'eAj`2r0:Z ZqJwDzeUdM˻ JXL5Ib뾯3vv+ + ӏդnȴ_}/o,Dc^@lRl}/cݪM!ߢ}3ܒpk6K">M.eUk2R:~<ԩXdTrBkn;@deac~^!qXebm֗.Ut PX{bM:ZN_+f /#?ĊSXz 3\_kܷ# XRv4 zHa?v*Z 7 #Cl󼪼 >Lc,)!P+w  AJv頧O qTTd ?K4 [NY" T N P71!*~'j:14ͯ2+EM/Lu`#J㰼Ch#Rê*$JCUhFEwML*l,$\<=kw?$8+ @GJrTu#deZcY !kSg Y]Q#17*!Jso91+^g'x}ʵ(l|T1IJ? ZKǜumvAhs!\n@+.*o1h;D7H*`l&mpIAaa h7P#)}A8L?tva}T@,SA%!֡0O/=j)pk}Gt41AeJd[|'T;#f*-G~ ;-NEB&=n֑[.,3}C"%YIprx1V|ZN_W0UsupW߭\YB 呰>퐵E9UN%t㧸J-8fr %Wke T,7ҕ0q⿮6&yaUu.* 쏋^n}(H 1Y:je_ S48 ʺڵ&NB,aٔD_ZN.m*z>qaqpM#+Cm  :E,3q OtżX8Kj& !{~~ =۳ >ޓ/9ՙd 4A$N7`7UEn΂zWI7}89%;ZkXh$i[9KG?[bBq wʹ2 Y.A8JBd 6oX-uY5e+B[+SijDn(PLl 9l9b?e\uYj/1j,YY.fTfDR;` z 3>O8|18m2hp %u1W5cTI2mχR@aBPHl@z Yۙ7sT]~Os9✿h?S}_kAWtĭqWt Rڥ&: %b'o={N)jVM*~u]!K>B֕::!tQF+-ZG?[33 rhzy'YG4թYF  >]ڿn{dtĔK@hZ$cii*xqdO*PXؙzY(V, %9ܧ^FMRp$|3[puVqT:DR^SA.] ے+:){;p2/v_{Wqu%W,BسouNücܧ[ǓKyHڪ2~Ww.Cdio(J@'ɏ wA=w.gEUBt&@D)N0Kٲ̅)_ї}\_Y'H~WUswaXll-~xͥYtxo2$؃۳*+b F/ѥA}fwywR*eNp[ a1c6Y3J7C~GU *l&Z7wޛ5Fj1+eҬ~v{pMZ ιvg͐?lXF{^ly,9]߮ZQXiHV{f؂}}*Ԩل3~H"?_<Udpm<Ưl,_~T? 
A[nAkzS|$X{vs+}Vɿ݋aIQ4>t2SB7aFڻq?iS@gy_)m ̰Vu 3L*ԗV1q (3Mw)o̺ON?5p}V6Fŕ59Fb0lR}8ZPc3%2tk\8@됩'lQҪ~J9+ LЉ``U_̫ ɠbg#3\ mcXJZyWJL ԶsJZւd|N`x03W<FQlN[,ʢ3\ HތH'/8zrk٩PM^w\Ude&Qamܯi9ɂ1d '.֔NȖ2NLsuxVR$elyd',dQdY4qó gbh+tj 9Hw(\.ZiN' >M,TEUƤ  Oo}ātz(c.$ hI}cF'O=?y'Z'tϻwUb)(Ru RS۽Òbt,UW,,ւDP|V JOM9B_hA;@=-r{K[s="rSϽEd:P>bM`<}}"PɎaTI<5L?˼Hvc,e-.&J/*ǩٜØӧ@K[x٭z JTsǪ+Q_YPEr5COwd3e1[)뻍ɔ*=aR؛:o*YbKӐ >=>&BMX1ٜ/I0XlҟhvLlYjcI|g"Js4z3:ʕqL_sM 7D޷-iJH\ȖU9_3ׄ|`v:P Y:?˪U΢*{uL/(?$yTD ~OD`A qOs,,eV0,t!J ;㏳N%ni z`O, 9QC[ sj.vVÀxD$m#伯nUYX8|a nKO;_fN7B*dEcV;ŖG%rS*b.\Iضm(C: !1qMȒc))0淒C+т]#Z#-*'tKJ:=7wXuwADžF JQNrd*Vac5 7R%4-^HU7JY~x#+$K^Vqp%VkqU:Qi^b /tDt,l@7aTaQl䫭*L1#Kha [ުgQPIENDB`docker-1.10.3/docs/installation/images/windows-finish.png000066400000000000000000003511521267010174400234300ustar00rootroot00000000000000PNG  IHDR\WsRGB@IDATx@G.K" QMK4%w&rzb؍ RH{]`awDљde-3|ޙ}gyFpYHՌIMނf L}=&E MH!Իi\wA K^Q)v]* Z5PPCٶV""'@(ҞǎJRa+m2@@gf__W;ɮLnA7N+>uXzА:CvܴM׋]7mݯ|:^M,5H$ZVVڪGʻBzBN8ljjʪKkWBjZB`amaQ@DĮu]˫V֑']KT-TꇬF6ڶj}NX{ }bP@۞YLkmm6PѶD#+ߏTԎJ%$Rt$_- =M#kEG뮻ݧXH$V>ӢkM\B-CH8vObkNj#^Cx5;ֵw%ڟ٩QFz.'%EV{ҾzBP^;c#cjccJ8N8N3Ƽ<(.f9~͠o\\!c&hhtFĤFN8NKG+[YꍮG1edN8N8N``h(Ts40"\9N8Ns 1꓃/O' p' pBM;Г$s)' p' ptgrr֜' p' DWr9ld4]1bp' p < piܸq<`]W Y]f#Zȏ8N8BŐʺ沂>P@W *z% -55P56wW8N8;{S+EYxb NRҥob]EYY !YbSS-ZON;BDo 9YF;SR(f5R2-i`h~v\_a_'d zwIfK5u f688@A>g2R!ԮeekުowFFt߃dSylH?!c}cxGu3$DmAf(x`~+FgûXw^x˿'~^6}0Vj,ƊsUwd̷KU,L2KW!ӗPoӫsU0Kf l5V*[L>!bU3 [oqO#FJ;LYf7L58`T^rJZ$2ߺ+vfzIaD}\.`PB6|;G~I0=M/;ϲAOdiؐB -Qjp99:C@x)rso'Eޡ\z5F_eL-^wU*+A$lE!N$1|:f#vJGZ "#78r, ($æ30/V!yK<}0 b>Kf`U_?sc{1fa`vE R {c8,W@^1 >b aR>f0֕& +G}Kc q GTd8%a[&0b TVC:bpi*qz~kj$T$ >r(lM4 /~ߑ*XjԈT?ڀ͢wN>EVt.)[vvP}.ugd,:{OV:dmӫ-Fal7$  5Ypl~/rbaKĺ㈪s'tMld=]{.45^LK. X? 
4&r%282DžbCX)0ޥFsn ͰgϞfdɒQJHI|%)1*»PӌJ1nf)3-D^Ԑo}R rk6gJXe */OENX^Q`&z1!4R`+gJ(re3"E˱xXAVx 83g( _z@6"ZcHoAPxZh`y~*gU`@]ػ;Gbڐpj0O NiDT#@bEsoB~OE~n/[w`2)DɝO0^R<:nj%YNl,$(j#XGZySoeJ$Nqҕxm!Aq$Den,,g(hg~ _%{=(RNg ȖmԑsM> (\UYN)S1SU8Zo)Zq9| xnl)jpngL 9])HIwġg% wN\z&077nj3ZY۰hu393j W;zRŬYLj.+눵,Zͥڐ,DSnEµ^Nu b@7/ >uI+%=QfAMx@Σ0~#nŖ24?b~= kT]{[w HO ,Thz.ڶpxr YoF#IYK+\*ZP]z {G~-7%ˋ =o #ΗӍM(w1 qX aQ]r815H>1Yӧq1G ꉡAEUT TfD`ا)q31-saQ١$̬1GcFrqϧ /i'4@o@ SD}K 5)O!-z+KvĊOCaF#$~S 4}M){!`?Cba!VOY{uӶI:] c![M:1Xo#!Mb=y@ډ@S|N젷ތ"1BhK ,]r32ސ5W`_plN4 W8ts棈nrz+EQV11F /y$dn2 _1pwjVF|Z}hmL32"=c!zim섶d]X- 38W/sCwɋƆ&#P[ST5i98+r {vNf14()T&F:eK{J{Gn|b]x}w̴%k>/37ek P1FcHٰ_ 񶃀|ȿsZ_cVRpade+ )uE7zayR̲(zE^Y !- sSȗ`% u&DNCJd+}b&:hVNч-!]`Rͫ\&$urOnYHϡdYR⃱_Ze k좚Zy*}@ffp!dfưkR^-s/(ZNjYT a_i ͈0Ba&X4s)kwkG)ENCWQDIEX`63ҢK03) m0?,ZKI}巛XDA& 2 Fp!ts p}$n8dz5YS$ {8PYMl OnGO8g/LE^Y],advBLzWԌ9M~-^KFTגks{2܂KsdžajT$΢Rs;V]_3BF\L$Ÿ]FtgM(Nalr54'N+>ն}xJz(LFXT#J<ӖSQb!AA8 3ҭa8G%m)&͜]ŵ^x`B́f],vmH3 >p8;6`}%-t9؈ "} YZybv%҂duE2Ӭ~L:=g̰C5֠J恠1v0*zӌ#1qJîms*B'g䉀!( bCeM6~.1 ԓf-^F~ 4#rYD ټ^,Uuy\?r`Y Vj<cLA݁ddގ eNd}TڇUδ󴆉"#2'Ub$EGث0mI\(r}k]bvV}vLA W.F 12 !?o. 
wʒ zIS`OKߎh߸ 6)FfHW8.O'ppxLN )9+bq$ '/F˿ )9R9i}[HI(;yB8!DpLYcGSqC)Q5l-VD044sX<3EƤDwPL2XLj1vp<K^5ytn佘Ecp$ͺct4&:sFb0ORv*DЬakz`2BZ8g*p ^&;z4GNf!6İe5+6M]`d҂u'pC3^e  ΰEY; ȅ氳D`}&aL,1"l8 9.RZv.bM(h]+piN@HNJ]w OnG[I1Zz;e8NZ[m8N8N` pk 9N8N \̀8N8&̳8N8N+\ p' p'05y' p' pN8NW0Ϟ8N8p6 p' p s' p'.8N8N`\`<{N8N' p' pL+\ g p' pWx8N8N` pk9N8N o' p' 0p 0`=' p' p\m8N8& y'pPV\DfE( K0|:[ !_%HAfIDR9L`b (J8JT4Cƺ_3f֊bŞEj*{xչ2֕4' g2j%Aԥ!7# baӽagh,/R\Ẕ 8Q-5V4e 7, HnerٹÃȚ#<[_mGlm_pw[Y>/=vnpu,eG@37 e98gp6-x̙3a'SXb;pej r!Ȕ|o9yi`ͨ+I ?PUC1T"yORX94T4|Zj;x/#emu9b{!=SL7RSh߇Ng F!rN+԰FӰfթ~ HymePo$%XXdL#/ys=+kCvabd1e,O}A_ "pi&3K4T1~騨E|yV\lO#wՓ< Ț`4s*i*I|e7UIͤpѐu wm:[xޑ/b[-2m8U8vJ4&#XRf؛/Dw#۷_m8B To4',BdLX=*\,"T-$w{#(UkJGhW4/3}S3IHEB!o)Bp6cNmi$S~Gl|&cWTt%\k Ab\e~ 6B(-D`ZyP_U}^_D"Qya%M‰l(CSu&q6ֱU4Vqн0="$6܃vrJ s´!q'h,J$rFw#u r @d\X/f0,?G# xE9\;3-#Pl\8 Face3 DsM:2{8yMcPf[d$3F|!-S'W q#aŃ0<썍E#,- r a9] 1.]d*Usĭ{-=>I GSɶT;_obd$$z|^рWh(=9~MPPT 9\'?*g@L/ӥ`".^B\6ǽ? xg;P8΅Z[ƋxUH)>du2xm>Ԗpv~$(L{}H*~C"YmC'_]ZcrdT*&93|#*6ES>ho[NW)p}K<5 W9~5>Dp}Ķ/--As|wgZ҄ -YgO boxzb/aͼpӛ?o@B|Qc5aI5JeH9qr78e;LGK Zr}H1GVgB|ԝI"pgxpbYC?9bpmW>tt^AfjYޯ@'ObO .}{>^P4/4Kp)߃}_]5ʋ(DA  aIԗALC@S+Z;K"XF"qkh/4Jh'{S<2hv4|&°Cص}-ǹ{4Kw`'*_H=ר2K@}d c !":PyyII@l ]>έ`ngHVQ@y(cC< XH\Y#+UqRoNkn> y73JYf`%X]!_8/[BK'sф맂BǪːu7G 6*5  9u(%Qg!E0p;ÇKu4uEu\x QapfRqHVUjkJ7t.=S~} FVN9j&nFhJ%rS~nNdvP?da) =*+8YRJjkrKq>  d '†]cW*R.K~^Nx( N%A8T6 HWRTѴU5({!$LBa) z#?raHd9$-Zg=KCzq^g{0 7I $"AR*~xՕ]Sa[}/iPN~D 01 7Hw`IJmȪ5YNXuyR2À/k=vDc[Ρ1ge3/|tqҐF>u#)YPEU~.24zd$5LMS%Vu(YSrf+`ӵt}jmWXnW:*>q+jDCv*)mD\$$>:rs3ذΑF/hAS936l; CC-v0k|IT*!ľ p7@.*KuZ>)\M( }:X3NEY84q"6h\Ѩ-fC<}\20u++nǿ4)\`aOy|6>_Rns/ kN-]Y1-[+.M!d"d?xe] 盔4 cEοXg5zbx`izSQRVCoT^o M7iYtV)IaD\2|re%mrV /0m7XR%NSguJkQ4|RfiBB)qR, Z+3h5 2>:P^Kjה1*F (MWv"|%dz ^CjfV~ChbAʐ@;:I4FCҸ9 1AP4g K-Ph!EgJN07e;,ξE}% rؙ6Ĥp7YPtXu9RWE5]1G"&e_!rHIzz"[9@O9h̼ )*4iHQRg!97nja?bG'8Hay46CEo t ,5iR"o'\'XRۭ gj~ac롄6qLŰ<TScYlJFRJ]z})/.W% >+Q|m lhF%}c>VS/%]uyj+PR*Wܜe[Ê&84G~d֊GwzjЀ/A r ?@z8f<S!xm;*]ev'y_b^[M@f O)0S|P m)S?{/y8i&00ʕ̬d.EWDr z 'f>I8A4r 
ʠ'Bƍkt[Afc+&ee#Cm)$!^amN1lh( @~!Ƞ!~KH6!hG 6;BȢ~ BJkIBN#h=^Iq^Kݎ!1yV5>yaؽvۦ$?IaN5*.;^<V4F){ad`x2|ԧRbz'hyːA?sDn:^ gpu4"1% E (L\0b s iry0UAa s"zo努.I!Lڏ-lN jtZCޛG̛AnoĀbdf"iZ )Y,cP{'_RM&33p^bCqF .WQD7]4dge#6T4!Ct[͉DumI$B=4GC \1[[uI|GOcw^N i:|l$sgcqwMo#a0,_®$rH^:05 {tvۘCiS[k m)̞O3h8%oˍ=%ˑG3KPZx'ƞ"XyWVb{*cNEUW!A%֤S)l]{CH484K3 5 (TC Y0 MF*G'ΛeO.0id՝8eJf|!"%B xԾο! s_{+ϣ0 #'aeg p`mE1zk-4ʊ,M۱S&|VaEv:CmXd C sfe5^o|->oE}ź>__^EdJNȲL ga {Nwo>x#RD~sVbˎ0gײ=8x\7y$Mz4 Ŧ-;qDJ18"0?B_Nv&xYPW{kk#;9~&`@X83f!ٔ.-gob1Ai_?ec#) ^^sXgO*YbE0^oE:_)سBhՌBk@fLCzh4a <})8 9s=9A d52PRHC6~``<w?^w?}јP)04C0êU4Hr%I#>'MωI#0blikԈr4`dڑ?Л1dL eJCh @o+CÍ0F B̂^l~!F< |~bCPn⏑D3&'g̢Q5,L.~_z>}}ג [͗ _]=@룏ԃ&}(v,r2WO"W,K?%ctCyLM}~2okXw;|L}c2 }!&P9ڐbuOKe-\MmFL¿j>)3!x/xDw!'ҡ35y5m:ۼYŝ…,}/)?4 bY9lHvo`%2 ܭIv33|jmN}/)qת} E0=XnAF1.} TBQE-h(=.$[,nbfvQZ M ^^ۤ=RDnބ:$"\+[{`XoX4NJDNѳi7-V*M*&3!'j&B[.홉 'T[vJ:s8'"grczk׺j(ZhHR}= ?ZO7%VVjP4ҐG[).b;Z)vD)pdc9tᘺf4;Р-K#!Zk,Y4^}zRvj(ՠV Ѱv2hՅ< @IDAT#E 7AFC(OV`n14cCfd7v5*CRr )j,m6+JE-cԞ( Ew"Ӏ-ȩ/QPS)ECc-KFAJ/mk{z>]FMLw4H/E:O]lF&}^C %csI(~+]#ڣBFLNRav&ݪ A ~>E^ UP[gES;דRbP|6KGhTV/YJ?B d,}d!eee]^{8o8& $ u9iD dr{iHFo=d""_Sf5I` +[6k>n2ك=qpdgb MyƶݽKZwxp7D;KKEcH*f3'P,PbSIK {{"$1\ByطKasYə>WNBQ0))ӶW()ln[m2Θ},5`>|\˳v9X,Yh_Hh#eLWiO;Q\~Ԟ:zH YاwF֜'ȩᑈݽv[k5M+Eyh=q/4a 8k% |z8N"`h/%OwP<G^C)؍'eZD:gL}? PN= ?|u)ʌNWd3Cgpj}Of9N2a/]s۬2\NF\FP0Noyyoo>sC3n4\xߤs`+u: /CTvgs& :RR@ƈw0c/~8۞R/OO6gGu.xt•SlhlX9~|i O@Qԣ84Exb? Wo:ӡ,|iȯ ˈA%-u`?h.#6ti;J2NK9A9-2{ ӞcEeD pw(pݡWv!Pl{=MA9d./[y,BM3ZF{3`)ŀoPL" a)hllӐam[Jf [JŁDde!#BSi& sq J}-0fWC#cˢ%eC~Q )8Clh] }/PXy1*0SR#X:n \sO| Ŷf/̅g%Y/-1NX: H2hdJ8޺x8"`h #' sN) lx^k|iZG&{Ẉ"^<xt!<~bޛ2-leOǃG-aCxn' Ekcs0 -3*sؗ>sؓ?,z _.D~Oxd;>ֲ3OZT"m7z1d?mAB**Aum=jIZ%pJLiȭFIy%4δ.\v2.Lꍡ46YAtEҥlƗo}/7C{ ύՇ1l}ƠX`Kd7+3|RwdǁJ<-bL TӒH5ZW#~ o5;UHpz-Kwxuy8M\'g!Dv|-l8d~}em[5K:}:G9o% a+g?Rp!957^YX6˕u甑 z{O-Z2s/dm8A{~T_;bL⽫{~<7~y/3»aǢDOc! 
]%ZR'.z/2: ό JKN_a I;>$H 4vNv$eaxo!˞ɢyyevDn񑽡yj> gf1-K&h9nt'_|'q 0Z7|f=j!>WΗ*g kO E@ E7gbkZ$/--Bx7m E'lwFm}}/öFIpՀΓ^}V`r~m:`:5x9bA)ekfqD"'NNj+9nq"T̃Anpt=>weoSi ӉA\\jS芻݊4gyNEH?=.}s&pYJߘ\Z,$1H}~=Ӊ2h@Un>{OH[\"QcIԢ2EWJpr'wDv@́ .IZJz 7<("hNTOg("("( O¦.RvΞx= 4DxiF(IPI#S(g8߽PΞ1*"p:~KNE@PE@P~_mm*-kJhE@PEDYLPFUE@PE@PN{{{y^YO^U"("(t)iKE@PE@PN3%pfzE@PE@Pր"("(N3`U"("(JRk@PE@PL@ \^PE@P%p5("(i& XU("(PE@PE4PiWE@PE@ \j ("("p (4V+"(".E@PE@8uE@PE@PZ"("fJ:̀U"("(KE@PE@PN3%pfzE@PE@Pր"("(i_U(@K1&'_;=/qG+m:b{Z1՟czq*yunbV}Cn^OϯfG&^h%_p>4as ݛ0OG;NE WVM(]زf 5<>qD7 %V+^*1z8Hu2|t%LB˯=6rȘg\F^n~0C"D2ڝ=F@uڈDOkz^ē"p9IKh@2>yLFkOE^KycD .@A7)[<=,Q uN=#)~e rx:O">l 7YZ05aŊg']䁯hҁ57,֜ք NNF RW%D.Us&r3vp/~׽-ZkqOUקӊPS"W'mױ&m]njvL;n+XښiBioM]}/{8jJҔ:\q1p׳aI w}im}{9y?4{=]Ʀl?n%ԟ 5`'<8`(ݲO̅]PNB oZ*r]mi Y.ʖLe('ܾ-8\T F7R(%DzS"VLvԗāBTۧo"sF~J!IaRZż׭Al*<Ń@pq[:(tԋ,,]Vb:LF{@ԝ*EzJ;pZATG|D"=pVZrJiTL ,.K4 cDqtO`p~2~o$~& Vlm3!xI$bk]72(rjpMci t|mg*LDogUz?M`UB|G@TOac0yiDvh,^F ja98&>H5LLOaazwQ|dSc',&j솷8tp#GRTCpeg.^xQlE_uq Ƿy/:j%ʍzh‘|vZ#>QFOz t&aw|}Tt^pp>\Ov 8;3ȏ ovc/ǚz}+&W=!;ŏM3v,i|L2]q:KD&NXDM07aMPg'髛8-NWoB"ZL/Ck=UnH`n50zu^cTA}?|ucnx8冧{;.HնCr]֧8Mz4NB#!k3Ga=I(ZltOXXxjER>$%ӵa.B"!T*l|U)õ*B } J~'OlBq2jDH˳ЛpO&7FvCv#+ix$L'N).GM85c27PH+^픕 G|qhS@ī[|P"h4Q.&EoD{TY ~7WP-]<_Q "*\UwEsD isN",ˠ)"@?a|P'&I9sJp?o]?Ķ,rV,-Ah.^ϻr;1eh! *pHL2vюuLDgR^6ϭIe D ?UdTGiA ]aZ+˨A^v[dh1SB䳰::#1{q=3H鲶ܶvɧ #"9o$oHN l0AQH@CiH b0GRxT:9<Y.Bao~ӣ:wh?^Lk:6Bk ЁfrNZɶu^e5]v,-ěqv2i-…cHf!C9-8dvGD9a!.ZȘwwK˃:#zg#hͧ4[}{ EsYRٿuDGf50~Ʒص6'g;j+1_1$-]ڟK |f^G`:5a.t#\#wngvz5Q%sXF񤄖V Yw{/ͬ9J2v:AV1s܎]!=lrd|Mt ",(l>%k1;Qn+`yN7|;!]__/?}I;F`9o7ȓ3bo p'Fy Dyd\{R?3!Y7)Non&쿻qyn!CD7Q*!x@ X<㋝2iu<ԗpRH~}Lq#ď8AF;:%yegٖK ჸw <eJI!4z$IZZI[a ?{Og#!øͻǭ/wQdE8Z]kGhVܧНxd>ƅbky[0+3Z[mriLedQO: #IS vc2v%L$%S,duI"$Ox;I߭3J*-1ǑSIW".Mj=#y䁯ٰeOq֮k3T}+zkr30 u9* C/J{I pxjjVrQ/}CoNvԈ{ٖLo'ym m"|;|Ĵj 6s%ki39O˧e'<}1_2ʨh+$~FQ~@ \?HPm"U9ΉΔ~*? \? 
u"(~ŵ7艌=M/SRB@ \Gv)"p$xē`T("("plJ:6uDPE@PSB@ \DPE@PcPױ٨#"("J:%U%"("FQE@PEP)*QE@PEul6"("(N FU"("(&cQGE@PE@8%uJ0JE@PE@8:ٌFUE@PEJ:%$U%"("`P1ب݊"("2ʤxPE@PE@8:%pګ(?B{ϭIb&YNI})q9"ҊϮʎ{kvZ5g 5@@ \R8 XC#fͣg4PٕwoMϮiĜ.wKʨ?承ͅTdU:*'KQxukoaҨ$z+[Y+K=]ƚ5Y?oI"2=MmX~fz7=47(*p\ F䐩LvS~Bᰶ(+Us?]b6f隊Ӣ.MMRCl&c.l '=N{#fKljic(ٻ>&ˉ־߮Zʧ3Ms^^#.o>vY1wr#87~N6˲~\uP(lik֎Ɩ6;%:+2cw c-[ĿiK萆~%PVcOqs&6V #nSzHd|pNuUumT໭mDxo czwhY[(lnDp #4.у#^IHɞ]do^]|ٛ] X\rN~)%2.$b{8%A*u8j"0 [] 7]Mœe.28w-°{}{1d]ֱcodDLy'4D/ZFmYu97ɫkz!)=l亙R/6y;RRJ[5cpES-b̕,ZZquUɚYILĻ`137U./Ϟ$Ac;W؁/mfI 𬥱x7ip`#eXz{VRc G]qup`|=*ޑT@DDdv .'ʛ{ٍky[k<~4}L֒_q37 6a*zc+x%O_ѡ6Zl䗋RlA<v/`іBk9p+o2~Q܃qQG}l(s;wp3 l,s3F{iDXrّD]u!XdmuŌHw|d޻KG{d0n5=Ldp|kHAayx|]ڪ)ׅ/>N PHpRU;n^rMҨ~$R k8vҾlϪI+Vmi{z;,>q T9M@n.9o-l^{͕aѸiWCaC6c }TB(C*^=- \b\^r,X|^|jx97C~=<5W!#KGCY! Rʁ{Sg'Z+)n'j*|N&\Y2XZ$bJ=98ׯ2Һr9f`ev NdbwX_xROݰW?z#pkP_Jø=Ƒ=sXrVFf> 7Ti\F,-νqn~}m,\46S0~DL%O-`O< [ZĎ>Lb6>\BӮ,/3~cOu \rq-kR#مݺ@v+lbbRZLE (5°2whUcpa%y_鿟:3y{ڏ>|i,X%,-HzNʿ7aGml[8s(<}Hrְu d!s)¤Pn]Ϣw'sXΎ]mXk#W&! [50"?}O>',]M.:|"ھOwkV/"oOB̩$^oWh,DֲnRVՎE72gJS\yrv-V.sifK?bǡ|Uoc|:!{UaL&B \6O \Pۊ&`i"j7<&:?Q||s[kNMM<*|*J/H&zkv3ش1[F/v1k|vK([M9xaBrOظzC=jyo yg3ό%u|KQQ@}F{y poW\;NޟA|8L8;L%V&%;叏Oglxz8O8]ܳ"JXzˀ,9j|>ϽMIC\TEI"d|DkVb==BE<|nR{b|YϷ ٌݴ{/HH-zGq~ mfx>@f,-kԝ>M`R$^Ɨ,zY Nk}̟qq^|oT&G,uϯ!)@zmycfK 0bM&<J]4׼3)]]rTU14&nI'=?HnV~!h0+??Dfu{ ?/_,,c \<كp2rF˰@IDAT~wVFK+1]=?^JZf ŗ_AĨs6_5ܰ{glo =K1 zk=ؗ%Ywu NFי(_e}nEY /f@DD`۬yZƋKyvro yx \%:o_r%(ӹً(*۽)sW첵4NW݋^|qݳw>,K,Ewq /eSxا2:JWQ=t3őU߇vPپO+]&?1؄3bd|v_rT_O1+J7;#Ļlk ?lw߃K,-/R[)I\7̀7 &L QБA<Ǚ@w9VymkgZof䠞 82@B )lAb݌*^MU>bBU{7sXE{j '&$gc,4_ވ!/ù{sݔGI<(u2}53_Z7|cIp@z}+?iK@Gjk#|H5jtM^xDaW@ fESIߔC.m֮ckS߈VR8zP9̗s؛ G^{2+ʨ_ƌ܇;8ȺhkL$ڃ|_UzDZ!Bm7##t8l:qDƄ1u ]tE>_;bKlkFƟRti%(IP*KD&fDD?eߤ J''$1vzFQ|)--%ϟyph9n#9/7`ZICCd"'RE|.Zl2'-~w=_5Ѝ %pID}VN!w_=R:GmCQ=?\BF b6p:>tPQT"~-75)4{ X_+!H̜N ~ο3p8onN/OVf+ `[ҶG6w:xisͩ^[4"wiu}?Ch>kpyqvsvY(uuW6d^dIߎ` Zm[Lu@KWxҼUzVOטZx}acr{+ |ht_ 0b#x f[NT맆U }o`Gk,wy=ˑ+8u;F/a7d^_n%O16Ǫ9lK(ڔ<~`.*Mm3-Qc 4SW;r uU ~r!f %uVN৿ !ÛeP ޾$ GB|$>哹hKgn≄Q8m J 
ܟA{mQ{MA1g|f^.-o>+~웩1g(+t/ZѲ:m>+q5ЖO{UnM>0/)o*zaSnSR/2fq]D$lmCpIO|,G?qs:fѕTPa1O;"T9dADtzk?'-퇹܎z$%(%>!ͱ#^=rA_ه#5k(ٱ-yyB~Ȕdm"kDoB=Dw˕}&| Z)KMĿ3F<"0a0xF#*]λa65ҨٛQ{Nj@&4K凚(6gʊRUN VNS!=pxgW?KOKr#_mx(˥'{&w|^ydaPq05MخŬW8 t}S Z~H@7\^vgG;ƨS3O?@5F}Y%qײ`B>zr iO;A#vZ99}9ϭb_Bȣjw\467#NWdVSɘۆƄ1!|"o[|o+.vI(!)Dc΢mJ_6@yu%H E=y*fw䉦;@uoڟ$G%7VOHrh-z' nL̇?歵!>Y~⟧iA#綛y_n`gr#gWY&JƒEH!Eb/x̮2&Ey_D*HL-6ч֗aXmz)Æ\}O!~$"s sz]aR0Uĭgt>IOxyg'nRQm>#ѽV E-|#c SIM.c)Q.rD<ؐKi:j xzӿy7[8#Hյ$. p'd"iZOhOt^y+-yMd$zUoa`lxtS$B㏢Q'n89u*ȏ$o92ɑgϊ#N;ԊhlH ʷ:yЊɠ4G9Z A6 Wf*o'1WCs{7 OOCnhKDcf\z WX1MvxJĠ/~bp LMԛq\[ٰ5V.f߯C\rE996c/Qpyp"aZ(B]e+v#-Q8:QJަAb!Q,9|K}~ŅT7@ޙPo )r}Kވ?)DDž"sZڈ9n W^E,J*%OίmuJJK "?7Fʥk޷MWmDu y).(? Y";~қ^ LXbdΪ4 WG؁z$"6 Jh$t'멑=q ƀ .,]ZMVf];%''2?|ij V{8bin!^dmk'E&A3`6Aˏ bF" Tbmk ֮1OAQxtTSCi:sphђOCk [RH֏;1tP)M9^a"e0 kUm b2}s`G6mDhfGMn psϽdeID'BuCrXRޣ|~o廥#vS;(QkXcca`xG9R^+F"px6Ή;x0,F29P%Y۷ŎFKi,kw=ŸljtT!p󁞦j`L彧'0${! ׯb#GkʎCjWP~cA=Ó&e(s Rϛ)`‡sk uSSq+kL]Nd!;{#c|u8h֘XP,nb2?&6%js;!"vHWg!U5(A G\U(QD; V)]Ni)Hv8O/ڪjL \j ($ PbHyrn("&^ϖQy-ї.dj$l<>w7̣H rjS%s{]Q>l굯q#i$FH2hvɉ}Y ZpA'fOu"p% Z39'>BuN_L%pUgl6ëŃ$WPɰ.<+ag+C'=J>ڐ()L[!$/K^O}E s@"c̘s3|?Zn*:G^2$ZϠ"ڸt#$9!Śq)L$u/ 34fcc%Axzf{#z?'\fɵW,'F+d<ٴք_I%Lf-3kpp2ڣYUHc'0fݖB~!?ʹ.m5;@ʈ{ѐ,iumx3(#+kx?>7y9g+wR4`Rȫ*ҜdB{"$J \Č6E@K.{7oJ/ׯnE@ܗP|'aȈØ6IOqL I 9žV%QㅒLuGΑl<1x#zM%Ia\GsPISF a,?W2ed~9T"~e4#+;8N%Η7 =%nK] YYw"=sΓ4&vğ# xwqE;L ӬuTWz>E7K@rg4RRTm֭̕ kITrhGɅT[B~\Obl,Mi,ciޒ(VrUboAjjL͒bӈQ.;PQ)9$C{XQzmA0ėfW_Ix&wo|1su7F'>>.y=^zuj4H0wgyѴmDD3]-=9|ϕ~ǩjCm+~>xKya+x&y$Qhd>:K"xInXWۆJo$SuO^vox-4 ݔjl=8bɃ&5):e 8Q7pC+E_-mRm&*Jd%UX0¬*ɭei5H>0_%ϲ3[0W[^*ka߮,tfyS TFYnU2d#; 8)gvgs;XA0P[Q֯ݱ²=?, u?'n|}<{N-:ٹ%*U ck/KLUIBK*f*m$ Y𫅬Kq(-C|Yp8kḻILj \SSFB|Uc-}#1ϗ>L'/׊ uφD3`J(D^lbhPJH~NJ4"1;ky(H]<\X Ҧ\yMmc(R2)6%]%UGeriX\)_n:KsO^e˪BR35XI=$zEL:k;=|ŧ\YҟdJ8Y&1^Hw א^L,zu䤕5Q]-$܀~{$g\MHptn)%Z;Z;7rC=<{*dӷwEC5SNF~k!!^P-XQq2%ONC.|7_kٰ ZMϰV&_؝|q/%f s=غ0s=ce6egg1u碆41c+0M/hK=?5xZ!!ȋkaT-if1|x }gώM_SV~g5 0ydyKfCssp;/zI#[,*5_oL(1?g׿`/xg4@Iڟx0Ju{1/ɖTwo=~P=UuYW U]L/@HRH.%!OIb>i+09Hs.=Q.e≄4\ßۻ϶Z{݊u~Zglf-V.*7:RS<6#J6sN<~!_pNl 
z]]4/EG+RHO꒻#O,3AG:ew UApGu\]v9c 3'wAfO'gd^9 םKrV'jY&vױn~iFV}wu%}#չ$Yl[~YZ:Se^L%QMwdC6?c{I8}=t~Zv.AcGcJz_2F{瘽xPm2?obߘcDwG6l=$ZwVV})6fHqL.~!Ĉ[pnxt:?ڍĵLy[U3s3JU83h_Y&~ze3"N^j2mOk'%#aPg_|~x:}&up^-:ˮyI3}P2gH(=)ߺ+0ŗOx/^Ȧֲ6Ie}3PN͝ůIwn_x: ;O ~~UU "GЪC*cW: F4Xy,< ̍k"ϥPŒ6! 9pNį|KC9;ʲfQ>㭐l4'f?a ld/nIJ;^$r4>G8xFlarӷ5y4wvgr!0΢v97*8Ӌ~`͒|ο<}t/|:'gW479r*{%3ךl4%e S>Mg r,w/Ԭ<ܼ$-{j+<f&c_=zwqcWHk4)kX*IԇZgɣ{II: Ho4X\F$AYyyyų 9kt9.s[v )$j -z '!f?MH&|n>I0^(ƒ1r2taWgȳG_^/k9;F ~sk'ӿnTfR~aθOV9DzQ=lz͒ W(|Di^5OI4g2=6u;AEF"9҇Do XVїICZWb%LH~R:p Yg#qLF6>9X^EyFԝٱ;3v[)Պ~aoKH|1l6{v(;CΆj![I,ә_$#"Q5Ԯ!yo/=qLOxv.=GHH1i연Ntv_/6ZN6I4?mtdХ&+X\KF;q>:K¾_6s:x(_&"mz=*!Z^[-e"Is/.I(+l*=4B Ր/w/z@ZM BVxe3HO=ځع-Xw oVVVLԊ4w&oZ ;d#a1r-B#{׶$p#4ꎔIsx&*?QngD|3{&TTǙR0s:ZN%r#w.%ML"U !<}s[ iA$R su\:A>ބfƙЈLq z3ULiAZa8yɇ;wahFUZ̫k@,]rt#yWM3qUam+jWQ_,}zҢN2^: *"_*eF<Sh5' c'}/E@ 6!tU஖f:3 uc!jzW\y'C e-ssp=x)!hJI`e>9dgEsdK4=oJ / %/˽"la+ Aa,j:lEgc'*K7_T_<ˡlz,v6v*ty1 Od//*ob 9nsi/@<K-)i7ݻ<ƦqnRƜ }Z1kq+Fѣ[:& 3:Ul2'Ћ7QKxHl.nrDb5iۊ젺14ُңc0h CYT(cؔؿvIvUǃnogK㨕1xxRIq()ZիQ|;0`U6\"/eB<"iCPF(*Z'=^6bZ}W>-\B o_)w?/$6inn i',  >h!1?6kh2&`pEO}VB $\ᢵµ#d@юDKh_TT'lfͺX4NFGYa[UMذ9Jz5JI~J~kϛAC>.Qb'%xj%6h`gz؜St?p/ 0<:dmbg[+IX0NB$ Քط 1\/cݳlxMTCHٴBLT"j%vP5.LZbZqq9fWybcLalR#nyXnYjeWOMH&;] G%P l[FxOo_P.ғ#H~?"Qg谺Y*" 1ɜηgCo}) '"- )\8'vf8&REv~k %sFKC3 Z mjJQFIL ,ie:B 2,DiSr9Hqdd&8@6{fBp#AG#1HĊ}L*k岩\6$Ų>oq3ʅɜ?$PNm2trc7%],x6dl.K dAcڷ$椒wĵ[u5֗؞D¶K:UK-VE߁gꭔ:s@m,&]ibO<6"ʦ0?oh*ӓN'ei2;DbԻ;/_ȌAՌdh¦ϦyXkm ECܑCbHޒ#,3I Y@Nf^vMvw+ah:>Xu?=r6,Mߠ:&\UFtgc+DuF EDa*)?CZEkgpVt\ѣYkQM,fJ󥽻*3і9BnTªT *4j0~*76omA,6mLK5?RQ?vBEu\󛴥Aje\.~^Oj7W\|DZ6ԀGKD# ;*bg"=mM3LVٞDI@pybeS';$G!y4@ O+ٺp3q{.;2kyݿnQfIZ{H~Y Pԉbɗ]յo U#+>d4kR h˅Mi_ñ&:V=ŮIh_v"U# 71~ _ub 1Ib`s$| : }"uvm޸7[.y74KۙIgܒNIsY0W'[yw׍ WWL,o}/[k1{:: x݊;1Upvo BZ# ia^QXq>ʿ+I&xsq.g:՝ È["WchיE5*;KpsYɩ,ϱ<ԮD5lc)/lss>eg'n-&"}+^ƮFFƤ݇1d|Fρ;g^Hb[jHmdN)PmQ3uJ^ϟH!#{֝SBKؘګ1djk ]Bb T aCS7TenͣWnjkDmP]Χ5S~q)(U%Q]J15oJ3bg#=b;Sv9[e1w؆UVTQfR5B"Bhf {ۅͷɅ1MQ 5RGR스:>8UhDP7->J$1[^:UF'wK p^U`khrVʮb(J9sb#JxI;hןFKEKjW`x|) 
&BGi>Fs]O>/9Ks5r΄d'EֵWcɸ)o+K}9^+}TMeRԳ;&0_|'e\T"h JQ7nWgbv &S-trWe,)c&Df.*;ӘV2e >CH8|~J~|}\ecy *LnՏ9e|(6W<֕S~L(ytRnTw 9.!dԔ(|i&9xUx6z`r]_sN|Q+*I$+G ǟågx6HVH-meP$5PLƟqx1)25 2W4_]uysR^Z>)},Y(UmR JP7Ӥ ګ|Q5iQS3VLiii&L]\8)d7TT2D)&FV8i[/؂5$G_>/n\2KZۈTo[KEUo d̏Rs_I9fσ:!fue\}B9.J1@*e_yKzzn0#ʄ)UTP_ѵe*VbYM2 Wm:SWVȢr7UkDfS;tno!҂.TE!+~^'M'uv*57ӉԪI!)h]o-k&.t~L\BƠԽd!R&0CMM=Vo@y.hTuZpyjЋ}%Lx֛^`?zvt%w7h%ɮЈ"7VOZaLc =>QoO\ TZyӮ㟗ݟMm?=cٓJ/zgm 5QWPh p5zLE_@U8LHl?{>)(^>4}+oO}jo< bDxQS^|C*Q]pu_#N=ibq-QX]ЋrQr~Vb,\ bcV~3I*mLῴ\=""' pRPP"PMۥH$A^ϷitK4/b ,5]-4a-ᄈSE=^-j xx8t#g ,^},Cd.Ē_TDʉvũعS$.%Ix1Օ[o**.*sTsS!o1\<) rP|f6r%Zmdé05{*P_T8P ;@l7̯Tn.>Ա7!"xYSeK`kظ/c+*28O''Ni[37K`Lo`_@ +AUq|$ːdJ``drc|9F%sbeaՊHlZGRxɮe_H-pۚFJmCoz q|<.L_9FNՒs(5Y}nzKh̢̟ĥjj;a¤֢\g/d4ĸifjU*s([O-ഄ)7JĻng} R u[<Ӽ`֓vzRg;.c恗zפ%?CX0a-(+4Y"N GB̔ؐrT}uZM**#*VR ?sg=bb@|FN'7H%@pXM,'ZZmf]uzy]biSH:ƶYD:~\x\w:Lѐv9B,[Z [:bB^j$Jx]6.3`gG('R8sʂ}kH8tL[Z[T&۸AvjEl4 +9{9#[gft.dnÞ׎Q҃*xyl|N%+~;ޚGj tyO"YbZ+%yR懖UiGab?Z |[!-qajfD&pv&r!GKaٴ yX7*mCBRIMLaՙkRPd;k tXkpȥ-iRxkRfާK{|[W-.@IDATӛo;x(Akm%>Đ_H%+M4sE)kL\%aM7.ӿpP1T]GQ92<>R"ò2R) 8x_U"1Uo=F*Jb)" J2Q]7K"# ӂTTCTjϝx2wDIJQWw3':y 3p[C Q`W@ DArŝܿ nx'irr,I L sɖL* IܵU4=ϲ !$3N$:aUvXVs#B}65lM&j lX8;0:X$ h=\=v+S آ4 9эHߥ2BA#|BR;HDC**1/X^E@;~y?XoxM|ECD8KΏ:wOrS}HJr՟Cp$~Jg3NnX1|H,|OI2 2#=G3n9\c"^OPЮ|~NvQk/lJe4څv~xJ] 5zк ON]| n+DdM\2#NQ?Rv CxaT+h;2íy4XS*I#1!S h[ܐ*/* R>*f@"73/g[%\L^zgN&#BSr[/;Eb~B1`2Qąx!/3at3XtfV:Phʴ*"Y}ET#y%U<$䎞`"bϤ2{i M=P G''+"x`-9vR*.- H~FJ15{m1+J8MN7|‚p E^Y$ǦKdj ֩[ₕQ]R@D;KS8*u 2`-udN.qi:Zt:?FږJF~z=6njl^Q*Q@ZZf2(&|vv6*5+ 5T4?Jԗ"^"""""""LTLUTTTTTTJz@3P W3S/WPPPPPPh.*j.b*******D@%\L\E@E@E@E@E@E@E׫4p50r""^"""""""LTLUTTTTTTJz@3P W3S/WPPPPPPh.*j.b*******D@%\L\E@E@E@E@E@E@E׫4p50r""^"""""""LTLUTTTTTTJzE0fOȾ5QKoeO2(iνhn]ŧ_%C7uEYwGy[gp&\u._-^E_^YX2aLK#l%БM2Ip% kJVQuk3-z,Q v/Ƈ]etbsk MGUQ|l0R}>tthF 3@YGٵm_}ᗝ,%{y93sTOw ?:9^(d.&猞a$3Ws1&l{bBJ̽? 
xؓ$@%f]®y8e^>߾Tfhv+VKsصSLj= oy5|!l!0v}L*jN'ת4ܤs-"@lUM'?"WfV^:!Psj5*Оt!=zjaA{"Jh\cooN@Vv&yXMIg/0'HO{f~wk-[ ĦJbٱg-Ato.=TURVf# I:@ܻZhtI6zn?wqϱbpI K1/ѽhm!V~ m.20TRZj(4;3в *b{ e5_]=erq;S!zH+2r҉9jRq'oW/"-WG`;V$MUԘx܄dR)kf渊$³6LyK/:,k(HReQ< .`4ŲU]T@eEyĥK=ܰՙΗE"_5YR) g/<2TA*$$:;l<3}Z)os8Tzmp G3$s) < (K Ӊ>Bau 5RGO/ F"* ⥞Z<~v - X&7O]̩Kb\Fιw[OE;I]ɵXB8r+H')[i돛ՕTsUus7 _Է5^v8MzVQ A%D.'hfΟ-JN&'=|-Gw bҍ2"B=p[!h,gыDm]:ݻ6^$GHY爤MF'aEz"PDօ$T]S4Nb)!3=~-M2.: /2V< ƍL/j6KsoCIhKQMѩ)P^(^t1+jj*sIVPQ&;K\` @syE(l8%.' sRS )~wle SDo'7Gj*)gl1̽ R3|]/U%1VWp"Y)GZF%U`h$s'+qk燳ROiY9xAZ\,kOJ"73<)l,Q榢ϰCbϤ[ {ݽ X.cB@2fkJ(*($%ۜ y6⥌|يҚ0`\M[?/?[kja* ֑B|;#oaMu1ή\E J{2Ǹw%; w]8JֶNeaoV4D1Y-scѫ~c5|/aa!Yty DD9(K7u<--X~0R{Q.[8PAF^%`cLrOG+)b.+lq?>_sOTwGbC|BgU O"u7<keѪԱFL9Թ+֛7Mf3pk'5p7+z'I gFټ-te2M*GcGB0?ER|=;:-xqSjKݻAAyHڍIoff0-Ň{S]Vp]>=u`٧f<ōfΜ{ Y[_$:Gy9/8ehyk#[Y\fxtH Ud?_MbLg+ t]O\Жn7c羙g[&I^gXx-/0Z6~5ᔌK>H (ڹ/O|"#sg9&|!I!=0f!/?]CϜ}<΍tR3" Oφ9x2uQy؅ә;1&?xC3.[edgg2k=%DNo;3;ULp&!#jqʀ0yb0ُ%G#om)/5PՅ2e[j6TP2{b(=sy9֏wN{:vq/M179/Þz<qr"jǑl@j.q"_m2rn*4V>xD^=,/c5<ձxvd7nfBtY4C=.GҊUv2fw s/4"=!6.%.nyUJ2L9$!0ڍ㟍d@b^xƂ{߾|p(|T87MVl>v|Fy\논ZEč.y޸>-; [KS {RI^>/ҤNS9Yۑ+aAky:㛴2ZJ}}K<{7.Gsq!Yw2_黾x81*Y:y,Sڶ fwNȷZ[7ciCm34'~'DR`!r:ķ?u{v83\{H*3oqDr~e1[''!+V_dϜt{!S>g5me]LXCj`<ձEwU~ MQʳ3(|&h))F۞vHV*_k_`93Њz0jP5Z34Y ],+tΘ)v8u}7 {&={ 4 񸕡/3 ?~l;Ntd򟸻E U;dkHo'v>9mul'7h盘FGԖ`{IhE at·}< wQ8Ώ "щ@:J/gƼă5eMW[wS87;/a fٛSIVSoĬ0aFf"2xlYWOy2%i[ '}#a6<<ěw;5A<x/HMy$^A Ukjf'鬟R]dX Ȃ_ٻ*ݙt3<]mEqh*Nnku *=XsiE\+68 A,]t3r!r6u_

WC:SwyR(uc~GꗗOZ ?^4i}]L!. ֳnY<0>4;rga߼#X}Ș0b UFɶNST5FԉnaSF[Y15 .c`dyѢ%qlGpT`-9UK Bd|Xn},$Ε.cԭBTDŅh}HԸQz7G6zцL@ qmt-'sNZ[UAନ<}78ʆslz%ݸEL'X^yRD"R ^ꅛ:-"e"ɛaKdbu-AR 䋺Ԇ;2HTtRFƹ%bO[+3 +C\)qL7쁓m8[[1g+a'(ݱ}wUwKlJ뉣=nxtgwEV~\QG5mY؎Pz1#]2,EJ/$V0x{+وI[ITero*w yg("x-;o$3tJ9Xء2Ϡ[b5VI[Xa'.zs!qq(,dDLEXBJWҠbUŔ8`xlŘ*ws[K&^ '9+02X -e!,ע:*ʒݠ*;+!:ʍrٗF\ '}aa[UVYz4v* HyNKE%w"I˛5EƊ{>jTW`!KOڵsjTY$Gफ$ꚁ,Q$JΏAWeO,~t[]~h-dK5)yvbXn+na3ͬd~1yb}ШpU#`#;4蒓8wy H{'Q1x;U^!=O`(Ih1 -cR9 OLAvP);}SoFMIh1Nm'/W2o%|J.yb.(J#3'S_ɖ˫Sn69U$E9đ2sE^EJ 3}F7̜@Yp'r8GE)R#!3Oʲ\ޒ=dGkVcXcw-+4G}]=@[@)-V[(jX ł ! qMluw{{ $˜ޝ3Gsf7yf.=ɄXDv?~D m:j =$B2WG_*9 _nWpk葑$cem;OJ 1I$625;Y S:xU0,fJ㇯uI֟bO3t&RCW- ;$<B 'Abjc)+ny&u%mBK%Jm5%OdE ĉiN;kɀH.߷73$ O;sҒZ k2ƼW(cLI7:yDe5D0ut?޾o/z/I_e?TA$Rj{a TZ$S55%ՙk+~o`z '1Bqe|v6^+$%uXWEH7ż|_|K"dZRN{4=Ҡ!އ18T\!ցhpt8B>qv4VŋwRB0=_ZOUr4V3JDҪTj*(ʣXكfx=FΟk焀S~N{DaĈDzdf`\E*;D>37}=EVY9#"ZV E\_!j^=EHj."r)vZp%EXЕ#bɩ *du6DOk&.[D]7![%: &Yu$"9lA+O< 15 fAy_Ig_(GyItq 9E"HjGXpc~q"҉,9qmTŚ:c+yMi̝m(yQdʺDɷHykKΧ}]ǢQ>4{6q--5pCo]͎,v.Oso)5UBD#m&RZ\DHjVySK إnݰf'OUf"-*gS¤[S raL\VZu9Ini[:/ظ}d|_7#M%@aݷ2()S7ĕ Xt:%"U Aq6g{}B[ٽ ;8W2W8v)^(n|+:Eڞa %>ze\imp"BѝpYeypIj|a82lkɞjY1}(뿦6=dfU"`uѭdYĢ:'`pO?/v!F@&5mBKDb*i<3} .p+Q#G CKDxܹJ623,Z _ΕtH2#ov {ˋNUgt9}/SFs]WX&B{|DϦ]rfk5[1綋Ψ|@^o5T 鍳1&yi:pcL>GCV nR-"jM`4Q"iz O~v‡ A# Oֿ ߼앍0hM$papx 1IC.K]#0]%(?{k%cMTUN4E)pNs!cwsleCg|3pʥYX7<6H1!|d'Cd]|ʢEi:N1$ "qI]ed2o'zx1u}eUi5q2ԇVSg?߰L0XD9#7f^j:y- n" {]t76&DW,7~E/}$X"mwdf1H)B9{=!KޒU@I'H/^BOUoBCoH/~H/9c_6rF=@@# i&K2z #*v}mK, q#f6̶|}2\<)]TJgLw2%-r&2!'k}?yyXXGab0/Kd1`'}ǀ?[aCZ|5mW "r/FOx)WL&;"+2>a)9;2Dui5+:!x{ /d0kWM<',8B3QL(wOs2# 3 ¥۸q5N-SV/|9( sJ}0y;p0f˴L!m2TYtyv=叻5p7CY $NC~fYD[ȝEj/m8YńD)Z]=" {g=5~BH   S!Z_ֿr,)7+NG+UcE%ֿc2䧫?yL%ux`\ă+ӑ?]5zڔbϸZ/4~\B}5'_PEt9f~ 3.zAĺ4sB;e21GVHk!KueʺLub+)?mR燬Z+[CgF~֗_뼆wD ,"4FGĪ~©TN^aOՀ^^y ?'O#PbA{2y,Nvy+RE˺5Y(S+CC@C@C#~gnkY5︕'RSsU67R-+DBj+^ocoػ9\疪95~5sV΅S$k5jhh64GZ(3\ʦs6~>v'kSfᵆ5iNrjkht~WY룆xIQW~8h}IO VyG@tv=򺷊˹M 7Y/^;a&0s5Zg=Ŏtgn*7>ͱkߔHzYgI5DL^Fo#EgʓKas4ϧWJy&w:/ }>`>o(#8w1dOq?rk̚il;a-)Y/T'%LvżIY[ũnēLLmK+f<f^#^]^z 
C"v2],F1E>?n$(F|4J,Cobg3Cn+\s)g3xkWZ. 1X2_nb؛c@"4lGG!!pf4uf=vG;)<%.x Uh}K<2(fMf$w3ˉq䦇W,'њOXQ77lGwOeCi?k͙bT 't|% 9T3_<;hw+tv .ճ)V7px]&e)DKte8WJӶ/Xv`# ䷣HI/k3:ggS{Ě5ݽx?|G/w#m#e5cdJ8Kn"/HkkEQ:p<K.=@ ^`13)&"Sj<;)yrL8l.g^V[q({C|7qNq%n^^yDsvR$FBh,'goq m@5N gF+-9#[A?b:3KFa-2kQ؝ԋ͛)cvKw+/AD닏x32zx=٥doCiǎ=NY׉]vLi܊[G/q(ُ|rwѧ359d 2"nx-:sOfe+yS3c1cGuz 6ZM~Ht?v(#'bڛ[T?.8_2tvm՗ij]qF1vd>LSYow=L.Ǘg0]@݌9?Uiۦ3= \:TFExM͊Ak   sA@#\炖VChȖvD(ru,ZJ)glnA[056 ;ZS{,gtv,^V'|khɏ21q"X6c *ҋ1vXaL Lf2[!!!phRj3ƶf Զ ?FsMص\׸Bٓo~Lj+យnY΢<8.tؿ;kV^ȝ+U9q磴/Щ- -ʦCBS}-rnUgJ![o3~8O2wZ_Ɓ5xNNg*Y;!!!`C4O   ~ xIc՞JڶTM}JL$TNMKIеqݿC%4YG@UȐ8Cݿa5{$f?pNWds%d._ID~?Ve&/58-ዹY߀is2ZO<X[)t0$^كA?Fdzɏ}D_>"{A.%JY4ACMYb dLfJ!ULGY;fm)PH444Mu.hii5/.))S&@;AHOۙeK0Css$IqǍ+֕/3u/ukfC#q%OEEb2Ux97ݓzk544Έnƍָ8Rɡ ӭ;?9!x1bNҒ:s7[먭(b_r#*D;\éY!r:ET.rxy{,EO_,zJ)`Eҵ:⍗l}=Ydu_KYyz􃻩b jD(SuWtM8g.R4Jא$E"l'"U*!H-A V>.{ۯ暉 =MIXM՛j-?ȑbltDx"^_gkEk%N++! \0ZZiknJö^d MFyw.h+:* ؕSzn߇<A8)ZpQ#P$3y&3TAW3::Z |guB9?Cs} YoWLHv?~yDM&Ƈi]̰ӘtX.< y'I?<gKN[ՠ!!Й\pL;'["-뚼 K"ux_R5ǫA p(kuh0:y{tit]wG Z5RC@CBC@BЮ^ sD\L]R*k*ϫն21QQZAiy͜9)m_ש*8|R Z*z+*(*\#x(jih'Z?4 f+1^[:tjOX9NR>r91J4kqQfY)޾kiUÏy}2x+ON=AҲ_h낽tZ5^38u:kŖp -| ғOy, Yਅ )sߨJZV|ϏTV#U!S!A!}'1?JYqߚeizy,[-OŇ&ǫWIC@@#\?Z?1_v4dn Π"fd"ܖNe߲cmYgMfp?ވή,}s ;<,&f^PXJox}&#!իYv?ebmha}1m?6QV|Pb8y"QNRHQh%dU*I_nk}ibƾmlL'-[ႛw2uaB2] yd[6 AUI~a a咹#YJ r]Ò+/!Uvב/Z rF&#1mA'狥 d2k;ع4pBNdG|u*Y%rU:=)r#e:j XJFp XEJo&Z 1֦l?mԐͥRu;Y5keS)9]?ZZ72o8NLMg>Kf۹b͏!c?/5ۙJ5`C'ٜŮ buW躅>ѓ4.xF|ghh-^U w^Kph RN uB/l}8.$6lbۊl)r1SFeUytB$d+dҝȥRE!&_fOp#[:f>/U}}i3pu&(5Ph_u+[C"\hP_87BqbVPk6z} G}djsO>fF8garA`Op ) r6 >dզ& ]WƎx-]L@Sis> lf21WA[q^[yUBqUG\7VdտpL*#3EPO0g SW!;yL Fzֺ!pJye&jԟ'RwzZ#-݅!LynMh|\tcO]}J/k&j;73PwC΁$t1jڊVV2&QO! 
-Elˮ\|@b2t߹s@IDAT~v !KlL~{wD4HŪ~ t/;l4qS0GcNLzCʻ, ɝW 3_/%F Y_I?)X"[$]B$lJ˹ 3P_ ׎]䚧pqF~tڎN3 r}bWM'1DzUZ={3x~y2~~ihȖ/MYD1y8CBeOs%& gVLuTO_Rk].`9lkIL Td[F_KxzJdNA$n4+O b𫫥e;aLG0md1G;ݮ57D;IѲá`&993,089z| d"[J&Szy>gYU+[;yr>BaƗN\ p>}tDJ˺&ץsr iJgI^Hv{J늷̽w+z4^t ]zG#O#0I>d&}:T%Noh]Q|cqbh\t>.^׷Y]L\BP^W`O ֎vpkƥF쟰=Z?}(f /%j2h%_M{;EW10A]B@gI;GQH5*|wrFW-l< iP'tF7&q-G&>1 vgܖ-! rYNE6<.:qOkQ7.VPUb$TZٶmh9Ԋ훙kW] } [ѭZ0Ǎ"%>h_eI#CO{X89)*FK4WcBV<]̷Cm'^HXvEUu`8"fPh/GJ%RGRf=[JN"hJ@D@puu2aƧКr묚jS+GVJhttةz/aY:0PF '7*+td{hٿƌϤxjmA`(9p1&zY;ƥ|5ҔkÃ<( aFQ) ~#|?r?e섑4l]JH/(jQ-,y9#f:#VsW^x:p!Z2Q1٫u2+mppr`A7QIQ Jyk!X(qkX.q L6Cْ$.f~{q }QR-c X66!m\ I|K||^MGS!:-7~4&'f[x Yr&Bg؇q̭u\/½d5Q"3qŝ3ob޼+P s]?=0Dzզ $?b<  |Az# '"{߼߷bp4!F`tN.}d\N&Lb'?L:s'D&LpP`BY1Wp,6˙K$]8{y#}r kۊNFgNRkT'̵.8GFo!Ax7Si-R{n9R]%(OQuiACg!۸q5..o*Z\c[|$eNHj*ĻzR_Z,lvӹt;䟮\mARb udg=JIEYDb[WcHNr8USb:T|-ScAW$^;5 D+L׆qQ99R+˭p|Y;rMwm";^.B,dɾUMZt v!Β_/b38_(y!!??POа\Ӏje6"IKK6'Մƾ.kmYja_9UPˏ-9U 9YtQez,/¥YEl!؉1bJ["ʻf3Mkǚ܇8:DjT_סjtKXEԡIvmRSȈH EXl8P; *ʪHud_YJe3X%ڠ{9&! ,aXe$5(= GP:j܃iw|۩ʲ+e)Vc6ZalA:. ]P::a˭"@D(w}@嬙 'LN摇{Ҏ5gzbP^RcXve>9P@uZ QC?u:NXD%|klʂ< v* S`O u=(y^WWR[SN?[j+;*"V*V٪ W"eS'UjJ~** DMA>2"K[JqؽwI!m'ϢCHYӇz*-N%ɲMj}@@O&,AHlЂ˸{O';}팆@$\J*t`o¯b3+\0 3Cm8.HBh|Kikl#wLevLUae H.#ҡc{jUlg:7L :.QgC[߻W*M'gضJ/)>;h1?fe*QUӔ^@wΩ]* g#l?E{hݸECւ@EG.hFUP_L E:Y"RҞ3f-ʦaczGt%9ZmiUz%nlD۱mW5Φ-? [:6ђB--t栺mV+(=9 :SrG2Vr[nFHmzT     @$\J$~DKUI2eFyȱ"vrp.U/( ˷E2ԍ4b?cڟ IHc{q8UQUTgcTO$eAm0Vq1l[j`HS%T,T!!!!!Ѓ葄KTtBTPqݶ[$,I]ZxA\88bNTt]o#\Yũs_ }Fl h4444z0=pUwf(bJY>bWdKLK!|/E)DH]ݥ>R>IVw 3H\wu!$Em2/V1jAC@C@C@C@C## NV5;8YG2e0K deQrI[DZJ_K-\wWqw- v1n}4444z$ҋElb+΋aS"pI\AUEI%$D]Z:}_FҫÉrOL{x{L{~NJ=i*zg!YhyOIuV069{`)FkhhhDz$RYg\`uvF,! 
4FM}Ө/ٽRqgJOe;RSiѴa?/[Ir|9*B}dKun?ʶ f"@Seȱuޖ[zeKae'[.]%َC :ЫsZX(6 lmӋBBr `EucDNLSy_u?oWg糟YұǑ/{]̞V~ȮcUhEl~h-k݂ *j:רwr=8FiX¥t"RAI]T%q"HH+$2$]"tҹoPBm:,ߒ4NuKՌn1Gv4{{qoEOUt-Pf8+;1v!!!!!p#s vRN tF}FҠ TKK]# C۾gVq ![j@)RviW 5eXh5zڮׂ@OF.Ҽp ;Ket!P;:evL="G mWV.T,VT1JTQ$LqUA]78yg)@Bf"*ICm$"Zgur =7,(;Xl R[ &:El?b6䧬Ȼ .ڿaU`K&s3    nXbrU$\j'][ٗK~z\#D BK{F iAj@']g1zjU weBfB')[fg9- `8;ҷ .h;iVRX{y< 5 0ji:8('G/M"F1H[̲pDN;t"m2RZuQ&U/ViS: oGEJ9A$צεwWVo.NaiSr`17XB`VE2F':eL6ɵkqܘ,ƋN8-\WGDlmv i" ]o&Y$uRih>&:qpR8+V5F~OMƯk/%kzNڞgm?5pկ'%ȖNn՘j!yj'RR-0PiuIr^x]-9~+ΆhMɇ 6%:}PKcØ4[فzڙ^ &ԉL$˳_s~i_ϯoyC5Kyzs~U2%I:u )7!oϯny{uF5Eo9KׅUU1a IqIJLeĄ ysxy7񻙗2hojG*x^xC6l|ox :"B$KOLx%Ӹ{yYIIL W{ȭKgӗs׭/a (o-|_;Z4!c/y{8&p͟cAؖ!i b?)~\T4SYm7W3 (4׉<8q8AbuO_ZJY֏xё\=9Vě+g^fpˤ8[~CGF M3r]\/㥏"aUҧlyu*R;p)w rl?oLeH^sٙTHyy/WE;1T+O6ʖ?Q90."ۘK}jv| ?rl熒:w}BqF*m[iwo**.uGodS+zeL$pG?J#VpIذ;`hYB7{&e%%$1|țϪe.x;ڻtv*rIN6Y2>|t&mVVUyyyxmK`xm@ou^rTr!G?uG!dIs#|s|&X*gJʝ3Kt[Ҿv!y˸69⍯rcL|&J.: ܀n% e2ʨ -[9VK讠b"> cTu-J[$d24qοP!bWoY6\ d 4՗~a,oWpk8z~Ojm+`] ٓYEz$VdQ;=Wnҫ(Q^lTE/ͦ L97P&xo#O{!&8A-T᝿! *̧<`o#mM$ Bj ޺Sѩ-aAF Kf#_oF0]iظo%<*լzOvokKSGZ')`#}PrW{{;yGG-e2V_Ȣ_TFٺC˙[RĔ{&jZŎU.t]s{34-aM-+sK;y*3{E$c(K;'mԗcֽXCE3 ļ$'4濻}|tK6oa$ ۊyKu4kGƪ&|̓s~ô>Yݴ\%"m(H1eTi3.`k6_U +Bw0#bD4^|ڇEt~;Y^L _/'{jV}}9.K9ٛsɗ]"iBφou"+EN.Q}kgG*Č4!dYdAm'\t^'  /q A!%V,/XWu֖N.K2G(9U±$]NN$%D2#TW3 H7mkq-60+QºW)mo$'Qt8‡1fpF;UUD> _:}y ;sSY!*AÆ $ ɨѩ$i-}M! 
v^ɁN\OCe9[65s6=3c0MŚd/0aF1*?)w~ Dc_#`'e;e΂ѵR涁,XWsn;†SN݃ 0YZH8T*2 (f܄lx1dRtZ ~Z!qxz9#QQOVI=5嗌䢁ˋٷh{:7526jۅ"?~ԟ3$$K)m-ބ5 9Š+1,Eٵ/^2ʯ!% }mSh˖~)fB`ub;6lV`?B)rRu}0$ d5& \p eA@%Xu6% RJA*dk}q$}.9I:%RU'jUN:O;8?33[glUҮVj U)$vqOb_vƎos\0 @ԐV2S=+!apE[M;;yO,g̯5,@\+xڣ) e.ԍp kqۑX)fxjDL죫--ab3k2(+v^5SJTរR?,b*hջ֪-/BU/lZ.(@L~ʦemIi%îJ$5P@ЪyzI_|"Ɵx~<ɇk[ʋhc{?/_K,@ZWx9+A,!it5Wg 2TTv׀\*Ozsk~ ;9rWVkyz@xA΅4qT'En^ 34ܣot~YGs9^Y7|,]ѭ:@q`g3>jCKZNzdcZ&]ZT[dDhS5Mv+\PW*л[תВ6]%_r-AĹJUDC#8]^ZLIqZ/+tFV* N%cU7K3C *il-#xMn=T]w>m[2i+>!6\\j]ʫT2C皎-S5Q1= mphkURyăOF|緩|Owx(V^‡9Z@:Ѫ L" i^8\xGpx&sy`"^'"I7\I+N!qXpW[w#O/m0P#Yͷ>)\N4ڱ{;Bu5ZZ#|l,t2OzNV`F:7^B_iM2jH:d6\LzȵhG*<>ِҐ8=Y'b$/fUkuVљ-k՗wɱ :ul0WmL:^uC ΄/: |_ ߚ] x( }iLgxN+&x'piy} $U6r>AFV7F v&-Ve]_<ΗE3zACͣwe]U#NwыkNu~x:D譞Pe/aPvx^~{&I6]q X<'=#VA'Lzv#0p,|4{v}Knğ\ʉ$whb*,5<ڇ4TMrP PetHns-J"TO㑜=wD)6쒝S.U@ont 5M;Q|RKN x7,ޮplv)UBB£#s'fz2DaZ-WYULx VSoZeܠ412s~[:d¥?>E0y$\k;Kud %ZMvN '\hنQu^U/o`It@A:i#jSڭڵm.̡vx_[,DVA]|OtZ}9,bIyY |%`?2R:Τog+@䶈)ja<9sux&|x kӐ=UI}N<6xPI5clE08DT$Vz" U "2PS30GExG#zF\V,&Gt׋KzQe}sҺN;,勑gKƁ\Cۦj})7ިg72yO#)#w㶅q:?vxfդ YI+ 7?8MWo]>Mq{ D.]n9 "^?_>2(\]7|Pmnӹ*+>K4,}Kn{D?Zڿа8nn5c?p`[ǀ-~u3):}mwO>E6w]Rc0K&ߪwl ur]ϑW O)F_ngƛO^o:FΫV%#d~,or9> 쓷꫔GOml>_?gqc[&@}to ;kp}@ܺE%gjc _9G{#NIeq}=+XՉe;_lHt~Oן9_2}zO-({4M_B7>xtmXYI >{Kv˫>_M:B…^ݿR}2!oԖ'V~+w#=7 bs:rfmm܏'wF[mҕST<1/C>OKޣGZ| Xv6]1gp-?w,]޾p_T?7Gw폶T/ܫz}Epmݺ5܌f&lxEy=2uޮU:;z?&cr~6/jq51l̻EGC*SGCyI$d596afz#[ uuhBÄu^Ypq?e@hTXL&-(YٴR<|9T ^rD,0wЬldqB$4oN7jh:q&9NS+i']sԽt'/J`,c'cHnXeQRSl/ᾗxH(Tcn#Be|z\ܣB@?44&0s(1`ׂ 2T5ifbedzmF /kZOd~'6~:A5qR8FmF1q__+~\K/:_xQ2*!_)c]vML(O;WK<`bS.lPà ˕Tx_3'/vs=6UvV\S}c桟'>OBQr/_y3҇d}Okb*u0Wev|BI9_(\q?Sܹx,zڰ^=c&ro :vO~$v M9Bd)B1)(Ӄӏ<_ }*{{Pjh*1m`$% LY7;hI$ڎ/q}}Gowt|8eQ욶s6+_r }Q5]$02S8s\GqlxSŸ{2I@?#9Sk{ĥZD9KOcj>1q>227-}^tj9B/^ly3.j&3t>~f}pv[#vԩ+عMo-n^_ Rq뿡@ع 쿚UEYX T&gڜqf$_A`lN:40ZѲQ5}0h5*\@g\b\,,Lbzk"z4dd_?wjj?ZJڮv[3;oSHYMc77d"~!JGr^13b#nN_+T9eFdLҳUCt/_s9_49ɂSEx"ށJ tTdn)Z|O"2 lpQsDNT`2{hyfU"p`К'4tbq[&s;'%[U".hdRmQҺGw. 
6@Kd1\ڻVcx<'0Oqp~Pp^6,:3rydj) ,\ @"2eF smÜ&e`O @8A2u9~M5_(ׯB/ i,7<'t%Y(^>J&hXـ\&ȔJCw<_Kl۶}v<[KX6@_ǡzZFVW DK p`lgݛ )v~&7DQKf7xt0&CDqT "ӥ=CQ>2}R#Þ|I ]G"dw\ s`E#XD8=2<:T1J YENv߽tߣGq%bT6}sWji5Wd6w.jO>[T{>XN\nK|띟苷ݤu5}}nw(5Wsux5_'u ӿ-WyNY eP+A갋Ȭ&WI$n[jQQ|G#IqMj$IPqFTAn1qL"t<3B_^٢Q߰I;yuԊ˓+#{>F8,X⠾#dh9۲m\ 3xG?kh8 GObCGu&#R%(ojPɑvj($%M4Uv^. H Q9Bya)gGEQD_v66(QWp"*#$:V3hu0#`IbA~ҸZ=|rPBG"''W8I׆ru=1KIҤuŶa?Bqщz=4Oud[/bcCN;7<%XzyGbѶx7E&>r_SМ,X _2W;MYuI,CʀV} ;<5|g5/N/SGu@Y Ht~DCO8mKiY4ǾwI"&$63.VlݗCl;TٌD\ #zJ,c>:;['$ĩj`kp9k;7uO( lv֝7;ޫ$V.ŰkfJ3x |<ɠF4H8YM3|1R6qՄ0+J_#4.XG'}㹪~Z9 -f 6&EA@6I|݊b!%(b )D@"3G}Vw|ȧB5R+22M#ڽ{\Ue?Ѷu TFs1G~Au)4Z>_7_oݪ3?O]v.X]}hnXKwߢێ9U`=)DԨp]ܯo>DNgf]!KvЄV"d͵?ԥoVs`VwdzHW|HWM*~#I'٨u>uG>vպy2&ku%ieTOݯ[c%(keV=Ow׏ Ƕ5d[_x6V/nP?PBqZS~7*lі ͅz|VOkun94/l..e,. Y^(1b*%a #< IV6c3b3r4 K= .y;{B:7gP BĠ~Thg8&`IUk^YG-4wM !<7f=/L:õ@`!9c0l7~K^:@e Hh֣l^5m(&}fRl8f0FU\USxfSDЯqRS^Fg$"qM2Ljh0{D:eW1!Qlgq ZshlQ/L2,^Ea|)QMD}G۞ ֯Dѣ@=yIb7vZzZ&~o *گ_mO'Ouժ^g~=SQuLU7.WS <<$˴r%ljޯv+?Ӌ`K[:%kQ7 zRn7j*S!%6ռI=Y-?3_r_nZb nYӂ|=|V_q0vBM%e 6"7t@M+sG !@됞6mwQyr^}k?/Spj~6VieY\m]ߠ6\[N CztQm%)SBk;Ym2УO!kOZv(M-fڷQIܫmzNo>EB1|fSI9մEMG7ӮgoleRk=ЯQ !_Rmk8&v|v(:U/حQcX{v%_Vn9zjsSWsf]GF;:">T A꿔Xu,<Hsh)%fť::1yώ hFᷘc^4Hhy^i܄ġܰ] 8I_l@= κ^RA.j!봢Ai# <j]ȧHI2dCN ;T(>)q&7JݶKy\@^>s/[&XXg.t2fZ^嚀o5 5xɢP>On$[2 -X<qcz8 Pt<[挋4qS_Pu*l vpVrs4E 1$ pbJ?8UUD+,4:r,j'1:j,Hv7O7]Ф_v㙧5r*14I_}[j]pz ԱvLb;_rL˩H-j&$>7IH1}}F֮ZUE.T}w<vj׎:8٤/{ !ʄ|X7pffpEػBˮ ln戆)a,5~!e.Swkӻtrm~nz[v ]wZ=vMT_+>]PNA:3\TՆktEn c=!U|kǣ85եT_`WMԮ{vjX >wv0;Q/LxΏܢ up熄ܜvinۧu:kj9'p>zQsxvwhgw._ݠ?h穆Do1d> wjb;G,oV]Ql|q^'iߝs9c{*t]?&eS-OF#7 3<!a%Ǜc^-Jq}2<(qXUxFljh>q|bkYٺUevdXZzת /UũuOӡ}4в3%Oql{T'`̫ilљZԙ׊y略@*<[Iae |Ieb~Mu$c+ٖs6R?my-\Җf縈袸\U3*Xڛ]Ã,LiZXΥa W.Sk%!JԵgt{L§j#^U^aKVDaM_Tul7#R $T)y`)z~k>qڲJIdio9YkiĖf0q@B)SxxF4[PgUc>K)._պhte8W(hE睒 `:DzwY_@.nS+2H) v`{){*/坉։I62T&\^  o&퇛KStO!ŗdeFk߾*˧L[ÌojdFWU`iPH pAYԅ' Y"?WtxhFe ZcLE' 3K|,|cebVf`՚<OIn.?PlysΔn$j:oXf9ay ^DbNXsax1b L&2(Xir:uh`d|vGJ̕iHkY]t Ӳ:;ԥ-%4%2 #L! 
a[s꽿@l׏?WO)~zεSh=xF{_l-5ޮ[g_G4}]?Wuۺ3[\.vs$9GKNq0eOX֍gϒ2s``:Ng6\|/Wh`aF:XHeZ7['hr>kŕ=xSzt^وc} g]_JMG$M׵qk[y;̘vmMjxx^sh>p]Vh5\lY:x'e^dJ2&[䚂DdɐC8掴0ps M;7BdhN&b'q6k~Ev^C_z]֞3Nݨ@:xrАF(1=4 ,#<{zT^^ڵ5 +6ЭHau*DsCA&)઱d%lbx\K:%:͑kDeW/Vu)|w 5 s˷bƒu[ˆF:l?ҜYOefw ZnDz t %˙p5I/R6+I twԀSu鑳, yt4Yc7ps w~OC^k5}\++z7`4w׫`&mt灧$ ɚժni*hFQBݏB;#slidqgwjvt\'΍M7Vz&J[SCspPTjRhOy\9Z{b'ߣ#7/Tb߶a[hWkZxbke_ǚʵÌ4;/RkT) ,p| ֒x8EJ J1/.nO65;)y2| d-џ\ D1+xhjNH W^FeKlySMNNjhh'\[Z OSOSUU*+KSpBaSFYۅU] tҩb{E`x`(Ӏ/AžOlP6aNe[ 鉁>{Z+^o^TTS H?ؒf^)`^j(.Nl}1 cc4m[X" ~6 41Ue9qpKXqql`~dG{7CU3z@&TZfq ɢ޻Yoï5*~zPֿ՟^3mJ9<9o AGZƥƄ>iv8_˴BN5jkG&fyXMVJ\}ZU++ zMY e\[nM67),]+t!!tpI[ >mga}-ڼDSHIlm!Pl-z`j_ӹ\%c"-\n$ޯ^_!HcKڼ,}j) ,@) ,@?΅4*?.ykBkF^Rze)9<6Bh2'xə-*i k}Z$4ZN` ޖq`}_7),8AmgW,E5^^6pllG #l{`gZ]P HT8j.GA_1[d]~mX2hcL.{}lq<~&Zjg9 mfgL'M?8k^<JҐ5#ӓv# go.8LGwZ) ,@'+^SQgP=,x+ ,6C2<$0ǝˏ{- p;_Zȼ #5TPwL%eHqB|7dVq^sr팫 GAfVcUwA&;ppГK"vɾ9Sg;3d?-o!3umy GbO^EԇPP4Kh¡$#1G45Tqӱ5 @y ONFG\,drTKzeNjTWV1L|ÚX&!ǧ!ȺJ:w8\atFfTS~QWA B/4"JK#h!9'w&owNcq̢8B=U626 ,,ϯ4<[>FY%̓cimafÊrN/ qtl!gcr@;R{/,>@%H#G4889%q2d\(-%(Zwo`$ ؅׫{0i̒zzpGٟ2 > gsGѼ>x,fٌ1p&|F(S@*}e((ێPd0هQel% ^WfQ'΍Gɇ}{Tm# +c=뼏j5 MTXEx"MS(c:xP^VCՄ.ؐ<R|NY>[ַ=:҅WuU%ZS3NeT9> \J(^C6p8 [Jd;5UZEĔW,chD0B1RHY eRx#Xu\f4Bhiur+t0|i0ώ " 0fZh~`H!,d#ԞH@(uVv$\ݯa&s#{nΖPw%#ф&iS6"" yl,.s/Gje@3f*,G >V,VNX,s,;h^XEyM՜)Ej6,ܦCw;5gneVxyB_+W+BSABT=)SyńS0{ic]ա#ǜz-EZA$;O/_xx楤*deƧ 2kV,RFE:s˶ƵZ) ,@o 3*.<<.BfW47ℵr? Y2q3Y/ϒϝ 0X$%E7kbUjO!ynë(r~4 9^ʊc[@ШN#H1kJgR6+~sĖ6ֳYHY5[" 3`F_ X+jٟ!!r%Or4.zvp)=Ү'RE>M~TT]+$cspfaoB<c2&:GqdWrD!2wFތR}~ ]8IhzҼZk4M>uEz-j) ,@) A,p$= eeʥh7WZZ KHкkv\Ń:~PeEH7d=sp vO(E gXȒYBbix:)VݼYk֞B֢r| v eZ-CV:Hvw4gu^M+ZԠ"xw*sU}ϫIc}.(N7$ugBzocA_j߂hP(8@vz^)=IGփ,Yhrx |Uvkr#À3Q=SitҒ/q!gRHY eNV _& eBd"TVM>6 lrϩoU fSzs:H< :Я펎+au ޱ[MTti3i,8W246._Wǻe3DX}KW sSNhZV{vsEK3XZxE*+w¯pFkz +NNNq²GQUMƺ9$H6?Ksxrn_E,aЈGۇm1 KLGU 8C\آKw-M) ,@'N!Ӱ!دR4!De.-)\1! 
<2%mҲbk;/:ۣ^Y#/Z 5 )8ݾ[}C=|HiO($ B3 PCC#lZ:Q/>nbBQ<[x[^k*%v0V 9w[X *­٦LdkOu#Veg60R1Bd f"Y1~3{s%{]sy4RigHZ`x3Fc/X)\xY:BUH;JFē&șә<^6N5գ|Dp os2!,^e>)K2ȗuIMf+EBߥYNާe(pѸ]s&Փ'w+vNҁK2Y@R D|t l QHHj6j@9˚86^YB?6]mT&Y& #4O?Y^$u(Si'BH]. jTKY eRHYdIٚ_Ɋ1Ygj"RA=AIv U>ިp2r34 P:DfGdz4YT)ؐٔ)G{ p .P( 1 ׋mDL nY dwK*>jM,c\&8F2gG*/{5珼)UL4>|4" i76}vX-"Y,tCR" 5稂bxFCnjB@ ${FU5XPMN]~RV<|ҝ@avP9oh/.CS3@f3Y7S}zȶ;+0 Lj~a3}52xPvCqbنS2ê-;ul]w[ iR~h(a}=] "@mI\\ΟitaQB~q̃ V4(Ex,_*'d,MǼip!$´%[ T'eQOW[xlOaJ%YsXBiyr/M1Q䙻Dٮamm`:sSY:wJG5O"c}F(t\XjcORHY eY\䂚1şjrB1j0uL3Pg"j.V@ /hG< P=elN+Ru`[m=:kaU?UUZQ2!nL7m5 Ewk6OP|;f 9{߹Y7(* 8Pijii%u(QMlu-sD(=>)&&x/xk<0fW25ej:kVF )T\-h1#:xZ*ēHMa=GO6lsB//rI(?8t`dͮBU0PGsYþ-S8YȘlgYg9k(|poڰa^S-eRHY e'Y(]:uNg1ÀH@퐺OlU[{B7\[{:;Z<ϴ[5ݡU7K9? 68K^b,/$( 5 N>3:#/^,1j&wUcp1IlF}h돌< nB+AKXt~ҽa2 kL::rӯ-#gA:3Cl=™#4W|6U8c&rɘ4nX.}nR7@gzDSk޾SHY e^W T R=z1"0^:HYC]{47VBj-ϋmR@xV*8CP3ޥ𽼻vi*42C% YMBiei*0ʪ*u]ײwJ)F y(T"`TǺ` `b+"tM{TͮQN hVbOFLB'*6 َ=8BwutzCeU%C: A[V/뢰m@]1C=(hCv#ٷy(($9@"9(#JAע d1W=$0f̢LXNTrhh}4X,^{ԟ<qVH!x2At5)%N%b0ȡ=흨EFRE{-R=ؿ}+;kP@au4DtsaRf,L£ A@I!ռ"CNsAg9̚]2&.̙V3dxJ>#qޝ0K?~"+9Ը`3.z0~ gޅyp1 K,7GGV#bawƼbfn{G.Gz A@Y}mGzx NlCšK8jJݜFNЮ]I ,wqg̳M-awCWڍe&Xyb%#6hRJ@ (%0U2JpLNΒEkΞݜX> ?.X"4E[OLʋ r䆓ge7S= V,ڈ9yl۫f4sLj΄kbra'(88TEEg;"bMgE(P8y_"2r)J~">]%(#cEɶ$H\G}Sqȯœ;GUdD^]dkN,[̈[o~Xtrާἶ6Zh 6)3#_}5f>l<&+C91){r?Cݏ.`YhC!N8p3$G(J,yV %rl0Nc 7CO^!F1i?+EpI}2W>|#{xl;2f6 !Mh`"irU?ḏ4YPJ@ (I$QIR!Bꊠ65s2R|5*9$v1:cPnZN">qC:ŇmQeBUy׬;G~l\{,!BA :L`s H::oo2>CCt?z<_Ju ZHg|ΰ39%caBTW5:*Ħn E:*=ָ9t_0PP1ɏo*֨< ue[$Ӄg}VFhfT!U"۶0++ˍkʕfy`Ò }!Vq=a>$YʟJ󳸈t[{N8s~q'G'paRi1;zǬv Еf .#E,ęqFf2(_w?_N;!$ ʼ/+D=~Z VЇa7vRm'U-]^6UJ@ ( QAuR@e1/FWۍ(E-:ˬa>l B"KqnfFe3,܏ 䉝[ۈ8GqfPw Ma7j\ø$E|@+H]4jRJ@ (%0DsdFbꁟ+ʁeb:gRAG?s !@ܳ8J{ބn/h=y `ۦߡ݈22{*Z@)zh GкBE nhh4M"d0b\ذfZehmju=zFYwEGS;DJ1 ,w\g0=Y2 ̏ Ϸp#H sif`Y@<l`k F/Ŭ+{ojh@Nm'(q e0r5haWqJ:16ضGRo{g_lS[ (%dv%bGRQ;;pہP~42-ݍLtfxZRS/B?geLgl(Q9!N%8š#GhnZs6ͻA=0@"g͵{bSs= 62VΣ: ٱ#Fv.UfX1BjJBP*3d_ilڱ:3n*9b<ߊ\O$|̸ؗ|"J8'y&-^zW,XFx% 
-GV'H8csI]]^\8)]PЉKIJ5֌\$a;rٜl~z9?8$ASEҒ-|lKl}x2SBnpBE.v•Lcz~_36АWh9%h,@,A(%*DlCXXͅ8=-pS+xNsjCRXöoQ ( `E9ˇbkF9Nz; gY|Ld791`o(ٹ>ۉ#J$wAA*%=2K~u}^qєVRq0Om ޣ1y% [bFIv1 k<*Sm,ŲĀ Fr/&E'Ii&Rc)N\n.aVq|pRbbsϱ~d:aZrA8Ҝ)3},8թq`/F>'&=Q4Gi9g[$ધn-5K`KP=~j.:~Xcs1] @sjfq_|׬lPJ@ L%i\br'k" ),/f`H#"$G_nRDOI"GNM#NosQ$m2JalF .դPSI` {,Z5<쌥N]:s%.e?=;;@sq) .vCFtQdy苔?Qau؅(prꌲӴWzm݌^Mb.UUJ9͝=o)|>'QeX2B5祅J"*K"̷EIVEz>Үr`Xp^:Q{нsGXWgNN&2~O(N +Dwκ}wd)A4iMqj+zグqgҋM>gij%>laF/Ȭ(cPd8 g\ GQ:;Ymy߆6|<8lܰBkVCi a4B>в` |YoD; o h(5YO_OФPJ`jL[q#ީcFtNԙ':"6ZtD;gI\*."K 9 /`14̳e-0/- M2dg9Y2snS sb#0k۪Pyݨ^8}Zq*rqX>,?bKf?2($ubn,ӭO>|-] (% 0KZI2bλ3![ pӬGV勋{=\qHXz~"DwM1.ŽϓeK")rP2[s8$b}emӄo"K7ՈTۃ]-D}c3V-srF8LI (%eә:[^MKg;Є:43f.T2?BiIid&qhƐ c0*/3=8%d#_KS e 3˧*,,`,,:;rAQiͣokCË(`2ˡU˓|)q.!6d`<7.dzAԏd %PO`n{I+ Պa.̀vq9\5 Bl?-Z~oG(W.138r'GA\}9fHWBwH,2?Lp8w!oYps;0k6:hilBlNd. %nEZlTD1'pjX\#S&LhRJ@ (%0h+$>66;!vLKb)f߹}?w4 C"8L˨}HWXPtaJ;>GeQ9Q=h\(*,2‹Abj_ ND%$"}K0?o{ I1I3@tB-qL}(@W$SJ@ (%0uUp9e :z94%ҙ%I8~㚃Z\o|$쀬ݗR DY#IZ9.C mM>b%zbI%VZqDxݨFDjn@{SbV- ;'KAz[Q߈YelZC)PJ@ L!iR4V.,Υ GN^鰅e̅eB*/M@g3_dQRU:wV\UgDa$R2)ciQ:(͉RG%WTRN#𭔙͇0qxȫ x3 1yiefkqONj<Z%,:q7|9{qC+ʹތPJ@ d>i\GBIv!EQa)WS7u1S6jfô1$A>ڎF`Hqh$A-GyE]r ]\Ccq\.(nNnEWTφp# R_;w#xS rk+}"KGV%kW1PU9XP;_cU8l'G*פPJ`!% J*}mz:%=ymdw#kk8Yx4ZC bWw-\T1#;a0(vE M/.c -V4"aY8p\tb8@߹Z|>nk9zp zi B\8ݗVy"yʌEyqK9KUo[s ) {IJ@ (%. .2?Eti+T| M ![B\'#!qvb ][?BMvDa,l,q3|㦹47.:nG^wڏMu#0CcȂl!꬜hOu}A;CTպun)%Tv%'DMOo4Z%+9ܔ_CfjȢʎp۹/QiXjOKܫ<a%x\_g 8u3V_/|\zdqĒ(U߉ٜVt/Z4Q4$3mL-7{n1X:YbȗךPJ@ L5}BJDèLoO .ˇ\vqvbKܤyW8Ppje+&3JE|:qDrW, Pb0f|.JV-}JEp}.^/UIbUhNZe%.?l:$ MJ@ (%@F.Z&\ba>UGtIij&|¯VBb03IBĒ+)j:GlOg9)hBGQ)Y^ü a"3Myp*GemG1֭,1,N,rm*% !TU28ZS sRJ5ERRI9p#t EG飆myf@'RJ@ x!Lp}R1ZG3t08{;޳yL%FT_PC 3T2ED.@_v.-(<Y#H&,\b)MIh֩PJl"&YOLLvR)4QfyRjJ7]ưѹt_ (%6+=gR/ث_JAw" :ln\=e*ȆjYJ@ (%. 
.{<git;z2\*"Nﳒ1T}LSUsrPG`<VT'~K24!)gtzvGo_jǓ/FJlɷqr]+%\.,DA?dO9;e[9KPIRRy_WeNOI (%@,`}zxH~?iIR&cZn*#r\/^JpsY9Ųb:24PJ@ ("a!ųsgejb L}R5JiMKSfI>t>aXw*~+%Xl8^ +UK#s$RΗ粎'+lOdWe,Uтk _vFNcuds&%PDR n3K*<$}XcޛIʙnVڐ*t7;DZ#tqFp]> ,^$#n}&:XCH+8HV.N.`I (%@eSsF96'*=eJonO;#؊;"b\@? mPJ@ A #mIYh\=R(ۗ\'ƅc:{45PJ@ L6sHɮmZ&bIDN4}Uz RLr35:? 3V1zդPJ L2˰Pp N .i8CK)K5]R$coJmX:RS#&/Y͹TF9̜*C  e^DHH I&oyKWGk%G?ӏ7G0n9'ץT9"~^ʕbRyүKm˹9rB)e|ROIS4*[)%h![] u rb3 -]n;n`vDL+vqf누]n< d3n=9\,;ґDKX61O SK:rG1IHs\I#EؐoI^l9[<~8-bk eG aeLB d$_ȥC&F"SXk$5<.Lҡ7"P(D8)563WIMy1py%+8IJjS+;-Rbsܦ%9#rYXI>9/)!!2EuxDb":/{H4ͥW3-q:ԐROZ43:jN0i?bf:xIƴILyb3|]ʗxBDYN?oCo iRJ@ (% 2Bp a'kAyPHةC $)~B A#S$L 30IN%':f.#JV Lrt+mJ4\r߬ch#L 69'%"w(M̈Hgv:B4#>)R(pb"fIē)%yafDF#)䓞Nt\~{N^O6q'%\-G3ϗSO6KLzRgKe]N\0 5Ms0L@MJ@ (%2@." LEkeP2hv"ō$ A@Ig)Rs uɦ@1cdɋͼ+VqNINZ1R^д}Tф +QRXB,%Ţ5Qi'\p6}RL&L!Okh3]5)%D#NÖN>e:kdK{u@XI6wtv8j&Qg#J@ (%fLg)Ly"%RC &SRۭPJ`fkf?i{[YfhRm{ kJ@ (%. WMxp8(%T3ևkRj$/xQdbXrj%PC@-\,fTKf-*% &k?|u%PJ`jZPJ@ (%0 _o] (% H~SiZPJ@ (%x:zW-](%PJ 8&%PJ@ (I#Xդ+%PJ`PPJ@ (#kXkMJ@ (% %k>xm%PJ`:ZPJ@ (%0C ^o[ (%:*֤PJ@ P*fVJ@ (% c5)%P3 PJ@ (#KUxXj%pUp!]|zSJ@ ( F1e$n:ST \\]c;+&J@ (%0̙ڋ;&tB\fE J@ (%Iz{{tc\. >oLj&%PJ@ c1 X,H$25PJ@ (%f x<>[Yc¤PJ@ (%0~*NTJ@ (%&ͤPJ@ (P5~vzPJ@ (%D@ט0i&%PJ@ +PJ@ (%0&*ƄI3)%PJ`Tp^PJ@ (1P5&LI (%P'kJ%PJ@ 1aLJ@ (%?\gW*%PJ`LTp fRJ@ (% ^P3Nvk1{ei5.V@ۓE4=xsrLk۞>*'xi Wui4PJ`XWo?7NtZ^PG$b۲mpM3QJ(H毄mTJ@ (%hjẢ6^ (%0hQ ֪*jẪތPJ@ (%TpeS6)%2Ut$>hC'goAh9_4ޕW)^yL["&}H1ڏC(*r"2sQG u|n8qX/ 7-,$n+I"kjJ@ (d .5<ķi]TNPZ֣xѐÝ>K|/<ЗKG,\q?nLijNGSC m %P"` zEVXiDYbKzJU~|@VV2uC L\SZkRJ@ \I ۂGqY+ۃW_c;?V V +]~xGnFUxg;oykiÕ^>8^?/7=oJu[ L \QKQJ@ \dlY8 ˞OKV'8zuk}Xxc>Vwš&<6{zU}wUa֣d0;v#ȫBڇ 6O~2n}U;~*iPJ@ L;rcd{q=u*VoZ7szǟځ82Zю8Z\U!Z<-rMoz7C^Hpߋn_Tp]XoP (%p%έ['.j2|?w)~a͂lgS^r8)Þ+9sxuz^jlj6a jQ]kNüy5WÜɘ_ٖw2VL sD-vX.26hXBo9fFo8: ~Zk`B0 dN eqxd̳a  K@p%po)NI%%_)QepǶ%.Mc>k羌[K?8[{)/I>G'&-6dLy -{owV9`oK~||8B@re.~S~GF3]>@dR۴-J#p%"t&ZDq:)iTc?'_؉w>0; &'ʖ+SeJՓp_(3^i@`שN|4? :y~2HM܃p=O1V~7|xڛ4U'-uďe<,'? 
w`h eK"ǽ~*&/1rj8هG>^4u /<_MhϾ7 "N.¿{#o rFNۧH69Au8Ӆ돇}ȽCeNێxFQZrEP[xQ%Q!#SC40He(O9& wYhv ,* kJ'm#E~ۉ3ڦwխmd`z8rUwd3^*d]ف%X{[5^ TVo_v`~I^`-e(^b| Pp%e(qx( UƑUX\S9zz'~C`8X0VTEv`ag/ t؅`AzkTTŃ0],`cl_ \,[@6mqaS#=!]xnd3IJʗDq, ;(;qF`q }Cw^E¡nl?`a!ɧxQ79B9؅ZIʖFq!\O۷ËD'BA -l_qr7"qݻYF{J8sh";+ 7kZ[oӉ˨:ld͊#Ewà ,`uud9x,iAܺ<ܴdׅ݃*A -m-`=xw`;V\\r |ԋ۝sX!Cn;Ai}u/ζrQ:)tB%1,XCCo 1Yn7 |׾>nZyg֠eƺQ'=8?_bV{Feqw {^ ]6W WDqcndXƝ|q'XQb'w= U1/Hckڮ?ulwg#%*Di?&]ܐ||'ql͚wTG ]kyέ%OOM*L+|; scҊ\h( /Zhs7@ :'cz݋W_qဨ-v:a: 5 76Ή1ζG=$)Bi7ݘs -nsbny8vcg7t#bp'5x<N0!E嵃ciQD`ȼG"mB. 9iVh&GI.;݅}M"IDAT<6~'.iCtr 59pM8\o9$Cu.x6Gxl`8:2<&x\wnPnSشf(0& XN ϡ7\x{ p>s|alKBx;-W񗚖ytȍ[(a|a̪B߇p8xͅՍ _pcy-!B-(AO#7-փôx0ƿ܍O߰tX!yn ~|_])~oi >ps >|1w8>v ʦhh(:?rt c>涺ZF8~` "Їy ?xkAܸ$lUŐ7߉A;k 8+wQu HY/~ʅQԬ%VO})Y4D<'ha`򽙽"8NZǽ|^K9&N*9?B{ʼn}WCaGva7ClU2,c#}w)O:ѯ뼏^O(%& .v>k7FvK*8<ǎ:eCZ 6=_ ßc1-f]-N1L4I ղSN(6\ǼK[,jw( 8#XxV`~ %8tIbXqCK֘J2VZ\XKxy( ):l4cQKXҒux3Y|TOxơRU}3X_<̹Q'?'E^9-C7Z2+91ܾ<^ N0>\K'sz(>f's疔s~66زXz)j**i9r*k+bpEؾϥR/؆n,nI_I˙umAς8 Xc^ >fQ$Vc MqdW-}HiCI^5AroO&Cq(ͦoza[7cC6ϥ֑Ag5.o`G_ J9E9W|CIf~׋_dワ`o]?m32&vSn|8h *dEqx6][?//BŃ[c6=6χq?-Z9RUJ@ (#pcZH81#è0{1\!:Zv9yh<î:.JZ :V7<&}o>Lr 1h2Uy6-,>zL+كEjl`1|N:|ƃF6{fA^ s=T"wİ֧WvPlqhcNsHע=G͜5YaǼxq0n\a~bg7AxqNёF7Xt og!="yq,L?VѬ!$^"V&1CkM-|whe;H;rӚUuۢ(O_?r8G&.cu|4X:;>C_0nO<.-<_8VHqПO ZFEcτ>qs8$šS{8T Ll'p}|x!r|FRiRM/>1[}ɩT3L0qClAll.'y>Y|,?}ʃ};m_ 3z[iM%"[KKPʥ6byCkqtJ&J>=O wgngDx_2\;ocƳlņ^3`9:I49-F 7q! vc9OMaLТAV^~CׇwGaSKrȏX6s)]6 [EPgƟ>i+*=r.]Z'ßE͉` " v7qb:jacQ6op[3KZxG~sϿcAA93J_xi/ j0XY6>'a]w#;r(ڇB'ɮ`>c6qj\8on'W~l 9˙:̐ 5nH? 
`-7[zY~yg.x7􆅥_br c`;*P&p0HXZkbfni&JZ~mT-Nu_V산sD3^4IǞscee`^TIYX\IPLBQPixp>ER_hi_]wa.8-O늌%l]mlw+)~1@ĥmli7FQNr ;ꉪR N5I'&cixuqep!ʹ9(b %&q\܃dXg6nd>f?B_2 k9iPx/ohtM} @EGQMtt.ؙȍS|TU%b|kYsCk݁3lurNODony8Ðw~TC}#:w:o&{F/ϦXpcpƱ/(pQy⠏[B;ND" {&B߭|r$1 b3PQ>zGSDAwGQ?o)8Ҿ]p e#YPHv?5I҆;1^Ͽ)4)1XP|4)%0>͛Z'9GjiI 5>nzPJ@ (%L@טQiF%PJ@ qӫPJ@ (%0&eZc"PJ@ (%0NFpMJ@ (%PC@сɩEKUJ@ (% %# GXضHx44#PJ@ (% F@ IENDB`docker-1.10.3/docs/installation/index.md000066400000000000000000000032411267010174400201270ustar00rootroot00000000000000 # Install Docker Engine Docker Engine is supported on Linux, Cloud, Windows, and OS X. Installation instructions are available for the following: ## On Linux * [Arch Linux](linux/archlinux.md) * [CentOS](linux/centos.md) * [CRUX Linux](linux/cruxlinux.md) * [Debian](linux/debian.md) * [Fedora](linux/fedora.md) * [FrugalWare](linux/frugalware.md) * [Gentoo](linux/gentoolinux.md) * [Oracle Linux](linux/oracle.md) * [Red Hat Enterprise Linux](linux/rhel.md) * [openSUSE and SUSE Linux Enterprise](linux/SUSE.md) * [Ubuntu](linux/ubuntulinux.md) If your linux distribution is not listed above, don't give up yet. To try out Docker on a distribution that is not listed above, go here: [Installation from binaries](binaries.md). ## On Cloud * [Choose how to Install](cloud/cloud.md) * [Example: Manual install on a cloud provider](cloud/cloud-ex-aws.md) * [Example: Use Docker Machine to provision cloud hosts](cloud/cloud-ex-machine-ocean.md) ## On OSX and Windows * [Mac OS X](mac.md) * [Windows](windows.md) ## The Docker Archives Instructions for installing prior releases of Docker can be found in the following docker archives: [Docker v1.7](http://docs.docker.com/v1.7/), [Docker v1.6](http://docs.docker.com/v1.6/), [Docker v1.5](http://docs.docker.com/v1.5/), and [Docker v1.4](http://docs.docker.com/v1.4/). 
## Where to go after installing * [About Docker Engine](../index.md) * [Support](https://www.docker.com/support/) * [Training](https://training.docker.com//) docker-1.10.3/docs/installation/linux/000077500000000000000000000000001267010174400176355ustar00rootroot00000000000000docker-1.10.3/docs/installation/linux/SUSE.md000066400000000000000000000111541267010174400207400ustar00rootroot00000000000000 # openSUSE and SUSE Linux Enterprise This page provides instructions for installing and configuring the latest Docker Engine software on openSUSE and SUSE systems. >**Note:** You can also find bleeding edge Docker versions inside of the repositories maintained by the [Virtualization:containers project](https://build.opensuse.org/project/show/Virtualization:containers) on the [Open Build Service](https://build.opensuse.org/). This project delivers also other packages that are related with the Docker ecosystem (for example, Docker Compose). ## Prerequisites You must be running a 64 bit architecture. ## openSUSE Docker is part of the official openSUSE repositories starting from 13.2. No additional repository is required on your system. ## SUSE Linux Enterprise Docker is officially supported on SUSE Linux Enterprise 12 and later. You can find the latest supported Docker packages inside the `Container` module. To enable this module, do the following: 1. Start YaST, and select *Software > Software Repositories*. 2. Click *Add* to open the add-on dialog. 3. Select *Extensions and Module from Registration Server* and click *Next*. 4. From the list of available extensions and modules, select *Container Module* and click *Next*. The containers module and its repositories are added to your system. 5. If you use Subscription Management Tool, update the list of repositories at the SMT server. 
Otherwise execute the following command: $ sudo SUSEConnect -p sle-module-containers/12/x86_64 -r '' >**Note:** currently the `-r ''` flag is required to avoid a known limitation of `SUSEConnect`. The [Virtualization:containers project](https://build.opensuse.org/project/show/Virtualization:containers) on the [Open Build Service](https://build.opensuse.org/) contains also bleeding edge Docker packages for SUSE Linux Enterprise. However these packages are **not supported** by SUSE. ### Install Docker 1. Install the Docker package: $ sudo zypper in docker 2. Start the Docker daemon. $ sudo systemctl start docker 3. Test the Docker installation. $ sudo docker run hello-world ## Configure Docker boot options You can use these steps on openSUSE or SUSE Linux Enterprise. To start the `docker daemon` at boot, set the following: $ sudo systemctl enable docker The `docker` package creates a new group named `docker`. Users, other than `root` user, must be part of this group to interact with the Docker daemon. You can add users with this command syntax: sudo /usr/sbin/usermod -a -G docker Once you add a user, make sure they relog to pick up these new permissions. ## Enable external network access If you want your containers to be able to access the external network, you must enable the `net.ipv4.ip_forward` rule. To do this, use YaST. For openSUSE Tumbleweed and later, browse to the **System -> Network Settings -> Routing** menu. For SUSE Linux Enterprise 12 and previous openSUSE versions, browse to **Network Devices -> Network Settings -> Routing** menu (f) and check the *Enable IPv4 Forwarding* box. 
When networking is handled by the Network Manager, instead of YaST you must edit the `/etc/sysconfig/SuSEfirewall2` file needs by hand to ensure the `FW_ROUTE` flag is set to `yes` like so: FW_ROUTE="yes" ## Custom daemon options If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read the systemd article to learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). ## Uninstallation To uninstall the Docker package: $ sudo zypper rm docker The above command does not remove images, containers, volumes, or user created configuration files on your host. If you wish to delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker You must delete the user created configuration files manually. ## Where to go from here You can find more details about Docker on openSUSE or SUSE Linux Enterprise in the [Docker quick start guide](https://www.suse.com/documentation/sles-12/dockerquick/data/dockerquick.html) on the SUSE website. The document targets SUSE Linux Enterprise, but its contents apply also to openSUSE. Continue to the [User Guide](../../userguide/index.md). docker-1.10.3/docs/installation/linux/archlinux.md000066400000000000000000000056441267010174400221650ustar00rootroot00000000000000 # Arch Linux Installing on Arch Linux can be handled via the package in community: - [docker](https://www.archlinux.org/packages/community/x86_64/docker/) or the following AUR package: - [docker-git](https://aur.archlinux.org/packages/docker-git/) The docker package will install the latest tagged version of docker. The docker-git package will build from the current master branch. ## Dependencies Docker depends on several packages which are specified as dependencies in the packages. 
The core dependencies are: - bridge-utils - device-mapper - iproute2 - sqlite ## Installation For the normal package a simple $ sudo pacman -S docker is all that is needed. For the AUR package execute: $ yaourt -S docker-git The instructions here assume **yaourt** is installed. See [Arch User Repository](https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages) for information on building and installing packages from the AUR if you have not done so before. ## Starting Docker There is a systemd service unit created for docker. To start the docker service: $ sudo systemctl start docker To start on system boot: $ sudo systemctl enable docker ## Custom daemon options If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our systemd article to learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). ## Running Docker with a manually-defined network If you manually configure your network using `systemd-network` version 220 or higher, containers you start with Docker may be unable to access your network. Beginning with version 220, the forwarding setting for a given network (`net.ipv4.conf..forwarding`) defaults to *off*. This setting prevents IP forwarding. It also conflicts with Docker which enables the `net.ipv4.conf.all.forwarding` setting within a container. To work around this, edit the `.network` file in `/etc/systemd/network/` on your Docker host add the following block: ``` [Network] ... IPForward=kernel ... ``` This configuration allows IP forwarding from the container as expected. ## Uninstallation To uninstall the Docker package: $ sudo pacman -R docker To uninstall the Docker package and dependencies that are no longer needed: $ sudo pacman -Rns docker The above commands will not remove images, containers, volumes, or user created configuration files on your host. 
If you wish to delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker You must delete the user created configuration files manually. docker-1.10.3/docs/installation/linux/centos.md000066400000000000000000000141771267010174400214640ustar00rootroot00000000000000 # CentOS Docker runs on CentOS 7.X. An installation on other binary compatible EL7 distributions such as Scientific Linux might succeed, but Docker does not test or support Docker on these distributions. This page instructs you to install using Docker-managed release packages and installation mechanisms. Using these packages ensures you get the latest release of Docker. If you wish to install using CentOS-managed packages, consult your CentOS documentation. ## Prerequisites Docker requires a 64-bit installation regardless of your CentOS version. Also, your kernel must be 3.10 at minimum, which CentOS 7 runs. To check your current kernel version, open a terminal and use `uname -r` to display your kernel version: $ uname -r 3.10.0-229.el7.x86_64 Finally, is it recommended that you fully update your system. Please keep in mind that your system should be fully patched to fix any potential kernel bugs. Any reported kernel bugs may have already been fixed on the latest kernel packages. ## Install There are two ways to install Docker Engine. You can install using the `yum` package manager. Or you can use `curl` with the `get.docker.com` site. This second method runs an installation script which also installs via the `yum` package manager. ### Install with yum 1. Log into your machine as a user with `sudo` or `root` privileges. 2. Make sure your existing yum packages are up-to-date. $ sudo yum update 3. Add the yum repo. $ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF' [dockerrepo] name=Docker Repository baseurl=https://yum.dockerproject.org/repo/main/centos/$releasever/ enabled=1 gpgcheck=1 gpgkey=https://yum.dockerproject.org/gpg EOF 4. Install the Docker package. 
$ sudo yum install docker-engine 5. Start the Docker daemon. $ sudo service docker start 6. Verify `docker` is installed correctly by running a test image in a container. $ sudo docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from hello-world a8219747be10: Pull complete 91c95931e552: Already exists hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security. Digest: sha256:aa03e5d0d5553b4c3473e89c8619cf79df368babd1.7.1cf5daeb82aab55838d Status: Downloaded newer image for hello-world:latest Hello from Docker. This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. (Assuming it was not already locally available.) 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal. To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash For more examples and ideas, visit: http://docs.docker.com/userguide/ ### Install with the script 1. Log into your machine as a user with `sudo` or `root` privileges. 2. Make sure your existing yum packages are up-to-date. $ sudo yum update 3. Run the Docker installation script. $ curl -fsSL https://get.docker.com/ | sh This script adds the `docker.repo` repository and installs Docker. 4. Start the Docker daemon. $ sudo service docker start 5. Verify `docker` is installed correctly by running a test image in a container. $ sudo docker run hello-world ## Create a docker group The `docker` daemon binds to a Unix socket instead of a TCP port. 
By default that Unix socket is owned by the user `root` and other users can access it with `sudo`. For this reason, `docker` daemon always runs as the `root` user. To avoid having to use `sudo` when you use the `docker` command, create a Unix group called `docker` and add users to it. When the `docker` daemon starts, it makes the ownership of the Unix socket read/writable by the `docker` group. >**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack >Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: 1. Log into Centos as a user with `sudo` privileges. 2. Create the `docker` group and add your user. `sudo usermod -aG docker your_username` 3. Log out and log back in. This ensures your user is running with the correct permissions. 4. Verify your work by running `docker` without `sudo`. $ docker run hello-world ## Start the docker daemon at boot To ensure Docker starts when you boot your system, do the following: $ sudo chkconfig docker on If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our Systemd article to learn how to [customize your Systemd Docker daemon options](../../admin/systemd.md). ## Uninstall You can uninstall the Docker software with `yum`. 1. List the package you have installed. $ yum list installed | grep docker yum list installed | grep docker docker-engine.x86_64 1.7.1-1.el7 @/docker-engine-1.7.1-1.el7.x86_64.rpm 2. Remove the package. $ sudo yum -y remove docker-engine.x86_64 This command does not remove images, containers, volumes, or user-created configuration files on your host. 3. To delete all images, containers, and volumes, run the following command: $ rm -rf /var/lib/docker 4. Locate and delete any user-created configuration files. 
docker-1.10.3/docs/installation/linux/cruxlinux.md000066400000000000000000000047741267010174400222340ustar00rootroot00000000000000 # CRUX Linux Installing on CRUX Linux can be handled via the contrib ports from [James Mills](http://prologic.shortcircuit.net.au/) and are included in the official [contrib](http://crux.nu/portdb/?a=repo&q=contrib) ports: - docker The `docker` port will build and install the latest tagged version of Docker. ## Installation Assuming you have contrib enabled, update your ports tree and install docker: $ sudo prt-get depinst docker ## Kernel requirements To have a working **CRUX+Docker** Host you must ensure your Kernel has the necessary modules enabled for the Docker Daemon to function correctly. Please read the `README`: $ sudo prt-get readme docker The `docker` port installs the `contrib/check-config.sh` script provided by the Docker contributors for checking your kernel configuration as a suitable Docker host. To check your Kernel configuration run: $ /usr/share/docker/check-config.sh ## Starting Docker There is a rc script created for Docker. To start the Docker service: $ sudo /etc/rc.d/docker start To start on system boot: - Edit `/etc/rc.conf` - Put `docker` into the `SERVICES=(...)` array after `net`. ## Images There is a CRUX image maintained by [James Mills](http://prologic.shortcircuit.net.au/) as part of the Docker "Official Library" of images. To use this image simply pull it or use it as part of your `FROM` line in your `Dockerfile(s)`. $ docker pull crux $ docker run -i -t crux There are also user contributed [CRUX based image(s)](https://registry.hub.docker.com/repos/crux/) on the Docker Hub. ## Uninstallation To uninstall the Docker package: $ sudo prt-get remove docker The above command will not remove images, containers, volumes, or user created configuration files on your host. 
If you wish to delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker You must delete the user created configuration files manually. ## Issues If you have any issues please file a bug with the [CRUX Bug Tracker](http://crux.nu/bugs/). ## Support For support contact the [CRUX Mailing List](http://crux.nu/Main/MailingLists) or join CRUX's [IRC Channels](http://crux.nu/Main/IrcChannels). on the [FreeNode](http://freenode.net/) IRC Network. docker-1.10.3/docs/installation/linux/debian.md000066400000000000000000000131461267010174400214060ustar00rootroot00000000000000 # Debian Docker is supported on the following versions of Debian: - [*Debian testing stretch (64-bit)*](#debian-wheezy-stable-7-x-64-bit) - [*Debian 8.0 Jessie (64-bit)*](#debian-jessie-80-64-bit) - [*Debian 7.7 Wheezy (64-bit)*](#debian-wheezy-stable-7-x-64-bit) >**Note**: If you previously installed Docker using `APT`, make sure you update your `APT` sources to the new `APT` repository. ## Prerequisites Docker requires a 64-bit installation regardless of your Debian version. Additionally, your kernel must be 3.10 at minimum. The latest 3.10 minor version or a newer maintained version are also acceptable. Kernels older than 3.10 lack some of the features required to run Docker containers. These older versions are known to have bugs which cause data loss and frequently panic under certain conditions. To check your current kernel version, open a terminal and use `uname -r` to display your kernel version: $ uname -r ### Update your apt repository Docker's `APT` repository contains Docker 1.7.1 and higher. To set `APT` to use from the new repository: 1. If you haven't already done so, log into your machine as a user with `sudo` or `root` privileges. 2. Open a terminal window. 3. Purge any older repositories. $ apt-get purge lxc-docker* $ apt-get purge docker.io* 4. 
Update package information, ensure that APT works with the `https` method, and that CA certificates are installed. $ apt-get update $ apt-get install apt-transport-https ca-certificates 5. Add the new `GPG` key. $ apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 6. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor. If the file doesn't exist, create it. 7. Remove any existing entries. 8. Add an entry for your Debian operating system. The possible entries are: - On Debian Wheezy deb https://apt.dockerproject.org/repo debian-wheezy main - On Debian Jessie deb https://apt.dockerproject.org/repo debian-jessie main - On Debian Stretch/Sid deb https://apt.dockerproject.org/repo debian-stretch main > **Note**: Docker does not provide packages for all architectures. To install docker on > a multi-architecture system, add an `[arch=...]` clause to the entry. Refer to the > [Debian Multiarch wiki](https://wiki.debian.org/Multiarch/HOWTO#Setting_up_apt_sources) > for details. 9. Save and close the file. 10. Update the `APT` package index. $ apt-get update 11. Verify that `APT` is pulling from the right repository. $ apt-cache policy docker-engine From now on when you run `apt-get upgrade`, `APT` pulls from the new apt repository. ## Install Docker Before installing Docker, make sure you have set your `APT` repository correctly as described in the prerequisites. 1. Update the `APT` package index. $ sudo apt-get update 2. Install Docker. $ sudo apt-get install docker-engine 5. Start the `docker` daemon. $ sudo service docker start 6. Verify `docker` is installed correctly. $ sudo docker run hello-world This command downloads a test image and runs it in a container. When the container runs, it prints an informational message. Then, it exits. ## Giving non-root access The `docker` daemon always runs as the `root` user and the `docker` daemon binds to a Unix socket instead of a TCP port. 
By default that Unix socket is owned by the user `root`, and so, by default, you can access it with `sudo`. If you (or your Docker installer) create a Unix group called `docker` and add users to it, then the `docker` daemon will make the ownership of the Unix socket read/writable by the `docker` group when the daemon starts. The `docker` daemon must always run as the root user, but if you run the `docker` client as a user in the `docker` group then you don't need to add `sudo` to all the client commands. From Docker 0.9.0 you can use the `-G` flag to specify an alternative group. > **Warning**: > The `docker` group (or the group specified with the `-G` flag) is > `root`-equivalent; see [*Docker Daemon Attack Surface*](../../security/security.md#docker-daemon-attack-surface) details. **Example:** # Add the docker group if it doesn't already exist. $ sudo groupadd docker # Add the connected user "${USER}" to the docker group. # Change the user name to match your preferred user. # You may have to logout and log back in again for # this to take effect. $ sudo gpasswd -a ${USER} docker # Restart the Docker daemon. $ sudo service docker restart ## Upgrade Docker To install the latest version of Docker with `apt-get`: $ apt-get upgrade docker-engine ## Uninstall To uninstall the Docker package: $ sudo apt-get purge docker-engine To uninstall the Docker package and dependencies that are no longer needed: $ sudo apt-get autoremove --purge docker-engine The above commands will not remove images, containers, volumes, or user created configuration files on your host. If you wish to delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker You must delete the user created configuration files manually. ## What next? Continue with the [User Guide](../../userguide/index.md). 
docker-1.10.3/docs/installation/linux/fedora.md000066400000000000000000000156541267010174400214320ustar00rootroot00000000000000 # Fedora Docker is supported on Fedora version 22 and 23. This page instructs you to install using Docker-managed release packages and installation mechanisms. Using these packages ensures you get the latest release of Docker. If you wish to install using Fedora-managed packages, consult your Fedora release documentation for information on Fedora's Docker support. ## Prerequisites Docker requires a 64-bit installation regardless of your Fedora version. Also, your kernel must be 3.10 at minimum. To check your current kernel version, open a terminal and use `uname -r` to display your kernel version: $ uname -r 3.19.5-100.fc21.x86_64 If your kernel is at a older version, you must update it. Finally, is it recommended that you fully update your system. Please keep in mind that your system should be fully patched to fix any potential kernel bugs. Any reported kernel bugs may have already been fixed on the latest kernel packages ## Install There are two ways to install Docker Engine. You can install with the `dnf` package manager. Or you can use `curl` with the `get.docker.com` site. This second method runs an installation script which also installs via the `dnf` package manager. ### Install with DNF 1. Log into your machine as a user with `sudo` or `root` privileges. 2. Make sure your existing dnf packages are up-to-date. $ sudo dnf update 3. Add the yum repo yourself. $ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF' [dockerrepo] name=Docker Repository baseurl=https://yum.dockerproject.org/repo/main/fedora/$releasever/ enabled=1 gpgcheck=1 gpgkey=https://yum.dockerproject.org/gpg EOF 4. Install the Docker package. $ sudo dnf install docker-engine 5. Start the Docker daemon. $ sudo systemctl start docker 6. Verify `docker` is installed correctly by running a test image in a container. 
$ sudo docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from hello-world a8219747be10: Pull complete 91c95931e552: Already exists hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security. Digest: sha256:aa03e5d0d5553b4c3473e89c8619cf79df368babd1.7.1cf5daeb82aab55838d Status: Downloaded newer image for hello-world:latest Hello from Docker. This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. (Assuming it was not already locally available.) 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal. To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash For more examples and ideas, visit: http://docs.docker.com/userguide/ ### Install with the script 1. Log into your machine as a user with `sudo` or `root` privileges. 2. Make sure your existing dnf packages are up-to-date. $ sudo dnf update 3. Run the Docker installation script. $ curl -fsSL https://get.docker.com/ | sh This script adds the `docker.repo` repository and installs Docker. 4. Start the Docker daemon. $ sudo systemctl start docker 5. Verify `docker` is installed correctly by running a test image in a container. $ sudo docker run hello-world ## Create a docker group The `docker` daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user `root` and other users can access it with `sudo`. For this reason, `docker` daemon always runs as the `root` user. 
To avoid having to use `sudo` when you use the `docker` command, create a Unix group called `docker` and add users to it. When the `docker` daemon starts, it makes the ownership of the Unix socket read/writable by the `docker` group. >**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack >Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: 1. Log into your system as a user with `sudo` privileges. 2. Create the `docker` group and add your user. `sudo usermod -aG docker your_username` 3. Log out and log back in. This ensures your user is running with the correct permissions. 4. Verify your work by running `docker` without `sudo`. $ docker run hello-world ## Start the docker daemon at boot To ensure Docker starts when you boot your system, do the following: $ sudo systemctl enable docker If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our Systemd article to learn how to [customize your Systemd Docker daemon options](../../admin/systemd.md). ## Running Docker with a manually-defined network If you manually configure your network using `systemd-network` with `systemd` version 219 or higher, containers you start with Docker may be unable to access your network. Beginning with version 220, the forwarding setting for a given network (`net.ipv4.conf..forwarding`) defaults to *off*. This setting prevents IP forwarding. It also conflicts with Docker which enables the `net.ipv4.conf.all.forwarding` setting within a container. To work around this, edit the `.network` file in `/usr/lib/systemd/network/` on your Docker host (ex: `/usr/lib/systemd/network/80-container-host0.network`) add the following block: ``` [Network] ... IPForward=kernel # OR IPForward=true ... 
``` This configuration allows IP forwarding from the container as expected. ## Uninstall You can uninstall the Docker software with `dnf`. 1. List the package you have installed. $ dnf list installed | grep docker dnf list installed | grep docker docker-engine.x86_64 1.7.1-0.1.fc21 @/docker-engine-1.7.1-0.1.fc21.el7.x86_64 2. Remove the package. $ sudo dnf -y remove docker-engine.x86_64 This command does not remove images, containers, volumes, or user-created configuration files on your host. 3. To delete all images, containers, and volumes, run the following command: $ rm -rf /var/lib/docker 4. Locate and delete any user-created configuration files. docker-1.10.3/docs/installation/linux/frugalware.md000066400000000000000000000034761267010174400223300ustar00rootroot00000000000000 # FrugalWare Installing on FrugalWare is handled via the official packages: - [lxc-docker i686](http://www.frugalware.org/packages/200141) - [lxc-docker x86_64](http://www.frugalware.org/packages/200130) The lxc-docker package will install the latest tagged version of Docker. ## Dependencies Docker depends on several packages which are specified as dependencies in the packages. The core dependencies are: - systemd - lvm2 - sqlite3 - libguestfs - lxc - iproute2 - bridge-utils ## Installation A simple $ sudo pacman -S lxc-docker is all that is needed. ## Starting Docker There is a systemd service unit created for Docker. To start Docker as service: $ sudo systemctl start lxc-docker To start on system boot: $ sudo systemctl enable lxc-docker ## Custom daemon options If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our systemd article to learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). 
## Uninstallation To uninstall the Docker package: $ sudo pacman -R lxc-docker To uninstall the Docker package and dependencies that are no longer needed: $ sudo pacman -Rns lxc-docker The above commands will not remove images, containers, volumes, or user created configuration files on your host. If you wish to delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker You must delete the user created configuration files manually. docker-1.10.3/docs/installation/linux/gentoolinux.md000066400000000000000000000107341267010174400225370ustar00rootroot00000000000000 # Gentoo Installing Docker on Gentoo Linux can be accomplished using one of two ways: the **official** way and the `docker-overlay` way. Official project page of [Gentoo Docker](https://wiki.gentoo.org/wiki/Project:Docker) team. ## Official way The first and recommended way if you are looking for a stable experience is to use the official `app-emulation/docker` package directly from the tree. If any issues arise from this ebuild including, missing kernel configuration flags or dependencies, open a bug on the Gentoo [Bugzilla](https://bugs.gentoo.org) assigned to `docker AT gentoo DOT org` or join and ask in the official [IRC](http://webchat.freenode.net?channels=%23gentoo-containers&uio=d4) channel on the Freenode network. ## docker-overlay way If you're looking for a `-bin` ebuild, a live ebuild, or a bleeding edge ebuild, use the provided overlay, [docker-overlay](https://github.com/tianon/docker-overlay) which can be added using `app-portage/layman`. The most accurate and up-to-date documentation for properly installing and using the overlay can be found in the [overlay](https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay). 
If any issues arise from this ebuild or the resulting binary, including and especially missing kernel configuration flags or dependencies, open an [issue](https://github.com/tianon/docker-overlay/issues) on the `docker-overlay` repository or ping `tianon` directly in the `#docker` IRC channel on the Freenode network. ## Installation ### Available USE flags | USE Flag | Default | Description | | ------------- |:-------:|:------------| | aufs | |Enables dependencies for the "aufs" graph driver, including necessary kernel flags.| | btrfs | |Enables dependencies for the "btrfs" graph driver, including necessary kernel flags.| | contrib | Yes |Install additional contributed scripts and components.| | device-mapper | Yes |Enables dependencies for the "devicemapper" graph driver, including necessary kernel flags.| | doc | |Add extra documentation (API, Javadoc, etc). It is recommended to enable per package instead of globally.| | vim-syntax | |Pulls in related vim syntax scripts.| | zsh-completion| |Enable zsh completion support.| USE flags are described in detail on [tianon's blog](https://tianon.github.io/post/2014/05/17/docker-on-gentoo.html). The package should properly pull in all the necessary dependencies and prompt for all necessary kernel options. $ sudo emerge -av app-emulation/docker >Note: Sometimes there is a disparity between the latest versions >in the official **Gentoo tree** and the **docker-overlay**. >Please be patient, and the latest version should propagate shortly. ## Starting Docker Ensure that you are running a kernel that includes all the necessary modules and configuration (and optionally for device-mapper and AUFS or Btrfs, depending on the storage driver you've decided to use). To use Docker, the `docker` daemon must be running as **root**. 
To use Docker as a **non-root** user, add yourself to the **docker** group by running the following command: $ sudo usermod -a -G docker user ### OpenRC To start the `docker` daemon: $ sudo /etc/init.d/docker start To start on system boot: $ sudo rc-update add docker default ### systemd To start the `docker` daemon: $ sudo systemctl start docker To start on system boot: $ sudo systemctl enable docker If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our systemd article to learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). ## Uninstallation To uninstall the Docker package: $ sudo emerge -cav app-emulation/docker To uninstall the Docker package and dependencies that are no longer needed: $ sudo emerge -C app-emulation/docker The above commands will not remove images, containers, volumes, or user created configuration files on your host. If you wish to delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker You must delete the user created configuration files manually. docker-1.10.3/docs/installation/linux/index.md000066400000000000000000000016211267010174400212660ustar00rootroot00000000000000 # Install Docker Engine on Linux Docker Engine is supported on several Linux distributions. Installation instructions are available for the following: * [Arch Linux](archlinux.md) * [CentOS](centos.md) * [CRUX Linux](cruxlinux.md) * [Debian](debian.md) * [Fedora](fedora.md) * [FrugalWare](frugalware.md) * [Gentoo](gentoolinux.md) * [Oracle Linux](oracle.md) * [Red Hat Enterprise Linux](rhel.md) * [openSUSE and SUSE Linux Enterprise](SUSE.md) * [Ubuntu](ubuntulinux.md) If your linux distribution is not listed above, don't give up yet. To try out Docker on a distribution that is not listed above, go here: [Installation from binaries](../binaries.md). 
docker-1.10.3/docs/installation/linux/oracle.md000066400000000000000000000153711267010174400214330ustar00rootroot00000000000000 # Oracle Linux Docker is supported Oracle Linux 6 and 7. You do not require an Oracle Linux Support subscription to install Docker on Oracle Linux. ## Prerequisites Due to current Docker limitations, Docker is only able to run only on the x86_64 architecture. Docker requires the use of the Unbreakable Enterprise Kernel Release 4 (4.1.12) or higher on Oracle Linux. This kernel supports the Docker btrfs storage engine on both Oracle Linux 6 and 7. ## Install > **Note**: The procedure below installs binaries built by Docker. These binaries > are not covered by Oracle Linux support. To ensure Oracle Linux support, please > follow the installation instructions provided in the > [Oracle Linux documentation](https://docs.oracle.com/en/operating-systems/?tab=2). > > The installation instructions for Oracle Linux 6 can be found in [Chapter 10 of > the Administrator's > Solutions Guide](https://docs.oracle.com/cd/E37670_01/E37355/html/ol_docker.html) > > The installation instructions for Oracle Linux 7 can be found in [Chapter 29 of > the Administrator's > Guide](https://docs.oracle.com/cd/E52668_01/E54669/html/ol7-docker.html) 1. Log into your machine as a user with `sudo` or `root` privileges. 2. Make sure your existing yum packages are up-to-date. $ sudo yum update 3. Add the yum repo yourself. For version 6: $ sudo tee /etc/yum.repos.d/docker.repo <<-EOF [dockerrepo] name=Docker Repository baseurl=https://yum.dockerproject.org/repo/main/oraclelinux/6 enabled=1 gpgcheck=1 gpgkey=https://yum.dockerproject.org/gpg EOF For version 7: $ cat >/etc/yum.repos.d/docker.repo <<-EOF [dockerrepo] name=Docker Repository baseurl=https://yum.dockerproject.org/repo/main/oraclelinux/7 enabled=1 gpgcheck=1 gpgkey=https://yum.dockerproject.org/gpg EOF 4. Install the Docker package. $ sudo yum install docker-engine 5. Start the Docker daemon. 
On Oracle Linux 6: $ sudo service docker start On Oracle Linux 7: $ sudo systemctl start docker.service 6. Verify `docker` is installed correctly by running a test image in a container. $ sudo docker run hello-world ## Optional configurations This section contains optional procedures for configuring your Oracle Linux to work better with Docker. * [Create a docker group](#create-a-docker-group) * [Configure Docker to start on boot](#configure-docker-to-start-on-boot) * [Use the btrfs storage engine](#use-the-btrfs-storage-engine) ### Create a Docker group The `docker` daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user `root` and other users can access it with `sudo`. For this reason, `docker` daemon always runs as the `root` user. To avoid having to use `sudo` when you use the `docker` command, create a Unix group called `docker` and add users to it. When the `docker` daemon starts, it makes the ownership of the Unix socket read/writable by the `docker` group. >**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack >Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: 1. Log into Oracle Linux as a user with `sudo` privileges. 2. Create the `docker` group and add your user. sudo usermod -aG docker username 3. Log out and log back in. This ensures your user is running with the correct permissions. 4. Verify your work by running `docker` without `sudo`. $ docker run hello-world If this fails with a message similar to this: Cannot connect to the Docker daemon. Is 'docker daemon' running on this host? Check that the `DOCKER_HOST` environment variable is not set for your shell. If it is, unset it. ### Configure Docker to start on boot You can configure the Docker daemon to start automatically at boot. 
On Oracle Linux 6: ``` $ sudo chkconfig docker on ``` On Oracle Linux 7: ``` $ sudo systemctl enable docker.service ``` If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our systemd article to learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). ### Use the btrfs storage engine Docker on Oracle Linux 6 and 7 supports the use of the btrfs storage engine. Before enabling btrfs support, ensure that `/var/lib/docker` is stored on a btrfs-based filesystem. Review [Chapter 5](http://docs.oracle.com/cd/E37670_01/E37355/html/ol_btrfs.html) of the [Oracle Linux Administrator's Solution Guide](http://docs.oracle.com/cd/E37670_01/E37355/html/index.html) for details on how to create and mount btrfs filesystems. To enable btrfs support on Oracle Linux: 1. Ensure that `/var/lib/docker` is on a btrfs filesystem. 2. Edit `/etc/sysconfig/docker` and add `-s btrfs` to the `OTHER_ARGS` field. 3. Restart the Docker daemon: ## Uninstallation To uninstall the Docker package: $ sudo yum -y remove docker-engine The above command will not remove images, containers, volumes, or user created configuration files on your host. If you wish to delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker You must delete the user created configuration files manually. ## Known issues ### Docker unmounts btrfs filesystem on shutdown If you're running Docker using the btrfs storage engine and you stop the Docker service, it will unmount the btrfs filesystem during the shutdown process. You should ensure the filesystem is mounted properly prior to restarting the Docker service. On Oracle Linux 7, you can use a `systemd.mount` definition and modify the Docker `systemd.service` to depend on the btrfs mount defined in systemd. 
### SElinux support on Oracle Linux 7 SElinux must be set to `Permissive` or `Disabled` in `/etc/sysconfig/selinux` to use the btrfs storage engine on Oracle Linux 7. ## Further issues? If you have a current Basic or Premier Support Subscription for Oracle Linux, you can report any issues you have with the installation of Docker via a Service Request at [My Oracle Support](http://support.oracle.com). If you do not have an Oracle Linux Support Subscription, you can use the [Oracle Linux Forum](https://community.oracle.com/community/server_%26_storage_systems/linux/oracle_linux) for community-based support. docker-1.10.3/docs/installation/linux/rhel.md000066400000000000000000000141541267010174400211160ustar00rootroot00000000000000 # Red Hat Enterprise Linux Docker is supported on Red Hat Enterprise Linux 7. This page instructs you to install using Docker-managed release packages and installation mechanisms. Using these packages ensures you get the latest release of Docker. If you wish to install using Red Hat-managed packages, consult your Red Hat release documentation for information on Red Hat's Docker support. ## Prerequisites Docker requires a 64-bit installation regardless of your Red Hat version. Docker requires that your kernel must be 3.10 at minimum, which Red Hat 7 runs. To check your current kernel version, open a terminal and use `uname -r` to display your kernel version: $ uname -r 3.10.0-229.el7.x86_64 Finally, is it recommended that you fully update your system. Please keep in mind that your system should be fully patched to fix any potential kernel bugs. Any reported kernel bugs may have already been fixed on the latest kernel packages. ## Install Docker Engine There are two ways to install Docker Engine. You can install with the `yum` package manager directly yourself. Or you can use `curl` with the `get.docker.com` site. This second method runs an installation script which installs via the `yum` package manager. ### Install with yum 1. 
Log into your machine as a user with `sudo` or `root` privileges. 2. Make sure your existing yum packages are up-to-date. $ sudo yum update 3. Add the yum repo yourself. $ sudo tee /etc/yum.repos.d/docker.repo <<-EOF [dockerrepo] name=Docker Repository baseurl=https://yum.dockerproject.org/repo/main/centos/7 enabled=1 gpgcheck=1 gpgkey=https://yum.dockerproject.org/gpg EOF 4. Install the Docker package. $ sudo yum install docker-engine 5. Start the Docker daemon. $ sudo service docker start 6. Verify `docker` is installed correctly by running a test image in a container. $ sudo docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from hello-world a8219747be10: Pull complete 91c95931e552: Already exists hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security. Digest: sha256:aa03e5d0d5553b4c3473e89c8619cf79df368babd1.7.1cf5daeb82aab55838d Status: Downloaded newer image for hello-world:latest Hello from Docker. This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. (Assuming it was not already locally available.) 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal. To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash For more examples and ideas, visit: http://docs.docker.com/userguide/ ### Install with the script You use the same installation procedure for all versions of CentOS. 1. Log into your machine as a user with `sudo` or `root` privileges. 2. 
Make sure your existing yum packages are up-to-date. $ sudo yum update 3. Run the Docker installation script. $ curl -fsSL https://get.docker.com/ | sh 4. Start the Docker daemon. $ sudo service docker start 5. Verify `docker` is installed correctly by running a test image in a container. $ sudo docker run hello-world ## Create a docker group The `docker` daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user `root` and other users can access it with `sudo`. For this reason, `docker` daemon always runs as the `root` user. To avoid having to use `sudo` when you use the `docker` command, create a Unix group called `docker` and add users to it. When the `docker` daemon starts, it makes the ownership of the Unix socket read/writable by the `docker` group. >**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack >Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: 1. Log into your machine as a user with `sudo` or `root` privileges. 2. Create the `docker` group and add your user. `sudo usermod -aG docker your_username` 3. Log out and log back in. This ensures your user is running with the correct permissions. 4. Verify your work by running `docker` without `sudo`. $ docker run hello-world ## Start the docker daemon at boot To ensure Docker starts when you boot your system, do the following: $ sudo chkconfig docker on If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our Systemd article to learn how to [customize your Systemd Docker daemon options](../../admin/systemd.md). ## Uninstall You can uninstall the Docker software with `yum`. 1. List the package you have installed. 
$ yum list installed | grep docker yum list installed | grep docker docker-engine.x86_64 1.7.1-0.1.el7@/docker-engine-1.7.1-0.1.el7.x86_64 2. Remove the package. $ sudo yum -y remove docker-engine.x86_64 This command does not remove images, containers, volumes, or user created configuration files on your host. 3. To delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker 4. Locate and delete any user-created configuration files. docker-1.10.3/docs/installation/linux/ubuntulinux.md000066400000000000000000000342311267010174400225640ustar00rootroot00000000000000 # Ubuntu Docker is supported on these Ubuntu operating systems: - Ubuntu Wily 15.10 - Ubuntu Trusty 14.04 (LTS) - Ubuntu Precise 12.04 (LTS) This page instructs you to install using Docker-managed release packages and installation mechanisms. Using these packages ensures you get the latest release of Docker. If you wish to install using Ubuntu-managed packages, consult your Ubuntu documentation. >**Note**: Ubuntu Utopic 14.10 and 15.04 exist in Docker's `APT` repository but > are no longer officially supported. ## Prerequisites Docker requires a 64-bit installation regardless of your Ubuntu version. Additionally, your kernel must be 3.10 at minimum. The latest 3.10 minor version or a newer maintained version are also acceptable. Kernels older than 3.10 lack some of the features required to run Docker containers. These older versions are known to have bugs which cause data loss and frequently panic under certain conditions. To check your current kernel version, open a terminal and use `uname -r` to display your kernel version: $ uname -r 3.11.0-15-generic >**Note**: If you previously installed Docker using `APT`, make sure you update your `APT` sources to the new Docker repository. ### Update your apt sources Docker's `APT` repository contains Docker 1.7.1 and higher. To set `APT` to use packages from the new repository: 1. 
If you haven't already done so, log into your Ubuntu instance as a privileged user. 2. Open a terminal window. 3. Update package information, ensure that APT works with the `https` method, and that CA certificates are installed. $ apt-get update $ apt-get install apt-transport-https ca-certificates 4. Add the new `GPG` key. $ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 5. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor. If the file doesn't exist, create it. 6. Remove any existing entries. 7. Add an entry for your Ubuntu operating system. The possible entries are: - On Ubuntu Precise 12.04 (LTS) deb https://apt.dockerproject.org/repo ubuntu-precise main - On Ubuntu Trusty 14.04 (LTS) deb https://apt.dockerproject.org/repo ubuntu-trusty main - Ubuntu Wily 15.10 deb https://apt.dockerproject.org/repo ubuntu-wily main > **Note**: Docker does not provide packages for all architectures. To install docker on > a multi-architecture system, add an `[arch=...]` clause to the entry. Refer to the > [Debian Multiarch wiki](https://wiki.debian.org/Multiarch/HOWTO#Setting_up_apt_sources) > for details. 8. Save and close the `/etc/apt/sources.list.d/docker.list` file. 9. Update the `APT` package index. $ apt-get update 10. Purge the old repo if it exists. $ apt-get purge lxc-docker 11. Verify that `APT` is pulling from the right repository. $ apt-cache policy docker-engine From now on when you run `apt-get upgrade`, `APT` pulls from the new repository. ### Prerequisites by Ubuntu Version - Ubuntu Wily 15.10 - Ubuntu Vivid 15.04 - Ubuntu Trusty 14.04 (LTS) For Ubuntu Trusty, Vivid, and Wily, it's recommended to install the `linux-image-extra` kernel package. The `linux-image-extra` package allows you use the `aufs` storage driver. To install the `linux-image-extra` package for your kernel version: 1. Open a terminal on your Ubuntu host. 2. Update your package manager. 
$ sudo apt-get update 3. Install the recommended package. $ sudo apt-get install linux-image-extra-$(uname -r) 4. Go ahead and install Docker. If you are installing on Ubuntu 14.04 or 12.04, `apparmor` is required. You can install it using: `apt-get install apparmor` #### Ubuntu Precise 12.04 (LTS) For Ubuntu Precise, Docker requires the 3.13 kernel version. If your kernel version is older than 3.13, you must upgrade it. Refer to this table to see which packages are required for your environment:
linux-image-generic-lts-trusty Generic Linux kernel image. This kernel has AUFS built in. This is required to run Docker.
linux-headers-generic-lts-trusty Allows packages such as ZFS and VirtualBox guest additions which depend on them. If you didn't install the headers for your existing kernel, then you can skip these headers for the"trusty" kernel. If you're unsure, you should include this package for safety.
xserver-xorg-lts-trusty Optional in non-graphical environments without Unity/Xorg. Required when running Docker on machine with a graphical environment.

To learn more about the reasons for these packages, read the installation instructions for backported kernels, specifically the LTS Enablement Stack — refer to note 5 under each version.
libgl1-mesa-glx-lts-trusty
  To upgrade your kernel and install the additional packages, do the following: 1. Open a terminal on your Ubuntu host. 2. Update your package manager. $ sudo apt-get update 3. Install both the required and optional packages. $ sudo apt-get install linux-image-generic-lts-trusty Depending on your environment, you may install more as described in the preceding table. 4. Reboot your host. $ sudo reboot 5. After your system reboots, go ahead and install Docker. ## Install Make sure you have installed the prerequisites for your Ubuntu version. Then, install Docker using the following: 1. Log into your Ubuntu installation as a user with `sudo` privileges. 2. Update your `APT` package index. $ sudo apt-get update 3. Install Docker. $ sudo apt-get install docker-engine 4. Start the `docker` daemon. $ sudo service docker start 5. Verify `docker` is installed correctly. $ sudo docker run hello-world This command downloads a test image and runs it in a container. When the container runs, it prints an informational message. Then, it exits. ## Optional configurations This section contains optional procedures for configuring your Ubuntu to work better with Docker. * [Create a docker group](#create-a-docker-group) * [Adjust memory and swap accounting](#adjust-memory-and-swap-accounting) * [Enable UFW forwarding](#enable-ufw-forwarding) * [Configure a DNS server for use by Docker](#configure-a-dns-server-for-use-by-docker) * [Configure Docker to start on boot](#configure-docker-to-start-on-boot) ### Create a Docker group The `docker` daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user `root` and other users can access it with `sudo`. For this reason, `docker` daemon always runs as the `root` user. To avoid having to use `sudo` when you use the `docker` command, create a Unix group called `docker` and add users to it. When the `docker` daemon starts, it makes the ownership of the Unix socket read/writable by the `docker` group. 
>**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack >Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: 1. Log into Ubuntu as a user with `sudo` privileges. This procedure assumes you log in as the `ubuntu` user. 3. Create the `docker` group and add your user. $ sudo usermod -aG docker ubuntu 3. Log out and log back in. This ensures your user is running with the correct permissions. 4. Verify your work by running `docker` without `sudo`. $ docker run hello-world If this fails with a message similar to this: Cannot connect to the Docker daemon. Is 'docker daemon' running on this host? Check that the `DOCKER_HOST` environment variable is not set for your shell. If it is, unset it. ### Adjust memory and swap accounting When users run Docker, they may see these messages when working with an image: WARNING: Your kernel does not support cgroup swap limit. WARNING: Your kernel does not support swap limit capabilities. Limitation discarded. To prevent these messages, enable memory and swap accounting on your system. Enabling memory and swap accounting does induce both a memory overhead and a performance degradation even when Docker is not in use. The memory overhead is about 1% of the total available memory. The performance degradation is roughly 10%. To enable memory and swap on system using GNU GRUB (GNU GRand Unified Bootloader), do the following: 1. Log into Ubuntu as a user with `sudo` privileges. 2. Edit the `/etc/default/grub` file. 3. Set the `GRUB_CMDLINE_LINUX` value as follows: GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" 4. Save and close the file. 5. Update GRUB. $ sudo update-grub 6. Reboot your system. 
### Enable UFW forwarding If you use [UFW (Uncomplicated Firewall)](https://help.ubuntu.com/community/UFW) on the same host as you run Docker, you'll need to do additional configuration. Docker uses a bridge to manage container networking. By default, UFW drops all forwarding traffic. As a result, for Docker to run when UFW is enabled, you must set UFW's forwarding policy appropriately. Also, UFW's default set of rules denies all incoming traffic. If you want to reach your containers from another host allow incoming connections on the Docker port. The Docker port defaults to `2376` if TLS is enabled or `2375` when it is not. If TLS is not enabled, communication is unencrypted. By default, Docker runs without TLS enabled. To configure UFW and allow incoming connections on the Docker port: 1. Log into Ubuntu as a user with `sudo` privileges. 2. Verify that UFW is installed and enabled. $ sudo ufw status 3. Open the `/etc/default/ufw` file for editing. $ sudo nano /etc/default/ufw 4. Set the `DEFAULT_FORWARD_POLICY` policy to: DEFAULT_FORWARD_POLICY="ACCEPT" 5. Save and close the file. 6. Reload UFW to use the new setting. $ sudo ufw reload 7. Allow incoming connections on the Docker port. $ sudo ufw allow 2375/tcp ### Configure a DNS server for use by Docker Systems that run Ubuntu or an Ubuntu derivative on the desktop typically use `127.0.0.1` as the default `nameserver` in `/etc/resolv.conf` file. The NetworkManager also sets up `dnsmasq` to use the real DNS servers of the connection and sets up `nameserver 127.0.0.1` in /`etc/resolv.conf`. When starting containers on desktop machines with these configurations, Docker users see this warning: WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : [8.8.8.8 8.8.4.4] The warning occurs because Docker containers can't use the local DNS nameserver. Instead, Docker defaults to using an external nameserver. 
To avoid this warning, you can specify a DNS server for use by Docker containers. Or, you can disable `dnsmasq` in NetworkManager. Though, disabling `dnsmasq` might make DNS resolution slower on some networks. The instructions below describe how to configure the Docker daemon running on Ubuntu 14.10 or below. Ubuntu 15.04 and above use `systemd` as the boot and service manager. Refer to [control and configure Docker with systemd](../../admin/systemd.md#custom-docker-daemon-options) to configure a daemon controlled by `systemd`. To specify a DNS server for use by Docker: 1. Log into Ubuntu as a user with `sudo` privileges. 2. Open the `/etc/default/docker` file for editing. $ sudo nano /etc/default/docker 3. Add a setting for Docker. DOCKER_OPTS="--dns 8.8.8.8" Replace `8.8.8.8` with a local DNS server such as `192.168.1.1`. You can also specify multiple DNS servers. Separated them with spaces, for example: --dns 8.8.8.8 --dns 192.168.1.1 >**Warning**: If you're doing this on a laptop which connects to various >networks, make sure to choose a public DNS server. 4. Save and close the file. 5. Restart the Docker daemon. $ sudo restart docker     **Or, as an alternative to the previous procedure,** disable `dnsmasq` in NetworkManager (this might slow your network). 1. Open the `/etc/NetworkManager/NetworkManager.conf` file for editing. $ sudo nano /etc/NetworkManager/NetworkManager.conf 2. Comment out the `dns=dnsmasq` line: dns=dnsmasq 3. Save and close the file. 4. Restart both the NetworkManager and Docker. $ sudo restart network-manager $ sudo restart docker ### Configure Docker to start on boot Ubuntu uses `systemd` as its boot and service manager `15.04` onwards and `upstart` for versions `14.10` and below. 
For `15.04` and up, to configure the `docker` daemon to start on boot, run $ sudo systemctl enable docker For `14.10` and below the above installation method automatically configures `upstart` to start the docker daemon on boot ## Upgrade Docker To install the latest version of Docker with `apt-get`: $ sudo apt-get upgrade docker-engine ## Uninstallation To uninstall the Docker package: $ sudo apt-get purge docker-engine To uninstall the Docker package and dependencies that are no longer needed: $ sudo apt-get autoremove --purge docker-engine The above commands will not remove images, containers, volumes, or user created configuration files on your host. If you wish to delete all images, containers, and volumes run the following command: $ rm -rf /var/lib/docker You must delete the user created configuration files manually. docker-1.10.3/docs/installation/mac.md000066400000000000000000000373751267010174400175770ustar00rootroot00000000000000 # Mac OS X > **Note**: This release of Docker deprecates the Boot2Docker command line in > favor of Docker Machine. Use the Docker Toolbox to install Docker Machine as > well as the other Docker tools. You install Docker using Docker Toolbox. Docker Toolbox includes the following Docker tools: * Docker Machine for running the `docker-machine` binary * Docker Engine for running the `docker` binary * Docker Compose for running the `docker-compose` binary * Kitematic, the Docker GUI * a shell preconfigured for a Docker command-line environment * Oracle VM VirtualBox Because the Docker daemon uses Linux-specific kernel features, you can't run Docker natively in OS X. Instead, you must use `docker-machine` to create and attach to a virtual machine (VM). This machine is a Linux VM that hosts Docker for you on your Mac. **Requirements** Your Mac must be running OS X 10.8 "Mountain Lion" or newer to install the Docker Toolbox. 
### Learn the key concepts before installing In a Docker installation on Linux, your physical machine is both the localhost and the Docker host. In networking, localhost means your computer. The Docker host is the computer on which the containers run. On a typical Linux installation, the Docker client, the Docker daemon, and any containers run directly on your localhost. This means you can address ports on a Docker container using standard localhost addressing such as `localhost:8000` or `0.0.0.0:8376`. ![Linux Architecture Diagram](images/linux_docker_host.svg) In an OS X installation, the `docker` daemon is running inside a Linux VM called `default`. The `default` is a lightweight Linux VM made specifically to run the Docker daemon on Mac OS X. The VM runs completely from RAM, is a small ~24MB download, and boots in approximately 5s. ![OSX Architecture Diagram](images/mac_docker_host.svg) In OS X, the Docker host address is the address of the Linux VM. When you start the VM with `docker-machine` it is assigned an IP address. When you start a container, the ports on a container map to ports on the VM. To see this in practice, work through the exercises on this page. ### Installation If you have VirtualBox running, you must shut it down before running the installer. 1. Go to the [Docker Toolbox](https://www.docker.com/toolbox) page. 2. Click the installer link to download. 3. Install Docker Toolbox by double-clicking the package or by right-clicking and choosing "Open" from the pop-up menu. The installer launches the "Install Docker Toolbox" dialog. ![Install Docker Toolbox](images/mac-welcome-page.png) 4. Press "Continue" to install the toolbox. The installer presents you with options to customize the standard installation. 
![Standard install](images/mac-page-two.png) By default, the standard Docker Toolbox installation: * installs binaries for the Docker tools in `/usr/local/bin` * makes these binaries available to all users * installs VirtualBox; or updates any existing installation Change these defaults by pressing "Customize" or "Change Install Location." 5. Press "Install" to perform the standard installation. The system prompts you for your password. ![Password prompt](images/mac-password-prompt.png) 6. Provide your password to continue with the installation. When it completes, the installer provides you with some information you can use to complete some common tasks. ![All finished](images/mac-page-finished.png) 7. Press "Close" to exit. ## Running a Docker Container To run a Docker container, you: * create a new (or start an existing) virtual machine that runs Docker. * switch your environment to your new VM * use the `docker` client to create, load, and manage containers Once you create a machine, you can reuse it as often as you like. Like any VirtualBox VM, it maintains its configuration between uses. There are two ways to use the installed tools, from the Docker Quickstart Terminal or [from your shell](#from-your-shell). ### From the Docker Quickstart Terminal 1. Open the "Applications" folder or the "Launchpad". 2. Find the Docker Quickstart Terminal and double-click to launch it. The application: * opens a terminal window * creates a `default` VM if it doesn't exists, and starts the VM after * points the terminal environment to this VM Once the launch completes, the Docker Quickstart Terminal reports: ![All finished](images/mac-success.png) Now, you can run `docker` commands. 3. Verify your setup succeeded by running the `hello-world` container. $ docker run hello-world Unable to find image 'hello-world:latest' locally 511136ea3c5a: Pull complete 31cbccb51277: Pull complete e45a5af57b00: Pull complete hello-world:latest: The image you are pulling has been verified. 
Important: image verification is a tech preview feature and should not be relied on to provide security. Status: Downloaded newer image for hello-world:latest Hello from Docker. This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. (Assuming it was not already locally available.) 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal. To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash For more examples and ideas, visit: http://docs.docker.com/userguide/ A more typical way to interact with the Docker tools is from your regular shell command line. ### From your shell This section assumes you are running a Bash shell. You may be running a different shell such as C Shell but the commands are the same. 1. Create a new Docker VM. $ docker-machine create --driver virtualbox default Creating VirtualBox VM... Creating SSH key... Starting VirtualBox VM... Starting VM... To see how to connect Docker to this machine, run: docker-machine env default This creates a new `default` VM in VirtualBox. The command also creates a machine configuration in the `~/.docker/machine/machines/default` directory. You only need to run the `create` command once. Then, you can use `docker-machine` to start, stop, query, and otherwise manage the VM from the command line. 2. List your available machines. $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM default * virtualbox Running tcp://192.168.99.101:2376 If you have previously installed the deprecated Boot2Docker application or run the Docker Quickstart Terminal, you may have a `dev` VM as well. 
When you created `default` VM, the `docker-machine` command provided instructions for learning how to connect the VM. 3. Get the environment commands for your new VM. $ docker-machine env default export DOCKER_TLS_VERIFY="1" export DOCKER_HOST="tcp://192.168.99.101:2376" export DOCKER_CERT_PATH="/Users/mary/.docker/machine/machines/default" export DOCKER_MACHINE_NAME="default" # Run this command to configure your shell: # eval "$(docker-machine env default)" 4. Connect your shell to the `default` machine. $ eval "$(docker-machine env default)" 5. Run the `hello-world` container to verify your setup. $ docker run hello-world ## Learn about your Toolbox installation Toolbox installs the Docker Engine binary, the Docker binary on your system. When you use the Docker Quickstart Terminal or create a `default` VM manually, Docker Machine updates the `~/.docker/machine/machines/default` folder to your system. This folder contains the configuration for the VM. You can create multiple VMs on your system with Docker Machine. Therefore, you may end up with multiple VM folders if you have more than one VM. To remove a VM, use the `docker-machine rm ` command. ## Migrate from Boot2Docker If you were using Boot2Docker previously, you have a pre-existing Docker `boot2docker-vm` VM on your local system. To allow Docker Machine to manage this older VM, you can migrate it. 1. Open a terminal or the Docker CLI on your system. 2. Type the following command. $ docker-machine create -d virtualbox --virtualbox-import-boot2docker-vm boot2docker-vm docker-vm 3. Use the `docker-machine` command to interact with the migrated VM. The `docker-machine` subcommands are slightly different than the `boot2docker` subcommands. 
The table below lists the equivalent `docker-machine` subcommand and what it does: | `boot2docker` | `docker-machine` | `docker-machine` description | |----------------|------------------|----------------------------------------------------------| | init | create | Creates a new docker host. | | up | start | Starts a stopped machine. | | ssh | ssh | Runs a command or interactive ssh session on the machine.| | save | - | Not applicable. | | down | stop | Stops a running machine. | | poweroff | stop | Stops a running machine. | | reset | restart | Restarts a running machine. | | config | inspect | Prints machine configuration details. | | status | ls | Lists all machines and their status. | | info | inspect | Displays a machine's details. | | ip | ip | Displays the machine's ip address. | | shellinit | env | Displays shell commands needed to configure your shell to interact with a machine | | delete | rm | Removes a machine. | | download | - | Not applicable. | | upgrade | upgrade | Upgrades a machine's Docker client to the latest stable release. | ## Example of Docker on Mac OS X Work through this section to try some practical container tasks on a VM. At this point, you should have a VM running and be connected to it through your shell. To verify this, run the following commands: $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM default * virtualbox Running tcp://192.168.99.100:2376 The `ACTIVE` machine, in this case `default`, is the one your environment is pointing to. ### Access container ports 1. Start an NGINX container on the DOCKER_HOST. $ docker run -d -P --name web nginx Normally, the `docker run` commands starts a container, runs it, and then exits. The `-d` flag keeps the container running in the background after the `docker run` command completes. The `-P` flag publishes exposed ports from the container to your local host; this lets you access them from your Mac. 2. 
Display your running container with `docker ps` command CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 5fb65ff765e9 nginx:latest "nginx -g 'daemon of 3 minutes ago Up 3 minutes 0.0.0.0:49156->443/tcp, 0.0.0.0:49157->80/tcp web At this point, you can see `nginx` is running as a daemon. 3. View just the container's ports. $ docker port web 443/tcp -> 0.0.0.0:49156 80/tcp -> 0.0.0.0:49157 This tells you that the `web` container's port `80` is mapped to port `49157` on your Docker host. 4. Enter the `http://localhost:49157` address (`localhost` is `0.0.0.0`) in your browser: ![Bad Address](images/bad_host.png) This didn't work. The reason it doesn't work is your `DOCKER_HOST` address is not the localhost address (0.0.0.0) but is instead the address of the your Docker VM. 5. Get the address of the `default` VM. $ docker-machine ip default 192.168.59.103 6. Enter the `http://192.168.59.103:49157` address in your browser: ![Correct Addressing](images/good_host.png) Success! 7. To stop and then remove your running `nginx` container, do the following: $ docker stop web $ docker rm web ### Mount a volume on the container When you start a container it automatically shares your `/Users/username` directory with the VM. You can use this share point to mount directories onto your container. The next exercise demonstrates how to do this. 1. Change to your user `$HOME` directory. $ cd $HOME 2. Make a new `site` directory. $ mkdir site 3. Change into the `site` directory. $ cd site 4. Create a new `index.html` file. $ echo "my new site" > index.html 5. Start a new `nginx` container and replace the `html` folder with your `site` directory. $ docker run -d -P -v $HOME/site:/usr/share/nginx/html \ --name mysite nginx 6. Get the `mysite` container's port. $ docker port mysite 80/tcp -> 0.0.0.0:49166 443/tcp -> 0.0.0.0:49165 7. Open the site in a browser: ![My site page](images/newsite_view.png) 8. Try adding a page to your `$HOME/site` in real time. 
$ echo "This is cool" > cool.html 9. Open the new page in the browser. ![Cool page](images/cool_view.png) 10. Stop and then remove your running `mysite` container. $ docker stop mysite $ docker rm mysite ## Upgrade Docker Toolbox To upgrade Docker Toolbox, download an re-run [the Docker Toolbox installer](https://docker.com/toolbox/). ## Uninstall Docker Toolbox To uninstall, do the following: 1. List your machines. $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM dev * virtualbox Running tcp://192.168.99.100:2376 my-docker-machine virtualbox Stopped default virtualbox Stopped 2. Remove each machine. $ docker-machine rm dev Successfully removed dev Removing a machine deletes its VM from VirtualBox and from the `~/.docker/machine/machines` directory. 3. Remove the Docker Quickstart Terminal and Kitematic from your "Applications" folder. 4. Remove the `docker`, `docker-compose`, and `docker-machine` commands from the `/usr/local/bin` folder. $ rm /usr/local/bin/docker 5. Delete the `~/.docker` folder from your system. ## Learning more Use `docker-machine help` to list the full command line reference for Docker Machine. For more information about using SSH or SCP to access a VM, see [the Docker Machine documentation](https://docs.docker.com/machine/). You can continue with the [Docker User Guide](../userguide/index.md). If you are interested in using the Kitematic GUI, see the [Kitematic user guide](https://docs.docker.com/kitematic/userguide/). docker-1.10.3/docs/installation/windows.md000066400000000000000000000355731267010174400205270ustar00rootroot00000000000000 # Windows > **Note**: This release of Docker deprecates the Boot2Docker command line in > favor of Docker Machine. Use the Docker Toolbox to install Docker Machine as > well as the other Docker tools. You install Docker using Docker Toolbox. 
Docker Toolbox includes the following Docker tools: * Docker Machine for running the `docker-machine` binary * Docker Engine for running the `docker` binary * Kitematic, the Docker GUI * a shell preconfigured for a Docker command-line environment * Oracle VM VirtualBox Because the Docker daemon uses Linux-specific kernel features, you can't run Docker natively in Windows. Instead, you must use `docker-machine` to create and attach to a Docker VM on your machine. This VM hosts Docker for you on your Windows system. The virtual machine runs a lightweight Linux distribution made specifically to run the Docker daemon. The VirtualBox VM runs completely from RAM, is a small ~24MB download, and boots in approximately 5s. ## Requirements To run Docker, your machine must have a 64-bit operating system running Windows 7 or higher. Additionally, you must make sure that virtualization is enabled on your machine. To verify your machine meets these requirements, do the following: 1. Right click the Windows Start Menu and choose **System**. ![Which version](images/win_ver.png) If you are using an unsupported version of Windows, you should consider upgrading your operating system in order to try out Docker. 2. Make sure your CPU supports [virtualization technology](https://en.wikipedia.org/wiki/X86_virtualization) and virtualization support is enabled in BIOS and recognized by Windows. #### For Windows 8, 8.1 or 10 Choose **Start > Task Manager**. On Windows 10, click more details. Navigate to the **Performance** tab. Under **CPU** you should see the following: ![Release page](images/virtualization.png) If virtualization is not enabled on your system, follow the manufacturer's instructions for enabling it. #### For Windows 7 Run the
Microsoft® Hardware-Assisted Virtualization Detection Tool and follow the on-screen instructions. 3. Verify your Windows OS is 64-bit (x64) How you do this verification depends on your Windows version. For details, see the Windows article [How to determine whether a computer is running a 32-bit version or 64-bit version of the Windows operating system](https://support.microsoft.com/en-us/kb/827218). > **Note**: If you have Docker hosts running and you don't wish to do a Docker Toolbox installation, you can install the `docker.exe` using the *unofficial* Windows package manager Chocolately. For information on how to do this, see [Docker package on Chocolatey](http://chocolatey.org/packages/docker). ### Learn the key concepts before installing In a Docker installation on Linux, your machine is both the localhost and the Docker host. In networking, localhost means your computer. The Docker host is the machine on which the containers run. On a typical Linux installation, the Docker client, the Docker daemon, and any containers run directly on your localhost. This means you can address ports on a Docker container using standard localhost addressing such as `localhost:8000` or `0.0.0.0:8376`. ![Linux Architecture Diagram](images/linux_docker_host.svg) In an Windows installation, the `docker` daemon is running inside a Linux virtual machine. You use the Windows Docker client to talk to the Docker host VM. Your Docker containers run inside this host. ![Windows Architecture Diagram](images/win_docker_host.svg) In Windows, the Docker host address is the address of the Linux VM. When you start the VM with `docker-machine` it is assigned an IP address. When you start a container, the ports on a container map to ports on the VM. To see this in practice, work through the exercises on this page. ### Installation If you have VirtualBox running, you must shut it down before running the installer. 1. Go to the [Docker Toolbox](https://www.docker.com/toolbox) page. 2. 
Click the installer link to download. 3. Install Docker Toolbox by double-clicking the installer. The installer launches the "Setup - Docker Toolbox" dialog. ![Install Docker Toolbox](images/win-welcome.png) 4. Press "Next" to install the toolbox. The installer presents you with options to customize the standard installation. By default, the standard Docker Toolbox installation: * installs executables for the Docker tools in `C:\Program Files\Docker Toolbox` * install VirtualBox; or updates any existing installation * adds a Docker Inc. folder to your program shortcuts * updates your `PATH` environment variable * adds desktop icons for the Docker Quickstart Terminal and Kitematic This installation assumes the defaults are acceptable. 5. Press "Next" until you reach the "Ready to Install" page. The system prompts you for your password. ![Install](images/win-page-6.png) 6. Press "Install" to continue with the installation. When it completes, the installer provides you with some information you can use to complete some common tasks. ![All finished](images/windows-finish.png) 7. Press "Finish" to exit. ## Running a Docker Container To run a Docker container, you: * create a new (or start an existing) Docker virtual machine * switch your environment to your new VM * use the `docker` client to create, load, and manage containers Once you create a machine, you can reuse it as often as you like. Like any VirtualBox VM, it maintains its configuration between uses. There are several ways to use the installed tools, from the Docker Quickstart Terminal or [from your shell](#from-your-shell). ### Using the Docker Quickstart Terminal 1. Find the Docker Quickstart Terminal icon on your Desktop and double-click to launch it. The application: * opens a terminal window * creates a `default` VM if it doesn't exist, and starts the VM after * points the terminal environment to this VM Once the launch completes, you can run `docker` commands. 3. 
Verify your setup succeeded by running the `hello-world` container. $ docker run hello-world Unable to find image 'hello-world:latest' locally 511136ea3c5a: Pull complete 31cbccb51277: Pull complete e45a5af57b00: Pull complete hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security. Status: Downloaded newer image for hello-world:latest Hello from Docker. This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. (Assuming it was not already locally available.) 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal. To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash For more examples and ideas, visit: http://docs.docker.com/userguide/ ### Using Docker from Windows Command Prompt (cmd.exe) 1. Launch a Windows Command Prompt (cmd.exe). The `docker-machine` command requires `ssh.exe` in your `PATH` environment variable. This `.exe` is in the MsysGit `bin` folder. 2. Add this to the `%PATH%` environment variable by running: set PATH=%PATH%;"c:\Program Files (x86)\Git\bin" 3. Create a new Docker VM. docker-machine create --driver virtualbox my-default Creating VirtualBox VM... Creating SSH key... Starting VirtualBox VM... Starting VM... To see how to connect Docker to this machine, run: docker-machine env my-default The command also creates a machine configuration in the `C:\USERS\USERNAME\.docker\machine\machines` directory. You only need to run the `create` command once. 
Then, you can use `docker-machine` to start, stop, query, and otherwise manage the VM from the command line. 4. List your available machines. C:\Users\mary> docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM my-default * virtualbox Running tcp://192.168.99.101:2376 If you have previously installed the deprecated Boot2Docker application or run the Docker Quickstart Terminal, you may have a `dev` VM as well. 5. Get the environment commands for your new VM. C:\Users\mary> docker-machine env --shell cmd my-default 6. Connect your shell to the `my-default` machine. C:\Users\mary> eval "$(docker-machine env my-default)" 7. Run the `hello-world` container to verify your setup. C:\Users\mary> docker run hello-world ### Using Docker from PowerShell 1. Launch a Windows PowerShell window. 2. Add `ssh.exe` to your PATH: PS C:\Users\mary> $Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin" 3. Create a new Docker VM. PS C:\Users\mary> docker-machine create --driver virtualbox my-default 4. List your available machines. C:\Users\mary> docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM my-default * virtualbox Running tcp://192.168.99.101:2376 5. Get the environment commands for your new VM. C:\Users\mary> docker-machine env --shell powershell my-default 6. Connect your shell to the `my-default` machine. C:\Users\mary> eval "$(docker-machine env my-default)" 7. Run the `hello-world` container to verify your setup. C:\Users\mary> docker run hello-world ## Learn about your Toolbox installation Toolbox installs the Docker Engine binary in the `C:\Program Files\Docker Toolbox` directory. When you use the Docker Quickstart Terminal or create a `default` VM manually, Docker Machine updates the `C:\USERS\USERNAME\.docker\machine\machines\default` folder to your system. This folder contains the configuration for the VM. You can create multiple VMs on your system with Docker Machine. Therefore, you may end up with multiple VM folders if you have created more than one VM. 
To remove a VM, use the `docker-machine rm ` command. ## Migrate from Boot2Docker If you were using Boot2Docker previously, you have a pre-existing Docker `boot2docker-vm` VM on your local system. To allow Docker Machine to manage this older VM, you can migrate it. 1. Open a terminal or the Docker CLI on your system. 2. Type the following command. $ docker-machine create -d virtualbox --virtualbox-import-boot2docker-vm boot2docker-vm docker-vm 3. Use the `docker-machine` command to interact with the migrated VM. The `docker-machine` subcommands are slightly different than the `boot2docker` subcommands. The table below lists the equivalent `docker-machine` subcommand and what it does: | `boot2docker` | `docker-machine` | `docker-machine` description | |----------------|------------------|----------------------------------------------------------| | init | create | Creates a new docker host. | | up | start | Starts a stopped machine. | | ssh | ssh | Runs a command or interactive ssh session on the machine.| | save | - | Not applicable. | | down | stop | Stops a running machine. | | poweroff | stop | Stops a running machine. | | reset | restart | Restarts a running machine. | | config | inspect | Prints machine configuration details. | | status | ls | Lists all machines and their status. | | info | inspect | Displays a machine's details. | | ip | ip | Displays the machine's ip address. | | shellinit | env | Displays shell commands needed to configure your shell to interact with a machine | | delete | rm | Removes a machine. | | download | - | Not applicable. | | upgrade | upgrade | Upgrades a machine's Docker client to the latest stable release. | ## Upgrade Docker Toolbox To upgrade Docker Toolbox, download and re-run [the Docker Toolbox installer](https://www.docker.com/toolbox). ## Container port redirection If you are curious, the username for the Docker default VM is `docker` and the password is `tcuser`. 
The latest version of `docker-machine` sets up a host only network adaptor which provides access to the container's ports. If you run a container with a published port: $ docker run --rm -i -t -p 80:80 nginx Then you should be able to access that nginx server using the IP address reported to you using: $ docker-machine ip Typically, the IP is 192.168.59.103, but it could get changed by VirtualBox's DHCP implementation. ## Login with PUTTY instead of using the CMD Docker Machine generates and uses the public/private key pair in your `%USERPROFILE%\.ssh` directory so to log in you need to use the private key from this same directory. The private key needs to be converted into the format PuTTY uses. You can do this with [puttygen](http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html): 1. Open `puttygen.exe` and load ("File"->"Load" menu) the private key from (you may need to change to the `All Files (*.*)` filter) %USERPROFILE%\.docker\machine\machines\\id_rsa 2. Click "Save Private Key". 3. Use the saved file to login with PuTTY using `docker@127.0.0.1:2022`. ## Uninstallation You can uninstall Docker Toolbox using Window's standard process for removing programs. This process does not remove the `docker-install.exe` file. You must delete that file yourself. ## Learn more You can continue with the [Docker User Guide](../userguide/index.md). If you are interested in using the Kitematic GUI, see the [Kitematic user guide](https://docs.docker.com/kitematic/userguide/). docker-1.10.3/docs/migration.md000066400000000000000000000075251267010174400163210ustar00rootroot00000000000000 # Migrate to Engine 1.10 Starting from version 1.10 of Docker Engine, we completely change the way image data is addressed on disk. Previously, every image and layer used a randomly assigned UUID. In 1.10 we implemented a content addressable method using an ID, based on a secure hash of the image and layer data. 
The new method gives users more security, provides a built-in way to avoid ID collisions and guarantee data integrity after pull, push, load, or save. It also brings better sharing of layers by allowing many images to freely share their layers even if they didn’t come from the same build. Addressing images by their content also lets us more easily detect if something has already been downloaded. Because we have separated images and layers, you don’t have to pull the configurations for every image that was part of the original build chain. We also don’t need to create layers for the build instructions that didn’t modify the filesystem. Content addressability is the foundation for the new distribution features. The image pull and push code has been reworked to use a download/upload manager concept that makes pushing and pulling images much more stable and mitigate any parallel request issues. The download manager also brings retries on failed downloads and better prioritization for concurrent downloads. We are also introducing a new manifest format that is built on top of the content addressable base. It directly references the content addressable image configuration and layer checksums. The new manifest format also makes it possible for a manifest list to be used for targeting multiple architectures/platforms. Moving to the new manifest format will be completely transparent. ## Preparing for upgrade To make your current images accessible to the new model we have to migrate them to content addressable storage. This means calculating the secure checksums for your current data. All your current images, tags and containers are automatically migrated to the new foundation the first time you start Docker Engine 1.10. Before loading your container, the daemon will calculate all needed checksums for your current data, and after it has completed, all your images and tags will have brand new secure IDs. 
**While this is simple operation, calculating SHA256 checksums for your files can take time if you have lots of image data.** On average you should assume that migrator can process data at a speed of 100MB/s. During this time your Docker daemon won’t be ready to respond to requests. ## Minimizing migration time If you can accept this one time hit, then upgrading Docker Engine and restarting the daemon will transparently migrate your images. However, if you want to minimize the daemon’s downtime, a migration utility can be run while your old daemon is still running. This tool will find all your current images and calculate the checksums for them. After you upgrade and restart the daemon, the checksum data of the migrated images will already exist, freeing the daemon from that computation work. If new images appeared between the migration and the upgrade, those will be processed at time of upgrade to 1.10. [You can download the migration tool here.](https://github.com/docker/v1.10-migrator/releases) The migration tool can also be run as a Docker image. While running the migrator image you need to expose your Docker data directory to the container. If you use the default path then you would run: $ docker run --rm -v /var/lib/docker:/var/lib/docker docker/v1.10-migrator If you use the devicemapper storage driver, you also need to pass the flag `--privileged` to give the tool access to your storage devices. docker-1.10.3/docs/quickstart.md000066400000000000000000000151751267010174400165220ustar00rootroot00000000000000 # Quickstart Docker Engine This quickstart assumes you have a working installation of Docker Engine. To verify Engine is installed, use the following command: # Check that you have a working install $ docker info If you get `docker: command not found` or something like `/var/lib/docker/repositories: permission denied` you may have an incomplete Docker installation or insufficient privileges to access Engine on your machine. 
With the default installation of Engine `docker` commands need to be run by a user that is in the `docker` group or by the `root` user. Depending on your Engine system configuration, you may be required to preface each `docker` command with `sudo`. One way to avoid having to use `sudo` with the `docker` commands is to create a Unix group called `docker` and add users that will be entering `docker` commands to the 'docker' group. For more information about installing Docker Engine or `sudo` configuration, refer to the [installation](installation/index.md) instructions for your operating system. ## Download a pre-built image # Download an ubuntu image $ docker pull ubuntu This will find the `ubuntu` image by name on [*Docker Hub*](userguide/containers/dockerrepos.md#searching-for-images) and download it from [Docker Hub](https://hub.docker.com) to a local image cache. > **Note**: > When the image is successfully downloaded, you see a 12 character > hash `539c0211cd76: Download complete` which is the > short form of the image ID. These short image IDs are the first 12 > characters of the full image ID - which can be found using > `docker inspect` or `docker images --no-trunc=true`. ## Running an interactive shell To run an interactive shell in the Ubuntu image: $ docker run -i -t ubuntu /bin/bash The `-i` flag starts an interactive container. The `-t` flag creates a pseudo-TTY that attaches `stdin` and `stdout`. To detach the `tty` without exiting the shell, use the escape sequence `Ctrl-p` + `Ctrl-q`. The container will continue to exist in a stopped state once exited. To list all containers, stopped and running, use the `docker ps -a` command. ## Bind Docker to another host/port or a Unix socket > **Warning**: > Changing the default `docker` daemon binding to a > TCP port or Unix *docker* user group will increase your security risks > by allowing non-root users to gain *root* access on the host. Make sure > you control access to `docker`. 
If you are binding > to a TCP port, anyone with access to that port has full Docker access; > so it is not advisable on an open network. With `-H` it is possible to make the Docker daemon listen on a specific IP and port. By default, it will listen on `unix:///var/run/docker.sock` to allow only local connections by the *root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP to give access to everybody, but that is **not recommended** because then it is trivial for someone to gain root access to the host where the daemon is running. Similarly, the Docker client can use `-H` to connect to a custom port. The Docker client will default to connecting to `unix:///var/run/docker.sock` on Linux, and `tcp://127.0.0.1:2376` on Windows. `-H` accepts host and port assignment in the following format: tcp://[host]:[port][path] or unix://path For example: - `tcp://` -> TCP connection to `127.0.0.1` on either port `2376` when TLS encryption is on, or port `2375` when communication is in plain text. - `tcp://host:2375` -> TCP connection on host:2375 - `tcp://host:2375/path` -> TCP connection on host:2375 and prepend path to all requests - `unix://path/to/socket` -> Unix socket located at `path/to/socket` `-H`, when empty, will default to the same value as when no `-H` was passed in. 
`-H` also accepts short form for TCP bindings: `host:` or `host:port` or `:port` Run Docker in daemon mode: $ sudo /docker daemon -H 0.0.0.0:5555 & Download an `ubuntu` image: $ docker -H :5555 pull ubuntu You can use multiple `-H`, for example, if you want to listen on both TCP and a Unix socket # Run docker in daemon mode $ sudo /docker daemon -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock & # Download an ubuntu image, use default Unix socket $ docker pull ubuntu # OR use the TCP port $ docker -H tcp://127.0.0.1:2375 pull ubuntu ## Starting a long-running worker process # Start a very useful long-running process $ JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") # Collect the output of the job so far $ docker logs $JOB # Kill the job $ docker kill $JOB ## Listing containers $ docker ps # Lists only running containers $ docker ps -a # Lists all containers ## Controlling containers # Start a new container $ JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") # Stop the container $ docker stop $JOB # Start the container $ docker start $JOB # Restart the container $ docker restart $JOB # SIGKILL a container $ docker kill $JOB # Remove a container $ docker stop $JOB # Container must be stopped to remove it $ docker rm $JOB ## Bind a service on a TCP port # Bind port 4444 of this container, and tell netcat to listen on it $ JOB=$(docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444) # Which public port is NATed to my container? $ PORT=$(docker port $JOB 4444 | awk -F: '{ print $2 }') # Connect to the public port $ echo hello world | nc 127.0.0.1 $PORT # Verify that the network connection worked $ echo "Daemon received: $(docker logs $JOB)" ## Committing (saving) a container state Save your containers state to an image, so the state can be re-used. 
When you commit your container, Docker only stores the diff (difference) between the source image and the current state of the container's image. To list images you already have, use the `docker images` command. # Commit your container to a new named image $ docker commit # List your images $ docker images You now have an image state from which you can create new instances. ## Where to go next * Work your way through the [Docker User Guide](userguide/index.md) * Read more about [*Share Images via Repositories*](userguide/containers/dockerrepos.md) * Review [*Command Line*](reference/commandline/cli.md) docker-1.10.3/docs/reference/000077500000000000000000000000001267010174400157335ustar00rootroot00000000000000docker-1.10.3/docs/reference/api/000077500000000000000000000000001267010174400165045ustar00rootroot00000000000000docker-1.10.3/docs/reference/api/README.md000066400000000000000000000010531267010174400177620ustar00rootroot00000000000000 This directory holds the authoritative specifications of APIs defined and implemented by Docker. 
Currently this includes: * The remote API by which a docker node can be queried over HTTP * The registry API by which a docker node can download and upload images for storage and sharing * The index search API by which a docker node can search the public index for images to download * The docker.io OAuth and accounts API which 3rd party services can use to access account information docker-1.10.3/docs/reference/api/_static/000077500000000000000000000000001267010174400201325ustar00rootroot00000000000000docker-1.10.3/docs/reference/api/_static/io_oauth_authorization_page.png000066400000000000000000001150421267010174400264260ustar00rootroot00000000000000PNG  IHDR2tPLTELiq999hhhХ...777?jx@kxBkx2guYrZȥSG\ڶVᥥ뽾ܿ񩽼޶սitϥ̶򈃃}ywƳݛύݾyۀ{SÔht~ǰ;iqirټPYm,DN핂trmtIFH^js怢zdaeĸ|[ ylbshq=P\0ϰЫg-<Ķ804[SX8I_'w'lV%PjLcrYN!+{iKvS5d> a_lPz.Пu9N0 ^3Z[dI:2[yTrj MlJ}4;Zk%:tRNS"  W'+1<.5@D9HOLSkpfv|~b\_VySAEؿ`IDATxoܶſGm W 1PAx`{H/ P4hY~!9Bh!B!B!yzIBh7!QB/fU!?وG 9w_B^([\·{Ѝ|x <ΉpnAm_ecGxIȳ$oGm8j jxBrF6AeA !Ynpa΂9 !9'lՃ`~yv#nr!Q6T.|Ѓ!^ #ZJ_C9*1Jk҂Aߪ !no8 /yӮyǟB~|}[#$Ì טPyjN]g1tz@]PBZ뛄*h[9-2ʕn+3zPV?| V0. x0_hk?^XCåO>Xra¤KBX^uIHTmᨫP:-ws˕î$ʡPm ܨ[[t:\Bt"( O_Zvaل9VUoofbAj3Ht-j\ZFCB]Vʕ.S -2kQ4-շn0>hBu*f~q3\qB$ĊCB!)lsȈ+q!$d&B.'${L³!4&ccjXB1F@r)L~ʹA;+WnQR G و[Z!Z"[d -:iwMr: KA8`TGJ9C$f8 Wk4O>.IBg:u&B2)Q1 q9aB%X7<Bq騗6C-imrHRƛNEnEnE~"_hו* _"?o+HU-_~lp~QX2a΃@]7qGQpHQ.Y\0qAɂ{d~F J"d|jBK_[by|LAJ(fI(k+熡/"E9X *|uaXd-i*g.j0ᾩ} Bmh뫫n.c?q7iԂ\6]J, 6k|Ϙ- BIHSB-B@ -T)ʁ\WHWN -:,j}[7WWu9 zF'ܰ@PPumu&P EN+[$ݢ01 #8r- 5PJGbeug &:AP S#Qi#BxgQ.?w>) Ϫ0AT6Jr@QO:Z{+[>xiqnaHBRa(R"ƀ/ $)00HV|ٔ TZ$6En"rR,E"AKj:>w<xXjxC$Y Oa(2. 
SYlSړ %\ rL;E@<z1*#rP{) PH)(ȋn({?X`esɩFd"Q:yڢ Nl8}r#Y>(j:ciTC␎DB2wad_6h4H4̅b $:e _ _rMiZDyI666W0fqeҪ 1@x ~,#,M9RvF@&(Dl[:;|<ج},᫐PyM¸sp]BdrcD#LK̯F!k EeA≑UB+n"HQ!J"iS L '-7刃V;bfpZ-ӖQڹ \jݡi%`aJ1n}@G U nSna Y&r1@񑋷P՞#ge3ͬYyCC@wط0*DQ^#,6###,ɩ+,ճeZ՛ɍkv$ gyc}w3'}=Rs9 L&|D?2wTtL!^≡vB` q#;2?;L7o=8VX|+;H|>Erx++!,9xp$-3]9vHcm:Mںo6X7wFj+ɢ2t<]wIԯi?Zl^d/tCBGV"5pVJRILd1CnMw''a\Xg`Me"Mײ\3"[6mZ !^~ݸ -1rV$֑#Z k;8 86 eq@# @#SY:9K݊ nRuD̨@X w?yrβ[$<,UIѝ!~(OmOD4U]9195yRLwq䩑BdӪ:uj[b4Ӿi[٤LPr=\ܸX}+x =z_?׻ˉ>K3SKBqR{r"4>~"ѧ'%gR؎x5)e4pݶZo5M+b6ض?.z$'z=Xȣ`-86)`N]/S B1"B+mX Q@BK]\T @Ä@*9;6r:(&r˕zi=:"NBJx  wNBv^OQjc̙SZW_=1udcu"ٳr#$O[-`\m |>d?uRg]Ԗ~i:7ҙLn>yOr|酕y|vE=fҔ)?Ksܽ0eޞ4r/z\fT~HuɰVt},ןK=w}u\80omMFMU"ƨR|`5$#MXv@0x^ (8n!1P X R"m|ȩG$4qGuA5|.Jo nIm/o?TD1ZVpmV\gzU$kY_|?D_^ex߾\7>{QM+wֿ~0{ỿ˯q]z;79=/ń{ﮔͤg1`Zd6I28;UssyP>ȑNnHLTA48ҩtp:1'pvN*1"evFa] @ʆZfh#ogQRe۫k1I$) ?y3eK*W&yZg񏥁TS0}5"},gϾ$ ߜyoXqMy}~?ui|wfoxSχ" &z7^xx!Ṿ^ԫ|7?wh-LhUlk䳇_B!_%5;>cHC$\|ʽpKPr'ٖ6J"zь.hY Q\il:Zp P ;]ֻF@-/VH jOeC;"@ =r)N§*Mo풄(_"|',Z+K3M;ΘUCpIO)VA5kD?ٳ/|?Lj+}yk+o<3?ЗF7>xna}ps^׾z8}ovs[W޹sg7Nn݇gf{;^S/e+fI[IJBí? ZDv5 CTCՀ7%7{'>n":E,`Z d` pU(zPH2 [ bOuD9j2CLS]*tP}%4J9Ihf|28ԑ!j҇QGz/֩Q'Iҥj2rοO.Ujg˳9}e$ÙLsaxWzü7<;NB}K0dOjs)Xls~{e<ÚCN-Ȥy:!" |m\ywS 6 'r6f ,2b}&9 $2 @vnrKĞQc99Wx299yD(OzD'!e<$TF>BpQ#Nr'qj4(Xu}-%" {!$D_OSc\P'2Ww=idMf4ߗ]snjU9=tŕ{|=fFO>ޣ hd >Qu\5y5v 6\He}+؍9؅q e,}Am0M&B@>tՃJurث"i^?MC e0 <ͮ7gppPlԢGIVJVt6|$MۢDr0."З~&^, D,W%+'^q-- Ja%+7zrE ڛ?iw +Eyt/ЧQ\ͺ:aRѲ^L[Wd҄qreZy2'/Eucu2wuCYfVN;h\rB^,.~\]5j0$Idڰ,^l=n6IW1=Jۉ02"̇e,5N 15ZAL`ƭxҕEݶe{}wf6{$2" Sр'peYy+! #+KBįҋP`Z84&Җ#"–j[.WgR_9B"iqQ5E: :vWp \D$Q=3DP BDŽN*%Y9ثᕋ \3r۷ٺEKty0H{$|t+  wvh47^va6\*]g9h!r~yK%-e=|j$# Y +-X89saNozώM Zr0XH[mIQahtD-΃[Dԍ#cY"XƂ޺Eݸ-ھr[$ϣvtgHh$ y_w!<,䵐C]PWm1غ+aGGpˣӣ8z*{ҰfH<*켇ҭQR< XM(h 2. 4T[Var^%Cl5DLhi"(I"bf+avLʱrmEΟ*K!̌%a|spWI:pU.'g532y }y#![`CJoy{`_IRNB_H?u#_%< "%V1v`KX:)/8e2 RT)*Mw(K}^֎'s]7<(Z? 
Uϣ$a7$O‰"\ >K^9Hz_.|mjH_u]ݼjon@f^ޅf8!"`XSiZ&:\jrݸr 6\n9~Sv6eEwdDQg|TH8$~HH$bȾ.˓H_| YiS8=y2K #%~B7q$b# AjS)xi"C b*As×opbKC p'Jct@x y7yQxUwmͳbM_w%g$vìڼvd;J4&_Ww/(M-Jr^rƧStˌ aSMvGA&#ݗ (^d? "몙qe \ڿ Pb&Q5(8Ζ:IYԙe26vIX+P!*o[5ywΌU8ss>%L׮O ;UPN k}K }nJuZOɫTʾ#Bpr&&aqC+[:Y/72*O=e Ʊ%PXU`_8CزW79J!!MLVCƎ܂j`E[Xo,Vqx Zt@o͝f!GC៨/3z)PTzA&-NV% 6  qP oZg~JB<7#1{f$ijh 'dk eH1b4]6FO>"41O1D G$B | j_g&ǹ7B@Ѧh@L`}ļjQ"4;B)l)9Dh|u-Y{TXSnSlV-kթ !bk\-Ǔ/ͣ aP- +!!S8/~BggXT`ӖdGo{/@a# Z!.Uih!')lx1ߑ  =.VZiDBOB~+0qn.1%\?ߝt/PD&jP&rAz"NU*tR[0B1H]ִ5̌/[©J.|-7K`8i /_Zk5/q$9iTqU7zgHyF'~>"=ctNh%[7h԰o;JrBbW9u;f^9Q.PhUKx ]ةET SS.,h2%=v_74>~nBY yIO0X}Nvt 2^Nn|$pE{ Nņ!gM1yU {ĤF7Z>!ML`Db:D,X/7HHhlC;Lu~>tGBh>,GrN_>lBgq&SX \A(J6IpnRA vC"I֊B@ wrue￁a*¦F8'>U( F+=a#2 tltfC2wo&WmZ.7Ҩnm˝ (+^Np[GpE e:p2`%X@;)o?G= 3kCë&0GW I ( f(Te.b]ѝ嵹?f5\б;ۦ&OU)ԙb˜|n&&/ІF.H6`ŷŶ\.d;PV>¹m H Y7!H0Ά(dbal{b81ނ 5wL*} ~"HqG >X[" ʾpPP!×hu￵=L9ϓ1{󇳱o'+#γ?Y9ήO0yy!| T^dbEGaƆ(1k$ /C k5;Gu.C+s~x֒{u;Y,V<;iUz o]lqNZ0xĀޅ%\/ڹ\nX fĕSl.^/?S21!v\Vv@n o#j#X[:qx8 )g?;$ʌצ.LKrm`ShzP $9AUMngD@(@Cu; Bq*9x l'8~0l.$1G?qc~Nj kG\1vuA!o?j;HIbs-v7)1reqJ:gfű@À,g߶pr9 @x'VeKDط\V7Uw~?!?J,'_$Nv菮!(YScv(/ 2H@Գ%1Km3kv, vg' xIp~u&I* »hB#J2v^1%0%NB`|hY*j T\Ig=uMJ LjgIRkOp ֝8QTEr *"ÚV6tF@PҠ<&mC]5`ϺF|sR$cEgl>92^xxv7}5-*'6ooĄ@|%j)814`7yBIfb3 ¢ hWMةЎ0*KmX{~fGKem}g[rZ\9 QDlU#5ԄA%> }F`zr z{V@GiTKfci68*"jքoWHgq\}jpDwd7!Cl LNdkWEၚ=>EPC+ J' 9=C޲"j@*+@n뽶L̯CxO&g7z/2+#zd&XYP*Ve&o SqwoڞUFKNcdwÂ>.]1Cn>V6@CX+Q$dcog!b siVHbCWluI]Hz^QXʣ!Xz}^0.!75QoPB/Bt5abիa&7< yXO)U2标iZY5%?#*v Ķ@ܗ8e{XD:-һiuOԫN}6 _>/꓉y;_W'zCJ|N! ;ߨw3\olqjՑO<JJE]y. 4zկ{He0gup]/f[!v_p5_ ג 8l7 dXp5݁R%eP *_8~WBBF-z`JXnw!i# T~RA> zyB \ wP4j#=4mU[WQ[G:ϓ }|1;ءk֓쪑=\b`GL]}𣊘! 
GgӫsFmį ga-LJ hĴ°yvD[+d9PCQ?G9s.@X;Uwý+ʤ% ~ flgn3GO7?QB[i8y =a݈mxv},`;2~D*-9<8]U~>WUNj}b]6k:s׭7LXh^٘J] &Vp$4jHE`5z|{w_kaYt1<_s{<=_9=\{F 1(*+J"g{ňQ֢MZ(cl`٨RYNX&$0z_2zl˜tEvm 1,٣u~8.;'Q gϡoLA;G"y}~=` 僫kgPGY/@,|nK rZc7`t`UW VM4̼wXײUxZj\]LW3ry.E\F4z~3<4k;Sq?Lébȡ711LUr&D%Z"oM(s a.x ;^jVBa&mk !}CF![" j))UP; Cx: GΥIz'~X3i'@}:$ac\21.C96yQWwmϡ6c:au=f,܀KVLY4ф84:6⁨e\>:h;A}ptnii ڱ^chq XJ+ jiSa6c^y'!>LW F :Zׄ5 DyhhpSAP<|[ p>BV{ `6քϟ=IBF4!-i&C$; ͡iǺcU: ?C+HGNh7 Nnұ כ@Psp@Uf|(Ba,xo9BbVA|\ 燸|B>L}vC+@Ĉ/(+"%7m?@-}͉̰ õ4DH~o_IJnM5.Z ȸ]_OU]J~^nvhK0FuXjyr`NP[>X|E{E]J3A }/DE{aA ^[ ^-H_`}>D=ӜC,^z$d2m/Zk;t"NF}F>D]dp2 08W[.  r\ uF';W!zjRJrK @#S^9} dCT 21I2 O/59GvQEb)^Zc2?}ڸ3Zk:t֞|;b͏洉Dn&ħZIxHButF\{K`$7~|cvA / -,3f?sVnO m )[7`mE]r s`OFD,haU@͂%t:A>bU(wLo_.'9$|RdA$RPZDu~3>`4-'=^m1ڦ{ΙhaEij|ƫF ;kfi̵r-3 *j s-k]. ov7*\, vYev C>>*YwUY8(Nk;91Z@TP\Vy%bYkRߘSWҫRb#$dbsRB<\#+anhQ-^hmVڪhb.Pn>_/ښ,@LBL{<,b ii\VTdnz5QH$ٵA#'eZjnkBR`r>Ŕ#S e7dvCڰm h%(}ʮ-P(lb&9h`Ɂ?4u,xLSvb /H~$VvK;Q_(-WN6R]S\5.uiwԲ+8)$̋"ev;#GaҎI;le|C+?FwPvlvw"kp y{qHv1P+C!te .{ɭ !B|ʡ n #9"2~=x.[\_:lB[OB^?:BBP! ! !$B!$~Ѿ!jJ)M{"DxșBHP%^!3wjJ!$D:LcJ OMfz !Gj&X@B"Og6iޞ 1}:HmHS}Eyns[ 6B"|u2iz妴Ղ(zs@PǴf2B,sQ{Lb>j<3S.eB"DQԚ ꃅjw2ˊs]ᏯWc O 1ſ?$xހqpfE%ųKT?=_8 Mb:aܿ#l{̄m@+bfM.`{6@0uuW%u;v0W P=\Ǧn(Otm6[ˢ(LA+&"WMVT$fEgn~> 3탏X6oI%֝W஍_iYpg|Ĵ7>2{ UNvP003nFQj^]g.CeZ$<9sQ阗I7}080O$IxPV2Iÿ744_g 9>rc_1򚄮iǒK] Ȁ"Z. 
۽;A<[YkCke*I6s$pn~Ρs)D/N`-JG]pɻ\v\ Wv~^*;9=~kA/MnG11ٌ& =opegɛЃ@Ci ~pwYQҐ0ݭm2ᣲc ї:Le`?old_ҕ:]1sHFU%.FwBnư1xO48^(xLC^pC)7|L@$Rd`GX";ޤ9$M[kю͂uC 'o+{'J!u2/xx^OXa6 oGάwyoӭ!Ɣųw2/ <1;:+].=vo3D2]1s՚jo5|"1r&v)TI5-qYJ5÷55A]Q41ϗHٳW($EܡXPig"n+ Z8Kޕty#RtB17VMkI=^Ko݇IWaFVp$MZkPlte6g|pyQC; ֳŘ7*̈́tǥ< ~'x0sW h;]ϝzAp'Z\*rz(IPQ~[.i<ŒwhpυPÔ@N 0df|UXfR2Q%>yD|?Cj*E\d.`( 5dj0BL3@6O1 usN*]`̴3:5&^ >&.m|"鉺҉Nq`!'5P.x?{$Z;z`W)U}YuR)T{8+1,s7[$A[`2ecڣCا苲Cdž2nv'pi-u)[0wc%l;ٔ'ULpQfݥ'k苝&AWFH7`™ R&,wBFi/y=Sw!gMgu_g T#.wm*PGjBkhxoڣ$*ӂe&3̓k !Fxݙ;.yh)f)0Z`` :ZXA ڸ-ZbSbKbPAۭ+Ii;i;ā [ Ι铟=((Ab-w\j:|ӏ{#_Vwa``d m32X McwԷtSI;+|;1_C)N)rgwMFw4d<'V'%7gU0RA)(}]q= fG ;T[l_% d㌤p^mP_''Z¥-w`!#-G)& ԤhuVcM76 Ɉ*c8=Ǘ!n!vLwUv%z0ȷ~cvl6EZhtZ` ]G" 7tB {ΔcAB@Dݿe9&V "DX՝m6!]x]#T=N%!giv,>½FRݒSkUK $0јKe}՚U'R`_*g_V{gogD8?>{zT>zz#i;i!\XEu\QG2m߄si ʜA(ޭi+v4X eB, d YB yv*B5X8>pV LxӷMjy1֙}ZmUs~Q LG_ίljd5j-|Q8^t C 2aNyWxՑ78^מ @=(<BY]W q{sɣ]C s|q>iԷe B(AΘ(ߕa;A b"};݁wQI/9*t3`h ͬR}KK8kY36Je3[i"71͇ 6U0 m u[&S5+h(|O}O--Q{FjnimJi5 }^mSf:mGQ#x[AάE6;7!=^iJ^5vzh\t֢U!*g%[7mͪ#uAG6^i4OACd2o n}VLomjQO獋p=]IrJCI+PvQ2aSJ^c$_QkSJ֒ê:Ez B'f$̄Kդƃ O4[j)Flt9166B5_Jw; g<azptA.1d:Sezw[  O['ǭn;UC(Nw,!Hz҉!J/Jo F(Xv2 =Ka"p\/0J #C#` w(/5/eG AN^=ŝ- - 0beH33H/Y|^(ZRUpCCm"՗ᐥd< ò>LRbt!b@]g>?~ 0'!};ڍ/2%aIfbf $-Bƴ (}yJZb%|ܤUC>x%χ9zv~kRkK&(|Z x*]T%&^sd\4ѷA:x@\V ~/?ܘ/  .9c+s\Rfq-fNS(l4 Z/f˜ %K| 8YތuҪܯL7g} ~|A-塚bt< Sv[˴ڢxBbXk?˿I.d[lՊ驁A8tqЄ\SUUS3oΞOw_/ϑ##~G !X;xmJ8O %UID&'gUޱ+Nq \ܲb ig"Q! žO+(B0ǡ҃{Hl齜$?0xq{?,z' n"$rۻ/Ztj:?$·4"XvwnZ_^{~񮂻ڣT By|Sͺacٟ˱t"|Nj?ysj~ן=X Y@2b<'`|~Ql%=%,{Nj}+&" P ?D"@a=oc)ZN޽N{%z5V1/2D#p\5¯9"Dx[E4"\6R*:T 4{!<A rץLu]%H(8%HFsE|$ N=qosT[EP_" M$. &РNDhi긺Ra@[9fTk\^d\D1BSNR;t2U|0I9ukԛ zrALQȜbF4K6>5KYJκr.RRJιvXhmOYj7p~mJ|"Tm)[/=X}ݰRc0ze}/{buV7sP/$mky/70bY%Ky]zqqB(zej6݊iփVAQD͉#3=hYv8Ε[~N;O?͉[YBXGv!;77bI_N!j=zoV,`=a,(g`픈pkTUf}#<}mIu. 
jbu=a۳Kٺ5_6+F"4bY0UW[Q^/oi'ֳUW/ Fwrb7[kfk̺vACgs[%0rӡO;f,[l}tXQ3W_NŻGgwO,7;F'2~Oz1ln'P:fj9B鴝ˆùKGr.W`)P1agX-vN[vYZ#2촩ܠ-VmH# QJU *ݼJ7BQ9Oʼ{ժ\(%>z*ke~{g=DBc&x^^:6p{/&\9$.w>혩̜Kϗ>*@e~2OQ|,B)8eTFx!8 w̨*9 <7pR7p_10yZ5nt<?\ibMz0=+5t021+5;rp2w\GoB"\ #!"D(!B@"BD"BD!"D!"D!"D"B@"BD"BD!"D!"D;6+_{ioD]fk.͊^EU**lH{(2@<!< 8!IYu I Vϙ1^`bm?̍g{ν R~r~MhA&?!M%?) sBb-v퇠[)q/f Qzv9wA/Dd _e)q[O4xjf*ܨV L[L0{_${wj6ZU`Grֶb'k+**j[l2q`plNsMsf3nBhB\xCQ޻E9S{F&8NdcBjeU<amio3cWf[Sӌe&IHpQܰ mLd !+ݽ=cb/1eO.Dnb픸FK:KgBPCMcèOWK{:~W5xvМqal<<0=5`u-ҞX[YGz2iO3חD)<>&&KD~gc-¬>`e"=Kn56%5qTJvMDlCxvyinO82?X Jd !gw_Y88g#u(txóJUG?/,+sJ%ZHVؙ7]Q}:]xUob/,kސHE=8Nb裺tio~;zF8YHh*e:aіX5cӫ7nP !._v':J=MS5Lk C)tx&ɸB0km56ץ=g `MZ1>}^MێQUޮ'@1ZUw÷۪Qq*x]7Cd { عIWk ;z]ZuH{!ԯ;*̳ (w(B0eTǯLW۵ ]N7baƇۥm0=&c*^@$ypxO_`+I{g G90<c9 &=S6*,{Ű1ޭ}]_fфY5yPHZ\R9Bbc6BJckp:nmm\WB89Z-lEA&Kw8gD}4~yR-pz#n~U[ke;HAFFh_1JUG--c0ǼЄ0zOUhBQ !:)9iev3-굎x{ʑ^vlw{N+)WJC_8aMB&;_֔=ce$: kSKNR~#1aBhBx|i+`4 `B/p‘=˸-ẲB2,~*-*PMfN}qB*=_/:r)6泦+示(g7yzl_uBTQ6)! j̃X[#탐iZGG'/X^ V>93C /e.p:#VkWLMR{y+wz<%l QH 1' 0ncwҸ¶-D n 6vN}zz%Ѱ '/GQ~|ę`[!(X~kRt3 o] 'h|=9⒚7 `?Ŗ֣7Р?^C0^w !nbجJ=`1*̨ )bL>f):^VcN)/WZ܁ /4q(ɳ3iq |#4hTs,Dp bKN|Jywz IuWdfOH`Iu0&uaK3QuiDuŶB{,LNCxK}ꯄX#uY2qhDXtrKg{ L(/I&&Y@H)8'؆FTPUVn" A8 lh}y_&Y?,RW@˛ԕT혏jOthRsU"Ut `k6q: 'Rǧ7/H8aL3nY82\-6A j2ěe=Gy-b}},ޢkF4Qܴ J:Ě_faƄ0+.v6|h >6d;H,6nȴ:$6T9 jA'Ĕ5 b" LDZt,Dq@g@dM qzN~<"ZhOnE惝2Fř{Of~386F=i^EI'4p}Vr@ָsO 4:>dXtR ~ Mc1잯XC+-4mNfW(xϟ,g_xE|ow/F:. e"O FGJш'O~O|?K#g']v 4BeWM-A3BH 2CCb$;rC(,AL9Ki_XYv@oUO?:iQ⳧II#ηR|IE'}"$v#:iTc^խi>NFxSPMSvL-$) tZ4vOv6%2ԬiLVqvҢ B"䑇 !!!!2DHDHDH !!!CDHDHD!!!2DHDHD]IIZ|!駀!B"JEm"$B"BP,X(ΰugLo ˮ[@Rp]\m}3*n6UA?. 
oo| !`~9|^j;ѥi{şvT7qMsnVZ !aj 'qfZQgOLg>vWyoگs;B"$B"iã"w7ߛ98 o{*|CDȕ#<8lZ^|}݋ҕp#lJHDȕFߛڏˑM|_7H3/DHݰ;ԘE_e;vH0oKUz}-v=-u]娶Y*(0orv GqPM^`^ywzy۬Pݏ cOR7FHExְ}"z9{7A"$B"82t!r?!GP !WB"$B"d"$B"$B"h XcٮP8:c; fDHJ ;"3$;8Mdʢf޹?7q{ڇbɻ ^,KH6$&>Y$҅"[XrlZZIA+2˘K lCwO?SS=vZ䗣GFC0kwoeJ u͑6ӯ|eҕ+mGkA1}ٳo_Zm+VL?g2H~x2V  $Si<7m Kc Ӓcc?}mG)B0ğ0kg{Bu{ȸ'N)/r:jW [C|fFu=|R:{=5T6/eh!+$ٱ3]&@9BzFq鋬A?V%3f($.|ӥUFFu]& G?32 s 6\z?z}T7lEq/j5z_B !ݶmN'e$ Hd Y"VbbzԨB ݌_β2ȁ+(BaTV 4t}mO)t,se_C/wU[C +}iB_p6BH!  aǛԯ T8 pXM$՛EհX(425&։/! ܛ~Su !E¦R)[R)BZ(B !BH!BH!R)BZ(B !BH!R)`D}b/%BUmBïcc?A5AH!j[X |k*IKٱbgY1)W!H6M`u:/#L<_xkxġ z:f}f|ox^^+4}b@jLzGu)+ODX"$`0u=>JEؿ'Lڥ !B~pޯ0l|Ģ5, 8pIL촙oDvtvʶpYY>? 7G^ohmC/[R)BKVYe & h@.|xhw ޱ~o޳ Q ݎa=="&8}"v# y!b wo tTtA}C6I,l !w*zoo+@/t+ڴU8̧JⳚp쮷@A>ARKn@ }R_!$?6>M-2eOMut ڑ (f_]E#mRlk}FWld?٧VʷgSB,IU42.*_Qεy/v-5u&]90_a׮g}3!f}Ru-]Y2$L`{4P/XW}Đt>`dWt44*TP\R_)̌T0XV1[WA2ig*g\ ۈqOҔ/%O!' lOPdfC@8;r֚.,1GFF[SvUgNA'5o]vm{s^Fޥ7F0/E,+amʬq]~lIe ckc̭ w ^Pv2(BTk$vO-wJፕ{O) o5!TtD'ѐΐV??RZX8!2bfgaUx_y1#~(@x݀ }=̂,Y=p*I!~Lzffl@X|NC=F tHɔ N^g-*B ra~&Z;1a̳Uvȁ~# ZF̒^&?Hf#cʓ7l#0Z RݚJywp%v҄M2_{A+̲;>fbO95{d޳]'dyf'ԡ~ᓛse CS,[A^:UNi׃&@Ւa>ʼ!8ē02k8!i]Q~MYkoA6W"p.3xRۿwzP91dݼR_kp Xǟ23>T . 믝lI1>FVشI"qd$r,^S3kr1>,[wW tLVkAs^kW[Ԓ})Lo}v@;zC5o#$}rFoοsI `h7I E{eB !VB1@Htfabzf[ۢ7G 5`Qբ[B !IĽ$ h M0`ك1zBc\mSon^O$n6F|xWO$֕M((]"RE:_#!n%1͐0 !t~YuH&ca1=D!B! D?.BH!"R--BP_Ձ*@JBH!9`|zwg@|x=yXmވ~d$FCPBHw8:z ˭6mBAoW |f#޼R)ICsK=(pɞfrq$ iCh$$ޜyyfl,C$ wJw`)ƒB@O{..]*9]X9ZV A㨵~8 O"ڛAu \3ЄNx 3־LSۓŋӕA4P)or:3st0Z66/݂lÞ06!x?؜ŽJ瑌!2Y)+V9)INpa Mw݂=BE&0z:=[oKa K09ZYl:˞ ;2J |oT޿&+$<|$~teGF{?>hN'bR[c8B !&뼜sn6з{"v;zIXvj5NK=Va!]Pq /e&j:rݣR^X4^ӳ0@=oTmDY{S)B?H? ȟjj)B Ꮤf_8_=r' YНeD{N%Xd6&sW[ᗸ\S.Tn97Ut6höyyý # B B@! 5 R z Bj-DE۶,ULz͈y!V Dk+&EѢxC&61i ͋VT0@7;BL^! '!'! U,̈́L&aZeuBjJB3sk<7}ww&! gLPšP!,4m)Q'i)Bd[M8a^-IB@Hp;|صs̭xsSzK[{ Tq|{OvZb۳D@z_8GQvg#fyAm~bGQ젦!)b&[a4PIq;wg/[xgX#|x6;qq|N)㧓g#2Yi)I&7G>o,f} Ptk@J2<|YM|N}]! 
Wkyn::qWxx!X@"7n픱|bƬEYL{m8)QJۆP0T4E2_1VVD[t)/|Sw•+q swb쩛JB@Xe"sU dYa"nt\r/eu[؏ԓi,~I8# j&x!zz'tvvRc'MrG'r]~+IH>Ќ$]w%sIB$]]^ $$Dm.XQ@( 65cgrqIH &EZ7-0־OVJdYE0Z.$}IHJ3;DH#o&w&!mE+X ˥# WNerh9Iv(VֲOpR$bRe6x3թE$ ׌(ȉ%tJ*mjQb&A 1q|/Aoq1sx$\'hZ`G tRU.@*qQ ڰca\RIsYq.cQ[kt0 /+]b;`$X%m!T! Q8+3jW ^DE(j$$NrPZ,rV!!!栴v<6GYiǀe*5n'MYSmB>򺀕=ll(eM!V!!QD%qr˻Ԃ[$77!-`j:5jX'pKدkd $$S*4H7g^lxMTȢܶH_):I˃[6?JHHLV~9E%QQC6zc (-vzŕmy<+0h;oTGڥrJ6|XMc8\(Uf4y&u}o1FeUgUuK jGMs]׼8Ɠ*E d ޙ7.aYEUJ +K~p#4laݦ#WzAw5|>= ?^^r KUK0مM>EJ&O T~=3OA|BlY3ib˯#."縄|PE%0xD\aZв#4xoUU9bcԣ@BH(a}]Zk4n4`j]+o^>H[8;q PY:/iDmeо).bݶڦin;NQi Z$V=ؑvg\0?G Ѧ'^+_^\ ?;tGj7y( 늖0ZYD<\JgxN m#f]]CBizg|:@::fP`䂔ibg> wEǏfq+p 5 e`y 7-4c*NطG}?"gp~vVJ>ݏJn!ѧv4*ЍCNBmB݋;3?^c sëoڧ;9 k~uB!O*\XZnQ*Gt.ן]X%+O7L}}w]b=">oORݑCB. . u&N)km8C8w> L=Ck1TwVTprgX!FJB*ile*Hi |A%8t36pdM*fdͧc4pN}< q 3Tx:r̨κ=3 rz2X֭t4V=~VXJiehG\l۵%Lv+2BjDz1qx3V; zMśnn6ꬽka>&D\LXcHGƠ<\L^{ƅ%U]( 6U5]1+]/{0LT=+zWՍh^5FF`GRy^jq(J!I@x'!\Z^B.D[7O y㭲WR})HQ~<5vwEոKźnMWg)!^LjKMUMϴ45U qxPS󳬅y`C/l6/=4Iq z -[!]O]PB?82knGSyP9vaK@s(؀;2OMZq1^2.c2P$¶jIDghbJK28M.<9 wu.eA#ribl2u5WTڱW%NHΧ~ףrM=](}@r: Kh':H&0myb%(΅б?uQ5%xj6% mD7:C] 8>c6cS. !_cqQhݐ4[Lf9׏fsTa12M@a0>e!@Zv] pBQ23_R^c h44.[h^8q;ϧ\UB^ֿq1a]L*SrfVa _(;_. nLC#c+Kwi Dqb(X N uV$~<`0Ĉq xݢ,LP'Ɲ_:uzO , X}`{C=$ FD^6C*x:QnXt΀:yr$؝0iylPP%LVvE{|sVBl6EPLV<\оcf)`kͭz3u`CA=95hRl}i\:Ҙv3X(z$Hi!zIT0ha 2jk~k9`WX,&Jw E8«@,>cRKAbQvux@j b,sJ@=PknOL`\:ًcq9HB{p<䃕!u꛶oz{6&/ I'vcq1'6xNچ$_{-Svϐ g`]4o%]c` /Q 8F@$j[ȆH,s \Aw;:ZUyygr:9#t%ucBoBxQ;ZN{sAb:¢p6 OOOߟnyrͯw!<{zz~fO#q.6O*=$W !=.7oJ9羨ϛüH(/WcB8]WW~ٜ+Vr^͇ HCOOO9ZŇ ~!5FKDMzNn WFsnUZ,I:|כB]bhAXpkPy/}lMu -{EMQ.Ķr,-K a1xS Hx~GV*Kx@l-a9'FA:"<"F) E! ѵ0~(ofP&m\20 G#B^h { C6sd2}uܣi uJj i$0B:T(lP m$P5=<!oAȫR)@l3XBPddc2 w),iT1ġ 6s{h ,\ >)DvG"ß`0!#TWC "[9 w33!u )P1, Hl2(fDBߏ(d C1,l= 3c0NaM!r}D`28bAXaPE*!dΝ/ d0"VCʡ B՜;I AXG9HB!^ΡOhcr0>&0 lΝ! 
և0# aC$:!uu}@tO0 aB!kޑ1: dpa!9@@`IBư8!s+A`y]1APY5:¤02ġ$ʢsN ±2Hb9@ANR(8Dbfsn`p 8`VCQs !8`US p!͈\b T )!蜃_Q0Kte%:WN2B0A4Hgx)00D$9:LepB1!sf" CZsu 3ah%z)ph8kF\7d\d!:&ZtrsMps9s9sO'r}r +++ draft = true title = "Docker Hub API" description = "API Documentation for the Docker Hub API" keywords = ["API, Docker, index, REST, documentation, Docker Hub, registry"] [menu.main] parent = "engine_remoteapi" weight = 99 +++ # Docker Hub API This API is deprecated as of 1.7. To view the old version, see the [Docker Hub API](https://docs.docker.com/v1.7/docker/reference/api/docker-io_api/) in the 1.7 documentation. docker-1.10.3/docs/reference/api/docker_io_accounts_api.md000066400000000000000000000171761267010174400235300ustar00rootroot00000000000000 # docker.io accounts API ## Get a single user `GET /api/v1.1/users/:username/` Get profile info for the specified user. Parameters: - **username** – username of the user whose profile info is being requested. Request Headers: - **Authorization** – required authentication credentials of either type HTTP Basic or OAuth Bearer Token. Status Codes: - **200** – success, user data returned. - **401** – authentication error. - **403** – permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have `profile_read` scope. - **404** – the specified username does not exist. 
**Example request**: GET /api/v1.1/users/janedoe/ HTTP/1.1 Host: www.docker.io Accept: application/json Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "id": 2, "username": "janedoe", "url": "https://www.docker.io/api/v1.1/users/janedoe/", "date_joined": "2014-02-12T17:58:01.431312Z", "type": "User", "full_name": "Jane Doe", "location": "San Francisco, CA", "company": "Success, Inc.", "profile_url": "https://docker.io/", "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm" "email": "jane.doe@example.com", "is_active": true } ## Update a single user `PATCH /api/v1.1/users/:username/` Update profile info for the specified user. Parameters: - **username** – username of the user whose profile info is being updated. Json Parameters: - **full_name** (*string*) – (optional) the new name of the user. - **location** (*string*) – (optional) the new location. - **company** (*string*) – (optional) the new company of the user. - **profile_url** (*string*) – (optional) the new profile url. - **gravatar_email** (*string*) – (optional) the new Gravatar email address. Request Headers: - **Authorization** – required authentication credentials of either type HTTP Basic or OAuth Bearer Token. - **Content-Type** – MIME Type of post data. JSON, url-encoded form data, etc. Status Codes: - **200** – success, user data updated. - **400** – post data validation error. - **401** – authentication error. - **403** – permission error, authenticated user must be the user whose data is being updated, OAuth access tokens must have `profile_write` scope. - **404** – the specified username does not exist. 
**Example request**: PATCH /api/v1.1/users/janedoe/ HTTP/1.1 Host: www.docker.io Accept: application/json Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= { "location": "Private Island", "profile_url": "http://janedoe.com/", "company": "Retired", } **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "id": 2, "username": "janedoe", "url": "https://www.docker.io/api/v1.1/users/janedoe/", "date_joined": "2014-02-12T17:58:01.431312Z", "type": "User", "full_name": "Jane Doe", "location": "Private Island", "company": "Retired", "profile_url": "http://janedoe.com/", "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm" "email": "jane.doe@example.com", "is_active": true } ## List email addresses for a user `GET /api/v1.1/users/:username/emails/` List email info for the specified user. Parameters: - **username** – username of the user whose profile info is being updated. Request Headers: - **Authorization** – required authentication credentials of either type HTTP Basic or OAuth Bearer Token Status Codes: - **200** – success, user data updated. - **401** – authentication error. - **403** – permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have `email_read` scope. - **404** – the specified username does not exist. **Example request**: GET /api/v1.1/users/janedoe/emails/ HTTP/1.1 Host: www.docker.io Accept: application/json Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "email": "jane.doe@example.com", "verified": true, "primary": true } ] ## Add email address for a user `POST /api/v1.1/users/:username/emails/` Add a new email address to the specified user's account. The email address must be verified separately, a confirmation email is not automatically sent. Json Parameters: - **email** (*string*) – email address to be added. 
Request Headers: - **Authorization** – required authentication credentials of either type HTTP Basic or OAuth Bearer Token. - **Content-Type** – MIME Type of post data. JSON, url-encoded form data, etc. Status Codes: - **201** – success, new email added. - **400** – data validation error. - **401** – authentication error. - **403** – permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have `email_write` scope. - **404** – the specified username does not exist. **Example request**: POST /api/v1.1/users/janedoe/emails/ HTTP/1.1 Host: www.docker.io Accept: application/json Content-Type: application/json Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM { "email": "jane.doe+other@example.com" } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "email": "jane.doe+other@example.com", "verified": false, "primary": false } ## Delete email address for a user `DELETE /api/v1.1/users/:username/emails/` Delete an email address from the specified user's account. You cannot delete a user's primary email address. Json Parameters: - **email** (*string*) – email address to be deleted. Request Headers: - **Authorization** – required authentication credentials of either type HTTP Basic or OAuth Bearer Token. - **Content-Type** – MIME Type of post data. JSON, url-encoded form data, etc. Status Codes: - **204** – success, email address removed. - **400** – validation error. - **401** – authentication error. - **403** – permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have `email_write` scope. - **404** – the specified username or email address does not exist. 
**Example request**: DELETE /api/v1.1/users/janedoe/emails/ HTTP/1.1 Host: www.docker.io Accept: application/json Content-Type: application/json Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM { "email": "jane.doe+other@example.com" } **Example response**: HTTP/1.1 204 NO CONTENT Content-Length: 0 docker-1.10.3/docs/reference/api/docker_remote_api.md000066400000000000000000000336761267010174400225200ustar00rootroot00000000000000 # Docker Remote API Docker's Remote API uses an open schema model. In this model, unknown properties in incoming messages are ignored. Client applications need to take this behavior into account to ensure they do not break when talking to newer Docker daemons. The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport STDOUT, STDIN, and STDERR. By default the Docker daemon listens on `unix:///var/run/docker.sock` and the client must have `root` access to interact with the daemon. If a group named `docker` exists on your system, `docker` applies ownership of the socket to the group. The current version of the API is v1.22 which means calling `/info` is the same as calling `/v1.22/info`. To call an older version of the API use `/v1.21/info`. 
Use the table below to find the API version for a Docker version: Docker version | API version | Changes ----------------|------------------------------------|------------------------------------------------------ 1.10.x | [1.22](docker_remote_api_v1.22.md) | [API changes](docker_remote_api.md#v1-22-api-changes) 1.9.x | [1.21](docker_remote_api_v1.21.md) | [API changes](docker_remote_api.md#v1-21-api-changes) 1.8.x | [1.20](docker_remote_api_v1.20.md) | [API changes](docker_remote_api.md#v1-20-api-changes) 1.7.x | [1.19](docker_remote_api_v1.19.md) | [API changes](docker_remote_api.md#v1-19-api-changes) 1.6.x | [1.18](docker_remote_api_v1.18.md) | [API changes](docker_remote_api.md#v1-18-api-changes) 1.5.x | [1.17](docker_remote_api_v1.17.md) | [API changes](docker_remote_api.md#v1-17-api-changes) 1.4.x | [1.16](docker_remote_api_v1.16.md) | [API changes](docker_remote_api.md#v1-16-api-changes) 1.3.x | [1.15](docker_remote_api_v1.15.md) | [API changes](docker_remote_api.md#v1-15-api-changes) 1.2.x | [1.14](docker_remote_api_v1.14.md) | [API changes](docker_remote_api.md#v1-14-api-changes) Refer to the [GitHub repository]( https://github.com/docker/docker/tree/master/docs/reference/api) for older releases. ## Authentication Since API version 1.2, the auth configuration is now handled client side, so the client has to send the `authConfig` as a `POST` in `/images/(name)/push`. The `authConfig`, set as the `X-Registry-Auth` header, is currently a Base64 encoded (JSON) string with the following structure: ``` {"username": "string", "password": "string", "email": "string", "serveraddress" : "string", "auth": ""} ``` Callers should leave the `auth` empty. The `serveraddress` is a domain/ip without protocol. Throughout this structure, double quotes are required. ## Using Docker Machine with the API If you are using `docker-machine`, the Docker daemon is on a host that uses an encrypted TCP socket using TLS. 
This means, for Docker Machine users, you need to add extra parameters to `curl` or `wget` when making test API requests, for example: ``` curl --insecure \ --cert $DOCKER_CERT_PATH/cert.pem \ --key $DOCKER_CERT_PATH/key.pem \ https://YOUR_VM_IP:2376/images/json wget --no-check-certificate --certificate=$DOCKER_CERT_PATH/cert.pem \ --private-key=$DOCKER_CERT_PATH/key.pem \ https://YOUR_VM_IP:2376/images/json -O - -q ``` ## Docker Events The following diagram depicts the container states accessible through the API. ![States](images/event_state.png) Some container-related events are not affected by container state, so they are not included in this diagram. These events are: * **export** emitted by `docker export` * **exec_create** emitted by `docker exec` * **exec_start** emitted by `docker exec` after **exec_create** Running `docker rmi` emits an **untag** event when removing an image name. The `rmi` command may also emit **delete** events when images are deleted by ID directly or by deleting the last tag referring to the image. > **Acknowledgement**: This diagram and the accompanying text were used with the permission of Matt Good and Gilder Labs. See Matt's original blog post [Docker Events Explained](http://gliderlabs.com/blog/2015/04/14/docker-events-explained/). ## Version history This section lists each version from latest to oldest. Each listing includes a link to the full documentation set and the changes relevant in that release. ### v1.22 API changes [Docker Remote API v1.22](docker_remote_api_v1.22.md) documentation * `POST /container/(name)/update` updates the resources of a container. * `GET /containers/json` supports filter `isolation` on Windows. * `GET /containers/json` now returns the list of networks of containers. * `GET /info` Now returns `Architecture` and `OSType` fields, providing information about the host architecture and operating system type that the daemon runs on. 
* `GET /networks/(name)` now returns a `Name` field for each container attached to the network. * `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it consistent with other date/time values returned by the API. * `AuthConfig` now supports a `registrytoken` for token based authentication * `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory` * Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create` will be cancelled if the HTTP connection making the API request is closed before the push or pull completes. * `POST /containers/create` now allows you to set a read/write rate limit for a device (in bytes per second or IO per second). * `GET /networks` now supports filtering by `name`, `id` and `type`. * `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container. * `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container. * `GET /info` now includes the number of containers running, stopped, and paused. * `POST /networks/create` now supports restricting external access to the network by setting the `internal` field. * `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from network * `GET /containers/(id)/json` now returns the `NetworkID` of containers. * `POST /networks/create` Now supports an options field in the IPAM config that provides options for custom IPAM plugins. * `GET /networks/{network-id}` Now returns IPAM config options for custom IPAM plugins if any are available. * `GET /networks/` now returns subnets info for user-defined networks. * `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications that are built on top of engine. 
### v1.21 API changes [Docker Remote API v1.21](docker_remote_api_v1.21.md) documentation * `GET /volumes` lists volumes from all volume drivers. * `POST /volumes/create` to create a volume. * `GET /volumes/(name)` get low-level information about a volume. * `DELETE /volumes/(name)` remove a volume with the specified name. * `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable. * `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`. * The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container. * `GET /containers/(id)/stats` will return networking information respectively for each interface. * The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options. * `POST /build` now optionally takes a serialized map of build-time variables. * `GET /events` now includes a `timenano` field, in addition to the existing `time` field. * `GET /events` now supports filtering by image and container labels. * `GET /info` now lists engine version information and return the information of `CPUShares` and `Cpuset`. * `GET /containers/json` will return `ImageID` of the image used by container. * `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused. * `GET /containers/(name)/json` now accepts a `size` parameter. Setting this parameter to '1' returns container size information in the `SizeRw` and `SizeRootFs` fields. * `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field, detailing network settings per network. This field deprecates the `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which are still returned for backward-compatibility, but will be removed in a future version. 
* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field, detailing networksettings per network. This field deprecates the `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which are still returned for backward-compatibility, but will be removed in a future version. * The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the badness heuristic. This heuristic selects which processes the OOM killer kills under out-of-memory conditions. ### v1.20 API changes [Docker Remote API v1.20](docker_remote_api_v1.20.md) documentation * `GET /containers/(id)/archive` get an archive of filesystem content from a container. * `PUT /containers/(id)/archive` upload an archive of content to be extracted to an existing directory inside a container's filesystem. * `POST /containers/(id)/copy` is deprecated in favor of the above `archive` endpoint which can be used to download files and directories from a container. * The `hostConfig` option now accepts the field `GroupAdd`, which specifies a list of additional groups that the container process will run as. ### v1.19 API changes [Docker Remote API v1.19](docker_remote_api_v1.19.md) documentation * When the daemon detects a version mismatch with the client, usually when the client is newer than the daemon, an HTTP 400 is now returned instead of a 404. * `GET /containers/(id)/stats` now accepts `stream` bool to get only one set of stats and disconnect. * `GET /containers/(id)/logs` now accepts a `since` timestamp parameter. * `GET /info` The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and `SwapLimit` are now returned as boolean instead of as an int. In addition, the end point now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and `OomKillDisable`. 
* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota` * `POST /build` accepts `cpuperiod` and `cpuquota` options ### v1.18 API changes [Docker Remote API v1.18](docker_remote_api_v1.18.md) documentation * `GET /version` now returns `Os`, `Arch` and `KernelVersion`. * `POST /containers/create` and `POST /containers/(id)/start`allow you to set ulimit settings for use in the container. * `GET /info` now returns `SystemTime`, `HttpProxy`,`HttpsProxy` and `NoProxy`. * `GET /images/json` added a `RepoDigests` field to include image digest information. * `POST /build` can now set resource constraints for all containers created for the build. * `CgroupParent` can be passed in the host config to setup container cgroups under a specific cgroup. * `POST /build` closing the HTTP request cancels the build * `POST /containers/(id)/exec` includes `Warnings` field to response. ### v1.17 API changes [Docker Remote API v1.17](docker_remote_api_v1.17.md) documentation * The build supports `LABEL` command. Use this to add metadata to an image. For example you could add data describing the content of an image. `LABEL "com.example.vendor"="ACME Incorporated"` * `POST /containers/(id)/attach` and `POST /exec/(id)/start` * The Docker client now hints potential proxies about connection hijacking using HTTP Upgrade headers. * `POST /containers/create` sets labels on container create describing the container. * `GET /containers/json` returns the labels associated with the containers (`Labels`). * `GET /containers/(id)/json` returns the list current execs associated with the container (`ExecIDs`). This endpoint now returns the container labels (`Config.Labels`). * `POST /containers/(id)/rename` renames a container `id` to a new name.* * `POST /containers/create` and `POST /containers/(id)/start` callers can pass `ReadonlyRootfs` in the host config to mount the container's root filesystem as read only. 
* `GET /containers/(id)/stats` returns a live stream of a container's resource usage statistics. * `GET /images/json` returns the labels associated with each image (`Labels`). ### v1.16 API changes [Docker Remote API v1.16](docker_remote_api_v1.16.md) * `GET /info` returns the number of CPUs available on the machine (`NCPU`), total memory available (`MemTotal`), a user-friendly name describing the running Docker daemon (`Name`), a unique ID identifying the daemon (`ID`), and a list of daemon labels (`Labels`). * `POST /containers/create` callers can set the new container's MAC address explicitly. * Volumes are now initialized when the container is created. * `POST /containers/(id)/copy` copies data which is contained in a volume. ### v1.15 API changes [Docker Remote API v1.15](docker_remote_api_v1.15.md) documentation `POST /containers/create` you can set a container's `HostConfig` when creating a container. Previously this was only available when starting a container. ### v1.14 API changes [Docker Remote API v1.14](docker_remote_api_v1.14.md) documentation * `DELETE /containers/(id)` when using `force`, the container will be immediately killed with SIGKILL. * `POST /containers/(id)/start` the `HostConfig` option accepts the field `CapAdd`, which specifies a list of capabilities to add, and the field `CapDrop`, which specifies a list of capabilities to drop. * `POST /images/create` th `fromImage` and `repo` parameters support the `repo:tag` format. Consequently, the `tag` parameter is now obsolete. Using the new format and the `tag` parameter at the same time will return an error. docker-1.10.3/docs/reference/api/docker_remote_api_v1.14.md000066400000000000000000001077131267010174400233430ustar00rootroot00000000000000 # Docker Remote API v1.14 ## 1. Brief introduction - The Remote API has replaced `rcli`. 
- The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. # 2. Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "9cd87474be90", "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "3176a2479c92", "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", "Ports":[], "SizeRw":12288, "SizeRootFs":0 }, { "Id": "4cb07b47f9fb", "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", "Ports": [], "SizeRw": 12288, "SizeRootFs": 0 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. - **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. 
Available filters: - exited=<int> -- containers with exit code of <int> - status=(restarting|running|paused|exited) Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname":"", "Domainname": "", "User":"", "Memory":0, "MemorySwap":0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, "PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd":[ "date" ], "Image":"ubuntu", "Volumes":{ "/tmp": {} }, "WorkingDir":"", "NetworkDisabled": false, "ExposedPorts":{ "22/tcp": {} }, "RestartPolicy": { "Name": "always" } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id":"e90e34656806" "Warnings":[] } Json Parameters: - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **config** – the container's configuration Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. 
Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", "Created": "2013-05-07T14:51:42.041847+02:00", "Path": "date", "Args": [], "Config": { "Hostname": "4fa6e0f0c678", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Dns": null, "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" }, "State": { "Running": false, "Pid": 0, "ExitCode": 0, "StartedAt": "2013-05-07T14:51:42.087658+02:01360", "Ghost": false }, "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "NetworkSettings": { "IpAddress": "", "IpPrefixLen": 0, "Gateway": "", "Bridge": "", "PortMapping": null }, "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { "Binds": null, "ContainerIDFile": "", "LxcConf": [], "Privileged": false, "PortBindings": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "49153" } ] }, "Links": ["/name:alias"], "PublishAllPorts": false, "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"] } } Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. 
**Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ] "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ], } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get stdout and stderr logs from the container ``id`` **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default false - **stdout** – 1/True/true or 0/False/false, show stdout log. Default false - **stderr** – 1/True/true or 0/False/false, show stderr log. Default false - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. Default false - **tail** – Output specified number of lines at the end of logs: `all` or ``. 
Default all Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` Inspect changes on container `id`'s filesystem **Example request**: GET /containers/4fa6e0f0c678/changes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Path": "/dev", "Kind": 0 }, { "Path": "/dev/kmsg", "Kind": 1 }, { "Path": "/test", "Kind": 1 } ] Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Export a container `GET /containers/(id)/export` Export the contents of container `id` **Example request**: GET /containers/4fa6e0f0c678/export HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Start a container `POST /containers/(id)/start` Start the container `id` **Example request**: POST /containers/(id)/start HTTP/1.1 Content-Type: application/json { "Binds":["/tmp:/tmp"], "Links":["redis3:redis"], "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts":false, "Privileged":false, "Dns": ["8.8.8.8"], "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"] } **Example response**: HTTP/1.1 204 No Content Json Parameters: - **hostConfig** – the container's host configuration (optional) Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such 
container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters - **signal** - Signal to send to the container: integer or string like "SIGINT". When not set, SIGKILL is assumed and the call will wait for the container to exit. Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. 
Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When the TTY setting is enabled in [`POST /containers/create`](#create-a-container), the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. The format is a **Header** and a **Payload** (frame). **HEADER** The header will contain the information on which stream the data is written (stdout or stderr). It also contains the size of the associated frame encoded on the last 4 bytes (uint32). It is encoded on the first 8 bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) - 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of the uint32 size encoded as big endian. **PAYLOAD** The payload is the raw stream. **IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read 8 bytes 2. Choose stdout or stderr depending on the first byte 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. 
Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - **force** - 1/True/true or 0/False/false, Kill then remove the container. 
Default false Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275 }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, "Size": 24653, "VirtualSize": 180116135 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - dangling=true - **filter** - only return images with the specified name ### Create an image `POST /images/create` Create an image, either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. 
Query Parameters: - **fromImage** – name of the image to pull - **fromSrc** – source to import, - means stdin - **repo** – repository - **tag** – tag - **registry** – the registry to pull from Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/ubuntu/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Created": "2013-03-23T22:24:18.818426-07:00", "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", "ContainerConfig": { "Hostname": "", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "Tty": true, "OpenStdin": true, "StdinOnce": false, "Env": null, "Cmd": ["/bin/bash"], "Dns": null, "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" }, "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Parent": "27cf784147099545", "Size": 6824592 } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "b750fe79269d", "Created": 1364102658, "CreatedBy": "/bin/bash" }, { "Id": "27cf78414709", "Created": 1364068391, "CreatedBy": "" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? 
(n/a)", "progressDetail": {"current": 1}} {"error": "Invalid..."} ... If you wish to push an image on to a private registry, that image must already have been tagged into a repository which references that registry host name and port. This repository name should then be used in the URL. This mirrors the flow of the CLI. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – the tag to associate with the image on the registry, optional Request Headers: - **X-Registry-Auth** – include a base64-encoded AuthConfig object. Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker Hub](https://hub.docker.com). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. 
**Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "description": "", "is_official": false, "is_automated": false, "name": "wma55/u1210sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "jdswinbank/sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "vgauthier/sshd", "star_count": 0 } ... ] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Build an image from Dockerfile via stdin `POST /build` Build an image from Dockerfile via stdin **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz. The archive must include a file called `Dockerfile` at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). Query Parameters: - **t** – repository name (and optionally a tag) to be applied to the resulting image in case of success - **remote** – git or HTTP/HTTPS URI build source - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) - **forcerm** - always remove intermediate containers (includes rm) Request Headers: - **Content-type** – should be set to `"application/tar"`. 
- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: - **200** – no error - **500** – server error ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Containers": 11, "Images": 16, "Driver": "btrfs", "ExecutionDriver": "native-0.1", "KernelVersion": "3.12.0-1-amd64", "Debug": false, "NFd": 11, "NGoroutines": 21, "NEventsListener": 0, "InitPath": "/usr/bin/docker", "IndexServerAddress": ["https://index.docker.io/v1/"], "MemoryLimit": true, "SwapLimit": false, "IPv4Forwarding": true } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "ApiVersion": "1.12", "Version": "0.2.2", "GitCommit": "5a2a5cc+CHANGES", "GoVersion": "go1.0.3" } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes `POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "Memory": 0, 
"MemorySwap": 0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling (using since). Docker containers will report the following events: create, destroy, die, export, kill, pause, restart, start, stop, unpause and Docker images will report: untag, delete **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: - **since** – timestamp used for polling - **until** – timestamp used for polling Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images and tags in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. See the [image tarball format](#image-tarball-format) for more details. 
**Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into the docker repository. See the [image tarball format](#image-tarball-format) for more details. **Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing three files: 1. `VERSION`: currently `1.0` - the file format version 2. `json`: detailed layer information, similar to `docker inspect layer_id` 3. `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, there will also be a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` # 3. Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it - Then retry to create the container - Start the container - If you are not in detached mode: - Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 - If in detached mode or only stdin is attached: - Display the container's id ## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. 
## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. $ docker -d -H="192.168.1.9:2375" --api-enable-cors docker-1.10.3/docs/reference/api/docker_remote_api_v1.15.md000066400000000000000000001362571267010174400233510ustar00rootroot00000000000000 # Docker Remote API v1.15 ## 1. Brief introduction - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. # 2. Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Names":["/boring_feynman"], "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "9cd87474be90", "Names":["/coolName"], "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "3176a2479c92", "Names":["/sleepy_dog"], "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", "Ports":[], "SizeRw":12288, "SizeRootFs":0 }, { "Id": "4cb07b47f9fb", "Names":["/running_cat"], "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", "Ports": [], "SizeRw": 12288, "SizeRootFs": 0 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. 
Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. - **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters: - exited=<int> -- containers with exit code of <int> - status=(restarting|running|paused|exited) Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd": [ "date" ], "Entrypoint": "", "Image": "ubuntu", "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "SecurityOpts": [""], "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [] } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id": "f91ddc4b01e079c4481a8340bbbeca4dbd33d6e4a10662e499f8eacbb5bf252b" "Warnings": [] } Json Parameters: - **Hostname** - A string 
value containing the desired hostname to use for the container. - **Domainname** - A string value containing the desired domain name to use for the container. - **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. - **CpuShares** - An integer value containing the CPU Shares for container (ie. the relative weight vs other containers). - **CpuSet** - String value containing the cgroups Cpuset to use. - **AttachStdin** - Boolean value, attaches to stdin. - **AttachStdout** - Boolean value, attaches to stdout. - **AttachStderr** - Boolean value, attaches to stderr. - **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. - **OpenStdin** - Boolean value, opens stdin, - **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. - **Env** - A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]` - **Cmd** - Command to run specified as a string or an array of strings. - **Entrypoint** - Set the entrypoint for the container a string or an array of strings - **Image** - String value containing the image name to use for the container - **Volumes** – An object mapping mountpoint paths (strings) inside the container to empty objects. - **WorkingDir** - A string value containing the working dir for commands to run in. - **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "<port>/<protocol>: {}" }` - **SecurityOpts**: A list of string values to customize labels for MLS systems, such as SELinux. - **HostConfig** - **Binds** – A list of volume bindings for this container. 
Each volume binding is a string of the form `container_path` (to create a new volume for the container), `host_path:container_path` (to bind-mount a host path into the container), or `host_path:container_path:ro` (to make the bind-mount read-only inside the container). - **Links** - A list of links for the container. Each link entry should be in the form of "container_name:alias". - **LxcConf** - LXC specific configurations. These configurations will only work when using the `lxc` execution driver. - **PortBindings** - A map of exposed container ports and the host port they should map to. It should be specified in the form `{ /: [{ "HostPort": "" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. - **Privileged** - Gives the container full access to the host. Specified as a boolean value. - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to be added to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. 
- **NetworkMode** - Sets the networking mode for the container. Supported values are: `bridge`, `host`, and `container:<name|id>` - **Devices** - A list of devices to add to the container specified in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", "Created": "2013-05-07T14:51:42.041847+02:00", "Path": "date", "Args": [], "Config": { "Hostname": "4fa6e0f0c678", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Dns": null, "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" }, "State": { "Running": false, "Pid": 0, "ExitCode": 0, "StartedAt": "2013-05-07T14:51:42.087658+02:00", "Ghost": false }, "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "NetworkSettings": { "IpAddress": "", "IpPrefixLen": 0, "Gateway": "", "Bridge": "", "PortMapping": null }, "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { "Binds": null, "ContainerIDFile": "", "LxcConf": [], "Privileged": false, "PortBindings": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "49153" } ] }, "Links": ["/name:alias"], "PublishAllPorts": false, "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"] } } Status 
Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. **Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ], "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ] } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get stdout and stderr logs from the container ``id`` **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default false - **stdout** – 1/True/true or 0/False/false, show stdout log. Default false - **stderr** – 1/True/true or 0/False/false, show stderr log. Default false - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. 
Default false - **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` Inspect changes on container `id`'s filesystem **Example request**: GET /containers/4fa6e0f0c678/changes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Path": "/dev", "Kind": 0 }, { "Path": "/dev/kmsg", "Kind": 1 }, { "Path": "/test", "Kind": 1 } ] Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Export a container `GET /containers/(id)/export` Export the contents of container `id` **Example request**: GET /containers/4fa6e0f0c678/export HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Resize a container TTY `GET /containers/(id)/resize?h=&w=` Resize the TTY of container `id` **Example request**: GET /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Length: 0 Content-Type: text/plain; charset=utf-8 Status Codes: - **200** – no error - **404** – No such container - **500** – bad file descriptor ### Start a container `POST /containers/(id)/start` Start the container `id` **Example request**: POST /containers/(id)/start HTTP/1.1 Content-Type: application/json { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [] } **Example response**: HTTP/1.1 204 No Content Json 
Parameters: - **Binds** – A list of volume bindings for this container. Each volume binding is a string of the form `container_path` (to create a new volume for the container), `host_path:container_path` (to bind-mount a host path into the container), or `host_path:container_path:ro` (to make the bind-mount read-only inside the container). - **Links** - A list of links for the container. Each link entry should be of the form "container_name:alias". - **LxcConf** - LXC specific configurations. These configurations will only work when using the `lxc` execution driver. - **PortBindings** - A map of exposed container ports and the host port they should map to. It should be specified in the form `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. - **Privileged** - Gives the container full access to the host. Specified as a boolean value. - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `<container name>[:<ro|rw>]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **NetworkMode** - Sets the networking mode for the container. 
Supported values are: `bridge`, `host`, and `container:` - **Devices** - A list of devices to add to the container specified in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **signal** - Signal to send to the container: integer or string like "SIGINT". When not set, SIGKILL is assumed and the call will wait for the container to exit.
Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When using the TTY setting is enabled in [`POST /containers/create`](#create-a-container), the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. The format is a **Header** and a **Payload** (frame). **HEADER** The header will contain the information on which stream write the stream (stdout or stderr). 
It also contains the size of the associated frame encoded on the last 4 bytes (uint32). It is encoded on the first 8 bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) - 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of the uint32 size encoded as big endian. **PAYLOAD** The payload is the raw stream. **IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read 8 bytes 2. Choose stdout or stderr depending on the first byte 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr.
Default false Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - **force** - 1/True/true or 0/False/false, Kill then remove the container. Default false Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275 }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, 
"Size": 24653, "VirtualSize": 180116135 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - dangling=true - **filter** - only return images with the specified name ### Create an image `POST /images/create` Create an image, either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. Query Parameters: - **fromImage** – name of the image to pull - **fromSrc** – source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. 
- **repo** – repository - **tag** – tag - **registry** – the registry to pull from Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/ubuntu/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Created": "2013-03-23T22:24:18.818426-07:00", "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", "ContainerConfig": { "Hostname": "", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "Tty": true, "OpenStdin": true, "StdinOnce": false, "Env": null, "Cmd": ["/bin/bash"], "Dns": null, "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" }, "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Parent": "27cf784147099545", "Size": 6824592 } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "b750fe79269d", "Created": 1364102658, "CreatedBy": "/bin/bash" }, { "Id": "27cf78414709", "Created": 1364068391, "CreatedBy": "" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} {"error": "Invalid..."} ... 
If you wish to push an image on to a private registry, that image must already have been tagged into a repository which references that registry host name and port. This repository name should then be used in the URL. This mirrors the flow of the CLI. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – the tag to associate with the image on the registry, optional Request Headers: - **X-Registry-Auth** – include a base64-encoded AuthConfig object. Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker Hub](https://hub.docker.com). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. 
**Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "description": "", "is_official": false, "is_automated": false, "name": "wma55/u1210sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "jdswinbank/sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "vgauthier/sshd", "star_count": 0 } ... ] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Build an image from Dockerfile via stdin `POST /build` Build an image from Dockerfile via stdin **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz. The archive must include a file called `Dockerfile` at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). Query Parameters: - **t** – repository name (and optionally a tag) to be applied to the resulting image in case of success - **remote** – git or HTTP/HTTPS URI build source - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) - **forcerm** - always remove intermediate containers (includes rm) Request Headers: - **Content-type** – should be set to `"application/tar"`. 
- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: - **200** – no error - **500** – server error ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Containers": 11, "Images": 16, "Driver": "btrfs", "ExecutionDriver": "native-0.1", "KernelVersion": "3.12.0-1-amd64", "Debug": false, "NFd": 11, "NGoroutines": 21, "NEventsListener": 0, "InitPath": "/usr/bin/docker", "IndexServerAddress": ["https://index.docker.io/v1/"], "MemoryLimit": true, "SwapLimit": false, "IPv4Forwarding": true } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "ApiVersion": "1.12", "Version": "0.2.2", "GitCommit": "5a2a5cc+CHANGES", "GoVersion": "go1.0.3" } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes `POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "Memory": 0,
"MemorySwap": 0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling (using since). Docker containers will report the following events: create, destroy, die, export, kill, pause, restart, start, stop, unpause and Docker images will report: untag, delete **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: - **since** – timestamp used for polling - **until** – timestamp used for polling Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. 
ubuntu:latest), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images. `GET /images/get` Get a tarball containing all images and metadata for one or more repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into the docker repository. See the [image tarball format](#image-tarball-format) for more details. **Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing three files: 1. `VERSION`: currently `1.0` - the file format version 2. `json`: detailed layer information, similar to `docker inspect layer_id` 3. 
`layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, there will also be a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` ### Exec Create `POST /containers/(id)/exec` Sets up an exec instance in a running container `id` **Example request**: POST /containers/e90e34656806/exec HTTP/1.1 Content-Type: application/json { "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "Cmd": [ "date" ], } **Example response**: HTTP/1.1 201 OK Content-Type: application/json { "Id": "f90e34656806" } Json Parameters: - **AttachStdin** - Boolean value, attaches to stdin of the exec command. - **AttachStdout** - Boolean value, attaches to stdout of the exec command. - **AttachStderr** - Boolean value, attaches to stderr of the exec command. - **Tty** - Boolean value to allocate a pseudo-TTY - **Cmd** - Command to run specified as a string or an array of strings. Status Codes: - **201** – no error - **404** – no such container ### Exec Start `POST /exec/(id)/start` Starts a previously set up exec instance `id`. If `detach` is true, this API returns after starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. 
**Example request**: POST /exec/e90e34656806/start HTTP/1.1 Content-Type: application/json { "Detach": false, "Tty": false, } **Example response**: HTTP/1.1 201 OK Content-Type: application/json {{ STREAM }} Json Parameters: - **Detach** - Detach from the exec command - **Tty** - Boolean value to allocate a pseudo-TTY Status Codes: - **200** – no error - **404** – no such exec instance **Stream details**: Similar to the stream behavior of `POST /container/(id)/attach` API ### Exec Resize `POST /exec/(id)/resize` Resizes the tty session used by the exec command `id`. This API is valid only if `tty` was specified as part of creating and starting the exec command. **Example request**: POST /exec/e90e34656806/resize HTTP/1.1 Content-Type: plain/text **Example response**: HTTP/1.1 201 OK Content-Type: plain/text Query Parameters: - **h** – height of tty session - **w** – width Status Codes: - **201** – no error - **404** – no such exec instance # 3. Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it - Then retry to create the container - Start the container - If you are not in detached mode: - Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 - If in detached mode or only stdin is attached: - Display the container's id ## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. ## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. $ docker -d -H="192.168.1.9:2375" --api-enable-cors docker-1.10.3/docs/reference/api/docker_remote_api_v1.16.md000066400000000000000000001410731267010174400233420ustar00rootroot00000000000000 # Docker Remote API v1.16 ## 1. 
Brief introduction - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. # 2. Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Names":["/boring_feynman"], "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "9cd87474be90", "Names":["/coolName"], "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "3176a2479c92", "Names":["/sleep_dog"], "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", "Ports":[], "SizeRw":12288, "SizeRootFs":0 }, { "Id": "4cb07b47f9fb", "Names":["/running_cat"], "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", "Ports": [], "SizeRw": 12288, "SizeRootFs": 0 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. 
- **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters: - exited=<int> -- containers with exit code of <int> - status=(restarting|running|paused|exited) Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd": [ "date" ], "Entrypoint": "", "Image": "ubuntu", "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "SecurityOpts": [""], "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [] } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id":"e90e34656806" "Warnings":[] } Json Parameters: - **Hostname** - A string value containing the desired hostname to use for the container. - **Domainname** - A string value containing the desired domain name to use for the container. - **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. 
- **CpuShares** - An integer value containing the CPU Shares for container (ie. the relative weight vs other containers). **CpuSet** - String value containing the cgroups Cpuset to use. - **AttachStdin** - Boolean value, attaches to stdin. - **AttachStdout** - Boolean value, attaches to stdout. - **AttachStderr** - Boolean value, attaches to stderr. - **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. - **OpenStdin** - Boolean value, opens stdin, - **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. - **Env** - A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]` - **Cmd** - Command to run specified as a string or an array of strings. - **Entrypoint** - Set the entrypoint for the container a string or an array of strings - **Image** - String value containing the image name to use for the container - **Volumes** – An object mapping mountpoint paths (strings) inside the container to empty objects. - **WorkingDir** - A string value containing the working dir for commands to run in. - **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **SecurityOpts**: A list of string values to customize labels for MLS systems, such as SELinux. - **HostConfig** - **Binds** – A list of volume bindings for this container. Each volume binding is a string of the form `container_path` (to create a new volume for the container), `host_path:container_path` (to bind-mount a host path into the container), or `host_path:container_path:ro` (to make the bind-mount read-only inside the container). - **Links** - A list of links for the container. Each link entry should be in the form of "container_name:alias". - **LxcConf** - LXC specific configurations. These configurations will only work when using the `lxc` execution driver. 
- **PortBindings** - A map of exposed container ports and the host port they should map to. It should be specified in the form `{ /: [{ "HostPort": "" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. - **Privileged** - Gives the container full access to the host. Specified as a boolean value. - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to be added to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **NetworkMode** - Sets the networking mode for the container. Supported values are: `bridge`, `host`, and `container:` - **Devices** - A list of devices to add to the container specified in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. 
Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", "Created": "2013-05-07T14:51:42.041847+02:00", "Path": "date", "Args": [], "Config": { "Hostname": "4fa6e0f0c678", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Dns": null, "Image": "ubuntu", "Volumes": {}, "VolumesFrom": "", "WorkingDir": "" }, "State": { "Running": false, "Pid": 0, "ExitCode": 0, "StartedAt": "2013-05-07T14:51:42.087658+02:01360", "Ghost": false }, "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "NetworkSettings": { "IpAddress": "", "IpPrefixLen": 0, "Gateway": "", "Bridge": "", "PortMapping": null }, "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { "Binds": null, "ContainerIDFile": "", "LxcConf": [], "Privileged": false, "PortBindings": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "49153" } ] }, "Links": ["/name:alias"], "PublishAllPorts": false, "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"] } } Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. 
**Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ], "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ] } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get stdout and stderr logs from the container ``id`` **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default false - **stdout** – 1/True/true or 0/False/false, show stdout log. Default false - **stderr** – 1/True/true or 0/False/false, show stderr log. Default false - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. Default false - **tail** – Output specified number of lines at the end of logs: `all` or ``. 
Default all Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` Inspect changes on container `id`'s filesystem **Example request**: GET /containers/4fa6e0f0c678/changes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Path": "/dev", "Kind": 0 }, { "Path": "/dev/kmsg", "Kind": 1 }, { "Path": "/test", "Kind": 1 } ] Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Export a container `GET /containers/(id)/export` Export the contents of container `id` **Example request**: GET /containers/4fa6e0f0c678/export HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Resize a container TTY `POST /containers/(id)/resize?h=&w=` Resize the TTY for container with `id`. The container must be restarted for the resize to take effect. **Example request**: POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Length: 0 Content-Type: text/plain; charset=utf-8 Status Codes: - **200** – no error - **404** – No such container - **500** – Cannot resize container ### Start a container `POST /containers/(id)/start` Start the container `id` > **Note**: > For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. > See [create a container](#create-a-container) for details. 
**Example request**: POST /containers/(id)/start HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **signal** - Signal to send to the container: integer or string like "SIGINT". When not set, SIGKILL is assumed and the call waits for the container to exit. 
Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When the TTY setting is enabled in [`POST /containers/create` ](#create-a-container), the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. The format is a **Header** and a **Payload** (frame). **HEADER** The header contains the information about which stream the payload belongs to (stdout or stderr). 
It also contains the size of the associated frame encoded on the last 4 bytes (uint32). It is encoded on the first 8 bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) - 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of the uint32 size encoded as big endian. **PAYLOAD** The payload is the raw stream. **IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read 8 bytes 2. choose stdout or stderr depending on the first byte 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. 
Default false Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - **force** - 1/True/true or 0/False/false, Kill then remove the container. Default false Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275 }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, 
"Size": 24653, "VirtualSize": 180116135 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - dangling=true - **filter** - only return images with the specified name ### Create an image `POST /images/create` Create an image, either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. Query Parameters: - **fromImage** – name of the image to pull - **fromSrc** – source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. 
- **repo** – repository - **tag** – tag - **registry** – the registry to pull from Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/ubuntu/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Created": "2013-03-23T22:24:18.818426-07:00", "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", "ContainerConfig": { "Hostname": "", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "Tty": true, "OpenStdin": true, "StdinOnce": false, "Env": null, "Cmd": ["/bin/bash"], "Dns": null, "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" }, "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Parent": "27cf784147099545", "Size": 6824592 } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "b750fe79269d", "Created": 1364102658, "CreatedBy": "/bin/bash" }, { "Id": "27cf78414709", "Created": 1364068391, "CreatedBy": "" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} {"error": "Invalid..."} ... 
If you wish to push an image on to a private registry, that image must already have been tagged into a repository which references that registry host name and port. This repository name should then be used in the URL. This mirrors the flow of the CLI. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – the tag to associate with the image on the registry, optional Request Headers: - **X-Registry-Auth** – include a base64-encoded AuthConfig object. Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker Hub](https://hub.docker.com). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. 
**Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "description": "", "is_official": false, "is_automated": false, "name": "wma55/u1210sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "jdswinbank/sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "vgauthier/sshd", "star_count": 0 } ... ] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Build an image from Dockerfile via stdin `POST /build` Build an image from Dockerfile via stdin **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz. The archive must include a file called `Dockerfile` at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). Query Parameters: - **t** – repository name (and optionally a tag) to be applied to the resulting image in case of success - **remote** – git or HTTP/HTTPS URI build source - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **pull** - attempt to pull the image even if an older image exists locally - **rm** - remove intermediate containers after a successful build (default behavior) - **forcerm** - always remove intermediate containers (includes rm) Request Headers: - **Content-type** – should be set to `"application/tar"`. 
- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: - **200** – no error - **500** – server error ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Containers":11, "Images":16, "Driver":"btrfs", "DriverStatus": [[""]], "ExecutionDriver":"native-0.1", "KernelVersion":"3.12.0-1-amd64", "NCPU":1, "MemTotal":2099236864, "Name":"prod-server-42", "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", "Debug":false, "NFd": 11, "NGoroutines":21, "NEventsListener":0, "InitPath":"/usr/bin/docker", "InitSha1":"", "IndexServerAddress":["https://index.docker.io/v1/"], "MemoryLimit":true, "SwapLimit":false, "IPv4Forwarding":true, "Labels":["storage=ssd"], "DockerRootDir": "/var/lib/docker", "OperatingSystem": "Boot2Docker" } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "ApiVersion": "1.12", "Version": "0.2.2", "GitCommit": "5a2a5cc+CHANGES", "GoVersion": "go1.0.3" } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes 
`POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling (using since). 
Docker containers will report the following events: create, destroy, die, export, kill, pause, restart, start, stop, unpause and Docker images will report: untag, delete **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: - **since** – timestamp used for polling - **until** – timestamp used for polling - **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: - event=<string> -- event to filter - image=<string> -- image to filter - container=<string> -- container to filter Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images. `GET /images/get` Get a tarball containing all images and metadata for one or more repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. 
ubuntu:latest), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into the docker repository. See the [image tarball format](#image-tarball-format) for more details. **Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing three files: 1. `VERSION`: currently `1.0` - the file format version 2. `json`: detailed layer information, similar to `docker inspect layer_id` 3. `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, there will also be a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` ### Exec Create `POST /containers/(id)/exec` Sets up an exec instance in a running container `id` **Example request**: POST /containers/e90e34656806/exec HTTP/1.1 Content-Type: application/json { "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "Cmd": [ "date" ], } **Example response**: HTTP/1.1 201 OK Content-Type: application/json { "Id": "f90e34656806" } Json Parameters: - **AttachStdin** - Boolean value, attaches to stdin of the exec command. - **AttachStdout** - Boolean value, attaches to stdout of the exec command. - **AttachStderr** - Boolean value, attaches to stderr of the exec command. - **Tty** - Boolean value to allocate a pseudo-TTY - **Cmd** - Command to run specified as a string or an array of strings. Status Codes: - **201** – no error - **404** – no such container ### Exec Start `POST /exec/(id)/start` Starts a previously set up exec instance `id`. If `detach` is true, this API returns after starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. **Example request**: POST /exec/e90e34656806/start HTTP/1.1 Content-Type: application/json { "Detach": false, "Tty": false, } **Example response**: HTTP/1.1 201 OK Content-Type: application/json {{ STREAM }} Json Parameters: - **Detach** - Detach from the exec command - **Tty** - Boolean value to allocate a pseudo-TTY Status Codes: - **200** – no error - **404** – no such exec instance **Stream details**: Similar to the stream behavior of `POST /container/(id)/attach` API ### Exec Resize `POST /exec/(id)/resize` Resizes the tty session used by the exec command `id`. This API is valid only if `tty` was specified as part of creating and starting the exec command. 
**Example request**: POST /exec/e90e34656806/resize HTTP/1.1 Content-Type: plain/text **Example response**: HTTP/1.1 201 OK Content-Type: plain/text Query Parameters: - **h** – height of tty session - **w** – width Status Codes: - **201** – no error - **404** – no such exec instance ### Exec Inspect `GET /exec/(id)/json` Return low-level information about the exec command `id`. **Example request**: GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: plain/text { "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", "Running" : false, "ExitCode" : 2, "ProcessConfig" : { "privileged" : false, "user" : "", "tty" : false, "entrypoint" : "sh", "arguments" : [ "-c", "exit 2" ] }, "OpenStdin" : false, "OpenStderr" : false, "OpenStdout" : false, "Container" : { "State" : { "Running" : true, "Paused" : false, "Restarting" : false, "OOMKilled" : false, "Pid" : 3650, "ExitCode" : 0, "Error" : "", "StartedAt" : "2014-11-17T22:26:03.717657531Z", "FinishedAt" : "0001-01-01T00:00:00Z" }, "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", "Created" : "2014-11-17T22:26:03.626304998Z", "Path" : "date", "Args" : [], "Config" : { "Hostname" : "8f177a186b97", "Domainname" : "", "User" : "", "Memory" : 0, "MemorySwap" : 0, "CpuShares" : 0, "Cpuset" : "", "AttachStdin" : false, "AttachStdout" : false, "AttachStderr" : false, "PortSpecs" : null, "ExposedPorts" : null, "Tty" : false, "OpenStdin" : false, "StdinOnce" : false, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd" : [ "date" ], "Image" : "ubuntu", "Volumes" : null, "WorkingDir" : "", "Entrypoint" : null, "NetworkDisabled" : false, "MacAddress" : "", "OnBuild" : null, "SecurityOpt" : null }, "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", "NetworkSettings" : { "IPAddress" : "172.17.0.2", "IPPrefixLen" : 16, "MacAddress" : 
"02:42:ac:11:00:02", "Gateway" : "172.17.42.1", "Bridge" : "docker0", "PortMapping" : null, "Ports" : {} }, "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", "Name" : "/test", "Driver" : "aufs", "ExecDriver" : "native-0.2", "MountLabel" : "", "ProcessLabel" : "", "AppArmorProfile" : "", "RestartCount" : 0, "Volumes" : {}, "VolumesRW" : {} } } Status Codes: - **200** – no error - **404** – no such exec instance - **500** - server error # 3. Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it - Then retry to create the container - Start the container - If you are not in detached mode: - Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 - If in detached mode or only stdin is attached: - Display the container's id ## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. ## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. $ docker -d -H="192.168.1.9:2375" --api-enable-cors docker-1.10.3/docs/reference/api/docker_remote_api_v1.17.md000066400000000000000000001504151267010174400233430ustar00rootroot00000000000000 # Docker Remote API v1.17 ## 1. Brief introduction - The Remote API has replaced `rcli`. 
- The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. # 2. Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Names":["/boring_feynman"], "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "9cd87474be90", "Names":["/coolName"], "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "3176a2479c92", "Names":["/sleepy_dog"], "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", "Ports":[], "SizeRw":12288, "SizeRootFs":0 }, { "Id": "4cb07b47f9fb", "Names":["/running_cat"], "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", "Ports": [], "SizeRw": 12288, "SizeRootFs": 0 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. 
- **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters: - exited=<int> -- containers with exit code of <int> - status=(restarting|running|paused|exited) Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd": [ "date" ], "Entrypoint": "", "Image": "ubuntu", "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [], "SecurityOpt": [""] } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id":"e90e34656806", "Warnings":[] } Json Parameters: - **Hostname** - A string value containing the desired hostname to use for the container. - **Domainname** - A string value containing the desired domain name to use for the container. - **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. 
- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **CpuShares** - An integer value containing the CPU Shares for container (ie. the relative weight vs other containers). **CpuSet** - String value containing the cgroups Cpuset to use. - **AttachStdin** - Boolean value, attaches to stdin. - **AttachStdout** - Boolean value, attaches to stdout. - **AttachStderr** - Boolean value, attaches to stderr. - **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. - **OpenStdin** - Boolean value, opens stdin, - **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. - **Env** - A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]` - **Cmd** - Command to run specified as a string or an array of strings. - **Entrypoint** - Set the entrypoint for the container a string or an array of strings - **Image** - String value containing the image name to use for the container - **Volumes** – An object mapping mountpoint paths (strings) inside the container to empty objects. - **WorkingDir** - A string value containing the working dir for commands to run in. - **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **HostConfig** - **Binds** – A list of volume bindings for this container. Each volume binding is a string of the form `container_path` (to create a new volume for the container), `host_path:container_path` (to bind-mount a host path into the container), or `host_path:container_path:ro` (to make the bind-mount read-only inside the container). - **Links** - A list of links for the container. Each link entry should be in the form of "container_name:alias". - **LxcConf** - LXC specific configurations. 
These configurations will only work when using the `lxc` execution driver. - **PortBindings** - A map of exposed container ports and the host port they should map to. It should be specified in the form `{ /: [{ "HostPort": "" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. - **Privileged** - Gives the container full access to the host. Specified as a boolean value. - **ReadonlyRootfs** - Mount the container's root filesystem as read only. Specified as a boolean value. - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to be added to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **NetworkMode** - Sets the networking mode for the container. 
Supported values are: `bridge`, `host`, and `container:` - **Devices** - A list of devices to add to the container specified in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` - **SecurityOpt**: A list of string values to customize labels for MLS systems, such as SELinux. Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "AppArmorProfile": "", "Args": [ "-c", "exit 9" ], "Config": { "AttachStderr": true, "AttachStdin": false, "AttachStdout": true, "Cmd": [ "/bin/sh", "-c", "exit 9" ], "CpuShares": 0, "Cpuset": "", "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts": null, "Hostname": "ba033ac44011", "Image": "ubuntu", "MacAddress": "", "Memory": 0, "MemorySwap": 0, "NetworkDisabled": false, "OnBuild": null, "OpenStdin": false, "PortSpecs": null, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "Created": "2015-01-06T15:47:31.485331387Z", "Driver": "devicemapper", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { "Binds": null, "CapAdd": null, "CapDrop": null, "ContainerIDFile": "", "Devices": [], "Dns": null, "DnsSearch": null, "ExtraHosts": null, "IpcMode": "", "Links": null, "LxcConf": [], "NetworkMode": "bridge", "PortBindings": {}, "Privileged": false, "ReadonlyRootfs": false, "PublishAllPorts": false, "RestartPolicy": { "MaximumRetryCount": 2, "Name": "on-failure" }, "SecurityOpt": null, "VolumesFrom": null }, "HostnamePath": 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", "MountLabel": "", "Name": "/boring_euclid", "NetworkSettings": { "Bridge": "", "Gateway": "", "IPAddress": "", "IPPrefixLen": 0, "MacAddress": "", "PortMapping": null, "Ports": null }, "Path": "/bin/sh", "ProcessLabel": "", "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", "RestartCount": 1, "State": { "Error": "", "ExitCode": 9, "FinishedAt": "2015-01-06T15:47:32.080254511Z", "OOMKilled": false, "Paused": false, "Pid": 0, "Restarting": false, "Running": false, "StartedAt": "2015-01-06T15:47:32.072697474Z" }, "Volumes": {}, "VolumesRW": {} } Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. 
**Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ], "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ] } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get stdout and stderr logs from the container ``id`` **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default false - **stdout** – 1/True/true or 0/False/false, show stdout log. Default false - **stderr** – 1/True/true or 0/False/false, show stderr log. Default false - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. Default false - **tail** – Output specified number of lines at the end of logs: `all` or ``. 
Default all Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **404** – no such container - **500** – server error ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` Inspect changes on container `id`'s filesystem **Example request**: GET /containers/4fa6e0f0c678/changes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Path": "/dev", "Kind": 0 }, { "Path": "/dev/kmsg", "Kind": 1 }, { "Path": "/test", "Kind": 1 } ] Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Export a container `GET /containers/(id)/export` Export the contents of container `id` **Example request**: GET /containers/4fa6e0f0c678/export HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container stats based on resource usage `GET /containers/(id)/stats` This endpoint returns a live stream of a container's resource usage statistics. 
**Example request**: GET /containers/redis1/stats HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "read" : "2015-01-08T22:57:31.547920715Z", "network" : { "rx_dropped" : 0, "rx_bytes" : 648, "rx_errors" : 0, "tx_packets" : 8, "tx_dropped" : 0, "rx_packets" : 8, "tx_errors" : 0, "tx_bytes" : 648 }, "memory_stats" : { "stats" : { "total_pgmajfault" : 0, "cache" : 0, "mapped_file" : 0, "total_inactive_file" : 0, "pgpgout" : 414, "rss" : 6537216, "total_mapped_file" : 0, "writeback" : 0, "unevictable" : 0, "pgpgin" : 477, "total_unevictable" : 0, "pgmajfault" : 0, "total_rss" : 6537216, "total_rss_huge" : 6291456, "total_writeback" : 0, "total_inactive_anon" : 0, "rss_huge" : 6291456, "hierarchical_memory_limit" : 67108864, "total_pgfault" : 964, "total_active_file" : 0, "active_anon" : 6537216, "total_active_anon" : 6537216, "total_pgpgout" : 414, "total_cache" : 0, "inactive_anon" : 0, "active_file" : 0, "pgfault" : 964, "inactive_file" : 0, "total_pgpgin" : 477 }, "max_usage" : 6651904, "usage" : 6537216, "failcnt" : 0, "limit" : 67108864 }, "blkio_stats" : {}, "cpu_stats" : { "cpu_usage" : { "percpu_usage" : [ 16970827, 1839451, 7107380, 10571290 ], "usage_in_usermode" : 10000000, "total_usage" : 36488948, "usage_in_kernelmode" : 20000000 }, "system_cpu_usage" : 20091722000000000, "throttling_data" : {} } } Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Resize a container TTY `POST /containers/(id)/resize?h=&w=` Resize the TTY for container with `id`. The container must be restarted for the resize to take effect. 
**Example request**: POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Length: 0 Content-Type: text/plain; charset=utf-8 Status Codes: - **200** – no error - **404** – No such container - **500** – Cannot resize container ### Start a container `POST /containers/(id)/start` Start the container `id` > **Note**: > For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. > See [create a container](#create-a-container) for details. **Example request**: POST /containers/(id)/start HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **signal** - Signal to send to the container: integer or string like "SIGINT". When not set, SIGKILL is assumed and the call waits for the container to exit. 
Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Rename a container `POST /containers/(id)/rename` Rename the container `id` to a `new_name` **Example request**: POST /containers/e90e34656806/rename?name=new_name HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **name** – new name for the container Status Codes: - **204** – no error - **404** – no such container - **409** - conflict name already assigned - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. 
Default false Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When the TTY setting is enabled in [`POST /containers/create` ](#create-a-container), the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. The format is a **Header** and a **Payload** (frame). **HEADER** The header contains the information on which stream the payload is written (stdout or stderr). It also contains the size of the associated frame encoded on the last 4 bytes (uint32). It is encoded on the first 8 bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) - 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of the uint32 size encoded as big endian. **PAYLOAD** The payload is the raw stream. **IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read 8 bytes 2. Choose stdout or stderr depending on the first byte 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. 
Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - **force** - 1/True/true or 0/False/false, Kill then remove the container. 
Default false Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275 }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, "Size": 24653, "VirtualSize": 180116135 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - dangling=true - **filter** - only return images with the specified name ### Build image from a Dockerfile `POST /build` Build an image from a Dockerfile **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz. The archive must include a build instructions file, typically called `Dockerfile` at the root of the archive. 
The `dockerfile` parameter may be used to specify a different build instructions file by having its value be the path to the alternate build instructions file to use. The archive may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). Query Parameters: - **dockerfile** - path within the build context to the Dockerfile - **t** – repository name (and optionally a tag) to be applied to the resulting image in case of success - **remote** – git or HTTP/HTTPS URI build source - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **pull** - attempt to pull the image even if an older image exists locally - **rm** - remove intermediate containers after a successful build (default behavior) - **forcerm** - always remove intermediate containers (includes rm) Request Headers: - **Content-type** – should be set to `"application/tar"`. - **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: - **200** – no error - **500** – server error ### Create an image `POST /images/create` Create an image, either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. Query Parameters: - **fromImage** – name of the image to pull - **fromSrc** – source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. 
- **repo** – repository - **tag** – tag - **registry** – the registry to pull from Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/ubuntu/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Created": "2013-03-23T22:24:18.818426-07:00", "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", "ContainerConfig": { "Hostname": "", "User": "", "Memory": 0, "MemorySwap": 0, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "Tty": true, "OpenStdin": true, "StdinOnce": false, "Env": null, "Cmd": ["/bin/bash"], "Dns": null, "Image": "ubuntu", "Volumes": null, "VolumesFrom": "", "WorkingDir": "" }, "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Parent": "27cf784147099545", "Size": 6824592 } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "b750fe79269d", "Created": 1364102658, "CreatedBy": "/bin/bash" }, { "Id": "27cf78414709", "Created": 1364068391, "CreatedBy": "" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} {"error": "Invalid..."} ... 
If you wish to push an image on to a private registry, that image must already have been tagged into a repository which references that registry host name and port. This repository name should then be used in the URL. This mirrors the flow of the CLI. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – the tag to associate with the image on the registry, optional Request Headers: - **X-Registry-Auth** – include a base64-encoded AuthConfig object. Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker Hub](https://hub.docker.com). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. 
**Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "description": "", "is_official": false, "is_automated": false, "name": "wma55/u1210sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "jdswinbank/sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "vgauthier/sshd", "star_count": 0 } ... ] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Containers":11, "Images":16, "Driver":"btrfs", "DriverStatus": [[""]], "ExecutionDriver":"native-0.1", "KernelVersion":"3.12.0-1-amd64", "NCPU":1, "MemTotal":2099236864, "Name":"prod-server-42", "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", "Debug":false, "NFd": 11, "NGoroutines":21, "NEventsListener":0, "InitPath":"/usr/bin/docker", "InitSha1":"", "IndexServerAddress":["https://index.docker.io/v1/"], "MemoryLimit":true, "SwapLimit":false, "IPv4Forwarding":true, "Labels":["storage=ssd"], "DockerRootDir": "/var/lib/docker", "OperatingSystem": "Boot2Docker" } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK 
Content-Type: application/json { "ApiVersion": "1.12", "Version": "0.2.2", "GitCommit": "5a2a5cc+CHANGES", "GoVersion": "go1.0.3" } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes `POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling (using since). 
Docker containers will report the following events: create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause and Docker images will report: untag, delete **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: - **since** – timestamp used for polling - **until** – timestamp used for polling - **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: - event=<string> -- event to filter - image=<string> -- image to filter - container=<string> -- container to filter Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images. `GET /images/get` Get a tarball containing all images and metadata for one or more repositories. 
For each value of the `names` parameter: if it is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into the docker repository. See the [image tarball format](#image-tarball-format) for more details. **Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing three files: 1. `VERSION`: currently `1.0` - the file format version 2. `json`: detailed layer information, similar to `docker inspect layer_id` 3. `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, there will also be a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` ### Exec Create `POST /containers/(id)/exec` Sets up an exec instance in a running container `id` **Example request**: POST /containers/e90e34656806/exec HTTP/1.1 Content-Type: application/json { "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "Cmd": [ "date" ], } **Example response**: HTTP/1.1 201 OK Content-Type: application/json { "Id": "f90e34656806" } Json Parameters: - **AttachStdin** - Boolean value, attaches to stdin of the exec command. - **AttachStdout** - Boolean value, attaches to stdout of the exec command. - **AttachStderr** - Boolean value, attaches to stderr of the exec command. - **Tty** - Boolean value to allocate a pseudo-TTY - **Cmd** - Command to run specified as a string or an array of strings. Status Codes: - **201** – no error - **404** – no such container ### Exec Start `POST /exec/(id)/start` Starts a previously set up exec instance `id`. If `detach` is true, this API returns after starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. **Example request**: POST /exec/e90e34656806/start HTTP/1.1 Content-Type: application/json { "Detach": false, "Tty": false, } **Example response**: HTTP/1.1 201 OK Content-Type: application/json {{ STREAM }} Json Parameters: - **Detach** - Detach from the exec command - **Tty** - Boolean value to allocate a pseudo-TTY Status Codes: - **200** – no error - **404** – no such exec instance **Stream details**: Similar to the stream behavior of `POST /container/(id)/attach` API ### Exec Resize `POST /exec/(id)/resize` Resizes the tty session used by the exec command `id`. This API is valid only if `tty` was specified as part of creating and starting the exec command. 
**Example request**: POST /exec/e90e34656806/resize HTTP/1.1 Content-Type: text/plain **Example response**: HTTP/1.1 201 OK Content-Type: text/plain Query Parameters: - **h** – height of tty session - **w** – width Status Codes: - **201** – no error - **404** – no such exec instance ### Exec Inspect `GET /exec/(id)/json` Return low-level information about the exec command `id`. **Example request**: GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: plain/text { "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", "Running" : false, "ExitCode" : 2, "ProcessConfig" : { "privileged" : false, "user" : "", "tty" : false, "entrypoint" : "sh", "arguments" : [ "-c", "exit 2" ] }, "OpenStdin" : false, "OpenStderr" : false, "OpenStdout" : false, "Container" : { "State" : { "Running" : true, "Paused" : false, "Restarting" : false, "OOMKilled" : false, "Pid" : 3650, "ExitCode" : 0, "Error" : "", "StartedAt" : "2014-11-17T22:26:03.717657531Z", "FinishedAt" : "0001-01-01T00:00:00Z" }, "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", "Created" : "2014-11-17T22:26:03.626304998Z", "Path" : "date", "Args" : [], "Config" : { "Hostname" : "8f177a186b97", "Domainname" : "", "User" : "", "Memory" : 0, "MemorySwap" : 0, "CpuShares" : 0, "Cpuset" : "", "AttachStdin" : false, "AttachStdout" : false, "AttachStderr" : false, "PortSpecs" : null, "ExposedPorts" : null, "Tty" : false, "OpenStdin" : false, "StdinOnce" : false, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd" : [ "date" ], "Image" : "ubuntu", "Volumes" : null, "WorkingDir" : "", "Entrypoint" : null, "NetworkDisabled" : false, "MacAddress" : "", "OnBuild" : null, "SecurityOpt" : null }, "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", "NetworkSettings" : { "IPAddress" : "172.17.0.2", "IPPrefixLen" : 16, "MacAddress" : 
"02:42:ac:11:00:02", "Gateway" : "172.17.42.1", "Bridge" : "docker0", "PortMapping" : null, "Ports" : {} }, "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", "Name" : "/test", "Driver" : "aufs", "ExecDriver" : "native-0.2", "MountLabel" : "", "ProcessLabel" : "", "AppArmorProfile" : "", "RestartCount" : 0, "Volumes" : {}, "VolumesRW" : {} } } Status Codes: - **200** – no error - **404** – no such exec instance - **500** - server error # 3. Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it - Then retry to create the container - Start the container - If you are not in detached mode: - Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 - If in detached mode or only stdin is attached: - Display the container's id ## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. To hint potential proxies about connection hijacking, Docker client sends connection upgrade headers similarly to websocket. Upgrade: tcp Connection: Upgrade When Docker daemon detects the `Upgrade` header, it will switch its status code from **200 OK** to **101 UPGRADED** and resend the same headers. This might change in the future. ## 3.3 CORS Requests To set cross origin requests to the remote api, please add flag "--api-enable-cors" when running docker in daemon mode. 
$ docker -d -H="192.168.1.9:2375" --api-enable-cors docker-1.10.3/docs/reference/api/docker_remote_api_v1.18.md000066400000000000000000001631231267010174400233440ustar00rootroot00000000000000 # Docker Remote API v1.18 ## 1. Brief introduction - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. # 2. Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Names":["/boring_feynman"], "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "9cd87474be90", "Names":["/coolName"], "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "3176a2479c92", "Names":["/sleepy_dog"], "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", "Ports":[], "Labels": {}, "SizeRw":12288, "SizeRootFs":0 }, { "Id": "4cb07b47f9fb", "Names":["/running_cat"], "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. 
Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. - **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters: - exited=<int> -- containers with exit code of <int> - status=(restarting|running|paused|exited) - label=`key` or `label="key=value"` of a container label Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd": [ "date" ], "Entrypoint": "", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "CpusetCpus": "0,1", "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [], "Ulimits": [{}], "LogConfig": { "Type": 
"json-file", Config: {} }, "SecurityOpt": [""], "CgroupParent": "" } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id":"e90e34656806", "Warnings":[] } Json Parameters: - **Hostname** - A string value containing the desired hostname to use for the container. - **Domainname** - A string value containing the desired domain name to use for the container. - **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **CpuShares** - An integer value containing the CPU Shares for container (ie. the relative weight vs other containers). - **Cpuset** - The same as CpusetCpus, but deprecated, please don't use. - **CpusetCpus** - String value containing the cgroups CpusetCpus to use. - **AttachStdin** - Boolean value, attaches to stdin. - **AttachStdout** - Boolean value, attaches to stdout. - **AttachStderr** - Boolean value, attaches to stderr. - **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. - **OpenStdin** - Boolean value, opens stdin, - **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. - **Env** - A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]` - **Labels** - Adds a map of labels that to a container. To specify a map: `{"key":"value"[,"key2":"value2"]}` - **Cmd** - Command to run specified as a string or an array of strings. - **Entrypoint** - Set the entrypoint for the container a string or an array of strings - **Image** - String value containing the image name to use for the container - **Volumes** – An object mapping mountpoint paths (strings) inside the container to empty objects. - **WorkingDir** - A string value containing the working dir for commands to run in. 
- **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **HostConfig** - **Binds** – A list of volume bindings for this container. Each volume binding is a string of the form `container_path` (to create a new volume for the container), `host_path:container_path` (to bind-mount a host path into the container), or `host_path:container_path:ro` (to make the bind-mount read-only inside the container). - **Links** - A list of links for the container. Each link entry should be in the form of `container_name:alias`. - **LxcConf** - LXC specific configurations. These configurations will only work when using the `lxc` execution driver. - **PortBindings** - A map of exposed container ports and the host port they should map to. It should be specified in the form `{ /: [{ "HostPort": "" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. - **Privileged** - Gives the container full access to the host. Specified as a boolean value. - **ReadonlyRootfs** - Mount the container's root filesystem as read only. Specified as a boolean value. - **Dns** - A list of dns servers for the container to use. - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to be added to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. 
The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **NetworkMode** - Sets the networking mode for the container. Supported values are: `bridge`, `host`, and `container:` - **Devices** - A list of devices to add to the container specified in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` - **Ulimits** - A list of ulimits to be set in the container, specified as `{ "Name": , "Soft": , "Hard": }`, for example: `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` - **SecurityOpt**: A list of string values to customize labels for MLS systems, such as SELinux. - **LogConfig** - Log configuration for the container, specified as `{ "Type": "", "Config": {"key1": "val1"}}`. Available types: `json-file`, `syslog`, `journald`, `none`. `json-file` logging driver. - **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. 
Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "AppArmorProfile": "", "Args": [ "-c", "exit 9" ], "Config": { "AttachStderr": true, "AttachStdin": false, "AttachStdout": true, "Cmd": [ "/bin/sh", "-c", "exit 9" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts": null, "Hostname": "ba033ac44011", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "MacAddress": "", "NetworkDisabled": false, "OnBuild": null, "OpenStdin": false, "PortSpecs": null, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "Created": "2015-01-06T15:47:31.485331387Z", "Driver": "devicemapper", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { "Binds": null, "CapAdd": null, "CapDrop": null, "ContainerIDFile": "", "CpusetCpus": "", "CpuShares": 0, "Devices": [], "Dns": null, "DnsSearch": null, "ExtraHosts": null, "IpcMode": "", "Links": null, "LxcConf": [], "Memory": 0, "MemorySwap": 0, "NetworkMode": "bridge", "PortBindings": {}, "Privileged": false, "ReadonlyRootfs": false, "PublishAllPorts": false, "RestartPolicy": { "MaximumRetryCount": 2, "Name": "on-failure" }, "LogConfig": { "Config": null, "Type": "json-file" }, "SecurityOpt": null, "VolumesFrom": null, "Ulimits": [{}] }, "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", "LogPath": 
"/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", "MountLabel": "", "Name": "/boring_euclid", "NetworkSettings": { "Bridge": "", "Gateway": "", "IPAddress": "", "IPPrefixLen": 0, "MacAddress": "", "PortMapping": null, "Ports": null }, "Path": "/bin/sh", "ProcessLabel": "", "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", "RestartCount": 1, "State": { "Error": "", "ExitCode": 9, "FinishedAt": "2015-01-06T15:47:32.080254511Z", "OOMKilled": false, "Paused": false, "Pid": 0, "Restarting": false, "Running": false, "StartedAt": "2015-01-06T15:47:32.072697474Z" }, "Volumes": {}, "VolumesRW": {} } Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. 
**Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ] "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ], } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get stdout and stderr logs from the container ``id`` > **Note**: > This endpoint works only for containers with the `json-file` or `journald` logging drivers. **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default false - **stdout** – 1/True/true or 0/False/false, show stdout log. Default false - **stderr** – 1/True/true or 0/False/false, show stderr log. Default false - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. Default false - **tail** – Output specified number of lines at the end of logs: `all` or ``. 
Default all Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **404** – no such container - **500** – server error ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` Inspect changes on container `id`'s filesystem **Example request**: GET /containers/4fa6e0f0c678/changes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Path": "/dev", "Kind": 0 }, { "Path": "/dev/kmsg", "Kind": 1 }, { "Path": "/test", "Kind": 1 } ] Values for `Kind`: - `0`: Modify - `1`: Add - `2`: Delete Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Export a container `GET /containers/(id)/export` Export the contents of container `id` **Example request**: GET /containers/4fa6e0f0c678/export HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container stats based on resource usage `GET /containers/(id)/stats` This endpoint returns a live stream of a container's resource usage statistics. 
**Example request**: GET /containers/redis1/stats HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "read" : "2015-01-08T22:57:31.547920715Z", "network" : { "rx_dropped" : 0, "rx_bytes" : 648, "rx_errors" : 0, "tx_packets" : 8, "tx_dropped" : 0, "rx_packets" : 8, "tx_errors" : 0, "tx_bytes" : 648 }, "memory_stats" : { "stats" : { "total_pgmajfault" : 0, "cache" : 0, "mapped_file" : 0, "total_inactive_file" : 0, "pgpgout" : 414, "rss" : 6537216, "total_mapped_file" : 0, "writeback" : 0, "unevictable" : 0, "pgpgin" : 477, "total_unevictable" : 0, "pgmajfault" : 0, "total_rss" : 6537216, "total_rss_huge" : 6291456, "total_writeback" : 0, "total_inactive_anon" : 0, "rss_huge" : 6291456, "hierarchical_memory_limit" : 67108864, "total_pgfault" : 964, "total_active_file" : 0, "active_anon" : 6537216, "total_active_anon" : 6537216, "total_pgpgout" : 414, "total_cache" : 0, "inactive_anon" : 0, "active_file" : 0, "pgfault" : 964, "inactive_file" : 0, "total_pgpgin" : 477 }, "max_usage" : 6651904, "usage" : 6537216, "failcnt" : 0, "limit" : 67108864 }, "blkio_stats" : {}, "cpu_stats" : { "cpu_usage" : { "percpu_usage" : [ 16970827, 1839451, 7107380, 10571290 ], "usage_in_usermode" : 10000000, "total_usage" : 36488948, "usage_in_kernelmode" : 20000000 }, "system_cpu_usage" : 20091722000000000, "throttling_data" : {} } } Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Resize a container TTY `POST /containers/(id)/resize?h=&w=` Resize the TTY for container with `id`. The container must be restarted for the resize to take effect. 
**Example request**: POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Length: 0 Content-Type: text/plain; charset=utf-8 Status Codes: - **200** – no error - **404** – No such container - **500** – Cannot resize container ### Start a container `POST /containers/(id)/start` Start the container `id` > **Note**: > For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. > See [create a container](#create-a-container) for details. **Example request**: POST /containers/(id)/start HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters - **signal** - Signal to send to the container: integer or string like "SIGINT". When not set, SIGKILL is assumed and the call will waits for the container to exit. 
Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Rename a container `POST /containers/(id)/rename` Rename the container `id` to a `new_name` **Example request**: POST /containers/e90e34656806/rename?name=new_name HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **name** – new name for the container Status Codes: - **204** – no error - **404** – no such container - **409** - conflict name already assigned - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. 
Default false Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When using the TTY setting is enabled in [`POST /containers/create` ](#create-a-container), the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. The format is a **Header** and a **Payload** (frame). **HEADER** The header will contain the information on which stream write the stream (stdout or stderr). It also contain the size of the associated frame encoded on the last 4 bytes (uint32). It is encoded on the first 8 bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) - 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of the uint32 size encoded as big endian. **PAYLOAD** The payload is the raw stream. **IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default false - **stream** – 1/True/true or 0/False/false, return stream. Default false - **stdin** – 1/True/true or 0/False/false, if stream=true, attach to stdin. 
Default false - **stdout** – 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - **stderr** – 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - **force** - 1/True/true or 0/False/false, Kill then remove the container. 
Default false Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275 }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, "Size": 24653, "VirtualSize": 180116135 } ] **Example request, with digest information**: GET /images/json?digests=1 HTTP/1.1 **Example response, with digest information**: HTTP/1.1 200 OK Content-Type: application/json [ { "Created": 1420064636, "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", "RepoDigests": [ "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" ], "RepoTags": [ "localhost:5000/test/busybox:latest", "playdate:latest" ], "Size": 0, "VirtualSize": 2429728 } ] The response shows a single image `Id` associated with two repositories (`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. 
A caller can use either of the `RepoTags` values `localhost:5000/test/busybox:latest` or `playdate:latest` to reference the image. You can also use `RepoDigests` values to reference an image. In this response, the array has only one reference and that is to the `localhost:5000/test/busybox` repository; the `playdate` repository has no digest. You can reference this digest using the value: `localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` See the `docker run` and `docker build` commands for examples of digest and tag references on the command line. Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - dangling=true - label=`key` or `label="key=value"` of an image label - **filter** - only return images with the specified name ### Build image from a Dockerfile `POST /build` Build an image from a Dockerfile **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz. The archive must include a build instructions file, typically called `Dockerfile` at the root of the archive. The `dockerfile` parameter may be used to specify a different build instructions file by having its value be the path to the alternate build instructions file to use. The archive may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). The build will also be canceled if the client drops the connection by quitting or being killed. Query Parameters: - **dockerfile** - path within the build context to the Dockerfile. 
This is ignored if `remote` is specified and points to an individual filename. - **t** – repository name (and optionally a tag) to be applied to the resulting image in case of success - **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the URI specifies a filename, the file's contents are placed into a file called `Dockerfile`. - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **pull** - attempt to pull the image even if an older image exists locally - **rm** - remove intermediate containers after a successful build (default behavior) - **forcerm** - always remove intermediate containers (includes rm) - **memory** - set memory limit for build - **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight) - **cpusetcpus** - CPUs in which to allow execution, e.g., `0-3`, `0,1` Request Headers: - **Content-type** – should be set to `"application/tar"`. - **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: - **200** – no error - **500** – server error ### Create an image `POST /images/create` Create an image, either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. Query Parameters: - **fromImage** – name of the image to pull - **fromSrc** – source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. 
- **repo** – repository - **tag** – tag - **registry** – the registry to pull from Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/ubuntu/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Created": "2013-03-23T22:24:18.818426-07:00", "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", "ContainerConfig": { "Hostname": "", "User": "", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "Tty": true, "OpenStdin": true, "StdinOnce": false, "Env": null, "Cmd": ["/bin/bash"], "Dns": null, "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Volumes": null, "VolumesFrom": "", "WorkingDir": "" }, "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Parent": "27cf784147099545", "Size": 6824592 } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "b750fe79269d", "Created": 1364102658, "CreatedBy": "/bin/bash" }, { "Id": "27cf78414709", "Created": 1364068391, "CreatedBy": "" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? 
(n/a)", "progressDetail": {"current": 1}} {"error": "Invalid..."} ... If you wish to push an image on to a private registry, that image must already have been tagged into a repository which references that registry host name and port. This repository name should then be used in the URL. This mirrors the flow of the CLI. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – the tag to associate with the image on the registry, optional Request Headers: - **X-Registry-Auth** – include a base64-encoded AuthConfig object. Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker Hub](https://hub.docker.com). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. 
**Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "description": "", "is_official": false, "is_automated": false, "name": "wma55/u1210sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "jdswinbank/sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "vgauthier/sshd", "star_count": 0 } ... ] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Containers": 11, "Debug": 0, "DockerRootDir": "/var/lib/docker", "Driver": "btrfs", "DriverStatus": [[""]], "ExecutionDriver": "native-0.1", "HttpProxy": "http://test:test@localhost:8080", "HttpsProxy": "https://test:test@localhost:8080", "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", "IPv4Forwarding": 1, "Images": 16, "IndexServerAddress": "https://index.docker.io/v1/", "InitPath": "/usr/bin/docker", "InitSha1": "", "KernelVersion": "3.12.0-1-amd64", "Labels": [ "storage=ssd" ], "MemTotal": 2099236864, "MemoryLimit": 1, "NCPU": 1, "NEventsListener": 0, "NFd": 11, "NGoroutines": 21, "Name": "prod-server-42", "NoProxy": "9.81.1.160", "OperatingSystem": "Boot2Docker", "RegistryConfig": { "IndexConfigs": { "docker.io": { "Mirrors": null, "Name": "docker.io", "Official": true, "Secure": true } }, 
"InsecureRegistryCIDRs": [ "127.0.0.0/8" ] }, "SwapLimit": 0, "SystemTime": "2015-03-10T11:11:23.730591467-07:00" } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Version": "1.5.0", "Os": "linux", "KernelVersion": "3.18.5-tinycore64", "GoVersion": "go1.4.1", "GitCommit": "a8a31ef", "Arch": "amd64", "ApiVersion": "1.18" } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes `POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling 
(using since). Docker containers will report the following events: create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause and Docker images will report: untag, delete **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: - **since** – timestamp used for polling - **until** – timestamp used for polling - **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: - event=<string> -- event to filter - image=<string> -- image to filter - container=<string> -- container to filter Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images. `GET /images/get` Get a tarball containing all images and metadata for one or more repositories. 
For each value of the `names` parameter: if it is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into the docker repository. See the [image tarball format](#image-tarball-format) for more details. **Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing three files: 1. `VERSION`: currently `1.0` - the file format version 2. `json`: detailed layer information, similar to `docker inspect layer_id` 3. `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, there will also be a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` ### Exec Create `POST /containers/(id)/exec` Sets up an exec instance in a running container `id` **Example request**: POST /containers/e90e34656806/exec HTTP/1.1 Content-Type: application/json { "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "Cmd": [ "date" ] } **Example response**: HTTP/1.1 201 OK Content-Type: application/json { "Id": "f90e34656806", "Warnings":[] } Json Parameters: - **AttachStdin** - Boolean value, attaches to stdin of the exec command. - **AttachStdout** - Boolean value, attaches to stdout of the exec command. - **AttachStderr** - Boolean value, attaches to stderr of the exec command. - **Tty** - Boolean value to allocate a pseudo-TTY - **Cmd** - Command to run specified as a string or an array of strings. Status Codes: - **201** – no error - **404** – no such container ### Exec Start `POST /exec/(id)/start` Starts a previously set up exec instance `id`. If `detach` is true, this API returns after starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. **Example request**: POST /exec/e90e34656806/start HTTP/1.1 Content-Type: application/json { "Detach": false, "Tty": false } **Example response**: HTTP/1.1 201 OK Content-Type: application/json {{ STREAM }} Json Parameters: - **Detach** - Detach from the exec command - **Tty** - Boolean value to allocate a pseudo-TTY Status Codes: - **200** – no error - **404** – no such exec instance **Stream details**: Similar to the stream behavior of `POST /container/(id)/attach` API ### Exec Resize `POST /exec/(id)/resize` Resizes the tty session used by the exec command `id`. This API is valid only if `tty` was specified as part of creating and starting the exec command. 
**Example request**: POST /exec/e90e34656806/resize HTTP/1.1 Content-Type: text/plain **Example response**: HTTP/1.1 201 OK Content-Type: text/plain Query Parameters: - **h** – height of tty session - **w** – width Status Codes: - **201** – no error - **404** – no such exec instance ### Exec Inspect `GET /exec/(id)/json` Return low-level information about the exec command `id`. **Example request**: GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: plain/text { "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", "Running" : false, "ExitCode" : 2, "ProcessConfig" : { "privileged" : false, "user" : "", "tty" : false, "entrypoint" : "sh", "arguments" : [ "-c", "exit 2" ] }, "OpenStdin" : false, "OpenStderr" : false, "OpenStdout" : false, "Container" : { "State" : { "Running" : true, "Paused" : false, "Restarting" : false, "OOMKilled" : false, "Pid" : 3650, "ExitCode" : 0, "Error" : "", "StartedAt" : "2014-11-17T22:26:03.717657531Z", "FinishedAt" : "0001-01-01T00:00:00Z" }, "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", "Created" : "2014-11-17T22:26:03.626304998Z", "Path" : "date", "Args" : [], "Config" : { "Hostname" : "8f177a186b97", "Domainname" : "", "User" : "", "AttachStdin" : false, "AttachStdout" : false, "AttachStderr" : false, "PortSpecs" : null, "ExposedPorts" : null, "Tty" : false, "OpenStdin" : false, "StdinOnce" : false, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd" : [ "date" ], "Image" : "ubuntu", "Volumes" : null, "WorkingDir" : "", "Entrypoint" : null, "NetworkDisabled" : false, "MacAddress" : "", "OnBuild" : null, "SecurityOpt" : null }, "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", "NetworkSettings" : { "IPAddress" : "172.17.0.2", "IPPrefixLen" : 16, "MacAddress" : "02:42:ac:11:00:02", "Gateway" : "172.17.42.1", "Bridge" : "docker0", 
"PortMapping" : null, "Ports" : {} }, "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Name" : "/test", "Driver" : "aufs", "ExecDriver" : "native-0.2", "MountLabel" : "", "ProcessLabel" : "", "AppArmorProfile" : "", "RestartCount" : 0, "Volumes" : {}, "VolumesRW" : {} } } Status Codes: - **200** – no error - **404** – no such exec instance - **500** - server error # 3. Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it - Then retry to create the container - Start the container - If you are not in detached mode: - Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 - If in detached mode or only stdin is attached: - Display the container's id ## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. To hint potential proxies about connection hijacking, Docker client sends connection upgrade headers similarly to websocket. Upgrade: tcp Connection: Upgrade When Docker daemon detects the `Upgrade` header, it will switch its status code from **200 OK** to **101 UPGRADED** and resend the same headers. This might change in the future. ## 3.3 CORS Requests To set cross origin requests to the remote api please give values to "--api-cors-header" when running docker in daemon mode. 
Set * will allow all, default or blank means CORS disabled $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" docker-1.10.3/docs/reference/api/docker_remote_api_v1.19.md000066400000000000000000001655631267010174400233570ustar00rootroot00000000000000 # Docker Remote API v1.19 ## 1. Brief introduction - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST. However, for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout`, `stdin` and `stderr`. - When the client API version is newer than the daemon's, these calls return an HTTP `400 Bad Request` error message. # 2. Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Names":["/boring_feynman"], "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "9cd87474be90", "Names":["/coolName"], "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "3176a2479c92", "Names":["/sleepy_dog"], "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", "Ports":[], "Labels": {}, "SizeRw":12288, "SizeRootFs":0 }, { "Id": "4cb07b47f9fb", "Names":["/running_cat"], "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 
1367854152, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. - **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: - `exited=`; -- containers with exit code of `` ; - `status=`(`restarting`|`running`|`paused`|`exited`) - `label=key` or `label="key=value"` of a container label Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd": [ "date" ], "Entrypoint": "", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "CpuPeriod": 100000, "CpuQuota": 50000, "CpusetCpus": "0,1", "CpusetMems": "0,1", "BlkioWeight": 300, "OomKillDisable": false, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": 
false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [], "Ulimits": [{}], "LogConfig": { "Type": "json-file", "Config": {} }, "SecurityOpt": [""], "CgroupParent": "" } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id":"e90e34656806", "Warnings":[] } Json Parameters: - **Hostname** - A string value containing the hostname to use for the container. - **Domainname** - A string value containing the domain name to use for the container. - **User** - A string value specifying the user inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **CpuShares** - An integer value containing the container's CPU Shares (ie. the relative weight vs other containers). - **CpuPeriod** - The length of a CPU period in microseconds. - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. - **Cpuset** - Deprecated please don't use. Use `CpusetCpus` instead. - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. - **AttachStdin** - Boolean value, attaches to `stdin`. - **AttachStdout** - Boolean value, attaches to `stdout`. - **AttachStderr** - Boolean value, attaches to `stderr`. - **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. 
- **OpenStdin** - Boolean value, opens stdin, - **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. - **Env** - A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]` - **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value"[,"key2":"value2"]}` - **Cmd** - Command to run specified as a string or an array of strings. - **Entrypoint** - Set the entry point for the container as a string or an array of strings. - **Image** - A string specifying the image name to use for the container. - **Volumes** – An object mapping mount point paths (strings) inside the container to empty objects. - **WorkingDir** - A string specifying the working directory for commands to run in. - **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **HostConfig** - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + `container_path` to create a new volume for the container + `host_path:container_path` to bind-mount a host path into the container + `host_path:container_path:ro` to make the bind-mount read-only inside the container. - **Links** - A list of links for the container. Each link entry should be in the form of `container_name:alias`. - **LxcConf** - LXC specific configurations. These configurations only work when using the `lxc` execution driver. - **PortBindings** - A map of exposed container ports and the host port they should map to. A JSON object in the form `{ /: [{ "HostPort": "" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. - **Privileged** - Gives the container full access to the host. Specified as a boolean value. 
- **ReadonlyRootfs** - Mount the container's root filesystem as read only. Specified as a boolean value. - **Dns** - A list of DNS servers for the container to use. - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **NetworkMode** - Sets the networking mode for the container. Supported values are: `bridge`, `host`, and `container:` - **Devices** - A list of devices to add to the container specified as a JSON object in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` - **Ulimits** - A list of ulimits to set in the container, specified as `{ "Name": , "Soft": , "Hard": }`, for example: `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` - **SecurityOpt**: A list of string values to customize labels for MLS systems, such as SELinux. - **LogConfig** - Log configuration for the container, specified as a JSON object in the form `{ "Type": "", "Config": {"key1": "val1"}}`. Available types: `json-file`, `syslog`, `journald`, `none`. `syslog` available options are: `address`. 
- **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "AppArmorProfile": "", "Args": [ "-c", "exit 9" ], "Config": { "AttachStderr": true, "AttachStdin": false, "AttachStdout": true, "Cmd": [ "/bin/sh", "-c", "exit 9" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts": null, "Hostname": "ba033ac44011", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "MacAddress": "", "NetworkDisabled": false, "OnBuild": null, "OpenStdin": false, "PortSpecs": null, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "Created": "2015-01-06T15:47:31.485331387Z", "Driver": "devicemapper", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { "Binds": null, "BlkioWeight": 0, "CapAdd": null, "CapDrop": null, "ContainerIDFile": "", "CpusetCpus": "", "CpusetMems": "", "CpuShares": 0, "CpuPeriod": 100000, "Devices": [], "Dns": null, "DnsSearch": null, "ExtraHosts": null, "IpcMode": "", "Links": null, "LxcConf": [], "Memory": 0, "MemorySwap": 0, "OomKillDisable": false, "NetworkMode": "bridge", "PortBindings": {}, "Privileged": false, "ReadonlyRootfs": false, "PublishAllPorts": false, "RestartPolicy": { 
"MaximumRetryCount": 2, "Name": "on-failure" }, "LogConfig": { "Config": null, "Type": "json-file" }, "SecurityOpt": null, "VolumesFrom": null, "Ulimits": [{}] }, "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", "MountLabel": "", "Name": "/boring_euclid", "NetworkSettings": { "Bridge": "", "Gateway": "", "IPAddress": "", "IPPrefixLen": 0, "MacAddress": "", "PortMapping": null, "Ports": null }, "Path": "/bin/sh", "ProcessLabel": "", "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", "RestartCount": 1, "State": { "Error": "", "ExitCode": 9, "FinishedAt": "2015-01-06T15:47:32.080254511Z", "OOMKilled": false, "Paused": false, "Pid": 0, "Restarting": false, "Running": false, "StartedAt": "2015-01-06T15:47:32.072697474Z" }, "Volumes": {}, "VolumesRW": {} } Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. 
**Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ] "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ], } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get `stdout` and `stderr` logs from the container ``id`` > **Note**: > This endpoint works only for containers with the `json-file` or `journald` logging drivers. **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. - **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. - **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp will only output log-entries since that timestamp. Default: 0 (unfiltered) - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. Default `false`. 
- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default `all`. Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **404** – no such container - **500** – server error ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` Inspect changes on container `id`'s filesystem **Example request**: GET /containers/4fa6e0f0c678/changes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Path": "/dev", "Kind": 0 }, { "Path": "/dev/kmsg", "Kind": 1 }, { "Path": "/test", "Kind": 1 } ] Values for `Kind`: - `0`: Modify - `1`: Add - `2`: Delete Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Export a container `GET /containers/(id)/export` Export the contents of container `id` **Example request**: GET /containers/4fa6e0f0c678/export HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container stats based on resource usage `GET /containers/(id)/stats` This endpoint returns a live stream of a container's resource usage statistics. 
**Example request**: GET /containers/redis1/stats HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "read" : "2015-01-08T22:57:31.547920715Z", "network" : { "rx_dropped" : 0, "rx_bytes" : 648, "rx_errors" : 0, "tx_packets" : 8, "tx_dropped" : 0, "rx_packets" : 8, "tx_errors" : 0, "tx_bytes" : 648 }, "memory_stats" : { "stats" : { "total_pgmajfault" : 0, "cache" : 0, "mapped_file" : 0, "total_inactive_file" : 0, "pgpgout" : 414, "rss" : 6537216, "total_mapped_file" : 0, "writeback" : 0, "unevictable" : 0, "pgpgin" : 477, "total_unevictable" : 0, "pgmajfault" : 0, "total_rss" : 6537216, "total_rss_huge" : 6291456, "total_writeback" : 0, "total_inactive_anon" : 0, "rss_huge" : 6291456, "hierarchical_memory_limit" : 67108864, "total_pgfault" : 964, "total_active_file" : 0, "active_anon" : 6537216, "total_active_anon" : 6537216, "total_pgpgout" : 414, "total_cache" : 0, "inactive_anon" : 0, "active_file" : 0, "pgfault" : 964, "inactive_file" : 0, "total_pgpgin" : 477 }, "max_usage" : 6651904, "usage" : 6537216, "failcnt" : 0, "limit" : 67108864 }, "blkio_stats" : {}, "cpu_stats" : { "cpu_usage" : { "percpu_usage" : [ 16970827, 1839451, 7107380, 10571290 ], "usage_in_usermode" : 10000000, "total_usage" : 36488948, "usage_in_kernelmode" : 20000000 }, "system_cpu_usage" : 20091722000000000, "throttling_data" : {} } } Query Parameters: - **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Resize a container TTY `POST /containers/(id)/resize?h=&w=` Resize the TTY for container with `id`. You must restart the container for the resize to take effect. 
**Example request**: POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Length: 0 Content-Type: text/plain; charset=utf-8 Status Codes: - **200** – no error - **404** – No such container - **500** – Cannot resize container ### Start a container `POST /containers/(id)/start` Start the container `id` > **Note**: > For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. > See [create a container](#create-a-container) for details. **Example request**: POST /containers/(id)/start HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters - **signal** - Signal to send to the container: integer or string like `SIGINT`. When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Rename a container `POST /containers/(id)/rename` Rename the container `id` to a `new_name` **Example request**: POST /containers/e90e34656806/rename?name=new_name HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **name** – new name for the container Status Codes: - **204** – no error - **404** – no such container - **409** - conflict name already assigned - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default `false`. - **stream** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach to `stdin`. Default `false`. - **stdout** – 1/True/true or 0/False/false, if `logs=true`, return `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. - **stderr** – 1/True/true or 0/False/false, if `logs=true`, return `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. 
Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When using the TTY setting is enabled in [`POST /containers/create` ](#create-a-container), the stream is the raw data from the process PTY and client's `stdin`. When the TTY is disabled, then the stream is multiplexed to separate `stdout` and `stderr`. The format is a **Header** and a **Payload** (frame). **HEADER** The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. **PAYLOAD** The payload is the raw stream. **IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read eight bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default `false`. - **stream** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach to `stdin`. Default `false`. 
- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. - **stderr** – 1/True/true or 0/False/false, if `logs=true`, return `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default `false`. - **force** - 1/True/true or 0/False/false, Kill then remove the container. Default `false`. 
Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275, "Labels": {} }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, "Size": 24653, "VirtualSize": 180116135, "Labels": { "com.example.version": "v1" } } ] **Example request, with digest information**: GET /images/json?digests=1 HTTP/1.1 **Example response, with digest information**: HTTP/1.1 200 OK Content-Type: application/json [ { "Created": 1420064636, "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", "RepoDigests": [ "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" ], "RepoTags": [ "localhost:5000/test/busybox:latest", "playdate:latest" ], "Size": 0, "VirtualSize": 2429728, "Labels": {} } ] The response shows a single image `Id` associated with two repositories (`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. 
A caller can use either of the `RepoTags` values `localhost:5000/test/busybox:latest` or `playdate:latest` to reference the image. You can also use `RepoDigests` values to reference an image. In this response, the array has only one reference and that is to the `localhost:5000/test/busybox` repository; the `playdate` repository has no digest. You can reference this digest using the value: `localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` See the `docker run` and `docker build` commands for examples of digest and tag references on the command line. Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - `dangling=true` - `label=key` or `label="key=value"` of an image label - **filter** - only return images with the specified name ### Build image from a Dockerfile `POST /build` Build an image from a Dockerfile **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The input stream must be a `tar` archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. The archive must include a build instructions file, typically called `Dockerfile` at the archive's root. The `dockerfile` parameter may be used to specify a different build instructions file. To do this, its value must be the path to the alternate build instructions file to use. The archive may include any number of other files, which are accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). The build is canceled if the client drops the connection by quitting or being killed. Query Parameters: - **dockerfile** - Path within the build context to the `Dockerfile`. 
This is ignored if `remote` is specified and points to an external `Dockerfile`. - **t** – Repository name (and optionally a tag) to be applied to the resulting image in case of success. - **remote** – A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file's contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball. - **q** – Suppress verbose build output. - **nocache** – Do not use the cache when building the image. - **pull** - Attempt to pull the image even if an older image exists locally. - **rm** - Remove intermediate containers after a successful build (default behavior). - **forcerm** - Always remove intermediate containers (includes `rm`). - **memory** - Set memory limit for build. - **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight). - **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - **cpuperiod** - The length of a CPU period in microseconds. - **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. Request Headers: - **Content-type** – Set to `"application/tar"`. - **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: - **200** – no error - **500** – server error ### Create an image `POST /images/create` Create an image either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... 
When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. Query Parameters: - **fromImage** – Name of the image to pull. - **fromSrc** – Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. - **repo** – Repository name. - **tag** – Tag. - **registry** – The registry to pull from. Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/ubuntu/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Created": "2013-03-23T22:24:18.818426-07:00", "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", "ContainerConfig": { "Hostname": "", "User": "", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "Tty": true, "OpenStdin": true, "StdinOnce": false, "Env": null, "Cmd": ["/bin/bash"], "Dns": null, "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Volumes": null, "VolumesFrom": "", "WorkingDir": "" }, "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Parent": "27cf784147099545", "Size": 6824592 } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", "Created": 1398108230, "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", "Tags": 
[ "ubuntu:lucid", "ubuntu:10.04" ], "Size": 182964289, "Comment": "" }, { "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", "Created": 1398108222, "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", "Tags": null, "Size": 0, "Comment": "" }, { "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", "Created": 1371157430, "CreatedBy": "", "Tags": [ "scratch12:latest", "scratch:latest" ], "Size": 0, "Comment": "Imported from -" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}} {"error": "Invalid..."} ... If you wish to push an image on to a private registry, that image must already have a tag into a repository which references that registry `hostname` and `port`. This repository name should then be used in the URL. This duplicates the command line's flow. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – The tag to associate with the image on the registry. This is optional. Request Headers: - **X-Registry-Auth** – Include a base64-encoded AuthConfig object. 
Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker Hub](https://hub.docker.com). This API returns both `is_trusted` and `is_automated` images. Currently, they are considered identical. In the future, the `is_trusted` property will be deprecated and replaced by the `is_automated` property. > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. 
**Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "star_count": 12, "is_official": false, "name": "wma55/u1210sshd", "is_trusted": false, "is_automated": false, "description": "" }, { "star_count": 10, "is_official": false, "name": "jdswinbank/sshd", "is_trusted": false, "is_automated": false, "description": "" }, { "star_count": 18, "is_official": false, "name": "vgauthier/sshd", "is_trusted": false, "is_automated": false, "description": "" } ... ] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Containers": 11, "CpuCfsPeriod": true, "CpuCfsQuota": true, "Debug": false, "DockerRootDir": "/var/lib/docker", "Driver": "btrfs", "DriverStatus": [[""]], "ExecutionDriver": "native-0.1", "ExperimentalBuild": false, "HttpProxy": "http://test:test@localhost:8080", "HttpsProxy": "https://test:test@localhost:8080", "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", "IPv4Forwarding": true, "Images": 16, "IndexServerAddress": "https://index.docker.io/v1/", "InitPath": "/usr/bin/docker", "InitSha1": "", "KernelVersion": "3.12.0-1-amd64", "Labels": [ "storage=ssd" ], "MemTotal": 2099236864, "MemoryLimit": true, "NCPU": 1, "NEventsListener": 0, "NFd": 11, "NGoroutines": 21, "Name": "prod-server-42", "NoProxy": "9.81.1.160", 
"OomKillDisable": true, "OperatingSystem": "Boot2Docker", "RegistryConfig": { "IndexConfigs": { "docker.io": { "Mirrors": null, "Name": "docker.io", "Official": true, "Secure": true } }, "InsecureRegistryCIDRs": [ "127.0.0.0/8" ] }, "SwapLimit": false, "SystemTime": "2015-03-10T11:11:23.730591467-07:00" } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Version": "1.5.0", "Os": "linux", "KernelVersion": "3.18.5-tinycore64", "GoVersion": "go1.4.1", "GitCommit": "a8a31ef", "Arch": "amd64", "ApiVersion": "1.19" } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes `POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "PortSpecs": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Volumes": { "/tmp": {} }, "Labels": { "key1": "value1", "key2": "value2" }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – author (e.g., "John Hannibal Smith 
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling (using since). Docker containers report the following events: attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause and Docker images report: untag, delete **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: - **since** – Timestamp used for polling - **until** – Timestamp used for polling - **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: - `event=`; -- event to filter - `image=`; -- image to filter - `container=`; -- container to filter Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. See the [image tarball format](#image-tarball-format) for more details. 
**Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images. `GET /images/get` Get a tarball containing all images and metadata for one or more repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into a Docker repository. See the [image tarball format](#image-tarball-format) for more details. **Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` ### Exec Create `POST /containers/(id)/exec` Sets up an exec instance in a running container `id` **Example request**: POST /containers/e90e34656806/exec HTTP/1.1 Content-Type: application/json { "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "Cmd": [ "date" ], } **Example response**: HTTP/1.1 201 OK Content-Type: application/json { "Id": "f90e34656806", "Warnings":[] } Json Parameters: - **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. - **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. - **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. - **Tty** - Boolean value to allocate a pseudo-TTY. - **Cmd** - Command to run specified as a string or an array of strings. Status Codes: - **201** – no error - **404** – no such container ### Exec Start `POST /exec/(id)/start` Starts a previously set up `exec` instance `id`. If `detach` is true, this API returns after starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. **Example request**: POST /exec/e90e34656806/start HTTP/1.1 Content-Type: application/json { "Detach": false, "Tty": false, } **Example response**: HTTP/1.1 201 OK Content-Type: application/json {{ STREAM }} Json Parameters: - **Detach** - Detach from the `exec` command. - **Tty** - Boolean value to allocate a pseudo-TTY. Status Codes: - **200** – no error - **404** – no such exec instance **Stream details**: Similar to the stream behavior of `POST /container/(id)/attach` API ### Exec Resize `POST /exec/(id)/resize` Resizes the `tty` session used by the `exec` command `id`. This API is valid only if `tty` was specified as part of creating and starting the `exec` command. 
**Example request**: POST /exec/e90e34656806/resize HTTP/1.1 Content-Type: text/plain **Example response**: HTTP/1.1 201 OK Content-Type: text/plain Query Parameters: - **h** – height of `tty` session - **w** – width Status Codes: - **201** – no error - **404** – no such exec instance ### Exec Inspect `GET /exec/(id)/json` Return low-level information about the `exec` command `id`. **Example request**: GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: plain/text { "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", "Running" : false, "ExitCode" : 2, "ProcessConfig" : { "privileged" : false, "user" : "", "tty" : false, "entrypoint" : "sh", "arguments" : [ "-c", "exit 2" ] }, "OpenStdin" : false, "OpenStderr" : false, "OpenStdout" : false, "Container" : { "State" : { "Running" : true, "Paused" : false, "Restarting" : false, "OOMKilled" : false, "Pid" : 3650, "ExitCode" : 0, "Error" : "", "StartedAt" : "2014-11-17T22:26:03.717657531Z", "FinishedAt" : "0001-01-01T00:00:00Z" }, "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", "Created" : "2014-11-17T22:26:03.626304998Z", "Path" : "date", "Args" : [], "Config" : { "Hostname" : "8f177a186b97", "Domainname" : "", "User" : "", "AttachStdin" : false, "AttachStdout" : false, "AttachStderr" : false, "PortSpecs": null, "ExposedPorts" : null, "Tty" : false, "OpenStdin" : false, "StdinOnce" : false, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd" : [ "date" ], "Image" : "ubuntu", "Volumes" : null, "WorkingDir" : "", "Entrypoint" : null, "NetworkDisabled" : false, "MacAddress" : "", "OnBuild" : null, "SecurityOpt" : null }, "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", "NetworkSettings" : { "IPAddress" : "172.17.0.2", "IPPrefixLen" : 16, "MacAddress" : "02:42:ac:11:00:02", "Gateway" : "172.17.42.1", "Bridge" : "docker0", 
"PortMapping" : null, "Ports" : {} }, "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Name" : "/test", "Driver" : "aufs", "ExecDriver" : "native-0.2", "MountLabel" : "", "ProcessLabel" : "", "AppArmorProfile" : "", "RestartCount" : 0, "Volumes" : {}, "VolumesRW" : {} } } Status Codes: - **200** – no error - **404** – no such exec instance - **500** - server error # 3. Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it. - Then, retry to create the container. - Start the container. - If you are not in detached mode: - Attach to the container, using `logs=1` (to have `stdout` and `stderr` from the container's start) and `stream=1` - If in detached mode or only `stdin` is attached, display the container's id. ## 3.2 Hijacking In this version of the API, `/attach`, uses hijacking to transport `stdin`, `stdout`, and `stderr` on the same socket. To hint potential proxies about connection hijacking, Docker client sends connection upgrade headers similarly to websocket. Upgrade: tcp Connection: Upgrade When Docker daemon detects the `Upgrade` header, it switches its status code from **200 OK** to **101 UPGRADED** and resends the same headers. ## 3.3 CORS Requests To set cross origin requests to the remote api please give values to `--api-cors-header` when running Docker in daemon mode. 
Set * (asterisk) allows all, default or blank means CORS disabled $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" docker-1.10.3/docs/reference/api/docker_remote_api_v1.20.md000066400000000000000000001773751267010174400233530ustar00rootroot00000000000000 # Docker Remote API v1.20 ## 1. Brief introduction - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST. However, for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout`, `stdin` and `stderr`. - When the client API version is newer than the daemon's, these calls return an HTTP `400 Bad Request` error message. # 2. Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Names":["/boring_feynman"], "Image": "ubuntu:latest", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "9cd87474be90", "Names":["/coolName"], "Image": "ubuntu:latest", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "3176a2479c92", "Names":["/sleepy_dog"], "Image": "ubuntu:latest", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", "Ports":[], "Labels": {}, "SizeRw":12288, "SizeRootFs":0 }, { "Id": "4cb07b47f9fb", "Names":["/running_cat"], "Image": "ubuntu:latest", "Command": "echo 444444444444444444444444444444444", "Created": 
1367854152, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. - **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: - `exited=`; -- containers with exit code of `` ; - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) - `label=key` or `label="key=value"` of a container label Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd": [ "date" ], "Entrypoint": "", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Mounts": [ { "Source": "/data", "Destination": "/data", "Mode": "ro,Z", "RW": false } ], "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "CpuPeriod": 100000, "CpuQuota": 50000, "CpusetCpus": "0,1", "CpusetMems": "0,1", "BlkioWeight": 300, "MemorySwappiness": 60, "OomKillDisable": false, "PortBindings": { "22/tcp": 
[{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "GroupAdd": ["newgroup"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [], "Ulimits": [{}], "LogConfig": { "Type": "json-file", "Config": {} }, "SecurityOpt": [""], "CgroupParent": "" } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id":"e90e34656806", "Warnings":[] } Json Parameters: - **Hostname** - A string value containing the hostname to use for the container. - **Domainname** - A string value containing the domain name to use for the container. - **User** - A string value specifying the user inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **CpuShares** - An integer value containing the container's CPU Shares (ie. the relative weight vs other containers). - **CpuPeriod** - The length of a CPU period in microseconds. - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. - **Cpuset** - Deprecated please don't use. Use `CpusetCpus` instead. - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. - **AttachStdin** - Boolean value, attaches to `stdin`. - **AttachStdout** - Boolean value, attaches to `stdout`. 
- **AttachStderr** - Boolean value, attaches to `stderr`. - **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. - **OpenStdin** - Boolean value, opens stdin, - **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. - **Env** - A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]` - **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value"[,"key2":"value2"]}` - **Cmd** - Command to run specified as a string or an array of strings. - **Entrypoint** - Set the entry point for the container as a string or an array of strings. - **Image** - A string specifying the image name to use for the container. - **Mounts** - An array of mount points in the container. - **WorkingDir** - A string specifying the working directory for commands to run in. - **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **HostConfig** - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + `container_path` to create a new volume for the container + `host_path:container_path` to bind-mount a host path into the container + `host_path:container_path:ro` to make the bind-mount read-only inside the container. - **Links** - A list of links for the container. Each link entry should be in the form of `container_name:alias`. - **LxcConf** - LXC specific configurations. These configurations only work when using the `lxc` execution driver. - **PortBindings** - A map of exposed container ports and the host port they should map to. A JSON object in the form `{ /: [{ "HostPort": "" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. 
- **Privileged** - Gives the container full access to the host. Specified as a boolean value. - **ReadonlyRootfs** - Mount the container's root filesystem as read only. Specified as a boolean value. - **Dns** - A list of DNS servers for the container to use. - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **GroupAdd** - A list of additional groups that the container process will run as - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **NetworkMode** - Sets the networking mode for the container. Supported values are: `bridge`, `host`, and `container:` - **Devices** - A list of devices to add to the container specified as a JSON object in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` - **Ulimits** - A list of ulimits to set in the container, specified as `{ "Name": , "Soft": , "Hard": }`, for example: `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` - **SecurityOpt**: A list of string values to customize labels for MLS systems, such as SELinux. 
- **LogConfig** - Log configuration for the container, specified as a JSON object in the form `{ "Type": "", "Config": {"key1": "val1"}}`. Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`. `json-file` logging driver. - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "AppArmorProfile": "", "Args": [ "-c", "exit 9" ], "Config": { "AttachStderr": true, "AttachStdin": false, "AttachStdout": true, "Cmd": [ "/bin/sh", "-c", "exit 9" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts": null, "Hostname": "ba033ac44011", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "MacAddress": "", "NetworkDisabled": false, "OnBuild": null, "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "Created": "2015-01-06T15:47:31.485331387Z", "Driver": "devicemapper", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { "Binds": null, "BlkioWeight": 0, "CapAdd": null, "CapDrop": null, "ContainerIDFile": "", "CpusetCpus": "", "CpusetMems": "", "CpuShares": 0, "CpuPeriod": 100000, "Devices": [], "Dns": null, "DnsSearch": null, "ExtraHosts": null, "IpcMode": "", "Links": null, "LxcConf": 
[], "Memory": 0, "MemorySwap": 0, "OomKillDisable": false, "NetworkMode": "bridge", "PortBindings": {}, "Privileged": false, "ReadonlyRootfs": false, "PublishAllPorts": false, "RestartPolicy": { "MaximumRetryCount": 2, "Name": "on-failure" }, "LogConfig": { "Config": null, "Type": "json-file" }, "SecurityOpt": null, "VolumesFrom": null, "Ulimits": [{}] }, "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", "MountLabel": "", "Name": "/boring_euclid", "NetworkSettings": { "Bridge": "", "Gateway": "", "IPAddress": "", "IPPrefixLen": 0, "MacAddress": "", "PortMapping": null, "Ports": null }, "Path": "/bin/sh", "ProcessLabel": "", "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", "RestartCount": 1, "State": { "Error": "", "ExitCode": 9, "FinishedAt": "2015-01-06T15:47:32.080254511Z", "OOMKilled": false, "Paused": false, "Pid": 0, "Restarting": false, "Running": false, "StartedAt": "2015-01-06T15:47:32.072697474Z" }, "Mounts": [ { "Source": "/data", "Destination": "/data", "Mode": "ro,Z", "RW": false } ] } Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. 
**Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ] "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ], } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get `stdout` and `stderr` logs from the container ``id`` > **Note**: > This endpoint works only for containers with the `json-file` or `journald` logging drivers. **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. - **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. - **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp will only output log-entries since that timestamp. Default: 0 (unfiltered) - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. Default `false`. 
- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **404** – no such container - **500** – server error ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` Inspect changes on container `id`'s filesystem **Example request**: GET /containers/4fa6e0f0c678/changes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Path": "/dev", "Kind": 0 }, { "Path": "/dev/kmsg", "Kind": 1 }, { "Path": "/test", "Kind": 1 } ] Values for `Kind`: - `0`: Modify - `1`: Add - `2`: Delete Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Export a container `GET /containers/(id)/export` Export the contents of container `id` **Example request**: GET /containers/4fa6e0f0c678/export HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container stats based on resource usage `GET /containers/(id)/stats` This endpoint returns a live stream of a container's resource usage statistics. 
**Example request**: GET /containers/redis1/stats HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "read" : "2015-01-08T22:57:31.547920715Z", "network" : { "rx_dropped" : 0, "rx_bytes" : 648, "rx_errors" : 0, "tx_packets" : 8, "tx_dropped" : 0, "rx_packets" : 8, "tx_errors" : 0, "tx_bytes" : 648 }, "memory_stats" : { "stats" : { "total_pgmajfault" : 0, "cache" : 0, "mapped_file" : 0, "total_inactive_file" : 0, "pgpgout" : 414, "rss" : 6537216, "total_mapped_file" : 0, "writeback" : 0, "unevictable" : 0, "pgpgin" : 477, "total_unevictable" : 0, "pgmajfault" : 0, "total_rss" : 6537216, "total_rss_huge" : 6291456, "total_writeback" : 0, "total_inactive_anon" : 0, "rss_huge" : 6291456, "hierarchical_memory_limit" : 67108864, "total_pgfault" : 964, "total_active_file" : 0, "active_anon" : 6537216, "total_active_anon" : 6537216, "total_pgpgout" : 414, "total_cache" : 0, "inactive_anon" : 0, "active_file" : 0, "pgfault" : 964, "inactive_file" : 0, "total_pgpgin" : 477 }, "max_usage" : 6651904, "usage" : 6537216, "failcnt" : 0, "limit" : 67108864 }, "blkio_stats" : {}, "cpu_stats" : { "cpu_usage" : { "percpu_usage" : [ 16970827, 1839451, 7107380, 10571290 ], "usage_in_usermode" : 10000000, "total_usage" : 36488948, "usage_in_kernelmode" : 20000000 }, "system_cpu_usage" : 20091722000000000, "throttling_data" : {} } } Query Parameters: - **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Resize a container TTY `POST /containers/(id)/resize?h=&w=` Resize the TTY for container with `id`. You must restart the container for the resize to take effect. 
**Example request**: POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Length: 0 Content-Type: text/plain; charset=utf-8 Status Codes: - **200** – no error - **404** – No such container - **500** – Cannot resize container ### Start a container `POST /containers/(id)/start` Start the container `id` > **Note**: > For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. > See [create a container](#create-a-container) for details. **Example request**: POST /containers/(id)/start HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters - **signal** - Signal to send to the container: integer or string like `SIGINT`. When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Rename a container `POST /containers/(id)/rename` Rename the container `id` to a `new_name` **Example request**: POST /containers/e90e34656806/rename?name=new_name HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **name** – new name for the container Status Codes: - **204** – no error - **404** – no such container - **409** - conflict name already assigned - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default `false`. - **stream** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach to `stdin`. Default `false`. - **stdout** – 1/True/true or 0/False/false, if `logs=true`, return `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. - **stderr** – 1/True/true or 0/False/false, if `logs=true`, return `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. 
Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When using the TTY setting is enabled in [`POST /containers/create` ](#create-a-container), the stream is the raw data from the process PTY and client's `stdin`. When the TTY is disabled, then the stream is multiplexed to separate `stdout` and `stderr`. The format is a **Header** and a **Payload** (frame). **HEADER** The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. **PAYLOAD** The payload is the raw stream. **IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read eight bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default `false`. - **stream** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach to `stdin`. Default `false`. 
- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. - **stderr** – 1/True/true or 0/False/false, if `logs=true`, return `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default `false`. - **force** - 1/True/true or 0/False/false, Kill then remove the container. Default `false`. Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Deprecated** in favor of the `archive` endpoint below. **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Retrieving information about files and folders in a container `HEAD /containers/(id)/archive` See the description of the `X-Docker-Container-Path-Stat` header in the following section. 
### Get an archive of a filesystem resource in a container `GET /containers/(id)/archive` Get an tar archive of a resource in the filesystem of container `id`. Query Parameters: - **path** - resource in the container's filesystem to archive. Required. If not an absolute path, it is relative to the container's root directory. The resource specified by **path** must exist. To assert that the resource is expected to be a directory, **path** should end in `/` or `/.` (assuming a path separator of `/`). If **path** ends in `/.` then this indicates that only the contents of the **path** directory should be copied. A symlink is always resolved to its target. **Note**: It is not possible to copy certain system files such as resources under `/proc`, `/sys`, `/dev`, and mounts created by the user in the container. **Example request**: GET /containers/8cce319429b2/archive?path=/root HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= {{ TAR STREAM }} On success, a response header `X-Docker-Container-Path-Stat` will be set to a base64-encoded JSON object containing some filesystem header information about the archived resource. The above example value would decode to the following JSON object (whitespace added for readability): { "name": "root", "size": 4096, "mode": 2147484096, "mtime": "2014-02-27T20:51:23Z", "linkTarget": "" } A `HEAD` request can also be made to this endpoint if only this information is desired. 
Status Codes: - **200** - success, returns archive of copied resource - **400** - client error, bad parameter, details in JSON response body, one of: - must specify path parameter (**path** cannot be empty) - not a directory (**path** was asserted to be a directory but exists as a file) - **404** - client error, resource not found, one of: - no such container (container `id` does not exist) - no such file or directory (**path** does not exist) - **500** - server error ### Extract an archive of files or folders to a directory in a container `PUT /containers/(id)/archive` Upload a tar archive to be extracted to a path in the filesystem of container `id`. Query Parameters: - **path** - path to a directory in the container to extract the archive's contents into. Required. If not an absolute path, it is relative to the container's root directory. The **path** resource must exist. - **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. **Example request**: PUT /containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 Content-Type: application/x-tar {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – the content was extracted successfully - **400** - client error, bad parameter, details in JSON response body, one of: - must specify path parameter (**path** cannot be empty) - not a directory (**path** should be a directory but exists as a file) - unable to overwrite existing directory with non-directory (if **noOverwriteDirNonDir**) - unable to overwrite existing non-directory with directory (if **noOverwriteDirNonDir**) - **403** - client error, permission denied, the volume or container rootfs is marked as read-only. 
- **404** - client error, resource not found, one of: – no such container (container `id` does not exist) - no such file or directory (**path** resource does not exist) - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275, "Labels": {} }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, "Size": 24653, "VirtualSize": 180116135, "Labels": { "com.example.version": "v1" } } ] **Example request, with digest information**: GET /images/json?digests=1 HTTP/1.1 **Example response, with digest information**: HTTP/1.1 200 OK Content-Type: application/json [ { "Created": 1420064636, "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", "RepoDigests": [ "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" ], "RepoTags": [ "localhost:5000/test/busybox:latest", "playdate:latest" ], "Size": 0, "VirtualSize": 2429728, "Labels": {} } ] The response shows a single image `Id` associated with two repositories (`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use either of the `RepoTags` values `localhost:5000/test/busybox:latest` or `playdate:latest` to reference the image. You can also use `RepoDigests` values to reference an image. In this response, the array has only one reference and that is to the `localhost:5000/test/busybox` repository; the `playdate` repository has no digest. 
You can reference this digest using the value: `localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` See the `docker run` and `docker build` commands for examples of digest and tag references on the command line. Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - `dangling=true` - `label=key` or `label="key=value"` of an image label - **filter** - only return images with the specified name ### Build image from a Dockerfile `POST /build` Build an image from a Dockerfile **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The input stream must be a `tar` archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. The archive must include a build instructions file, typically called `Dockerfile` at the archive's root. The `dockerfile` parameter may be used to specify a different build instructions file. To do this, its value must be the path to the alternate build instructions file to use. The archive may include any number of other files, which are accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). The build is canceled if the client drops the connection by quitting or being killed. Query Parameters: - **dockerfile** - Path within the build context to the Dockerfile. This is ignored if `remote` is specified and points to an individual filename. - **t** – A repository name (and optionally a tag) to apply to the resulting image in case of success. - **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the URI specifies a filename, the file's contents are placed into a file called `Dockerfile`. 
- **q** – Suppress verbose build output. - **nocache** – Do not use the cache when building the image. - **pull** - Attempt to pull the image even if an older image exists locally. - **rm** - Remove intermediate containers after a successful build (default behavior). - **forcerm** - Always remove intermediate containers (includes `rm`). - **memory** - Set memory limit for build. - **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight). - **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - **cpuperiod** - The length of a CPU period in microseconds. - **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. Request Headers: - **Content-type** – Set to `"application/tar"`. - **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON object with the following structure: { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } This object maps the hostname of a registry to an object containing the "username" and "password" for that registry. Multiple registries may be specified as the build may be based on an image requiring authentication to pull from any arbitrary registry. Only the registry domain name (and port if not the default "443") are required. However (for legacy reasons) the "official" Docker, Inc. hosted registry must be specified with both a "https://" prefix and a "/v1/" suffix even though Docker will prefer to use the v2 registry API. 
Status Codes: - **200** – no error - **500** – server error ### Create an image `POST /images/create` Create an image either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. Query Parameters: - **fromImage** – Name of the image to pull. - **fromSrc** – Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. - **repo** – Repository name. - **tag** – Tag. - **registry** – The registry to pull from. Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/ubuntu/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Created": "2013-03-23T22:24:18.818426-07:00", "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", "ContainerConfig": { "Hostname": "", "User": "", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": true, "OpenStdin": true, "StdinOnce": false, "Env": null, "Cmd": ["/bin/bash"], "Dns": null, "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Volumes": null, "VolumesFrom": "", "WorkingDir": "" }, "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Parent": "27cf784147099545", "Size": 6824592 } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### 
Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", "Created": 1398108230, "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", "Tags": [ "ubuntu:lucid", "ubuntu:10.04" ], "Size": 182964289, "Comment": "" }, { "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", "Created": 1398108222, "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", "Tags": null, "Size": 0, "Comment": "" }, { "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", "Created": 1371157430, "CreatedBy": "", "Tags": [ "scratch12:latest", "scratch:latest" ], "Size": 0, "Comment": "Imported from -" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} {"error": "Invalid..."} ... If you wish to push an image on to a private registry, that image must already have a tag into a repository which references that registry `hostname` and `port`. This repository name should then be used in the URL. This duplicates the command line's flow. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – The tag to associate with the image on the registry. This is optional. Request Headers: - **X-Registry-Auth** – Include a base64-encoded AuthConfig. 
Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker Hub](https://hub.docker.com). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. **Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "description": "", "is_official": false, "is_automated": false, "name": "wma55/u1210sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "jdswinbank/sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "vgauthier/sshd", "star_count": 0 } ... 
] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Containers": 11, "CpuCfsPeriod": true, "CpuCfsQuota": true, "Debug": false, "DockerRootDir": "/var/lib/docker", "Driver": "btrfs", "DriverStatus": [[""]], "ExecutionDriver": "native-0.1", "ExperimentalBuild": false, "HttpProxy": "http://test:test@localhost:8080", "HttpsProxy": "https://test:test@localhost:8080", "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", "IPv4Forwarding": true, "Images": 16, "IndexServerAddress": "https://index.docker.io/v1/", "InitPath": "/usr/bin/docker", "InitSha1": "", "KernelVersion": "3.12.0-1-amd64", "Labels": [ "storage=ssd" ], "MemTotal": 2099236864, "MemoryLimit": true, "NCPU": 1, "NEventsListener": 0, "NFd": 11, "NGoroutines": 21, "Name": "prod-server-42", "NoProxy": "9.81.1.160", "OomKillDisable": true, "OperatingSystem": "Boot2Docker", "RegistryConfig": { "IndexConfigs": { "docker.io": { "Mirrors": null, "Name": "docker.io", "Official": true, "Secure": true } }, "InsecureRegistryCIDRs": [ "127.0.0.0/8" ] }, "SwapLimit": false, "SystemTime": "2015-03-10T11:11:23.730591467-07:00" } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK 
Content-Type: application/json { "Version": "1.5.0", "Os": "linux", "KernelVersion": "3.18.5-tinycore64", "GoVersion": "go1.4.1", "GitCommit": "a8a31ef", "Arch": "amd64", "ApiVersion": "1.20", "Experimental": false } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes `POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Mounts": [ { "Source": "/data", "Destination": "/data", "Mode": "ro,Z", "RW": false } ], "Labels": { "key1": "value1", "key2": "value2" }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - **pause** – 1/True/true or 0/False/false, whether to pause the container before committing - **changes** – Dockerfile instructions to apply while committing Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling (using since). 
Docker containers report the following events: attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause and Docker images report: delete, import, pull, push, tag, untag **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} Query Parameters: - **since** – Timestamp used for polling - **until** – Timestamp used for polling - **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: - `event=`; -- event to filter - `image=`; -- image to filter - `container=`; -- container to filter Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images. `GET /images/get` Get a tarball containing all images and metadata for one or more repositories. 
For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into a Docker repository. See the [image tarball format](#image-tarball-format) for more details. **Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` ### Exec Create `POST /containers/(id)/exec` Sets up an exec instance in a running container `id` **Example request**: POST /containers/e90e34656806/exec HTTP/1.1 Content-Type: application/json { "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "Cmd": [ "date" ] } **Example response**: HTTP/1.1 201 OK Content-Type: application/json { "Id": "f90e34656806", "Warnings":[] } Json Parameters: - **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. - **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. - **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. - **Tty** - Boolean value to allocate a pseudo-TTY. - **Cmd** - Command to run specified as a string or an array of strings. Status Codes: - **201** – no error - **404** – no such container ### Exec Start `POST /exec/(id)/start` Starts a previously set up `exec` instance `id`. If `detach` is true, this API returns after starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. **Example request**: POST /exec/e90e34656806/start HTTP/1.1 Content-Type: application/json { "Detach": false, "Tty": false } **Example response**: HTTP/1.1 201 OK Content-Type: application/json {{ STREAM }} Json Parameters: - **Detach** - Detach from the `exec` command. - **Tty** - Boolean value to allocate a pseudo-TTY. Status Codes: - **200** – no error - **404** – no such exec instance **Stream details**: Similar to the stream behavior of `POST /container/(id)/attach` API ### Exec Resize `POST /exec/(id)/resize` Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. This API is valid only if `tty` was specified as part of creating and starting the `exec` command. 
**Example request**: POST /exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 Content-Type: text/plain **Example response**: HTTP/1.1 201 OK Content-Type: text/plain Query Parameters: - **h** – height of `tty` session - **w** – width Status Codes: - **201** – no error - **404** – no such exec instance ### Exec Inspect `GET /exec/(id)/json` Return low-level information about the `exec` command `id`. **Example request**: GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: plain/text { "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", "Running" : false, "ExitCode" : 2, "ProcessConfig" : { "privileged" : false, "user" : "", "tty" : false, "entrypoint" : "sh", "arguments" : [ "-c", "exit 2" ] }, "OpenStdin" : false, "OpenStderr" : false, "OpenStdout" : false, "Container" : { "State" : { "Running" : true, "Paused" : false, "Restarting" : false, "OOMKilled" : false, "Pid" : 3650, "ExitCode" : 0, "Error" : "", "StartedAt" : "2014-11-17T22:26:03.717657531Z", "FinishedAt" : "0001-01-01T00:00:00Z" }, "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", "Created" : "2014-11-17T22:26:03.626304998Z", "Path" : "date", "Args" : [], "Config" : { "Hostname" : "8f177a186b97", "Domainname" : "", "User" : "", "AttachStdin" : false, "AttachStdout" : false, "AttachStderr" : false, "ExposedPorts" : null, "Tty" : false, "OpenStdin" : false, "StdinOnce" : false, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd" : [ "date" ], "Image" : "ubuntu", "Volumes" : null, "WorkingDir" : "", "Entrypoint" : null, "NetworkDisabled" : false, "MacAddress" : "", "OnBuild" : null, "SecurityOpt" : null }, "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", "NetworkSettings" : { "IPAddress" : "172.17.0.2", "IPPrefixLen" : 16, "MacAddress" : "02:42:ac:11:00:02", "Gateway" : "172.17.42.1", "Bridge" : "docker0", 
"PortMapping" : null, "Ports" : {} }, "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Name" : "/test", "Driver" : "aufs", "ExecDriver" : "native-0.2", "MountLabel" : "", "ProcessLabel" : "", "AppArmorProfile" : "", "RestartCount" : 0, "Mounts" : [] } } Status Codes: - **200** – no error - **404** – no such exec instance - **500** - server error # 3. Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it. - Then, retry to create the container. - Start the container. - If you are not in detached mode: - Attach to the container, using `logs=1` (to have `stdout` and `stderr` from the container's start) and `stream=1` - If in detached mode or only `stdin` is attached, display the container's id. ## 3.2 Hijacking In this version of the API, `/attach`, uses hijacking to transport `stdin`, `stdout`, and `stderr` on the same socket. To hint potential proxies about connection hijacking, Docker client sends connection upgrade headers similarly to websocket. Upgrade: tcp Connection: Upgrade When Docker daemon detects the `Upgrade` header, it switches its status code from **200 OK** to **101 UPGRADED** and resends the same headers. ## 3.3 CORS Requests To set cross origin requests to the remote api please give values to `--api-cors-header` when running Docker in daemon mode. 
Set * (asterisk) allows all, default or blank means CORS disabled $ docker daemon -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" docker-1.10.3/docs/reference/api/docker_remote_api_v1.21.md000066400000000000000000002330251267010174400233350ustar00rootroot00000000000000 # Docker Remote API v1.21 ## 1. Brief introduction - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST. However, for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout`, `stdin` and `stderr`. - When the client API version is newer than the daemon's, these calls return an HTTP `400 Bad Request` error message. # 2. Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Names":["/boring_feynman"], "Image": "ubuntu:latest", "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "9cd87474be90", "Names":["/coolName"], "Image": "ubuntu:latest", "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0 }, { "Id": "3176a2479c92", "Names":["/sleepy_dog"], "Image": "ubuntu:latest", "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", "Command": "echo 3333333333333333", 
"Created": 1367854154, "Status": "Exit 0", "Ports":[], "Labels": {}, "SizeRw":12288, "SizeRootFs":0 }, { "Id": "4cb07b47f9fb", "Names":["/running_cat"], "Image": "ubuntu:latest", "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0 } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. - **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: - `exited=`; -- containers with exit code of `` ; - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) - `label=key` or `label="key=value"` of a container label Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd": [ "date" ], "Entrypoint": "", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Mounts": [ { "Source": "/data", "Destination": "/data", "Mode": "ro,Z", "RW": false } ], "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, 
"StopSignal": "SIGTERM", "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "Memory": 0, "MemorySwap": 0, "MemoryReservation": 0, "KernelMemory": 0, "CpuShares": 512, "CpuPeriod": 100000, "CpuQuota": 50000, "CpusetCpus": "0,1", "CpusetMems": "0,1", "BlkioWeight": 300, "MemorySwappiness": 60, "OomKillDisable": false, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsOptions": [""], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "GroupAdd": ["newgroup"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [], "Ulimits": [{}], "LogConfig": { "Type": "json-file", "Config": {} }, "SecurityOpt": [""], "CgroupParent": "", "VolumeDriver": "" } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id":"e90e34656806", "Warnings":[] } Json Parameters: - **Hostname** - A string value containing the hostname to use for the container. - **Domainname** - A string value containing the domain name to use for the container. - **User** - A string value specifying the user inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **MemoryReservation** - Memory soft limit in bytes. - **KernelMemory** - Kernel memory limit in bytes. - **CpuShares** - An integer value containing the container's CPU Shares (ie. the relative weight vs other containers). - **CpuPeriod** - The length of a CPU period in microseconds. - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. - **Cpuset** - Deprecated please don't use. Use `CpusetCpus` instead. 
- **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. - **AttachStdin** - Boolean value, attaches to `stdin`. - **AttachStdout** - Boolean value, attaches to `stdout`. - **AttachStderr** - Boolean value, attaches to `stderr`. - **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. - **OpenStdin** - Boolean value, opens stdin, - **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. - **Env** - A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]` - **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value"[,"key2":"value2"]}` - **Cmd** - Command to run specified as a string or an array of strings. - **Entrypoint** - Set the entry point for the container as a string or an array of strings. - **Image** - A string specifying the image name to use for the container. - **Mounts** - An array of mount points in the container. - **WorkingDir** - A string specifying the working directory for commands to run in. - **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. - **HostConfig** - **Binds** – A list of volume bindings for this container. 
Each volume binding is a string in one of these forms: + `container_path` to create a new volume for the container + `host_path:container_path` to bind-mount a host path into the container + `host_path:container_path:ro` to make the bind-mount read-only inside the container. + `volume_name:container_path` to bind-mount a volume managed by a volume plugin into the container. + `volume_name:container_path:ro` to make the bind mount read-only inside the container. - **Links** - A list of links for the container. Each link entry should be in the form of `container_name:alias`. - **LxcConf** - LXC specific configurations. These configurations only work when using the `lxc` execution driver. - **PortBindings** - A map of exposed container ports and the host port they should map to. A JSON object in the form `{ /: [{ "HostPort": "" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. - **Privileged** - Gives the container full access to the host. Specified as a boolean value. - **ReadonlyRootfs** - Mount the container's root filesystem as read only. Specified as a boolean value. - **Dns** - A list of DNS servers for the container to use. - **DnsOptions** - A list of DNS options - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **GroupAdd** - A list of additional groups that the container process will run as - **RestartPolicy** – The behavior to apply when the container exits. 
The value is an object with a `Name` property of either `"always"` to always restart, `"unless-stopped"` to restart always except when the user has manually stopped the container or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **NetworkMode** - Sets the networking mode for the container. Supported values are: `bridge`, `host`, and `container:` - **Devices** - A list of devices to add to the container specified as a JSON object in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` - **Ulimits** - A list of ulimits to set in the container, specified as `{ "Name": , "Soft": , "Hard": }`, for example: `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` - **SecurityOpt**: A list of string values to customize labels for MLS systems, such as SELinux. - **LogConfig** - Log configuration for the container, specified as a JSON object in the form `{ "Type": "", "Config": {"key1": "val1"}}`. Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `none`. `json-file` is the default logging driver. - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. - **VolumeDriver** - Driver that this container uses to mount volumes. Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. 
Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "AppArmorProfile": "", "Args": [ "-c", "exit 9" ], "Config": { "AttachStderr": true, "AttachStdin": false, "AttachStdout": true, "Cmd": [ "/bin/sh", "-c", "exit 9" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts": null, "Hostname": "ba033ac44011", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "MacAddress": "", "NetworkDisabled": false, "OnBuild": null, "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "", "StopSignal": "SIGTERM" }, "Created": "2015-01-06T15:47:31.485331387Z", "Driver": "devicemapper", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { "Binds": null, "BlkioWeight": 0, "CapAdd": null, "CapDrop": null, "ContainerIDFile": "", "CpusetCpus": "", "CpusetMems": "", "CpuShares": 0, "CpuPeriod": 100000, "Devices": [], "Dns": null, "DnsOptions": null, "DnsSearch": null, "ExtraHosts": null, "IpcMode": "", "Links": null, "LxcConf": [], "Memory": 0, "MemorySwap": 0, "MemoryReservation": 0, "KernelMemory": 0, "OomKillDisable": false, "NetworkMode": "bridge", "PortBindings": {}, "Privileged": false, "ReadonlyRootfs": false, "PublishAllPorts": false, "RestartPolicy": { "MaximumRetryCount": 2, "Name": "on-failure" }, "LogConfig": { "Config": null, "Type": "json-file" }, "SecurityOpt": null, "VolumesFrom": null, "Ulimits": [{}], "VolumeDriver": "" }, "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", 
"HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", "MountLabel": "", "Name": "/boring_euclid", "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": null, "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "", "Networks": { "bridge": { "EndpointID": "", "Gateway": "", "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "" } } }, "Path": "/bin/sh", "ProcessLabel": "", "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", "RestartCount": 1, "State": { "Error": "", "ExitCode": 9, "FinishedAt": "2015-01-06T15:47:32.080254511Z", "OOMKilled": false, "Paused": false, "Pid": 0, "Restarting": false, "Running": true, "StartedAt": "2015-01-06T15:47:32.072697474Z", "Status": "running" }, "Mounts": [ { "Source": "/data", "Destination": "/data", "Mode": "ro,Z", "RW": false } ] } **Example request, with size information**: GET /containers/4fa6e0f0c678/json?size=1 HTTP/1.1 **Example response, with size information**: HTTP/1.1 200 OK Content-Type: application/json { .... "SizeRw": 0, "SizeRootFs": 972, .... } Query Parameters: - **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. 
Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. **Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ], "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ] } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get `stdout` and `stderr` logs from the container ``id`` > **Note**: > This endpoint works only for containers with the `json-file` or `journald` logging drivers. **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. 
- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. - **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp will only output log-entries since that timestamp. Default: 0 (unfiltered) - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. Default `false`. - **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **404** – no such container - **500** – server error ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` Inspect changes on container `id`'s filesystem **Example request**: GET /containers/4fa6e0f0c678/changes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Path": "/dev", "Kind": 0 }, { "Path": "/dev/kmsg", "Kind": 1 }, { "Path": "/test", "Kind": 1 } ] Values for `Kind`: - `0`: Modify - `1`: Add - `2`: Delete Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Export a container `GET /containers/(id)/export` Export the contents of container `id` **Example request**: GET /containers/4fa6e0f0c678/export HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/octet-stream {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container stats based on resource usage `GET /containers/(id)/stats` This endpoint returns a live stream of a container's resource usage statistics. 
**Example request**: GET /containers/redis1/stats HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "read" : "2015-01-08T22:57:31.547920715Z", "networks": { "eth0": { "rx_bytes": 5338, "rx_dropped": 0, "rx_errors": 0, "rx_packets": 36, "tx_bytes": 648, "tx_dropped": 0, "tx_errors": 0, "tx_packets": 8 }, "eth5": { "rx_bytes": 4641, "rx_dropped": 0, "rx_errors": 0, "rx_packets": 26, "tx_bytes": 690, "tx_dropped": 0, "tx_errors": 0, "tx_packets": 9 } }, "memory_stats" : { "stats" : { "total_pgmajfault" : 0, "cache" : 0, "mapped_file" : 0, "total_inactive_file" : 0, "pgpgout" : 414, "rss" : 6537216, "total_mapped_file" : 0, "writeback" : 0, "unevictable" : 0, "pgpgin" : 477, "total_unevictable" : 0, "pgmajfault" : 0, "total_rss" : 6537216, "total_rss_huge" : 6291456, "total_writeback" : 0, "total_inactive_anon" : 0, "rss_huge" : 6291456, "hierarchical_memory_limit" : 67108864, "total_pgfault" : 964, "total_active_file" : 0, "active_anon" : 6537216, "total_active_anon" : 6537216, "total_pgpgout" : 414, "total_cache" : 0, "inactive_anon" : 0, "active_file" : 0, "pgfault" : 964, "inactive_file" : 0, "total_pgpgin" : 477 }, "max_usage" : 6651904, "usage" : 6537216, "failcnt" : 0, "limit" : 67108864 }, "blkio_stats" : {}, "cpu_stats" : { "cpu_usage" : { "percpu_usage" : [ 16970827, 1839451, 7107380, 10571290 ], "usage_in_usermode" : 10000000, "total_usage" : 36488948, "usage_in_kernelmode" : 20000000 }, "system_cpu_usage" : 20091722000000000, "throttling_data" : {} } } Query Parameters: - **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Resize a container TTY `POST /containers/(id)/resize` Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. 
**Example request**: POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Length: 0 Content-Type: text/plain; charset=utf-8 Query Parameters: - **h** – height of `tty` session - **w** – width Status Codes: - **200** – no error - **404** – No such container - **500** – Cannot resize container ### Start a container `POST /containers/(id)/start` Start the container `id` > **Note**: > For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. > See [create a container](#create-a-container) for details. **Example request**: POST /containers/(id)/start HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters - **signal** - Signal to send to the container: integer or string like `SIGINT`. When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Rename a container `POST /containers/(id)/rename` Rename the container `id` to a `new_name` **Example request**: POST /containers/e90e34656806/rename?name=new_name HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **name** – new name for the container Status Codes: - **204** – no error - **404** – no such container - **409** - conflict name already assigned - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default `false`. - **stream** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach to `stdin`. Default `false`. - **stdout** – 1/True/true or 0/False/false, if `logs=true`, return `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. - **stderr** – 1/True/true or 0/False/false, if `logs=true`, return `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. 
Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When using the TTY setting is enabled in [`POST /containers/create` ](#create-a-container), the stream is the raw data from the process PTY and client's `stdin`. When the TTY is disabled, then the stream is multiplexed to separate `stdout` and `stderr`. The format is a **Header** and a **Payload** (frame). **HEADER** The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. **PAYLOAD** The payload is the raw stream. **IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read eight bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **logs** – 1/True/true or 0/False/false, return logs. Default `false`. - **stream** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach to `stdin`. Default `false`. 
- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. - **stderr** – 1/True/true or 0/False/false, if `logs=true`, return `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default `false`. - **force** - 1/True/true or 0/False/false, Kill then remove the container. Default `false`. Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Deprecated** in favor of the `archive` endpoint below. **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Retrieving information about files and folders in a container `HEAD /containers/(id)/archive` See the description of the `X-Docker-Container-Path-Stat` header in the following section. 
### Get an archive of a filesystem resource in a container `GET /containers/(id)/archive` Get a tar archive of a resource in the filesystem of container `id`. Query Parameters: - **path** - resource in the container's filesystem to archive. Required. If not an absolute path, it is relative to the container's root directory. The resource specified by **path** must exist. To assert that the resource is expected to be a directory, **path** should end in `/` or `/.` (assuming a path separator of `/`). If **path** ends in `/.` then this indicates that only the contents of the **path** directory should be copied. A symlink is always resolved to its target. **Note**: It is not possible to copy certain system files such as resources under `/proc`, `/sys`, `/dev`, and mounts created by the user in the container. **Example request**: GET /containers/8cce319429b2/archive?path=/root HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= {{ TAR STREAM }} On success, a response header `X-Docker-Container-Path-Stat` will be set to a base64-encoded JSON object containing some filesystem header information about the archived resource. The above example value would decode to the following JSON object (whitespace added for readability): { "name": "root", "size": 4096, "mode": 2147484096, "mtime": "2014-02-27T20:51:23Z", "linkTarget": "" } A `HEAD` request can also be made to this endpoint if only this information is desired. 
Status Codes: - **200** - success, returns archive of copied resource - **400** - client error, bad parameter, details in JSON response body, one of: - must specify path parameter (**path** cannot be empty) - not a directory (**path** was asserted to be a directory but exists as a file) - **404** - client error, resource not found, one of: – no such container (container `id` does not exist) - no such file or directory (**path** does not exist) - **500** - server error ### Extract an archive of files or folders to a directory in a container `PUT /containers/(id)/archive` Upload a tar archive to be extracted to a path in the filesystem of container `id`. Query Parameters: - **path** - path to a directory in the container to extract the archive's contents into. Required. If not an absolute path, it is relative to the container's root directory. The **path** resource must exist. - **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. **Example request**: PUT /containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 Content-Type: application/x-tar {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – the content was extracted successfully - **400** - client error, bad parameter, details in JSON response body, one of: - must specify path parameter (**path** cannot be empty) - not a directory (**path** should be a directory but exists as a file) - unable to overwrite existing directory with non-directory (if **noOverwriteDirNonDir**) - unable to overwrite existing non-directory with directory (if **noOverwriteDirNonDir**) - **403** - client error, permission denied, the volume or container rootfs is marked as read-only. 
- **404** - client error, resource not found, one of: – no such container (container `id` does not exist) - no such file or directory (**path** resource does not exist) - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275, "Labels": {} }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, "Size": 24653, "VirtualSize": 180116135, "Labels": { "com.example.version": "v1" } } ] **Example request, with digest information**: GET /images/json?digests=1 HTTP/1.1 **Example response, with digest information**: HTTP/1.1 200 OK Content-Type: application/json [ { "Created": 1420064636, "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", "RepoDigests": [ "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" ], "RepoTags": [ "localhost:5000/test/busybox:latest", "playdate:latest" ], "Size": 0, "VirtualSize": 2429728, "Labels": {} } ] The response shows a single image `Id` associated with two repositories (`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use either of the `RepoTags` values `localhost:5000/test/busybox:latest` or `playdate:latest` to reference the image. You can also use `RepoDigests` values to reference an image. In this response, the array has only one reference and that is to the `localhost:5000/test/busybox` repository; the `playdate` repository has no digest. 
You can reference this digest using the value: `localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` See the `docker run` and `docker build` commands for examples of digest and tag references on the command line. Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - `dangling=true` - `label=key` or `label="key=value"` of an image label - **filter** - only return images with the specified name ### Build image from a Dockerfile `POST /build` Build an image from a Dockerfile **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The input stream must be a `tar` archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. The archive must include a build instructions file, typically called `Dockerfile` at the archive's root. The `dockerfile` parameter may be used to specify a different build instructions file. To do this, its value must be the path to the alternate build instructions file to use. The archive may include any number of other files, which are accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). The build is canceled if the client drops the connection by quitting or being killed. Query Parameters: - **dockerfile** - Path within the build context to the Dockerfile. This is ignored if `remote` is specified and points to an individual filename. - **t** – A name and optional tag to apply to the image in the `name:tag` format. If you omit the `tag` the default `latest` value is assumed. You can provide one or more `t` parameters. - **remote** – A Git repository URI or HTTP/HTTPS URI build source. 
If the URI specifies a filename, the file's contents are placed into a file called `Dockerfile`. - **q** – Suppress verbose build output. - **nocache** – Do not use the cache when building the image. - **pull** - Attempt to pull the image even if an older image exists locally. - **rm** - Remove intermediate containers after a successful build (default behavior). - **forcerm** - Always remove intermediate containers (includes `rm`). - **memory** - Set memory limit for build. - **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight). - **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - **cpuperiod** - The length of a CPU period in microseconds. - **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. - **buildargs** – JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the `buildargs` as the environment context for command(s) run via the Dockerfile's `RUN` instruction or for variable expansion in other Dockerfile instructions. This is not meant for passing secret values. [Read more about the buildargs instruction](../../reference/builder.md#arg) Request Headers: - **Content-type** – Set to `"application/tar"`. - **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON object with the following structure: { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } This object maps the hostname of a registry to an object containing the "username" and "password" for that registry. Multiple registries may be specified as the build may be based on an image requiring authentication to pull from any arbitrary registry. Only the registry domain name (and port if not the default "443") are required. However (for legacy reasons) the "official" Docker, Inc. 
hosted registry must be specified with both a "https://" prefix and a "/v1/" suffix even though Docker will prefer to use the v2 registry API. Status Codes: - **200** – no error - **500** – server error ### Create an image `POST /images/create` Create an image either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. Query Parameters: - **fromImage** – Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. - **fromSrc** – Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image. - **repo** – Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image. - **tag** – Tag or digest. 
Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/example/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", "Comment" : "", "Os" : "linux", "Architecture" : "amd64", "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", "ContainerConfig" : { "Tty" : false, "Hostname" : "e611e15f9c9d", "Volumes" : null, "Domainname" : "", "AttachStdout" : false, "PublishService" : "", "AttachStdin" : false, "OpenStdin" : false, "StdinOnce" : false, "NetworkDisabled" : false, "OnBuild" : [], "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", "User" : "", "WorkingDir" : "", "Entrypoint" : null, "MacAddress" : "", "AttachStderr" : false, "Labels" : { "com.example.license" : "GPL", "com.example.version" : "1.0", "com.example.vendor" : "Acme" }, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts" : null, "Cmd" : [ "/bin/sh", "-c", "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" ] }, "DockerVersion" : "1.9.0-dev", "VirtualSize" : 188359297, "Size" : 0, "Author" : "", "Created" : "2015-09-10T08:30:53.26995814Z", "GraphDriver" : { "Name" : "aufs", "Data" : null }, "RepoDigests" : [ "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" ], "RepoTags" : [ "example:1.0", "example:latest", "example:stable" ], "Config" : { "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", "NetworkDisabled" : false, "OnBuild" : [], "StdinOnce" : false, "PublishService" : "", "AttachStdin" : false, 
"OpenStdin" : false, "Domainname" : "", "AttachStdout" : false, "Tty" : false, "Hostname" : "e611e15f9c9d", "Volumes" : null, "Cmd" : [ "/bin/bash" ], "ExposedPorts" : null, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Labels" : { "com.example.vendor" : "Acme", "com.example.version" : "1.0", "com.example.license" : "GPL" }, "Entrypoint" : null, "MacAddress" : "", "AttachStderr" : false, "WorkingDir" : "", "User" : "" } } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", "Created": 1398108230, "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", "Tags": [ "ubuntu:lucid", "ubuntu:10.04" ], "Size": 182964289, "Comment": "" }, { "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", "Created": 1398108222, "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", "Tags": null, "Size": 0, "Comment": "" }, { "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", "Created": 1371157430, "CreatedBy": "", "Tags": [ "scratch12:latest", "scratch:latest" ], "Size": 0, "Comment": "Imported from -" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? 
(n/a)", "progressDetail": {"current": 1}}} {"error": "Invalid..."} ... If you wish to push an image on to a private registry, that image must already have a tag into a repository which references that registry `hostname` and `port`. This repository name should then be used in the URL. This duplicates the command line's flow. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – The tag to associate with the image on the registry. This is optional. Request Headers: - **X-Registry-Auth** – Include a base64-encoded AuthConfig. object. Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker Hub](https://hub.docker.com). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. 
**Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "description": "", "is_official": false, "is_automated": false, "name": "wma55/u1210sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "jdswinbank/sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "vgauthier/sshd", "star_count": 0 } ... ] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Containers": 11, "CpuCfsPeriod": true, "CpuCfsQuota": true, "Debug": false, "DiscoveryBackend": "etcd://localhost:2379", "DockerRootDir": "/var/lib/docker", "Driver": "btrfs", "DriverStatus": [[""]], "ExecutionDriver": "native-0.1", "ExperimentalBuild": false, "HttpProxy": "http://test:test@localhost:8080", "HttpsProxy": "https://test:test@localhost:8080", "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", "IPv4Forwarding": true, "Images": 16, "IndexServerAddress": "https://index.docker.io/v1/", "InitPath": "/usr/bin/docker", "InitSha1": "", "KernelVersion": "3.12.0-1-amd64", "Labels": [ "storage=ssd" ], "MemTotal": 2099236864, "MemoryLimit": true, "NCPU": 1, "NEventsListener": 0, "NFd": 11, "NGoroutines": 21, "Name": "prod-server-42", "NoProxy": "9.81.1.160", "OomKillDisable": true,
"OperatingSystem": "Boot2Docker", "RegistryConfig": { "IndexConfigs": { "docker.io": { "Mirrors": null, "Name": "docker.io", "Official": true, "Secure": true } }, "InsecureRegistryCIDRs": [ "127.0.0.0/8" ] }, "SwapLimit": false, "SystemTime": "2015-03-10T11:11:23.730591467-07:00" "ServerVersion": "1.9.0" } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Version": "1.5.0", "Os": "linux", "KernelVersion": "3.18.5-tinycore64", "GoVersion": "go1.4.1", "GitCommit": "a8a31ef", "Arch": "amd64", "ApiVersion": "1.20", "Experimental": false } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes `POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Mounts": [ { "Source": "/data", "Destination": "/data", "Mode": "ro,Z", "RW": false } ], "Labels": { "key1": "value1", "key2": "value2" }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – 
author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - **pause** – 1/True/true or 0/False/false, whether to pause the container before committing - **changes** – Dockerfile instructions to apply while committing Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling (using since). Docker containers report the following events: attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause and Docker images report: delete, import, pull, push, tag, untag **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status":"pull","id":"busybox:latest","time":1442421700,"timeNano":1442421700598988358} {"status":"create","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716853979870} {"status":"attach","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716894759198} {"status":"start","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716983607193} Query Parameters: - **since** – Timestamp used for polling - **until** – Timestamp used for polling - **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: - `container=`; -- container to filter - `event=`; -- event to filter - `image=`; -- image to filter - `label=`; -- image and container label to filter Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned. 
If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images. `GET /images/get` Get a tarball containing all images and metadata for one or more repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into a Docker repository. See the [image tarball format](#image-tarball-format) for more details. 
**Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` ### Exec Create `POST /containers/(id)/exec` Sets up an exec instance in a running container `id` **Example request**: POST /containers/e90e34656806/exec HTTP/1.1 Content-Type: application/json { "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "Cmd": [ "date" ] } **Example response**: HTTP/1.1 201 OK Content-Type: application/json { "Id": "f90e34656806", "Warnings":[] } Json Parameters: - **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. - **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. - **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. - **Tty** - Boolean value to allocate a pseudo-TTY. - **Cmd** - Command to run specified as a string or an array of strings. Status Codes: - **201** – no error - **404** – no such container - **409** - container is paused - **500** - server error ### Exec Start `POST /exec/(id)/start` Starts a previously set up `exec` instance `id`. If `detach` is true, this API returns after starting the `exec` command. 
Otherwise, this API sets up an interactive session with the `exec` command. **Example request**: POST /exec/e90e34656806/start HTTP/1.1 Content-Type: application/json { "Detach": false, "Tty": false } **Example response**: HTTP/1.1 201 OK Content-Type: application/json {{ STREAM }} Json Parameters: - **Detach** - Detach from the `exec` command. - **Tty** - Boolean value to allocate a pseudo-TTY. Status Codes: - **200** – no error - **404** – no such exec instance - **409** - container is paused **Stream details**: Similar to the stream behavior of `POST /container/(id)/attach` API ### Exec Resize `POST /exec/(id)/resize` Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. This API is valid only if `tty` was specified as part of creating and starting the `exec` command. **Example request**: POST /exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 Content-Type: text/plain **Example response**: HTTP/1.1 201 OK Content-Type: text/plain Query Parameters: - **h** – height of `tty` session - **w** – width Status Codes: - **201** – no error - **404** – no such exec instance ### Exec Inspect `GET /exec/(id)/json` Return low-level information about the `exec` command `id`. 
**Example request**: GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: plain/text { "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", "Running" : false, "ExitCode" : 2, "ProcessConfig" : { "privileged" : false, "user" : "", "tty" : false, "entrypoint" : "sh", "arguments" : [ "-c", "exit 2" ] }, "OpenStdin" : false, "OpenStderr" : false, "OpenStdout" : false, "Container" : { "State" : { "Status" : "running", "Running" : true, "Paused" : false, "Restarting" : false, "OOMKilled" : false, "Pid" : 3650, "ExitCode" : 0, "Error" : "", "StartedAt" : "2014-11-17T22:26:03.717657531Z", "FinishedAt" : "0001-01-01T00:00:00Z" }, "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", "Created" : "2014-11-17T22:26:03.626304998Z", "Path" : "date", "Args" : [], "Config" : { "Hostname" : "8f177a186b97", "Domainname" : "", "User" : "", "AttachStdin" : false, "AttachStdout" : false, "AttachStderr" : false, "ExposedPorts" : null, "Tty" : false, "OpenStdin" : false, "StdinOnce" : false, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd" : [ "date" ], "Image" : "ubuntu", "Volumes" : null, "WorkingDir" : "", "Entrypoint" : null, "NetworkDisabled" : false, "MacAddress" : "", "OnBuild" : null, "SecurityOpt" : null }, "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": null, "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "", "Networks": { "bridge": { "EndpointID": "", "Gateway": "", "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "GlobalIPv6Address": "", 
"GlobalIPv6PrefixLen": 0, "MacAddress": "" } } }, "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Name" : "/test", "Driver" : "aufs", "ExecDriver" : "native-0.2", "MountLabel" : "", "ProcessLabel" : "", "AppArmorProfile" : "", "RestartCount" : 0, "Mounts" : [] } } Status Codes: - **200** – no error - **404** – no such exec instance - **500** - server error ## 2.4 Volumes ### List volumes `GET /volumes` **Example request**: GET /volumes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Volumes": [ { "Name": "tardis", "Driver": "local", "Mountpoint": "/var/lib/docker/volumes/tardis" } ] } Query Parameters: - **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` Status Codes: - **200** - no error - **500** - server error ### Create a volume `POST /volumes/create` Create a volume **Example request**: POST /volumes/create HTTP/1.1 Content-Type: application/json { "Name": "tardis" } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Name": "tardis", "Driver": "local", "Mountpoint": "/var/lib/docker/volumes/tardis" } Status Codes: - **201** - no error - **500** - server error JSON Parameters: - **Name** - The new volume's name. If not specified, Docker generates a name. - **Driver** - Name of the volume driver to use. Defaults to `local` for the name. - **DriverOpts** - A mapping of driver options and values. 
These options are passed directly to the driver and are driver specific. ### Inspect a volume `GET /volumes/(name)` Return low-level information on the volume `name` **Example request**: GET /volumes/tardis **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Name": "tardis", "Driver": "local", "Mountpoint": "/var/lib/docker/volumes/tardis" } Status Codes: - **200** - no error - **404** - no such volume - **500** - server error ### Remove a volume `DELETE /volumes/(name)` Instruct the driver to remove the volume (`name`). **Example request**: DELETE /volumes/tardis HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes - **204** - no error - **404** - no such volume or volume driver - **409** - volume is in use and cannot be removed - **500** - server error ## 2.5 Networks ### List networks `GET /networks` **Example request**: GET /networks HTTP/1.1 **Example response**: ``` HTTP/1.1 200 OK Content-Type: application/json [ { "Name": "bridge", "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.17.0.0/16" } ] }, "Containers": { "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", "MacAddress": "02:42:ac:11:00:02", "IPv4Address": "172.17.0.2/16", "IPv6Address": "" } }, "Options": { "com.docker.network.bridge.default_bridge": "true", "com.docker.network.bridge.enable_icc": "true", "com.docker.network.bridge.enable_ip_masquerade": "true", "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", "com.docker.network.bridge.name": "docker0", "com.docker.network.driver.mtu": "1500" } }, { "Name": "none", "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", "Scope": "local", "Driver": "null", "IPAM": { "Driver": "default", "Config": [] }, "Containers": {}, "Options": {} }, { "Name": "host", "Id": 
"13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", "Scope": "local", "Driver": "host", "IPAM": { "Driver": "default", "Config": [] }, "Containers": {}, "Options": {} } ] ``` Query Parameters: - **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: `name=[network-names]` , `id=[network-ids]` Status Codes: - **200** - no error - **500** - server error ### Inspect network `GET /networks/` **Example request**: GET /networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1 **Example response**: ``` HTTP/1.1 200 OK Content-Type: application/json { "Name": "bridge", "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.17.0.0/16" } ] }, "Containers": { "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", "MacAddress": "02:42:ac:11:00:02", "IPv4Address": "172.17.0.2/16", "IPv6Address": "" } }, "Options": { "com.docker.network.bridge.default_bridge": "true", "com.docker.network.bridge.enable_icc": "true", "com.docker.network.bridge.enable_ip_masquerade": "true", "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", "com.docker.network.bridge.name": "docker0", "com.docker.network.driver.mtu": "1500" } } ``` Status Codes: - **200** - no error - **404** - network not found ### Create a network `POST /networks/create` Create a network **Example request**: ``` POST /networks/create HTTP/1.1 Content-Type: application/json { "Name":"isolated_nw", "Driver":"bridge" "IPAM":{ "Config":[{ "Subnet":"172.20.0.0/16", "IPRange":"172.20.10.0/24", "Gateway":"172.20.10.11" }] } ``` **Example response**: ``` HTTP/1.1 201 Created Content-Type: application/json { "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", "Warning": "" } ``` Status 
Codes: - **201** - no error - **404** - plugin not found - **500** - server error JSON Parameters: - **Name** - The new network's name. this is a mandatory field - **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver - **IPAM** - Optional custom IP scheme for the network - **Options** - Network specific options to be used by the drivers - **CheckDuplicate** - Requests daemon to check for networks with same name ### Connect a container to a network `POST /networks/(id)/connect` Connects a container to a network **Example request**: ``` POST /networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 Content-Type: application/json { "Container":"3613f73ba0e4" } ``` **Example response**: HTTP/1.1 200 OK Status Codes: - **200** - no error - **404** - network or container is not found - **500** - Internal Server Error JSON Parameters: - **container** - container-id/name to be connected to the network ### Disconnect a container from a network `POST /networks/(id)/disconnect` Disconnects a container from a network **Example request**: ``` POST /networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 Content-Type: application/json { "Container":"3613f73ba0e4" } ``` **Example response**: HTTP/1.1 200 OK Status Codes: - **200** - no error - **404** - network or container not found - **500** - Internal Server Error JSON Parameters: - **Container** - container-id/name to be disconnected from a network ### Remove a network `DELETE /networks/(id)` Instruct the driver to remove the network (`id`). **Example request**: DELETE /networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Status Codes - **200** - no error - **404** - no such network - **500** - server error # 3. 
Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it. - Then, retry to create the container. - Start the container. - If you are not in detached mode: - Attach to the container, using `logs=1` (to have `stdout` and `stderr` from the container's start) and `stream=1` - If in detached mode or only `stdin` is attached, display the container's id. ## 3.2 Hijacking In this version of the API, `/attach`, uses hijacking to transport `stdin`, `stdout`, and `stderr` on the same socket. To hint potential proxies about connection hijacking, Docker client sends connection upgrade headers similarly to websocket. Upgrade: tcp Connection: Upgrade When Docker daemon detects the `Upgrade` header, it switches its status code from **200 OK** to **101 UPGRADED** and resends the same headers. ## 3.3 CORS Requests To set cross origin requests to the remote api please give values to `--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, default or blank means CORS disabled $ docker daemon -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" docker-1.10.3/docs/reference/api/docker_remote_api_v1.22.md000066400000000000000000002572661267010174400233530ustar00rootroot00000000000000 # Docker Remote API v1.22 ## 1. Brief introduction - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST. However, for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout`, `stdin` and `stderr`. - When the client API version is newer than the daemon's, these calls return an HTTP `400 Bad Request` error message. # 2. 
Endpoints ## 2.1 Containers ### List containers `GET /containers/json` List containers **Example request**: GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "8dfafdbc3a40", "Names":["/boring_feynman"], "Image": "ubuntu:latest", "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", "Command": "echo 1", "Created": 1367854155, "Status": "Exit 0", "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "SizeRw": 12288, "SizeRootFs": 0, "NetworkSettings": { "Networks": { "bridge": { "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02" } } } }, { "Id": "9cd87474be90", "Names":["/coolName"], "Image": "ubuntu:latest", "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", "Command": "echo 222222", "Created": 1367854155, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0, "NetworkSettings": { "Networks": { "bridge": { "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.8", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:08" } } } }, { "Id": "3176a2479c92", "Names":["/sleepy_dog"], "Image": "ubuntu:latest", "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", "Command": "echo 3333333333333333", "Created": 1367854154, "Status": "Exit 0", "Ports":[], "Labels": {}, 
"SizeRw":12288, "SizeRootFs":0, "NetworkSettings": { "Networks": { "bridge": { "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.6", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:06" } } } }, { "Id": "4cb07b47f9fb", "Names":["/running_cat"], "Image": "ubuntu:latest", "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", "Command": "echo 444444444444444444444444444444444", "Created": 1367854152, "Status": "Exit 0", "Ports": [], "Labels": {}, "SizeRw": 12288, "SizeRootFs": 0, "NetworkSettings": { "Networks": { "bridge": { "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.5", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:05" } } } } ] Query Parameters: - **all** – 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default (i.e., this defaults to false) - **limit** – Show `limit` last created containers, include non-running ones. - **since** – Show only containers created since Id, include non-running ones. - **before** – Show only containers created before Id, include non-running ones. - **size** – 1/True/true or 0/False/false, Show the containers sizes - **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: - `exited=`; -- containers with exit code of `` ; - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) - `label=key` or `label="key=value"` of a container label - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) Status Codes: - **200** – no error - **400** – bad parameter - **500** – server error ### Create a container `POST /containers/create` Create a container **Example request**: POST /containers/create HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "FOO=bar", "BAZ=quux" ], "Cmd": [ "date" ], "Entrypoint": "", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Mounts": [ { "Name": "fac362...80535", "Source": "/data", "Destination": "/data", "Driver": "local", "Mode": "ro,Z", "RW": false, "Propagation": "" } ], "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "StopSignal": "SIGTERM", "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "Memory": 0, "MemorySwap": 0, "MemoryReservation": 0, "KernelMemory": 0, "CpuShares": 512, "CpuPeriod": 100000, "CpuQuota": 50000, "CpusetCpus": "0,1", "CpusetMems": "0,1", "BlkioWeight": 300, "BlkioWeightDevice": [{}], "BlkioDeviceReadBps": [{}], "BlkioDeviceReadIOps": [{}], "BlkioDeviceWriteBps": [{}], "BlkioDeviceWriteIOps": [{}], "MemorySwappiness": 60, "OomKillDisable": false, "OomScoreAdj": 500, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsOptions": [""], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "GroupAdd": ["newgroup"], "RestartPolicy": { "Name": "", 
"MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [], "Ulimits": [{}], "LogConfig": { "Type": "json-file", "Config": {} }, "SecurityOpt": [""], "CgroupParent": "", "VolumeDriver": "", "ShmSize": 67108864 } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Id":"e90e34656806", "Warnings":[] } Json Parameters: - **Hostname** - A string value containing the hostname to use for the container. - **Domainname** - A string value containing the domain name to use for the container. - **User** - A string value specifying the user inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **MemoryReservation** - Memory soft limit in bytes. - **KernelMemory** - Kernel memory limit in bytes. - **CpuShares** - An integer value containing the container's CPU Shares (ie. the relative weight vs other containers). - **CpuPeriod** - The length of a CPU period in microseconds. - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. - **Cpuset** - Deprecated please don't use. Use `CpusetCpus` instead. - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. 
- **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"` - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` - **BlkioDeviceWiiteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. - **AttachStdin** - Boolean value, attaches to `stdin`. - **AttachStdout** - Boolean value, attaches to `stdout`. - **AttachStderr** - Boolean value, attaches to `stderr`. - **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. - **OpenStdin** - Boolean value, opens stdin, - **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. - **Env** - A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]` - **Labels** - Adds a map of labels to a container. 
To specify a map: `{"key":"value"[,"key2":"value2"]}` - **Cmd** - Command to run specified as a string or an array of strings. - **Entrypoint** - Set the entry point for the container as a string or an array of strings. - **Image** - A string specifying the image name to use for the container. - **Mounts** - An array of mount points in the container. - **WorkingDir** - A string specifying the working directory for commands to run in. - **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. - **HostConfig** - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + `container_path` to create a new volume for the container + `host_path:container_path` to bind-mount a host path into the container + `host_path:container_path:ro` to make the bind-mount read-only inside the container. + `volume_name:container_path` to bind-mount a volume managed by a volume plugin into the container. + `volume_name:container_path:ro` to make the bind mount read-only inside the container. - **Links** - A list of links for the container. Each link entry should be in the form of `container_name:alias`. - **PortBindings** - A map of exposed container ports and the host port they should map to. A JSON object in the form `{ /: [{ "HostPort": "" }] }` Take note that `port` is specified as a string and not an integer value. - **PublishAllPorts** - Allocates a random host port for all of a container's exposed ports. Specified as a boolean value. - **Privileged** - Gives the container full access to the host. Specified as a boolean value. - **ReadonlyRootfs** - Mount the container's root filesystem as read only. Specified as a boolean value. - **Dns** - A list of DNS servers for the container to use. 
- **DnsOptions** - A list of DNS options - **DnsSearch** - A list of DNS search domains - **ExtraHosts** - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - **CapAdd** - A list of kernel capabilities to add to the container. - **Capdrop** - A list of kernel capabilities to drop from the container. - **GroupAdd** - A list of additional groups that the container process will run as - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart, `"unless-stopped"` to restart always except when user has manually stopped the container or `"on-failure"` to restart only when the container exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` controls the number of times to retry before giving up. The default is not to restart. (optional) An ever increasing delay (double the previous delay, starting at 100mS) is added before each restart to prevent flooding the server. - **NetworkMode** - Sets the networking mode for the container. Supported values are: `bridge`, `host`, and `container:` - **Devices** - A list of devices to add to the container specified as a JSON object in the form `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` - **Ulimits** - A list of ulimits to set in the container, specified as `{ "Name": , "Soft": , "Hard": }`, for example: `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` - **SecurityOpt**: A list of string values to customize labels for MLS systems, such as SELinux. - **LogConfig** - Log configuration for the container, specified as a JSON object in the form `{ "Type": "", "Config": {"key1": "val1"}}`. Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `splunk`, `none`. 
`json-file` logging driver. - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. - **VolumeDriver** - Driver that this container users to mount volumes. - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. Query Parameters: - **name** – Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`. Status Codes: - **201** – no error - **404** – no such container - **406** – impossible to attach (container not running) - **500** – server error ### Inspect a container `GET /containers/(id)/json` Return low-level information on the container `id` **Example request**: GET /containers/4fa6e0f0c678/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "AppArmorProfile": "", "Args": [ "-c", "exit 9" ], "Config": { "AttachStderr": true, "AttachStdin": false, "AttachStdout": true, "Cmd": [ "/bin/sh", "-c", "exit 9" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts": null, "Hostname": "ba033ac44011", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "MacAddress": "", "NetworkDisabled": false, "OnBuild": null, "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "", "StopSignal": "SIGTERM" }, "Created": "2015-01-06T15:47:31.485331387Z", "Driver": "devicemapper", "ExecDriver": "native-0.2", "ExecIDs": null, "HostConfig": { "Binds": null, "BlkioWeight": 0, "BlkioWeightDevice": [{}], "BlkioDeviceReadBps": [{}], "BlkioDeviceWriteBps": [{}], "BlkioDeviceReadIOps": [{}], "BlkioDeviceWriteIOps": [{}], "CapAdd": null, "CapDrop": null, "ContainerIDFile": "", "CpusetCpus": "", "CpusetMems": "", 
"CpuShares": 0, "CpuPeriod": 100000, "Devices": [], "Dns": null, "DnsOptions": null, "DnsSearch": null, "ExtraHosts": null, "IpcMode": "", "Links": null, "LxcConf": [], "Memory": 0, "MemorySwap": 0, "MemoryReservation": 0, "KernelMemory": 0, "OomKillDisable": false, "OomScoreAdj": 500, "NetworkMode": "bridge", "PortBindings": {}, "Privileged": false, "ReadonlyRootfs": false, "PublishAllPorts": false, "RestartPolicy": { "MaximumRetryCount": 2, "Name": "on-failure" }, "LogConfig": { "Config": null, "Type": "json-file" }, "SecurityOpt": null, "VolumesFrom": null, "Ulimits": [{}], "VolumeDriver": "", "ShmSize": 67108864 }, "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", "MountLabel": "", "Name": "/boring_euclid", "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": null, "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "", "Networks": { "bridge": { "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:12:00:02" } } }, "Path": 
"/bin/sh", "ProcessLabel": "", "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", "RestartCount": 1, "State": { "Error": "", "ExitCode": 9, "FinishedAt": "2015-01-06T15:47:32.080254511Z", "OOMKilled": false, "Dead": false, "Paused": false, "Pid": 0, "Restarting": false, "Running": true, "StartedAt": "2015-01-06T15:47:32.072697474Z", "Status": "running" }, "Mounts": [ { "Name": "fac362...80535", "Source": "/data", "Destination": "/data", "Driver": "local", "Mode": "ro,Z", "RW": false, "Propagation": "" } ] } **Example request, with size information**: GET /containers/4fa6e0f0c678/json?size=1 HTTP/1.1 **Example response, with size information**: HTTP/1.1 200 OK Content-Type: application/json { .... "SizeRw": 0, "SizeRootFs": 972, .... } Query Parameters: - **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### List processes running inside a container `GET /containers/(id)/top` List processes running inside the container `id`. On Unix systems this is done by running the `ps` command. This endpoint is not supported on Windows. 
**Example request**: GET /containers/4fa6e0f0c678/top HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" ], "Processes" : [ [ "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" ], [ "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" ] ] } **Example request**: GET /containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Titles" : [ "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" ] "Processes" : [ [ "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" ], [ "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" ] ], } Query Parameters: - **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Get container logs `GET /containers/(id)/logs` Get `stdout` and `stderr` logs from the container ``id`` > **Note**: > This endpoint works only for containers with the `json-file` or `journald` logging drivers. **Example request**: GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **follow** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. - **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. - **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp will only output log-entries since that timestamp. Default: 0 (unfiltered) - **timestamps** – 1/True/true or 0/False/false, print timestamps for every log line. Default `false`. 
- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all.
**Example request**: GET /containers/redis1/stats HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "read" : "2015-01-08T22:57:31.547920715Z", "networks": { "eth0": { "rx_bytes": 5338, "rx_dropped": 0, "rx_errors": 0, "rx_packets": 36, "tx_bytes": 648, "tx_dropped": 0, "tx_errors": 0, "tx_packets": 8 }, "eth5": { "rx_bytes": 4641, "rx_dropped": 0, "rx_errors": 0, "rx_packets": 26, "tx_bytes": 690, "tx_dropped": 0, "tx_errors": 0, "tx_packets": 9 } }, "memory_stats" : { "stats" : { "total_pgmajfault" : 0, "cache" : 0, "mapped_file" : 0, "total_inactive_file" : 0, "pgpgout" : 414, "rss" : 6537216, "total_mapped_file" : 0, "writeback" : 0, "unevictable" : 0, "pgpgin" : 477, "total_unevictable" : 0, "pgmajfault" : 0, "total_rss" : 6537216, "total_rss_huge" : 6291456, "total_writeback" : 0, "total_inactive_anon" : 0, "rss_huge" : 6291456, "hierarchical_memory_limit" : 67108864, "total_pgfault" : 964, "total_active_file" : 0, "active_anon" : 6537216, "total_active_anon" : 6537216, "total_pgpgout" : 414, "total_cache" : 0, "inactive_anon" : 0, "active_file" : 0, "pgfault" : 964, "inactive_file" : 0, "total_pgpgin" : 477 }, "max_usage" : 6651904, "usage" : 6537216, "failcnt" : 0, "limit" : 67108864 }, "blkio_stats" : {}, "cpu_stats" : { "cpu_usage" : { "percpu_usage" : [ 16970827, 1839451, 7107380, 10571290 ], "usage_in_usermode" : 10000000, "total_usage" : 36488948, "usage_in_kernelmode" : 20000000 }, "system_cpu_usage" : 20091722000000000, "throttling_data" : {} } } Query Parameters: - **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Resize a container TTY `POST /containers/(id)/resize` Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. 
**Example request**: POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Length: 0 Content-Type: text/plain; charset=utf-8 Query Parameters: - **h** – height of `tty` session - **w** – width Status Codes: - **200** – no error - **404** – No such container - **500** – Cannot resize container ### Start a container `POST /containers/(id)/start` Start the container `id` > **Note**: > For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. > See [create a container](#create-a-container) for details. **Example request**: POST /containers/(id)/start HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **detachKeys** – Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Status Codes: - **204** – no error - **304** – container already started - **404** – no such container - **500** – server error ### Stop a container `POST /containers/(id)/stop` Stop the container `id` **Example request**: POST /containers/e90e34656806/stop?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **304** – container already stopped - **404** – no such container - **500** – server error ### Restart a container `POST /containers/(id)/restart` Restart the container `id` **Example request**: POST /containers/e90e34656806/restart?t=5 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **t** – number of seconds to wait before killing the container Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Kill a container `POST /containers/(id)/kill` Kill the container `id` **Example request**: POST /containers/e90e34656806/kill HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters - 
**signal** - Signal to send to the container: integer or string like `SIGINT`. When not set, `SIGKILL` is assumed and the call waits for the container to exit. Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Update a container `POST /containers/(id)/update` Update resource configs of one or more containers. **Example request**: POST /containers/(id)/update HTTP/1.1 Content-Type: application/json { "BlkioWeight": 300, "CpuShares": 512, "CpuPeriod": 100000, "CpuQuota": 50000, "CpusetCpus": "0,1", "CpusetMems": "0", "Memory": 314572800, "MemorySwap": 514288000, "MemoryReservation": 209715200, "KernelMemory": 52428800, } **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Warnings": [] } Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Rename a container `POST /containers/(id)/rename` Rename the container `id` to a `new_name` **Example request**: POST /containers/e90e34656806/rename?name=new_name HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **name** – new name for the container Status Codes: - **204** – no error - **404** – no such container - **409** - conflict name already assigned - **500** – server error ### Pause a container `POST /containers/(id)/pause` Pause the container `id` **Example request**: POST /containers/e90e34656806/pause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Unpause a container `POST /containers/(id)/unpause` Unpause the container `id` **Example request**: POST /containers/e90e34656806/unpause HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes: - **204** – no error - **404** – no such container - **500** – server error ### Attach to a container `POST /containers/(id)/attach` Attach to the container `id` **Example request**: POST 
/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 **Example response**: HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp {{ STREAM }} Query Parameters: - **detachKeys** – Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - **logs** – 1/True/true or 0/False/false, return logs. Default `false`. - **stream** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach to `stdin`. Default `false`. - **stdout** – 1/True/true or 0/False/false, if `logs=true`, return `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. - **stderr** – 1/True/true or 0/False/false, if `logs=true`, return `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. Status Codes: - **101** – no error, hints proxy about hijacking - **200** – no error, no upgrade header found - **400** – bad parameter - **404** – no such container - **500** – server error **Stream details**: When using the TTY setting is enabled in [`POST /containers/create` ](#create-a-container), the stream is the raw data from the process PTY and client's `stdin`. When the TTY is disabled, then the stream is multiplexed to separate `stdout` and `stderr`. The format is a **Header** and a **Payload** (frame). **HEADER** The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. **PAYLOAD** The payload is the raw stream. 
**IMPLEMENTATION** The simplest way to implement the Attach protocol is the following: 1. Read eight bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. ### Attach to a container (websocket) `GET /containers/(id)/attach/ws` Attach to the container `id` via websocket Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) **Example request** GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 **Example response** {{ STREAM }} Query Parameters: - **detachKeys** – Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - **logs** – 1/True/true or 0/False/false, return logs. Default `false`. - **stream** – 1/True/true or 0/False/false, return stream. Default `false`. - **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach to `stdin`. Default `false`. - **stdout** – 1/True/true or 0/False/false, if `logs=true`, return `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. - **stderr** – 1/True/true or 0/False/false, if `logs=true`, return `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. 
Status Codes: - **200** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Wait a container `POST /containers/(id)/wait` Block until container `id` stops, then returns the exit code **Example request**: POST /containers/16253994b7c4/wait HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"StatusCode": 0} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Remove a container `DELETE /containers/(id)` Remove the container `id` from the filesystem **Example request**: DELETE /containers/16253994b7c4?v=1 HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Query Parameters: - **v** – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default `false`. - **force** - 1/True/true or 0/False/false, Kill then remove the container. Default `false`. Status Codes: - **204** – no error - **400** – bad parameter - **404** – no such container - **500** – server error ### Copy files or folders from a container `POST /containers/(id)/copy` Copy files or folders of container `id` **Deprecated** in favor of the `archive` endpoint below. **Example request**: POST /containers/4fa6e0f0c678/copy HTTP/1.1 Content-Type: application/json { "Resource": "test.txt" } **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar {{ TAR STREAM }} Status Codes: - **200** – no error - **404** – no such container - **500** – server error ### Retrieving information about files and folders in a container `HEAD /containers/(id)/archive` See the description of the `X-Docker-Container-Path-Stat` header in the following section. ### Get an archive of a filesystem resource in a container `GET /containers/(id)/archive` Get an tar archive of a resource in the filesystem of container `id`. Query Parameters: - **path** - resource in the container's filesystem to archive. Required. If not an absolute path, it is relative to the container's root directory. 
The resource specified by **path** must exist. To assert that the resource is expected to be a directory, **path** should end in `/` or `/.` (assuming a path separator of `/`). If **path** ends in `/.` then this indicates that only the contents of the **path** directory should be copied. A symlink is always resolved to its target. **Note**: It is not possible to copy certain system files such as resources under `/proc`, `/sys`, `/dev`, and mounts created by the user in the container. **Example request**: GET /containers/8cce319429b2/archive?path=/root HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= {{ TAR STREAM }} On success, a response header `X-Docker-Container-Path-Stat` will be set to a base64-encoded JSON object containing some filesystem header information about the archived resource. The above example value would decode to the following JSON object (whitespace added for readability): { "name": "root", "size": 4096, "mode": 2147484096, "mtime": "2014-02-27T20:51:23Z", "linkTarget": "" } A `HEAD` request can also be made to this endpoint if only this information is desired. Status Codes: - **200** - success, returns archive of copied resource - **400** - client error, bad parameter, details in JSON response body, one of: - must specify path parameter (**path** cannot be empty) - not a directory (**path** was asserted to be a directory but exists as a file) - **404** - client error, resource not found, one of: – no such container (container `id` does not exist) - no such file or directory (**path** does not exist) - **500** - server error ### Extract an archive of files or folders to a directory in a container `PUT /containers/(id)/archive` Upload a tar archive to be extracted to a path in the filesystem of container `id`. 
Query Parameters: - **path** - path to a directory in the container to extract the archive's contents into. Required. If not an absolute path, it is relative to the container's root directory. The **path** resource must exist. - **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. **Example request**: PUT /containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 Content-Type: application/x-tar {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – the content was extracted successfully - **400** - client error, bad parameter, details in JSON response body, one of: - must specify path parameter (**path** cannot be empty) - not a directory (**path** should be a directory but exists as a file) - unable to overwrite existing directory with non-directory (if **noOverwriteDirNonDir**) - unable to overwrite existing non-directory with directory (if **noOverwriteDirNonDir**) - **403** - client error, permission denied, the volume or container rootfs is marked as read-only. 
- **404** - client error, resource not found, one of: – no such container (container `id` does not exist) - no such file or directory (**path** resource does not exist) - **500** – server error ## 2.2 Images ### List Images `GET /images/json` **Example request**: GET /images/json?all=0 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "RepoTags": [ "ubuntu:12.04", "ubuntu:precise", "ubuntu:latest" ], "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", "Created": 1365714795, "Size": 131506275, "VirtualSize": 131506275, "Labels": {} }, { "RepoTags": [ "ubuntu:12.10", "ubuntu:quantal" ], "ParentId": "27cf784147099545", "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "Created": 1364102658, "Size": 24653, "VirtualSize": 180116135, "Labels": { "com.example.version": "v1" } } ] **Example request, with digest information**: GET /images/json?digests=1 HTTP/1.1 **Example response, with digest information**: HTTP/1.1 200 OK Content-Type: application/json [ { "Created": 1420064636, "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", "RepoDigests": [ "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" ], "RepoTags": [ "localhost:5000/test/busybox:latest", "playdate:latest" ], "Size": 0, "VirtualSize": 2429728, "Labels": {} } ] The response shows a single image `Id` associated with two repositories (`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use either of the `RepoTags` values `localhost:5000/test/busybox:latest` or `playdate:latest` to reference the image. You can also use `RepoDigests` values to reference an image. In this response, the array has only one reference and that is to the `localhost:5000/test/busybox` repository; the `playdate` repository has no digest. 
You can reference this digest using the value: `localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` See the `docker run` and `docker build` commands for examples of digest and tag references on the command line. Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - `dangling=true` - `label=key` or `label="key=value"` of an image label - **filter** - only return images with the specified name ### Build image from a Dockerfile `POST /build` Build an image from a Dockerfile **Example request**: POST /build HTTP/1.1 {{ TAR STREAM }} **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"stream": "Step 1..."} {"stream": "..."} {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} The input stream must be a `tar` archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. The archive must include a build instructions file, typically called `Dockerfile` at the archive's root. The `dockerfile` parameter may be used to specify a different build instructions file. To do this, its value must be the path to the alternate build instructions file to use. The archive may include any number of other files, which are accessible in the build context (See the [*ADD build command*](../../reference/builder.md#dockerbuilder)). The build is canceled if the client drops the connection by quitting or being killed. Query Parameters: - **dockerfile** - Path within the build context to the Dockerfile. This is ignored if `remote` is specified and points to an individual filename. - **t** – A name and optional tag to apply to the image in the `name:tag` format. If you omit the `tag` the default `latest` value is assumed. You can provide one or more `t` parameters. - **remote** – A Git repository URI or HTTP/HTTPS URI build source. 
If the URI specifies a filename, the file's contents are placed into a file called `Dockerfile`. - **q** – Suppress verbose build output. - **nocache** – Do not use the cache when building the image. - **pull** - Attempt to pull the image even if an older image exists locally. - **rm** - Remove intermediate containers after a successful build (default behavior). - **forcerm** - Always remove intermediate containers (includes `rm`). - **memory** - Set memory limit for build. - **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight). - **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - **cpuperiod** - The length of a CPU period in microseconds. - **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. - **buildargs** – JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the `buildargs` as the environment context for command(s) run via the Dockerfile's `RUN` instruction or for variable expansion in other Dockerfile instructions. This is not meant for passing secret values. [Read more about the buildargs instruction](../../reference/builder.md#arg) - **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. Request Headers: - **Content-type** – Set to `"application/tar"`. - **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON object with the following structure: { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } This object maps the hostname of a registry to an object containing the "username" and "password" for that registry. Multiple registries may be specified as the build may be based on an image requiring authentication to pull from any arbitrary registry. 
Only the registry domain name (and port if not the default "443") are required. However (for legacy reasons) the "official" Docker, Inc. hosted registry must be specified with both a "https://" prefix and a "/v1/" suffix even though Docker will prefer to use the v2 registry API. Status Codes: - **200** – no error - **500** – server error ### Create an image `POST /images/create` Create an image either by pulling it from the registry or by importing it **Example request**: POST /images/create?fromImage=ubuntu HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pulling..."} {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} {"error": "Invalid..."} ... When using this endpoint to pull an image from the registry, the `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. Query Parameters: - **fromImage** – Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed. - **fromSrc** – Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image. - **repo** – Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image. - **tag** – Tag or digest. Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token - Credential based login: ``` { "username": "jdoe", "password": "secret", "email": "jdoe@acme.com", } ``` - Token based login: ``` { "registrytoken": "9cbaf023786cd7..." 
} ``` Status Codes: - **200** – no error - **500** – server error ### Inspect an image `GET /images/(name)/json` Return low-level information on the image `name` **Example request**: GET /images/example/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", "Comment" : "", "Os" : "linux", "Architecture" : "amd64", "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", "ContainerConfig" : { "Tty" : false, "Hostname" : "e611e15f9c9d", "Volumes" : null, "Domainname" : "", "AttachStdout" : false, "PublishService" : "", "AttachStdin" : false, "OpenStdin" : false, "StdinOnce" : false, "NetworkDisabled" : false, "OnBuild" : [], "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", "User" : "", "WorkingDir" : "", "Entrypoint" : null, "MacAddress" : "", "AttachStderr" : false, "Labels" : { "com.example.license" : "GPL", "com.example.version" : "1.0", "com.example.vendor" : "Acme" }, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "ExposedPorts" : null, "Cmd" : [ "/bin/sh", "-c", "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" ] }, "DockerVersion" : "1.9.0-dev", "VirtualSize" : 188359297, "Size" : 0, "Author" : "", "Created" : "2015-09-10T08:30:53.26995814Z", "GraphDriver" : { "Name" : "aufs", "Data" : null }, "RepoDigests" : [ "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" ], "RepoTags" : [ "example:1.0", "example:latest", "example:stable" ], "Config" : { "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", "NetworkDisabled" : false, "OnBuild" : [], "StdinOnce" : false, "PublishService" : "", "AttachStdin" : false, "OpenStdin" : false, "Domainname" : "", "AttachStdout" : false, "Tty" : 
false, "Hostname" : "e611e15f9c9d", "Volumes" : null, "Cmd" : [ "/bin/bash" ], "ExposedPorts" : null, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Labels" : { "com.example.vendor" : "Acme", "com.example.version" : "1.0", "com.example.license" : "GPL" }, "Entrypoint" : null, "MacAddress" : "", "AttachStderr" : false, "WorkingDir" : "", "User" : "" } } Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Get the history of an image `GET /images/(name)/history` Return the history of the image `name` **Example request**: GET /images/ubuntu/history HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", "Created": 1398108230, "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", "Tags": [ "ubuntu:lucid", "ubuntu:10.04" ], "Size": 182964289, "Comment": "" }, { "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", "Created": 1398108222, "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", "Tags": null, "Size": 0, "Comment": "" }, { "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", "Created": 1371157430, "CreatedBy": "", "Tags": [ "scratch12:latest", "scratch:latest" ], "Size": 0, "Comment": "Imported from -" } ] Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Push an image on the registry `POST /images/(name)/push` Push the image `name` on the registry **Example request**: POST /images/test/push HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json {"status": "Pushing..."} {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}} {"error": "Invalid..."} ...
If you wish to push an image on to a private registry, that image must already have a tag into a repository which references that registry `hostname` and `port`. This repository name should then be used in the URL. This duplicates the command line's flow. The push is cancelled if the HTTP connection is closed. **Example request**: POST /images/registry.acme.com:5000/test/push HTTP/1.1 Query Parameters: - **tag** – The tag to associate with the image on the registry. This is optional. Request Headers: - **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token - Credential based login: ``` { "username": "jdoe", "password": "secret", "email": "jdoe@acme.com", } ``` - Token based login: ``` { "registrytoken": "9cbaf023786cd7..." } ``` Status Codes: - **200** – no error - **404** – no such image - **500** – server error ### Tag an image into a repository `POST /images/(name)/tag` Tag the image `name` into a repository **Example request**: POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 **Example response**: HTTP/1.1 201 OK Query Parameters: - **repo** – The repository to tag in - **force** – 1/True/true or 0/False/false, default false - **tag** - The new tag name Status Codes: - **201** – no error - **400** – bad parameter - **404** – no such image - **409** – conflict - **500** – server error ### Remove an image `DELETE /images/(name)` Remove the image `name` from the filesystem **Example request**: DELETE /images/test HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-type: application/json [ {"Untagged": "3e2f21a89f"}, {"Deleted": "3e2f21a89f"}, {"Deleted": "53b4f83ac9"} ] Query Parameters: - **force** – 1/True/true or 0/False/false, default false - **noprune** – 1/True/true or 0/False/false, default false Status Codes: - **200** – no error - **404** – no such image - **409** – conflict - **500** – server error ### Search images `GET /images/search` Search for an image on [Docker 
Hub](https://hub.docker.com). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON > sent by the registry server to the docker daemon's request. **Example request**: GET /images/search?term=sshd HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "description": "", "is_official": false, "is_automated": false, "name": "wma55/u1210sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "jdswinbank/sshd", "star_count": 0 }, { "description": "", "is_official": false, "is_automated": false, "name": "vgauthier/sshd", "star_count": 0 } ... ] Query Parameters: - **term** – term to search Status Codes: - **200** – no error - **500** – server error ## 2.3 Misc ### Check auth configuration `POST /auth` Get the default username and email **Example request**: POST /auth HTTP/1.1 Content-Type: application/json { "username": "hannibal", "password": "xxxx", "email": "hannibal@a-team.com", "serveraddress": "https://index.docker.io/v1/" } **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **204** – no error - **500** – server error ### Display system-wide information `GET /info` Display system-wide information **Example request**: GET /info HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Architecture": "x86_64", "Containers": 11, "ContainersRunning": 7, "ContainersStopped": 3, "ContainersPaused": 1, "CpuCfsPeriod": true, "CpuCfsQuota": true, "Debug": false, "DiscoveryBackend": "etcd://localhost:2379", "DockerRootDir": "/var/lib/docker", "Driver": "btrfs", "DriverStatus": [[""]], "SystemStatus": [["State", "Healthy"]], "Plugins": { "Volume": [ "local" ], "Network": [ "null", "host", "bridge" ] }, "ExecutionDriver": "native-0.1", "ExperimentalBuild": false, "HttpProxy": "http://test:test@localhost:8080", "HttpsProxy": "https://test:test@localhost:8080", "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
"IPv4Forwarding": true, "Images": 16, "IndexServerAddress": "https://index.docker.io/v1/", "InitPath": "/usr/bin/docker", "InitSha1": "", "KernelVersion": "3.12.0-1-amd64", "Labels": [ "storage=ssd" ], "MemTotal": 2099236864, "MemoryLimit": true, "NCPU": 1, "NEventsListener": 0, "NFd": 11, "NGoroutines": 21, "Name": "prod-server-42", "NoProxy": "9.81.1.160", "OomKillDisable": true, "OSType": "linux", "OomScoreAdj": 500, "OperatingSystem": "Boot2Docker", "RegistryConfig": { "IndexConfigs": { "docker.io": { "Mirrors": null, "Name": "docker.io", "Official": true, "Secure": true } }, "InsecureRegistryCIDRs": [ "127.0.0.0/8" ] }, "SwapLimit": false, "SystemTime": "2015-03-10T11:11:23.730591467-07:00", "ServerVersion": "1.9.0" } Status Codes: - **200** – no error - **500** – server error ### Show the docker version information `GET /version` Show the docker version information **Example request**: GET /version HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Version": "1.10.0-dev", "Os": "linux", "KernelVersion": "3.19.0-23-generic", "GoVersion": "go1.4.2", "GitCommit": "e75da4b", "Arch": "amd64", "ApiVersion": "1.22", "BuildTime": "2015-12-01T07:09:13.444803460+00:00", "Experimental": true } Status Codes: - **200** – no error - **500** – server error ### Ping the docker server `GET /_ping` Ping the docker server **Example request**: GET /_ping HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: text/plain OK Status Codes: - **200** - no error - **500** - server error ### Create a new image from a container's changes `POST /commit` Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Mounts": [ { "Source": "/data",
"Destination": "/data", "Mode": "ro,Z", "RW": false } ], "Labels": { "key1": "value1", "key2": "value2" }, "WorkingDir": "", "NetworkDisabled": false, "ExposedPorts": { "22/tcp": {} } } **Example response**: HTTP/1.1 201 Created Content-Type: application/json {"Id": "596069db4bf5"} Json Parameters: - **config** - the container's configuration Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag - **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - **pause** – 1/True/true or 0/False/false, whether to pause the container before committing - **changes** – Dockerfile instructions to apply while committing Status Codes: - **201** – no error - **404** – no such container - **500** – server error ### Monitor Docker's events `GET /events` Get container events from docker, either in real time via streaming, or via polling (using since). Docker containers report the following events: attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update Docker images report the following events: delete, import, pull, push, tag, untag Docker volumes report the following events: create, mount, unmount, destroy Docker networks report the following events: create, connect, disconnect, destroy **Example request**: GET /events?since=1374067924 **Example response**: HTTP/1.1 200 OK Content-Type: application/json [ { "action": "pull", "type": "image", "actor": { "id": "busybox:latest", "attributes": {} } "time": 1442421700, "timeNano": 1442421700598988358 }, { "action": "create", "type": "container", "actor": { "id": "5745704abe9caa5", "attributes": {"image": "busybox"} } "time": 1442421716, "timeNano": 1442421716853979870 }, { "action": "attach", "type": "container", "actor": { "id": "5745704abe9caa5", "attributes": {"image": "busybox"} } "time": 1442421716, "timeNano": 
1442421716894759198 }, { "action": "start", "type": "container", "actor": { "id": "5745704abe9caa5", "attributes": {"image": "busybox"} } "time": 1442421716, "timeNano": 1442421716983607193 } ] Query Parameters: - **since** – Timestamp used for polling - **until** – Timestamp used for polling - **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: - `container=`; -- container to filter - `event=`; -- event to filter - `image=`; -- image to filter - `label=`; -- image and container label to filter - `type=`; -- either `container` or `image` or `volume` or `network` - `volume=`; -- volume to filter - `network=`; -- network to filter Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images in a repository `GET /images/(name)/get` Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/ubuntu/get **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Get a tarball containing all images. `GET /images/get` Get a tarball containing all images and metadata for one or more repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. 
See the [image tarball format](#image-tarball-format) for more details. **Example request** GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox **Example response**: HTTP/1.1 200 OK Content-Type: application/x-tar Binary data stream Status Codes: - **200** – no error - **500** – server error ### Load a tarball with a set of images and tags into docker `POST /images/load` Load a set of images and tags into a Docker repository. See the [image tarball format](#image-tarball-format) for more details. **Example request** POST /images/load Tarball in body **Example response**: HTTP/1.1 200 OK Status Codes: - **200** – no error - **500** – server error ### Image tarball format An image tarball contains one directory per image layer (named using its long ID), each containing these files: - `VERSION`: currently `1.0` - the file format version - `json`: detailed layer information, similar to `docker inspect layer_id` - `layer.tar`: A tarfile containing the filesystem changes in this layer The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ``` {"hello-world": {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} } ``` ### Exec Create `POST /containers/(id)/exec` Sets up an exec instance in a running container `id` **Example request**: POST /containers/e90e34656806/exec HTTP/1.1 Content-Type: application/json { "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "DetachKeys": "ctrl-p,ctrl-q", "Tty": false, "Cmd": [ "date" ] } **Example response**: HTTP/1.1 201 OK Content-Type: application/json { "Id": "f90e34656806", "Warnings":[] } Json Parameters: - **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. 
- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. - **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. - **DetachKeys** – Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - **Tty** - Boolean value to allocate a pseudo-TTY. - **Cmd** - Command to run specified as a string or an array of strings. Status Codes: - **201** – no error - **404** – no such container - **409** - container is paused - **500** - server error ### Exec Start `POST /exec/(id)/start` Starts a previously set up `exec` instance `id`. If `detach` is true, this API returns after starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. **Example request**: POST /exec/e90e34656806/start HTTP/1.1 Content-Type: application/json { "Detach": false, "Tty": false } **Example response**: HTTP/1.1 201 OK Content-Type: application/json {{ STREAM }} Json Parameters: - **Detach** - Detach from the `exec` command. - **Tty** - Boolean value to allocate a pseudo-TTY. Status Codes: - **200** – no error - **404** – no such exec instance - **409** - container is paused **Stream details**: Similar to the stream behavior of `POST /container/(id)/attach` API ### Exec Resize `POST /exec/(id)/resize` Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. This API is valid only if `tty` was specified as part of creating and starting the `exec` command. **Example request**: POST /exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 Content-Type: text/plain **Example response**: HTTP/1.1 201 OK Content-Type: text/plain Query Parameters: - **h** – height of `tty` session - **w** – width Status Codes: - **201** – no error - **404** – no such exec instance ### Exec Inspect `GET /exec/(id)/json` Return low-level information about the `exec` command `id`.
**Example request**: GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: plain/text { "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", "Running" : false, "ExitCode" : 2, "ProcessConfig" : { "privileged" : false, "user" : "", "tty" : false, "entrypoint" : "sh", "arguments" : [ "-c", "exit 2" ] }, "OpenStdin" : false, "OpenStderr" : false, "OpenStdout" : false, "Container" : { "State" : { "Status" : "running", "Running" : true, "Paused" : false, "Restarting" : false, "OOMKilled" : false, "Pid" : 3650, "ExitCode" : 0, "Error" : "", "StartedAt" : "2014-11-17T22:26:03.717657531Z", "FinishedAt" : "0001-01-01T00:00:00Z" }, "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", "Created" : "2014-11-17T22:26:03.626304998Z", "Path" : "date", "Args" : [], "Config" : { "Hostname" : "8f177a186b97", "Domainname" : "", "User" : "", "AttachStdin" : false, "AttachStdout" : false, "AttachStderr" : false, "ExposedPorts" : null, "Tty" : false, "OpenStdin" : false, "StdinOnce" : false, "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd" : [ "date" ], "Image" : "ubuntu", "Volumes" : null, "WorkingDir" : "", "Entrypoint" : null, "NetworkDisabled" : false, "MacAddress" : "", "OnBuild" : null, "SecurityOpt" : null }, "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", "NetworkSettings": { "Bridge": "", "SandboxID": "", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": null, "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "", "Networks": { "bridge": { "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": 
"7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:12:00:02" } } }, "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Name" : "/test", "Driver" : "aufs", "ExecDriver" : "native-0.2", "MountLabel" : "", "ProcessLabel" : "", "AppArmorProfile" : "", "RestartCount" : 0, "Mounts" : [] } } Status Codes: - **200** – no error - **404** – no such exec instance - **500** - server error ## 2.4 Volumes ### List volumes `GET /volumes` **Example request**: GET /volumes HTTP/1.1 **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Volumes": [ { "Name": "tardis", "Driver": "local", "Mountpoint": "/var/lib/docker/volumes/tardis" } ] } Query Parameters: - **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` Status Codes: - **200** - no error - **500** - server error ### Create a volume `POST /volumes/create` Create a volume **Example request**: POST /volumes/create HTTP/1.1 Content-Type: application/json { "Name": "tardis" } **Example response**: HTTP/1.1 201 Created Content-Type: application/json { "Name": "tardis", "Driver": "local", "Mountpoint": "/var/lib/docker/volumes/tardis" } Status Codes: - **201** - no error - **500** - server error JSON Parameters: - **Name** - The new volume's name. 
If not specified, Docker generates a name. - **Driver** - Name of the volume driver to use. Defaults to `local` for the name. - **DriverOpts** - A mapping of driver options and values. These options are passed directly to the driver and are driver specific. ### Inspect a volume `GET /volumes/(name)` Return low-level information on the volume `name` **Example request**: GET /volumes/tardis **Example response**: HTTP/1.1 200 OK Content-Type: application/json { "Name": "tardis", "Driver": "local", "Mountpoint": "/var/lib/docker/volumes/tardis" } Status Codes: - **200** - no error - **404** - no such volume - **500** - server error ### Remove a volume `DELETE /volumes/(name)` Instruct the driver to remove the volume (`name`). **Example request**: DELETE /volumes/tardis HTTP/1.1 **Example response**: HTTP/1.1 204 No Content Status Codes - **204** - no error - **404** - no such volume or volume driver - **409** - volume is in use and cannot be removed - **500** - server error ## 2.5 Networks ### List networks `GET /networks` **Example request**: GET /networks?filters={"type":{"custom":true}} HTTP/1.1 **Example response**: ``` HTTP/1.1 200 OK Content-Type: application/json [ { "Name": "bridge", "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.17.0.0/16" } ] }, "Containers": { "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", "MacAddress": "02:42:ac:11:00:02", "IPv4Address": "172.17.0.2/16", "IPv6Address": "" } }, "Options": { "com.docker.network.bridge.default_bridge": "true", "com.docker.network.bridge.enable_icc": "true", "com.docker.network.bridge.enable_ip_masquerade": "true", "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", "com.docker.network.bridge.name": "docker0", "com.docker.network.driver.mtu": "1500" } }, { "Name": "none", "Id": 
"e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", "Scope": "local", "Driver": "null", "IPAM": { "Driver": "default", "Config": [] }, "Containers": {}, "Options": {} }, { "Name": "host", "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", "Scope": "local", "Driver": "host", "IPAM": { "Driver": "default", "Config": [] }, "Containers": {}, "Options": {} } ] ``` Query Parameters: - **filters** - JSON encoded network list filter. The filter value is one of: - `name=` Matches all or part of a network name. - `id=` Matches all or part of a network id. - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. Status Codes: - **200** - no error - **500** - server error ### Inspect network `GET /networks/` **Example request**: GET /networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 **Example response**: ``` HTTP/1.1 200 OK Content-Type: application/json { "Name": "net01", "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.19.0.0/16", "Gateway": "172.19.0.1/16" } ], "Options": { "foo": "bar" } }, "Containers": { "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { "Name": "test", "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", "MacAddress": "02:42:ac:13:00:02", "IPv4Address": "172.19.0.2/16", "IPv6Address": "" } }, "Options": { "com.docker.network.bridge.default_bridge": "true", "com.docker.network.bridge.enable_icc": "true", "com.docker.network.bridge.enable_ip_masquerade": "true", "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", "com.docker.network.bridge.name": "docker0", "com.docker.network.driver.mtu": "1500" } } ``` Status Codes: - **200** - no error - **404** - network not found ### Create a network `POST /networks/create` Create a network **Example request**: ``` 
POST /networks/create HTTP/1.1 Content-Type: application/json { "Name":"isolated_nw", "Driver":"bridge", "IPAM":{ "Config":[{ "Subnet":"172.20.0.0/16", "IPRange":"172.20.10.0/24", "Gateway":"172.20.10.11" }], "Options": { "foo": "bar" } }, "Internal":true } ``` **Example response**: ``` HTTP/1.1 201 Created Content-Type: application/json { "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", "Warning": "" } ``` Status Codes: - **201** - no error - **404** - plugin not found - **500** - server error JSON Parameters: - **Name** - The new network's name. this is a mandatory field - **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver - **IPAM** - Optional custom IP scheme for the network - **Options** - Network specific options to be used by the drivers - **CheckDuplicate** - Requests daemon to check for networks with same name ### Connect a container to a network `POST /networks/(id)/connect` Connects a container to a network **Example request**: ``` POST /networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 Content-Type: application/json { "Container":"3613f73ba0e4", "EndpointConfig": { "test_nw": { "IPv4Address":"172.24.56.89", "IPv6Address":"2001:db8::5689" } } } ``` **Example response**: HTTP/1.1 200 OK Status Codes: - **200** - no error - **404** - network or container is not found - **500** - Internal Server Error JSON Parameters: - **container** - container-id/name to be connected to the network ### Disconnect a container from a network `POST /networks/(id)/disconnect` Disconnects a container from a network **Example request**: ``` POST /networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 Content-Type: application/json { "Container":"3613f73ba0e4", "Force":false } ``` **Example response**: HTTP/1.1 200 OK Status Codes: - **200** - no error - **404** - network or container not found - **500** - Internal Server Error JSON Parameters: 
- **Container** - container-id/name to be disconnected from a network - **Force** - Force the container to disconnect from a network ### Remove a network `DELETE /networks/(id)` Instruct the driver to remove the network (`id`). **Example request**: DELETE /networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 **Example response**: HTTP/1.1 200 OK Status Codes - **200** - no error - **404** - no such network - **500** - server error # 3. Going further ## 3.1 Inside `docker run` As an example, the `docker run` command line makes the following API calls: - Create the container - If the status code is 404, it means the image doesn't exist: - Try to pull it. - Then, retry to create the container. - Start the container. - If you are not in detached mode: - Attach to the container, using `logs=1` (to have `stdout` and `stderr` from the container's start) and `stream=1` - If in detached mode or only `stdin` is attached, display the container's id. ## 3.2 Hijacking In this version of the API, `/attach`, uses hijacking to transport `stdin`, `stdout`, and `stderr` on the same socket. To hint potential proxies about connection hijacking, Docker client sends connection upgrade headers similarly to websocket. Upgrade: tcp Connection: Upgrade When Docker daemon detects the `Upgrade` header, it switches its status code from **200 OK** to **101 UPGRADED** and resends the same headers. ## 3.3 CORS Requests To set cross origin requests to the remote api please give values to `--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, default or blank means CORS disabled $ docker daemon -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" docker-1.10.3/docs/reference/api/hub_registry_spec.md000066400000000000000000000011571267010174400225520ustar00rootroot00000000000000 # The Docker Hub and the Registry v1 This API is deprecated as of 1.7. 
To view the old version, see the [go here](hub_registry_spec.md) in the 1.7 documentation. If you want an overview of the current features in Docker Hub or other image management features see the [image management overview](../../userguide/eng-image/image_management.md) in the current documentation set. docker-1.10.3/docs/reference/api/images/000077500000000000000000000000001267010174400177515ustar00rootroot00000000000000docker-1.10.3/docs/reference/api/images/event_state.gliffy000066400000000000000000002115001267010174400234730ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":1193,"height":556,"nodeIndex":370,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":26.46762966848334,"y":100},"max":{"x":1192.861928406027,"y":555.2340187157677}},"printModel":{"pageSize":"Letter","portrait":true,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":373.99998474121094,"y":389.93402099609375,"rotation":0.0,"id":355,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":0,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":0.7071067811865475,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":-0.663724900050094,"endArrowRotation":-0.6637248993502937,"interpolationType":"quadratic","cornerRadius":null,"controlPath":[[22.0,-17.0],[94.00000762939453,-17.0],[94.00000762939453,-61.64974974863185],[166.00001525878906,-61.64974974863185]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{
"x":0.0,"y":0.0,"rotation":0.0,"id":359,"width":75.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5,"linePerpValue":0.0,"cardinalityType":null,"html":"

docker start

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":275.99998474121094,"y":323.93402099609375,"rotation":0.0,"id":344,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":127,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":335,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":193,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":105.08369488824782,"endArrowRotation":91.96866662391399,"interpolationType":"quadratic","cornerRadius":null,"controlPath":[[22.531977827253513,30.06597900390625],[22.531977827253513,51.06597900390625],[-52.96697615221987,51.06597900390625],[-52.96697615221987,106.06597900390625]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":347,"width":64.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker rm

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":279.99998474121094,"y":249.93402099609375,"rotation":0.0,"id":342,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":126,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":188,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-74.99998474121094,0.06597900390625],[297.50001525878906,0.06597900390625],[297.50001525878906,50.06597900390625]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":313.99998474121094,"y":290.93402099609375,"rotation":0.0,"id":341,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":123,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":335,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[19.531977827253513,28.06597900390625],[88.35546419381131,28.06597900390625],[157.17895056036912,28.06597900390625],[226.00243692692698,28.06597900390625]],"lockSegments":{},"ortho":tru
e}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":353,"width":75.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker start

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":214.99998474121094,"y":322.93402099609375,"rotation":0.0,"id":340,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":122,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":228,"py":0.5733505249023437,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":335,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-7.637919363960094,-3.93402099609375],[11.085379699777775,-3.93402099609375],[29.808678763515644,-3.93402099609375],[48.53197782725351,-3.93402099609375]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":83.0,"y":251.0,"rotation":0.0,"id":328,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":116,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":188,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-52.03237033151666,-0.9999999999999716],[47.0,-1.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":332,"width":67.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"T
ext","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5233416311379174,"linePerpValue":null,"cardinalityType":null,"html":"

docker run

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":74.0,"y":318.0,"rotation":0.0,"id":327,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":113,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":228,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-42.0,1.0],[58.5,2.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":333,"width":85.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5689443767164591,"linePerpValue":null,"cardinalityType":null,"html":"

docker create

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":191.0,"y":409.0,"rotation":0.0,"id":325,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":112,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":215,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-21.0,41.0],[-61.0,41.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":331.0,"y":346.0,"rotation":0.0,"id":320,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":109,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":209,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.5319625684644507,49.0],[-41.734018715767775,49.0],[-41.734018715767775,104.0],[-86.0,104.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":324,"width":64.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"o
verflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker rm

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":872.0,"y":503.0,"rotation":0.0,"id":310,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":108,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":205,"py":0.0,"px":0.2928932188134524}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-60.03300858899104,-53.0],[-148.0,-151.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":735.0,"y":341.0,"rotation":0.0,"id":307,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":105,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":203,"py":0.2928932188134525,"px":1.1102230246251563E-16}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[137.5,60.7157287525381]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":309,"width":83.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.3792200325711
6654,"linePerpValue":null,"cardinalityType":null,"html":"

docker pause

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":1023.0,"y":446.0,"rotation":0.0,"id":298,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":102,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":213,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":205,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[39.5,-1.0],[39.5,24.0],[-158.0,24.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":313,"width":100.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.37286693198126664,"linePerpValue":null,"cardinalityType":null,"html":"

 docker unpause

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":904.0,"y":434.0,"rotation":0.0,"id":295,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":101,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":203,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":213,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[43.5,-24.0],[123.5,-24.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":411.0,"y":419.0,"rotation":0.0,"id":291,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":98,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":217,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[7.2659812842321685,51.0],[-14.0,51.0],[-14.0,-3.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":292,"width":21.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"o
uterPaddingLeft":6,"type":"fixed","lineTValue":0.5714437496124175,"linePerpValue":0.0,"cardinalityType":null,"html":"

No

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":415.0,"y":419.0,"rotation":0.0,"id":289,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":95,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":217,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[53.26598128423217,1.0],[53.26598128423217,-32.5],[162.5,-32.5],[162.5,-79.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":290,"width":26.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.46753493572435184,"linePerpValue":null,"cardinalityType":null,"html":"

Yes

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":521.0,"y":209.0,"rotation":0.0,"id":287,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":94,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":195,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-11.0,-19.0],[-97.23401871576777,-19.0],[-97.23401871576777,186.0],[-117.46803743153555,186.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":988.0,"y":232.0,"rotation":0.0,"id":282,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":93,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":201,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[39.5,18.0],[-150.0,18.0],[-150.0,68.0],[-250.0,68.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":664.0,"y":493.0,"rotation":0.0,"id":276,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":92,"lockAspectRatio":false,"lockShape":false,"constraints
":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":207,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":236,"py":0.7071067811865475,"px":0.9999999999999998}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[8.5,42.23401871576766],[-20.25,42.23401871576766],[-20.25,-44.7157287525381],[-49.0,-44.7157287525381]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":678.0,"y":344.0,"rotation":0.0,"id":273,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":89,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":236,"py":0.29289321881345237,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":91.17113025781374,"endArrowRotation":176.63803454243802,"interpolationType":"quadratic","cornerRadius":null,"controlPath":[[2.0,-4.0],[2.0,87.7157287525381],[-63.0,87.7157287525381]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":59.0,"height":42.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5,"linePerpValue":0.0,"cardinalityType":null,"html":"

container 

process

exited

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":566.0,"y":431.0,"rotation":0.0,"id":272,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":88,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":236,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":217,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-26.0,9.0],[-36.867009357883944,9.0],[-36.867009357883944,39.0],[-47.73401871576789,39.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":785.0,"y":119.0,"rotation":0.0,"id":270,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":87,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[5.0,1.0],[-416.46803743153555,1.0],[-416.46803743153555,241.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":829.0,"y":172.0,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape
.basic.basic_v1.default.line","order":86,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":248,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,-2.0],[-1.5,-32.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":661.0,"y":189.0,"rotation":0.0,"id":267,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":85,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":195,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[7.0,2.284271247461902],[-76.0,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":946.0,"y":319.0,"rotation":0.0,"id":263,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":83,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":197,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":233,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow
":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.5,1.0],[81.5,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":708.0,"y":286.0,"rotation":0.0,"id":256,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":254,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.5,-2.0],[-0.5,-76.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":258,"width":64.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.3108108108108108,"linePerpValue":null,"cardinalityType":null,"html":"

docker kill

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":710.0,"y":359.0,"rotation":0.0,"id":245,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":68,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":207,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-2.5,-5.0],[0.0,156.23401871576766]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":247,"width":84.0,"height":28.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 killed by

out-of-memory

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":761.0,"y":318.0,"rotation":0.0,"id":238,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":65,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":197,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-18.5,1.0],[111.5,2.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":240,"width":87.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.4363456059259962,"linePerpValue":null,"cardinalityType":null,"html":"

docker restart

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":608.0,"y":319.0,"rotation":0.0,"id":232,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":58,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":191,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":211,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[7.0,1.0],[64.5,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":333.53196256846445,"y":360.0,"rotation":0.0,"id":209,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":33,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e6b8af","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555554,"y":0.0,"rotation":0.0,"id":210,"width":66.88888888888889,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

stopped

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":540.0,"y":300.0,"rotation":0.0,"id":191,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":192,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

start

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":510.0,"y":170.0,"rotation":0.0,"id":195,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":196,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

kill

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":872.5,"y":300.0,"rotation":0.0,"id":197,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":198,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

die

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":790.0,"y":100.0,"rotation":0.0,"id":199,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":200,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

stop

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":790.0,"y":450.0,"rotation":0.0,"id":205,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":206,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

unpause

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":672.5,"y":515.2340187157677,"rotation":0.0,"id":207,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":208,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

OOM

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":672.5,"y":284.0,"rotation":0.0,"id":211,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":36,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#b6d7a8","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":212,"width":66.88888888888889,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

running

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":403.5319625684644,"y":420.0,"rotation":0.0,"id":227,"width":130.46803743153555,"height":116.23401871576777,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":54,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-6.765981284232225,"y":76.0,"rotation":45.0,"id":223,"width":80.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":53,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Restart 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":57.234018715767775,"y":75.0,"rotation":315.0,"id":219,"width":80.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Policy

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":14.734018715767775,"y":0.0,"rotation":0.0,"id":217,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.decision","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.diamond.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":218,"width":96.0,"height":28.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Should restart?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":1027.5,"y":375.0,"rotation":0.0,"id":213,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":39,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#fce5cd","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":214,"width":66.88888888888889,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

paused

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":872.5,"y":390.0,"rotation":0.0,"id":203,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":204,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

pause

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":540.0,"y":420.0,"rotation":0.0,"id":236,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":62,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":237,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

die

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":790.0,"y":170.0,"rotation":0.0,"id":248,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":249,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

die

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":670.0,"y":170.0,"rotation":0.0,"id":254,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":255,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

die

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":740.0,"y":323.0,"rotation":0.0,"id":250,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":74,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":248,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-10.0,-33.0],[87.5,-113.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":253,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker stop

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":1027.5,"y":300.0,"rotation":0.0,"id":233,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":234,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

start

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":1027.5,"y":230.0,"rotation":0.0,"id":201,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":21,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":202,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

restart

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":1066.5,"y":298.0,"rotation":0.0,"id":264,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":84,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":233,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":201,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,2.0],[-1.5,-28.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":132.5,"y":300.0,"rotation":0.0,"id":228,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":55,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":229,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

create

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":130.0,"y":230.0,"rotation":0.0,"id":188,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":190,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

create

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":263.53196256846445,"y":284.0,"rotation":0.0,"id":335,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":119,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555554,"y":0.0,"rotation":0.0,"id":336,"width":66.88888888888889,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

created

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":60.0,"y":415.0,"rotation":0.0,"id":215,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":42,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#b7b7b7","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":216,"width":66.88888888888889,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

deleted

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":170.0,"y":430.0,"rotation":0.0,"id":193,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":194,"width":72.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

destroy

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":1133.0,"y":570.0,"rotation":0.0,"id":362,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":130,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":213,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":0.9595103354441726,"endArrowRotation":177.33110321368451,"interpolationType":"quadratic","cornerRadius":null,"controlPath":[[-55.0,-192.0],[-3.5,-192.0],[-3.5,-160.0],[-35.5,-160.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":363,"width":87.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5835366104291947,"linePerpValue":-20.0,"cardinalityType":null,"html":"

docker update

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":281.0,"y":596.0,"rotation":0.0,"id":364,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":133,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":335,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":-88.08561222234982,"endArrowRotation":85.23919045962671,"interpolationType":"quadratic","cornerRadius":null,"controlPath":[[-7.0,-301.0],[-7.0,-334.0],[17.53196256846445,-334.0],[17.53196256846445,-312.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":365,"width":87.0,"height":14.0,"uid":null,"order":135,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.524371533874117,"linePerpValue":0.0,"cardinalityType":null,"html":"

docker update

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":305.0,"y":604.0,"rotation":0.0,"id":366,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":136,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":209,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":92.55340974719384,"endArrowRotation":-91.2277874986563,"interpolationType":"quadratic","cornerRadius":null,"controlPath":[[63.53196256846445,-174.0],[63.53196256846445,-144.0],[37.0,-144.0],[37.0,-186.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":367,"width":87.0,"height":14.0,"uid":null,"order":138,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5749848592663713,"linePerpValue":-20.0,"cardinalityType":null,"html":"

docker update

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"},{"x":516.0,"y":570.0,"rotation":0.0,"id":368,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":139,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":183.34296440226473,"endArrowRotation":-0.7310374013608921,"interpolationType":"quadratic","cornerRadius":null,"controlPath":[[158.0,-263.0],[134.0,-263.0],[134.0,-284.0],[182.0,-284.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":369,"width":87.0,"height":14.0,"uid":null,"order":141,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.4230219192816351,"linePerpValue":-20.0,"cardinalityType":null,"html":"

docker update

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[],"hidden":false,"layerId":"gmMmie3VnJbh"}],"hidden":false,"layerId":"gmMmie3VnJbh"}],"layers":[{"guid":"gmMmie3VnJbh","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":142}],"shapeStyles":{"com.gliffy.shape.uml.uml_v2.state_machine":{"fill":"#e2e2e2","stroke":"#000000","strokeWidth":2},"com.gliffy.shape.flowchart.flowchart_v1.default":{"fill":"#a4c2f4","stroke":"#333333","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1,"orthoMode":2}},"textStyles":{"global":{"color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.flowchart.flowchart_v1.default"],"autosaveDisabled":false,"lastSerialized":1451304727693,"analyticsProduct":"Online"},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/reference/api/images/event_state.png000066400000000000000000002310221267010174400230000ustar00rootroot00000000000000PNG  IHDRi?{ IDATx^ UseYUb5!JC3TD%Dmd-[ٓ¤Q%La9o?_s=9uߣwy~,=߳ҥK    @YV")?G@@@! @@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ    ! m@@@@ƃJ  $Y`vJrRl"4~C@@{B&3@ 4Ye@@ ! 
CXldY&˵Ͼ#  CxD[1'Y b  D(@V̉@iX3   <*s"EB,:  @1 oŜdQ&>#  CxL[1'Y b  D(@V̉@iX3   <*s"EB,:  @1 oŜdQ&>#  CxL[1'Y b  D(@V̉@iX3   <*s"EB,:  @1 oŜdQ&>#  CxL[1'Y b  D(@V̉@iX3   <kX&M"ܻhWU{@ *B$Y  %x8Cm;{[Xy Dȸ!MXd-XKҷ wgyժU˦OK&L`'x}Wv9瞳#7߼Uw @4B@EoՋfK.ufB@qCk?wқyMW_}{=Ioy_u!MӦMt۝7C/NR Gi7e @f͚e_~K֧O7᪫Z@(Gd ԩ\@?~[mլvV`?8ϡ尢"@riSWX&7SƞKUV  P6$ꍪ7A}Vn͒d;D#  kD/"ܽl@ -I fΜinE4}Y z `@ C~YnlȐ!˺p'rg(4 @W!V᭘, dgH7Ѿ} @r <*s"EB,: I| @W'V᭘, dgH!M⫐@R%@: oŜdQ&>#@i_ *ՉUx+D 4Yu/@H*d@T 1!> X+ A& F7i#N94Y` @i|ʃ , Pϙ Q&@ 4I%ʈT I  bBZ$Q&F@ 4o  @HCA$ $(3d^&M@BP&.!@i_! @q)Ώ@<4qg @Q4E0 @i2P") IaK ~B1{ P!Mq~, );[E )@2 @HJfH!M +]B ҤC@ici(!My* P!MQ|, B T2@ iRX_&u"'@HSK#@yiV@ica@ dER(@HJe@ 4c@8BX#@HSw%@HS #d@&."BBV* I   PBU@(BX@ 4dv ҤR%H!M=D(N8?FҔǝ"E   @%P&.!@i_ǥÝvکM:+**Rq/$?w޼y_X ~W@HR#@! H!M+̓"x MTt5D2IMww, *r @iʈM7QI+7Vc4VI,svNdmM&m5 @&i2Q͑$7I ovNLJ%*e_#ΉU ҤFȄ!M&9 <)VΉiX)BRɜB2R#!M\lP&B UxsbV&T2䳐z%@H4A" 3C |ec*윘" /9,$(5q %v@i"Ъ1_X ;'aiK%sN> ɬ7J@\4qI@ BB13*n W6VΉiX)BRɜB2R#!M\lP&B UxsbV&T2䳐z%@H4A" 3C |ec*윘" /9,$(5q %v@i"Ъ1_X ;'aiK%sN> ɬ7J@\4qI@ BB13*n W6VΉiX)BRɜB2R#!M\lP&B UxsbV&T2䳐z%@H4A" 3C |ec*윘" /9,$(5q %v@i"Ъ1_X ;'aiK%sN> ɬ7J@\4qI@ BB13*n Wv9sإ^jwuWgx0Ynדh2aU.y@2iQOXNQ@o tb~C~Y-l]wI&m֫WBveVd3#FpQHӹs碷mqx [o¢Tf7l1(@HcP&@BH!徉j߾wy iOnl֬Y3:u=.ݻw!Z2UY͘1Æ fA8tI6p@kԨQL r};vh{㎮.jyS>YEoZc>QZL !M-\ݼ/X~WJgyժUc i|I;3W^Gy8b- iԳG5`"ɿ&~ѢEֿSN1zʚ6mzk]y啶ZkOl[eU"(UE/gBk@GBk2!4 |WVQQao͝;ׅ4)a /;oB/ؾ[kӦ {ZzƯA˅3nڇb͛ۺ[*{<묳 qM庉_tu>cܣtB'ĉ]{v '_qO*f?Uf귘ZcYQZL !駟_wzìYlW ?<hn=~>4׷ɓ'硜YU?&^ǑvƎkm~}ugrA1uԱ+c9W_=֊-U1;Y2S"4> eBi\@-bf{v&MF|ꪴ<nxՓAӨQC^4*zp3zi|+zhcwNX?N.Ϝ9絜r/^Օzh#M%!̀`6h#={v *J4sLSk,> X+ xHǘ-H {mڵԳ0rDՍ'z%h܏~6tSo{!MP~ p&k- ۛZ{qzVTRϮ̸qqƮzBܐF!;YJ IDAT묳 eB2|GvWC=dr{ ˶nKϙ"?庉 <֤rs1socTi kxwY^`nݺm&"(MTPtIs駻 } onH/zw*:tG.f6(gm7 LCiȐ!v%YE \hŔ*e~ 5Ci|ʄd8+5M7d]v /0 xW*zyL[c5V3FϨw.UC|k@GBk2!i|M#MW2E#MT4{R`1Mh2h*ZOֆ@iVdB@ꪇI߾}]4M&Lp8v饗ZVҴ{en c*윘  Ux+D 4Yu/&Foι˗%pc ovNLJ҄J|Yoif; @ii~߿.LpcVa4!Mxdg!FK&.iD(@H!fVōA*U91 +EH^*sYHfQj K  DUqc ovNLJ҄J|Yoif; @4bfhUl[ӰR4ᥒ9'dF .B !MZ7+Va4!Mxdg!FK&.iD(@H!fVōA*U91 +EH^*sYHfQj K  DUqc ovNLJ҄J|Yoif; 
@4bfhUl[ӰR4ᥒ9'dF .B !MZ7+Va4!Mxdg!FK&.iD(@H!fVōA*U91 +EH^*sYHfQj K  DU7wu o Ijo7c (TP9C(@HSFo+/I7]K~[dժUV[mxABl-IϗU.\~'_nݺЎ$o:l dsH!M+#7#F#GI'd;w\$ŋm6yd:u3gβ۵kg ͛[!s!xP W&_1G^j jfQF0%@`ƌ6l03f-X׮]i߾}GBZ @BVEF(QXpF7񪊪-B՝B]VlѢE/. m<@W[2vR" )   I`QdF MP(zxS=5D9͟?[Fzr-?֧O׋&ƕ2 !'A1@|ib^@`y^4oMV5W\qQZӦM/:u^4m۶MNQbȼ!M DB$eF^4GvQEƇJ QI&Gѣ͜9ӆj={t H!Mj"fFHC3@ z&M6ne p0׸R㏻zҨLF z DB$eF d ј%K,q7VZz" `zzu^zT_Lf@d4%e @iJg˚@ zI-tS'O֭[i@ZN7L^h`=ԭ[7;]]zDil7J o @i^(̫躙MҨgnV *8M4qonRCSG Β5! F͆@ 94=䟀zǨnɿ*D !MĠC&ei iȑn%MNL @i\V&Uˎ!@41b)?3l07? jH!M"B#@iB& h1{l7VyF3Lά  n( P! (^x(0m47FcM hMʬ | ( P! (^x(֠Gq7n۶F3!Y Z BTT#;e )s1g4qZ`oލ?Wm3!Y b xBW!;Ҕ4@4Fkzꕷ`l(!M}N;KQSĔS&^!@4znM5̘1֨Q#7LΝ|U–3&p뭷~ tSꉐ X?!MDHKD+@H'kCl Ҕ'L:u Xh`&O&kBX@ !x#Bx [&3#Gtϙ33 h0!@AHWEIm%rm VYZBvȆ!M|gƟF3L|u;xH~V4In-!<$V᭘4e BxYf4@#=zpL Pe4[ۊ&zKZWV᭘4e Bi3Ǐw5i`=Tvo- @qIiZ"*;CxH[1giiJ@ 4gIzS6m3 /wP=.q[^WY!|Ubފ9K+@HSZ_֎ )M=k`=G4L3M4)Y+-ww=Z n+B*\E!x/Ux+,!Mi}Y;dC&z g& ܳgO5"P@CQ-q[Ҕ}h 6/?zYtqvEٿ/޽{ۼy\onN9իkv[-G38-uwVYe:t}Ƕn^|Eaʿ?ᶱؠA\s9gٿu>n;m+vUSXӪU+zE==ꫯGmĉᄈg}֞{9WW:Wg2]s 7 8.B{#tvmZkk;ڒ%Kor ~w8N{yбPm^_ťl V^=ӟk\4IUOoukΝ;~ /׵u÷b o_&ⶻ` <3ϸ3ݿF5sҳgOkР=#6uT[huUW]euֵ~}uM{sO{ƍ{J}A-pG|}z\>pV itSBb]vŦMVp;ud^{UrƍmwwcI&~݉CN*m&nhx_`ꫯv‚C9mS=vT\s{f^omnݭ{$ rKwSF[֧uE'2M瑮gͻWr B>&eԨQ.|TOOOfNp|:vW{a?QFJ7Gu1b=S.{ʔ)('4~ r w6]ĦiqV>Xń@!AHF6ꪫ:I!MRi]='u}.cM`uK;t޲ 70&ts/?BXXR/ iPԉA6mV[m>Ϻsk׮k z)mSEUooghx`R@uU3go{իE!}q=@mP8;d{u U(ETIOo*ٳgz뭗wHo5V{WFiBѤ/I\Lڏ:uoUtF tpDz(!fm@Y۶m]璾6lX-Ei"deEǢy@=hu/vm7;#&|irMϗ>3wϚKujNuW{-6}ɩkD]WLa_~eEk\z9]zTW!/i(eH06G}衇o߾X2N~ lt@s/\}iؒ~^W:iRǝx.1͝ԛFLJVΣ6/q4ǩ x嶣o`߅c4]w*Feq(5YWZFd}ot>}i.l "_? 
ӔkZ]ctIt7~e_cҨzu#׺TanHFB] #.tCIH>ۈST;^U_XNؒ&?+=GmӧOw_Z8l̘1x{_mE4jA[;uR#Py]*~2Qگ4Jw(SotRQwi:4pN u8 S i^=S]R9dQ2<ޤFǝԓG''QB™ܥ WBх-=o,MOil _􈔾}ѣkS穗M/1gKeUMnٳKV}ks.w&|H5ocM^ͬG5ބ}R84AH\Wu-oQq}^Փ:+#UչOuݩFጂ'I?!XR Jt }ޒt/{sWui){FѪ{Hq@ LnݺJ.X.k,UuM\btA4!hL(Q](SW+=~5SX @8)5tQN-Zꢥ@Ne3F)?RSCV=!j*/}jr%Vأ=gp&T|%8 +QUWW=ڔaRo],O<ٍKP %wT3=$k=^?S5WWF9*2ldhEI=g}IIc*hPQ Hk5$cAj4_|bTwԘ vt_c|ыz4v3ꔡ1ir+u,RYe&Q2HUz-!#2z4IAL4FG̨z(P(IGhFLpMLѤ]㥨*lQSUW@ƫI-*\ m3io=T]O ¤CeWpT9)uHD.suIND@=i4QI'fEzyнU;"@?:)kg}ǻ5+G )!M6Ob>5!B :u_/ OI1I״~5^ȡIe <4Tԗh|vk їsOnROSzk}Iʄ@Jj˽Yk`F}Ѩ7+D#NݺukXE\Q <W=a4j.z?w:god< 墨ɸRDQYYC*sV=z]oѳ i4_@q+:oz|ǺsRz&.wdϸiZ"*;CxH[1giiJ-i 4ud]͊ 7m!S̪Ye{=؞^`swukٟbzAvm{j<5d86(@C9mmEHSl`yUx+,!Mi}Y{?4DXcc{w 6|ec  Mlcݣ{_{[^c%FQP4pи+zn+p̍]7-z(i;xHgV4In-!<$V᭘4ePHF?M=un֬{B &7p{I h E]@PPf]wuoѺ5O<^8h@KAh0JU :ԽVZo1W; J-G=cJͫ'F_ף3>(R(SNJ cvvĄ@l!M[KDe'xUx+,!Mi}Y{icnUO՛;uzhRH0z{譝 q KdM\ sD!M-m۶=|UDo6cG\s{.6ʯ՛2kVn]ϠN^Ҩ׌4 uf~{{(! m qIۊ&ɭ%<*sV IDAT=4~ֱ+ ;+Eo:]H9oκ`VUch:Ƚ7t 2t#=zƄѤuh/fΜi׷ WO>.Ѷ5^IǤEzvmg4WEcє'Cm]c4j`Eo@J'wP=)")}zUUx+,!Mi}Y{iYf)A%lr7pEzQ% e8MFt^h}쳏덓;)X8zNmԃFmf͛7wkϊT4TEU !֝xcq[D\I\CZ*sV=4~q.]'4Jp c=fYs5mm1UIhgn F^P4h=޴{w}#QS޽빣 CS4첋O&KF*QO=N7GnxUz(ҫuPؤ`FihY;xHoV4In-!<$V᭘4e ~a#=ԱceGWHA.%\ vh ۿ;ow=i{=>CA! ;x`# 0'NtҬ(Qocǚ^>zhWQFPDha=۝4]# g?Ss{'و9%wdiZ"*;CxH[1giiJ/@Hg+xQ/Ѣ^4zcSvsǚh4WOnsSУ0fuYg{zR@3h ՛4~z(tcRU ~ǡԋfw7]r4VHӯ_?z /`GqDI^^3 ԻG1ytNHΉ#wdiZ"*;CxH[1giiJ/@Hw+p p+ЩSvDӴk/J8i]Ǐw=4@z0@pJ! oŜ;xHZV4In-!<$V᭘4e Ige&gI2'@Hs'_ !bq[$DTvXb Ҕ֗_&u={F8ig4i]/Ҕz,w1EEۊ*I A=n+**[kBBXB&(Izh`3 iHx< \x&)܎%),}yJݵkWO'BR '`4W!Mf,!M-{4٫8@5)cNXh`Ɵ)M5YhM4A< 2(J"xsHh v0a7@ 5f*^xCր@ը?z$(4d]&-/V i7/W-vgFzU f(aF&Gւ@1zt}qPHPDɺ@ibf3Hyj+KB;РÇCO:$7@pF}{tInTn V,gOx h"{~G{w]^{ߕe˖4h`׷vɶzk[k<ؓ@H5[J@nө^c͛g_|ŲsL9Fk  X0@py7g| i4 @L<3^ѥhܸ ltݺu i|!!5T\Ju%r )kRM4qwЁ xn #eu(gb P=z1&3XO-٦[jױ6wín3s?߷_d}dO7mڴq{'B4*!MlsL5jY6-jƮ@o K[/;O?/)`FNAƝ3n JEB> .o6{'mml][62ɷX j>|E\hL k:vX7% iPfi IO̓O>9f ֶ{ooٴ0&߽Wx3gg7m7?,[sn#Gtϙ3ǽI7%ɶEBl{J,pF7B2jױmw9؅3nUɶw9nYuOEqZzҔ$+ccfǽZ3Գ)o)o-aS9F 6̍?Fh`Ɵ)U^n cIHHe{b] ? 8j([(iXqk_?)\t} ibkFl(EۛUW]t? 
ps]NT>(٣N{V{ڱ+_W, kr1gv1_ g4@p16lT" >4h> h;o٘3 fvo{rLe56ԛꫯ^atL* !5D|;wk5tcVwCNo6l[)*cvo=jXÙʕf/+F3qD׌=D@oSߺdHH"PbkV8A/,cM6Ùn޽{^5IiXk/ <袋傐FoFs=YT]@97\ǚ=q6ܢ]t뻐F3-C#g&;=E0XgyvXNzhҠ{n")Y2| 4Ȟz)޽-Xj={uYn&;CV3+u9FgiWZ5<}ejttL~ h P7o߅t\&MC@7ip`=sqږU {X5z+XyL P>BB>@ԃ%MR5-[t>>^CW7}V/?h<0hcIhR~#m7?v0Ӝ9s\p0m4vFYyxs.>}]DŽ !)_c$X@op[6Su g]ōQ ieܛ^4e*7c4MqN3'UPon L!zK۪|ٳ}~# !ɫ03`@:wGƠC7])u"f(W4}҂sLWAzhRo`U3zuU@Ӽysj*E Gktи4UW5gzy"ɫ*Y  %ϢUpNmdS6Lw msfS`#`̔4]cSXGH^m&Iq7$sfwh9k5*;o {^!I${ebPh-gj}qҟrQ|M Kryn|I̒UV{3MsqM%*(H,!X%kV9&khz 0;DL6අoY Kט5kx/_V4&s %+~ r\m=MS΂VB4b d-x(4kVAOͬ HuirېFn!kbk !MIq7$fJBֳ.?*+w\~nָq„4QI8<c%k[݊aKn sLA^\cFQMX_xiM猻%4+V\s5vhksI>u{e]v衇ZWT D%zRPgsm[ڟ;[,]bމ׬cǎֻwF +טQhaU"~" _73ƗdЬX;wmVIʾyv?|QjaB$YOY ̒U0`u6\X. ^zuI,f k(  !MIq7$fJosRHGvz]?;zC7|&Nh 6}[o$.(e)x(³bţN˷y*pUmaU$~" _73Ɨd,Xikֶݹt܅IH>l͗<9XWY @ϊUpNAyt8[ޜchˬ0,\c&*N~q[҄wK2h\ɓ}m]NruER|ZnmG!Mk#!͊Up9W;ۦQ%zOyu7Dc&Yƌ*N~q[҄wK2hWog}<.׏vsC3&G=+C5`̳G`\ رchJ#Yƌj*N~q[҄wK2h^MNrUEZzv뫨t5)@V(|b[*qq9Ǥ4;k̨$ !MIq7$&_=s E[EV5:NI_n<~8ӆn-[,+ !MY,X`]vnݺԳ+Wi!dŋmɒ%vAٺ[%kL?ndg>) TFڼOfT&p'zYjS/_Vi9± lԨQo$ #K.{̽w'Jol:#{lSO2 .n̎~wU/b۾)Я<-K;!Mx~:,={v1cƸ:c k֬Y1il5״.̝;y;^6 4.Rl/؝w*5I⮛$[鳪nͭG֮]]FM6.;Mj{Ow;ҶٱqAmJE@9۱kJY:h| 05jT3gX۽F7(Fmڞz)kѢ4iUM6ąjmSj{:?иh .nϩsEJ&]T_<3G}ڣ o{~\T͖q.6s驪mwMշZ}i~;[Ŕ6{h}1Y:kձg;5ܸuyjԳvw 紷̳z/zG_\tLD IDATujU׬j;]vK|q7b_`XksK%~J:G菱M3reB穠Oט*ΧO?ulM)h ZOV@ R7j*Cu׏;s^".F7sVe ixM ;k`n@6l3Lj}ڽ{w۹.? '~b-l̙n]wNoqƦ"R7)8. 
;)n;Dv[]uիO'ߺumbSʒIN/ևw+83o6e}v/صkW;&c7^aǝk jwOqW>jm >mv&γkGeOC{[!ڒ?_{b{]gyZAզѻjUXI=(^#:b@I{;^*_Κ[>ZoܣE,)ױZWwp+Փ't7]خOTuLp ]L |T`ҘL*vSu=c&}c?Vy ;XGuIC:?}gn*:Fkvyڠsߝ+}]W:gwshQ;1|G#837l#XQH0T t|'.ZN3ZrYu ۻߵO!͗iOzz >6j=so9aߍofBWx͚svߴ!Z˻Z+z_ow Koen.]Ȋ A+^_BVu|Wπ}Եnt@_HzO87~+Mozow-Iסƍs!vԛ]pÚ|daڕ:urqEGsI^9rǹ5q &؉'O_Lk&SSM}:묳}:Jh`R?w[K\HSQϚ5^э]u`IA߆;F$Xz(QP1g"L' @"L7; nt@8zmN..l:yԴ=(㎮DV[m.9w2ҷzE_m *N8:*ѾoMXNZ.G9]uՇGtvOpօOFEn>/վ<ջI^ՙN,uPfł,vպzhzmmwoh4j9RoiGz5eV'~qLz]l->8؅4?ݲǝcql 2;pkwҳX]|+z߿!}Y~{Y v.,օ:&Gf*qW.OhuQk>KI!N:fTwLӱKq-cTVe .>uírЫ׎¢f!G7cSX"I~0Ҙ^WHQqS&]-Ep:Ꜥs.buMe&>B?OŞ>j\^yzѢE5ΛҨ[ inmu!U ir p~vau r!+`+Mz%:+4U-ӥwnd@>61f b|Bu crp uLUmO盿.R[y$8/`գ\1sg|(=~U=kנBuY ktT2Gy=LP盾TUH/|n$I7`3βۚt̪t-F_:PԮt.=WC}9Q%XT߂zTHSHcB=æ. P TtcGt!FZVa &[F湓ڹ! {UҍEu]j+^V5A!(lH'cF=n*O13k vH˂"  iQicuUPc7!]^cF'X.:Vu w NcZAH^y]$gB`ҍ> z|IǠ` ǰ*cuC{n6&Mח(z<%Au}vt/q'zish+sESF?!'nW;.xa}v9=<ċ7 ~T_fwμdۢ}aT{j3E4Q<5Uu\4Wt|z)(skE`}uїjTHS:\7ڶ?8Xa,dR/dY}٨cz#ygWC| t<ԗ:uZs")\;|eOK=/KtO=+=]#}M7-Expߨ=]ppK}W֞4H|kw fPejTr nzk耤V(횺~k]׋M1t30#n (}VR.ucvx֣E 0ڟ Q02kM_6]} q'mmò U9+FIj}8U:[`҉@ɾ.ՕUJsyzѸ?>5US8؁4=C3mnZ=pm信dZ{Xۺ鮶ʫX슻+l_Wu~Cyo>ȅ7Yokס2^ kn+ͣBsr]WH.XYkP֣F:sy͉>]?ʩ}㏡)$2OT  J4HJ(ҠidjRRJ> BMMmggY׵{k~'{{{6fyq&o!UP]2%i8E%T8N}M9%#'2~oh^ sxQIsё4 x$1?+K8;9|湼 s߰1|4k CpSgEQ/nNY~TE1瀘K| Xe<*?VXtpc!&1/`* V#ixu>L|:4ԇB`{Сa01fe- ߹4`ZS8$1a㯓2syV<ȠQnBҸo#dPՃ>SR /㟖!'g:ՐH\Ke κ-wd{;πդjG6iP:>½Mzp~ɽ(HUV = uq//bpIRH:A1羃s2 *J$X@P!eiH/9o n_L,Sp|RZ5}6<7 Py93&@O#sEÜ>Iܩ3O#?I3ndj peĜp4_~QF>9^NSm J] W n/OXZd5c6!8|T5X$e³q7'F8RcqV#Qқ0{855)<[4+0JmBL4Kǣ6n=~BsBDY<㷍 962B\捎qEh!ihT2 1$ >kn4ơ|v;E n̎cƾH Aq0ac9$ O+)qSjA.R4"' E+ ^<|W/G:tH><)81 Ó؈f%BKHPi?gy&V<88&&[3`]&M')"};p3Еf I.k&V/t*j1}*Qjw&Ҩ:iL[Yt>o_&5 ޿{1+c5׃{O9 èw\iu4k2&mXdJŸ {~ZHo&#0W7G)ǽ@ϸC'|5{L^xWzI=_q_5vX= N<_@sH@;P֢bs~L<ۉǟ B}4LD~GVf(\6̴H;+.G6 *.yOҥayi2$iwr4Tדϛj*ArfDPrŹkOeFxE~n.8B)^;B4p'_ Ƭٖq=+|s/~4q$޿9q \mGW91&sQ-ವ9% Gh^gfE+}{AMC L`9<p.x&3sBy4h4%QY%$&|O/|[Zv<iÃ#ڙ9_Z|8-:&wd-&e0vLx@@ĸԗ[~ DqlQn*b3X[p& Ll9r793Kɒ5}?cGird=+LoL8MږN?=:dt:ݡ2#j48Q=Md'Ar7N=- kܽӿ.SӻǦv|KHm8ʣn{!ؠ@Bɔ > (Aa՜${V0C)d 
YvE3&R7*v~:M0pG$9q A+ ].K 1 9)foލ 3&ĩa2cyco{0L]3Cfvb]xqĂxH9HkP)cU%2Cذ<\8E2{N=Wj4KO>~Syqe= *Ps[I^0 t!93HuE똱/.gf|$nHHf;ڝ/9ɀ4fmHs7a1t GaFn1M$&1 T [y;b)*(5+H  v|~PRHz\4ΠP'BDS8Lxdc А4/XIx$| h`<ݗdRc$M@XY1ijkV!h_2&I-dʭtB@$Kq* L$zI㕖,YD֯_Qˠuu%\"yQb&Z(xd'T4RwdcҒU(~m m~hwx4ٰJV5Sc_$3 P3&3EIImd~iv%wn'%//o7{xMEIP3zfʻ+f͒k *TH *UJ H-͵g."$#,\Ɉ3UDb7]^|i1R3n@LЊ&3f4BAH̠f$MfZ'vg8+L[j%ge;G;LWO;dAb$Z$9j*6m|~馛{x…5Eu(fjd$Ɋҥl_E+#iou0^`ő#IG'l $ a'H/0Ha#=3uT={ѮZ4jH5%rSG4H-ٕD-[(Axb)]MZG73:oV&1sx㍚??X~'S?3CҎ`!If6bad{UIsWq%hjy"4 X T4ԪUK9ϟ|ISx9g $gΜRZ5}@`$|AeՀ(%{d˖Mv!^Z>#o$wJ*S\{rى*pmKC0 #ij "Kc蓯M<444av$B/^5o?ŋℴn1%M>'4i=cP8e Otd ~N`c0 C 90&9ٮ5x:6Ohn\M4FIe*΋N:0J\r#WP;P hnRlbIDׯWe d }5S@[cP^o}!`!`$Mhކ!7ΣKqjYeYdw߭$ӹZ/SҤ\ .\X'9xZS1&L ͛7ᅲ7Jʕ3ÇeܹrIq:9 +vҿ&.";4$ϘU%UqPRl\|2:h9z>A8†{>5&oPj!_I_e5<*C:zѧ5RPP{1MH[ wO>`S?;fҶmۈwAs=#>)!1]|fO|U4]tN_yŝL;05:NZ$ U}SsO몞DxL+ d3Dnkdl^{Z C#F҄L;!`3׷gF!g\V(¡ o̷&>g[!5ZX} D4 \]\y[|Ko ߺNvnY+[(4JL!W,L`NHeb䝁apuFqCYɬYdӦM)k i@hZJ'/RBv\)do='yt2Ϡ-ĩR/Fd m!D[B%GG' J›wr/IC];wRJAKҌ9 6 }sArS٢rU+krtNٶ; rp/)m3&]).LT2em꺐0 !`$MԠd x8T./t\^&9;粓 ÿ[?WRƅ3 5ūrƿ0S&묎BF o?묳TuI3KҠr_ȓ'zI}v}_ˡCdɒrh d^ADHL"ETI" H $ ~2B4NArJ)^)$ _±c%i|! dkXm1 21ڢqhn+'ٲ-yH?NڱG>*;6N~Jj{KjՊ !J׷ cl5ߚ(0 `$MlpI[e4a @FUXD9滋WMCX9sJ,YFEõg_2wzH!`BB8$ J$g^uA9AX!_| C^2`댃*!L$h̘1DՓe˪W +O5O!'$  C[˖-D_ORLK]]/IQ9e{Ik$wAB Lj+3ox2zpA}&MbzJH,dS@e|k lxF; }b!`!`$Mhކ!퀨aP{F(|1W)(dx1dA' U!|4浉 ! j$#1Mh/Q kEXJ1[n*0=HG1 a^TNB>i;QOW \]Y;v>ӦMSor +#-LB fRP'V3c ^LAgj|&M @ 3ꓙk三4mUTjy E KRĕXk¶3 CFx!"~#I5Mh4#2EH@vW#phZvsGU ^QS!s!`!`$C4c0`>S~ݻ+VLO7?FƨT#KGҬ_^r-Z'N()}9s=ǹ矗J*!f /^,yJx-߷o^I)vI޼ys5?K&N[[ne?߿u~:t `(9fϞ=eҤIRjU:thT#Y| C N!kP.X42aпb 5j޿)RDdŊu +񍀑4l?z("i&!jAv?o-Z-^WV2BBcs5uU3› |PiH|·p׮]rUWI׮]o߾zy'YfM9=EJwyG !FR UIGua`̘1dt*!PAdAW\Y͛'5jԈF7s!V +v0C$Ht2xd  kr٥KϙKEȚWy+Wi ,&i,0iae w}\ K,pH[;IfOp F&I㈚ŋ9s@.@Hta@.'ιA^@N0A[\ R%+dA 3P)BNOWWGu ҪU4uֲm۶Ï=Z@i46 T:ZQJ(<ܹs 6!`x #i<VC yaunڵRzu8pʳ! 
_ LѰ" ^ZʠUMJICDp3haD {vNRX%M 945ԣ5k*ymkr%j&JҜy晚3x@z)P h)xoذѠ~aƍ֭[UI %6|v%r A@b`ٗL?*i6mڤ*¬h[<'QPiqIC or&MqM2E9Hmθ`…ZZ% !Q +!`F[Y} CxHo'BVaiYg[pZ?$BneVlY ̾}%K_&!yUrJb"`$7pTmEĂ'j^#Gj$ t_~J`LE Jl E|I_ aGZ!9.jP@A@AEpE_"̍ Âȁ(1b̞=[À9/a t /(aG,냘(sB68`>e0 xCHxk1! 킸qV1Qf .{97+z p1xOM5o=Ry6[jP3P6Q Iê, $ YF& !d3)!ԋ>JSx\A#iB0"%Kj+2"?hHѱcG%)@`,^pFCD  /6Qocʄ+Trx)(ix֩SGU1ޅ x<)("ΝQBF} +%t  >`w}f;)m𤃌~M1A%bDXNbBHO5UH|P fA`y3+a  5 A-TxSFf˞ QID2"݋_Ui/mŠ/^Hu`` 3e#{#iצ„hX4@I(Q> aR{V_z˞={J%ezߥ7#cR=iח-Jj0qs}uYjl0 xEHxm9!G0a5 B^5O:N퉠`%h>㨵NVS M45 '% I?iX'roVHن-r޻v102HQfA1 r/V#iNA{hX4\'?.PҠxwrpz%Cc0 #i@8~*dau%ĉ:xc0`i\^XyРјfo _ѣ#NQW|I>@zw^U@̙Ss߳ ~`xA@U9rhW&:ޒ2p:+CH~qVA/4,d"Q !g *!(I(-iax Xu kH˕+'e d; Y&J!Ni?KQգ &Nh9 W c8 bc b |y^2 U{;#i.Sَu^ i_JF?xOm]!`#i‡di@1^''|^) .[LtB8? !gs&5cLy]˸ɨ}%jTɓG @#` S\%e $ TNF@3׶3BH !1%T1Z+$ ˳.U2+n G(#1Q1¼fG54  IDAT!I $V4dDOTq`%PA0JDb 7iQNUV4Et:|ɞ=s88ۆ@I㭮hNĽD$3Qz$ٽYC4YqnR_l٢0gL!!).\8G?b$M& < f̙# a02`?f:ݻRMon>}tH1ˏ oS&Mi) PӦMVZ2w\6lX'1o>|ѐ!C/:9gfP0пWNϓek4T1&RqFx/0)/U/QDx]xhb޹ P71fBqø6,Y1 "1:aZPWs9ӋIV:%&eʔQKvdqF!p 6(YBi֬+nff-ZTl3sL}O5k֨ a`_|ɓx9LN`]t!AaJ( HS ~,=Xµ]Fœ(8kHOJ52% 6)I )mڴ&M<>*WL+SfugB +oy'gt(})&ME<5Gzf0}v'InDxI>ѺukU;VxTk8Vo Tηq$kfX8fJދ.+_*zCެr,XRvٟ; 4PD(F$B+5x21j(yGq wzG5t  1H T ]ի_s`pjs@%œ iƏ/-Z6ޝB=fҸҬY*THL=߈T*bW^!Z>T"w|'?] 7v<;yꩧxFJ@ěF<YPec)iKaB D i!iF-m۶MߠɑJ&+ISfMU lX/?iۿFLMc$Mt](3~Z7oPQԊVb?%ݳ)!+؞x5 L[Noj56Ģ_Y3fK/ 9ӠV%W#{.Ceּe2cED5P|GŒvGG ڵkeĉ)4yrK~SϚUVUW]/;&*/b x8LϞ=[ãOICd wJV1;5 >DƅIN4X!; YcP)/^܀JbN4(hX/n/T5YγaIKOd!yԯYQZ4UrfȚ 3y+4Dc4I#KdBT֩SG[ΓOWu MX_ U3 dP*ű\7<8ǎӬK2fGil " B wb)%  ۊ\ |5:5 @iY 'v,M׮]{Vx iP fDM}PrD~fr0Fis9$w7B@  ™PDÚ2[U >YKܹjIOeu{s#GT:$ٳg47}b lG>34S&v}4|#5nT *4p֣_fvX&$.R{~`(QBdM[ jPҐݱvںXb%uM{ d1=@{Ԣ^=Ma3?^4RVOC PtIiYgXB^]Bn W1&\Hv|8H{qUsfe1B$6#zs+2Jv䅫I.$?S|'RX15GDŽs%KH6md۶myEhg~X{dE%T^+}{ՕI&Z>d$L/~Azˌ'Ɵ{˕! 
3NY}PBAMӢE 9q\q2lذlsC԰ ӱcGg /-T(T"J, L&aj#㋕"=ܣ^z+ȑ#jvo$ 9O:ޣAUc%cPԹη{:Bio3gm4ό9"}S#imi3ڵS#`<Hk() *K x.y+H8<1g~2` ɫH+`EVZ.,!!qQ ,pO Tx}2`;P9OhJx*BPKிzϨ7P\xᅧ@L"Eo߾ݯ*Jp&BS5s bBMݻc,\sNGoܹsivGVh2@,^Xxx@֠w޺ad͢F!5}D ,`Ro'kCGJCKC8 3LfbKh"RL<(t3aŽj0!L`P4qhJ(+ ԰B(P} ?|\*V  *4,l j+OW;!' r|ڗ q9Jb4ڢ dFȑ#I&6gkD!+QhPqIO.^zj'b$*cak,?&FUPX9@SdS%eݺu:cEI[j>3dA5Dߥ8zo x9ܸqcm+UbVPd'$ 턏0Yzn>8%o eڴi:Xmٲ0X@x:ywO0Q"t "ix qJ2AZ<΄VA@?}+WL? $ 9!Oo w<\}}k( }}kxӷH#i2Yz{8JiD-]cWư^*Fx5.a 30'ot xp UoIΒB. 13{cm:%9djHGΠ,A!W (do-I?PVㅠ$5}/)D*N/(@?Z0$Rol}PE ;S/uQB'5C5R CvCfD8/!?LRZYvz:rDfWW:7/ge"solR!I;~%]_}C`¨L+iIC d#g5 :B (vڥv xQb'h/)Ũ!!!oP5ETZw2&N1ҫo"4\'9eFԄpg(dg}M ;l-F[ub.H/۠{V-ҀI2hB] + F>8Zl$<έŊ*!]l ^-].S"u:PT})_N/+_yU `Iɋ)%@a3gNMŋw5ږU\r $Qċ듬c`:aСfb1jpz_SO=$ 3C2BA0n!t†?(f͚%kPYxw1*-Q:3.;(vҏ,_KCR&pFx+tx$Sa€! .0x]BҠZ)ݬ|ڴgg*9-m=/_GU4ZwP'3ZL,L[*n/zu hJۆIj'd_Վec&HFz]~4S>~Eߥlva!i֮]T_D_E"Ngb1(J[g1U"d H[Y{%k!nVZD)d^k' ?ŸD[ Eg,[s? QP7jH#o#*p2Cwk&4㘏źI#@!N 1lp4 ./f$I`%ӂ w"BBҔԇ!ʄyɹٳv?X).W<)wtgLcع dZPG:ˇFK}e4S;n΍/ك駝TI M<3I!TT|QnX,η7oQ58A栜sȟx} hK2ol*4Fdc{C S\9 !92:4x`ygt XB^ iVB1}wޖ[kG*ֻG \WU]BҠqJ wMWZ*7;}/<'wqݻȍn9td9LIhU)Ӭ,7z8Ye;^"/#}[7OQ鍔NJBfDzIK܆f1 _4`\xqv 8z Q+q~٧Y}T>,Y̢i)3,g)gq(o,_dLӚȱcj_=a:|Q9/I`Ju7+Ҕ<IHiL $)r&4kɃ>(?}.\yZ+YH!OLB W/8,H͝;W /(~~ d%m wXөz%H 0vI@&S̙3URXV-UC!fJ5*,j7ssig\n:47ZI9OC쐰d,F$c'5ϙ3G0p)[+o߾:q3f,s^p:2l*bŊJxJp_|Q6n(UV!Ȋ־}{ٺu7NdҥdjK,$ ֟AQc%: >\$ro3^Og0&1&cСI;eY|X{Ԝ IDAT|Xz%hPPP@Ȍ=ZM·l"wznxArһwo%|~5/TNV/Ri޼y3T(i(J4WF$ gϖ6mIAIzug{;n^ҎЯ0sGXl19Z#i2n!VX" #%bCf@,Yf4+_?..ۭ8T,qtf\9p[&+ƌm$MFaFxg8D2Д(QBC0ڶm 5jPE j1#GĿZjJ*Do.W]u? 
ԩ[PnEGmCxUjJhg0X'IF\5cCVc֖.v8|/ ,@ĪI+,YD r1U͚5kRcy]|2Kj.}nO+UcI+BԩS,"T^]  7 -JIWT8NIW ٸ) z޳ % $ʕ+%wܩ~^T@Ϣp\8D paÆIǎqX;F8UXZxK,Apk͓^Py-Fr$vlC v#V Y1DL%^kv튺/?K!ڏ61k=*-5A,XP4i3iԫW/!k2 !0id6N=T`iݺfۆsmݦV bŊeCT>}9ǃ( GgQ8PMP?AY-F<a6Vx СC~jJ 'F߲Q~eΘ.@I.u]jowߝTixBV%ed݌+e+ƋP$0&1c$ ٣D b#Hݫ*B(x/B/P҆cP&}s BumN9yK0!!`CVcB(6*G/v%p=e}U{_7Jlic|k53{X7&,0A  4RB).Jö= &i\!CB,4@i5\7Mއ1灱Kݳ}r[k J S%o޼HdnļvdYٳgb^U( D+bDM8A X$0o~Ӷ&MT &߯ȸ駟 >k墋.J*U36 Bh> *.5dÖ jH5%o)sHvIZge*OyQFMӹ;I<9sTPIg+yBVӬ|Lٸy2e]dS%i;\~i<ӳ2ryٳɠOSYY`AyaR䚂+Jt)iW!pرҬYK@x_LV5SRm$ZbdVe ^|(jDH4#ܹ^f%Jа?0%<_ʈmQQ-"{ȑ#O .*l ȸ9JW8^}cw*ZO)l[Pqf.E-رPB)B6E8wgH{oO͡_Ic$M;%^{`@|4V❤!i/2ZeN&rYq >GndKI=ޮ-F ҥKڵk^>^4]nP:l( 辶3 Z?!]<$ oO\'Gn#-),-|rw἗oLɛCS*_mٮHFWT|y᠕4fNk5rO<, -\\Ζ :rT??JK{k$Z]ve"a RwRǏKbŌ lcDM@ $ iڶh")X4iD/mڴ%K*)CB1g1p|jVX!EU†0gҵC8p߿_CYܼyu,DR ? gx$ < j82B*gH Ii#+/_"h'+Փ-"﾿Z>TxPm@}e4]FN!M=h]SPwo'"7@2\6oV&_/W\v>Hn|d4|(B}~,$OKٮqc͏Š;b7!RM收i'+cȖ:lOZF>_]hFVN"`$dA l-{2ehVY-\z^rwXN: &zÇא&{3gx \8 cߔ4e$4jDH4<TjUyWu< )$3$L^;~l[omN@[}Aӧ~ɳl2%Y"U8cqeAXSZ)\ L;jB kw@T(#FP̘ɳԭ^Q.D>T R7˦-eÊUISa;[jzfscOɒY|r6-)qpd{|ܲ;S߉c:>3 [kL7ㄲ4348C&wH1%X=7j j` c&19aE'wI=wBFԄi#԰ 1X_"Gq~4 qJ+dLCY w0_^ ]a2슛~o>3fU`jġTR L\P1ဨXʀsԃ[4<ЙXlD?qs$[5[x+/gN$l-f|ICq)& z۾@GI3Jx0!p JoKXfEԢ^xG6a̜FҤӁ f[`zQFɋ* 6Ab 5J'd` a0x` y“p(VƪZ„AEBĉ8pYCر*WPwb*|^\$$ ;#`Rrełz@Ұ)StwpHvח_~&;xxGmF bQ kUSt{lXBG$S]^Ŋz4 !\{}Ԗ[Xc7 *V  x xհc%txC*pBNP'b$M=[gHx^.L @!KiլxV vܩdD+(- oYR7B a+W.U͠z!A:$&b)~\T*믪 g%9rz6$>Kuj2y` p` )_{gCQYbH Cci/, Mc "hHeEDB,YJ{^Db,P֩P4Mm<}}9zk9}}]|w<ڵ;Ѓg\%]i|aAFTy,E8$H棱9g.bH Yp nX {aqE4\8wź(|kF qZY ` s>xp,6$ |gyx%b?zoуx(Ĝ i(Xb5D 6n$d3ଂ:!։{ڵjX3y<7h&cr4V HvP$O 4v|*'о}{3f↙J `Ucx"5YcO*oc(Kp߬['n/,f/ vqk`|#HH*ད#+b7X\i,_1 ;4|2S縸5U~/% IDAT&ִgs2!?,ZA +»`X!(48VE,5 qkBE&# x>3>$=~5Mn,˟U &BMܞMጦܮ D\FH3Tsr\9]z)BjG|.x VX 90ys˾XiZR+x+&MivxQwQn&{'n$[dVqɞe) 5q{6?rBEEakÐ!CE]tjfժUf;Es>^XZݜ*q(=aU""}4`IxōD}̴5M ФI۳)*cE,kk8+Wufq H).oM"))) JъXdM}GlXI ޷hx[XK2;sdMβⴹ*&==wMl`}BzϽ[? 
P 崠p}bTڡ"ğR^.=uY3xJ$ Fă@yf`-Ew )bEIH}XQ`a_w?xhw k،xH(V4u5qfٮE|4ᴣ j'Yҳ@?C",ޣR**L|=fΜj׮m1_)#FpzQF6ؐ 4vX 5!sD\{Q-&6]v:KT-[Zgl3;PNF}4a +46ӊw& 0 -^+k8#'$AYz,}4q2˟'߲ns;wv'OvSLq7v|nF{be+4[nq=z(I=`W-6we`'9g5r?;o\X8^5*"  cR[\He~½zş)ibw]<> c=f/ƨ\MXHo|8p-R`׿ׯ_?w'Bǜ{/#dAS fQ7߸޲?Xਖ਼, ^B*K.2~nڴiM9C̚'Gsqk;3sԧObVx.\g#:u䮸 A`!5pg@C|Rgg uf۶mkt͛7!OC#׸q܊+.<_''xcވﺞ={:yH=rH{믷 ^}6bN}?~4gu JܓSbWw//Ԕ.ҀT98Gyh AlʝվUQęqOv㟚mg(Q{S'MӾare#%0ǍmLD)x`v#aСe" ixU@'whgYʡTN1Xx7&S:Evf£ =ǃ}~: 1TX hÆ ';`ˆC;-ZȬ~# bBs e?wM4c@`6ʟ "b# V6 KlX0R]Ca sƉ'>?C9g7$dNj$A'{_~_L-z61fԩe8$DL ~n3 "Qb}Ms;cРAnm5ф»1X<&nԨQ&lN&Gu oXtMiӦ78>5 MA8cI=S,_ru… aT 01 oe./cP7>19X!Zv`l`Y+&ϔ9ƚoԩ;鸦5p6o w ^잙[ʲngl}*ƧVͪ]LF01A"> .&24R}V0#ppU0%/${F) `bבA"xЪdG@"M0^Ih|K& b@ݻWgS(͸T=E)S*b)|&nvi(npoB$8BI&e0bNBY7ᚄC6% nQ,y Y^xzg69+,2*O= KP .2TEPoxW% 1 qnSC*F~<7svP9ǧ@e P`>g-cbpm4h/dh8ۑJ/| ;(DS~&,1g%:e3fLɀ࿝ma"}}dugC(EP(f)S<̩s)S!x/MdICXL}l?>%Q(B3a9@S̉q+WZdWa ˬb! }V$TN2MԄOC *+iaE"/<3aAAl6w}J>8Val(FjVIjQ"wi6T4~|u-vLKRX8}駮M6K.e魽H5+3f0-6fːA| 8Wq P0ڨ4\3] ~Qg|9njTH4 ~a|I'Y5Oni@K3'oE;̪~8L'}߀0I5L}A8'Ƴߝ>%)>AL&6 ! 
QXaN,RXŸDSN@%F"Ma><19FAi P xM5#$ĮI{a$AIvjD40* tCE7/ DtXUJ=&anV3,V 5'`!׿x3"z:d"HÄy\-!-f+$* Y>4IV]-{a)6A!>d@BpCxC$Z abڴi%~~NO G("U^_>;9i'7ޘvFih w6XDല؞+6 ȘH-Y(X=k4;,3`bw>s & Hz?'}a5e̿vua cn8(jP* HHHC`:DX!R:" b nͬ#XcICp^&X/mĪc& B:fE/q\c0 {.K"X`dKPJbL_ ޶m[kZ5z RIg5u~:蠃LiԨX@Of6|EQaW^nX=-"ihJ-^ԩ;xU)&g>sy^3p0V1/τQāX#xqLxY_\cx݉7H,J srJ~ADY2S„b⊆ Ca'gvSN9Ų<r!r k%R"o9.r[;b"M\IٴG(ДR!36(JsgU|J5-4)i&n){nLdaNF$D+WBka2l0 >= xfX%̒%KJdq>SEa )!P\cI)֔"-Z/#Go`BK|!7#a5CAt#.IX fЧX 1q˸V:t7nR A6z@E_Q␂;vrwc@ObHSPws ax'e|&|Tt9sew<\Wria щlC+a˓b="# ٛ2w 'VE q(;W1" !.]>|'>"sjyYHb kVʧ~W+!x%Mj$쏫97jП;wYy0E"M=_̱mK{*-=z$l'Jb/&!LDHM!` Se) ,Kܦ2 ]ʎ$߸">|ae xwDч~;,5"K/dc2Պ+NVu]M14}n9ZAas[n~NkH{_]AJ^w&4)i~iKJ&ZT $ Va܄5i/2D2};>Sܒ* &IFE 4UB 5i\(s\d:s-SWZD{c7 bU:s4 HIHC _eK.-r9͇:>mP"}ƌTa MҮVFM\HF"M~b?gΜi&i-ibk\+U$ &" M' vsa[|f's&E/=#VY44h*sX)P#pq'6]#Ć#^ Q~nT9XcD@"ME2/uvai?k+Y*^d{@ii %АK.!Q^=&8|`Y*RE]Tx<{e_&W&v%Z/FH2$T=A3td:HIH[ٓx4 ,E/=#VYȷ~!e,sI'>n8wכ@l2׽{w ^Y cLngy+/}ٛd z͠Dž-H J>$.3fg5k hXg#A@"MEfKꪫRk,]8_}Ek*8+hhnݰaCB˂u '?OOK IDAT>f2zhxxt'WO;4{ǎC=vqM~ꫯ4Dn6׹sg7yd7e?Å0lEBG ,i6rHx$8C JG@^܈#, ?*l<9ہ}YǔXg#O '7Ym|AJ9#D={۽n6@TyꩧLA̙6mr!_6+WHK/u}<ϟ?" m KHOJHtTu UbL s*E~o `QNE/#Tґ&PhƷ+{nb?cs%OLpIW_}q&4j_b47V&Ҽ ŸO;8Z% u{vYa]Oi$҄5r5 NRґ K䴗T4t)/_n" bM Dw߽lQEw*8+]hp]zk - 5s;Sk׮[hj̲y\,_~{;wĉeV1U4/ 9:&MW馛$~=3J5D4a}D4 NRґ'C Sq." Fk8Y!Ջ,JJґঃH.]ʚ6k,wZ jժŢhxba|A0i߾={2;ywc9M:e" lfvtC)/ TAX6ÒƷ3/0+!gnwyv!K94i\hdV|6с, Ց#@XƻwiAŚoI5YYD9`A^d{bUQg}f [w^jġSNJ+̢ww<q … /~@dc" )zɈG2A!nڂλI&sϵ l2;eƤ!$ܢEĤ)? 5^! 
)a'SXS" F"MXAIއ| V "\׮]sFƍ-]9]4'Nl`E#Ar@HHC5 >pNC~QOzbU$-"oAt SuY&t&}m۶ucǎuk׎ b 4$sex \#<,ik|&Ib k,c(^C<+ʄo;Y`ۓόdE@"F\ Zհ`A3UyqE+A58۪ܰaŅ#)_\924iRlsjlN?a~VC&~\cѧO &}~a)XBJ\]3YIUuA `ذaAP12aAL#ă/-lȰɅ!#Y,?9U1c\fK[6|Z1Lh 0bCK/7 \`A:/G&XvOL E BhKB^Xvrf{{,K){zf-񁨏,V3,D'&bdt]vI;ط_".HPW ^㙄OxBw8 ˮwGϸ`"n;E- c\`ec#K U#<ԩca1N}s)yʵ[S.9?~}џ!.//s;]M@h`1a WRvDClxê& *"p0?{'m˴i̺oY;`ܬ)X~|na y>YDGd !pssX}Ws߾}M zk6 ( I`)ԋ4@!|GlBh /5>\(3n]iig3_uGiوqnNV-iӦL^DIkĹޢ6-/ԭ[M\؁S?4P"Mc" AX)bȂBvW,O=Tǎ=B<؈СYT& xaݚ5kLH*H%,_5Eba-?͉4^xl 6obmALa11-V5;6Hsek06Mz7qMLfD&E߽H(8d1&ZXtM>A]1% q>KǏ7ư:6 c5W_m(B;bCHAeSQU*& \$>tS@mBl}` &@2!EL 7*NAѭSm&vLT}f"Ȼ;a s&k)XbGD4aE6qQATiԨ{,:g1 ;H,(9F;Y"lΒK2ֱ~b6ya7;eX݉x8qh@X4ْ^7om4x 6;Of6&>1~>)wHX@a|>(3CDX`Dc(Sal# 6!.&4mܪ`RH\ /<4h>gw#˿t4!Ԡ@Mfu3`MfmvL/bfG(Eg;XA؄gYȳXdEN;jq6$]O"Ds" n&~CĔ1{2PMx6aQL}LkbS-,d}XNF& n Dj,q½/Ngkիgps%dXC:a=5 c ~FM5/6+,FpcL!:?%cCXo2+ޒ_߽% K4cc. Gh353q%Ddl?D>pzsh K>GX`}駟Zf&D/ H`H`D ;)#/0!AN"P,( j x~be]#|n";'쮰G\&aٔ`Ž]~u=6h&tS\lGx4CN|EĽXUY SrԩdL`sE"3zlP Yš2@-n'O9:|cLrH*wXM/fn%-B mb88! Pݺuk;E]db ,,KX0-A͛G(DDO}d$ %HCp}b炵ވ4X0yS?}g\$<Xa oA`JΆDrso&Ř$b(f 4<ܽ`!kăk BFPäEE#3ڑ>L, iT+/Kqǧ9ZJȒ=Y#jdO _]UpYȂ1H5)9XgJa"/3SLqXxM 66XLK.1ƚ;XS|\cjag,x<+P1-A @x0߉yㇵ"nℍ~,rA0 qj8|=6B#⾌9,èV0d;b{}/_nnW=ZliueCpn}X1w] FNΖHSL&Oɿ\$Д;dOXB *mLL9<vNd3-䌉?1ia2o;og<6>DvsU@YLqŤZ%$l?XSJAvMTv I,fIL(*s1<ʋ\;A)aʧi;W,+ o9ls]epc0AʼoeѾͱ>'.Vlj&ODjPFQ>`OM]MDx!xhvda]*vLszvpGBHa7x?lnI_8tD~Rz7@(Lyi$҄5%<')VYHH#4{R|RԪ=?/.8 ~6`tD@DȰHr-MpA|e  R3ğ wXaC=KS 'OJfd"B_%TE(H#&/!8I JG@ Hɢ1?GV54X[Argg؁U$(PBRi"Ҽe1et0O _ix^{dl # nREOa4{ls"_XEMX$y4itDz~;#V .TpaI2'ݙg=CLUD@D j[DS2fb1s'!8y omڴ)KzuY`aܩ.r43~>{}+*oD4WXg#E $6cbXCy_kmuw =;2L}! 
HXD@Z-TčTX”OII2(23 PTSPMDy=4itDzLr{MH`C5jvg b̚5klQÄk_Vx2" ";jb\%ĽW4imtD{bݡrLę aFV3y" I# f=*&iH#&&!8I JG@ H)``X3ݣ=V6|!N@`H#py睮gϞeP!&,F"M~#觳%<')VYHH#4iuYD@"Lw;wt|5j%&$3i$҄Xg#E $Ҥf(Lfnذan}5&3XofeQTK"D$*8+)i$ &6@ tusq-[L'p#[lQa,"Аz*D4a - IUpV:RH@"M{]mȴjC qW\q~oo93{ӧ6mڔcǎ-YgUi\ʤAU{=װaCh"w֗PHgƍsg}\HXD@D 1$$+/uֹ5jXHݤI7h zjss#BM.ڵk" "r_{ꩧ)b4͛j֬5kHIP@?D0}qGm|?mTI@Dt|ZD@K@"MtF5T5kkݺ[pkԨ{͢ą3g޺J>Ijrhz-׳gON*/4 8uŵh‚)Ijrh;yn=(J>#7|p7ave 8uLHH!戀@ H}" "<_qݣ>ꮺ*cSO=սnԨQsY *dLn.2Ncƌ17'RS֬Ycq%S:)QJHƈ@HI@' " "T'Ntwu{gʕ+ˬj|M )qj 4eiZt!6k/w7̀:ttEYw9:6!$HIPV3D@D 1$$+H,<g/vw}ŰA R *Ь_mB A|AץKWZ (tL HHa&@ Htr" " H!N;>}Çxb<#FREy뭷,V2=z[nP3nܸ`HH7xÝvi<G#<ީj@ H)>sQD@D D9yfܹEv>4n8P6R54~FmzjѢEN`dBDK%@E~[$wnٲ;cMy@F+\+UD@D@DX$#" "PF^KmIDATĠzrm={OjmѠHS4Ժ@{n3Asq=z3fXJn,mX&H)PTyw݁XVݫ\0fϞZnmN4}ܘ1c @E!fN|L"N6!Ȋ&QcE@D@$ &L@I 1v}Yg:v|I0馛,6 1l^U)P@s׻ݻK.q .lV7|;SUWH "M5 xȊFRHS*@HύU Yԩ?v9\p"( ^{m{)h^uפI)_?şO?u{OEr!vfޚX4&#" "4aP5D@D@"C~p3gδXzw뮻fJ hH۴icgFi5kYT^=2UH4|.E@ Hc" " ذaC@B 4G-۷w}BHVkcQ@+sϙE Ž曖r[E$ f#͡C|ޔv;QkdC@"M6t@Ç9s(CY~B;իW͝;ׂVVrjrh 5d>Ċ*i6jwG" " # &q]xBK\|!5p-•aG hv^V[sE Ti~i7yP}BN9PDDhj%" "P 4}7 _t]v/AJ0_.?8A:I&P {q|EϊYPw"" "|ij |w&,_2%2&LwA"%w`%PJF!Uou$jV:BD@B@"MRzRĉ-kAxM[]PSFȃ@)E Uw:YH 4IICD@DJk׮uM6uz2|KPӤIKKZoVQko{u~HvJvv" "Pi AU,uYjԨJP$ЄT)"4Eí$M@ Hy" " 'PbW4 ;iU%'" ' }$e\:u5!M$D%DT;(4k@ Hv'Ivv" "Pi AUHNHQD@D$)" " " 1 &ڝ$&ډ@!H)U]SD@D@D@b@@"M;I"MGBHSĀDhwDhj'"  TuMիWZjvɭZ(5Xg#E@D )$$'ȁ.],Yg)t$D!" 
" " 9С4i뮻\sBvHxKґ" "iғj@'p~o~^xvp[xX~[oj +?_Zjnm ޘYlVn./" "Dt$ڵk]-{zᮽڂ 5gnFwWuݐ!C\޽Ms;v,[\ݺuM9c'|.\ve͛7oϟ~dxUA*D@"MNt$9s\M@@]vAX aÆ e˖f͚nĈnvs Rf̙֡C ㏻F5kָ#G1cƸ^{*7mԽ[nڴim۶eL.Rw &MĖUA*L@"Mt$'+Wtv;#\f,0.Pa.H3uT׭[75ju_mV/Ac=B}㰦U0_-pY4 " "inO/ݬYkb=goτBA9'D{6h~^hbbb]߾}믿n.O}K./D3\1T\șDDH?-^͝;| U &waÆ^ /4KzѣG;'cY&Nh6SLqt{G akߍ;uU"MXCpi4D@D@D@D@6!X_UVu֙6aP}Y R\(Kʼf Ype:묳,0f]8&=nԨQf1s'|bpܣ>c?idI5D@D@ F@D@D@D@Dhƒ/2wo?Gf&Wdt:#y.bdps={.,B:e]&hG7H>V E@D@D@D 2!࿸l=࿧v; +++ title = "API Reference" description = "Reference" keywords = ["Engine"] [menu.main] identifier="engine_remoteapi" parent="engine_ref" +++ # API Reference * [Docker Remote API](docker_remote_api.md) * [Docker Remote API client libraries](remote_api_client_libraries.md) docker-1.10.3/docs/reference/api/remote_api_client_libraries.md000066400000000000000000000212061267010174400245450ustar00rootroot00000000000000 # Docker Remote API client libraries These libraries have not been tested by the Docker maintainers for compatibility. Please file issues with the library owners. If you find more library implementations, please list them in Docker doc bugs and we will add the libraries here.
Language/Framework Name Repository Status
C# Docker.DotNet https://github.com/ahmetalpbalkan/Docker.DotNet Active
C++ lasote/docker_client http://www.biicode.com/lasote/docker_client (Biicode C++ dependency manager) Active
Erlang erldocker https://github.com/proger/erldocker Active
Dart bwu_docker https://github.com/bwu-dart/bwu_docker Active
Go engine-api https://github.com/docker/engine-api Active
Go go-dockerclient https://github.com/fsouza/go-dockerclient Active
Go dockerclient https://github.com/samalba/dockerclient Active
Gradle gradle-docker-plugin https://github.com/gesellix/gradle-docker-plugin Active
Groovy docker-client https://github.com/gesellix/docker-client Active
Haskell docker-hs https://github.com/denibertovic/docker-hs Active
HTML (Web Components) docker-elements https://github.com/kapalhq/docker-elements Active
Java docker-java https://github.com/docker-java/docker-java Active
Java docker-client https://github.com/spotify/docker-client Active
Java jclouds-docker https://github.com/jclouds/jclouds-labs/tree/master/docker Active
Java rx-docker-client https://github.com/shekhargulati/rx-docker-client Active
JavaScript (NodeJS) dockerode https://github.com/apocas/dockerode Install via NPM: npm install dockerode Active
JavaScript (NodeJS) docker.io https://github.com/appersonlabs/docker.io Install via NPM: npm install docker.io Active
JavaScript docker-js https://github.com/dgoujard/docker-js Outdated
JavaScript (Angular) WebUI Albatros https://github.com/dcylabs/albatros Active
JavaScript (Angular) WebUI docker-cp https://github.com/13W/docker-cp Active
JavaScript (Angular) WebUI dockerui https://github.com/crosbymichael/dockerui Active
JavaScript (Angular) WebUI dockery https://github.com/lexandro/dockery Active
Perl Net::Docker https://metacpan.org/pod/Net::Docker Active
Perl Eixo::Docker https://github.com/alambike/eixo-docker Active
PHP Alvine http://pear.alvine.io/ (alpha) Active
PHP Docker-PHP https://github.com/docker-php/docker-php Active
Python docker-py https://github.com/docker/docker-py Active
Ruby docker-api https://github.com/swipely/docker-api Active
Ruby docker-client https://github.com/geku/docker-client Outdated
Rust docker-rust https://github.com/abh1nav/docker-rust Active
Rust shiplift https://github.com/softprops/shiplift Active
Scala tugboat https://github.com/softprops/tugboat Active
Scala reactive-docker https://github.com/almoehi/reactive-docker Active
docker-1.10.3/docs/reference/builder.md000066400000000000000000001373061267010174400177150ustar00rootroot00000000000000 # Dockerfile reference Docker can build images automatically by reading the instructions from a `Dockerfile`. A `Dockerfile` is a text document that contains all the commands a user could call on the command line to assemble an image. Using `docker build` users can create an automated build that executes several command-line instructions in succession. This page describes the commands you can use in a `Dockerfile`. When you are done reading this page, refer to the [`Dockerfile` Best Practices](../userguide/eng-image/dockerfile_best-practices.md) for a tip-oriented guide. ## Usage The [`docker build`](commandline/build.md) command builds an image from a `Dockerfile` and a *context*. The build's context is the files at a specified location `PATH` or `URL`. The `PATH` is a directory on your local filesystem. The `URL` is a the location of a Git repository. A context is processed recursively. So, a `PATH` includes any subdirectories and the `URL` includes the repository and its submodules. A simple build command that uses the current directory as context: $ docker build . Sending build context to Docker daemon 6.51 MB ... The build is run by the Docker daemon, not by the CLI. The first thing a build process does is send the entire context (recursively) to the daemon. In most cases, it's best to start with an empty directory as context and keep your Dockerfile in that directory. Add only the files needed for building the Dockerfile. >**Warning**: Do not use your root directory, `/`, as the `PATH` as it causes >the build to transfer the entire contents of your hard drive to the Docker >daemon. To use a file in the build context, the `Dockerfile` refers to the file specified in an instruction, for example, a `COPY` instruction. To increase the build's performance, exclude files and directories by adding a `.dockerignore` file to the context directory. 
For information about how to [create a `.dockerignore` file](#dockerignore-file) see the documentation on this page. Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root of the context. You use the `-f` flag with `docker build` to point to a Dockerfile anywhere in your file system. $ docker build -f /path/to/a/Dockerfile . You can specify a repository and tag at which to save the new image if the build succeeds: $ docker build -t shykes/myapp . To tag the image into multiple repositories after the build, add multiple `-t` parameters when you run the `build` command: $ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest . The Docker daemon runs the instructions in the `Dockerfile` one-by-one, committing the result of each instruction to a new image if necessary, before finally outputting the ID of your new image. The Docker daemon will automatically clean up the context you sent. Note that each instruction is run independently, and causes a new image to be created - so `RUN cd /tmp` will not have any effect on the next instructions. Whenever possible, Docker will re-use the intermediate images (cache), to accelerate the `docker build` process significantly. This is indicated by the `Using cache` message in the console output. (For more information, see the [Build cache section](../userguide/eng-image/dockerfile_best-practices.md#build-cache)) in the `Dockerfile` best practices guide: $ docker build -t svendowideit/ambassador . 
Sending build context to Docker daemon 15.36 kB Step 0 : FROM alpine:3.2 ---> 31f630c65071 Step 1 : MAINTAINER SvenDowideit@home.org.au ---> Using cache ---> 2a1c91448f5f Step 2 : RUN apk update && apk add socat && rm -r /var/cache/ ---> Using cache ---> 21ed6e7fbb73 Step 3 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh ---> Using cache ---> 7ea8aef582cc Successfully built 7ea8aef582cc When you're done with your build, you're ready to look into [*Pushing a repository to its registry*](../userguide/containers/dockerrepos.md#contributing-to-docker-hub). ## Format Here is the format of the `Dockerfile`: # Comment INSTRUCTION arguments The instruction is not case-sensitive, however convention is for them to be UPPERCASE in order to distinguish them from arguments more easily. Docker runs the instructions in a `Dockerfile` in order. **The first instruction must be \`FROM\`** in order to specify the [*Base Image*](glossary.md#base-image) from which you are building. Docker will treat lines that *begin* with `#` as a comment. A `#` marker anywhere else in the line will be treated as an argument. This allows statements like: # Comment RUN echo 'we are running some # of cool things' Here is the set of instructions you can use in a `Dockerfile` for building images. ### Environment replacement Environment variables (declared with [the `ENV` statement](#env)) can also be used in certain instructions as variables to be interpreted by the `Dockerfile`. Escapes are also handled for including variable-like syntax into a statement literally. Environment variables are notated in the `Dockerfile` either with `$variable_name` or `${variable_name}`. They are treated equivalently and the brace syntax is typically used to address issues with variable names with no whitespace, like `${foo}_bar`. 
The `${variable_name}` syntax also supports a few of the standard `bash` modifiers as specified below: * `${variable:-word}` indicates that if `variable` is set then the result will be that value. If `variable` is not set then `word` will be the result. * `${variable:+word}` indicates that if `variable` is set then `word` will be the result, otherwise the result is the empty string. In all cases, `word` can be any string, including additional environment variables. Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, for example, will translate to `$foo` and `${foo}` literals respectively. Example (parsed representation is displayed after the `#`): FROM busybox ENV foo /bar WORKDIR ${foo} # WORKDIR /bar ADD . $foo # ADD . /bar COPY \$foo /quux # COPY $foo /quux Environment variables are supported by the following list of instructions in the `Dockerfile`: * `ADD` * `COPY` * `ENV` * `EXPOSE` * `LABEL` * `USER` * `WORKDIR` * `VOLUME` * `STOPSIGNAL` as well as: * `ONBUILD` (when combined with one of the supported instructions above) > **Note**: > prior to 1.4, `ONBUILD` instructions did **NOT** support environment > variable, even when combined with any of the instructions listed above. Environment variable substitution will use the same value for each variable throughout the entire command. In other words, in this example: ENV abc=hello ENV abc=bye def=$abc ENV ghi=$abc will result in `def` having a value of `hello`, not `bye`. However, `ghi` will have a value of `bye` because it is not part of the same command that set `abc` to `bye`. ### .dockerignore file Before the docker CLI sends the context to the docker daemon, it looks for a file named `.dockerignore` in the root directory of the context. If this file exists, the CLI modifies the context to exclude files and directories that match patterns in it. 
This helps to avoid unnecessarily sending large or sensitive files and directories to the daemon and potentially adding them to images using `ADD` or `COPY`. The CLI interprets the `.dockerignore` file as a newline-separated list of patterns similar to the file globs of Unix shells. For the purposes of matching, the root of the context is considered to be both the working and the root directory. For example, the patterns `/foo/bar` and `foo/bar` both exclude a file or directory named `bar` in the `foo` subdirectory of `PATH` or in the root of the git repository located at `URL`. Neither excludes anything else. Here is an example `.dockerignore` file: ``` */temp* */*/temp* temp? ``` This file causes the following build behavior: | Rule | Behavior | |----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. | | `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | | `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. Matching is done using Go's [filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A preprocessing step removes leading and trailing whitespace and eliminates `.` and `..` elements using Go's [filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines that are blank after preprocessing are ignored. Beyond Go's filepath.Match rules, Docker also supports a special wildcard string `**` that matches any number of directories (including zero). 
For example, `**/*.go` will exclude all files that end with `.go` that are found in all directories, including the root of the build context. Lines starting with `!` (exclamation mark) can be used to make exceptions to exclusions. The following is an example `.dockerignore` file that uses this mechanism: ``` *.md !README.md ``` All markdown files *except* `README.md` are excluded from the context. The placement of `!` exception rules influences the behavior: the last line of the `.dockerignore` that matches a particular file determines whether it is included or excluded. Consider the following example: ``` *.md !README*.md README-secret.md ``` No markdown files are included in the context except README files other than `README-secret.md`. Now consider this example: ``` *.md README-secret.md !README*.md ``` All of the README files are included. The middle line has no effect because `!README*.md` matches `README-secret.md` and comes last. You can even use the `.dockerignore` file to exclude the `Dockerfile` and `.dockerignore` files. These files are still sent to the daemon because it needs them to do its job. But the `ADD` and `COPY` commands do not copy them to the image. Finally, you may want to specify which files to include in the context, rather than which to exclude. To achieve this, specify `*` as the first pattern, followed by one or more `!` exception patterns. **Note**: For historical reasons, the pattern `.` is ignored. ## FROM FROM Or FROM : Or FROM @ The `FROM` instruction sets the [*Base Image*](glossary.md#base-image) for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as its first instruction. The image can be any valid image – it is especially easy to start by **pulling an image** from the [*Public Repositories*](../userguide/containers/dockerrepos.md). - `FROM` must be the first non-comment instruction in the `Dockerfile`. - `FROM` can appear multiple times within a single `Dockerfile` in order to create multiple images. 
Simply make a note of the last image ID output by the commit before each new `FROM` command. - The `tag` or `digest` values are optional. If you omit either of them, the builder assumes a `latest` by default. The builder returns an error if it cannot match the `tag` value. ## MAINTAINER MAINTAINER The `MAINTAINER` instruction allows you to set the *Author* field of the generated images. ## RUN RUN has 2 forms: - `RUN ` (*shell* form, the command is run in a shell - `/bin/sh -c`) - `RUN ["executable", "param1", "param2"]` (*exec* form) The `RUN` instruction will execute any commands in a new layer on top of the current image and commit the results. The resulting committed image will be used for the next step in the `Dockerfile`. Layering `RUN` instructions and generating commits conforms to the core concepts of Docker where commits are cheap and containers can be created from any point in an image's history, much like source control. The *exec* form makes it possible to avoid shell string munging, and to `RUN` commands using a base image that does not contain `/bin/sh`. In the *shell* form you can use a `\` (backslash) to continue a single RUN instruction onto the next line. For example, consider these two lines: ``` RUN /bin/bash -c 'source $HOME/.bashrc ;\ echo $HOME' ``` Together they are equivalent to this single line: ``` RUN /bin/bash -c 'source $HOME/.bashrc ; echo $HOME' ``` > **Note**: > To use a different shell, other than '/bin/sh', use the *exec* form > passing in the desired shell. For example, > `RUN ["/bin/bash", "-c", "echo hello"]` > **Note**: > The *exec* form is parsed as a JSON array, which means that > you must use double-quotes (") around words not single-quotes ('). > **Note**: > Unlike the *shell* form, the *exec* form does not invoke a command shell. > This means that normal shell processing does not happen. For example, > `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. 
> If you want shell processing then either use the *shell* form or execute > a shell directly, for example: `RUN [ "sh", "-c", "echo", "$HOME" ]`. The cache for `RUN` instructions isn't invalidated automatically during the next build. The cache for an instruction like `RUN apt-get dist-upgrade -y` will be reused during the next build. The cache for `RUN` instructions can be invalidated by using the `--no-cache` flag, for example `docker build --no-cache`. See the [`Dockerfile` Best Practices guide](../userguide/eng-image/dockerfile_best-practices.md#build-cache) for more information. The cache for `RUN` instructions can be invalidated by `ADD` instructions. See [below](#add) for details. ### Known issues (RUN) - [Issue 783](https://github.com/docker/docker/issues/783) is about file permissions problems that can occur when using the AUFS file system. You might notice it during an attempt to `rm` a file, for example. For systems that have recent aufs version (i.e., `dirperm1` mount option can be set), docker will attempt to fix the issue automatically by mounting the layers with `dirperm1` option. More details on `dirperm1` option can be found at [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html) If your system doesn't have support for `dirperm1`, the issue describes a workaround. ## CMD The `CMD` instruction has three forms: - `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form) - `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) - `CMD command param1 param2` (*shell* form) There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD` then only the last `CMD` will take effect. **The main purpose of a `CMD` is to provide defaults for an executing container.** These defaults can include an executable, or they can omit the executable, in which case you must specify an `ENTRYPOINT` instruction as well. 
> **Note**: > If `CMD` is used to provide default arguments for the `ENTRYPOINT` > instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified > with the JSON array format. > **Note**: > The *exec* form is parsed as a JSON array, which means that > you must use double-quotes (") around words not single-quotes ('). > **Note**: > Unlike the *shell* form, the *exec* form does not invoke a command shell. > This means that normal shell processing does not happen. For example, > `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. > If you want shell processing then either use the *shell* form or execute > a shell directly, for example: `CMD [ "sh", "-c", "echo", "$HOME" ]`. When used in the shell or exec formats, the `CMD` instruction sets the command to be executed when running the image. If you use the *shell* form of the `CMD`, then the `` will execute in `/bin/sh -c`: FROM ubuntu CMD echo "This is a test." | wc - If you want to **run your** `` **without a shell** then you must express the command as a JSON array and give the full path to the executable. **This array form is the preferred format of `CMD`.** Any additional parameters must be individually expressed as strings in the array: FROM ubuntu CMD ["/usr/bin/wc","--help"] If you would like your container to run the same executable every time, then you should consider using `ENTRYPOINT` in combination with `CMD`. See [*ENTRYPOINT*](#entrypoint). If the user specifies arguments to `docker run` then they will override the default specified in `CMD`. > **Note**: > don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits > the result; `CMD` does not execute anything at build time, but specifies > the intended command for the image. ## LABEL LABEL = = = ... The `LABEL` instruction adds metadata to an image. A `LABEL` is a key-value pair. To include spaces within a `LABEL` value, use quotes and backslashes as you would in command-line parsing. 
A few usage examples: LABEL "com.example.vendor"="ACME Incorporated" LABEL com.example.label-with-value="foo" LABEL version="1.0" LABEL description="This text illustrates \ that label-values can span multiple lines." An image can have more than one label. To specify multiple labels, Docker recommends combining labels into a single `LABEL` instruction where possible. Each `LABEL` instruction produces a new layer which can result in an inefficient image if you use many labels. This example results in a single image layer. LABEL multi.label1="value1" multi.label2="value2" other="value3" The above can also be written as: LABEL multi.label1="value1" \ multi.label2="value2" \ other="value3" Labels are additive including `LABEL`s in `FROM` images. If Docker encounters a label/key that already exists, the new value overrides any previous labels with identical keys. To view an image's labels, use the `docker inspect` command. "Labels": { "com.example.vendor": "ACME Incorporated" "com.example.label-with-value": "foo", "version": "1.0", "description": "This text illustrates that label-values can span multiple lines.", "multi.label1": "value1", "multi.label2": "value2", "other": "value3" }, ## EXPOSE EXPOSE [...] The `EXPOSE` instruction informs Docker that the container listens on the specified network ports at runtime. `EXPOSE` does not make the ports of the container accessible to the host. To do that, you must use either the `-p` flag to publish a range of ports or the `-P` flag to publish all of the exposed ports. You can expose one port number and publish it externally under another number. To set up port redirection on the host system, see [using the -P flag](run.md#expose-incoming-ports). The Docker network feature supports creating networks without the need to expose ports within the network, for detailed information see the [overview of this feature](../userguide/networking/index.md)). ## ENV ENV ENV = ... 
The `ENV` instruction sets the environment variable `<key>` to the value `<value>`. This value will be in the environment of all "descendent" `Dockerfile` commands and can be [replaced inline](#environment-replacement) in many as well. The `ENV` instruction has two forms. The first form, `ENV <key> <value>`, will set a single variable to a value. The entire string after the first space will be treated as the `<value>` - including characters such as spaces and quotes. The second form, `ENV <key>=<value> ...`, allows for multiple variables to be set at one time.
For example: ADD hom* /mydir/ # adds all files starting with "hom" ADD hom?.txt /mydir/ # ? is replaced with any single character, e.g., "home.txt" The `` is an absolute path, or a path relative to `WORKDIR`, into which the source will be copied inside the destination container. ADD test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ ADD test /absoluteDir # adds "test" to /absoluteDir All new files and directories are created with a UID and GID of 0. In the case where `` is a remote file URL, the destination will have permissions of 600. If the remote file being retrieved has an HTTP `Last-Modified` header, the timestamp from that header will be used to set the `mtime` on the destination file. However, like any other file processed during an `ADD`, `mtime` will not be included in the determination of whether or not the file has changed and the cache should be updated. > **Note**: > If you build by passing a `Dockerfile` through STDIN (`docker > build - < somefile`), there is no build context, so the `Dockerfile` > can only contain a URL based `ADD` instruction. You can also pass a > compressed archive through STDIN: (`docker build - < archive.tar.gz`), > the `Dockerfile` at the root of the archive and the rest of the > archive will get used at the context of the build. > **Note**: > If your URL files are protected using authentication, you > will need to use `RUN wget`, `RUN curl` or use another tool from > within the container as the `ADD` instruction does not support > authentication. > **Note**: > The first encountered `ADD` instruction will invalidate the cache for all > following instructions from the Dockerfile if the contents of `` have > changed. This includes invalidating the cache for `RUN` instructions. > See the [`Dockerfile` Best Practices guide](../userguide/eng-image/dockerfile_best-practices.md#build-cache) for more information. 
`ADD` obeys the following rules: - The `` path must be inside the *context* of the build; you cannot `ADD ../something /something`, because the first step of a `docker build` is to send the context directory (and subdirectories) to the docker daemon. - If `` is a URL and `` does not end with a trailing slash, then a file is downloaded from the URL and copied to ``. - If `` is a URL and `` does end with a trailing slash, then the filename is inferred from the URL and the file is downloaded to `/`. For instance, `ADD http://example.com/foobar /` would create the file `/foobar`. The URL must have a nontrivial path so that an appropriate filename can be discovered in this case (`http://example.com` will not work). - If `` is a directory, the entire contents of the directory are copied, including filesystem metadata. > **Note**: > The directory itself is not copied, just its contents. - If `` is a *local* tar archive in a recognized compression format (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources from *remote* URLs are **not** decompressed. When a directory is copied or unpacked, it has the same behavior as `tar -x`: the result is the union of: 1. Whatever existed at the destination path and 2. The contents of the source tree, with conflicts resolved in favor of "2." on a file-by-file basis. > **Note**: > Whether a file is identified as a recognized compression format or not > is done soley based on the contents of the file, not the name of the file. > For example, if an empty file happens to end with `.tar.gz` this will not > be recognized as a compressed file and **will not** generate any kind of > decompression error message, rather the file will simply be copied to the > destination. - If `` is any other kind of file, it is copied individually along with its metadata. In this case, if `` ends with a trailing slash `/`, it will be considered a directory and the contents of `` will be written at `/base()`. 
- If multiple `` resources are specified, either directly or due to the use of a wildcard, then `` must be a directory, and it must end with a slash `/`. - If `` does not end with a trailing slash, it will be considered a regular file and the contents of `` will be written at ``. - If `` doesn't exist, it is created along with all missing directories in its path. ## COPY COPY has two forms: - `COPY ... ` - `COPY ["",... ""]` (this form is required for paths containing whitespace) The `COPY` instruction copies new files or directories from `` and adds them to the filesystem of the container at the path ``. Multiple `` resource may be specified but they must be relative to the source directory that is being built (the context of the build). Each `` may contain wildcards and matching will be done using Go's [filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example: COPY hom* /mydir/ # adds all files starting with "hom" COPY hom?.txt /mydir/ # ? is replaced with any single character, e.g., "home.txt" The `` is an absolute path, or a path relative to `WORKDIR`, into which the source will be copied inside the destination container. COPY test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ COPY test /absoluteDir # adds "test" to /absoluteDir All new files and directories are created with a UID and GID of 0. > **Note**: > If you build using STDIN (`docker build - < somefile`), there is no > build context, so `COPY` can't be used. `COPY` obeys the following rules: - The `` path must be inside the *context* of the build; you cannot `COPY ../something /something`, because the first step of a `docker build` is to send the context directory (and subdirectories) to the docker daemon. - If `` is a directory, the entire contents of the directory are copied, including filesystem metadata. > **Note**: > The directory itself is not copied, just its contents. - If `` is any other kind of file, it is copied individually along with its metadata. 
In this case, if `` ends with a trailing slash `/`, it will be considered a directory and the contents of `` will be written at `/base()`. - If multiple `` resources are specified, either directly or due to the use of a wildcard, then `` must be a directory, and it must end with a slash `/`. - If `` does not end with a trailing slash, it will be considered a regular file and the contents of `` will be written at ``. - If `` doesn't exist, it is created along with all missing directories in its path. ## ENTRYPOINT ENTRYPOINT has two forms: - `ENTRYPOINT ["executable", "param1", "param2"]` (*exec* form, preferred) - `ENTRYPOINT command param1 param2` (*shell* form) An `ENTRYPOINT` allows you to configure a container that will run as an executable. For example, the following will start nginx with its default content, listening on port 80: docker run -i -t --rm -p 80:80 nginx Command line arguments to `docker run ` will be appended after all elements in an *exec* form `ENTRYPOINT`, and will override all elements specified using `CMD`. This allows arguments to be passed to the entry point, i.e., `docker run -d` will pass the `-d` argument to the entry point. You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint` flag. The *shell* form prevents any `CMD` or `run` command line arguments from being used, but has the disadvantage that your `ENTRYPOINT` will be started as a subcommand of `/bin/sh -c`, which does not pass signals. This means that the executable will not be the container's `PID 1` - and will _not_ receive Unix signals - so your executable will not receive a `SIGTERM` from `docker stop `. Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect. ### Exec form ENTRYPOINT example You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands and arguments and then use either form of `CMD` to set additional defaults that are more likely to be changed. 
FROM ubuntu ENTRYPOINT ["top", "-b"] CMD ["-c"] When you run the container, you can see that `top` is the only process: $ docker run -it --rm --name test top -H top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05 Threads: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st KiB Mem: 2056668 total, 1616832 used, 439836 free, 99352 buffers KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 1 root 20 0 19744 2336 2080 R 0.0 0.1 0:00.04 top To examine the result further, you can use `docker exec`: $ docker exec -it test ps aux USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 2.6 0.1 19752 2352 ? Ss+ 08:24 0:00 top -b -H root 7 0.0 0.1 15572 2164 ? R+ 08:25 0:00 ps aux And you can gracefully request `top` to shut down using `docker stop test`. The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the foreground (i.e., as `PID 1`): ``` FROM debian:stable RUN apt-get update && apt-get install -y --force-yes apache2 EXPOSE 80 443 VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] ``` If you need to write a starter script for a single executable, you can ensure that the final executable receives the Unix signals by using `exec` and `gosu` commands: ```bash #!/bin/bash set -e if [ "$1" = 'postgres' ]; then chown -R postgres "$PGDATA" if [ -z "$(ls -A "$PGDATA")" ]; then gosu postgres initdb fi exec gosu postgres "$@" fi exec "$@" ``` Lastly, if you need to do some extra cleanup (or communicate with other containers) on shutdown, or are co-ordinating more than one executable, you may need to ensure that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then does some more work: ``` #!/bin/sh # Note: I've written this using sh so it works in the busybox container too # USE the trap if you need to also do manual 
cleanup after the service is stopped, # or need to start multiple services in the one container trap "echo TRAPed signal" HUP INT QUIT KILL TERM # start service in background here /usr/sbin/apachectl start echo "[hit enter key to exit] or run 'docker stop '" read # stop service and clean up here echo "stopping apache" /usr/sbin/apachectl stop echo "exited $0" ``` If you run this image with `docker run -it --rm -p 80:80 --name test apache`, you can then examine the container's processes with `docker exec`, or `docker top`, and then ask the script to stop Apache: ```bash $ docker exec -it test ps aux USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 4448 692 ? Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 root 19 0.0 0.2 71304 4440 ? Ss 00:42 0:00 /usr/sbin/apache2 -k start www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux $ docker top test PID USER COMMAND 10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 10054 root /usr/sbin/apache2 -k start 10055 33 /usr/sbin/apache2 -k start 10056 33 /usr/sbin/apache2 -k start $ /usr/bin/time docker stop test test real 0m 0.27s user 0m 0.03s sys 0m 0.03s ``` > **Note:** you can over ride the `ENTRYPOINT` setting using `--entrypoint`, > but this can only set the binary to *exec* (no `sh -c` will be used). > **Note**: > The *exec* form is parsed as a JSON array, which means that > you must use double-quotes (") around words not single-quotes ('). > **Note**: > Unlike the *shell* form, the *exec* form does not invoke a command shell. > This means that normal shell processing does not happen. For example, > `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. > If you want shell processing then either use the *shell* form or execute > a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo", "$HOME" ]`. 
Variables that are defined in the `Dockerfile` using `ENV` will be substituted by > the `Dockerfile` parser.
If you then run `docker stop test`, the container will not exit cleanly - the `stop` command will be forced to send a `SIGKILL` after the timeout: $ docker exec -it test ps aux PID USER COMMAND 1 root /bin/sh -c top -b cmd cmd2 7 root top -b 8 root ps aux $ /usr/bin/time docker stop test test real 0m 10.19s user 0m 0.04s sys 0m 0.03s ## VOLUME VOLUME ["/data"] The `VOLUME` instruction creates a mount point with the specified name and marks it as holding externally mounted volumes from native host or other containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log /var/db`. For more information/examples and mounting instructions via the Docker client, refer to [*Share Directories via Volumes*](../userguide/containers/dockervolumes.md#mount-a-host-directory-as-a-data-volume) documentation. The `docker run` command initializes the newly created volume with any data that exists at the specified location within the base image. For example, consider the following Dockerfile snippet: FROM ubuntu RUN mkdir /myvol RUN echo "hello world" > /myvol/greeting VOLUME /myvol This Dockerfile results in an image that causes `docker run`, to create a new mount point at `/myvol` and copy the `greeting` file into the newly created volume. > **Note**: > If any build steps change the data within the volume after it has been > declared, those changes will be discarded. > **Note**: > The list is parsed as a JSON array, which means that > you must use double-quotes (") around words not single-quotes ('). ## USER USER daemon The `USER` instruction sets the user name or UID to use when running the image and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the `Dockerfile`. ## WORKDIR WORKDIR /path/to/workdir The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`, `ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`. 
It can be used multiple times in the one `Dockerfile`. If a relative path is provided, it will be relative to the path of the previous `WORKDIR` instruction. For example: WORKDIR /a WORKDIR b WORKDIR c RUN pwd The output of the final `pwd` command in this `Dockerfile` would be `/a/b/c`. The `WORKDIR` instruction can resolve environment variables previously set using `ENV`. You can only use environment variables explicitly set in the `Dockerfile`. For example: ENV DIRPATH /path WORKDIR $DIRPATH/$DIRNAME RUN pwd The output of the final `pwd` command in this `Dockerfile` would be `/path/$DIRNAME` ## ARG ARG [=] The `ARG` instruction defines a variable that users can pass at build-time to the builder with the `docker build` command using the `--build-arg =` flag. If a user specifies a build argument that was not defined in the Dockerfile, the build outputs an error. ``` One or more build-args were not consumed, failing build. ``` The Dockerfile author can define a single variable by specifying `ARG` once or many variables by specifying `ARG` more than once. For example, a valid Dockerfile: ``` FROM busybox ARG user1 ARG buildno ... ``` A Dockerfile author may optionally specify a default value for an `ARG` instruction: ``` FROM busybox ARG user1=someuser ARG buildno=1 ... ``` If an `ARG` value has a default and if there is no value passed at build-time, the builder uses the default. An `ARG` variable definition comes into effect from the line on which it is defined in the `Dockerfile` not from the argument's use on the command-line or elsewhere. For example, consider this Dockerfile: ``` 1 FROM busybox 2 USER ${user:-some_user} 3 ARG user 4 USER $user ... ``` A user builds this file by calling: ``` $ docker build --build-arg user=what_user Dockerfile ``` The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the subsequent line 3. 
The `USER` at line 4 evaluates to `what_user` as `user` is defined and the `what_user` value was passed on the command line. Prior to its definition by an `ARG` instruction, any use of a variable results in an empty string. > **Note:** It is not recommended to use build-time variables for > passing secrets like github keys, user credentials etc. You can use an `ARG` or an `ENV` instruction to specify variables that are available to the `RUN` instruction. Environment variables defined using the `ENV` instruction always override an `ARG` instruction of the same name. Consider this Dockerfile with an `ENV` and `ARG` instruction. ``` 1 FROM ubuntu 2 ARG CONT_IMG_VER 3 ENV CONT_IMG_VER v1.0.0 4 RUN echo $CONT_IMG_VER ``` Then, assume this image is built with this command: ``` $ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile ``` In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting passed by the user:`v2.0.1` This behavior is similar to a shell script where a locally scoped variable overrides the variables passed as arguments or inherited from environment, from its point of definition. Using the example above but a different `ENV` specification you can create more useful interactions between `ARG` and `ENV` instructions: ``` 1 FROM ubuntu 2 ARG CONT_IMG_VER 3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} 4 RUN echo $CONT_IMG_VER ``` Unlike an `ARG` instruction, `ENV` values are always persisted in the built image. Consider a docker build without the --build-arg flag: ``` $ docker build Dockerfile ``` Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. The variable expansion technique in this example allows you to pass arguments from the command line and persist them in the final image by leveraging the `ENV` instruction. 
Variable expansion is only supported for [a limited set of Dockerfile instructions.](#environment-replacement) Docker has a set of predefined `ARG` variables that you can use without a corresponding `ARG` instruction in the Dockerfile. * `HTTP_PROXY` * `http_proxy` * `HTTPS_PROXY` * `https_proxy` * `FTP_PROXY` * `ftp_proxy` * `NO_PROXY` * `no_proxy` To use these, simply pass them on the command line using the `--build-arg =` flag. ### Impact on build caching `ARG` variables are not persisted into the built image as `ENV` variables are. However, `ARG` variables do impact the build cache in similar ways. If a Dockerfile defines an `ARG` variable whose value is different from a previous build, then a "cache miss" occurs upon its first usage, not its declaration. For example, consider this Dockerfile: ``` 1 FROM ubuntu 2 ARG CONT_IMG_VER 3 RUN echo $CONT_IMG_VER ``` If you specify `--build-arg CONT_IMG_VER=` on the command line the specification on line 2 does not cause a cache miss; line 3 does cause a cache miss. The definition on line 2 has no impact on the resulting image. The `RUN` on line 3 executes a command and in doing so defines a set of environment variables, including `CONT_IMG_VER`. At that point, the `ARG` variable may impact the resulting image, so a cache miss occurs. Consider another example under the same command line: ``` 1 FROM ubuntu 2 ARG CONT_IMG_VER 3 ENV CONT_IMG_VER $CONT_IMG_VER 4 RUN echo $CONT_IMG_VER ``` In this example, the cache miss occurs on line 3. The miss happens because the variable's value in the `ENV` references the `ARG` variable and that variable is changed through the command line. In this example, the `ENV` command causes the image to include the value. ## ONBUILD ONBUILD [INSTRUCTION] The `ONBUILD` instruction adds to the image a *trigger* instruction to be executed at a later time, when the image is used as the base for another build. 
The trigger will be executed in the context of the downstream build, as if it had been inserted immediately after the `FROM` instruction in the downstream `Dockerfile`. Any build instruction can be registered as a trigger. This is useful if you are building an image which will be used as a base to build other images, for example an application build environment or a daemon which may be customized with user-specific configuration. For example, if your image is a reusable Python application builder, it will require application source code to be added in a particular directory, and it might require a build script to be called *after* that. You can't just call `ADD` and `RUN` now, because you don't yet have access to the application source code, and it will be different for each application build. You could simply provide application developers with a boilerplate `Dockerfile` to copy-paste into their application, but that is inefficient, error-prone and difficult to update because it mixes with application-specific code. The solution is to use `ONBUILD` to register advance instructions to run later, during the next build stage. Here's how it works: 1. When it encounters an `ONBUILD` instruction, the builder adds a trigger to the metadata of the image being built. The instruction does not otherwise affect the current build. 2. At the end of the build, a list of all triggers is stored in the image manifest, under the key `OnBuild`. They can be inspected with the `docker inspect` command. 3. Later the image may be used as a base for a new build, using the `FROM` instruction. As part of processing the `FROM` instruction, the downstream builder looks for `ONBUILD` triggers, and executes them in the same order they were registered. If any of the triggers fail, the `FROM` instruction is aborted which in turn causes the build to fail. If all triggers succeed, the `FROM` instruction completes and the build continues as usual. 4. 
Triggers are cleared from the final image after being executed. In other words they are not inherited by "grand-children" builds. For example you might add something like this: [...] ONBUILD ADD . /app/src ONBUILD RUN /usr/local/bin/python-build --dir /app/src [...] > **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. > **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. ## STOPSIGNAL STOPSIGNAL signal The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit. This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, or a signal name in the format SIGNAME, for instance SIGKILL. ## Dockerfile examples Below you can see some examples of Dockerfile syntax. If you're interested in something more realistic, take a look at the list of [Dockerization examples](../examples/index.md). ``` # Nginx # # VERSION 0.0.1 FROM ubuntu MAINTAINER Victor Vieux LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server ``` ``` # Firefox over VNC # # VERSION 0.3 FROM ubuntu # Install vnc, xvfb in order to create a 'fake' display and firefox RUN apt-get update && apt-get install -y x11vnc xvfb firefox RUN mkdir ~/.vnc # Setup a password RUN x11vnc -storepasswd 1234 ~/.vnc/passwd # Autostart firefox (might not be the best way, but it does the trick) RUN bash -c 'echo "firefox" >> /.bashrc' EXPOSE 5900 CMD ["x11vnc", "-forever", "-usepw", "-create"] ``` ``` # Multiple images example # # VERSION 0.1 FROM ubuntu RUN echo foo > bar # Will output something like ===> 907ad6c2736f FROM ubuntu RUN echo moo > oink # Will output something like ===> 695d7793cbe4 # You᾿ll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with # /oink. 
``` docker-1.10.3/docs/reference/commandline/000077500000000000000000000000001267010174400202215ustar00rootroot00000000000000docker-1.10.3/docs/reference/commandline/attach.md000066400000000000000000000114521267010174400220120ustar00rootroot00000000000000 # attach Usage: docker attach [OPTIONS] CONTAINER Attach to a running container --detach-keys="" Set up escape key sequence --help Print usage --no-stdin Do not attach STDIN --sig-proxy=true Proxy all received signals to the process The `docker attach` command allows you to attach to a running container using the container's ID or name, either to view its ongoing output or to control it interactively. You can attach to the same contained process multiple times simultaneously, screen sharing style, or quickly view the progress of your detached process. To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to the container. You can detach from a container and leave it running using the using `CTRL-p CTRL-q` key sequence. > **Note:** > A process running as PID 1 inside a container is treated specially by > Linux: it ignores any signal with the default action. So, the process > will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do > so. It is forbidden to redirect the standard input of a `docker attach` command while attaching to a tty-enabled container (i.e.: launched with `-t`). ## Override the detach sequence If you want, you can configure a override the Docker key sequence for detach. This is is useful if the Docker default sequence conflicts with key squence you use for other applications. There are two ways to defines a your own detach key sequence, as a per-container override or as a configuration property on your entire configuration. To override the sequence for an individual container, use the `--detach-keys=""` flag with the `docker attach` command. 
The format of the `` is either a letter [a-Z], or the `ctrl-` combined with any of the following: * `a-z` (a single lowercase alpha character ) * `@` (at sign) * `[` (left bracket) * `\\` (two backward slashes) * `_` (underscore) * `^` (caret)
0 $ docker ps -a | grep topdemo 7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo And in this second example, you can see the exit code returned by the `bash` process is returned by the `docker attach` command to its caller too: $ docker run --name test -d -it debian 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab $ docker attach test $$ exit 13 exit $ echo $? 13 $ docker ps -a | grep test 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test docker-1.10.3/docs/reference/commandline/build.md000066400000000000000000000345341267010174400216530ustar00rootroot00000000000000 # build Usage: docker build [OPTIONS] PATH | URL | - Build a new image from the source code at PATH --build-arg=[] Set build-time variables --cpu-shares CPU Shares (relative weight) --cgroup-parent="" Optional parent cgroup for the container --cpu-period=0 Limit the CPU CFS (Completely Fair Scheduler) period --cpu-quota=0 Limit the CPU CFS (Completely Fair Scheduler) quota --cpuset-cpus="" CPUs in which to allow execution, e.g. `0-3`, `0,1` --cpuset-mems="" MEMs in which to allow execution, e.g. `0-3`, `0,1` --disable-content-trust=true Skip image verification -f, --file="" Name of the Dockerfile (Default is 'PATH/Dockerfile') --force-rm Always remove intermediate containers --help Print usage --isolation="" Container isolation technology -m, --memory="" Memory limit for all build containers --memory-swap="" A positive integer equal to memory plus swap. Specify -1 to enable unlimited swap. --no-cache Do not use cache when building the image --pull Always attempt to pull a newer version of the image -q, --quiet Suppress the build output and print image ID on success --rm=true Remove intermediate containers after a successful build --shm-size=[] Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). 
If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. -t, --tag=[] Name and optionally a tag in the 'name:tag' format --ulimit=[] Ulimit options Builds Docker images from a Dockerfile and a "context". A build's context is the files located in the specified `PATH` or `URL`. The build process can refer to any of the files in the context. For example, your build can use an [*ADD*](../builder.md#add) instruction to reference a file in the context. The `URL` parameter can specify the location of a Git repository; the repository acts as the build context. The system recursively clones the repository and its submodules using a `git clone --depth 1 --recursive` command. This command runs in a temporary directory on your local host. After the command succeeds, the directory is sent to the Docker daemon as the context. Local clones give you the ability to access private repositories using local user credentials, VPNs, and so forth. Git URLs accept context configuration in their fragment section, separated by a colon `:`. The first part represents the reference that Git will check out, this can be either a branch, a tag, or a commit SHA. The second part represents a subdirectory inside the repository that will be used as a build context. 
For example, run this command to use a directory called `docker` in the branch `container`: $ docker build https://github.com/docker/rootfs.git#container:docker The following table represents all the valid suffixes with their build contexts: Build Syntax Suffix | Commit Used | Build Context Used --------------------|-------------|------------------- `myrepo.git` | `refs/heads/master` | `/` `myrepo.git#mytag` | `refs/tags/mytag` | `/` `myrepo.git#mybranch` | `refs/heads/mybranch` | `/` `myrepo.git#abcdef` | `sha1 = abcdef` | `/` `myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` `myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` `myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` `myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` `myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder` Instead of specifying a context, you can pass a single Dockerfile in the `URL` or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`: docker build - < Dockerfile If you use STDIN or specify a `URL`, the system places the contents into a file called `Dockerfile`, and any `-f`, `--file` option is ignored. In this scenario, there is no context. By default the `docker build` command will look for a `Dockerfile` at the root of the build context. The `-f`, `--file`, option lets you specify the path to an alternative file to use instead. This is useful in cases where the same set of files are used for multiple builds. The path must be to a file within the build context. If a relative path is specified then it must to be relative to the current directory. In most cases, it's best to put each Dockerfile in an empty directory. Then, add to that directory only the files needed for building the Dockerfile. To increase the build's performance, you can exclude files and directories by adding a `.dockerignore` file to that directory as well. 
For information on creating one, see the [.dockerignore file](../builder.md#dockerignore-file). If the Docker client loses connection to the daemon, the build is canceled. This happens if you interrupt the Docker client with `CTRL-c` or if the Docker client is killed for any reason. If the build initiated a pull which is still running at the time the build is cancelled, the pull is cancelled as well. ## Return code On a successful build, a return code of success `0` will be returned. When the build fails, a non-zero failure code will be returned. There should be informational output of the reason for failure output to `STDERR`: $ docker build -t fail . Sending build context to Docker daemon 2.048 kB Sending build context to Docker daemon Step 1 : FROM busybox ---> 4986bf8c1536 Step 2 : RUN exit 13 ---> Running in e26670ec7a0a INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 $ echo $? 1 See also: [*Dockerfile Reference*](../builder.md). ## Examples ### Build with PATH $ docker build . 
Uploading context 10240 bytes Step 1 : FROM busybox Pulling repository busybox ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ Step 2 : RUN ls -lh / ---> Running in 9c9e81692ae9 total 24 drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr ---> b35f4035db3f Step 3 : CMD echo Hello world ---> Running in 02071fceb21b ---> f52f38b7823e Successfully built f52f38b7823e Removing intermediate container 9c9e81692ae9 Removing intermediate container 02071fceb21b This example specifies that the `PATH` is `.`, and so all the files in the local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies where to find the files for the "context" of the build on the Docker daemon. Remember that the daemon could be running on a remote machine and that no parsing of the Dockerfile happens at the client side (where you're running `docker build`). That means that *all* the files at `PATH` get sent, not just the ones listed to [*ADD*](../builder.md#add) in the Dockerfile. The transfer of context from the local machine to the Docker daemon is what the `docker` client means when you see the "Sending build context" message. If you wish to keep the intermediate containers after the build is complete, you must use `--rm=false`. This does not affect the build cache. ### Build with URL $ docker build github.com/creack/docker-firefox This will clone the GitHub repository and use the cloned repository as context. The Dockerfile at the root of the repository is used as Dockerfile. 
Note that you can specify an arbitrary Git repository by using the `git://` or `git@` schema. ### Build with - $ docker build - < Dockerfile This will read a Dockerfile from `STDIN` without context. Due to the lack of a context, no contents of any local directory will be sent to the Docker daemon. Since there is no context, a Dockerfile `ADD` only works if it refers to a remote URL. $ docker build - < context.tar.gz This will build an image for a compressed context read from `STDIN`. Supported formats are: bzip2, gzip and xz. ### Usage of .dockerignore $ docker build . Uploading context 18.829 MB Uploading context Step 1 : FROM busybox ---> 769b9341d937 Step 2 : CMD echo Hello world ---> Using cache ---> 99cc1ad10469 Successfully built 99cc1ad10469 $ echo ".git" > .dockerignore $ docker build . Uploading context 6.76 MB Uploading context Step 1 : FROM busybox ---> 769b9341d937 Step 2 : CMD echo Hello world ---> Using cache ---> 99cc1ad10469 Successfully built 99cc1ad10469 This example shows the use of the `.dockerignore` file to exclude the `.git` directory from the context. Its effect can be seen in the changed size of the uploaded context. The builder reference contains detailed information on [creating a .dockerignore file](../builder.md#dockerignore-file) ### Tag image (-t) $ docker build -t vieux/apache:2.0 . This will build like the previous example, but it will then tag the resulting image. The repository name will be `vieux/apache` and the tag will be `2.0` You can apply multiple tags to an image. For example, you can apply the `latest` tag to a newly built image and add another tag that references a specific version. For example, to tag an image both as `whenry/fedora-jboss:latest` and `whenry/fedora-jboss:v2.1`, use the following: $ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . ### Specify Dockerfile (-f) $ docker build -f Dockerfile.debug . 
This will use a file called `Dockerfile.debug` for the build instructions instead of `Dockerfile`. $ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . $ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . The above commands will build the current build context (as specified by the `.`) twice, once using a debug version of a `Dockerfile` and once using a production version. $ cd /home/me/myapp/some/dir/really/deep $ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp $ docker build -f ../../../../dockerfiles/debug /home/me/myapp These two `docker build` commands do the exact same thing. They both use the contents of the `debug` file instead of looking for a `Dockerfile` and will use `/home/me/myapp` as the root of the build context. Note that `debug` is in the directory structure of the build context, regardless of how you refer to it on the command line. > **Note:** > `docker build` will return a `no such file or directory` error if the > file or directory does not exist in the uploaded context. This may > happen if there is no context, or if you specify a file that is > elsewhere on the Host system. The context is limited to the current > directory (and its children) for security reasons, and to ensure > repeatable builds on remote Docker hosts. This is also the reason why > `ADD ../file` will not work. ### Optional parent cgroup (--cgroup-parent) When `docker build` is run with the `--cgroup-parent` option the containers used in the build will be run with the [corresponding `docker run` flag](../run.md#specifying-custom-cgroups). ### Set ulimits in container (--ulimit) Using the `--ulimit` option with `docker build` will cause each build step's container to be started using those [`--ulimit` flag values](../run.md#setting-ulimits-in-a-container). ### Set build-time variables (--build-arg) You can use `ENV` instructions in a Dockerfile to define variable values. These values persist in the built image. 
However, often persistence is not what you want. Users want to specify variables differently depending on which host they build an image on. A good example is `http_proxy` or source versions for pulling intermediate files. The `ARG` instruction lets Dockerfile authors define values that users can set at build-time using the `--build-arg` flag: $ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 . This flag allows you to pass the build-time variables that are accessed like regular environment variables in the `RUN` instruction of the Dockerfile. Also, these values don't persist in the intermediate or final images like `ENV` values do. For detailed information on using `ARG` and `ENV` instructions, see the [Dockerfile reference](../builder.md). ### Specify isolation technology for container (--isolation) This option is useful in situations where you are running Docker containers on Windows. The `--isolation=` option sets a container's isolation technology. On Linux, the only supported is the `default` option which uses Linux namespaces. On Microsoft Windows, you can specify these values: | Value | Description | |-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| | `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | | `process` | Namespace isolation only. | | `hyperv` | Hyper-V hypervisor partition-based isolation. | Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. 
docker-1.10.3/docs/reference/commandline/cli.md000066400000000000000000000177741267010174400213320ustar00rootroot00000000000000 # Use the Docker command line To list available commands, either run `docker` with no parameters or execute `docker help`: $ docker Usage: docker [OPTIONS] COMMAND [arg...] docker daemon [ --help | ... ] docker [ --help | -v | --version ] -H, --host=[]: The socket(s) to talk to the Docker daemon in the format of tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd. A self-sufficient runtime for Linux containers. ... Depending on your Docker system configuration, you may be required to preface each `docker` command with `sudo`. To avoid having to use `sudo` with the `docker` command, your system administrator can create a Unix group called `docker` and add users to it. For more information about installing Docker or `sudo` configuration, refer to the [installation](../../installation/index.md) instructions for your operating system. ## Environment variables For easy reference, the following list of environment variables are supported by the `docker` command line: * `DOCKER_API_VERSION` The API version to use (e.g. `1.19`) * `DOCKER_CONFIG` The location of your client configuration files. * `DOCKER_CERT_PATH` The location of your authentication keys. * `DOCKER_DRIVER` The graph driver to use. * `DOCKER_HOST` Daemon socket to connect to. * `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is unsuitable for Docker. * `DOCKER_RAMDISK` If set this will disable 'pivot_root'. * `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote. * `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images. Equates to `--disable-content-trust=false` for build, create, pull, push, run. * `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults to the same URL as the registry. * `DOCKER_TMPDIR` Location for temporary Docker files. 
Because Docker is developed using 'Go', you can also use any environment variables used by the 'Go' runtime. In particular, you may find these useful: * `HTTP_PROXY` * `HTTPS_PROXY` * `NO_PROXY` These Go environment variables are case-insensitive. See the [Go specification](http://golang.org/pkg/net/http/) for details on these variables. ## Configuration files By default, the Docker command line stores its configuration files in a directory called `.docker` within your `HOME` directory. However, you can specify a different location via the `DOCKER_CONFIG` environment variable or the `--config` command line option. If both are specified, then the `--config` option overrides the `DOCKER_CONFIG` environment variable. For example: docker --config ~/testconfigs/ ps Instructs Docker to use the configuration files in your `~/testconfigs/` directory when running the `ps` command. Docker manages most of the files in the configuration directory and you should not modify them. However, you *can modify* the `config.json` file to control certain aspects of how the `docker` command behaves. Currently, you can modify the `docker` command behavior using environment variables or command-line options. You can also use options within `config.json` to modify some of the same behavior. When using these mechanisms, you must keep in mind the order of precedence among them. Command line options override environment variables and environment variables override properties you specify in a `config.json` file. The `config.json` file stores a JSON encoding of several properties: The property `HttpHeaders` specifies a set of headers to include in all messages sent from the Docker client to the daemon. Docker does not try to interpret or understand these header; it simply puts them into the messages. Docker does not allow these headers to change any headers it sets for itself. The property `psFormat` specifies the default format for `docker ps` output. 
When the `--format` flag is not provided with the `docker ps` command, Docker's client uses this property. If this property is not set, the client falls back to the default table format. For a list of supported formatting directives, see the [**Formatting** section in the `docker ps` documentation](ps.md) Once attached to a container, users detach from it and leave it running using the using `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable using the `detachKeys` property. Specify a `` value for the property. The format of the `` is a comma-separated list of either a letter [a-Z], or the `ctrl-` combined with any of the following: * `a-z` (a single lowercase alpha character ) * `@` (ampersand) * `[` (left bracket) * `\\` (two backward slashes) * `_` (underscore) * `^` (caret) Your customization applies to all containers started in with your Docker client. Users can override your custom or the default key sequence on a per-container basis. To do this, the user specifies the `--detach-keys` flag with the `docker attach`, `docker exec`, `docker run` or `docker start` command. The property `imagesFormat` specifies the default format for `docker images` output. When the `--format` flag is not provided with the `docker images` command, Docker's client uses this property. If this property is not set, the client falls back to the default table format. For a list of supported formatting directives, see the [**Formatting** section in the `docker images` documentation](images.md) Following is a sample `config.json` file: { "HttpHeaders": { "MyHeader": "MyValue" }, "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", "detachKeys": "ctrl-e,e" } ### Notary If using your own notary server and a self-signed certificate or an internal Certificate Authority, you need to place the certificate at `tls//ca.crt` in your docker config directory. 
Alternatively you can trust the certificate globally by adding it to your system's list of root Certificate Authorities. ## Help To list the help on any command just execute the command, followed by the `--help` option. $ docker run --help Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] Run a command in a new container -a, --attach=[] Attach to STDIN, STDOUT or STDERR --cpu-shares=0 CPU shares (relative weight) ... ## Option types Single character command line options can be combined, so rather than typing `docker run -i -t --name test busybox sh`, you can write `docker run -it --name test busybox sh`. ### Boolean Boolean options take the form `-d=false`. The value you see in the help text is the default value which is set if you do **not** specify that flag. If you specify a Boolean flag without a value, this will set the flag to `true`, irrespective of the default value. For example, running `docker run -d` will set the value to `true`, so your container **will** run in "detached" mode, in the background. Options which default to `true` (e.g., `docker build --rm=true`) can only be set to the non-default value by explicitly setting them to `false`: $ docker build --rm=false . ### Multi You can specify options like `-a=[]` multiple times in a single command line, for example in these commands: $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash $ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls Sometimes, multiple options can call for a more complex value string as for `-v`: $ docker run -v /host:/container example/mysql > **Note:** > Do not use the `-t` and `-a stderr` options together due to > limitations in the `pty` implementation. All `stderr` in `pty` mode > simply goes to `stdout`. ### Strings and Integers Options like `--name=""` expect a string, and they can only be specified once. Options like `-c=0` expect an integer, and they can only be specified once. 
docker-1.10.3/docs/reference/commandline/commit.md000066400000000000000000000076631267010174400220470ustar00rootroot00000000000000 # commit Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] Create a new image from a container's changes -a, --author="" Author (e.g., "John Hannibal Smith ") -c, --change=[] Apply specified Dockerfile instructions while committing the image --help Print usage -m, --message="" Commit message -p, --pause=true Pause container during commit It can be useful to commit a container's file changes or settings into a new image. This allows you debug a container by running an interactive shell, or to export a working dataset to another server. Generally, it is better to use Dockerfiles to manage your images in a documented and maintainable way. The commit operation will not include any data contained in volumes mounted inside the container. By default, the container being committed and its processes will be paused while the image is committed. This reduces the likelihood of encountering data corruption during the process of creating the commit. If this behavior is undesired, set the 'p' option to false. The `--change` option will apply `Dockerfile` instructions to the image that is created. 
Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` ## Commit a container $ docker ps ID IMAGE COMMAND CREATED STATUS PORTS c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours $ docker commit c3f279d17e0a svendowideit/testimage:version3 f5283438590d $ docker images REPOSITORY TAG ID CREATED SIZE svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB ## Commit a container with new configurations $ docker ps ID IMAGE COMMAND CREATED STATUS PORTS c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] $ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 f5283438590d $ docker inspect -f "{{ .Config.Env }}" f5283438590d [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] ## Commit a container with new `CMD` and `EXPOSE` instructions $ docker ps ID IMAGE COMMAND CREATED STATUS PORTS c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours $ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 f5283438590d $ docker run -d svendowideit/testimage:version4 89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 $ docker ps ID IMAGE COMMAND CREATED STATUS PORTS 89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours docker-1.10.3/docs/reference/commandline/cp.md000066400000000000000000000101541267010174400211460ustar00rootroot00000000000000 # cp Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH 
| - docker cp [OPTIONS] SRC_PATH | - CONTAINER:DEST_PATH Copy files/folders between a container and the local filesystem -L, --follow-link Always follow symbol link in SRC_PATH --help Print usage The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. You can copy from the container's file system to the local machine or the reverse, from the local filesystem to the container. If `-` is specified for either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from `STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. The `SRC_PATH` or `DEST_PATH` be a file or directory. The `docker cp` command assumes container paths are relative to the container's `/` (root) directory. This means supplying the initial forward slash is optional; The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and `compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can be an absolute or relative value. The command interprets a local machine's relative paths as relative to the current working directory where `docker cp` is run. The `cp` command behaves like the Unix `cp -a` command in that directories are copied recursively with permissions preserved if possible. Ownership is set to the user and primary group at the destination. For example, files copied to a container are created with `UID:GID` of the root user. Files copied to the local machine are created with the `UID:GID` of the user which invoked the `docker cp` command. If you specify the `-L` option, `docker cp` follows any symbolic link in the `SRC_PATH`. Assuming a path separator of `/`, a first argument of `SRC_PATH` and second argument of `DEST_PATH`, the behavior is as follows: - `SRC_PATH` specifies a file - `DEST_PATH` does not exist - the file is saved to a file created at `DEST_PATH` - `DEST_PATH` does not exist and ends with `/` - Error condition: the destination directory must exist. 
- `DEST_PATH` exists and is a file - the destination is overwritten with the source file's contents - `DEST_PATH` exists and is a directory - the file is copied into this directory using the basename from `SRC_PATH` - `SRC_PATH` specifies a directory - `DEST_PATH` does not exist - `DEST_PATH` is created as a directory and the *contents* of the source directory are copied into this directory - `DEST_PATH` exists and is a file - Error condition: cannot copy a directory to a file - `DEST_PATH` exists and is a directory - `SRC_PATH` does not end with `/.` - the source directory is copied into this directory - `SRC_PATH` does end with `/.` - the *content* of the source directory is copied into this directory The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not the target, is copied by default. To copy the link target and not the link, specify the `-L` option. A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local machine, for example `file:name.txt`. If you use a `:` in a local machine path, you must be explicit with a relative or absolute path, for example: `/path/to/file:name.txt` or `./file:name.txt` It is not possible to copy certain system files such as resources under `/proc`, `/sys`, `/dev`, and mounts created by the user in the container. Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. The command extracts the content of the tar to the `DEST_PATH` in container's filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. docker-1.10.3/docs/reference/commandline/create.md000066400000000000000000000232501267010174400220100ustar00rootroot00000000000000 # create Creates a new container. Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] 
Create a new container -a, --attach=[] Attach to STDIN, STDOUT or STDERR --add-host=[] Add a custom host-to-IP mapping (host:ip) --blkio-weight=0 Block IO weight (relative weight) --blkio-weight-device=[] Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`) --cpu-shares=0 CPU shares (relative weight) --cap-add=[] Add Linux capabilities --cap-drop=[] Drop Linux capabilities --cgroup-parent="" Optional parent cgroup for the container --cidfile="" Write the container ID to the file --cpu-period=0 Limit CPU CFS (Completely Fair Scheduler) period --cpu-quota=0 Limit CPU CFS (Completely Fair Scheduler) quota --cpuset-cpus="" CPUs in which to allow execution (0-3, 0,1) --cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1) --device=[] Add a host device to the container --device-read-bps=[] Limit read rate (bytes per second) from a device (e.g., --device-read-bps=/dev/sda:1mb) --device-read-iops=[] Limit read rate (IO per second) from a device (e.g., --device-read-iops=/dev/sda:1000) --device-write-bps=[] Limit write rate (bytes per second) to a device (e.g., --device-write-bps=/dev/sda:1mb) --device-write-iops=[] Limit write rate (IO per second) to a device (e.g., --device-write-iops=/dev/sda:1000) --disable-content-trust=true Skip image verification --dns=[] Set custom DNS servers --dns-opt=[] Set custom DNS options --dns-search=[] Set custom DNS search domains -e, --env=[] Set environment variables --entrypoint="" Overwrite the default ENTRYPOINT of the image --env-file=[] Read in a file of environment variables --expose=[] Expose a port or a range of ports --group-add=[] Add additional groups to join -h, --hostname="" Container host name --help Print usage -i, --interactive Keep STDIN open even if not attached --ip="" Container IPv4 address (e.g. 172.30.100.104) --ip6="" Container IPv6 address (e.g. 
2001:db8::33) --ipc="" IPC namespace to use --isolation="" Container isolation technology --kernel-memory="" Kernel memory limit -l, --label=[] Set metadata on the container (e.g., --label=com.example.key=value) --label-file=[] Read in a line delimited file of labels --link=[] Add link to another container --log-driver="" Logging driver for container --log-opt=[] Log driver specific options -m, --memory="" Memory limit --mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33) --memory-reservation="" Memory soft limit --memory-swap="" A positive integer equal to memory plus swap. Specify -1 to enable unlimited swap. --memory-swappiness="" Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. --name="" Assign a name to the container --net="bridge" Connect a container to a network 'bridge': create a network stack on the default Docker bridge 'none': no networking 'container:': reuse another container's network stack 'host': use the Docker host network stack '|': connect to a user-defined network --net-alias=[] Add network-scoped alias for the container --oom-kill-disable Whether to disable OOM Killer for the container or not --oom-score-adj=0 Tune the host's OOM preferences for containers (accepts -1000 to 1000) -P, --publish-all Publish all exposed ports to random ports -p, --publish=[] Publish a container's port(s) to the host --pid="" PID namespace to use --privileged Give extended privileges to this container --read-only Mount the container's root filesystem as read only --restart="no" Restart policy (no, on-failure[:max-retry], always, unless-stopped) --security-opt=[] Security options --stop-signal="SIGTERM" Signal to stop a container --shm-size=[] Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. 
-t, --tty Allocate a pseudo-TTY -u, --user="" Username or UID --ulimit=[] Ulimit options --uts="" UTS namespace to use -v, --volume=[host-src:]container-dest[:] Bind mount a volume. The comma-delimited `options` are [rw|ro], [z|Z], or [[r]shared|[r]slave|[r]private]. The 'host-src' is an absolute path or a name value. --volume-driver="" Container's volume driver --volumes-from=[] Mount volumes from the specified container(s) -w, --workdir="" Working directory inside the container The `docker create` command creates a writeable container layer over the specified image and prepares it for running the specified command. The container ID is then printed to `STDOUT`. This is similar to `docker run -d` except the container is never started. You can then use the `docker start ` command to start the container at any point. This is useful when you want to set up a container configuration ahead of time so that it is ready to start when you need it. The initial status of the new container is `created`. Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details. ## Examples $ docker create -t -i fedora bash 6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 $ docker start -a -i 6d8af538ec5 bash-4.2# As of v1.4.0 container volumes are initialized during the `docker create` phase (i.e., `docker run` too). For example, this allows you to `create` the `data` volume container, and then use it from another container: $ docker create -v /data --name data ubuntu 240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 $ docker run --rm --volumes-from data ubuntu ls -la /data total 8 drwxr-xr-x 2 root root 4096 Dec 5 04:10 . drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. 
Similarly, `create` a host directory bind mounted volume container, which can then be used from the subsequent container: $ docker create -v /home/docker:/docker --name docker ubuntu 9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 $ docker run --rm --volumes-from docker ubuntu ls -la /docker total 20 drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. -rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history -rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc -rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local -rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker ### Specify isolation technology for container (--isolation) This option is useful in situations where you are running Docker containers on Windows. The `--isolation=` option sets a container's isolation technology. On Linux, the only supported is the `default` option which uses Linux namespaces. On Microsoft Windows, you can specify these values: | Value | Description | |-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| | `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | | `process` | Namespace isolation only. | | `hyperv` | Hyper-V hypervisor partition-based isolation. | Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. docker-1.10.3/docs/reference/commandline/daemon.md000066400000000000000000001147111267010174400220130ustar00rootroot00000000000000 # daemon Usage: docker daemon [OPTIONS] A self-sufficient runtime for linux containers. 
Options: --api-cors-header="" Set CORS headers in the remote API --authorization-plugin=[] Set authorization plugins to load -b, --bridge="" Attach containers to a network bridge --bip="" Specify network bridge IP --cgroup-parent= Set parent cgroup for all containers -D, --debug Enable debug mode --default-gateway="" Container default gateway IPv4 address --default-gateway-v6="" Container default gateway IPv6 address --cluster-store="" URL of the distributed storage backend --cluster-advertise="" Address of the daemon instance on the cluster --cluster-store-opt=map[] Set cluster options --config-file=/etc/docker/daemon.json Daemon configuration file --dns=[] DNS server to use --dns-opt=[] DNS options to use --dns-search=[] DNS search domains to use --default-ulimit=[] Set default ulimit settings for containers --exec-opt=[] Set exec driver options --exec-root="/var/run/docker" Root of the Docker execdriver --fixed-cidr="" IPv4 subnet for fixed IPs --fixed-cidr-v6="" IPv6 subnet for fixed IPs -G, --group="docker" Group for the unix socket -g, --graph="/var/lib/docker" Root of the Docker runtime -H, --host=[] Daemon socket(s) to connect to --help Print usage --icc=true Enable inter-container communication --insecure-registry=[] Enable insecure registry communication --ip=0.0.0.0 Default IP when binding container ports --ip-forward=true Enable net.ipv4.ip_forward --ip-masq=true Enable IP masquerading --iptables=true Enable addition of iptables rules --ipv6 Enable IPv6 networking -l, --log-level="info" Set the logging level --label=[] Set key=value labels to the daemon --log-driver="json-file" Default driver for container logs --log-opt=[] Log driver specific options --mtu=0 Set the containers network MTU --disable-legacy-registry Do not contact legacy registries -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file --registry-mirror=[] Preferred Docker registry mirror -s, --storage-driver="" Storage driver to use --selinux-enabled Enable selinux support 
--storage-opt=[] Set storage driver options --tls Use TLS; implied by --tlsverify --tlscacert="~/.docker/ca.pem" Trust certs signed only by this CA --tlscert="~/.docker/cert.pem" Path to TLS certificate file --tlskey="~/.docker/key.pem" Path to TLS key file --tlsverify Use TLS and verify the remote --userns-remap="default" Enable user namespace remapping --userland-proxy=true Use userland proxy for loopback traffic Options with [] may be specified multiple times. The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the daemon and client. To run the daemon you type `docker daemon`. To run the daemon with debug output, use `docker daemon -D`. ## Daemon socket option The Docker daemon can listen for [Docker Remote API](../api/docker_remote_api.md) requests via three different types of Socket: `unix`, `tcp`, and `fd`. By default, a `unix` domain socket (or IPC socket) is created at `/var/run/docker.sock`, requiring either `root` permission, or `docker` group membership. If you need to access the Docker daemon remotely, you need to enable the `tcp` Socket. Beware that the default setup provides un-encrypted and un-authenticated direct access to the Docker daemon - and should be secured either using the [built in HTTPS encrypted socket](../../security/https/), or by putting a secure web proxy in front of it. You can listen on port `2375` on all network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP address: `-H tcp://192.168.59.103:2375`. It is conventional to use port `2375` for un-encrypted, and port `2376` for encrypted communication with the daemon. > **Note:** > If you're using an HTTPS encrypted socket, keep in mind that only > TLS1.0 and greater are supported. Protocols SSLv3 and under are not > supported anymore for security reasons. 
On systemd-based systems, you can communicate with the daemon via [Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html); to do so, use `docker daemon -H fd://`.
However, `aufs` is also the only storage driver that allows containers to share executable and shared library memory, so is a useful choice when running thousands of containers with the same program or libraries. The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) snapshots. For each devicemapper graph location – typically `/var/lib/docker/devicemapper` – a thin pool is created based on two block devices, one for data and one for metadata. By default, these block devices are created automatically by using loopback mounts of automatically created sparse files. Refer to [Storage driver options](#storage-driver-options) below for a way how to customize this setup. [~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) article explains how to tune your existing setup without the use of options. The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not share executable memory between devices. Use `docker daemon -s btrfs -g /mnt/btrfs_partition`. The `zfs` driver is probably not as fast as `btrfs` but has a longer track record on stability. Thanks to `Single Copy ARC` shared blocks between clones will be cached only once. Use `docker daemon -s zfs`. To select a different zfs filesystem set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options). The `overlay` is a very fast union filesystem. It is now merged in the main Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). Call `docker daemon -s overlay` to use it. > **Note:** > As promising as `overlay` is, the feature is still quite young and should not > be used in production. Most notably, using `overlay` can cause excessive > inode consumption (especially as the number of images grows), as well as > being incompatible with the use of RPMs. 
> **Note:** > It is currently unsupported on `btrfs` or any Copy on Write filesystem > and should only be used over `ext4` partitions. ### Storage driver options Particular storage-driver can be configured with options specified with `--storage-opt` flags. Options for `devicemapper` are prefixed with `dm` and options for `zfs` start with `zfs`. * `dm.thinpooldev` Specifies a custom block storage device to use for the thin pool. If using a block device for device mapper storage, it is best to use `lvm` to create and manage the thin-pool volume. This volume is then handed to Docker to exclusively create snapshot volumes needed for images and containers. Managing the thin-pool outside of Docker makes for the most feature-rich method of having Docker utilize device mapper thin provisioning as the backing storage for Docker's containers. The highlights of the lvm-based thin-pool management feature include: automatic or interactive thin-pool resize support, dynamically changing thin-pool features, automatic thinp metadata checking when lvm activates the thin-pool, etc. As a fallback if no thin pool is provided, loopback files will be created. Loopback is very slow, but can be used without any pre-configuration of storage. It is strongly recommended that you do not use loopback in production. Ensure your Docker daemon has a `--storage-opt dm.thinpooldev` argument provided. Example use: $ docker daemon \ --storage-opt dm.thinpooldev=/dev/mapper/thin-pool * `dm.basesize` Specifies the size to use when creating the base device, which limits the size of images and containers. The default value is 10G. Note, thin devices are inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB of space on the pool. However, the filesystem will use more space for the empty case the larger the device is. The base device size can be increased at daemon restart which will allow all future images and containers (based on those new images) to be of the new base device size. 
Example use: $ docker daemon --storage-opt dm.basesize=50G This will increase the base device size to 50G. The Docker daemon will throw an error if existing base device size is larger than 50G. A user can use this option to expand the base device size however shrinking is not permitted. This value affects the system-wide "base" empty filesystem that may already be initialized and inherited by pulled images. Typically, a change to this value requires additional steps to take effect: $ sudo service docker stop $ sudo rm -rf /var/lib/docker $ sudo service docker start Example use: $ docker daemon --storage-opt dm.basesize=20G * `dm.loopdatasize` > **Note**: > This option configures devicemapper loopback, which should not > be used in production. Specifies the size to use when creating the loopback file for the "data" device which is used for the thin pool. The default size is 100G. The file is sparse, so it will not initially take up this much space. Example use: $ docker daemon --storage-opt dm.loopdatasize=200G * `dm.loopmetadatasize` > **Note**: > This option configures devicemapper loopback, which should not > be used in production. Specifies the size to use when creating the loopback file for the "metadata" device which is used for the thin pool. The default size is 2G. The file is sparse, so it will not initially take up this much space. Example use: $ docker daemon --storage-opt dm.loopmetadatasize=4G * `dm.fs` Specifies the filesystem type to use for the base device. The supported options are "ext4" and "xfs". The default is "xfs" Example use: $ docker daemon --storage-opt dm.fs=ext4 * `dm.mkfsarg` Specifies extra mkfs arguments to be used when creating the base device. Example use: $ docker daemon --storage-opt "dm.mkfsarg=-O ^has_journal" * `dm.mountopt` Specifies extra mount options used when mounting the thin devices. 
Example use: $ docker daemon --storage-opt dm.mountopt=nodiscard * `dm.datadev` (Deprecated, use `dm.thinpooldev`) Specifies a custom blockdevice to use for data for the thin pool. If using a block device for device mapper storage, ideally both datadev and metadatadev should be specified to completely avoid using the loopback device. Example use: $ docker daemon \ --storage-opt dm.datadev=/dev/sdb1 \ --storage-opt dm.metadatadev=/dev/sdc1 * `dm.metadatadev` (Deprecated, use `dm.thinpooldev`) Specifies a custom blockdevice to use for metadata for the thin pool. For best performance the metadata should be on a different spindle than the data, or even better on an SSD. If setting up a new metadata pool it is required to be valid. This can be achieved by zeroing the first 4k to indicate empty metadata, like this: $ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 Example use: $ docker daemon \ --storage-opt dm.datadev=/dev/sdb1 \ --storage-opt dm.metadatadev=/dev/sdc1 * `dm.blocksize` Specifies a custom blocksize to use for the thin pool. The default blocksize is 64K. Example use: $ docker daemon --storage-opt dm.blocksize=512K * `dm.blkdiscard` Enables or disables the use of blkdiscard when removing devicemapper devices. This is enabled by default (only) if using loopback devices and is required to resparsify the loopback file on image/container removal. Disabling this on loopback can lead to *much* faster container removal times, but will make the space used in `/var/lib/docker` directory not be returned to the system for other use when containers are removed. Example use: $ docker daemon --storage-opt dm.blkdiscard=false * `dm.override_udev_sync_check` Overrides the `udev` synchronization checks between `devicemapper` and `udev`. `udev` is the device manager for the Linux kernel. To view the `udev` sync support of a Docker daemon that is using the `devicemapper` driver, run: $ docker info [...] Udev Sync Supported: true [...] 
When `udev` sync support is `true`, then `devicemapper` and udev can coordinate the activation and deactivation of devices for containers. When `udev` sync support is `false`, a race condition occurs between the`devicemapper` and `udev` during create and cleanup. The race condition results in errors and failures. (For information on these failures, see [docker#4036](https://github.com/docker/docker/issues/4036)) To allow the `docker` daemon to start, regardless of `udev` sync not being supported, set `dm.override_udev_sync_check` to true: $ docker daemon --storage-opt dm.override_udev_sync_check=true When this value is `true`, the `devicemapper` continues and simply warns you the errors are happening. > **Note:** > The ideal is to pursue a `docker` daemon and environment that does > support synchronizing with `udev`. For further discussion on this > topic, see [docker#4036](https://github.com/docker/docker/issues/4036). > Otherwise, set this flag for migrating existing Docker daemons to > a daemon with a supported environment. * `dm.use_deferred_removal` Enables use of deferred device removal if `libdm` and the kernel driver support the mechanism. Deferred device removal means that if device is busy when devices are being removed/deactivated, then a deferred removal is scheduled on device. And devices automatically go away when last user of the device exits. For example, when a container exits, its associated thin device is removed. If that device has leaked into some other mount namespace and can't be removed, the container exit still succeeds and this option causes the system to schedule the device for deferred removal. It does not wait in a loop trying to remove a busy device. Example use: $ docker daemon --storage-opt dm.use_deferred_removal=true * `dm.use_deferred_deletion` Enables use of deferred device deletion for thin pool devices. By default, thin pool device deletion is synchronous. 
Before a container is deleted, the Docker daemon removes any associated devices. If the storage driver can not remove a device, the container deletion fails and daemon returns. Error deleting container: Error response from daemon: Cannot destroy container To avoid this failure, enable both deferred device deletion and deferred device removal on the daemon. $ docker daemon \ --storage-opt dm.use_deferred_deletion=true \ --storage-opt dm.use_deferred_removal=true With these two options enabled, if a device is busy when the driver is deleting a container, the driver marks the device as deleted. Later, when the device isn't in use, the driver deletes it. In general it should be safe to enable this option by default. It will help when unintentional leaking of mount point happens across multiple mount namespaces. Currently supported options of `zfs`: * `zfs.fsname` Set zfs filesystem under which docker will create its own datasets. By default docker will pick up the zfs filesystem where docker graph (`/var/lib/docker`) is located. Example use: $ docker daemon -s zfs --storage-opt zfs.fsname=zroot/docker ## Docker execdriver option The Docker daemon uses a specifically built `libcontainer` execution driver as its interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`. ## Options for the native execdriver You can configure the `native` (libcontainer) execdriver using options specified with the `--exec-opt` flag. All the flag's options have the `native` prefix. A single `native.cgroupdriver` option is available. The `native.cgroupdriver` option specifies the management of the container's cgroups. You can specify `cgroupfs` or `systemd`. If you specify `systemd` and it is not available, the system uses `cgroupfs`. If you omit the `native.cgroupdriver` option,` cgroupfs` is used. This example sets the `cgroupdriver` to `systemd`: $ sudo docker daemon --exec-opt native.cgroupdriver=systemd Setting this option applies to all containers the daemon launches. 
On Windows, the Docker daemon also makes use of `--exec-opt` for a special purpose. A Docker user can specify the default container isolation technology with it, for example:
The flag can be used multiple times to allow multiple registries to be marked as insecure. If an insecure registry is not marked as insecure, `docker pull`, `docker push`, and `docker search` will result in an error message prompting the user to either secure or pass the `--insecure-registry` flag to the Docker daemon as described above. Local registries, whose IP address falls in the 127.0.0.0/8 range, are automatically marked as insecure as of Docker 1.3.2. It is not recommended to rely on this, as it may change in the future. Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted communication, can be useful when running a local registry. However, because its use creates security vulnerabilities it should ONLY be enabled for testing purposes. For increased security, users should add their CA to their system's list of trusted CAs instead of enabling `--insecure-registry`. ## Legacy Registries Enabling `--disable-legacy-registry` forces a docker daemon to only interact with registries which support the V2 protocol. Specifically, the daemon will not attempt `push`, `pull` and `login` to v1 registries. The exception to this is `search` which can still be performed on v1 registries. ## Running a Docker daemon behind a HTTPS_PROXY When running inside a LAN that uses a `HTTPS` proxy, the Docker Hub certificates will be replaced by the proxy's certificates. These certificates need to be added to your Docker host's configuration: 1. Install the `ca-certificates` package for your distribution 2. Ask your network admin for the proxy's CA certificate and append them to `/etc/pki/tls/certs/ca-bundle.crt` 3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ docker daemon`. The `username:` and `password@` are optional - and are only needed if your proxy is set up to require authentication. 
This will only add the proxy and authentication to the Docker daemon's requests - your `docker build`s and running containers will need extra configuration to use the proxy.
For example: ```bash docker daemon \ --cluster-advertise 192.168.1.2:2376 \ --cluster-store etcd://192.168.1.2:2379 \ --cluster-store-opt kv.cacertfile=/path/to/ca.pem \ --cluster-store-opt kv.certfile=/path/to/cert.pem \ --cluster-store-opt kv.keyfile=/path/to/key.pem ``` The currently supported cluster store options are: * `discovery.heartbeat` Specifies the heartbeat timer in seconds which is used by the daemon as a keepalive mechanism to make sure discovery module treats the node as alive in the cluster. If not configured, the default value is 20 seconds. * `discovery.ttl` Specifies the ttl (time-to-live) in seconds which is used by the discovery module to timeout a node if a valid heartbeat is not received within the configured ttl value. If not configured, the default value is 60 seconds. * `kv.cacertfile` Specifies the path to a local file with PEM encoded CA certificates to trust * `kv.certfile` Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store. * `kv.keyfile` Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store. * `kv.path` Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. ## Access authorization Docker's access authorization can be extended by authorization plugins that your organization can purchase or build themselves. You can install one or more authorization plugins when you start the Docker `daemon` using the `--authorization-plugin=PLUGIN_ID` option. ```bash docker daemon --authorization-plugin=plugin1 --authorization-plugin=plugin2,... ``` The `PLUGIN_ID` value is either the plugin's name or a path to its specification file. The plugin's implementation determines whether you can specify a name or path. Consult with your Docker administrator to get information about the plugins available to you. 
Once a plugin is installed, requests made to the `daemon` through the command line or Docker's remote API are allowed or denied by the plugin. If you have multiple plugins installed, at least one must allow the request for it to complete. For information about how to create an authorization plugin, see [authorization plugin](../../extend/authorization.md) section in the Docker extend section of this documentation. ## Daemon user namespace options The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling a process, and therefore a container, to have a unique range of user and group IDs which are outside the traditional user and group range utilized by the host system. Potentially the most important security improvement is that, by default, container processes running as the `root` user will have expected administrative privilege (with some restrictions) inside the container but will effectively be mapped to an unprivileged `uid` on the host. When user namespace support is enabled, Docker creates a single daemon-wide mapping for all containers running on the same engine instance. The mappings will utilize the existing subordinate user and group ID feature available on all modern Linux distributions. The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and [`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be read for the user, and optional group, specified to the `--userns-remap` parameter. If you do not wish to specify your own user and/or group, you can provide `default` as the value to this flag, and a user will be created on your behalf and provided subordinate uid and gid ranges. This default user will be named `dockremap`, and entries will be created for it in `/etc/passwd` and `/etc/group` using your distro's standard user and group creation tools. 
> **Note**: The single mapping per-daemon restriction is in place for now > because Docker shares image layers from its local cache across all > containers running on the engine instance. Since file ownership must be > the same for all containers sharing the same layer content, the decision > was made to map the file ownership on `docker pull` to the daemon's user and > group mappings so that there is no delay for running containers once the > content is downloaded. This design preserves the same performance for `docker > pull`, `docker push`, and container startup as users expect with > user namespaces disabled. ### Starting the daemon with user namespaces enabled To enable user namespace support, start the daemon with the `--userns-remap` flag, which accepts values in the following formats: - uid - uid:gid - username - username:groupname If numeric IDs are provided, translation back to valid user or group names will occur so that the subordinate uid and gid information can be read, given these resources are name-based, not id-based. If the numeric ID information provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon startup will fail with an error message. *Example: starting with default Docker user management:* ``` $ docker daemon --userns-remap=default ``` When `default` is provided, Docker will create - or find the existing - user and group named `dockremap`. If the user is created, and the Linux distribution has appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated with a contiguous 65536 length range of subordinate user and group IDs, starting at an offset based on prior entries in those files. 
For example, Ubuntu will create the following range, based on an existing user named `user1` already owning the first 65536 range: ``` $ cat /etc/subuid user1:100000:65536 dockremap:165536:65536 ``` > **Note:** On a fresh Fedora install, we had to `touch` the > `/etc/subuid` and `/etc/subgid` files to have ranges assigned when users > were created. Once these files existed, range assignment on user creation > worked properly. If you have a preferred/self-managed user with subordinate ID mappings already configured, you can provide that username or uid to the `--userns-remap` flag. If you have a group that doesn't match the username, you may provide the `gid` or group name as well; otherwise the username will be used as the group name when querying the system for the subordinate group ID range. ### Detailed information on `subuid`/`subgid` ranges Given potential advanced use of the subordinate ID ranges by power users, the following paragraphs define how the Docker daemon currently uses the range entries found within the subordinate range files. The simplest case is that only one contiguous range is defined for the provided user or group. In this case, Docker will use that entire contiguous range for the mapping of host uids and gids to the container process. This means that the first ID in the range will be the remapped root user, and the IDs above that initial ID will map host ID 1 through the end of the range. From the example `/etc/subuid` content shown above, the remapped root user would be uid 165536. If the system administrator has set up multiple ranges for a single user or group, the Docker daemon will read all the available ranges and use the following algorithm to create the mapping ranges: 1. The range segments found for the particular user will be sorted by *start ID* ascending. 2. Map segments will be created from each range in increasing value with a length matching the length of each segment. 
Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1100 -> 100 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 101 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user. 3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `proc/self/gid_map`. ### User namespace known restrictions The following standard Docker features are currently incompatible when running a Docker daemon with user namespaces enabled: - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`) - sharing a network namespace with an existing container (`--net=container:*other*`) - sharing an IPC namespace with an existing container (`--ipc=container:*other*`) - A `--readonly` container filesystem (this is a Linux kernel restriction against remounting with modified flags of a currently mounted filesystem when inside a user namespace) - external (volume or graph) drivers which are unaware/incapable of using daemon user mappings - Using `--privileged` mode flag on `docker run` In general, user namespaces are an advanced feature and will require coordination with other capabilities. For example, if volumes are mounted from the host, file ownership will have to be pre-arranged if the user or administrator wishes the containers to have expected access to the volume contents. 
Finally, while the `root` user inside a user namespaced container process has many of the expected admin privileges that go along with being the superuser, the Linux kernel has restrictions based on internal knowledge that this is a user namespaced process. The most notable restriction that we are aware of at this time is the inability to use `mknod`. Permission will be denied for device creation even as container `root` inside a user namespace. ## Miscellaneous options IP masquerading uses address translation to allow containers without a public IP to talk to other machines on the Internet. This may interfere with some network topologies and can be disabled with `--ip-masq=false`. Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be set like this: DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker daemon -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 # or export DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker daemon -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 ## Default cgroup parent The `--cgroup-parent` option allows you to set the default cgroup parent to use for containers. If this option is not set, it defaults to `/docker` for fs cgroup driver and `system.slice` for systemd cgroup driver. If the cgroup has a leading forward slash (`/`), the cgroup is created under the root cgroup, otherwise the cgroup is created under the daemon cgroup. Assuming the daemon is running in cgroup `daemoncgroup`, `--cgroup-parent=/foobar` creates a cgroup in `/sys/fs/cgroup/memory/foobar`, wheras using `--cgroup-parent=foobar` creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar` This setting can also be set per container, using the `--cgroup-parent` option on `docker create` and `docker run`, and takes precedence over the `--cgroup-parent` option on the daemon. 
## Daemon configuration file The `--config-file` option allows you to set any configuration option for the daemon in a JSON format. This file uses the same flag names as keys, except for flags that allow several entries, where it uses the plural of the flag name, e.g., `labels` for the `label` flag. By default, docker tries to load a configuration file from `/etc/docker/daemon.json` on Linux and `%programdata%\docker\config\daemon.json` on Windows. The options set in the configuration file must not conflict with options set via flags. The docker daemon fails to start if an option is duplicated between the file and the flags, regardless their value. We do this to avoid silently ignore changes introduced in configuration reloads. For example, the daemon fails to start if you set daemon labels in the configuration file and also set daemon labels via the `--label` flag. Options that are not present in the file are ignored when the daemon starts. This is a full example of the allowed configuration options in the file: ```json { "authorization-plugins": [], "dns": [], "dns-opts": [], "dns-search": [], "exec-opts": [], "exec-root": "", "storage-driver": "", "storage-opts": "", "labels": [], "log-driver": "", "log-opts": [], "mtu": 0, "pidfile": "", "graph": "", "cluster-store": "", "cluster-store-opts": [], "cluster-advertise": "", "debug": true, "hosts": [], "log-level": "", "tls": true, "tlsverify": true, "tlscacert": "", "tlscert": "", "tlskey": "", "api-cors-headers": "", "selinux-enabled": false, "userns-remap": "", "group": "", "cgroup-parent": "", "default-ulimits": {}, "ipv6": false, "iptables": false, "ip-forward": false, "ip-mask": false, "userland-proxy": false, "ip": "0.0.0.0", "bridge": "", "bip": "", "fixed-cidr": "", "fixed-cidr-v6": "", "default-gateway": "", "default-gateway-v6": "", "icc": false } ``` ### Configuration reloading Some options can be reconfigured when the daemon is running without requiring to restart the process. 
We use the `SIGHUP` signal in Linux to reload, and a global event in Windows with the key `Global\docker-daemon-config-$PID`. The options can be modified in the configuration file but still will check for conflicts with the provided flags. The daemon fails to reconfigure itself if there are conflicts, but it won't stop execution. The list of currently supported options that can be reconfigured is this: - `debug`: it changes the daemon to debug mode when set to true. - `labels`: it replaces the daemon labels with a new set of labels. docker-1.10.3/docs/reference/commandline/diff.md000066400000000000000000000014051267010174400214530ustar00rootroot00000000000000 # diff Usage: docker diff [OPTIONS] CONTAINER Inspect changes on a container's filesystem --help Print usage List the changed files and directories in a container᾿s filesystem There are 3 events that are listed in the `diff`: 1. `A` - Add 2. `D` - Delete 3. `C` - Change For example: $ docker diff 7bb0e258aefe C /dev A /dev/kmsg C /etc A /etc/mtab A /go A /go/src A /go/src/github.com A /go/src/github.com/docker A /go/src/github.com/docker/docker A /go/src/github.com/docker/docker/.git .... docker-1.10.3/docs/reference/commandline/docker_images.gif000066400000000000000000001057111267010174400235110ustar00rootroot00000000000000GIF89aQE !!###))+++!--%22222(66788+;;;;;/@@?@@1CCCCCGHH6JJKKK9MMDEF)"D,OP@`~IIhY 0Dl ,0n'F  {jh3<塐BJ#fZVN h* i:0dKPBêVD5+𪮼Z&j8 7pj88,1C^mN8hRD*q:.N { !eV[NtH~Av1 J!t„nLa #P!#\" :L tf^rNp."`aRwܯ1WXv-N / @@@( K /Pʷ :P0P( *u C0B Z^!F #1="q#P:C0Ѓ/t,@A,[3hQN3>[8 +\lD-8a=RǬ8`.k/#DsJq2`?N*o S)T]D@ xI4>9PI$,6,q*Z#hX! ]@H<@*{QAb-t XvE!V X^ס. 
&8/5_60=]L`mO rPĸ>f`{0M2$"!0 0z2 A _S 96q #-!`zA%/ RH 0Lp1'BCܤ6:!p5B4˖%Bdp~B %Q!mP:+ԃ}CT&6ym<ne3P'm(89L`ADO (jQ'P-h .&t|5ҊZ [Ok X.Ȧ?p@5Q  %&XU h>h iB@ dP 4Ђ K#!A`ylp&4aNqN]I*-/[LC>"̦B &6 H D Zw  ,A|pTnx'4HA0VH9b>xЄ>(, b`h[6S:xHusDT@<'h|@n -*@4 ~ea)c 9 s <#3jl#Ѐ p6hBhp @ (K͙d-fT@  m4.(n+DY>0bud'wFhX$zъn4b`t= X' Z_HԄi ]z>8YUF5c%+[*B `PR )A`brn}G\4ҩ@̥SH }aX.0l3 '-r0n uoiER°^d#܈0T[N;@_kaEpB7+5q"UF(d'nLBeD0y!|*p< bpZs+PZqlpºֹ1#8N(0Ax#-QP휋67-t=} EÄaY0 u'@cK8ջE PCu dJ eoC iz*d[>FH ЀEqDn}aq-pԽ_P JX!^¬=i²B(t{ YF궾Ö^7Yg9}}cEe}1J;ctS~{N<@@u7yDGvvH!7BG}"HtM50 5oy`*$ h |hJ&(2%!!*h8!@dEND`@ D u( ' FMg"HL=Ԇx!Sd%zJeE!%9Mjdp6tPg(HFMxd@/1@1Wg& LnR0!P z8!uTT5xK{P1 T8'R*KNP'!(xA0hNf@@('LpB<DcUi.PI |$֥: >Ɩ 'S@&TNUF0@xB&0ҤvH4rVe?"G+P+07t`J!>YXxU9W dr@Џ^GuOmy$b>Pi4@ 1e Phu!90rL5CE{mA4xJ9`P1ƆI@g)@@W )Wt)JɎ0s`X 1p1 cA8B)__9Қ11(JPPpp)I jɹu y6pN IP!X0%r)(IILP P %X 깝`9*f " f9 G'9+'HuFh9:i$@uɛmM@@% ׂxD3 g5v9^Z9f_Tj4@)&*2 gVAM[E '9ia7 'Pj@*r %?fX&˥I!f p-&ꕚ1gRZ` Q&h! 4d馾 :9'~`uƨP>Э܊fJIࡃX!`-J ڦ_Jfhcy 9@41J 0I*KZ j⸛AS@ v g J >11rkj z.@I yj{*XjNmAЩIAC:A d 2!"tb-pNȉvI c\C`A"n cz{.[I[hGqrM 򸳏Z$w-4c&m `]@_WCsG: B+$j" N20G'x!X[$%()#!+#{qA+ APn'j8+5+r2;pu]G@7(`3; 7pr2^9Y0W!H3"v2ǽ(&T7:xzG,"00!@QyG!X}=>Np)P;@Ϥ &@(6wf  a;|0R&6\39 (@KjC Z1&AxSBBTЫL .[qJ |!RL!TM?W@Nwzr"XZ;hD"w,RYr\Ċrixl!6XlR?JȊ5Q!HA#?w!u$j$ǻ&߶p,PnO\ ʃ,"1c ק!KfE3#0ȼk,MHl 7s}lƷ!N<Όz>j <O Aqi)Ǫ0s"0ǻK،M*@553(LXxRKS2U3CB0&%*-0AM Ԡ\ȏdD@!0lI9ͦ0s0#"I J2ّX5pF@" )R#I!dPVmR;A# P&p-0"PmAI`hm'` b)-f 5K*K!HY3h\fB Ljfzڱ$+*x:Vp1 .@pܼ, {zݑlRqA].`[E\u^<@ =`q^N%`^Cd^Rr洡jnl'n>9r>yR#3s>*|,`;牒wB籑y~$)M#' 3M$!c&4Y\YgvkR^{J>8m#9+5ٲ+hn-"bs$Jz2E)*/,> N<=>3B0Hd IZ;ő`!Kz6C"@ͱ|GG=űv#уBls#"Y 5 Np/-eA), *P^ pkOwH/tHq$5ѲdV<z&T@ SBN*$ӑr8!T7"0ňME5N&/O2L#T5QϸQ#M82NNB( N7N- ;@&-0LP1!K 50N8 & &0;#CF3;NN "%K?&Й #80C?'T 3jȱǎh|)IHz,-fw-m2e̖y!ɣH*]T#MƢ ϫXbũ׉8Jٳhu(|K:DNMLM8}{#c1\ɘ OM(&&cO\5TW N\jGG%-л.Y y+ν{."xW+_R rm I?&)@/`Dm& 6F(FBB4]'$z |ϋ0(4h8昣)VDI9RAPF)TV eX\>x4h*eCi`miF4pg~o*'&f)蠬@!裢)!Avzg x:`Pj'4irs#ڝݬԊ) Yv!M-imZ.{iU@g]  x) f@ 7xD;o o +u}Fob4}D4 K&{1~,Ar)njS2s2~G45 
BbI_Dt~GDMnjjU~.Y|aײ~IHMp۴_ (@6g.ޮ_6)-"v#.?:"2 K>llj8֞g韫١ΫO.p)zwBһW<.?FzJBՇAMpB .D <@+g+ ^޳?;r(@kw'S3ZW&F̀AҌ< S* "oۖA`'Fc!P9kwg;/Nb`dNrV#!׵P6xq9N?8†X$ MPUWU#pH:Qc>.D0yMAtF:򑐌bPg Q!@id;BFIRL*OUvЂ Lb4 UHnhs,0S#e Xfz0Eտd9Yl5KU1'13 ' zpTSԔ51X{*Њ>x #kG`M)t | _1\&8( \u6_Oiρ6#`hA|@&̶MPٔ`Jp ΀˴^YD V ++pBÒ#|^&Mʢ ,fKj:N\^Ҁ6^p3#pgHhB]LA:ě7XWmG\ͻx xp:*(`XcHXP  20#&8L G6`'N`lD wMx6"!I.6?>؂QCߺ;?/{YVNXBx E:,(% @n(y K&;K]dV3ۗ L{s v1+gy_bQAP0~ʀ|1Y aw|OV (xp /cۀ+0 H7'f|N@\/3+Cpw1wA!`$>ܑ>0^9V!b98zӒU)W*~I1>,QW a,zH+Z Aء3U%aR@*v\IDj^HI>6 O B=Cؒ]EF 2R9sICsrLqxz b6Z!=Z6i)RWJ -C=~)1"'a0G᩟ V* 9 >@uZ2O1_:08D `".9#S '@9G@* I@i9!@kԺ*P6Z9J~5 &b.I G M@t@y")I$ v+   O`h AwlQqrjqG -QhK1w1 Sxm\ Dymd)V 9g1Y[+ .@O_P@! Ii5$ `MpN ѵ `+s[÷ `Dp,-Cr c3 O{u rL!BKJБYQ`w[F @E``^6+ aDZ9R>?H"K3 6ʳ6W  ~ HB6n7rۚ л4 & f;PPM0  "H`yJຘu[Z^KvJd]!;9l1pP@C *PRJÝ UPЄp$P3,> 2;Ğ`K\`ŝ6Li$ !z{,07L+`e/>QwyLFmBeF'V m(WVlpl*^!i±m̰9вK3SP'4Tk0l"*@-71 PEM|# `tL@ P;6CPZPBl  ,L7j,x'IA@K +(! Z+@# %+ fL[Of܈&| )?඼ TqUgiRK&.5LP?dct& !SƂae O3y@. 2OԔ6P@TQhkcGP`h= ,4:,PcӫC:? Ƿ}b+ 0~cȋK0:ј,#m &D 0 E kbӝ۴@tq[H :CbJҲ@ O)1۝P+[=u=F]TCՙ2RKV̭Amם3͜ 7M}|5 +qHp]ɗp? XX! * V6"nh ĝ3QІ:#02Ne" zb`"h ԍ)ૐ'V:I$6E?P`O M>|ߞUߟ H 4CP 5 1dIrMZ,qፍY#rAN^1>)\ɂ})# ]sRCAIƞa+1 /:]oL$WE)pN{[K'40UTq,!SS.r9"INV $PE^0Ȯ̾นl,p3ʾ&!Pc &4Ċ!` Ԫx-ٓVWɺڝ C`"*&!2Þ@"  (OARu>8!~qp2srsO㲀$* & $/'A4's2)29 ;W-/k9:?E=F~)P!# L n 10u-b&L3& Z"Q?!IpcAO?|[0&"8Ċ亰Mo^ rݯm" |%Q^k _i1:<9I/ j\j 00P9<pXN@dm-0JN? ;N FN&-(%# ?PIMI1O9)1, APLGP6,$1PGOAOOPP)!, LPhkuD0CU$~=|h#96KٳhӪ]O?Wyʀ UH*YaŎz' * 3Ӄ.d jdCч#Lh*V\w L(#Yv"$+C5o1Qha/US:dͷ|(ᢂOP+#8B. :5PB\&4" ]UvP|4"DUOB(CCN8UAС 4(5YAXQ(_Ǟ 9nDe]w߱rİ .# wީ[jғĐK `b|Q]6U!SuR .B""4BD..eHY*+ PbLPžC紒RĔ;*_p$T)*<`ߙLp PyZiG!D(@g֢&袌6zYf饘fZh  M<jꩬ0!:B%*BBZjnꮼ^D *ɜf&P'ʺr(VkD ц+.3) @ ǖJL9& kᄃ.T*Ÿڠ8,9T <-w kV ',3+Qn&,sRA@4N<\#Ȼ2$<ѡP,n.f@HW] ᖮ;{D  V̄@p5430ذu6a7+v]-@+D @6wL}tIX'> セ2 r "FɣC  f. 
⸰AEDѿ _L 'f60_ e0 p/1CX8O7_B A-SM]`f$(*+/>IA *о C.C2rѮj;uW ~|Rh B KC6Ad  ~z?+QZ2H%(fCx R@$H{B|@{1 t($@ĀG̞p"@Qzۻ `HLPEnBpVNaN ۼ&;@,30A'!HF: P Or(N]rP4_1BY0ЀA"Р:h@n:kc{ RNT1~LAwęgm(T-&.A d(PAh@3"P@0+'Y"Ff"88hBRD@F7я,Kn(rp2lr0D]T01 |@#$M1z6%Dr@ $ |#D"(PP-x l '`AVm xP2NQ&I6h+ N D@H*W蕯}UE(pd*b3NQPJ 7Lvn `b"R:@Q춷 "":*¹S@Rk 1v+>Kh @ةVu!;.$@0 X/AsRŹЍ~)Davp n!8A͍bCF524a< L@ku].V} t p*$"Nr &/_E햟"8^bEx$vL,\x9^Ď:A &@Ae0N8J"dlbjN?d䚢Бܰ0Y3Qg7t" CY+6PI MmWڧRL1y?n[N8"]RDui3[P{M µGA_ BD‚}" `!NI(Bvq#x ¨ťQ TT (GA!DNH (W|s8) :3=-^,&Ah@-#Zȥ . /n6Pc׋z ahf? .A*E uX:WdET;FKq4 p-z_ߞ=_;D$a ݯT?lĀ|شoO<{7uJtpp  ,W8 v3 "!FX {>FPO7((C'x@3q/3J0>: sJ 6# 0TXVxXZ [[`V3wфÄj ML)pr8tXvxx"$z؇~v8 >fHHm;lxSi.'X0TUC,+H%㉦*ђx2,Qx)#:㢋(&WG(.ĸqzΨ)ҌvB:+؍AjbB6*x@ΦEˆh*E2 0)(R )&ؐIA E86P$!IxP, )0H؋8Yp=ppD G=G @Aɔ䘍O ~OI(W ݘbIjp#@hg 㨖h_r tI 6mrٗ_i{Cy sG9bu{_锗I8A_VLY q mDG?s<ͣZ=ŋ=Y{&a.=1PVG1,9=':p:RE'K y77c902uú"7<%7|s1+Z(d1+ L#JBbJ*MkeqaISK"l-׼DG9@iQ!Tӆ*5.I1LN0wTtahCp3QV.JP/J`i69 ë2#C䫆5s3 gŽj-|#0BQ5tP"?@]]Ɛ@ '{AS"/ֲZj*\z3I22(2)m/qsǦ0Fz BaS!Sz&̆k``բ|Z$D 8 [(.(/uEg-qNPR700T 7̐,] n7/wth/@Il ]+r2QL- 0Bl"‡F,z/G;`u.hDFNРP @ ,R=/),elg-"0Q̊A`1ή*Iׇ*pӬ)I RKՐ?PMՆ=Srvʵ<ٕq5EP p-כ,Qu+&kRa5rl  >FŖڑaܡܤfNpb]\tQ0/q)LPX(B&H"v8 5ݤs0\kԍ!@U6&# >omKPrދa3ڸ<=1Pg G0nս6#}]$MnxEf˟ -ݖR+/|JE/㦠3cb;NM|D~hhR)]P}[!|Mqh& OPF;)L@6^ IUK隞< =)qa|4\7>KzNn{HUJ0<\|F`.dîw|8 !{&> 9Nଙ0!a,P5Eeǫw Yد0@F/bIc3A<+p3U׀$P x4@ ?%o!,7i"SSG'vSԟ_b@ 9Lm2P0Pa&UŽAjŽo1`2 Ӂ PI.O><) ! '$9$A$PO,PPP94ύ1N"Ӓ<MI!$4PPL0Q̇ &HAGtq(DR1j@=xM>#3T-Sb"'(BywADx2" <Ą= ZR) &?V*ڹ`ÊKVl:X @.®@%sE/= EoKB!"G(#zZ׉L;ͩDf.L62,BC%D uS&A} `$  ` LZ?!jRyiF+VFH8MQDF*"M旳 {hh@X dd,PioBq狗p|© ,4YC+"ApHh, ұ r(ĔM\PBmF}jE&8笳ֲO#rsMpK$_RW *M0"lc)pohG6dhTP2x-}vYD|J_Mx! 8@}Bԉ*SwEDIWKր\A术3Z\%%a&St3Ԗ辛>ɂux0o٥BBC@񹦮3B'%CO ="w? |c!M@E*[VcG ːl!IG?A9)tiV)7E4d:QqLQaL=rB& ) Y c;HB8A!) D*=…%sTlAƢtqe<>'p\ !iM@ 1!1.>m̳;cc#`E< ›4`&. 
FRdj?sEVG8 'H%#ThcV`4#IO*TO)#8"(ќ <fJPc9 }BLOYO57]D=-L% [|M xBh:~e7`;WٯvW5h˺?',EjWc&>9YKڎf\0)[ # B@gņ${!08^Pd+86v.@x Q*Eu`@n Ċ]`5$\C5nehn{kYGǐ@ͰTpFO`6L3@ k3o`X(`X \'  T_"h 1  y`u@pvk%n@c\  0Uv•oc#%`N@f;XsLe+?0%\Z@7Mb&Nz@ r;O1|y .zf k0`@` kl6v0{%v;?+}vq({oglݪuZ|@o|CꂿBM0Sʮ*\ /k#$@51_vNjis6偎`l": v9d<\uUXAr`EyW7 `9`Pb`QIk; 6X_w> Ȏ-5s'y'68o~ 6Aɏ`_'y{ ./}7 K@gksl#޿ 0;w{KtOEPYOw^qgXO`J6PvҷiM``0GvJ4}HbIDyq}Z$kw&~@rgxNPy=xY\TͅCa߷}zZJFIXOn\\DžYdOS UxVO &;#,@p-XbD B41P"+XGB ju'b #Ep v 6l9'S0 F(lYU.wtQ-h[rs\yx*+0"`~>H{r&Kv 57E0*x'Wc$007\~ }F?@c E5PO9 ;=s` y%/1! <P~uߤ/HY9,M3HA!<`aqrbawZPOx6ۀ_PKq#`?8 6~  \` x`w('C9sD(蚯 cB .yG/!za) PWxe-y Dv[dvMl)'9!T`(n>L"' P6I0%I6*pP$A &y`Q] UQV\ZUgD@$,)@>П>@%p49(*L6ɠl*G Q`i 0i@cmta!si;c ^I.b'HJZo5%6LzP$?L@<@Ky !R,H2D4`69`zH.)p!I@ QI"!C<@قImOްjN 3AFqP$`4j4JTyaDΊ9`)`A^^&r֊j44>Mᥬ3?}j sz;݁ך!.а>pLkC94vB{&F@O= #;LK۴AC±5VkUS\`k#phjl۶npr;t[v`:!~ D*۸;[{r1A<۹~K;[ ˴ۺ[`9ۻ[[5+wțۼJ +}b؛۽ [p軾[X;6R;ۿ&1|k< < \ |, <º+$\&|*<-0)4|c8;><DElGĜ6JYJP\Ň@V Z]I,`Ld g\=jLnlq+<1I6 \90>A@[9Hz $]'I.ZG˫ J.1'&Ӷj"*Iʟm4j9K(}9 :}sMЋ.Qmen#ҶJ /C҅$pK=)H+mUՋIYm؃^m`M5T;ͧMY`:*͂5;L5U6lI/+B,7i,טJH۴D`tU&qyJ;:ڥiJs'6pI=8)К 9P[?H04]ݽ 杞Me&:Ge))'M y"@ڠ4@"ߴ$?;k @g4ੋ!>$Ay(#<ց޽G@>2nV.sw{dX Q UqG(pξGpSjpr2nD @ l8U> ,@j RnI5@±m "\  濮A`=0Q=8lf]DqP>@`޿MP"= !? 
Q\޿O6Q0p}Ev^p񖠕$z n@.AF >;oۦ..* $Gз-=86JEIZ0MA `:r)aр)q!>"fS!"t|'nS0z60P@zq5TL/4U vO_C8' KNsZ4PQ4d'- d(3/W #P\~ ?@}'o273(#E΢mPoo#Rr>pl<-D1p٢ 9P ODI!LG9>)P L̙PPMM4Q.I͛8p ˞X$JPX*W ݌ :% YդVvBϤJH{r(C H$ s]z @`@$G\ܹ\hM)N1ڪ^ͺu-\o6ADA%$iӆBPp.]ljY~7ܻ ɥnB4䲧nOB.@$" &I @7 H2m z{)gYO<څf)5G wEV(Bq9^{8 wc#bM)8C8mA&hx  d$` M~N5b)]DiH,>YD@bdxK$4 !g(tt$-r\rI "O`~F:&|Y 8 tle'#!íx Ttq} 0rJZ44U`Ҟf-)xJ Ml.j!4B!-*Pl;/A۬TlNP#DDC¢ޓ5to!``ֱc!PbaYMc4|7E,OF͙ :cjM˵u-=0@7쯼P$+s<9F!??Lq/cD[{,Vpiq_=!M҈@ǐuwDu֬6a3"4tEPxa Mb.RLvCI:R~TTu->41,h"o!$/90@/D, //P`CG, v MD:ןp=St;wK $v}H "Ӥxx gYpx'Or`X< O`QR;I )؀d PKP sCU ,@TPE8!!!-ay|Z &6G28.ңa ',/b_\z*pt3asHűG2"0!&|OBp2HĆA9 & CbB'iRVrIgIKZ0xLp%vLxe>\ 5R/qLJ&9 7f貖ؔE,6lzINW$8EC@r39A(IY:$e=Ʉ,JGpUYR,BkF /@̞qN hf>pTH#Gr=)@T2i@TJ hVy2, N#6S$`1p>&:HOoB,jMpKU\d1sR"hOOlMh0ּ" 4(VNV:6O> bG>vLAe2F) 1K<@V k6YA@hW4btW 7 x U6IcRX:WCOo[&r Qn6R@x4 H82F/. f[ X"2BD`$`Af@;۝IB, fV@#.u fMk[:1wIlT5@n]nBg`1Te# n1h! Fl6g)Mu[/ڎ4xM6?池9ɨ9'E?cNԣNu O:ַnZs]5^fOLѮp/n;s^z{+//Ex3"Z<'xSnͫ"M@?y0}U˂g Ͻ(pwPޅ|/~w+_o{ԧ<Ǿ쯾c~7X~ͯGn 7g{h8 G7B'8yjOcׁw'8B }&m({072}8xG:{j1w4vqDzJxGLzBӄ7YQ7&-RV|\^{B1WYqЗ*&gFzG@A><>8HGׇX؈vL@6jpgGlxxr6qggF`wwъ1@qDϕfg׶Ɗ'k +AfL`JAgkq7`g"ijEaFXkCט8gohjfTI.phƶh6@lpXGvGS+OO{9tI*$@)sX@wBpv>`P`I4w7F#xO) 2ZuEz 8$x?6H 79!'.j~ǥ>qkh*n=1CO\[,$EQeO;'50 .d9!pTΡU >fH^\YQ>Ɲpgm"1zb3FS;"䅂m 06I<M8VtD ?pH:00:#)'iTj wD:J4ⅳQmRۊ aa Pu]"9L*/`BV" 9`>`W&QA7$tu 7 J!,K `*I(*Ti+/1?A'>A ,7`  O2 UB@[B6 l^&@~".q $gG$,3AR ò?m sKj$ #T|{!i;xt8 eq 'OA>>P>V >&,20A1'A1kUA 8r$M(|tV!br5A #a[\Q[YAnkE@ -1E9^:(a>MVW E&"bCBEA! ٛLB0 L2|K΁_q1Ma[dA xKDZ"л.,`O$dFE`8ġ"P#]59 3w"k* Cl 4R'J B@;1Ƈ` WlP 1@ m!*M.4a58TjBp4"j\W)eΛCCCfҸX?*A'A2۵DXٗ- MU>3TZDAASɍw=f] G`id>{GY3Dk;(;.fGИFlFvնH3qI8}|=a< TdEQ,DHz1P$EDD@HAߝjDlhnԻT[4$Q KLmqTzFA@g鈮ЉIA dJN'!P@L`.?G1J>-v.O^oNqvsu> s-szy~,.v2 -岘jnM)GNG;:t1,P ꋱt V: hei0'Pv٬!T48>3A̹s.3W4"?`lL`! 
hמ)$La"~X $JF:1`E1`_Y'i"6e y2) ;fSZ;e Fa \vDZPIFefCFe0cG@{S7_^ebe0F^7Mɓ d]Fc6`dadF6Ȏf2i?j?vh3bfdefj Gj$D;vknk?3`sGulQ̆:YVjf_ƍ;*lRotk6 r%g wr8 mn./_ٿOp8ror gC-rPPÛȍЕԿ۰ߩ h`@*woa=#Jq"3jDqCp$8&Sr.cTr6s,sg7> esh8F&-%ҦPu2i*իbդuׇO*:fM˖bضM.]ogw~뻗õ U+s!;|Jr[˔3WΠ1}6;:iϜO'*5"_aV$"dԾ>+4_ |K!q  7 u&3IO7ߠJ'hwBz1:BvD|M$("g@}hW"q)gv6th"!GwD}Ohۉ0*Pmp\u0vBq.q9#|sH'dZMp D)TViXJ9^\ve`CWewK0&)tix)y&mU빗&VI`3,ĤVj饘f馜v)K$aM0qAŨDš 1XB'l@ºA@6ޠ4ל M<@BX؀,LL , @!P<2=k`CC ]x >hM>ؐWÊ[C2m,7DBH"DOD؍4B MԋsjrCD'D@J 4 {?A' `$!GG 0|˗<L)@'IOp :lDt BfR!Pt"x^nPJa A&h` gI A;H V`N EBpzM9 Co8@A P) gdc XRZD' -P4`!d T@t͈. xP| ЩX̔#dDB(IS؁&7VJn&'UL `~Sd#<'@Yp@L4)LjJ\irS?ly!`̦$N'(j4bhqJΣՀydS֬^``sRJ@k\[ L)L@*! ЇN2ԡ,@;Pq(!4U:A " )D`IQ dh#(0jԎ ^u"a@96C500Z / >#CT:TpɦQN YW@ 5 ~BQ0թb Y}\ l8E˻N%,T:GQpB 8ac٩ ?5c(e=P'-*ZI8;e^JAv I  xA8~򕟘A11/$e A1tRBjSOte9 -w^iOB ҃ ASC!!H Iͯ~( S0G1 ,%E9-x4>4/.#61pwWq ijHRy 3(wrzW6@!( #Bw3 M4'P'9( >F8789()p(MP!`}3sO(QxY$G ǂ-lxF @Y!MH$w#"Hh@G "A>)p}P@u88Γa#90P2-!GPP&)!`9`V=}3iX@卉",~r8/M/Y8p(7#DP0xw@AzgR@HI Oo(X)$) M`w PЍđTX7xhay@&P(YuWuI B9zU W9)[) G@O`94,p'p7U9zU(kG C|9"r]IBصF))7_2C#]哘6> }=S*Ћ>`w(P$16 Ircyu,`rvבyY )lD\EIW^[E[)]Ɋ IEOpLw@GD7 j#J 9@ I)M)tOZ)D⣟#頩!֙g)S)mJ z);E@/.JL0/`Lz :`h(t"ERdqE)H(0f{jVUC@N'5a5d3J)C#Pg;0E):`|j]BCJ 9`0$Pt!~'SkE"0/lV5UN*X%@GDMq%UTN[.D\kIG@230竻NZ2o`2rxz6WPņ)2)8#3Je^fZ䯓Nn*[B[\AjltjtSOc$N@N@FY <(IAg讍ٖfEoNp^:NFd:_7)=RC^uG\F0EB {{"?A{P)y)P!e:4'벊\j)fJGv@v9IJz+D+e.5be8 (5)PN}JY)P`@E?PAY >UOydtK)vJ:)8`WEBOV&@)M_C >@yL@p@ @y)oeyg|+JE+G[tZI J)2`˸m[)& yǠDHգ=!6 )@"Zu3mIZ)%C-@I]Jfĸ!Cc;L)A;)x L AmD7`DuSWu2 UjG3`uNp<#*uTXs{/PKc#/a:& 27Z)*@.jC &3$7.Y 72PixC5k'.+A$2-rpzS6)@>| wm/z۲ -9]Sbw.'.Ik4@F64!c-'-2]2av .!- #) ^Ԯ 4G0-4`H-a/ q0I2w.F={ 0a'9 |6FiMت@r6؆CDgz)`*u}ڨڪڬڮڰۨ]RF٠ O_=]}ȝʽѹٻ=Ǡ} ؽݺ`݋=A}/ =M}߻-v>\Ի ^/zU j8$~z&~*c,0T2>T6.: ]9N-СFnt PN0UĜ"!^<=c;byM$l~cquyIDx%>脾%~n%mTQjRR[Rn#'^'% ,>.0Pه)%YJ@/ꇹp.+ BC@>)m b>1|&2ADΎMD1!>^KEᎌ. 
{c$`Ȃ(|/{3 tBVB'DB#CB"T){:2+B8 s8כ,HnJPQ@SK7jNƑbdddv^d} 2_){G_ @:_)5&*1Req[)tdJCPR͞tECK Rft8CU?]j["¿sSMJ־MZjNN0OpC83vO34wF褷BZ- ZFQg5S:Sܶ,ҧw/ R+VBgD%0*I]UuUYURYMeX?CItd BNp@{^A(P/@^H 1Z0 b)LHZ ]0eR'BNJQ4]a _Cxt*J &Y' ^&Rh GpaJРp( f xvOh@4\r!1I Ѐ'gkB`OvmbqL=c ظPf %[9 5 kN%a3hLf@h@:xγ>π l'MJ[Ҙδ7NK&1MRԨNWVհgMZָG ;docker-1.10.3/docs/reference/commandline/events.md000066400000000000000000000201301267010174400220430ustar00rootroot00000000000000 # events Usage: docker events [OPTIONS] Get real time events from the server -f, --filter=[] Filter output based on conditions provided --help Print usage --since="" Show all events created since timestamp --until="" Stream events until this timestamp Docker containers report the following events: attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update Docker images report the following events: delete, import, pull, push, tag, untag Docker volumes report the following events: create, mount, unmount, destroy Docker networks report the following events: create, connect, disconnect, destroy The `--since` and `--until` parameters can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine’s time. If you do not provide the --since option, the command returns only new and/or live events. Supported formats for date formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be used if you do not provide either a `Z` or a `+-00:00` timezone offset at the end of the timestamp. 
When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long. ## Filtering The filtering flag (`-f` or `--filter`) format is of "key=value". If you would like to use multiple filters, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) Using the same filter multiple times will be handled as a *OR*; for example `--filter container=588a23dac085 --filter container=a8f7720b8c22` will display events for container 588a23dac085 *OR* container a8f7720b8c22 Using multiple filters will be handled as a *AND*; for example `--filter container=588a23dac085 --filter event=start` will display events for container container 588a23dac085 *AND* the event type is *start* The currently supported filters are: * container (`container=`) * event (`event=`) * image (`image=`) * label (`label=` or `label==`) * type (`type=`) * volume (`volume=`) * network (`network=`) ## Examples You'll need two shells for this example. **Shell 1: Listening for events:** $ docker events **Shell 2: Start and Stop containers:** $ docker start 4386fb97867d $ docker stop 4386fb97867d $ docker stop 7805c1d35632 **Shell 1: (Again .. 
now showing events):** 2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) **Show events in the past from a specified time:** $ docker events --since 1378216169 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) $ docker events --since '2013-09-03' 2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) $ docker events --since '2013-09-03T15:49:29' 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) This example outputs all events that were generated in the last 3 minutes, relative to the current time on the client machine: $ docker events --since '3m' 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:52:12.999999999Z07:00 container 
stop 4386fb97867d (image=ubuntu-1:14.04) 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) **Filter events:** $ docker events --filter 'event=stop' 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) $ docker events --filter 'image=ubuntu-1:14.04' 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) $ docker events --filter 'container=7805c1d35632' 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) $ docker events --filter 'container=container_1' --filter 'container=container_2' 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) $ docker events --filter 'type=volume' 
2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) $ docker events --filter 'type=network' 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) docker-1.10.3/docs/reference/commandline/exec.md000066400000000000000000000037451267010174400215000ustar00rootroot00000000000000 # exec Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] Run a command in a running container -d, --detach Detached mode: run command in the background --detach-keys Specify the escape key sequence used to detach a container --help Print usage -i, --interactive Keep STDIN open even if not attached --privileged Give extended Linux capabilities to the command -t, --tty Allocate a pseudo-TTY -u, --user= Username or UID (format: [:]) The `docker exec` command runs a new command in a running container. The command started using `docker exec` only runs while the container's primary process (`PID 1`) is running, and it is not restarted if the container is restarted. 
If the container is paused, then the `docker exec` command will fail with an error: $ docker pause test test $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 1ae3b36715d2 ubuntu:latest "bash" 17 seconds ago Up 16 seconds (Paused) test $ docker exec test ls FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec $ echo $? 1 ## Examples $ docker run --name ubuntu_bash --rm -i -t ubuntu bash This will create a container named `ubuntu_bash` and start a Bash session. $ docker exec -d ubuntu_bash touch /tmp/execWorks This will create a new file `/tmp/execWorks` inside the running container `ubuntu_bash`, in the background. $ docker exec -it ubuntu_bash bash This will create a new Bash session in the container `ubuntu_bash`. docker-1.10.3/docs/reference/commandline/export.md000066400000000000000000000017621267010174400220720ustar00rootroot00000000000000 # export Usage: docker export [OPTIONS] CONTAINER Export the contents of a container's filesystem as a tar archive --help Print usage -o, --output="" Write to a file, instead of STDOUT The `docker export` command does not export the contents of volumes associated with the container. If a volume is mounted on top of an existing directory in the container, `docker export` will export the contents of the *underlying* directory, not the contents of the volume. Refer to [Backup, restore, or migrate data volumes](../../userguide/containers/dockervolumes.md#backup-restore-or-migrate-data-volumes) in the user guide for examples on exporting data in a volume. 
## Examples $ docker export red_panda > latest.tar Or $ docker export --output="latest.tar" red_panda docker-1.10.3/docs/reference/commandline/history.md000066400000000000000000000037311267010174400222500ustar00rootroot00000000000000 # history Usage: docker history [OPTIONS] IMAGE Show the history of an image -H, --human=true Print sizes and dates in human readable format --help Print usage --no-trunc Don't truncate output -q, --quiet Only show numeric IDs To see how the `docker:latest` image was built: $ docker history docker IMAGE CREATED CREATED BY SIZE COMMENT 3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B 8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB 4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB 750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi +++ title = "images" description = "The images command description and usage" keywords = ["list, docker, images"] [menu.main] parent = "smn_cli" +++ # images Usage: docker images [OPTIONS] [REPOSITORY[:TAG]] List images -a, --all Show all images (default hides intermediate images) --digests Show digests -f, --filter=[] Filter output based on conditions provided --help Print usage --no-trunc Don't truncate output -q, --quiet Only show numeric IDs The default `docker images` will show all top level images, their repository and tags, and their size. Docker images have intermediate layers that increase reusability, decrease disk usage, and speed up `docker build` by allowing each step to be cached. These intermediate layers are not shown by default. The `SIZE` is the cumulative space taken up by the image and all its parent images. This is also the disk space used by the contents of the Tar file created when you `docker save` an image. An image will be listed more than once if it has multiple repository names or tags. 
This single image (identifiable by its matching `IMAGE ID`) uses up the `SIZE` listed only once. ### Listing the most recently created images $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE 77af4d6b9913 19 hours ago 1.089 GB committ latest b6fa739cedf5 19 hours ago 1.089 GB 78a85c484f71 19 hours ago 1.089 GB docker latest 30557a29d5ab 20 hours ago 1.089 GB 5ed6274db6ce 24 hours ago 1.089 GB postgres 9 746b819f315e 4 days ago 213.4 MB postgres 9.3 746b819f315e 4 days ago 213.4 MB postgres 9.3.5 746b819f315e 4 days ago 213.4 MB postgres latest 746b819f315e 4 days ago 213.4 MB ### Listing images by name and tag The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument that restricts the list to images that match the argument. If you specify `REPOSITORY`but no `TAG`, the `docker images` command lists all images in the given repository. For example, to list all images in the "java" repository, run this command : $ docker images java REPOSITORY TAG IMAGE ID CREATED SIZE java 8 308e519aac60 6 days ago 824.5 MB java 7 493d82594c15 3 months ago 656.3 MB java latest 2711b1d6f3aa 5 months ago 603.9 MB The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, `docker images jav` does not match the image `java`. If both `REPOSITORY` and `TAG` are provided, only images matching that repository and tag are listed. To find all local images in the "java" repository with tag "8" you can use: $ docker images java:8 REPOSITORY TAG IMAGE ID CREATED SIZE java 8 308e519aac60 6 days ago 824.5 MB If nothing matches `REPOSITORY[:TAG]`, the list is empty. 
$ docker images java:0 REPOSITORY TAG IMAGE ID CREATED SIZE ## Listing the full length image IDs $ docker images --no-trunc REPOSITORY TAG IMAGE ID CREATED SIZE 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB ## Listing image digests Images that use the v2 or later format have a content-addressable identifier called a `digest`. As long as the input used to generate the image is unchanged, the digest value is predictable. To list image digest values, use the `--digests` flag: $ docker images --digests REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB When pushing or pulling to a 2.0 registry, the `push` or `pull` command output includes the image digest. You can `pull` using a digest value. You can also reference by digest in `create`, `run`, and `rmi` commands, as well as the `FROM` image reference in a Dockerfile. ## Filtering The filtering flag (`-f` or `--filter`) format is of "key=value". 
If there is more than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) The currently supported filters are: * dangling (boolean - true or false) * label (`label=` or `label==`) ##### Untagged images (dangling) $ docker images --filter "dangling=true" REPOSITORY TAG IMAGE ID CREATED SIZE 8abc22fbb042 4 weeks ago 0 B 48e5f45168b9 4 weeks ago 2.489 MB bf747efa0e2f 4 weeks ago 0 B 980fe10e5736 12 weeks ago 101.4 MB dea752e4e117 12 weeks ago 101.4 MB 511136ea3c5a 8 months ago 0 B This will display untagged images, that are the leaves of the images tree (not intermediary layers). These images occur when a new build of an image takes the `repo:tag` away from the image ID, leaving it as `:` or untagged. A warning will be issued if trying to remove an image when a container is presently using it. By having this flag it allows for batch cleanup. Ready for use by `docker rmi ...`, like: $ docker rmi $(docker images -f "dangling=true" -q) 8abc22fbb042 48e5f45168b9 bf747efa0e2f 980fe10e5736 dea752e4e117 511136ea3c5a NOTE: Docker will warn you if any containers exist that are using these untagged images. ##### Labeled images The `label` filter matches images based on the presence of a `label` alone or a `label` and a value. The following filter matches images with the `com.example.version` label regardless of its value. $ docker images --filter "label=com.example.version" REPOSITORY TAG IMAGE ID CREATED SIZE match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB match-me-2 latest eeae25ada2aa About a minute ago 188.3 MB The following filter matches images with the `com.example.version` label with the `1.0` value. $ docker images --filter "label=com.example.version=1.0" REPOSITORY TAG IMAGE ID CREATED SIZE match-me latest eeae25ada2aa About a minute ago 188.3 MB In this example, with the `0.1` value, it returns an empty set because no matches were found. 
$ docker images --filter "label=com.example.version=0.1" REPOSITORY TAG IMAGE ID CREATED SIZE ## Formatting The formatting option (`--format`) will pretty print container output using a Go template. Valid placeholders for the Go template are listed below: Placeholder | Description ---- | ---- `.ID` | Image ID `.Repository` | Image repository `.Tag` | Image tag `.Digest` | Image digest `.CreatedSince` | Elapsed time since the image was created. `.CreatedAt` | Time when the image was created. `.Size` | Image disk size. When using the `--format` option, the `image` command will either output the data exactly as the template declares or, when using the `table` directive, will include column headers as well. The following example uses a template without headers and outputs the `ID` and `Repository` entries separated by a colon for all images: $ docker images --format "{{.ID}}: {{.Repository}}" 77af4d6b9913: b6fa739cedf5: committ 78a85c484f71: 30557a29d5ab: docker 5ed6274db6ce: 746b819f315e: postgres 746b819f315e: postgres 746b819f315e: postgres 746b819f315e: postgres To list all images with their repository and tag in a table format you can use: $ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" IMAGE ID REPOSITORY TAG 77af4d6b9913 b6fa739cedf5 committ latest 78a85c484f71 30557a29d5ab docker latest 5ed6274db6ce 746b819f315e postgres 9 746b819f315e postgres 9.3 746b819f315e postgres 9.3.5 746b819f315e postgres latest docker-1.10.3/docs/reference/commandline/import.md000066400000000000000000000043751267010174400220660ustar00rootroot00000000000000 # import Usage: docker import file|URL|- [REPOSITORY[:TAG]] Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. 
-c, --change=[] Apply specified Dockerfile instructions while importing the image --help Print usage -m, --message= Set commit message for imported image You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The `URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a filesystem or to an individual file on the Docker host. If you specify an archive, Docker untars it in the container relative to the `/` (root). If you specify an individual file, you must specify the full path within the host. To import from a remote location, specify a `URI` that begins with the `http://` or `https://` protocol. The `--change` option will apply `Dockerfile` instructions to the image that is created. Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` ## Examples **Import from a remote location:** This will create a new untagged image. $ docker import http://example.com/exampleimage.tgz **Import from a local file:** Import to docker via pipe and `STDIN`. $ cat exampleimage.tgz | docker import - exampleimagelocal:new Import with a commit message $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new Import to docker from a local archive. $ docker import /path/to/exampleimage.tgz **Import from a local directory:** $ sudo tar -c . | docker import - exampleimagedir **Import from a local directory with new configurations:** $ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir Note the `sudo` in this example – you must preserve the ownership of the files (especially root ownership) during the archiving with tar. If you are not root (or the sudo command) when you tar, then the ownerships might not get preserved. 
docker-1.10.3/docs/reference/commandline/index.md000066400000000000000000000040701267010174400216530ustar00rootroot00000000000000 # The Docker commands This section contains reference information on using Docker's command line client. Each command has a reference page along with samples. If you are unfamiliar with the command line, you should start by reading about how to [Use the Docker command line](cli.md). You start the Docker daemon with the command line. How you start the daemon affects your Docker containers. For that reason you should also make sure to read the [`daemon`](daemon.md) reference page. ### Docker management commands * [daemon](daemon.md) * [info](info.md) * [inspect](inspect.md) * [version](version.md) ### Image commands * [build](build.md) * [commit](commit.md) * [export](export.md) * [history](history.md) * [images](images.md) * [import](import.md) * [load](load.md) * [rmi](rmi.md) * [save](save.md) * [tag](tag.md) ### Container commands * [attach](attach.md) * [cp](cp.md) * [create](create.md) * [diff](diff.md) * [events](events.md) * [exec](exec.md) * [kill](kill.md) * [logs](logs.md) * [pause](pause.md) * [port](port.md) * [ps](ps.md) * [rename](rename.md) * [restart](restart.md) * [rm](rm.md) * [run](run.md) * [start](start.md) * [stats](stats.md) * [stop](stop.md) * [top](top.md) * [unpause](unpause.md) * [update](update.md) * [wait](wait.md) ### Hub and registry commands * [login](login.md) * [logout](logout.md) * [pull](pull.md) * [push](push.md) * [search](search.md) ### Network and connectivity commands * [network_connect](network_connect.md) * [network_create](network_create.md) * [network_disconnect](network_disconnect.md) * [network_inspect](network_inspect.md) * [network_ls](network_ls.md) * [network_rm](network_rm.md) ### Shared data volume commands * [volume_create](volume_create.md) * [volume_inspect](volume_inspect.md) * [volume_ls](volume_ls.md) * [volume_rm](volume_rm.md) 
docker-1.10.3/docs/reference/commandline/info.md000066400000000000000000000031501267010174400214750ustar00rootroot00000000000000 # info Usage: docker info [OPTIONS] Display system-wide information --help Print usage For example: $ docker -D info Containers: 14 Running: 3 Paused: 1 Stopped: 10 Images: 52 Server Version: 1.9.0 Storage Driver: aufs Root Dir: /var/lib/docker/aufs Backing Filesystem: extfs Dirs: 545 Dirperm1 Supported: true Execution Driver: native-0.2 Logging Driver: json-file Plugins: Volume: local Network: bridge null host Kernel Version: 3.19.0-22-generic OSType: linux Architecture: x86_64 Operating System: Ubuntu 15.04 CPUs: 24 Total Memory: 62.86 GiB Name: docker ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S Debug mode (server): true File Descriptors: 59 Goroutines: 159 System Time: 2015-09-23T14:04:20.699842089+08:00 EventsListeners: 0 Init SHA1: Init Path: /usr/bin/docker Docker Root Dir: /var/lib/docker Http Proxy: http://test:test@localhost:8080 Https Proxy: https://test:test@localhost:8080 WARNING: No swap limit support Username: svendowideit Registry: [https://index.docker.io/v1/] Labels: storage=ssd The global `-D` option tells all `docker` commands to output debug information. When sending issue reports, please use `docker version` and `docker -D info` to ensure we know how your setup is configured. docker-1.10.3/docs/reference/commandline/inspect.md000066400000000000000000000053351267010174400222160ustar00rootroot00000000000000 # inspect Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] Return low-level information on a container or image -f, --format="" Format the output using the given go template --help Print usage --type=container|image Return JSON for specified type, permissible values are "image" or "container" -s, --size Display total file sizes if the type is container By default, this will render all results in a JSON array. 
If the container and image have the same name, this will return container JSON for unspecified type. If a format is specified, the given template will be executed for each result. Go's [text/template](http://golang.org/pkg/text/template/) package describes all the details of the format. ## Examples **Get an instance's IP address:** For the most part, you can pick out any field from the JSON in a fairly straightforward manner. $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID **Get an instance's MAC Address:** For the most part, you can pick out any field from the JSON in a fairly straightforward manner. $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID **Get an instance's log path:** $ docker inspect --format='{{.LogPath}}' $INSTANCE_ID **List All Port Bindings:** One can loop over arrays and maps in the results to produce simple text output: $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID **Find a Specific Port Mapping:** The `.Field` syntax doesn't work when the field name begins with a number, but the template language's `index` function does. The `.NetworkSettings.Ports` section contains a map of the internal port mappings to a list of external address/port objects. To grab just the numeric public port, you use `index` to find the specific port map, and then `index` 0 contains the first object inside of that. Then we ask for the `HostPort` field to get the public address. $ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID **Get a subsection in JSON format:** If you request a field which is itself a structure containing other fields, by default you get a Go-style dump of the inner values. Docker adds a template function, `json`, which can be applied to get results in JSON format. 
$ docker inspect --format='{{json .Config}}' $INSTANCE_ID docker-1.10.3/docs/reference/commandline/kill.md000066400000000000000000000013641267010174400215020ustar00rootroot00000000000000 # kill Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] Kill a running container using SIGKILL or a specified signal --help Print usage -s, --signal="KILL" Signal to send to the container The main process inside the container will be sent `SIGKILL`, or any signal specified with option `--signal`. > **Note:** > `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of `/bin/sh -c`, > which does not pass signals. This means that the executable is not the container’s PID 1 > and does not receive Unix signals. docker-1.10.3/docs/reference/commandline/load.md000066400000000000000000000027271267010174400214720ustar00rootroot00000000000000 # load Usage: docker load [OPTIONS] Load an image from a tar archive or STDIN --help Print usage -i, --input="" Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz Loads a tarred repository from a file or the standard input stream. Restores both images and tags. $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE $ docker load < busybox.tar.gz $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB $ docker load --input fedora.tar $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB fedora rawhide 0d20aec6529d 7 weeks ago 387 MB fedora 20 58394af37342 7 weeks ago 385.5 MB fedora heisenbug 58394af37342 7 weeks ago 385.5 MB fedora latest 58394af37342 7 weeks ago 385.5 MB docker-1.10.3/docs/reference/commandline/login.md000066400000000000000000000025741267010174400216630ustar00rootroot00000000000000 # login Usage: docker login [OPTIONS] [SERVER] Register or log in to a Docker registry server, if no server is specified "https://index.docker.io/v1/" is the default. 
-e, --email="" Email --help Print usage -p, --password="" Password -u, --username="" Username If you want to login to a self-hosted registry you can specify this by adding the server name. example: $ docker login localhost:8080 `docker login` requires user to use `sudo` or be `root`, except when: 1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. 2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details. You can log into any public or private repository for which you have credentials. When you log in, the command stores encoded credentials in `$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. > **Note**: When running `sudo docker login` credentials are saved in `/root/.docker/config.json`. > docker-1.10.3/docs/reference/commandline/logout.md000066400000000000000000000006551267010174400220620ustar00rootroot00000000000000 # logout Usage: docker logout [SERVER] Log out from a Docker registry, if no server is specified "https://index.docker.io/v1/" is the default. --help Print usage For example: $ docker logout localhost:8080 docker-1.10.3/docs/reference/commandline/logs.md000066400000000000000000000041741267010174400215150ustar00rootroot00000000000000 # logs Usage: docker logs [OPTIONS] CONTAINER Fetch the logs of a container -f, --follow Follow log output --help Print usage --since="" Show logs since timestamp -t, --timestamps Show timestamps --tail="all" Number of lines to show from the end of the logs > **Note**: this command is available only for containers with `json-file` and > `journald` logging drivers. The `docker logs` command batch-retrieves logs present at the time of execution. The `docker logs --follow` command will continue streaming the new output from the container's `STDOUT` and `STDERR`. 
Passing a negative number or a non-integer to `--tail` is invalid and the value is set to `all` in that case. The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) , for example `2014-09-16T06:17:46.000000000Z`, to each log entry. To ensure that the timestamps are aligned the nano-second part of the timestamp will be padded with zero when necessary. The `--since` option shows only the container logs generated after a given date. You can specify the date as an RFC 3339 date, a UNIX timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date format you may also use RFC3339Nano, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be used if you do not provide either a `Z` or a `+-00:00` timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long. You can combine the `--since` option with either or both of the `--follow` or `--tail` options. docker-1.10.3/docs/reference/commandline/network_connect.md000066400000000000000000000065451267010174400237570ustar00rootroot00000000000000 # network connect Usage: docker network connect [OPTIONS] NETWORK CONTAINER Connects a container to a network --alias=[] Add network-scoped alias for the container --help Print usage --ip IPv4 Address --ip6 IPv6 Address --link=[] Add a link to another container Connects a container to a network. You can connect a container by name or by ID. Once connected, the container can communicate with other containers in the same network. 
```bash $ docker network connect multi-host-network container1 ``` You can also use the `docker run --net=` option to start a container and immediately connect it to a network. ```bash $ docker run -itd --net=multi-host-network busybox ``` You can specify the IP address you want to be assigned to the container's interface. ```bash $ docker network connect --ip 10.10.36.122 multi-host-network container2 ``` You can use `--link` option to link another container with a prefered alias ```bash $ docker network connect --link container1:c1 multi-host-network container2 ``` `--alias` option can be used to resolve the container by another name in the network being connected to. ```bash $ docker network connect --alias db --alias mysql multi-host-network container2 ``` You can pause, restart, and stop containers that are connected to a network. Paused containers remain connected and can be revealed by a `network inspect`. When the container is stopped, it does not appear on the network until you restart it. If specified, the container's IP address(es) is reapplied when a stopped container is restarted. If the IP address is no longer available, the container fails to start. One way to guarantee that the IP address is available is to specify an `--ip-range` when creating the network, and choose the static IP address(es) from outside that range. This ensures that the IP address is not given to another container while this container is not on the network. ```bash $ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network ``` ```bash $ docker network connect --ip 172.20.128.2 multi-host-network container2 ``` To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. Once connected in network, containers can communicate using only another container's IP address or name. 
For `overlay` networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way. You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. ## Related information * [network inspect](network_inspect.md) * [network create](network_create.md) * [network disconnect](network_disconnect.md) * [network ls](network_ls.md) * [network rm](network_rm.md) * [Understand Docker container networks](../../userguide/networking/dockernetworks.md) * [Work with networks](../../userguide/networking/work-with-networks.md) docker-1.10.3/docs/reference/commandline/network_create.md000066400000000000000000000163451267010174400235700ustar00rootroot00000000000000 # network create Usage: docker network create [OPTIONS] NETWORK-NAME Creates a new network with a name specified by the user --aux-address=map[] Auxiliary ipv4 or ipv6 addresses used by network driver -d --driver=DRIVER Driver to manage the Network bridge or overlay. The default is bridge. --gateway=[] ipv4 or ipv6 Gateway for the master subnet --help Print usage --internal Restricts external access to the network --ip-range=[] Allocate container ip from a sub-range --ipam-driver=default IP Address Management Driver --ipam-opt=map[] Set custom IPAM driver specific options -o --opt=map[] Set custom driver specific options --subnet=[] Subnet in CIDR format that represents a network segment Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the built-in network drivers. If you have installed a third party or your own custom network driver you can specify that `DRIVER` here also. If you don't specify the `--driver` option, the command automatically creates a `bridge` network for you. When you install Docker Engine it creates a `bridge` network automatically. 
This network corresponds to the `docker0` bridge that Engine has traditionally relied on. When launch a new container with `docker run` it automatically connects to this bridge network. You cannot remove this default bridge network but you can create new ones using the `network create` command. ```bash $ docker network create -d bridge my-bridge-network ``` Bridge networks are isolated networks on a single Engine installation. If you want to create a network that spans multiple Docker hosts each running an Engine, you must create an `overlay` network. Unlike `bridge` networks overlay networks require some pre-existing conditions before you can create one. These conditions are: * Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores. * A cluster of hosts with connectivity to the key-value store. * A properly configured Engine `daemon` on each host in the cluster. The `docker daemon` options that support the `overlay` network are: * `--cluster-store` * `--cluster-store-opt` * `--cluster-advertise` To read more about these options and how to configure them, see ["*Get started with multi-host network*"](../../userguide/networking/get-started-overlay.md). It is also a good idea, though not required, that you install Docker Swarm on to manage the cluster that makes up your network. Swarm provides sophisticated discovery and server management that can assist your implementation. Once you have prepared the `overlay` network prerequisites you simply choose a Docker host in the cluster and issue the following to create the network: ```bash $ docker network create -d overlay my-multihost-network ``` Network names must be unique. The Docker daemon attempts to identify naming conflicts but this is not guaranteed. It is the user's responsibility to avoid name conflicts. ## Connect containers When you start a container use the `--net` flag to connect it to a network. This adds the `busybox` container to the `mynet` network. 
```bash $ docker run -itd --net=mynet busybox ``` If you want to add a container to a network after the container is already running use the `docker network connect` subcommand. You can connect multiple containers to the same network. Once connected, the containers can communicate using only another container's IP address or name. For `overlay` networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way. You can disconnect a container from a network using the `docker network disconnect` command. ## Specifying advanced options When you create a network, Engine creates a non-overlapping subnetwork for the network by default. This subnetwork is not a subdivision of an existing network. It is purely for ip-addressing purposes. You can override this default and specify subnetwork values directly using the the `--subnet` option. On a `bridge` network you can only create a single subnet: ```bash docker network create -d --subnet=192.168.0.0/16 ``` Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` options. ```bash network create --driver=bridge --subnet=172.28.0.0/16 --ip-range=172.28.5.0/24 --gateway=172.28.5.254 br0 ``` If you omit the `--gateway` flag the Engine selects one for you from inside a preferred pool. For `overlay` networks and for network driver plugins that support it you can create multiple subnetworks. ```bash docker network create -d overlay --subnet=192.168.0.0/16 --subnet=192.170.0.0/16 --gateway=192.168.0.100 --gateway=192.170.0.100 --ip-range=192.168.1.0/24 --aux-address a=192.168.1.5 --aux-address b=192.168.1.6 --aux-address a=192.170.1.5 --aux-address b=192.170.1.6 my-multihost-network ``` Be sure that your subnetworks do not overlap. If they do, the network create fails and Engine returns an error. # Bridge driver options When creating a custom network, the default network driver (i.e. 
`bridge`) has additional options that can be passed. The following are those options and the equivalent docker daemon flags used for docker0 bridge: | Option | Equivalent | Description | |--------------------------------------------------|-------------|-------------------------------------------------------| | `com.docker.network.bridge.name` | - | bridge name to be used when creating the Linux bridge | | `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | | `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity | | `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | | `com.docker.network.mtu` | `--mtu` | Set the containers network MTU | | `com.docker.network.enable_ipv6` | `--ipv6` | Enable IPv6 networking | For example, let's use `-o` or `--opt` options to specify an IP address binding when publishing ports: ```bash docker network create -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" simple-network ``` ### Network internal mode By default, when you connect a container to an `overlay` network, Docker also connects a bridge network to it to provide external connectivity. If you want to create an externally isolated `overlay` network, you can specify the `--internal` option. 
## Related information * [network inspect](network_inspect.md) * [network connect](network_connect.md) * [network disconnect](network_disconnect.md) * [network ls](network_ls.md) * [network rm](network_rm.md) * [Understand Docker container networks](../../userguide/networking/dockernetworks.md) docker-1.10.3/docs/reference/commandline/network_disconnect.md000066400000000000000000000016521267010174400244510ustar00rootroot00000000000000 # network disconnect Usage: docker network disconnect [OPTIONS] NETWORK CONTAINER Disconnects a container from a network -f, --force Force the container to disconnect from a network --help Print usage Disconnects a container from a network. The container must be running to disconnect it from the network. ```bash $ docker network disconnect multi-host-network container1 ``` ## Related information * [network inspect](network_inspect.md) * [network connect](network_connect.md) * [network create](network_create.md) * [network ls](network_ls.md) * [network rm](network_rm.md) * [Understand Docker container networks](../../userguide/networking/dockernetworks.md) docker-1.10.3/docs/reference/commandline/network_inspect.md000066400000000000000000000072521267010174400237670ustar00rootroot00000000000000 # network inspect Usage: docker network inspect [OPTIONS] NETWORK [NETWORK..] Displays detailed information on a network -f, --format= Format the output using the given go template. --help Print usage Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: ```bash $ sudo docker run -itd --name=container1 busybox f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 $ sudo docker run -itd --name=container2 busybox bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 ``` The `network inspect` command shows the containers, by id, in its results. 
You can specify an alternate format to execute a given template for each result. Go's [text/template](http://golang.org/pkg/text/template/) package describes all the details of the format. ```bash $ sudo docker network inspect bridge [ { "Name": "bridge", "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.17.42.1/16", "Gateway": "172.17.42.1" } ] }, "Containers": { "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { "Name": "container2", "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", "MacAddress": "02:42:ac:11:00:02", "IPv4Address": "172.17.0.2/16", "IPv6Address": "" }, "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { "Name": "container1", "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", "MacAddress": "02:42:ac:11:00:01", "IPv4Address": "172.17.0.1/16", "IPv6Address": "" } }, "Options": { "com.docker.network.bridge.default_bridge": "true", "com.docker.network.bridge.enable_icc": "true", "com.docker.network.bridge.enable_ip_masquerade": "true", "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", "com.docker.network.bridge.name": "docker0", "com.docker.network.driver.mtu": "1500" } } ] ``` Returns the information about the user-defined network: ```bash $ docker network create simple-network 69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a $ docker network inspect simple-network [ { "Name": "simple-network", "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.22.0.0/16", "Gateway": "172.22.0.1/16" } ] }, "Containers": {}, "Options": {} } ] ``` ## Related information * [network disconnect ](network_disconnect.md) * [network connect](network_connect.md) * [network create](network_create.md) * [network 
ls](network_ls.md) * [network rm](network_rm.md) * [Understand Docker container networks](../../userguide/networking/dockernetworks.md) docker-1.10.3/docs/reference/commandline/network_ls.md000066400000000000000000000104331267010174400227330ustar00rootroot00000000000000 # docker network ls Usage: docker network ls [OPTIONS] Lists all the networks created by the user -f, --filter=[] Filter output based on conditions provided --help Print usage --no-trunc Do not truncate the output -q, --quiet Only display numeric IDs Lists all the networks the Engine `daemon` knows about. This includes the networks that span across multiple hosts in a cluster, for example: ```bash $ sudo docker network ls NETWORK ID NAME DRIVER 7fca4eb8c647 bridge bridge 9f904ee27bf5 none null cf03ee007fb4 host host 78b03ee04fc4 multi-host overlay ``` Use the `--no-trunc` option to display the full network id: ```bash docker network ls --no-trunc NETWORK ID NAME DRIVER 18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host 7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge 95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge 63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge ``` ## Filtering The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). Multiple filter flags are combined as an `OR` filter. For example, `-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. The currently supported filters are: * id (network's id) * name (network's name) * type (custom|builtin) #### Type The `type` filter supports two values; `builtin` displays predefined networks (`bridge`, `none`, `host`), whereas `custom` displays user defined networks. 
The following filter matches all user defined networks: ```bash $ docker network ls --filter type=custom NETWORK ID NAME DRIVER 95e74588f40d foo bridge 63d1ff1f77b0 dev bridge ``` By having this flag it allows for batch cleanup. For example, use this filter to delete all user defined networks: ```bash $ docker network rm `docker network ls --filter type=custom -q` ``` A warning will be issued when trying to remove a network that has containers attached. #### Name The `name` filter matches on all or part of a network's name. The following filter matches all networks with a name containing the `foobar` string. ```bash $ docker network ls --filter name=foobar NETWORK ID NAME DRIVER 06e7eef0a170 foobar bridge ``` You can also filter for a substring in a name as this shows: ```bash $ docker ps --filter name=foo NETWORK ID NAME DRIVER 95e74588f40d foo bridge 06e7eef0a170 foobar bridge ``` #### ID The `id` filter matches on all or part of a network's ID. The following filter matches all networks with a name containing the `06e7eef01700` string. ```bash $ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 NETWORK ID NAME DRIVER 63d1ff1f77b0 dev bridge ``` You can also filter for a substring in a ID as this shows: ```bash $ docker ps --filter id=95e74588f40d NETWORK ID NAME DRIVER 95e74588f40d foo bridge $ docker ps --filter id=95e NETWORK ID NAME DRIVER 95e74588f40d foo bridge ``` ## Related information * [network disconnect ](network_disconnect.md) * [network connect](network_connect.md) * [network create](network_create.md) * [network inspect](network_inspect.md) * [network rm](network_rm.md) * [Understand Docker container networks](../../userguide/networking/dockernetworks.md) docker-1.10.3/docs/reference/commandline/network_rm.md000066400000000000000000000025551267010174400227410ustar00rootroot00000000000000 # network rm Usage: docker network rm [OPTIONS] NETWORK [NETWORK...] 
Deletes one or more networks --help Print usage Removes one or more networks by name or identifier. To remove a network, you must first disconnect any containers connected to it. To remove the network named 'my-network': ```bash $ docker network rm my-network ``` To delete multiple networks in a single `docker network rm` command, provide multiple network names or id's. The following example deletes a network with id `3695c422697f` and a network named `my-network`: ```bash $ docker network rm 3695c422697f my-network ``` When you specify multiple networks, the command attempts to delete each in turn. If the deletion of one network fails, the command continues to the next on the list and tries to delete that. The command reports success or failure for each deletion. ## Related information * [network disconnect ](network_disconnect.md) * [network connect](network_connect.md) * [network create](network_create.md) * [network ls](network_ls.md) * [network inspect](network_inspect.md) * [Understand Docker container networks](../../userguide/networking/dockernetworks.md) docker-1.10.3/docs/reference/commandline/pause.md000066400000000000000000000014701267010174400216620ustar00rootroot00000000000000 # pause Usage: docker pause [OPTIONS] CONTAINER [CONTAINER...] Pause all processes within a container --help Print usage The `docker pause` command uses the cgroups freezer to suspend all processes in a container. Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. 
docker-1.10.3/docs/reference/commandline/port.md000066400000000000000000000021471267010174400215330ustar00rootroot00000000000000 # port Usage: docker port [OPTIONS] CONTAINER [PRIVATE_PORT[/PROTO]] List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT --help Print usage You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or just a specific mapping: $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test $ docker port test 7890/tcp -> 0.0.0.0:4321 9876/tcp -> 0.0.0.0:1234 $ docker port test 7890/tcp 0.0.0.0:4321 $ docker port test 7890/udp 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test $ docker port test 7890 0.0.0.0:4321 docker-1.10.3/docs/reference/commandline/ps.md000066400000000000000000000254171267010174400211760ustar00rootroot00000000000000 # ps Usage: docker ps [OPTIONS] List containers -a, --all Show all containers (default shows just running) -f, --filter=[] Filter output based on these conditions: - exited= an exit code of - label= or label== - status=(created|restarting|running|paused|exited) - name= a container's name - id= a container's ID - before=(|) - since=(|) - ancestor=([:tag]||) - containers created from an image or a descendant. --format=[] Pretty-print containers using a Go template --help Print usage -l, --latest Show the latest created container (includes all states) -n=-1 Show n last created containers (includes all states) --no-trunc Don't truncate output -q, --quiet Only display numeric IDs -s, --size Display total file sizes Running `docker ps --no-trunc` showing 2 linked containers. 
$ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db `docker ps` will show only running containers by default. To see all containers: `docker ps -a` `docker ps` will group exposed ports into a single range if possible. E.g., a container that exposes TCP ports `100, 101, 102` will display `100-102/tcp` in the `PORTS` column. ## Filtering The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) The currently supported filters are: * id (container's id) * label (`label=` or `label==`) * name (container's name) * exited (int - the code of exited containers. Only useful with `--all`) * status (created|restarting|running|paused|exited|dead) * ancestor (`[:]`, `` or ``) - filters containers that were created from the given image or a descendant. * isolation (default|process|hyperv) (Windows daemon only) #### Label The `label` filter matches containers based on the presence of a `label` alone or a `label` and a value. The following filter matches containers with the `color` label regardless of its value. $ docker ps --filter "label=color" CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani The following filter matches containers with the `color` label with the `blue` value. $ docker ps --filter "label=color=blue" CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani #### Name The `name` filter matches on all or part of a container's name. 
The following filter matches all containers with a name containing the `nostalgic_stallman` string. $ docker ps --filter "name=nostalgic_stallman" CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman You can also filter for a substring in a name as this shows: $ docker ps --filter "name=nostalgic" CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 715ebfcee040 busybox "top" 3 seconds ago Up 1 seconds i_am_nostalgic 9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman 673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley #### Exited The `exited` filter matches containers by exist status code. For example, to filter for containers that have exited successfully: $ docker ps -a --filter 'exited=0' CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey 106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani 48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds #### Status The `status` filter matches containers by status. You can filter using `created`, `restarting`, `running`, `paused`, `exited` and `dead`. For example, to filter for `running` containers: $ docker ps --filter status=running CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top 9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman To filter for `paused` containers: $ docker ps --filter status=paused CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley #### Ancestor The `ancestor` filter matches containers based on its image or a descendant of it. 
The filter supports the following image representation: - image - image:tag - image:tag@digest - short-id - full-id If you don't specify a `tag`, the `latest` tag is used. For example, to filter for containers that use the latest `ubuntu` image: $ docker ps --filter ancestor=ubuntu CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace 5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet 82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath Match containers based on the `ubuntu-c1` image which, in this case, is a child of `ubuntu`: $ docker ps --filter ancestor=ubuntu-c1 CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace Match containers based on the `ubuntu` version `12.04.5` image: $ docker ps --filter ancestor=ubuntu:12.04.5 CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose The following matches containers based on the layer `d0e008c6cf02` or an image that have this layer in it's layer stack. $ docker ps --filter ancestor=d0e008c6cf02 CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose ## Formatting The formatting option (`--format`) will pretty-print container output using a Go template. Valid placeholders for the Go template are listed below: Placeholder | Description ---- | ---- `.ID` | Container ID `.Image` | Image ID `.Command` | Quoted command `.CreatedAt` | Time when the container was created. `.RunningFor` | Elapsed time since the container was started. `.Ports` | Exposed ports. `.Status` | Container status. `.Size` | Container disk size. `.Names` | Container names. `.Labels` | All labels assigned to the container. 
`.Label` | Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}` When using the `--format` option, the `ps` command will either output the data exactly as the template declares or, when using the `table` directive, will include column headers as well. The following example uses a template without headers and outputs the `ID` and `Command` entries separated by a colon for all running containers: $ docker ps --format "{{.ID}}: {{.Command}}" a87ecb4f327c: /bin/sh -c #(nop) MA 01946d9d34d8: /bin/sh -c #(nop) MA c1d3b0166030: /bin/sh -c yum -y up 41d50ecd2f57: /bin/sh -c #(nop) MA To list all running containers with their labels in a table format you can use: $ docker ps --format "table {{.ID}}\t{{.Labels}}" CONTAINER ID LABELS a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd 01946d9d34d8 c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd docker-1.10.3/docs/reference/commandline/pull.md000066400000000000000000000043241267010174400215220ustar00rootroot00000000000000 # pull Usage: docker pull [OPTIONS] NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] Pull an image or a repository from the registry -a, --all-tags Download all tagged images in the repository --disable-content-trust=true Skip image verification --help Print usage Most of your images will be created on top of a base image from the [Docker Hub](https://hub.docker.com) registry. [Docker Hub](https://hub.docker.com) contains many pre-built images that you can `pull` and try without needing to define and configure your own. It is also possible to manually specify the path of a registry to pull from. For example, if you have set up a local registry, you can specify its path to pull from it. A repository path is similar to a URL, but does not contain a protocol specifier (`https://`, for example). 
To download a particular image, or set of images (i.e., a repository), use `docker pull`: $ docker pull debian # will pull the debian:latest image and its intermediate layers $ docker pull debian:testing # will pull the image named debian:testing and any intermediate # layers it is based on. $ docker pull debian@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf # will pull the image from the debian repository with the digest # sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf # and any intermediate layers it is based on. # (Typically the empty `scratch` image, a MAINTAINER layer, # and the un-tarred base). $ docker pull --all-tags centos # will pull all the images from the centos repository $ docker pull registry.hub.docker.com/debian # manually specifies the path to the default Docker registry. This could # be replaced with the path to a local registry to pull from another source. # sudo docker pull myhub.com:8080/test-image Killing the `docker pull` process, for example by pressing `CTRL-c` while it is running in a terminal, will terminate the pull operation. docker-1.10.3/docs/reference/commandline/push.md000066400000000000000000000012061267010174400215210ustar00rootroot00000000000000 # push Usage: docker push [OPTIONS] NAME[:TAG] Push an image or a repository to the registry --disable-content-trust=true Skip image signing --help Print usage Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) registry or to a self-hosted one. Killing the `docker push` process, for example by pressing `CTRL-c` while it is running in a terminal, will terminate the push operation. docker-1.10.3/docs/reference/commandline/rename.md000066400000000000000000000006161267010174400220150ustar00rootroot00000000000000 # rename Usage: docker rename [OPTIONS] OLD_NAME NEW_NAME Rename a container --help Print usage The `docker rename` command allows the container to be renamed to a different name. 
docker-1.10.3/docs/reference/commandline/restart.md000066400000000000000000000006301267010174400222260ustar00rootroot00000000000000 # restart Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] Restart a container --help Print usage -t, --time=10 Seconds to wait for stop before killing the container docker-1.10.3/docs/reference/commandline/rm.md000066400000000000000000000032561267010174400211670ustar00rootroot00000000000000 # rm Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] Remove one or more containers -f, --force Force the removal of a running container (uses SIGKILL) --help Print usage -l, --link Remove the specified link -v, --volumes Remove the volumes associated with the container ## Examples $ docker rm /redis /redis This will remove the container referenced under the link `/redis`. $ docker rm --link /webapp/redis /webapp/redis This will remove the underlying link between `/webapp` and the `/redis` containers removing all network communication. $ docker rm --force redis redis The main process inside the container referenced under the link `/redis` will receive `SIGKILL`, then the container will be removed. $ docker rm $(docker ps -a -q) This command will delete all stopped containers. The command `docker ps -a -q` will return all existing container IDs and pass them to the `rm` command which will delete them. Any running containers will not be deleted. $ docker rm -v redis redis This command will remove the container and any volumes associated with it. Note that if a volume was specified with a name, it will not be removed. $ docker create -v awesome:/foo -v /bar --name hello redis hello $ docker rm -v hello In this example, the volume for `/foo` will remain intact, but the volume for `/bar` will be removed. The same behavior holds for volumes inherited with `--volumes-from`. 
docker-1.10.3/docs/reference/commandline/rmi.md000066400000000000000000000066611267010174400213430ustar00rootroot00000000000000 # rmi Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] Remove one or more images -f, --force Force removal of the image --help Print usage --no-prune Do not delete untagged parents You can remove an image using its short or long ID, its tag, or its digest. If an image has one or more tag referencing it, you must remove all of them before the image is removed. Digest references are removed automatically when an image is removed by tag. $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) $ docker rmi fd484f19954f Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force 2013/12/11 05:47:16 Error: failed to remove one or more images $ docker rmi test1 Untagged: test1:latest $ docker rmi test2 Untagged: test2:latest $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) $ docker rmi test Untagged: test:latest Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 If you use the `-f` flag and specify the image's short or long ID, then this command untags and removes all images that match the specified ID. 
$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) $ docker rmi -f fd484f19954f Untagged: test1:latest Untagged: test:latest Untagged: test2:latest Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 An image pulled by digest has no tag associated with it: $ docker images --digests REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB To remove an image using its digest: $ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b docker-1.10.3/docs/reference/commandline/run.md000066400000000000000000000663351267010174400213640ustar00rootroot00000000000000 # run Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
Run a command in a new container -a, --attach=[] Attach to STDIN, STDOUT or STDERR --add-host=[] Add a custom host-to-IP mapping (host:ip) --blkio-weight=0 Block IO weight (relative weight) --blkio-weight-device=[] Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`) --cpu-shares=0 CPU shares (relative weight) --cap-add=[] Add Linux capabilities --cap-drop=[] Drop Linux capabilities --cgroup-parent="" Optional parent cgroup for the container --cidfile="" Write the container ID to the file --cpu-period=0 Limit CPU CFS (Completely Fair Scheduler) period --cpu-quota=0 Limit CPU CFS (Completely Fair Scheduler) quota --cpuset-cpus="" CPUs in which to allow execution (0-3, 0,1) --cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1) -d, --detach Run container in background and print container ID --detach-keys Specify the escape key sequence used to detach a container --device=[] Add a host device to the container --device-read-bps=[] Limit read rate (bytes per second) from a device (e.g., --device-read-bps=/dev/sda:1mb) --device-read-iops=[] Limit read rate (IO per second) from a device (e.g., --device-read-iops=/dev/sda:1000) --device-write-bps=[] Limit write rate (bytes per second) to a device (e.g., --device-write-bps=/dev/sda:1mb) --device-write-iops=[] Limit write rate (IO per second) to a device (e.g., --device-write-bps=/dev/sda:1000) --disable-content-trust=true Skip image verification --dns=[] Set custom DNS servers --dns-opt=[] Set custom DNS options --dns-search=[] Set custom DNS search domains -e, --env=[] Set environment variables --entrypoint="" Overwrite the default ENTRYPOINT of the image --env-file=[] Read in a file of environment variables --expose=[] Expose a port or a range of ports --group-add=[] Add additional groups to run as -h, --hostname="" Container host name --help Print usage -i, --interactive Keep STDIN open even if not attached --ip="" Container IPv4 address (e.g. 
172.30.100.104) --ip6="" Container IPv6 address (e.g. 2001:db8::33) --ipc="" IPC namespace to use --isolation="" Container isolation technology --kernel-memory="" Kernel memory limit -l, --label=[] Set metadata on the container (e.g., --label=com.example.key=value) --label-file=[] Read in a file of labels (EOL delimited) --link=[] Add link to another container --log-driver="" Logging driver for container --log-opt=[] Log driver specific options -m, --memory="" Memory limit --mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33) --memory-reservation="" Memory soft limit --memory-swap="" A positive integer equal to memory plus swap. Specify -1 to enable unlimited swap. --memory-swappiness="" Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. --name="" Assign a name to the container --net="bridge" Connect a container to a network 'bridge': create a network stack on the default Docker bridge 'none': no networking 'container:': reuse another container's network stack 'host': use the Docker host network stack '|': connect to a user-defined network --net-alias=[] Add network-scoped alias for the container --oom-kill-disable Whether to disable OOM Killer for the container or not --oom-score-adj=0 Tune the host's OOM preferences for containers (accepts -1000 to 1000) -P, --publish-all Publish all exposed ports to random ports -p, --publish=[] Publish a container's port(s) to the host --pid="" PID namespace to use --privileged Give extended privileges to this container --read-only Mount the container's root filesystem as read only --restart="no" Restart policy (no, on-failure[:max-retry], always, unless-stopped) --rm Automatically remove the container when it exits --shm-size=[] Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. 
If you omit the size entirely, the system uses `64m`. --security-opt=[] Security Options --sig-proxy=true Proxy received signals to the process --stop-signal="SIGTERM" Signal to stop a container -t, --tty Allocate a pseudo-TTY -u, --user="" Username or UID (format: [:]) --ulimit=[] Ulimit options --uts="" UTS namespace to use -v, --volume=[host-src:]container-dest[:] Bind mount a volume. The comma-delimited `options` are [rw|ro], [z|Z], or [[r]shared|[r]slave|[r]private]. The 'host-src' is an absolute path or a name value. --volume-driver="" Container's volume driver --volumes-from=[] Mount volumes from the specified container(s) -w, --workdir="" Working directory inside the container The `docker run` command first `creates` a writeable container layer over the specified image, and then `starts` it using the specified command. That is, `docker run` is equivalent to the API `/containers/create` then `/containers/(id)/start`. A stopped container can be restarted with all its previous changes intact using `docker start`. See `docker ps -a` to view a list of all containers. The `docker run` command can be used in combination with `docker commit` to [*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md). For information on connecting a container to a network, see the ["*Docker network overview*"](../../userguide/networking/index.md). ## Examples ### Assign name and allocate psuedo-TTY (--name, -it) $ docker run --name test -it debian root@d6c0fe130dba:/# exit 13 $ echo $? 13 $ docker ps -a | grep test d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test This example runs a container named `test` using the `debian:latest` image. The `-it` instructs Docker to allocate a pseudo-TTY connected to the container's stdin; creating an interactive `bash` shell in the container. In the example, the `bash` shell is quit by entering `exit 13`. 
This exit code is passed on to the caller of `docker run`, and is recorded in the `test` container's metadata. ### Capture container ID (--cidfile) $ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" This will create a container and print `test` to the console. The `cidfile` flag makes Docker attempt to create a new file and write the container ID to it. If the file exists already, Docker will return an error. Docker will close this file when `docker run` exits. ### Full container capabilities (--privileged) $ docker run -t -i --rm ubuntu bash root@bc338942ef20:/# mount -t tmpfs none /mnt mount: permission denied This will *not* work, because by default, most potentially dangerous kernel capabilities are dropped; including `cap_sys_admin` (which is required to mount filesystems). However, the `--privileged` flag will allow it to run: $ docker run -t -i --privileged ubuntu bash root@50e3f57e16e6:/# mount -t tmpfs none /mnt root@50e3f57e16e6:/# df -h Filesystem Size Used Avail Use% Mounted on none 1.9G 0 1.9G 0% /mnt The `--privileged` flag gives *all* capabilities to the container, and it also lifts all the limitations enforced by the `device` cgroup controller. In other words, the container can then do almost everything that the host can do. This flag exists to allow special use-cases, like running Docker within Docker. ### Set working directory (-w) $ docker run -w /path/to/dir/ -i -t ubuntu pwd The `-w` lets the command being executed inside directory given, here `/path/to/dir/`. If the path does not exists it is created inside the container. ### Mount tmpfs (--tmpfs) $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`, `noexec`, `nosuid`, `size=65536k` options. ### Mount volume (-v, --read-only) $ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd The `-v` flag mounts the current working directory into the container. 
The `-w` lets the command being executed inside the current working directory, by changing into the directory to the value returned by `pwd`. So this combination executes the command using the container, but inside the current working directory. $ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash When the host directory of a bind-mounted volume doesn't exist, Docker will automatically create this directory on the host for you. In the example above, Docker will create the `/doesnt/exist` folder before starting your container. $ docker run --read-only -v /icanwrite busybox touch /icanwrite here Volumes can be used in combination with `--read-only` to control where a container writes files. The `--read-only` flag mounts the container's root filesystem as read only prohibiting writes to locations other than the specified volumes for the container. $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh By bind-mounting the docker unix socket and statically linked docker binary (refer to [get the linux binary]( ../../installation/binaries.md#get-the-linux-binary)), you give the container the full access to create and manipulate the host's Docker daemon. ### Publish or expose port (-p, --expose) $ docker run -p 127.0.0.1:80:8080 ubuntu bash This binds port `8080` of the container to port `80` on `127.0.0.1` of the host machine. The [Docker User Guide](../../userguide/networking/default_network/dockerlinks.md) explains in detail how to manipulate ports in Docker. $ docker run --expose 80 ubuntu bash This exposes port `80` of the container without publishing the port to the host system's interfaces. ### Set environment variables (-e, --env, --env-file) $ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash This sets environmental variables in the container. For illustration all three flags are shown here. 
Where `-e`, `--env` take an environment variable and value, or if no `=` is provided, then that variable's current value is passed through (i.e. `$MYVAR1` from the host is set to `$MYVAR1` in the container). When no `=` is provided and that variable is not defined in the client's environment then that variable will be removed from the container's list of environment variables. All three flags, `-e`, `--env` and `--env-file` can be repeated. Regardless of the order of these three flags, the `--env-file` are processed first, and then `-e`, `--env` flags. This way, the `-e` or `--env` will override variables as needed. $ cat ./env.list TEST_FOO=BAR $ docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO TEST_FOO=This is a test The `--env-file` flag takes a filename as an argument and expects each line to be in the `VAR=VAL` format, mimicking the argument passed to `--env`. Comment lines need only be prefixed with `#` An example of a file passed with `--env-file` $ cat ./env.list TEST_FOO=BAR # this is a comment TEST_APP_DEST_HOST=10.10.0.127 TEST_APP_DEST_PORT=8888 _TEST_BAR=FOO TEST_APP_42=magic helloWorld=true 123qwe=bar org.spring.config=something # pass through this variable from the caller TEST_PASSTHROUGH $ TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=5198e0745561 TEST_FOO=BAR TEST_APP_DEST_HOST=10.10.0.127 TEST_APP_DEST_PORT=8888 _TEST_BAR=FOO TEST_APP_42=magic helloWorld=true TEST_PASSTHROUGH=howdy HOME=/root 123qwe=bar org.spring.config=something $ docker run --env-file ./env.list busybox env PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=5198e0745561 TEST_FOO=BAR TEST_APP_DEST_HOST=10.10.0.127 TEST_APP_DEST_PORT=8888 _TEST_BAR=FOO TEST_APP_42=magic helloWorld=true TEST_PASSTHROUGH= HOME=/root 123qwe=bar org.spring.config=something ### Set metadata on container (-l, --label, --label-file) A label 
is a `key=value` pair that applies metadata to a container. To label a container with two labels: $ docker run -l my-label --label com.example.foo=bar ubuntu bash The `my-label` key doesn't specify a value so the label defaults to an empty string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`). The `key=value` must be unique to avoid overwriting the label value. If you specify labels with identical keys but different values, each subsequent value overwrites the previous. Docker uses the last `key=value` you supply. Use the `--label-file` flag to load multiple labels from a file. Delimit each label in the file with an EOL mark. The example below loads labels from a labels file in the current directory: $ docker run --label-file ./labels ubuntu bash The label-file format is similar to the format for loading environment variables. (Unlike environment variables, labels are not visible to processes running inside a container.) The following example illustrates a label-file format: com.example.label1="a label" # this is a comment com.example.label2=another\ label com.example.label3 You can load multiple label-files by supplying multiple `--label-file` flags. For additional information on working with labels, see [*Labels - custom metadata in Docker*](../../userguide/labels-custom-metadata.md) in the Docker User Guide. ### Connect a container to a network (--net) When you start a container use the `--net` flag to connect it to a network. This adds the `busybox` container to the `mynet` network. ```bash $ docker run -itd --net=my-multihost-network busybox ``` You can also choose the IP addresses for the container with `--ip` and `--ip6` flags when you start the container on a user-defined network. ```bash $ docker run -itd --net=my-multihost-network --ip=10.10.9.75 busybox ``` If you want to add a running container to a network use the `docker network connect` subcommand. You can connect multiple containers to the same network. 
Once connected, the containers can communicate easily need only another container's IP address or name. For `overlay` networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way. **Note**: Service discovery is unavailable on the default bridge network. Containers can communicate via their IP addresses by default. To communicate by name, they must be linked. You can disconnect a container from a network using the `docker network disconnect` command. ### Mount volumes from container (--volumes-from) $ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd The `--volumes-from` flag mounts all the defined volumes from the referenced containers. Containers can be specified by repetitions of the `--volumes-from` argument. The container ID may be optionally suffixed with `:ro` or `:rw` to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted in the same mode (read write or read only) as the reference container. Labeling systems like SELinux require that proper labels are placed on volume content mounted into a container. Without a label, the security system might prevent the processes running inside the container from using the content. By default, Docker does not change the labels set by the OS. To change the label in the container context, you can add either of two suffixes `:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file objects on the shared volumes. The `z` option tells Docker that two containers share the volume content. As a result, Docker labels the content with a shared content label. Shared volume labels allow all containers to read/write content. The `Z` option tells Docker to label the content with a private unshared label. Only the current container can use a private volume. 
### Attach to STDIN/STDOUT/STDERR (-a) The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` or `STDERR`. This makes it possible to manipulate the output and input as needed. $ echo "test" | docker run -i -a stdin ubuntu cat - This pipes data into a container and prints the container's ID by attaching only to the container's `STDIN`. $ docker run -a stderr ubuntu echo test This isn't going to print anything unless there's an error because we've only attached to the `STDERR` of the container. The container's logs still store what's been written to `STDERR` and `STDOUT`. $ cat somefile | docker run -i -a stdin mybuilder dobuild This is how piping a file into a container could be done for a build. The container's ID will be printed after the build is done and the build logs could be retrieved using `docker logs`. This is useful if you need to pipe a file or something else into a container and retrieve the container's ID once the container has finished running. ### Add host device to container (--device) $ docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo} brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo It is often necessary to directly expose devices to a container. The `--device` option enables that. For example, a specific block storage device or loop device or audio device can be added to an otherwise unprivileged container (without the `--privileged` flag) and have the application directly access it. By default, the container will be able to `read`, `write` and `mknod` these devices. 
This can be overridden using a third `:rwm` set of options to each `--device` flag: $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc Command (m for help): q $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc You will not be able to write the partition table. Command (m for help): q $ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc Command (m for help): q $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc fdisk: unable to open /dev/xvdc: Operation not permitted > **Note:** > `--device` cannot be safely used with ephemeral devices. Block devices > that may be removed should not be added to untrusted containers with > `--device`. ### Restart policies (--restart) Use Docker's `--restart` to specify a container's *restart policy*. A restart policy controls whether the Docker daemon restarts a container after exit. Docker supports the following restart policies:
Policy Result
no Do not automatically restart the container when it exits. This is the default.
on-failure[:max-retries] Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts.
always Always restart the container regardless of the exit status. When you specify always, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container.
unless-stopped Always restart the container regardless of the exit status, but do not start it on daemon startup if the container has been put to a stopped state before.
$ docker run --restart=always redis This will run the `redis` container with a restart policy of **always** so that if the container exits, Docker will restart it. More detailed information on restart policies can be found in the [Restart Policies (--restart)](../run.md#restart-policies-restart) section of the Docker run reference page. ### Add entries to container hosts file (--add-host) You can add other hosts into a container's `/etc/hosts` file by using one or more `--add-host` flags. This example adds a static address for a host named `docker`: $ docker run --add-host=docker:10.180.0.1 --rm -it debian $$ ping docker PING docker (10.180.0.1): 48 data bytes 56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms 56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms ^C--- docker ping statistics --- 2 packets transmitted, 2 packets received, 0% packet loss round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms Sometimes you need to connect to the Docker host from within your container. To enable this, pass the Docker host's IP address to the container using the `--add-host` flag. To find the host's address, use the `ip addr show` command. The flags you pass to `ip addr show` depend on whether you are using IPv4 or IPv6 networking in your containers. Use the following flags for IPv4 address retrieval for a network device named `eth0`: $ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1` $ docker run --add-host=docker:${HOSTIP} --rm -it debian For IPv6 use the `-6` flag instead of the `-4` flag. For other network devices, replace `eth0` with the correct device name (for example `docker0` for the bridge device). ### Set ulimits in container (--ulimit) Since setting `ulimit` settings in a container requires extra privileges not available in the default container, you can set these using the `--ulimit` flag. 
`--ulimit` is specified with a soft and hard limit as such: `=[:]`, for example: $ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" 1024 > **Note:** > If you do not provide a `hard limit`, the `soft limit` will be used > for both values. If no `ulimits` are set, they will be inherited from > the default `ulimits` set on the daemon. `as` option is disabled now. > In other words, the following script is not supported: > `$ docker run -it --ulimit as=1024 fedora /bin/bash` The values are sent to the appropriate `syscall` as they are set. Docker doesn't perform any byte conversion. Take this into account when setting the values. #### For `nproc` usage Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the maximum number of processes available to a user, not to a container. For example, start four containers with `daemon` user: docker run -d -u daemon --ulimit nproc=3 busybox top docker run -d -u daemon --ulimit nproc=3 busybox top docker run -d -u daemon --ulimit nproc=3 busybox top docker run -d -u daemon --ulimit nproc=3 busybox top The 4th container fails and reports "[8] System error: resource temporarily unavailable" error. This fails because the caller set `nproc=3` resulting in the first three containers using up the three processes quota set for the `daemon` user. ### Stop container with signal (--stop-signal) The `--stop-signal` flag sets the system call signal that will be sent to the container to exit. This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, or a signal name in the format SIGNAME, for instance SIGKILL. ### Specify isolation technology for container (--isolation) This option is useful in situations where you are running Docker containers on Microsoft Windows. The `--isolation ` option sets a container's isolation technology. On Linux, the only supported is the `default` option which uses Linux namespaces. 
These two commands are equivalent on Linux: ``` $ docker run -d busybox top $ docker run -d --isolation default busybox top ``` On Microsoft Windows, can take any of these values: | Value | Description | |-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| | `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | | `process` | Namespace isolation only. | | `hyperv` | Hyper-V hypervisor partition-based isolation. | In practice, when running on Microsoft Windows without a `daemon` option set, these two commands are equivalent: ``` $ docker run -d --isolation default busybox top $ docker run -d --isolation process busybox top ``` If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation: ``` $ docker run -d --isolation default busybox top $ docker run -d --isolation hyperv busybox top ``` docker-1.10.3/docs/reference/commandline/save.md000066400000000000000000000020461267010174400215030ustar00rootroot00000000000000 # save Usage: docker save [OPTIONS] IMAGE [IMAGE...] Save an image(s) to a tar archive (streamed to STDOUT by default) --help Print usage -o, --output="" Write to a file, instead of STDOUT Produces a tarred repository to the standard output stream. Contains all parent layers, and all tags + versions, or specified `repo:tag`, for each argument provided. 
It is used to create a backup that can then be used with `docker load` $ docker save busybox > busybox.tar $ ls -sh busybox.tar 2.7M busybox.tar $ docker save --output busybox.tar busybox $ ls -sh busybox.tar 2.7M busybox.tar $ docker save -o fedora-all.tar fedora $ docker save -o fedora-latest.tar fedora:latest It is even useful to cherry-pick particular tags of an image repository $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy docker-1.10.3/docs/reference/commandline/search.md000066400000000000000000000127271267010174400220210ustar00rootroot00000000000000 # search Usage: docker search [OPTIONS] TERM Search the Docker Hub for images --automated Only show automated builds --help Print usage --no-trunc Don't truncate output -s, --stars=0 Only displays with at least x stars Search [Docker Hub](https://hub.docker.com) for images See [*Find Public Images on Docker Hub*](../../userguide/containers/dockerrepos.md#searching-for-images) for more details on finding shared images from the command line. > **Note:** > Search queries will only return up to 25 results ## Examples ### Search images by name This example displays images with a name containing 'busybox': $ docker search busybox NAME DESCRIPTION STARS OFFICIAL AUTOMATED busybox Busybox base image. 316 [OK] progrium/busybox 50 [OK] radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] odise/busybox-python 2 [OK] azukiapp/busybox This image is meant to be used as the base... 2 [OK] ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK] shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 1 [OK] odise/busybox-curl 1 [OK] ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK] peelsky/zulu-openjdk-busybox 1 [OK] skomma/busybox-data Docker image suitable for data volume cont... 1 [OK] elektritter/busybox-teamspeak Leightweight teamspeak3 container based on... 
1 [OK] socketplane/busybox 1 [OK] oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK] ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK] nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK] openshift/busybox-http-app 0 [OK] jllopis/busybox 0 [OK] swyckoff/busybox 0 [OK] powellquiring/busybox 0 [OK] williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK] simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK] fhisamoto/busybox-java Busybox java 0 [OK] scottabernethy/busybox 0 [OK] marclop/busybox-solr ### Search images by name and number of stars (-s, --stars) This example displays images with a name containing 'busybox' and at least 3 stars: $ docker search --stars=3 busybox NAME DESCRIPTION STARS OFFICIAL AUTOMATED busybox Busybox base image. 325 [OK] progrium/busybox 50 [OK] radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] ### Search automated images (--automated) This example displays images with a name containing 'busybox', at least 3 stars and are automated builds: $ docker search --stars=3 --automated busybox NAME DESCRIPTION STARS OFFICIAL AUTOMATED progrium/busybox 50 [OK] radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] ### Display non-truncated description (--no-trunc) This example displays images with a name containing 'busybox', at least 3 stars and the description isn't truncated in the output: $ docker search --stars=3 --no-trunc busybox NAME DESCRIPTION STARS OFFICIAL AUTOMATED busybox Busybox base image. 325 [OK] progrium/busybox 50 [OK] radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 [OK] docker-1.10.3/docs/reference/commandline/start.md000066400000000000000000000010611267010174400216760ustar00rootroot00000000000000 # start Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] 
Start one or more containers -a, --attach Attach STDOUT/STDERR and forward signals --detach-keys Specify the escape key sequence used to detach a container --help Print usage -i, --interactive Attach container's STDIN docker-1.10.3/docs/reference/commandline/stats.md000066400000000000000000000035341267010174400217060ustar00rootroot00000000000000 # stats Usage: docker stats [OPTIONS] [CONTAINER...] Display a live stream of one or more containers' resource usage statistics -a, --all Show all containers (default shows just running) --help Print usage --no-stream Disable streaming stats and only pull the first result The `docker stats` command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data. If you want more detailed information about a container's resource usage, use the `/containers/(id)/stats` API endpoint. ## Examples Running `docker stats` on all running containers $ docker stats CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O 1285939c1fd3 0.07% 796 KB / 64 MB 1.21% 788 B / 648 B 3.568 MB / 512 KB 9c76f7834ae2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B d1ea048f04e4 0.03% 4.583 MB / 64 MB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B Running `docker stats` on multiple containers by name and id. $ docker stats fervent_panini 5acfcb1b4fd1 CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O 5acfcb1b4fd1 0.00% 115.2 MB/1.045 GB 11.03% 1.422 kB/648 B fervent_panini 0.02% 11.08 MB/1.045 GB 1.06% 648 B/648 B docker-1.10.3/docs/reference/commandline/stop.md000066400000000000000000000010331267010174400215250ustar00rootroot00000000000000 # stop Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
Stop a container by sending SIGTERM and then SIGKILL after a grace period --help Print usage -t, --time=10 Seconds to wait for stop before killing it The main process inside the container will receive `SIGTERM`, and after a grace period, `SIGKILL`. docker-1.10.3/docs/reference/commandline/tag.md000066400000000000000000000010171267010174400213150ustar00rootroot00000000000000 # tag Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] Tag an image into a repository --help Print usage You can group your images together using names and tags, and then upload them to [*Share Images via Repositories*](../../userguide/containers/dockerrepos.md#contributing-to-docker-hub). docker-1.10.3/docs/reference/commandline/top.md000066400000000000000000000005201267010174400213420ustar00rootroot00000000000000 # top Usage: docker top [OPTIONS] CONTAINER [ps OPTIONS] Display the running processes of a container --help Print usage docker-1.10.3/docs/reference/commandline/unpause.md000066400000000000000000000011051267010174400222200ustar00rootroot00000000000000 # unpause Usage: docker unpause [OPTIONS] CONTAINER [CONTAINER...] Unpause all processes within a container --help Print usage The `docker unpause` command uses the cgroups freezer to un-suspend all processes in a container. See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. docker-1.10.3/docs/reference/commandline/update.md000066400000000000000000000044721267010174400220340ustar00rootroot00000000000000 ## update Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] 
Updates container resource limits --help=false Print usage --blkio-weight=0 Block IO (relative weight), between 10 and 1000 --cpu-shares=0 CPU shares (relative weight) --cpu-period=0 Limit the CPU CFS (Completely Fair Scheduler) period --cpu-quota=0 Limit the CPU CFS (Completely Fair Scheduler) quota --cpuset-cpus="" CPUs in which to allow execution (0-3, 0,1) --cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1) -m, --memory="" Memory limit --memory-reservation="" Memory soft limit --memory-swap="" A positive integer equal to memory plus swap. Specify -1 to enable unlimited swap --kernel-memory="" Kernel memory limit: container must be stopped The `docker update` command dynamically updates container resources. Use this command to prevent containers from consuming too many resources from their Docker host. With a single command, you can place limits on a single container or on many. To specify more than one container, provide space-separated list of container names or IDs. With the exception of the `--kernel-memory` value, you can specify these options on a running or a stopped container. You can only update `--kernel-memory` on a stopped container. When you run `docker update` on stopped container, the next time you restart it, the container uses those values. ## EXAMPLES The following sections illustrate ways to use this command. ### Update a container with cpu-shares=512 To limit a container's cpu-shares to 512, first identify the container name or ID. You can use **docker ps** to find these values. You can also use the ID returned from the **docker run** command. 
Then, do the following: ```bash $ docker update --cpu-shares 512 abebf7571666 ``` ### Update a container with cpu-shares and memory To update multiple resource configurations for multiple containers: ```bash $ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse ``` docker-1.10.3/docs/reference/commandline/version.md000066400000000000000000000030631267010174400222320ustar00rootroot00000000000000 # version Usage: docker version [OPTIONS] Show the Docker version information. -f, --format="" Format the output using the given go template --help Print usage By default, this will render all version information in an easy to read layout. If a format is specified, the given template will be executed instead. Go's [text/template](http://golang.org/pkg/text/template/) package describes all the details of the format. ## Examples **Default output:** $ docker version Client: Version: 1.8.0 API version: 1.20 Go version: go1.4.2 Git commit: f5bae0a Built: Tue Jun 23 17:56:00 UTC 2015 OS/Arch: linux/amd64 Server: Version: 1.8.0 API version: 1.20 Go version: go1.4.2 Git commit: f5bae0a Built: Tue Jun 23 17:56:00 UTC 2015 OS/Arch: linux/amd64 **Get server version:** $ docker version --format '{{.Server.Version}}' 1.8.0 **Dump raw data:** $ docker version --format '{{json .}}' {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} docker-1.10.3/docs/reference/commandline/volume_create.md000066400000000000000000000037371267010174400234070ustar00rootroot00000000000000 # volume create Usage: docker volume create [OPTIONS] Create a volume -d, --driver=local Specify volume driver name --help Print usage --name= Specify volume name -o, --opt=map[] Set driver 
specific options Creates a new volume that containers can consume and store data in. If a name is not specified, Docker generates a random name. You create a volume and then configure the container to use it, for example: $ docker volume create --name hello hello $ docker run -d -v hello:/world busybox ls /world The mount is created inside the container's `/world` directory. Docker does not support relative paths for mount points inside the container. Multiple containers can use the same volume in the same time period. This is useful if two containers need access to shared data. For example, if one container writes and the other reads the data. Volume names must be unique among drivers. This means you cannot use the same volume name with two different drivers. If you attempt this `docker` returns an error: ``` A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. ``` If you specify a volume name already in use on the current driver, Docker assumes you want to re-use the existing volume and does not return an error. ## Driver specific options Some volume drivers may take options to customize the volume creation. Use the `-o` or `--opt` flags to pass driver options: $ docker volume create --driver fake --opt tardis=blue --opt timey=wimey These options are passed directly to the volume driver. Options for different volume drivers may do different things (or nothing at all). *Note*: The built-in `local` volume driver does not currently accept any options. docker-1.10.3/docs/reference/commandline/volume_inspect.md000066400000000000000000000025731267010174400236060ustar00rootroot00000000000000 # volume inspect Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] Inspect one or more volumes -f, --format= Format the output using the given go template. --help Print usage Returns information about a volume. By default, this command renders all results in a JSON array. 
You can specify an alternate format to execute a given template for each result. Go's [text/template](http://golang.org/pkg/text/template/) package describes all the details of the format. Example output: $ docker volume create 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d $ docker volume inspect 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d [ { "Name": "85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d", "Driver": "local", "Mountpoint": "/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data" } ] $ docker volume inspect --format '{{ .Mountpoint }}' 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d /var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data docker-1.10.3/docs/reference/commandline/volume_ls.md000066400000000000000000000017431267010174400225550ustar00rootroot00000000000000 # volume ls Usage: docker volume ls [OPTIONS] List volumes -f, --filter=[] Provide filter values (i.e. 'dangling=true') --help Print usage -q, --quiet Only display volume names Lists all the volumes Docker knows about. You can filter using the `-f` or `--filter` flag. The filtering format is a `key=value` pair. To specify more than one filter, pass multiple flags (for example, `--filter "foo=bar" --filter "bif=baz"`) There is a single supported filter `dangling=value` which takes a boolean of `true` or `false`. Example output: $ docker volume create --name rose rose $docker volume create --name tyler tyler $ docker volume ls DRIVER VOLUME NAME local rose local tyler docker-1.10.3/docs/reference/commandline/volume_rm.md000066400000000000000000000006671267010174400225610ustar00rootroot00000000000000 # volume rm Usage: docker volume rm [OPTIONS] VOLUME [VOLUME...] Remove a volume --help Print usage Removes one or more volumes. You cannot remove a volume that is in use by a container. 
$ docker volume rm hello hello docker-1.10.3/docs/reference/commandline/wait.md000066400000000000000000000005321267010174400215070ustar00rootroot00000000000000 # wait Usage: docker wait [OPTIONS] CONTAINER [CONTAINER...] Block until a container stops, then print its exit code. --help Print usage docker-1.10.3/docs/reference/glossary.md000066400000000000000000000164571267010174400201350ustar00rootroot00000000000000 # Glossary A list of terms used around the Docker project. ## aufs aufs (advanced multi layered unification filesystem) is a Linux [filesystem](#filesystem) that Docker supports as a storage backend. It implements the [union mount](http://en.wikipedia.org/wiki/Union_mount) for Linux file systems. ## Base image An image that has no parent is a **base image**. ## boot2docker [boot2docker](http://boot2docker.io/) is a lightweight Linux distribution made specifically to run Docker containers. The boot2docker management tool for Mac and Windows was deprecated and replaced by [`docker-machine`](#machine) which you can install with the Docker Toolbox. ## btrfs btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker supports as a storage backend. It is a [copy-on-write](http://en.wikipedia.org/wiki/Copy-on-write) filesystem. ## build build is the process of building Docker images using a [Dockerfile](#dockerfile). The build uses a Dockerfile and a "context". The context is the set of files in the directory in which the image is built. ## cgroups cgroups is a Linux kernel feature that limits, accounts for, and isolates the resource usage (CPU, memory, disk I/O, network, etc.) of a collection of processes. Docker relies on cgroups to control and isolate resource limits. *Also known as : control groups* ## Compose [Compose](https://github.com/docker/compose) is a tool for defining and running complex applications with Docker. 
With compose, you define a multi-container application in a single file, then spin your application up in a single command which does everything that needs to be done to get it running. *Also known as : docker-compose, fig* ## container A container is a runtime instance of a [docker image](#image). A Docker container consists of - A Docker image - Execution environment - A standard set of instructions The concept is borrowed from Shipping Containers, which define a standard to ship goods globally. Docker defines a standard to ship software. ## data volume A data volume is a specially-designated directory within one or more containers that bypasses the Union File System. Data volumes are designed to persist data, independent of the container's life cycle. Docker therefore never automatically delete volumes when you remove a container, nor will it "garbage collect" volumes that are no longer referenced by a container. ## Docker The term Docker can refer to - The Docker project as a whole, which is a platform for developers and sysadmins to develop, ship, and run applications - The docker daemon process running on the host which manages images and containers ## Docker Hub The [Docker Hub](https://hub.docker.com/) is a centralized resource for working with Docker and its components. It provides the following services: - Docker image hosting - User authentication - Automated image builds and work-flow tools such as build triggers and web hooks - Integration with GitHub and Bitbucket ## Dockerfile A Dockerfile is a text document that contains all the commands you would normally execute manually in order to build a Docker image. Docker can build images automatically by reading the instructions from a Dockerfile. ## filesystem A file system is the method an operating system uses to name files and assign them locations for efficient storage and retrieval. 
Examples : - Linux : ext4, aufs, btrfs, zfs - Windows : NTFS - OS X : HFS+ ## image Docker images are the basis of [containers](#container). An Image is an ordered collection of root filesystem changes and the corresponding execution parameters for use within a container runtime. An image typically contains a union of layered filesystems stacked on top of each other. An image does not have state and it never changes. ## libcontainer libcontainer provides a native Go implementation for creating containers with namespaces, cgroups, capabilities, and filesystem access controls. It allows you to manage the lifecycle of the container performing additional operations after the container is created. ## libnetwork libnetwork provides a native Go implementation for creating and managing container network namespaces and other network resources. It manage the networking lifecycle of the container performing additional operations after the container is created. ## link links provide a legacy interface to connect Docker containers running on the same host to each other without exposing the hosts' network ports. Use the Docker networks feature instead. ## Machine [Machine](https://github.com/docker/machine) is a Docker tool which makes it really easy to create Docker hosts on your computer, on cloud providers and inside your own data center. It creates servers, installs Docker on them, then configures the Docker client to talk to them. *Also known as : docker-machine* ## overlay network driver Overlay network driver provides out of the box multi-host network connectivity for docker containers in a cluster. ## overlay storage driver OverlayFS is a [filesystem](#filesystem) service for Linux which implements a [union mount](http://en.wikipedia.org/wiki/Union_mount) for other file systems. It is supported by the Docker daemon as a storage driver. ## registry A Registry is a hosted service containing [repositories](#repository) of [images](#image) which responds to the Registry API. 
The default registry can be accessed using a browser at [Docker Hub](#docker-hub) or using the `docker search` command. ## repository A repository is a set of Docker images. A repository can be shared by pushing it to a [registry](#registry) server. The different images in the repository can be labeled using [tags](#tag). Here is an example of the shared [nginx repository](https://registry.hub.docker.com/_/nginx/) and its [tags](https://registry.hub.docker.com/_/nginx/tags/manage/) ## Swarm [Swarm](https://github.com/docker/swarm) is a native clustering tool for Docker. Swarm pools together several Docker hosts and exposes them as a single virtual Docker host. It serves the standard Docker API, so any tool that already works with Docker can now transparently scale up to multiple hosts. *Also known as : docker-swarm* ## tag A tag is a label applied to a Docker image in a [repository](#repository). tags are how various images in a repository are distinguished from each other. *Note : This label is not related to the key=value labels set for docker daemon* ## Toolbox Docker Toolbox is the installer for Mac and Windows users. ## Union file system Union file systems, or UnionFS, are file systems that operate by creating layers, making them very lightweight and fast. Docker uses union file systems to provide the building blocks for containers. ## Virtual Machine A Virtual Machine is a program that emulates a complete computer and imitates dedicated hardware. It shares physical hardware resources with other users but isolates the operating system. The end user has the same experience on a Virtual Machine as they would have on dedicated hardware. Compared to to containers, a Virtual Machine is heavier to run, provides more isolation, gets its own set of resources and does minimal sharing. 
*Also known as : VM* docker-1.10.3/docs/reference/index.md000066400000000000000000000005641267010174400173710ustar00rootroot00000000000000 # Engine reference * [Dockerfile reference](builder.md) * [Docker run reference](run.md) * [Command line reference](commandline/index.md) * [API Reference](api/index.md) docker-1.10.3/docs/reference/run.md000066400000000000000000002023161267010174400170650ustar00rootroot00000000000000 # Docker run reference Docker runs processes in isolated containers. A container is a process which runs on a host. The host may be local or remote. When an operator executes `docker run`, the container process that runs is isolated in that it has its own file system, its own networking, and its own isolated process tree separate from the host. This page details how to use the `docker run` command to define the container's resources at runtime. ## General form The basic `docker run` command takes this form: $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] The `docker run` command must specify an [*IMAGE*](glossary.md#image) to derive the container from. An image developer can define image defaults related to: * detached or foreground running * container identification * network settings * runtime constraints on CPU and memory With the `docker run [OPTIONS]` an operator can add to or override the image defaults set by a developer. And, additionally, operators can override nearly all the defaults set by the Docker runtime itself. The operator's ability to override image and Docker runtime defaults is why [*run*](commandline/run.md) has more options than any other `docker` command. To learn how to interpret the types of `[OPTIONS]`, see [*Option types*](commandline/cli.md#option-types). > **Note**: Depending on your Docker system configuration, you may be > required to preface the `docker run` command with `sudo`. 
To avoid > having to use `sudo` with the `docker` command, your system > administrator can create a Unix group called `docker` and add users to > it. For more information about this configuration, refer to the Docker > installation documentation for your operating system. ## Operator exclusive options Only the operator (the person executing `docker run`) can set the following options. - [Detached vs foreground](#detached-vs-foreground) - [Detached (-d)](#detached-d) - [Foreground](#foreground) - [Container identification](#container-identification) - [Name (--name)](#name-name) - [PID equivalent](#pid-equivalent) - [IPC settings (--ipc)](#ipc-settings-ipc) - [Network settings](#network-settings) - [Restart policies (--restart)](#restart-policies-restart) - [Clean up (--rm)](#clean-up-rm) - [Runtime constraints on resources](#runtime-constraints-on-resources) - [Runtime privilege and Linux capabilities](#runtime-privilege-and-linux-capabilities) ## Detached vs foreground When starting a Docker container, you must first decide if you want to run the container in the background in a "detached" mode or in the default foreground mode: -d=false: Detached mode: Run container in the background, print new container id ### Detached (-d) To start a container in detached mode, you use `-d=true` or just `-d` option. By design, containers started in detached mode exit when the root process used to run the container exits. A container in detached mode cannot be automatically removed when it stops, this means you cannot use the `--rm` option with `-d` option. Do not pass a `service x start` command to a detached container. For example, this command attempts to start the `nginx` service. $ docker run -d -p 80:80 my_image service nginx start This succeeds in starting the `nginx` service inside the container. However, it fails the detached container paradigm in that, the root process (`service nginx start`) returns and the detached container stops as designed. 
As a result, the `nginx` service is started but could not be used. Instead, to start a process such as the `nginx` web server do the following: $ docker run -d -p 80:80 my_image nginx -g 'daemon off;' To do input/output with a detached container use network connections or shared volumes. These are required because the container is no longer listening to the command line where `docker run` was run. To reattach to a detached container, use `docker` [*attach*](commandline/attach.md) command. ### Foreground In foreground mode (the default when `-d` is not specified), `docker run` can start the process in the container and attach the console to the process's standard input, output, and standard error. It can even pretend to be a TTY (this is what most command line executables expect) and pass along signals. All of that is configurable: -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` -t : Allocate a pseudo-tty --sig-proxy=true: Proxy all received signals to the process (non-TTY mode only) -i : Keep STDIN open even if not attached If you do not specify `-a` then Docker will [attach all standard streams]( https://github.com/docker/docker/blob/75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797). You can specify to which of the three standard streams (`STDIN`, `STDOUT`, `STDERR`) you'd like to connect instead, as in: $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash For interactive processes (like a shell), you must use `-i -t` together in order to allocate a tty for the container process. `-i -t` is often written `-it` as you'll see in later examples. Specifying `-t` is forbidden when the client standard output is redirected or piped, such as in: $ echo test | docker run -i busybox cat >**Note**: A process running as PID 1 inside a container is treated >specially by Linux: it ignores any signal with the default action. >So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is >coded to do so. 
## Container identification ### Name (--name) The operator can identify a container in three ways: | Identifier type | Example value | | --------------------- | ------------------------------------------------------------------ | | UUID long identifier | "f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778" | | UUID short identifier | "f78375b1c487" | | Name | "evil_ptolemy" | The UUID identifiers come from the Docker daemon. If you do not assign a container name with the `--name` option, then the daemon generates a random string name for you. Defining a `name` can be a handy way to add meaning to a container. If you specify a `name`, you can use it when referencing the container within a Docker network. This works for both background and foreground Docker containers. > **Note**: Containers on the default bridge network must be linked to > communicate by name. ### PID equivalent Finally, to help with automation, you can have Docker write the container ID out to a file of your choosing. This is similar to how some programs might write out their process ID to a file (you've seen them as PID files): --cidfile="": Write the container ID to the file ### Image[:tag] While not strictly a means of identifying a container, you can specify a version of an image you'd like to run the container with by adding `image[:tag]` to the command. For example, `docker run ubuntu:14.04`. ### Image[@digest] Images using the v2 or later image format have a content-addressable identifier called a digest. As long as the input used to generate the image is unchanged, the digest value is predictable and referenceable. ## PID settings (--pid) --pid="" : Set the PID (Process) Namespace mode for the container, 'host': use the host's PID namespace inside the container By default, all containers have the PID namespace enabled. PID namespace provides separation of processes. The PID Namespace removes the view of the system processes, and allows process ids to be reused including pid 1. 
In certain cases you want your container to share the host's process namespace, basically allowing processes within the container to see all of the processes on the system. For example, you could build a container with debugging tools like `strace` or `gdb`, but want to use these tools when debugging processes within the container. ### Example: run htop inside a container Create this Dockerfile: ``` FROM alpine:latest RUN apk add --update htop && rm -rf /var/cache/apk/* CMD ["htop"] ``` Build the Dockerfile and tag the image as `myhtop`: ```bash $ docker build -t myhtop . ``` Use the following command to run `htop` inside a container: ``` $ docker run -it --rm --pid=host myhtop ``` ## UTS settings (--uts) --uts="" : Set the UTS namespace mode for the container, 'host': use the host's UTS namespace inside the container The UTS namespace is for setting the hostname and the domain that is visible to running processes in that namespace. By default, all containers, including those with `--net=host`, have their own UTS namespace. The `host` setting will result in the container using the same UTS namespace as the host. You may wish to share the UTS namespace with the host if you would like the hostname of the container to change as the hostname of the host changes. A more advanced use case would be changing the host's hostname from a container. > **Note**: `--uts="host"` gives the container full access to change the > hostname of the host and is therefore considered insecure. ## IPC settings (--ipc) --ipc="" : Set the IPC mode for the container, 'container:': reuses another container's IPC namespace 'host': use the host's IPC namespace inside the container By default, all containers have the IPC namespace enabled. IPC (POSIX/SysV IPC) namespace provides separation of named shared memory segments, semaphores and message queues. Shared memory segments are used to accelerate inter-process communication at memory speed, rather than through pipes or through the network stack. 
Shared memory is commonly used by databases and custom-built (typically C/OpenMPI, C++/using boost libraries) high performance applications for scientific computing and financial services industries. If these types of applications are broken into multiple containers, you might need to share the IPC mechanisms of the containers. ## Network settings --dns=[] : Set custom dns servers for the container --net="bridge" : Connect a container to a network 'bridge': create a network stack on the default Docker bridge 'none': no networking 'container:': reuse another container's network stack 'host': use the Docker host network stack '|': connect to a user-defined network --net-alias=[] : Add network-scoped alias for the container --add-host="" : Add a line to /etc/hosts (host:IP) --mac-address="" : Sets the container's Ethernet device's MAC address --ip="" : Sets the container's Ethernet device's IPv4 address --ip6="" : Sets the container's Ethernet device's IPv6 address By default, all containers have networking enabled and they can make any outgoing connections. The operator can completely disable networking with `docker run --net none` which disables all incoming and outgoing networking. In cases like this, you would perform I/O through files or `STDIN` and `STDOUT` only. Publishing ports and linking to other containers only works with the the default (bridge). The linking feature is a legacy feature. You should always prefer using Docker network drivers over linking. Your container will use the same DNS servers as the host by default, but you can override this with `--dns`. By default, the MAC address is generated using the IP address allocated to the container. You can set the container's MAC address explicitly by providing a MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`). Supported networks :
Network Description
none No networking in the container.
bridge (default) Connect the container to the bridge via veth interfaces.
host Use the host's network stack inside the container.
container:<name|id> Use the network stack of another container, specified via its *name* or *id*.
NETWORK Connects the container to a user created network (using `docker network create` command)
#### Network: none With the network is `none` a container will not have access to any external routes. The container will still have a `loopback` interface enabled in the container but it does not have any routes to external traffic. #### Network: bridge With the network set to `bridge` a container will use docker's default networking setup. A bridge is setup on the host, commonly named `docker0`, and a pair of `veth` interfaces will be created for the container. One side of the `veth` pair will remain on the host attached to the bridge while the other side of the pair will be placed inside the container's namespaces in addition to the `loopback` interface. An IP address will be allocated for containers on the bridge's network and traffic will be routed though this bridge to the container. Containers can communicate via their IP addresses by default. To communicate by name, they must be linked. #### Network: host With the network set to `host` a container will share the host's network stack and all interfaces from the host will be available to the container. The container's hostname will match the hostname on the host system. Note that `--add-host` `--hostname` `--dns` `--dns-search` `--dns-opt` and `--mac-address` are invalid in `host` netmode. Compared to the default `bridge` mode, the `host` mode gives *significantly* better networking performance since it uses the host's native networking stack whereas the bridge has to go through one level of virtualization through the docker daemon. It is recommended to run containers in this mode when their networking performance is critical, for example, a production Load Balancer or a High Performance Web Server. > **Note**: `--net="host"` gives the container full access to local system > services such as D-bus and is therefore considered insecure. #### Network: container With the network set to `container` a container will share the network stack of another container. 
The other container's name must be provided in the format of `--net container:`. Note that `--add-host` `--hostname` `--dns` `--dns-search` `--dns-opt` and `--mac-address` are invalid in `container` netmode, and `--publish` `--publish-all` `--expose` are also invalid in `container` netmode. Example running a Redis container with Redis binding to `localhost` then running the `redis-cli` command and connecting to the Redis server over the `localhost` interface. $ docker run -d --name redis example/redis --bind 127.0.0.1 $ # use the redis container's network stack to access localhost $ docker run --rm -it --net container:redis example/redis-cli -h 127.0.0.1 #### User-defined network You can create a network using a Docker network driver or an external network driver plugin. You can connect multiple containers to the same network. Once connected to a user-defined network, the containers can communicate easily using only another container's IP address or name. For `overlay` networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way. The following example creates a network using the built-in `bridge` network driver and running a container in the created network ``` $ docker network create -d bridge my-net $ docker run --net=my-net -itd --name=container3 busybox ``` ### Managing /etc/hosts Your container will have lines in `/etc/hosts` which define the hostname of the container itself as well as `localhost` and a few other common things. The `--add-host` flag can be used to add additional lines to `/etc/hosts`. 
$ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts 172.17.0.22 09d03f76bf2c fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback 86.75.30.9 db-static If a container is connected to the default bridge network and `linked` with other containers, then the container's `/etc/hosts` file is updated with the linked container's name. If the container is connected to user-defined network, the container's `/etc/hosts` file is updated with names of all other containers in that user-defined network. > **Note** Since Docker may live update the container’s `/etc/hosts` file, there may be situations when processes inside the container can end up reading an empty or incomplete `/etc/hosts` file. In most cases, retrying the read again should fix the problem. ## Restart policies (--restart) Using the `--restart` flag on Docker run you can specify a restart policy for how a container should or should not be restarted on exit. When a restart policy is active on a container, it will be shown as either `Up` or `Restarting` in [`docker ps`](commandline/ps.md). It can also be useful to use [`docker events`](commandline/events.md) to see the restart policy in effect. Docker supports the following restart policies:
Policy Result
no Do not automatically restart the container when it exits. This is the default.
on-failure[:max-retries] Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts.
always Always restart the container regardless of the exit status. When you specify always, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container.
unless-stopped Always restart the container regardless of the exit status, but do not start it on daemon startup if the container has been put to a stopped state before.
An ever increasing delay (double the previous delay, starting at 100 milliseconds) is added before each restart to prevent flooding the server. This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600, and so on until either the `on-failure` limit is hit, or when you `docker stop` or `docker rm -f` the container. If a container is successfully restarted (the container is started and runs for at least 10 seconds), the delay is reset to its default value of 100 ms. You can specify the maximum amount of times Docker will try to restart the container when using the **on-failure** policy. The default is that Docker will try forever to restart the container. The number of (attempted) restarts for a container can be obtained via [`docker inspect`](commandline/inspect.md). For example, to get the number of restarts for container "my-container"; $ docker inspect -f "{{ .RestartCount }}" my-container # 2 Or, to get the last time the container was (re)started; $ docker inspect -f "{{ .State.StartedAt }}" my-container # 2015-03-04T23:47:07.691840179Z Combining `--restart` (restart policy) with the `--rm` (clean up) flag results in an error. On container restart, attached clients are disconnected. See the examples on using the [`--rm` (clean up)](#clean-up-rm) flag later in this page. ### Examples $ docker run --restart=always redis This will run the `redis` container with a restart policy of **always** so that if the container exits, Docker will restart it. $ docker run --restart=on-failure:10 redis This will run the `redis` container with a restart policy of **on-failure** and a maximum restart count of 10. If the `redis` container exits with a non-zero exit status more than 10 times in a row Docker will abort trying to restart the container. Providing a maximum restart limit is only valid for the **on-failure** policy. ## Exit Status The exit code from `docker run` gives information about why the container failed to run or why it exited. 
When `docker run` exits with a non-zero code, the exit codes follow the `chroot` standard, see below: **_125_** if the error is with Docker daemon **_itself_** $ docker run --foo busybox; echo $? # flag provided but not defined: --foo See 'docker run --help'. 125 **_126_** if the **_contained command_** cannot be invoked $ docker run busybox /etc; echo $? # exec: "/etc": permission denied docker: Error response from daemon: Contained command could not be invoked 126 **_127_** if the **_contained command_** cannot be found $ docker run busybox foo; echo $? # exec: "foo": executable file not found in $PATH docker: Error response from daemon: Contained command not found or does not exist 127 **_Exit code_** of **_contained command_** otherwise $ docker run busybox /bin/sh -c 'exit 3' # 3 ## Clean up (--rm) By default a container's file system persists even after the container exits. This makes debugging a lot easier (since you can inspect the final state) and you retain all your data by default. But if you are running short-term **foreground** processes, these container file systems can really pile up. If instead you'd like Docker to **automatically clean up the container and remove the file system when the container exits**, you can add the `--rm` flag: --rm=false: Automatically remove the container when it exits (incompatible with -d) > **Note**: When you set the `--rm` flag, Docker also removes the volumes associated with the container when the container is removed. This is similar to running `docker rm -v my-container`. Only volumes that are specified without a name are removed. For example, with `docker run --rm -v /foo -v awesome:/bar busybox top`, the volume for `/foo` will be removed, but the volume for `/bar` will not. Volumes inheritted via `--volumes-from` will be removed with the same logic -- if the original volume was specified with a name it will **not** be removed. 
## Security configuration --security-opt="label:user:USER" : Set the label user for the container --security-opt="label:role:ROLE" : Set the label role for the container --security-opt="label:type:TYPE" : Set the label type for the container --security-opt="label:level:LEVEL" : Set the label level for the container --security-opt="label:disable" : Turn off label confinement for the container --security-opt="apparmor:PROFILE" : Set the apparmor profile to be applied to the container You can override the default labeling scheme for each container by specifying the `--security-opt` flag. For example, you can specify the MCS/MLS level, a requirement for MLS systems. Specifying the level in the following command allows you to share the same content between containers. $ docker run --security-opt label:level:s0:c100,c200 -it fedora bash An MLS example might be: $ docker run --security-opt label:level:TopSecret -it rhel7 bash To disable the security labeling for this container versus running with the `--permissive` flag, use the following command: $ docker run --security-opt label:disable -it fedora bash If you want a tighter security policy on the processes within a container, you can specify an alternate type for the container. You could run a container that is only allowed to listen on Apache ports by executing the following command: $ docker run --security-opt label:type:svirt_apache_t -it centos bash > **Note**: You would have to write policy defining a `svirt_apache_t` type. ## Specifying custom cgroups Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a container in. This allows you to create and manage cgroups on their own. You can define custom resources for those cgroups and put containers under a common parent group. 
## Runtime constraints on resources The operator can also adjust the performance parameters of the container: | Option | Description | | -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | | `-m`, `--memory=""` | Memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | | `--memory-swap=""` | Total memory limit (memory + swap, format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | | `--memory-reservation=""` | Memory soft limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | | `--kernel-memory=""` | Kernel memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | | `-c`, `--cpu-shares=0` | CPU shares (relative weight) | | `--cpu-period=0` | Limit the CPU CFS (Completely Fair Scheduler) period | | `--cpuset-cpus=""` | CPUs in which to allow execution (0-3, 0,1) | | `--cpuset-mems=""` | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. | | `--cpu-quota=0` | Limit the CPU CFS (Completely Fair Scheduler) quota | | `--blkio-weight=0` | Block IO weight (relative weight) accepts a weight value between 10 and 1000. | | `--blkio-weight-device=""` | Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`) | | `--device-read-bps=""` | Limit read rate from a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | | `--device-write-bps=""` | Limit write rate to a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | | `--device-read-iops="" ` | Limit read rate (IO per second) from a device (format: `:`). Number is a positive integer. | | `--device-write-iops="" ` | Limit write rate (IO per second) to a device (format: `:`). 
Number is a positive integer. | | `--oom-kill-disable=false` | Whether to disable OOM Killer for the container or not. | | `--memory-swappiness=""` | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. | | `--shm-size=""` | Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. | ### User memory constraints We have four ways to set user memory usage:
Option Result
memory=inf, memory-swap=inf (default) There is no memory limit for the container. The container can use as much memory as needed.
memory=L<inf, memory-swap=inf (specify memory and set memory-swap as -1) The container is not allowed to use more than L bytes of memory, but can use as much swap as is needed (if the host supports swap memory).
memory=L<inf, memory-swap=2*L (specify memory without memory-swap) The container is not allowed to use more than L bytes of memory, swap *plus* memory usage is double of that.
memory=L<inf, memory-swap=S<inf, L<=S (specify both memory and memory-swap) The container is not allowed to use more than L bytes of memory, swap *plus* memory usage is limited by S.
Examples: $ docker run -it ubuntu:14.04 /bin/bash We set nothing about memory, this means the processes in the container can use as much memory and swap memory as they need. $ docker run -it -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash We set memory limit and disabled swap memory limit, this means the processes in the container can use 300M memory and as much swap memory as they need (if the host supports swap memory). $ docker run -it -m 300M ubuntu:14.04 /bin/bash We set memory limit only, this means the processes in the container can use 300M memory and 300M swap memory, by default, the total virtual memory size (--memory-swap) will be set as double of memory, in this case, memory + swap would be 2*300M, so processes can use 300M swap memory as well. $ docker run -it -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash We set both memory and swap memory, so the processes in the container can use 300M memory and 700M swap memory. Memory reservation is a kind of memory soft limit that allows for greater sharing of memory. Under normal circumstances, containers can use as much of the memory as needed and are constrained only by the hard limits set with the `-m`/`--memory` option. When memory reservation is set, Docker detects memory contention or low memory and forces containers to restrict their consumption to a reservation limit. Always set the memory reservation value below the hard limit, otherwise the hard limit takes precedence. A reservation of 0 is the same as setting no reservation. By default (without reservation set), memory reservation is the same as the hard memory limit. Memory reservation is a soft-limit feature and does not guarantee the limit won't be exceeded. Instead, the feature attempts to ensure that, when memory is heavily contended for, memory is allocated based on the reservation hints/setup. The following example limits the memory (`-m`) to 500M and sets the memory reservation to 200M. 
```bash $ docker run -it -m 500M --memory-reservation 200M ubuntu:14.04 /bin/bash ``` Under this configuration, when the container consumes memory more than 200M and less than 500M, the next system memory reclaim attempts to shrink container memory below 200M. The following example set memory reservation to 1G without a hard memory limit. ```bash $ docker run -it --memory-reservation 1G ubuntu:14.04 /bin/bash ``` The container can use as much memory as it needs. The memory reservation setting ensures the container doesn't consume too much memory for long time, because every memory reclaim shrinks the container's consumption to the reservation. By default, kernel kills processes in a container if an out-of-memory (OOM) error occurs. To change this behaviour, use the `--oom-kill-disable` option. Only disable the OOM killer on containers where you have also set the `-m/--memory` option. If the `-m` flag is not set, this can result in the host running out of memory and require killing the host's system processes to free memory. The following example limits the memory to 100M and disables the OOM killer for this container: $ docker run -it -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash The following example, illustrates a dangerous way to use the flag: $ docker run -it --oom-kill-disable ubuntu:14.04 /bin/bash The container has unlimited memory which can cause the host to run out memory and require killing system processes to free memory. ### Kernel memory constraints Kernel memory is fundamentally different than user memory as kernel memory can't be swapped out. The inability to swap makes it possible for the container to block system services by consuming too much kernel memory. Kernel memory includes: - stack pages - slab pages - sockets memory pressure - tcp memory pressure You can setup kernel memory limit to constrain these kinds of memory. For example, every process consumes some stack pages. 
By limiting kernel memory, you can prevent new processes from being created when the kernel memory usage is too high. Kernel memory is never completely independent of user memory. Instead, you limit kernel memory in the context of the user memory limit. Assume "U" is the user memory limit and "K" the kernel limit. There are three possible ways to set limits:
Option Result
U != 0, K = inf (default) This is the standard memory limitation mechanism already present before using kernel memory. Kernel memory is completely ignored.
U != 0, K < U Kernel memory is a subset of the user memory. This setup is useful in deployments where the total amount of memory per-cgroup is overcommitted. Overcommitting kernel memory limits is definitely not recommended, since the box can still run out of non-reclaimable memory. In this case, the you can configure K so that the sum of all groups is never greater than the total memory. Then, freely set U at the expense of the system's service quality.
U != 0, K > U Since kernel memory charges are also fed to the user counter and reclamation is triggered for the container for both kinds of memory. This configuration gives the admin a unified view of memory. It is also useful for people who just want to track kernel memory usage.
Examples: $ docker run -it -m 500M --kernel-memory 50M ubuntu:14.04 /bin/bash We set memory and kernel memory, so the processes in the container can use 500M memory in total, in this 500M memory, it can be 50M kernel memory tops. $ docker run -it --kernel-memory 50M ubuntu:14.04 /bin/bash We set kernel memory without **-m**, so the processes in the container can use as much memory as they want, but they can only use 50M kernel memory. ### Swappiness constraint By default, a container's kernel can swap out a percentage of anonymous pages. To set this percentage for a container, specify a `--memory-swappiness` value between 0 and 100. A value of 0 turns off anonymous page swapping. A value of 100 sets all anonymous pages as swappable. By default, if you are not using `--memory-swappiness`, memory swappiness value will be inherited from the parent. For example, you can set: $ docker run -it --memory-swappiness=0 ubuntu:14.04 /bin/bash Setting the `--memory-swappiness` option is helpful when you want to retain the container's working set and to avoid swapping performance penalties. ### CPU share constraint By default, all containers get the same proportion of CPU cycles. This proportion can be modified by changing the container's CPU share weighting relative to the weighting of all other running containers. To modify the proportion from the default of 1024, use the `-c` or `--cpu-shares` flag to set the weighting to 2 or higher. If 0 is set, the system will ignore the value and use the default of 1024. The proportion will only apply when CPU-intensive processes are running. When tasks in one container are idle, other containers can use the left-over CPU time. The actual amount of CPU time will vary depending on the number of containers running on the system. For example, consider three containers, one has a cpu-share of 1024 and two others have a cpu-share setting of 512. 
When processes in all three containers attempt to use 100% of CPU, the first container would receive 50% of the total CPU time. If you add a fourth container with a cpu-share of 1024, the first container only gets 33% of the CPU. The remaining containers receive 16.5%, 16.5% and 33% of the CPU. On a multi-core system, the shares of CPU time are distributed over all CPU cores. Even if a container is limited to less than 100% of CPU time, it can use 100% of each individual CPU core. For example, consider a system with more than three cores. If you start one container `{C0}` with `-c=512` running one process, and another container `{C1}` with `-c=1024` running two processes, this can result in the following division of CPU shares: PID container CPU CPU share 100 {C0} 0 100% of CPU0 101 {C1} 1 100% of CPU1 102 {C1} 2 100% of CPU2 ### CPU period constraint The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use `--cpu-period` to set the period of CPUs to limit the container's CPU usage. And usually `--cpu-period` should work with `--cpu-quota`. Examples: $ docker run -it --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms. For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). ### Cpuset constraint We can set cpus in which to allow execution for containers. Examples: $ docker run -it --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash This means processes in container can be executed on cpu 1 and cpu 3. $ docker run -it --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash This means processes in container can be executed on cpu 0, cpu 1 and cpu 2. We can set mems in which to allow execution for containers. Only effective on NUMA systems. 
Examples: $ docker run -it --cpuset-mems="1,3" ubuntu:14.04 /bin/bash This example restricts the processes in the container to only use memory from memory nodes 1 and 3. $ docker run -it --cpuset-mems="0-2" ubuntu:14.04 /bin/bash This example restricts the processes in the container to only use memory from memory nodes 0, 1 and 2. ### CPU quota constraint The `--cpu-quota` flag limits the container's CPU usage. The default 0 value allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair Scheduler) handles resource allocation for executing processes and is default Linux Scheduler used by the kernel. Set this value to 50000 to limit the container to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary. For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). ### Block IO bandwidth (Blkio) constraint By default, all containers get the same proportion of block IO bandwidth (blkio). This proportion is 500. To modify this proportion, change the container's blkio weight relative to the weighting of all other running containers using the `--blkio-weight` flag. > **Note:** The blkio weight setting is only available for direct IO. Buffered IO > is not currently supported. The `--blkio-weight` flag can set the weighting to a value between 10 to 1000. For example, the commands below create two containers with different blkio weight: $ docker run -it --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash $ docker run -it --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash If you do block IO in the two containers at the same time, by, for example: $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct You'll find that the proportion of time is the same as the proportion of blkio weights of the two containers. The `--blkio-weight-device="DEVICE_NAME:WEIGHT"` flag sets a specific device weight. 
The `DEVICE_NAME:WEIGHT` is a string containing a colon-separated device name and weight. For example, to set `/dev/sda` device weight to `200`: $ docker run -it \ --blkio-weight-device "/dev/sda:200" \ ubuntu If you specify both the `--blkio-weight` and `--blkio-weight-device`, Docker uses the `--blkio-weight` as the default weight and uses `--blkio-weight-device` to override this default with a new value on a specific device. The following example uses a default weight of `300` and overrides this default on `/dev/sda` setting that weight to `200`: $ docker run -it \ --blkio-weight 300 \ --blkio-weight-device "/dev/sda:200" \ ubuntu The `--device-read-bps` flag limits the read rate (bytes per second) from a device. For example, this command creates a container and limits the read rate to `1mb` per second from `/dev/sda`: $ docker run -it --device-read-bps /dev/sda:1mb ubuntu The `--device-write-bps` flag limits the write rate (bytes per second)to a device. For example, this command creates a container and limits the write rate to `1mb` per second for `/dev/sda`: $ docker run -it --device-write-bps /dev/sda:1mb ubuntu Both flags take limits in the `:[unit]` format. Both read and write rates must be a positive integer. You can specify the rate in `kb` (kilobytes), `mb` (megabytes), or `gb` (gigabytes). The `--device-read-iops` flag limits read rate (IO per second) from a device. For example, this command creates a container and limits the read rate to `1000` IO per second from `/dev/sda`: $ docker run -ti --device-read-iops /dev/sda:1000 ubuntu The `--device-write-iops` flag limits write rate (IO per second) to a device. For example, this command creates a container and limits the write rate to `1000` IO per second to `/dev/sda`: $ docker run -ti --device-write-iops /dev/sda:1000 ubuntu Both flags take limits in the `:` format. Both read and write rates must be a positive integer. 
## Additional groups --group-add: Add Linux capabilities By default, the docker container process runs with the supplementary groups looked up for the specified user. If one wants to add more to that list of groups, then one can use this flag: $ docker run -it --rm --group-add audio --group-add dbus --group-add 777 busybox id uid=0(root) gid=0(root) groups=10(wheel),29(audio),81(dbus),777 ## Runtime privilege and Linux capabilities --cap-add: Add Linux capabilities --cap-drop: Drop Linux capabilities --privileged=false: Give extended privileges to this container --device=[]: Allows you to run devices inside the container without the --privileged flag. By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. This is because by default a container is not allowed to access any devices, but a "privileged" container is given access to all devices (see the documentation on [cgroups devices](https://www.kernel.org/doc/Documentation/cgroups/devices.txt)). When the operator executes `docker run --privileged`, Docker will enable to access to all devices on the host as well as set some configuration in AppArmor or SELinux to allow the container nearly all the same access to the host as processes running outside containers on the host. Additional information about running with `--privileged` is available on the [Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/). If you want to limit access to a specific device or devices you can use the `--device` flag. It allows you to specify one or more devices that will be accessible within the container. $ docker run --device=/dev/snd:/dev/snd ... By default, the container will be able to `read`, `write`, and `mknod` these devices. 
This can be overridden using a third `:rwm` set of options to each `--device` flag: $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc Command (m for help): q $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc You will not be able to write the partition table. Command (m for help): q $ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc crash.... $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc fdisk: unable to open /dev/xvdc: Operation not permitted In addition to `--privileged`, the operator can have fine grain control over the capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default list of capabilities that are kept. The following table lists the Linux capability options which can be added or dropped. | Capability Key | Capability Description | | ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | | SETPCAP | Modify process capabilities. | | SYS_MODULE | Load and unload kernel modules. | | SYS_RAWIO | Perform I/O port operations (iopl(2) and ioperm(2)). | | SYS_PACCT | Use acct(2), switch process accounting on or off. | | SYS_ADMIN | Perform a range of system administration operations. | | SYS_NICE | Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes. | | SYS_RESOURCE | Override resource Limits. | | SYS_TIME | Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock. | | SYS_TTY_CONFIG | Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals. | | MKNOD | Create special files using mknod(2). | | AUDIT_WRITE | Write records to kernel auditing log. | | AUDIT_CONTROL | Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules. | | MAC_OVERRIDE | Allow MAC configuration or state changes. 
Implemented for the Smack LSM. | | MAC_ADMIN | Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM). | | NET_ADMIN | Perform various network-related operations. | | SYSLOG | Perform privileged syslog(2) operations. | | CHOWN | Make arbitrary changes to file UIDs and GIDs (see chown(2)). | | NET_RAW | Use RAW and PACKET sockets. | | DAC_OVERRIDE | Bypass file read, write, and execute permission checks. | | FOWNER | Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file. | | DAC_READ_SEARCH | Bypass file read permission checks and directory read and execute permission checks. | | FSETID | Don't clear set-user-ID and set-group-ID permission bits when a file is modified. | | KILL | Bypass permission checks for sending signals. | | SETGID | Make arbitrary manipulations of process GIDs and supplementary GID list. | | SETUID | Make arbitrary manipulations of process UIDs. | | LINUX_IMMUTABLE | Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags. | | NET_BIND_SERVICE | Bind a socket to internet domain privileged ports (port numbers less than 1024). | | NET_BROADCAST | Make socket broadcasts, and listen to multicasts. | | IPC_LOCK | Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)). | | IPC_OWNER | Bypass permission checks for operations on System V IPC objects. | | SYS_CHROOT | Use chroot(2), change root directory. | | SYS_PTRACE | Trace arbitrary processes using ptrace(2). | | SYS_BOOT | Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution. | | LEASE | Establish leases on arbitrary files (see fcntl(2)). | | SETFCAP | Set file capabilities. | | WAKE_ALARM | Trigger something that will wake up the system. | | BLOCK_SUSPEND | Employ features that can block system suspend. 
Further reference information is available on the [capabilities(7) - Linux man page](http://linux.die.net/man/7/capabilities) Both flags support the value `ALL`, so if the operator wants to have all capabilities but `MKNOD` they could use: $ docker run --cap-add=ALL --cap-drop=MKNOD ... For interacting with the network stack, instead of using `--privileged` they should use `--cap-add=NET_ADMIN` to modify the network interfaces. $ docker run -it --rm ubuntu:14.04 ip link add dummy0 type dummy RTNETLINK answers: Operation not permitted $ docker run -it --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy To mount a FUSE based filesystem, you need to combine both `--cap-add` and `--device`: $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt fuse: failed to open /dev/fuse: Operation not permitted $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt fusermount: mount failed: Operation not permitted $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs # sshfs sven@10.10.10.20:/home/sven /mnt The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established. ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6. Are you sure you want to continue connecting (yes/no)? yes sven@10.10.10.20's password: root@30aa0cfaf1b5:/# ls -la /mnt/src/docker total 1516 drwxrwxr-x 1 1000 1000 4096 Dec 4 06:08 . drwxrwxr-x 1 1000 1000 4096 Dec 4 11:46 .. -rw-rw-r-- 1 1000 1000 16 Oct 8 00:09 .dockerignore -rwxrwxr-x 1 1000 1000 464 Oct 8 00:09 .drone.yml drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore .... ## Logging drivers (--log-driver) The container can have a different logging driver than the Docker daemon. Use the `--log-driver=VALUE` with the `docker run` command to configure the container's logging driver. 
The following options are supported: | Driver | Description | | ----------- | ----------------------------------------------------------------------------------------------------------------------------- | | `none` | Disables any logging for the container. `docker logs` won't be available with this driver. | | `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. | | `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. | | `journald` | Journald logging driver for Docker. Writes log messages to `journald`. | | `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash. | | `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). | | `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs | | `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using Event Http Collector. | The `docker logs` command is available only for the `json-file` and `journald` logging drivers. For detailed information on working with logging drivers, see [Configure a logging driver](../admin/logging/overview.md). ## Overriding Dockerfile image defaults When a developer builds an image from a [*Dockerfile*](builder.md) or when she commits it, the developer can set a number of default parameters that take effect when the image starts up as a container. Four of the Dockerfile commands cannot be overridden at runtime: `FROM`, `MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override in `docker run`. We'll go through what the developer might have set in each Dockerfile instruction and how the operator can override that setting. 
- [CMD (Default Command or Options)](#cmd-default-command-or-options) - [ENTRYPOINT (Default Command to Execute at Runtime)]( #entrypoint-default-command-to-execute-at-runtime) - [EXPOSE (Incoming Ports)](#expose-incoming-ports) - [ENV (Environment Variables)](#env-environment-variables) - [VOLUME (Shared Filesystems)](#volume-shared-filesystems) - [USER](#user) - [WORKDIR](#workdir) ### CMD (default command or options) Recall the optional `COMMAND` in the Docker commandline: $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] This command is optional because the person who created the `IMAGE` may have already provided a default `COMMAND` using the Dockerfile `CMD` instruction. As the operator (the person running a container from the image), you can override that `CMD` instruction just by specifying a new `COMMAND`. If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` get appended as arguments to the `ENTRYPOINT`. ### ENTRYPOINT (default command to execute at runtime) --entrypoint="": Overwrite the default entrypoint set by the image The `ENTRYPOINT` of an image is similar to a `COMMAND` because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The `ENTRYPOINT` gives a container its default nature or behavior, so that when you set an `ENTRYPOINT` you can run the container *as if it were that binary*, complete with default options, and you can pass in more options via the `COMMAND`. But, sometimes an operator may want to run something else inside the container, so you can override the default `ENTRYPOINT` at runtime by using a string to specify the new `ENTRYPOINT`. 
Here is an example of how to run a shell in a container that has been set up to automatically run something else (like `/usr/bin/redis-server`): $ docker run -it --entrypoint /bin/bash example/redis or two examples of how to pass more parameters to that ENTRYPOINT: $ docker run -it --entrypoint /bin/bash example/redis -c ls -l $ docker run -it --entrypoint /usr/bin/redis-cli example/redis --help ### EXPOSE (incoming ports) The following `run` command options work with container networking: --expose=[]: Expose a port or a range of ports inside the container. These are additional to those exposed by the `EXPOSE` instruction -P : Publish all exposed ports to the host interfaces -p=[] : Publish a container᾿s port or a range of ports to the host format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort Both hostPort and containerPort can be specified as a range of ports. When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range, for example: -p 1234-1236:1234-1236/tcp When specifying a range for hostPort only, the containerPort must not be a range. In this case the container port is published somewhere within the specified hostPort range. (e.g., `-p 1234-1236:1234/tcp`) (use 'docker port' to see the actual mapping) --link="" : Add link to another container (:alias or ) With the exception of the `EXPOSE` directive, an image developer hasn't got much control over networking. The `EXPOSE` instruction defines the initial incoming ports that provide services. These ports are available to processes inside the container. An operator can use the `--expose` option to add to the exposed ports. To expose a container's internal port, an operator can start the container with the `-P` or `-p` flag. The exposed port is accessible on the host and the ports are available to any client that can reach the host. The `-P` option publishes all the ports to the host interfaces. 
Docker binds each exposed port to a random port on the host. The range of ports are within an *ephemeral port range* defined by `/proc/sys/net/ipv4/ip_local_port_range`. Use the `-p` flag to explicitly map a single port or range of ports. The port number inside the container (where the service listens) does not need to match the port number exposed on the outside of the container (where clients connect). For example, inside the container an HTTP service is listening on port 80 (and so the image developer specifies `EXPOSE 80` in the Dockerfile). At runtime, the port might be bound to 42800 on the host. To find the mapping between the host ports and the exposed ports, use `docker port`. If the operator uses `--link` when starting a new client container in the default bridge network, then the client container can access the exposed port via a private networking interface. If `--link` is used when starting a container in a user-defined network as described in [*Docker network overview*""](../userguide/networking/index.md)), it will provide a named alias for the container being linked to. ### ENV (environment variables) When a new container is created, Docker will set the following environment variables automatically:
Variable Value
HOME Set based on the value of USER
HOSTNAME The hostname associated with the container
PATH Includes popular directories, such as :
/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
TERM xterm if the container is allocated a pseudo-TTY
Additionally, the operator can **set any environment variable** in the container by using one or more `-e` flags, even overriding those mentioned above, or already defined by the developer with a Dockerfile `ENV`: $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export declare -x HOME="/" declare -x HOSTNAME="85bc26a0e200" declare -x OLDPWD declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" declare -x PWD="/" declare -x SHLVL="1" declare -x deep="purple" Similarly the operator can set the **hostname** with `-h`. ### TMPFS (mount tmpfs filesystems) ```bash --tmpfs=[]: Create a tmpfs mount with: container-dir[:], where the options are identical to the Linux 'mount -t tmpfs -o' command. ``` The example below mounts an empty tmpfs into the container with the `rw`, `noexec`, `nosuid`, and `size=65536k` options. $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image ### VOLUME (shared filesystems) -v, --volume=[host-src:]container-dest[:]: Bind mount a volume. The comma-delimited `options` are [rw|ro], [z|Z], or [[r]shared|[r]slave|[r]private]. The 'host-src' is an absolute path or a name value. If neither 'rw' or 'ro' is specified then the volume is mounted in read-write mode. --volumes-from="": Mount all volumes from the given container(s) > **Note**: > The auto-creation of the host path has been [*deprecated*](../deprecated.md#auto-creating-missing-host-paths-for-bind-mounts). > **Note**: > When using systemd to manage the Docker daemon's start and stop, in the systemd > unit file there is an option to control mount propagation for the Docker daemon > itself, called `MountFlags`. The value of this setting may cause Docker to not > see mount propagation changes made on the mount point. For example, if this value > is `slave`, you may not be able to use the `shared` or `rshared` propagation on > a volume. 
The volumes commands are complex enough to have their own documentation in section [*Managing data in containers*](../userguide/containers/dockervolumes.md). A developer can define one or more `VOLUME`'s associated with an image, but only the operator can give access from one container to another (or from a container to a volume mounted on the host). The `container-dest` must always be an absolute path such as `/src/docs`. The `host-src` can either be an absolute path or a `name` value. If you supply an absolute path for the `host-dir`, Docker bind-mounts to the path you specify. If you supply a `name`, Docker creates a named volume by that `name`. A `name` value must start with start with an alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). An absolute path starts with a `/` (forward slash). For example, you can specify either `/foo` or `foo` for a `host-src` value. If you supply the `/foo` value, Docker creates a bind-mount. If you supply the `foo` specification, Docker creates a named volume. ### USER `root` (id = 0) is the default user within a container. The image developer can create additional users. Those users are accessible by name. When passing a numeric ID, the user does not have to exist in the container. The developer can set a default user to run the first process with the Dockerfile `USER` instruction. When starting a container, the operator can override the `USER` instruction by passing the `-u` option. -u="": Username or UID > **Note:** if you pass a numeric uid, it must be in the range of 0-2147483647. ### WORKDIR The default working directory for running binaries within a container is the root directory (`/`), but the developer can set a different default with the Dockerfile `WORKDIR` command. 
The operator can override this with: -w="": Working directory inside the container docker-1.10.3/docs/security/000077500000000000000000000000001267010174400156445ustar00rootroot00000000000000docker-1.10.3/docs/security/apparmor.md000066400000000000000000000132521267010174400200120ustar00rootroot00000000000000 # AppArmor security profiles for Docker AppArmor (Application Armor) is a Linux security module that protects an operating system and its applications from security threats. To use it, a system administrator associates an AppArmor security profile with each program. Docker expects to find an AppArmor policy loaded and enforced. Docker automatically loads container profiles. The Docker binary installs a `docker-default` profile in the `/etc/apparmor.d/docker` file. This profile is used on containers, _not_ on the Docker Daemon. A profile for the Docker Engine Daemon exists but it is not currently installed with the deb packages. If you are interested in the source for the Daemon profile, it is located in [contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor) in the Docker Engine source repository. ## Understand the policies The `docker-default` profile is the default for running containers. It is moderately protective while providing wide application compatibility. The profile is the following: ``` #include profile docker-default flags=(attach_disconnected,mediate_deleted) { #include network, capability, file, umount, deny @{PROC}/{*,**^[0-9*],sys/kernel/shm*} wkx, deny @{PROC}/sysrq-trigger rwklx, deny @{PROC}/mem rwklx, deny @{PROC}/kmem rwklx, deny @{PROC}/kcore rwklx, deny mount, deny /sys/[^f]*/** wklx, deny /sys/f[^s]*/** wklx, deny /sys/fs/[^c]*/** wklx, deny /sys/fs/c[^g]*/** wklx, deny /sys/fs/cg[^r]*/** wklx, deny /sys/firmware/efi/efivars/** rwklx, deny /sys/kernel/security/** rwklx, } ``` When you run a container, it uses the `docker-default` policy unless you override it with the `security-opt` option. 
For example, the following explicitly specifies the default policy: ```bash $ docker run --rm -it --security-opt apparmor:docker-default hello-world ``` ## Loading and Unloading Profiles To load a new profile into AppArmor, for use with containers: ``` $ apparmor_parser -r -W /path/to/your_profile ``` Then you can run the custom profile with `--security-opt` like so: ```bash $ docker run --rm -it --security-opt apparmor:your_profile hello-world ``` To unload a profile from AppArmor: ```bash # stop apparmor $ /etc/init.d/apparmor stop # unload the profile $ apparmor_parser -R /path/to/profile # start apparmor $ /etc/init.d/apparmor start ``` ## Debugging AppArmor ### Using `dmesg` Here are some helpful tips for debugging any problems you might be facing with regard to AppArmor. AppArmor sends quite verbose messaging to `dmesg`. Usually an AppArmor line will look like the following: ``` [ 5442.864673] audit: type=1400 audit(1453830992.845:37): apparmor="ALLOWED" operation="open" profile="/usr/bin/docker" name="/home/jessie/docker/man/man1/docker-attach.1" pid=10923 comm="docker" requested_mask="r" denied_mask="r" fsuid=1000 ouid=0 ``` In the above example, the you can see `profile=/usr/bin/docker`. This means the user has the `docker-engine` (Docker Engine Daemon) profile loaded. > **Note:** On version of Ubuntu > 14.04 this is all fine and well, but Trusty > users might run into some issues when trying to `docker exec`. Let's look at another log line: ``` [ 3256.689120] type=1400 audit(1405454041.341:73): apparmor="DENIED" operation="ptrace" profile="docker-default" pid=17651 comm="docker" requested_mask="receive" denied_mask="receive" ``` This time the profile is `docker-default`, which is run on containers by default unless in `privileged` mode. It is telling us, that apparmor has denied `ptrace` in the container. This is great. ### Using `aa-status` If you need to check which profiles are loaded you can use `aa-status`. 
The output looks like: ```bash $ sudo aa-status apparmor module is loaded. 14 profiles are loaded. 1 profiles are in enforce mode. docker-default 13 profiles are in complain mode. /usr/bin/docker /usr/bin/docker///bin/cat /usr/bin/docker///bin/ps /usr/bin/docker///sbin/apparmor_parser /usr/bin/docker///sbin/auplink /usr/bin/docker///sbin/blkid /usr/bin/docker///sbin/iptables /usr/bin/docker///sbin/mke2fs /usr/bin/docker///sbin/modprobe /usr/bin/docker///sbin/tune2fs /usr/bin/docker///sbin/xtables-multi /usr/bin/docker///sbin/zfs /usr/bin/docker///usr/bin/xz 38 processes have profiles defined. 37 processes are in enforce mode. docker-default (6044) ... docker-default (31899) 1 processes are in complain mode. /usr/bin/docker (29756) 0 processes are unconfined but have a profile defined. ``` In the above output you can tell that the `docker-default` profile running on various container PIDs is in `enforce` mode. This means AppArmor will actively block and audit in `dmesg` anything outside the bounds of the `docker-default` profile. The output above also shows the `/usr/bin/docker` (Docker Engine Daemon) profile is running in `complain` mode. This means AppArmor will _only_ log to `dmesg` activity outside the bounds of the profile. (Except in the case of Ubuntu Trusty, where we have seen some interesting behaviors being enforced.) ## Contributing to AppArmor code in Docker Advanced users and package managers can find a profile for `/usr/bin/docker` (Docker Engine Daemon) underneath [contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor) in the Docker Engine source repository. The `docker-default` profile for containers lives in [profiles/apparmor](https://github.com/docker/docker/tree/master/profiles/apparmor). 
docker-1.10.3/docs/security/certificates.md000066400000000000000000000070011267010174400206310ustar00rootroot00000000000000 # Using certificates for repository client verification In [Running Docker with HTTPS](https.md), you learned that, by default, Docker runs via a non-networked Unix socket and TLS must be enabled in order to have the Docker client and the daemon communicate securely over HTTPS. TLS ensures authenticity of the registry endpoint and that traffic to/from registry is encrypted. This article demonstrates how to ensure the traffic between the Docker registry (i.e., *a server*) and the Docker daemon (i.e., *a client*) traffic is encrypted and a properly authenticated using *certificate-based client-server authentication*. We will show you how to install a Certificate Authority (CA) root certificate for the registry and how to set the client TLS certificate for verification. ## Understanding the configuration A custom certificate is configured by creating a directory under `/etc/docker/certs.d` using the same name as the registry's hostname (e.g., `localhost`). All `*.crt` files are added to this directory as CA roots. > **Note:** > In the absence of any root certificate authorities, Docker > will use the system default (i.e., host's root CA set). The presence of one or more `.key/cert` pairs indicates to Docker that there are custom certificates required for access to the desired repository. > **Note:** > If there are multiple certificates, each will be tried in alphabetical > order. If there is an authentication error (e.g., 403, 404, 5xx, etc.), Docker > will continue to try with the next certificate. 
The following illustrates a configuration with multiple certs: ``` /etc/docker/certs.d/ <-- Certificate directory └── localhost <-- Hostname ├── client.cert <-- Client certificate ├── client.key <-- Client key └── localhost.crt <-- Certificate authority that signed the registry certificate ``` The preceding example is operating-system specific and is for illustrative purposes only. You should consult your operating system documentation for creating an os-provided bundled certificate chain. ## Creating the client certificates You will use OpenSSL's `genrsa` and `req` commands to first generate an RSA key and then use the key to create the certificate. $ openssl genrsa -out client.key 4096 $ openssl req -new -x509 -text -key client.key -out client.cert > **Note:** > These TLS commands will only generate a working set of certificates on Linux. > The version of OpenSSL in Mac OS X is incompatible with the type of > certificate Docker requires. ## Troubleshooting tips The Docker daemon interprets ``.crt` files as CA certificates and `.cert` files as client certificates. If a CA certificate is accidentally given the extension `.cert` instead of the correct `.crt` extension, the Docker daemon logs the following error message: ``` Missing key KEY_NAME for client certificate CERT_NAME. Note that CA certificates should use the extension .crt. ``` ## Related Information * [Use trusted images](index.md) * [Protect the Docker daemon socket](https.md) docker-1.10.3/docs/security/https.md000066400000000000000000000203601267010174400173310ustar00rootroot00000000000000 # Protect the Docker daemon socket By default, Docker runs via a non-networked Unix socket. It can also optionally communicate using a HTTP socket. If you need Docker to be reachable via the network in a safe manner, you can enable TLS by specifying the `tlsverify` flag and pointing Docker's `tlscacert` flag to a trusted CA certificate. 
In the daemon mode, it will only allow connections from clients authenticated by a certificate signed by that CA. In the client mode, it will only connect to servers with a certificate signed by that CA. > **Warning**: > Using TLS and managing a CA is an advanced topic. Please familiarize yourself > with OpenSSL, x509 and TLS before using it in production. > **Warning**: > These TLS commands will only generate a working set of certificates on Linux. > Mac OS X comes with a version of OpenSSL that is incompatible with the > certificates that Docker requires. ## Create a CA, server and client keys with OpenSSL > **Note**: replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. First generate CA private and public keys: $ openssl genrsa -aes256 -out ca-key.pem 4096 Generating RSA private key, 4096 bit long modulus ............................................................................................................................................................................................++ ........++ e is 65537 (0x10001) Enter pass phrase for ca-key.pem: Verifying - Enter pass phrase for ca-key.pem: $ openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem Enter pass phrase for ca-key.pem: You are about to be asked to enter information that will be incorporated into your certificate request. What you are about to enter is what is called a Distinguished Name or a DN. There are quite a few fields but you can leave some blank For some fields there will be a default value, If you enter '.', the field will be left blank. ----- Country Name (2 letter code) [AU]: State or Province Name (full name) [Some-State]:Queensland Locality Name (eg, city) []:Brisbane Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc Organizational Unit Name (eg, section) []:Sales Common Name (e.g. 
server FQDN or YOUR name) []:$HOST Email Address []:Sven@home.org.au Now that we have a CA, you can create a server key and certificate signing request (CSR). Make sure that "Common Name" (i.e., server FQDN or YOUR name) matches the hostname you will use to connect to Docker: > **Note**: replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. $ openssl genrsa -out server-key.pem 4096 Generating RSA private key, 4096 bit long modulus .....................................................................++ .................................................................................................++ e is 65537 (0x10001) $ openssl req -subj "/CN=$HOST" -sha256 -new -key server-key.pem -out server.csr Next, we're going to sign the public key with our CA: Since TLS connections can be made via IP address as well as DNS name, they need to be specified when creating the certificate. For example, to allow connections using `10.10.10.20` and `127.0.0.1`: $ echo subjectAltName = IP:10.10.10.20,IP:127.0.0.1 > extfile.cnf $ openssl x509 -req -days 365 -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem \ -CAcreateserial -out server-cert.pem -extfile extfile.cnf Signature ok subject=/CN=your.host.com Getting CA Private Key Enter pass phrase for ca-key.pem: For client authentication, create a client key and certificate signing request: $ openssl genrsa -out key.pem 4096 Generating RSA private key, 4096 bit long modulus .........................................................++ ................++ e is 65537 (0x10001) $ openssl req -subj '/CN=client' -new -key key.pem -out client.csr To make the key suitable for client authentication, create an extensions config file: $ echo extendedKeyUsage = clientAuth > extfile.cnf Now sign the public key: $ openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem \ -CAcreateserial -out cert.pem -extfile extfile.cnf Signature ok subject=/CN=client Getting CA Private 
Key Enter pass phrase for ca-key.pem: After generating `cert.pem` and `server-cert.pem` you can safely remove the two certificate signing requests: $ rm -v client.csr server.csr With a default `umask` of 022, your secret keys will be *world-readable* and writable for you and your group. In order to protect your keys from accidental damage, you will want to remove their write permissions. To make them only readable by you, change file modes as follows: $ chmod -v 0400 ca-key.pem key.pem server-key.pem Certificates can be world-readable, but you might want to remove write access to prevent accidental damage: $ chmod -v 0444 ca.pem server-cert.pem cert.pem Now you can make the Docker daemon only accept connections from clients providing a certificate trusted by our CA: $ docker daemon --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \ -H=0.0.0.0:2376 To be able to connect to Docker and validate its certificate, you now need to provide your client keys, certificates and trusted CA: > **Note**: replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. $ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \ -H=$HOST:2376 version > **Note**: > Docker over TLS should run on TCP port 2376. > **Warning**: > As shown in the example above, you don't have to run the `docker` client > with `sudo` or the `docker` group when you use certificate authentication. > That means anyone with the keys can give any instructions to your Docker > daemon, giving them root access to the machine hosting the daemon. Guard > these keys as you would a root password! ## Secure by default If you want to secure your Docker client connections by default, you can move the files to the `.docker` directory in your home directory -- and set the `DOCKER_HOST` and `DOCKER_TLS_VERIFY` variables as well (instead of passing `-H=tcp://$HOST:2376` and `--tlsverify` on every call). 
$ mkdir -pv ~/.docker $ cp -v {ca,cert,key}.pem ~/.docker $ export DOCKER_HOST=tcp://$HOST:2376 DOCKER_TLS_VERIFY=1 Docker will now connect securely by default: $ docker ps ## Other modes If you don't want to have complete two-way authentication, you can run Docker in various other modes by mixing the flags. ### Daemon modes - `tlsverify`, `tlscacert`, `tlscert`, `tlskey` set: Authenticate clients - `tls`, `tlscert`, `tlskey`: Do not authenticate clients ### Client modes - `tls`: Authenticate server based on public/default CA pool - `tlsverify`, `tlscacert`: Authenticate server based on given CA - `tls`, `tlscert`, `tlskey`: Authenticate with client certificate, do not authenticate server based on given CA - `tlsverify`, `tlscacert`, `tlscert`, `tlskey`: Authenticate with client certificate and authenticate server based on given CA If found, the client will send its client certificate, so you just need to drop your keys into `~/.docker/{ca,cert,key}.pem`. Alternatively, if you want to store your keys in another location, you can specify that location using the environment variable `DOCKER_CERT_PATH`. 
$ export DOCKER_CERT_PATH=~/.docker/zone1/ $ docker --tlsverify ps ### Connecting to the secure Docker port using `curl` To use `curl` to make test API requests, you need to use three extra command line flags: $ curl https://$HOST:2376/images/json \ --cert ~/.docker/cert.pem \ --key ~/.docker/key.pem \ --cacert ~/.docker/ca.pem ## Related information * [Using certificates for repository client verification](certificates.md) * [Use trusted images](trust/index.md) docker-1.10.3/docs/security/https/000077500000000000000000000000001267010174400170065ustar00rootroot00000000000000docker-1.10.3/docs/security/https/Dockerfile000066400000000000000000000002101267010174400207710ustar00rootroot00000000000000FROM debian RUN apt-get update && apt-get install -yq openssl ADD make_certs.sh / WORKDIR /data VOLUME ["/data"] CMD /make_certs.sh docker-1.10.3/docs/security/https/Makefile000066400000000000000000000016571267010174400204570ustar00rootroot00000000000000 HOST:=boot2docker makescript: ./parsedocs.sh > make_certs.sh build: clean makescript docker build -t makecerts . 
cert: build docker run --rm -it -v $(CURDIR):/data -e HOST=$(HOST) -e YOUR_PUBLIC_IP=$(shell ip a | grep "inet " | sed "s/.*inet \([0-9.]*\)\/.*/\1/" | xargs echo | sed "s/ /,IP:/g") makecerts certs: cert run: sudo docker daemon -D --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem -H=0.0.0.0:6666 --pidfile=$(pwd)/docker.pid --graph=$(pwd)/graph client: sudo docker --tls --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$(HOST):6666 version sudo docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$(HOST):6666 info sudo curl https://$(HOST):6666/images/json --cert ./cert.pem --key ./key.pem --cacert ./ca.pem clean: rm -f ca-key.pem ca.pem ca.srl cert.pem client.csr extfile.cnf key.pem server-cert.pem server-key.pem server.csr extfile.cnf docker-1.10.3/docs/security/https/README.md000066400000000000000000000013461267010174400202710ustar00rootroot00000000000000 This is an initial attempt to make it easier to test the examples in the https.md doc at this point, it has to be a manual thing, and I've been running it in boot2docker so my process is $ boot2docker ssh $$ git clone https://github.com/docker/docker $$ cd docker/docs/articles/https $$ make cert lots of things to see and manually answer, as openssl wants to be interactive **NOTE:** make sure you enter the hostname (`boot2docker` in my case) when prompted for `Computer Name`) $$ sudo make run start another terminal $ boot2docker ssh $$ cd docker/docs/articles/https $$ make client the last will connect first with `--tls` and then with `--tlsverify` both should succeed docker-1.10.3/docs/security/https/make_certs.sh000077500000000000000000000025211267010174400214620ustar00rootroot00000000000000#!/bin/sh openssl genrsa -aes256 -out ca-key.pem 2048 openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem openssl genrsa -out server-key.pem 2048 openssl req -subj "/CN=$HOST" -new -key server-key.pem -out server.csr echo subjectAltName = 
IP:$YOUR_PUBLIC_IP > extfile.cnf openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -extfile extfile.cnf openssl genrsa -out key.pem 2048 openssl req -subj '/CN=client' -new -key key.pem -out client.csr echo extendedKeyUsage = clientAuth > extfile.cnf openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile extfile.cnf rm -v client.csr server.csr chmod -v 0400 ca-key.pem key.pem server-key.pem chmod -v 0444 ca.pem server-cert.pem cert.pem # docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem -H=0.0.0.0:7778 # docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem -H=$HOST:7778 version mkdir -pv ~/.docker cp -v {ca,cert,key}.pem ~/.docker export DOCKER_HOST=tcp://$HOST:7778 DOCKER_TLS_VERIFY=1 # docker ps export DOCKER_CERT_PATH=~/.docker/zone1/ # docker --tlsverify ps # curl https://$HOST:7778/images/json --cert ~/.docker/cert.pem --key ~/.docker/key.pem --cacert ~/.docker/ca.pem docker-1.10.3/docs/security/https/parsedocs.sh000077500000000000000000000004511267010174400213300ustar00rootroot00000000000000#!/bin/sh echo "#!/bin/sh" cat ../https.md | awk '{if (sub(/\\$/,"")) printf "%s", $0; else print $0}' \ | grep ' $ ' \ | sed 's/ $ //g' \ | sed 's/2375/7777/g' \ | sed 's/2376/7778/g' \ | sed 's/^docker/# docker/g' \ | sed 's/^curl/# curl/g' docker-1.10.3/docs/security/index.md000066400000000000000000000024111267010174400172730ustar00rootroot00000000000000 # Secure Engine This section discusses the security features you can configure and use within your Docker Engine installation. * You can configure Docker's trust features so that your users can push and pull trusted images. To learn how to do this, see [Use trusted images](trust/index.md) in this section. * You can protect the Docker daemon socket and ensure only trusted Docker client connections. 
For more information, [Protect the Docker daemon socket](https.md) * You can use certificate-based client-server authentication to verify a Docker daemon has the rights to access images on a registry. For more information, see [Using certificates for repository client verification](certificates.md). * You can configure secure computing mode (Seccomp) policies to secure system calls in a container. For more information, see [Seccomp security profiles for Docker](seccomp.md). * An AppArmor profile for Docker is installed with the official *.deb* packages. For information about this profile and overriding it, see [AppArmor security profiles for Docker](apparmor.md). docker-1.10.3/docs/security/seccomp.md000066400000000000000000000234301267010174400176210ustar00rootroot00000000000000 # Seccomp security profiles for Docker Secure computing mode (Seccomp) is a Linux kernel feature. You can use it to restrict the actions available within the container. The `seccomp()` system call operates on the seccomp state of the calling process. You can use this feature to restrict your application's access. This feature is available only if the kernel is configured with `CONFIG_SECCOMP` enabled. ## Passing a profile for a container The default seccomp profile provides a sane default for running containers with seccomp. It is moderately protective while providing wide application compatibility. The default Docker profile has layout in the following form: ``` { "defaultAction": "SCMP_ACT_ALLOW", "syscalls": [ { "name": "getcwd", "action": "SCMP_ACT_ERRNO" }, { "name": "mount", "action": "SCMP_ACT_ERRNO" }, { "name": "setns", "action": "SCMP_ACT_ERRNO" }, { "name": "create_module", "action": "SCMP_ACT_ERRNO" }, { "name": "chown", "action": "SCMP_ACT_ERRNO" }, { "name": "chmod", "action": "SCMP_ACT_ERRNO" } ] } ``` When you run a container, it uses the default profile unless you override it with the `security-opt` option. 
For example, the following explicitly specifies the default policy: ``` $ docker run --rm -it --security-opt seccomp:/path/to/seccomp/profile.json hello-world ``` ### Syscalls blocked by the default profile Docker's default seccomp profile is a whitelist which specifies the calls that are allowed. The table below lists the significant (but not all) syscalls that are effectively blocked because they are not on the whitelist. The table includes the reason each syscall is blocked rather than white-listed. | Syscall | Description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------| | `acct` | Accounting syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_PACCT`. | | `add_key` | Prevent containers from using the kernel keyring, which is not namespaced. | | `adjtimex` | Similar to `clock_settime` and `settimeofday`, time/date is not namespaced. | | `bpf` | Deny loading potentially persistent bpf programs into kernel, already gated by `CAP_SYS_ADMIN`. | | `clock_adjtime` | Time/date is not namespaced. | | `clock_settime` | Time/date is not namespaced. | | `clone` | Deny cloning new namespaces. Also gated by `CAP_SYS_ADMIN` for CLONE_* flags, except `CLONE_USERNS`. | | `create_module` | Deny manipulation and functions on kernel modules. | | `delete_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. | | `finit_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. | | `get_kernel_syms` | Deny retrieval of exported kernel and module symbols. | | `get_mempolicy` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. | | `init_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. | | `ioperm` | Prevent containers from modifying kernel I/O privilege levels. 
Already gated by `CAP_SYS_RAWIO`. | | `iopl` | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`. | | `kcmp` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. | | `kexec_file_load` | Sister syscall of `kexec_load` that does the same thing, slightly different arguments. | | `kexec_load` | Deny loading a new kernel for later execution. | | `keyctl` | Prevent containers from using the kernel keyring, which is not namespaced. | | `lookup_dcookie` | Tracing/profiling syscall, which could leak a lot of information on the host. | | `mbind` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. | | `modify_ldt` | Old syscall only used in 16-bit code and a potential information leak. | | `mount` | Deny mounting, already gated by `CAP_SYS_ADMIN`. | | `move_pages` | Syscall that modifies kernel memory and NUMA settings. | | `name_to_handle_at` | Sister syscall to `open_by_handle_at`. Already gated by `CAP_SYS_NICE`. | | `nfsservctl` | Deny interaction with the kernel nfs daemon. | | `open_by_handle_at` | Cause of an old container breakout. Also gated by `CAP_DAC_READ_SEARCH`. | | `perf_event_open` | Tracing/profiling syscall, which could leak a lot of information on the host. | | `personality` | Prevent container from enabling BSD emulation. Not inherently dangerous, but poorly tested, potential for a lot of kernel vulns. | | `pivot_root` | Deny `pivot_root`, should be privileged operation. | | `process_vm_readv` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. | | `process_vm_writev` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. | | `ptrace` | Tracing/profiling syscall, which could leak a lot of information on the host. Already blocked by dropping `CAP_PTRACE`. | | `query_module` | Deny manipulation and functions on kernel modules. 
| | `quotactl` | Quota syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_ADMIN`. | | `reboot` | Don't let containers reboot the host. Also gated by `CAP_SYS_BOOT`. | | `restart_syscall` | Don't allow containers to restart a syscall. Possible seccomp bypass see: https://code.google.com/p/chromium/issues/detail?id=408827. | | `request_key` | Prevent containers from using the kernel keyring, which is not namespaced. | | `set_mempolicy` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. | | `setns` | Deny associating a thread with a namespace. Also gated by `CAP_SYS_ADMIN`. | | `settimeofday` | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`. | | `stime` | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`. | | `swapon` | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`. | | `swapoff` | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`. | | `sysfs` | Obsolete syscall. | | `_sysctl` | Obsolete, replaced by /proc/sys. | | `umount` | Should be a privileged operation. Also gated by `CAP_SYS_ADMIN`. | | `umount2` | Should be a privileged operation. | | `unshare` | Deny cloning new namespaces for processes. Also gated by `CAP_SYS_ADMIN`, with the exception of `unshare --user`. | | `uselib` | Older syscall related to shared libraries, unused for a long time. | | `ustat` | Obsolete syscall. | | `vm86` | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`. | | `vm86old` | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`. | ## Run without the default seccomp profile You can pass `unconfined` to run a container without the default seccomp profile. 
``` $ docker run --rm -it --security-opt seccomp:unconfined debian:jessie \ unshare --map-root-user --user sh -c whoami ``` docker-1.10.3/docs/security/security.md000066400000000000000000000333231267010174400200410ustar00rootroot00000000000000 # Docker security There are three major areas to consider when reviewing Docker security: - the intrinsic security of the kernel and its support for namespaces and cgroups; - the attack surface of the Docker daemon itself; - loopholes in the container configuration profile, either by default, or when customized by users. - the "hardening" security features of the kernel and how they interact with containers. ## Kernel namespaces Docker containers are very similar to LXC containers, and they have similar security features. When you start a container with `docker run`, behind the scenes Docker creates a set of namespaces and control groups for the container. **Namespaces provide the first and most straightforward form of isolation**: processes running within a container cannot see, and even less affect, processes running in another container, or in the host system. **Each container also gets its own network stack**, meaning that a container doesn't get privileged access to the sockets or interfaces of another container. Of course, if the host system is setup accordingly, containers can interact with each other through their respective network interfaces — just like they can interact with external hosts. When you specify public ports for your containers or use [*links*](../userguide/networking/default_network/dockerlinks.md) then IP traffic is allowed between containers. They can ping each other, send/receive UDP packets, and establish TCP connections, but that can be restricted if necessary. From a network architecture point of view, all containers on a given Docker host are sitting on bridge interfaces. This means that they are just like physical machines connected through a common Ethernet switch; no more, no less. 
How mature is the code providing kernel namespaces and private networking? Kernel namespaces were introduced [between kernel version 2.6.15 and 2.6.26](http://lxc.sourceforge.net/index.php/about/kernel-namespaces/). This means that since July 2008 (date of the 2.6.26 release, now 7 years ago), namespace code has been exercised and scrutinized on a large number of production systems. And there is more: the design and inspiration for the namespaces code are even older. Namespaces are actually an effort to reimplement the features of [OpenVZ]( http://en.wikipedia.org/wiki/OpenVZ) in such a way that they could be merged within the mainstream kernel. And OpenVZ was initially released in 2005, so both the design and the implementation are pretty mature. ## Control groups Control Groups are another key component of Linux Containers. They implement resource accounting and limiting. They provide many useful metrics, but they also help ensure that each container gets its fair share of memory, CPU, disk I/O; and, more importantly, that a single container cannot bring the system down by exhausting one of those resources. So while they do not play a role in preventing one container from accessing or affecting the data and processes of another container, they are essential to fend off some denial-of-service attacks. They are particularly important on multi-tenant platforms, like public and private PaaS, to guarantee a consistent uptime (and performance) even when some applications start to misbehave. Control Groups have been around for a while as well: the code was started in 2006, and initially merged in kernel 2.6.24. ## Docker daemon attack surface Running containers (and applications) with Docker implies running the Docker daemon. This daemon currently requires `root` privileges, and you should therefore be aware of some important details. First of all, **only trusted users should be allowed to control your Docker daemon**. 
This is a direct consequence of some powerful Docker features. Specifically, Docker allows you to share a directory between the Docker host and a guest container; and it allows you to do so without limiting the access rights of the container. This means that you can start a container where the `/host` directory will be the `/` directory on your host; and the container will be able to alter your host filesystem without any restriction. This is similar to how virtualization systems allow filesystem resource sharing. Nothing prevents you from sharing your root filesystem (or even your root block device) with a virtual machine. This has a strong security implication: for example, if you instrument Docker from a web server to provision containers through an API, you should be even more careful than usual with parameter checking, to make sure that a malicious user cannot pass crafted parameters causing Docker to create arbitrary containers. For this reason, the REST API endpoint (used by the Docker CLI to communicate with the Docker daemon) changed in Docker 0.5.2, and now uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the latter being prone to cross-site-scripting attacks if you happen to run Docker directly on your local machine, outside of a VM). You can then use traditional UNIX permission checks to limit access to the control socket. You can also expose the REST API over HTTP if you explicitly decide to do so. However, if you do that, being aware of the above mentioned security implication, you should ensure that it will be reachable only from a trusted network or VPN; or protected with e.g., `stunnel` and client SSL certificates. You can also secure them with [HTTPS and certificates](https.md). The daemon is also potentially vulnerable to other inputs, such as image loading from either disk with 'docker load', or from the network with 'docker pull'. This has been a focus of improvement in the community, especially for 'pull' security. 
While these overlap, it should be noted that 'docker load' is a mechanism for backup and restore and is not currently considered a secure mechanism for loading images. As of Docker 1.3.2, images are now extracted in a chrooted subprocess on Linux/Unix platforms, being the first-step in a wider effort toward privilege separation. Eventually, it is expected that the Docker daemon will run with restricted privileges, delegating operations to well-audited sub-processes, each with its own (very limited) scope of Linux capabilities, virtual network setup, filesystem management, etc. That is, most likely, pieces of the Docker engine itself will run inside of containers. Finally, if you run Docker on a server, it is recommended to run exclusively Docker in the server, and move all other services within containers controlled by Docker. Of course, it is fine to keep your favorite admin tools (probably at least an SSH server), as well as existing monitoring/supervision processes (e.g., NRPE, collectd, etc). ## Linux kernel capabilities By default, Docker starts containers with a restricted set of capabilities. What does that mean? Capabilities turn the binary "root/non-root" dichotomy into a fine-grained access control system. Processes (like web servers) that just need to bind on a port below 1024 do not have to run as root: they can just be granted the `net_bind_service` capability instead. And there are many other capabilities, for almost all the specific areas where root privileges are usually needed. This means a lot for container security; let's see why! Your average server (bare metal or virtual machine) needs to run a bunch of processes as root. Those typically include SSH, cron, syslogd; hardware management tools (e.g., load modules), network configuration tools (e.g., to handle DHCP, WPA, or VPNs), and much more. 
A container is very different, because almost all of those tasks are handled by the infrastructure around the container: - SSH access will typically be managed by a single server running on the Docker host; - `cron`, when necessary, should run as a user process, dedicated and tailored for the app that needs its scheduling service, rather than as a platform-wide facility; - log management will also typically be handed to Docker, or by third-party services like Loggly or Splunk; - hardware management is irrelevant, meaning that you never need to run `udevd` or equivalent daemons within containers; - network management happens outside of the containers, enforcing separation of concerns as much as possible, meaning that a container should never need to perform `ifconfig`, `route`, or ip commands (except when a container is specifically engineered to behave like a router or firewall, of course). This means that in most cases, containers will not need "real" root privileges *at all*. And therefore, containers can run with a reduced capability set; meaning that "root" within a container has much less privileges than the real "root". For instance, it is possible to: - deny all "mount" operations; - deny access to raw sockets (to prevent packet spoofing); - deny access to some filesystem operations, like creating new device nodes, changing the owner of files, or altering attributes (including the immutable flag); - deny module loading; - and many others. This means that even if an intruder manages to escalate to root within a container, it will be much harder to do serious damage, or to escalate to the host. This won't affect regular web apps; but malicious users will find that the arsenal at their disposal has shrunk considerably! 
By default Docker drops all capabilities except [those needed](https://github.com/docker/docker/blob/87de5fdd5972343a11847922e0f41d9898b5cff7/daemon/execdriver/native/template/default_template_linux.go#L16-L29), a whitelist instead of a blacklist approach. You can see a full list of available capabilities in [Linux manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html). One primary risk with running Docker containers is that the default set of capabilities and mounts given to a container may provide incomplete isolation, either independently, or when used in combination with kernel vulnerabilities. Docker supports the addition and removal of capabilities, allowing use of a non-default profile. This may make Docker more secure through capability removal, or less secure through the addition of capabilities. The best practice for users would be to remove all capabilities except those explicitly required for their processes. ## Other kernel security features Capabilities are just one of the many security features provided by modern Linux kernels. It is also possible to leverage existing, well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with Docker. While Docker currently only enables capabilities, it doesn't interfere with the other systems. This means that there are many different ways to harden a Docker host. Here are a few examples. - You can run a kernel with GRSEC and PAX. This will add many safety checks, both at compile-time and run-time; it will also defeat many exploits, thanks to techniques like address randomization. It doesn't require Docker-specific configuration, since those security features apply system-wide, independent of containers. - If your distribution comes with security model templates for Docker containers, you can use them out of the box. For instance, we ship a template that works with AppArmor and Red Hat comes with SELinux policies for Docker. 
These templates provide an extra safety net (even though it overlaps greatly with capabilities). - You can define your own policies using your favorite access control mechanism. Just like there are many third-party tools to augment Docker containers with e.g., special network topologies or shared filesystems, you can expect to see tools to harden existing Docker containers without affecting Docker's core. Recent improvements in Linux namespaces will soon allow to run full-featured containers without root privileges, thanks to the new user namespace. This is covered in detail [here]( http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/). Moreover, this will solve the problem caused by sharing filesystems between host and guest, since the user namespace allows users within containers (including the root user) to be mapped to other users in the host system. Today, Docker does not directly support user namespaces, but they may still be utilized by Docker containers on supported kernels, by directly using the clone syscall, or utilizing the 'unshare' utility. Using this, some users may find it possible to drop more capabilities from their process as user namespaces provide an artificial capabilities set. Likewise, however, this artificial capabilities set may require use of 'capsh' to restrict the user-namespace capabilities set when using 'unshare'. Eventually, it is expected that Docker will have direct, native support for user-namespaces, simplifying the process of hardening containers. ## Conclusions Docker containers are, by default, quite secure; especially if you take care of running your processes inside the containers as non-privileged users (i.e., non-`root`). You can add an extra layer of safety by enabling AppArmor, SELinux, GRSEC, or your favorite hardening solution. 
Last but not least, if you see interesting security features in other containerization systems, these are simply kernels features that may be implemented in Docker as well. We welcome users to submit issues, pull requests, and communicate via the mailing list. ## Related Information * [Use trusted images](../security/trust/index.md) * [Seccomp security profiles for Docker](../security/seccomp.md) * [AppArmor security profiles for Docker](../security/apparmor.md) * [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e) docker-1.10.3/docs/security/trust/000077500000000000000000000000001267010174400170255ustar00rootroot00000000000000docker-1.10.3/docs/security/trust/content_trust.md000066400000000000000000000325551267010174400222740ustar00rootroot00000000000000 # Content trust in Docker When transferring data among networked systems, *trust* is a central concern. In particular, when communicating over an untrusted medium such as the internet, it is critical to ensure the integrity and publisher of all the data a system operates on. You use Docker to push and pull images (data) to a registry. Content trust gives you the ability to both verify the integrity and the publisher of all the data received from a registry over any channel. Content trust is currently only available for users of the public Docker Hub. It is currently not available for the Docker Trusted Registry or for private registries. ## Understand trust in Docker Content trust allows operations with a remote Docker registry to enforce client-side signing and verification of image tags. Content trust provides the ability to use digital signatures for data sent to and received from remote Docker registries. These signatures allow client-side verification of the integrity and publisher of specific image tags. Currently, content trust is disabled by default. You must enabled it by setting the `DOCKER_CONTENT_TRUST` environment variable. 
Refer to the [environment variables](../../reference/commandline/cli.md#environment-variables) and [Notary](../../reference/commandline/cli.md#notary) configuration for the docker client for more options. Once content trust is enabled, image publishers can sign their images. Image consumers can ensure that the images they use are signed. Publishers and consumers can be individuals alone or in organizations. Docker's content trust supports users and automated processes such as builds. ### Image tags and content trust An individual image record has the following identifier: ``` [REGISTRY_HOST[:REGISTRY_PORT]/]REPOSITORY[:TAG] ``` A particular image `REPOSITORY` can have multiple tags. For example, `latest` and `3.1.2` are both tags on the `mongo` image. An image publisher can build an image and tag combination many times changing the image with each build. Content trust is associated with the `TAG` portion of an image. Each image repository has a set of keys that image publishers use to sign an image tag. Image publishers have discretion on which tags they sign. An image repository can contain an image with one tag that is signed and another tag that is not. For example, consider [the Mongo image repository](https://hub.docker.com/r/library/mongo/tags/). The `latest` tag could be unsigned while the `3.1.6` tag could be signed. It is the responsibility of the image publisher to decide if an image tag is signed or not. In this representation, some image tags are signed, others are not: ![Signed tags](images/tag_signing.png) Publishers can choose to sign a specific tag or not. As a result, the content of an unsigned tag and that of a signed tag with the same name may not match. For example, a publisher can push a tagged image `someimage:latest` and sign it. Later, the same publisher can push an unsigned `someimage:latest` image. This second push replaces the last unsigned tag `latest` but does not affect the signed `latest` version. 
The ability to choose which tags they can sign allows publishers to iterate over the unsigned version of an image before officially signing it. Image consumers can enable content trust to ensure that images they use were signed. If a consumer enables content trust, they can only pull, run, or build with trusted images. Enabling content trust is like wearing a pair of rose-colored glasses. Consumers "see" only signed image tags and the less desirable, unsigned image tags are "invisible" to them. ![Trust view](images/trust_view.png) To the consumer who has not enabled content trust, nothing about how they work with Docker images changes. Every image is visible regardless of whether it is signed or not. ### Content trust operations and keys When content trust is enabled, `docker` CLI commands that operate on tagged images must either have content signatures or explicit content hashes. The commands that operate with content trust are: * `push` * `build` * `create` * `pull` * `run` For example, with content trust enabled a `docker pull someimage:latest` only succeeds if `someimage:latest` is signed. However, an operation with an explicit content hash always succeeds as long as the hash exists: ```bash $ docker pull someimage@sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a ``` Trust for an image tag is managed through the use of signing keys. A key set is created when an operation using content trust is first invoked. Docker's content trust makes use of four different keys: | Key | Description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| | root key | Root of content trust for an image tag. When content trust is enabled, you create the root key once. | | target and snapshot | These two keys are known together as the "repository" key. 
When content trust is enabled, you create this key when you add a new image repository. If you have the root key, you can export the repository key and allow other publishers to sign the image tags. | | timestamp | This key applies to a repository. It allows Docker repositories to have freshness security guarantees without requiring periodic content refreshes on the client's side. | With the exception of the timestamp, all the keys are generated and stored locally client-side. The timestamp is safely generated and stored in a signing server that is deployed alongside the Docker registry. All keys are generated in a backend service that isn't directly exposed to the internet and are encrypted at rest. The following image depicts the various signing keys and their relationships: ![Content trust components](images/trust_components.png) >**WARNING**: Loss of the root key is **very difficult** to recover from. >Correcting this loss requires intervention from [Docker >Support](https://support.docker.com) to reset the repository state. This loss >also requires **manual intervention** from every consumer that used a signed >tag from this repository prior to the loss. You should backup the root key somewhere safe. Given that it is only required to create new repositories, it is a good idea to store it offline. Make sure you read [Manage keys for content trust](trust_key_mng.md) information for details on securing, and backing up your keys. ## Survey of typical content trust operations This section surveys the typical trusted operations users perform with Docker images. ### Enable and disable content trust per-shell or per-invocation In a shell, you can enable content trust by setting the `DOCKER_CONTENT_TRUST` environment variable. Enabling per-shell is useful because you can have one shell configured for trusted operations and another terminal shell for untrusted operations. You can also add this declaration to your shell profile to have it turned on always by default. 
To enable content trust in a `bash` shell enter the following command: ```bash export DOCKER_CONTENT_TRUST=1 ``` Once set, each of the "tag" operations requires a key for a trusted tag. In an environment where `DOCKER_CONTENT_TRUST` is set, you can use the `--disable-content-trust` flag to run individual operations on tagged images without content trust on an as-needed basis. ```bash $ docker pull --disable-content-trust docker/trusttest:untrusted ``` To invoke a command with content trust enabled regardless of whether or how the `DOCKER_CONTENT_TRUST` variable is set: ```bash $ docker build --disable-content-trust=false -t docker/trusttest:testing . ``` All of the trusted operations support the `--disable-content-trust` flag. ### Push trusted content To create signed content for a specific image tag, simply enable content trust and push a tagged image. If this is the first time you have pushed an image using content trust on your system, the session looks like this: ```bash $ docker push docker/trusttest:latest The push refers to a repository [docker.io/docker/trusttest] (len: 1) 9a61b6b1315e: Image already exists 902b87aaaec9: Image already exists latest: digest: sha256:d02adacee0ac7a5be140adb94fa1dae64f4e71a68696e7f8e7cbf9db8dd49418 size: 3220 Signing and pushing trust metadata You are about to create a new root signing key passphrase. This passphrase will be used to protect the most sensitive key in your signing system. Please choose a long, complex passphrase and be careful to keep the password and the key file itself secure and backed up. It is highly recommended that you use a password manager to generate the passphrase and keep it safe. There will be no way to recover this key. You can find the key in your config directory. 
Enter passphrase for new root key with id a1d96fb: Repeat passphrase for new root key with id a1d96fb: Enter passphrase for new repository key with id docker.io/docker/trusttest (3a932f1): Repeat passphrase for new repository key with id docker.io/docker/trusttest (3a932f1): Finished initializing "docker.io/docker/trusttest" ``` When you push your first tagged image with content trust enabled, the `docker` client recognizes this is your first push and: - alerts you that it will create a new root key - requests a passphrase for the key - generates a root key in the `~/.docker/trust` directory - generates a repository key in the `~/.docker/trust` directory The passphrase you choose for both the root key and your content key-pair should be randomly generated and stored in a *password manager*. > **NOTE**: If you omit the `latest` tag, content trust is skipped. This is true even if content trust is enabled and even if this is your first push. ```bash $ docker push docker/trusttest The push refers to a repository [docker.io/docker/trusttest] (len: 1) 9a61b6b1315e: Image successfully pushed 902b87aaaec9: Image successfully pushed latest: digest: sha256:a9a9c4402604b703bed1c847f6d85faac97686e48c579bd9c3b0fa6694a398fc size: 3220 No tag specified, skipping trust metadata push ``` It is skipped because as the message states, you did not supply an image `TAG` value. In Docker content trust, signatures are associated with tags. 
Once you have a root key on your system, subsequent images repositories you create can use that same root key: ```bash $ docker push docker.io/docker/seaside:latest The push refers to a repository [docker.io/docker/seaside] (len: 1) a9539b34a6ab: Image successfully pushed b3dbab3810fc: Image successfully pushed latest: digest: sha256:d2ba1e603661a59940bfad7072eba698b79a8b20ccbb4e3bfb6f9e367ea43939 size: 3346 Signing and pushing trust metadata Enter key passphrase for root key with id a1d96fb: Enter passphrase for new repository key with id docker.io/docker/seaside (bb045e3): Repeat passphrase for new repository key with id docker.io/docker/seaside (bb045e3): Finished initializing "docker.io/docker/seaside" ``` The new image has its own repository key and timestamp key. The `latest` tag is signed with both of these. ### Pull image content A common way to consume an image is to `pull` it. With content trust enabled, the Docker client only allows `docker pull` to retrieve signed images. ``` $ docker pull docker/seaside Using default tag: latest Pull (1 of 1): docker/trusttest:latest@sha256:d149ab53f871 ... Tagging docker/trusttest@sha256:d149ab53f871 as docker/trusttest:latest ``` The `seaside:latest` image is signed. In the following example, the command does not specify a tag, so the system uses the `latest` tag by default again and the `docker/cliffs:latest` tag is not signed. ```bash $ docker pull docker/cliffs Using default tag: latest no trust data available ``` Because the tag `docker/cliffs:latest` is not trusted, the `pull` fails. ### Disable content trust for specific operations A user that wants to disable content trust for a particular operation can use the `--disable-content-trust` flag. **Warning: this flag disables content trust for this operation**. With this flag, Docker will ignore content-trust and allow all operations to be done without verifying any signatures. 
If we wanted the previous untrusted build to succeed we could do: ``` $ cat Dockerfile FROM docker/trusttest:notrust RUN echo $ docker build --disable-content-trust -t docker/trusttest:testing . Sending build context to Docker daemon 42.84 MB ... Successfully built f21b872447dc ``` The same is true for all the other commands, such as `pull` and `push`: ``` $ docker pull --disable-content-trust docker/trusttest:untrusted ... $ docker push --disable-content-trust docker/trusttest:untrusted ... ``` ## Related information * [Manage keys for content trust](trust_key_mng.md) * [Automation with content trust](trust_automation.md) * [Play in a content trust sandbox](trust_sandbox.md) docker-1.10.3/docs/security/trust/deploying_notary.md000066400000000000000000000023321267010174400227350ustar00rootroot00000000000000 # Deploying Notary Server with Compose The easiest way to deploy Notary Server is by using Docker Compose. To follow the procedure on this page, you must have already [installed Docker Compose](/compose/install.md). 1. Clone the Notary repository git clone git@github.com:docker/notary.git 2. Build and start Notary Server with the sample certificates. docker-compose up -d For more detailed documentation about how to deploy Notary Server see https://github.com/docker/notary. 3. Make sure that your Docker or Notary client trusts Notary Server's certificate before you try to interact with the Notary server. See the instructions for [Docker](../../reference/commandline/cli.md#notary) or for [Notary](https://github.com/docker/notary#using-notary) depending on which one you are using. ## If you want to use Notary in production Please check back here for instructions after Notary Server has an official stable release. To get a head start on deploying Notary in production see https://github.com/docker/notary. 
docker-1.10.3/docs/security/trust/images/000077500000000000000000000000001267010174400202725ustar00rootroot00000000000000docker-1.10.3/docs/security/trust/images/tag_signing.png000066400000000000000000002212601267010174400232740ustar00rootroot00000000000000PNG  IHDResRGB@IDATx _cA`9PbP"jD*?xDϿx$1ĈEQTPQ9[匲 %r~ gٝ{|zY2*@ @JHt q@ @ ` 2E @ @ .ₑJ @ @@d;@ @@\ 2#@ @ w @ @d F* @ @ @ @q!T@ @"@ @B!. @ D @ ą"C\0R  @ @ | @ @ D`@ @@ @  qH% @  2 @ @ .ₑJ @ @@d;@ @@\ 2#@ @ w @ @d F* @ @ @ @q!T@ @"@ @B!. @ D @ ą"C\0R  @ @ | @ @ D`@ @@ @  qH% @  @E`-wOR:URyZ|Rڢ@Oc "&#^!s7<˱r9gVs D4P@%]=\S!{siVȐp4@b2g aaZv֒@j7 @<7Ҍ =?Mf%Ƀ Kn4*@@ 2!@RG`n9'eMjΚ @~%בn@<y6yC5i]"@B! !@^&`g,!; zdIFA@  $S`8G iݰAcBL!ȣO!@wVYK$̧2'{cmw˃a o @?D!= XvܕBZ6< @dGj $d傓[O"UGUn{+ٽDn@@@dH? YI>.)}+_=cCRh @  2x@HS?l)o~5){LJ[4@@z(^ݡ7 $@l @8<<>@@*.M7fnKj`v;V,Q @da @M'9[rr~kɿk$;g[HC'&Η;vB<7y^rU>˫ 2 @da @hRΕ}9׾۱v>_p\ֳe'Ic*0!@<DB VZ[(E&ݵOʔ53M;\L-"D{Eε荂4j,!;66U`$?^"߮$=iX\B w ~A@10yjgMS@Flh]ONDNܝI@* Dʔ.%ʔe8y_/s$,xG [`2c $tyſ?Z"VoQB # >,L@I dBB!h?X% z=p(!/3+g9'e9UEҼ1ҭU=ٲsK_I䗼N?Nzʞ}B򝋰G^4fm  aP !RZ) E Tx͹Fpe;KڒeM"4յש!khyGP,\EyY\ا ϑ ϴers$R"7o!QXsWKt-^/T殌̓A ,/|H J%޼Xͣ< @&' , s\P!aP!%.TTu>]h 3O箓#'-0"ƝW.T '(N0ͪ4 Q# @db>Љ,hN**\ҿsfPdZ7elܚgҥ׏"7&)&nC^'`R1vq>@@< 2ē&uA#T`Kmz.tQb v=cD?*4Lx6D2bH/wq!DQ8NU9]CդKR/,Z1` ]"K= @7fX  #_:D`Ի\ڿ}Lk l$楖g [AVl3w50aJB~Ȭ^J7w7kd[TiąD\i;[[}DԨVZNW+M@I *YԤ;Pl-y 0 @iD!@`У_u)2fc x4$:@+KEɏT9kkͿV}Y"lVerK`ă!!!@M!Ǘ`HƬ Bz4hBh0O@uqT,ы*^b< bM:7c}  D4HxUHk=u*.`R ;6ޝڄ@Z |jN,Qᘌ RzvvyԵʶi\ܷoADc'k)Y 6# "CY*x:` x ފw`e1Fe͌hO}1̻߉sf-b @7D.7И 2+|ABgqդqH@rЮ"Ɍ&z@D>$0_^CTѤ"%$@/nh? @ dE@ 1~J*\ޘbIKk9 (:/K-nBk$;g[HvW-u?|hoYWTMEzHeˎN/}%ːp㿿pp@@0 2s Ώõkի}#GdP>m`66B,'( ]Qb=\ĔB @#1>'^*Vz}pj;Yn.Q+(%xtBnOst_r@ b2EJ薐~Kˆ?~ B */+IO#R\UϨ`վjyÚҴNUif}֮j̮bG-/5ic1Rt K]tDZU³ @ du.L_alWόޝ TQ^Vi;aYe{^'RBY9=/? 
sK䖲wGGN%"$ @d@ ',Mܢ}# QO,bEM93"eɅ @ X.ѧ#ǐհq]',n6?mc7#CCƜCb$'C(Tp ~`ˬTNE7y+6Y=[ط9Bն촶\nsH^m6CENk#~N65ϗˡ_ Vt Ԕw9 ACYNN @&D`;)/XK~[doe@dhb{2 t/?ɾVl?6!*2F3/by|2!@6D`?]R^V^rn8?\O_/88b&`"'cBC(DY-?fy "@LN:tIt.9sjXCyӅ~C ".t}WKkU5 @''Ct?tITBK%Ȑܜ}[oev9BQd?F~([S#+BDLlٱɟ4g)V;|LKO;ΪOVޙJ6lˏɠen]g"!@D5=9V`ѪMrFҷkS,o~cmhbp>}ra,!ޝZɨ <*S:jVw9GiFɄ @E$PDP@ 8DKWč[LEn`*6h0g.ɉ*0ԫ)-U.~TC)$ÈWf_8 ] qnG'#:UDwc vzfˮGKK# s!@H ?0[Gwl -φ݇ch7g.Yo>Y cN 2厭BoeFXo] T`?3G f[LJ2Ȱy!R=Z@H=_  " ЁG=&[A14?_ܽ5Ʒ`P@t({Ƿ5j *0| 5mC'Iwvg˓W*w"+SZo禅tɋ"eco}1L@M!OӀY=[~Իs]kzlA=2;=ri 6 pP|-%%Om"J^5@ b(]xnxe@,6"YíN \HF?PK"0m/aw޽'g!n=D=fHUvó @ G`' tbX Ȧ'{#gUA,H@(MG _݃bp(ANQD ~qNQgq@B W@ThPφ/4hl]uKFS/Gh ]"%DM[{ۙOQ:Z-\)ڦq }L%ٲcϑG5NhQW?{򱷾=$ryuh8K7H @$JBg!a=ߞ.PAr˫AN^8D\кZXR v(.YKweO|$6(rWOHzi`?܈]"dó#^Wֵt9V Lk3B2sO @ => '/OMc@魽eFhhТvoaѪM, {R^Oㅝ8!kCɰcWG ٵءVZR&g1'{,YR.=8ٵ/sBI "Cǟ:!m>27lRK>ٺV(TsȨCQR*+].b( 1@ R-^pH̊wρTsJh @d`JB@ńӖ̤?C‚^1^;51O]s@E/,/ZS0_J6YE aΐkWV2„gwYb.&.W,;M_觥] 9J@7yÅV;YT4ߊRZ%)S:5AEolyET c>ξt>\\s@E!^$I$.0]W,&B(JSjӳϔ&s#-Cz @.?^?}ݣ>Is"{'Dϯ3@ ;DؙRJ@=T`x#uLիA=,4}2w9j Ϳqp7s 4A7.--]s@ UjvAG 6s&L]&tnr4'РfԏsK5|2Q{WkR++CxZU+g˓WzreJʒ@JJyI$Mz݃}<3$AE#~vՊFEOe-@M_&J}HàoAݤ|*S7&wچ? hwG@ dHgZ@ 'ȠޭJ\g*X 4 ҥUTDH::eՖimAcmMY-T\αfAȯ-8Uq@J!x!q tI=AS{jj]'4>REG L1 ɨX|l͐|4!ʗMru;ʎjː>m%wϾ.(yӠd~[UF3O:4%}WX-7kN!@@bgH:gvԉy>:;Gd iiMH=/Gu?Ĺ~EιWg/9O<$Cؾ@F!n(!Am/m$ Xң]#d5~RqŸ@ZYז{B-.ean 9F) @cFq$NtbM[ նlS{si@/ 2b02&J'Dc3‰ _XA I+cvz @_ 2u;4Ff_><SOCN+oO9B ;iv@  @1ȶD;5oP>McKv]H{ ː\@  &s &^:zΩnt} @@  2b$O li+6ا!@ @d`aj%7Pj-5t @ =t"C@ @>B@FrXq: h߬VzLw!@O!ǘ=ӠE]^/ ޱ\Ы/mh@@dΆ;H)53WlqS8v2qzӭ.پҁ Ґ ^qi\# {G @&&">m Udii[~]GB=gha? @(dȆ  z$gw\k䙫M$?0Izk$72^z. $6^{dV2=G^A vj"] `? @d@M˥X.s:1szk^uYuC݅‹A"UY]tG`wsrni G[`hۄ%i5t F! 
"|&̑CL ~dlhޠԭa>jdJD}-ZIt9 %]1rV c:7/% >DL "CAQ &o~g% ̤ !FWEQP!w2"ca?Pae=<@"C ( /ЀCv2`y7lʪ{ ۿqk'IZ6.k ,H4qꇀ7Xyr|0+ @H8D#$ `%6GM*>wO["z>:LhٸJ*ֵ ke8xPZXRAa@ 2p0I~z[Q K,VjDq J@LR%x:vhZKt Nk /(`6@@`]5 ZԬnUP,qITQ/Zִq[ U+&Z DSz@ @H )iF!@ @H; i7t(#;vRJ9UʸqQAFg@ SÝV!w2k,UV\) 5ko۸G@rIٲeB iV @@ Px< /IH6m$++KZhaZ.]j\haСu-.׭[' 뮻N֭+k֬gyxБ̙RƍMZ_.F2G/^%<_PcǚLXvm  x"G #{F\xw]va_޼UaAGiz衇$;;[}QYj̜9ӈ~޽h*D޽{;u+&F;q"nP'p]w%*ܹSV^-}\|o<@g 2xvh0 7u1nʕ3'ptI_J}+z74=:ٷow ?tYfϞ-4e5k&׿d`#GJ֭[nu! }~t X/ ,ҿu)4N K,>\?FпqSAP˝y|`<4[n2oCjՌ\tɄePR%sϕ?ҷo_Rp "J.-zK )SsC9?@mfDAܩiӦ]g;#vxAo^ZQ;0>^b.:3pz'S~0i{IEZGTkoR. @!iN@wxf.o~,m73fp4lPʗ/o?,7+V0o'5..sP!.Y yRQs:ukIϞ=7{g<3B~n4o3 ċ/hUwq澻 beN*"yT!]{B=IjD.]-*HIWdk;IE!lFF&SISA^KoUW]%_|T}0@A@ 3 : z-߿A4\ڵ͛K]Ra']=m4G\/JQr':Q裏 -~m( 6mszI 4daEn${wldRQ>&z+vd "7|(򗿘1s!@<12 hG]ʠђeQt-[ӝUVǛ&U:颋.2n:18 ,CSxצDQF#e?c7;k,QjxxjyEPw&wrf9k@999_҉'h3f 0@ @<6t7mƲ0:Ɉ*T`t<@ >nl#[o,cPzJq_G3'lP!rͦs^Jkwꫯ6;c4i$>0 _(eWc, @ 4Ίz8KItN52IFcwz1T !'z<]p] %2tG@1?QϢ ApAk^,n^4큡u @<3 ]*VZe4:%8Q]nѵkW%f Q@|Lǃ @ /`F[ @ @>&t@ @ 2xi4 @ c >f>o{  `;xФ%5<1NX @ - ̚5x2L:5-G @@Py )O9~K|1HKx2)@ 0w\` @( f" !t!' K!5ڈ` <@@̛7x2|)!@GO !F`@!MnA -4@ Fx2 ? 
,0 ~*&@ !ei| %'@-Q\r<@ qdH[j B,Yx26 @ 2aPzیH;M7$|Mm_,*VuzK<AOڵk'#Fs9Ofc+ @@ Q/M-Z$O>tU>쳄vl˖-<gϞ-[뮻Nˣ/ ,[x2۾!@B $PL_|QWnu>ayw=#r-E]${UJұkx׿d͚5RNP'N ڴic<ҵ  @bfZAH=CQFK*WtKNdҥb ٹs#Bh3<#AI?Ҽysy=Qo\;QAC ^?RR%QO~vvW^yEƎx'p r駟nxhYGƌc۴i 6Lu&7om۶I=䧟~_~Yj׮-?duVS}'ymo7կ__^,T @+W7xCZje&UC $K$2MD&ψ_̤[=tҢT и T<@s*|2`XqڴiriO?Tr}m._C5Bp1w\#ι C 1W\qI^bHx6_z%yGͭ3f!B/=(4=rws/`w / 6A[=J@'?&34u:bQ@;v_oHURżIThݻE=nSNs̙3NYn:3L *07N}?1diڪ}%YYYr%8O 9|iΜ9&r ]I7 <ӥ"PQ6Ky蒉+φ7|SZli=!4  B}qF}dFO͚5ׂzXh^{SJ=t;} Xw^ua?pYg'|&[kȰ~zlClCwp#B@}Tkڴ\ve^1v'+o5hkfJYl\H+*O[O1KعUTD !Ȑ6CIGz  ߍz h=w zmo~ҙ;E8~6 GF-&M21"&.zK%vk/ ]qꩧ:8 'CGF  7Tw&rJ҅ _.8F.tl,ۜB PDpl~ʽk>oн{wi֬؁4_[|Xvhذh<-bԩS+4Bط5C^^Y"u.x׌z8Z,pW [Xj=yUWN|^=<8zo6i MKSoՄ EI:?upT\3iM" 7&zdQAO!_p-m&u ݺR4I't\˴kL56Ă :-,50lAYfSGnRjh]9NSkhM=K/2eʔ-Ի/wk^$>Gx x2<?sݥVyjfm{qY`ՉQ>mev^Ř/4">r IFr uĚ풷g(C;i#~%'-G=%(6  2[!`O-VQ@ҥt}.ˠ}!jr̘1GMC P [dРq' ho76U9LP8=0Lk׮& A1qBwqصk7Bw;NGNlb:.n"}}!#hM*2N`Æ &0꫽nnSoGMqS.nWsb=hl1}"<"y0|IҷkS߱QscOV+8a?H1[l$7CFNr<T`xӭ7J}h@@wjyVZrۨ.&ukMf!0؆{4[pz0ؼ WcԤ;Nd6oY@)!Ȑ4@-+u ]栱40z5ԨQ#e =&S8㌐@)3!<aO&>gJf.Yo.tل{:8Q/;{@oNɬO%Ʌ "C*m~ ZA |kfHF5:~GlVs}gz!K#@aPAS&Zw[;]βN5A2Dxh\ tJf)>%o5@E!ED@&\m&nWl8JX1:'vjb&ʑ_2*.ûv â/Y;(Ĵ3,2K3گx./ q\1H &7HK]@:зÞжi-+h^{#ģ'W_W>Z,K-/M*jw[ME$N`Ϟ=3r-W%"PF.5cvy2Ⱦ$2d"n@@2 2$2m@ !I`Pq3OH{wëgmDT@h(+P]_ hGkʨXNT9IͤwumAڤNСCƓᡇ*CiZkhU$6uc Hbm{K]/$>S PT E%E9@tW/)|m vRFR.]x2qE{ K݌Dlږl_mjnu;NW{|:mDF T@dH> Pda h /%Nu F>ѓwvұobͬ\^~;en ų "_G!@uyvS}MvrjqL;d MdwG\0>֩¾ms?ճc++-q#˷ N @q&gT$ ;]1}ꙣۦp{=c yᇍ'gzrOg.Y!0ӥ;4si`9gr9No7 !I'n|%3qzvDyQmpCg<ԋ x(Y!uAJ*%|J+t׆3ϒF8^:aN>~bܩO{1ٻw6˗\(@}?O垫N:3JLӹg9 Fn͹ɗ l+BwѺuIH5Dwdp)j sdeNu7_|Թ..4Jo@% ĉ"C@RMa;>1~Ryz\nm(~qY!Ľ^ӆ8[lH |I>6tiޠ뉾yc3s]]"|=h^7#Ym_3 * `/hĒB@ ["i@bF"xLzn{nQMӝ2g<c}=mo{er-HŊSmgZnNհs{Kl'N$0q^TgMKIilgwV|HBziPgU/ L_ a_["C"p2E"vtft=Hc*yS-Q Rtx2Dg3}2U]um&=7:i 9%x[Ԗ:+^{z\B~sin Ѯ޽C蒃Krd<9 tO$AQ KTvфbTs׸}A~Kxg,M&1atl6ޠVsYU/?n5Z]=SsN_*%]pkt=P!| vwY=x}GB=iAHU=5Ką—u., B x5T\̋a҉#>HР‚ ԟ%6-@G 2UH~ 7{3cs+OX|xG= ᨨ0~2ј$OKt"Zʣ$TdNxNgآg$z7t9.P&\mHԋ!^:eWDп" ` pOY.~In[}ɴ9Rvؑf=ݖP<η@ ǂN# 
Pψc21֤K$qG#0h>UvbM/ߝ2OA)_;q=@@ b3 Od'(lM1AH=DV=w'a#"h\ GbtB'ҙCeBp)rarEBh ;0^4ZϭNsbHh;]Z+r0Hft ׊_joLrw/)qsSqPYIthVGw P/1@@>D|A'P|[olͷ:5g?lٲE &5jw¦xou«1T`=).ȓ@G':a,mAch:a.lNm 2Hf;Od4퍸%R#]8w|Fa :9SUj ;ޅӰ' p%*@@J \{2bĈ@ :na5}f2PAɵN?dy"lջBcIt{K%9j}N^W}> *~(7g$~.֣ܴ^;鸩7G@@P 2u7|H`ϾO/N))k_'1r͠.qɷ.Op *4@4ӥ-}C,]*nγv^RbHsCMū- "G !PwMdk4 kN))i_'7*0'4*- hs` /;3m~"򒰓];OA2$#*:~A$ڀ x"F{ 줓 $>d9zhɰaC;$DNqQ&NNŠi@P]j~&GWON8BBԯV Jg駷vu|5&~'|P rUMӚIzotٞ״~|vqz-YtҶmD6j|W|بP;%L7&P'ǫs>dlܚkO2 yoGF!B'3-{4]H|/{<ݡe.YOFr!}†rSanCgi_rL0~K$0C(Db@A|8"nب)P\rrFIAբث3e=_x;~[lFJ&\*:etG}uc%$ **Y k~s5]apW9gޜAtnA_8?#oJf'u~_=dXG{{z52NE^oںK6l=;CBfwwsgq`H0` P  ŀ#!p r-BrȀ uWI~weѢEr9HYu>>}4PZ{z K7}\ڿ֬Hb XT$A]`c>sR{HQL_:F31P-Q|v< @ QEz %BQ F.ѓ5h`MY^ ]4L [kR7\'qΐ::"&x2ĄS×Nq^uMK`'1"h&O ["a6~K$8BDE,A<7V\ņR*a*eV4EcyyAI]50^IT}7o[9}_W=-4c:Ow^+e!/X-? 2cJ~尦S_c'I]B*kw2mb[x2xqT IŅ1אұUAE3cA!o4DOf 4QqafuS}Vg-{꟏>sϪmy;dZ[lffąAl)SȜ9sҭ*4SoεvtXu-L$k6VmE7U7١En *i[H} x  ~K`p @#<2A4AgFS5_ׇdHS5A׎$)R9>Ou2::l8&Q n蒈Y;$wkmaXv&^@β4Mٽ`%0tD Ydd_UߠT:Tp&N_q[X}NH:u̘1C#={,J$ Þ"`'ݶA7W,f!䬞-B򸀀@#1 E0Pǥ:I; y[Hqi!o`իA@* I]\2r#0xk؂ZjK_MJN;49 )dT`xӭ R&ʞV=7C(Q}Z/%|:p 5D^owX.0 nCV=)O_3"0 ˈ+l>Y.w Q@@ [T 4gߢzU`P5U[q$<| ȑ#?2 p4) nڞk'Q%G@7 q Ux2bTih׃tB)g;>$) .99+I&]ذSf}cK֛fH0xa~KxrX0 8<*&qX ~Z6j *<8TɠH p9y*NGa{7ڄ @` . 
78oPTiǣۺkKG趕#F0 _XS{"U{kRt;ȫG @9.X@xz\~ɉNXfO|҅9{l駟&-( f'" m>/؆  @dg XL e%V:-?uiIׇwn<훮]]V"Br a @Ypxvy`O=`ZMeoD|A)&1 M& 8ȵ Ŧ]T a'xd߿?H<@=z,c ؆ &7 ɮ2PtJOɓ'gr2;wsnޠs= ^l MY@H> [IwHMYb=NLnܟΝ;KNb~ @GO A tVOX vƗоnh"0iҤ v}^vяll x ^@'oA:p~Vzwjn]?:tc\ĕ@VL\kل~n}vι:3VQ*Sq9k哹k=fP;I x" H A{g =-J;y]vr$˰QS̤^GqT(آԩQxdT_씼=&g9.ZI6n j  o(iV;RJ _t^v  @#1"NJrw jם~lO?OBVVC S @(>[O !<E=R֮]+#G^{-m @|DO BA@#h֬ 8rvAٳg)SF*V6 (<@)'kܵeoG;nn:0nܸu=)}뭷lٲ)*UKNNNRF-[&Q/b-i]ig @@)ko_c{t;rku]KL?>N9q>A:?XW.GG'|R~m3?O3f|ҨQ5;v˥[nQJD޿ԬYS֯_/UV%[lhu]sNܹl۶x4h]9sD:tlܸP[L@@B 2$/CE%вq yvxq/ظ5ϼ?ri2nYzSQLY9QmUv/v>j_$1oG!W\q8H`f[֭[Ԫ/Xf͚e08p@}]SF[g!?d̙Oɓe…FxW0ԯ_?:4͚5kErw1 //O{=QqAŋ^zI¥lJ s,:Ax%ez[cnnj;Ξ=+תUK<6#o9##Wy+8$8ըQ^uZ*}77|# _K.#6m*U+R^xĮ aBpwr6mJ+W +!*~0gJLL={V[DCW U(@|~hÞ$p)7ں*xȚ:mvg+|yɈPv3e5]UǍ ^+KeV2 )Kn'AA;" _rPHnn馒ȫQʕ+논o9ql`]|( jժ?^썍E,W)űG%6>ĈζE.   AY( @@!|3޷K:E#)e$㎎PnCmT4c)j j1NI.\ ~7G6NT d+ĮӧO' +I6D"`Æ rva/7> ѣGKs()HƤ$~!ͱyXqax~əy   ,@@oe\.p.5K3si dEbCB\bUD[ɘ ǰq!A!e/^LvAg"£>ZRD$oX=Pғlv]_.f~~o]d—_~IӦMUV9Pc``r٘^ Ue5iDcĈ•c6pxsJn> Z@& neÃK6Hߝy yaD紴4Rrؐ9X=%$ڸyfڵpcـЫW/a$ y9ƍ)55U|.7Y9$'6zȉ  eC G-[.m۶"vxnxP@%wŢDW1'3 @f/ v'~9ƂUb޽SO}ܸq{nqea@J&GZ"88\u aaZ)W6Bt@`d0KeN.^KO?w+CHII8%*5b֬Yҽ[&> 3B/K`Q, 4ve"YѢE Wր`.S@GF퍉iZiZю4Z~:gΜ +ZyO"8]Y 3+W/y `.<+_WQO9C NطojJ򄵠@.?U@@s~髯*Uo&"'q{YKq{xI=NN1c/ɓ%y/nVbǔ)SIJ|mڴK.srrrW*׿    Y02hvh0x=xs06Ԟӧ馛nGyXpQ8qǎN:ф D~߽{w?ryrwv1M<J޼y_>%_yzg+Ό    &S* w+5jԈb͛7S>}hƍ"£>*4!v_~BJH{졲1x?TF jժ /}GM6k׎^Ϻn#~f#/>i`FGA@g]%9B%Yp!~°J?b _ =z.\bbbJʵK-FDD,r,I~_S    i02hzx8Boi۶mTfMaHJJ.E )))4|#{>,/M -B6 gF ~|/OB2iҤ@@@@@7A7C ½CTn]SثW/Zx1mۖ~;Dp֭mmoѢ͝;s,vhҤO Q6Ɇ;w EnpL    oP2{z06@` `¸q詧gyF(v*p| [ٕBqVZE&@z-ln#6zpΝ;^Vx @@@@@*Ikm>Zg|CO#Ͷ!77\F{9 ٰP6Ҋ؀l8k]@g `.,)wQS `bd`CÔ)SLL]pC~>5!0 &$% ]`.D  @G8 L6Me(%5 @ɠ&}׍&?}"% |pNBF(|#o %D/(;vo9>~P DA `*KjYA'f {B /j侢o    `dP2yt57<}y CP26\@ Jt@K>J,v- (pFOTCA@sdܐA KbHd ̘1C(222< @@@@IP28 ٔ'3E WP2unPG  ` `O ̜9S(}Z/*P Di.`!+ `tDs /E  š 6\(+ ?DA+"RpΝKϟѣGS5LM|dZola @`ء~nj'9_$^ΐgcChEH/<&%cBfn* Ed)NzTQ{-  _02wtr=O xxJ8=Yt4`< ^D:}%'ٳgiԨQg܎g"gVqiFTa VG0 5'qbVpPxP@% Ai(Od.`BJiu 
Ell,\\=h80?  P embO3Q.T !B"[Q^#X`>}FIk6B`K^3.X#eeCldUlzKnaSîNebꅔ!y59vH4Q @sPA|`#tzn6bBLj\qE~3Ռ+)@@@@)P2(EL@O8G[%88$+d˂  JAI(KX&n}8=Jz9I 0H ko͝K bЙô)ضZGJ/uE   AA(Jxr+^[ҕXMI 5"EkEQZU("4F}| .MNаwVnW?޽{{VZClU{YJ+Y w TlQZ-u|ݢ``;@@&#@Q(,Q˽f# +5Z^W\K:H%j;fMvtSmɚ@#okNcuߟ%q z]-MPK 4]YHy  A9(I2s?Iij)c5F9u3]rvғ}[5i{*aTXtUZ |G т.dPb.=CkRq}/ ^GUdw& @@ɽ[c~7ѳԶ~ Z(RV>"Q hSm'DYb#+E`Ŋk.۷/m۶>#~ː5]~nm3 R-аdtL?zA@ KF-:,v)0SҒ!80@rMEU+!ܵ!RZV=tKzmXgʑ7I$IwӠ7A)i jXMG+y4_wWzԣemaJJl`X/:y>FJnSveێSjHΤSDJѮ]Y8m.J"9g1z_gdxGca,GsW_yV;vP>}}3l \|?1=wk1)puj6}-LCoSzuyX2Dt,dW *17\T eOmUlz˭"^HJ7|̈́/40ux ?]qLR)9=jT F|Yl"NHvg?h#tkċ0~;"!޹| Rz5#%ɻp8\R++& lKJ,軭ǰRE NY 2 $wdBDpHɉ4nq8@@&~8 <K*AwիW6dWQ֕8ʶOV5d~,I4CT1ER$SfX?SIB-tR뛴Qn$w ('6$֌"E"vxt:5Kl D^^ I@z~yP&qcd<;P1Tڵkiԩm۶RۍEvKܧo+28 ;2\dҊ,l!'C2qvՑ]^+JG# H˺ *Wd]}]Xy$ qAj zW{^ .1@@ p}@a(F >\ZDW*0Oۏ+vOwXx;eUCh:|ꂝ/P˺14N 8~/ݭMmcC:ұs4TWZs碑ׅ+"gzӤ~+Fh6$ ZԉoƵq88Փb+kߠҚc4AɠQ[hll?vlJ yO_3B(@@[ pɎћ֭"|'Rz_OsIYY4.t93 ORf%o--sK>'e/GX0~}R#+RPAZoo9nxk0 ޘI $+4vzz⣵t )M(XxeZ! / jOYK ~pݟ%t)hd)*ᨼ|Ia+qW.u넒a˖-3l/ =.^҂4n/ ERe=B;t6ra  -ؗfIF4ge]]~w'.CP/X˵"o@@@dea(q XF5 ;~ B+-MAy994iͷy~5,ZN{vl3ߣ7Ki^ԬURLJ]nkl.$\R5rq5G>)پf_@pCc)eL,D0wW"a 9uzvX&&gdY8//vcdlXXy켍/N/M{7Fcl]yRvc^z̦d]d% .C|Rs _|޲+u,{.C5qٶ\lM PGZ}WyeUr.ϑPϖ7eR#֯ wp P1&U|ZDy9Զcڽ]z*i5lL!`eZPz/ӰҀm3v ,:{/jqcyQ-Iy\%&}N<@;Rx[ `U8XurTuJ;[qiYֻ7zAݻwØ2츲BY\eLefu(ek]֯ Ao%:w1',z o鑁>բb|Od$J~•bľ}EUF|)62NQ]Х\ʡ)tR'ࠓo\昏9y͸ # ~n 2`N\x?+e]~W2d[Zզ)\m/.CĩУ[Kea,d+;ń}jA5 [   JcX(Ŵ~Oէ3iT7!%:NS'+U +,Ծs7UM;KjIKt߰GiԸ?,Gz= <>+x rfC,^{ j& I+A׆Ʊu왠E>0KaGɖ;Qh`/$^5.y)୧yJeiԩS'  6FYⵁs''*O612r;K*T4j[wT@p_#Gu  `j02zy&q X͐]h8 P/nH ۡvQ۶m ?tLjv= @JKud/pD#%T8JF+b {K,_B/tG\B- XAH-@KJ=znEdEBnxԺukM.C ːT 1:,Bv  JAI(Kfnnc3dW@q]m}hٲeԲeK0`i9 `X URU@@,`d0H 6_a^evCQA{\!ƅ-ZrW Ȫ^}()=_wh6`b"(@@dI~lop |)_50 h!OXѠ'B[˱``3Xج{ǏJE/9v|5 jG  e @P=8qJ|dO`9ʹYRVV1b zlpF/{ 1}B6.T4 P sxdž~9b2JgOd゙b.8: SkyhDP]+b8.tyF_EW CCfnZ6Nu"< ^ QB5M'O$3'V('dT#?=J0¨!ET p?>ծ]FB S]n3XXVJ]#Hm¤kEP,Xog 2Rh& ?*T Jd\Iބa"HV23l76.@`6[OkMHJ Y$d0ۙ1^Oӳ D\wf ~q\~8J8<>c`5 V URf/t94 `]t6-X~. 
Fn=aډ |A@  Lo `V McI,OjBQLtvֈl~n\ب-RTT7N[hk=]axՕGS5+̋ UfjC((̣6жHNʳdQ-yBWruEsC\  ^"#XТA)On_5.nnHw/xʽiW%T+G P26.)N ]Ȁːkd^vS5"ϒMkv}B~.ua?k5ԯбU isT3*aw`u wKTfX4?99xdWmHćw ҀxJTf$5,#@VVX]G5 ']t-@4^NϚ]hY Χy_>+iֻ}օa(AJ%( HJSw*Hr4`JX0=;xeZr)ɛ?C '~OȢ*tc4kC  *$%<[։'S+< AR2-t1@̜Ǜ{R:w$-ٲR}  S80x"^e%?]3{`HOltAZH)ll}-|& @ݛǗڏ/ͥwyBCCW`4[#Z4ϸ./]v+]-f|w_wo 3SwMMn`iʗORn <  v ` vKaI262G+>mѳ4yf^W1!&+}԰Iv6. &k#Bד;HyzWzF$ȱo Zk>v&~uiҋz4z@@< #pqhE )Qs68 EP®8bq!6"88wuX.X,4m4 ^Y|qaҠҋrN>]移fw|ΎZ:7Ķ3iiױ5}nSZGJJvs`(GFX$sk`rU Q%`kl]50pl=(i ظ%g#+)7N$:e]MbyRlŵ@CQ*%.C{_GxW<;5Koꅲ%CrHNU@@#02x?7kқa׉U /-تaZh0i$џJ 0>qaլnx  v.ё˄ -EW͊|   <‡N@)%öMhQvnGSߟCu7*3Ia?ޥaެE pO(wJjôiӨ˓e9e۴^Xp.]vDFJ[|qmȵ`>6(N8ő@H:y >v 5h܌._FON!%8=؟&܍>ωʛOnU A%Pÿ!A*Ԍ*}Mॗ^U~;[߹+B?\?I;&Maf@@* #C*"p93CR$Cm:tYv022e#UF={E~r}=<=xW7?Uj|>ϙJ~bLG. /@!!!jRD_Y]ժːV \xذ Xm pA@@F/@E DvT5:M⽠B7ԩGlA|*"J—p?jM1j{ZqZ0y5DP2ئ!\nJpG>h0"a)]H fATw|Q +޺?~KoMy.[WJR˶55UЧЦ-47G,"={I颽^^ rd^Ǩ{Mll\Qrc|y@V67>7,tNJWM|umG@U{c  AY(l00Y\2&WK8@'դ' 1!Q&n?^/!]'P*s~bB2I_W׆, 7Q |1#WimԡKw\9S/ҵ/^8OW23m82'Qu3gΤz._|} ?Vշ[ZbU7TkJvP%@@CJ5zjO>=p?&S/* ?EkЈ~;jwSWOoG/?fO{H-&V/$T $74Pƌ#`v%#e*2԰VUzw3OYРF(i%$7#V~׆ KN |*k@Pj;Mj[i{o[3#gyoovB]Z쿪Nz!62HHEL`֬YAcǎ*U@Uf``ZKh@+@@`d=s?Zhl#1'-)9+r R|bu ]s -h &JabCR@0%KБ#GhȐ!԰aC M:aQ5Y < zK\?[ @AFs&{?:y1>ЪS/&Ĵ*7% A@`d0kzz ힷh|wtA{iӦޫ%҅ ш%Ne  #|8z22 hd (42h:,4@4d0P  m֭J۷kh]P2E&&A@_dx Kha(MJ< @@@%7nJ-[T     )(<%&nÁ `HP2rX)*%[dp P͛ߑ@@@@IJ}!ZFt#p5b56X&]-t;th8֭[c3  7CO 1(FJP 0 %L3(@ɠBSA@o. 
ׯ7r7704( =>h{|:5(|M `.1D@GJ)z $sNdXv.ۏFAɀ@5xzT $%&M\BÃƁ &xt@F`Ϟ=Bɰzj5 p NB6 LQ" z=!:Q+8"%#:  3J+VNT    ,(\ .BV0(L0"(Ls 8P @D  8pP2㏞@@@@T!%*Q) d \J %%oy6;:$ ˖-A@@@NJۇ\t  %p&?}MAÂF;vL(.]jΣ    `P2d <}㨡 =P2x-J\¨#~ z=@ DĉBɰxb ]s\㭩郦 @ɠ ;Kn`0AEHJJ/ԩC=Z&{^܂଼R_% /_@@@@/dHx`AE@P2xj2sWd\("K5G!Ba.a `dPkÏ΃@9O RBB >~l/W qӶ!:,RPBj<1I@CF팅Z K!2hR2-Hll,*Q\BO fi   *8w}TV-9r-A |)kV!62GZo6g%L70ȠA2j110Ȣ_ >(g##13)=bBLj\* P!"dL E傀> ?͛Gchp̅bCZU j4[\l# z%Z Rx2yɂ+!p  `EJ+io]#(nY FK}N~p q0Ā'4w3(ɟxYPJ]=>PaѬY(&&Fm~#"V.AkɌ#%v>=  .%<&O4yS玠#c-tGd0lLNJW50@/#;)ϒEuj ]惩zT-SN}.@l6lN dŬL4Y, >T!A3f̠UғO>j2iTmA CwwL}[QTXne76.xkIJggɦ5>{? H}ۍzQ1r3Qͨrem^z+`dp*J@_>^U##YhA>.qAg'V6@qCCJ%1h8qjZ+nޙј;__/;8 [@%`0q&ꅤ<:-؅\Q'?EC|2[Տ#^8S*oWOfsջһ?GIi/|$-ٲ;>`dP&2$}5?g8|>i@dd$M8QuWOرQ99#_ ENms҆A%ML||$w(Ʉ   š +Wr/Ӻ}?{ǷPHj߂zO=A1# xҩf99L |qOk-r/wvv6F<{(pb0(Q"<;5KJK8I?4%sl`X-]`HS1(O13奬Tzɴ~JkZEj[3< GFl㫍E|  >IeOt\b2ḑثiYKǺ>Ybn m9޹ٽ I;L aI29NK*E_}9@!;uZA.7`yf/qה_Pwdjn W h3 iOFRzIWu'b2 |EOQ%~Jo;=t?KhH$#*k`p;A@@p++llNzg+t 1tRXc/>v56\&PT !^B_RmA9౛RߎcdhXyz gP DAF"p4ypjЕ׹.eݟQtHSv'r iF_A RBHJ)E%]F kU@ڒAգBK8zws4UjA^zLR|vv :ityjA/~>lO )A+*SPݚh)dŶ4_2(:ZeAzU< &pUϏ&MT-?R8|ڎ->x=?$d/?<=k=}f?ۚSE?qbC,ң;UǺb:5='~Rr!ʐC։O=[E<,"j/{@@@02x@02_hx]~xUr3:S2+68 q,Jy~'\{P#pפ߰P׾Jec ow;IwPr wMyk^y|ng35hΓKYtsD;7YO﷝e>Ow1D.uU-ǁp4*d3Bz% J7i#ч˧rmu'S(<_et'A,۴iӤk4ydS<80@rMEU+ڐ‚)-+OZ= 63HFIԤmh餻iЛ?QT a|?IN&u{O Dڕ|>W'YZIIHh/Ԏ 5Xe4QRUDӤlk?@הKy/f|kR|rd"#)$}4\Z JyY^2F~vHMű Nꈝ.hv˻(-٨dxGca,GsWz鑴znU@@@@ACZ+X(VI[*Jn|})[s6U I5[ 'H~-IYڭOzӢGD;䓮q o̐Φg+0n/ 0,<|k3qI6:0.=B DA\VI9%g %!2bfW8!x%~IZߟ=6lCǏΞZ؁~qFK">^>2T(@@,|7sJH߇SrD5U I7ݟO0A13$V X ߘO\pc`X|;+!~zb+\/2~}B {eUmzҠr\xVNt;U%>Xc<6~JWUbeGaHMJWZI$J!h.6F?Olw =[Tj._ Ȇ#   „LF&WO{$B_:?5Mv['g}ҹ<{>-c>I>K=N9F \~vMOJP$P=X!d=& =!$-̐m9uzvX&&f+v}]/Wh HKO/q+vm6^YXeZJ eIӾs7UңT9.i(("q̆^|V(E>ުH|]AÄFz@ҝLI']s&er1.?r;}WStgGƏ/:Hl` l%6&KĆ1<|%wq!,>̪5BɗKYIFˢB:wXƲac&_C%4eh0u)\ ԰IsZRxvP"ٿf)Q>82 +k_ZDW\vJ^~n<=ޝ:cX. 
voWKO:d cKĘ'[xl·LРCޮA m'}>/۞&Ӗ'DJ("t{9Iw,}@}I/3fKdIȐ&R.ǿIKo/r"|;P*/۸f({ )tz =1P,_ғ ֔h7+DVmg:ԩau Cx ǻMYlpxv`dp3'?MVӭmcI#]~e%\cUѣwvg9Ia qc~W>{c=Ү?Z:V޿S">3AE=kX6bte}/E췕'=uY;&NHW20+'vZWۨhEc!J0Yȡ4;60և۱Tjծ(V)i"gDXxM3zx}ٷUp*{m ~R<  Tnt׊+A ~~.8:K<7jJΥRq~:uȟ#R:Z4grڳcM?Ie_Q?RRYW_j3mܗꡊg-NҮO:7[PYt|}O:osT|j;+IpGQff&=J7P^T gpBA^V ,ѬVk$V:fO(瓦໵Ժ]'~ldAe[6tٹg(=O/sޞ%&p8ou`w {3f1|L5ʪ_jP/T jG  `>02ocTA?-=2zG|^ZrF:(JKc#G.kg,jɜ9s(== Jj 7zD5t=|h8ⶫ>IKn )q:q\X{KծHS5|Yh%T !~Kbt~i1O:\#X5b)UBO랎'P~&g?]L:(;;ޛ:.z1 /]vUZb1V/?Ql˗2hH^h[Q_F ̛7O.0v|mwT^lT `dP)J pԺMPˎqh_%uBՌ/Y[t "spȪb=_7xͷ^n2>^NyNzT/(iQDa!ڂ8$;h t  DAJD被h_rPHH ,Wʇ<!'Z[߬/C/^ R1DWiV'PJJ =c~odA{eF#@Kv{J%wfd(7fh8!O\`$x``x[ָ >^ަ; JPD"#dVQ>ȪB zJatm3@L36j@T9m`ELd0;>4,B @@ +tPifuNrq}tk.6@P KobB-)-,EU5p[y ~G5@ɠuԩ9,S*WB -UW_}% IIIj7@9~+hU%e$P jG#{kz(G?I`ذat5M>GyU9/``U   ܡc IP%H Kz".wJ`СP2uL@nrB9^@@@@ `d( !O|W$.H gK,cǎׯ箠&"U rノN0t@tFF }k_?ۊzOX$;CɠA4qe dZv F L|  a02hxp4؅\-^#*YxrVAVC&664jH7VYZ]~EQf(<4Ӣt}<9^؅BVCC's8OU{ /vF62HW̶ 2s(RDkN`eDxHqF]f)  +02jX_eZ24?xҊF$ M65b+쓞 ܹMVGe(V!(,W)3H`)trX2'@@`d08^"5Cdj,_+ -[4]mux!6kf+/E3RCRB |HtIwƨgz^zѓlۨ6Ig aĨ|G%Z3%'!UīIHHyx۫GՏ^$c|9&ILc <7K.رcb ;}Fh:.=kƸÓ GZ9±f;yl؋HHH`բ!Ud19$תS~nO={D=r&F{zcʢXNe^R/Wui12yF@1g(-/Zԍ̥HH*MFJF&s6}}@TDuHn~FSRW^da3MިIy2 (YƤO?)C\HJa]p&;EgY.ӳ1ifS.(1Yy󟚞JfY\XrImJ$@$`p42|NfᵙOc^\ ٹX/ݪ/pw' 3e`h2{AeJ`ƘtݻwW UfxFգ*W-R`\cxn4Im\ =C9 z^!F$@# [SΨ/xp~m6⩩bƊ0),.;=alTVnI7\Rdo6ys~t1B<5yٚ7b5c/fz<ÚP  s`9֑پxg8 #z<T *I6|tF\¦gaB%C2&{FW^ ,_}YU扁6Y14LB'nvȐ %n\-ٱ /ǰv\q^֍d,\?Ys?͂^V'"C4cuC2&ݰ*ޥKt P_ cub#Cqӣu2ěYfa.` tjU-/,VՂVQ)^JV77O x ^H!F$^ucX%r8<͠n|c>eAssЋ^"UB*QG7cNb߹o^[CΝѿgx\#dB$y0Gs4K/g㸺|GO]p 3 T x/_"C> OWOR DEowJ]FW1q?Zؾ QylYʫ!,o\-;\er~ycǎ袋%U\5_V9oWrT0JȄUʯa~}  ߱d(**}*50Zj3=\rN5L-hclĤ Tu f`;힭p="T»9&k1?+~Qx~{Ϯ]P\Y, sdpΆgLJ~2;YC<:"N%y~\'4ix5Hr3 HnJWYØt]~R}h۶-=;zSǦPdi$Qaaz6_"'n=$"T6 k”;Z^Qeɏp[B)\4 j7+B-Se;6$,sL;run\^X/Ƥ{Llڴ }ڵk^_*I ,şCnhu b?[C^}o#)!k?uo08y" =zvCFތG?tW.[Sm KndL| }֯/>֬ĵ7܊Yn^L,]8u֍"zgKhѪ xit Oo45PqIH,DF -6ZB`PePHm`BЪawԯ}!6w[%lm*EtDYc =nKK14ahƤNкukz28)^zpn9O;b٢Hn Ĕ7^-\ 9~x@3&def{WCzckT!7u? 
\ 4yذRyGkGx{gq-wcάx[=?FmGG =,^T{_ :.\S'^yW^:J >J" 5ѣHqhŨH6 1RzOl1Uغu+f͚ C pJ{p]k+Z%E2j)P6YGy,}/qX3HSx1g4;C aό}x0H 1APxJQ_6~ݯ?cnF/ޚ?vlCT !!6Ʃ.isoa&ҏSQш~KC@K.+?m}m`.ѧ\dym;vQrEFdiEZrG[Gٶo\"sx>H}  ~AL 샛unFj`L۱cf̘-Z`ذaSխ#'0oѪo{(5.2'-9Z<TF~WFD!%ǥRe yո{uQZqq ~}Dޖ @N?oOňQw.v t<b`#  9mݭl'N[JƐ2d}aeū1޾;'y3fL`i{^2]W :)))1bF)rE~:CԎ E\d(DN.|96࣯"$$T^ J~}Svm?uL?u&$BINIhk^w KH`-Wac|l%]7^?6"8-|HHAW6?F$*asױ7G@Ҳ6xĩx*3~mma%Ǝ^/,Hz Z̰j@a k1ݽ˗L^>{7Z?V EEZ:;Sy5EAC0:gG[ԍ2tUY¦ IHJ I@$swFMvr|[gװ$BʘtSoH!)y&MJ!NG]`-Qϔ D 2|gI%q7a*~-jIBU؄2f`_Lp8EۊC1akҿ|191PŶ2j!<֌P/Hc=eΩ:u*5jnTsdG$WCc+@?רvlIu `O  <aU^$[\tBĨubs<&+^Q3gbDhԴb҄7 gƒ5[Jۗ>+=襍L%Db\_F$Րo1T+HIHxhd0ޚQc/0&I$W?ue 9mΑ3#"ÂSXOԾTCCQ\Tښ'4We} -ѣ4hhѺ$WSށ7?}kۗ$~@7#G?qk%Z<N8@rrd0 l^ +PunbDN.)i9Po  ` K㭙i4K)y*Y~!l~c~zÇ'^zꥏMæix41 :pP,rOOm/FzvHM3tfh8Y 9 "z2x&E@Unb>2h|A 1?-lS?Ƹ`*Z'%%ܓaP&+ڀ%I}-z䮑Vy5S\Pp 6:~p$@$3>d2l$@$@U#?MފygBJcZnA~ɓ'{nt5kDbb" +.X`ԩի?m݆[bԨQC.rlڴt)!r;@zzz9<+QFl< D:@={Au֭uM~AF'ďF_A/t%H 7yyy-UXX)n @f|>ث%`~gjh٠=bj!&"N6[[qb3]%~[n}nv\uUQ~}I&FchժM#FPsQF 1NRZ#<,NŽ[y(xJyp9cɓ1|pe6lP/~7 ibliK/n׿U@ӦMqת~-Z@jj@%Wa ]w[FyHϵ`F:INۅbݻ/%+T5 rJ[WutxU̜9o YԩS+!0$ lTHK``w93 kw?eTp;4$ \|#^}24訋iIxu]*C a -Ю]2+%WVċbdAΉ%KT#?s  _ď ׮]e( !Tk%þ}Mio%*"$@$@~%p}cl#(*>b5JfnQ"Â`+\w,E 2,}qIλI!Y,4B~ g ? 7tʕ cK؃"$gS*5k ˗/e]Hn1.DGGnm۶S"[4֮][}$}-&Go߾Bd 2XtJ@L? xQ Mz'z_!gƌ&^.NF@<ĭT ?FF' F9EڧP3.!н_A[`Ą#.f|ZOthr oGSvztI|c-WGH yNlpgЫhK)J>18j?߯(ȾTB%e+/)-)} 18WI83g(}Ig$wK+DHHb5cz2XlM8̼"(PƅLO#2Ļ!!:cBh45ḮǝUyY9gˈ G_ *#U!$w}r*L0Ay)Td`lb8\վ+ʷW^I@ɠו^f|`eI}E{EBB$mb\8r:&Ɔpex|#4 s#ߪ2l?-t[NF0қuz옖UV\ mڴA&Mt,*cH42r̡ ̱2  =supdό gCaks6%Ssd'~ݹ4Cú?~qQ;sI^Vtoy~'3ODq4KjZn*Y/A؃H @#7`JF'@O| GC j,1(jV[qΗHhd0*TG>ta9-"O>ߎ˷Uq8^N" $4VRBxU |MDQ$@$%42x $xN3$`d*w fEC%vR hd]0|0zL&0yd8pr ׯk])Djz^@C#*"n5C%*#xHOF3o GqYHMs;v:+govc޴@1$Юq-  lp7[~a2.$gT[r3H18Y+  F\5,$A^&0uT۷7t5jeÀfѸP1*]cxpYkG !-}ctq4^Y"P9. 
8'@#s6|J " $ "  /1cv܉#GYf^H${!=uGrf62pI2 _bNHE`ذa%95"# WʐRqHHA2  p̙3UN;v} ;{aOZȶ|3Os# z2lA XСCMЩY"&=|}l-+K6#aqv=i9`Վ#)L"$ٍ 6sl&  9 '[lٲC A֭=0}֮T y~KFJxFw}>ɵbQ;.Z&~ݯqy~Wje) ʐ +8J$@%@#C`st  rj <AAbݜOC.cC; Js FNy!($AB&3yC> OJ$@$PYgƍ!ƆmVV!O̵v8gǮpc%b;5Աcs3UÛwR2Y-0o¬ F&5Yyxk1xrx A$@$S42/ xJ`޼yX~24t տ~2:knc4j-|yz\ܢ5cz<;[)k"ow8Cb{7R-¦ix+,֌r^3BAaI֧*ჹNDJz.zcdC3\\߫2x\?7\ SY͐_M;~?0;ЮSWfn":IDAT: ZNNHO+DWTgGuy+6 o /[ viv0=q:+OBnkA~^5o>#6[bђP6SU:p-Keg#C;k=EKdK@z/6j֮>rlv5n yص}+n!$ԪhDFxXKT /' p nbG  vZSN2`cdj _tZ8E+-TAY7TrZ < yJ~-ğ*u␯U&Y_1kռ hqc Kmh@v&uakΊ\*pѻm9bHҪs-cQa!>(~_\~" w*=Τѧ76 _'oȥ   (Oϔ$@$@ pW`̘1ر!!3,?Ż`TăZRE"P{\ۭ-R =Z'c#WVSlܓHBħS>JNy]5ħ˵ <5 wD˭ "ә}p饗=i=W^bTpԦ.auLr2T$Y}s%ϾK+۪"_{d[<uAf9ǎiʹ|a7ːQ11xҞEv1#'C:S0:V /& p nbG  ݻ7zeUyJ˅pd69S1>6BQ=* rgODhP-OC0RՐԔ0[+**o')#Å㙱bԐ>$8m#u M'~ 7 0\›4)#tq;e,]˖-CϞ=ѣG[o~`%M%P^ t?;ĥ}lPy3Y*w% 'oR* @% a{PIyʫ"Daaᐏ/T0(HHgҮ:J   +VF $WCr|1$3&\.$@$@$@Os+gE$@%Э[7\r%d0 Gqz5x`0o<  pJ N @ ʓAL ݫ(HH<z2'}  ѥKz2t7Yыzw3gL$@%@#C`st  r֬Y~]vE~ʝ`z<12l^ z@%cBt? T UNjIHMsر#= &`V6Hv$ $ds$@$_ׯW9ěEФV!+P$&U%up\ 2z2Xy9w !:}dXQJ FjzNIԭ\ X,*I$@&$@O.*D$@F&a0w\#O@pP5GCHH=7ѵaMcz_+u# p=gŞ$@$@~ ^ m۶'XsHAQ%jBēHHE <% pH`ʓwxI 䇼crȻc '= n @6mЪU+z2|%+- ^Y8x*Eg]u9|IH<$@#؝HHmۆ3g*CСC};@ $DBC9((ś¨0<,/  0 LTHCe˖hѢ=䆟x$LJC O!3sRaZ )UF$@$@z"@#V `ǎ1c24 6DH0l!bd8r:gƆeC z[C$@'мys3Ʋ 9v/2r w'$%DGz 3TXB$_*jDŽ",y HH|FF`  سgM .#FC_Xa Fo̰{Sq16GBtNr_^H񌈉(!yHHH/V: 4i=e=l`tC[ݛjB WtnIކeÃ<ܓ^$@$@$/42k= X}_@ ]wy x2)o͓й'-i$'מ3  +9 4jҞ fٿ2"NXR^mk![HH(hd0JQO `ԩhРn&̚4_To16ȇ%tp.$@$`>42oM9# 04ӓ+h]@l!"UDn0Ԏ 0K$@$@"@#C"  _8tLd5WØFX(gQaHIGv L3GOD ʃW)U+ԊP +42s$@$@~'P^=z2I}͘V܄슶k9<σ! 
G2ޑz8#UHϮfo  > D  CZ~F%tAh叜9>/Ky't @ Г!9& S5jԠ'S:N ۏM 랂1.䇺x5@B:"CNOIHB ^H!$@$@"p0qDo4{Oo eR/]A}ʜN$^8;WB6ٳ6ClIn&z 8Uf@; pHH|BF`P  x7!F~XUEYUkJXTX%?W#_g_}ᗳ(xQ8 [?Sg-1}D{~4&Nw64u@zʒ1C\?-wHHHW\Q)HH\c7VoNXkүUR*Sy:z%Ԭx`Lo}\ #raH8|@q׭Z_NN&\.FnN6:vZ_T< RC`P P)ja\9  HHEFAAqT[cUN<ꕋ3I!5=k%+%!*:jj1@^nװ*gN#&6~Aʨpо}{"~7p({nfp>&*22`W $@$@>%q|IH*CࡇR&h`[#2tsMVtK\dī!&l~(XBBÔkv?~epoϮ1c*ȵ_/Z3յ'T_wawKwNv*?fC$@$@~&@O?p$@$@?~<ĝ\&lo+=^2];4܄RZFojh o%Ҳ ]ļo3܍E~1"2 5AVV&|i;ryX`&k9~R.l5j*dD#.>R   *1n89s Rx) 8&UZ\ ᐼ fo1K~ivV @ Г )HH)d2X+C^ ehvlŞ:R = THH࣏>R N28'0WC%ttQ z2Xd9M 0;SKO#u5WC8z1Τ$@$`<d0ޚQc 0=I&)O4ϕy5HDŽh땁EMHH`uIH@n6U6$Lm\ ac4Qל\ [ Y;o4x5$   Г#\L$@$SLQ ǏpeԵ߰l2縣?FlQZڼԊ0d 1Ą7  @Z&VzSiB$`& ŸtV|nfjċa CEg9USOWKByN>K0$@$<Ů%40pazHJJ ~N߳Ÿ  l6yEH="Â`g +2H$`442mL/ L x@ =<3j8Q#gu73d,ې hdM0|0zL'0uT߿7|3ׯ{}6"'g|CU ›8. 8'@#s6p hԨgBխ@@ >!anRA¨0ޠH$@$]42x'y@bWz2XlM2]PBaRVn|s$@z%@#^Wz)@% L6 wȑ#ѴiJJe$|kHAr|%JTV|ϓ O|0@$=q#cP) ƅ1 FyJwYJC$@%@#ys4;|0M 2f̘;wbذaHII)s;$`4A s_"DF!&"w!.2h| fB$@#\M|0&M"s'֓)K@AZfnaA* B lDŌ؃HMFx`P$@̜9; /Y @ >KN  Akb``DIcEEE(..Fpp01@^@!g ,4I$`(|r3rQY ٳgcܸqʛ3,IHHH`u4,FI/ O_BNH`EHHϟ<6olP   z2Xj5Y}zPz2i5 %6ԌH`ݵIH@,X<6nܨ[ p>}hC$@O.U&DsH =*$@$@'h"ɰn:IHHH*M F JoJדy Гk˙7 Y›4)HC H)$@$@^$dɰf/J(    _' SS| O Г[,&v" '_qs0  w,_\y2\ҝC$@$@$@$dBXQ }s$'CHHo 8"g GTxHK $st? `4\Ξ=k0. +ϑ MFQ# +42s$@$@$@$@$@$@$@nmTH$@$@$@$@$@$@$  hdp; "@#+:Timestamp Key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":311.0,"y":147.0,"rotation":0.0,"id":268,"width":18.0,"height":53.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":178,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":152,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":264,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-3.417721518987321,-4.214000000000027],[9.708860759493689,-4.214000000000027],[9.708860759493689,50.74999999999994],[22.8354430379747,50.74999999999994]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":415.0,"y":313.0,"rotation":0.0,"id":250,"width":7.0,"height":413.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":172,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":79,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[3.5,-3.0],[9.5,497.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":290.0,"y":340.0,"rotation":0.0,"id":11,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.user","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"c
om.gliffy.stencil.network.network_v4.business.user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":12,"width":48.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Account

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":479.0,"y":330.0,"rotation":0.0,"id":2,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v4.business.user_group","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user_group","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Organization

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":159.0,"y":310.0,"rotation":0.0,"id":79,"width":531.0,"height":500.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":159.00000000000003,"y":320.0,"rotation":0.0,"id":82,"width":108.99999999999999,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":730.0,"y":340.0,"rotation":0.0,"id":86,"width":61.0,"height":79.0,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":87,"width":62.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Offline key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":730.0,"y":455.0,"rotation":0.0,"id":88,"width":61.0,"height":79.0,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":62,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":89,"width":70.0,"height":14.0,"uid":null,"order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Tagging key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":360.4891500904159,"y":650.0,"rotation":0.0,"id":227,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":158,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":228,"width":16.0,"height":18.0,"uid":null,"order":160,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

X

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":185.1428571428571,"y":587.0,"rotation":0.0,"id":109,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":98,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":74,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":99,"width":71.42857142857143,"height":50.0,"uid":null,"order":77,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":98}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":98}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":100,"width":50.0,"height":18.0,"uid":null,"order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":98,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outer
PaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

working

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":95,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":66,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":71.42857142857143,"height":50.0,"uid":null,"order":69,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":95}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":95}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":97,"width":38.0,"height":18.0,"uid":null,"order":72,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":95,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":30,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":24,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":31,"width":110.00000000000001,"height":25.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":32}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":32,"width":110.00000000000001,"height":25.0,"uid":null,"order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":33,"width":110.00000000000001,"height":55.0,"uid":null,"order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":30},{"magnitude":-1,"id":32}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":32,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":184.21428571428567,"y":450.0,"rotation":0.0,"id":253,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":173,"lockAspectRatio":false,"lockShape":false,"children":[{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":125,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":83,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":110.00000000000001,"height":25.0,"uid":null,"order":86,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":127}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{
"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":110.00000000000001,"height":25.0,"uid":null,"order":90,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":128,"width":110.00000000000001,"height":55.0,"uid":null,"order":93,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":125},{"magnitude":-1,"id":127}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":127,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":122,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":95,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":123,"width":71.42857142857143,"height":50.0,"uid":null,"order":98,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":122}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":122}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShad
ow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":124,"width":38.0,"height":18.0,"uid":null,"order":101,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":122,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":119,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":103,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":120,"width":71.42857142857143,"height":50.0,"uid":null,"order":106,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":119}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":119}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":121,"width":26.0,"height":18.0,"uid":null,"order":109,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":119,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":479.0,"y":120.74999999999994,"rotation":0.0,"id":261,"width":155.08307142857143,"height":168.072,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":174,"lockAspectRatio":false,"lockShape":false,"children":[{"x":85.65449999999998,"y":38.0,"rotation":0.0,"id":245,"width":28.0,"height":43.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":171,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":204,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.5108499095841808,-13.999999999999972],[16.0465641952984,-13.999999999999972],[16.0465641952984,39.0],[29.582278481012622,39.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":null},{"x":89.65449999999998,"y":25.0,"rotation":0.0,"id":244,"width":24.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":169,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":192,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath
":[[-1.4891500904158192,-0.9999999999999716],[7.534659433393699,-0.9999999999999716],[16.558468957203104,-0.9999999999999716],[25.582278481012622,-0.9999999999999716]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":null},{"x":115.2367784810126,"y":62.0,"rotation":0.0,"id":204,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":151,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":205,"width":15.0,"height":16.0,"uid":null,"order":154,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

C

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":115.2367784810126,"y":9.000000000000028,"rotation":0.0,"id":192,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":148,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":201,"width":15.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":65.0007929475588,"y":9.000000000000028,"rotation":0.0,"id":193,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":141,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":194,"width":14.0,"height":18.0,"uid":null,"order":144,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":55.08307142857143,"y":0.0,"rotation":0.0,"id":195,"width":100.0,"height":133.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.speech_bubble_right","order":129,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"magnitude":1,"id":197},{"magnitude":1,"id":198}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":196,"width":100.0,"height":118.0,"uid":null,"order":132,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":195,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":195},{"magnitude":-1,"id":198}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":195}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":197,"width":100.0,"height":29.0,"uid":null,"order":136,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":195}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"ou
terPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":36.0,"y":117.0,"rotation":0.0,"id":198,"width":24.0,"height":15.0,"uid":null,"order":139,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":24}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":15}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":196,"px":1.0,"py":1.0,"xOffset":-64.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble_right","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":67.0,"rotation":0.0,"id":180,"width":67.309,"height":101.072,"uid":"com.gliffy.shape.cisco.cisco_v1.buildings.generic_building","order":126,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.buildings.generic_building","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":182,"width":56.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Company

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":231.1785714285715,"y":204.78599999999997,"rotation":0.0,"id":0,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.female_user","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.female_user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":43.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Person

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":272.07142857142856,"y":120.286,"rotation":0.0,"id":171,"width":100.0,"height":132.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.speech_bubble_right","order":112,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"magnitude":1,"id":173},{"magnitude":1,"id":174}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":172,"width":100.0,"height":117.0,"uid":null,"order":114,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":171,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":171},{"magnitude":-1,"id":174}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":171}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":173,"width":100.0,"height":29.0,"uid":null,"order":117,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":171}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPa
ddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":36.0,"y":116.0,"rotation":0.0,"id":174,"width":24.0,"height":15.0,"uid":null,"order":119,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":24}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":15}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":172,"px":1.0,"py":1.0,"xOffset":-64.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble_right","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":310.5,"y":146.78599999999997,"rotation":0.0,"id":239,"width":20.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":167,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":152,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":237,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.917721518987321,-4.0],[6.078661844484657,-4.0],[15.075045207956578,-4.0],[24.071428571428555,-4.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":333.8354430379747,"y":182.74999999999994,"rotation":0.0,"id":264,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","
order":175,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":265,"width":21.0,"height":18.0,"uid":null,"order":177,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 N

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":284.4177215189874,"y":127.78599999999997,"rotation":0.0,"id":152,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":120,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":153,"width":14.0,"height":18.0,"uid":null,"order":122,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":334.57142857142856,"y":127.78599999999997,"rotation":0.0,"id":237,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":164,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":238,"width":16.0,"height":18.0,"uid":null,"order":166,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

X

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":500.0,"rotation":0.0,"id":40,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":1,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":41,"width":71.42857142857143,"height":50.0,"uid":null,"order":3,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":40}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":40}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":42,"width":26.0,"height":18.0,"uid":null,"order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":40,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":454.99999999999994,"y":461.0,"rotation":0.0,"id":16,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":15,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":17,"width":110.00000000000001,"height":25.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":18}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":18,"width":110.00000000000001,"height":25.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":19,"width":110.00000000000001,"height":55.0,"uid":null,"order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":16},{"magnitude":-1,"id":18}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":18,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":450.0,"rotation":0.0,"id":37,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":35,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":38,"width":71.42857142857143,"height":50.0,"uid":null,"order":37,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":37}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":37}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,
"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":39,"width":38.0,"height":18.0,"uid":null,"order":39,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":37,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":443.4177215189873,"y":513.0,"rotation":0.0,"id":229,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":161,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":230,"width":15.0,"height":16.0,"uid":null,"order":163,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":630.0,"rotation":0.0,"id":63,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":40,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":64,"width":71.42857142857143,"height":50.0,"uid":null,"order":42,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":63}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":63}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":65,"width":68.0,"height":18.0,"uid":null,"order":44,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":63,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

production

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":454.99999999999994,"y":591.0,"rotation":0.0,"id":58,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":45,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":59,"width":110.00000000000001,"height":25.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":60}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":60,"width":110.00000000000001,"height":25.0,"uid":null,"order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":61,"width":110.00000000000001,"height":55.0,"uid":null,"order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":58},{"magnitude":-1,"id":60}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":60,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":580.0,"rotation":0.0,"id":55,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":53,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":56,"width":71.42857142857143,"height":50.0,"uid":null,"order":55,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":55}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":55}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,
"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":57,"width":28.0,"height":18.0,"uid":null,"order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":55,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

test

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":443.4177215189873,"y":646.0,"rotation":0.0,"id":221,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":155,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":222,"width":15.0,"height":16.0,"uid":null,"order":157,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

C

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":745.0,"rotation":0.0,"id":281,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":179,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":282,"width":71.42857142857143,"height":50.0,"uid":null,"order":181,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":281}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":281}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":283,"width":48.0,"height":18.0,"uid":null,"order":183,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":281,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

release

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":454.99999999999994,"y":706.0,"rotation":0.0,"id":277,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":184,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":278,"width":110.00000000000001,"height":25.0,"uid":null,"order":186,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":279}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":279,"width":110.00000000000001,"height":25.0,"uid":null,"order":189,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":280,"width":110.00000000000001,"height":55.0,"uid":null,"order":191,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":277},{"magnitude":-1,"id":279}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":279,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":565.0,"y":695.0,"rotation":0.0,"id":274,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":192,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":71.42857142857143,"height":50.0,"uid":null,"order":194,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":274}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":274}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropSha
dow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285666,"y":0.0,"rotation":0.0,"id":276,"width":26.0,"height":18.0,"uid":null,"order":196,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":274,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

7.5

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":360.4891500904159,"y":510.0,"rotation":0.0,"id":289,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":197,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":290,"width":21.0,"height":18.0,"uid":null,"order":199,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 N

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":332.57142857142856,"y":532.0,"rotation":0.0,"id":301,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":205,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.4177215189874,"y":670.0,"rotation":0.0,"id":302,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":206,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":466.5822784810126,"y":667.0,"rotation":0.0,"id":303,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":207,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":621.401335443038,"y":508.0,"rotation":0.0,"id":306,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":209,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_h
oc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":621.401335443038,"y":459.0,"rotation":0.0,"id":307,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":210,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":621.401335443038,"y":589.0,"rotation":0.0,"id":308,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":211,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":186.21428571428567,"y":594.0,"rotation":0.0,"id":309,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":212,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":189.21428571428567,"y":644.0,"rotation":0.0,"id":310,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":213,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Sh
ape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":810.0,"y":358.5,"rotation":0.0,"id":164,"width":217.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":110,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A offline key is used to create repository keys. Offline keys belong to a person or an organization. Resides client-side. You should store these in a safe place and back them up. 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":810.0,"y":487.5,"rotation":0.0,"id":170,"width":217.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":111,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A tagging key is associated with an image repository. publishers with this key can push or pull any tag in this repository. This resides on client-side.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":810.0,"y":587.0,"rotation":0.0,"id":298,"width":217.0,"height":42.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":203,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A timestamp key is associated with an image repository. This is created by Docker and resides on the server.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":743.3333333333334,"y":681.0,"rotation":0.0,"id":314,"width":283.66666666666663,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":215,"lockAspectRatio":false,"lockShape":false,"children":[{"x":66.66666666666663,"y":4.0,"rotation":0.0,"id":312,"width":217.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":214,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Signed tag.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":0.0,"rotation":0.0,"id":304,"width":33.333333333333336,"height":20.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":208,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"}],"layers":[{"guid":"dockVlz9GmcW","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":216}],"shapeStyles":{},"lineStyles":{"global":{"strokeWidth":1,"endArrow":17}},"textStyles":{"global":{"size":"16px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.cisco.cisco_v1.buildings","com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.bpmn.bpmn_v1.events","com.gliffy.libraries.bpmn.bpmn_v1.activities","com.gliffy.libraries.bpmn.bpmn_v1.data_artifacts","com.gliffy.libraries.bpmn.bpmn_v1.gateways","com.gliffy.libraries.bpmn.bpmn_v1.connectors","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images"],"lastSeriali
zed":1439068390533},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/security/trust/images/trust_components.gliffy000066400000000000000000002245701267010174400251340ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":881,"height":704,"nodeIndex":316,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":10,"y":10},"max":{"x":880.0000000000001,"y":703.7139999999999}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":10.0,"y":199.714,"rotation":0.0,"id":79,"width":531.0,"height":500.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":389.714,"rotation":0.0,"id":40,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":1,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":41,"width":71.42857142857143,"height":50.0,"uid":null,"order":3,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":40}],"minWidth":0.0,"growParent":false,"pad
ding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":40}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":42,"width":26.0,"height":18.0,"uid":null,"order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":40,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":82.1785714285715,"y":94.49999999999997,"rotation":0.0,"id":0,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.female_user","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.female_user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":43.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Person

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":219.714,"rotation":0.0,"id":2,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v4.business.user_group","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user_group","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Organization

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":141.0,"y":229.714,"rotation":0.0,"id":11,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.user","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":12,"width":48.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Account

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":350.714,"rotation":0.0,"id":16,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":15,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":17,"width":110.00000000000001,"height":25.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":18}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":18,"width":110.00000000000001,"height":25.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":19,"width":110.00000000000001,"height":55.0,"uid":null,"order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":16},{"magnitude":-1,"id":18}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":18,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":339.714,"rotation":0.0,"id":37,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":35,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":38,"width":71.42857142857143,"height":50.0,"uid":null,"order":37,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":37}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":37}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":fals
e,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":39,"width":38.0,"height":18.0,"uid":null,"order":39,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":37,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":519.7139999999999,"rotation":0.0,"id":63,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":40,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":64,"width":71.42857142857143,"height":50.0,"uid":null,"order":42,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":63}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":63}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":65,"width":68.0,"height":18.0,"uid":null,"order":44,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":63,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

production

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":480.71399999999994,"rotation":0.0,"id":58,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":45,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":59,"width":110.00000000000001,"height":25.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":60}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":60,"width":110.00000000000001,"height":25.0,"uid":null,"order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":61,"width":110.00000000000001,"height":55.0,"uid":null,"order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":58},{"magnitude":-1,"id":60}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":60,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":469.714,"rotation":0.0,"id":55,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":53,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":56,"width":71.42857142857143,"height":50.0,"uid":null,"order":55,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":55}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":55}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":fals
e,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":57,"width":28.0,"height":18.0,"uid":null,"order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":55,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

test

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":10.000000000000036,"y":209.714,"rotation":0.0,"id":82,"width":108.99999999999999,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":581.0,"y":229.714,"rotation":0.0,"id":86,"width":61.0,"height":79.0,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":87,"width":62.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Offline key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":581.0,"y":344.714,"rotation":0.0,"id":88,"width":61.0,"height":79.0,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":62,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":89,"width":70.0,"height":14.0,"uid":null,"order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Tagging key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":36.142857142857125,"y":476.71399999999994,"rotation":0.0,"id":109,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":98,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":74,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":99,"width":71.42857142857143,"height":50.0,"uid":null,"order":77,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":98}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":98}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":100,"width":50.0,"height":18.0,"uid":null,"order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":98,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingR
ight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

working

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":95,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":66,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":71.42857142857143,"height":50.0,"uid":null,"order":69,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":95}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":95}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":97,"width":38.0,"height":18.0,"uid":null,"order":72,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":95,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":30,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":24,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":31,"width":110.00000000000001,"height":25.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":32}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":32,"width":110.00000000000001,"height":25.0,"uid":null,"order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":33,"width":110.00000000000001,"height":55.0,"uid":null,"order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":30},{"magnitude":-1,"id":32}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":32,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":661.0,"y":248.214,"rotation":0.0,"id":164,"width":217.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":110,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

An offline key is used to create tagging keys. Offline keys belong to a person or an organization. They reside client-side. You should store these in a safe place and back them up.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":661.0,"y":377.214,"rotation":0.0,"id":170,"width":217.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":111,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A tagging key is associated with an image repository. Creators with this key can push or pull any tag in this repository. This resides client-side.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":123.07142857142856,"y":10.0,"rotation":0.0,"id":171,"width":100.0,"height":132.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.speech_bubble_right","order":112,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"magnitude":1,"id":173},{"magnitude":1,"id":174}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":172,"width":100.0,"height":117.0,"uid":null,"order":114,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":171,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":171},{"magnitude":-1,"id":174}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":171}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":173,"width":100.0,"height":29.0,"uid":null,"order":117,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":171}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6
,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":36.0,"y":116.0,"rotation":0.0,"id":174,"width":24.0,"height":15.0,"uid":null,"order":119,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":24}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":15}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":172,"px":1.0,"py":1.0,"xOffset":-64.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble_right","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":135.41772151898738,"y":17.499999999999968,"rotation":0.0,"id":152,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":120,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":153,"width":14.0,"height":18.0,"uid":null,"order":122,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":294.4177215189873,"y":535.7139999999999,"rotation":0.0,"id":221,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":155,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":222,"width":15.0,"height":16.0,"uid":null,"order":157,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

C

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":211.48915009041588,"y":539.7139999999999,"rotation":0.0,"id":227,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":158,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":228,"width":16.0,"height":18.0,"uid":null,"order":160,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

X

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":294.4177215189873,"y":402.714,"rotation":0.0,"id":229,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":161,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":230,"width":15.0,"height":16.0,"uid":null,"order":163,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":185.57142857142856,"y":17.499999999999968,"rotation":0.0,"id":237,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":164,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":238,"width":16.0,"height":18.0,"uid":null,"order":166,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

X

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":161.5,"y":36.49999999999997,"rotation":0.0,"id":239,"width":20.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":167,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":152,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":237,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.9177215189872925,-4.0],[6.078661844484657,-4.0],[15.075045207956606,-4.0],[24.071428571428555,-4.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":266.0,"y":202.714,"rotation":0.0,"id":250,"width":7.0,"height":413.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":172,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":79,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[3.5,-3.0],[9.5,496.99999999999994]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":35.21428571428568,"y":339.714,"rotation":0.0,"id":253,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":173,"lockAspectRatio":false,"lockShape":false,"children":[{"x":77.85714285714286,"y":
8.0,"rotation":0.0,"id":125,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":83,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":110.00000000000001,"height":25.0,"uid":null,"order":86,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":127}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":110.00000000000001,"height":25.0,"uid":null,"order":90,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":128,"width":110.00000000000001,"height":55.0,"uid":null,"order":93,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":125},{"magnitude":-1,"id":127}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":127,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":122,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":95,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":123,"width":71.42857142857143,"height":50.0,"uid":null,"order":98,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":122}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":122}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShad
ow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":124,"width":38.0,"height":18.0,"uid":null,"order":101,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":122,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":119,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":103,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":120,"width":71.42857142857143,"height":50.0,"uid":null,"order":106,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":119}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":119}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":121,"width":26.0,"height":18.0,"uid":null,"order":109,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":119,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":10.463999999999942,"rotation":0.0,"id":261,"width":155.08307142857143,"height":168.072,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":174,"lockAspectRatio":false,"lockShape":false,"children":[{"x":85.65449999999998,"y":38.0,"rotation":0.0,"id":245,"width":28.0,"height":43.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":171,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":204,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.510849909584124,-13.999999999999972],[16.0465641952984,-13.999999999999972],[16.0465641952984,39.0],[29.582278481012622,39.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":null},{"x":89.65449999999998,"y":25.0,"rotation":0.0,"id":244,"width":24.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":169,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":192,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath"
:[[-1.489150090415876,-0.9999999999999716],[7.534659433393642,-0.9999999999999716],[16.558468957203104,-0.9999999999999716],[25.582278481012622,-0.9999999999999716]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":null},{"x":115.2367784810126,"y":62.0,"rotation":0.0,"id":204,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":151,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":205,"width":15.0,"height":16.0,"uid":null,"order":154,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

C

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":115.2367784810126,"y":9.000000000000028,"rotation":0.0,"id":192,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":148,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":201,"width":15.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":65.0007929475588,"y":9.000000000000028,"rotation":0.0,"id":193,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":141,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ff0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":194,"width":14.0,"height":18.0,"uid":null,"order":144,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null},{"x":55.08307142857143,"y":0.0,"rotation":0.0,"id":195,"width":100.0,"height":133.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.speech_bubble_right","order":129,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"magnitude":1,"id":197},{"magnitude":1,"id":198}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":196,"width":100.0,"height":118.0,"uid":null,"order":132,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":195,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":195},{"magnitude":-1,"id":198}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":195}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":197,"width":100.0,"height":29.0,"uid":null,"order":136,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":195}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"ou
terPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":36.0,"y":117.0,"rotation":0.0,"id":198,"width":24.0,"height":15.0,"uid":null,"order":139,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":24}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":15}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":196,"px":1.0,"py":1.0,"xOffset":-64.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.speech_bubble_right","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":67.0,"rotation":0.0,"id":180,"width":67.309,"height":101.072,"uid":"com.gliffy.shape.cisco.cisco_v1.buildings.generic_building","order":126,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.buildings.generic_building","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":182,"width":56.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Company

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":184.8354430379747,"y":72.46399999999994,"rotation":0.0,"id":264,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":175,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":265,"width":21.0,"height":18.0,"uid":null,"order":177,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 N

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":162.0,"y":36.714,"rotation":0.0,"id":268,"width":18.0,"height":53.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":178,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":152,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":264,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":17,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-3.4177215189872925,-4.214000000000027],[9.708860759493689,-4.214000000000027],[9.708860759493689,50.74999999999994],[22.8354430379747,50.74999999999994]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":634.7139999999999,"rotation":0.0,"id":281,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":179,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":282,"width":71.42857142857143,"height":50.0,"uid":null,"order":181,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":281}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":281}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"t
id":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":283,"width":48.0,"height":18.0,"uid":null,"order":183,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":281,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

release

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":595.7139999999999,"rotation":0.0,"id":277,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":184,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":278,"width":110.00000000000001,"height":25.0,"uid":null,"order":186,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":279}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":279,"width":110.00000000000001,"height":25.0,"uid":null,"order":189,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":280,"width":110.00000000000001,"height":55.0,"uid":null,"order":191,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":277},{"magnitude":-1,"id":279}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":279,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":584.7139999999999,"rotation":0.0,"id":274,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":192,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":71.42857142857143,"height":50.0,"uid":null,"order":194,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":274}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":274}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":n
ull,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":276,"width":26.0,"height":18.0,"uid":null,"order":196,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":274,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

7.5

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":211.48915009041588,"y":399.714,"rotation":0.0,"id":289,"width":23.16455696202532,"height":30.000000000000007,"uid":"com.gliffy.shape.network.network_v4.business.encrypted","order":197,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.encrypted","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":290,"width":21.0,"height":18.0,"uid":null,"order":199,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 N

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":584.0,"y":467.714,"rotation":0.0,"id":294,"width":54.0,"height":54.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":200,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":297,"width":88.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Timestamp Key

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":661.0,"y":476.714,"rotation":0.0,"id":298,"width":217.0,"height":42.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":203,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

A timestamp key is associated with an image repository. This is created by Docker and resides on the server.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":316.5822784810126,"y":420.714,"rotation":0.0,"id":299,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":204,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":183.57142857142856,"y":421.714,"rotation":0.0,"id":301,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":205,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":181.41772151898738,"y":559.7139999999999,"rotation":0.0,"id":302,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":206,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":317.5822784810126,"y":556.7139999999999,"rotation":0.0,"id":303,"width":30.0,"height":30.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.events.timer_intermediate","order":207,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.sten
cil.timer_intermediate.bpmn_v1","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":397.714,"rotation":0.0,"id":306,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":209,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":348.714,"rotation":0.0,"id":307,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":210,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":478.714,"rotation":0.0,"id":308,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":211,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":37.214285714285666,"y":483.714,"rotation":0.0,"id":309,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":212,"lockAspectRatio":true,"lockShape":fal
se,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":40.214285714285666,"y":533.7139999999999,"rotation":0.0,"id":310,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":213,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":594.3333333333335,"y":570.7139999999999,"rotation":0.0,"id":314,"width":283.66666666666663,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":215,"lockAspectRatio":false,"lockShape":false,"children":[{"x":66.66666666666663,"y":4.0,"rotation":0.0,"id":312,"width":217.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":214,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Signed tag.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":0.0,"rotation":0.0,"id":304,"width":33.333333333333336,"height":20.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":208,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"}],"layers":[{"guid":"dockVlz9GmcW","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":216}],"shapeStyles":{},"lineStyles":{"global":{"strokeWidth":1,"endArrow":17}},"textStyles":{"global":{"size":"16px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.cisco.cisco_v1.buildings","com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.bpmn.bpmn_v1.events","com.gliffy.libraries.bpmn.bpmn_v1.activities","com.gliffy.libraries.bpmn.bpmn_v1.data_artifacts","com.gliffy.libraries.bpmn.bpmn_v1.gateways","com.gliffy.libraries.bpmn.bpmn_v1.connectors","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images"],"lastSeriali
zed":1439174260766},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/security/trust/images/trust_components.png000066400000000000000000003622471267010174400244440ustar00rootroot00000000000000PNG  IHDRN IDATx^xTֆ?zBP.EB@z)" 6\6EP"(* !  =ɤϙ9gÃ̜˻^Ѝ7nHHHHHH ;s$@$@$@$@$@$Pr"  Ss$@$@$@$@$@$@A9@$@$@$@$@$@~JO a ! ) B?5e  e C@@",#RlHX^}cH .>YIȮxkYH]0r ?,#s  p BWvs;g)7믿3:^%><Fn:.ڽ{w{B$@$(}ĖBC`@6`HR+g؁  #@A'7eq0olw/ ._h 8>o/)2!46=cp|qڤK: SahY?<,+eHXk~HZ$yr%p 0a0uR^%UJWhP#S%b3z C{6è!(]!c1Cϲ8Y`oHH_P [hǀݻ>Li@7xpMذ4F E xM(\f?Qh=dr0t[gOtX"d?1`0 U">1*b@+1ЭU1`ԋ7Rae$@$@LЗk36#FX?II - \s YDzHj^,\}'A TmGXDR2ԿkamQ%RҮeq2"M( =aLy(<<%b@ R/. ,]j )"~܉[" NBɀbVbR}፯7Y @Epwx_^8m[$` hJ#X2Q(ZȹEPȱ3?uhy 9PBG%b9B'& ?&@A'wz!®C˂Nݶm BG3.?%W)A(B(M2$5~Z(VbI_љ3- I4%!7n2G+qWZ9಍G+w[J" W뒯x BO /ɈFRPK-e+~29L8\(T(3aOVpyK*VxݸXlS_D"ve4z5t5M] .]=n 3 B? /QQQ駟G}4( aEBM+G tsQz7WEصmZ!Ah}a{T1'  |Ppj!oi((KZÇ 4" { ,Pe \sG9Ą!m Ɋq5v|P V^f?H\u߾r=ݳ9tWՙ]pG$@$@PɔpxFA(+pa%j֬D.^)))YPD0u\0Qd Ə]x%* )) of4U/dbmVx%LT<:Ϣ^৽ p]6X=n7 @V~2#^hYyۇM6}ݗ:O&:IPVi|?!yHHmi^W 7X@ Kד x  BOеB-Y=z [(@@s͂P_|s'@E9˰"^SNP1A$@$@~EO̭kaPrJڵ B-zz !0::cƌQكCBBM͎;:u(1(f6H$@$Z^| UV$$Tuֺ-AAo ", X7'نlR |oV<ԧRӯ83 uobo>xvllu겇íF  JOf 4jI**`j5k}Md$0jK=_^5AOT8*Q e{ /& 蚇"7mL1:RA:'a) HH\A= ] ?"׍ԩr:pŖ|h'R2nӞ l Csikx M@< w*QF$@$(r:k᧳nW\ tE3V^tY\2(S4 ~sz3!/~5*_p,m;{uEac)|cqtC-^L$@$@y B?K z.x8X>X`VuTJ#nʭuW*Ƅ!mѭU.{]pH$@$@Y PɌLTӥ t)dy=t x B3Y1sglXl .l0ŋQLl۶ ͚5Stw;wVZ8x fE4`tfÀ ?!O&)I$@$`: B{Ff-bccч<+Xe߾}p5]vŚ5kPhQ̙3cǎUx 7Y0Ч x<  a)~W^Z&~/x ;v5jw֭S"b=FݻwcڴiX*g 2d^ys M(f By"6mjBsH{lHH Pz  ̾PFAv)gr4o>:2:$5ܰJȵm$vazz:ڴi 2.KX+QpE_  [; ׯc^֖-[7ж[VϡSh{GǏG}*U~h~Kf ׫=ZH>}d2et  B1e1zTX?JBڤI{W <<aZGYO0bGMߊ+[>35k뇟E1PM=c7of?7FPȬHH/P ¤$TFH𰰰0ԯ_ߴihYFk׮ESN75c <:zeߥf3gΠvʕ+1p@DFFB""4ib_ǜH{8 J$@$g(f.4]xײ۬fL3aFYo0cJ©#Gw^k6_#)!^xp_;#ӧ_6e=L4  z^oB`BÛ;о}{whԨ:vBܹs1|pL{ /cFC 5j{EUN<F +Wrq[Μ9ee)Y)  9> 3*e3J6ĺue˖*LQ3'S4P`߾}Q-Rd__S%"%p^eӃU  B?1 ogfBV-" %-[fYfO4i$c;v,>+2t3*ZT/_F%]\({Ŝ & BߴkQ4h <LHR%KVVσcv@Vu3t˛a VJ$@$.F}a9>w3߸-jcZ(hY襤e`3kI4]jW0̐n@"yh A~܉["лm}tJC  k &?EN 7pHHe(]ڽ yBhA(^6Ɂ1[[u 
U\E_{咶lC={6BCCQpaj^s omǧvgP ^…ƍ:+(Q0v7e#IH<'X}慟9}4,XRJaر[JMT!r$8f_Hi۝N4p1a0fZDO1θS IDAT!T  zAاOKܹ3+; h1(0="EyR/]wX'>%IH!@A5/Ν÷~˗/E7MЫm=&:vʀ<*ˋ.*|T\Ŕ]/ĝ8oB@S :!t2OhĔt9_ƽ %Kt%CJJ7 ~œ5PBGO[?>fHQoN;dD$@$vn7k: K./ٳgahFU!|WZIKxfZ:SΌZW< ntO &e˖PlY>- eWη. =G_yQjNxqCM$@$@A#`~'}qu|wSLq(DLK&;A,AOUXohgc-yS 53(V3x>" s@L9drسM}10(VĮ$1bIKJ%{ooSC{6s7ysiHH-(݂Bcڵغu+f/r;e{EHs_~V{~F.G|Eܹso;RD\k۴C # $@$@f 4 'N`…0z\)GFF"%%Ee/81K9FL%SG^P!x)F}EG?n %JȕS: Y9  C9M_p<_~,"eUQ:;x3D >9{5b/+AiggP뻯Bϒ%K{n?UX^|I{Α|Й{IH|Xq !bHMMU%TPj9lԩ0`d!غukHA $ю1;囏a@K~jKz _CÛ&*1(6d"%A'dQFC7ocĈ:tJ@#كIE;ѳM=4Y^enR:" :}-:Z\p# p3^_ZhHUѢEN:\2]_~EV-a=zO<"AF[^fNNgs?K(hs d`"gP%$e< Xh$@^zy 3@1mܸ$g0&]+jdII@ߎLSG }$@$@ tk (% %x . Sc\EQCYFmFu*#&,ߊ5ە"{f#R]R//B_2X7a>|iii TP x߿?F>\u߾b9ѨrD<~ѩ#i\0HH@ B=Z_]hȢ/&&Fy C c3gDJT61c5iXЋ7v]:?+׮ڕ`_֢勂P<$|411/_VB9B<:Ϣ~cX 2٫@i,y= k PZ󗅆{q IAA趩 Oa;_%f`(61˂0Rw}7£S) )HH_PEjYF3(bP Lrkc̜5.AP4a.+(I:uꤎl.lHHKPzBI8+2 Bggw g ſl7c,!zrd^qϱcT)B B0;I$@$B.ΦEƘН3vT+R{)_n{ 7M1 ˂0 $GN$@$; B?L*'Sc ‡ Q瓱nTv"> n_G"<*1o= 3cGR|B)˞lBAhЌaR@   t9r4HAHA螙翭f)XaVup{Zh^ IAh(NVF$@$B.mDSN'`Ox,6=eNBҺq5*wZAhT()~"E JfQ,ACh= BHHS Pzel14VlPR.eq1_eu?@[̃uGA* hq,GN'XD'3_×{uGq! aXyZ-ߋlbP 4n6Rǒ5 kyݚ a8{>kRj4G ( s –+dluKݚ t $Z?s ҁųOV;#)Wy ;Bkװ\ !CF}G 8D!l$mXB)\! JP)ՑA(X )iWpf"  yk]CjEvb+ (fx(QZʟCxu2p{51(ANUCxO U{&d\lǖφ;bBu $@$@N tѷWP@iCL0[ ["p9_CFkU.>E_mT 4T.Aاu]tnVCO i|o>qso BC2  W te;ڐd1cg!(o{<Δ[7RR:3xNK@1ه@AӨ\NAhKD$@$Zkk,Of[aZ,ݤitn It|fŽ@er:b~Z{=MdhL&5ZnIBjFeo#gq2.sϡ#uI$ѫ׮B@/{p>9ݮ n۞gB^σWC"16·87-/" 0@V'ClM";$1  Eylڸ*y$…^bw&GnCȳ-y@?yh;|HAnxHJL*\dq5_XE BbE5UZFPryIrqC'U1aH[tkUz\po$ pUi˳w@pV~4B. U-W.ؖnGbr:lEeR‹\p!l6E$@$`%@A dO^l L>š1 J ?ױvWKv ›~ Bщn~<B@_{Dp1dT _炋9  EMA"B B3:\p-oF$@$`!@A膙 k#g0(s7tzz?yf 2}$u놝;wb׮]hӦa ?PzlE lU2 B־A&PH,/1u&wbCxW3-#gV<#6”œشiΜ9B y|.xϙ# &@Abf}O  a/]mСCŋcVA("PQx뭷f&. 
C /mZ :}߯^eo߾XhJ.m|sTHH .A9d~ޔ.a ؾ}{۷O]PJ!tX>\S(YE% }Yf*DT lZbȨ&%l;l9dBF_V[ gSD?u/$M…yh}H B>6)IAjlHH@PpfuwPHAX #22]vURz]#񡦌8v;mHB=Bի8p VX{"$$DFî 4 %+" AP,g..{Q:3U꺗`Aߣlٲ߿xƌJ[!f/́LyЧ кq+_cEmv)CR:7 8IIn.ڱE-L[ `A8{lMOGEJ+Ș!t~j5J ^xoBkpҤICڌMj!&!sO߇w=}لGwG嫸zJ?DD\r-$x0a'Ⳑ}uX{|!>lLD.Upw >> R0@%}fz"zIH#@AiS<[۶m?X]\00' ȍ ŬE;haR1qX{ ܑ@Bܹs1|p녲8ݻ7˧7 jA%Xb/eS^wjZSp2.)UNCRYFX,Nu>o茟6Elb\d9Ndn/_<+Wese  BL&슎-jU BC ~:`yY&bۑ,#ynp=Qf}P& }.|Dw%&/v,Ii(=cv`/HHPlKV\}܄;ڵ޽;-jY}A+ :+cR=̿  (Mka>&{GCREiAAAn$}Yx Ck\01[   t6wVn=ԭ F uycG!1:NFnHLL2eʠ}{'\C؉4缵bӡ8y ĖјllqӬ//H|5s%膃Qw\etPb 5e>{u Wl{_^7vAb(:իy&G 22N:hܸ1a(M٦.BWF܅̤29m|Y>C!X.,0sEcod͡;1:h^=0y/,$@$@$*&!(P+|Z>B) 6mR+Zc'DS<07㝫-A|9 Q@ Cʕ=bHH^%mV- w0x %dX̌B<Crr2}Fк%'[MjjP|(nF,e cݵ.]x(I薄K89% x Bœ5vZpꏻˁ}{p)r%ђd2 t7,'PVA"}Gh~;uO;?9Q+o= 3=ݧc{b̜5YBI{l|ʗ )ۊMHI˰5MƨXzaq=SN!<<\=DzE g&Z_c'̞x'k75v:r ҁ&Xq\/^@jj:EhEOѣGnYyFI CEzHH K>TWwlQ SGz jLo?u;'Bn:IVB)q߮9AyƆ>35~cQ>ߚ9BQEehR* MdG(j 1"A|YaHHOPdh"{=J35dǣ/"6[ sayC}v]%4T{Г"^EYP1 is hp7Xc'_7xIDͰri?Ī7ht"32ބ]}qC*Ys){'{+n!;3f,PԬH6[_-(-j]H)甆peaEDZG=*+`!  w 4_^l~L1&hP35vqJhj_E;Ϟew!BOU=Y+~g\KWTQha QF_r΃#c.*o]za|RZ syۦ~|3W^H9Ϟ={>G^ CHH|IƔ#'$d6Tp oq^I(!_V4֍9MϞT-4`$pl/NT.[Wƴ;Q!t $+G^y ?x5jq "IHH A2{ _[(Jװ12'Uٲ=Xh BڸZ7KWaWz:wIc('?yO^}EB+ulwroL"T.f;o{& R3!qwY?y(QDHR&Mp/SW (]4 v`zb!;4OZ)G^r '{ +w9'  O"@AFk>6[{ { 'kROavϠ%okWdşv3Ƹq2il;z|._]1Bb1(XʙH?la*HNȑeT>?lWѼ护2BH^>@y6hIc욢HHt ԍBd" ;ilC6H۳'ALo[BB7oa?xi-"4tR>azW o22F.2iIHH@ B̺4lAM ! IDATBDmpZAI!E Iafuxz{^u֫R݌o_w@MhkxgD8os.|ٸ*I^xiA$Z &(E83sy/ L`F.*1"Ҟڰ)St}xLgp;]&Բj_͙%!v^Hlmb8u?!^Lk)"P+Wa cؓHWo)A((yQTF 5 P =x! "@A K™>Y=YҎݮC %i̖g90gSL܁RB٠N"N[Dw:٪^գgz~t/l=r )j~,KGRZ=)d?Ic^O$@$@P咫7=?Yk["eaHêRFє ($". ,"e:jW]\_0@,Г}Byw l6B6A@(R032=7CK4$DSO33B$@$@$` B,Z}".r][HmA3߁f4Aاu]u|DTSwļՇp%Q;=:*E ,*R$è, ,OHHH93n_GW`JZ$CY<߭UKY5"' Gz4ŗr>Ks7):{o7q DYFoØ9G9IѣhҤ {  &n|< w"hٰ*ZWAp ,"bvEe"*JRBVAA^9ZG"bx|{l5hRMI@f Bu=8v;׾ %\<*B$@$@$:cmmIq'VnѺحsfdfنPkOtr`^F+`}By¦<&+˙doR-G B9t|2.f 9+°{nRl7s?;ºNJlGǼhjBF=  BZyWPzRPR[k7exp؉suOd<^9?> tT`C$@$@$(]`bP |gL,kB0 =q]OYA 2   $@Ah]%x:(O! 
M[7nwDfchl9m XIHH Ph?I3|rAoy!=bv&Rc$YL*YdoHHH 4qFgP;cPΞxGbنp|l'A9iU-E[Ow͸lHHH&M {O%"{>[Фj'^c=B2tLڬۓ-Cyaso~\Hayc zҌa_HHHP`/9VbPQ9_pPZr]rxGRgH[ u`K$& nE>G$X{xnmT*$\/'o !5 kh B!-7&%   @AhZ_jPhlCGK,?>f=VIE@'b/Qq *󶍪z s->ZzcD8_fW>8p<\/7bӻC-lvE Rz֜aoHHHP`{_^ I(#Ɂ1[cZq}rFIfYؾ֪;I=v"X jHHH@P<ls D2yal,pT}5:O|{] & Bœ5vZ {㑽rVu/W,m0EVGC i&*9c+HHH@? By%!bqe|ͺOv{eϠ !  nOգ:#宎2Y x\aBPd(=X n}mvmc9U;%3 =.9l٪V~~#QN}A`>bEHHH /.rh}4g 44T"e`Vu8)[ Tb_KP zeIHH@ B= 6R$TD}|]D. jQ غq5,$En;rmޡuT:6;@ R HHHk PzD &e@ ?e_ FMSD^j&ת2 "/Y 5  p  B`f#$@JdlN^{1:ف ?|;uݣem<_ٲ6=  ~oCHDp!5WJb(^('}3V*+W˺)[8* @tHHHې#      P:͹Ѿ}{۷ZQ2e0g\pBox}]}Jxlrxe-wQ^A BM Q\{B.BOAzo..W(%$UJNp% DyigxxBBz$蒤JĤE~d=HHH s A(]HOQ/{j1hKJJRIe-Z}Wj~TJ*U+OB: H$ x-vm/V"b.,,LX/6 \۾奐\zm0@)b/pytE4ps[`IHHt ԍu7ddd --My :^<RJ,/_V uvcKE̙3]6V^pъ$i}|"x{C3c;tPu<B/^,^=,GħgC(m.D[;ֿђ H$ r<$j׮w=@*45%%EO,!z}!U'HHHP9b /$ a"x#iV!]vqƸ[i , B5-F$(x: ŋrʡJ*n x9 B/7 O$(}Ҭ C "";v@Æ Ѯ];sa$@>KgMˁ x1 B/6N&Xz] 푀 Cr$@>EЧ"##m64h:t01N$s(}Τ  #r$*ɈAٲeQjUW5vH| )>eN%pIlٲCNm  B3)D$"'M{/W|-[DϞ=^< mҫWƝwމRJ龟77HIIٳgQLTV͛ξ x B0@$@E;s)k֬^u^0j(|ڵ+֮]"EjJ"ȑ# u?/&o#p)lڴ uA.]/ PlHr!A8~x|G;_~UHedd`ѢEXtN¢Eb?j?!GhRc;qJ,v^K^G 55UfJ.ի{]a  t/N$@p U=EH]rZ¡C׶7?xt O?xh[ۇT^S*ѷd7| I; taܹ*M6:t(za"d1Bv9$&&cǎ*n޼y\ .DBB_Wj)K/&':)NfQfMvmf7I|! n"Ď;ĚVD̅;po\ۆ_ݻwƱo+)bTDΝ;s_Dw"EٖΝ;+1(5{a7wQ_IbR=ӧ_AC(ekԨ{HL%@Ah*^VN$@p xӒٳqqqցhH &, (Q "oܸmZk>}W_JZe˖!<<M6U}d3 */[*aÆY*^=I3c "}2dWDÇ5R^x[(}"±I&7-mœm^IC=* G0Ή@n"^6 ݸq*<ϟG-x) $TB9m+jR:%USQd\:|<#;v(i+BJ 66WV0,vr)N'Xo mR e#qU4] Ja'<is4:jaUdq4]]Cj &ې# =nIeG?|Kx4A(~u BC}7r'O4P!MuaLL5;^, :̍CϜcha" *+FVu0;ZWaS8{>@ ªujҳs Pzm3 %VA=^:g O& ɀ Bd|N !Q(Rx{)6pɮ IDATL~!ھ>GrmQG_A >>^Ǖ"%orGstݺuf?,h|Wɞ%{ll=jxRl/QKdrU ;ӓꖾM+Ddiwح k/O"n>];oU( va"Z X(y"˥wujرx Bϱ{B$@I&S> B$0ac~G=gU{L/̋#"~W̜93P~? 
C6mZN'IelLR9[>>Ǔ'O*>2O>&$* x:zPv1(ެW6z˟;Oe{x ?'IX#8z`(DH-" g]y\BQS˗ #"v,H/\sJ{쩕t眽ߵֳ~kLJӿ"(n2FZB!}Cwn O< =ztZaD2ڊԩSGWHUҍ55W\ij֨Q#b|+dN rsCBJ-)N=*9.# ?7)V k8T#)"P8YEĉ[n3_2Ob)fbUL@_~wyX;v *']wN%B9VEyi KmfHaX<ߔ@p}iqY@3lZ}4tB7Y ;SBBu(E@P|"uyVb q$#b |~B8j(ٹsT\Y:,iذϥ#o>'¼pfԌ)a.jO$1dQych.GAn'\EIӥuopU3&;TB5d@P#0)AZn_-Yo/ {`̩?שuR*$N;wNi]}'Y{-VBg%t}a"bݓDگȊԟ=֭+[VQ:z"(#0ve kʜeu6!u+K浲2caOH"rT?pHICiOͿfrF=𺭄B-Zo 1rBF!D0Ỗ}DE@Pņ$"ȐC!@]ݥKu=9\WWJ, 6 G/HQ>XV@JYl>Y2g}[WQ9B8v=DO-zHՒ8"(!́E!YwPt.^9F9Ȳra¡%TrA_&[wuvu仕d]0Onf%VgTҌ$egz oq `崮^N6S 3K`ϞD__eKEU˿W AOD-ٶbA {.r(@(!@mX Dpu wPCZ4*Udcdj` YegMTWTW-x1][X|"c;G\$[pWulh&Ɏ@!HfzzBHA^ٰ=IhhVxHT˲e>:Qfd?-z =uE)anP٣.v$kՄ8][!z\w#X zJdĐB_Vi~Nw1;'=>>!D=#}OSǾ0aI*syQL!h GLu}tRڍ%@1B۶͟1KOOݰB oAˡ,3g*),| "B)CA#C*c̓>`-TR-ytR9蠃VNؓBGH¤AW$7Ml$rF͐6%JxAȠ,C\|qEbx[ !W z"B)AmlxL%Rt񔌿mǸ2$ 衧kf^}YIS)e4 }2hl8&UBe2" /,\wc ? j),3BP P7E q æ9ޑrխ>w?3K>[O h5 X k!䡮vQ̾K R] *BH-Ad '@,{z\NmH䐌Xm6RHh&[>4:E (!Lȹ<Ď]{ 2 7Ne Yg~ͼ1{FsK—_Kqqݡ1(ƼB$U2ؽ׌JKA@=KGOJ&ZlbH[ō@VBD!HaA a*# Y&) ?ሡRA}TEP(!^;?9S*T*WJ=Lm1]~naFMM7|B1mv tY{,ICLWo|ئ" %Bo+Ec:RV:Vpm&w.˫-薆 ,y6C(.X,(x-?V꾂$3c̣PQEIfT9n复MǸ>j%S-9Q׎?$Ke$6j5_rHoR;*DZ##*"D@@ n *ZLq'TI+NٹWzBCW^]."m4b!d!֑3d-LLSJ9[\xn'gVVcD:ऩ'n`8bh6P5tҤa.]I?}IDJX"h BSWo6_ݩd WO]$/jS".xٲee裏yBr<;y[ &Ȟ={ΓE['g#A f%^p⇋eҬ%D&H2HDDC І,qB]Zבm+$(dI uE+cT M-[VAPAbmD$ g%YO81/QEPH(t~`E%t٣{$l~K͚5 .M Z=@ڢ~Uiޠ9kK7ʂRc ki'2ev^Xnֆ+IZtn;v6tl-jˠ-;N!V7s'!꜅ke5:V2I\'M9bDJM8cIHPӵk.b~X䡇7/iʮ]!"ο3t:"8nI2[!bn6L'ˈ 9SkMK6i$sCڵ(Q"#7J`}<8/- [b] JDTXGo7"xī X^>Ó8rX:řYA\L)%XB+ry$Zay晦Lѿ=2p@9M]we&^+O>\p_'䐒JW^yIpp|Ƴ&QzƳ{ꪫrHL եI>k_}ed7$ %rK_6vmMNGF[$W /3K"&SBdؼ_,\x ~D4$JZξ.\hw % ̎=X3f4jwӦMeŊrʇ~(K,N:)^wߙ~꫄_~E;Qz3Y;s +T k֬C3hmc)6~ :uJFE.&~)o}ĨH;|MٹstEJ,KY&e b mi/n\I^Ap>SO,`~xW?6t9PϋW&Zb N?N]z=|OkLerRī:^2/&iSeuh < # k LCnM6r#X>={7|csXPq'H2eq?x`q{뭷g!CȪU 7,v!m=9Ҽa?,O*Ba2I%I#!ܶm-[X V۷7D B\49yOA2@HBUnܸ I_{ÇD#0}2tPYh̘1#qÊwԩS>UT1lX65s}L{oyZ#/7̡y!9m,}jbW^=y7 >X@!nB3g\N< [C<ʕ+'72~̸`n:&OlHoamaF H$ [3z%':u*pcψNzt ]}(Yv ynX+̅ԙy2VR+1,+ۂ>cu8Fx, &BI\[_dluD{YMY=Qn Di$SL,<ݻGyĐ1@x  *,FXׯ/3A7֒ꫯy޲eK@C7d{=TX苄hׯu  
mtD^!'x`./VR/|jvpZ*~ĉa7gΜ|=w$Db . 7zP_ӹ7amѢQ0k,c%F?BTNF?k+f~<0?xǾ$o%Ce $(uMkZ*tSB+WONsAȖ\C*ƽ nM _Gp#h>WX ʎpyA]ZMnыP+)LGߏ1 "k ` aN^ȗ ߏ?h 믿.H!B7^rѱcs*cȑX%A,X`_qmժћ1,"RqUues n~X6orSue([!Dʼn뇎* ŋ_,rK `r`u|sX+uߵB\ssH-RJ=]'!}Y .^|TuXnݺոp) q 1A& ziqY368vutd%tpq kmf~-~#?V^`BHd "G뎕ue _g !V6,,BgL'!kq +"贁TG.g$!fXES`.C W_}q!6'6+)np+WV?c-a/wK auq[[W{N:wlE2~IUVUB5tb9y6m/܄0~ ׷BLGIiQ[n<[A D-W:_+V,˚݈}RIdܤ: 뼁,=zq%TA;:65'̨`mPB`B8bQ$X2JGX/ed恟&b :c؄0N7K*ֶm[{أGr$ba gwڗ*Uʸ#.$̆AqڡCВj'!ȐC]\ q~` A/9uYdDZ޳:g#sEGZ،47!Mr.̬?.=:9#skWk׮5qBПDH^9c#7շ~jWDJmK۞) _o^##&%9WwHI IDATv$ .IZ_y)j=+8kkC#`NwQu>紤貵[8^$*x&`c)BGJz% : qDbne^~eXAĜ^.XgH0BI<-6qX!+@PG,JD,]̑E~1Ѻh&B=G\/I}1Dˊut2u]gdy{nq s@ha8Hx%\;x%nw@j()A@ aJ`νA'Ԫ(LAXPe Yli&!H!|Dݣ\gvQq 8.'!L5ius:-g6άYb&cI"7$bEb&^:JGEtIf+C52H! LU,4e xr IA(N`&M (%Z3sgըQ#Iԯ_?\uyΜ92e}GXpq*~8$c+%BV&5K_#rхRuRϕB%^O~p#(qpv^Y<@^>2y1CNUsF *ڵk }ȼHY'Xe:jS8$_4%wPLC5 +ĉMvw}+!FnܸQ7onBBWk6ԏG!3g4DxkB8xɒ%r)l~1H޽MÇ E!$kNzzUHtMx|# ׌,b X!\^XP7z+b9+W.s9GƍgH^c91%>Mfpz'qVs;GS#͝ '`k)s/RVH'H a~ NW;qv'vqbQ/_oݺqqB,n•*^$Sǭ!=D̙%=6w'!L61 x y˨zE.OGdȐ/Bl 7Ewޖ-[-Ƀd`7oy'c5cٲem= oK!,w'|l߾\U)KmEd|HȖ@J(կ_Nֺ.o!Ć9vX!^s$)Į7Fxu؅[paηrO>;w4<,te˖ |q@!nBٷO<ٴO aKm-LʬY7k;w;V?\Ρi'{kߡg8]#leĮ Y n~tl?Y*OL<Z,ɉ(.@*z ]K2k TI 1Npq3vw$+:.LpRbR_BzYVP(Poe \RX 0 BD,X/^uW]ujXh"ಉ(ߗ_~iH=)<:E BK!.yB7,/B9رcZJ,gXɾnBĝl3Wpidv0L;v!^X@B%nBHs,U~3nU+:u*@n1?s'W`ikAB '!?~|>ڷo#UJSt~(KҺNJQqzN2<1+W47իgLSKeO 4"~*/ڰy,XXdHD Frۨт%H,DZ6!XX(>4&>-߯,SMiQ`qXcnnnY RiD7SBUJ2!K.5=7@ UW<y('.>}R nW_}IbɃQC v m$$~ !Z,逜` 7';~1a<DH"! 0%í%̋ /X >^X@!;vߦMB?독9b߿X#1n =۫m_ oPB*s|bz*Kl !cIhO5_&صQk` HE. 
ZIrSO XEV{;w3Ŋ͂ c,hi²!} $SR:Na% .Nb+>+,@+ZJFsd^0Fw !$8>z6nbŊĀm֐=z!> $ .$A։;СIP)d& M‚#c.3R C1NQ-!$ duJ&Nl-HO A0 狒ӹ^;Ȓl\#_Pa,CbqS4[PuF(@?l|rudf r ֹGw~u*:%!'gF;!dbyڝ|t)*}鮙s5YŠvJA 8 ?C |VT- H,+~.*  {ɚmdqX^b2揧ۏ^\&_߽ziK \ IL+!ܙgi(Anݺޞ^) Sg9xJyu>7zUx4uԭQA<tЁD~?o|r,aٰ#X@G,ƶmی *@H,iX }ĥT"bIʂUНFc2g݃€F !V[PDLHLÇ-HJW%Ng-Xmp#*&eK|lsfRpNrJjZb/"CܴmZt]$>s>wRdQtli0 k@BdT E 'PB˞Is DQ0*bv3 bRJzΰ4RG ˡSOH yftxg+A4YgQ3h'E~Sc9b -!1m4S<AqrH ݻO7|pH=ӦA{Hկz'hcQO,LX?xF K.%Jl}5!s92n8K.1}QL=| ýB8B|!v;v;dk>[j܌3 1'4oܐUJ}X+nRyݖ{4r `eRzкB[n BY=8Ppcv pu+.bs \E ȧ>J3a2LTZ'F-_rU r Cz˽:Yx±:i=xͲJI&UVI-LAYfbԲeKCذ@w^_4$fΝꆥ ?~/'$&Z=^}UC*ԱFA߿!͛'ׯ3pxxBcKzP]0(K/¸(NswK?Xhٌ_'QbFm` fmh 5Z}\o>p"#bۻbԦMc9zM!̇{HY2e!3pxw!x9K,X >39t͒X ϯj+ٹs|pʔ)!+Y8A}٨=X/u3c[Z'!@zͣH"fms $AX?b5)! ws3|ZZ\qdNȒaSONzi? ͋dpihdfc\GU%I&<{1!e]& H.N`8D,XC8ZpXNk6pʕBhbcqDwmƍ " a):cIg>N<  5jȷ!cܳg<Fg'۶mk\@ 2oסCY}!J5ba_'!dhו0 o3˩t>pƞk!]5ֹc_xU>aUQB :U-Q:rF<(x% Vƍ/tn *GwN)¾MffLF_ki@ˑBN2!$(q| "Axȸ-ˆ!+Cד$UV&) 6ODJ| ]8N``:묳@(av[yn=c`&JsOf®U@>PB pqrUKL<6MvkuT !Yp JxodīJ E IDAT"iiWj>e{ASt-QE JdB|.Yd\v,D7LIvīA.$4qKaITb18'I8}׶[8>xtZgEDm"SBH4s/ w8!*ڵu]եqF#1iyjҗ9u#rځ+qԔRI?Nw41-) l*€T̖4*i"`֭[*1@@ aV,c&qeʜeF\Բ%V:ڡy-tEnEqE,ZL.5+*€," J\;p@͚5dJ3c:x䌦GRhZt)mG/X? Leu _ߢؐzS_U9=*>|#g! "(!إKlZdIAL!1P۶mL2 WI/rA:?C#.Jn#0 믄0 j$b-Wku BJ 3 ?$[!LI],ٿΝkb5N>9} i3'! JB;xV9{*! Z&Rk׮& '(>:_( AJzqftup21R7J:;Ehs_OI J?2p@S< F^zwׇv%dD?o`TTu!MƲJ*:i´/*J+&˅r^BjQ"1~$>Cㅑlm>'ƹ瞛/i(@^0rLO4~qO:$?' ݘ1cLɎŋ5^}Wj0@I8EfH$N !ˍ~Μ9Hܵ/|zsA2lH8*5[TBM2!dŋMQIӳgOW^)Xs8dhQpȑC!qE#Q+^`^/ۛB7|!>4h 1e,w饗t"?v:b8N3/nJ7ya@~ɑG)dKM=FEݵn|h'X ; ƹW^m "V7lЬi&=Y;fz=SG_-㏍Xћ=ٮ];=e&>H:E5`3>">Y/?B aqE].  ?fCi\Yc‚Q3l|d}Hb )ӢAXTTܰe^V&Zeu**€B:j뽑7O< dHyi g 䌢:c !۷<2|P_n]vLK_z%_)JNH脛 s9GƍgF!ևXrp |ڵu:t,ZX@n 8KQw 0^VV͔(LZa @]Ç{ 6m4SbG̙3x˟WGyD8 Fɯkvorpwo%4?T^=]p{}o%B>%Y dHU2|Fl`%DndKYxn5jԐ-[ӕMd=l,]@ã,gJQT._Z*/)K,|. 
wɺ;L6Ȏ{ Yj95fj0 BLqԐ af+!=sNc9B|Ckf,959tB˽ rb8qbcZBx-W3ٶAHpzA!e4XǮ:zvE/0,w^}k!}w^Ŗ=5EXYe/P{>}!mo8'z3M7tNg 0]JӅ|Lsq";v0|K6]*@R+|I o/ǭ1), z?嗋xeR޾=+V?6l7.E_ku yP!Xo cǎM7x>?|yG رyǂf𱬑%;1p˖-y#8"~7D\@!z0`>;?֯_-O<12:xW\4/L 񣃓spp%p[ԩSk8\q3j/KƍCxѭݛ6YA׀SG,!M&$i:??vqG}: f.4XlJ2WwC$ rW_-_~tYZh,![jc: k,{Hz1B.[?=3q4nK+Y18@F/L 񣃛:!2uTc:bAp{W{sB82>/m0aoz ieNg+\zÂsϙt< [t}(!L1ю .!,V\5Rr& :-kQ cl)xM/K)8__8ɐGU҉%y?B"r8,lӍ׽s!u=@  ؛X+q??Ig'VnazPBn9݊83ds N -szπr"KZlE@P"/RI.0L#Ćq?qNZdɈ`v; 8Gg=F>G"Nw:IJ{!v3sW?t۾9@j{đ3!RHcqH:ƂaGhIXV^mQzB%q<&dfES71dD;餓xzpm#ҳgիFd߾"_lD.]B(@ 0{23YaSe-ƅlPU7"IN}2gZd|D~kW-'׺j"? e:zv ½w?WXv@PBUP Lsd =vEUԪR6pC6nYsג~dOt`Febۜ)3tFmHβ1b̜9S-IM +x≦3D .0YkG%}Q>EJÇ EId e5,<(פRZJ~ѝ4ƍ3n^8PfT)XyGL_^^&ٴ}u.A@ arp^q'rYIXS ZEjIt)ۺuACrFhcy( O/*w .ЦMs9A;NԱB10wi<:?4^ZBe!BiudNs> ŒغQ)q0H<ܞ|^|oU|@R7k?ttoq8Xϋ9s E F^X BkFڛ0Uk_? LwŸ9Ċ)1D!GWpe]fkP~@<BT6L,X`< d]6W(H(įqH"a{Ny|1SC,aEN%_AM aV!:D$!ܲeK>ovu"YW ^\:~Dҥ{1~ F` *qX p Bbn%n~'( 0ڔ#я`SI Ĕ D"/+5Al|W=aZj%eʔ1IeHai!f7nZ9 )ܹtH|"߹Nb$SL1cPG\L!v,!E$ڱ#Apf`=/vHuOJSdBȍ$ǡd`1LPg@r̡! _dKcP+_HS:R^ibL#dłm] 餓~J$$d8;a$hCᥗ^2Vl",f͚Vx3ki\$''N$WXB,}_z2.P{+%$X&8S͚5~~j,G@ a/NOH4Xx8=J>3 puId !Ipmt,d3ǃ8Ɛi%  !Cd'XE5D +WDC.U_!J^">q;MƞC!O'["K+⪉Vy?c{>^ {+4CjA@ auZf y ⊂$$DrSH )Rv5 Z" d !öWI,#F0iI(1X 7nh!V bgq/.~r |F*W$n(DB^kyl ;F.={,QȊon#0?鳷aP,+T`-Zt+kpӫE@ty7Mqwz5j(x1ͰڲS%6b-!tfфd+W,!`%ɫnbH@B< ]U2gœٳQӍt@OXU6md2w\] kv/_^½{NϞ=M12/WO 1dO9r@HZiC07mr$?p!Fz6_KJPrL!_~q dĤqlB[YIÕ[,\3I z$! 
1%_C PD@ !@~ٲe aÿȐ9b,V^mb0~ix#0 m^z}{=9nl A\tp!X}ܸqm4I:SkR`k)@BC-VH'} @]f%8,| IDATj3Agy:d0?{6k,C!2v8!58h a[Sjj׮}cm(9@&L^,jժL(A@ arp^m=oIUnsc>}zȽB<ꨣL5|SbEO&GI4PS\KA?sL>G,͛75fZK.^(9n ʁ*8*"x!PEZ-^ wˮ]K%K='#_%:B^z"!Pw"(C@ aD5RBeR!0pK )" Ju(@e!(x*"A@ E@PR⭣)YYFq%˨0ϊ%I()A@ aJ`AE@ %Q+QP~BEP?(!^(@jPBZu4E +xd߾}U"EbN: E@H>J(@(!1^P~K͚5eT"F@ oBE@PR”A)ك"7٭[7SQEP?(!^(@jPBZu4E +P aV,NBH9JS(@(!,"@PL4Iv%]v%J(@"B@ /"E@PR”­)ف+Lى5jhRXR"fDPPB\z"[o%;vΝ;KҥE@P|!Lz"()E@ aJ@@-ٱ: E (!L5:"(#p E@p!o/":u2e(>"B@ /"E@PR”­)فʕ+MaիKѢEcR: E@H:J(@,!}{~?wAE~.~[EPxwe˖-ҡC)[¢d-'v^QHуE2"fPB"d09Eol!_K"y?*V{JjդXb ;Kpx@K;X/ST=SBnkE@H 9Aw]m+N/uT>' P'6mlڴIڷo/˗(UYE d}QpB+ZL_!Pw"(C a2{ oSFG`ղgZ/^<@ݺx$K U3&.Js}E d%!7Urx"bB\I'ӧO7ʙg)~x:Uѱs$zb9Y]+"zjӯ1 ?dR9TQ5kݻD6HW] !k92k G)%Ky⹃@C 7 \\#S,g'Ϗë:5kҶѹ*@ BepI*!en=RoARd.K gΜ)?mV8^59\ !r::O"XDQ9Ȳr|R|)XTaNYy|tVٱ;K)C ^Rԩg5"DD !1hkEz_>i 2:6)/!=Qve`RfŔm:;Un`:eM9߾!)Sb[r )>)rp滘f;!$ɔKm;H`UeҼ~Ek哅kEk -jK q*"Ċ@FBnX XlҙDeT%3zT?F۴i#UDp t2PM^<ԳU,d&t po,o0"خrnfjF>͚^㓷()9 !q?93U"إuҺ.߱{LX&ZW8GK/LalD !RYZ"QTOHҙLZOTB;tf*Wm: 7!Z M1`ԛ˥ 阪'ɥ4/\B J!}O˗1y2&5?W,Zz )E c !VvVj%"BXL%}PSNjժ^O| /ꠃdžO=VFpzl]HU0Hd7|T\M[Ss \?e˜V+e#!$^a?V^]˙Mk&u)[.'}ZĀvWTԵsE ;XB(2]I0w@uۥrRLzh8Jyq.Kqk d@4P"RH)B\ '~ _v!sZVAhmm >4Ulpu,[\ڭU*^鲵[Q3BP-®(IU4]F*eF![ a]ٳgˊ+ErQGL 3@|  #PƏҠƉ{NxfB?t8!& 9N ?[. nu=.& M*!I 1&IVБG # aXݻkM:wgaeLC>(7.\#7=_uz(>^nnl:!"$ևt1HT|ҍw6g $zYBR˹ʱZȺ/VMmJIMwQ0 ˉLgF61M6uCr("#$ ̎,la(QAm _ڤak2榳ºg7bE6jǎ@4$ۼ3$BuGI*'ٷ[>y}gJ"ޙT$4n kWZC-UIEC`pAݾk,]J,<0S0=Z9-j' AP2#@Z'ԐWn;GU})K]OO˒4:p)^voWy%i!Lui?)~C,}gtR9餓v@3nӱm jҜGFWHyy^z2ydûEYS+ŢFdCRg'7D j7r@{Gg4j5rīʛ/nI 65dx^թQ*(C ! 
t׈-ZT< [BrR\IN'-/]/L[$#?Mv'>]&'֮$U+/SX)m~uk}ȉ+"w?ɌW˝$*Mt8ԫ^^f,X-K5,_Mz7)rArB zݚf,,|-ҟ=H3KNkHꥏL o (g6!w8GecaI<έbŊ׿5oπMǿNzDse /7ɧL m6=3OʖPh_A :&€k(n,C:]IG}IDJX"@Fl.6aBwn]r y۾{|Rl+2kCĆOR~a滧zHr% 1;j9i}k2u^T|`~k)W4;crߛ5ȿ&ϗ:6 |o묇\I]xJycR|x^( dr}㒞&S .K,M16N t—O>lm;L[W儣JN՛Hb3W:f:e:!tb;SI !dpVĒuYhaSCѱwuL4!+F^&Es%~ax]29RLqٽ7c=oQV\<[p!o}!f|k.eeVDR.1dF{&V0:DaQ̩WBtRYrlRN:XHDBc6ȔD eo tJL!c=&Kϛ<~ێ}c dN .Hyec]Rd"B^zMJ'`2_f@F6u۶=ҵ=4kV_Q9 K䩱dc]弦5P/VHɷlS~}vVnReBtyz2\ Ȫ;DSu W+!?dǎRB)W.x 7u'igw ZA :j.ڴn%yyUihPYqCtVUu0-ÿlS k^ˍ: @W0wKEsy[%k{4iJ4:4x|uԨU[ޟlu] 2eѣGQo3SSra<|21[1K5k /v`."<?L.o)mUwr:h==Y2kҤn- ֐n>Wζ:>|k P:].Y'_>| o8BHh,T.">ߑTWBl2Yb4o\7n&DaN$!4fa#X0:E%~|8zj Z}#fsyj& luK0_(CHw.DoA¿SN9EK$igw k_lܙZC?UI%3!?BSкZY\rf]yN̓ ֨6_ٸS$DHwp?rAFT'ϑ1#_͝s:\,Ͽ5VdK ϔѓf7_)rq3sa ͗Uwt)[.#|E :%AeXi-=k,XU:Vמ!};7S/.xSMX;WBv-!AɔDBc6Lw YuPH/hB bMQ=*h&"!d_6LX/gln?+R@#~X;AjX_֯[#OSi5!DC82a(ycQiwR^>x@>,W+']\q\fE)H aRE$0oAWBO?˥I&ҬY$"({tt! Ѥ:hBLzaSpc Zg8Bh (ӯirW[ a2:ۦA !+BHh߫O?ygdryMeoȑ2ႆ| OkNMH|t̚_PyoI/^\u=MCVVbͱC#Z}ȯD%(F)~yٴ*{]?K/{5߬ $B?#֨n6k`QO !Lb7M|!^k_["_2~b\ z|H5adn"?\%3ϖ}{n~>;2ŷoʽ^V#|_ !9EبQ#iȉJl:rBM 5痽VFQ(7]cs:7;: !dVl'ه7%+n 4;d (UJKo- !W wIS_q\S;wL)[TXѹc/0".E7r.!͠!aij2w|GINŇ[Ps,; {ut?|ҰaCiժURiށw㑾nh KjQ2йEcsX h:@ 0p Oy^akڵKmf4^'wmmW^&bsjsX B&,VQ?Lv$ZA84!~_8_իWŋ~ri%u^Ko㱘MqV`z!M!aij2w|IS0`ri !kwB{nٺu)SF*U 1&I?ƣZKUmaF \R/݄l IDAT;C~er+09,MM/ !6VD/`D#F;!\f,ZH֭+[N*l:ܸSfөvAذJq0UBfB45;$LZ*EdqB J{PkK.-+WN*H*17;p㎃ㅂފ!έ4Cd2q?oӕʼnXh3D̖m8yvwk\n̟?_ԩ#gyf.>auj7ށ=9ew/G&`*3!g=K پv>qٸCK\|DVo!/FQz9 "P^F,jL5!VR'_E 9msvƄWa8سglٲEJ*%UTIdǵeށsnه,i S5}pŲyޘ׸ V-_Ru?M5j7:$;vTo,ho駟gҝ+WʕW^)*_~޽{˗_~t,>~4ek,Jкo1j&"z,EI*sѣ߿_.&_~elǎJ;y 7O>䓊 >\+"}9ʇoQ_Rw_-?eȐ!o߾ܾ ֏yevXnݺĉsnj/Wq`WC裏" )Ç~8wM}ᇊvmjYO#Fȝw 60`@еƻ:SI! 
˦IF  p_gKaؙMܹsFҮ];7 [yBlf}odN1lNU(%o?SkƩ8xlܣrk{WLGwڥ!O]t"X"թS'=z"W9O>]4iW_}~B8ݻ04n3R$A 6T;>xACPڦiӦ/իW_7|S.b4hJ+5ayG姟~R𩧞4XS<2VH/fOf5͞=[d9;csI=ܓ%eLvsE?L+j֬i$OT l2>W^jժ)BμC_}UUn֬YyHv(L׮]fvA;&!^#U/[[_sNԫl;< ;cqCF%/dy~m'RtH@GL^&#>]5KK;B .ԩS!bs=U G'|,^Xr[B6m#[nEi `~ $ Ϙg5zB'C+d+o%9묳?ƥIuc )BB!hbL+VT_Bf'MƬŪyo>@ugZH`A"Gakƌ|xLkI98|pB\`^+hy}<òF[vsE?6.>hg̝&VvVKC+ n]r%y!# Ŵ+& dܡG#G? ,Cppdq `h >].Wvy!9\voU?֑d趱dF FCa9&h !!_4IA#X& =4Ҿ&0u׋}lw+XΗ-_ŋck}K;OsY/\ j1Sfp1?#C1[nB4La򈩧U uY@D7|劀o  ~^rGHP"E3ڠ݁hjZ`[K`<|coDJ4`Z/x'3["jaL0"]y|;vXEJ*),L=&4||>5nG~t:ÇAn<7A w.L6 kͰ&8 )k:PCj S*LYgQK ;?}$k6+Tݑ}T SXsAz1 MS$V``C+i=&f,k0|7X`5I]0_7:Ah 8VPҋRdjYÁ[Ε'VgC.n0 Bɔd݇|*m~|h߾낄w݋ &\&>  Ac#`7V a8dpU@{%$i H=B}^>|6t:J+ֽ"9sDxA)Z+ !L{u:@:ur Y$[>Xh0WćBk E!C󢕈E\"BͻnBD<@/5āʼn1ĆdAci/Qc0lIiPk#6]D/ݻwnTxMdٔ$|cdJLdn^; #U$%R|ɰ!w,9>3)wV4n_"arBSnBYw駻?`Łt ݓ(nJUwF4jHC$)'LIOx M[oU(^?ծ\kٚS2nHN9BH.;F2vǟa^hBFvF||CXT|aCAXYHٓ&'pBQށpp‚5?ȡGdzT#C.ly4S!$"+j4":KC`YR+#j hdOMBM^'|Rs=*89!&<@ZL"C{&]H-[TAzR: :T%K嗅dh!ɡGOΓ,ٯ.VLJ={ؖYR[4kL /_]uAʕ+ Çwܡx}7|[dJnnWW`_gpCX#@ú^Pկ__ ~D#FPU5R !Qg B"J8c~RBqۄЪ%!<R)_//V{SN"v$I'=h=39"9\􋱎=Z`uu_xczꩧԥmBH^zI£Mz8 nVyfeT_;<ի-ꫯrf+q"Aχ rn}ךF,N/Zl֖;eWe)CB$Aq;1󥿅]u?].9vyx;Vۻkתͦvmw+\o#Y^

W[iפk7BLP&qْ7dO@>"G 4~{5rQRlٰs8G[CBH$!`TEB^-Z^xA\@z`1dUW]u)rgブ7^84JPZEH Yt!)͛7Wh bB*hX}k_Ӷm[$n5A9'4Z ˗/W?H`lv2vX5gG}1 zP MU _h< O$fh'܂D͊-:wQ m+g%\־ `5U5Zf=!_A &nD#a8TAcɴk Bj뮹n:FϚ8k@0BxӋw`NNl9l(/衘S0Yk6ywi\.iserIʡy{i\iX:e\%Z>I‡M+C!d oYi B4'ho" Fi_2L%юA &,Z*h 3hy0E馛gϞҵkc! YX} :=C1q,I0_*#x$3k hA|bvذaI=v&@ iY$ѝwީ#j<[-~3f8lj-!`$TqA>2z^"Ug7;4DKY+Dž'Kv dЄ?J61`1>2kAKL6T%A+%Ji%˩WKsor%+WΒ?Ϭb!1x! a<0iAD0瀮5NŞrˮz4<\Bse-B !겖?:L(sE+$벎'1EU~ؗHR&89d !;S `/#~I3ϒ&E!iK)&}}r5i۸T8-˓ 0V^zM0q_ ! 2msJ EiM]i{iFs9PSk?BYJ">B;l"3glݱ6j !20.LaAؼOEIE JPBjUc!BA@֞Q*|Nʮ?3k67HwJ:HB)R>g1|GyDٱp$75srr|*c`X?11 l?E[o$mg|E P~PB/Bt2zFMBXdQEe:3BF %?͔ >>nnUV)K+" Bʔ,;KBw\H^ Ex'ZVoqŭjKٴ5#GȮ[dފ%sosiQ49rJ^DU{!ޘDBl\~n2dmʔ)һwo<@)D mٲnҬY3?$e s9r]x*9->|o'OI?뎟wq||v}QѲ/"TgĈejs 7H+W}]wIjTӅ HHK]k# LRWP8ҷN:) ɑS|PwUd3nx?=zCUI_~e! 6\`O (d7Ge˖U>Au`$2|ES-hdSSvBBh5zg7&K>9[ !E+7ED_9sB"CVoS`ϧ&ˌHȓ; lP+'|f  %vf[ d᫏Y+wF`p!Nz !L4M!jՒ7|S%5PBִiS!w&M1#ٳyyټyL81e롇{N01$<+:?UFufff I!xʕm_~ꫯʲeTB뾦yGY"8ĎilժiB3ONS S}>S6ny 6:u$GVhv^,sI!orZLްaðkmU/:GkFw _BEk*2y*P[eB@rD,& 6>0QBHrҧ[ xvmiw(vhr<>qd>"W>z') a~ Z% K_.WhGXۄWZ; CaҦM!ܹs~W9묳r {W\'=+Bl^G&jW_#w$Y3IV oH'0_GI׫O)^f;ثБrpx'M|p'T)TknT-:ʫsb d߬u鈣^%^ 2?Ͷӑ4D*} uk]$^?Kά+eKOgͯBFe˖-2rHwiӦ)A!dOု! 4Ev 6Lbr%%aغuȡnIz)kH 3gYӄ6ђAuX1uԩjDH4h瞨qDַߩt5!CCIjv VL sNP !ゐ_kh9` G76}+?.["k{VE.X)_|9cьRP!ٺy\{2<wi:PSUBL@D܊e=r3SeӗK;jG?[ΖDꆓM-+!ď֋f^#?v@-nyX$ɨXI)"=nn%jo(TR)N΍4>Q`f$52 BeQ0gU 뮻N7 13>z/B4m$A gh>%J(K.DBkw}ơ% }1X`} ˅j`!--~c]GEhƍ$fڇ`>V%r0omSxhɊ`>ͯ!Cf:"+6Qo{o|)dc*G3e$7y|K]&͋>~-:[lÉ!#8q{. 8MG`3L MQBG@vC{N>eۖMrwp"5sR4)$mBH)C >jA3c@0XaY i4iRJJDM tQpBݫޓ۷ji2icǪ &mB@ *a6jHi !Ԛ@̙3Mn ^YTrυ-PCh7pi%`KT[#!Bp!L{2o$i>~oO=EBVyi,eKLF5Ίl &z QD'L6.8 '0n9QY .c͎5g>R e{& *;BUKBI!Tgl LpNŋIw)2oYL@`<PE>V1eD:S8B0[(aEUص(@n$8=cm?:26R|K y^yc9M[` >SOj+*")w'Ru'b\rcOB7"y3JxUcVıwBHni/s H+Bup>ԩPL8, Ɏ2\ZDۗ^zMls !? )YԼHI$w6[? 
몶 ή#g {D uA`P'G̷~*DJ *i&ÿ0T:Wb:bY~/e!AC$Ʊ,H˄ B)t)Q-&[nU_ܵjR|B-ecǎ?pض}:>`CS?3א(BfM(7% }jC'U*6jժ)g|JBmC>"s/*+8 EJ-!j e SQ!sEF|\lߛӠ4^kK}gyr…0Ǐ{, 5k]x*<',ryʕե;P+H>}d{'V9[d+3PBK5jTȈڜ7o.$څ(vmr}.#Fen< CݟC5ѭ[7ټyH.A VlÀ\y'E h"!<_8d$ֹP?(W\.n/_i|U&OET+h{[G=2gz9g3L-T'?exq<]q/^Z_|-wDI af~:5g {vBFpDJc9p BjK"!n׮deeG}nn%vZ<)(RX"յSiZ/jrFx4cZs=Wի'$'')A@kơg!w=Oi-[ m y Ùq'(M^\׮v r>$J*Ѻr٬>+gIjϺ>0+7?7Rt9;BWl'هO?NV?*.^+wvi.Wl ~6a>*۶nUY5j{]v/lnIrj=pLrkn?!!tc؛s{. 8^w9$\&O3RJ-4@re-hnn& gV&-ϿBʖ$o_,ON;>"l/^<>|T:C{3XᴸM?xٿS9"w}ڜ1pPHk +a̚5KSr;(-$-Mt}BݟT6R\!LU10Ug6︴$x. Ǐ.a>sXUDSD cI&fh窫dСWEX{`Ѳi&ۺh3+q׶m[PEΝsbK;Bx|iH`ݢn5Yb_޽\O  d{(u˳>5! _6X{q-ȸqT[M6UfV ~Vͱ׾ |I1jO˃T*Z.=TBEu|{l^9,pPՅY7=6;͊CrUk˹tԻQ_d7.-ܸ=x8YA&bja2WXX筷ޒ3gNČ 55„@|+ႀA߫ڞ={J׮]#ZC]](sU>n7T(Bhߎ1&Ì?LK8| 4cnoL|*X~R_X#z@<[lQ-ZȢ&ڞ&.EE^[[BOz?JZR|1)Ycmy˕Տ3U*GiYzlu  @(C{tN0_oVNrWH ]#"ka|6e 2CZI*f;*͞6? 5LNO|@رc!GlY=)Up_7u:<i۸Lxcc!O-y 7ZTL5a /pi>9nr6ʺ|&ۮ.j N:s9GJ(Bh_BV`qd7@Bh'?FpEY,~}X3&nkw}`9ƿQA `.[^!G4X3k{Ct?KBd%>d;f!r\i){e>Rǥxʴvn0Ԧvab棇 8Gۆ2d9Vi !wQsZ ~vr#/JHR%[` ’s+b<&F?rK/v7wFl:K'9:CGT1i S5}zKt ' XFztŊ2v 6Vk)$h A sɦ-x;CȔ6K\6Zc#%\s }Q9@i``z.0VSv! h0 ]wumtEOnk'1}h]/q&_/B{j/p}p~Pr s! >!uT+1{l.v/7|Y[IH!f⋲|r-.=%5vRi 1aʁ a@|Iw3K~Aym?H 0^O|+t9n/ &,kʲ3E 9 뻭S3yDC?h S=uHu s BSBmZ-4/c}$$'?TV5 KL)R&ѦUpeLc- ڱ/^vM`ppL(S#ZV|%'/vg"/&n :PmZGn}ڟhqY_B B R\;u 6BC C v@ =̦;l0%Ĥ%i߽9lpcnU?zO/̇K*8Ρl̋'TM۶¬rR nMT.ב,!~ξqAO[pYXRz2! fg5ôw'>fDT\qi'L>" "ord_+o5a-|H1􃜩vi&L|M{&ڊs=Xp1l6IyAz#P;pw!۹ %`Ip=pΜ$\=~3|/0<tV9?؎ڵkr]L@c␅ y ˬ!t)0ji:!<(AfԏD۴ikp sD4:(!e4>q!Dg"jC!xJ. νykBc8쒓fz1dcwcoKƘLp!)7DI3G `HB\6P&Z}]M6p 4&Ҧ8sJ%O)c[!Q۹3+{wnS~-@p.&FRTG2WB9dD3Ԃ~b3k~R} ! 
\ jg xA;= oBy߱UHkZ7D e&'ӧqsFW7p"G" ؉PNĹ%h 05.99qGͱVgڵSa,Y"ty]%l~[syw.|ODHBO9}-iH ~EhE8I>^:Tʡ}!DF"~xߧONe$53+!$j%- VJu;\#8p_y|,imS ` )E=Y>#4& eQ~Ѓ×_~Ml$yngrĄD1뮻-2t2SkYkkN.D脻+@ T sQƎIM =0L:rHCа ?dgl$"@HPL0?I0M; ,3e UH!W,o70 7Obȅ!GK4wviB4:>|!Hb!J4ڥUȯkҌ `I9B'5Q=9!:Lټz!iР2#탔3ԇTjcBH")gE&BmL*T!|Bȍ4~5hR1B,KL^rJ-s(!)-.muo $ a| G w#>>L6 !&0Vޚ;70>FR}#]Uizd0$ %}9†}8\fI94DM"42cK6ˍ?0D52dHXBPZDJ*I†F9&H9 0|1%AE Wh9$2f[iiicR),!"HZ Wx[j!6G+w߮.'I!>$_`#),}*|n0$'Vbka!7UB2酔6hcE1q8s0'R(^" m!OFLuZW|1!XLF,X tCdk׮-lq?o54<8&^A"r#~N Zc#|ӈYo#1dpjB5\2vQP,v- V!0ieǜM?б߃ @6 #;/44t@H_h0 +!dt |ug񓆐 9N`"a1tR^hl7\.ļ>8]4UV\F0ƅ&)۟}$}\>!$@7|\N><~>dq[>X&B$g  |GM/Nn 7퐀?[p'ӂ& &3nCB$(歐B_|-Z-dp̝H!)N:  3]V>6h! ST?R|W |$oʻQJ <!niA M !漘^9~yOB_}* p3ؿe.گ_?&!>|z*D8$*~!(n SLxጪ<^SQy3P;(A(OQ4k  HH~v/V<QKƉGdχ~8d-W^Q&^Qc3ΰڎ EL ECm6ٵk҄-[6^8#.R]tyC䯼JuQpqUԇ/!hē 1!`4f% !`a&_B&l֬Y-UBCa}QL1WW`bM5< Q)Niժ2L駟HdeeN[s0B?͖A _00t>'G1$'-qG pV:eBR} e0% ! ;A ?Əgp!!dRCៃIzp'8%ʯ65sQ#bZT)=u)kP'"׺=>SA B0 0Ь|… s9߿_bw߷o"$ov ?Ҥ'ADݴiZhY#\TREԩ~׎ҼxC?G@C7f?栛7o6W?0BH!1bCp3 B&n_"h bU-Y,9 @C7f0sdzh$^!a~zrL >DBNA BSlhpl3b0!`a8A BxM@Dj-WTX1%da0!kj7  `a,2ZJkРJa `0Cp @00 )]T"eJJRbLfB.vA  Ƃ)c0:/^,ՓN; b0"`aXA BpM@@k˔)#+WNAQ "` @LBlA vZYpԩSG8 A `!a!2 #`a!7 R={͛tRJA * `0Ą!1f  ֭ Hڵ34 B"A `H8&rӠA 5RJIժUScPfB*rA )d0lذA͛'5k֔m@ @X ! y `0$Ci ۷O6n(%KjժƠ( W0UxMA & ! 6S ` Ν;Ws9b0"`aXA BpM@@kK(!իWOAQ "`Ȳe˄<_~-{ߵy΅ `0h !4k ` RNp! #A B'M/^ٲe#}C`۶m2k,Tlf9Gd>tDh. i JzZAI/RP /4T'_|Q^xi֬u]*W+b-I(ϐu$$Q/~b*4 @ !4k ` '0&L!#k!9>6dBRT"F"ʄQ#?7EആPk #A.vPkN%O<"F @<BzA #)3gΔ+J1f詀/sOd͉?J颒Qp*@RBnYt2B nx/3|. 
uߢE 9r:BM%!sͨ q#pY~+VLjժw}@2nyPryaEL!F!t۷WA_nF0`BmYhB]ѢE;x}Akħp2b3gL0q-6RCS{~ !%ӧO ]kTlp -'2k?T"[C;EѣGIڇw^yU1"tTR0 bj `0D!Ѡe5r6(!4o=xXg8.@L"Q`E 0U!>|={]}``Q4|I*%Di&|y E _B|G)ap0:ogt 0* :/O0b26($~'gBF@2( 6Li}]N1$p‚)>l2fk)d lh-!A w/G@~V09@+w)SHҥSN3n#LHoB?BH62L3H†V^'|R6ov)lٲEV8&--M:1D+)}>:!04a;i0R|C BR:-_H7 ,,999*p&Qa+s߶ {T5+*r)*dɡCcǎRn]y7|rg+ W\)W^y|RP!׺nmӵF*_w2!%Qҷ259Zz-Ɓmo۶-*u!ʔ[.j^Zx/ԩSwZT+#sxH]?4U-t1\I.HUg#[d[B.F$ɢϴTj6-k`̜96lrI?QPy8vE+w ;pѯ_?= D $ZD >͚5<{GT?'O]~L" 0 Avܷ\rBѣGcz*tW_}UM=+\Γ}Re0 ss`zbP"\WbFeL٘CSs6Yb۷OğdϞ{EϞ=?/Qi+dns4ƜƍѼysUsibDΙ畆  ܨCK(-kMRH ?$SNb?rЄTR/essaی&6jԨ!>CL)JBHw\RAf=z$*8W6cߧ]ս>{L-<ʠ1Tݩ:Ģ!eϘ_wU8#vjQ['B>Mi*JE*!ɠ[&N={`޼y6-X @;;ax05 -̙#*y!hѢ߿?h}:ThfJӧ>#FDžC"B5bVFj%ya04'3.'I;." .HJg1vX1'NHĚ߹sF4ְa@kСU9vSs&E*sNo]ǹΉc%II>%R%%>$h^ۦr޼xF$\o? /|K#Ε+ $$SNkIvb}6u&rV:V:BH?A0qP4"J3r&ڶ,*Fgy-F _vuU)iU<KxbJKN{16 c *[C: ^oouDBf ]b"{ zuG<o X񶟪TA ePk  ={VT'cC3˚5k!{F&!!Q2JrSdIAh2*U4BDFZ%"{&TH8&>٭[ӿ+bŊYOBW\AʕAC`*\$RJc9P"ӎhIvI9Wss`{3CrBX_dsj"<ܜ'15%4s'-}'KTAI9O?xT/Tx@B훪1b*\gH kQFV:분0Y4)+_v5P/8؛""n c`NGbh4!)H˖ئCeӠ-h\U?쪮U: M- ݵ$ 3@gM򽆨H>,EB4$Ry8!euymY[H" ;< s*!܄$\7nmܹsw 5R%Yp!ԩ#HgHI 8?rB,LIB> +>^S7/b8quQEB=Vź6H*$`4#)zckZ@A?+k T'OrrםQHP9 72g,Y`ȑԔ Al%M<{!L S}Bm'éI>Nz`}3G/<(-kA͖%ٖ3)O*}H3V1}Co] ?D]U!tk !*BBxZt7EGLsx(Dǽenn:ya$ʑLAjoe%ǖI3O?$Ȁ֏KyR,41ZB&MCFRL%<; _UGy'j]TTHRA@mgn ҏ}3 H_^b=׆>@2KSVTV!}LYK9QSs/T2MS:-UQ Sݔ"q̗. 
277FZ?O5_^>O긱!׀{ ?IiVLŕP6rȾ韪 k&>RO6 !-I,O_{&`큩唅=;1YS#Gz:S>TAr_PCB\5 (B2uRB|')2kcdI+G84.gۆ=|!tS!"?ɴ$Z$5zQbQ[8VbMR)?hQ*Dף1QMme]BF>P!*LFIHHXh葜' iJ!:DsQ-$sѪC }iJ/[lb +`9sW~lc65׎*(14Ϩm|L͙QWeZ0 =\K¹Sm%ծ c^gTbJ8rΝ`_09|$H_csL\o ıSkr\TI J$ \~6$V-MF-&,OU鍲j7<ž0w`a3IGbhS 6à3T -@l/oص|w uUq/_(~RLT)+q5t%kh~im'<¾7>u@Jj"^|x,~6y5Vc7!} T-o;GO *CEAIsQAzI6*ҿ̞vy0Ii(6i(aO#2[ V0q8*l,->I6c  `K17% fE9kǕ؜O K j8k9S۲-{Ҟ}dX%d 5 o Agq wH@I_zbl3Gook 'I`2hS8fl8ʽu}%=Q0>`0u{N.Ö#k\  x!* #eI2hOz5!f$Y;R R90RTiK=441%Qztğ JaoB5!,T]B/cX{΄zܙr2>tfbAW,F !M)Xd\'χ?Epja~Z >*ˡP: _TX?#,y%ѾɪX<}#nb% (_uT)J-4 om< !ifz)УMVHE+g1Oi$MhI=McϦB%!l=.#1tpG,gUX;!aR6E3V O%$%PKծGF6}Ju?(^ #C@<~[^G#V@E풸EM].fxS||?{l?~ u@~X>>^͟UԉKإUH*Vˉ̶:](W0{Q}/"z.}PK sAb=] L>ի9iǓ$QTe G2== !!IܺͦPQ(\[?B_u_[EkYt6O!-)*AޫLCt&wJ[vFX$924Z+0cqLXb|O0^+dº/㑫0]u|^)<} 6k " l9zT݇ј꼖f–#Wq3>}hhn N忞/`w0)k"sZ_흿.BRS620xn_~С*+Uס\LkߏMLBٞ'=v"8qO!$OTӨ玿w=0PȻᆱ+!xM[I;q !p0Ne:'Xwdl[Dg{z4A9YkRT8@|֞LH(NC,aRI^~_G]=Ł?JaG -&qVȞ_WrgDpB(<";P$6ik"c/*gKaRU, G7MIMG״Z,u-ǭ`pA*b b&wsߕqxt^wi޶7ס"V/$9%!tz>s !S)SB;1cVZ%Rk+jjɈƅ?ֵSs֮;jѼ-鎑@[›!Gf ,}-OU|{| A]U&YS#wޗ1oKNG.mpi|ش8Bٿ`^bFGb^\_*3')$4eI9|D K?uT9 F44. կ-2MS0qi޼Hͨw5L@9NSB7t \椳5 %0/Ç{=d[cǎ e֮[2v{_FeODܿpg\- aR?ō`>w_Cl\rf.JE?BP"H"q2T_zRj3ʇJɄ_,4Lr "$ r0E|EP{Ja[( ai[(˱-9"=?t'TӤ>lK(*lkv (ڔpxЄϐ;KH^T-r,+7[|˱u7Ew $][affBqD0Y:#{m8Y~6C?2W\`A a2U2W⫯*VvmhLpߦMC_|={{p5>(۵o!!!  s@jxcJ ۨG/L,E }SyA_E&pի_A}jj>y[<߻w/z).3ӐII,-]anp?i /J8=qbz#Kĉ17Tӳt=~;OqeNҡ%:**ٓ !K6řįti!.ܼ]o>|~X4U/WPBװra˷Az~T^DPS2_YrŊoFDLY  +-#a4ve;D?#,[>XWR]ULMDRHS֭[ J>u 59J. 
HRc;i¿3-Y#oΤ#FI1k//9*>˗ꑐYcرbLO":L>h A7s3g ڒ˱~8aÆݡC&u x7Ă𫯾 "Ө@q͈<ʢ]cK'A˛7?wK׆/`^40$/9p\6=Ҵh=1L<+*'.B%TZ^ ˡr\In9XO *_ z<J}gxo2]:7~-I:YhP _7(,%ikإ^!ۥb?{:{&Z}Wb<ϣ3&[Y5ѻCb vl^ƈޭz}; 2fJP_1+BܝMyHf *sƾ *sE>ݼ(J*1!+*k5kJD2ۅBG\B|Ċ1 !L^ܺu EEppPdwv -2[|sigddhp?XBYwRU?hŊ5kVwÓ'OPdI1֕+W f-*uڱ-ΉD$Vb%1BHՕf\k@BD5jj>Z]cKQi垤Hdҵ!96Gئ_^*wN=׃J&}?7{!y?70umX-t&!T>TO&a mFjWgkEe/Oִx(+dmH%JWIt_(L]s4k4?M%\ };Κ?m<|&L5p+Ahmyx?:" T,ƴBx`=q Ҧd̜E}PkLC?J;C?!ɓL aϖvQ,'43h4QWB^!C2]F޽ ֯__[$.SK4naUŤ[_8+T &M2Qca4m%mQ%4W]n] m-^l*͓9 IDAT-թ9BH,իghCIb~HIM͇2eѮ%s1jvMlm=B27}q%O<k4c =}x!H\!;aµ72.T 35ߩ=1K̭|B.=r=+BsH1m1W@eOFRd1$*qzMjU1gФV%LWu+},}:?tm1֨KP_/Bˑ5,SLF<|R IHiJ&M3c<4wGcBjI͛bEE_6oC%|l[_~P Be7?׬Y#T YHvIiI%c0W y)M8{WCKH]H%>ڵC˖-ѠATmf-B(?1,սΝ;'Bm!VS$kؒĜ5kJN>gjM7S{u%![_3י!]b"3`䂹#oGYlRt),tjk1K no}{ןj%h{| GI{2!N<0&gTDil8tYdG}Z *_fSM$EΰrDd,^WW0 QT V-v-i035oR͔\㗫͵IdM~wiJ ͞u 4$՜ ]eT9BHE8*^iѢ e4$!#١)9,UVZ4$gZBñcֻ-c[ThHmԑp0 9Q UDթSG>z bX9*c^ϟ?&$ xc\#%!+O|PƖSm=G9qL`/rm: ,#cϚRIu&ar>qZ<ǥŜE4$2VDͭ /4ܹs'ܾA& *$\oTY)G,{ P"Űptڷ \೧ !/nq SC[oa#֌EPYԁغ֜4xiFAފ(B PYגF%*Dya޻wpDV[Tx f!Qo;d挀2Έpȱ AN;ďTF{XL#ѡ* q@S3G%7WX?M˧֧(AFRsVd>ڱXĜxhXb϶ٟvj=ɂ4sn +_'O>1 ߵmp+wéc0jPOfg;`͘L=|L#h^Q&ܛN8;f<g^ i5WO"/,e.BԚJrK1W]ɽJ81J$VIUJ3~׌H}] ղaÆzw%ڣi#II|&$4 HpmcT1Ȱެ>XqdyhҘHd9of_dAe,OR(*#`xmF,s՗T`Im=#$vxNJƫf<LIBFdC*qL.UD)hLiBH:I?HWў~y2U4l~PNq,X"qК!lץ7 ɿ/ ʥ` M(2dkLP049?r<@LB˽_F!qH*턻gjaTțMi&h(իWĊ$})c^8&.' suG?;q̒ʱsT UQ(x!dVRV pn;vx oڇOB3Z\n|ˡؼ/̞:i!M'KȟTeQТz v 1t3#1ZRe <(稾T!pBho%~w\KEs'|w\Oi!Q$_*!0LZDJS>{ #P\2'ǩW(#>s3i7"7 ?kW.*{oSaȞ3(A7_7Щ!M͍>%T#)BNp=rvFy=wiUv PO|4>=?BXȎa;&ׂ扄T@KabȿTYd((:k2%daq jKg۶m liuE|էcHjŰ9%zG@B"3׊kC 7r\;9d"zSEPYܪ->T2 I|I9>E~}IhArAM̶xGAoL5鄐㧺Fu~ݻw /%Y$!~k1p 4 I;&駟s:(S"0O^,YD.gF&IhޥH>>ĝʬqbskg<}8g+4.Ӗ13\̙ENHF䘙h^&MUx$!x';-hNE).XC!q8V5P ݜ'OIBmv!b?ŕ0S 5DP΁~y$VTh*l27:J21Qd?#NJBB4OAo/DaP ={vݻL<7j(O&Tg:>OU$J$sv98wez22)I6 իE;LdϠ-`J%8sիjԨ!f6n("0Xs37'OF׮]E{SLA"r2$ij5EM$S"W` ^3;#̅H̭vL vڢiSki^q{y$! _\RMBCY;B/*WΚ_Pg8p(B QPŚYׂѢɄď~yTxxך#Ge9vF§(."$?$'NwDў+ ! 
z:* O>]$4h LNr!=z副 ̛7O|1&' 1b THXiӦ ӑhRD AĘqGRÜ4u̙3(^Hg&\bs7KB^'/ 7xCeFf$3I2kj5NbOS\S}νk -$I[.̙#ĜHIZqLST [J<ɶ1xt3$JU*fv,nCWGBFlU!T>T3иzHuEQߧ3? EE rx2!11T^E$Ǐ&|v{D:P-СX+_v4-YA sTYc85kbСBq4N\wYS.]^,sWReZ$+XBm Q!tFq g[o'y[-[6Gwg}E]ݝ:sG<ш}bMF B΁fT&L |ᴤ{u ʃ5$$2$+WM7Ws>ILUO?T3&?5ڷxb4n'/<5<~X]޽ u$$̯+I'h}祉9:ھ͍** >\sI%}R45sRMS<@Kךb'1ӔI5^3 B3YcxT Q וŜ!DsRy Xd G>C[6lPrDoڴIC2@AsG~&#RRQ$&!3&T7/ |1inTHl RYc˖-|\$to]ILkJS)B(E- !UP:i[219rjπDtbILm+|/9o%ȝm]0[UAEmE}@y5oa튢+POWw: "EMoP9/*4ǔLrn4kc !U(Fu1l$ WDIsR* %c0n␄{5;xPw)~08 ,#w\* ,CFμ$!,]4 "z$Iq0J1*y٩%C\ }+~} gQ;N?s!LO*xo]cBhnZDIkԗsZSWb. !̪U\'?Y~hGZZgFi_&}0hn L*4㥢/_>ܼ&aNBw3t͙S0GzK7: KpՁX>f-4+@>]]eF! h\B~$h$($$n-d0MS6 JEaZEXp $ܷ\ӂB(P!TFy4Fdd?YC"Qh*:$T,H ҬϕyIL]D5R166V(w$Ɩ ?B@G~ vBhL 9_9I'DQGFTҴTAobսB Bȵnv\c{RIX3EA}u%H172Q0C;zutsǘ&z5eq;%!dt'OڑހiROx2HR}ɅJhJJEds's$,TYeH II*8*Фvz]!"W8(c݂$>'5>i<@1Ç={vБߘ)<|+Hψ81Eo6o:ʋFkOJ5ӾMnP7! (cJ% ,# MPx7^Idaq ܞPPpXPٶk2}Y*U$|f\Q\ +ȕ5p~[B#X0G4"*$gBB@!pWp҅C]hbCe.I7B>+WWBICcĐo";S Bx5!P2SS$Ii$\A]M-;NݻW$XHn_Bv;Bg 4|V\J!R(NE YB(>bh_vU8iJ"讄PPYve#5۷ElٲY<>{!\;{N;SܙP\>ΟEox^hOB\/gZt7oar*$W^ELL{aԩ(W[#A +T%C̙q5Fb>H$ cPti 6 ( @# 0XNAxK]z vę3gP|y)R]$٦ӷa[IgD^"W@r4dRLB(')cy%G/M6իJ(ÇYf4W>}j'|'Om۶)S&+pڵvʔ)h߾= ѣG kZTڵKe믿YTѣGct"rr\kƴiӐ/_MB*f&MBΝ ϙ ߿+*eHJE̵}Qxɂz{8qO97G|ܹ0P!fI8r/Ix5~u_w\ BOPgDNrQx d0::Z?SBiӦXh-*L[noݺuFϜ9sbx1b?~[l?͛IFԟ3g,[,֯_/CҸclݺU}Rc )s$رc8O8IB8qD1޽{c̘1\\-[AڤHR=tP{P)^MALyʁ);!-Ŋsǜ^3cٛIb4UT-s>a8|<hv?OgN!3}I'+A(RY&F%T<$$fnnw IDATD288Pϟ/.Le_dIQr *W,Uo.T>HFk֚xI%ӥKH>޽;AgBMu&& dͅrW\y]b* w"ൄfB$y3"0mAO9++ P*YdfS(s?8(D>)bqY7Bw c{nEs[[wGBpBԩSG(b ~:z)(˗/q/`NK1ji% ?~y={6mZB~X| _sLj 2d$СCH"~dƌBEB|zϛ}sfBJiժUEۜqO X!H$ aWBrB`EvEOlۏ[ +? 0@3\ܕ~B#ɢ2HJIirIóg i&DTHoGbs]vhٲ#!s4%\nPϐ%5:GZf~ٻfѦRI$1$>+hn.T!^rΥJZCoޟjn 䊀WBLj1.98-a»wx (%6\S7pn?y~Q4w&[6/ =,P)5%6BHk{1"jm;BCHI&!z/~[(Zb.]*|7 :2|1 9(hB;!ҤSMAwFD@*K/r=g͚%\Hjiʠ7 Cs쨽U(,G#a𭳘i<]ńU~B#KpvT{710~fedRGO9+*#3#>B# ]QTw7M3#vAYv1yU|w*b2z L'\Gގ1Mjd;k\dВҿIyz2!t'KoNC uգ?sIh(N&ϝ;0B@!=!l3N_KҒ4ohܱZ._{&`큩KW25'B|ⷠ7{[4c߉3 `ɛq/"Ci'K މЇp2|SdȜ٤t&! 
9.W93=W+<2 z{U I7E[U*t>LDVoԪB@!x!4>hT7  x$ήܭg.'bMI"T-F흯}ўvΜ9vD2b>~^$z|Xlw¯Q&&ᄕhYNR<[zF=)ՋH2ylmRmB D!P0BțaTQ0{>s! vBt&9C.a"C3#$ E9O^ .{ĕ04)Պ8lhB;܍{?j%s/#`"0ZHZI09k[($)B7=B@!`?CD׺;{n;s׏[;B9x1wjar#֘͹յ[p-&}́jz?8p@Ag2cG6 ^3䕻x(?OY0]uI 0QK!O9B\P/Ƣ@RhO!дharUkF!睬*  ĺȖQ {a Gğ;'b߭B !'@2OQ5dPn w:{.8wR p+,7"x&zViW>|}ܺu {BaY  +6Uj\H&uC?pCzZI=1K nRgP(N*]c~}~NV =!lc-.]0{gϞD=ÕLJӤjOvr!CE,| kćՋ"y`"/~Gʅ1,1,X*+Pz˷D981%FQk%/#2qOaQ:(%;ƙ!Cq#rw%V\ Ɛޝ0}ZHayu(W2Ӧ/?^cц=H2z[QVTG!ܞ\>ԩ\ FXc?s\us6 РR7_74bK'C5,z;@ b9/\Ǯc K[0(#($~ Y;fټc%{g͛"V9,fs "lO wT'BfC^?]c $B%4Gi:p?VՑKQw$ ^v_ c5!|8o΋""2!.?e[ZzX!P(@ {א#cS<Bױj$:7?Tj%sZz!L>TwGa[1s . qvΆ.'Kȓ!|'Q WA NقQF"!Yv|f$zEٳGJj6A; &o?ǒaЏ Ϙ]ފˠVټ2/<{搈ለOO@}}P8W3= !E¯Ƣ51hO| kؼf=;¹i t2gg N۵G>NÐߏ:q=hikt7Ǭ;˨^Ϝ"xgC8yɱpSG%1n@T&&S  p{Bh 45'ݧk@̅QGZRX9vzH*C{r vseojC›sUfW $sFm'?Cܸqá akP$(?ླ7-&5J1&o]Uw!ԽC;t7ZBx4*;Ĝi}Z,ٴ_BָE;/U_-ѓP[P/Z!O(uicP /ENo^WGQYc  ҦK/F?MH^ 0w|VYcn |Ҧ#N(V+!V( 8BЇqVO+0}̾O΄0PEEǡO1Ȗ1c/n݋DF#q'q>tWB.䞵Q8O|w,/_Ʈ]/_>T\!Cn9z; 8Aqv8ZtG5׎#IfԣGW$f؂gOז}geb% N%xđ1n|3ztji -ա}݊EX2o֍= U5+`ުdT?'j6Zjr,ieH0nαwpE1U B "qd&bJ-tWB};UA6C;0&bpJ8dN93>_m{DD_i"gΜ"hoc{{޺}Wb|4tD:Ȑ)3:K6O_ABo@/}OA=4lLc\>q_&ۤb+|}} OR*ۺcwܽ}XӦG6 2eTNߎqBBgmGՏB@!Hx!4?VX-tWB}<7 rw46~ZTKU|֡D"")FDaz\v ;v@ܹQZՇB*tNEЭ/LN 7W IHnS1qRi#R!wN|3jSf Bs|?W&B- QAߥBll`PŠsAS aԩ!+cr,$q8qRP* 䇀W#J%_qq[C;rR-tWBȹӶ3|H3n|$aSbJi '.br왮CFFF"$$ȕ+}1ȅumye܍p tBB=z (H_H<\&w剽BΗ*ᕻBabg42*1x _xkQ񗏾~/։ELtHB-l'>?17sVTQo[Q5BhB;e`sň_uAY]|U_̕G"7z8Lyb2oVr>&ơÍÃk͉BS>["{ ; ߭.n(Nwo  =n*7A10@vz7ѩpX.(I0@B%>0MJ;pz:=ĄPH,h\r~~~ȓ'cSPW8ݮ1{!'c[vq@R!o&_k5E 䇀37|ê۩U~#;x!,޴9rN4qoa}bƢu8{_Vm@ tjh+|o\+!*t`7;w`ƍȒ% }]]{zwCo#*2!eʔDY3(7dˮ뼓KcBoU ;HWo-zʪy)+a|sU)%@Wpta&)SϚ =n*ܷ6lƊg,L= yAo]z qh.,޴Rb jP}ނɍJyu!\Eiup^?uwo@#5tL,?_$ʔ`2xIc;~b6;=+!bHԞ\B,ޫ|yZ+ oBaXdشAJ psq4m^o{ڜeJTZ|r#aaaX~=2eʄڵk +Cpp6oܺu  dsd>8ի8{,2g ěa(S:*U7Ick\ȅPx.n }'>yO=ꃾޘjBJ!]ƪP$<:#97j/$Η_~ɃALi[ĊRmA7Ν۷p#9~O"eT D'ghqɍ˂(˗b,yБpb)2h zdz fL'.pz-+WOݻ8P=+ sx$!d)1qbD-ٺsFɛ:k/zq߼[2.wyk8c5 ƍ Gdɒȟ??믿SNJ߱ŋA8q,Y"| o7# 0]iN_?Y!dN$^*"bk!oذAbڵwux1RX;s IDAT7/ ~7AM ΝC?/PUq=ɠ"( Sx,!0n6cߧ-}<<,*ŞU ӿvaҥ2 P7BLL߰aäuשرC+B3x޼ybOUt 
-?|R}UL# BBnAc2Q{ ŋEPГ"-I0u&LEKHBt4%=w5 buyZ?SvTUiBurΝ"w$T˗/&N0Hzܓ&裏Gl B1r0JtQ|r v5!1B@!p<^E%)RB47HL@mO` A`TJirHRBF{'1f=*`f͚}v&j7|#]PPP)' \p$0I=b~$YG"Vu<{l+%?[^o#C@f0q |Rѭ=ՐB@!:B壩ϕ|@l*-_GNrA9;vDٓ&Gؽ{PU29E BXM=1KMa9&Y2By?̙z! d@<<#$"*JQTT Zζjw֡u(VDdAFD@dB)2BKw}~g-ʻgFфn`e¿{,77#s=gD5}*B 8YK\WgS6-`Яe[ſ'Wꉀ#gM %=Bti?Q#(yFRGkUVxwn6lqW,..sOe wHM/7]eF`תU0$ӎ0%-}4huE%S0oDERELmKq }o4"jFc7~u&U:O~z&?:UGD@hAg8q`>AOEEEs!CpW8PZth<_^4.cǎ519_thw.09-KLb^N{d4#믿ޒ{dZȦ3gĆ бcG#h~~q/»\S$dd ;0'AEphd5ү`:4mUO}w”V*g=N0Z\e[B+#a.=ЃS bD]4F@ѽ{wyϐa. &N1“'O._}7՛ jL>ydXNJWp3' ±a_ZOکyw~C8ڡ}l0&>ȫzXD@<\#/918<]pȐ 'nۢՇ룏>22U֭[gXvj+.^f3?!LYhb[cUrEގ#SPYe!d?\Xda-^he׊  =CE3ڢ.۫ۿF+%NwQWP:} ' B$$kޅ+λJmeS/Gmm+ C4Y=$"We)ZYJkJ`~gm۶V>ӧ ۲e+(Fׯ6o|7n4R&kaQF=&{G@)d;W?4vMIw]5.׏|)Ǐ7ҜP\֯_K.51 HV"jF7b[çq/L{ 3VxƎ1tOLL4ߵ|g2Uן+i8܉& A˧E`޼y-Zi"<]uR1 -]we/3  Iaub:?]9e/vwܹZ(sNJ+; 6S_e?}QCX}F/8dvG8 HVmti w](z>0t!,ޝ˶ { ޠ7 sznj$%%})^`zl} tߚ;~LA#eOŨb4bk!D:[o׎ RRQϱ*x]⊮LQ3k&-};3mO)晎Ag1.5HVF0%/Sc9ϋY.E⊾סAl0=u, sOU">~(/'Xg͒rss =M&-]pE ңk޼^BWB%¨?ZjUnm P|STY2N~˺x|*8_6hn.]@wc΃ܷzJP_pY%h)32!ZD| A1>RHz=.h]dg!|c)~>m2!n;zy 4} |u_|y_} 0C~۰TV::/(b==F2\OySLt')()ɀB+"ݐ(UD@Jp e<_]/3~[nX⌿=nG6\A=^L#;,ZC5!q=ₐuxK|Z#)cqdDS+T#Y믿6,z]$Mj,$޿x"I (x;,_(aÕV=^$}ZxXdrA*?Ir¿& PkdK.;;yqQЖ%3π.p+CQޗ_~ie4i!zĠf*"$y4*`j> _Hh|<!LJKBJDBgD1@EcE*O0Rp3_N 2Y_J%Bte`8sg©BR$P F׬@T)I𚉃qykp{^ey6z*0W὿z:W Voվ8Ϩ]<˪K nfA-^xo<2/^pӗ3 HISmٚO)_n%s`B/ FK O U '򢀶*D KIWʾ`b|7~H*Z3a:5;T2k;Qxn;hӸZ 4gv\,r>7qݦx7ХEooY{)P0=S b4Qv(vi O@0P324!O)=.Oaq E,%).8-d@,"DB0%#i'NmMbk"&BЉѷm*\G2cidH?q0M[yD=+kE8tɧDž}|_` $mD@ށ ?;b`VR:V)/S\q+jlt%mW (+[wo>߼w=v.)na\u9&4΢7г" " Al+ {1eZȌ ;(VD[Yy{Vo۩5B1D[\0=Ų )u'3 7?4@ۄNbnD3'=!" gHj'DyxRB0)49-.qQaKZ\@$+ED@*"AVkNw'"Fn\uYL$w^iӦ Ƴ!`9mJ1qfj_wn B9@`>M'/vhCWD nR" "= B@1x!\xhذlhcd7D@D;" \III0`Zli9+m NͱfX '/݆^_`߸snf#nV%↗|tX tU4ö "]rR tjj." N! 
A蔕԰ y4t>\LD`Ɵ:' ?i?ⱱ}q m muU؟^m`ӳF 1D-҂VBlF !Xa/fpYV<~ڋF;-#:#Fz_C <K3s6^Z嶷@1,ڂ\}k|*4 #z+3uM=0V%<5, IDAT@rN{>Iċo:v)ykC7B(FV$H:a5 ~;wD߾}Ѷm[Gb tn<:/znLo9X1 !xbJ Fܦu1ϱ>1<3'Axgԍ6c-7 tQŊ`]:qJZV{|Ӿۉ_gi|y:;޽wǻ>{ B;X}{xốy f-At풮Џ n xhAW/" "PUU%D@$pa;vCx򓕈] 9y' vq(0iBρM˺mۋ-c ~5zGN))G>K^7B3)Vm;]$f#n3f~ wc}lYM_ c/1,tKq{f*0X;1Fp|e!Tmڀߍ_,\Z~^.l0;hԤ""yblfqjzY%jKD@! AhG"%fر}Av,`tPAss8szFPHeX '2JAw5\FY~~kԋh|bB0fq'm>вA<ФѦGN]~Bд~tQ^;} Aȱzb4. '٢a\ |e_ӎɓڑq 5ad%bʉ'$M&jPD@|& A3B5 & #66rV B nwO*#O]ă}>|buY7\76'?-C蹇HUÆT96cczlSKx5UѸ[3HP mƍvď;_upmٛf)n!䄮:->`Ős=%D4U.*]FN %#I7#pXL_ub%co~&Z`Ό3Q^}PEC#- IV H7Xnn݊={sϵ|V 3`.BK3isUp~DŽ)FuXT8 Z|feWszE>t>1ú5ū~y(:=DqiL#{a1 {AMf󸚈 3M;4;vXAD@J Ԏ?7k׮ҥdob`U+BXGVy;+Gaey&QF B#"NXEAHy^z2Ajk5F% B+HZAY} lڴ 6l@Νѭ[7g*Ah9rS; dv @ؚi Bˑҡ!I1'M?a "*a' ?vHOE?B+gD 6|4umb!Aω+HZAY} $%%aʕhٲ% `Le!)Z)=S 7A BS~tԈN@tjPE 33@ڵѸqc'/Ah9rS: t%DE(FD@DTTc">Xb7oZ@rt(A|HD|0Sc#j H:}5?3,߿_B?svZt#M*{45 `xhӖҫHzK%$-NDۇe˖iӦjDD@L% Ah*N5&#p BZ؟oBfNsayDgK)IzOarNWn`L`(UG@PAD@G@~k@P`@%Kej-}~6e[>o:}\۱ ,>x8)(DN~!x簼BȨ!d HjgHoM4"*ػwa!ۨ%Ђ#l-{Z3R[vIx!*D^A! k 1HQ;-aeXO@zQEI/^ bذanGxxaVnv(QE$3ng\Gi)cX kQ$ B BF$AE ''L=f͚Y>v @+oKKz\$/ <~VuPE@D $A]}bϞ=0[]$#.A?^V \i5Ջ8ѣG1|ԫW#F|&܅#8y8i.Z;? BCrһf,#2A[r5c 0<՚@^^PV-h Xp'ί ,)49-~_qB7& j: ªғ" eHOOǼyK/rF"d sf=yD[UYaB7 B7*",$e4N)YX%aǡJWٛaHJntR(AX]gD@DZVo"8ǎܹs#GZ>`|lKIϣc*}E2z(COUZS(AhݨW]s  11hB! |X`䟻i unRs} 05{TL}@3F <%}-/"  4ZWW_}h5 b^B>k`W 2̝,4thm{^0Lp B,($ ar ,ݘRr];U2h \`f\$@ ֕ӸE&_"22G|Tb!YS2rfӨM0gsnSm~wGJ;M2hLJzr +hƑN-N]YKD Hi"`'OĮ]֭[[>`f8]lX3znPfWN C :4F1K;hV^/" g Ԯ@nn.f̘a$+}j:()ꞛ,qg!u {*cOZ6 [ו hp" .% A҅״E,NΝ;6mژlq &U)]iY` d!SITe}{i($}+" ! AjU\C ??ӦM3N\uUM1Y)BE VA|+HQ B' B'$"$}5~0Bر5j@۶m-[e۽˻yAQ``ũBff6g$m fL:pk|j:IRM];`%#{DtDZa9:Q*k;&]!A+Agj^D@A@TED$!dB~ᳲU؟\1V8%ef_F|X8g&>[dgeѻoA⎭pߟELl]B-;l,e܅ܽ7ܫإx'4n5eymU6_& lXO@zQGwywv "B/A7)tqSמ=`0F'&F7^Įm䯐wO L?z=빇z׍L6;bagsӽ)s~nBLZqb J]$-GEyd! 
5]?<ŀ5uN %#Ԥ7EXj:^ Lșy40'f-N …sgCnN6֮ZquxK>qhެrlڼe&",E*c[RMHD$2رc/VY}E O1)(.Y1d!wʫv{n]/𚵰` Ȱ쟋zHd.,;>^lcc٩[ 穠2l5"" "PUGD@*&sNem۶sb}bjn{X>҂3<t9?2⽿M?E˶푑vH~=2KJ܎ tlỊ0,iGԩS[qgƯ'PoLu(" .$ AEהElӦMC~~>*|Ŗk2>#e2nW¶ ҹ꛵8|"*v)R{DV&jՊ(3J(ͰԊ( }g&Q`  B#"NXEAL 11*ӺukV!Õ 4umUUzF䋵кYӓS#9{D$ͤDKdggcш6Tapn:o!giЮD|m繋Jui\" n' A <VZ!,/&,` tSH`Uߥ,gD@D[" g={60j(DGG[JHRܦu t0.*㜗n³q$MRC" "` BP!p/ݻw-[t] o3U:ZءQ$xЩEЩ+y3 `^=]lB`ܹe]KG}JDGX҉:3n cFYS'<3-LICV) dNMF_a w HjgL ))HLߢE N\t,߼]i\Ԁ Y禬ƠN 1uY`-=#F^zlS fލ Sҿ:n~0-)yH;qj S ӶA]E=he! &S" "P  Bm $''#77͚5CDD0$E){-tJՖC@jE\M`…HMMj} ػw/rrrd!7;az qzDѲRBp/"  j"СC6l6l(>"`{r#m[l_ Y+" ' AX}v)" }!;;M4ATT@ijVߣF׬aXCC?*AjSD@|# A?d8pCEƍD2?! JE*X|3HՏ+".YhMSI %%'NПն P2ᱜS-A! B n ߚhD"t-[PEG"'sX^dЈC2hJ$3D@D~$&#++ k"PURg^ *7s" "gDVX8pzBED@" A}!" # Ah5шD 0LffPvA7~ XDpV/" "  BohY2 \{ТE Q2 HjcHoM4":Ǎ111A7~ XDpV/" "  BohY2 ^ׯZn-J" "   Y( SLСC8v,v^$M߿AIDATl@@B, " HjKL`͚5عs'6mܞp& Bgf%"${4zÇ### 4@ll-ƤA؏D# B ]۷oG^о}{S" $ AuլD@apF/ tǣnݺ!"`?[HD@$D@|&~zlݺ={D|nO 83Un~؂#G#..c D@G@~k6l؀͛7[nԩg tjV" M+A8gΜF/"OFaa!BBB/|*" "P. 
}"" "`U;wF-3rDD@D@D ٳ6m 1k" "tUNHm5_K^ Bp) B.-" " " " " " " " " " "R.]xM[D@D@D@D@D@$D@D@D@D@D@D$]HjK Ht5m  tk" " " " " " A= " " " " " .% A҅״E@D@D@D@D@D@P{@D@D@D@D@D@\J@Х iK^ Bp) B.-" " " " " n^vXIENDB`docker-1.10.3/docs/security/trust/images/trust_signing.gliffy000066400000000000000000001276201267010174400244030ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":881,"height":627,"nodeIndex":322,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":10,"y":0},"max":{"x":880.0000000000001,"y":626.25}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":10.0,"y":122.25000000000006,"rotation":0.0,"id":79,"width":531.0,"height":500.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":312.25000000000006,"rotation":0.0,"id":40,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":1,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":41,"width":71.42857142857143,"height":50.0,"uid":null,"order":3,"lockAspectRatio":true,"lockShape":false,"constraints":{"constrai
nts":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":40}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":40}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":42,"width":26.0,"height":18.0,"uid":null,"order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":40,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":82.1785714285715,"y":17.03600000000003,"rotation":0.0,"id":0,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.female_user","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.female_user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":43.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Person

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":142.25000000000006,"rotation":0.0,"id":2,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v4.business.user_group","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user_group","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Organization

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":141.0,"y":152.25000000000006,"rotation":0.0,"id":11,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.user","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":12,"width":48.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Account

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":273.25000000000006,"rotation":0.0,"id":16,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":15,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":17,"width":110.00000000000001,"height":25.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":18}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":18,"width":110.00000000000001,"height":25.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":19,"width":110.00000000000001,"height":55.0,"uid":null,"order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":16},{"magnitude":-1,"id":18}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":18,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":262.25000000000006,"rotation":0.0,"id":37,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":35,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":38,"width":71.42857142857143,"height":50.0,"uid":null,"order":37,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":37}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":37}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropS
hadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":39,"width":38.0,"height":18.0,"uid":null,"order":39,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":37,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":442.25000000000006,"rotation":0.0,"id":63,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":40,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":64,"width":71.42857142857143,"height":50.0,"uid":null,"order":42,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":63}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":63}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":65,"width":68.0,"height":18.0,"uid":null,"order":44,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":63,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

production

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":403.25000000000006,"rotation":0.0,"id":58,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":45,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":59,"width":110.00000000000001,"height":25.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":60}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":60,"width":110.00000000000001,"height":25.0,"uid":null,"order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":61,"width":110.00000000000001,"height":55.0,"uid":null,"order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":58},{"magnitude":-1,"id":60}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":60,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":392.25000000000006,"rotation":0.0,"id":55,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":53,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":56,"width":71.42857142857143,"height":50.0,"uid":null,"order":55,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":55}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":55}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropS
hadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":57,"width":28.0,"height":18.0,"uid":null,"order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":55,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

test

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":10.000000000000036,"y":132.25000000000006,"rotation":0.0,"id":82,"width":108.99999999999999,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":36.142857142857125,"y":399.25000000000006,"rotation":0.0,"id":109,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":98,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":74,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":99,"width":71.42857142857143,"height":50.0,"uid":null,"order":77,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":98}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":98}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":100,"width":50.0,"height":18.0,"uid":null,"order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":98,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2
,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

working

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":95,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":66,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":71.42857142857143,"height":50.0,"uid":null,"order":69,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":95}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":95}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":97,"width":38.0,"height":18.0,"uid":null,"order":72,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":95,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":30,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":24,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":31,"width":110.00000000000001,"height":25.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":32}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":32,"width":110.00000000000001,"height":25.0,"uid":null,"order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":33,"width":110.00000000000001,"height":55.0,"uid":null,"order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":30},{"magnitude":-1,"id":32}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":32,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":0.0,"rotation":0.0,"id":180,"width":67.309,"height":101.072,"uid":"com.gliffy.shape.cisco.cisco_v1.buildings.generic_building","order":126,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.buildings.generic_building","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":182,"width":56.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Company

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":266.0,"y":125.25000000000006,"rotation":0.0,"id":250,"width":7.0,"height":413.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":172,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":79,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[3.5,-3.0],[9.5,496.99999999999994]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":35.21428571428568,"y":262.25000000000006,"rotation":0.0,"id":253,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":173,"lockAspectRatio":false,"lockShape":false,"children":[{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":125,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":83,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":110.00000000000001,"height":25.0,"uid":null,"order":86,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":127}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":110.00000000000001,"height"
:25.0,"uid":null,"order":90,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":128,"width":110.00000000000001,"height":55.0,"uid":null,"order":93,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":125},{"magnitude":-1,"id":127}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":127,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":122,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":95,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":123,"width":71.42857142857143,"height":50.0,"uid":null,"order":98,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":122}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":122}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShad
ow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":124,"width":38.0,"height":18.0,"uid":null,"order":101,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":122,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":119,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":103,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":120,"width":71.42857142857143,"height":50.0,"uid":null,"order":106,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":119}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":119}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":121,"width":26.0,"height":18.0,"uid":null,"order":109,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":119,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

2.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":557.25,"rotation":0.0,"id":281,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":179,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":282,"width":71.42857142857143,"height":50.0,"uid":null,"order":181,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":281}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":281}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":283,"width":48.0,"height":18.0,"uid":null,"order":183,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":281,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

release

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":518.25,"rotation":0.0,"id":277,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":184,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":278,"width":110.00000000000001,"height":25.0,"uid":null,"order":186,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":279}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":279,"width":110.00000000000001,"height":25.0,"uid":null,"order":189,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":280,"width":110.00000000000001,"height":55.0,"uid":null,"order":191,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":277},{"magnitude":-1,"id":279}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":279,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":507.25,"rotation":0.0,"id":274,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":192,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":71.42857142857143,"height":50.0,"uid":null,"order":194,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":274}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":274}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropSh
adow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":276,"width":26.0,"height":18.0,"uid":null,"order":196,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":274,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

7.5

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":320.25000000000006,"rotation":0.0,"id":306,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":209,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":271.25000000000006,"rotation":0.0,"id":307,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":210,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":401.25000000000006,"rotation":0.0,"id":308,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":211,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":37.214285714285666,"y":406.25000000000006,"rotation":0.0,"id":309,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":212,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","st
rokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":40.214285714285666,"y":456.25000000000006,"rotation":0.0,"id":310,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":213,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":594.3333333333335,"y":493.25000000000006,"rotation":0.0,"id":314,"width":283.66666666666663,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":215,"lockAspectRatio":false,"lockShape":false,"children":[{"x":66.66666666666663,"y":4.0,"rotation":0.0,"id":312,"width":217.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":214,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Signed tag.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":0.0,"rotation":0.0,"id":304,"width":33.333333333333336,"height":20.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":208,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"}],"layers":[{"guid":"dockVlz9GmcW","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":216}],"shapeStyles":{},"lineStyles":{"global":{"strokeWidth":1,"endArrow":17}},"textStyles":{"global":{"size":"16px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.cisco.cisco_v1.buildings","com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.bpmn.bpmn_v1.events","com.gliffy.libraries.bpmn.bpmn_v1.activities","com.gliffy.libraries.bpmn.bpmn_v1.data_artifacts","com.gliffy.libraries.bpmn.bpmn_v1.gateways","com.gliffy.libraries.bpmn.bpmn_v1.connectors","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images"],"lastSeriali
zed":1439068922785},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/security/trust/images/trust_signing.png000066400000000000000000002137051267010174400237070ustar00rootroot00000000000000PNG  IHDRU IDATx^ \UǗ * hiCOsH{|iOR jӴe9P戊NJ 8?k㹜{pι[rkt*sݻp lZ-Tg56T @Bcl0-K 6U}q}y3ac 4>KHɸF.Ck  Q=C?_*&   PR%%@@#{X6A@ 3}q8Iѭ|7֩."D;!T};0 ΂@BeڪdVҊ] ,."AVۍłhEh̜͚L7a@kq9R|"   V jXUhYVN   s:  `Lz*H!>W/W~Uށ$|)@qGVѯ{Nh(?mxj?]S#)=ZRbZ=)^++ZRh9u/.7B}0(a8\S"Ab[ބ#]вtG6=}.NM\ɳt5'j52!'|>"ߢ,n8B_JY7ӖFS(d*]qr=fOzV@/kAǓ3hUTSi<1AB\   !Aӭb!8:)âUԨNu񒯏'xRBre}X}h7]Sa!aO[%іIN9dziHۏ$Ѻ}E AZ\    Am1e5qK 85t°vx|!"$n@qAG A8%9.bj؟bN^LJB^l_Ưzjo+vW E @@@p ]eD Jf¨D  Lm BF,9pqhU*q AX,"\  #A-[+.i}@ݿKaQVЌKv=A(qôx !KoޡϟLӖ2t- ,O    Ԟ-ȩ|~}h]zdGx?(䪣, a %# ‰CR5)5eOG>ݞʗ-KnX^uޔ{~`{ytl:޺m$Ri4}kױsV|u’/ tN4#g |2DK$nM1vCD$Ah}aqG%A\t"9 05N , `  &&AK!l䫱O02{rv /G@(l;Ah~@j'蠖EYQZ:s; "B<   F A.m;preFxjcSHD WL(0 HpptbV>;(.9j7% ׿\wnByAmBrtȳ2Oʠsٖo;3o)S p[|E%ppSȆ6>$4o;a-ĴV,+"4rAMjZyǟϤ t,i8,@@JFdJtW-T=VUiն8jU0 N:ߘ^**㸇yWʨp=F)O4Qt G %0j`s/;և0/BxOEw2,=Bgz0BS.&,W^\Pׂ[!,[ %w.dчșKqL]}'^sȏt=7!&O}o ~8(7|6ETTB~Lx B, >ەDa歊Nք i-L' &|ۺx~YjըVcN](uG{6 4 i0,%[?s~dv1"\>m` &\v!OAdJ/ O'm'wlLqəN뻏ʨɞ3,@@JKk1qoM;soQ떕0J!^W/жO{붂UFM a9   2B^`k~>T^NvZ  H߻LɸF++p }PAxDTuxp!B{\ 4#RB-5=rs| P8]RkAUlQ !bpo*hy҄aTMݡΈ{/>~ ujY_ 1:@Ia5ϊbe!{~\s !HCg.;xk9Jw] 4sA@@ KKGLYM\eaS*ΦVm'$C5qCڪ;!FZIt(Պ͏I3-լZdf<ߙ\_}|}T;Υ7-K<0MwUVp%B˧`aأmCj̥*6Z4َ w ܸER/ӱ{ţWكN\U{My @\T5hݮɏHqԐSIYzV {\&qZ=y$1}>ËMFӻ wXY:i@ ∟&<՞;ER iLs-.Hb0{vWvp @@4!A 'aQ旛pEO>StDrTp1ոQZ*:3Ƚy&eЮ^N>2j>JdB޴zir- jwȽ7_uݦ[pxkp! 
B=W T{la},E&[a9ԩe}+ h?uԚ7IUxoQ ܶzB_дNw=GA:{ u\K)*6.pk@-yP-I4:fU4FZ2h'7?eh4hYOo}[JuzBẬR   `x:uQ 4k^TŒ#~ޞYك[.cbsn)1iD%}0 T@N/uB}=o@@&A5B m)vb xUPzx' BMd,%k72B,pBgz0B' oW$מqhZ-~@h `[> C..a]\.laRzUFBgz0BKY̺]<`=!q y:B>Ʃ9J)ڻ]Ns{tbRےtk"2E!ͻ'|AW25 Wrd1vހ @` vE`"WXF&X~<9j$A|D(},CiKßvRFPXc_KQ+AxEJM_ӳP^TY:K ׃@jS,~:G\eqR~bj*0/$?}J|G_vm6N[X|ݰE~:W.ݥ|g0_~2M'{v=LbLn|}hIu tC@@@ 5<:vf$<4:T iRGS(mѹc;w)#K~D~E?ݗprdh^NS/S{߿9c$%3V 4SEv 5`mO >J;W%5p)3i=V‰#:P WA@sZ8Q-(c0'B uĔV ډz6>_z$ |̏=ԀG@@@@*s=3h1(!|p4W" k"rMDr&:vh52e-vu}lpU(WmV68c"zf dQbie#D!7_0z`h}wVuNހX TsOL\N/ 4±3[\4R38Muw䵗1s6{^{Am   @(LnJs5z鲀_-ʫhȶ|@h! N@AUMZ-yPxNB&Y^65kPn#%CX8ٟB@@#Aq)ҌHȟ骫%]7ӡ{ziQx]00; Dq%9"_H7xLz.bױ$]#6~A? q,x +!,CkU>ND.C+f >P lA#ڢ\yT2D Uxx0ݛW % ?8N/iA7ܥFz6 0X/!u!0B "˨fJ ;#Ҋg ~x0. Vj(D9@@@ Wyeo=!B\;щ4}n!(! LEe`Ae  $A|g9%F}m=`Sj>ܠ+,in؅:" j@Y     B~:*WPpt} %oR :__P!B] IDATTp[Ьe$\rA0z[SIb(BB<@@'ASۆf #cPiT!.L8    TziFǓ\ ̻ @B  f&A[NHpH&4nH[Hb3x6ހa΂z0. @mKp7Rx:'8   P2%VஓgiĔՖ׹)=G hoIHB32$ $ M_(ul 1w;*C>*dhƢ=z WeZn?]LZ@B!   `n KY9c3:PxTp6m=:6=E,6qD3 4$΂>`D5|0hKPa~b̌VP"0ڇP _ݧ:I3i$:FY9V82!~0 ݣ@@@@`%Z(_}ONj|}fD> (<F9yFmn(zUPpGAؤzA<@@#A[hgh]T<ĥ:= FJ,q)9cȓS7 />zἠ3k Oa,hm   P<)r/"bRh\*\Rd\Pn':ğZSz>jMqA@IB Qs   BU?(G҉UXN(2ʹ"7WCDU忝 W9 E>~n9&hU*ё.wFdi@@MP"s"DrtO~$nrG%[է7Cd} F6&=֦il& fH˼FY}.8\XժZKt\\t:~a:֥6})8{q @(aM      `|.ׯSvСC>3g2eʸ+L      n ],|A駟f͚t]:y$ݛ֭[G= ” JΦSŊc Ukp-ZhZ^xBBBh„ "z8l0:z(9>3D7nթS/_NQQQBDb2=ʖ-K{{2ƴiO{ ^{5ڵ+͚55kO`~73OOO/+0-Bl$w!D.]/Zڵ믿©SRll,m޼YƍӋ/H=?O]pڷo/֭M4GG1fzWh̙tyZj0[?Ҋ+I&FC%Zr%+Wιq5hDP#i!䟳[hmذ&Gdjj&$$Н;wCtq!-[F/H?sW^Zjђ%K/"$r[FhڵTzu1iAS/dǛĉRJEFǏ/ȧL"$ÿʑ}6={VFZDD͛7~qg sR֭;vLϑNNyBt?x8yH 4x@VKDC[hӶmܹs""cWA ],`J%**T I4gQ8j(zg֬Y#ӧO16q'4}tq UV)D%o߾lBOs~|Gwч~hM @@@&A0*., 4}7ᅨ8? _/^(>4r(wr۷ETÇT[W_}ea?< Ab$#W~_r-]HĞX=޽;}"#@N׮] "8uTgd&b    ] ܽ{gk͛B8bi㔱O?Tq &pR#|Ne'0  Liܹ3qHb"bQ{yԟkG9@;떹'dWT6acǎwgHBl   P,b)Aqg|?d裏pBQfŢR)/_^َa+ƈ `^IIIT^=ڴi+s|,ʸ{ Xҽ|IQ ! 
GHQ~3W!]T6Gyl0̾;ּ%A#77rrrDzXq9UJ*V+q8g:hfx}j|m۶7Hdqhľ0A(U"#|^ss4_<3B4rebEe .Bm8aÆ ,@@@9  P/6l:/,eT:CEbv)\h- L@O"F܃+A   G+Pxb/Um4{N}DjjVVs3-##WHIBs@8 EWV"'[H1     PB%@@@K|>+r%#rgk׮Đ@@@  ip@@@@@ AE     $AhM      B%(b 0 Bn\%8,6lH 4PbN `P @!HOv\   2a2eh˖-*ALPϻ@@tBwտ@܈SRAnf#zj @@)e|p . @y\|222Zjma%  3&@: {8z(۷6mJmڴqoX= F@@@u# `W\tZ*gaX & 5I@@)N ?N{xڶm0z @: 7 UG @<}G5j0°M@jS …A ݻ)88~aՃ8Mid@T'A:bL!pUt!Bh=J@@KZ\  B8*"]vQPP 8E)\@4!A fL E/^$///Y9UhFP3Ԙ@&A0*\ piھ};5lؐy@: &\"x .yԭ[7zꩧTo|-1tPlٲNCu/ԫW/t~F"M.\zZ:|Bl\.cǎ9s7o.]{]I/5j}"ұe*WSCٳGWNlNݏAh/ںu+ԹsgA\LA@W^ywͥKʕ+X(,_*qF:wUZO?*gLĩtUTqv\ # EY  tmT֭[N8a%n޼I-[G82֩S'z嗩QFV:DӧO#G/Dߊ+}U\/^,&I]vF|d>,mVȠi"%K(==]1ydQ|okצ &(}UG3$G~  tmT;ykkѢ^llo.yjN=z(\dJ~$Qvg8b3,֡C!Y 'q~l<IS;Ccp"--M';Bw~=p d ,ޤb.111æd8r jՊN:%bTbE0`Twޥ0 (""{=P6 RWYZI&? r:)%]6lWqA3fZ6uȐ!b\ǎװh!1$$D |JNNM6Q@@(g@:C ׂ6\. [& 'G.n[l\leLIнK[ghmjQu"%#ڸqc)?(/m׮ErjӦMEꩇqEG}v)RNDg&y0 Pjj*"fd T$A"\  %$rA8m4Q؅JB/|/~,$ۿGժUm4f1_QK.QhhURK9S~^Q*it ӧhGa[t…3ψa+E$ J %%6l@ԳgO{bR5vİ E)ąWTʋ؞\vq)G$ApJrf~aXF!' mp r?7N18`M#ׯ?Õ*UV<1QtlxPsԪnYr)d*m=p8C=k/=ҲĵBl\p;.UDgaÆVr8癸o! 2qS>9rgr?A ʢSOylO"͓M^ Q\m{>Bʍ#,Dm}vO,R.)o|>V8_,b1dd&n) Y-|l:Z,%Év0tasu.)A@\IEY,ׯ/QE&r.Uc9r$q믿. 
[QLYqAŕA'M$^TnC&x峍\G B{s4SnW ܸqCфu)ҭg] YˢiݮB &JԹLʱba8iDrH; ^=Q}&%$gwx斂Z w#B۳w\iT~N_+rزg]ꫯD?tR^wf۶E_l}ĬBv*KdAz&l׮]K5jlY u)Y^+IOQTl2g[ Cs~>>4z5dܤ$u8I[z>]-كA ;  @3o}g"4 ]2ƍTT9?~8'Ԙ̳k֬3gZE%?8 uʔ)R:LΝ;[|111QD-z)6eT/* N+rĞ֩SGj3#[}i°vQ}FZ CNdqcƳQ}-VPQl׌E{PB~"5{|8>^K"NY9T"K/{9q#D[Q=YU?9=#Q<.mc{=GMr,s玨4gԟ4lp/M ==]qLJt nj"9qtVN b95o~bq(Y#AIu`fVXRqY.BGip 18Ip }ߏmի7oKpuF>ȕJ9-88_#Bɽ 8D "G4Qj7 H!I0Qp,ƅcd9gEr#fCjM@L%y+W'x9Z*G+?.r۶mD˗E{~o3˪8e9O2NG OhhGU܂#^(;ceQ}[Ш~-㔐a 6P!:s(|g 9Փ{5kLD 9U9sPvvw-\ ܼySwPhf+jٿ583"-U5('44#rnZ~:KZ-/ExK4 a&P)0KKYVs2E%@K8իWE|.mw;h]Tm]A\hƨƽYJtTUٛ-fzIv4*6S Ahm"@LFdjK>e[v|>rpNHŇ֭[u+W vw #\, 2yх\H+*j`|8OQ Sk0 K@@@ctH 7h)KϢA`sظ @%]Ë,QY*pX#k@;f@p9\#Yɧje lY9~%rn*E$ bnrY*WիWO 8<5Sh3Ʌ R0b=:ظ Cm6+=6+LJ T0 @: w(DSB伽& j& jP:SYӯ\S2X%bz,bC37+zntRQiȐ!pKNmfS;]T$O5j yUVE%nܬ z3>8 `R&X/CUH|{TWpb4OY7h!!,[,կiwGNT`SޗШmPtQ)*sR  P%{JLx+IVlޫ ڄʕ-S7޾sm>F߯?Lwܵ| K/^<<`V%de?9rr)QX<;?s!+aTl2Mv>Smզ%a *AT  $AXJ1ĠVb<Վ(tll?h;SOl AhYKMSB} @ u.L_/nE+ ܲUJB} po~ʂȋdㇶmj2$X^"6Ftp/F? 3/DzNU /\FG]%E7i;-qeSi4~PBum ,K IDAT;uvSgkT'&.'Ox<.|^sJGWL"3/>|w2hP]o vGR|R_Ji4Ն҆l!ͲX@i7uy~hB]%Jܛig3j R(C 5 ptq?=#:? v |coL[T!*M% ER<Edd\nM ]~ܸȌRQU斐RdK7anPT1&"A"HzJu8>1$%#/J)lE͛7iذaTB-'Ϧӈ)gڇ֥GvTgnFjoɤt:y6k 5MrV^#, թ$X &Ga%AZ@ s8y]p=z*>];Kœ>~~n2*ѲMU"Rh+Y0q<Å"p]TK!\ujYWz#>ZmJB v? Ԏ5fG @:J 9L@.:_KuŅ߮=HK7SnHU&㊵h1ŋ)77Jʈ-V{ssq) ciX(͞X }*; qCڊֱ3"-)tmTl2q );h  B%(b +cLɕEW5 ٛ׍ 'pQT‚uC\e%FI1kʂG5֩N>gssx{8r҈T{ݿS0lې|}d\yK\~AoO@P}h`wK;bxT-e(''L+WVcK =&_Nn eBF*~"_<3")֗[)CDD0vbiն|-G 9ͺuw9g?|@,=+[YMHδ:'(brҳD+L_ @!E ΗfD1[Ӵя*:Z:k#8sI |UWW{Y}6իWʕ+jwt5??~șQ B:  5NSǚS=4C)a[&E֭K˗7$ ~mS7+RY|:u.Òr) 6 Ѻit8!6FJXx(t4ĐrZlNAu"*X;rN'ecŭ,T9j)&>r!A~ RjFu-|9P|*B[ލv?5wНwk+BA [ӀNX[7>CeV+WPjժ_-v[ =!=Z Q#ԀN  BM0$rAh,rܒWmM+_)33q^ݺb rTk°v.S4ܞiQ\}9خ}%Ed,{"Ԣr[[r 2; gA@8ϝ" 66#[99=a;L7otmN-Ġ#(AjXe+'g|brKĒ Q<#3- sG) L1"ai ~+\--unU~&]5!a(` Yөo߾VlE |dGM(|SAsrż"rHd< }4q'NLZW`cfOP=2hybBCZ1u dXOA1pL  t.qhh9~ gQ{f]>wR:uCwm︪;Nv6ޙr?`rpl7>K8V1rZTzcz@: W &s^`Y)O}U-^˥VX|5oTɖoх Zjl_T/($qϊULOyq[A3O.%,? 
T)F ,-A_&K=<:kJ[~U+ƍTvmX۠wIv0;IھB>4.l#O8 r;n57[m7  `(#is8?IA~~~lQ#B\mra#8'A7!{j[ͽWuDq6 Y@@µ=/ɧj%+VY]>m`=ԴO󥤤ߟ*U羪M˪//"?'A(?8Sv]Br&1#6jb`A WOkAX3]koi#7:E7n$NѣHu#Culy8'A(ElN G@)| @1(@P iꂝʗ]rwmmF}RE+WǥK3"Z]]EI.ySoM;O4LyQA>w/`|ޑ=⽯n@j# kJD@h׈dM/K{NtMwM4wӓ dHT!WUDySy/OyaDV\_5B> dF@UIpЃqI~) :pAQP]"z<'=Ahۘ~ȳR7+ߤۘ^N.;II`@4 AdwB^rK_*.Er!  :[lgR.]^=}|VrA *M\abOR.e>'A_ËVLhWA02M^w8CZ@!A g%Z. 4~ xLQ%Lrro[sЉ3IDAcڵkKUV;q鯒!ZNhq.(9*7=׆Y"y*QB"=-VN_}v.yGY9yB/BgZ  B5bL+-:Q%r{>LE.Om6JLLN:Q "+<.hېթ8ס4u86F6>sP=jE66E.LaӇ7K̓|5\@&p2MW*S  B-(cGXנ '*g?E(كyΦZj>*Fl%ŭ'8Zjb{7 &,55':iq:a%d˫z__(}4ҩTѢS򪧶ܘ'TbQ@ M>gιI yK&j;VP]oєv /Gu$AXJ@T AT i(~_%zeP)P8Qi KN|Ν@:td;9"kɦced[j_j, EGvY ]¡J)0kBÓݚШ~-s1=%B]ny>A4fTͳ }N bt/^HYYYTfMr ?W1q)?.rFtdE`S;Kw-moByAڙKm<88d[Q`;;f@М ;#*Rɣ< =1z˕\uG8]ϽeYGfOpCP 4](>>)(yXt%S̉ cJ6-~̩bvM4Gz=[+s@k?r:ܕR0ٷO^R:1KivlQX\͘Mb*ğpN~@d#@qp w;ʭLmáTcgnܼMݱtĻvćBvB `~ύ@eaسm#jT=>w(m{Y<`s&i)!s@ ͼXmB[US]ȷ'T$.Ir2))*%Ϥ3W ]m 4h={Љ']vb pH∡=E]dSnl%WE<\ tm@jpJgEq)=Lm@\q5F(K.իWFt}n0ܭ Q g1VX [^RRT=0&Bc!f7AZgSEIo;cyPZƵ"E#(y,g|2t1 &MqZӺt]!7rZ30 N+v1!0|[S! a PB<  ?y$E'8=8E :dY97pBEܱ@lR7o}ɫrOdb90D6|ʕ+CUVun\~MYSTz5uE?O+7&␣G TM6<4  `zb.K,@tE\N⃩Ƣbƽp|OԺuk uF\{gY>{WXE9uRT &r B`Ǥ  P$B< bi8$7J:!2~>mr(~'222MժU+\){bsOG4+G K2~:'%!d,Bl$ `*N},?K/6SC' kGCt᤽(Dљ(&&>LZf͚b?D>NѧiƢ=G}UHa 'A@GP{bhE'8*JgH{ >QX!B=uN҄S9%B<  ?C{ҌHȠHK^d1 'y!:p5oޜZć |g9qO׆=Ḻߣib~n||"A7   1e%Um@ݿUug (ZWs ?'BNBU&sItPݠ+(+'Wm(/   @oO _2UM)poLtNPjhSsQڷo5mڔڴic5}e2򳄈 SBXb$PR$tq)ąd$dLWTj WOxR~+WPzz:UZ||| 3,ScRR.ڻD&o Z30 3 #A=1G䩣kxiͰ,Ep ڳgPvoEp , +Wp7ySsqA[5o}'A=  `KD A.d!Q3YNQիWҥKt}Q5l߇guG 30&AՃ>^-癚f,C܎w@=\_I>y$EEEQƍ}%)D@<З>UK?<.Rzf 5vT"KeQ3E'oCtլYS'S#AH [@]@N+NPNtQ3"3lppBBܹCfvC8^Ygߤ1cI}g!g@@YnkzPV \)!9yq  @(7lϊ{ٿ5\q~щ4}n&Jӧi԰aCzGn]4uNrA[OeҩL:J,ChC t.M @j<rYl%9M.\ OOOUY~4;CN'ZS4-v,p5qZ< h;WezuHxK}. tJAC8{O\^ H|Vq=n~?~裏c~ەP0,_Ë:G&B>c "Ah҅"i=!B~( ]vҨJ*[|dI}EỉOPU r$q=oѫU?zv,!B=s{5ࡗ5դ$ڼy3խ[vO;5Lcr0.)],dv //XA8x!B=v aК!\2cI704BCokw$-ZO?6nHkצ=z!Ĭ $Ahm &'Ah VcyrA 0osrr(55*UDyi0e ܾ}_Nʕa `fI@L ʹeɦ=:Z/QތeL_/Jq '55R^SJJ mذA={:|.tiРAL986@):v옘СCB:kk֬)UFٹq @! 
t=/8,T-JjR;]2E>k\IqŴn_@#W, !,CU-+V|._LsΥoOQGmr ?~ڶmT7oޤ5jZ*f,' Ahm nF6\r vZ #! BI= V8lhJc)H8,իGB Jv](aFO?D7nթS8{n~Zܱc?[ȑ#E0!.[n-xEkjf͢f͚ٳg髯׍1 IDAT7n,-[*Yvqz-O,1ɓ==zT;m41nQCaB&T, @  [=Qpİ}h5UH`P|E&["r!iܸqCX(,S^x9B˗_|A[lӧO /C=D-[6mВ%K2d]xQD;vH9sۭ3f +4sLV8z'i5&ЫJׯByveCHec-[BBB믿nݺѤI@Yz"f.O@ ͱ.[/K-,aRgөLq.AY fhҳ N&t针qj )C_~ Qf+XP۴i:u:t :iѢE~GGYtqHN9 'nm6cqg; RLL %$$3}oߞFEǏgΜ;. h2zI1믿NK.%^# lEzVA̳Q @@IP7_+E%8<ɓKY,}קC㥥_t,b0ȀR j*rssܹs!E0e߿_r!f+7{=sz5>G Q/[XP`da`!$ׯ,kFJV޹sGB .Efnݺ%Kr+Ui7|SD'L \sdLa@*liB=|B<*3[J)E S*2 /jGsn_8(믿 +BNˌ aԮ];>|T,8ZǑ>L,9ݔqs,߻+tMo>1 Hdqɩl=G + B{BPJ>8\Y S@!4zR_1'RĿ⿋H/b1Zjoo-6\I299*TY+3psd \MWv]ygDt@ȣnRJ'G Jg/91c4:q_('Wbw}%pƍhԨQy;"r㊉)USH@lHj @AȥA$B(0s6,YDɝ='X[b=0aE_xW-0a\fx$NdV0wj}֬YwbbbtRا8*f$IrAXB(zjьXxeY~_}"Ǎ_lH(-? PpR4$q6w^p]BH)%ItW'ϱD3 (]%|$@$ BX%  !iB$= B PKP-1'PbKJH@  B5H!@AgB" ^FۨxoL$@$  BW(1 KP_llA@s2*<& p+H%@A/oF 7ߠH`1q$@ Ԟ1[  (c~ %XL 8#@AN$@ ԟ9[$={6$ѣL$@$  BW(1 KP_llA >>hѢbF9Ї> !@A (̙<5 !!!B$@. t 3 (uHN8Q^("vHHubN ;w.1b D%LD$@ 7#{8y$ѼyscP (5GHH@5 BXH`č7ވ D%LD$@ 7#{pXcccSQ.(uFHH@ .(.$.QIW,\gϞ 7܀H_6I³E@A?& - zv~b+*EIAHD F+3HHH@QQ5k@_ې@aq)2QR]Q[ CH`]EAhÇC"<"r|mٌ@ @T?)IDjj*z4lО|rw3 ND򞐿$@A@$@#`{A(B0)c>ޘ y7Ha RN:BZ-5klMDd!%ȣ/R!@AU@$@#`[A(} W9uy6!IXd RRR0dDGG[ "9F(U-Qf& ]RO+fDDISrوQPPM"((оqp M,@aqwݩY9U Bg+?'  NƧ+w@Jr<(.*Xq*Dv%l2>}F&M:Lˆn`MK BWW ~l#OBzbⴉ#/,(v=aK [t]bU5S$+b088L]c_HZH,T !v1!>' )Ͱ L6Pr_|[Xz#Ѣ߸-D+;P+wuŊ2W_}XLkEQHA` З-DMřHn=Z <;qOyfLjaƸk$3݆ϱjbex(8 X%խ6T j/ ?Saxl1ǘy1vLfHBv9b:l]mC|)))Att4´h:x\ 3&3SfQh <.m'p q[Ykzٱ@Su/ 6K 9x$Ԇc3o臊2s`]vDAbF  ,'^{Y%DEp ; w l.MG' ‚P\RջNbk "﹦+s!6RGR0y1wb+s='q? :"_VosQa=_br߬ۏ}C7S鸨m l?}6>1KiC,N6Q2 kuo޼^:-z<<6)T+π7eu,:`^a6me?s 8$( ^43Щg#EV5Zf!rp$,ܨ~qC6v4.ۃ܂"|k ZDa"swn(=w_؋(BlXYbU@Rz.nKG3pǠ.^~^LS Njwa;C¯V-:*"b#P gn»uw\*fإtΔ,4l;Mn شv3;TIԩ[vTP]GVFFNryBqe뵛j 4! ,%+xTPͰCj7xf%&/بBl{fRx)BbMJ5|z;G*¬SH<}бcGC3cޟꚞ ,o5L=U;aoJ°o{ "(>?  
XJZUkxi&D F^Abq藣h9VW5˟뮈?Yʎ=M&y%(-- ?#woFyenz}Q = ~| >䭮ycԵUSK8E:k YͼM0?eQdUAkB)U[:PKH#`)Af0^~Luoʥw_׫|Uw*Z U#"řLH.-*N]$DZV>"R'#"Z92*iG0 rиA(z<4S ahr'PR,c`ŭ*>|:F*u:w,;&݊«O~* B‰0[ =ɖډqέ*mۆ{/FΝY^+3c^*>zXnC7F B'͓ @5l+>lQa@8,zaAyZ)^ɱBO:"¯yE.}FI?|rclޏS~бވX 'gaX6{v<1?8ڲ~9k ~xqvoXXib Prw^Qc}N)e*Ze@#knH8[ɛp*Ґ(ԯoqSشfzπ~* BOfeIH "DܹsvgxsJϽ.nN|?*@ {MLƿـR_u8S3GK  Ę7.o?C9T_,7ek7K\=k'Õݛ㽿 @bJ]nu|qF<:'mp?X50yPezf<+VlUA믿bϞ=袋еkW]XԈǦ?)ܯ]A$&m&eh!lr,$@v!`{A껟a̵}h̞>;nĔo}x#w?CpM懸Aqx^|6Gܶ?r7^g0Ɓp ˱S?sƺhX,{-!+qd)kW~uѢ*+;UfŻ*L֭[)Yj) <^`ߧXwjUAxYddd 22Zc~=!M{wnGzR'9 =ZS Ԃ*$ ^$'%^}_l?|[[XZZWEq b8c:!5%7艅wcx'kKȨF8s9rԕ{}b8,Ow&/퍡{2zϵݔ{z9qtުpΝصk.B发IOAcޟ餙)’sޯ9-̙fc! K B5A/ />P>gzޙ: E}ejlݰO7yh-:vn)R~0n IDAT>x7 2Eɐ4{xs| ?8nK _y LOOiРȤ iϴ<%琔Y"7PFxPHHGRPK OWG_C60^}aBb!\|1͚_/TȽu@aA>JJJٰۃncn"Br,.ũa>- ܽ{7v؁nݺgϞrf=<6¤UR;G+QdwV+ T%`)A(_ \EK{IobM ܍[QZzķ>X\kxIXؽX(V.]i "$4>./B$E!п,6.M3 tX#42YWIg@`|Z$*0Ek:ydPlH%`)A(_Ө+kw&[ncJ/?y߭g_iV"%9 O{/,<w6. dgᕧ'Es]~`TEDV,߽WoWv[e3m^ЪѥKY3cӮ BG." m-ʻ IHXJ< haa1TRQQ! #%b4C*Xޭ9)$Cւ괪 iTbJ,B##{vmπBY ^N-BhxH@rPM,B;L [{*ۇ_~\p.C'π=nܕg@+kǝרz\-VK$@ Ts.*Xs >UuͰG hT833iiijחuZh!h~)Ͳ Lr{ˎn59 V''78l9?]Ͱ ߿[lAǎѧOCǏ"qπ/X E (cЎ1 XZy3rB] V'~k]g* \lK ¬,^zhذ{7;wy]ZA(Ee(~ 8'`IAۨsȡ*HAh9VƍѮ]; #wq BHt"`IA(l\ NOX]fgg̙3GF4$j i̔[Ǧd-`禡(: XVbu _G h86<2F []9rׯG6mЯ_?񣈡ȽҸ;X 㢂;+a%e$@$& BoBzՒĖqՎP-1sfXQ9,aaaֻJ0ۍk 0)в=hDAhHH҂P^rtTVH޲ :bٮZlr1[ZBl( vZ>r<>5%бqW>owڢ tː ,-A"͜ąX}7a3z}r3B=h۳ y' =9DUf_ / 4( cZ$ B-j_֛aG5k ..πֳMz=0ljh +r 4*dH|-LbZN Me)t8Ufؚ^a"99Ѫ3&epX tĠ,B B= 6LBFABJh֚6(rJ⪫v3 i2 gdi9ņsE1HAhcHHZ2R=o6m6O#'z^GiT! 
{syyy8}4BBBиqcoV.3<;0rd;Z]#0Ң3 " B$B-P7ÓĜ,{ {cX|96mAyJ0p>^ πHr.fDAhb_I|mLވ)΢3z3<3hw{T aOI&TqYpx#M.Ŭ(4[+  tL/y޹7"G~"B vlP=9.l25\/0ش 5 t\ a(ȣz@L[P_lH\!Bf(rG`TX+lucf |h@ßTP=ɂȱѠ بgǦݛ} ŠwDٟVCe =G߸RƱg$@$PU!(+,UT :u_!~}*'7=^Έs JdDGGcȐ!+b =^.Ub7VF,+Sr@4 :e!'I)5*IHC>'ğ)V^μ̉' Sf{ٛm3LAipMO2<6ښπDI^QهCWΐ?f{W7K/EA}H<%PџB/ug@ˎCɘ!YQ=3Y(*.EXpZ5?BMKKâECz^5`8=I wVjV2i9E^yW{BL j   Ic (iA C3nk:2e&/٥88ZХ C4J(=G\XXSN!00͚5Bj03`RoǑdmebPFKA%]M$@ {{d\,e04f#ǒ30?pڲ!>y*ɻ}vX`4haÆy^5 tm2pdTK!X# 2}tf\3d $@$mO+Z W˾IDrwDdpMk;EwGmQ::ͯe=É8u6E%.mF ^릅  MaIDA3q \?z&Os`m*wFbPlS':}$lŨ+:8I BO蕕¼yP^= > =l@mpl'g7A=[!4H۰6.=pC,JEaF'ooCA HK6P^Tnj.βĻ|Z'm)]q3s*..ɓ'͛{Re $ħW:>{̤ |U鑤Bdi%DE# BI V:y˝-m3#=V999nɵAkl@M~GR졫qA(PUF3M܁\6l^=нEOB B#V $ X^U V.Bjla:qWB-Z6hrI OsJ1}pMN!]P..ʕ#+`H:{r]uƠn;}?ICdv <*`1+ D҂P?kAWI`\TcRm3\[`zsX]aΜ9 Q\} 4ggеiH(DJvaVA롥jՋ/̩J6l(jv,4oWgkpMēî ,K £gu&J" zZRlc Ld1$4 EmbУmtlTWRR'NB#םVP4FND nnaXv^3x`(~jӺ̞3 " B"ЊɛNg(1g~O w 1-z{iThMF?kCͰ}XwAA[a̘1z5[m;f{TƵglNcA/]ý@!dcӁfkpMēî ,K B+meә*6f 'O[v^!^ D֖$ۜgSbUM@YRǣnݺBj03M7| LH?R_oƐ^#6|U+Mm t?' 
p% ]'ͶR|x<Էo?:%Z2SZ ;q-+f{LشAq#ЌOAx-yH8ʺ[S:Tg}"  ܛc G2jG8 VlaZ1`Ǡ.sPJSA Y}s_-[jӕpE2.gy?Sӱ޷Z ]aZNUШPG~]0#A3GNk_cTSP.f& paZNEخIm m BټJJM#:2O_eڨ(O1crdnBj03 #Mv%C#VFPT|'p1X1e?g B5HHUV,d8m3\ Q'N`'mŭ0W+1ϊIMZ v7URKr KV<Ѓ h)Zʫ;6meA(2 Mm,{Eծl,>˶ ^43Щg#Er4hwB6QamTkbmrMD+Iae`bGBўXJf?3y BWfyӧ+Ə<9 h%ՊAr{v+ B+Y+.;UC~5>Eud5odTo ƕݮIҧj B}8 PCRʡ&LJżڛm3\U>{+oSwO]V2#w;V6ήԙ tش8Rct6UM[MJy =L=U;aoJ°o{MkDAhdIHRPȑ!_L!J@{YĠկCAGA'H]BC(w Jf{j">Xڻcӣ.p^L|GVBB ,oFm}`DӚIA)^VN$@n <jBY ͶvX*dQ`o,˳Q#-f{/_,ݥxlڙ*86mA+VϓpD #QAm @,%7\oyn˅{U:of?an_B _Sf ;2j8>m|b+|dgaŗ]z%%xIZCsX cMVٽ%g\y⼢q񢹶T-Eox9C(fZC&m׬ VL|'?W7z(zX/ @U>'܎=;a_UXVMP?ϐk~i3tbn [k|F/uo6`7ҕАNx({g͚B;URfشa7⮠|ܘ0~ڴර'Bz7E&^xBU-He%ڟYvg1,L8(,k.VJ l4g̑Q/BYϋ' DbγЫ];X2qqqt*SaAͺ5 \jǐt!9УE5r B#M e1-0TTFo= _.kA(wBآE 6F?u ЬY3yZ)FghAoBb4qC')=$@$K B`u+aMqݙYQ&׹޺ KAl2$''kALqǕ;?)YP4 5`qT:|'EO{qQA 3AW&I%zPf$@$e|F-Vrn&?)]x=՘X#Œysx aӦMlG{?\k@.xv%<6I֠.1u'xش2 -xLIA' B0G$`EjGG/_k+hE>&˗#))  B&M ˞cӆ̀wIV ȱMoE˘z*p!S-H>vl B0 ,-URFEI^rWP2]$&&"??_!!Z62?شEǦZ(!~)sK")I$S,/e|PbIUYyZu-\rlR˘!شf>}l: g %fLpo"w B3BH BQ 0 9.g zpX tĠ9+ sf(z IDATp>mAXz5N<+͛77C 8! b0!ql}Q R% s X^F$:1:4ӧƍ#4crT$Π/|1B苳1 xef_֭[ǏhٲMNДHyGF5]ЬH$`KA`^{ a7ٜQ҇|] 8diז \=IHLւP&O)ZL* A+/y[֯_G_~hݺe=$`-aH@]%?R( H"7"0;D8꯼%DNa!FxxmD #eJT[ wE B=( m 8kťüR\ҿCɋ]qm9/ظq#>}m۶^ՐT|WHϪ3 :uw tΈ9HH@o>+HnΜ9,4jճ8  PjU (=$6oތK/E}M$ XJ$@: 4!n3(gHH@- BĘH@!/?%\N: 8%@A3 (uGIRSS ~GA$) BMr p[XH`ػw/zΝ; 8%@A3 (uGIҐ(DDDcP (5IH-nac! ;wbѣuF $@$SD@$@ 9${8{, cP (5IH-nac! ~ v…^ݻ 8%@A3 (uGIBH tF?'  PϜ--ٳ;v@׮]ѳgO[ Ж|Y; Cj,C$+xOL$@$3B9 OPllA@bJ,BA(HH Bgs П" ؂ Xb:9Ѕ. "@A 3 8_ЩS'\r%C$@N P:E $@$; BݑAL~hذ=Q hJPSH"@A6"8x 6oތK/% pJ)"f   Pꎜ =dee̙3W5jdAq$@ /+' PH>7m۶۷/ SN1 NPwlA ;;)))Gtt=Q hJPSH"@A6"8z(֯_֭[_~B$@N P:E $@$; BݑA0,, 111GA$) BMr p[XHXnZl 8%@A3 (uGIrrr Z1 APlH TNjIGɓXz57o+\HH) BHt'@A;r6H ӧO#447Ǡ8  M Pj [(B$@Nʕ+ѬY3\uUB$@N P:E $@$; BݑA<$%%!$$M4Ǡ8  M Pj [(B$@r\~R<@H'dTgH@]խc )Au w Pz'k#!DiӦ>3n7"#yE%+,E^QiCH`]"z\p$@"@AhboI4$IJe˔\siŎJJ!-i9EN6"BO\ $@$`>,Aa! 
R0U L*BJv俽I E2\$@$`>,A 55/FÆ quY$ T$JZFAr IJЬ3~ @BO"66eH29YbZqnR?M"e)m; PZxu0@ZZ~GDEE7+l\& aP+BFn쓎g(X xѡ^N˫ي@aa!h!լw0;>)P+h$v1!>' )Ͱ L'l q͑8P\!"}  ,X6li d#>c</B BOW ˓ (%'%xc)/Jh EEE8y$мyskt)KPCx™vh &@~Q.bGףu SB Bz9X  A(BM6" IYdggc޼yýV{rq Ӂ@֍0ָwa$ *--oGVbtHu݅-+w kKgsyS 4崰S$@>NւPϯaH?Q>4b8qhѢ#CNr!|1UPY~=_z95 ,#>.Qj7oWgwzg{$@$m MH/hs|sQ(␉N ''sEXXFapE $6 s$P7ٜȘD n !!"|Qx&3LOuZ3P}? _$`KA1eT;X0] >>~~~3lN{O>v@ͰϏӘtc :QOOuUdcӁf&Ms BO;H$L+KJq>}Р0m Czބ6M: (Pr) Z t;F;TxC5|̙35jXȏ k {N ݮD~fLjaƸk$3݆ϱjbex(8 X)a^a.m ]>r+A +^~ M"lXHyJyӪoݡҪ[ZZǏB#ۧM50%I: #5"+39~g]!Qz2,K$@$PK BǠ^8C.!KŇ ʼnd^S@:ˆ`45b:YMvoQN|͆uM߫] |t0O+N~>Ǧ!/LfHBv^asNî+Y+2q:c{W&0˶OŲ9EUsT3 "[B8ѻ j87AIf&0.s3zl}k̮lꋚvΝ;cǎ)G[jW__>6XqUPTR2O³ݓVxu\T}W⎫;cO{p.Xn}{+EMbbZK%D5{.(Ů###'aon[C>SZ~ 9 0 Ѻ7EH*s$*\7 zb} ن43fPmfX7Xjo~='G.K\'Lt}+Ch@,{e8?`0d5 ,oўѕ'S`i0i 8:vS5' B O$`Gw](np?B5/8 jcP!VÛy Q=zTl֭ ڋXAxt.|hfyCpxH>nf'GB bB%{\t-;޾H 4O9=Q|9`ݞ/܀ [G+er KȰ VJo>XQc}Rzt\6KU>]K!O' (ui[V zUhLSi! r߿^*Mס?DZ?ahA(`txMbޡryheAWeVqƩ 01-wM"C7nmNc=-(͇qqƸEn?/;w߮sŊ]"6UNѽ/.nqQX $▿tT9z:w ׊U4-`~x CJ?$(j>2"Ro{{)l9Š-1q&;WoMUT\FY׭\zExʎ_\kG> q GAhbGI|)cѤکHH?缀mߕlo]b)UB 1Pi,}B(m&xc\gF]mb⌍x+ĶnV,>-~"ޙ+&ؤTǧqd":5D#)Ǖ} zwh%K;5ų[/=tä;1a~AE{s(iusExYCӼ0n={>XUOIce,..€qX~=-_SHH$`JAbLjRZN&$adۧ/ʭ6op2x-!=!Pi,gΜ ?q뭷*'Hzxi&D F^Abq藣hWMrf+OVa2o+uzF^aQcBjvH]/Ǜ퇦QaʿP۲? 7\Ni?l>?^}WVW߼M1xlZŪ)Ix"^fަC2eJ~C Nz}"]4k? 
pp<39n3>N oN|Rk[ćo%;6)D)BhXHBܣgW"u$/ 'Àn- xqظ|Eyqm8\Crl4l*_9S?7ߋ{/>~/^g0Ɓp *N8!wκoSZna  `IA;!%܈D^ 1PyYYΚ5 EEE;v,*>ks>aFyGD.}FI?|rclޏS~ EoDu,Ǔ0OE=;mKOPtmo} ?8 P,r,TU{1~~(Y;}'Ҕ2-2} X$MU˹Lyb!,,1wyuo\N92yEԔd8'`{o?G 7' Qp*+neVa;% B;&B$`%8g!㢂ϳ!*USRf+ BC('ZlGFJW8XxfxH ƼO}/hVsC1RTK⍯.8zYMװGOgډpexoADgg[_FCx1O5'w ' q zbٺq}Ń?4'S1폥?݋Ǟ s "$4>T[w!vK<2jxH@rPWdWұc֒ 5 BO]{ZޕqԖGVUky *c #G"$Ęiz|ƬO>^vF^\^\CMTxM_Cb|G_`ڼޫ7V.Y'UfXx8l\Э^yz~Z4W˯ޟ3Z㪗iogzf[$@$[ B>ym ܰf9HKMA {]{(*?}y#D*džt9JLƿـS!yJ62[Yǣqqq#Ulfm_Okaaa1ԑ Q8A7?,QpHU{^]*OAhyK$@5 A껟a̵}#@m;\ӧ`֍b>expMl֏عm3n]]]}vǫ-Cܹs#F ,֛r]= _ B2H$ 9) o+w KKK]o\Zw ƾ; Or\"g}b~Ut\Yz=qѢE {11Pe=FSGF?HAhb$@$P=37_|w_{_wBqQaTn|梵ع;&n.]]q챯4Cjj;TGVV2Q]jd* tt"w-vUH,'9»')eGw+\p7v_(V.]i8 B]%Gw6f aÆ!""nqkzh-wQǂk+"  .v%9d 8L)S'qJ.Ss7~eW ';KWՍOfABMTL̛Ǔ ѣ2p}Ux5JnO׿;sPvMvYH,' K7$.882&wҞ^.ǫUP7b#G?"-- _= o6}6+AhB9!(< U XN I+**sU*HA 3[Y&$$h2t IDAT 43!=nۂP:$¤Bdy?+~{3 X%aBz%_jHmVA B4͛ʂpɒ%8s F7 tt*(,9q?@>ulR~/\6j(lHj&`IA(A5bNVP9SN͚5CPP!] 4Ǎj)sb-KX ͞f*{ BFH XRDبV!PYzC~ OѸqc=< BC{ܨ^_> &-X ͘|`Ef\  L)B|ZmOUS׆XKRJ(M"88X˦j7wX rhkbPSe$@$' BS9#Rۢs*HAhX VXW_}" HFPM#ֿAS< IH@#rPrFh u Fl=$I& 1C"gU\cFxrgP>bgc&0;K Boj':" Ub͑ Z zbJ #׿=o _c{_,v$JB9mL:q&ƽ@YV9f}$5{Rf!ĭIG(2lw[av)))Ftt4a)UAhRpǑd< p(>пR!@AU@$@#`[A@-2 !kE/wq ` Vw'qF>|}E۶mݯȃf[M>h)RcrzP|0 IP+a(V@b4J:f [m7ÆS~gΜAVV5jzyZ[Ͷ]YK3plJ $6@T`Ư-ehhRf =XӼhL3? Fg8 xS{GDD`Dr'LB̶!*>;­[bݻ7:vl6AvI|l3ϣ0gK<9"iO8[gK\~Y9 #s"9.p'.BRyiՅ_:; 7XZiCnyj7ޤL4lf.e'>qo屻W׿^bB|NR:[9 O'a?Ū܉{]U!$!@("i@Ab{];H)D(^MHSY;go&{fOu/Y_ehb_߸q#ك.].E48k#l8\?_<-FVZ v\#3;B BO߹lG$@#`Ahtfy v^ B_f3J:q3 B&¿ ;wDǎѺukC`,럂dzKvnA9.g P(0Hl,-%yLBZD֯e ,;7.ҌQA5! d ᧈ$! BڭѵōдN9ٿA)F2:FqiNv?CQA(w֨QQQQjtۆֿ; Tc"*L#@$"ܔe #9~`Q B`^v+V6JAhLI@TCCI\FL)%t{:G߅]TOA  Pi}ǝ= zҳ0wL[dWX4* B93bb5vJ jHOOGJJ #NP׼9"ēKw^aw:V8%"2gTlsgJ4-k̮^EghtfODvZPf$@$ »KcZaozSNdH^LYlvPW\7 Uب@;.]1`h3T-߿ׯGqWjuq_F^f? 
7ƹSI.#9BgT0AC4cVuwp^|1:va5J|^}'f.Y\_ЪOP  ^>xi[<(peb{/9j4D{Bz![~!=//s,q1:CEuJ`PPH*!`:AUER*g_VqqqXf 7n=z4F.'Og]tQϤ2T"EN+(}0oVȖ̜l ]sg*Yj9?bx!BQog!ܺ,^zbA(g =x31{s'O$Zҧ+-Xlu|/s51,]Sc Qĸq[)fHH"L'SsUhQ^hĎKx~ Ԯ}~h_kOOA9a%Q5(++Mtⶁ=қfdkf/@E%<9V1껟+0=i""wO+E\ٹ?;0”+XRx/$%9аp%rYOi;s;ae끯 xOtABYB=;.yѣXj6l={f{qr+2qfjPS*e} WçrX^$崙Q"]~Ms* o|X,OUڕK_)I6Чo|5|⬠wDL_LE8$S![=Z%BXzuE#m̱ t7kHH*&`:AhOT>%s!S{:Wu@xsGF;C%l[mtH#`*A(ۉ$**,?"ll0~,ޛ#y^^ƽ8o\?lg ݊E Y.-u!+{ %M3B fKv/+Z(b(e[fe%??EE jyftq|*;7zjA"ZaQ(!S B_|J+fM =8~\NyӈPIE^Vi[nw&<#vzҩKwN$SdScFZjJq;_. v/(6Pyݕb,kĉXlbbbЧO:vB]{ݩ;9Zh^(À\3Զ>DŘ]IA*)# ^n^*&O=P b@A~hR+Ѐ#~H3 PaYKN7Xxnq'~r瓜!)/dwsNSHX %힎ᒒG\$E~[VU4wb=y)\Z/zl{T';s؈l$̒%K2iRPqeT>&Oyg +<5D (b ' 0 إ;wOKX}% \jJ2|0Xe$1.ݮCO<_B#YHK_Z܏$>ْUI߅>\ȶQ{MQ(}ެ2z?:,XyP omg (-Y~hrT)gwomDz *L=^:ݷW~_\}cn7⩇FUxiqB|rVof!{ר(:3 NJQ6\nm0{F4m~fLl˃vS^};-ꣷ~9]`ďP:B\_Ҡ{s_ul.8O\P |u.| x  //ǏW׫WOpNZV~b!_Z(:HL'#1=#`FfʶyKO*WX8iI_}jbOFb==III… 2Vc >93BLW2(vZ!p{K]ޭ -+D @dGF(}fIH "w՛$DN͔4j\8igEQzJWvV?ׯ -ǴU{pMxfDYw.U43'w&GSʐ ^(>HL'rY_ز EgϢYV߰Cs, LjumhAf͚uy$)W;.oZ[qS}(IX#=jW~k/ԒSr_tݦԗ--)I¬^(>HL)SsUhF.ٕ! Ba~~>Pzu4h@7Nkvq]'нU=y_k\Fn֔hD ==bfw^U5PzTؘH|B0,&fF] :g5*G}3g`޼y7襅3Yyqb\"C4H8wQ22PIGv87茀(+  L)eee5ĺtg5x] VfffbΜ9 СCGD &ȁ G˖HHL!2OBRHV9y(P#PkhqJ "8z(аaC-d$5@QvR$3ߎB;:}&0:S ³{2FʄDԿog\}h}ʗfggcưa|mF|h+$ u ОJ*iɼr IDAT@|jpѿZF7*<ZlC$@0 Wp |", ',$`r13#FX-aC,LvDĬSDAh֙ILPd YMdgΞ=#GjժhԨQw 5XM=KI\$@$`<$dF)0P?"2ȵӧOW9r:eЉU39Д_ժTQDON ݒ @,#>a<DVGp.>4GP7nli_  R`Ӂ8܂F7Eh` n{ZvPg!  9Nң5UJBOp$2~;MYH, |/cNWuqc: ::^r*hH~,)eedV=T`Ν;80BhpH*dƔcΟӐU)\բ'(Ǵ~E2s $@%`YA3u:q,r}sSNUwigHViYlD JVeު/W,%o㧧w6  !`yA(N:҉˝jE No 8O&M THpŒ'|p@)zn-a w~xW&CILw}k׮œO>7x۷Ǹqлwo{h۶bOv}݇z k{!Cp %1qD4m6mºu;IIIҥ >3tر=} UVkP---ȧIȑ#QgYlN (F>@RQshi.С45jD`Xe(p8 BQFAΘȒ(W\l"[oUݻZ?֭[cĈ8u.\Ν;㡇رc; ;m1114i Dw؁+V(HO?yJ?\rEP 7܀)S ""_~9~We "\׬Y+Wb߾}eO|v,$`VE@:pD5jt ŮI HPt9QKi'pZp+F⩈B9'm>"친!A(BPM6M#SI:p]6gφD6nܨU8y$Zh?PlΝ;WB<!2۴ix\}Ր$8W믿)o|[ϴ= B홳G3g"77Çg9K,! 
A |-Wbx'jB,:O??v1 @jj*B)yyyJNlͬUV ^͚5(E_n/;Ν;D"R7tq'GٞCH:'[u S6UGby T{Ӟ(ϾI9eR '] _ lۨ3^]ԏ:/ˉ0iXcrsTl4A(JOę9G3V:?~M9S(Eֵ^ő=gA(mK,[Li#HT"qrtR vUTZxiF](U]43 B'ݓ ̚5KI ܃O$CV1 樯,|D ?]D U&3~sy%ʒ%Kp*[Be \K!gdۨ |(Pp9gA(QCb*BW^h3rѣԩ{r1--3L",Dnݪ={uEHXh"E:"j'퐀(&Hb܅gbKw$WTZ5VϏnt6Ʋlۈ澄wZF2\r9TJmLGݕ:TFvMGNzn#Fe+yгgOS*Q?JTT"r&Q:L0aTdK-k|COL̳DZ̙3Lm T!_xgrSP)8[Ѐj *(eEAx= ^#nr۔U1?yw\Wv|X )l<F~x5|%'H-U'%LpppmތWLi&@A6Q#6GW>Eu>+b3 tΞCjV!R *?W (G*6T'9L{#;/8wގox5j-K\!Io68 4Зl둌r7t)- $g@WUA8d%!QxޯQJUԙDl8_.}S;%2x]} o4{7lؠ? 4ib)0 1% BSNM!p1bzpg^8 R]֏9sbP<߯/<-@ʅ_;:@x0/GWkhJЮ3OI@%4h""`V1Bu# n  e_ xjs,2[ԣ%ԏEmqD.:7gمCH|JЧxiO@Rr߾֟ikx(aNtP-haZLX> i/}}[cpۋ57V&y ڭXu(V2%0 B3H&,DGG#44Tj!wjOcWbN86}aïk"Lq2t9 5CPXt++E<6HJƥ#P24G?U|SZ$[X~=ߏ+͛7w-#+]DwMkㅩ񖑓MݦD |G<1"/8̞FND".kk-!uk)t.o=mCDh ,|*`<2=f_'WeO߭=.Ow~U!iyo8| Gw1S|! $cGW~S 3 a5JL{]((W޾(%$@$@ t IԩS@Z9%-ɴl4D"'P܉8[0b"B HKxh`;E= LY[ai4rpyᘐYlxOwD:)BϽłpҒx_k]c>Yݟޭ-ޜ?bxח)QM)-u'w)o?!jqw`eIa ݹ oB%m{w?G  ܿT*Z^O_εfׄiH'@A>3 p"aݻ]vE-4g ,TFĠ$s9u(%\D teTW*[Fl.ԏ ,ԩNS#Bড় [Jdqй ׮qWEŦCZ{>BwA2xP2ٶ!d:5şnqΧܙ/>5^TXF n-wwQ0ʖ%ofGjAyYKlN$@> @A4Iv"tDEEF[ࠥ n \J+7^ٳEx .E ^{+LZg"I;$@$@ _'@jj*Μ9HkNK ASkⳂޱe_~CjUW*>^;{]u‘On+l?Z 30c;2$E,r8LE2DgݓYF3pT"(" 6:^ѭ!l^=ƠnmhXaSHH*!@A%B$ ;wDǎѺuklyh<'ZmU,V9ǨU23!139><ÒitD0{FԌ<3@+#~,v؂WEv1c$eh@`jc Cldjb[F2 \ @A@$F]mWޭ'+#5Ҩ˷_WO>j٭.E02Zx4ڧAǖ7>w¹s0oG7$ a㦪/W' T@˃H+[nŶmо}{k+[46KЦ B͑aEёBUtӘU3  B7 (5.Hoߎ-[m۶СRj\]ҡ&w~PB{$@$ Bu8 ؖD%JmW*-1(ЊB+*}"0; B O: iT2JQ #ZW?wգV.Z# PEvHB4*w]Z B۟'Bfu#hH @AE سg6n܈. ]tќC~`Of*7P[lD[CZPZuf Py8v0tF:!׼9Bnٗz<5CA2F 2dݜё ؓ=^jۇ?^z) jURƪ $Zc9hA{ꅂ3LHHЌ18"aaaU# *B:W\Se6"[D[ _U_w} Bݧ  PrQ xE?ЬY3\uU^1'oKA(IP"14z5S}5r|$@v$@AhY$" :u |-s "t$jhbs) 9&  $%CGӦMѭ[7/ߜBV#Z(CANAhq \ $@^Drr2BCC-OSzBM6Z BX`BZR u`G1HAHHL\$@^Ú5kиqc+[4 mel#ZI #g% #vuL$`tF! N ++ III Aڵ5-UPOAp@mA~Ƞ/lqPVF (gIR=UVaÆٳQj\ G6{_jUeߨ3S B_0$ PzǏIqIN:0 < Y9 NDb"53(/aPȐ06bFH(? xIXbׯ^zyiz m̵AZ ~)S,IDATh&)EnG EVSD|R!W #"Sabb"a*uE Qɚf,/<$?Fs U(]} BW( hKP[,GĉX|": 7GxoM F"%BSP2J JPGaX t$@$ B 2(d,uY_-%P!ԬJ`e%hd+\ $@$`<ƛLE@XdrD~4"z+w}( c?[EAD enE\$@$`<ƛLE 77X&00|! 
˗B6O4CAIH@ ZPf$`aNŋQV-_sO(8㧳PXPIrIL8"5aGAxl MLC~o,o2GpMڈPqV8&  $%<$$$ @zBb&AcOJSa;.%9L} """p 7xe˓f;S0xExkh0zV SܾdSƮ( tuu hGP;,I -- ,@͚51h }4 2O^;5$.Aznl& )+[|H'@A=sH"V^ 67 BE&Grfᢂe͖U(g P(0Hl(6c/ @FFΝ0tM,[FK.OP#sêJIcRf|P<(0  $@AA$=z~~~ʖ'"TƷhIc]ժVA:AnSSէ 4tq$@6!@Ah$+YYY3gBBB0tP_uS]BqN\F5#4&!M;^v:JA=  ilM*XJ׃/Y8y: u"Bбimt$1X D%iBB)j / 3 4q$`999={60l0Gdpfw"5"7G] ^ L:Qh"bPDU Ug~ gc'(**‘#GPjU4nXMfoA]E%&2w(4 4U޾ Z[,!IH@}3E<̜9>|I*?jbJ5 L(@bz墂'n  8Ҥ'#% B[N;&={qqq?RWzB]1]1ȭk߯ ZńxL ߆S ؐ '.$ӕ2#GTӴK!3Ӱ?4‚yLDI`DAs3,:j6A(KTnŪF)/ Pa8F00sÐ&Mh>RBuR?m:7nB9I3y.H%4 SrMwsT]ld"CwQ=Z+vHH@=걤%%2+[FovAN^_/cj*u+KA@3*9U%jkEBFyY!Z# Pjǚ=% 8"\ӦM5ȂPz߂4RCI.#If!+* ^y!LH`ŪOPauq $@$P!W Nظ; BO뿫mmXAgT0><(L_; zc3 ! B¥i C)2B8_JTpѦÆYE`?YO!N /)V";3[Ͻ,w(ޚ' 5%ws}-tD L|q2Ͼ.q".˳U^-Uw1[ BK$`veH>&JQyӲML#.:ӨgRi*sbAx>/}SFjՐ!=/qX._4z_ w9Go+?~4m:tAvV""kaůA셻!rZ*h[zwRiHT!@A F!{PTTnMyֲMjZ})3g=画3 rI]C0{FbŸw]dڂ^,W,Wf)sW(w_{Ƶ}/ٳbo*a(ƽ>[XL*R!  JPr xM@!sTvqr2^Oիf+𧻓uDf^UtЅs~Ĝ+e7i)łpܙe֋;=76G|AL?VJR>|Gp]>%jYQ v= (]%z$@1c1bkJJ$Oo5:1숮uZ!139^ D1{vbD+3]"^h02 ƒvYOLc{1k̟]L[нw?\yu)Ɫ@1qV8&  $8%LƍmTF|4QDCOe-|<};$4}5l/uz67X;%~E|;l#j?O=% ~R^֫^{K%iy}V6鼘2B|HH@-j13g"//ÇG@@$jꬅ:LjE #/7!a&@*^Vf*f.>٬vdۨ #VQC$`VE@:pD5jd2:7mBsF ɪFe(Ͷ9^ ; ,G19s ++ CEH!ȼ;WB ?4e^/    V%{Gaa!bcc…jҡ TvvQhg> g'矑!C 44TQj[<VZy(#}hHT%@A*N#{`ОB+G ]1gza[   BpU̙3j[VJtUL0U Ug~ gc'8z( РA/o5f 30c DLo<F) 6: >[>VDȐ0 B E$`gv}N*_pip PɪkfFV5]ZC҅@fN>;c#VL=/\8|$gfS+Cr=hBAh$@f$@AhYI`;|]"G2pոHW\޴p8>X#=j(3)/:m_--߯/&! ! -Z 0QQQ^sSxjuݦz]Z>])ы(͞諑## B;:}& $$$ //G@@]7w&+"YK@D`xvk$,NAF,v87̝Јc"; BO*Xd Я_?Ԯ-* 894JFY} \ @A@$5Ǐ#77C``h|A@`BZR }a-vn-V& Pju ,]'OD߾}QN:J,A@6RNu2rfPڱ0BhY$@F'@AhHN8 (M0% F ZʰSVF (gIrVX6ڻwoe( #irf|uпZF7*(8IDNM_IGºu"88G, /aPȐ,Sl8\ [ 4I-UVA.ٳr9= BWvPD`h`5E,% PrE PoN8"0DdggBM\ ³J hJg_ժTQD@;te1QBuHH@[fo$`IWF||<ZG:E$= B MPmG6$ WNdee)WNؐ]&p+XH%@A-oF$vZš{hܸ%}S$@ !- (&J{$`CIIIDڵjCtHPb Ж X⪫%\bI (gH $@$6 B ؐ@rr22220$@ t (HO߿W\q7onI (gH $@$6 B ؐSZjF6$@I\!@A %! m Pj˛% lڴ {A.]ТE KHH'@A=CZ   PMHRRRpDEE!<<܆2 +(]:$@$- Bmy7$͛7cԩZliI (gH $@$6 B ؐ#6tL  P0͓lٲ;v@ЦM;LI< @A46!  
1`';HMMEZZ"##QfM;LI< @A46! pKáy   XjΝ;gU ˂P~ 77ؖH' TkZ$     Г7 HPGHHHHH$@A'}M$@$@$@$@$@: >&     = PI} (uϮIHHHHH@Ozg$@$@$@$@$@$# Bk     Г7 HPGHHHHH$@A'}M$@$@$@$@$@: >&     = PI} (uϮIHHHHH@Ozg$@$@$@$@$@$# Bk     Г7 HPGHHHHH$@A'}M$@$@$@$@$@: >&     = PI} AU^bIENDB`docker-1.10.3/docs/security/trust/images/trust_view.gliffy000066400000000000000000000766131267010174400237240ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":866,"height":537,"nodeIndex":323,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":null,"printShrinkToFit":false,"printPortrait":false,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":10,"y":0},"max":{"x":865.6666666666666,"y":536.25}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":10.0,"y":122.25000000000006,"rotation":0.0,"id":79,"width":531.0,"height":409.99999999999994,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":312.25000000000006,"rotation":0.0,"id":40,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":1,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":41,"width":71.42857142857143,"height":50.0,"uid":null,"ord
er":3,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":40}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":40}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":42,"width":26.0,"height":18.0,"uid":null,"order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":40,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

1.0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":82.1785714285715,"y":17.03600000000003,"rotation":0.0,"id":0,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.female_user","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.female_user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":43.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Person

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":142.25000000000006,"rotation":0.0,"id":2,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v4.business.user_group","order":9,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user_group","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":73.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Organization

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":141.0,"y":152.25000000000006,"rotation":0.0,"id":11,"width":63.0,"height":82.0,"uid":"com.gliffy.shape.network.network_v4.business.user","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.user","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":12,"width":48.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Account

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":273.25000000000006,"rotation":0.0,"id":16,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":15,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":17,"width":110.00000000000001,"height":25.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":18}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":18,"width":110.00000000000001,"height":25.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":19,"width":110.00000000000001,"height":55.0,"uid":null,"order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":16},{"magnitude":-1,"id":18}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":18,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":262.25000000000006,"rotation":0.0,"id":37,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":35,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":38,"width":71.42857142857143,"height":50.0,"uid":null,"order":37,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":37}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":37}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropS
hadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":39,"width":38.0,"height":18.0,"uid":null,"order":39,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":37,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":442.25000000000006,"rotation":0.0,"id":63,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":40,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":64,"width":71.42857142857143,"height":50.0,"uid":null,"order":42,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":63}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":63}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":65,"width":68.0,"height":18.0,"uid":null,"order":44,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":63,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

producttion

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":305.99999999999994,"y":403.25000000000006,"rotation":0.0,"id":58,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":45,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":59,"width":110.00000000000001,"height":25.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":60}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":60,"width":110.00000000000001,"height":25.0,"uid":null,"order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":61,"width":110.00000000000001,"height":55.0,"uid":null,"order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":58},{"magnitude":-1,"id":60}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":60,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":416.0,"y":392.25000000000006,"rotation":0.0,"id":55,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_left","order":53,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":56,"width":71.42857142857143,"height":50.0,"uid":null,"order":55,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":55}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":55}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_left","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropS
hadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":10.714285714285722,"y":0.0,"rotation":0.0,"id":57,"width":28.0,"height":18.0,"uid":null,"order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":55,"px":0.15,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

test

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":10.000000000000036,"y":132.25000000000006,"rotation":0.0,"id":82,"width":108.99999999999999,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":36.142857142857125,"y":399.25000000000006,"rotation":0.0,"id":109,"width":187.85714285714286,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":81,"lockAspectRatio":false,"lockShape":false,"children":[{"x":7.142857142857139,"y":50.0,"rotation":0.0,"id":98,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":74,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":99,"width":71.42857142857143,"height":50.0,"uid":null,"order":77,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":98}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":98}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":100,"width":50.0,"height":18.0,"uid":null,"order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":98,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2
,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

working

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":7.571428571428527,"y":0.0,"rotation":0.0,"id":95,"width":71.42857142857142,"height":50.0,"uid":"com.gliffy.shape.ui.ui_v3.icon_symbols.annotate_right","order":66,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"MinHeightConstraint","MinHeightConstraint":{"height":28}},{"type":"MinWidthConstraint","MinWidthConstraint":{"width":40}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":71.42857142857143,"height":50.0,"uid":null,"order":69,"lockAspectRatio":true,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"magnitude":1,"id":95}],"minWidth":0.0,"growParent":false,"padding":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":95}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.icon_symbols.annotate_right","strokeWidth":1.0,"strokeColor":"#EA6624","fillColor":"#cfe2f3","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"hidden":false,"layerId":null},{"x":-7.142857142857139,"y":0.0,"rotation":0.0,"id":97,"width":38.0,"height":18.0,"uid":null,"order":72,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":95,"px":-0.1,"py":0.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

latest

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":77.85714285714286,"y":8.0,"rotation":0.0,"id":30,"width":110.00000000000001,"height":80.0,"uid":"com.gliffy.shape.sitemap.sitemap_v2.photo","order":24,"lockAspectRatio":false,"lockShape":false,"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":31,"width":110.00000000000001,"height":25.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":32}],"minHeight":0.0,"growParent":true,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.rounded_top","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":32,"width":110.00000000000001,"height":25.0,"uid":null,"order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":6,"paddingRight":2,"paddingBottom":6,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":25.0,"rotation":0.0,"id":33,"width":110.00000000000001,"height":55.0,"uid":null,"order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"magnitude":1,"id":30},{"magnitude":-1,"id":32}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":32,"px":0.0,"py":1.0,"xOffset":0.0,"yOffset":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.sitemap.sitemap_v2.photo","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":330.0,"y":0.0,"rotation":0.0,"id":180,"width":67.309,"height":101.072,"uid":"com.gliffy.shape.cisco.cisco_v1.buildings.generic_building","order":126,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.buildings.generic_building","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#0b5394","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":182,"width":56.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Company

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"dockVlz9GmcW"}],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":266.0,"y":125.25000000000006,"rotation":0.0,"id":250,"width":7.0,"height":413.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":172,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":79,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[3.5,-3.0],[9.5,406.99999999999994]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":320.25000000000006,"rotation":0.0,"id":306,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":209,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":271.25000000000006,"rotation":0.0,"id":307,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":210,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":472.40133544303796,"y":401.25000000000006,"rotation":0.0,"id":308,"width":20.
0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":211,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":37.214285714285666,"y":406.25000000000006,"rotation":0.0,"id":309,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":212,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":40.214285714285666,"y":456.25000000000006,"rotation":0.0,"id":310,"width":20.0,"height":12.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":213,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"dockVlz9GmcW"},{"x":580.0,"y":418.25000000000006,"rotation":0.0,"id":314,"width":283.66666666666663,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":215,"lockAspectRatio":false,"lockShape":false,"children":[{"x":66.66666666666663,"y":4.0,"rotation":0.0,"id":312,"width":217.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":214,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"pa
ddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Signed tag.

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":0.0,"rotation":0.0,"id":304,"width":33.333333333333336,"height":20.0,"uid":"com.gliffy.shape.bpmn.bpmn_v1.activities.ad_hoc","order":208,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ad_hoc.bpmn_v1","strokeWidth":0.0,"strokeColor":"#38761d","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"dockVlz9GmcW"}],"layers":[{"guid":"dockVlz9GmcW","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":216}],"shapeStyles":{},"lineStyles":{"global":{"strokeWidth":1,"endArrow":17}},"textStyles":{"global":{"size":"16px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.cisco.cisco_v1.buildings","com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.bpmn.bpmn_v1.events","com.gliffy.libraries.bpmn.bpmn_v1.activities","com.gliffy.libraries.bpmn.bpmn_v1.data_artifacts","com.gliffy.libraries.bpmn.bpmn_v1.gateways","com.gliffy.libraries.bpmn.bpmn_v1.connectors","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images"],"lastSeriali
zed":1439069097667},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/security/trust/images/trust_view.png000066400000000000000000001642151267010174400232240ustar00rootroot00000000000000PNG  IHDRu,U IDATx^xT $%RB U& "AĆz `G>^\JQ"@H HU g&m&9g朙w=Odrkwkt% @jfL[ 8`jUGz46L x@:L$@$'Ad❦iݰW}D&n|HHH{(ǚ3 @jiyM5dm[$Z%q'HHHEm&B$`tC{(Uv'g-jS1$@$@$6:QB (=7o*`Ō&2L!p9E9 xEבsB @%AdR]Fa2V-'9Ӂ.P>HH Prm$@!uZ^{y.!加c2!  C P @~iYwKw]T<Խ`AGQg:~N@$@$@>!@Q윔H PxSе ݠ"t{g闁q$@$@D.vk%*WsG1Rd$@$@$5u^C͉H@VYf.Y=eP;D֪"tD4'3};8 xEבsB @ zB{AЁ5Ȕ;e֚bR Pn0 |d +ZV<,;E]x ؎E ؁UE]ϖueؾv@HIHH$@Q&(^F$@PjyN8"TL?rW͍å^X$@$@$@31g @{$xv)OBV8,[u7/k>2qf9v27Mw/+/+Om)Oʌ HprtߖrM٫e7yt//&  0E9 @0^9q%utORs~*W,`Ǘnr$@$@$`muޟB{(Csϩ=)rIRzz-\wMWGIX瑺JEJ+=zu4  ?$@QgMKH 't+ Z5QҥE T[{ 4,K:MIɒkow_zJytPEA!   P Mk+)wthDH\CD~XU{$1!=+Zr\l_-!.Yg+g(o{: QtM9yo ٝ)s&yXMmLntu!$@$@$Uu^ds_"G_;byiPzFhIRJ}ۖ^d3Rܹ<&Rg/hq6R?j:UU+l+9cAW5ۺ﨓 $@$@$5u^CDqo\i&֐y(ۚ.ۓem||{2-8= Zhn yg::tqU3o!  /dwrV"rDI϶%,4JI, gjV]>ݤQ; q|!v|(80UԹ4C$p٪W+XDHH|B'EIv޿Kcy_+xa%N)cxOv D^~7ȟH%9s/2~9H,#g_P(:HHHd(JƭTwm.q!6S,ͮ0!h$@%#1IJZi@I|*:}3-/!.ȩܼ`O}LV{:!.\tz]udҿCkͮNuܿ;J7.`d˻HHHJ(|ONuCQww7eI#&,s`[UN]hݬ?i$@Dݽɡc)J?* )NpO"!_KɂmG^+{=V yvaپər8#+L[+IHH(꼸5^% &p  4puy}vɁSN,򚏯*zQ"uuNN<%5cx!:y  @&@Qݿ/'- #;EN5gE|:gUT_ J'Jqü+YSbHHGK{ODʥ Jhf>L/=+?,t+O]^5ˋԭ+6Szu$@$@$`=u^}/{\}w&]Zv^KM@ߧHґg.,OQ}v<.ϸ]~էnc}[%%v9sJ:ӧ_>yg Y:7H;t9.S.HHOPyi#]/  (%Rtv;2p]Au27"ඨK)s RF wE]y\o)z8. 
xEɰpX^~zng4v,1a4RyAk @u)5GK"8xϜ`KOz  :djѵ嵡L3Ksϩ_QP'$gJ/ OԵS- HR4v=g\ E\q ijk-D!.cK?{  pEJsIS#ܯ4c{7Sӵ I{{kjC%Jn!5\S|ן= u:cK PԙeR j9:;ZZF\ ǹ: N- JCeV}NПN3_"o|Rf`9~84 ؟zS9%cW%KKKO1SbHHG:dŖ'K #Bd^&a{1DH`nEJ"It?(<%IHHz(Lړflwwi,keLIKd[Rp̐'&{s&Mԡu=Gd@IoUK"{d)V$8[*3_Sޟ\҉:ӟ{~@6Qr"t 7K뤨كEwIHHu&=t|B)/5B yUe[bze1ܢ;I8, ؟&^NRAЮ@g!*'OS?7s~{9'g;^"6^^f9|޺q^u~F_+S[YZդHAuAl'R/4Q)=U&/&8\Lq  0E|$ʂVlV8stK:}.vD VQJȱ0Na E :,{ %3߬c=wA:[f.rPURu}Ѡ_$Vh2MAEݳw$ߗO5"N^){?|Jtu?~j>^QYw$@$@$`6: 1> %19q7S1 Jd> >YM^E]\H]iE[X1s$@$@$` :S?hA1Pr´qGPйo2@Сkn:-R7s=}`f9~2i~X2WK$@$(,v/|ĩ *M݃ ѹOgo6ww؝-dXߖH;Qv&gʚ^NӺx䄌||LOٸ7MR9;dr&x7ɜuIrżʈ|t,xSo_/ީʖno /$  $@QgmruNoF>H̑o"-c(]Zֵ B!uZ>OwykJ2ҷ]|ݰ;6;KaSԹ+HHEvsBzS!:BZDIJD߹m51ݩdCq*C@w,EMk=[֕-ʋ_rR]omj9W"#\uZ]KR$@$@$` ug'MflefIP028]aM{+;#Xp钵MU( Ү {Z!  + 5AbnŖE9F P$%Ryx$`{C/b%Ew6C^%^iH)<%IHHz(|'s_* CeIÈkDPߋY? PܹC WQ-e.]Y}q/=:Oz  :ɑ٪(ʂ5C~.mTQ(I]*u;7PW! x:Oz  :/ɸ)EЏƂ( sJD'Ip_yI4Qޣ%i9s%2Wd{dtp/6tYb}Gg^K+=U>[]2N|_Iʑyk9}#Y^ִNu\ xF3^% A`hץW_~DxSRJLaSn̐'&Y8 ؃@iE7VKoP$@$@$`]u^؛!:EFnYpK(쬼I+(`zK} O}V% PWa3vFb= ɮ4Q}}uUr}NKy֦'%K:x]_axU2eְf59sV\ pG?]}Ҭ`EIHHup,pw36:~g'AWC)c 'QwIdDYk? 
#:v3S' uv41au4(ַBIcu#u"uhr>|˯麟{HHHPԙ^% &Qrޖ,HOUڃ?@D]uU+m|OLO;)$;^4w*GJާ.?q_S @(LxB6'ʓb#;tOZ".pܢ`]4wzQ?&RR2K^'U!r){SFaJԡ]G$YK,>EK~ZYY]Ê=r֧۰Or|]^NQO!G$@$@u^Q:;]CrTԌ:?ZJ[[Jpr7']+m[M }$  ` 91Dhsu?<ΒJ+꼱(FAs u P7 #Bd^~x@>N93uH P7Py2   3xo;fmT*pH{nҥe] I[uGDT';}:oIQ ʜHHKl :CM^~۸_9L =1x<*E7(s  .:fŖ'KՈ֐FguH} f0ծFJ@_RrOߵ$9!R][5W/auޠ9HHH( ܛflwwi,keJߌ0$@$@$@F3<>1Oo ZC%a IDATgt/in- DΠGÐ i( DuZ$Eåu l+z45$"HOHHH`u=M]MXӠXEE%L:E$@$@:u=R j 936:]MO  ::vH/K/h"#3$!knZDoHHHΠ E{qKۖ?n&,OtHHEAP 71#b珖s漠PJRJZ.sM:ƵC"RA$@$@V$@Qga{EmI2azIh9byJ_Y=؟ɵ@: :HHH( ܣ#dzK$19/Cє]KLdldц}Q1C:JH:HYP;T8NE$@$@Yw vCÈ!#kX>5s,ٗ%'t=nDПY@: :=tHH %@Qg1}Nr|bH?Dw #Ixh$A^lv.n[b5 UJMaI:Q5*=uBzB$@$@E3 An`m%y<D_PrʉOLj{m^=Xth(wD'kmO D]ӺtrY{ @N=*$xf.4HD֍åuT4Zx ؜@ : :?tHH ` PhKHBß2L4!RRy}O6z.Gv$`QntLj{+!dǟǺ2B1A@$@$@'@Q=(=-nOjEMr򌼕$o D%u+/oWnRQk6 Lʆq  0E@9 H:-EO亽Qar]`)ͅQvʐx믿^]>d۶mO?֭[nPvڥG4   ^ig * aÆʛ~I|AUZUz"nxy7ԇ1eŊra ÜaNqw1 :< 0+W\ ҄_QsaÆ#<Й7oBO>jl|T^zI>u iժ*a+WҥKeꞂHF_w୷RHHE63u+H/W/!=c?["Ui/^T9&?JZ||Q|Qx}UTv;sD!pz;*2(=ޯH##OV_ #WhT>##C ſ/::o !  @!@Q烝Dݺu5W۾}4o\}sv7dZUC{ァRP꣊&Cd'DᆱR 9:<o ֵkWA4I4 ۺuJɄ+*rCT_բ7vIU(eƌZ˛:u*? 5j… UNI%,uԑŋJ*εAXJsDRSSӃ RUj! LUJ5k(vTL! ~F\D0wQQyD06*#Fv8 2: s$77WZאiWJ*9ٳ˪[x *8ۮ];u3s7r^*"/|^{M͍ x衇C\΢wU:׷^p$@$@$(q׹f C ?XE4sSn估KL ^ZEXj[QC$LR"ףG%<[/Xp0  :D$@NEP鑨Nj#JV|!zO*w<"z4   {k #J/T5"d]Jy 0\. ؞E 8/ (jb!MYCr    xNK$@$@$@$@$@F3"      xNK$@$@$@$@$@F3"      xNK$@$@$@$@$@Fp[կ__իgĜH ?F$@8p߿ߓ[x- ueʔK1$/!Wu\ x@nMi$@$@Hbcs44SZje7/ b~&pz $@QE9N8!RjU 1gJ$(vk0 pz;wʦMiӦҦM;N_I,@@HE_n+E8ydddH*U$44ԜI8* ۭH|LIN#ׯƍK:}%: l] Ku~\ CԩSrq\T^ݜI8* ۭH|LINe͚5)111vr XE6. %:V.!>uǎSǯZs&$@~Ko #1:o';ؿ\Rׯ/;wH(, tH/ PrQ$`9zu]g$Ho Pra$@>&@Q $`'e˖Iݺu[nH(, tH/ D=yңGMonq}w}'W]uǛ|知w*zA#&piIOOJ*I5y\ (L!IH@D|"F!'O.v,Y"ݺu+^0l0ꫯ٠KW_Pw$`7)))xbP_HHu$@$>~Z+/ :w̘1CfϞ~aWlYW>|XT"D!"( @nnIŊ%,,̟ʵ @H|D]ڵeϞ=NbҲeKٹsJGA ֥KyꩧANm6dǎ*k)6k,zK}(}0c=D˾["vM7ɠA{1{4{G]v{fftA9"SLQ% bO.j7xCaΗ^zIVfM=ztRADL<={4{:O$g(lC ibj޽JpiA֢E Wx%~W; OߋZo߾*:A q2dD:vħAw}W !`o/lP xD#\H&SQ/mV$..NaL8'OVZɾ}rW߿T.]m:D믿.zW_}UE`zQE !̙# ҤIu݇~((Jf/Do_]C &) 8P k.u ^wyG7v׸qc77 266V^E# OPyBג |. 
s"g̘1<>?nh!څtFM=K:B"*YkݺJD)5Ri}i۷w:6mTq/_^P)eW^-H=?HԄg,x% Lٳ*_ F$@%  \ԍ?^+Y9 >c bK͛7;{ӦMRjU%`&MÇ;smEE?.NBmj"-R~O⩍4QMG:p9uTy衇԰6lP"Q[ӴiTtFV%zgV2q RzZZϖVQL-ޞ#  SQ/zfN/p~'{$LD4QoH!Җ;||A"v0-RW+]jj#PC*&زe U /ayU߮ȅArHM2 :RyҲtmYWZ5 =-9ۛ&˷[ ^ W:ClDIlIεlΣmݺUׯf{8虅3>补:*D)C$"h8WTZa4NhR&a'E_j\& @IO*Ӷ%%TVM.ۮC}[hLƆxW9=RC XSTE2r~qnB8,4H:DGH϶ aD|sdY=Y[:؃CQg ;$@~C'矗_Eҋ:E#G*EqmԨQlfZq4HCyĉ"uoH7n\H{C}O=ivĹ߯_j>~'B.>i$`u~]!!7bbSy2zp{8q~۸_;;BY;D6_dS*x=+Zz.6k´-J[4@ѬA@/HOD]i0:CeJTcrΜ9#>:|h68džQ"= 44ZxJq?*Žz0ו % }Exs=fOgQ eȸA4Hgs6 fsA=ޯuMw0lN#nؾع Py2 HuEDλlAܹs >3$AKCM¢rі"/9: թSRDR.!޽tAO}D?=3*/b((&s)!K:D1YZV޾9 @+QMyᄏC?pT&޽]{Ŋz4DʧO 4.sȗ:loU(CJ>b7s:!9L!U(1acawa}[5<)!m$@$P uXUdCd Yf*j껳Mlq\7PԹ @ Pԕo1"h>4c Pw} <n? 'e ѺՃgjsRQ:*jH ݭPYmG 󗝴: Pl¬5e%JEAeAXv98+g/z>0ŝq#uWXM4V- 68A~;_C뀡a{4vr)&IE]֍J3/(IkU:ezեA R@3N}3eׁ*]sׁc_$ lÅoF?l% *+"Mfvꥶ8} ]諅zqCCr I{{yy($ 9(Q!pj,ڦԺ6D ˖7k%+ڡ9t8W^BBU(g}Z_/KWx^jA+3/G'\u>Y95j]uUyw3{2uK]_c"Z35lB\rSN6xUk@5} }=ȁ2VjQGxygC\Ex) x@Xt FLuJ*qd8Ͳ~aǥHǜ4]q CK|@ T["o|R!k>mCtW!:`.vb (aM(Hy+ w ;OwӦM\9SKwBCG36ȌŻx:sNQ.)^G$@.<~w[.Ifь®)ԡ fݺu:TV7!A%]*}d ՃeAjk^IQg}W$@'@Qg= MY h6r[>{ц2Qe쐎>ŮΘ1CΜ9#  Jj׵y>7˭m/ k5}?4kGPi4[ԑw?kuv9M$`uuV!]tU3r]? 
U:u&jtD~Q?yLnTD/FQʡ n\u\ PYmG]=f IDATOXHy6X7H}TœNqsgfΜ)9992` 2?M~ʼned敻f&|_$19S gvyf0az:_'WuX BݰA Js/*`kyR(/Jڵlٲ%nы3~ J3a3Į6M=er?,4H&?۴>2vj}.o:MHJ(ꬴ~ka d˷K߽ݙ3g QY^dlݛ\cνJNN .HDDD@4hu #BT#ψ0 hʱz4uo,|L"7N14OF"[q(JǏw @a(lBNsC}X35ۓRClݢ1Mac͝;W233o߾)-=C2dܕ3[kkC;3JCh&Mΐ26 X@QiT;Ԕ3 @B5Uvo|J';ƚ2OI-:$@Kg+ӧE 6CϓֱfRRRTZjI勿!pM4"b* z_ _>ar &TAK˺~Z1iykC!\#tX.]{9 @` j{yK:WʌŻwth(c1&RR~߼y$##CN eC/sP~AL]/Wq3it/"Ms(dU?ʑ۩H a1wi,8um|}{ڈ2v :cyr4 PY0zPiU%Q^xU0rIGIՐ) >|XΝ;H]__Xw(h~A(1RA@Z!XV5 Ҧ"5#GrK߃TƱC:"4 vXw.Qr[R#sq4f9YKFQg=G$@A?2W \A2% p/u `e=zTnv,v^$4pޮa󙱜Jh=ѽ IwD募,u!دKcP ħȜ{mp-vHYn^jKBpBU,4ORJӹ9cEi"@Qg7$@C+''*_Z7b s♏~=3!RӮ?r䈜={Vj֬)\s]ax5w#&RBEKjN/z vJE͑{^ ;hbiϢBЁ%J -gM3*My3:>$@$`:s_"_۪gF䀛lM ,ZHRSSnGh3?Q1uBRTjXXB i]#jZ(|438Ӈ/,HG~Qc`ǥ/' 7 PԹ G@/FnÙ;st*T~z_TDL~4"B:kk  ۳]}ٶ~q*NgQE bvEih90 @ ?D&S#0e_VɳPcGl0hZZJXXTX""`*} R=ܸg,g8 SB,!ZW?WA@hSCD43h@:pb pEۨx;=5t#pk-[&[nE ꂮ:DFno#U2*P({#i~"BS3WdHT^ `G>:3$@A.0kԋ{7QQ/;E$9E];.O5jHJ창ZIN[+*2 Aag!MVXG{LG[8κpFQ;>wPϔ# P90ء kC1o`5<mEhXr߿_:w,8 {mh'.USGĐw=cf9|B &o̮5T/76'NAz{י1) ӑ  j,Ta^ͪs{gRGNQ @Zv4Qǃ oXW#_ڣ$=e3vo|qkoEފ$" h5 }jTU# x͵bgQ#@Q'^E$@/@S8n\|Sɽcg9\X_c׬Y##Ho4 _rWqTȕ[ߙU O}WQ:O/+񿧨3)G$ s`8KHSptmib˷o0NjV8cǎIvv\{0g6/M dч߃f&E'坫s}kC;>t4kQ֗dOQKH Phmv;Wt=(֭%*}=[aW_D_ Jjb{6gE|zfuTwPy3g!<u禯*L$d<#>qU9Ǐ˩SzRre[^f5Iw ϱYIXv5iڤX@$@3k[Hbr02fd227^E7n]vI۶mI&d\X8>KH>LZI}>w6e; `H.` k)^%SehtH_;ӵ)``Ќ 9y򤄆J*U`E/asB<9!V]Bzoq.Lk歏8_k4Y}JP<!zAa8~ϿWPIc u|L#WAL$8}Įߧne}7oxiݺDG\J0+:Hq*SKO_SQ_ *NpӋ:yĒ?%uP5$@$9:ϙ7 UP rZ$0Jdffʉ'$$$DV6Ҋ4:S^+Aʙ9gː4Dz+s4 `?<ێ-[ȶmۤE'L/RR~ϘY,{,LI=^7,G]x`5~㱰/6KgWkyK$(}}qs.׭. 
&˗g.ȋʞؾ=YYYjժ@0U;oS+-c|р~-J:96%Z?%Mz o9!: MÓ 41^u+ݵuǻIH0u|6L'ٲQ>T(s'8s ٲ71:L&Ugl<%..N5k&Z<7P~99!bkKvA-[mf{q]M:MaG ,IpנxL<ݞ"koAQ]ޜH pP^tHC1R3ro?"t0~;_28Osu8OsubhktbUQ@aDU,ר<8϶pCKoUw8IWԿ>}NxFKFa.zOqr!A0| "2mZzf}=/I)yUt+vj<^zR+%@N$@5®Z =m7fid:㘆w)6mMJ6mJ> D Bi]e@NZGꆾ -DZڶ^݆tf*"b R;0GַwR#PAc @~u|*JUa>1ͤj5r"|vz+fC:C@5 VQoP;g%uǖ[!>ԍPST *]ђ9U rѽ l9hrhϞ=~ziܸoB ۓ@T k-FuZB+jxϽR'toy@~:{S:#|{n%~_Ցc7PP^TمiZ5ڡ,价uA$w(nK |~U~qz+SF6isCM{TΞ(vu)r3-xN`gq)9~T\YWn>aa> qw[<ϰڶģhþ|g &76O栨2H,Gr[XVs]}ݰ*Rf5]Ԩ$U*K2NIN?%d#&RX`M\mbbYF"##%&&ę8 FLD 2UDWS_zDQ=@%ο#:ؙ߱^r!UpBK2> sl<~vv;vLkRwUe.etx_+HEտǡH|Gwnf/m.K +,$HЏ %=$=tо-J|>ʕ+~ҹsg\_i$hƆo̡@ae)hՀȝd\ nPJH(u|>HHup:Zt/kYC}pćLͲst@ÇO 1׬a X.߽[dm|JDaxf`[F<|Zsrrѣ$]wwronܔժҤٶtzmkbz>ԛ M]FbJ6نPp2 -Pb$ʾ|Q~]ԇKo =OWC*F߅?S/_._tytvb^ц ?9)24wJQ k8OP;'%uɾZ363[Eސ :K$Hߥ<ޯ)ĮgP뜉={Vy5רL )U|=Otz%ٹKhq 43@$@3k@Zc_}IKd[R^QьF?~\ϟ/իW>}㔂G36ȌŻVREQRlJQg XEežlNHG}փ(e7bB#0fHGiܹsra)_ԪU m8.!M-嵡-:dm|JS0uƏFu|HHup Q. gi5aY!++K~Vu]AJG )~BJ7w`=cf:FZFv'@Qg$@V%@Qg՝_8K /\?"4KJJ+WN""L>}p}{_P,EShM.'0Eyl~d9_2i`wth(c~/3WұO:%gϖʕ+KK: 3>yވ\aSHyu\  PYqWlmxg\[:.\d)[Ԯ]&OIQ{+]H@ `C$rK4j@# y;#3gΔ 0g }ܻ'?[*L]Ι2|uC$7(f+}q*JQ92oKlZꫯ:uXr)9A\!`hVDKJJt3u\Z:>$@$`:s/L^"+RGүK_iX=osss+ʽnE_(oOY6"8c"2LG^'5B+bpKɒiA5b[=QKQg[L$({Kzy:jQcYFsu6]xQ:$YIx|J%xHT4Qb%;AJ2~bƹ\<3-<K7p. JIKλb6+P7,;6j4|S>6Mt_" $)LtA ^=X#% fSl. mPf㨾04u3bK믿rUWIݺuM ! 
iܞT{e*kT'Drl8=@?O7"9:or u3hז3U_~w*094x jd=yOqVzG37XTPF㊃;Pa  sPԙկG]堼RF5;6CωIEoӋ:?mᇢ.8p@^z>[R\Ϝ9R\+T3?81 MhHPI;UtzaM~*?XʷP}PT4uTU0TSh9ss= ?&)b]vmۦD6o<ܹTZUJ;szC {k\噎jzN67ł3sZ.T5~BÙ:|HCwQn֬Yr7ʉ'O?J]11ɓ'eҮ];:T^]J*R<7؂E-N ؐE 7 .SV;ҳm}ߵ4eFs.6wZߘ!D2iӦ > PԙϘ3 &wCV]u5TuL;ضtٖ.kS9tλSKcpc)[Ӡo,]T߯O-+VP141   qqqR(;t Æ QFJ4/h\x7˽{{935C}NN!WZ5}3ٲ(uV B$O(i7}4$_&QMr QR5!`Zɭ3NKjse6IZvg;:4;b"uT^e3|G y'yff"˗w>SQ6޽{([oSO=(胸2eTWF;ėSq.\P{9҉PNm^xAEF\2?T):{PߞrE$@ @Qg}/PDe*$вBëK0i.][e1b6꧟~RDa*UrOx[uv)zrԾ}{B Q3D z k0Dvζ+*eWA(LԽ*ui0D]A:D 5&"e0_(tEq,9 Py03'UTMS<8R*kVV?;s(T;FQTA⭷ު #:UIypOo߹M鯇h;wu *;B wDذT~+|*MޤR!!!'sTٽwΜ}̜1~~~DdCgVdetNmٲEIﯜs(r^M"~]Sĉ!%6%RV::f)'gI,׮\0n8%"uV#BWXXXX,Oy/MiLN `$@%@Qgةc$`?鑳U_:Szu%g*KD#[/%7~x% _2ZOt"[dm=O@",3asY7%:S: :pU04b(+gX#Zj) Mn.:A,$7uzH((2T$\>.[y+J۹srQM6h@Aq$P:.  :pտ V+SA)$3B4Y\]bb>88rlA$(r4 (T]:tHɆ'E6% Q% \b/B׮]̝jYbm g"@QLM_I$@Q&m'Cf͐k׮7(c-#?T>\=8z($#ᅬp={V7(3g?V/dO0AYRO8Y6 6ݻT,X|91x`ԨQI$(z<  PDիWWD4h"$sV^=L<WDܵ%%_~YyT7TĘg{ƌSO._6_'P2N4IKRz뭘6m֙>P2JG$`:븱 TF2B|j˖-DDxV=|O?U;)%& $|śd]t%&wqə./QףGГho]I*!BwVU Y徲5 d! PUF u(VFRR/wqf̘XђmˬY*R;bԩur$(8Xmо}{l߾[/X"dDUvلH( :tH@suO1 $&Iνoݻ5SLuD$v>j|֭ &QײeKHOrYc QDz("XZ1Rg[v-Z:+ 9(̡:$@$`9:˙ ȃ… 1݀ЧO%Z֢E l޼YI2{|rV}sD$i&'oum;ڵS%ԙ1\|`[ 8%:v:M$: ;[-k׮8qx3qǏW^Z`RD_>~r>{lyJ0XBB)bQ2aJ)r/l 4ґ:)7lT%,D^EKb 0E9XH,'@Qg93!I"Wsɸ(EFF3G(l\E E\[؂HH(4H@O(4 8*WUI 9(̡:$@$`9:˙ 8=9(JEM K HC>\i M`˖-Jn0tHv(lǒHH8:  %iii U`! sPԙCuHHru3c pzr'Э[74iy y(Z$@$`):K> N@ 0E9XH,'@Qg93 'k.wyNσH#@Qg'" K PYJI# DHH,ufab% E؀H`޽8|0:tVZ E,LD$@ \z B`` Y(J$@$`1: qAmVB$@3 (,g$ Wa! sPԙCuHHru3c pz¾}кukoy y(Z$@$`):K> rNy:9WB$@3 (,g$=ݻwEԩ !_YowTsUٜH* @QǥA$`1N;:0$g#379-~p-(EQgw ! :ADعs'5k.]89T@~5$!1=RW@o7AfEW ؇E}* )RCJO@\|j.r!epc( HH>(ÕVIN@ \pׯG:uзo*ZM?`ɮ38xm Ҋ]iW7Mα bю{5dj@c@&rzjz"|-^u)t9- P` M ##/_LYk>|mo~3kJTNHBS+0}[Ip(}0K-ނ1X]ruUFH$@$P.:.   \t k֬AZ0`۲9uR$9}5[-(>G"D٢-$n*uێSBoYaw%%O0 s_X-W q|$@JQg& dff"66ިY#MXG/j:vn-jcŻík\$BIRιn3f<3e4o\d:#; oxNl+RS~8@ %@Qa$Iʕ+$)7|Co4ퟝW@'6 9I"NRQ'>㶮wbpǑK-?aݡe{(8 E. 
"قڵkk:x:MWskE]bzatN^?ꅵDDHS$$ljK \ LLQיHH@{uG@G !!˖-CHHnMOQ)*wn58nu76=_g4%8ΚcEEXHESM7%ŋ၈lu떈\Ħ::WԵo !< ZtKJL[:>u^ hINK@bb".]` 2DS/ugn.Egbá xn6Y _ nW~ٌ+)y"i·#):W+GVwiDrda)XJy{bݻ* LHEMALLD<7.Ր_DUM/ר>#_UM-xGtJ:D\sXEK:)df`z$Ǖ[o}[k-怢iH @rr2-Z@ 6lFnPHyeN_JBg~-wĹzb-2V5ᆕǼ'4f:<<:!KW?s^H8uh{~x,5D9zsIJN3q … pwwG:u4d}jf~8=]@3QyXX/[!.)M"틊,PL~ȿhD]6!Xh۠&=;g $j(fm ~B{[$t! Ql_MFf+}Hd(Ŗ=KyΑsbU35=leN7v(t3 PlB A -- ,URb:b5j(Cճ1nO]ƔOSp4{aK0xtPk\EFa~ZWGuFaxkVEf`w*n/ ;(bj½0cb! _LBZk=/V{Vs[;,)uW[p{[4FhlTRdo*n, >{YzvN"u/Gu[6/<Ąxlo|9j/? u̥ xu:ygxՃp5̙#>>w??zD#vwAӚa#F4H@O(4 8|DGGtj҉RDIsq) ZF(Jz,<[%bw"aRʡo""^힚:SM9#']1S ?mBӈ ŦI\ݨW={6)!d,' Cu!#0hBl=vѮs+Bu[~)`u?cuǷ3&1wd۹,#psצ&L(~wo<)sj{[?XEe UQyv86 G&@QȳDZFJ9s(W3FQvf"Eԭx6D&cK6>̏-mpG8.*:Sg:'xKŰ -xaxGQ ǿށ?~m*?b)n꺣_s9^;v>QiS||Ohծ32SkVޚ2XY#:#&}!:=BB@ ;wN9u.&) JLE"jc>\Gt@测^7텐vD)aIDﶉ!K־w;Z )z }wI=_[{kV<;=BlY<Ū ٻCەubW" ̚o]y3uהSqI\~f0F^nF|#;lKƖa1sF4m?|&MC|AEz)xNNHH0( 3t# /W%rq]wq9=!,qP"luk#5#윴7m|nFV Yr{6C@$deK&[9]\!:>sK|ni8s%Ky*&Q0ifogOcO+:H˰`Tkrȁߨ r PÌョb=L]{مFRKH(<H`cnMceL;pgWۯvV;&Jznxrh[jonk uu"l3SΤ]BYe8O4|7 Bx3ן1uZVE{!=-<֣@EoߦcJb8 IDATt^>n126  &@QOIzgΜQ7hz#6h(DENwNa]nʜl%dge))%TH)^% (ɓe #uFQC$uz ̙3 /qV%:5(ۯ:SϫY6jKBQW$@$P:  "urt*V6N#djDtnhjtuzYa  PqV @@_J?~)Y0*uZM:SoA'Έ:#*}":=@H@_JLFpt4dKEѣvFzHx PnJ C`Μ91cNHfmұ5Q;f4#u6y !@QEA$`s)R"##u(q}򃍴!"(]Z>Z(:H@kuZ'%0|dffbȑ SϏGk6vl9^/ϵ3lv=n9LjAQ@š 8:.C ::ZIR^=]$J0g$u]lJSsȝtr7 Eg hEN+ ÇO3o }:3 (-; YɿV1X[6 Z]jEf1 P| ؋瑗uMuaڵ˽vȝދ0(͞N𣏕#pTu:s7 hL`ѢEHNNưah(4C_-L-@lHNNQU1 Ef>.\\ԩS%w`mХZ4TSN"xz)&;E^VA$`4uFQC*Xt)1dkn(4C_u2Xt1IHLϫmE-Vm @(2H"\>ǙҪiԬlŔ-5S"IQ dtH3:}&PE +AFyHfԱ4pdzA"tF\EAQW!N$@[x"QV-xyyiƃN3UXN0eɌOUmZ B7:W)E=Vm @QU@$`UV8p íaFzu'/&!=.E`[L6"LvwA wjH≱s:;y %@QSOIj.],}h?m>Q5ճ NM?GDy*BNREW ؇E}* 5k nӪh3j~XmU5cq{=( U2s +^|=]RMr"9JC$@[ Eff&j֬ ooo͘PUC.DI.37_teʔl.JVKCqWHH<u\$@VX~=}*kU&G+f{*& _._.E]!9W'w%V**-.} CHC>\i O@ddd(IR||I!&H,}Qqx u#Iɼ[5 T @QU@$@!@QgJ'i&;w{FddfRY^sgEdFy:}:ޯM$@3 tW3:(N_Jū͇$*V չ돢p6ؔln5}w>W:jT}\/4 v:aGQWw/ے @(:H"}v Kvu+gmgur\tb[˸t+'rYiMyD;[éEV @Iu\$@Vr RSS lآǾ^щ昮8Diղ bю{5ԧ/aԺSjz"|'+]6@Qg#4C$@PqI 
XE`׮]8~8:wf͚Ye(,,.69iEdVZBVNzT7 y#o O,1ûluXN! E!N?{tQ%H*?HM׎D$$DѺzJa]ǸY6암8<(}ic^N3 8*:G94&w^9r:t@˖-5:&J["\Qg"Vు/A?gT3ƌDZFl(Z:I! ("H"d#00*h(N|̸-#u%&I;*[:w;D\r,o -SFQWrN$@NKi@߿BvкuBkGu&7my=gq)1#< 5*$j3uD("XuPFK m8qiwp+\~\MyZ (8 E>恣 #pU$%%!((իWl(lk?3ʘk0 ~e^3KL/Ήk1Ȟ3@IunIg-:EEXHESM7%8x 86mڠm۶3l%gui9x}V8^샟XF9KԵo !< ZtKJL[:LD>(,Ϻ$@$\(k- ،D$Z'Q:iUIԉ{ 8y1,܍kWO,QבE]|j.bSrt+{ѻhѥI=SrϏΧd\J}8> G%@Q3q$d̗S,RAgFw-M#:G j.=s3p0j=~ ˏxџo!Zl/EP @QA$`|)0%dԪ83pbp2*ѸVuW̽1ms*.zK6LGurD^.$Sq4zK q׽Y?<|h"P9 r$@z%@QיH@?]vYfܹfuQ'E= 'kpգ ,\"P~Z}9ãtqܫҮ^'}ݫbBm)t9- P` hA %% @HHCPtQ#yk\0heP2(BqCl>MxlVz9Ktz0kxf`6 Qiݒ E᧘}>>lLZ:I"k;A.UjAP?~w Sg{"&~[D^|Aiuz_ sԙI@c?>1rHGS}rz6D藀@_O[ِ3+ΝNc"0:#"}  #::W#`$`>SN"xz)&;E^VA$`4uFQC*+ ̙\i0fze7$`=t1IHLϳވZ: (E U p9 22*lDZ%Sf]$)lXsY$@jS2 D)fRy.[2 :#_.^ٚ_' PYǍH \v gϞUԯ_yc0eɌO˅yVC_u>IONs. 1c"]"s Cy;}Q~ F(4nIΜ9ѠA#CH@!|w"\!'f)I+HC>\i?ǎI#W$T)LR;oV9q|^ΜAQg%!  PYΌ-H&H %(,ź$@$`>:Y& @)+KHGHC>\id)RXHHkH'@Q>sH!0g`̘10_tH>WZ% :  ;w 3Y IIP9DM  Pԩq̟?9r$=! :`Q E D)c)! 8:kzJ$.:uy70  -- Ç|3$@'@Qg{H$@BHjϟG^^֭ 777! ssy$@S9{$Xh1l0/:B$`uJ$@$@Q5@$`5 . 
77uԁvؐH9P9\iH(H&p%deeVZ 8:gzI$>:G0 5k@݀aB$@#@QA$@!@QgJNA 66Y&g:I$`=:ٱ% \Kk׮]3CuH_ry߾} YHH: P#u3g$`$%##CIc } bpU  Mp9VaC  @QL/I'@Q>sH!t0~ WZ% :  lٲQQQѣ6lh6$pu1H@}u3g$`HKKC5g }Pه+ E oߎ'O[nhܸvؐH9P9:G0  $$񋎐 ؇E}* Pq XM`Ϟ=8z(:v-ZXm IQe \]95'ES EA'n`% U 93)@fnut oCW>.:U/;!pBuN8tlE`8tڵk֭[,퐀& !1=鹕`_7,j  WZ% pU$%%!((իWw OE|Z.߶,nP3Sx,EW ؇E}* 8hӦ ڶm>I+MIdFyB9ss٧$@$@QgOM' Q:INu,$(delJ6rlQY5þNQgةc$@x= 82#G`޽hٲ%:tȮpNB@\F,j/Q^NLN͕ƾHE߳-pJg7cjgz+WkHKɀ)/%& 蕀MQK0ovuzX}  8wVRU_ptWU"n}&ˆodǏc׮]h֬:wl";HCtb͓Tu((ꪺj؞H'TNDL>}ݕlfo-g& $$ęQwDiR\9ݧ̥}@DHctl2 7"vu:|pH$@ N>cs.N3zgɓ'}v4nݺumy$Kv3WlklJMP ZFnic;WX2肂<ZE;BըMnB6땫u-u %  Z?:b IDAT-m wE1rgcM'+W94fc ӷ +i!_I"gmޑZBVNzaDpK[_a;cxױf*uHq V6˘*mtZEЉB@ ** [lAÆ ѣGM\1'N$(ةϑ:w(zK">zJEc[V]I?ܗ6VjK(>C 0N̂AU3wrɬ$Wa!#HKKC||㶮wbpǑK-?aݡe{(8 肀.E]fNlYi%@xoAxxsT赊SH3jm"ͦuY0==qqqEXXބ `J&c{za- q®e/j9pju1' %KQ7y/K521hU=d;DK[5!ْY/ ٚ}L}G]cÆ [.@Q vujS3VU4Ȟ3@IunIg-ꂢ"\L$@NE@N^!2 GVR`,$2~c{b˞9[2;u H`t)4!]K ?u8&{6{*Aԉ30Wr5jyfqEGuW\+of}E5ֿI]JLǃ_F ez6ƭ]b˘0y2spitjub9` &=jk(LSۏ_º.HHϫN Т^08ؤ 3M-ꂎ J_IvA{Uhԥ$eRDM_w wi{>]E;CqSHL -kiFUtnХ;K&31/QCpzu3U( Х/P#3:qqyobώE❻;ԴXzmax W}ERY<l@$@NM@naLχ*\֣aQnO!'7j>L헒#Wbҥ !$qPs_NfnC2%ΠV/$]v/Qؓ߭ôuY ׾;7̜|d{C_GD`_"v?XR$&:Oow*u˳`iM٨DߎU; Xl;-},ܗL}:c?X'ƫ@mX<{LMxQ{]-xɗ{MkFT3l =Х Й+YHC~A..\9Eۿ@ԥ=E-"{NO "qet|GyBP3E_.,rrrpxxxN:cE5/5DIsq) ZF(Jz,<[%bw"aRʡo""^힚:SM9#']1S ?mBӈ ŦI\ݨW={6)!d,' Cu! 
[]4c"Bu[#7`"$-IW0c#Lk{| Nu{o=] Wpm0cxxxhfjĵvulࡾLPk>8 pHQw(&[g>*zP3E_l,꒓xb[o=3,E?hT"Vsb%|l| x6wSNDg3$6{c<sqֵ"^/ #1JҕV;[ЯM]%'[,EM]wk=sq<ثB{'*mGġgϥN\-ӌiJqJ.'_}r.dr݌%ꊄ8֧=^b⃯BvP\õQC謑k׮)gWq)q>,<_kDVаfSTSm NԥeT\4\Z^/ثLN͇Z);u\.&) JLE"jc>\Gt@测^7텐vD)]mC}vR)4aē "{<(̭xvX{xcxUړwㇶ+#ĮD(5u^Eֿ|I'wŧqzNvF|#^bfB4nlǦ]_~vӐx?zPpU'RhglEr;qɗ0{r$ŗ1 _?{~E٢dfhH 8l'\.SglYԥa…mf{8fXTc1*a[9EgE&ok5%Jo3DBJ&[vkťS1I(g"KҙY5uN׫$Le `«($bs^V/BB3w<~'"LaP^:[Ι'1 }zY ##q[qsP#nfVtlG$@%8$CZx'0yrT|[UQW5kxNڊ._"kfdGuyyyիWuVXPc[12ML ?,?d 6!/Z9קVKtщʹi5JfFF :Fog,Dyxaɘx#}|}#ѳ@mu.Mٙa헧c㓅oc&<494heq[ӗt>hUوHNK6לr }|K騫?WGv9U5u&Q3EȢ.##;|||0b1â wvŽZoaվsfx^'UVvƠgu"H{=~SE]v"0}yvxXw={_LQlno37-RӆEGYZ?[lcljMxuX-n$@$  -$R{QejHLG˶yvʁ/_P&u` e -Y-w*֒f5:)YYԙ"uP"22,m]QD65v99J&KSAvVz{`~Odzk)3LY(<Ç#**J\_zj4k ǎȑ#!u={'O⡇† Zd zdbf!g%!.6m;v|˺wOvE騷n\Sn*śء}TKgmɂu.3E1֒f]0ٳl캨/:M۬Ss׿Q;9g@o745f4D]_CFa7TV_~{Y9`tio6ncx "KѥK+W7WBI搐Ԁ & S7b, ISf#/7(-Me=sF4m?|j]gs~\OKD)wq-6(,F:Ӡ!j'N~8+Zu1vW纓kXsma7Q'BI9z(7o$[~/~G;В=܃L|gؼy3^z%|h۶-^|E9Znؓ(ߝwީO>Appɞ[2,_tIɸ\<ݫ> 6ݻuVN\\:w:t(a+%3 8:uщYHL7/+L_¤ PacD=>DLNQo\T5oJg]’9=>VFK߯[P'o3g-U 4dh\`Y֬#G팼R:aXuBå+َݧ!3;{IoN~w}>}"$Z׵kWe ҍ3F_hѢ̙-[bѸr -[N:'O?I&3^jɓ1x`%xa[N=IոqcnQ8dL6 AAAh߾=V\AĉO|v,$NYz$Jg0ˏ0w67ygNkvZQ:jbveRYwkW,>S- kPoCkj|Ϝ9SyPlj۶#F팚ҴjJU <rrtoʻ@./'NIKzu2:=/gDx-^\xSO=3ROԉ1'3A+NĠI<:uJI~ i۵kr&hڴ)N>m۶)63F eVZ)(teӦMx6*mDD|=uLu+.DU=$_ "J-ʺӍj:op_1(^Pkjo<+yE1/Ѣ{i%ߠ7RN~ǷۨőDCU.,>մ+v"T$&&e7Vh"dchhh(['E[=tRK#G([%g+/ɖ4I`elܾ};TnV%W(첔iT'r pRbK3//ow9ltJg]ڷ9:Kg_Pg3fP ((&Qǣ}ZIsm"L!j';0dK]3N{ل5{p^ODݼyH,d"g l4 Yfaƍ;)")/ufʔ)XfF"$D)'6vD)&Q'dۥb %'}=cJ˘5"pN8mE9:u9KΞm-dMA|jlgŝMg{x/_t$g\ŏ{dK#Xߏ^{MI(j*<J2i:S'rM`ԝ;wNurIu"/9'gΟ?;*8'g򒒒ʜ3eq(m$"w%Yرckd111O^[|"@o{u1Im^/:GQg)MwtQ'’DH=E"#u]=D|D'd!'_N/ȗwMýI֏Paxw,'6 W}$WVIoN+bLvbHvJ)Qwmٲ={THIR"[M26Ylm&ۋ-B>e'3S1m,R{ƍS"rFO^l8qSģd,o|1]vO-C:IΨŜ\ygkEL^AyPvSmJN"v{1]={0C-e+]vp]ʖ*ٰgcI<"}}}F^J&+vYzdysrrȚ3WsD5 k'<==J[F2>{rmuQ`+OqرC:^s΅| ˷ju[EmҡZ_ 89,b0xh:B-^*/y/tqJ`xV7[^L[5o&N"g#FV%":1ۡ9:ǟzjMٳ/q!5٧SmQ{vKq6A',ꦬ(=ZU.vuwWUjSYFl)lyI|,!97KIu\$@$`uG 
@@ݥC}K,ARRի[&u^y3wnSgGY9yn̜n.!'"Ι˙(̡:$@$`9C:_V̪^S`TjPkXYP[ٜ]pA|N:Ny:??]4usW]e2wRPܕd(lVA PNԉSU釰=րlm"ƩQ3ERPkG˗/GBBs٬뿴r\fCb%LS/;Qln$eRt)a8F G$`HQgpMɁVȷ5ܕCjCC-YŒ"bbbxzzZ ^E|(Gyj<HC΄K.M΁m"rvMCz{g{噢]j0p@U͘)̛D=|aO1Wlɔ]Ξ<μk  /%$'bNsZ9oz{t꡾zx/"++ ޙWU]}/d $BEA(TT@hmh_QpC>QZq(VI(!( )&B&B xux3翟 w=ֺ{05 7tI=QS c;5l+Am6gӵz$se:; psYT\23!B.6#a8 2Ȼ͛7رc꫑Ue6oO}hK[qq̺Dj{,E|瞨t'U ao4.=ާӃ2 p"lj:w#Wj{[4'2:u$&B~^kAtSkojն*EЅݼ{0}v_%amC<9 N3ړd9'(Lj:;M+2˼Kir:SԩE @Sut@~s9p5ߙc8~]Ύ%:c@6`jvehŕke9/  ^UVb‚/ (\0/HVV+G %]%*7)+((@YYK: fEF&ϱM_ܫ3+Dx.Ni=agBQgWr^$@F0{ssXv9uO썛|=/`UmSG>csَ)2ؒiZSm߾HOOG>}t|YS^Ug,kd aguQgc uQHZ'`zQ71wsYZn]߇dҪdLS}knjSwSznaa!JKKo.Q?Љ?ZUSa#Ե+(C^i){e?+Cؖ~6g(LeHFl#\6[0i ?LUEvr ;n> OUmΊ7);wľ}0p@WϗH :WKKJjEQ9w?:_> xGvNk 5'<:)fu2نڒ革ڮ唛Sߨu}QQNmUйڱCLjcoq⢋.Ҫ65Ͼ]pG\w( S!W\>|;o._|[7o%BTtLׇY&a/ѡh:ÿ 8 0Jݝ#^DblR $ZnIB,vD&X `X2]_8qGVdNbV!VqiEѫsGdc,0h{&B_GFE d>'\L$@'`zQ,c~̫lgQ^u_}w꺖]0u<F6͘D"݋%\ˬ/sw֝G[r6f`:QWtbD|=O `ZO0 LQgtvI$uu.nzi7)jK"~ر>.R?fhrDE?#-]Y=喦"np[ƪJ?gNV[k˰ bU+upv'u|7FQ'vn{ÚxƸrq0s/Oǒu[헮2>S݅MV:蘸&9Κ6|KQgF$@&"`9Q]\z!o(mڙuȘ}.NΨRw=)Wޡ$$^SqY=}yewBHh" ?Tu}QWZrBY"Bi+o)D ' Sw S5aVS v R=}MQ> @ˉtN3dkvfu)rѧO4}x5*=x9)qXtH/kW5pZym<"V}!PUʪ{KY3ק_juRw+QF<{rpUߵRzbe寨8,mrү*+uWV"}u[" ەSmiI4OkoZjjgM5W#l1ERaRZZB !!AB?pDڡƥ+G]ZA'[OV#S'a; ʖˣ9ٍ[%E۳ 7VmB `{'>ىWޓյ}\X#edddk)~:_ssO @&BLv~e?}=g_1CZxʸE͙1{z;&# kf8xMK_V&tcj.SضmzAy7yk-T"D5_sI@)oX眧g}vD}8?XFCsoE<$U+Bhh"7aГ".__vǓ߅]^DyxfeKkLSX[Ԝ;EguTp$@$?K:ٚs qqϟn>sW<1g&plQQV'܂=vJ=/>6[ #+O[vzx\{~CrW욚*zx. 
sJ\^,[zhΐAةMmS2 ::ꈢNwtj'9SeR[[SѢj[UW5dY oyM]]-j݋u{s:CΪ[aKY!0 K:89R v_W#??QQQܹPg= EWw(Cv֡<-ņgByH(TɦHHD]kJd3 \69ؼa-7 -|U9Hb*Jk-YS`.Ke+-q-Ğ/=ltO~fڞ'Os`SP6b srR{ԚMNN>3tC}u#WCX;UeFDЉcU9' 3`ͳ_9sN:+3g?d'+.*חQchJkZ*i9/H qCMDl̎`Z~$mV:HEHPn?3'ɏ1{SGW^RQQǏ#""IIIZu#W@SLUhP#vz)H) r,vK$`{us+ZdC۸nfϘ/47*tvnnJZVl@ݘ0r,Y""pߌ)L+cVUԹVHWM5?ԚiG'|\uU;rU:Tj'/bCTalF(hH,'$dt/9?a7-r@lǠ-,mn-{J)_xkYsJ{}I=ˆ17_`qƒ; So›Z{7ΠJ!<<ɺ;rU:Tj'%G@ke9/  XN坬E^i_KkW.-[)[LsW9;j-oj!j5j=zi&t#F}82,3-> v%7"51{_c}o*aNG^Fա3<%;ˉgՙ@E1|9azTXp;ζVۣ󗮱EUWWC`)[0.oƒ-?`]qn߭kzբZ_a8|~ժu#vЯN6jHl::[#0Dj-(l{vӧqyе{mlE߈ .6l@BBZݙ_Aa^-7D`ۼ NWeU_/5lA[RcNtu>++ O,+d !iZ$ۙϘ"ky1Ező#Gݻ-! CAْ)[3.Eb߉+uN:L$Kx̴ɏ%Fcu)zeeeXf qui'%36, X^ k+q Of k@u:;0#p ۈ:̰S~GDKB\9 Gu EvN(HF39Ȉ"1s]BT9Ol\t(e„ ^$@"@Q cs$@]pAjO(Zꂙ3gpaZϞ=M8BH,(b Hnl+\;Y SU6Kf7G񖀈%K(nĉ^z$@$@Q@s$@TrZ+wLU8t2^zYd& (ꌠ>I@ݐTէ|xrllD%$#;('ME$Hu4;'M$lj:wkOFmwuSURgiq$:MIDATP# ps9]Ќ 9oV6`! P/HH@upe$(:~)7m,$@$@QG  Pǚ=m ,[ uuu?~SH$@B~@$0,%QJ=бcǀc$@$@QgOrV$@3 XիQQQ7|8 mPiÕ E}H `٨Gjj*n ؓE=Y Oxp$`yk׮Eii)Ǝχ ІE6\* PH&iн{wi0M6@%@Qg_rf$@3?{'[XnJJJ0fbN (gIH@PH&p֢[n =6@$`Ou+gE$`<:m _EEE5j:udp$@ӆ+[% : @rssǻvH(k[ΌHXug$` 7nD~~>FΝ;bN (gIH@PH&pQTWW#%%aaaHIΞvH'@Qg 8 6(VIH>@$0۷#33ӧO {]9+  Po,OHHH@LL hCNlH($@عs'ۇo߾HIΞvH'@Qg 8<">qqq'@$ :mU I 6HHHH/[l3g MkQ'_,$@$@$@$> % x-HHHHHH|(gHHHHH&@Q5*V$     3M8"     EרXHHHHHG|6HHHHHHku^bE     0:ل#"      PyIHHHHH|(gHHHHH&@Q5*V$     3M8"     EרXHHHHHG|6HHHHHHku^bE     0:ل#"      PyIHHHHH|(gHHHHH&@Q5*V$     3M8"     EרXHHHHHG|6HHHHHHkS%X.nIENDB`docker-1.10.3/docs/security/trust/index.md000066400000000000000000000007661267010174400204670ustar00rootroot00000000000000 # Use trusted images The following topics are available: * [Content trust in Docker](content_trust.md) * [Manage keys for content trust](trust_key_mng.md) * [Automation with content trust](trust_automation.md) * [Play in a content trust sandbox](trust_sandbox.md) docker-1.10.3/docs/security/trust/trust_automation.md000066400000000000000000000052071267010174400227740ustar00rootroot00000000000000 # Automation with content 
trust Your automation systems that pull or build images can also work with trust. Any automation environment must set `DOCKER_TRUST_ENABLED` either manually or in in a scripted fashion before processing images. ## Bypass requests for passphrases To allow tools to wrap docker and push trusted content, there are two environment variables that allow you to provide the passphrases without an expect script, or typing them in: - `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` - `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` Docker attempts to use the contents of these environment variables as passphrase for the keys. For example, an image publisher can export the repository `target` and `snapshot` passphrases: ```bash $ export DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE="u7pEQcGoebUHm6LHe6" $ export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE="l7pEQcTKJjUHm6Lpe4" ``` Then, when pushing a new tag the Docker client does not request these values but signs automatically: ```bash $ docker push docker/trusttest:latest The push refers to a repository [docker.io/docker/trusttest] (len: 1) a9539b34a6ab: Image already exists b3dbab3810fc: Image already exists latest: digest: sha256:d149ab53f871 size: 3355 Signing and pushing trust metadata ``` ## Building with content trust You can also build with content trust. Before running the `docker build` command, you should set the environment variable `DOCKER_CONTENT_TRUST` either manually or in in a scripted fashion. Consider the simple Dockerfile below. ```Dockerfile FROM docker/trusttest:latest RUN echo ``` The `FROM` tag is pulling a signed image. You cannot build an image that has a `FROM` that is not either present locally or signed. Given that content trust data exists for the tag `latest`, the following build should succeed: ```bash $ docker build -t docker/trusttest:testing . 
Using default tag: latest latest: Pulling from docker/trusttest b3dbab3810fc: Pull complete a9539b34a6ab: Pull complete Digest: sha256:d149ab53f871 ``` If content trust is enabled, building from a Dockerfile that relies on tag without trust data, causes the build command to fail: ```bash $ docker build -t docker/trusttest:testing . unable to process Dockerfile: No trust data for notrust ``` ## Related information * [Content trust in Docker](content_trust.md) * [Manage keys for content trust](trust_key_mng.md) * [Play in a content trust sandbox](trust_sandbox.md) docker-1.10.3/docs/security/trust/trust_key_mng.md000066400000000000000000000074041267010174400222460ustar00rootroot00000000000000 # Manage keys for content trust Trust for an image tag is managed through the use of keys. Docker's content trust makes use four different keys: | Key | Description | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| | root key | Root of content trust for a image tag. When content trust is enabled, you create the root key once. | | target and snapshot | These two keys are known together as the "repository" key. When content trust is enabled, you create this key when you add a new image repository. If you have the root key, you can export the repository key and allow other publishers to sign the image tags. | | timestamp | This key applies to a repository. It allows Docker repositories to have freshness security guarantees without requiring periodic content refreshes on the client's side. | With the exception of the timestamp, all the keys are generated and stored locally client-side. The timestamp is safely generated and stored in a signing server that is deployed alongside the Docker registry. All keys are generated in a backend service that isn't directly exposed to the internet and are encrypted at rest. 
## Choosing a passphrase The passphrases you chose for both the root key and your repository key should be randomly generated and stored in a password manager. Having the repository key allow users to sign image tags on a repository. Passphrases are used to encrypt your keys at rest and ensures that a lost laptop or an unintended backup doesn't put the private key material at risk. ## Back up your keys All the Docker trust keys are stored encrypted using the passphrase you provide on creation. Even so, you should still take care of the location where you back them up. Good practice is to create two encrypted USB keys. It is very important that you backup your keys to a safe, secure location. Loss of the repository key is recoverable; loss of the root key is not. The Docker client stores the keys in the `~/.docker/trust/private` directory. Before backing them up, you should `tar` them into an archive: ```bash $ umask 077; tar -zcvf private_keys_backup.tar.gz ~/.docker/trust/private; umask 022 ``` ## Lost keys If a publisher loses keys it means losing the ability to sign trusted content for your repositories. If you lose a key, contact [Docker Support](https://support.docker.com) (support@docker.com) to reset the repository state. This loss also requires **manual intervention** from every consumer that pulled the tagged image prior to the loss. Image consumers would get an error for content that they already downloaded: ``` could not validate the path to a trusted root: failed to validate data with current trusted certificates ``` To correct this, they need to download a new image tag with that is signed with the new key. 
## Related information * [Content trust in Docker](content_trust.md) * [Automation with content trust](trust_automation.md) * [Play in a content trust sandbox](trust_sandbox.md) docker-1.10.3/docs/security/trust/trust_sandbox.md000066400000000000000000000317201267010174400222510ustar00rootroot00000000000000 # Play in a content trust sandbox This page explains how to set up and use a sandbox for experimenting with trust. The sandbox allows you to configure and try trust operations locally without impacting your production images. Before working through this sandbox, you should have read through the [trust overview](content_trust.md). ### Prerequisites These instructions assume you are running in Linux or Mac OS X. You can run this sandbox on a local machine or on a virtual machine. You will need to have `sudo` privileges on your local machine or in the VM. This sandbox requires you to install two Docker tools: Docker Engine and Docker Compose. To install the Docker Engine, choose from the [list of supported platforms](../../installation/index.md). To install Docker Compose, see the [detailed instructions here](https://docs.docker.com/compose/install/). Finally, you'll need to have `git` installed on your local system or VM. ## What is in the sandbox? If you are just using trust out-of-the-box you only need your Docker Engine client and access to the Docker hub. The sandbox mimics a production trust environment, and requires these additional components: | Container | Description | |-----------------|---------------------------------------------------------------------------------------------------------------------------------------------| | notarysandbox | A container with the latest version of Docker Engine and with some preconfigured certifications. This is your sandbox where you can use the `docker` client to test trust operations. | | Registry server | A local registry service. 
| | Notary server | The service that does all the heavy-lifting of managing trust | | Notary signer | A service that ensures that your keys are secure. | | MySQL | The database where all of the trust information will be stored | The sandbox uses the Docker daemon on your local system. Within the `notarysandbox` you interact with a local registry rather than the Docker Hub. This means your everyday image repositories are not used. They are protected while you play. When you play in the sandbox, you'll also create root and repository keys. The sandbox is configured to store all the keys and files inside the `notarysandbox` container. Since the keys you create in the sandbox are for play only, destroying the container destroys them as well. ## Build the sandbox In this section, you build the Docker components for your trust sandbox. If you work exclusively with the Docker Hub, you would not need with these components. They are built into the Docker Hub for you. For the sandbox, however, you must build your own entire, mock production environment and registry. ### Configure /etc/hosts The sandbox' `notaryserver` and `sandboxregistry` run on your local server. The client inside the `notarysandbox` container connects to them over your network. So, you'll need an entry for both the servers in your local `/etc/hosts` file. 1. Add an entry for the `notaryserver` to `/etc/hosts`. $ sudo sh -c 'echo "127.0.0.1 notaryserver" >> /etc/hosts' 2. Add an entry for the `sandboxregistry` to `/etc/hosts`. $ sudo sh -c 'echo "127.0.0.1 sandboxregistry" >> /etc/hosts' ### Build the notarytest image 1. Create a `notarytest` directory on your system. $ mkdir notarysandbox 2. Change into your `notarysandbox` directory. $ cd notarysandbox 3. Create a `notarytest` directory then change into that. $ mkdir notarytest $ cd notarytest 4. Create a filed called `Dockerfile` with your favorite editor. 5. Add the following to the new file. 
FROM debian:jessie ADD https://master.dockerproject.org/linux/amd64/docker /usr/bin/docker RUN chmod +x /usr/bin/docker \ && apt-get update \ && apt-get install -y \ tree \ vim \ git \ ca-certificates \ --no-install-recommends WORKDIR /root RUN git clone -b trust-sandbox https://github.com/docker/notary.git RUN cp /root/notary/fixtures/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt RUN update-ca-certificates ENTRYPOINT ["bash"] 6. Save and close the file. 7. Build the testing container. $ docker build -t notarysandbox . Sending build context to Docker daemon 2.048 kB Step 1 : FROM debian:jessie ... Successfully built 5683f17e9d72 ### Build and start up the trust servers In this step, you get the source code for your notary and registry services. Then, you'll use Docker Compose to build and start them on your local system. 1. Change to back to the root of your `notarysandbox` directory. $ cd notarysandbox 2. Clone the `notary` project. $ git clone -b trust-sandbox https://github.com/docker/notary.git 3. Clone the `distribution` project. $ git clone https://github.com/docker/distribution.git 4. Change to the `notary` project directory. $ cd notary The directory contains a `docker-compose` file that you'll use to run a notary server together with a notary signer and the corresponding MySQL databases. The databases store the trust information for an image. 5. Build the server images. $ docker-compose build The first time you run this, the build takes some time. 6. Run the server containers on your local system. $ docker-compose up -d Once the trust services are up, you'll setup a local version of the Docker Registry v2. 7. Change to the `notarysandbox/distribution` directory. 8. Build the `sandboxregistry` server. $ docker build -t sandboxregistry . 9. Start the `sandboxregistry` server running. 
$ docker run -p 5000:5000 --name sandboxregistry sandboxregistry & ## Playing in the sandbox Now that everything is setup, you can go into your `notarysandbox` container and start testing Docker content trust. ### Start the notarysandbox container In this procedure, you start the `notarysandbox` and link it to the running `notary_notaryserver_1` and `sandboxregistry` containers. The links allow communication among the containers. ``` $ docker run -it -v /var/run/docker.sock:/var/run/docker.sock --link notary_notaryserver_1:notaryserver --link sandboxregistry:sandboxregistry notarysandbox root@0710762bb59a:/# ``` Mounting the `docker.sock` gives the `notarysandbox` access to the `docker` daemon on your host, while storing all the keys and files inside the sandbox container. When you destroy the container, you destroy the "play" keys. ### Test some trust operations Now, you'll pull some images. 1. Download a `docker` image to test with. # docker pull docker/trusttest docker pull docker/trusttest Using default tag: latest latest: Pulling from docker/trusttest b3dbab3810fc: Pull complete a9539b34a6ab: Pull complete Digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a Status: Downloaded newer image for docker/trusttest:latest 2. Tag it to be pushed to our sandbox registry: # docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest 3. Enable content trust. # export DOCKER_CONTENT_TRUST=1 4. Identify the trust server. # export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443 This step is only necessary because the sandbox is using its own server. Normally, if you are using the Docker Public Hub this step isn't necessary. 5. Pull the test image. # docker pull sandboxregistry:5000/test/trusttest Using default tag: latest no trust data available You see an error, because this content doesn't exist on the `sandboxregistry` yet. 6. Push the trusted image. 
# docker push sandboxregistry:5000/test/trusttest:latest The push refers to a repository [sandboxregistry:5000/test/trusttest] (len: 1) a9539b34a6ab: Image successfully pushed b3dbab3810fc: Image successfully pushed latest: digest: sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c size: 3348 Signing and pushing trust metadata You are about to create a new root signing key passphrase. This passphrase will be used to protect the most sensitive key in your signing system. Please choose a long, complex passphrase and be careful to keep the password and the key file itself secure and backed up. It is highly recommended that you use a password manager to generate the passphrase and keep it safe. There will be no way to recover this key. You can find the key in your config directory. Enter passphrase for new root key with id 8c69e04: Repeat passphrase for new root key with id 8c69e04: Enter passphrase for new repository key with id sandboxregistry:5000/test/trusttest (93c362a): Repeat passphrase for new repository key with id sandboxregistry:5000/test/trusttest (93c362a): Finished initializing "sandboxregistry:5000/test/trusttest" latest: digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a size: 3355 Signing and pushing trust metadata 7. 
Try pulling the image you just pushed: # docker pull sandboxregistry:5000/test/trusttest Using default tag: latest Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c: Pulling from test/trusttest b3dbab3810fc: Already exists a9539b34a6ab: Already exists Digest: sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c Status: Downloaded newer image for sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c Tagging sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c as sandboxregistry:5000/test/trusttest:latest ### Test with malicious images What happens when data is corrupted and you try to pull it when trust is enabled? In this section, you go into the `sandboxregistry` and tamper with some data. Then, you try and pull it. 1. Leave the sandbox container running. 2. Open a new bash terminal from your host into the `sandboxregistry`. $ docker exec -it sandboxregistry bash 296db6068327# 3. Change into the registry storage. You'll need to provide the `sha` you received when you pushed the image. # cd /var/lib/registry/docker/registry/v2/blobs/sha256/aa/aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042 4. Add malicious data to one of the trusttest layers: # echo "Malicious data" > data 5. Got back to your sandbox terminal. 6. List the trusttest image. # docker images | grep trusttest docker/trusttest latest a9539b34a6ab 7 weeks ago 5.025 MB sandboxregistry:5000/test/trusttest latest a9539b34a6ab 7 weeks ago 5.025 MB sandboxregistry:5000/test/trusttest a9539b34a6ab 7 weeks ago 5.025 MB 7. Remove the `trusttest:latest` image. 
# docker rmi -f a9539b34a6ab Untagged: docker/trusttest:latest Untagged: sandboxregistry:5000/test/trusttest:latest Untagged: sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c Deleted: a9539b34a6aba01d3942605dfe09ab821cd66abf3cf07755b0681f25ad81f675 Deleted: b3dbab3810fc299c21f0894d39a7952b363f14520c2f3d13443c669b63b6aa20 8. Pull the image again. # docker pull sandboxregistry:5000/test/trusttest Using default tag: latest ... b3dbab3810fc: Verifying Checksum a9539b34a6ab: Pulling fs layer filesystem layer verification failed for digest sha256:aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042 You'll see the the pull did not complete because the trust system was unable to verify the image. ## More play in the sandbox Now, that you have a full Docker content trust sandbox on your local system, feel free to play with it and see how it behaves. If you find any security issues with Docker, feel free to send us an email at .   docker-1.10.3/docs/static_files/000077500000000000000000000000001267010174400164465ustar00rootroot00000000000000docker-1.10.3/docs/static_files/README.md000066400000000000000000000007031267010174400177250ustar00rootroot00000000000000 Static files dir ================ Files you put in /static_files/ will be copied to the web visible /_static/ Be careful not to override pre-existing static files from the template. Generally, layout related files should go in the /theme directory. If you want to add images to your particular documentation page. Just put them next to your .rst source file and reference them relatively. docker-1.10.3/docs/static_files/contributors.png000066400000000000000000000550741267010174400217240ustar00rootroot00000000000000PNG  IHDR8sRGB@IDATx] |ޖ!¾ KPp1-B+mbۿVڠbTP6A B!a 'owf{y/{Bo2۝̹]@':t9s@:t9s@:t9s@:t9s@:t9s@:t9s@@kqZ=(Dnfzq-]Ss@s5in'xH5477Gx~LX0 ?7q** >ّ|hh. 
E4P)*Up8pԊ M#]5o),xһ|Q#.vQg:(9 sZ] ;{1d DMtkֆP^] jmAjE=ݗEo;\w5F&a%/s %pn:fF> -׸s iv/(+UKۜr(^BnVnꞚ?]UI 7Ss51uEhDc:e+_Vƭ^!gr?t8}0m9 8%\!_R d ]8 #>hS[%-ͧqZCs@;1\&U7'ͶMVQ_7 &6k]٫ԫ]җI>f>' )~.aQ?j[B1QԜw<4옾`oDߠe moKEskUإnp¹`+*yrWEݾ|Nio:O>F"T 4p2-qT!|G>qCTrUj$ N[R"n@I]IӀG|sNl=)au*B_yH.iJ(-/FRpUؒF/F5QUut]@D^xN{ApF Ch$pG퉬V5z/G_-ysذg4ti4k!vdhIex{t0O95_ɱF̗[:{ 8Ux+/6>]IĪp3U^I}Wh-9RsaUXC\V,)]YWR襁ns 8ǫ*__gpstIGntr#o䨩X܋ؿ{'ؘ,ǣc}xi6Zh&.XFX bN:E3'W%/$-1徾[z-D!&D{ۭJMx2Lk~MQ U=(= Fs?0B|R Q.#0Ys'BI2׀ft&؋m,!}UۮѾ+hM!qʑ:]JenW/16vT<>q<WO$S%IH?!WvEd)\s/#)n1r >Xj+[[dwxyֈ*7OC|$T#{(㮅 |+v8SCv}IH=a5W3t26|Gxjh7N ꇘx9etNǟ\i0/Op5 .I!Wso-ەʾ1#n |dfaW#n] K `=/?N#oɑvJ{/M&sw 89I%@e7 x(*@f߳KKl*5~4ZX,{%AcɬxT!0ؓ=1_g&5}!Ff0 s=e{~M1%؟̝+1)!![H^2+OYY{#7r8+p<܈_L{."\%(^/8`vH+39Hym33jҔyUOnmƦCejw-H#O{D+:* B%rמ 2y/=YmG%-[)du7[Y1aZ&\vjc;nbt}/[E)DCvq`fv y9~ I60M  w7v6Xzn& sp*6?v Xbgx53QO{ srPH~EW5iTKkK,ez&^uhftj0#,/;ČH(ΡgQO>߰Hƚ3t 4Al +æmp ((=.݈iSpjkGie˴CZz:A#fѸ:L\gzz[ut2p::tי޶V mn!vyQOA{16f:躛?EN5,رz3,WކQP_q3]΁V@t-ݧw;)I5t,6OIy]XKr:6lkhZrO69Ԩmq$ WR XaS &S{:CVBB`,KNCHf`RތUG<*Ȝi`adkUza-Á&F;h"kFV,qq6qmhNڞ$yQ1*: w+x;- ^'Aie O5To8K*E+ϸ.Gx !xڴiD nݻZ<NP?< &B^Usޝe%ꐯ\Ջ_03s"OsXwyA|pbu#L ?j#pl`bLz"$p $45̭YX`r.+Tt\{yStOVZ4X_GxmоVrkuж: ( j'ϛfZ5W2\!Eq]\_'2!&/PruVz¥D3ĚWg` +kP.Oxt fa3W>7BAi=sv7+3o/\l/Zv_+nv,0o_NFd̙o`;IXzxYnI^w=YD❂DL=D"vqvwd+j]rk0Lw>S]]m yQ?6DA6W~zuդDH@H7J)O*qXʨ/Q^[\5qeػijI+|tHBXLA?)^ `#%E`Ƌ*qRԅfJ)RehJ:;<*)s4t9ޫ3JZ n_:{E)ʢ9}3Dsz,&T"&״pK=.b$WR֧*UH(R&mH(Plz 0tt>$o$eцD=Oҍ %q2ǕTGw{/J"'_P?+)\2Cn"eLIܖHd7p,c?AB8ڧbCVX|g31vC1l#qvzD %&0s['Hx!DŒF0K4`'C`9.NKQ ZL!.߁G+>8dCj`#tޡWGϲ-x@!gM.||\3pj*`)£౑l s1hʻpZg8M0k,axm>/'mp}ꭃ12*Jݏkf"-o E?—u L܏w7F;Vہ1~(Kfc}X25e<Җa7'9jNt\{'Flf%*{py ;?O1rAݳ'Vom z/q7HZ)f2|ʇW"fb=.cLh_m1p2h(RP$n4  ?T˿G9q*?F*A&Qk MY#1zv ?[M-rœ wF-q|w~R a.T*6#| y3>QUd}Ue푪4=6: [Sl ^%#僮 фA=q ^'i9qȫr#"3n8Sڿ֢lk+FIv& tj%Gs(HW5Ry17#n lAQjexc?f%cXJ,yyPm3/Gozq͇< fq8bsPBppXƣ9XZhRޥօ'F-dUmvDp@ٴsGhߢ( BS(_yAlZl6+Nх_Wjc~7 ` X{!"fl8(T} dxEr`ePh?sͰ,(]Ϣ >=Y n>Nn{Aԛnߺz#:\7j=6y."i۫*7|ok ?Enx 9Mѐhn+0^UhW5Zwud7z&cl*7k7aU v% }]}nj=0?kôk߂)~QIM to 
r[?BXav z a'bOT'i~"j VR0ND<@[J yӀ"y:-}dwȬ42,2vvJ LPd-.p9w}>1:P=3#=^<]<Ǖ_9`#݈I@~sCp@m'}G"\aisr(ޛ_7OK&e5ꬩK{/D5tD;KFcǟr7c" [~s]s@i'i#*̥DU|P;wvaXjH]P ӹ8 }Xi\)PL*=&8EuY>=9Tռ 9bM̫^Kz :JL*98JģHwOy  ,O71b2U͢s^ (7!p UiMt͠NeP`YͥEE]b}Nƌ[Gfp$H,EkPK4ӻYGကWfainQ8/j >*IfKeBisbws < 7}~ھ4&ðQx@=DULrF~YA H:vhU"H,k9Q 1IS ?6d߁/dH%a3Sv$+sJTv]RrV­{i"?20Jbpn<d 5rXkcQlu+Ð,t.Xkߨz =R>\wf R '^lTEUǸ/8فە;8 搪 ?t@ɸ{`ޯ9xCikk, X@/i+>cXGnߡ8sUMN4P#>ؔWN).Ȧ.揀@BX?\twn/GOa8 ]Q`7%E(؎yi5\2 M\Ad7quX{njWK !؋3qJߌqcуk:Uʤ]{ d Z#{DcIlCQqJn(v-,n',ffʹn}h۫cZJ9Z,09YXKвϮpQw;® DU1F٨x ֞`^"+lL&Fwf0]6}k}2!Q<5z}@,(<߽?©alޭp (/X[ R71*Ԗ76BV x}+0LHˎgh 䛋lp1%ދyN,~4FMՆE#6+LVP&Eq/a{2ͥv&Zhj'VMp.V+B>y3lG0q#|8,q&vrCZ8@n.AGC#Lŀ&!cY˩y]D5Žhw:V:yprThCi2m"K .ma2U8v22 ).,^CN"DɗN,lbk-Mɇȓ ߯ɩv9L&,{lDpiI&>چLICߦ߲zhhnߕ_$ŒX?CP(ġHU4 '*n rt8?x9I$%, C JOr&|dE6jh&[17`"]5]#hGjGF:,  z$ڛ}! <(pt+RV.+2J0ORύ֮`6h]ȡw aTٹSc4E4C/&v8|&|&;Z%~UUa&Z=~rтϨ 8':^j*(= ۔9qnDr jlC'.8C3U6Q鹂d{:,mQz8'_ZDzת'Kr]̵Ya2P qtq҈Pj@W0v9D^jTn3oKPdEQ eBp#:"8qmQ /NuHG#K3:~ N 赝 zU /qf:0's,v uP? 5X=SXfɕȅw zVXʊ\U榼Xu_S'.:KXzxpt%=XN'ryҎ}u6o@`1fEKAQ?~Dאn(lt",(BEԑXe|o6(3=Q@p8;hv8%ٟ Hj^!jH-9zW%+XaSl-&Sv8Xw `)qZrŪN)eaȗR/! (9ɖD]]{ óhl+B/G F \ ;*i~IS+e{*-?@5LAW ł0?r&*=VEMo2Իѩ5;[NUCScǻo@D_4Z7l2,A/u +ƣKNUe%8p*O-L!EP*V;دP"NPG ~Eږn]N뢾`o") Լ/us]Ss>1s0RrƩ?!釮[KcY.S{t"?W(5fT~ oѸR1N4ABW:`}z 'Lr9zs0C?#<\ў%` "c%,:AC(mnMB)XuĎL٘uf4>zFsH Ktzvn{r O߯z]?oY#9!@_.WޅQ+jS_AZfסx[e$M6j" DiF=x| V`&hD!v۫Ǟyê?ʱ> 3N:]kpCEwb{̏Бns]1oH@Oz3<#MSxуl02w+h U5Te#-%w]"Ho)PZhp1_̭5]'auL׃>t٩V|(j044/T(Ï9 ]q5L=\<:в=ktyA7x! rQ6GV[~E]su8Qύn P#DM])آafxǂa VyG]Vp4kpnbK"ϕgKm>!z<LxRl>+y 5C_]FaODpUpsN"xM&nҜDci!Zq~_׵Ĩe)pe 랬b菲htl%zV1V*AňPUGl& z Ys*.ߨ{eXֺ;(s^I5Q~h"938|Bqr +N5sbX\UpBmM46'6 Aq8>D(6ӽHm6)P\Adƴ. 
D,Wm[R!'Cij~b}r|6mkrb)K2`Tme*X>H-x3&UDf5~߂E5*kѺKΝBzOrz^kc*0.GdȡnU(vЯ='UѸJ~r3}OZ<,KN$PN82.L"hw'g{co%zOrV@g\sx7UP2 SE&$kPi}g$[ڼMCm ^svJ3pH܇d:52^1٧]{uF'׹shNf;=2 35(Ĥh3~C^]CFǶ!9k엤_1[}<~FN2)ST%O/pW+eqZRj_`_:_V+3ou%Uߟ);h@;xw4'&wNj*fMxwMN QB7=c6:vyQogz *{ཀྵ5lI~uٷ8{jycJl깇c{^Uo9vZ)VU,صj&M 6qSL1Ŧ}HԳ _"TS2e(W> )AfՔN߭5D^BaQ[;BY캒Ƚ ?»-Ù?^W=ˊr㦭wbK~w^l"L,tC5;[깉8mFP4;V9h^8Y\_%<'L/a2!,=8'-f{\{;GJjjjEMvMlWV%Ll\ϧڇD\WIyʯay -ص9oS%{UUa\K,䚩3vtQS!OWɰ52=t\0o'o~s,*Eg#^OZS[u18EfH'r{~dڻ3Ӱ%re+Ûaםxrq#35S`(+Fq i! nxT4&[!~v(=+gbl=xLuHɥ~:!n# L4z ,>h=N%KG.5`z -Pd 5w%uraPrmf'C͌ QT,&M6;aT oP$\Sb7ɚ?Zhc5Lr=Ubg|gk|}~ =|w䁴,.-cOGb"B, Gz++w@,8p9T:bgcdhk&Ⱥ:uݙV9(ɢփ%D\:v򄂑\('f+o ιE_v·?9fQDu;ӝE {y6P-B^0g(oFR9) i՜iX2y 9͵;Y2J;]䐥H"Vӛa 4{aׁBW \{&0\a(Ƚg7ys̲74TqJ}"XQ)*鴳g5$.dDMI\& v?_ߑa2Q -|M6#W.|Y.<\#hoJ.i}} C_PCBAY6|,%H._᪙7?#;Xx/=kWt嗍拾6:.7Ig%9A_J>lF zt8 .x#2;@xqԕI՗2 jsB.lӸ7x0װŨ!ls 5? &?# ޺K6ŷШr󞙶.E6;ȇn ?lbKFIy3stԕ:+EAr[ٹ&h0Q7g \ralhsp󙣬Quв"*\G㔐bmJ?i%;#|tՖ_ŘWVxe7ZF} *׷%VlОDYp$\PY`o,v"5G? si`G};`7}%$TEp'@3pƢ)ԎL#Kk:/} = z̒ұ,"\ٶحbGE=gp]QG,yq~\#*Xl"zd=&Y?#KQ>vϫ<7 5LuU:N`^*kCmL QC泙0x!QJ1D#Zśw&P`7 FCOR*tV̌e F.{D ?Jҿ}# 'DD>|6r IjSyxþIeF⻐ Ӈo^ΛBu 89 _:*RN?#V`d{0NO{^̥\!$ۨک}xK&uOVZ_ƻ߇ ֻ?"ܴƨ/5mxs7pvv"#;ɢkl\jZqaetNJ2n*`P3SjO$o:a,q*B͇Xrb1xn'PQGOCҏh^󘊃رh>LR'*Jun WB{9Vj9h:Bm+R$.6<9+ԭJevM0^" ʝ_nUiHiS!|CJt]=qا8(#(?uE`͝ï8nvE~5GN0P90ۮV)ߦő"wH&NudƊu`jkDfFnvi0Rjޠ ~6!<뀖ܠ<-eHEp`UQ"7 Y6;2^ ֣w튧':_-9 ADТ 6"gFOh2hbkFf 8RQL3xX$Y7i^eUڈZ҄jˮ\-O 9Q0^)x8x/zC<鳦jlk |B#IUkkwTNlrB9 Ay']٭bu/]'^@kru[s؈,j K#}赜ߵ5)ވZj 3}& A[ .6h!E)CyN` ^&ӱ%p$'̌2NTDQ ZliSmm䭹2,mN=;\Y)Ek0W^Z]ܿ\!`z1lQuNԉSј< qٻD`a6f0 Y_ rQ+A@xDs>ΎYFǔƲ2k8QHpY HwJm;VD?9Ъhu'3܇q}JPKԡgSBы+zW9goЧO_`@/Wر'/5#U8_hCd~6$"}(@|6Um&5YL663[ 1\ʚlO]΁Ko.v>z!ik-XS/)}w:IrYH" hc4\ ]a ʇ_ hS˔sQ*ܚQy\oHI0<:ZD}ᇹl翤XD.=T$3|d,-Cc33_^áO`s8q͌`=D+ڜM*&B%H@mYvmeyh,.J2dܩԫʼnp!W%}]AFQ9j)Ǝi'kRaaW(mmouu&GEKQKn2:0bӛHie0RMΘu:.5. 
N}wz_aaC52  .-[GuvE9Μ> |/a:<xRn-ܐV Le*GKͰ!nطԌQ=M\2ߏݼ`%kOu꫐̪µo@8 f:=ٝYz*}Bik 2+a N _ErͼF2:ZXDU(APSwq!_NM)D0.K׌3z[!V] A!]d/ܠ`_;|͐Ռjeي1t;|;Cti:5bzz5p 8u'OXʑkHmr0oJTB _z DxjNdEv\\&HjD^1x#T۶Q~gmŊZ:ڄ~ |w2'_~ dh mU\;c5Y{N `\<µ |)hBܓܲphéY1"qXz~1y"k-0|刉 A@?9yw3-L0f/jJ 4| r\nYylG+⺠uU' nITXfڻB]}_Dk?-AWt췦DJ^b,6h.ʵ2W)C0m^6ZY+WF/N@8Цܰ)S|B* 7Kk}n)zn=Yi!eS,C&8Ȥ@x/ig 'ȋ{H45j[30]ۗ˨3kj(n߳NV^ԏtm4*-]G!w'$&ɣ0Gs_.zbo39U8SqEcBYYl5\_zܛ:~(u1NshsN~ӤIF^.W݄=!>Mw6G3c?jRLhm|]U54ZA3<}3wMytGox^ԏupP(c~}0Z/NbiZ֖sWJ m /V"9mrXBñdbeEѝ$]M;^[7vKK[lO؞ChjWe|{l&100"oLb3R1ҧ†o@Uܻ"K\Yr$G +U`zݕ+ỴM%S?F7+/G:'ԟn[ w3a4B&Vi ƭ#i~oK:т\M C3b2+΁ρZ8r؛DȎ8ݵb3edtWB"EЭdwz1] a E1):xzczUZN sJOF\<ҽ'<E͵_{4lm|!{`YVU|ل93Bm⋞΁́v)\ >q}TZ@>±\iO?W_B!]@{7?[M[.٭Lmks%9Ю0)S jl1.W?8cFQ rVq~'.Sk>u6$D{FH~ssp 8k1mڜ5ٛV{:n("E+źD n1`ߙyt]!F4l=A?0u("ӅSy#K( `lebX_aƬ۳؛2H&Ŧji~ Ůfze9sm0}czԏutft8zL +tWymko$fנ8t5i,g\{sp0U\{jfF:Z*_,瞀oIU;o6r[iͩn |Uj`YwzKw~CֽC{H ztE_E73K_5P;Gi*#8ffpV . 4' Swpf*ڲ5Ds@瀋5Z]w;~ʔ)+  ]oMih3& 3-]he%0PR y(c@EpCtŠ6ro3uh@pU 1~<Æ3uZ!lXV*e7oz`ojjj9s@ErS x[`D!P1c0W 4RjrȀs2G^V/p}xxYt9s@:t9s@:t9s@:t9s@:t9s@:t9s@:t9s@:t9s@:t44IǺ<IENDB`docker-1.10.3/docs/static_files/docker-logo-compressed.png000066400000000000000000000115541267010174400235310ustar00rootroot00000000000000PNG  IHDR '3WfPLTE9MT9MT9MT9MT9MT9MT9MT9MT9MT$ +dt|3j|&-8MT9Xb{Aυhj tRNSd)Bj"IDATx1 0EA؝E@H14Sb.fK[ aKLFva0-a0ڎz'a01a0@ €@a@ 0 €@пX MC6Q<\C)}q+t| VG<  _Y+G /{4974)Չ>aIp,my~Xni[ Ɣ1֔02p Z+q>cNG@Z4#U]Z FQv`hY1, gɠNPX YSw y\'‚ #.lmPm6``#OC_3/PV*,܎#㯁 d`,^u.' &˔/ dS)9~``,f.0ݔb%E0=J 9 B.cUnA2qBZN<>wvmMgڇ J;3j"C6=gsaR2ڞߚw1^ EM#^:wmrm]7`XSt,tkſG  )CrUGj91Gj9vO`긘XzWpȂ 4%)Od,׾-;qA* 4pqOf?P*a_ SIvn޹m adrʅPB}bmk Hو|{e&A"fb)taIld"p -X|37#HtΉH0r$0|LrԒh4R'5)2g?`9fQflLV0 pv0lNZZtOUw[!;YȂ#K&QKCӅ\| #dw0CvoCGk,GlbdbT [F$ML0LDR=@ 7^*@v}Vb*7oj%LD-t]Qo!ƭg8 '͹&@+{%3f!^x/?"4`|=o41쮠V;T1,](Qʾ>liȶd<7}`Bb# G^&<}V J p62!P+W[h) FR/vZ1! 
ERٞpN,qwuA.,6-ܣ:/TpYa1p+2 /LM s <o"jbvaj纤øw ">f`8x 2,T) \[^l,׆/bH2iIF PKea7܋0/lRsI@'q;߷iƩpEdVk#139 ۢ!jSD/Ҡ"tDN"rU _W[^7u72{unfl:;8&Q*[Y*TH |vfo+/OȍS2+p_W^sgl"9/Flƍw6E/Е (e,35kX.T&]NkX&S\YʬhNI ޻6B=CG& ft57 bkz`h+ԍ:T!|6ihZa%߆e|EH1<55,Ē8]5>lQn"hVrḉB)V&CqQEnncLccTZ P(DTcn^ffXH24N2Bɴx\ f汯\GƩkQn;G֝5!;A(p*LumsJjmL}E `*lkl IENDB`docker-1.10.3/docs/static_files/docker_pull_chart.png000066400000000000000000000160241267010174400226430ustar00rootroot00000000000000PNG  IHDRSH(PLTELiq  04:.47.2626= 4:@OW^ HNWIRZ GMT15:06: /48EMT    15:   /38x"    {o{ U^g=CIo{'+/  ՟|IPXR[cbmv/38DZy #7=C%(+49? KT\lwu_hrFMT27;[eoep{r~*.2:@F,15@GMNW_"&(htXbkBIQ"&S\eGNV^7tRNS @s(_K8k S,o0wWGϛ[$g4OC<ۃc{բRzurȁɲ ZIDATxoi'I&MMcQZR]RvY{xx/7 q7=>sqi&i /#cK-ª+ǭ 7}®CL@d=_8Y:}6 Ušo=i)O$q}< aI K.lxy\/8xg(~t"G "`A sLAaqzv˾PLakIޗb#!}{*P}q7Pp@ռ0*>(-9#0`o@M߭rYP*nq6 L}=AXP2Iִ t7]H.Qx6oqJ ~.V =c=@Use% k?SA׷T۽V8wPP_n-Ա/4 4n"7A T <-ϙȂ#U#ۤϔȂ~_8rD$*Z (>țhy١}[hM<̾ G>@źtƺ# ç+*jsd[N \s TZZ@U>7PE繲:'Dܷ}cmONa]2ۄ%t OU6d@G|Uš:›S5–~^8$J@I>+lj SF4ۄMMO7Śݡ=+JNzY|/9)}$|t=G$9Y׮Ѝ$;P `~9:?mD`+~.!w=A&"b߻4(r聖ЖVrqPpY$y}blj%G~|BncTęj?r $Y|3j!jb3ܭXkD6|b-9\q T%=p9 2IK*6I-\'E褶An5Gp%:msyR{;B*N`m'UQE0Ir\x']\UXͱ"N0k9 U !GN#`i'U"DXBŝT!W`&PB*o>3mțO!ŝT!W@H']SȑX`q'UNŃ܍೿s=[DmJ8rAᒡo![$*lUCQ٪H'UY餋 98+喇:}J:C-÷_x']|\}9*b)N@, 2S-:Bne*zز*"vRIreYIr\wR !*Nj%*Nj%’`*"Ir\wRW+:B+7)j(Z*ҐkFQA, JrUIrA܊ 5c'Uȍd1UIrw""-$xe'UX^:z&-jr]7WuReqB4ލIO!>ነ~Z!$ z28p!I8Ku[!WǽdC܉Stk_WE'**&6͙r˒8IrvljgY!"+H"iWe*o .EOZQNB -> F岈56Tdz ㊠'Mnz!g^<^ +P°z9"0zCn86&@@n`VT4 *=ЛC+B}=ЛC-_ys\*\cOtA.;VQy?> r9CF ![7*VpD~rdA'Uȕp- 9F&;B+±ŝT!!37WN`q'Uh'UqEȪ**v9@"h'亙(N`E'UqE*"܊ŝT!(N;ȍp N+ WG=Nkr!FwRʫ!;BCdi'UqQ3Ir\l 9;B+ŝT!!*" 9F;BbfE0IrK*N[ͦ;B"/%T!ŝT!60*ŝT!wT!G1cUMvRyĸ" 9qE(Ir$ZIrص*" 9vRWT!=N`*"XIr\wR;"< H|rEklbj ^ep(&>IzM>$̧CWaq%)kxwKr䜋)麒\ WBέq_(B裾 \+REΘɍ~ gR5S1ao)"/mV؝wf?G6U1_Sc3sʭOYE7r{ #WQZGҕ+ڶt0 v{r mgsv(KVAFovŔVQMU~\v  buƶ6^-?Xf"YAlehgU1N,@7݅8Xu-c^ :S]΀F/]*0{PQjsLҗE~bARuU'AS:L^/M,y<~z3h^nyx|Y0[/FIJy7WdXZnp,~|A.y€,yIEɄ|jZYaSBI 亍_ rxuD :#BEn~ZiD@.HI% C#QKztp{u^/fK+bB|0S))WY\߂*` au]J4Ʈ̑J4PyQGM 
PPZw.-Qp0|LkPo S:BaRC5HC?<6L) а-2)BvȌQePW,%2K~A?z}2o{trwoqz;?A2VXTF A 0 eGC4MREp!|Y'A9и3m"mJ);A+~CwtGT>NO 5{YPL)׺aCr rd gDef'2 0 O9ޕ ڕisthŅyM)§ZaD##1"Sc2x|AE>'&ȣlgJ6&4$?rC1P2} G{YC8mT/lAGXaxMZP,3! ΆVLTp-"nDhX O}(Ae#e,hF{s^P ~kE'R;`l&GX]5G%;r۵ɺ]Qsm#hb9lW?apW#Strآ@ѩKr4`nɒFX*$μ6jȒ@_S fၘ ;/TZ\x+݂oAL[7߂w}_%8HHZRYB4M4 D{SF~B@Ԓ54t^{\>-<E"G"ATBhpB,ǑHu Q׷#BbőjiH;W˂lai̢tIծY=Կ'Y[hGw^YMiBwծXtVwYSJ k KVVuSik`&i}9c}IENDB`docker-1.10.3/docs/static_files/docker_push_chart.png000066400000000000000000000207741267010174400226550ustar00rootroot00000000000000PNG  IHDRP PLTELiq  04:.47.2626= 4:@OW^ HNWIRZ GMT15:06: /48EMT    15:   /38x"    {o{ U^go{'+/=CI  ս%(+KT\ #49?IPX7=C27;_hrlwy BIQep{|@GMFMT "&bmvR[c,15NW_uXbk/38[eo:@F"&(*.2htr~S\eGNVtRNS @s(K_k S8,0oWwGOgC4[$<{cuRzrȁɲ5D#NIDATxoi'MliMMh)]ضJb q,iHq#H } g؞q'N>:Gi:-xdt=QcgTɒ槯ΣآP$I$I$I$I*omqFK}çEAԻjYP50ݑr~cԋzLخ u $=YyVD%Ms!%wIma&{Fc(,5R]ataE[䳶6§eyr|4SpWV5JXo9>3\$,j גЈ;O=-lR4rEЀN ;\u6 +ܹϟIs 3'}:#>聓aEűVQqJd_-{.aɁcсx@44^mvDVde»M xGߴeO>Ŕ2c*S3+1`[Yn{m"<;bf-xazŒǶy;XrѬ'1bӺSfivӯ醁P)n*Ubވf[4OLa=A}mAgPݼģ&QO9.na%_^otf(8ĿK'̅ǣK[z nSZ `>@ NxANG/(naRE?oG'3BQp!dZBVet iduPŐn%仡s3,*J'ҋ%>غbߗo7d)T-* g om*b)djGVbP=F*Wi}u UE^=,wuZ( ,6ٳם633>`31HCPZUjz?uVam);[ fyoi v`ɚ1m;;ro/ش;{Hb*DNO2q*hѽ![%(pGю p~ĸHzG>7_%<@s;Zv@ ǂ,؞UPn8]J;ʖQٻpݱK~G*'٥Q]u&_ wn?cumtpm9dq){ׁ([]J+"Z]J;Y(icL}UJJ\,S,esJRJ(%RڪRJ#ek.%$GR$%]K*JYVY%rJ(eIePSUB)x"F)}4D(e O?Dds*ɕVI J(%RZRJ$%ܥdHfd W(K*ɑ%R5V LcUB)UV ,jJyPtR*IE(!o#(%UJdJYv*ɑQRJI*Q %R~JYȍpw]/%INsu0+VFʴN;05LiRV$ vmIM]S%k)elERrg%"uL&JS*bQd_=<#;#751ȚJyw"R,ښr;x|O lMҼےr'4og l]Ҽƃm˝4F>X%uҼ)?ǃvF3AUJn'TIʚ4Ppzs)d2;2~RP%4Hpa~yDeHnT')ͻ-G;濮J&;)`WArIHgR˝'4[ĻZU qJJ^KNߣM4{)l1&e u#[`U@JV )Y%d藒U_JV)OJr%e$WRn\Jr%e=;)X%*ahghUR{Jf匤$C)J6R`G2FYHY*F)ϱ)F)/ F)Y% d蓒USJVz)Y%d藒UUʂVIlW.wX%tu)gR Wcx2}e+[grŭERfmc.?) 
uh/b@Y@DAe?}NslG,9xF5s8`Թ wɲe.l[:`lYyu2FmѲ'G xS٣2vѦlAQpceGGe}Q9d3xkZvx9{ a}ihV퍰W;Y7(4 y.מ[7 zDOQ׮N6_jZ-> 1IߎU#g4U6؈uG[sܫm}8N!i$dt_!M*Ii+cɛ&sgoɯMެaea nz˱T9ٜ-i5z݃Tn@.Tڪ*޸𶫋8Wo(,"Yz'$5dqVYKEKġf V,t9QH{gS8{8Y-?[n*os_3V-2KVsNdkGL"yVI)tbm?YK,Oqr DND$*USR K Ax̺[`G[4h2ksӊ/Awi_oaM oS˭Mr;&F*Bιs%37^3(o$M^}Md.aqdV~x-F|˫MW!$*odd@ #o/PކK 5!]w ,媳CoBh;&3l }lL~KqpznAAu@MǺ T̸wrtQVNM ̢ckoSߴԲaIkock%~թnWYgd]o颲I)Je DXR `PtCOL0==#oJjIbxjѹ Gޮď~7 ׫aWslrE{dhW.dz-ktP53Z6it;m-x)tN{ Rr*l61jIX*K֓?I ]~Z`{K?ܛ9 ~%'K?f<_ ,'~kGt_B]67{[k0{z }'ޚgJaQ֯˼RWmv;&cRT$͟GyC'0EN{a$}o#]` Svy ox3J%6.%\x3yO]f+mv!^n ;E $[ò k} Hiaq}̈́\y+Ύ£SMrVr9HwJ.X臥:靮D*}ղ"g{%[s`iTđ.އ6+R}oަ6vG}R{^# v;%[,e(xmTdGZ3]Vksd~y҆䌴-eHOuĘ^E/qvHFWOõl͗m#n%OcvEї[fX^ǻxMrKO&No񜝌!eJ,.D)G}/K6X~%ݷC&\z:"ki..ArRgo 9N%b'%bo#ܕ% B4jJCzTL,6ұ@OXRzmLoWVD<O J jX :[Uv=h- ={(ki˲,Aw ˶a]<t-5 KM|e.TJ,eS5<:v+_RhoYLUbɠy?IENDB`docker-1.10.3/docs/static_files/dockerlogo-v.png000066400000000000000000000227061267010174400215560ustar00rootroot00000000000000PNG  IHDRenhPLTE ڿ$9MTƼ+et3j{}&ety-DW]4hzzZjpO`fձo}|)/%5ap+dv'1u|8T]5QZl$dz+($p[qw2Uat+]m,2n6Zg-2Ye#" p0|  5S\AV]'`s Rhn).Yg ht.^m|l,aqۇHx v$}`u{ƍ LahsCW^%k'j~Qֺ2Wb  cyD7ˆᚱ"w6R[0(ex` Vkqyi*^J_f%j}y6\g7T^4cq2Yd._lt$q搧!o'bu~n<zr 2WcTʃl-ew"w/fxI}]syAThtRNS8v{"IDATxӱHasX 1@ 1@ 1@ 1@ @ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 1@ 11@ 1@ 1@ 1`ӱ 0Q\0" 6ËfpHM7> ]1ujq C`!0o`|H`v.0 Mhu{D֌Ȉ^$` 0 C` 0 C`!0 C`` 0 C` 0 C8ޙf֬Ǘim8N!BcI`0`6&30Ӏ-Ye(L_@o]nZJ.Z}Q}IY2J F9G'/!@1=(# 8A0쒍0Iaerc YD\ a n6¸Jra8c ҩicF"X:aLJt Pƣai! @5"2$ux&o_Oa˓ד_/''/joo6)0F="⇋10 VЮqDGOa l0Hmg~ (a@°- cD\QЄqKa@p2 áX*€aFw2"€lQМ{pm àP[0 akCR2j2FBglL_a@¨SgwS Y6€̅a7a1Ѻ)a@ˆIŝѭ:€I n-a@¨0Nt@0uؽtKG0\-Q0 SaPh 92N!&Ҥ0 CaTEޱ)ad0+0v87saG!'3a0+g- @&q(f, @MAB$` Cr:>^Ux{u|燯[ '~ކϷԫa=}Q?Z" aF݋~ECb,V& heӵű'+ g ?8&t0^Ћͧ>Z6 }ˇ:05!8w}mnhYT- 0%85)wXal cvpqN1Ƴ0RS|g޿5.Qja Q#6]H @+JSKa Wvwa qןj>WakO0uG-wj" y\3otda8[._iy#U)@%d$F<;a Bsv?i @Jl0%,S-tS@/LKR7Raخ?v.aT!_= #0DL _*1 0$T{. 
)l#  Y> Y!u=R 0Dw Q 0Dt(2OVMz<{N+°KtWIߺ՞ t,vG f4>7^ҺwW){2FLpkE9aTEoO^@3 O9#mJLthlDSYpmŇIo~>Koz09kREG?5Zݾ9 ZsId#Ḵe_aUW迴]ˬ& R߀&m:C&!7b0lw.n n]uI3oR$L|~9'7fOf>+&+Y~ 27f+Ba8oabnks$ ,gfZ7Bbv%IWv1Aa?|]E.7R5KF*&F a8t n&r/._D-Ջ0%b._O*7R8./Jpfb'q\T{ B~/pf%0\7K8.*/>Yj.*5W^Y}G~Q4 T^-ыy&{OAg(, sEnSs-Ǜgf_;H! o-!j;_92F3.(ogóuRoȟ#fFX5?1vAȢM1E؄[" ^1kRPѼ,n(7/7̼ckR^ˏ*6v|@X\a0ҍ6thx)_EG/ ),|"1$w )|dQX+q"х##.ʛX5>aD/}aS"c׌)mla~dL0V?MWQ=] G+Ôߏ׉KYp{Q0Hau)+ }xq7o)E Rm,D]0 e,Hvm@ֲ2SXg1/%(Jfr~QYY&}At0 &[tfMw 5X_ܐZsU1/̷u¨OjR#cj #,0IXF_&;/dUsMS(Lо6T0#ƞX>Jx0eIg&A Fhr  _c Y(v Q/W.T<6VMx5;ub@*Zo(2ZKQQ2KwuQuxa(ェ[K,ڣzQnp:}ǪG Ojaa+ ֥{Ƕa{*?-Kz@3:}uqJ݃~)anСE7<|l6;]j0UsG#"F8.F8 ޻҄h(f=PY$(Q@ #.|@5qCc~XY`Fd8u0@؝f.`z(z heNX)yi/ms,xGk^X46g`4 +olMS m1PƟݾa@+4O}0 -abx=Oa\>eq|(9F耰ߪ,6 i|}KX!nWLIܻW1,0M%/y9u*/X>zEmUP a0mhsS_>NŖx`J0[J]POh0=}tF72,> 1yCHQ9J1ҠT>GRtHxT~ta@Kiu_u(N  i%koxNh`ƨašy$zUj,ha*/9;|pRydBZCdpQ ö4#{wF `x`b_uK6 طter 8 8qX AnMevzB۹LN-Z^BΑm.akHk_1?q0]a0@ a0@0@=>r8 z]0귷 ??֥<azSCƥZX"#A|+CI0000FB@@@ aaaac$! ! ! !000F7 >]2k}˽Y ə^cS,+&$Fڳ_nAgqHrm#~j&~qİxԋԞneB]lnR64z/ ?.KG{է=uqxyt]1fS뉾%<-{}aR 6^8sC5AѰX ͉vuEO881B6(8G ^hN0Um)@UMQ"hNS@ObĐc,~#dMc$bD=.5KjpYbHK<#j  hf yRSB ~@%LY^.ۗSZc<$~D]cR[F?ЄRG1/1dZ C^J3׏D2ʤi2/^ț{;<їםw{5^1ᑾ(II4:kmܬNa5-ZƔ zۘ }Լg=|IyQQ K ~ȭkxDŽQ5_۱~-pz(M^&&l}eq b|oLQ\vhj\)[$@ = }+cS,1B 3G’} w_b/ qGSl,r"d|3yڵņ@ o6]{N,kV[b<@ɔŨS[K x^ t{zYbx/CbKFxAzl1E%aDBE幍P,iGv cyܜo *I18/ڝ<^I%qgbx0R(Q~(I127׊b~!FX{\5y<ӉjfhbinM;ç3Z Kyfh&FH.Ecy/p_R5UN_.1n!Pkpw#Mq16bzt^utTKˤvV7L*q1`̓W+ߘH-1\$a1 q.s ;scLL*%<0a' U^=A/7NK #xݠ|8a1(/N /r-.1ĊLpwN_ 1"7q''K̸|Xb]\b *ަ]bn۵۵vhBC)İbf_2Q]%ƫzA?ZM5c( h;bM ޡbA;^ ƌtV \%F\(&r=Ypj5kcS/1^b$k.'Q6l%oct+>w|.+a=}GUKJ<ٺ_4CgyY13Y.h_?N'O]^ :_5xgX/ s'L_ӗj;*%[nFrz? ./aFoQK2U ۙ5IN hsYvT6͘+?~Z~M/*%ƻ)YQ _ߊRtT5x+x^R!!F?m\b_7 FܩΜVAT/1>eNof>f~x3lFD.1ގ(÷s"`Ohk*25tJˎo:XO|QŎiK06W .1 ~aS{U_u1m,X3,2%ƫq1 NĊK Ҭ͠?Ve;d0KXjrCy |Tc_mv-1^\>(2ΛBh^z2x3x~txoa  [/5ݤzE՚d6+^,1^B(纵9?{;7gk;>e%eeft=KXb="q/'Bk;YObQfH)Q373#+G{6c7DvKQ1p7I3V*JR \@bP¬]x% xvYx22q{S]b NwhJYv6:ClZbS5kaE.4U땒Sͼ 7EƺH}a`!3J]b|-1 ǸP3^[[i5ёA^$! 
xmxтWR%ne+q{ѧ-z=ۜ42h]Yd?"*?G #u ikl.} CE~Tlsoךg1{wut=&%D06;nM>n%p%Ñ(YcOa<pdi 6&OMV,1> ڟz.@ 1@ 1@ xb b b b b b bb b b bb b b b b b b b b b  b b b `T8jIENDB`docker-1.10.3/docs/touch-up.sh000077500000000000000000000006201267010174400160760ustar00rootroot00000000000000#!/bin/bash -e # Sed to process GitHub Markdown # 1-2 Remove comment code from metadata block # for i in ls -l /docs/content/* do # Line breaks are important if [ -d $i ] # Spaces are important then y=${i##*/} find $i -type f -name "*.md" -exec sed -i.old \ -e '/^/g' \ -e '/^/g' {} \; fi done docker-1.10.3/docs/understanding-docker.md000066400000000000000000000321031267010174400204300ustar00rootroot00000000000000 # Understand the architecture **What is Docker?** Docker is an open platform for developing, shipping, and running applications. Docker is designed to deliver your applications faster. With Docker you can separate your applications from your infrastructure and treat your infrastructure like a managed application. Docker helps you ship code faster, test faster, deploy faster, and shorten the cycle between writing code and running code. Docker does this by combining kernel containerization features with workflows and tooling that help you manage and deploy your applications. At its core, Docker provides a way to run almost any application securely isolated in a container. The isolation and security allow you to run many containers simultaneously on your host. The lightweight nature of containers, which run without the extra load of a hypervisor, means you can get more out of your hardware. Surrounding the container is tooling and a platform which can help you in several ways: * getting your applications (and supporting components) into Docker containers * distributing and shipping those containers to your teams for further development and testing * deploying those applications to your production environment, whether it is in a local data center or the Cloud. ## What can I use Docker for? 
*Faster delivery of your applications* Docker is perfect for helping you with the development lifecycle. Docker allows your developers to develop on local containers that contain your applications and services. It can then integrate into a continuous integration and deployment workflow. For example, your developers write code locally and share their development stack via Docker with their colleagues. When they are ready, they push their code and the stack they are developing onto a test environment and execute any required tests. From the testing environment, you can then push the Docker images into production and deploy your code. *Deploying and scaling more easily* Docker's container-based platform allows for highly portable workloads. Docker containers can run on a developer's local host, on physical or virtual machines in a data center, or in the Cloud. Docker's portability and lightweight nature also make dynamically managing workloads easy. You can use Docker to quickly scale up or tear down applications and services. Docker's speed means that scaling can be near real time. *Achieving higher density and running more workloads* Docker is lightweight and fast. It provides a viable, cost-effective alternative to hypervisor-based virtual machines. This is especially useful in high density environments: for example, building your own Cloud or Platform-as-a-Service. But it is also useful for small and medium deployments where you want to get more out of the resources you have. ## What are the major Docker components? Docker has two major components: * Docker: the open source containerization platform. * [Docker Hub](https://hub.docker.com): our Software-as-a-Service platform for sharing and managing Docker containers. > **Note:** Docker is licensed under the open source Apache 2.0 license. ## What is Docker's architecture? Docker uses a client-server architecture. 
The Docker *client* talks to the Docker *daemon*, which does the heavy lifting of building, running, and distributing your Docker containers. Both the Docker client and the daemon *can* run on the same system, or you can connect a Docker client to a remote Docker daemon. The Docker client and daemon communicate via sockets or through a RESTful API. ![Docker Architecture Diagram](article-img/architecture.svg) ### The Docker daemon As shown in the diagram above, the Docker daemon runs on a host machine. The user does not directly interact with the daemon, but instead through the Docker client. ### The Docker client The Docker client, in the form of the `docker` binary, is the primary user interface to Docker. It accepts commands from the user and communicates back and forth with a Docker daemon. ### Inside Docker To understand Docker's internals, you need to know about three components: * Docker images. * Docker registries. * Docker containers. #### Docker images A Docker image is a read-only template. For example, an image could contain an Ubuntu operating system with Apache and your web application installed. Images are used to create Docker containers. Docker provides a simple way to build new images or update existing images, or you can download Docker images that other people have already created. Docker images are the **build** component of Docker. #### Docker registries Docker registries hold images. These are public or private stores from which you upload or download images. The public Docker registry is provided with the [Docker Hub](http://hub.docker.com). It serves a huge collection of existing images for your use. These can be images you create yourself or you can use images that others have previously created. Docker registries are the **distribution** component of Docker. #### Docker containers Docker containers are similar to a directory. A Docker container holds everything that is needed for an application to run. 
Each container is created from a Docker image. Docker containers can be run, started, stopped, moved, and deleted. Each container is an isolated and secure application platform. Docker containers are the **run** component of Docker. ## So how does Docker work? So far, we've learned that: 1. You can build Docker images that hold your applications. 2. You can create Docker containers from those Docker images to run your applications. 3. You can share those Docker images via [Docker Hub](https://hub.docker.com) or your own registry. Let's look at how these elements combine together to make Docker work. ### How does a Docker image work? We've already seen that Docker images are read-only templates from which Docker containers are launched. Each image consists of a series of layers. Docker makes use of [union file systems](http://en.wikipedia.org/wiki/UnionFS) to combine these layers into a single image. Union file systems allow files and directories of separate file systems, known as branches, to be transparently overlaid, forming a single coherent file system. One of the reasons Docker is so lightweight is because of these layers. When you change a Docker image—for example, update an application to a new version— a new layer gets built. Thus, rather than replacing the whole image or entirely rebuilding, as you may do with a virtual machine, only that layer is added or updated. Now you don't need to distribute a whole new image, just the update, making distributing Docker images faster and simpler. Every image starts from a base image, for example `ubuntu`, a base Ubuntu image, or `fedora`, a base Fedora image. You can also use images of your own as the basis for a new image, for example if you have a base Apache image you could use this as the base of all your web application images. > **Note:** Docker usually gets these base images from > [Docker Hub](https://hub.docker.com). 
Docker images are then built from these base images using a simple, descriptive set of steps we call *instructions*. Each instruction creates a new layer in our image. Instructions include actions like: * Run a command. * Add a file or directory. * Create an environment variable. * What process to run when launching a container from this image. These instructions are stored in a file called a `Dockerfile`. Docker reads this `Dockerfile` when you request a build of an image, executes the instructions, and returns a final image. ### How does a Docker registry work? The Docker registry is the store for your Docker images. Once you build a Docker image you can *push* it to a public registry such as the one provided by [Docker Hub](https://hub.docker.com) or to your own registry running behind your firewall. Using the Docker client, you can search for already published images and then pull them down to your Docker host to build containers from them. [Docker Hub](https://hub.docker.com) provides both public and private storage for images. Public storage is searchable and can be downloaded by anyone. Private storage is excluded from search results and only you and your users can pull images down and use them to build containers. You can [sign up for a storage plan here](https://hub.docker.com/plans). ### How does a container work? A container consists of an operating system, user-added files, and meta-data. As we've seen, each container is built from an image. That image tells Docker what the container holds, what process to run when the container is launched, and a variety of other configuration data. The Docker image is read-only. When Docker runs a container from an image, it adds a read-write layer on top of the image (using a union file system as we saw earlier) in which your application can then run. ### What happens when you run a container? Either by using the `docker` binary or via the API, the Docker client tells the Docker daemon to run a container. 
$ docker run -i -t ubuntu /bin/bash Let's break down this command. The Docker client is launched using the `docker` binary with the `run` option telling it to launch a new container. The bare minimum the Docker client needs to tell the Docker daemon to run the container is: * What Docker image to build the container from, here `ubuntu`, a base Ubuntu image; * The command you want to run inside the container when it is launched, here `/bin/bash`, to start the Bash shell inside the new container. So what happens under the hood when we run this command? In order, Docker does the following: - **Pulls the `ubuntu` image:** Docker checks for the presence of the `ubuntu` image and, if it doesn't exist locally on the host, then Docker downloads it from [Docker Hub](https://hub.docker.com). If the image already exists, then Docker uses it for the new container. - **Creates a new container:** Once Docker has the image, it uses it to create a container. - **Allocates a filesystem and mounts a read-write _layer_:** The container is created in the file system and a read-write layer is added to the image. - **Allocates a network / bridge interface:** Creates a network interface that allows the Docker container to talk to the local host. - **Sets up an IP address:** Finds and attaches an available IP address from a pool. - **Executes a process that you specify:** Runs your application, and; - **Captures and provides application output:** Connects and logs standard input, outputs and errors for you to see how your application is running. You now have a running container! From here you can manage your container, interact with your application and then, when finished, stop and remove your container. ## The underlying technology Docker is written in Go and makes use of several kernel features to deliver the functionality we've seen. ### Namespaces Docker takes advantage of a technology called `namespaces` to provide the isolated workspace we call the *container*. 
When you run a container, Docker creates a set of *namespaces* for that container. This provides a layer of isolation: each aspect of a container runs in its own namespace and does not have access outside it. Some of the namespaces that Docker uses on Linux are: - **The `pid` namespace:** Used for process isolation (PID: Process ID). - **The `net` namespace:** Used for managing network interfaces (NET: Networking). - **The `ipc` namespace:** Used for managing access to IPC resources (IPC: InterProcess Communication). - **The `mnt` namespace:** Used for managing mount-points (MNT: Mount). - **The `uts` namespace:** Used for isolating kernel and version identifiers. (UTS: Unix Timesharing System). ### Control groups Docker on Linux also makes use of another technology called `cgroups` or control groups. A key to running applications in isolation is to have them only use the resources you want. This ensures containers are good multi-tenant citizens on a host. Control groups allow Docker to share available hardware resources to containers and, if required, set up limits and constraints. For example, limiting the memory available to a specific container. ### Union file systems Union file systems, or UnionFS, are file systems that operate by creating layers, making them very lightweight and fast. Docker uses union file systems to provide the building blocks for containers. Docker can make use of several union file system variants including: AUFS, btrfs, vfs, and DeviceMapper. ### Container format Docker combines these components into a wrapper we call a container format. The default container format is called `libcontainer`. In the future, Docker may support other container formats, for example, by integrating with BSD Jails or Solaris Zones. ## Next steps ### Installing Docker Visit the [installation section](installation/index.md#installation). ### The Docker user guide [Learn Docker in depth](userguide/index.md). 
docker-1.10.3/docs/userguide/000077500000000000000000000000001267010174400157715ustar00rootroot00000000000000docker-1.10.3/docs/userguide/containers/000077500000000000000000000000001267010174400201365ustar00rootroot00000000000000docker-1.10.3/docs/userguide/containers/dockerimages.md000066400000000000000000000611431267010174400231220ustar00rootroot00000000000000 # Build your own images Docker images are the basis of containers. Each time you've used `docker run` you told it which image you wanted. In the previous sections of the guide you used Docker images that already exist, for example the `ubuntu` image and the `training/webapp` image. You also discovered that Docker stores downloaded images on the Docker host. If an image isn't already present on the host then it'll be downloaded from a registry: by default the [Docker Hub Registry](https://registry.hub.docker.com). In this section you're going to explore Docker images a bit more including: * Managing and working with images locally on your Docker host. * Creating basic images. * Uploading images to [Docker Hub Registry](https://registry.hub.docker.com). ## Listing images on the host Let's start with listing the images you have locally on our host. You can do this using the `docker images` command like so: $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE ubuntu 14.04 1d073211c498 3 days ago 187.9 MB busybox latest 2c5ac3f849df 5 days ago 1.113 MB training/webapp latest 54bb4e8718e8 5 months ago 348.7 MB You can see the images you've previously used in the user guide. Each has been downloaded from [Docker Hub](https://hub.docker.com) when you launched a container using that image. When you list images, you get three crucial pieces of information in the listing. * What repository they came from, for example `ubuntu`. * The tags for each image, for example `14.04`. * The image ID of each image. 
> **Tip:** > You can use [a third-party dockviz tool](https://github.com/justone/dockviz) > or the [Image layers site](https://imagelayers.io/) to display > visualizations of image data. A repository potentially holds multiple variants of an image. In the case of our `ubuntu` image you can see multiple variants covering Ubuntu 10.04, 12.04, 12.10, 13.04, 13.10 and 14.04. Each variant is identified by a tag and you can refer to a tagged image like so: ubuntu:14.04 So when you run a container you refer to a tagged image like so: $ docker run -t -i ubuntu:14.04 /bin/bash If instead you wanted to run an Ubuntu 12.04 image you'd use: $ docker run -t -i ubuntu:12.04 /bin/bash If you don't specify a variant, for example you just use `ubuntu`, then Docker will default to using the `ubuntu:latest` image. > **Tip:** > You should always specify an image tag, for example `ubuntu:14.04`. > That way, you always know exactly what variant of an image you are using. > This is useful for troubleshooting and debugging. ## Getting a new image So how do you get new images? Well Docker will automatically download any image you use that isn't already present on the Docker host. But this can potentially add some time to the launch of a container. If you want to pre-load an image you can download it using the `docker pull` command. Suppose you'd like to download the `centos` image. $ docker pull centos Pulling repository centos b7de3133ff98: Pulling dependent layers 5cc9e91966f7: Pulling fs layer 511136ea3c5a: Download complete ef52fb1fe610: Download complete . . . Status: Downloaded newer image for centos You can see that each layer of the image has been pulled down and now you can run a container from this image and you won't have to wait to download the image. $ docker run -t -i centos /bin/bash bash-4.1# ## Finding images One of the features of Docker is that a lot of people have created Docker images for a variety of purposes. 
Many of these have been uploaded to [Docker Hub](https://hub.docker.com). You can search these images on the [Docker Hub](https://hub.docker.com) website. ![indexsearch](search.png) You can also search for images on the command line using the `docker search` command. Suppose your team wants an image with Ruby and Sinatra installed on which to do our web application development. You can search for a suitable image by using the `docker search` command to find all the images that contain the term `sinatra`. $ docker search sinatra NAME DESCRIPTION STARS OFFICIAL AUTOMATED training/sinatra Sinatra training image 0 [OK] marceldegraaf/sinatra Sinatra test app 0 mattwarren/docker-sinatra-demo 0 [OK] luisbebop/docker-sinatra-hello-world 0 [OK] bmorearty/handson-sinatra handson-ruby + Sinatra for Hands on with D... 0 subwiz/sinatra 0 bmorearty/sinatra 0 . . . You can see the command returns a lot of images that use the term `sinatra`. You've received a list of image names, descriptions, Stars (which measure the social popularity of images - if a user likes an image then they can "star" it), and the Official and Automated build statuses. [Official Repositories](https://docs.docker.com/docker-hub/official_repos) are a carefully curated set of Docker repositories supported by Docker, Inc. Automated repositories are [Automated Builds](dockerrepos.md#automated-builds) that allow you to validate the source and content of an image. You've reviewed the images available to use and you decided to use the `training/sinatra` image. So far you've seen two types of images repositories, images like `ubuntu`, which are called base or root images. These base images are provided by Docker Inc and are built, validated and supported. These can be identified by their single word names. You've also seen user images, for example the `training/sinatra` image you've chosen. A user image belongs to a member of the Docker community and is built and maintained by them. 
You can identify user images as they are always prefixed with the user name, here `training`, of the user that created them. ## Pulling our image You've identified a suitable image, `training/sinatra`, and now you can download it using the `docker pull` command. $ docker pull training/sinatra The team can now use this image by running their own containers. $ docker run -t -i training/sinatra /bin/bash root@a8cb6ce02d85:/# ## Creating our own images The team has found the `training/sinatra` image pretty useful but it's not quite what they need and you need to make some changes to it. There are two ways you can update and create images. 1. You can update a container created from an image and commit the results to an image. 2. You can use a `Dockerfile` to specify instructions to create an image. ### Updating and committing an image To update an image you first need to create a container from the image you'd like to update. $ docker run -t -i training/sinatra /bin/bash root@0b2616b0e5a8:/# > **Note:** > Take note of the container ID that has been created, `0b2616b0e5a8`, as you'll > need it in a moment. Inside our running container let's add the `json` gem. root@0b2616b0e5a8:/# gem install json Once this has completed let's exit our container using the `exit` command. Now you have a container with the change you want to make. You can then commit a copy of this container to an image using the `docker commit` command. $ docker commit -m "Added json gem" -a "Kate Smith" \ 0b2616b0e5a8 ouruser/sinatra:v2 4f177bd27a9ff0f6dc2a830403925b5360bfe0b93d476f7fc3231110e7f71b1c Here you've used the `docker commit` command. You've specified two flags: `-m` and `-a`. The `-m` flag allows us to specify a commit message, much like you would with a commit on a version control system. The `-a` flag allows us to specify an author for our update. 
You've also specified the container you want to create this new image from, `0b2616b0e5a8` (the ID you recorded earlier) and you've specified a target for the image: ouruser/sinatra:v2 Break this target down. It consists of a new user, `ouruser`, that you're writing this image to. You've also specified the name of the image, here you're keeping the original image name `sinatra`. Finally you're specifying a tag for the image: `v2`. You can then look at our new `ouruser/sinatra` image using the `docker images` command. $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE training/sinatra latest 5bc342fa0b91 10 hours ago 446.7 MB ouruser/sinatra v2 3c59e02ddd1a 10 hours ago 446.7 MB ouruser/sinatra latest 5db5f8471261 10 hours ago 446.7 MB To use our new image to create a container you can then: $ docker run -t -i ouruser/sinatra:v2 /bin/bash root@78e82f680994:/# ### Building an image from a `Dockerfile` Using the `docker commit` command is a pretty simple way of extending an image but it's a bit cumbersome and it's not easy to share a development process for images amongst a team. Instead you can use a new command, `docker build`, to build new images from scratch. To do this you create a `Dockerfile` that contains a set of instructions that tell Docker how to build our image. First, create a directory and a `Dockerfile`. $ mkdir sinatra $ cd sinatra $ touch Dockerfile If you are using Docker Machine on Windows, you may access your host directory by `cd` to `/c/Users/your_user_name`. Each instruction creates a new layer of the image. Try a simple example now for building your own Sinatra image for your fictitious development team. # This is a comment FROM ubuntu:14.04 MAINTAINER Kate Smith RUN apt-get update && apt-get install -y ruby ruby-dev RUN gem install sinatra Examine what your `Dockerfile` does. Each instruction prefixes a statement and is capitalized. 
INSTRUCTION statement > **Note:** You use `#` to indicate a comment The first instruction `FROM` tells Docker what the source of our image is, in this case you're basing our new image on an Ubuntu 14.04 image. The instruction uses the `MAINTAINER` instruction to specify who maintains the new image. Lastly, you've specified two `RUN` instructions. A `RUN` instruction executes a command inside the image, for example installing a package. Here you're updating our APT cache, installing Ruby and RubyGems and then installing the Sinatra gem. Now let's take our `Dockerfile` and use the `docker build` command to build an image. $ docker build -t ouruser/sinatra:v2 . Sending build context to Docker daemon 2.048 kB Sending build context to Docker daemon Step 1 : FROM ubuntu:14.04 ---> e54ca5efa2e9 Step 2 : MAINTAINER Kate Smith ---> Using cache ---> 851baf55332b Step 3 : RUN apt-get update && apt-get install -y ruby ruby-dev ---> Running in 3a2558904e9b Selecting previously unselected package libasan0:amd64. (Reading database ... 11518 files and directories currently installed.) Preparing to unpack .../libasan0_4.8.2-19ubuntu1_amd64.deb ... Unpacking libasan0:amd64 (4.8.2-19ubuntu1) ... Selecting previously unselected package libatomic1:amd64. Preparing to unpack .../libatomic1_4.8.2-19ubuntu1_amd64.deb ... Unpacking libatomic1:amd64 (4.8.2-19ubuntu1) ... Selecting previously unselected package libgmp10:amd64. Preparing to unpack .../libgmp10_2%3a5.1.3+dfsg-1ubuntu1_amd64.deb ... Unpacking libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ... Selecting previously unselected package libisl10:amd64. Preparing to unpack .../libisl10_0.12.2-1_amd64.deb ... Unpacking libisl10:amd64 (0.12.2-1) ... Selecting previously unselected package libcloog-isl4:amd64. Preparing to unpack .../libcloog-isl4_0.18.2-1_amd64.deb ... Unpacking libcloog-isl4:amd64 (0.18.2-1) ... Selecting previously unselected package libgomp1:amd64. Preparing to unpack .../libgomp1_4.8.2-19ubuntu1_amd64.deb ... 
Unpacking libgomp1:amd64 (4.8.2-19ubuntu1) ... Selecting previously unselected package libitm1:amd64. Preparing to unpack .../libitm1_4.8.2-19ubuntu1_amd64.deb ... Unpacking libitm1:amd64 (4.8.2-19ubuntu1) ... Selecting previously unselected package libmpfr4:amd64. Preparing to unpack .../libmpfr4_3.1.2-1_amd64.deb ... Unpacking libmpfr4:amd64 (3.1.2-1) ... Selecting previously unselected package libquadmath0:amd64. Preparing to unpack .../libquadmath0_4.8.2-19ubuntu1_amd64.deb ... Unpacking libquadmath0:amd64 (4.8.2-19ubuntu1) ... Selecting previously unselected package libtsan0:amd64. Preparing to unpack .../libtsan0_4.8.2-19ubuntu1_amd64.deb ... Unpacking libtsan0:amd64 (4.8.2-19ubuntu1) ... Selecting previously unselected package libyaml-0-2:amd64. Preparing to unpack .../libyaml-0-2_0.1.4-3ubuntu3_amd64.deb ... Unpacking libyaml-0-2:amd64 (0.1.4-3ubuntu3) ... Selecting previously unselected package libmpc3:amd64. Preparing to unpack .../libmpc3_1.0.1-1ubuntu1_amd64.deb ... Unpacking libmpc3:amd64 (1.0.1-1ubuntu1) ... Selecting previously unselected package openssl. Preparing to unpack .../openssl_1.0.1f-1ubuntu2.4_amd64.deb ... Unpacking openssl (1.0.1f-1ubuntu2.4) ... Selecting previously unselected package ca-certificates. Preparing to unpack .../ca-certificates_20130906ubuntu2_all.deb ... Unpacking ca-certificates (20130906ubuntu2) ... Selecting previously unselected package manpages. Preparing to unpack .../manpages_3.54-1ubuntu1_all.deb ... Unpacking manpages (3.54-1ubuntu1) ... Selecting previously unselected package binutils. Preparing to unpack .../binutils_2.24-5ubuntu3_amd64.deb ... Unpacking binutils (2.24-5ubuntu3) ... Selecting previously unselected package cpp-4.8. Preparing to unpack .../cpp-4.8_4.8.2-19ubuntu1_amd64.deb ... Unpacking cpp-4.8 (4.8.2-19ubuntu1) ... Selecting previously unselected package cpp. Preparing to unpack .../cpp_4%3a4.8.2-1ubuntu6_amd64.deb ... Unpacking cpp (4:4.8.2-1ubuntu6) ... 
Selecting previously unselected package libgcc-4.8-dev:amd64. Preparing to unpack .../libgcc-4.8-dev_4.8.2-19ubuntu1_amd64.deb ... Unpacking libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ... Selecting previously unselected package gcc-4.8. Preparing to unpack .../gcc-4.8_4.8.2-19ubuntu1_amd64.deb ... Unpacking gcc-4.8 (4.8.2-19ubuntu1) ... Selecting previously unselected package gcc. Preparing to unpack .../gcc_4%3a4.8.2-1ubuntu6_amd64.deb ... Unpacking gcc (4:4.8.2-1ubuntu6) ... Selecting previously unselected package libc-dev-bin. Preparing to unpack .../libc-dev-bin_2.19-0ubuntu6_amd64.deb ... Unpacking libc-dev-bin (2.19-0ubuntu6) ... Selecting previously unselected package linux-libc-dev:amd64. Preparing to unpack .../linux-libc-dev_3.13.0-30.55_amd64.deb ... Unpacking linux-libc-dev:amd64 (3.13.0-30.55) ... Selecting previously unselected package libc6-dev:amd64. Preparing to unpack .../libc6-dev_2.19-0ubuntu6_amd64.deb ... Unpacking libc6-dev:amd64 (2.19-0ubuntu6) ... Selecting previously unselected package ruby. Preparing to unpack .../ruby_1%3a1.9.3.4_all.deb ... Unpacking ruby (1:1.9.3.4) ... Selecting previously unselected package ruby1.9.1. Preparing to unpack .../ruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ... Unpacking ruby1.9.1 (1.9.3.484-2ubuntu1) ... Selecting previously unselected package libruby1.9.1. Preparing to unpack .../libruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ... Unpacking libruby1.9.1 (1.9.3.484-2ubuntu1) ... Selecting previously unselected package manpages-dev. Preparing to unpack .../manpages-dev_3.54-1ubuntu1_all.deb ... Unpacking manpages-dev (3.54-1ubuntu1) ... Selecting previously unselected package ruby1.9.1-dev. Preparing to unpack .../ruby1.9.1-dev_1.9.3.484-2ubuntu1_amd64.deb ... Unpacking ruby1.9.1-dev (1.9.3.484-2ubuntu1) ... Selecting previously unselected package ruby-dev. Preparing to unpack .../ruby-dev_1%3a1.9.3.4_all.deb ... Unpacking ruby-dev (1:1.9.3.4) ... Setting up libasan0:amd64 (4.8.2-19ubuntu1) ... 
Setting up libatomic1:amd64 (4.8.2-19ubuntu1) ... Setting up libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ... Setting up libisl10:amd64 (0.12.2-1) ... Setting up libcloog-isl4:amd64 (0.18.2-1) ... Setting up libgomp1:amd64 (4.8.2-19ubuntu1) ... Setting up libitm1:amd64 (4.8.2-19ubuntu1) ... Setting up libmpfr4:amd64 (3.1.2-1) ... Setting up libquadmath0:amd64 (4.8.2-19ubuntu1) ... Setting up libtsan0:amd64 (4.8.2-19ubuntu1) ... Setting up libyaml-0-2:amd64 (0.1.4-3ubuntu3) ... Setting up libmpc3:amd64 (1.0.1-1ubuntu1) ... Setting up openssl (1.0.1f-1ubuntu2.4) ... Setting up ca-certificates (20130906ubuntu2) ... debconf: unable to initialize frontend: Dialog debconf: (TERM is not set, so the dialog frontend is not usable.) debconf: falling back to frontend: Readline debconf: unable to initialize frontend: Readline debconf: (This frontend requires a controlling tty.) debconf: falling back to frontend: Teletype Setting up manpages (3.54-1ubuntu1) ... Setting up binutils (2.24-5ubuntu3) ... Setting up cpp-4.8 (4.8.2-19ubuntu1) ... Setting up cpp (4:4.8.2-1ubuntu6) ... Setting up libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ... Setting up gcc-4.8 (4.8.2-19ubuntu1) ... Setting up gcc (4:4.8.2-1ubuntu6) ... Setting up libc-dev-bin (2.19-0ubuntu6) ... Setting up linux-libc-dev:amd64 (3.13.0-30.55) ... Setting up libc6-dev:amd64 (2.19-0ubuntu6) ... Setting up manpages-dev (3.54-1ubuntu1) ... Setting up libruby1.9.1 (1.9.3.484-2ubuntu1) ... Setting up ruby1.9.1-dev (1.9.3.484-2ubuntu1) ... Setting up ruby-dev (1:1.9.3.4) ... Setting up ruby (1:1.9.3.4) ... Setting up ruby1.9.1 (1.9.3.484-2ubuntu1) ... Processing triggers for libc-bin (2.19-0ubuntu6) ... Processing triggers for ca-certificates (20130906ubuntu2) ... Updating certificates in /etc/ssl/certs... 164 added, 0 removed; done. Running hooks in /etc/ca-certificates/update.d....done. 
---> c55c31703134 Removing intermediate container 3a2558904e9b Step 4 : RUN gem install sinatra ---> Running in 6b81cb6313e5 unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping Successfully installed rack-1.5.2 Successfully installed tilt-1.4.1 Successfully installed rack-protection-1.5.3 Successfully installed sinatra-1.4.5 4 gems installed Installing ri documentation for rack-1.5.2... Installing ri documentation for tilt-1.4.1... Installing ri documentation for rack-protection-1.5.3... Installing ri documentation for sinatra-1.4.5... Installing RDoc documentation for rack-1.5.2... Installing RDoc documentation for tilt-1.4.1... Installing RDoc documentation for rack-protection-1.5.3... Installing RDoc documentation for sinatra-1.4.5... ---> 97feabe5d2ed Removing intermediate container 6b81cb6313e5 Successfully built 97feabe5d2ed You've specified our `docker build` command and used the `-t` flag to identify our new image as belonging to the user `ouruser`, the repository name `sinatra` and given it the tag `v2`. You've also specified the location of our `Dockerfile` using the `.` to indicate a `Dockerfile` in the current directory. > **Note:** > You can also specify a path to a `Dockerfile`. Now you can see the build process at work. The first thing Docker does is upload the build context: basically the contents of the directory you're building in. This is done because the Docker daemon does the actual build of the image and it needs the local context to do it. Next you can see each instruction in the `Dockerfile` being executed step-by-step. You can see that each step creates a new container, runs the instruction inside that container and then commits that change - just like the `docker commit` work flow you saw earlier. 
When all the instructions have executed you're left with the `97feabe5d2ed` image (also helpfully tagged as `ouruser/sinatra:v2`) and all intermediate containers will get removed to clean things up. > **Note:** > An image can't have more than 127 layers regardless of the storage driver. > This limitation is set globally to encourage optimization of the overall > size of images. You can then create a container from our new image. $ docker run -t -i ouruser/sinatra:v2 /bin/bash root@8196968dac35:/# > **Note:** > This is just a brief introduction to creating images. We've > skipped a whole bunch of other instructions that you can use. We'll see more of > those instructions in later sections of the Guide or you can refer to the > [`Dockerfile`](../../reference/builder.md) reference for a > detailed description and examples of every instruction. > To help you write a clear, readable, maintainable `Dockerfile`, we've also > written a [`Dockerfile` Best Practices guide](../eng-image/dockerfile_best-practices.md). ## Setting tags on an image You can also add a tag to an existing image after you commit or build it. We can do this using the `docker tag` command. Now, add a new tag to your `ouruser/sinatra` image. $ docker tag 5db5f8471261 ouruser/sinatra:devel The `docker tag` command takes the ID of the image, here `5db5f8471261`, and our user name, the repository name and the new tag. Now, see your new tag using the `docker images` command. $ docker images ouruser/sinatra REPOSITORY TAG IMAGE ID CREATED SIZE ouruser/sinatra latest 5db5f8471261 11 hours ago 446.7 MB ouruser/sinatra devel 5db5f8471261 11 hours ago 446.7 MB ouruser/sinatra v2 5db5f8471261 11 hours ago 446.7 MB ## Image Digests Images that use the v2 or later format have a content-addressable identifier called a `digest`. As long as the input used to generate the image is unchanged, the digest value is predictable. 
To list image digest values, use the `--digests` flag: $ docker images --digests | head REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE ouruser/sinatra latest sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 5db5f8471261 11 hours ago 446.7 MB When pushing or pulling to a 2.0 registry, the `push` or `pull` command output includes the image digest. You can `pull` using a digest value. $ docker pull ouruser/sinatra@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf You can also reference by digest in `create`, `run`, and `rmi` commands, as well as the `FROM` image reference in a Dockerfile. ## Push an image to Docker Hub Once you've built or created a new image you can push it to [Docker Hub](https://hub.docker.com) using the `docker push` command. This allows you to share it with others, either publicly, or push it into [a private repository](https://registry.hub.docker.com/plans/). $ docker push ouruser/sinatra The push refers to a repository [ouruser/sinatra] (len: 1) Sending image list Pushing repository ouruser/sinatra (3 tags) . . . ## Remove an image from the host You can also remove images on your Docker host in a way [similar to containers](usingdocker.md) using the `docker rmi` command. Delete the `training/sinatra` image as you don't need it anymore. $ docker rmi training/sinatra Untagged: training/sinatra:latest Deleted: 5bc342fa0b91cabf65246837015197eecfa24b2213ed6a51a8974ae250fedd8d Deleted: ed0fffdcdae5eb2c3a55549857a8be7fc8bc4241fb19ad714364cbfd7a56b22f Deleted: 5c58979d73ae448df5af1d8142436d81116187a7633082650549c52c3a2418f0 > **Note:** To remove an image from the host, please make sure > that there are no containers actively based on it. # Next steps Until now you've seen how to build individual applications inside Docker containers. Now learn how to build whole application stacks with Docker by networking together multiple Docker containers. Go to [Network containers](networkingcontainers.md). 
docker-1.10.3/docs/userguide/containers/dockerizing.md000066400000000000000000000167141267010174400230010ustar00rootroot00000000000000 # Hello world in a container *So what's this Docker thing all about?* Docker allows you to run applications, worlds you create, inside containers. Running an application inside a container takes a single command: `docker run`. >**Note**: Depending on your Docker system configuration, you may be required to >preface each `docker` command on this page with `sudo`. To avoid this behavior, >your system administrator can create a Unix group called `docker` and add users >to it. ## Run a Hello world Let's try it now. $ docker run ubuntu /bin/echo 'Hello world' Hello world And you just launched your first container! So what just happened? Let's step through what the `docker run` command did. First we specified the `docker` binary and the command we wanted to execute, `run`. The `docker run` combination *runs* containers. Next we specified an image: `ubuntu`. This is the source of the container we ran. Docker calls this an image. In this case we used the Ubuntu operating system image. When you specify an image, Docker looks first for the image on your Docker host. If it can't find it then it downloads the image from the public image registry: [Docker Hub](https://hub.docker.com). Next we told Docker what command to run inside our new container: /bin/echo 'Hello world' When our container was launched Docker created a new Ubuntu environment and then executed the `/bin/echo` command inside it. We saw the result on the command line: Hello world So what happened to our container after that? Well Docker containers only run as long as the command you specify is active. Here, as soon as `Hello world` was echoed, the container stopped. ## An interactive container Let's try the `docker run` command again, this time specifying a new command to run in our container. 
$ docker run -t -i ubuntu /bin/bash root@af8bae53bdd3:/# Here we've again specified the `docker run` command and launched an `ubuntu` image. But we've also passed in two flags: `-t` and `-i`. The `-t` flag assigns a pseudo-tty or terminal inside our new container and the `-i` flag allows us to make an interactive connection by grabbing the standard in (`STDIN`) of the container. We've also specified a new command for our container to run: `/bin/bash`. This will launch a Bash shell inside our container. So now when our container is launched we can see that we've got a command prompt inside it: root@af8bae53bdd3:/# Let's try running some commands inside our container: root@af8bae53bdd3:/# pwd / root@af8bae53bdd3:/# ls bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var You can see we've run the `pwd` to show our current directory and can see we're in the `/` root directory. We've also done a directory listing of the root directory which shows us what looks like a typical Linux file system. You can play around inside this container and when you're done you can use the `exit` command or enter Ctrl-D to finish. root@af8bae53bdd3:/# exit As with our previous container, once the Bash shell process has finished, the container is stopped. ## A daemonized Hello world Now a container that runs a command and then exits has some uses but it's not overly helpful. Let's create a container that runs as a daemon, like most of the applications we're probably going to run with Docker. Again we can do this with the `docker run` command: $ docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done" 1e5535038e285177d5214659a068137486f96ee5c2e85a4ac52dc83f2ebe4147 Wait, what? Where's our "hello world" output? Let's look at what we've run here. It should look pretty familiar. We ran `docker run` but this time we specified a flag: `-d`. The `-d` flag tells Docker to run the container and put it in the background, to daemonize it. 
We also specified the same image: `ubuntu`. Finally, we specified a command to run: /bin/sh -c "while true; do echo hello world; sleep 1; done" This is the (hello) world's silliest daemon: a shell script that echoes `hello world` forever. So why aren't we seeing any `hello world`'s? Instead Docker has returned a really long string: 1e5535038e285177d5214659a068137486f96ee5c2e85a4ac52dc83f2ebe4147 This really long string is called a *container ID*. It uniquely identifies a container so we can work with it. > **Note:** > The container ID is a bit long and unwieldy. A bit later, > we'll see a shorter ID and ways to name our containers to make > working with them easier. We can use this container ID to see what's happening with our `hello world` daemon. Firstly let's make sure our container is running. We can do that with the `docker ps` command. The `docker ps` command queries the Docker daemon for information about all the containers it knows about. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 1e5535038e28 ubuntu /bin/sh -c 'while tr 2 minutes ago Up 1 minute insane_babbage Here we can see our daemonized container. The `docker ps` has returned some useful information about it, starting with a shorter variant of its container ID: `1e5535038e28`. We can also see the image we used to build it, `ubuntu`, the command it is running, its status and an automatically assigned name, `insane_babbage`. > **Note:** > Docker automatically generates names for any containers started. > We'll see how to specify your own names a bit later. Okay, so we now know it's running. But is it doing what we asked it to do? To see this we're going to look inside the container using the `docker logs` command. Let's use the container name Docker assigned. $ docker logs insane_babbage hello world hello world hello world . . . The `docker logs` command looks inside the container and returns its standard output: in this case the output of our command `hello world`. Awesome! 
Our daemon is working and we've just created our first Dockerized application! Now we've established we can create our own containers let's tidy up after ourselves and stop our detached container. To do this we use the `docker stop` command. $ docker stop insane_babbage insane_babbage The `docker stop` command tells Docker to politely stop the running container. If it succeeds it will return the name of the container it has just stopped. Let's check it worked with the `docker ps` command. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES Excellent. Our container has been stopped. # Next steps So far, you launched your first containers using the `docker run` command. You ran an *interactive container* that ran in the foreground. You also ran a *detached container* that ran in the background. In the process you learned about several Docker commands: * `docker ps` - Lists containers. * `docker logs` - Shows us the standard output of a container. * `docker stop` - Stops running containers. Now, you have the basis learn more about Docker and how to do some more advanced tasks. Go to ["*Run a simple application*"](usingdocker.md) to actually build a web application with the Docker client. docker-1.10.3/docs/userguide/containers/dockerrepos.md000066400000000000000000000177671267010174400230220ustar00rootroot00000000000000 # Store images on Docker Hub So far you've learned how to use the command line to run Docker on your local host. You've learned how to [pull down images](usingdocker.md) to build containers from existing images and you've learned how to [create your own images](dockerimages.md). Next, you're going to learn how to use the [Docker Hub](https://hub.docker.com) to simplify and enhance your Docker workflows. The [Docker Hub](https://hub.docker.com) is a public registry maintained by Docker, Inc. It contains images you can download and use to build containers. 
It also provides authentication, work group structure, workflow tools like webhooks and build triggers, and privacy tools like private repositories for storing images you don't want to share publicly. ## Docker commands and Docker Hub Docker itself provides access to Docker Hub services via the `docker search`, `pull`, `login`, and `push` commands. This page will show you how these commands work. ### Account creation and login Typically, you'll want to start by creating an account on Docker Hub (if you haven't already) and logging in. You can create your account directly on [Docker Hub](https://hub.docker.com/account/signup/), or by running: $ docker login This will prompt you for a user name, which will become the public namespace for your public repositories. If your user name is available, Docker will prompt you to enter a password and your e-mail address. It will then automatically log you in. You can now commit and push your own images up to your repos on Docker Hub. > **Note:** > Your authentication credentials will be stored in the `~/.docker/config.json` > authentication file in your home directory. ## Searching for images You can search the [Docker Hub](https://hub.docker.com) registry via its search interface or by using the command line interface. Searching can find images by image name, user name, or description: $ docker search centos NAME DESCRIPTION STARS OFFICIAL AUTOMATED centos The official build of CentOS 1223 [OK] tianon/centos CentOS 5 and 6, created using rinse instea... 33 ... There you can see two example results: `centos` and `tianon/centos`. The second result shows that it comes from the public repository of a user, named `tianon/`, while the first result, `centos`, doesn't explicitly list a repository which means that it comes from the trusted top-level namespace for [Official Repositories](https://docs.docker.com/docker-hub/official_repos/). The `/` character separates a user's repository from the image name. 
Once you've found the image you want, you can download it with `docker pull `: $ docker pull centos Using default tag: latest latest: Pulling from library/centos f1b10cd84249: Pull complete c852f6d61e65: Pull complete 7322fbe74aa5: Pull complete Digest: sha256:90305c9112250c7e3746425477f1c4ef112b03b4abe78c612e092037bfecc3b7 Status: Downloaded newer image for centos:latest You now have an image from which you can run containers. ### Specific Versions or Latest Using `docker pull centos` is equivalent to using `docker pull centos:latest`. To pull an image that is not the default latest image you can be more precise with the image that you want. For example, to pull version 5 of `centos` use `docker pull centos:centos5`. In this example, `centos5` is the tag labeling an image in the `centos` repository for a version of `centos`. To find a list of tags pointing to currently available versions of a repository see the [Docker Hub](https://hub.docker.com) registry. ## Contributing to Docker Hub Anyone can pull public images from the [Docker Hub](https://hub.docker.com) registry, but if you would like to share your own images, then you must [register first](https://docs.docker.com/docker-hub/accounts). ## Pushing a repository to Docker Hub In order to push a repository to its registry, you need to have named an image or committed your container to a named image as we saw [here](dockerimages.md). Now you can push this repository to the registry designated by its name or tag. $ docker push yourname/newimage The image will then be uploaded and available for use by your team-mates and/or the community. ## Features of Docker Hub Let's take a closer look at some of the features of Docker Hub. You can find more information [here](https://docs.docker.com/docker-hub/). * Private repositories * Organizations and teams * Automated Builds * Webhooks ### Private repositories Sometimes you have images you don't want to make public and share with everyone. 
So Docker Hub allows you to have private repositories. You can sign up for a plan [here](https://registry.hub.docker.com/plans/). ### Organizations and teams One of the useful aspects of private repositories is that you can share them only with members of your organization or team. Docker Hub lets you create organizations where you can collaborate with your colleagues and manage private repositories. You can learn how to create and manage an organization [here](https://registry.hub.docker.com/account/organizations/). ### Automated Builds Automated Builds automate the building and updating of images from [GitHub](https://www.github.com) or [Bitbucket](http://bitbucket.com), directly on Docker Hub. It works by adding a commit hook to your selected GitHub or Bitbucket repository, triggering a build and update when you push a commit. #### To setup an Automated Build 1. Create a [Docker Hub account](https://hub.docker.com/) and login. 2. Link your GitHub or Bitbucket account through the ["Link Accounts"](https://registry.hub.docker.com/account/accounts/) menu. 3. [Configure an Automated Build](https://registry.hub.docker.com/builds/add/). 4. Pick a GitHub or Bitbucket project that has a `Dockerfile` that you want to build. 5. Pick the branch you want to build (the default is the `master` branch). 6. Give the Automated Build a name. 7. Assign an optional Docker tag to the Build. 8. Specify where the `Dockerfile` is located. The default is `/`. Once the Automated Build is configured it will automatically trigger a build and, in a few minutes, you should see your new Automated Build on the [Docker Hub](https://hub.docker.com) Registry. It will stay in sync with your GitHub and Bitbucket repository until you deactivate the Automated Build. To check the output and status of your Automated Build repositories, click on a repository name within the ["Your Repositories" page](https://registry.hub.docker.com/repos/). 
Automated Builds are indicated by a check-mark icon next to the repository name. Within the repository details page, you may click on the "Build Details" tab to view the status and output of all builds triggered by the Docker Hub. Once you've created an Automated Build you can deactivate or delete it. You cannot, however, push to an Automated Build with the `docker push` command. You can only manage it by committing code to your GitHub or Bitbucket repository. You can create multiple Automated Builds per repository and configure them to point to specific `Dockerfile`'s or Git branches. #### Build triggers Automated Builds can also be triggered via a URL on Docker Hub. This allows you to rebuild an Automated build image on demand. ### Webhooks Webhooks are attached to your repositories and allow you to trigger an event when an image or updated image is pushed to the repository. With a webhook you can specify a target URL and a JSON payload that will be delivered when the image is pushed. See the Docker Hub documentation for [more information on webhooks](https://docs.docker.com/docker-hub/repos/#webhooks) ## Next steps Go and use Docker! docker-1.10.3/docs/userguide/containers/dockervolumes.md000066400000000000000000000311051267010174400233420ustar00rootroot00000000000000 # Manage data in containers So far we've been introduced to some [basic Docker concepts](../containers/usingdocker.md), seen how to work with [Docker images](../containers/dockerimages.md) as well as learned about [networking and links between containers](../networking/default_network/dockerlinks.md). In this section we're going to discuss how you can manage data inside and between your Docker containers. We're going to look at the two primary ways you can manage data in Docker. * Data volumes, and * Data volume containers. 
## Data volumes A *data volume* is a specially-designated directory within one or more containers that bypasses the [*Union File System*](../../reference/glossary.md#union-file-system). Data volumes provide several useful features for persistent or shared data: - Volumes are initialized when a container is created. If the container's base image contains data at the specified mount point, that existing data is copied into the new volume upon volume initialization. (Note that this does not apply when [mounting a host directory](#mount-a-host-directory-as-a-data-volume).) - Data volumes can be shared and reused among containers. - Changes to a data volume are made directly. - Changes to a data volume will not be included when you update an image. - Data volumes persist even if the container itself is deleted. Data volumes are designed to persist data, independent of the container's life cycle. Docker therefore *never* automatically deletes volumes when you remove a container, nor will it "garbage collect" volumes that are no longer referenced by a container. ### Adding a data volume You can add a data volume to a container using the `-v` flag with the `docker create` and `docker run` command. You can use the `-v` multiple times to mount multiple data volumes. Let's mount a single volume now in our web application container. $ docker run -d -P --name web -v /webapp training/webapp python app.py This will create a new volume inside a container at `/webapp`. > **Note:** > You can also use the `VOLUME` instruction in a `Dockerfile` to add one or > more new volumes to any container created from that image. ### Locating a volume You can locate the volume on the host by utilizing the `docker inspect` command. $ docker inspect web The output will provide details on the container configurations including the volumes. The output should look something similar to the following: ... 
"Mounts": [
docker run -v /Users/<path>:/<container path> ...
```

On Windows, mount directories using:

```
docker run -v /c/Users/<path>:/<container path> ...
```
Without a label, the security system might prevent the processes running inside the container from using the content. By default, Docker does not change the labels set by the OS. To change a label in the container context, you can add either of two suffixes `:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file objects on the shared volumes. The `z` option tells Docker that two containers share the volume content. As a result, Docker labels the content with a shared content label. Shared volume labels allow all containers to read/write content. The `Z` option tells Docker to label the content with a private unshared label. Only the current container can use a private volume. ### Mount a host file as a data volume The `-v` flag can also be used to mount a single file - instead of *just* directories - from the host machine. $ docker run --rm -it -v ~/.bash_history:/root/.bash_history ubuntu /bin/bash This will drop you into a bash shell in a new container, you will have your bash history from the host and when you exit the container, the host will have the history of the commands typed while in the container. > **Note:** > Many tools used to edit files including `vi` and `sed --in-place` may result > in an inode change. Since Docker v1.1.0, this will produce an error such as > "*sed: cannot rename ./sedKdJ9Dy: Device or resource busy*". In the case where > you want to edit the mounted file, it is often easiest to instead mount the > parent directory. ## Creating and mounting a data volume container If you have some persistent data that you want to share between containers, or want to use from non-persistent containers, it's best to create a named Data Volume Container, and then to mount the data from it. Let's create a new named container with a volume to share. While this container doesn't run an application, it reuses the `training/postgres` image so that all containers are using layers in common, saving disk space. 
$ docker create -v /dbdata --name dbstore training/postgres /bin/true You can then use the `--volumes-from` flag to mount the `/dbdata` volume in another container. $ docker run -d --volumes-from dbstore --name db1 training/postgres And another: $ docker run -d --volumes-from dbstore --name db2 training/postgres In this case, if the `postgres` image contained a directory called `/dbdata` then mounting the volumes from the `dbstore` container hides the `/dbdata` files from the `postgres` image. The result is only the files from the `dbstore` container are visible. You can use multiple `--volumes-from` parameters to combine data volumes from several containers. To find detailed information about `--volumes-from` see the [Mount volumes from container](../../reference/commandline/run.md#mount-volumes-from-container-volumes-from) in the `run` command reference. You can also extend the chain by mounting the volume that came from the `dbstore` container in yet another container via the `db1` or `db2` containers. $ docker run -d --name db3 --volumes-from db1 training/postgres If you remove containers that mount volumes, including the initial `dbstore` container, or the subsequent containers `db1` and `db2`, the volumes will not be deleted. To delete the volume from disk, you must explicitly call `docker rm -v` against the last container with a reference to the volume. This allows you to upgrade, or effectively migrate data volumes between containers. > **Note:** Docker will not warn you when removing a container *without* > providing the `-v` option to delete its volumes. If you remove containers > without using the `-v` option, you may end up with "dangling" volumes; > volumes that are no longer referenced by a container. > You can use `docker volume ls -f dangling=true` to find dangling volumes, > and use `docker volume rm ` to remove a volume that's > no longer needed. 
## Backup, restore, or migrate data volumes Another useful function we can perform with volumes is use them for backups, restores or migrations. We do this by using the `--volumes-from` flag to create a new container that mounts that volume, like so: $ docker run --rm --volumes-from dbstore -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata Here we've launched a new container and mounted the volume from the `dbstore` container. We've then mounted a local host directory as `/backup`. Finally, we've passed a command that uses `tar` to backup the contents of the `dbdata` volume to a `backup.tar` file inside our `/backup` directory. When the command completes and the container stops we'll be left with a backup of our `dbdata` volume. You could then restore it to the same container, or another that you've made elsewhere. Create a new container. $ docker run -v /dbdata --name dbstore2 ubuntu /bin/bash Then un-tar the backup file in the new container's data volume. $ docker run --rm --volumes-from dbstore2 -v $(pwd):/backup ubuntu bash -c "cd /dbdata && tar xvf /backup/backup.tar --strip 1" You can use the techniques above to automate backup, migration and restore testing using your preferred tools. ## Important tips on using shared volumes Multiple containers can also share one or more data volumes. However, multiple containers writing to a single shared volume can cause data corruption. Make sure your applications are designed to write to shared data stores. Data volumes are directly accessible from the Docker host. This means you can read and write to them with normal Linux tools. In most cases you should not do this as it can cause data corruption if your containers and applications are unaware of your direct access. # Next steps Now we've learned a bit more about how to use Docker we're going to see how to combine Docker with the services available on [Docker Hub](https://hub.docker.com) including Automated Builds and private repositories. 
Go to [Working with Docker Hub](../containers/dockerrepos.md). docker-1.10.3/docs/userguide/containers/index.md000066400000000000000000000011661267010174400215730ustar00rootroot00000000000000 # Learn by example * [Hello world in a container](dockerizing.md) * [Run a simple application](usingdocker.md) * [Build your own images](dockerimages.md) * [Network containers](networkingcontainers.md) * [Manage data in containers](dockervolumes.md) * [Store images on Docker Hub](dockerrepos.md) docker-1.10.3/docs/userguide/containers/networkingcontainers.md000066400000000000000000000250641267010174400247440ustar00rootroot00000000000000 # Network containers If you are working your way through the user guide, you just built and ran a simple application. You've also built in your own images. This section teaches you how to network your containers. ## Name a container You've already seen that each container you create has an automatically created name; indeed you've become familiar with our old friend `nostalgic_morse` during this guide. You can also name containers yourself. This naming provides two useful functions: * You can name containers that do specific functions in a way that makes it easier for you to remember them, for example naming a container containing a web application `web`. * Names provide Docker with a reference point that allows it to refer to other containers. There are several commands that support this and you'll use one in an exercise later. You name your container by using the `--name` flag, for example launch a new container called web: $ docker run -d -P --name web training/webapp python app.py Use the `docker ps` command to see check the name: $ docker ps -l CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES aed84ee21bde training/webapp:latest python app.py 12 hours ago Up 2 seconds 0.0.0.0:49154->5000/tcp web You can also use `docker inspect` with the container's name. 
$ docker inspect web [ { "Id": "3ce51710b34f5d6da95e0a340d32aa2e6cf64857fb8cdb2a6c38f7c56f448143", "Created": "2015-10-25T22:44:17.854367116Z", "Path": "python", "Args": [ "app.py" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, ... Container names must be unique. That means you can only call one container `web`. If you want to re-use a container name you must delete the old container (with `docker rm`) before you can reuse the name with a new container. Go ahead and stop and remove your old `web` container. $ docker stop web web $ docker rm web web ## Launch a container on the default network Docker includes support for networking containers through the use of **network drivers**. By default, Docker provides two network drivers for you, the `bridge` and the `overlay` drivers. You can also write a network driver plugin so that you can create your own drivers but that is an advanced task. Every installation of the Docker Engine automatically includes three default networks. You can list them: $ docker network ls NETWORK ID NAME DRIVER 18a2866682b8 none null c288470c46f6 host host 7b369448dccb bridge bridge The network named `bridge` is a special network. Unless you tell it otherwise, Docker always launches your containers in this network. Try this now: $ docker run -itd --name=networktest ubuntu 74695c9cea6d9810718fddadc01a727a5dd3ce6a69d09752239736c030599741 Inspecting the network is an easy way to find out the container's IP address. 
```bash $ docker network inspect bridge [ { "Name": "bridge", "Id": "f7ab26d71dbd6f557852c7156ae0574bbf62c42f539b50c8ebde0f728a253b6f", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.17.0.1/16", "Gateway": "172.17.0.1" } ] }, "Containers": { "3386a527aa08b37ea9232cbcace2d2458d49f44bb05a6b775fba7ddd40d8f92c": { "EndpointID": "647c12443e91faf0fd508b6edfe59c30b642abb60dfab890b4bdccee38750bc1", "MacAddress": "02:42:ac:11:00:02", "IPv4Address": "172.17.0.2/16", "IPv6Address": "" }, "94447ca479852d29aeddca75c28f7104df3c3196d7b6d83061879e339946805c": { "EndpointID": "b047d090f446ac49747d3c37d63e4307be745876db7f0ceef7b311cbba615f48", "MacAddress": "02:42:ac:11:00:03", "IPv4Address": "172.17.0.3/16", "IPv6Address": "" } }, "Options": { "com.docker.network.bridge.default_bridge": "true", "com.docker.network.bridge.enable_icc": "true", "com.docker.network.bridge.enable_ip_masquerade": "true", "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", "com.docker.network.bridge.name": "docker0", "com.docker.network.driver.mtu": "9001" } } ] ``` You can remove a container from a network by disconnecting the container. To do this, you supply both the network name and the container name. You can also use the container id. In this example, though, the name is faster. $ docker network disconnect bridge networktest While you can disconnect a container from a network, you cannot remove the builtin `bridge` network named `bridge`. Networks are natural ways to isolate containers from other containers or other networks. So, as you get more experienced with Docker, you'll want to create your own networks. ## Create your own bridge network Docker Engine natively supports both bridge networks and overlay networks. A bridge network is limited to a single host running Docker Engine. An overlay network can include multiple hosts and is a more advanced topic. 
For this example, you'll create a bridge network: $ docker network create -d bridge my-bridge-network The `-d` flag tells Docker to use the `bridge` driver for the new network. You could have left this flag off as `bridge` is the default value for this flag. Go ahead and list the networks on your machine: $ docker network ls NETWORK ID NAME DRIVER 7b369448dccb bridge bridge 615d565d498c my-bridge-network bridge 18a2866682b8 none null c288470c46f6 host host If you inspect the network, you'll find that it has nothing in it. $ docker network inspect my-bridge-network [ { "Name": "my-bridge-network", "Id": "5a8afc6364bccb199540e133e63adb76a557906dd9ff82b94183fc48c40857ac", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.18.0.0/16", "Gateway": "172.18.0.1/16" } ] }, "Containers": {}, "Options": {} } ] ## Add containers to a network To build web applications that act in concert but do so securely, create a network. Networks, by definition, provide complete isolation for containers. You can add containers to a network when you first run a container. Launch a container running a PostgreSQL database and pass it the `--net=my-bridge-network` flag to connect it to your new network: $ docker run -d --net=my-bridge-network --name db training/postgres If you inspect your `my-bridge-network` you'll see it has a container attached. You can also inspect your container to see where it is connected: $ docker inspect --format='{{json .NetworkSettings.Networks}}' db {"my-bridge-network":{"NetworkID":"7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", "EndpointID":"508b170d56b2ac9e4ef86694b0a76a22dd3df1983404f7321da5649645bf7043","Gateway":"172.18.0.1","IPAddress":"172.18.0.2","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:11:00:02"}} Now, go ahead and start your by now familiar web application. This time leave off the `-P` flag and also don't specify a network. 
$ docker run -d --name web training/webapp python app.py Which network is your `web` application running under? Inspect the application and you'll find it is running in the default `bridge` network. $ docker inspect --format='{{json .NetworkSettings.Networks}}' web {"bridge":{"NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID":"508b170d56b2ac9e4ef86694b0a76a22dd3df1983404f7321da5649645bf7043","Gateway":"172.17.0.1","IPAddress":"172.17.0.2","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:11:00:02"}} Then, get the IP address of your `web` $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' web 172.17.0.2 Now, open a shell to your running `db` container: $ docker exec -it db bash root@a205f0dd33b2:/# ping 172.17.0.2 ping 172.17.0.2 PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data. ^C --- 172.17.0.2 ping statistics --- 44 packets transmitted, 0 received, 100% packet loss, time 43185ms After a bit, use CTRL-C to end the `ping` and you'll find the ping failed. That is because the two container are running on different networks. You can fix that. Then, use CTRL-C to exit the container. Docker networking allows you to attach a container to as many networks as you like. You can also attach an already running container. Go ahead and attach your running `web` app to the `my-bridge-network`. $ docker network connect my-bridge-network web Open a shell into the `db` application again and try the ping command. This time just use the container name `web` rather than the IP Address. $ docker exec -it db bash root@a205f0dd33b2:/# ping web PING web (172.18.0.3) 56(84) bytes of data. 
64 bytes from web (172.18.0.3): icmp_seq=1 ttl=64 time=0.095 ms 64 bytes from web (172.18.0.3): icmp_seq=2 ttl=64 time=0.060 ms 64 bytes from web (172.18.0.3): icmp_seq=3 ttl=64 time=0.066 ms ^C --- web ping statistics --- 3 packets transmitted, 3 received, 0% packet loss, time 2000ms rtt min/avg/max/mdev = 0.060/0.073/0.095/0.018 ms The `ping` shows it is contacting a different IP address, the address on the `my-bridge-network` which is different from its address on the `bridge` network. ## Next steps Now that you know how to network containers, see [how to manage data in containers](dockervolumes.md). docker-1.10.3/docs/userguide/containers/search.png000066400000000000000000000430031267010174400221110ustar00rootroot00000000000000PNG  IHDR o?_PLTEq, degHFEabd璓ppq{xҮ hij_`a!~ֆ@>=ʀ~]]^\ʹםzxxXWY633+$*(POO&L3'&%̶^ېѹkv{ l$43)$*.hŎԱ¬u2B*q'И0+*,؎?źz#޴",g~rwUcm^lv7IVSyśXAӭrb`,r=z͜ItHӲ꥞wUul_t5DSaa{p&¯x9b r ڲm%DR-UpZKԒE璊+ZtAh^Évp>e?~eYڵtXE};Q)@$9F_8^bJ2D%ɟa QU0*ftń~;azb$!9[SBh8xdT#BIDATx=k#G<e{y9[z]XuU ͠][ZPj?J@@չ6EYDd%V䰟ewŸgf*1c1c1cr`10 JcLw +|<^1FK8ȴVXA-Bȯ@Zf4ү ixY/:hKίֳU@{Ov8!e|'-Ц*HM!۟l/1I'?v07|!'Fᅡ9wwJ葲ۃ"4'ȂhILQKڝ߿ڶ+cҙL6ձ8} P aIA$NAp|8aSu ]WBS*"^J/7!iA^w_@mUfMpH(;f$Z?ߦNMDGI7hOl@σn wI ؾdM8Cx ǽugl;`9癍2Fm:v=+]0@,dyHHx\(wIm lB1Fj"@< ئDhj*} %7BB&^R" 4$EZmwf*O993 d_hpxؠrL+(֚H-DjD7ЀqO)>y:($ @@_=X,"@ bMڞ %hȋ/lLN7ۜiisQCD^e8JiM"\,NHFK #[:ƴIB7%jJ@!mI>EՈidb\R.v"*f(u}F@`k2taa2-2}Y0?"71~ӳ?W$Ѹߐa|_|Ho(T(7\}Coj ֮sB "Z`3NnV@F( wZf6)1r 07I~&d_A;cV v8VCh(lh3ݛ1 }Y i}z+aB3pښ9c,B>omY(Fx~ܱzW7?puZk EZF_*_F*~m SoK \BRC "k.#|z0{D=Y F@Ў5D#] 3mWw/~BFqhxX3VU)Zt)HyQDHXa7CpBI 7•Soip39ګw\}B bӕX(iDDG^0şĢ $hrROB ^Wf8j$ Dj(XǗzՈp>v 9Ö[4|+| ϟ޵_&o/|CqK/=6ŝ32B, ŷa"1Aˁ$h4`шe"\…wwR:~s8 #aoӯ5/Wn"y`T\GȨm|/  @ tW#:o qI'ac3 &AImhmz'veh6=2dmN@*:Ɲ1S4 kb>%efTk;{=+%1@i*kUғ&X]`z8q>!-!6 ~ ' Ś'Rs业"p[ b(1 TDEYZF57MjL Gg0ʾ5@| ;&kJIF _79,BH  ȩT@pˀDFv|1*8 O5@H^Õn4):s^][x_仓@ءWJMcbhI[ ۢIs3@(w甔|3!|%db(LkB8ܜ .Vx%z5CJRk󄸨QG |O;*\{)އI A1hM&`,DQ-GN{k;ZKʗcYkP]К8u:x-r;oڅ"w3 "lJрL==9z< OxsnC8y銈(7>ɟ g ~rQ.Db,qnŢcnL+rq$^  | m(kAdeSU 
\B^.im2.7 u;G mA;͎sS}$H@b;ssJ}XV]XDڣP%/5"ȿ-y6\[]@`/@{ehR&řw˸ o A7 Ƃ\a,Y&]5 < D;ЩȂ̀_uД a9^ kӘ.\zr XOTOGa\,U::]"Sum8i=Aa>/o <-`Pr"yOxP 63D#FN+WH6o"%p8+=@@Ii `cvfG'Pd3EqSKnFҝ9^1 [C+33fxQ[9S|Slr pv]% o`0 . "i"p渻9j":@K,]@dQE$x/{أp}FlR}[ZJ:hy nY &oxldXD x5b/v LB,[fXh vl%O*dc lkv=[?872 IvO~p܄c(~taE|s.`Jö֝LD)$4bOha\0:S @ޒ=bd$ro#/4\w=[q8#c>\X1ؘRBBFcs۫s Vsw'BQ)B>NBg,Vu1BN$$$$?| p:@>/m&MC5?ENF"ݟ$}} itC_j.E)ILTU* pA\ PA$ q0da4M4;nr0AT a] >:\QAXmuuzGye_`pmJ Z[|h n 0 1^\uKpO*.Xqr8w3< 8+|ޖ\QA֩<'Mg7[3m uBm{Rit4m6l++H .כi+j:fӬ\4EeFsDi%iq\pA۰?&'Qb..}5lu6;/fsaG+L?0,Zu zhqA'(U -ÕPb9K=~&LdDZWdcfhKs'"_׿)߁)#ì\)'e&݃PYp N@t4U\կZ$WKִ ;[hCģ3۬#5*:^a醲dYy5mETWN6CA`qԗZ} O \JFQcchbh-ELJ ΰ{'6o P^tm!T^k7)m]BPIC@\z#o^ W?#bQE?_@&d"]'Ĺ?oWwUٳtql6׬jSbB[ ]K _o+tSp+o D6yÝ}A7A>`^~qUiA1u3m62 < )Q (Uto|lL6o;YL' 3i~ "/%Īi^vb5\-W6dJ7+57|aKxa@ s䰘 rGڜD\[Ӧ3mfǦ_@DgMhM t x1^3Mk0yI<ɵ#&˵紧nh1LhÙwүwLF?څ. [y8>xh){W4ldp&X jمQ #88 = ȬGX!'tL [a֔6٬;B- $}׉ǝ7wLqAc(G"{"<&.S}CL8[~O: pA .fE DHxЈ Q:7W3p<Ԉ }༶Ex s40cAx\ p H(C0DE`nN(^xa Y v X؁bj"D!Y9g3YrN r7\?E9d?eZt/6;9KwAӨo3{*oۤ+}}q>4sbn{1 L00 H)Ba iTHJ6) \^0/8H;dB&dR"s0T9sBr>>)5QTOPN\NHت 8*HxvHJ8wA"y)n1'bs_Gw&"D 80 |3]&X`|żoeuAf˜k+h/|RuK[f dg[J _RStAxa'ƁA+\:~RBxsbY-A"D$ij ,_)-:2,RVųfg'%88w⎰ v,Z C Xb}/¢rtmXq+GGFFsdj[2'6 'P_JXXJKyͤ@.--u-q Reh[K*!K泲ZI]w13lBev,tG+4X:Xj< AO+E2'rv|rNQUGn={)|0T$L'HH*=IU;gS`L;i`[ҊYO䙾c3eFw  >SpyM~4t@IT/ɚgUWj͇aѪ{ey\˹K~伓45 zs-\%AKfߕ"8G]7:@7ɆVQō'*-AIj 2^^OLF-B|1+mS CWWwj ں-ɲ<,PRCǕ'S-X;u9mQXDCs#g6L$N5'Gwel!-NTƓw55vTO%7fg=oO}_M lI͢yrb6 Qy|DMZ,t+yT)m2ڕ{V+,M[l*XʷsNBהkKU̕L,[nV*(@g g?BLkRJVC}c$zﴬzXu@8_Ὃ=>y}KYÁ V+w- ☰C~?xCrAĿ2CAAAAAAAAAAAAAAAv  o~c AB4C2!%rׄ BdXFtJDDh$0OPD4^ ,E`~ X! !|2$|  @h"Dfe P2gB  em8#2tYc "<0<քRNH7;BWB@RF&2H(~AbE {A!H AH!AW!A!Hd H AH!AW!A!H$P<abQ8,?9Z AHC5 _A(AHOf+kTY%)/ cq 1B+yj=c|I+׼ix,x[3= AHF-t[N ϬN[טĚ`wBL9jָʟ~wٙmxn ίG BND􀥨o7n79!rѨЁ tm*G+l`(/GR 5L;rrV,"2Q ZGg?`̀J$oMAdS$[- +O ARӏpLﮫP%!pƎ@Ճ셧Gf*'U&FzJK6:y <\6obgIrӞ=?g`$=._ش߇UUa! 
k4L䎻4mGF:^+B>7ABe+'- ?^TZ3 'h"EsەѢtS{fKO6%vؑ^:1STLn|3񶒍Z`LO,;U}ݪo:Zp1]_ /vo 0!wׁVsqڵ ?Lb)>ϙ2.}W'j^PVMg- -kXiֶOZTvu3ANz}>im9<0j˜THJo 6dn̎&}ÿxc#ϝV{M DUdߏǎ;~erAsimWn[p>9΃<9JΆܭuBfجa mw٩4L%#.ufbȤU=wN]` Rw.gE;4[r#*73Ali_ <YPwߍ] s:~еN{'N&?S+b1ƍ<1]W +EbLx&HM]]Fo2ؒNa-owp̥xFٯ9ӸrZuU_I 3_k㯻US538&DoH8֭G?J{#Dɋ׎;ϬD "@ ǯ!">cء]|l[;Lpi-ΝR! ̘Jzݭly@w~?P 5?8cfXUMZ5Cʛ0A\*Mujɽ>WS 3eZ7SLΘ!c^;[,GCw-3}摄L=i{b7DDEmHܟ~$)¿Q猓:v?۱Ǐ:9tfox_ՂںQ7 ¯ rœ'/ơNCxTMOt1 m*pP3uaZ3>g5 N|ߓPO$]m1y(Iؖ;0s0s .SVSaJIHX9_TVGPQ8-y?浍|GXyzReK2%)H!QCI>ί}d0+3fzQ+di*3^`~}cgeelc%эm)0^8w:aA4Fx' lt{{{ FY{0ls ]zt={y+^q:8)M ;OyUTXZZjQ=dPc=l7><0 `\@YG= 8;>;^?mt1xag'_q1 \d XL,c03cO}ӎXl HfnԱ? *)kJF^NiwAt}}g5tz!ˣ>d,*IJ9Q b]3. )Z1͎\Y"@޸U5N`<}rL1Vd`Pќ`-ثR> .yl-[ڙ@@pw `r V&ld >GLFbJb>tGu\6@ީ7m->:/_@TlDT ^ّWP!iu=gtX>Ŋ)%}+>uFȍ  9BB}$/c4vcɂ_3r|qa~2}7~W[B4 FҢ+2FNvNF={q g{;;;{oOP62}70W2Ѝ&tFHX Lljrq4wU2Nkij>3tzEC(< kLoQ'g@iH Q*V/KU?zM]'MV7cM2z4ln[n! ӧ++<Ϸ%Ha9Z )wq} ilv|; Ȥ㵊N˘ 6AJ^%hEg16^s@-awi`oBRfQ#26U)Xhu-55s8| #>"~f )"*ŸH@rYD[ aZ|6=x{5 SŽyg{h,?RwcgQȋ%ܯ97NtA9 IK;D5>۶ _ Ԯ݄E&/@R%3kOLap77 .rݘ5^2 ?, &F=`1q<0{'?zĝ,r<>Bw`n5W~OOsftB;n{_SXy!3 W2yH19mUhK#X(Ik!t8f{E@9D<G3'7q:3JD>iQ <F!īUUJjuQZ>` {>ſX5ahxr@VݺmħЪ9A%D-lZ˩д9 BN0<Q?y  <:ڊ@,1B7g''Ө=+يӨ!lQ r&H Ϊ"\?ݝOYW?yݩk[3[aqaumuՙ`x%RK7jbұp;[Q4Jɰ&rF۷=FV&vv xl uE޲Vɛy^,œղ+8`(BK拀t #+XQd  2R]1 d_}+ 0vpA֏/6uB|vb,L=P2ئK+x0@10ƺ#8?8;߯-omNO'c‚',e!״>I8U7{'Tf߀[+Ңc`Ԝ:&<6  -]|Uy% $(9tIRt~`Aˆ/zNn@OO¼ǃ gԨ_% R Qu 95cjfeu͹6;[[ŵULȍͻYS䅔2Ng ,#XD* *G _.}+"JN3cGV-#."ٌs0!ac<" !:Aj3/l8Ij:)'ZAZh<0atP>OFׯlE@I˥`MTjm?|hnmsaс1wom ' wn~vp}Ċ-G$`!cԂJ"jHsodۤǍP6bVwNwlArz<,9I: ``Vp7u[@PP`7S!.| &_) ّRdȁDxوjkk3ks7a~_dUwqjHiibP  lS @ =,JKc;E"Ў@9G,52, bLqMQy4;1y@M_6o!FQRBbUR6nu_mL! [& AvɃ|{c k3fBb` ʤ.7JpIs|Hr:L 3@g' leٞ u|Bt"J$ґ(IJTx:LlALYR"Z \$[: lIL9'lJTN'1`xKdrG6îkV}JA` Z4(79Z4ά0VMkhy<'z#gxPHzz필"q5%lI:])Xj$>@ dM@ Qx1Sߚa)6][آ൙G] P7W22{}qu f澃op $Rs 2 Ħ |R@:zP8a"r!zFzӓs H~h*'XF#* .qKB+D@ZHq|( B suȅhf!*_Z4l%0ªUeFX9m_ͭ} ͵S [ksS7׶柬nMS.66$9Ylu6Ux]3e1̋/,Uré{(ģxt,.$L@$WiQ'5% f%]enR?ĥJ~퀰lQy*? 7^. 
1 S E>;٨֥T,|$}SsTapEorobqsnO3X#?ܤ"4sUk.63UITs\(+*Ū- -СD1o$QQ(鑱 Ks9[Et7Qj$F=HC VX>}԰-.LX2]zfR-h8ZZNHv0*sk^S L=25~QWSwp4m>{pD"~b+#lB @PL}8e=,P('[<0(.ړt&_dD`> | ZpAOrn񈁭 1~; b{zTL%LZWt99, J?֒Jx}sgh0{a }n%D: ͵t.y\QK!d`^ɩ k$X;qZY?\Phv@Eȸwb @i)xAzdp&֋\o[GA.E>`$e!C!>M$xSɞA|KZpa7ν :k>%RL[5Gg*f_/23E_̀'mQs {#ɂ"EtC.[dyED?~lO=ee$D 6@*.3 a%'}J$>KgΟ( :ff_0i 3]@Y["Nw0K냢{%I `K3JRJAHP>v2ЫOn8{DaCVݳGWUɌnL~P6zk# *r<&>.=AmM^]t) ه"cG\w)W@ʗK\Zj DLk0b:5p~X! IENDB`docker-1.10.3/docs/userguide/containers/usingdocker.md000066400000000000000000000256671267010174400230150ustar00rootroot00000000000000 # Run a simple application In the ["*Hello world in a container*"](dockerizing.md) you launched your first containers using the `docker run` command. You ran an *interactive container* that ran in the foreground. You also ran a *detached container* that ran in the background. In the process you learned about several Docker commands: * `docker ps` - Lists containers. * `docker logs` - Shows us the standard output of a container. * `docker stop` - Stops running containers. ## Learn about the Docker client If you didn't realize it yet, you've been using the Docker client each time you typed `docker` in your Bash terminal. The client is a simple command line client also known as a command-line interface (CLI). Each action you can take with the client is a command and each command can take a series of flags and arguments. # Usage: [sudo] docker [subcommand] [flags] [arguments] .. # Example: $ docker run -i -t ubuntu /bin/bash You can see this in action by using the `docker version` command to return version information on the currently installed Docker client and daemon. $ docker version This command will not only provide you the version of Docker client and daemon you are using, but also the version of Go (the programming language powering Docker). 
Client: Version: 1.8.1 API version: 1.20 Go version: go1.4.2 Git commit: d12ea79 Built: Thu Aug 13 02:35:49 UTC 2015 OS/Arch: linux/amd64 Server: Version: 1.8.1 API version: 1.20 Go version: go1.4.2 Git commit: d12ea79 Built: Thu Aug 13 02:35:49 UTC 2015 OS/Arch: linux/amd64 ## Get Docker command help You can display the help for specific Docker commands. The help details the options and their usage. To see a list of all the possible commands, use the following: $ docker --help To see usage for a specific command, specify the command with the `--help` flag: $ docker attach --help Usage: docker attach [OPTIONS] CONTAINER Attach to a running container --help Print usage --no-stdin Do not attach stdin --sig-proxy=true Proxy all received signals to the process > **Note:** > For further details and examples of each command, see the > [command reference](../../reference/commandline/cli.md) in this guide. ## Running a web application in Docker So now you've learned a bit more about the `docker` client you can move onto the important stuff: running more containers. So far none of the containers you've run did anything particularly useful, so you can change that by running an example web application in Docker. For our web application we're going to run a Python Flask application. Start with a `docker run` command. $ docker run -d -P training/webapp python app.py Review what the command did. You've specified two flags: `-d` and `-P`. You've already seen the `-d` flag which tells Docker to run the container in the background. The `-P` flag is new and tells Docker to map any required network ports inside our container to our host. This lets us view our web application. You've specified an image: `training/webapp`. This image is a pre-built image you've created that contains a simple Python Flask web application. Lastly, you've specified a command for our container to run: `python app.py`. This launches our web application. 
> **Note:** > You can see more detail on the `docker run` command in the [command > reference](../../reference/commandline/run.md) and the [Docker Run > Reference](../../reference/run.md). ## Viewing our web application container Now you can see your running container using the `docker ps` command. $ docker ps -l CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES bc533791f3f5 training/webapp:latest python app.py 5 seconds ago Up 2 seconds 0.0.0.0:49155->5000/tcp nostalgic_morse You can see you've specified a new flag, `-l`, for the `docker ps` command. This tells the `docker ps` command to return the details of the *last* container started. > **Note:** > By default, the `docker ps` command only shows information about running > containers. If you want to see stopped containers too use the `-a` flag. We can see the same details we saw [when we first Dockerized a container](dockerizing.md) with one important addition in the `PORTS` column. PORTS 0.0.0.0:49155->5000/tcp When we passed the `-P` flag to the `docker run` command Docker mapped any ports exposed in our image to our host. > **Note:** > We'll learn more about how to expose ports in Docker images when > [we learn how to build images](dockerimages.md). In this case Docker has exposed port 5000 (the default Python Flask port) on port 49155. Network port bindings are very configurable in Docker. In our last example the `-P` flag is a shortcut for `-p 5000` that maps port 5000 inside the container to a high port (from *ephemeral port range* which typically ranges from 32768 to 61000) on the local Docker host. We can also bind Docker containers to specific ports using the `-p` flag, for example: $ docker run -d -p 80:5000 training/webapp python app.py This would map port 5000 inside our container to port 80 on our local host. You might be asking about now: why wouldn't we just want to always use 1:1 port mappings in Docker containers rather than mapping to high ports? 
Well 1:1 mappings have the constraint of only being able to map one of each port on your local host. Suppose you want to test two Python applications: both bound to port 5000 inside their own containers. Without Docker's port mapping you could only access one at a time on the Docker host. So you can now browse to port 49155 in a web browser to see the application. ![Viewing the web application](webapp1.png). Our Python application is live! > **Note:** > If you have been using a virtual machine on OS X, Windows or Linux, > you'll need to get the IP of the virtual host instead of using localhost. > You can do this by running the `docker-machine ip your_vm_name` from your command line or terminal application, for example: > > $ docker-machine ip my-docker-vm > 192.168.99.100 > > In this case you'd browse to `http://192.168.99.100:49155` for the above example. ## A network port shortcut Using the `docker ps` command to return the mapped port is a bit clumsy so Docker has a useful shortcut we can use: `docker port`. To use `docker port` we specify the ID or name of our container and then the port for which we need the corresponding public-facing port. $ docker port nostalgic_morse 5000 0.0.0.0:49155 In this case you've looked up what port is mapped externally to port 5000 inside the container. ## Viewing the web application's logs You can also find out a bit more about what's happening with our application and use another of the commands you've learned, `docker logs`. $ docker logs -f nostalgic_morse * Running on http://0.0.0.0:5000/ 10.0.2.2 - - [23/May/2014 20:16:31] "GET / HTTP/1.1" 200 - 10.0.2.2 - - [23/May/2014 20:16:31] "GET /favicon.ico HTTP/1.1" 404 - This time though you've added a new flag, `-f`. This causes the `docker logs` command to act like the `tail -f` command and watch the container's standard out. We can see here the logs from Flask showing the application running on port 5000 and the access log entries for it. 
## Looking at our web application container's processes In addition to the container's logs we can also examine the processes running inside it using the `docker top` command. $ docker top nostalgic_morse PID USER COMMAND 854 root python app.py Here we can see our `python app.py` command is the only process running inside the container. ## Inspecting our web application container Lastly, we can take a low-level dive into our Docker container using the `docker inspect` command. It returns a JSON document containing useful configuration and status information for the specified container. $ docker inspect nostalgic_morse You can see a sample of that JSON output. [{ "ID": "bc533791f3f500b280a9626688bc79e342e3ea0d528efe3a86a51ecb28ea20", "Created": "2014-05-26T05:52:40.808952951Z", "Path": "python", "Args": [ "app.py" ], "Config": { "Hostname": "bc533791f3f5", "Domainname": "", "User": "", . . . We can also narrow down the information we want to return by requesting a specific element, for example to return the container's IP address we would: $ docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' nostalgic_morse 172.17.0.5 ## Stopping our web application container Okay you've seen web application working. Now you can stop it using the `docker stop` command and the name of our container: `nostalgic_morse`. $ docker stop nostalgic_morse nostalgic_morse We can now use the `docker ps` command to check if the container has been stopped. $ docker ps -l ## Restarting our web application container Oops! Just after you stopped the container you get a call to say another developer needs the container back. From here you have two choices: you can create a new container or restart the old one. Look at starting your previous container back up. $ docker start nostalgic_morse nostalgic_morse Now quickly run `docker ps -l` again to see the running container is back up or browse to the container's URL to see if the application responds. 
> **Note:** > Also available is the `docker restart` command that runs a stop and > then start on the container. ## Removing our web application container Your colleague has let you know that they've now finished with the container and won't need it again. Now, you can remove it using the `docker rm` command. $ docker rm nostalgic_morse Error: Impossible to remove a running container, please stop it first or use -f 2014/05/24 08:12:56 Error: failed to remove one or more containers What happened? We can't actually remove a running container. This protects you from accidentally removing a running container you might need. You can try this again by stopping the container first. $ docker stop nostalgic_morse nostalgic_morse $ docker rm nostalgic_morse nostalgic_morse And now our container is stopped and deleted. > **Note:** > Always remember that removing a container is final! # Next steps Until now you've only used images that you've downloaded from Docker Hub. Next, you can get introduced to building and sharing our own images. Go to [Working with Docker Images](dockerimages.md). docker-1.10.3/docs/userguide/containers/webapp1.png000066400000000000000000000320411267010174400222030ustar00rootroot00000000000000PNG  IHDRPLTE䩩੪᪩ᱱponhhh⨨\\[⯯mlmrrrcbbƷVUT͵軻766A?@xxyz_" |8J!oq[8X!4Pi{%Y"wO4ڎ:u3ͱev Au -ǭcs\,W~p;]'4PuΏkܞ*ۭ{BHJ(t}˝hlX.ײ?.lrw>n.eCq$Owr/q G]xrݺ\rݗ/9rx~<]لG e=j3A@fX0lD0&DmLLb#B-gctm0F~ImNW~~:],Wt3՜rmttc@_9;LC̗rv \-P:I OWN7}rx1v[vt]\pPȶ?62,8"FȺu oo\*B|wO~.0B2螅)C,%IfdLI`Dn%-D5%#3va{vM#f,p;/n"R-zKٲm+)?De#(c)%{LǾĬ9nٲ(7Y/-xo+4W^+n"pW07V\v+zhnp7K͗(ɕ_:6$sn,ţܾVw"܂V(/eไUXL0zh""3)+gJ rG,E\, J0R("UVNԭ]W{]X:'59g"7Եuӵk"Z? 
kz gzݩ~vO7wztϿs{wj<=X v6el׮u箇;}N{p'$InNb~U{pqz bUCĩ32)ګdS$2'Ee~('3 :A SxmtTDc80k[m]h[ M՛~p[#u~gw;᎘s܆_ONPpG{pޝ;ҳ[`mPXܖoH!L`ڋ0rB'+/)%3bОv)^ڟVRR bNOK}n.7wyo}C1A: G5l>O#@EQU f~vjpuquw#yNׇ[̝UIG⛛v̢9[qH0-/ ؽp 0B3<&$$%sTűIG,(Eq8:"$Pl{.Pۗr#A9BhQJ82כFF˒--ْm_~GpQfK%wQc{[ԊvjGcqG#-t~$kI[PJwGn2bgˑOϟ#?o{^$)!,mPAh)4U$ԡ(P")/Fr$8I%[.ۯ_~1,Bv,-Z{q $ bC %Z@HAlPrDDK mt,)IݾvKn壸ٻ\LG^຿ Ll)P+eʡ#F9"C8cG:Z 9#o]]>t@ 's嬢&PnTwlK jmhNpdt sgal1OϦz5ЂaV3zNYv|'LEYQ 62\g{z>ɕӆ*&M]+S5U Hue\ߺf;_xAسUYj@նms])4 >W'xꉪq8\G yΪT0.y. |Ͽt?I3f`/G ʫMuNúPnUJLGt?dnt9TteG;S֐2{޽-{:p89)a/IO49MIRKKJYERPP*.]*, UDW@Y]p׫}3-MC<=ۉ39z5cpxp^[S[@r.#Eu/HBBzMbS\· R n $zw1 5Fp4l 6PdU_Yn7|U1Afl {/x|h4Qp_p!lzs984Wc'؝ ƎX"y)v;9`[ vbgĭ8jYd$jo]_r #`.pېQ*8UX )fn̈&,҆tmAodhq[[P$&"hX(sVЧH̕ 納%u3+gԱV~Ȏ0'/cѡ#Ot=2XѰfF;TE>0x#K/@a#hB1f6I R2IZGRVqm PYÒn@]S})l 0lW1N@+&ؒOmmQ\&; `P7 ;njTAPÁjF*} RtWto)d7n 2(ܵ"kMM%]Lo٢0<*ɎqHI.pRYaīNQDVl,s)oYq L1n\mzt v?p;.F?WS7* Fai&JS[Hb&ԠL0Q+*qj.Vÿ)@`cSOA 0-vSj2&^o}1[wiABBсɄL\2nX%ӆt ܱ?_P8~Sr|Rry&+*X&e(cRJ$ .|LR5! SA@G )FogRTM% !+ILp̟jNf?P~N`I}g ._>(F<uU/d +SS)J?&Opb-5TTD@WKpuMj""\F%kb0EPXh*rMͨ-v!dZ1., !#7'8tGfҷkqRnws+7AW,|gr/)Tۏ's @V8=,TFx4_zPGn͵[O!0++(nYa(mQ$jʊZZT`^ O$ 2|MEqo#JG &I.Jm> JeUv[[mc7g樊2l6<r+_\!?GVN/Y];?z+D.k5:Ë5h}&ꎳd|.{&ݪ}yMZ U7af@_qP Y]LT@b*u[wDl˷Z$231hJTn+qNUZH| XT&BdVvX -D9 Jh#>xd({5v oOq {yy7 tV Xu%}[.w.afF:'3Oq$#'zDYM&I1}@\oRS+!D" nOabPujZDTv@EsIJQ, R_jMUpv 4KpT? 
RN +?faJwQUJּ:vx6 dDF::;QkwzƋ^C M>iЛ(}H4+hEw0=8(1{FYxFx@RtJaڝ~Y*jL_=5I=YSDfE|~X6_A`osYDo28K, "ZNf*5R_$J߼YtHHpr4Tv9L a/+@Q&0- : u#>3+I:m5x͠E"" y,z cgkc..%.(/c@̐1aZanB.<{C7[X5ئ,cׯ뽂TWZ\Y?~]6 M{"{q/ymQ)UYBT0dW …8rX@GagYԱ28MH'zz2Y߼ j*1Í}Ϳs%-?-o^~~do0m{/o-Df;;eC@wz_g8: 6xUĘ dGk{1&@9cD(OϮ}ї)Z'0e 4(L?=d<:<>) -Ubx)W{^yɭ/dK ۍsSQ3F]9;Ʊ@bE0v.ΑA$$fd4'A:TLwN>} XDs:RDq`=G_J+%/ca%yո-fP7 ˙ۇsqZ:N}ehZ@lha4 ;8K{) HPHzvS d-Od 8cbP^Ր=\|tŤ뼩L >?[/͠5& , >B|@/ҁM:NcHr {[omwIJmlF˗5G^7Dzbx6 z cG='1,0F/Xf_3^ ͽEX|{߽ׯ/V#".=VPDP 6N1Zaa[& |"ZlY1.rG]EAY|E#z FBlK"(H۲ilF- =*ίįDX)#F 7x)߀Ew0fFN\fdhUJm& B50qQj3ڬ ȸO x\ApF,QGU mh= (kef81Dfp6#8,Zc D>9OH)Ъ7U\r)~ςa"js"0g*A+фMIYB xapd²9*f֨̍7ɥ:?|Wm["s *Ȍe1mUkhU Y%aiF%/ܴE_h_>XkWPٽWmt+^TiΡNk]]wZCWJS\uNH\R냛n\x6R\\E΅k߳P}9]"ֲ8;^.{ew^6ukrIs\w,"eq=_?}:Oye4Ts9%?g̦44<킺û^k=7u}\=/p?]lp/{\[g1piܑe| pG7ޕappt{ŕ]d2KU\Rݻ7L޹ ,^TE^2n#Jo}?uOT+]Z]ep%$'wlr@dtXZRh13( e] JefG;YC2(mipz=7\rKW.r  xrKٚ%n#w=s̥!Ј7ܰepif?rw \D}8yo8>Ms9i=/yM.}OOi?2gˮej:$}JNV/.57]trp}p.M;č (L >AFQhhY@ӑrDZw: N)Fo~QR2+4{K!+-f>NBy*ŀ:,nyn7rGc\˵@Mot1+^vnAv ac9Ağ."]. LzShm7LtsF_/tJCB NG"}\t_\۶nѶ]p]^nlק+&zS%v:"f:r#e%CYTJ9D&;HTƑ"r r]L2ylWZU=.iTADZ.k!Lq:c\n!@-W; Jr;֧+JJʄR jpn7ۗǹ],|sIt\6M2Ufخ)G,\Y.o7/W/W"0s8Zψưf{Oz{ [QV]rVܳtϟx\;ߏjnwLw]Sgn,Z{>n\[]n˵Zǵ_2>?9 ]x7?]]VK_ע}@jaX\j*uQ0@na9>Vꉂ8+,́Πl%iJ<9~ L@1P8tn:%{ (@%3+ CY 3ig )ȕ7HKM*p n=0@}Dg[`[ m<䮓(NpvN۲$dۡkcU:8򣍖D]ns_09<ѦZ`eK*k &!i}0)*/nuOT@Yi'DsѴ@n}J- ^*蟉tϪ-gSD˓ W860~ 0yj1Υ 0{Q - 6nBxm5! 
Ok@]YOΥ?+`d?'"rƮS:}{~BD25(]p6΀_@"uUYeh H L8xފY ~;>+'`C!cyhS?ܥ S/97 [ߠ")hr Zh}o^|?ھSnzT8/ `Q %]R}y ^w@wO1-2vB'F8^Ocf V3P KrLf R)P_Z6٫RE>P.n J^qBDi}w Jцxl= t*@H2"l慰DM]‹PFlr%2\eK-150P>-&3VS;J+uUWQ?EQV_TV -G@<}Pļ  i@VDc} 2#Jş.AhC%| P!dgy;x/'Np E D.9- hYt%}}ϟHa\R.R`8=3;KiBk5ֆt-Q7Pؗ%0" [Z$x8sg0@@өj {d_aewZj%w~%>Z%:Q}.ܠARrwZvy{FII P?'WJJ?I+]PjQ}uP Bdb([>wu(a3ugK'O%»BN:?E8wBd+ZŠiCS|@d+ݟ%HypʹQ HH)ӓJ(x $ ?}SȉrD@iye(AwZbǢ{i]sѵ_G HvyO+`4| J.B6 +|6` 8w!}x9EID @D @D @D @D @D @D @, "@ "@ "@ "@ "@ "@ "@)@ "@ "@ "@ "@ "@ "@       o       |M%riIENDB`docker-1.10.3/docs/userguide/eng-image/000077500000000000000000000000001267010174400176225ustar00rootroot00000000000000docker-1.10.3/docs/userguide/eng-image/baseimages.md000066400000000000000000000060151267010174400222460ustar00rootroot00000000000000 # Create a base image So you want to create your own [*Base Image*](../../reference/glossary.md#base-image)? Great! The specific process will depend heavily on the Linux distribution you want to package. We have some examples below, and you are encouraged to submit pull requests to contribute new ones. ## Create a full image using tar In general, you'll want to start with a working machine that is running the distribution you'd like to package as a base image, though that is not required for some tools like Debian's [Debootstrap](https://wiki.debian.org/Debootstrap), which you can also use to build Ubuntu images. It can be as simple as this to create an Ubuntu base image: $ sudo debootstrap raring raring > /dev/null $ sudo tar -C raring -c . 
| docker import - raring a29c15f1bf7a $ docker run raring cat /etc/lsb-release DISTRIB_ID=Ubuntu DISTRIB_RELEASE=13.04 DISTRIB_CODENAME=raring DISTRIB_DESCRIPTION="Ubuntu 13.04" There are more example scripts for creating base images in the Docker GitHub Repo: - [BusyBox](https://github.com/docker/docker/blob/master/contrib/mkimage-busybox.sh) - CentOS / Scientific Linux CERN (SLC) [on Debian/Ubuntu]( https://github.com/docker/docker/blob/master/contrib/mkimage-rinse.sh) or [on CentOS/RHEL/SLC/etc.]( https://github.com/docker/docker/blob/master/contrib/mkimage-yum.sh) - [Debian / Ubuntu]( https://github.com/docker/docker/blob/master/contrib/mkimage-debootstrap.sh) ## Creating a simple base image using scratch You can use Docker's reserved, minimal image, `scratch`, as a starting point for building containers. Using the `scratch` "image" signals to the build process that you want the next command in the `Dockerfile` to be the first filesystem layer in your image. While `scratch` appears in Docker's repository on the hub, you can't pull it, run it, or tag any image with the name `scratch`. Instead, you can refer to it in your `Dockerfile`. For example, to create a minimal container using `scratch`: FROM scratch ADD hello / CMD ["/hello"] This example creates the hello-world image used in the tutorials. If you want to test it out, you can clone [the image repo](https://github.com/docker-library/hello-world) ## More resources There are lots more resources available to help you write your 'Dockerfile`. * There's a [complete guide to all the instructions](../../reference/builder.md) available for use in a `Dockerfile` in the reference section. * To help you write a clear, readable, maintainable `Dockerfile`, we've also written a [`Dockerfile` Best Practices guide](dockerfile_best-practices.md). * If your goal is to create a new Official Repository, be sure to read up on Docker's [Official Repositories](https://docs.docker.com/docker-hub/official_repos/). 
docker-1.10.3/docs/userguide/eng-image/dockerfile_best-practices.md000066400000000000000000000477201267010174400252550ustar00rootroot00000000000000 # Best practices for writing Dockerfiles Docker can build images automatically by reading the instructions from a `Dockerfile`, a text file that contains all the commands, in order, needed to build a given image. `Dockerfile`s adhere to a specific format and use a specific set of instructions. You can learn the basics on the [Dockerfile Reference](../../reference/builder.md) page. If you’re new to writing `Dockerfile`s, you should start there. This document covers the best practices and methods recommended by Docker, Inc. and the Docker community for creating easy-to-use, effective `Dockerfile`s. We strongly suggest you follow these recommendations (in fact, if you’re creating an Official Image, you *must* adhere to these practices). You can see many of these practices and recommendations in action in the [buildpack-deps `Dockerfile`](https://github.com/docker-library/buildpack-deps/blob/master/jessie/Dockerfile). > Note: for more detailed explanations of any of the Dockerfile commands >mentioned here, visit the [Dockerfile Reference](../../reference/builder.md) page. ## General guidelines and recommendations ### Containers should be ephemeral The container produced by the image your `Dockerfile` defines should be as ephemeral as possible. By “ephemeral,” we mean that it can be stopped and destroyed and a new one built and put in place with an absolute minimum of set-up and configuration. ### Use a .dockerignore file In most cases, it's best to put each Dockerfile in an empty directory. Then, add to that directory only the files needed for building the Dockerfile. To increase the build's performance, you can exclude files and directories by adding a `.dockerignore` file to that directory as well. This file supports exclusion patterns similar to `.gitignore` files. 
For information on creating one, see the [.dockerignore file](../../reference/builder.md#dockerignore-file). ### Avoid installing unnecessary packages In order to reduce complexity, dependencies, file sizes, and build times, you should avoid installing extra or unnecessary packages just because they might be “nice to have.” For example, you don’t need to include a text editor in a database image. ### Run only one process per container In almost all cases, you should only run a single process in a single container. Decoupling applications into multiple containers makes it much easier to scale horizontally and reuse containers. If that service depends on another service, make use of [container linking](../../userguide/networking/default_network/dockerlinks.md). ### Minimize the number of layers You need to find the balance between readability (and thus long-term maintainability) of the `Dockerfile` and minimizing the number of layers it uses. Be strategic and cautious about the number of layers you use. ### Sort multi-line arguments Whenever possible, ease later changes by sorting multi-line arguments alphanumerically. This will help you avoid duplication of packages and make the list much easier to update. This also makes PRs a lot easier to read and review. Adding a space before a backslash (`\`) helps as well. Here’s an example from the [`buildpack-deps` image](https://github.com/docker-library/buildpack-deps): RUN apt-get update && apt-get install -y \ bzr \ cvs \ git \ mercurial \ subversion ### Build cache During the process of building an image Docker will step through the instructions in your `Dockerfile` executing each in the order specified. As each instruction is examined Docker will look for an existing image in its cache that it can reuse, rather than creating a new (duplicate) image. If you do not want to use the cache at all you can use the ` --no-cache=true` option on the `docker build` command. 
However, if you do let Docker use its cache then it is very important to understand when it will, and will not, find a matching image. The basic rules that Docker will follow are outlined below: * Starting with a base image that is already in the cache, the next instruction is compared against all child images derived from that base image to see if one of them was built using the exact same instruction. If not, the cache is invalidated. * In most cases simply comparing the instruction in the `Dockerfile` with one of the child images is sufficient. However, certain instructions require a little more examination and explanation. * For the `ADD` and `COPY` instructions, the contents of the file(s) in the image are examined and a checksum is calculated for each file. The last-modified and last-accessed times of the file(s) are not considered in these checksums. During the cache lookup, the checksum is compared against the checksum in the existing images. If anything has changed in the file(s), such as the contents and metadata, then the cache is invalidated. * Aside from the `ADD` and `COPY` commands, cache checking will not look at the files in the container to determine a cache match. For example, when processing a `RUN apt-get -y update` command the files updated in the container will not be examined to determine if a cache hit exists. In that case just the command string itself will be used to find a match. Once the cache is invalidated, all subsequent `Dockerfile` commands will generate new images and the cache will not be used. ## The Dockerfile instructions Below you'll find recommendations for the best way to write the various instructions available for use in a `Dockerfile`. ### FROM [Dockerfile reference for the FROM instruction](../../reference/builder.md#from) Whenever possible, use current Official Repositories as the basis for your image. 
We recommend the [Debian image](https://registry.hub.docker.com/_/debian/) since it’s very tightly controlled and kept extremely minimal (currently under 100 mb), while still being a full distribution. ### RUN [Dockerfile reference for the RUN instruction](../../reference/builder.md#run) As always, to make your `Dockerfile` more readable, understandable, and maintainable, split long or complex `RUN` statements on multiple lines separated with backslashes. ### apt-get Probably the most common use-case for `RUN` is an application of `apt-get`. The `RUN apt-get` command, because it installs packages, has several gotchas to look out for. You should avoid `RUN apt-get upgrade` or `dist-upgrade`, as many of the “essential” packages from the base images won't upgrade inside an unprivileged container. If a package contained in the base image is out-of-date, you should contact its maintainers. If you know there’s a particular package, `foo`, that needs to be updated, use `apt-get install -y foo` to update automatically. Always combine `RUN apt-get update` with `apt-get install` in the same `RUN` statement, for example: RUN apt-get update && apt-get install -y \ package-bar \ package-baz \ package-foo Using `apt-get update` alone in a `RUN` statement causes caching issues and subsequent `apt-get install` instructions fail. For example, say you have a Dockerfile: FROM ubuntu:14.04 RUN apt-get update RUN apt-get install -y curl After building the image, all layers are in the Docker cache. Suppose you later modify `apt-get install` by adding extra package: FROM ubuntu:14.04 RUN apt-get update RUN apt-get install -y curl nginx Docker sees the initial and modified instructions as identical and reuses the cache from previous steps. As a result the `apt-get update` is *NOT* executed because the build uses the cached version. Because the `apt-get update` is not run, your build can potentially get an outdated version of the `curl` and `nginx` packages. 
Using `RUN apt-get update && apt-get install -y` ensures your Dockerfile installs the latest package versions with no further coding or manual intervention. This technique is known as "cache busting". You can also achieve cache-busting by specifying a package version. This is known as version pinning, for example: RUN apt-get update && apt-get install -y \ package-bar \ package-baz \ package-foo=1.3.* Version pinning forces the build to retrieve a particular version regardless of what’s in the cache. This technique can also reduce failures due to unanticipated changes in required packages. Below is a well-formed `RUN` instruction that demonstrates all the `apt-get` recommendations. RUN apt-get update && apt-get install -y \ aufs-tools \ automake \ build-essential \ curl \ dpkg-sig \ libcap-dev \ libsqlite3-dev \ mercurial \ reprepro \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.* \ && rm -rf /var/lib/apt/lists/* The `s3cmd` instructions specifies a version `1.1.0*`. If the image previously used an older version, specifying the new one causes a cache bust of `apt-get update` and ensure the installation of the new version. Listing packages on each line can also prevent mistakes in package duplication. In addition, cleaning up the apt cache and removing `/var/lib/apt/lists` helps keep the image size down. Since the `RUN` statement starts with `apt-get update`, the package cache will always be refreshed prior to `apt-get install`. > **Note**: The official Debian and Ubuntu images [automatically run `apt-get clean`](https://github.com/docker/docker/blob/03e2923e42446dbb830c654d0eec323a0b4ef02a/contrib/mkimage/debootstrap#L82-L105), > so explicit invocation is not required. ### CMD [Dockerfile reference for the CMD instruction](../../reference/builder.md#cmd) The `CMD` instruction should be used to run the software contained by your image, along with any arguments. `CMD` should almost always be used in the form of `CMD [“executable”, “param1”, “param2”…]`. 
Thus, if the image is for a service (Apache, Rails, etc.), you would run something like `CMD ["apache2","-DFOREGROUND"]`. Indeed, this form of the instruction is recommended for any service-based image. In most other cases, `CMD` should be given an interactive shell (bash, python, perl, etc), for example, `CMD ["perl", "-de0"]`, `CMD ["python"]`, or `CMD [“php”, “-a”]`. Using this form means that when you execute something like `docker run -it python`, you’ll get dropped into a usable shell, ready to go. `CMD` should rarely be used in the manner of `CMD [“param”, “param”]` in conjunction with [`ENTRYPOINT`](../../reference/builder.md#entrypoint), unless you and your expected users are already quite familiar with how `ENTRYPOINT` works. ### EXPOSE [Dockerfile reference for the EXPOSE instruction](../../reference/builder.md#expose) The `EXPOSE` instruction indicates the ports on which a container will listen for connections. Consequently, you should use the common, traditional port for your application. For example, an image containing the Apache web server would use `EXPOSE 80`, while an image containing MongoDB would use `EXPOSE 27017` and so on. For external access, your users can execute `docker run` with a flag indicating how to map the specified port to the port of their choice. For container linking, Docker provides environment variables for the path from the recipient container back to the source (ie, `MYSQL_PORT_3306_TCP`). ### ENV [Dockerfile reference for the ENV instruction](../../reference/builder.md#env) In order to make new software easier to run, you can use `ENV` to update the `PATH` environment variable for the software your container installs. For example, `ENV PATH /usr/local/nginx/bin:$PATH` will ensure that `CMD [“nginx”]` just works. The `ENV` instruction is also useful for providing required environment variables specific to services you wish to containerize, such as Postgres’s `PGDATA`. 
Lastly, `ENV` can also be used to set commonly used version numbers so that version bumps are easier to maintain, as seen in the following example: ENV PG_MAJOR 9.3 ENV PG_VERSION 9.3.4 RUN curl -SL http://example.com/postgres-$PG_VERSION.tar.xz | tar -xJC /usr/src/postgress && … ENV PATH /usr/local/postgres-$PG_MAJOR/bin:$PATH Similar to having constant variables in a program (as opposed to hard-coding values), this approach lets you change a single `ENV` instruction to auto-magically bump the version of the software in your container. ### ADD or COPY [Dockerfile reference for the ADD instruction](../../reference/builder.md#add)
[Dockerfile reference for the COPY instruction](../../reference/builder.md#copy) Although `ADD` and `COPY` are functionally similar, generally speaking, `COPY` is preferred. That’s because it’s more transparent than `ADD`. `COPY` only supports the basic copying of local files into the container, while `ADD` has some features (like local-only tar extraction and remote URL support) that are not immediately obvious. Consequently, the best use for `ADD` is local tar file auto-extraction into the image, as in `ADD rootfs.tar.xz /`. If you have multiple `Dockerfile` steps that use different files from your context, `COPY` them individually, rather than all at once. This will ensure that each step's build cache is only invalidated (forcing the step to be re-run) if the specifically required files change. For example: COPY requirements.txt /tmp/ RUN pip install --requirement /tmp/requirements.txt COPY . /tmp/ Results in fewer cache invalidations for the `RUN` step, than if you put the `COPY . /tmp/` before it. Because image size matters, using `ADD` to fetch packages from remote URLs is strongly discouraged; you should use `curl` or `wget` instead. That way you can delete the files you no longer need after they've been extracted and you won't have to add another layer in your image. For example, you should avoid doing things like: ADD http://example.com/big.tar.xz /usr/src/things/ RUN tar -xJf /usr/src/things/big.tar.xz -C /usr/src/things RUN make -C /usr/src/things all And instead, do something like: RUN mkdir -p /usr/src/things \ && curl -SL http://example.com/big.tar.xz \ | tar -xJC /usr/src/things \ && make -C /usr/src/things all For other items (files, directories) that do not require `ADD`’s tar auto-extraction capability, you should always use `COPY`. 
### ENTRYPOINT [Dockerfile reference for the ENTRYPOINT instruction](../../reference/builder.md#entrypoint) The best use for `ENTRYPOINT` is to set the image's main command, allowing that image to be run as though it was that command (and then use `CMD` as the default flags). Let's start with an example of an image for the command line tool `s3cmd`: ENTRYPOINT ["s3cmd"] CMD ["--help"] Now the image can be run like this to show the command's help: $ docker run s3cmd Or using the right parameters to execute a command: $ docker run s3cmd ls s3://mybucket This is useful because the image name can double as a reference to the binary as shown in the command above. The `ENTRYPOINT` instruction can also be used in combination with a helper script, allowing it to function in a similar way to the command above, even when starting the tool may require more than one step. For example, the [Postgres Official Image](https://registry.hub.docker.com/_/postgres/) uses the following script as its `ENTRYPOINT`: ```bash #!/bin/bash set -e if [ "$1" = 'postgres' ]; then chown -R postgres "$PGDATA" if [ -z "$(ls -A "$PGDATA")" ]; then gosu postgres initdb fi exec gosu postgres "$@" fi exec "$@" ``` > **Note**: > This script uses [the `exec` Bash command](http://wiki.bash-hackers.org/commands/builtin/exec) > so that the final running application becomes the container's PID 1. This allows > the application to receive any Unix signals sent to the container. > See the [`ENTRYPOINT`](../../reference/builder.md#entrypoint) > help for more details. The helper script is copied into the container and run via `ENTRYPOINT` on container start: COPY ./docker-entrypoint.sh / ENTRYPOINT ["/docker-entrypoint.sh"] This script allows the user to interact with Postgres in several ways. 
It can simply start Postgres: $ docker run postgres Or, it can be used to run Postgres and pass parameters to the server: $ docker run postgres postgres --help Lastly, it could also be used to start a totally different tool, such as Bash: $ docker run --rm -it postgres bash ### VOLUME [Dockerfile reference for the VOLUME instruction](../../reference/builder.md#volume) The `VOLUME` instruction should be used to expose any database storage area, configuration storage, or files/folders created by your docker container. You are strongly encouraged to use `VOLUME` for any mutable and/or user-serviceable parts of your image. ### USER [Dockerfile reference for the USER instruction](../../reference/builder.md#user) If a service can run without privileges, use `USER` to change to a non-root user. Start by creating the user and group in the `Dockerfile` with something like `RUN groupadd -r postgres && useradd -r -g postgres postgres`. > **Note:** Users and groups in an image get a non-deterministic > UID/GID in that the “next” UID/GID gets assigned regardless of image > rebuilds. So, if it’s critical, you should assign an explicit UID/GID. You should avoid installing or using `sudo` since it has unpredictable TTY and signal-forwarding behavior that can cause more problems than it solves. If you absolutely need functionality similar to `sudo` (e.g., initializing the daemon as root but running it as non-root), you may be able to use [“gosu”](https://github.com/tianon/gosu). Lastly, to reduce layers and complexity, avoid switching `USER` back and forth frequently. ### WORKDIR [Dockerfile reference for the WORKDIR instruction](../../reference/builder.md#workdir) For clarity and reliability, you should always use absolute paths for your `WORKDIR`. Also, you should use `WORKDIR` instead of proliferating instructions like `RUN cd … && do-something`, which are hard to read, troubleshoot, and maintain. 
### ONBUILD [Dockerfile reference for the ONBUILD instruction](../../reference/builder.md#onbuild) An `ONBUILD` command executes after the current `Dockerfile` build completes. `ONBUILD` executes in any child image derived `FROM` the current image. Think of the `ONBUILD` command as an instruction the parent `Dockerfile` gives to the child `Dockerfile`. A Docker build executes `ONBUILD` commands before any command in a child `Dockerfile`. `ONBUILD` is useful for images that are going to be built `FROM` a given image. For example, you would use `ONBUILD` for a language stack image that builds arbitrary user software written in that language within the `Dockerfile`, as you can see in [Ruby’s `ONBUILD` variants](https://github.com/docker-library/ruby/blob/master/2.1/onbuild/Dockerfile). Images built from `ONBUILD` should get a separate tag, for example: `ruby:1.9-onbuild` or `ruby:2.0-onbuild`. Be careful when putting `ADD` or `COPY` in `ONBUILD`. The “onbuild” image will fail catastrophically if the new build's context is missing the resource being added. Adding a separate tag, as recommended above, will help mitigate this by allowing the `Dockerfile` author to make a choice. 
## Examples for Official Repositories These Official Repositories have exemplary `Dockerfile`s: * [Go](https://registry.hub.docker.com/_/golang/) * [Perl](https://registry.hub.docker.com/_/perl/) * [Hy](https://registry.hub.docker.com/_/hylang/) * [Rails](https://registry.hub.docker.com/_/rails) ## Additional resources: * [Dockerfile Reference](../../reference/builder.md) * [More about Base Images](baseimages.md) * [More about Automated Builds](https://docs.docker.com/docker-hub/builds/) * [Guidelines for Creating Official Repositories](https://docs.docker.com/docker-hub/official_repos/) docker-1.10.3/docs/userguide/eng-image/image_management.md000066400000000000000000000054251267010174400234300ustar00rootroot00000000000000 # Image management The Docker Engine provides a client which you can use to create images on the command line or through a build process. You can run these images in a container or publish them for others to use. Storing the images you create, searching for images you might want, or publishing images others might use are all elements of image management. This section provides an overview of the major features and products Docker provides for image management. ## Docker Hub The [Docker Hub](https://docs.docker.com/docker-hub/) is responsible for centralizing information about user accounts, images, and public name spaces. It has different components: - Web UI - Meta-data store (comments, stars, list public repositories) - Authentication service - Tokenization There is only one instance of the Docker Hub, run and managed by Docker Inc. This public Hub is useful for most individuals and smaller companies. ## Docker Registry and the Docker Trusted Registry The Docker Registry is a component of Docker's ecosystem. A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. For example, the image `distribution/registry`, with tags `2.0` and `latest`. 
Users interact with a registry by using docker push and pull commands such as `docker pull myregistry.com/stevvooe/batman:voice`.
You'll learn how to use Engine to: * Dockerize your applications. * Run your own containers. * Build Docker images. * Share your Docker images with others. * And a whole lot more! This guide is broken into major sections that take you through learning the basics of Docker Engine and the other Docker products that support it. ## Dockerizing applications: A "Hello world" *How do I run applications inside containers?* Docker Engine offers a containerization platform to power your applications. To learn how to Dockerize applications and run them: Go to [Dockerizing Applications](containers/dockerizing.md). ## Working with containers *How do I manage my containers?* Once you get a grip on running your applications in Docker containers, you'll learn how to manage those containers. To find out about how to inspect, monitor and manage containers: Go to [Working With Containers](containers/usingdocker.md). ## Working with Docker images *How can I access, share and build my own images?* Once you've learnt how to use Docker it's time to take the next step and learn how to build your own application images with Docker. Go to [Working with Docker Images](containers/dockerimages.md). ## Networking containers Until now we've seen how to build individual applications inside Docker containers. Now learn how to build whole application stacks with Docker networking. Go to [Networking Containers](containers/networkingcontainers.md). ## Managing data in containers Now we know how to link Docker containers together the next step is learning how to manage data, volumes and mounts inside our containers. Go to [Managing Data in Containers](containers/dockervolumes.md). ## Docker products that complement Engine Often, one powerful technology spawns many other inventions that make that easier to get to, easier to use, and more powerful. These spawned things share one common characteristic: they augment the central technology. 
The following Docker products expand on the core Docker Engine functions. ### Docker Hub Docker Hub is the central hub for Docker. It hosts public Docker images and provides services to help you build and manage your Docker environment. To learn more: Go to [Using Docker Hub](https://docs.docker.com/docker-hub). ### Docker Machine Docker Machine helps you get Docker Engines up and running quickly. Machine can set up hosts for Docker Engines on your computer, on cloud providers, and/or in your data center, and then configure your Docker client to securely talk to them. Go to [Docker Machine user guide](https://docs.docker.com/machine/). ### Docker Compose Docker Compose allows you to define a application's components -- their containers, configuration, links and volumes -- in a single file. Then a single command will set everything up and start your application running. Go to [Docker Compose user guide](https://docs.docker.com/compose/). ### Docker Swarm Docker Swarm pools several Docker Engines together and exposes them as a single virtual Docker Engine. It serves the standard Docker API, so any tool that already works with Docker can now transparently scale up to multiple hosts. Go to [Docker Swarm user guide](https://docs.docker.com/swarm/). 
## Getting help * [Docker homepage](https://www.docker.com/) * [Docker Hub](https://hub.docker.com) * [Docker blog](https://blog.docker.com/) * [Docker documentation](https://docs.docker.com/) * [Docker Getting Started Guide](https://docs.docker.com/mac/started/) * [Docker code on GitHub](https://github.com/docker/docker) * [Docker mailing list](https://groups.google.com/forum/#!forum/docker-user) * Docker on IRC: irc.freenode.net and channel #docker * [Docker on Twitter](https://twitter.com/docker) * Get [Docker help](https://stackoverflow.com/search?q=docker) on StackOverflow * [Docker.com](https://www.docker.com/) docker-1.10.3/docs/userguide/labels-custom-metadata.md000066400000000000000000000171411267010174400226470ustar00rootroot00000000000000 # Apply custom metadata You can apply metadata to your images, containers, or daemons via labels. Labels serve a wide range of uses, such as adding notes or licensing information to an image, or to identify a host. A label is a `` / `` pair. Docker stores the label values as *strings*. You can specify multiple labels but each `` must be unique or the value will be overwritten. If you specify the same `key` several times but with different values, newer labels overwrite previous labels. Docker uses the last `key=value` you supply. >**Note:** Support for daemon-labels was added in Docker 1.4.1. Labels on >containers and images are new in Docker 1.6.0 ## Label keys (namespaces) Docker puts no hard restrictions on the `key` used for a label. However, using simple keys can easily lead to conflicts. For example, you have chosen to categorize your images by CPU architecture using "architecture" labels in your Dockerfiles: LABEL architecture="amd64" LABEL architecture="ARMv7" Another user may apply the same label based on a building's "architecture": LABEL architecture="Art Nouveau" To prevent naming conflicts, Docker recommends using namespaces to label keys using reverse domain notation. 
Use the following guidelines to name your keys: - All (third-party) tools should prefix their keys with the reverse DNS notation of a domain controlled by the author. For example, `com.example.some-label`. - The `com.docker.*`, `io.docker.*` and `org.dockerproject.*` namespaces are reserved for Docker's internal use. - Keys should only consist of lower-cased alphanumeric characters, dots and dashes (for example, `[a-z0-9-.]`). - Keys should start *and* end with an alpha numeric character. - Keys may not contain consecutive dots or dashes. - Keys *without* namespace (dots) are reserved for CLI use. This allows end- users to add metadata to their containers and images without having to type cumbersome namespaces on the command-line. These are simply guidelines and Docker does not *enforce* them. However, for the benefit of the community, you *should* use namespaces for your label keys. ## Store structured data in labels Label values can contain any data type as long as it can be represented as a string. For example, consider this JSON document: { "Description": "A containerized foobar", "Usage": "docker run --rm example/foobar [args]", "License": "GPL", "Version": "0.0.1-beta", "aBoolean": true, "aNumber" : 0.01234, "aNestedArray": ["a", "b", "c"] } You can store this struct in a label by serializing it to a string first: LABEL com.example.image-specs="{\"Description\":\"A containerized foobar\",\"Usage\":\"docker run --rm example\\/foobar [args]\",\"License\":\"GPL\",\"Version\":\"0.0.1-beta\",\"aBoolean\":true,\"aNumber\":0.01234,\"aNestedArray\":[\"a\",\"b\",\"c\"]}" While it is *possible* to store structured data in label values, Docker treats this data as a 'regular' string. This means that Docker doesn't offer ways to query (filter) based on nested properties. If your tool needs to filter on nested properties, the tool itself needs to implement this functionality. 
## Add labels to images To add labels to an image, use the `LABEL` instruction in your Dockerfile: LABEL [.]= ... The `LABEL` instruction adds a label to your image. A `LABEL` consists of a `` and a ``. Use an empty string for labels that don't have a ``, Use surrounding quotes or backslashes for labels that contain white space characters in the ``: LABEL vendor=ACME\ Incorporated LABEL com.example.version.is-beta= LABEL com.example.version.is-production="" LABEL com.example.version="0.0.1-beta" LABEL com.example.release-date="2015-02-12" The `LABEL` instruction also supports setting multiple `` / `` pairs in a single instruction: LABEL com.example.version="0.0.1-beta" com.example.release-date="2015-02-12" Long lines can be split up by using a backslash (`\`) as continuation marker: LABEL vendor=ACME\ Incorporated \ com.example.is-beta= \ com.example.is-production="" \ com.example.version="0.0.1-beta" \ com.example.release-date="2015-02-12" Docker recommends you add multiple labels in a single `LABEL` instruction. Using individual instructions for each label can result in an inefficient image. This is because each `LABEL` instruction in a Dockerfile produces a new IMAGE layer. You can view the labels via the `docker inspect` command: $ docker inspect 4fa6e0f0c678 ... "Labels": { "vendor": "ACME Incorporated", "com.example.is-beta": "", "com.example.is-production": "", "com.example.version": "0.0.1-beta", "com.example.release-date": "2015-02-12" } ... # Inspect labels on container $ docker inspect -f "{{json .Config.Labels }}" 4fa6e0f0c678 {"Vendor":"ACME Incorporated","com.example.is-beta":"", "com.example.is-production":"", "com.example.version":"0.0.1-beta","com.example.release-date":"2015-02-12"} # Inspect labels on images $ docker inspect -f "{{json .ContainerConfig.Labels }}" myimage ## Query labels Besides storing metadata, you can filter images and containers by label. 
To list all running containers that have the `com.example.is-beta` label: # List all running containers that have a `com.example.is-beta` label $ docker ps --filter "label=com.example.is-beta" List all running containers with the label `color` that have a value `blue`: $ docker ps --filter "label=color=blue" List all images with the label `vendor` that have the value `ACME`: $ docker images --filter "label=vendor=ACME" ## Container labels docker run \ -d \ --label com.example.group="webservers" \ --label com.example.environment="production" \ busybox \ top Please refer to the [Query labels](#query-labels) section above for information on how to query labels set on a container. ## Daemon labels docker daemon \ --dns 8.8.8.8 \ --dns 8.8.4.4 \ -H unix:///var/run/docker.sock \ --label com.example.environment="production" \ --label com.example.storage="ssd" These labels appear as part of the `docker info` output for the daemon: $ docker -D info Containers: 12 Running: 5 Paused: 2 Stopped: 5 Images: 672 Server Version: 1.9.0 Storage Driver: aufs Root Dir: /var/lib/docker/aufs Backing Filesystem: extfs Dirs: 697 Dirperm1 Supported: true Execution Driver: native-0.2 Logging Driver: json-file Kernel Version: 3.19.0-22-generic Operating System: Ubuntu 15.04 CPUs: 24 Total Memory: 62.86 GiB Name: docker ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S Debug mode (server): true File Descriptors: 59 Goroutines: 159 System Time: 2015-09-23T14:04:20.699842089+08:00 EventsListeners: 0 Init SHA1: Init Path: /usr/bin/docker Docker Root Dir: /var/lib/docker Http Proxy: http://test:test@localhost:8080 Https Proxy: https://test:test@localhost:8080 WARNING: No swap limit support Username: svendowideit Registry: [https://index.docker.io/v1/] Labels: com.example.environment=production com.example.storage=ssd 
docker-1.10.3/docs/userguide/networking/000077500000000000000000000000001267010174400201605ustar00rootroot00000000000000docker-1.10.3/docs/userguide/networking/configure-dns.md000066400000000000000000000130611267010174400232460ustar00rootroot00000000000000 # Embedded DNS server in user-defined networks The information in this section covers the embedded DNS server operation for containers in user-defined networks. DNS lookup for containers connected to user-defined networks works differently compared to the containers connected to `default bridge` network. > **Note**: In order to maintain backward compatibility, the DNS configuration > in `default bridge` network is retained with no behaviorial change. > Please refer to the [DNS in default bridge network](default_network/configure-dns.md) > for more information on DNS configuration in the `default bridge` network. As of Docker 1.10, the docker daemon implements an embedded DNS server which provides built-in service discovery for any container created with a valid `name` or `net-alias` or aliased by `link`. The exact details of how Docker manages the DNS configurations inside the container can change from one Docker version to the next. So you should not assume the way the files such as `/etc/hosts`, `/etc/resolv.conf` are managed inside the containers and leave the files alone and use the following Docker options instead. Various container options that affect container domain name services.

--name=CONTAINER-NAME

Container name configured using --name is used to discover a container within an user-defined docker network. The embedded DNS server maintains the mapping between the container name and its IP address (on the network the container is connected to).

--net-alias=ALIAS

In addition to --name as described above, a container is discovered by one or more of its configured --net-alias (or --alias in docker network connect command) within the user-defined network. The embedded DNS server maintains the mapping between all of the container aliases and its IP address on a specific user-defined network. A container can have different aliases in different networks by using the --alias option in docker network connect command.

--link=CONTAINER_NAME:ALIAS

Using this option as you run a container gives the embedded DNS an extra entry named ALIAS that points to the IP address of the container identified by CONTAINER_NAME. When using --link the embedded DNS will guarantee that localized lookup result only on that container where the --link is used. This lets processes inside the new container connect to container without without having to know its name or IP.

--dns=[IP_ADDRESS...]

The IP addresses passed via the --dns option are used by the embedded DNS server to forward the DNS query if the embedded DNS server is unable to resolve a name resolution request from the containers.

--dns-search=DOMAIN...

Sets the domain names that are searched when a bare unqualified hostname is used inside of the container. These --dns-search options are managed by the embedded DNS server and will not be updated in the container's /etc/resolv.conf file. When a container process attempts to access host and the search domain example.com is set, for instance, the DNS logic will not only look up host but also host.example.com.

--dns-opt=OPTION...

Sets the options used by DNS resolvers. These options are managed by the embedded DNS server and will not be updated in the container's /etc/resolv.conf file.

See documentation for resolv.conf for a list of valid options

In the absence of the `--dns=IP_ADDRESS...`, `--dns-search=DOMAIN...`, or `--dns-opt=OPTION...` options, Docker uses the `/etc/resolv.conf` of the host machine (where the `docker` daemon runs). While doing so the daemon filters out all localhost IP address `nameserver` entries from the host's original file. Filtering is necessary because all localhost addresses on the host are unreachable from the container's network. After this filtering, if there are no more `nameserver` entries left in the container's `/etc/resolv.conf` file, the daemon adds public Google DNS nameservers (8.8.8.8 and 8.8.4.4) to the container's DNS configuration. If IPv6 is enabled on the daemon, the public IPv6 Google DNS nameservers will also be added (2001:4860:4860::8888 and 2001:4860:4860::8844). > **Note**: If you need access to a host's localhost resolver, you must modify > your DNS service on the host to listen on a non-localhost address that is > reachable from within the container. docker-1.10.3/docs/userguide/networking/default_network/000077500000000000000000000000001267010174400233555ustar00rootroot00000000000000docker-1.10.3/docs/userguide/networking/default_network/binding.md000066400000000000000000000111241267010174400253100ustar00rootroot00000000000000 # Bind container ports to the host The information in this section explains binding container ports within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. > **Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network. By default Docker containers can make connections to the outside world, but the outside world cannot connect to containers. Each outgoing connection will appear to originate from one of the host machine's own IP addresses thanks to an `iptables` masquerading rule on the host machine that the Docker server creates when it starts: ``` $ sudo iptables -t nat -L -n ... 
Chain POSTROUTING (policy ACCEPT) target prot opt source destination MASQUERADE all -- 172.17.0.0/16 0.0.0.0/0 ... ``` The Docker server creates a masquerade rule that let containers connect to IP addresses in the outside world. If you want containers to accept incoming connections, you will need to provide special options when invoking `docker run`. There are two approaches. First, you can supply `-P` or `--publish-all=true|false` to `docker run` which is a blanket operation that identifies every port with an `EXPOSE` line in the image's `Dockerfile` or `--expose ` commandline flag and maps it to a host port somewhere within an _ephemeral port range_. The `docker port` command then needs to be used to inspect created mapping. The _ephemeral port range_ is configured by `/proc/sys/net/ipv4/ip_local_port_range` kernel parameter, typically ranging from 32768 to 61000. Mapping can be specified explicitly using `-p SPEC` or `--publish=SPEC` option. It allows you to particularize which port on docker server - which can be any port at all, not just one within the _ephemeral port range_ -- you want mapped to which port in the container. Either way, you should be able to peek at what Docker has accomplished in your network stack by examining your NAT tables. ``` # What your NAT rules might look like when Docker # is finished setting up a -P forward: $ iptables -t nat -L -n ... Chain DOCKER (2 references) target prot opt source destination DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 tcp dpt:49153 to:172.17.0.2:80 # What your NAT rules might look like when Docker # is finished setting up a -p 80:80 forward: Chain DOCKER (2 references) target prot opt source destination DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 to:172.17.0.2:80 ``` You can see that Docker has exposed these container ports on `0.0.0.0`, the wildcard IP address that will match any possible incoming port on the host machine. 
If you want to be more restrictive and only allow container services to be contacted through a specific external interface on the host machine, you have two choices. When you invoke `docker run` you can use either `-p IP:host_port:container_port` or `-p IP::port` to specify the external interface for one particular binding. Or if you always want Docker port forwards to bind to one specific IP address, you can edit your system-wide Docker server settings and add the option `--ip=IP_ADDRESS`. Remember to restart your Docker server after editing this setting. > **Note**: With hairpin NAT enabled (`--userland-proxy=false`), containers port exposure is achieved purely through iptables rules, and no attempt to bind the exposed port is ever made. This means that nothing prevents shadowing a previously listening service outside of Docker through exposing the same port for a container. In such conflicting situation, Docker created iptables rules will take precedence and route to the container. The `--userland-proxy` parameter, true by default, provides a userland implementation for inter-container and outside-to-container communication. When disabled, Docker uses both an additional `MASQUERADE` iptable rule and the `net.ipv4.route_localnet` kernel parameter which allow the host machine to connect to a local container exposed port through the commonly used loopback address: this alternative is preferred for performance reasons. ## Related information - [Understand Docker container networks](../dockernetworks.md) - [Work with network commands](../work-with-networks.md) - [Legacy container links](dockerlinks.md) docker-1.10.3/docs/userguide/networking/default_network/build-bridges.md000066400000000000000000000055221267010174400264170ustar00rootroot00000000000000 # Build your own bridge This section explains how to build your own bridge to replace the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. 
> **Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network. You can set up your own bridge before starting Docker and use `-b BRIDGE` or `--bridge=BRIDGE` to tell Docker to use your bridge instead. If you already have Docker up and running with its default `docker0` still configured, you can directly create your bridge and restart Docker with it or want to begin by stopping the service and removing the interface: ``` # Stopping Docker and removing docker0 $ sudo service docker stop $ sudo ip link set dev docker0 down $ sudo brctl delbr docker0 $ sudo iptables -t nat -F POSTROUTING ``` Then, before starting the Docker service, create your own bridge and give it whatever configuration you want. Here we will create a simple enough bridge that we really could just have used the options in the previous section to customize `docker0`, but it will be enough to illustrate the technique. ``` # Create our own bridge $ sudo brctl addbr bridge0 $ sudo ip addr add 192.168.5.1/24 dev bridge0 $ sudo ip link set dev bridge0 up # Confirming that our bridge is up and running $ ip addr show bridge0 4: bridge0: mtu 1500 qdisc noop state UP group default link/ether 66:38:d0:0d:76:18 brd ff:ff:ff:ff:ff:ff inet 192.168.5.1/24 scope global bridge0 valid_lft forever preferred_lft forever # Tell Docker about it and restart (on Ubuntu) $ echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker $ sudo service docker start # Confirming new outgoing NAT masquerade is set up $ sudo iptables -t nat -L -n ... Chain POSTROUTING (policy ACCEPT) target prot opt source destination MASQUERADE all -- 192.168.5.0/24 0.0.0.0/0 ``` The result should be that the Docker server starts successfully and is now prepared to bind containers to the new bridge. 
After pausing to verify the bridge's configuration, try creating a container -- you will see that its IP address is in your new IP address range, which Docker will have auto-detected. You can use the `brctl show` command to see Docker add and remove interfaces from the bridge as you start and stop containers, and can run `ip addr` and `ip route` inside a container to see that it has been given an address in the bridge's IP address range and has been told to use the Docker host's IP address on the bridge as its default gateway to the rest of the Internet. docker-1.10.3/docs/userguide/networking/default_network/configure-dns.md000066400000000000000000000163171267010174400264520ustar00rootroot00000000000000 # Configure container DNS The information in this section explains configuring container DNS within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. > **Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network. Please refer to the [Docker Embedded DNS](../configure-dns.md) section for more information on DNS configurations in user-defined networks. How can Docker supply each container with a hostname and DNS configuration, without having to build a custom image with the hostname written inside? Its trick is to overlay three crucial `/etc` files inside the container with virtual files where it can write fresh information. You can see this by running `mount` inside a container: ``` $$ mount ... /dev/disk/by-uuid/1fec...ebdf on /etc/hostname type ext4 ... /dev/disk/by-uuid/1fec...ebdf on /etc/hosts type ext4 ... /dev/disk/by-uuid/1fec...ebdf on /etc/resolv.conf type ext4 ... ... ``` This arrangement allows Docker to do clever things like keep `resolv.conf` up to date across all containers when the host machine receives new configuration over DHCP later. 
The exact details of how Docker maintains these files inside the container can change from one Docker version to the next, so you should leave the files themselves alone and use the following Docker options instead. Four different options affect container domain name services.

-h HOSTNAME or --hostname=HOSTNAME

Sets the hostname by which the container knows itself. This is written into /etc/hostname, into /etc/hosts as the name of the container's host-facing IP address, and is the name that /bin/bash inside the container will display inside its prompt. But the hostname is not easy to see from outside the container. It will not appear in docker ps nor in the /etc/hosts file of any other container.

--link=CONTAINER_NAME or ID:ALIAS

Using this option as you run a container gives the new container's /etc/hosts an extra entry named ALIAS that points to the IP address of the container identified by CONTAINER_NAME_or_ID. This lets processes inside the new container connect to the hostname ALIAS without having to know its IP. The --link= option is discussed in more detail below. Because Docker may assign a different IP address to the linked containers on restart, Docker updates the ALIAS entry in the /etc/hosts file of the recipient containers.

--dns=IP_ADDRESS...

Sets the IP addresses added as nameserver lines to the container's /etc/resolv.conf file.

--dns-search=DOMAIN...

Sets the domain names that are searched when a bare unqualified hostname is used inside of the container, by writing search lines into the container's /etc/resolv.conf. When a container process attempts to access host and the search domain example.com is set, for instance, the DNS logic will not only look up host but also host.example.com.

Use --dns-search=. if you don't wish to set the search domain.

--dns-opt=OPTION...

Sets the options used by DNS resolvers by writing an options line into the container's /etc/resolv.conf.

See documentation for resolv.conf for a list of valid options

Regarding DNS settings, in the absence of the `--dns=IP_ADDRESS...`, `--dns-search=DOMAIN...`, or `--dns-opt=OPTION...` options, Docker makes each container's `/etc/resolv.conf` look like the `/etc/resolv.conf` of the host machine (where the `docker` daemon runs). When creating the container's `/etc/resolv.conf`, the daemon filters out all localhost IP address `nameserver` entries from the host's original file. Filtering is necessary because all localhost addresses on the host are unreachable from the container's network. After this filtering, if there are no more `nameserver` entries left in the container's `/etc/resolv.conf` file, the daemon adds public Google DNS nameservers (8.8.8.8 and 8.8.4.4) to the container's DNS configuration. If IPv6 is enabled on the daemon, the public IPv6 Google DNS nameservers will also be added (2001:4860:4860::8888 and 2001:4860:4860::8844). > **Note**: If you need access to a host's localhost resolver, you must modify your DNS service on the host to listen on a non-localhost address that is reachable from within the container. You might wonder what happens when the host machine's `/etc/resolv.conf` file changes. The `docker` daemon has a file change notifier active which will watch for changes to the host DNS configuration. > **Note**: The file change notifier relies on the Linux kernel's inotify feature. Because this feature is currently incompatible with the overlay filesystem driver, a Docker daemon using "overlay" will not be able to take advantage of the `/etc/resolv.conf` auto-update feature. When the host file changes, all stopped containers which have a matching `resolv.conf` to the host will be updated immediately to this newest host configuration. Containers which are running when the host configuration changes will need to stop and start to pick up the host changes due to lack of a facility to ensure atomic writes of the `resolv.conf` file while the container is running. 
If the container's `resolv.conf` has been edited since it was started with the default configuration, no replacement will be attempted as it would overwrite the changes performed by the container. If the options (`--dns`, `--dns-search`, or `--dns-opt`) have been used to modify the default host configuration, then the replacement with an updated host's `/etc/resolv.conf` will not happen as well. > **Note**: For containers which were created prior to the implementation of the `/etc/resolv.conf` update feature in Docker 1.5.0: those containers will **not** receive updates when the host `resolv.conf` file changes. Only containers created with Docker 1.5.0 and above will utilize this auto-update feature. docker-1.10.3/docs/userguide/networking/default_network/container-communication.md000066400000000000000000000133521267010174400305300ustar00rootroot00000000000000 # Understand container communication The information in this section explains container communication within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. **Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network. ## Communicating to the outside world Whether a container can talk to the world is governed by two factors. The first factor is whether the host machine is forwarding its IP packets. The second is whether the host's `iptables` allow this particular connection. IP packet forwarding is governed by the `ip_forward` system parameter. Packets can only pass between containers if this parameter is `1`. Usually you will simply leave the Docker server at its default setting `--ip-forward=true` and Docker will go set `ip_forward` to `1` for you when the server starts up. If you set `--ip-forward=false` and your system's kernel has it enabled, the `--ip-forward=false` option has no effect. 
To check the setting on your kernel or to turn it on manually: ``` $ sysctl net.ipv4.conf.all.forwarding net.ipv4.conf.all.forwarding = 0 $ sysctl net.ipv4.conf.all.forwarding=1 $ sysctl net.ipv4.conf.all.forwarding net.ipv4.conf.all.forwarding = 1 ``` Many using Docker will want `ip_forward` to be on, to at least make communication _possible_ between containers and the wider world. May also be needed for inter-container communication if you are in a multiple bridge setup. Docker will never make changes to your system `iptables` rules if you set `--iptables=false` when the daemon starts. Otherwise the Docker server will append forwarding rules to the `DOCKER` filter chain. Docker will not delete or modify any pre-existing rules from the `DOCKER` filter chain. This allows the user to create in advance any rules required to further restrict access to the containers. Docker's forward rules permit all external source IPs by default. To allow only a specific IP or network to access the containers, insert a negated rule at the top of the `DOCKER` filter chain. For example, to restrict external access such that _only_ source IP 8.8.8.8 can access the containers, the following rule could be added: ``` $ iptables -I DOCKER -i ext_if ! -s 8.8.8.8 -j DROP ``` ## Communication between containers Whether two containers can communicate is governed, at the operating system level, by two factors. - Does the network topology even connect the containers' network interfaces? By default Docker will attach all containers to a single `docker0` bridge, providing a path for packets to travel between them. See the later sections of this document for other possible topologies. - Do your `iptables` allow this particular connection? Docker will never make changes to your system `iptables` rules if you set `--iptables=false` when the daemon starts. 
Otherwise the Docker server will add a default rule to the `FORWARD` chain with a blanket `ACCEPT` policy if you retain the default `--icc=true`, or else will set the policy to `DROP` if `--icc=false`. It is a strategic question whether to leave `--icc=true` or change it to `--icc=false` so that `iptables` will protect other containers -- and the main host -- from having arbitrary ports probed or accessed by a container that gets compromised. If you choose the most secure setting of `--icc=false`, then how can containers communicate in those cases where you _want_ them to provide each other services? The answer is the `--link=CONTAINER_NAME_or_ID:ALIAS` option, which was mentioned in the previous section because of its effect upon name services. If the Docker daemon is running with both `--icc=false` and `--iptables=true` then, when it sees `docker run` invoked with the `--link=` option, the Docker server will insert a pair of `iptables` `ACCEPT` rules so that the new container can connect to the ports exposed by the other container -- the ports that it mentioned in the `EXPOSE` lines of its `Dockerfile`. > **Note**: The value `CONTAINER_NAME` in `--link=` must either be an auto-assigned Docker name like `stupefied_pare` or else the name you assigned with `--name=` when you ran `docker run`. It cannot be a hostname, which Docker will not recognize in the context of the `--link=` option. You can run the `iptables` command on your Docker host to see whether the `FORWARD` chain has a default policy of `ACCEPT` or `DROP`: ``` # When --icc=false, you should see a DROP rule: $ sudo iptables -L -n ... Chain FORWARD (policy ACCEPT) target prot opt source destination DOCKER all -- 0.0.0.0/0 0.0.0.0/0 DROP all -- 0.0.0.0/0 0.0.0.0/0 ... # When a --link= has been created under --icc=false, # you should see port-specific ACCEPT rules overriding # the subsequent DROP policy for all other packets: $ sudo iptables -L -n ... 
Chain FORWARD (policy ACCEPT) target prot opt source destination DOCKER all -- 0.0.0.0/0 0.0.0.0/0 DROP all -- 0.0.0.0/0 0.0.0.0/0 Chain DOCKER (1 references) target prot opt source destination ACCEPT tcp -- 172.17.0.2 172.17.0.3 tcp spt:80 ACCEPT tcp -- 172.17.0.3 172.17.0.2 tcp dpt:80 ``` > **Note**: Docker is careful that its host-wide `iptables` rules fully expose containers to each other's raw IP addresses, so connections from one container to another should always appear to be originating from the first container's own IP address. docker-1.10.3/docs/userguide/networking/default_network/custom-docker0.md000066400000000000000000000073411267010174400265430ustar00rootroot00000000000000 # Customize the docker0 bridge The information in this section explains how to customize the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. **Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network. By default, the Docker server creates and configures the host system's `docker0` interface as an _Ethernet bridge_ inside the Linux kernel that can pass packets back and forth between other physical or virtual network interfaces so that they behave as a single Ethernet network. Docker configures `docker0` with an IP address, netmask and IP allocation range. The host machine can both receive and send packets to containers connected to the bridge, and gives it an MTU -- the _maximum transmission unit_ or largest packet length that the interface will allow -- of 1,500 bytes. These options are configurable at server startup: - `--bip=CIDR` -- supply a specific IP address and netmask for the `docker0` bridge, using standard CIDR notation like `192.168.1.5/24`. - `--fixed-cidr=CIDR` -- restrict the IP range from the `docker0` subnet, using the standard CIDR notation like `172.167.1.0/28`. 
This range must be an IPv4 range for fixed IPs (ex: 10.20.0.0/16) and must be a subset of the bridge IP range (`docker0` or set using `--bridge`). For example with `--fixed-cidr=192.168.1.0/25`, IPs for your containers will be chosen from the first half of `192.168.1.0/24` subnet. - `--mtu=BYTES` -- override the maximum packet length on `docker0`. Once you have one or more containers up and running, you can confirm that Docker has properly connected them to the `docker0` bridge by running the `brctl` command on the host machine and looking at the `interfaces` column of the output. Here is a host with two different containers connected: ``` # Display bridge info $ sudo brctl show bridge name bridge id STP enabled interfaces docker0 8000.3a1d7362b4ee no veth65f9 vethdda6 ``` If the `brctl` command is not installed on your Docker host, then on Ubuntu you should be able to run `sudo apt-get install bridge-utils` to install it. Finally, the `docker0` Ethernet bridge settings are used every time you create a new container. Docker selects a free IP address from the range available on the bridge each time you `docker run` a new container, and configures the container's `eth0` interface with that IP address and the bridge's netmask. The Docker host's own IP address on the bridge is used as the default gateway by which each container reaches the rest of the Internet. 
``` # The network, as seen from a container $ docker run -i -t --rm base /bin/bash $$ ip addr show eth0 24: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 link/ether 32:6f:e0:35:57:91 brd ff:ff:ff:ff:ff:ff inet 172.17.0.3/16 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::306f:e0ff:fe35:5791/64 scope link valid_lft forever preferred_lft forever $$ ip route default via 172.17.42.1 dev eth0 172.17.0.0/16 dev eth0 proto kernel scope link src 172.17.0.3 $$ exit ``` Remember that the Docker host will not be willing to forward container packets out on to the Internet unless its `ip_forward` system setting is `1` -- see the section on [Communicating to the outside world](container-communication.md#communicating-to-the-outside-world) for details. docker-1.10.3/docs/userguide/networking/default_network/dockerlinks.md000066400000000000000000000365051267010174400262200ustar00rootroot00000000000000 # Legacy container links The information in this section explains legacy container links within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. Before the [Docker networks feature](../dockernetworks.md), you could use the Docker link feature to allow containers to discover each other and securely transfer information about one container to another container. With the introduction of the Docker networks feature, you can still create links but they behave differently between default `bridge` network and [user defined networks](../work-with-networks.md#linking-containers-in-user-defined-networks) This section briefly discusses connecting via a network port and then goes into detail on container linking in default `bridge` network. 
## Connect using network port mapping In [the Using Docker section](../../containers/usingdocker.md), you created a container that ran a Python Flask application: $ docker run -d -P training/webapp python app.py > **Note:** > Containers have an internal network and an IP address > (as we saw when we used the `docker inspect` command to show the container's > IP address in the [Using Docker](../../containers/usingdocker.md) section). > Docker can have a variety of network configurations. You can see more > information on Docker networking [here](../index.md). When that container was created, the `-P` flag was used to automatically map any network port inside it to a random high port within an *ephemeral port range* on your Docker host. Next, when `docker ps` was run, you saw that port 5000 in the container was bound to port 49155 on the host. $ docker ps nostalgic_morse CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES bc533791f3f5 training/webapp:latest python app.py 5 seconds ago Up 2 seconds 0.0.0.0:49155->5000/tcp nostalgic_morse You also saw how you can bind a container's ports to a specific port using the `-p` flag. Here port 80 of the host is mapped to port 5000 of the container: $ docker run -d -p 80:5000 training/webapp python app.py And you saw why this isn't such a great idea because it constrains you to only one container on that specific port. Instead, you may specify a range of host ports to bind a container port to that is different than the default *ephemeral port range*: $ docker run -d -p 8000-9000:5000 training/webapp python app.py This would bind port 5000 in the container to a randomly available port between 8000 and 9000 on the host. There are also a few other ways you can configure the `-p` flag. By default the `-p` flag will bind the specified port to all interfaces on the host machine. But you can also specify a binding to a specific interface, for example only to the `localhost`. 
$ docker run -d -p 127.0.0.1:80:5000 training/webapp python app.py This would bind port 5000 inside the container to port 80 on the `localhost` or `127.0.0.1` interface on the host machine. Or, to bind port 5000 of the container to a dynamic port but only on the `localhost`, you could use: $ docker run -d -p 127.0.0.1::5000 training/webapp python app.py You can also bind UDP ports by adding a trailing `/udp`. For example: $ docker run -d -p 127.0.0.1:80:5000/udp training/webapp python app.py You also learned about the useful `docker port` shortcut which showed us the current port bindings. This is also useful for showing you specific port configurations. For example, if you've bound the container port to the `localhost` on the host machine, then the `docker port` output will reflect that. $ docker port nostalgic_morse 5000 127.0.0.1:49155 > **Note:** > The `-p` flag can be used multiple times to configure multiple ports. ## Connect with the linking system > **Note**: > This section covers the legacy link feature in the default `bridge` network. > Please refer to [linking containers in user-defined networks] > (../work-with-networks.md#linking-containers-in-user-defined-networks) > for more information on links in user-defined networks. Network port mappings are not the only way Docker containers can connect to one another. Docker also has a linking system that allows you to link multiple containers together and send connection information from one to another. When containers are linked, information about a source container can be sent to a recipient container. This allows the recipient to see selected data describing aspects of the source container. ### The importance of naming To establish links, Docker relies on the names of your containers. You've already seen that each container you create has an automatically created name; indeed you've become familiar with our old friend `nostalgic_morse` during this guide. You can also name containers yourself. 
This naming provides two useful functions: 1. It can be useful to name containers that do specific functions in a way that makes it easier for you to remember them, for example naming a container containing a web application `web`. 2. It provides Docker with a reference point that allows it to refer to other containers, for example, you can specify to link the container `web` to container `db`. You can name your container by using the `--name` flag, for example: $ docker run -d -P --name web training/webapp python app.py This launches a new container and uses the `--name` flag to name the container `web`. You can see the container's name using the `docker ps` command. $ docker ps -l CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES aed84ee21bde training/webapp:latest python app.py 12 hours ago Up 2 seconds 0.0.0.0:49154->5000/tcp web You can also use `docker inspect` to return the container's name. > **Note:** > Container names have to be unique. That means you can only call > one container `web`. If you want to re-use a container name you must delete > the old container (with `docker rm`) before you can create a new > container with the same name. As an alternative you can use the `--rm` > flag with the `docker run` command. This will delete the container > immediately after it is stopped. ## Communication across links Links allow containers to discover each other and securely transfer information about one container to another container. When you set up a link, you create a conduit between a source container and a recipient container. The recipient can then access select data about the source. To create a link, you use the `--link` flag. First, create a new container, this time one containing a database. $ docker run -d --name db training/postgres This creates a new container called `db` from the `training/postgres` image, which contains a PostgreSQL database. 
Now, you need to delete the `web` container you created previously so you can replace it with a linked one: $ docker rm -f web Now, create a new `web` container and link it with your `db` container. $ docker run -d -P --name web --link db:db training/webapp python app.py This will link the new `web` container with the `db` container you created earlier. The `--link` flag takes the form: --link :alias Where `name` is the name of the container we're linking to and `alias` is an alias for the link name. You'll see how that alias gets used shortly. The `--link` flag also takes the form: --link In which case the alias will match the name. You could have written the previous example as: $ docker run -d -P --name web --link db training/webapp python app.py Next, inspect your linked containers with `docker inspect`: $ docker inspect -f "{{ .HostConfig.Links }}" web [/db:/web/db] You can see that the `web` container is now linked to the `db` container `web/db`. Which allows it to access information about the `db` container. So what does linking the containers actually do? You've learned that a link allows a source container to provide information about itself to a recipient container. In our example, the recipient, `web`, can access information about the source `db`. To do this, Docker creates a secure tunnel between the containers that doesn't need to expose any ports externally on the container; you'll note when we started the `db` container we did not use either the `-P` or `-p` flags. That's a big benefit of linking: we don't need to expose the source container, here the PostgreSQL database, to the network. Docker exposes connectivity information for the source container to the recipient container in two ways: * Environment variables, * Updating the `/etc/hosts` file. ### Environment variables Docker creates several environment variables when you link containers. Docker automatically creates environment variables in the target container based on the `--link` parameters. 
It will also expose all environment variables originating from Docker from the source container. These include variables from: * the `ENV` commands in the source container's Dockerfile * the `-e`, `--env` and `--env-file` options on the `docker run` command when the source container is started These environment variables enable programmatic discovery from within the target container of information related to the source container. > **Warning**: > It is important to understand that *all* environment variables originating > from Docker within a container are made available to *any* container > that links to it. This could have serious security implications if sensitive > data is stored in them. Docker sets an `_NAME` environment variable for each target container listed in the `--link` parameter. For example, if a new container called `web` is linked to a database container called `db` via `--link db:webdb`, then Docker creates a `WEBDB_NAME=/web/webdb` variable in the `web` container. Docker also defines a set of environment variables for each port exposed by the source container. Each variable has a unique prefix in the form: `_PORT__` The components in this prefix are: * the alias `` specified in the `--link` parameter (for example, `webdb`) * the `` number exposed * a `` which is either TCP or UDP Docker uses this prefix format to define three distinct environment variables: * The `prefix_ADDR` variable contains the IP Address from the URL, for example `WEBDB_PORT_5432_TCP_ADDR=172.17.0.82`. * The `prefix_PORT` variable contains just the port number from the URL for example `WEBDB_PORT_5432_TCP_PORT=5432`. * The `prefix_PROTO` variable contains just the protocol from the URL for example `WEBDB_PORT_5432_TCP_PROTO=tcp`. If the container exposes multiple ports, an environment variable set is defined for each one. This means, for example, if a container exposes 4 ports that Docker creates 12 environment variables, 3 for each port. 
Additionally, Docker creates an environment variable called `_PORT`. This variable contains the URL of the source container's first exposed port. The 'first' port is defined as the exposed port with the lowest number. For example, consider the `WEBDB_PORT=tcp://172.17.0.82:5432` variable. If that port is used for both tcp and udp, then the tcp one is specified. Finally, Docker also exposes each Docker originated environment variable from the source container as an environment variable in the target. For each variable Docker creates an `_ENV_` variable in the target container. The variable's value is set to the value Docker used when it started the source container. Returning back to our database example, you can run the `env` command to list the specified container's environment variables. ``` $ docker run --rm --name web2 --link db:db training/webapp env . . . DB_NAME=/web2/db DB_PORT=tcp://172.17.0.5:5432 DB_PORT_5432_TCP=tcp://172.17.0.5:5432 DB_PORT_5432_TCP_PROTO=tcp DB_PORT_5432_TCP_PORT=5432 DB_PORT_5432_TCP_ADDR=172.17.0.5 . . . ``` You can see that Docker has created a series of environment variables with useful information about the source `db` container. Each variable is prefixed with `DB_`, which is populated from the `alias` you specified above. If the `alias` were `db1`, the variables would be prefixed with `DB1_`. You can use these environment variables to configure your applications to connect to the database on the `db` container. The connection will be secure and private; only the linked `web` container will be able to talk to the `db` container. ### Important notes on Docker environment variables Unlike host entries in the [`/etc/hosts` file](#updating-the-etchosts-file), IP addresses stored in the environment variables are not automatically updated if the source container is restarted. We recommend using the host entries in `/etc/hosts` to resolve the IP address of linked containers. 
These environment variables are only set for the first process in the container. Some daemons, such as `sshd`, will scrub them when spawning shells for connection. ### Updating the `/etc/hosts` file In addition to the environment variables, Docker adds a host entry for the source container to the `/etc/hosts` file. Here's an entry for the `web` container: $ docker run -t -i --rm --link db:webdb training/webapp /bin/bash root@aed84ee21bde:/opt/webapp# cat /etc/hosts 172.17.0.7 aed84ee21bde . . . 172.17.0.5 webdb 6e5cdeb2d300 db You can see two relevant host entries. The first is an entry for the `web` container that uses the Container ID as a host name. The second entry uses the link alias to reference the IP address of the `db` container. In addition to the alias you provide, the linked container's name--if unique from the alias provided to the `--link` parameter--and the linked container's hostname will also be added in `/etc/hosts` for the linked container's IP address. You can ping that host now via any of these entries: root@aed84ee21bde:/opt/webapp# apt-get install -yqq inetutils-ping root@aed84ee21bde:/opt/webapp# ping webdb PING webdb (172.17.0.5): 48 data bytes 56 bytes from 172.17.0.5: icmp_seq=0 ttl=64 time=0.267 ms 56 bytes from 172.17.0.5: icmp_seq=1 ttl=64 time=0.250 ms 56 bytes from 172.17.0.5: icmp_seq=2 ttl=64 time=0.256 ms > **Note:** > In the example, you'll note you had to install `ping` because it was not included > in the container initially. Here, you used the `ping` command to ping the `db` container using its host entry, which resolves to `172.17.0.5`. You can use this host entry to configure an application to make use of your `db` container. > **Note:** > You can link multiple recipient containers to a single source. For > example, you could have multiple (differently named) web containers attached to your >`db` container. 
If you restart the source container, the linked containers `/etc/hosts` files will be automatically updated with the source container's new IP address, allowing linked communication to continue. $ docker restart db db $ docker run -t -i --rm --link db:db training/webapp /bin/bash root@aed84ee21bde:/opt/webapp# cat /etc/hosts 172.17.0.7 aed84ee21bde . . . 172.17.0.9 db # Related information docker-1.10.3/docs/userguide/networking/default_network/images/000077500000000000000000000000001267010174400246225ustar00rootroot00000000000000docker-1.10.3/docs/userguide/networking/default_network/images/ipv6_basic_host_config.gliffy000066400000000000000000000132711267010174400324370ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":414,"height":127,"nodeIndex":173,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":8.5,"y":0.5},"max":{"x":413.75,"y":126.5}},"objects":[{"x":6.5,"y":106.0,"rotation":0.0,"id":9,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":19.5,"y":9.0,"rotation":0.0,"id":7,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":31.5,"y":23.5,"rotation":0.0,"id":4,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":5,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":11.75,"y":0.5,"rotation":0.0,"id":60,"width":402.0,"height":126.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":146.5,"y":83.0,"rotation":0.0,"id":164,"width":249.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:1::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":146.5,"y":27.5,"rotation":0.0,"id":73,"width":249.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#fff2cc","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":true,"shadow":true}},"lineStyles":{"global":{"stroke":"#d9d9d9"}},"textStyles":{"global":{"size":"12px","color":"#b7b7b7"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/default_network/images/ipv6_basic_host_config.svg000066400000000000000000000740021267010174400317550ustar00rootroot00000000000000Host2eth02001:db8::1/64docker0fe80::1/64ip -6routeadddefaultviafe80::1deveth0ip 
-6routeadd2001:db8:1::/64devdocker0docker-1.10.3/docs/userguide/networking/default_network/images/ipv6_ndp_proxying.gliffy000066400000000000000000000521751267010174400315220ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":616,"height":438,"nodeIndex":207,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":3,"y":-7.75},"max":{"x":615.5,"y":437.5}},"objects":[{"x":173.0,"y":117.0,"rotation":0.0,"id":190,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":30,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":186,"py":0.0,"px":0.2928932188134524}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":"4.0,4.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[120.21067811865476,-7.0],[335.78932188134524,57.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":195.0,"y":117.0,"rotation":0.0,"id":83,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":222.5,"y":35.0,"rotation":0.0,"id":0,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#fff2cc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":26.0,"y":109.0,"rotation":0.0,"id":33,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":6,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":2,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[225.78932188134524,0.9999999999999858],[57.710678118654755,65.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":20.289321881345245,"y":150.0,"rotation":0.0,"id":32,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":0.0,"px":0.2928932188134524}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":0,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[333.5,24.5],[272.9213562373095,-40.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":271.0,"y":37.0,"rotation":0.0,"id":89,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"
type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#d9d9d9","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[1.5,-2.0],[1.5,-21.125],[1.5,-21.125],[1.5,-40.25]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":151.0,"y":115.0,"rotation":0.0,"id":183,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":0,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":179,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[121.5,-5.0],[62.5,59.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":455.5,"y":257.0,"rotation":0.0,"id":200,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":200,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":5,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

expected Container location

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":467.5,"y":156.0,"rotation":0.0,"id":185,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::c00y/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":479.5,"y":174.5,"rotation":0.0,"id":186,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e2e2e2","gradient":false,"dashStyle":"2,2","dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":187,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container x

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":151.5,"y":156.0,"rotation":0.0,"id":178,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::b001/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":163.5,"y":174.5,"rotation":0.0,"id":179,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":180,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":299.5,"y":257.0,"rotation":0.0,"id":9,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":317.5,"y":156.0,"rotation":0.0,"id":7,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::c001/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":1.0,"y":156.0,"rotation":0.0,"id":6,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::a001/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":324.5,"y":174.5,"rotation":0.0,"id":4,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":5,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host3

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":13.0,"y":174.5,"rotation":0.0,"id":2,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":-142.5,"y":118.5,"rotation":0.0,"id":31,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":25,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[537.7106781186548,131.0],[602.0,204.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":-181.5,"y":122.5,"rotation":0.0,"id":30,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":27,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[535.2893218813452,127.0],[473.0,200.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":386.0,"y":306.0,"rotation":0.0,"id":78,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft"
:2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::c00a/125

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":218.0,"y":306.0,"rotation":0.0,"id":77,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":21,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8::c009/125

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":409.5,"y":323.0,"rotation":0.0,"id":25,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":26,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":241.5,"y":323.0,"rotation":0.0,"id":27,"width":99.99999999999999,"height":99.99999999999999,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":16,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":28,"width":95.99999999999999,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":207.75,"y":297.5,"rotation":0.0,"id":58,"width":339.75,"height":140.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#e2e2e2","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":false,"shadow":true}},"lineStyles":{"global":{"stroke":"#cccccc","strokeWidth":2,"dashStyle":"4.0,4.0"}},"textStyles":{"global":{"size":"12px","italic":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/default_network/images/ipv6_ndp_proxying.svg000066400000000000000000002047221267010174400310360ustar00rootroot00000000000000RouterHost1Host3eth02001:db8::a001/64eth02001:db8::c001/64docker0fe80::1/64Container1Container2eth02001:db8::c009/125eth02001:db8::c00a/125eth02001:db8::1/64Host2eth02001:db8::b001/64Containerxeth02001:db8::c00y/64expectedContainerlocationdocker-1.10.3/docs/userguide/networking/default_network/images/ipv6_routed_network_example.gliffy000066400000000
000000000000717211267010174400335660ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":893,"height":447,"nodeIndex":185,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":-17.000680271168676,"y":7},"max":{"x":892.767693574114,"y":447}},"objects":[{"x":17.5,"y":205.5,"rotation":0.0,"id":167,"width":238.5,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:1::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":231.28932188134524,"y":95.0,"rotation":0.0,"id":120,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":6,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":0.0,"px":0.2928932188134524}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":131,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[267.5,47.5],[217.9213562373095,-13.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":187.0,"y":206.5,"rotation":0.0,"id":121,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":140,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":148,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[130.28932188134524,11.0],[-79.0,91.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":174.0,"y":217.5,"rotation":0.0,"id":122,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositio
nConstraint","StartPositionConstraint":{"nodeId":140,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":146,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[164.0,0.0],[120.0,81.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":33.50000000000003,"y":409.0,"rotation":0.0,"id":123,"width":346.49999999999994,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":3.5000000000000284,"y":268.5,"rotation":0.0,"id":124,"width":411.00000000000006,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":237.0,"y":54.0,"rotation":0.0,"id":125,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":131,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":140,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[170.78932188134524,27.999999999999986],[121.71067811865476,88.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":378.5,"y":7.0,"rotation":0.0,"id":131,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e2e2e2","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":132,"width":96.0,"height":13.0,"uid":null,"order":"auto
","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Layer 2 Switch

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":785.0,"y":195.0,"rotation":0.0,"id":136,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":32,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":143,"py":0.6187943262411347,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"8.0,8.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[78.75000000000011,-0.25],[-798.0006802711687,-3.410605131648481E-13]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":262.0,"y":224.0,"rotation":0.0,"id":138,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":278.0,"y":126.0,"rotation":0.0,"id":139,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:0::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":288.0,"y":142.5,"rotation":0.0,"id":140,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":141,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":3.4999999999999716,"y":107.5,"rotation":0.0,"id":142,"width":411.0,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":221.0,"y":283.0,"rotation":0.0,"id":144,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:1::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":34.000000000000014,"y":283.0,"rotation":0.0,"id":145,"width":149.99999999999997,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:1::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":244.0,"y":299.0,"rotation":0.0,"id":146,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":147,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":58.0,"y":298.0,"rotation":0.0,"id":148,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":20,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":149,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":317.0,"y":436.5,"rotation":0.0,"id":158,"width":223.00000000000003,"height":11.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

containers' link-local addresses are not displayed

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":17.5,"y":148.0,"rotation":0.0,"id":137,"width":291.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:0::/64 dev eth0

ip -6 route add 2001:db8:2::/64 via 2001:db8:0::2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":901.7500000000001,"y":195.0,"rotation":0.0,"id":172,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-12.982306425886122,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":670.0,"y":284.0,"rotation":0.0,"id":155,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:2::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":479.0,"y":284.0,"rotation":0.0,"id":150,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:2::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":488.75,"y":408.0,"rotation":0.0,"id":152,"width":339.75,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":694.5,"y":298.0,"rotation":0.0,"id":156,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":157,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":501.5,"y":298.0,"rotation":0.0,"id":153,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":25,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":154,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":444.5,"y":223.0,"rotation":0.0,"id":160,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":460.5,"y":128.0,"rotation":0.0,"id":159,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:0::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":469.5,"y":142.5,"rotation":0.0,"id":161,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":162,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":139.5,"y":86.5,"rotation":0.0,"id":126,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":156,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[400.71067811865476,131.0],[605.0,211.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":100.5,"y":90.5,"rotation":0.0,"id":127,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":153,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[419.0,127.0],[451.0,207.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":447.75,"y":268.5,"rotation":0.0,"id":151,"width":416.0000000000001,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor
":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":447.75,"y":107.5,"rotation":0.0,"id":143,"width":416.0000000000001,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":795.7500000000001,"y":307.5,"rotation":270.0,"id":173,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

managed by Docker

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":879.7500000000001,"y":417.0,"rotation":0.0,"id":174,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,14.008510484195028],[0.0,-221.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":898.7500000000001,"y":432.0,"rotation":0.0,"id":171,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.981657549458532,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":582.5,"y":151.0,"rotation":0.0,"id":135,"width":285.25000000000017,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:0::/64 dev eth0

ip -6 route add 2001:db8:1::/64 via 2001:db8:0::1 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":583.0,"y":204.0,"rotation":0.0,"id":168,"width":272.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:2::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#e2e2e2","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":true,"shadow":true}},"lineStyles":{"global":{"stroke":"#000000","strokeWidth":1,"dashStyle":"8.0,8.0"}},"textStyles":{}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/default_network/images/ipv6_routed_network_example.svg000066400000000000000000003006561267010174400331070ustar00rootroot00000000000000Layer 2 SwitchHost1Host2eth0 2001:db8:0::1/64eth0 2001:db8:0::2/64docker0 fe80::1/64docker0 fe80::1/64Container1-1Container1-2eth0 2001:db8:1::1/64Container2-1Container2-2ip -6 route add 2001:db8:0::/64 dev eth0ip -6 route add 2001:db8:2::/64 via 2001:db8:0::2ip -6 route add default via fe80::1 dev eth0ip -6 route add default via fe80::1 dev eth0ip -6 route add 2001:db8:0::/64 dev eth0ip -6 route add 2001:db8:1::/64 via 2001:db8:0::1 eth0 2001:db8:1::2/64eth0 2001:db8:2::1/64eth0 2001:db8:2::2/64containers' link-local addresses are not displayedip -6 route add 2001:db8:1::/64 dev docker0ip -6 route add 2001:db8:2::/64 dev docker0managed by 
Dockerdocker-1.10.3/docs/userguide/networking/default_network/images/ipv6_slash64_subnet_config.gliffy000066400000000000000000000347421267010174400331730ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":550,"height":341,"nodeIndex":88,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":2.5,"y":2.5},"max":{"x":550,"y":341}},"objects":[{"x":10.5,"y":53.5,"rotation":0.0,"id":74,"width":150.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":37.0,"y":2.5,"rotation":0.0,"id":72,"width":100.0,"height":46.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#d9d9d9","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":73,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":89.5,"y":83.5,"rotation":0.0,"id":59,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Routed Network:
2001:db8:23:42::/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":313.0,"y":314.0,"rotation":0.0,"id":39,"width":235.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":352.0,"y":185.5,"rotation":0.0,"id":36,"width":169.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:23:42:1::2/80

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":351.0,"y":49.5,"rotation":0.0,"id":29,"width":171.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:23:42:1::1/80

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":382.1250000000001,"y":202.5,"rotation":0.0,"id":30,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":31,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":382.0,"y":65.5,"rotation":0.0,"id":32,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":10,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":33,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":15.125000000000057,"y":264.0,"rotation":0.0,"id":20,"width":273.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

ip -6 route add 2001:db8:23:42:1::/80 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":120.0,"y":178.5,"rotation":0.0,"id":21,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":13.0,"y":132.5,"rotation":0.0,"id":22,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:23:42::1/80

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":38.0,"y":149.0,"rotation":0.0,"id":23,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":24,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

host1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":-118.0,"y":123.0,"rotation":0.0,"id":44,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":23,"py":0.7071067811865475,"px":0.9999999999999998}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":30,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[255.99999999999997,79.03300858899107],[500.1250000000001,129.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":-138.0,"y":129.0,"rotation":0.0,"id":43,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":23,"py":0.29289321881345237,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":32,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[276.0,41.966991411008934],[520.0,-13.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":313.0,"y":40.0,"rotation":0.0,"id":34,"width":237.00000000000003,"height":301.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangl
e.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":87.0,"y":150.0,"rotation":0.0,"id":58,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":23,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":72,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.0,-1.0],[0.0,-101.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":2.5,"y":118.50000000000001,"rotation":0.0,"id":25,"width":292.0,"height":178.99999999999997,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#cccccc"}},"textStyles":{"global":{"bold":true,"italic":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v1.default","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ne
twork.network_v3.home","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/default_network/images/ipv6_slash64_subnet_config.svg000066400000000000000000002237401267010174400325100ustar00rootroot00000000000000host1eth02001:db8:23:42::1/80docker0fe80::1/64ip -6routeadddefaultviafe80::1deveth0ip-6routeadd2001:db8:23:42:1::/80devdocker0container1-1container1-2eth02001:db8:23:42:1::1/80eth02001:db8:23:42:1::2/80ip-6routeadddefaultviafe80::1deveth0RoutedNetwork:2001:db8:23:42::/64Routerfe80::1/64docker-1.10.3/docs/userguide/networking/default_network/images/ipv6_switched_network_example.gliffy000066400000000000000000000717231267010174400341000ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":893,"height":448,"nodeIndex":185,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":-17.000680271168676,"y":7},"max":{"x":892.767693574114,"y":447.5}},"objects":[{"x":17.5,"y":205.5,"rotation":0.0,"id":167,"width":238.5,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:1::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":231.28932188134524,"y":95.0,"rotation":0.0,"id":120,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":6,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":0.0,"px":0.2928932188134524}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":131,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[267.5,47.5],[217.9213562373095,-13.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":187.0,"y":206.5,"rotation":0.0,"id":121,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":140,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":148,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[130.28932188134524,11.0],[-79.0,91.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":174.0,"y":217.5,"rotation":0.0,"id":122,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositio
nConstraint","StartPositionConstraint":{"nodeId":140,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":146,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[164.0,0.0],[120.0,81.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":33.50000000000003,"y":409.0,"rotation":0.0,"id":123,"width":346.49999999999994,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":3.5000000000000284,"y":268.5,"rotation":0.0,"id":124,"width":411.00000000000006,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":237.0,"y":54.0,"rotation":0.0,"id":125,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":131,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":140,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[170.78932188134524,27.999999999999986],[121.71067811865476,88.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":378.5,"y":7.0,"rotation":0.0,"id":131,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e2e2e2","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":132,"width":96.0,"height":14.0,"uid":null,"order":"auto
","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Level 2 Switch

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":785.0,"y":195.0,"rotation":0.0,"id":136,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":32,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":143,"py":0.6187943262411347,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"8.0,8.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[78.75000000000011,-0.25],[-798.0006802711687,-3.410605131648481E-13]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":262.0,"y":224.0,"rotation":0.0,"id":138,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":278.0,"y":126.0,"rotation":0.0,"id":139,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:0::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":288.0,"y":142.5,"rotation":0.0,"id":140,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":141,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":3.4999999999999716,"y":107.5,"rotation":0.0,"id":142,"width":411.0,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":221.0,"y":283.0,"rotation":0.0,"id":144,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:1::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":34.000000000000014,"y":283.0,"rotation":0.0,"id":145,"width":149.99999999999997,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:1::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":244.0,"y":299.0,"rotation":0.0,"id":146,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":147,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":58.0,"y":298.0,"rotation":0.0,"id":148,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":20,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":149,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container1-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":317.0,"y":436.5,"rotation":0.0,"id":158,"width":223.00000000000003,"height":11.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

containers' link-local addresses are not displayed

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":17.5,"y":148.0,"rotation":0.0,"id":137,"width":291.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:0::/64 dev eth0

ip -6 route add 2001:db8:2::/64 via 2001:db8:0::2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":901.7500000000001,"y":195.0,"rotation":0.0,"id":172,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-12.982306425886122,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":670.0,"y":284.0,"rotation":0.0,"id":155,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:2::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":479.0,"y":284.0,"rotation":0.0,"id":150,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:2::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":488.75,"y":408.0,"rotation":0.0,"id":152,"width":339.75,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add default via fe80::1 dev eth0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":694.5,"y":298.0,"rotation":0.0,"id":156,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":157,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2-2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":501.5,"y":298.0,"rotation":0.0,"id":153,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":25,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":154,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container2-1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":444.5,"y":223.0,"rotation":0.0,"id":160,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker0 fe80::1/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":460.5,"y":128.0,"rotation":0.0,"id":159,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 2001:db8:0::2/64

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":469.5,"y":142.5,"rotation":0.0,"id":161,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":162,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":139.5,"y":86.5,"rotation":0.0,"id":126,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":156,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[400.71067811865476,131.0],[605.0,211.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":100.5,"y":90.5,"rotation":0.0,"id":127,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":153,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[419.0,127.0],[451.0,207.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":447.75,"y":268.5,"rotation":0.0,"id":151,"width":416.0000000000001,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor
":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":447.75,"y":107.5,"rotation":0.0,"id":143,"width":416.0000000000001,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":795.7500000000001,"y":307.5,"rotation":270.0,"id":173,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

managed by Docker

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":879.7500000000001,"y":417.0,"rotation":0.0,"id":174,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,14.008510484195028],[0.0,-221.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":898.7500000000001,"y":432.0,"rotation":0.0,"id":171,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.981657549458532,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":582.5,"y":151.0,"rotation":0.0,"id":135,"width":285.25000000000017,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:0::/64 dev eth0

ip -6 route add 2001:db8:1::/64 via 2001:db8:0::1 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":583.0,"y":204.0,"rotation":0.0,"id":168,"width":272.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

ip -6 route add 2001:db8:2::/64 dev docker0

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#e2e2e2","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":true,"shadow":true}},"lineStyles":{"global":{"stroke":"#000000","strokeWidth":1,"dashStyle":"8.0,8.0"}},"textStyles":{}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/default_network/images/ipv6_switched_network_example.svg000066400000000000000000005365211267010174400334210ustar00rootroot00000000000000Level2SwitchHost1Host2eth02001:db8:0::1/64eth02001:db8:0::2/64docker0fe80::1/64docker0fe80::1/64Container1-1Container1-2eth02001:db8:1::1/64Container2-1Container2-2ip -6routeadd2001:db8:0::/64deveth0ip -6routeadd2001:db8:2::/64via2001:db8:0::2ip-6routeadddefaultviafe80::1deveth0ip-6routeadddefaultviafe80::1deveth0ip -6routeadd2001:db8:0::/64deveth0ip -6routeadd2001:db8:1::/64via2001:db8:0::1eth02001:db8:1::2/64eth02001:db8:2::1/64eth02001:db8:2::2/64containers'link-localaddressesarenotdisplayedip -6routeadd2001:db8:1::/64devdocker0ip -6routeadd2001:db8:2::/64devdocker0managedbyDockerdocker-1.10.3/docs/userguide/networking/default_network/index.md000066400000000000000000000016431267010174400250120ustar00rootroot00000000000000 # Docker default bridge network With the introduction of the Docker networks feature, you can 
create your own user-defined networks. The Docker default bridge is created when you install Docker Engine. It is a `bridge` network and is also named `bridge`. The topics in this section are related to interacting with that default bridge network. - [Understand container communication](container-communication.md) - [Legacy container links](dockerlinks.md) - [Binding container ports to the host](binding.md) - [Build your own bridge](build-bridges.md) - [Configure container DNS](configure-dns.md) - [Customize the docker0 bridge](custom-docker0.md) - [IPv6 with Docker](ipv6.md) docker-1.10.3/docs/userguide/networking/default_network/ipv6.md000066400000000000000000000261631267010174400245730ustar00rootroot00000000000000 # IPv6 with Docker The information in this section explains IPv6 with the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. As we are [running out of IPv4 addresses](http://en.wikipedia.org/wiki/IPv4_address_exhaustion) the IETF has standardized an IPv4 successor, [Internet Protocol Version 6](http://en.wikipedia.org/wiki/IPv6) , in [RFC 2460](https://www.ietf.org/rfc/rfc2460.txt). Both protocols, IPv4 and IPv6, reside on layer 3 of the [OSI model](http://en.wikipedia.org/wiki/OSI_model). ## How IPv6 works on Docker By default, the Docker server configures the container network for IPv4 only. You can enable IPv4/IPv6 dualstack support by running the Docker daemon with the `--ipv6` flag. Docker will set up the bridge `docker0` with the IPv6 [link-local address](http://en.wikipedia.org/wiki/Link-local_address) `fe80::1`. By default, containers that are created will only get a link-local IPv6 address. To assign globally routable IPv6 addresses to your containers you have to specify an IPv6 subnet to pick the addresses from. 
Set the IPv6 subnet via the `--fixed-cidr-v6` parameter when starting Docker daemon: ``` docker daemon --ipv6 --fixed-cidr-v6="2001:db8:1::/64" ``` The subnet for Docker containers should at least have a size of `/80`. This way an IPv6 address can end with the container's MAC address and you prevent NDP neighbor cache invalidation issues in the Docker layer. With the `--fixed-cidr-v6` parameter set Docker will add a new route to the routing table. Further IPv6 routing will be enabled (you may prevent this by starting Docker daemon with `--ip-forward=false`): ``` $ ip -6 route add 2001:db8:1::/64 dev docker0 $ sysctl net.ipv6.conf.default.forwarding=1 $ sysctl net.ipv6.conf.all.forwarding=1 ``` All traffic to the subnet `2001:db8:1::/64` will now be routed via the `docker0` interface. Be aware that IPv6 forwarding may interfere with your existing IPv6 configuration: If you are using Router Advertisements to get IPv6 settings for your host's interfaces you should set `accept_ra` to `2`. Otherwise IPv6 enabled forwarding will result in rejecting Router Advertisements. E.g., if you want to configure `eth0` via Router Advertisements you should set: ``` $ sysctl net.ipv6.conf.eth0.accept_ra=2 ``` ![](images/ipv6_basic_host_config.svg) Every new container will get an IPv6 address from the defined subnet. 
Further a default route will be added on `eth0` in the container via the address specified by the daemon option `--default-gateway-v6` if present, otherwise via `fe80::1`: ``` docker run -it ubuntu bash -c "ip -6 addr show dev eth0; ip -6 route show" 15: eth0: mtu 1500 inet6 2001:db8:1:0:0:242:ac11:3/64 scope global valid_lft forever preferred_lft forever inet6 fe80::42:acff:fe11:3/64 scope link valid_lft forever preferred_lft forever 2001:db8:1::/64 dev eth0 proto kernel metric 256 fe80::/64 dev eth0 proto kernel metric 256 default via fe80::1 dev eth0 metric 1024 ``` In this example the Docker container is assigned a link-local address with the network suffix `/64` (here: `fe80::42:acff:fe11:3/64`) and a globally routable IPv6 address (here: `2001:db8:1:0:0:242:ac11:3/64`). The container will create connections to addresses outside of the `2001:db8:1::/64` network via the link-local gateway at `fe80::1` on `eth0`. Often servers or virtual machines get a `/64` IPv6 subnet assigned (e.g. `2001:db8:23:42::/64`). In this case you can split it up further and provide Docker a `/80` subnet while using a separate `/80` subnet for other applications on the host: ![](images/ipv6_slash64_subnet_config.svg) In this setup the subnet `2001:db8:23:42::/80` with a range from `2001:db8:23:42:0:0:0:0` to `2001:db8:23:42:0:ffff:ffff:ffff` is attached to `eth0`, with the host listening at `2001:db8:23:42::1`. The subnet `2001:db8:23:42:1::/80` with an address range from `2001:db8:23:42:1:0:0:0` to `2001:db8:23:42:1:ffff:ffff:ffff` is attached to `docker0` and will be used by containers. ### Using NDP proxying If your Docker host is only part of an IPv6 subnet but has not got an IPv6 subnet assigned you can use NDP proxying to connect your containers via IPv6 to the internet. 
For example your host has the IPv6 address `2001:db8::c001`, is part of the subnet `2001:db8::/64` and your IaaS provider allows you to configure the IPv6 addresses `2001:db8::c000` to `2001:db8::c00f`: ``` $ ip -6 addr show 1: lo: mtu 65536 inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: mtu 1500 qlen 1000 inet6 2001:db8::c001/64 scope global valid_lft forever preferred_lft forever inet6 fe80::601:3fff:fea1:9c01/64 scope link valid_lft forever preferred_lft forever ``` Let's split up the configurable address range into two subnets `2001:db8::c000/125` and `2001:db8::c008/125`. The first one can be used by the host itself, the latter by Docker: ``` docker daemon --ipv6 --fixed-cidr-v6 2001:db8::c008/125 ``` You notice the Docker subnet is within the subnet managed by your router that is connected to `eth0`. This means all devices (containers) with the addresses from the Docker subnet are expected to be found within the router subnet. Therefore the router thinks it can talk to these containers directly. ![](images/ipv6_ndp_proxying.svg) As soon as the router wants to send an IPv6 packet to the first container it will transmit a neighbor solicitation request, asking, who has `2001:db8::c009`? But it will get no answer because no one on this subnet has this address. The container with this address is hidden behind the Docker host. The Docker host has to listen to neighbor solicitation requests for the container address and send a response that itself is the device that is responsible for the address. This is done by a Kernel feature called `NDP Proxy`. You can enable it by executing ``` $ sysctl net.ipv6.conf.eth0.proxy_ndp=1 ``` Now you can add the container's IPv6 address to the NDP proxy table: ``` $ ip -6 neigh add proxy 2001:db8::c009 dev eth0 ``` This command tells the Kernel to answer to incoming neighbor solicitation requests regarding the IPv6 address `2001:db8::c009` on the device `eth0`. 
As a consequence of this all traffic to this IPv6 address will go into the Docker host and it will forward it according to its routing table via the `docker0` device to the container network: ``` $ ip -6 route show 2001:db8::c008/125 dev docker0 metric 1 2001:db8::/64 dev eth0 proto kernel metric 256 ``` You have to execute the `ip -6 neigh add proxy ...` command for every IPv6 address in your Docker subnet. Unfortunately there is no functionality for adding a whole subnet by executing one command. An alternative approach would be to use an NDP proxy daemon such as [ndppd](https://github.com/DanielAdolfsson/ndppd). ## Docker IPv6 cluster ### Switched network environment Using routable IPv6 addresses allows you to realize communication between containers on different hosts. Let's have a look at a simple Docker IPv6 cluster example: ![](images/ipv6_switched_network_example.svg) The Docker hosts are in the `2001:db8:0::/64` subnet. Host1 is configured to provide addresses from the `2001:db8:1::/64` subnet to its containers. It has three routes configured: - Route all traffic to `2001:db8:0::/64` via `eth0` - Route all traffic to `2001:db8:1::/64` via `docker0` - Route all traffic to `2001:db8:2::/64` via Host2 with IP `2001:db8::2` Host1 also acts as a router on OSI layer 3. When one of the network clients tries to contact a target that is specified in Host1's routing table Host1 will forward the traffic accordingly. It acts as a router for all networks it knows: `2001:db8::/64`, `2001:db8:1::/64` and `2001:db8:2::/64`. On Host2 we have nearly the same configuration. Host2's containers will get IPv6 addresses from `2001:db8:2::/64`. 
Host2 has three routes configured: - Route all traffic to `2001:db8:0::/64` via `eth0` - Route all traffic to `2001:db8:2::/64` via `docker0` - Route all traffic to `2001:db8:1::/64` via Host1 with IP `2001:db8:0::1` The difference to Host1 is that the network `2001:db8:2::/64` is directly attached to the host via its `docker0` interface whereas it reaches `2001:db8:1::/64` via Host1's IPv6 address `2001:db8::1`. This way every container is able to contact every other container. The containers `Container1-*` share the same subnet and contact each other directly. The traffic between `Container1-*` and `Container2-*` will be routed via Host1 and Host2 because those containers do not share the same subnet. In a switched environment every host has to know all routes to every subnet. You always have to update the hosts' routing tables once you add or remove a host to the cluster. Every configuration in the diagram that is shown below the dashed line is handled by Docker: The `docker0` bridge IP address configuration, the route to the Docker subnet on the host, the container IP addresses and the routes on the containers. The configuration above the line is up to the user and can be adapted to the individual environment. ### Routed network environment In a routed network environment you replace the layer 2 switch with a layer 3 router. Now the hosts just have to know their default gateway (the router) and the route to their own containers (managed by Docker). The router holds all routing information about the Docker subnets. When you add or remove a host to this environment you just have to update the routing table in the router - not on every host. ![](images/ipv6_routed_network_example.svg) In this scenario containers of the same host can communicate directly with each other. The traffic between containers on different hosts will be routed via their hosts and the router. 
For example packet from `Container1-1` to `Container2-1` will be routed through `Host1`, `Router` and `Host2` until it arrives at `Container2-1`. To keep the IPv6 addresses short in this example a `/48` network is assigned to every host. The hosts use a `/64` subnet of this for its own services and one for Docker. When adding a third host you would add a route for the subnet `2001:db8:3::/48` in the router and configure Docker on Host3 with `--fixed-cidr-v6=2001:db8:3:1::/64`. Remember the subnet for Docker containers should at least have a size of `/80`. This way an IPv6 address can end with the container's MAC address and you prevent NDP neighbor cache invalidation issues in the Docker layer. So if you have a `/64` for your whole environment use `/78` subnets for the hosts and `/80` for the containers. This way you can use 4096 hosts with 16 `/80` subnets each. Every configuration in the diagram that is visualized below the dashed line is handled by Docker: The `docker0` bridge IP address configuration, the route to the Docker subnet on the host, the container IP addresses and the routes on the containers. The configuration above the line is up to the user and can be adapted to the individual environment. docker-1.10.3/docs/userguide/networking/default_network/options.md000066400000000000000000000072101267010174400253720ustar00rootroot00000000000000 # Quick guide to the options Here is a quick list of the networking-related Docker command-line options, in case it helps you find the section below that you are looking for. 
Some networking command-line options can only be supplied to the Docker server when it starts up, and cannot be changed once it is running: - `-b BRIDGE` or `--bridge=BRIDGE` -- see [Building your own bridge](#bridge-building) - `--bip=CIDR` -- see [Customizing docker0](#docker0) - `--default-gateway=IP_ADDRESS` -- see [How Docker networks a container](#container-networking) - `--default-gateway-v6=IP_ADDRESS` -- see [IPv6](#ipv6) - `--fixed-cidr` -- see [Customizing docker0](#docker0) - `--fixed-cidr-v6` -- see [IPv6](#ipv6) - `-H SOCKET...` or `--host=SOCKET...` -- This might sound like it would affect container networking, but it actually faces in the other direction: it tells the Docker server over what channels it should be willing to receive commands like "run container" and "stop container." - `--icc=true|false` -- see [Communication between containers](#between-containers) - `--ip=IP_ADDRESS` -- see [Binding container ports](#binding-ports) - `--ipv6=true|false` -- see [IPv6](#ipv6) - `--ip-forward=true|false` -- see [Communication between containers and the wider world](#the-world) - `--iptables=true|false` -- see [Communication between containers](#between-containers) - `--mtu=BYTES` -- see [Customizing docker0](#docker0) - `--userland-proxy=true|false` -- see [Binding container ports](#binding-ports) There are three networking options that can be supplied either at startup or when `docker run` is invoked. 
When provided at startup, these options set the default values that `docker run` will later use if the options are not specified:
It randomly chooses an address and subnet from the private range defined by [RFC 1918](http://tools.ietf.org/html/rfc1918) that are not in use on the host machine, and assigns it to `docker0`. Docker made the choice `172.17.42.1/16` when I started it a few minutes ago, for example -- a 16-bit netmask providing 65,534 addresses for the host machine and its containers. The MAC address is generated using the IP address allocated to the container to avoid ARP collisions, using a range from `02:42:ac:11:00:00` to `02:42:ac:11:ff:ff`. > **Note:** This document discusses advanced networking configuration and options for Docker. In most cases you won't need this information. If you're looking to get started with a simpler explanation of Docker networking and an introduction to the concept of container linking see the [Docker User Guide](dockerlinks.md). But `docker0` is no ordinary interface. It is a virtual _Ethernet bridge_ that automatically forwards packets between any other network interfaces that are attached to it. This lets containers communicate both with the host machine and with each other. Every time Docker creates a container, it creates a pair of "peer" interfaces that are like opposite ends of a pipe -- a packet sent on one will be received on the other. It gives one of the peers to the container to become its `eth0` interface and keeps the other peer, with a unique name like `vethAQI2QT`, out in the namespace of the host machine. By binding every `veth*` interface to the `docker0` bridge, Docker creates a virtual subnet shared between the host machine and every Docker container. The remaining sections of this document explain all of the ways that you can use Docker options and -- in advanced cases -- raw Linux networking commands to tweak, supplement, or entirely replace Docker's default networking configuration. 
## Editing networking config files Starting with Docker v.1.2.0, you can now edit `/etc/hosts`, `/etc/hostname` and `/etc/resolve.conf` in a running container. This is useful if you need to install bind or other services that might override one of those files. Note, however, that changes to these files will not be saved by `docker commit`, nor will they be saved during `docker run`. That means they won't be saved in the image, nor will they persist when a container is restarted; they will only "stick" in a running container. docker-1.10.3/docs/userguide/networking/default_network/tools.md000066400000000000000000000105361267010174400250440ustar00rootroot00000000000000 # Tools and examples Before diving into the following sections on custom network topologies, you might be interested in glancing at a few external tools or examples of the same kinds of configuration. Here are two: - Jérôme Petazzoni has created a `pipework` shell script to help you connect together containers in arbitrarily complex scenarios: [https://github.com/jpetazzo/pipework](https://github.com/jpetazzo/pipework) - Brandon Rhodes has created a whole network topology of Docker containers for the next edition of Foundations of Python Network Programming that includes routing, NAT'd firewalls, and servers that offer HTTP, SMTP, POP, IMAP, Telnet, SSH, and FTP: [https://github.com/brandon-rhodes/fopnp/tree/m/playground](https://github.com/brandon-rhodes/fopnp/tree/m/playground) Both tools use networking commands very much like the ones you saw in the previous section, and will see in the following sections. # Building a point-to-point connection By default, Docker attaches all containers to the virtual subnet implemented by `docker0`. 
You can create containers that are each connected to some different virtual subnet by creating your own bridge as shown in [Building your own bridge](#bridge-building), starting each container with `docker run --net=none`, and then attaching the containers to your bridge with the shell commands shown in [How Docker networks a container](#container-networking). But sometimes you want two particular containers to be able to communicate directly without the added complexity of both being bound to a host-wide Ethernet bridge. The solution is simple: when you create your pair of peer interfaces, simply throw _both_ of them into containers, and configure them as classic point-to-point links. The two containers will then be able to communicate directly (provided you manage to tell each container the other's IP address, of course). You might adjust the instructions of the previous section to go something like this: ``` # Start up two containers in two terminal windows $ docker run -i -t --rm --net=none base /bin/bash root@1f1f4c1f931a:/# $ docker run -i -t --rm --net=none base /bin/bash root@12e343489d2f:/# # Learn the container process IDs # and create their namespace entries $ docker inspect -f '{{.State.Pid}}' 1f1f4c1f931a 2989 $ docker inspect -f '{{.State.Pid}}' 12e343489d2f 3004 $ sudo mkdir -p /var/run/netns $ sudo ln -s /proc/2989/ns/net /var/run/netns/2989 $ sudo ln -s /proc/3004/ns/net /var/run/netns/3004 # Create the "peer" interfaces and hand them out $ sudo ip link add A type veth peer name B $ sudo ip link set A netns 2989 $ sudo ip netns exec 2989 ip addr add 10.1.1.1/32 dev A $ sudo ip netns exec 2989 ip link set A up $ sudo ip netns exec 2989 ip route add 10.1.1.2/32 dev A $ sudo ip link set B netns 3004 $ sudo ip netns exec 3004 ip addr add 10.1.1.2/32 dev B $ sudo ip netns exec 3004 ip link set B up $ sudo ip netns exec 3004 ip route add 10.1.1.1/32 dev B ``` The two containers should now be able to ping each other and make connections successfully. 
Point-to-point links like this do not depend on a subnet nor a netmask, but on the bare assertion made by `ip route` that some other single IP address is connected to a particular network interface. Note that point-to-point links can be safely combined with other kinds of network connectivity -- there is no need to start the containers with `--net=none` if you want point-to-point links to be an addition to the container's normal networking instead of a replacement. A final permutation of this pattern is to create the point-to-point link between the Docker host and one container, which would allow the host to communicate with that one container on some single IP address and thus communicate "out-of-band" of the bridge that connects the other, more usual containers. But unless you have very specific networking needs that drive you to such a solution, it is probably far preferable to use `--icc=false` to lock down inter-container communication, as we explored earlier. docker-1.10.3/docs/userguide/networking/dockernetworks.md000066400000000000000000000475431267010174400235630ustar00rootroot00000000000000 # Understand Docker container networks To build web applications that act in concert but do so securely, use the Docker networks feature. Networks, by definition, provide complete isolation for containers. So, it is important to have control over the networks your applications run on. Docker container networks give you that control. This section provides an overview of the default networking behavior that Docker Engine delivers natively. It describes the type of networks created by default and how to create your own, user--defined networks. It also describes the resources required to create networks on a single host or across a cluster of hosts. ## Default Networks When you install Docker, it creates three networks automatically. 
You can list these networks using the `docker network ls` command: ``` $ docker network ls NETWORK ID NAME DRIVER 7fca4eb8c647 bridge bridge 9f904ee27bf5 none null cf03ee007fb4 host host ``` Historically, these three networks are part of Docker's implementation. When you run a container you can use the `--net` flag to specify which network you want to run a container on. These three networks are still available to you. The `bridge` network represents the `docker0` network present in all Docker installations. Unless you specify otherwise with the `docker run --net=` option, the Docker daemon connects containers to this network by default. You can see this bridge as part of a host's network stack by using the `ifconfig` command on the host. ``` ubuntu@ip-172-31-36-118:~$ ifconfig docker0 Link encap:Ethernet HWaddr 02:42:47:bc:3a:eb inet addr:172.17.0.1 Bcast:0.0.0.0 Mask:255.255.0.0 inet6 addr: fe80::42:47ff:febc:3aeb/64 Scope:Link UP BROADCAST RUNNING MULTICAST MTU:9001 Metric:1 RX packets:17 errors:0 dropped:0 overruns:0 frame:0 TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:1100 (1.1 KB) TX bytes:648 (648.0 B) ``` The `none` network adds a container to a container-specific network stack. That container lacks a network interface. 
Attaching to such a container and looking at it's stack you see this: ``` ubuntu@ip-172-31-36-118:~$ docker attach nonenetcontainer / # cat /etc/hosts 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters / # ifconfig lo Link encap:Local Loopback inet addr:127.0.0.1 Mask:255.0.0.0 inet6 addr: ::1/128 Scope:Host UP LOOPBACK RUNNING MTU:65536 Metric:1 RX packets:0 errors:0 dropped:0 overruns:0 frame:0 TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) / # ``` >**Note**: You can detach from the container and leave it running with `CTRL-p CTRL-q`. The `host` network adds a container on the hosts network stack. You'll find the network configuration inside the container is identical to the host. With the exception of the the `bridge` network, you really don't need to interact with these default networks. While you can list and inspect them, you cannot remove them. They are required by your Docker installation. However, you can add your own user-defined networks and these you can remove when you no longer need them. Before you learn more about creating your own networks, it is worth looking at the `default` network a bit. ### The default bridge network in detail The default bridge network is present on all Docker hosts. 
The `docker network inspect` command displays its configuration:
Their ids show up in the `Containers` section of the output:
You can `attach` to a running `container` and investigate its configuration: ``` $ docker attach container1 / # ifconfig ifconfig eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02 inet addr:172.17.0.2 Bcast:0.0.0.0 Mask:255.255.0.0 inet6 addr: fe80::42:acff:fe11:2/64 Scope:Link UP BROADCAST RUNNING MULTICAST MTU:9001 Metric:1 RX packets:16 errors:0 dropped:0 overruns:0 frame:0 TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:1296 (1.2 KiB) TX bytes:648 (648.0 B) lo Link encap:Local Loopback inet addr:127.0.0.1 Mask:255.0.0.0 inet6 addr: ::1/128 Scope:Host UP LOOPBACK RUNNING MTU:65536 Metric:1 RX packets:0 errors:0 dropped:0 overruns:0 frame:0 TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) ``` Then use `ping` for about 3 seconds to test the connectivity of the containers on this `bridge` network. ``` / # ping -w3 172.17.0.3 PING 172.17.0.3 (172.17.0.3): 56 data bytes 64 bytes from 172.17.0.3: seq=0 ttl=64 time=0.096 ms 64 bytes from 172.17.0.3: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.17.0.3: seq=2 ttl=64 time=0.074 ms --- 172.17.0.3 ping statistics --- 3 packets transmitted, 3 packets received, 0% packet loss round-trip min/avg/max = 0.074/0.083/0.096 ms ``` Finally, use the `cat` command to check the `container1` network configuration: ``` / # cat /etc/hosts 172.17.0.2 3386a527aa08 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters ``` To detach from a `container1` and leave it running use `CTRL-p CTRL-q`.Then, attach to `container2` and repeat these three commands. 
``` $ docker attach container2 / # ifconfig eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:03 inet addr:172.17.0.3 Bcast:0.0.0.0 Mask:255.255.0.0 inet6 addr: fe80::42:acff:fe11:3/64 Scope:Link UP BROADCAST RUNNING MULTICAST MTU:9001 Metric:1 RX packets:15 errors:0 dropped:0 overruns:0 frame:0 TX packets:13 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:1166 (1.1 KiB) TX bytes:1026 (1.0 KiB) lo Link encap:Local Loopback inet addr:127.0.0.1 Mask:255.0.0.0 inet6 addr: ::1/128 Scope:Host UP LOOPBACK RUNNING MTU:65536 Metric:1 RX packets:0 errors:0 dropped:0 overruns:0 frame:0 TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) / # ping -w3 172.17.0.2 PING 172.17.0.2 (172.17.0.2): 56 data bytes 64 bytes from 172.17.0.2: seq=0 ttl=64 time=0.067 ms 64 bytes from 172.17.0.2: seq=1 ttl=64 time=0.075 ms 64 bytes from 172.17.0.2: seq=2 ttl=64 time=0.072 ms --- 172.17.0.2 ping statistics --- 3 packets transmitted, 3 packets received, 0% packet loss round-trip min/avg/max = 0.067/0.071/0.075 ms / # cat /etc/hosts 172.17.0.3 94447ca47985 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters ``` The default `docker0` bridge network supports the use of port mapping and `docker run --link` to allow communications between containers in the `docker0` network. These techniques are cumbersome to set up and prone to error. While they are still available to you as techniques, it is better to avoid them and define your own bridge networks instead. ## User-defined networks You can create your own user-defined networks that better isolate containers. Docker provides some default **network drivers** for creating these networks. You can create a new **bridge network** or **overlay network**. You can also create a **network plugin** or **remote network** written to your own specifications. 
You can create multiple networks. You can add containers to more than one network. Containers can only communicate within networks but not across networks. A container attached to two networks can communicate with member containers in either network. The next few sections describe each of Docker's built-in network drivers in greater detail. ### A bridge network The easiest user-defined network to create is a `bridge` network. This network is similar to the historical, default `docker0` network. There are some added features and some old features that aren't available. ``` $ docker network create --driver bridge isolated_nw 1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b $ docker network inspect isolated_nw [ { "Name": "isolated_nw", "Id": "1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.21.0.0/16", "Gateway": "172.21.0.1/16" } ] }, "Containers": {}, "Options": {} } ] $ docker network ls NETWORK ID NAME DRIVER 9f904ee27bf5 none null cf03ee007fb4 host host 7fca4eb8c647 bridge bridge c5ee82f76de3 isolated_nw bridge ``` After you create the network, you can launch containers on it using the `docker run --net=` option. ``` $ docker run --net=isolated_nw -itd --name=container3 busybox 8c1a0a5be480921d669a073393ade66a3fc49933f08bcc5515b37b8144f6d47c $ docker network inspect isolated_nw [ { "Name": "isolated_nw", "Id": "1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ {} ] }, "Containers": { "8c1a0a5be480921d669a073393ade66a3fc49933f08bcc5515b37b8144f6d47c": { "EndpointID": "93b2db4a9b9a997beb912d28bcfc117f7b0eb924ff91d48cfa251d473e6a9b08", "MacAddress": "02:42:ac:15:00:02", "IPv4Address": "172.21.0.2/16", "IPv6Address": "" } }, "Options": {} } ] ``` The containers you launch into this network must reside on the same Docker host. 
Each container in the network can immediately communicate with other containers in the network. Though, the network itself isolates the containers from external networks. ![An isolated network](images/bridge_network.png) Within a user-defined bridge network, linking is not supported. You can expose and publish container ports on containers in this network. This is useful if you want to make a portion of the `bridge` network available to an outside network. ![Bridge network](images/network_access.png) A bridge network is useful in cases where you want to run a relatively small network on a single host. You can, however, create significantly larger networks by creating an `overlay` network. ### An overlay network Docker's `overlay` network driver supports multi-host networking natively out-of-the-box. This support is accomplished with the help of `libnetwork`, a built-in VXLAN-based overlay network driver, and Docker's `libkv` library. The `overlay` network requires a valid key-value store service. Currently, Docker's `libkv` supports Consul, Etcd, and ZooKeeper (Distributed store). Before creating a network you must install and configure your chosen key-value store service. The Docker hosts that you intend to network and the service must be able to communicate. ![Key-value store](images/key_value.png) Each host in the network must run a Docker Engine instance. The easiest way to provision the hosts are with Docker Machine. ![Engine on each host](images/engine_on_net.png) You should open the following ports between each of your hosts. | Protocol | Port | Description | |----------|------|-----------------------| | udp | 4789 | Data plane (VXLAN) | | tcp/udp | 7946 | Control plane | Your key-value store service may require additional ports. Check your vendor's documentation and open any required ports. Once you have several machines provisioned, you can use Docker Swarm to quickly form them into a swarm which includes a discovery service as well. 
To create an overlay network, you configure options on the `daemon` on each Docker Engine for use with `overlay` network. There are two options to set:
Option Description
--cluster-store=PROVIDER://URL
Describes the location of the KV service.
--cluster-advertise=HOST_IP|HOST_IFACE:PORT
The IP address or interface of the HOST used for clustering.
--cluster-store-opt=KEY-VALUE OPTIONS
Options such as TLS certificate or tuning discovery Timers
Create an `overlay` network on one of the machines in the Swarm. $ docker network create --driver overlay my-multi-host-network This results in a single network spanning multiple hosts. An `overlay` network provides complete isolation for the containers. ![An overlay network](images/overlay_network.png) Then, on each host, launch containers making sure to specify the network name. $ docker run -itd --net=my-multi-host-network busybox Once connected, each container has access to all the containers in the network regardless of which Docker host the container was launched on. ![Published port](images/overlay-network-final.png) If you would like to try this for yourself, see the [Getting started for overlay](get-started-overlay.md). ### Custom network plugin If you like, you can write your own network driver plugin. A network driver plugin makes use of Docker's plugin infrastructure. In this infrastructure, a plugin is a process running on the same Docker host as the Docker `daemon`. Network plugins follow the same restrictions and installation rules as other plugins. All plugins make use of the plugin API. They have a lifecycle that encompasses installation, starting, stopping and activation. Once you have created and installed a custom network driver, you use it like the built-in network drivers. For example: $ docker network create --driver weave mynet You can inspect it, add containers too and from it, and so forth. Of course, different plugins may make use of different technologies or frameworks. Custom networks can include features not present in Docker's default networks. For more information on writing plugins, see [Extending Docker](../../extend/index.md) and [Writing a network driver plugin](../../extend/plugins_network.md). ### Docker embedded DNS server Docker daemon runs an embedded DNS server to provide automatic service discovery for containers connected to user defined networks. 
Name resolution requests from the containers are handled first by the embedded DNS server. If the embedded DNS server is unable to resolve the request it will be forwarded to any external DNS servers configured for the container. To facilitate this when the container is created, only the embedded DNS server reachable at `127.0.0.11` will be listed in the container's `resolv.conf` file. More information on embedded DNS server on user-defined networks can be found in the [embedded DNS server in user-defined networks] (configure-dns.md) ## Links Before the Docker network feature, you could use the Docker link feature to allow containers to discover each other. With the introduction of Docker networks, containers can be discovered by its name automatically. But you can still create links but they behave differently when used in the default `docker0` bridge network compared to user-defined networks. For more information, please refer to [Legacy Links](default_network/dockerlinks.md) for link feature in default `bridge` network and the [linking containers in user-defined networks](work-with-networks.md#linking-containers-in-user-defined-networks) for links functionality in user-defined networks. ## Related information - [Work with network commands](work-with-networks.md) - [Get started with multi-host networking](get-started-overlay.md) - [Managing Data in Containers](../containers/dockervolumes.md) - [Docker Machine overview](https://docs.docker.com/machine) - [Docker Swarm overview](https://docs.docker.com/swarm) - [Investigate the LibNetwork project](https://github.com/docker/libnetwork) docker-1.10.3/docs/userguide/networking/get-started-overlay.md000066400000000000000000000327151267010174400244140ustar00rootroot00000000000000 # Get started with multi-host networking This article uses an example to explain the basics of creating a multi-host network. Docker Engine supports multi-host networking out-of-the-box through the `overlay` network driver. 
Unlike `bridge` networks, overlay networks require some pre-existing conditions before you can create one. These conditions are: * Access to a key-value store. Docker supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores. * A cluster of hosts with connectivity to the key-value store. * A properly configured Engine `daemon` on each host in the cluster. Though Docker Machine and Docker Swarm are not mandatory to experience Docker multi-host networking, this example uses them to illustrate how they are integrated. You'll use Machine to create both the key-value store server and the host cluster. This example creates a Swarm cluster. ## Prerequisites Before you begin, make sure you have a system on your network with the latest version of Docker Engine and Docker Machine installed. The example also relies on VirtualBox. If you installed on a Mac or Windows with Docker Toolbox, you have all of these installed already. If you have not already done so, make sure you upgrade Docker Engine and Docker Machine to the latest versions. ## Step 1: Set up a key-value store An overlay network requires a key-value store. The key-value store holds information about the network state which includes discovery, networks, endpoints, IP addresses, and more. Docker supports Consul, Etcd, and ZooKeeper key-value stores. This example uses Consul. 1. Log into a system prepared with the prerequisite Docker Engine, Docker Machine, and VirtualBox software. 2. Provision a VirtualBox machine called `mh-keystore`. $ docker-machine create -d virtualbox mh-keystore When you provision a new machine, the process adds Docker Engine to the host. This means rather than installing Consul manually, you can create an instance using the [consul image from Docker Hub](https://hub.docker.com/r/progrium/consul/). You'll do this in the next step. 3. Start a `progrium/consul` container running on the `mh-keystore` machine. 
$ docker $(docker-machine config mh-keystore) run -d \ -p "8500:8500" \ -h "consul" \ progrium/consul -server -bootstrap A bash expansion `$(docker-machine config mh-keystore)` is used to pass the connection configuration to the `docker run` command. The client starts a `progrium/consul` image running in the `mh-keystore` machine. The server is called `consul` and is listening on port `8500`. 4. Set your local environment to the `mh-keystore` machine. $ eval "$(docker-machine env mh-keystore)" 5. Run the `docker ps` command to see the `consul` container. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 4d51392253b3 progrium/consul "/bin/start -server -" 25 minutes ago Up 25 minutes 53/tcp, 53/udp, 8300-8302/tcp, 0.0.0.0:8500->8500/tcp, 8400/tcp, 8301-8302/udp admiring_panini Keep your terminal open and move onto the next step. ## Step 2: Create a Swarm cluster In this step, you use `docker-machine` to provision the hosts for your network. At this point, you won't actually create the network. You'll create several machines in VirtualBox. One of the machines will act as the Swarm master; you'll create that first. As you create each host, you'll pass the Engine on that machine options that are needed by the `overlay` network driver. 1. Create a Swarm master. $ docker-machine create \ -d virtualbox \ --swarm --swarm-master \ --swarm-discovery="consul://$(docker-machine ip mh-keystore):8500" \ --engine-opt="cluster-store=consul://$(docker-machine ip mh-keystore):8500" \ --engine-opt="cluster-advertise=eth1:2376" \ mhs-demo0 At creation time, you supply the Engine `daemon` with the ` --cluster-store` option. This option tells the Engine the location of the key-value store for the `overlay` network. The bash expansion `$(docker-machine ip mh-keystore)` resolves to the IP address of the Consul server you created in "STEP 1". The `--cluster-advertise` option advertises the machine on the network. 2. Create another host and add it to the Swarm cluster. 
$ docker-machine create -d virtualbox \ --swarm \ --swarm-discovery="consul://$(docker-machine ip mh-keystore):8500" \ --engine-opt="cluster-store=consul://$(docker-machine ip mh-keystore):8500" \ --engine-opt="cluster-advertise=eth1:2376" \ mhs-demo1 3. List your machines to confirm they are all up and running. $ docker-machine ls NAME ACTIVE DRIVER STATE URL SWARM default - virtualbox Running tcp://192.168.99.100:2376 mh-keystore * virtualbox Running tcp://192.168.99.103:2376 mhs-demo0 - virtualbox Running tcp://192.168.99.104:2376 mhs-demo0 (master) mhs-demo1 - virtualbox Running tcp://192.168.99.105:2376 mhs-demo0 At this point you have a set of hosts running on your network. You are ready to create a multi-host network for containers using these hosts. Leave your terminal open and go onto the next step. ## Step 3: Create the overlay Network To create an overlay network 1. Set your docker environment to the Swarm master. $ eval $(docker-machine env --swarm mhs-demo0) Using the `--swarm` flag with `docker-machine` restricts the `docker` commands to Swarm information alone. 2. Use the `docker info` command to view the Swarm. 
$ docker info Containers: 3 Images: 2 Role: primary Strategy: spread Filters: affinity, health, constraint, port, dependency Nodes: 2 mhs-demo0: 192.168.99.104:2376 └ Containers: 2 └ Reserved CPUs: 0 / 1 └ Reserved Memory: 0 B / 1.021 GiB └ Labels: executiondriver=native-0.2, kernelversion=4.1.10-boot2docker, operatingsystem=Boot2Docker 1.9.0-rc1 (TCL 6.4); master : 4187d2c - Wed Oct 14 14:00:28 UTC 2015, provider=virtualbox, storagedriver=aufs mhs-demo1: 192.168.99.105:2376 └ Containers: 1 └ Reserved CPUs: 0 / 1 └ Reserved Memory: 0 B / 1.021 GiB └ Labels: executiondriver=native-0.2, kernelversion=4.1.10-boot2docker, operatingsystem=Boot2Docker 1.9.0-rc1 (TCL 6.4); master : 4187d2c - Wed Oct 14 14:00:28 UTC 2015, provider=virtualbox, storagedriver=aufs CPUs: 2 Total Memory: 2.043 GiB Name: 30438ece0915 From this information, you can see that you are running three containers and two images on the Master. 3. Create your `overlay` network. $ docker network create --driver overlay --subnet=10.0.9.0/24 my-net You only need to create the network on a single host in the cluster. In this case, you used the Swarm master but you could easily have run it on any host in the cluster. > **Note** : It is highly recommended to use the `--subnet` option when creating > a network. If the `--subnet` is not specified, the docker daemon automatically > chooses and assigns a subnet for the network and it could overlap with another subnet > in your infrastructure that is not managed by docker. Such overlaps can cause > connectivity issues or failures when containers are connected to that network. 4. 
Check that the network is running: $ docker network ls NETWORK ID NAME DRIVER 412c2496d0eb mhs-demo1/host host dd51763e6dd2 mhs-demo0/bridge bridge 6b07d0be843f my-net overlay b4234109bd9b mhs-demo0/none null 1aeead6dd890 mhs-demo0/host host d0bb78cbe7bd mhs-demo1/bridge bridge 1c0eb8f69ebb mhs-demo1/none null As you are in the Swarm master environment, you see all the networks on all the Swarm agents: the default networks on each engine and the single overlay network. Notice that each `NETWORK ID` is unique. 5. Switch to each Swarm agent in turn and list the networks. $ eval $(docker-machine env mhs-demo0) $ docker network ls NETWORK ID NAME DRIVER 6b07d0be843f my-net overlay dd51763e6dd2 bridge bridge b4234109bd9b none null 1aeead6dd890 host host $ eval $(docker-machine env mhs-demo1) $ docker network ls NETWORK ID NAME DRIVER d0bb78cbe7bd bridge bridge 1c0eb8f69ebb none null 412c2496d0eb host host 6b07d0be843f my-net overlay Both agents report they have the `my-net` network with the `6b07d0be843f` ID. You now have a multi-host container network running! ## Step 4: Run an application on your Network Once your network is created, you can start a container on any of the hosts and it automatically is part of the network. 1. Point your environment to the Swarm master. $ eval $(docker-machine env --swarm mhs-demo0) 2. Start an Nginx web server on the `mhs-demo0` instance. $ docker run -itd --name=web --net=my-net --env="constraint:node==mhs-demo0" nginx 4. Run a BusyBox instance on the `mhs-demo1` instance and get the contents of the Nginx server's home page. $ docker run -it --rm --net=my-net --env="constraint:node==mhs-demo1" busybox wget -O- http://web Unable to find image 'busybox:latest' locally latest: Pulling from library/busybox ab2b8a86ca6c: Pull complete 2c5ac3f849df: Pull complete Digest: sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279 Status: Downloaded newer image for busybox:latest Connecting to web (10.0.0.2:80) Welcome to nginx!

Welcome to nginx!

If you see this page, the nginx web server is successfully installed and working. Further configuration is required.

For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.

Thank you for using nginx.

- 100% |*******************************| 612 0:00:00 ETA ## Step 5: Check external connectivity As you've seen, Docker's built-in overlay network driver provides out-of-the-box connectivity between the containers on multiple hosts within the same network. Additionally, containers connected to the multi-host network are automatically connected to the `docker_gwbridge` network. This network allows the containers to have external connectivity outside of their cluster. 1. Change your environment to the Swarm agent. $ eval $(docker-machine env mhs-demo1) 2. View the `docker_gwbridge` network, by listing the networks. $ docker network ls NETWORK ID NAME DRIVER 6b07d0be843f my-net overlay dd51763e6dd2 bridge bridge b4234109bd9b none null 1aeead6dd890 host host e1dbd5dff8be docker_gwbridge bridge 3. Repeat steps 1 and 2 on the Swarm master. $ eval $(docker-machine env mhs-demo0) $ docker network ls NETWORK ID NAME DRIVER 6b07d0be843f my-net overlay d0bb78cbe7bd bridge bridge 1c0eb8f69ebb none null 412c2496d0eb host host 97102a22e8d2 docker_gwbridge bridge 2. Check the Nginx container's network interfaces. 
$ docker exec web ip addr 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 22: eth0: mtu 1450 qdisc noqueue state UP group default link/ether 02:42:0a:00:09:03 brd ff:ff:ff:ff:ff:ff inet 10.0.9.3/24 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::42:aff:fe00:903/64 scope link valid_lft forever preferred_lft forever 24: eth1: mtu 1500 qdisc noqueue state UP group default link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff inet 172.18.0.2/16 scope global eth1 valid_lft forever preferred_lft forever inet6 fe80::42:acff:fe12:2/64 scope link valid_lft forever preferred_lft forever The `eth0` interface represents the container interface that is connected to the `my-net` overlay network. While the `eth1` interface represents the container interface that is connected to the `docker_gwbridge` network. ## Step 6: Extra Credit with Docker Compose Please refer to the Networking feature introduced in [Compose V2 format] (https://docs.docker.com/compose/networking/) and execute the multi-host networking scenario in the Swarm cluster used above. 
## Related information * [Understand Docker container networks](dockernetworks.md) * [Work with network commands](work-with-networks.md) * [Docker Swarm overview](https://docs.docker.com/swarm) * [Docker Machine overview](https://docs.docker.com/machine) docker-1.10.3/docs/userguide/networking/images/000077500000000000000000000000001267010174400214255ustar00rootroot00000000000000docker-1.10.3/docs/userguide/networking/images/bridge_network.gliffy000066400000000000000000000410561267010174400256420ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":378,"height":236,"nodeIndex":146,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":7,"y":5.1999969482421875},"max":{"x":378,"y":235.1428540910994}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":196.0,"y":100.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":149.0,"y":154.96785409109907,"rotation":0.0,"id":114,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":16,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":95,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":5,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":96,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":13,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":99,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":99,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.1785714285714448],[1.8600000000000136,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":97,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.178571428571729],[-1.4193795664340882,28.28571428571442]],"lockS
egments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":98,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":99,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":112,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":226.0,"y":155.96785409109907,"rotation":0.0,"id":115,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":34,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":116,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":22,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":117,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":31,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.1785714285714448],[1.8600000000000136,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":118,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.178571428571729],[-1.41937
95664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":119,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":120,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":121,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":72.0,"y":154.96785409109907,"rotation":0.0,"id":122,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":51,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":123,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":39,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":124,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":127,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":127,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.1785714285714448],[1.8600000000000136,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":125,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":45,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.178571428571729],[-1.419379
5664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":126,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":127,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":128,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":81.38636363636368,"y":79.1428540910994,"rotation":0.0,"id":129,"width":291.1363636363638,"height":156.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":157.0,"y":124.19999694824219,"rotation":0.0,"id":130,"width":150.0,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":52,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

isolated_nw

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.0,"y":5.1999969482421875,"rotation":0.0,"id":134,"width":73.116,"height":102.32,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":56,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":53.0,"y":57.19999694824219,"rotation":0.0,"id":136,"width":119.0,"height":45.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":134,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[35.116,-0.8400000000000034],[89.0,-0.8400000000000034],[89.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.0,"y":116.19999694824219,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":66}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":6}},"textStyles":{"global":{"bold":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":[],"lastSerialized":1445538566750},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/images/bridge_network.png000066400000000000000000000370061267010174400251460ustar00rootroot00000000000000PNG  IHDR 1 IDATx^xV {o!F-CT,VjZZU[WժuQōRd d!xKs!{{>7ιIBT@S$CS)FJ P*@,)@pXS*@QW/NT P?++p(6nDmZȜ }U&%U{=NJ62cd/OfT P?(px<<7MS:3Tu_rYe'=<|pOfT P(Sp˾ur[e^n'"`qkeGqXL7F>DOR*@b@hFlmtr+ͯ{k=.r졲Zz#<+T Z۔kjǞhpn@xu\OJ' *h}LrVL@*8eomϭ5ᚂ 䥅%I{r Q߼ϊIʢ͞m?, !'OԳkM]8}|B~Z ϺS*c<\S{שԥ՞fv-Yx*70P:wó.gT Xa.+=uMVMXw+BuYUrͳl^ P^7ڷ<?L6L )-,\uCK&D@p *UX;qd\V3Sn>%[vzR@*@+0Vcb2S~wZ,ȫ_U0t bŻևCpx+ PX+ 8^n4k\o,'%7$./{ɳ|)òd< P(`pM%''ty䢣K4''-ן.=%OT cJJ.م]Ul"\m}.OSm;ݹvm+yƣ Cئ Jio0a{LD*@|mp\JO91U.;.Dc;C?z< P)@p!R*m#cףT G0O@ 8h9^ P+ 8ڴLm[JK)6} m,ڶtM{kewBq&-+T NpgvQ >[$'%IVJ9^$8fO+s&UZ{.F!8*H\ɵ''?.Қ9*EtIpgMJ/ջ;Gz#%y2kZ_);MNK|;8fN Y&}'[CPy.kt qɱdrv^R=18]zG z/ 8g#gS*p G z0 8g#gS*p G z0 8g#gS*p G z0p WNϐ߾Vvnv,: FLŷǯtf׺liߺq1H@"* 8&euO$廥Ι=:ںz9W_0l(%uI߮ml\Fsgdz\Gzvj3BCg_\e˖ T [CpTTTȲeT<(`A&~Mnݺرc%+++(vR*>{} ]ոݻTjjjv7.ad᧺Zv!;jQpL&ן4Pˮ?"YSۖR瀜7Mc΄rik_ZyW&fP( 8M~y|:+=tO#+y2ddrޓ̓U}Y͡Y֭K$aeX* OK_R*vw$%#QgڦmͧdK-  hqhC@(@pZCВQP 8:% Aph tCK&DBL< $5 -xHCkZ2M6iuҫW/y 0 8ВӃ+TWW˶mwimڴQ0Myf=ɓlX>&Ku'[^ϕq`uUgJ\aJ0@sXK½V|_SΝHIxrpt3=srZZ$'Iծ2KTU{U,.nhcIj^UM tdG>Eâk<`OJaI \G.mpku fx88z:;Xyna(ڐkEqqE!Dޛ 8(^^ ui͚5ʲXK#? GkbժUqaYXVȰaÔ5 HJKul2Yݗb ##,{5? 2j(C3 Ck Z2 x @4&N:?tMV[Pw/cHǕtm[%0!#ʍcoӐh׺tiҍ˨:somc}tU5--a>E020h`\ # &0_sgvN$ȆWk?M69)IڴLVڄ"v]տc Re߁:_Z{۱mKy~cWAp4/#sp1!~]+K$4#  3ӌ+5kF8\n A6*DvYJpfmV@<%80gWTTȊ+{F0k׮Jc@;^oTkc'? 
ܽ_zmhR(@ 7+;ѽ^j Mh/b]úf se˖ >A)Y\.?Ҡtv$F r[Ǟ={T4vX(G}wƅ^(^z?v'mD~_53>n8?)ȢZl٢㏷zj 9&a+7gjkf%XVc閒Geŵ28&_H"(`9s孷ޒO>V+8 k] / r[n'|H1E;nAJ;̀zcav{5xƺ`u0AZZ/^|׿ooVڵk w}\s5&*p `2>\ϟ/m۶믿^}ك-s=RYY)wyr-Hw_ި2ĵ;0 w9ԱlSO=%{ԕ!O?B0b!_X-F@L^^iYMcP|^'x-p֭[6n(&M5kȧ~&7|Sm(~jb?c0dONof)//Wp ʕW^&_B{1t6mZc{^yygB+BA׆ + 0;ޓr˪UVg-7o\鍊n}ZWgol)}Mчp"v, y3=\: 8֯_/'OVcg5B;8y+kݺu1)A/Q5Gx$NԃQ6N=TYѡCЀ0.b5){2h پ},uy&bb%mBW!նbU7N[XsW` 8?WPyjV’xGԿa+? 4ph*UOӃm 'HiiΝs뭷 ,&֤Ѻ xbpu@kß/ұ:ZOGxz[裏?p &Ł#ȈS`h0Y#> dʕB0`NmѢJ gff 如v#F(2x`u*>}%7c /$YW@g]bAkKPPڷoi?e˖ZUxJfToSU24~-9 h $aLL0;vhKcR7r*yK^X̡[/Av'^'q2HӞC<* d偅vІ;opG`q^uU*N.p{"7geXHq`AA$4ŵԵQʵ /bFy$p^xD (!u"ŲN!Ӱ{wx h[ymaae-}t!8tt/^F7#L:#.LLpbb{*Ďrߡ &*bi![:42{L؝<{DD&x$}sQAsHFvIya `{Ķ` ;Ly IVjcGܱ 5` y`GRo!V[ q},@X:u>,ZՍ,A#<Ły>.]{Tn#1E\qx3RavEpkGdFYYcN;&VB݀}$oܰL :R1IxС6:pA KٱcJ /=-~A7^`Y pC xNzSRR 턜a%p ^"85#tvB&n˜X5Ez*bX h aq [ zq>&lLd9vY k[Rxq]p n冬E[! =^Yv4aQYB 4(`X7uI1571{ /pAgX0p. 4Ah c߯\X&wnp(ԃ{CHG1NIAK  0:jz'#v=&k NGP`q`Q+ Hcǁ2ΜIDATu. AAƕ7@ ń@ ׂ8ץޟN󫯾ZՅ F{GZ{NZ.]~ F+iZ5>$u@MIB*X'X6)t=Q:Y$\Pd؈ ޜxmaE&9 Hp=U \rI=TEغ$`7dw|YHeJYԋ}"}O:͈f_68: t1_.T~1%Z 0ʙҴ8볙=abγ;:_u/~?*@p8Ù~=n*|Ghpo Xpَfz7]!o 3xU@쐮x꾰V@p͙( ApC7~dvT9a'骲],-Z$؎I!8sghqol;\Qx;Q!Â7aCbz%|IFq -t@["ug=Q@M]+( 3ÓaNt%Uv ƛSVVAaWDw G%g ?eç2`eDgò( L~֧fT-G4HC3:<>V6aZ`]L81|j0ЩJ<x}y#8χk C@b,T +`y G &x)pM2`a;xaT@_ פd<wώD-À*@Qhh>g #=mhaXr^ Rg/zVM]/~159*4L, *@bٷoadetILu.;k(`H3. 
c„ ͋J /T5 4&h蘳,оzzW'$S9/uMP3Y9N@<7։XBg=#ʃ>җ^zIH[~rRsl$׻tlVkvrϩ{Hl׵cTZ.u]:wh]׃gdikHͬkFؚ.Uq֬Y.'mɞ ʘ1mdn6xj,'̾=Lie??D]2wZ_['zֵv^upP3S lag x uYno:<tN@?ikmW>[%[&c'OWk>z 0k2y哕@̞##RLk|8[}҈Ž:ϿQ+ ~: N11mpGWl*}ZZHӧXM^|GHp-*>[%jɈ(wcpP3!s8[}͌#nuWxȸۆY Ԭfo7n78ZȬ)928;ju6UʬdX}#\ޘpԪ4gy;CAkz L^k7Ӹ2*:,z:UkqQ$}p),l)G컽m*>_,S*9M518̇,H :,H[Z)11~yRy{ZGALYF5m椡28o*w\q!2L 85 Tua?}ʖ0YX[xG3ϪZSM]ؐU5!j-*.lȪ:q ^WR.oȪ1q p{}yV5|AqP>ʺBՙgubHd<ЇimֵE 8aP<7Jrկ8ad;HݰB[F CdF:[{pKL!*V_3#uåMft{ÉzJq ZUn6WUr?ad:Ks7c^7W:9l<оffCpq˦N:ఫy(p[8z0uEeU$9n`cT.}N]sڸlr/R!.Εrܸlp{:532LHݘ[mvGpU.y:؃ f15ٴڼ2hiqާ9\>Y^dxIJcژlp{T95~U^w0O~i.Ip,(Ӊmx`<Щ eæ2<cɀ\!.mcIWJlzGg~橽% 8^_(4$=*ԁN*Rw]o#%2oYLN)cIQB-o`q,IwJt)$Sz)հ8531/(ނJx-VTV8V' k#6n"rb,G ]Ni[* kU&(KKeGL%sj|A 5`Nw$8*yfAq7V7dԪ!Xl.nP42K`E1iDwڴJ,St@IصfC2pP3qFkfH;{9ShÃӣ͜ /?j^eo/4vKt4( >:%:A+¥5]@hQRV/2<;]zulvJZ]Vo8rK׶jY@jd @:6| 8~%I^"i:tOF*U*HaҥgW&5RVU-zg{u\'ff#%Hͭn& ah%H3jӻV^%k7+W8*PdIOnj+R*Kp"g?~G:8p ^f Z%eM "i,]46jjUJvFUFo8T]:Jwjf:daGlC=e)!8*fU?o[L謁rl(,$_9GEUl(h8IwKPP$u2p@f핞rjf:da[G3nv.YzQjqTUEv/*QOvoqTVUK~qfFZts/ܤ?Uw5GVj"A'fCpuY# R kM着Z 1`@tdUkx怴~ՁF K8R5|/ 89fniKVpGEGUv)TΎQ-ś,~ҵ )*ڤP1s?K̭#Ií?CJ5y"%)a17v=,Kk^smZK򤶮V ͒ԔkB88tf * luFǘ#Z+iqԯ~Mo4YoY׬q [ ,bTBcZ:P3pD+=pakN57ߤQ v­T H#F{uYl+Y/Htx0>p$JEb[K.bgpܺfMa֡Gƥ[b5T Ly&Z/KCDŽd TfvT9:3*KS8P,]!#-O(_~zܼyx f7o^+o{쾉IoHA TݨcC=$ӧOY`TTTȣ>*_|]1ZJs}I5ꫯy'gϖÇ+fslR=\PZ_W.x5l۷oudҤIحU,;&j$8\_T.H%ZU~0GjB=z}vL8Q>h դ ΩSo\/#]3rHyդs͛'} 62gyꩧ .L~_Ȗ-[o߾w'xyߪtgt9ꨣŃ6va F^EiӦ5Ah=j(5,/Jah6x`IIIQmСKY|}J:}x~fdSE;vJpt4\F:\IY5 pv '(koƛ=b0cC8@L=\m/:vٲe{nP&+ +Vh` z Ў~h x 5k̟?_UVj}lȧ YXHaV7 _LJJr]7 QOpiqt:kf7&wa`BGvST,.,L(نkʰ8~@,dq* >a<|8*#cBc;O?][ކh#43FdU l< q9l_ XXh}/TZt8&H+ɣ5 ᡥVvܩ]7P+ ip,}bV.j`lqEEEūo gY .Gc0Y9N&zrp < h+/b^% &X갈=h8?sVG4mOT `@,Rcenz|묞 P mjR<6k4?% >VVT DV@e3ڙT&80u]VS"W^Qh/Aw}x `8ŊU!8N MAj$슒 (@#(c,+4V3f̠žᥨ@,4lT1.*C##ʣEga DU;1R 4Ip`ln~LfeT  ~heyՎ4$HB='ajR 2 #xY*w 0˞#flHG4Ux`*KS ,ZH}CpO-E= 4\κ><VM\Vkʰ4 pypةjlQS#ڻb괍P*x]+ 8|6uy6+u"CB2۬0T4›PZ>:ΝknxA g$x"1*P?ksyxN6%V , >,b Īz< W.)êa\1Hiq|zk1`s Fh!yw `a׆̩HIp|;>Bo IKK@ Pw 8!\&N8 t*b555[ o9 >׆8ġK\qqESy 
YApl#K+7~Pu)#p7v\On/jSNU. # @( L|Td=n捃e^;T&Op8>:W ⣎aSN/xq8ᧅ+-RF/bcUXAF5!~7 Rd&¢@v"LRG,G^ Ơ)$Fa" ]o %Ʀl {;kF<0>dsO+VP*@bCPG?T P(@pP*@? 8lT 0 *6 P*[AG` T CVP*@bCPG?T P(@pP*@? 8lT 0 *6 P*[AG` T CVP*@bCPG?T P(cS \IENDB`docker-1.10.3/docs/userguide/networking/images/bridge_network.svg000066400000000000000000000565511267010174400251670ustar00rootroot00000000000000container2container3container1isolated_nwDockerHostdocker-1.10.3/docs/userguide/networking/images/engine_on_net.gliffy000066400000000000000000000347411267010174400254470ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":277,"height":209,"nodeIndex":174,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":3,"y":3.1889969482422202},"max":{"x":277,"y":208.1999969482422}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":223.0,"y":117.3854006442422,"rotation":0.0,"id":171,"width":26.70555282692303,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":21,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":1.0,"y":93.51999694824218,"rotation":0.0,"id":152,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":4,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddi
ngTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":134,"width":42.8749022673964,"height":60.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":1,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":96.0,"y":130.51999694824218,"rotation":0.0,"id":153,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":7,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":154,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":155,"width":42.8749022673964,"height":60.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":197.0,"y":99.35999694824216,"rotation":0.0,"id":156,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":12,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":157,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":158,"width":42.8749022673964,"height":60.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":11,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":114.0,"y":3.1889969482422202,"rotation":0.0,"id":160,"width":48.773475410240856,"height":39.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.relational_database","order":15,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.relational_database","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#02709F","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":163,"width":88.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Key-value store

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":171.0,"y":25.199996948242188,"rotation":0.0,"id":165,"width":72.0,"height":73.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":18,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":158,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-32.613262294879576,16.989000000000033],[70.4374511336982,74.15999999999997]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":141.0,"y":37.19999694824219,"rotation":0.0,"id":168,"width":4.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":19,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":155,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-2.6132622948795756,4.989000000000033],[-0.5625488663017961,93.32]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":136.0,"y":42.19999694824219,"rotation":0.0,"id":169,"width":86.0,"height":50.0,"uid":"com.gliffy.shape.basic.b
asic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":134,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.3867377051204244,-0.010999999999967258],[-90.5625488663018,51.31999999999999]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":122.0,"y":150.3854006442422,"rotation":0.0,"id":172,"width":26.70555282692303,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":27.0,"y":113.3854006442422,"rotation":0.0,"id":173,"width":26.70555282692303,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":23,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 
0","active":true,"locked":false,"visible":true,"nodeIndex":24}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":2,"dashStyle":"1.0,1.0"}},"textStyles":{"global":{"bold":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.custom.confluence.c20f4a380e3cee362007f9e62694d34d947f28ed4263c0702b3dd72d9801532a"],"lastSerialized":1445555725710},"embeddedResources":{"index":1,"resources":[{"id":0,"mimeType":"image/svg+xml","data":"\n\n \n logo copy\n Created with Sketch.\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","width":59.29392246992643,"height":42.185403696,"x":0.4429050300735753,"y":0.7077644040000006}]}}docker-1.10.3/docs/userguide/networking/images/engine_on_net.png000066400000000000000000000333201267010174400247430ustar00rootroot00000000000000PNG  IHDR)Ę\ IDATx^} x}OHH!BJ `EťhU~?{Dj[*v " " D,!a !!{BBBB8Y&3o}`sι) (^BR.* (d!"p n} !){f}U֠յP^Ss/?jV֠_G]}7@ RC@ 2sreylNj+QZ]X>2Jq yPuSORd+"}"C0..MF$E"קSKcE@H?V|'K*v1r+ve"=%f-3'ѩHCHKKI| .-8RPn3~?n r#n *Ɠ`}IM?Izxe]Tn~[vcӡ\#rx\ѿ##}1BRn,<ˡ?>$d>rI?~wt=vv3Ӑq MPh-2 IfѫbR\+Q9,N{a3\QX.0&!)BgcX<Sّ<7:a+SpT(T~4a ĦDHbmIRR s0~s gxEb/]xH!),DMAHA:۶HcA1<>JL{HT1C`.`$D(>-G*\c!s!$eꑔƒ1!0+_1VV3ޏI[% FtHJzjKUT*REv"Dk5(yf!)I5]NJLJVuQ!S &ca lM-Jjpx'1M!)3}C!)$ I\s BR)C2Tv3L`m WHB)  (S22tenI` I@2Ѥ̼,]hR&XBR&LA4)3!)3KW& ր (SMk@Hҕ&e57۳+u}?8 #"FOAHvIrKهNL텷~1QJڛ!)tyzg=V3b_j2X! $<$oי={z(k" $e2U` !繫MJŽcVR2`j͇O_|u ]>ԘpGGjlDGIleqU ヌ|yZRzi克zD`Tb nj +@HʃMJVaGV>Կg0P@m; YJ=̋6% gqALIQ7/:OCUmJΫk!Y44x e~ሎG|D0|T>Px!<a~I `/셤ihh@MM o>|7Dž pUWO>SKvI>OFii) 3g`۶mFxx8J!++ ݺuC=MCP [iNԖAg_Җ灁CAA>#DEEkҬs+`DP&ɨ999HHH@DDvޭÇ+uyER$-Ç&i[_~ ;GSb?C߿?ϟFBx'_}2zQ{ڻw=]r%:t({W–-[z4hWBsǎ$4YGf wOS1"at*$Ų7oI.\UVaHIIqH"'ODyy9 bddd 99YiSTgEB?5*ɲ=|W?O:L޽{K@A\"T e];SA_~%H?? 
[ovخjl"kԩSlF.[<RM7݄_򗨨P={7|zGٳI-^ӦMC4Yf)rX:E2K.}ݧLO>DSNUqxs*SO= `ƍ}HHnV==#Hg?hR GG»+yf5~4I4c;w?45:ΝM㣴F1ݑiH'1Gŧ~\LLK5wB@H_T/55?(Xb%QM2Ei,=  HB$1cƀ&cUmPK#4rH .eeeXlr&u-(5h<ӊ+rvCsٿ Q`Y+mYql| W3V^xԘ*++I eNvܩ^HУFRbYh-_\ Ϳ/Bi?g+7i$*dF_$d&Qd$-))Q Z59۲*Udd>3E`f̘sQ&% clI+VP$ESnӦMMvv"E襪 $łB"HH"k֬QDn_K/T}ϗ!?C-Nvj#FP1MԌXh21$y>:~"2/C@Kiv=(ZqOƎ۴+IVGe$:#釢Zԃ>Xnnݪ|lIbIR4H$?  .HMb>X{ wH6KK72@9?߆/*͵T={I,e#ټۊ ƍ$Iܹg4!(JsOR!! O}SaQۄ*EӒDH-ZH~E>4WI4%y$=eIR>@ٖN:)C@HeJ3hM c|ʤE'7 IvjH4!qo|H\$:BsEui||M嬦D¢yF$C2⋕l`tb/"$Y C#reQ?.ΝskIRZ&MYK$%/$ `-_^Ԍ:cF:IBF u4:ن^cd.fU'uP NVr %$بU1R̋ɖ—[t`s{Z "GI! ػ [G swH^|[F}P?|_Gt?"IJبitoƹJ\\\d[G/+HR$'9PEǃٚZɹrz/gd8<8pf1ʼnvL}Rzx;DC48\! $$ДĿypj;+WIu_E'?IZ*Bh* ]pU؄,7n3c}lj$44]'7L=L*3w]7~5BRZZ80_#*wrGHC9Ab>b-{/!);uE ~Q&)N.cD|LGip询e.XnӥE#,47*,Y)u=nW7żR t9HSpy7Hq]:)rsn?7p;vmA?ir.B&H?ǯ]ϴ+XĹͅ:wb6:gi 1rftwrҠ|O4aFHBbP+d6 r;i+$e\4xy߉#Pp%$-wZ)CLM$d %Al++i>[^i[nTGs~̛ΠPrs,f1vy IYKDw[Q{,>Dm[ $SÐ=N/I[TT. ߉9xaIړL0O#HRuT1wlx7%RHh铠ς~':`j.ZO1O2f!AsgԪb(5h1K:jg(xf/HPbvTGeO#㫨iqEj[HP[Tfjϫͼ➮{OJŤfE͉ LL:uҶI\FAۿTq0nWC ϙ.FǶ$s̜*a6  u$G$x|G?d nFh P~I7o!QCS *c%)0$&:A}v]ujߞB2nQXJ#Qx€5]@ *(":xΎZ5\))m#@m.#9K1]@_&Ltnx IqQ'GI4B5LƊIiMbsERLëɸ⯲}ŘE:;qQIuO:,kѝm6J/U%aq1>@:fAkkR0Ȏ}^(Ϳ€E r|CFYU/5b5HJ25'qDŽ~'OHba[K+e*x)A@Ö?25Vj[4#) ~٣R/xA"E NM9b?{J_*cdz#̔r1*Eٹ:*nw44ԣ48sQ!I1A((?V7 EhvV>?dA8k'SE.2Tqe-~tWW="}L]Ǝ8RֲBL?5u:,P[Oed^%FSK 㤯iJ+YnݪLzFPz|{VTVA!_ OarCmcIp1`2VYÝA;i奦ޥ$U~(KB֖qX,tG+mPԖEL+$2Z!6ԪGwyf̘1n{IQ +㕭'qcu#z`Tr7EP85@>_I@|R9UaSFݦ*3`v:I+byacT.녾1+jLom TiSFaS3!+HT$&TQQ.9QSIwʱlUBq_21->I1@V -NW.:{NVp^z^e'1!:;3KqĘꏫD#"ȷWN#)޽4V^obڅ[}) j||}C!!%ש6N ڷ6р0TT7^>=ǀ!x8{:g]BRye5,|Uj4V&&" k5>1:HiP $+M>0}$[7n77qA,:g)ƅO":ߥ6JRܱcx[_FC29vp^q}Z ICm;vcGYbݧm1idą*WHYcABA+ewvm$9*k. 
89Q>^ GEuV_iEDa÷&><Ƚ~zu,fƌ !Nd)0Z!h*)T?>;>8=?8j!~x}45Kr qlc]JR}j*#>(M6::kgHG׶A&)2"1]hR'y,;KRv!΋-פ % 1*a#JΒm$ OXZ:Â|:\-aIDATF)nh2$z YoTz nD:LVHH/փ:%IM9-.I[fcgYmDFzg.n Ԥ.pUծ2LܒfxFRSgo5ySBRx9k hu!=9u*I1.u)*|0a@>gD ƥF p1^t!)t0s&''ۯsIGԶE|3Ce)lY7+M*22ݺ?SS{v,wjR )hsBR{jT _ғhmSIJۍӆJ צ3'C &"AV2hi̽}OSS{$%>ZҤ挌ÂԈ6[2Lꋧ=jxu$$evql <eѸgO-IJ|ѩk!$12 SzX7j I<-Il>Z$ӈʨi=~+BRz:KJIÎARf:(,c|:ymIozҪZénih@]m .Ԟ|e餘 WÍFXp=$)Z'uolD\ܓ`lj$讽y5ЅV< u +k#UW'9ءs}Ą *+knPZT>$uC|?ӥuVIFX/Hd>ވ S i玎k:*YCn}VY/2* ]tYr7GqHH&~;3C[h[5N!)Dh0YSMQ{У'Ƶ#blq[vNԖEo0X($偷l~eIlwFq݈MTniqݨAŧt"x IyI+byacT"Ve7&XxeUuG* ؔQVH ~a1n(G[?wH]r,8eUFֶ{u:UC k; >Ԡr!EH54C}5?^^^M>ZkqoYOH :.Gg$2t c͗8wmSzMLRp˷4ܞԠgj$ELG%7+0/-y461iY: )##pY]\LJbۡfqwp~U!W 3w&B] TIǍYjh&&"Jђ4wM$Ҿ8SK/GCV9ՏK뎸S=ra ʋz?_lw04v*sǎo}yg.%9GhdקP9;j$%VSHؐk[-Yux|(ߠcUkpTTlm7"5:ĩ=!$eRǹ^Ȏ^ D>GkN! ?oL ^zx(85;#vtT/̟h.JH#$eLBRt  +iI؝UL}!*=p~oeXQt0LEH)$eLBR(gg@JOclڹb@f=*: { #I]8tPR[˷E{W\'/5Woǰf+gY=x`=E*[<л3DwhLT̖Nnd{)\,z{y t(uADM&N;~qFnG=ɱNŴp=i nLMʲo}H5ugk|mn~(,2z3Y;9z$EM=4'}z\OKlijs{]k KSy\8\uҞ0.=b$󊻇+(U.F&w?#)}Q( γXc~%=Qqw{V]vR\\Y:I@={gIRt[' Ê9v|P]oGm )7 @x[BR WnGRLKhs >24F%;WS/mXtmUg6JH}ek#cef6 VOUiӥER'`ol?[ƫ:Z=,Ŭ+iV2ƍ uݭ*$$C"@R*q#_I̫듲ݧ[;`ɯfb!ֲ&¦?^eD!>qhO 5ew*N'#G`޼yزe z0x`s=6ᗗ}a67[HjLJ-%wэP+Sԅڕ]i= XzN]Ӥx? yS]!+jx;/^5}|ew I͞={iFR$4̟?&VX,\ЦfkԒ@?3*qSv€(|(. M1VTͧKH͛UVСC`TWWǓO>0\3gg˖-SuX^~eyeee!%tY|<$mF;PYS[!)푝:2331f,^HVK,#<46l؀u!''SL~@̝;WԚF]vaذa}QQ-ZƜ\AR4X&BR;.!)j=vjjjP__+JKەFE_WԽޫիWcԨQ@LL |}}f*c>) m7N?5 ^l %0ZxwIEP[I̽ ÇݻvZشi~i9stR7BRoIW'6궓X0) ܽY֮zGiD2 4h.!):V8&EmƍHMm|yHR?ZJ=q';mݦ$URR_Iɽv~O;FӦMk"):{9Eb^ A ӈ(F4}?SA!)='InwXu II)N]9oIfԮfv%(-fr1WxH ܴIYR#InukE_Y*橀/\hLfiFyZ^ԧE?ޣ7yH ƦF+g*; PXq*xKEH* 89]԰''ꎜvrB1 ;xiw)Qi玎k:[qaX$%nImfl)'OJz3d[ ۄC\vۗ{ RH0d]@owOvEꒅ錇 I9?Ú8)OM%{rItjP-$թe㺍-wF$F鳺kʧ}8ʪ+F3=]Fu-Ʀ6 |}Qۣ5͚=IM*wIE M۞I=.0'NKHʉ`;QBRD[HnT7 z+k{{y<[mq~㥍 ŗ֚{? 
'&v!RhWG@H%d,Bd-NKfo,6</|pjyL?^u;l#$MrS Cz3Do*# $e$41$TT2-LD\zu 3 9yAe+v$orcretblFR QAX|c*ň zdS!)rw("&kmu,{3^!-|V{݃ +|BR&]b}R\YX u)lϢIT ):H7RWhB@Hʤ"s1Ҟ&%E!)[sv/;4Jj^MU 6A@H3p[jmms{> ֛6 TyBR!f Id䂀G $bI NF.xBR!f I ܚ w //ɓmi.mwmBcŊp .4:B@HʃI͛UVСC`TWWǓO>0\3gDCC-[갼˸{1o<BT <#33cƌŋHdd<#<6l؀u!''SL~@̝;[lQZѣk. 6L/**¢Eט] `/셤CCm +o]iT$J{zj5 /֬Y\EtR{# $eoD]?=s﮻ñ{n]/"6mڤ4~Z}>g,]x7\HʅCrc8OOOOSsjViiiJqF6OVR~)jOɻ;qm)b"I7:$/" $ y*'5uTe>3(,,ĤIT^^CD>}:MDRt?sĤD@Hʞhx_I)N]9oIfԮ@K.QZXK-!BRn "5'gY*p1g!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#!)Tf$ !)SS&#I<؞*@`IENDB`docker-1.10.3/docs/userguide/networking/images/engine_on_net.svg000066400000000000000000001070051267010174400247600ustar00rootroot00000000000000HostHostHostKey-valuestoredocker-1.10.3/docs/userguide/networking/images/key_value.gliffy000066400000000000000000000221351267010174400246160ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":277,"height":209,"nodeIndex":171,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":3,"y":3.1889969482422202},"max":{"x":277,"y":208.1999969482422}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":1.0,"y":93.51999694824218,"rotation":0.0,"id":152,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":4,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingR
ight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":134,"width":42.8749022673964,"height":60.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":1,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":96.0,"y":130.51999694824218,"rotation":0.0,"id":153,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":5,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":154,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":155,"width":42.8749022673964,"height":60.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":7,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":197.0,"y":99.35999694824216,"rotation":0.0,"id":156,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":10,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":157,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":158,"width":42.8749022673964,"height":60.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":12,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":114.0,"y":3.1889969482422202,"rotation":0.0,"id":160,"width":48.773475410240856,"height":39.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.relational_database","order":16,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.relational_database","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#02709F","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":163,"width":88.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Key-value store

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":171.0,"y":25.199996948242188,"rotation":0.0,"id":165,"width":72.0,"height":73.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":158,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-32.613262294879576,16.989000000000033],[70.4374511336982,74.15999999999997]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":141.0,"y":37.19999694824219,"rotation":0.0,"id":168,"width":4.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":155,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-2.6132622948795756,4.989000000000033],[-0.5625488663017961,93.32]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":136.0,"y":42.19999694824219,"rotation":0.0,"id":169,"width":86.0,"height":50.0,"uid":"com.gliffy.shape.basic.b
asic_v1.default.line","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":134,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.3867377051204244,-0.010999999999967258],[-90.5625488663018,51.31999999999999]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":22}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":2,"dashStyle":"1.0,1.0"}},"textStyles":{"global":{"bold":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":[],"lastSerialized":1445552948967},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/images/key_value.png000066400000000000000000000311421267010174400241200ustar00rootroot00000000000000PNG  IHDR)Ę\ IDATx^] tUݙ'2B<$Y@Ep@ " VmZATڪBVRAAi@@QIk| $y Hs| E"#$ePHA@! $%& Mhlj|||l^/D@Hʑhzؽ˫QTU! 9eU+Fae r˪P5?"8=%,!O "<8!Bf6\!)#[m;YTZ|UbdW*b|}Y%FgtKztA _*{/BR&}Fa9NW`; |~sQS^]q>Q.{<IÎPN16[%C)c$F ,F!)cGw ʰp^zGJuwE#baXW;a8S{e4RM8HR3܌`n._> 桽؁X7˾:o"$eXkaI[t&\lD;: xiA-V(/$e۞ORRTp "K@E]=/Hw5a%`Hb4 sMʝ'T˙H!) D'uAHI򶭑ur2dŭZ2 pǭZH%ѝpi*|ώa,7I9漇 j5"ccX*;28jO)Fw.R5455~>bÂѥS0b;6+CZA9-Ǝ<Զ  $$`]y[=$ZH:$,n5"aA @h?}}& klDm}jPTQj㼴2pO{ș lQ2BRF1 $eLj.bC@H ZsL`DBI`pI!)3!)3[W& ƀ (]Oc@H֕'e1 $e#Jē22uoI` I<l]xR&|?64:}zdA &1ur˳;qɉxc[I}" $efVĚ}գbxw9)ƣ Ilg?{A=;󜽗o1}r?92]O;x/o=˞F":gT2\yzFy! 
$e089|,=FG$F}Y$E}]Iޅ58WOfbluBqUKz+ Q%ϖ!)fO *kY|w{Z.d Rm0҄@_|Z,, !HU^idΈQ[KE@H^LR/LPШa*BU]=*kq<uU@ヒptDbd(BT@Njt jNRG! $($Mvj`F\{ٳ'F@H{I>YYY())A/$N:'? BCC($$$^AII9 XO-IO9{7tYvv6jjjH)//9bbbp+"Iiv IyFQAA233ѣGDFFb׮]Ð!CV!^~~>9<)şWy[RG" $H4ׯksUI>,x |ݻw]#zJSȑ#1tPc޽J,; 8EHT$\\r%waETǏAp3BRn6'I/ٳQ[;v@nTH/x Su]oͤf<-zG/~ߣRn?Oq=Lݷ~zN||"3f(Z`&O07߬<.uԩd򗿨bƍ*8q"?/g1i$V˗/ݻ5$KSma{Oz&z'^z)|MvJ1'BRn+_DE$SNH?駟*!նmo+OgQ_ŋcܸq-z?(Rg$?x衇~zE$;SyO?x ~G@l޼Y{?M1e6>c*\?sLo$@^#Ɋ$-$i$+-goM6);qbcclMy3r:W^Q^=_|Q _L$nAy,K,Aqq"IvW!Xo>"^yxte˖)ќސ'Eկ~<.]`…Qݐ$_/P,ԼE1ܹ򞪪s([½UV_>cզ z0}$㢷gO0A-)|I٦$={;o I5jF C+V.S_<^Y$/?^yA(ӋB XB&˽ITsiAR?U(Jbڵ"TzEo+Q,9kHoGQؔ)S@# YBJs'+W*b(>} ==]=Xfs㝀@sK.$OUo~իW+b"0 /EKА?C/=ÇK |210gY'OTDF}.]5Z{R$)u]yNgA]yͳEEEʫf{$0s3^>@L[nUŃ$1$)vH~$cǪ#""TH͍&I1BRnB!p _;C_Ws _TkɊB=zTI$  :￯ફRB9 ag !_~eRYzbéw)x|x?jcs3 /Z$$3)BfJaJehG/^$e(RܧvPb>lS8-yRlەğ TKIj!1WH\$:pr /B , w}WL$,""6<_b6 C0uѣL˳ *$QbHH=ʾ yI Y2l!˫HRRdM s]RKə?YPkш{<^>8[ЈS{bHrbB"Г($%zl,ةѻI$ N(R3bg8kY@ѓ, CCLPs&͉,᮳ 7BRƱE-nY/&AR,faNjKҤg3MAم#CZ e@bIJ?g[X2=TkH /,n@)vgN *E")HQNF|`a2)5/%=%qꝹEԡisFwrҳ&EpuLnΐLz'$z/x"_BDF&eau?EmμQfҥ; *."^ǘJz;ԝXx˳hizMN8sI1\B=3vFg\Igzu-zDJq}n])#iA߳~I9iTXB$<1`h'jR@UL`gp#N[!)=\cSÐ._O[f7=l/CUzl37Σmp'=dĴNRHh-XK0 N)-`7~Lo#cckH 鸮_;BgM)zea.7I=FJK,iGU$=ĸE41#^e15Iqpd\4|' 2~ԼhR N/TcƌQvQ%)p+6%s2 MJ?Č35!ywˠ'E;#)~;1,(5Xk>IhyhR"w%UQl)@T$Ű-2l w׌AIѤͺI)##CÇ)eLNRJ1S{iत$fK8IuC;XF{\Jzg%B=ޓbMRp+<2cˢ0/?RGrw S\E>S<$kǑrʜ;1kb_jZΝEl `ZFiZ-_ 8QSw?ULqǼ8aJLZ)C<њ y2عB*TqeJYThll@I)f!:b"4 VS}qQN䧧]>c g~zPܿI'$$C"fƓ*ӱpA# ~Mٺ0 3XG&AEM=&Uכ.V]"bK"o\JOfko,n'FUӑvR .Yc{`Yj*U7."N/ONMJrwݬWQd$uCMjI542]pqX$V誛<(zm}ℤAP4)]ˡ[A@b!rH*˷†y:uxPZ-#785HPԯd#(!)0 I&x*\cɠNj{ٻw?5Gf`ɖ5Oʀ_Q4ch}kU[y,WLKˇn=#xyc/^IRcSb^-Q*UPVys7ob_UAd})?[!vѤZ]] U7Ӳ"uTx֊WԭvElD9n{}}^mZ|cM'\ ׊&$T6'&7LF歙wX5S}Dg"P\?Z7$u% oR$چ$)i->=?#[H:/NZv=cB[ZΚS8no.|C!) 
I3_G4c,a\<)cZIuBRa($%{FhR`j"!)0t IQ8ohjҴwԴ (kI=3?=is'&ŪDoIePkwcҚnN!-϶u/3=}p I3幫%ԜWq5,$%>܄D4b,{msa )ǁkEBR1c !)!)M#G4)M0iœ҇#&:w4D&^xpӅU-f(=#^+xZV!)!)MohR`LRvFC^۹X LF½b''dv-HZQ T-!}'%Y  @llzro4I Ii9كlٯ_?M"!!)!)Mc̒KIDAT*O*::QQQEBRBRBR5).Gc$Sl-.pO?f!$%$ipv/-- +#k3gYuJzZ%L:2K7?~ј5f`_+ך@<::KONP!)cecqYWҿzu 5n`xqm:iypzP[Ma|K<)!)ZeX1o/ RO"y=M feoN[Hʋfj! 2˴VWqy"Ćb٦ T-:XyP!`~s]!)m0 IiIUnȜVw{2Jtcnaf퉰 ?A76j'Oa'IiRHJN S*pgq,܊u. jݾ\|C~њha+c4公⩳. 87WgwPpl;Rx{r+@-V{Qg,^;꣞mZjPyX#Kw^]G{r*I%}"q4&;)rޙyU((>=ѳ.!)~9?b^;McZ_`b{CWg۸3vL/xoG6jtߒyL^꣡$Fcڈx{wNd"Ihr Ikҹ9T4h:ƻgLM+}jtk Xn#Dj3.N~;>ܝ#<ǿvd'̒}9hO*$ţ3?ޓ?5 >tUS3PZýG%bθ^!hZBR3}6q IQ(Lm]-J5/ i[j/9L44S25쳉KHʨBa|DxYRB}jO Vʛy:e})?[ًFMktƩۇ{lrQRe;o^$5d$#(BFE6IT m/9^tRH -6lw8㎩;IFOh?JьFpZ>_Bplu$5d=}܇v;{"1X8=E-5{2-6Cc]l;ɖ:=:c7Q;r+<+uun߻8@y#*T_hD#&!){Psn#S8iohB'llgm6rUշ:a&7$#r!)3/2ݬIjZu@*0:amy܄r-)>.D܅+$dQlRU׈?ޜElP(/cyc =Q\̍"=$$eψ:BR3 S#Ӱ<9 Ch=Mp[Աc0k,lٲ1h vc„ v7[%!)YkՆvW}@sCf̘{ )׈#0w\[r%0o<ꛭ,q Iw}ؼy3|H?lȐ!x衇P]]>,[oaڴihjj²e5,Ґ~b y{`?+ ,h&)ŋcΜ9`駟bڵ 7܀> 9s 5=;wСCU͟?`8﫟](lȑ ;KV I9Og͖!_C)00PYJЖ{74[piF:kp'qd=QY<$89:=)M+q;#y$\xeYu=J5DӞtzAn{z (ڏ7Xk7޴ d1`em{IcYw+tC fd[<) LWI 8''DŽ .uvb ʹsڅʕ+͛gW}$}݇͛7Ç ~,ِ!CC?<}Y㭷´iԄe˖kX^{5HKKCRRي+0k,/BT \Aqq\qX`A3I/^<Ƨ~k"337p>3c̙زeF;wbС~AAz)tznOt)rpz;555hllDXX/_[yT$Aʛzj*5 eee?V^,EtRG# $hD |?[޽ދaÆa׮]P=Yf 
^y*l…z+.]nݺws!)߃&$tpg5Wg5bI1|۰aI;Ct)zOɛ={6.EL$"IENDB`docker-1.10.3/docs/userguide/networking/images/key_value.svg000066400000000000000000000632271267010174400241440ustar00rootroot00000000000000HostHostHostKey-valuestoredocker-1.10.3/docs/userguide/networking/images/network_access.gliffy000066400000000000000000000666641267010174400256630ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":437,"height":368,"nodeIndex":178,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":5,"y":1.1999969482421875},"max":{"x":437,"y":367.5199969482422}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":126.38636363636371,"y":74.1428540910994,"rotation":0.0,"id":129,"width":291.1363636363638,"height":149.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":199.0,"y":150.96785409109907,"rotation":0.0,"id":114,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":17,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":95,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":5,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.946428571
42857,"rotation":0.0,"id":96,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":14,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":99,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":99,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.1785714285714448],[1.8600000000000136,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":97,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.178571428571729],[-1.4193795664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":98,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":
[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":99,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":112,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":209.0,"y":284.96785409109907,"rotation":0.0,"id":115,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":34,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":116,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":22,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":117,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":31,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.178571428571388],[1.8600000000000136,28.285714285714334]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":118,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1
.178571428571729],[-1.4193795664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":119,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":120,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":121,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

external_container

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":122.0,"y":150.96785409109907,"rotation":0.0,"id":122,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":51,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":123,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":39,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":124,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":127,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":127,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.1785714285714448],[1.8600000000000136,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":125,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":45,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-
1.178571428571729],[-1.4193795664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":126,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":127,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":128,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":192.0,"y":120.19999694824219,"rotation":0.0,"id":130,"width":150.0,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":52,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

isolated_nw

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":1.1999969482421875,"rotation":0.0,"id":134,"width":73.116,"height":102.32,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":103.0,"y":53.19999694824219,"rotation":0.0,"id":136,"width":119.0,"height":45.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":54,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":134,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[35.115999999999985,-0.8400000000000034],[87.0,-0.8400000000000034],[87.0,55.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":16.699996948242188,"rotation":0.0,"id":140,"width":150.0,"height":1.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":55,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":55.0,"y":112.19999694824219,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":160.0,"y":179.0,"rotation":0.0,"id":145,"width":10.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":1.0,"strokeColor":"#00ffff","fillColor":"#00ffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":31.999999999999993,"y":189.1999969482422,"rotation":0.0,"id":147,"width":73.116,"height":102.32,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":58,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":346.0,"y":265.1999969482422,"rotation":0.0,"id":149,"width":73.116,"height":102.32,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":378.0,"y":276.1999969482422,"rotation":0.0,"id":150,"width":56.0,"height":26.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":60,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPosi
tionConstraint":{"nodeId":149,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-32.0,40.15999999999997],[-47.5,40.15999999999997],[-47.5,28.0],[-63.0,28.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":250.0,"y":282.1999969482422,"rotation":0.0,"id":152,"width":84.0,"height":96.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":61,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":145,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#666666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.0,7.267857142857167],[3.0,-42.96606990269251],[-85.0,-42.96606990269251],[-85.0,-93.19999694824219]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":103.0,"y":242.1999969482422,"rotation":0.0,"id":153,"width":54.0,"height":53.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":62,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":147,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":145,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"no
ne","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.1159999999999854,-1.8400000000000034],[29.557999999999993,-1.8400000000000034],[29.557999999999993,-58.19999694824219],[57.0,-58.19999694824219]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":250.0,"y":286.0,"rotation":0.0,"id":154,"width":10.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":63,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":1.0,"strokeColor":"#00ffff","fillColor":"#00ffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":278.0,"y":149.96785409109907,"rotation":0.0,"id":155,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":64,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":156,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":157,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":78,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType
":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.1785714285714448],[1.8600000000000136,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":158,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.178571428571729],[-1.4193795664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":159,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":160,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"c
hildren":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":161,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":296.1999969482422,"rotation":0.0,"id":162,"width":133.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":81,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":5,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

external host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":337.0,"y":21.199996948242188,"rotation":0.0,"id":176,"width":98.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":84,"lockAspectRatio":false,"lockShape":false,"children":[{"x":13.0,"y":0.0,"rotation":0.0,"id":174,"width":85.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":83,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":5,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

published port

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":null},{"x":0.0,"y":3.8000030517578125,"rotation":0.0,"id":173,"width":10.0,"height":10.0,"uid":"com.gliffy.shape.basic.basic_v1.default.ellipse","order":82,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":1.0,"strokeColor":"#00ffff","fillColor":"#00ffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":85}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#666666","strokeWidth":2}},"textStyles":{"global":{"bold":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":[],"lastSerialized":1445536836098},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/images/network_access.png000066400000000000000000000736711267010174400251630ustar00rootroot00000000000000PNG  IHDR@{ IDATx^xU)B % &)( k[V]wkXѤBB  $|u\;s<3||'R,UIiLDQHZXu h*33bU_+FC SwmiTD@Q@ ipN<(VͥĴëVkki[ڧ#p-mK( I Vm]=62 Z< Q{~mN,HZw·wzQQ@AɻD@Vk= ]Yʅ;M8pY7~1Qx|F:lۧcK_Ҩ( @]ZXW袋pbرM^۹k0rH 2K,AttWmrJmzU-\ Vh:- 9pյfn#-բϾبX&4$ YYYk#e=3гgOt޽k9sp=`С^]oMM h+$Mxk{saI'HG4qaahnV6 a}n%ZHz' xzw ^Qh= `& 5Zs R">#71}t C[9ɾ's* L7n]B. 
cٟItq)X>M*`%IP<|҃ѭ$5WTT"\i9Csv'ݠݟtr^[h|뭷йsgn Ws9K/TE2xst&$$(ܻw$ 0nZFIJopB*n}k)7O?ࢵ{%)Xv?e~P+`$G)hM9Xqk}Zs, 1Zx h:ZWCMM-\Oٺu#:tHE㺶X{۪U+m@R )4IUѭl߾=JKKeLDeH $tyVŮYF-àL]ږN.qr y%U^Ή|#zFk23SжeɁyf1҂ $ 9:)HFAijݥ} θsRT:s۩KpNZҙr( T@ mT"r<^uVuk Qe{㤧v[rUUPQQs7 0?Q]]}TLIIѣe~Ҍr( $d>zVV4]K1rH3P/Q[lV@ ) ZHrq\¹I](ИIdPC$+WTt.tcر*;Q@\H $@\駣y1n8o^( @R ܲe ~W0ɹ0 Ϙ1cЩS'y=( tU&dH@r۶mXdQg{ %!RDQ@wI{S{n,]v:zbbbؠNQ@Vq%dSIڻv#FsRDQ@KR,I`$v, * XbI%)/DQ@hB$c:ӹY3^ %sG9-]bHmgMG\ xKr4geZ:NKŗEV@ IѸkj_G:okD8Сu$*ƅ/n33bq^}Uw7$kI( I'HI'%Os[ttOOF '&`p! l%G'"2< Q )dH]Q@Q@ k'3Qv>-$+^[Ʒs2zNOATHĒ4:( 8+ H}"$U^( $In w\(V@R )פ( 4@R )H R+ѭsN2')رcG]ӣRY0XbI%)wcQ]] p߾}S06IǎѡCk¤Q@ )H $-y|\jC0ߜUV`{n!H( t-''/rQQu,:̨3,>:Q6X'ɬ>7L u^&$5hehY"!311QiAо@ #;i`U%"< {*k0_+US[ &ţt9 Æ2N[ryfG4t֭ؾ}C7-}9[(઀@ =Z*yMmZ[($ŀ,Kv@ E 50++ 6lhvwW`(g5XR3HVYnDmܸQR$( ".PN,H $%pGSi5^'Tm) %#!BV/x~CUXiZN`( H $C_$ B-[n 5|=jovD%KwпH $! IqҥE[h-M!@qŚU0H:AupY2#S(bAR6-#&ҊnT\ rPmC{z -[DRRRCth XWt)ksV,[eFF[Z&N<&1 wOMRk_Vn;/ᒏs~Ws2P}5u*w7i##ozqFIǜ?lʭL6#> B,IX@a @5qId>rưHdΐ:Fᙅ[ϗunL@>$tkB?K(C0XfrzS4sqƦ_&z0 ObZ5Q IZ"֌Q㒑EyCRܯ5nΚdJL;!};]'yֈ (;}3Rжeu`$ ?#cQ`.Uit.oH_NI-VvuZt`f Jܭ$)'t_tTw64sS[CIOTi;8WI7=tҪ $ ߵ fUYEyBrX|&b٨(,?wd2X ^?~{Yy_-[f@O:P][leURg[ՒmjZ `'peo_9cZFXFT}tv}! tHzHfx9묳d-`CJQUdCW׆TkSMF }&&{`Gfxؼ@2!YY]2<܍@u7_3T$+,iD!ɜ`w{ǖ;`&*r6A4N\tzw>gkȢ2Hi^BH3{3]{*J׿ʻ 4H 9HCse{Kٜ7Klf{)HK?*#=>,_4@H[eHuE۷o?D㺃C^T.`j{1 :K/TXx`2;_|{ܸk^4]zMAm۶it SQObbG2#$y:H;s-+2?} $VZ#ʰn:èE}}vfRR;т2"z &iU+`\r%9h[+lѥK*~cߺu+jjj\Έ.Y&NOO?o(]HV_)C#^{;ݍ$4 8h1DӾ@Z%F|J/o{OS{v"P@ )"g86&6C#Br9Cpm #$A)w $}dtUr#T,o^"-/ەAfI $}f |` 73ZTQzV™?|qو%\+ē6n`(`$xKx|{/[zXBpԋl[BQta1U_v<'+C%A+I}wCT]AÇ0aտq5`ȑ^| ]/s1byiٵkN:C' OP< .0^bc"sPv!kH~W~ꪫB Pk,Y.}N;4n9眃o2H \!sq1x|\f@<B2Oܫ"sFsQRN5Fen7)d!I+lG0?s=X~=ڴiB=nVEy !MVZhݺ5.kG-~Q^^?_|1fϞ?3nh \ǥ{x'hpg6J޽;VBBRRR篼 hb1[6Zcw3ܥX#n1{iHznPG,O Z~zg}\/";;[Ҝ5kJKKUb{Z}nFvz 8qb̛7 @[χo<5E>SpB|xᇱaZts? 
m |1 -teUҐt\pO@ǢAR$o2Y6 ,B`|&-LeiU5k( 4SYYA w+s$$5НۣGu~t+O2Z:i=#/9ghy,+}kdݤ|O}]tAUUKswO=fNGGrN@dNB+Pd3I+lpUVyJyrSHҽsUE-]>Z>yd<# 짞z 3g$8x;5z z[7e"EB."*N-HG:I0(G['I Q ah\gH ѥ?u۶mC>}T d? bQtNs AKpZ)yEgb cʕ?\wީ8 h}/vW$?VAdhh+VPcL5  5Z7JHB9d漮dڦFd}Ҹ$>,H7kAzz5^\Ծ}{OW5is?/X 3ZW'͛5F zF(JV&F@>me Y GqZj.-=zn&57ԦkwΝϜ9Syvw$ b=z1ty|M@rZ}3uf[gipjSז'$A/qF_s/,ys&Ϡ˹wfb ;w>c =<5fq8yܭ /yͩV#:1g3*ˈtn~|:imb,]vts" FeeDB;^-fLEqNQ݌&iB /T@cѳ$lSsE^qR޽U[0l0ung.%1T\%}A iҲ XVr>VLp\jahfιd\K 3Qr?,9f%9=Eѳ& Du<-?CmY;Kúq%騨(u([<>`l@(yyhK^J(GA#Ih$DuNEF+a4S RŠ_&teI2]"ݨ<A߿_FT|Jk%]}ZuNnLL6҂a0M Js0ds.}UHP")m-@Os # IZv@CQ'F68%ɨUx]!IB%ŀ.A"h2!-@VW΃sWHj筹NH0$/b4u큺F@PիU=w֔i"~$8GI M"YG9ݧ Fe+$dd$-9@M(]LiHP=UtdlE b!f;6ͥW,b v+їM K/C Brr5\#¢]Ԅ$\p^Gmy$F0d\|9TsCKx1ʔV*̾hEҍIuקe0-ܢ^S3۵W ޑ<<=J&x ˵\6nm0չ}~uk*V #$x1!^Siф`BЌT-))1r>|RDPT$Ӆ9o>Kc $$ *@z;ndj!lL2’i3|t?!Y}̭Y\LiCo'CA#iс_DIG+(fT1G(@(2KzZŌ =Nꇜz6_V)_V $Y!H@zb=rd>SCxȥ{VY5S3ʍCI+GVE*zRЍ5*6\MNKDU\zfuuG'J,AV@ңa=$ݫ)BsU|i]r-@2/Zzg-.\cE@zȸKSU),H|VF0CXwLV0-Kac ˠ)SI i~<m FɥzE| ,X[fzԫa/GU EpΒٴE>}zPR #ӽF;)/_ZMs4-o~?WwkD2g.h߶c]+VBaQs\.Юm<EG\Lgtlp[sIҭK.X#VeR ;[m؈$Y_H`O0R 6`g—1&I: 6w7-ހqlyŘSsƄN0k[ u1?'^7Bu9WIIݡZY>9( _^ZCrbj{]mٍlDˈp=?L@rC^1Ay$7m+P][A}!}~! 
d4-%y=܀xH>Ur&)?~mh/|yiX43It_u7>#֍Ū-_dR|3!YŹ$4 JN˜wykޙf8/P8tA x;Bj/|yiyHIِWORCJO!UP~s@{mΎR|FeI16LAPZbMTH/}7ڊ,gϞ^—qE3G?Ruc^>_IArt$䦂,XnH3;J,FH./8hOIZuM#k"qԩ렽e\j<$ԏn/ƗKѭSƤ!){K2{[ Zn=uTRMoѭSF!݀vϢ[ݍF#4:?)4 F%\ 9<$h+b,\)79KlH1d2,\ yJF ,Yˑܓ֦b+- D@R ̦AH+deD/—qE3L+xb|"-qRF*M@.E<2Քv2|,5uu8*>l$ِ˶4XA iP`f$J͗[{KC2K9J9hI#S{K2wG)#G9qD Lo*w˳QS[I#Rbu[Zk)$٘5־ѽ+$ ^z%HAe|ԉf!)\W%~Ònp_11h~]CHaܠ$$dA{H HWC<^qkW+E{KCC G KoQ=8 =ym˱dÒ=( Mnw EI*jJv@M2kUW]'y.l[|'`$/Z5% <$ۄ mWfnU֌A}{Hn]e>3' Hu[uԀD6`VյF/#},^6^z>@{K˸yHw,wW.y OD H(*NJyc%͝%{bVɑ+FiJM#y]$~.WSğM3G+h/|yiW^43I6 J;vae!aѣKRxVmtX%"΄vW^R4ID8 InnIʟ@ҟ4R%%%MQF|G7e\z<$W;vpW ʱzS$K@Lg!xd#$wŚyʒ8VidKǮ'vB҈˵9tIO z;CUk/|yi^43ɪJ]wc]N"0(-M@rW)P}L&\Ee{nS>j1(b/MiӦ4I"x+A—'d~&ʮrdn.@Dx8ģ[tGC|w^l$'#Ƅ붸|2sQ[W>$;wl2m%_|ETW7=\+7N iT AAR43VSӮ|~݃J`C.!~{HuXI}LY%{afBV>^CsֽNdu2>Eoޒ,*ߧ+xqlں][Sz HUm%BW=M[V7l%xbYIm+@PfYA iT AAR43%YXW`-'o"‘ܷ7:GHDT[,) ńlo6o!$T[Ftd7$uwUA \AZ!z~ uʚ:c i]=Ŗ Cވ6ɲ=%aI&ؒ uۧDG8\ I7]훫@~ 9uT0[e.[t2h6$.j=$s t.۳y $D[{!o{3WM@rϾ PLΝ!H8`7$qH= AQg  [G[:70\hܴT={!}!ܳw>N&ҽ(((TN֫@RbI)XE9S3WUUHmhn^׭{qG'/#!)/zݡtT [{EBӻwOt4}ؾaINQGm;TNe7bIz>n(2Qg'|2P^^XqzQQQ15kzm.P-&>HKȋ|t Prac"dh$C :%LqںZ KOB\J4Rv4ǞFt%ZAVWor+aڽ{79[f 6^\ 2ˏR^ZGZFhvJ~ȟeD[/]=#K@l_7@SaaaOdd!z~[UlѢvBe IxҒ/`sv^߷~ \*I;՗M)X^E  ݻwm,S|eS|iTDR@o#.Kܭ5|~6kdxx8jkkN; ht:&MQ@U(HE$0/\SFтY IC^uUjWIDRG~E.rS\D(..{HE$m饌 \]̈Uz]1 oM?1Q0ep7oAL+ԬHBOTF>hV‘V$]ZKD DUȆ>$ KpYص.xT&8pAZ]I3ұf>u=5X|i=m0JHΖ$bWr$TW|W%\s hwuʽa[4>mh ?l,g LW & Wd_̾ I+t-v[Iۆ4܌ p 3L+b{1vXKζr ^}UtLC_|7T*˦q^oAj0jmoGᚉtn^xAE[]tu-UVst\lE\SO!''GƖ-[h#Z}hIZXkLrv8CQ>.o/X`΍169ZYyYG%$9')Kuؾdsߊ+YYYԌ/z~9={Jxv.[馛TsnP`޹s'e%9 z\Ao}݇'xBMWK6]vjm?'OV%K ?CU_ Dǂ(Zg}\Ѯm6ޑ,뮻,5 I I~;d!!o Ƙ/,`L рOݪ}T}w\bd'ǻe(9=){75Q] D7v=~Ih<}Drr"Y_YN LKƗk\jnH, f=+\z$@0qhfB<9ԭ$Z&+V/g IW؂Ͷfx\/\17|19uHw55Iu=~^3$O\KwfAIsQ@ҎHx\f$ӓ`K - ٸ@SP2@XICCJ*ͪ9H$k(I^J[whuqZ#6%CwA[E"9֖?AD׊^īv|8KOBv=Ү(` 8g]r@ qUY!yۙ& .QqnRihf; 0ݟAuoF$0' ^e˖=F }Cz%HQG i IOAw 3z@҆(M&`*G^`$H@5PYӪ榊@҆(M&zd70)V衡IjE&p֍e,FGU63 t>`X6P$C񌬕҃@Ҧ)͊KF]l ƏUDjj~nm#nQjQY]u i"Z[C/J2ҍQzZ8]-T zR_F& 'pd̤l[F9[߶7<&1J-?\[jiЪEZ9BrFF,ffĪ~؟Eh![VB ucIý 
6mڨL.EQ^,^В׳sko 9&9/PC6$)=V7 Iq[M %zp]=nocCRȖ9 ;?0:.!45`Q|sdѹ Z'Ov)}U$2V&ԘӆWנƄ%ɓWܴY y-8p OMաtrh0vj%D# лxn97]\PWvbƥtv IV⵵!- $z< h MpAնLoRDPT $CkSZǘ`5O IKR ilsnu$ DQj8GMK@bO?fn Ka]7tĒܤMQ@p,ȨP@RܭG|IN JZu)W(| h˱h=ҜbI%SKy>ZKA,w  FP )lvHj'@K>SD]]Nj{UQ@h(p-RF.H$,X9~LF7 !  A@ѻ{Wqr y%U] 5u};]2}DhrM&}f ڶu^߱iN-B=MeK@(m@jV!E~IјuF2 JP[gozR neƝƖJUL %U ̈́jZ:3:XO!i?9^D=](1`u~QHOI[x m@HPDHG l!Z˪Fؖ5*wT nr( ا_AҮl vvީ+N79JS@ iIDHz% ) A[wjH."g)( 4IDHbÆ Xd URc_͛7$ݫ =%(М $mP?!yA[0`Fi2FB$$J EQ `Hp޸;GAA n/)SOFDDOCDQA !!5+VPNnC ]-—_~΁d}}ҟ( I;@A^Ylݺځ<66Nl{ڵʚUIKrرhѢEt* X6@$sNHLLĨQжm[o,ŋ#99Y]yNvNj $4 0A8eVZ@\V tn߾]rHMMEtt wKDQi6@$Vݬ[nUvڷoo*ƛ~ׯ?Ώpɸ8w9hӦ Zli/:!r2mW@ iVB0#4>lL]vΝ;>}uf+9`ʕ7!TT8!S)G* Ibbb2dZF(Its{(--5}!ݫ\y_[sK_ ϗMKw۶mjJTUURTFi%[]-ENw jU )LۘK.hnm  LdJi Zޅ(**R$uգG=sQ@pR@ ipҒG8ТR\!I0ʼW3$ofPRK.9?W@ i 64" h˗5,\{JR֞ 4@@qSKM foɈsRDQ@_FHz,`7nT'$z"  $5@c`!ɵ'td$k)m] 6F J˕6h&%iIP@ iMH 4iZnAFH˼i5PQ@ iÝH 4iZJH2[-ɴ4mJ@+  $mU4,IrJ!@҆-ATiҴIJ!@҆.ATiҴIJ!@҆.ATiҴIJ!@҆.ATiҴIJ!@2c+.aa܅nCjQqpNKح-cY5X=eJC* PLi*dHNӒPS[Cu^߀pCHT< _\($gf±=QU]5-ĭnHz}@_( G)7OJC )o81{w/5 ɿU`+6gEDno@Ҕr $VXF Nog@)>ƥtv ɩC֢T_NOATH)`_( G) HۘiB QsIG $I7Id3AW $}L $4Z( ! 
x@R #Rd@69fV@ )l!(J~I@2Ƴ\[Cb e' L\_AWuK3 rNrFF,f& ɬ>7([r/Kҗj׺Xu-kޡ,_+FC Swm+HLg9W͖0쭪W\sr0qt9 Æ2N[rr $Pm~|']gK)1UWv~I^8kQdV (Kp^dRгeZ2C/eIP@ iiՇgӻއ'Dag ;m ~h =/ 9˰-WԠ%ig_l[,IQ@ j _Z?®>'wg*6Z[.(<>#kSôcK_V5*JIv6*MV@ iZfi1W>V݉64wL6⁏mƌRKW & SkI7v&Vh %iA( )Pm~_uLQk&ӕ-vtD@rH@ EdqJ('p'ݒ.o/Xy=`lr )=:s~nm"\mT+C WKMD8jҐ $-Ʀ8gAmU:ݫt]>[ZhspS$ Zн{j2jjP][M  Cp%I?w.HjE{7i##A\H`p üe;m=чJ]Ԛ/x4,T7Hv.41ѸԾ(g.a=ң!u獎3 bo<=78oocÕ$ /nm@KP@ iַAꡚ:m"Tp :P!B06 $MoeND~KU3tBr\l>#|VARinȱHR\}n?dP+-#<+h4|~p?I`@J$,P@ i&в"v.iN%5?[ٮrʠnd tH $ݎK]몀@DŽs+>7UW;˩irm_qWId  =~ $MgPW؂ͶfNqpń|'c;֖y*@R ٨sS:v+ [gzPX- ݪ3&srdD2 d_\ڣ@yJ[whWսcKujܢJҾU$czmAj'䍓s^Io_ל};]rֈLZYg$m$v?~S $glr#v_٤)Iwy0K w|7qDaA' n H䖆T@ p&H $B 2:'s$yVö\۹BR-T|e,?;Q(c< `}IS_@!!ZEEx{&[-SB=g>ՠՌz6fIcs͒յjFH92pdt_ދ/' CikzX3$܋-6Wc[Y_9m^.{V۔ aHr2 $BW)ҭ$;Pe C]оmk-4^@ JՇqݻC6^ vq1ѱ][m=DZݗ7֫l-jr[@-Z_}syNbI3]o@#qs67Ŕ*rVACRn:k6/ b,Z -"1yTcvۼ.wl{4$jlug)-ݤ6h<2 }鶵ic憤oK?ܾӆ7_o5n2 4d× 'XK(d{eI``mn[-#1H=21L 0 AlM݇m%cfI1웍~I7%RAV%ٶ-Rnne#ZFf1Zⳏ& i[ /ހ:}l?  $C%1-4D0:'9l!ܟScWB2ӏ^޻[7X~[Kτdjk`>$:cp $ŒAEQ;F>y?7߲%y{Hf<}l?uQYgMGhjW%n訣rTH7=J,IOԒ(`s ┝$Ƽ"|d䴱H6M%X$K;mt:L@rR,-KAig  $M, PQ@.Z [*"mj됼l݋5G\HV~1\n2& Iq[JRGt멣Ґjb~3_n2* \wOt[|^Ē ( c=Z^릂b,\)79KlH1d2,\ yJF ,YZ,I;t$M\m۶Ezzskȑ#?"<ʾJ "u+{[1Y8)#& IHNjuugY:tL R{e;Y%pG7ZA n~sԴi.Ns~D_YYYk#̂tvS I]"tO0g[ #-"0id*zxoI(>'HAͼ]eny6jj1iD R nsK7V}{fb<2t͝}sD}s3S<7דw'j8|0Ν;:t{gN: \r ƍ뮻ù瞫OzYz5 pSLET[(++Sb?+OSNظqj!W^̙3UA)ꫯb G̝;w{o/bذaG3yd X~3d@ h^W9,F"Gn),ŏ+7&7w?a)H2Hܐ6j/(BYԵtiunͽl|ol)T̔N}so}W\OHz?__|xbDDD(Hj[o)ٳGA;묳 ;~׮] t< w-[*y睧 w饗o߾뮻ʛoY($7oތuM7݄ }1j(L0AO>7ވ<\py?cXpF>!V)A2ڲ?U?<}bd2 Ò`u[WnV04 }{/')t$ivHr\&"᤮`u~Uk Q8i6Fx %Q,wA1%-3.q3g \IDAT>eݭYw;D~{9Ϸl2[BnՇzHn 9N?iy$ne-K---e]vDP|NCQQn\%j[4Hvn+,ůp^ݙλR ݢ7/^!$0nP @*t<\1bKwv۫){説Tx>9u$-Z|ꩧWXFBs~MOjt8 :o0vX&;v䜤st3$wލS* k-_=qDvIIٳ=!HV*AC G KoQ=8 =ym˱dÒ=( Mnw EI<ЧM2uuuU>ݺuCBBz`mP@R HZ,;tr[*tOceEtgiFƎk$ 4Q*.lw(7-]4ΐpi 9?cҤIX| ?_o j!sT46G&hmʰ4srf ꋸCrr,;蟈^& YXmQQۀUZ][ֵq#t?@oVUUݫM7Ko@R ͸irW-biqР%(Wd"z+^{5U駟VMCKsthsU[[O>YsNR$r=S[2Keej壏>RAHף"Kc!eX1OA5EX1OuwL41dVlت 9_z/M sLFVTT`˖-T #do\I޳P 
):[jP]2>[{sFspR+Mzh uD=4HV˰2 ǰ%J&?/,ރU~3U$0<== @wQH.Hj4;11; Hzȁ([4H_^XT՛%9_b:{ɝ{&;_]xĚX1OYCg*lcHz{B<-IcF@3)AR vc]N"0(-M@rW)P}L&\Ee{nS>j1(b/MiĒd}O*߿nbIz&DzD Y߱  )Q&?]6;,1-.߇̜|<8* B& ! T .+ʮ% X"A,. +%[XZPX 9!L l}88l}{oj]p}K͎I7Jy1DqI3_I$Ӣ$iMH ,zndm7pD!0>EZ.IDDzd=IH{m\KJE-Mr%II^x- Sm>9Sn퐞,:$YXdUe!uLM:n/DvnEYhd_@"ɼ<%IL$ZX7ϚIJe$@SV<'a>-YZhR/TYH%^GmTN$,-µtL: D.WKbunn*g $۵MG%69_."q:$Yb+3g$%[EoI[*IgF$ aJOb}6ScstOiˁb{.BEhj @HpJx%-)INCmZ$FDZm%Rw6)QbuDR-PLIHvL%Id(na,æ_=UѲR\ضv>_/PZIw%/^M SG|G%%V=[&$''Js]IYG$hؘgJK%/﬚ei$;'$]_:A2`w,|֬R{.RN(I_=%7"It%,["#$s$88GtM ]T۫%SXQhwkw:I=yI %SC5-Lu))w%˰{zS'y 07J[8kG$@:Pì$@$`n$%iڑ $%0+ %IIv$@$%IIh>J$@&@IRn @IR: _|! PG;;/0mCJ2ycvnljdGV]tDK6Ɲ@u֚҂֯_/)))yfe!C޽{ò`ꫯH'Mǧ~Z$Bs=dƍ2j(ӧ}3g}&sQCwsЭ[7/k׮*w}'_L8QI\;V1ݻ̙3'o|*;dt Gki)7"\YH)zM^s$n#K+vZ^.˼Yx @O +D^ϟ;Jvv$''$Un߾]"##[oU#ҥ]?C&͓MsbZ7T'$%D9={Ϟ={Z A`h3Fi߾}u!U#[Yd#}&tAA?|y鮒>N*/d_(\;kH;\&]{Qb+,u 7EMG)I]$H h q'HgϞc裏3w﮺-+Tu!iӦ$SSSgU3AƴJݹ]өST7"ܵkgn?F-"QgB}:% |%6lP$$^$<(EpD=?J^%9{+TUh-uȐHJREf&*AwTW'-SN+1~ Ig$-3Tq<$tz12yb{ァFdUqu]7D)nT1Ɗӳ8%)C۶mS6g7D'N.EwIǟMIz$iMHVtA4*|G$u֩CW#$ bLu=?&$-D\h@8(BDi&@D1f͚ɽޫDDȐ=& X"X"lg>cuʕ+juFQ7IzǟfJIR^K$-[䡇R%@2ZY@ YeddI WDl}O(IQs S`,;rON]K/ya:1nXH $ڞ]̈ä&*/f^xu-8 q$>k+$)I %IK>)a2ejr3qk!@& akrx%fS)IJWIB? KP3cC)Qd(w֕HLdCjm(%J κ _(IJүËIH Kߧʲbr-` +;dx]'9wY\ӱN~[H$@$P+WIJc; \՛"zJƬڧr߻zs>c0:[&?,H<tTQ;nI?M#I3 4( D.ϓtn pYII| '`vIN]wXkk_a &Kreu_Svl 'p|]:VIb\ٟc p줓f6u`Q$@$ $3>H㳒 1%iJD$@$`6(C$@$`J0,HHl(IQևHH0a(Y Pf{ a(IP   $FY  PdA$@$@f#@I퍲>$@$@$ CɂHHF4e}HH #@I %i7 F4 % " 0Jlo! 
0%iJD$@$`6(C$@$`J0,HHlGFIENDB`docker-1.10.3/docs/userguide/networking/images/network_access.svg000066400000000000000000001260531267010174400251670ustar00rootroot00000000000000container2external_containercontainer1isolated_nwDockerHostcontainer3externalhostpublishedportdocker-1.10.3/docs/userguide/networking/images/overlay-network-final.gliffy000066400000000000000000001144011267010174400270670ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":361,"height":263,"nodeIndex":249,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":23.000000000000057,"y":8.18899694824222},"max":{"x":360.00000000000006,"y":262.0000000000038}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":140.0,"y":162.1999969482422,"rotation":0.0,"id":247,"width":33.0,"height":11.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":107,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":238,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":134,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-3.2842712474617883,-3.971422467905427],[-3.2842712474617883,16.319999999999993],[-43.562548866301796,16.319999999999993],[-43.562548866301796,-3.680000000000007]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[]
,"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":187.0,"y":134.1999969482422,"rotation":0.0,"id":246,"width":18.0,"height":17.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":106,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":134,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":223,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-90.5625488663018,-35.68000000000001],[-90.5625488663018,-60.68000000000001],[-43.0,-60.68000000000001],[-43.0,-25.428571428571402]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":166.0,"y":169.1999969482422,"rotation":0.0,"id":245,"width":22.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":105,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":172,"py":0.7071067811865475,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":228,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[7.000000000000028,-0.3795674614555935],[-6.5,-0.3795674614555935],[-6.5,29.50000000000003],[-20.0,29.50000000000003]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":189.0,"y":197.1999969482422,"rotation":0.0,"id":244,"width":15.0,"height":36.0,"uid":"com.gliffy.shape.ba
sic.basic_v1.default.line","order":104,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":155,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":233,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.437451133698204,-1.6800000000000068],[2.437451133698204,37.5],[-19.0,37.5]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":292.0,"y":163.1999969482422,"rotation":0.0,"id":242,"width":51.0,"height":8.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":102,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":158,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":218,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[0.43745113369817545,1.1599999999999682],[0.43745113369817545,21.428571428571473],[-52.0,21.428571428571473],[-52.0,1.4285714285714732]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.0,"y":102.19999694824219,"rotation":0.0,"id":240,"width":51.0,"height":4.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":100,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":158,
"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":200,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.4374511336981755,2.159999999999968],[3.4374511336981755,-7.840000000000032],[-51.0,-7.840000000000032],[-51.0,8.571428571428598]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":23.000000000000057,"y":81.00000000000378,"rotation":180.0,"id":175,"width":337.0,"height":181.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":8.18899694824222,"rotation":0.0,"id":178,"width":274.0,"height":205.01099999999997,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":42,"lockAspectRatio":false,"lockShape":false,"children":[{"x":25.999999999999996,"y":110.19640369599998,"rotation":0.0,"id":173,"width":20.88802989941042,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":40,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":121.00000000000003,"y":147.19640369599998,"rotation":0.0,"id":172,"width":20.88802989941042,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":38,"lockAspectRa
tio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":222.0,"y":114.19640369599998,"rotation":0.0,"id":171,"width":20.88802989941042,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":36,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":135.0,"y":39.01099999999997,"rotation":0.0,"id":169,"width":86.0,"height":50.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":134,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.3867377051204244,-0.010999999999967258],[-90.5625488663018,51.31999999999999]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":140.0,"y":34.01099999999997,"rotation":0.0,"id":168,"width":4.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":32,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":155,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.
0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-2.6132622948795756,4.989000000000033],[-0.5625488663017961,93.32]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":170.0,"y":22.010999999999967,"rotation":0.0,"id":165,"width":72.0,"height":73.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":30,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":158,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-32.613262294879576,16.989000000000033],[70.43745113369818,74.15999999999997]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":113.0,"y":0.0,"rotation":0.0,"id":160,"width":48.773475410240856,"height":39.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.relational_database","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.relational_database","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#02709F","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":163,"width":88.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"padding
Left":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Key-value store

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":null},{"x":196.0,"y":96.17099999999994,"rotation":0.0,"id":156,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":20,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":157,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":158,"width":42.8749022673964,"height":60.000000000000014,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":95.0,"y":127.33099999999996,"rotation":0.0,"id":153,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":12,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":154,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":155,"width":42.8749022673964,"height":60.000000000000014,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":10,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":90.33099999999996,"rotation":0.0,"id":152,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":7,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":134,"width":42.8749022673964,"height":60.000000000000014,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":2,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":218.0,"y":109.69999694824222,"rotation":0.0,"id":196,"width":40.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":47,"lockAspectRatio":false,"lockShape":false,"children":[{"x":18.8,"y":1.7857142857142847,"rotation":0.0,"id":197,"width":2.399999999999999,"height":16.428571428571416,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":200,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":200,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.1999999999999886,-0.7142857142857082],[1.1999999999999886,17.14285714285714]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":33.2,"y":1.7857142857142847,"rotation":0.0,"id":198,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.l
ine","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.9157287525381217,-0.7142857142858963],[-0.9157287525381217,17.142857142857224]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":6.399999999999995,"y":0.8333333333333324,"rotation":0.0,"id":199,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3157287525380146,0.23809523809532174],[1.3157287525380146,18.09523809523801]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.0714285714285707,"rotation":0.0,"id":200,"width":40.0,"height":17.857142857142858,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":220.0,"y":145.69999694824222,"rotation":0.0,"id":214,"width":40.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":57,"lockAspectRatio":false,"lockShape":false,"children":[{"x":18.8,"y":1.7857142857142847,"rotation":0.0,"id":215,"width":2.399999999999999,"he
ight":16.428571428571416,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":62,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":218,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":218,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.1999999999999886,-0.714285714285694],[1.1999999999999886,17.142857142857167]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":33.2,"y":1.7857142857142847,"rotation":0.0,"id":216,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":60,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.9157287525381217,-0.7142857142858963],[-0.9157287525381217,17.142857142857224]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":6.399999999999995,"y":0.8333333333333324,"rotation":0.0,"id":217,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3157287525380146,0.23809523809532174],[1.3157287
525380146,18.09523809523801]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.0714285714285707,"rotation":0.0,"id":218,"width":40.0,"height":17.857142857142858,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":107.69999694824222,"rotation":0.0,"id":219,"width":40.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":66,"lockAspectRatio":false,"lockShape":false,"children":[{"x":18.8,"y":1.7857142857142847,"rotation":0.0,"id":220,"width":2.399999999999999,"height":16.428571428571416,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":71,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":223,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":223,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.1999999999999886,-0.7142857142857082],[1.1999999999999886,17.142857142857153]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":33.2,"y":1.7857142857142847,"rotation":0.0,"id":221,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":69,"lockAspectRatio":false,"lockShape":false
,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.9157287525381217,-0.7142857142858963],[-0.9157287525381217,17.142857142857224]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":6.399999999999995,"y":0.8333333333333324,"rotation":0.0,"id":222,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3157287525380146,0.23809523809532174],[1.3157287525380146,18.09523809523801]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.0714285714285707,"rotation":0.0,"id":223,"width":40.0,"height":17.857142857142858,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":65,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":188.69999694824222,"rotation":0.0,"id":224,"width":40.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":75,"lockAspectRatio":false,"lockShape":false,"children":[{"x":18.8,"y":1.7857142857142847,"rotation":0.0,"id":225,"width":2.399999999999999,"height":16.428571428571416,"uid":"com.gliffy.shape.basic.ba
sic_v1.default.line","order":80,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":228,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":228,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2000000000000028,-0.714285714285694],[1.2000000000000028,17.142857142857167]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":33.2,"y":1.7857142857142847,"rotation":0.0,"id":226,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":78,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.9157287525381217,-0.7142857142858963],[-0.9157287525381217,17.142857142857224]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":6.399999999999995,"y":0.8333333333333324,"rotation":0.0,"id":227,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3157287525380146,0.23809523809532174],[1.3157287525380146,18.09523809523801]],"lockSegments":{},"ortho":f
alse}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.0714285714285707,"rotation":0.0,"id":228,"width":40.0,"height":17.857142857142858,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":130.0,"y":224.6999969482422,"rotation":0.0,"id":229,"width":40.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":84,"lockAspectRatio":false,"lockShape":false,"children":[{"x":18.8,"y":1.7857142857142847,"rotation":0.0,"id":230,"width":2.399999999999999,"height":16.428571428571416,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":89,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":233,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":233,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.1999999999999886,-0.714285714285694],[1.1999999999999886,17.142857142857167]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":33.2,"y":1.7857142857142847,"rotation":0.0,"id":231,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":87,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"stroke
Color":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.9157287525381217,-0.7142857142858963],[-0.9157287525381217,17.142857142857224]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":6.399999999999995,"y":0.8333333333333324,"rotation":0.0,"id":232,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":85,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3157287525380146,0.23809523809532174],[1.3157287525380146,18.09523809523801]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":0.0,"y":1.0714285714285707,"rotation":0.0,"id":233,"width":40.0,"height":17.857142857142858,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":83,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":125.00000000000011,"y":139.30000305176532,"rotation":0.0,"id":234,"width":40.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":93,"lockAspectRatio":false,"lockShape":false,"children":[{"x":18.8,"y":1.7857142857142847,"rotation":0.0,"id":235,"width":2.399999999999999,"height":16.428571428571416,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":98,"lockAspectRat
io":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":238,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":238,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.1999999999999886,-0.714285714285694],[1.1999999999999886,17.142857142857167]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":33.2,"y":1.7857142857142847,"rotation":0.0,"id":236,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.9157287525381217,-0.7142857142858963],[-0.9157287525381217,17.142857142857224]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":6.399999999999995,"y":0.8333333333333324,"rotation":0.0,"id":237,"width":1.3333333333333333,"height":17.14285714285713,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":94,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3157287525380146,0.23809523809532174],[1.3157287525380146,18.09523809523801]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":fal
se,"layerId":null},{"x":0.0,"y":1.0714285714285707,"rotation":0.0,"id":238,"width":40.0,"height":17.857142857142858,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":92,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":109}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":2}},"textStyles":{"global":{"bold":true,"face":"Courier"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.custom.confluence.c20f4a380e3cee362007f9e62694d34d947f28ed4263c0702b3dd72d9801532a"],"lastSerialized":1445556943068},"embeddedResources":{"index":1,"resources":[{"id":0,"mimeType":"image/svg+xml","data":"\n\n \n logo copy\n Created with Sketch.\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","width":59.29392246992643,"height":42.185403696,"x":0.4429050300735753,"y":0.7077644040000006}]}}docker-1.10.3/docs/userguide/networking/images/overlay-network-final.png000066400000000000000000000666501267010174400264070ustar00rootroot00000000000000PNG  IHDR|J IDATx^}t]řOdYdjU[66@ v@lH`6l'Mv$,%!!  pw[Vfg=4w7sW&'''!Ir@r@r@r9'XNPr@r@r@p@gBiJHHH{@r@r@r`p@,Yh9M rHHHH% -i$&&'???ɁY j}w#8zGF7! }`m}C8;>I0dc^?R#&tÂſ ̝"iA<|w̺IuK@N7v@u =! 
shs' 5r>6fcMJ !=*As_>,9.Hw{ su]xx5>n ?26n{Ȍ H|ae8!ޮsn|]4wcwY>TFuZ۔^o\0 tڻdǒfq@Y΁>U҈/DY[q"ܹ1˓R!94HwkeǎpxC'~v9ҍ˞z翯]+r\N"oc9p w1)it|n3KÀ%໔e8 _d#Wq~P>Wq JS*;3Tx$Vҧ:Ǿx1 ##u|~H+ ?I|i";NwxdDV,M$Ix${ڊr7B˗tS79P,x"1͋±j<]'.i+gq@|~rrlI{e*xĀHD|vS@${AL)mQ \D\ܾ>qOT%Xm)([ҍg`E=jKXq@Łzcd,_%o>̤G6cB̅B=&5梢6U3F+ p$;O9'.x,OFNL?a^|@i(@a+Q%(1(q@)?QIjK;8 \FNٱp D‚@qMH,s8:1)kp}`/AUo7N|'3Xvo9|U8$;Cكo"3eWS8 _n |Z9|[Po|o_A9~O|O^Y86 p] .c|H%Fr>Ho)'q@+d. <=k=h|RH%`HřC?W]U*N|]l$|&rXٍ}UPǼ9ݫR1w: 0O>3?9s'%' ߼Q!AN~^r8$|h_:W;MvϺ/r)V&G۱Mr$H7s9?z|܎۪hu(#IoJ8 P$t `:Bƭ^ewm{|kC/Ir9 ߓWGMp w6Ce~Ĥ9;ᖵYȉ wx$p@.6zWބ3:뒱!?>bqtQ.y|Yo'e?.= Y3Q0 sDyD1ύh.|䀙o&7e_n@eG߹b46 apt ]gQދ!cr~ \XysD۴PÂys%I $JyLˁI 00'Ođ#G0w\.Cjj*%I:$ țݍŋ{0,XK/DDDOe$|\Y;+EOW_}NHCbb"o"** g'$ ʙȇ'.߇wMсz <<ڗ/_.@ٳg _w-Bh™m{+W؅,yxGqA;e4BjTq_z5.]"?3}7TdddBw6mAMC䥗^Er N|8)ފUV '}_}!;;)" ա˖-Cgg' )bǎD'$zk"WTT]\ PRR"=3yyy9zqP%$IrS9 SW E o|C>}.\(fC@o~AXXy)s;/}<.8qB=/|_'Ǎ7(C#lYYnwjDGG#$$D:8?â͙3g~_p$~!O W_8 VKH0KJJKk&tTkP'?R8A-[Λ93< ݋v~oصkx~W*M7(SMC0-%\ݻwa;U@<:u ? |+yؼ3g9~oVC=$ %v[ Ip$8` <$.bPMB)ϑFGG3y{ HSWvZ!o?, lg-K_|J111?#⠠~mD[o%T<|?RT:_?!400 <,Y"xOy8&[}W[g?Y_m9 &>*hMKKn< TGq@[s͘zvz C%S*5;wO?zg2>IQNXxkHi QA==#R=O$RQߧJҶ+I13}Cz]@]9. g]>S%x.qƀ9&@ &ZZZġC*-Fe)$I8Y7:jd0BQWO|OcBb;}z_qݮ|BəR3 iIhy * W^q%xa0{ oJ9AH! 
y1*U)"cdTH{0DϟLQ@C,%SƓɓtj|Ri`&졊~_sz.ܱIеG.F=wADe.LK_r'D 74NOkϵgyDY: ˿{${К(z\07&%k#%K `zF2$AҧxN?|{Rto;+ `+U_>7{8 C֚'=4xm'i#oՠJXxà^Y!31NI+o(1oFwWeaׂ}d2f0${3 ˅y)ݕiOV:tOO$0/ s{q>,`ƒym9fBuE]$D0zAttMGׅBfO* (1350}"7͂.L[L=sw4Բ:Qd*DTtɃz}Fk:Š4ztX%ໆӾEIL'~35YAJ&KaF)+*%JXEwAJMDSB''ׂvVu3U9pJwtX"n-[|4 8uJ0w8Kix* `yH]uUN)$,Fx_)r\Cya.u1v}ݸڧ~ZYhYWO`4xb"n!P/++ BW5:4ӃjJar6qJw_5{VP]ڮ ۃ:yz07 +'|O\9}(3WU>>z&ǢDD Z=t^bU)'̕1PJqE5%dחU{AySoz+%6otv5gƍ>O#T7O/<7+?,eQ 4FJ co0LmPPP'8O=-)?dqpmVgtiiiBM5GQ x8>3vUW>td˟M~a!c,@?v1L.&IܫƘm^^d9z;7c֭T4ZH=4,cǂ̉B9]]k&VATL QNN1 GX4sJ%4f䤴 ܛ,M}I=K2bk]%AsP2pL>؆":f(icI^TpdQ*irOXyUG_!M⭀OGA&;;۞-g8J hQJS㋒3:*ܻR B~؎7u 8t`޸x17>}?CU &_֥uذU˜/y+S3gΈbTK 9UVyl*6"zܔ }&5^a=j>UʞD=M6g&P/m:_[7wqXAޕ[ ??haV>e>&,YbhߧJSeB=>=uh?O"{}O}|{sάwh iߩeq >2SC& y ۖIWᠷ)- 0Y)sШ<(A~EO|^iߓ@ߣ_4&Wr}gd/^22#7vOJxL:m+_{ YϢ!QC7;J4C~aD _1X \<='3zIN ]8]oWIIF#J|d&=Ѝ7vMG><sUA؜A'N]K'$]'ۄɚ$k7;d]JQYNNm1 ?i ]n1sq66[Dرív^caudO/j_?l=na?ܱ%]x 7 {(?.5[ߨTٻ{F_*q0"Bqǖ4T ⹏ ?,$_m IDAT)_TOO ̛i^ktj0Bsu yy;yrob:݉ЧzV7s7t|&^+~mNbOI"sfؕXmcgޫ# p..7#fay42u=D4l6BoL]8)T܀y{eN FAjԣ+Gэ릛nr4602@C܂C)۴  k'Z4@j=CHp#0G=ZwdxC'1/G yC#CxhȌE05Ν;Ej۷۵zgz\O9{P0{8\#uՍ2 0 ~iqCT㸻3<"kmsE4"p~d-W5HQ5R:?NJXWe )V=UӑaN5h9 L aj^g7TYC2#2&޴;zlj^*Pi;Qb:%?[5w5}F ݡw 3޽{u>SK%'ԆUܰi1‚ULMrX}Cf\q1.[aRaـj7+vY'81|wͪkpm{;" [҄_wX@H{6Jݕ[]*r9PK?z{r<| 7,U|p //ҌxؐH /m,)ݐTuKp^m_%:|ͺY-ܬV{͊zv'5[`/i6H] $߼Yuw>ӝmӕ qʹߜKtEV6S}%#f׀1?A-*ZaJgMQtߩjqؾ>~i];v~X,ص<䤨K]Cyխ~Q.6,Ma:Qـjv>j,wyQ\7O7+qiO(]}R7"{*Gـ D0Yه,7F8ʻoǾN͐6zeBa2&εJ!"TSo,l]duƮ(iֵXVw~ɒـjt:; xd+&rO%Sډ>Ⰸ EYO 9F#­|^+[nq[ptR׭I$^=ޢ)05x{&^:܌"k[ـcyxL-9\X\qA.~EC>\*F|Śd.VG0%m|M-Ni_nv6KK71T깾s#īZAxw>EՎʌag\ z=sZ&lU62w` ƚ1t[ lZFT;H2՚k-b1cJFm{{W sa 6B4wbrۧ%D.Yh_Ў+22|w}f%_?Bq2כ{_#l'pT2c~"tZp%.aLīƁjjX8ݥ|x#j7жmۜZ%Occ=g9)2UjoX(m\W*tW/~fm= +p XW!W]8tvIp +Qԁ(ȴ@'Y[X$hK7+qГ"DAkR? 
"j{i߃ހ,$;\zC=!L4q\&S (p0j gL @U5hq5㣰Fg-#gIi{ -Xi"m'XgK,\/[ T"tۤ鞎;rlG8զ$]hf @Z5"x묤~.|:,g_gY`{{~gGl4DI ""7]Qnў<7 1CxI5[D4۔s,|8=00 6jK$}f 6-gt}}:G+~@էx*1=,޺crkFay c$;U3ZkGN[IVե.ES{' Ґ`ïP5ـڪMMM.?!!Adt&JcǙ[I:[we|yB 7/q|bZc8̔ۻm QT׷ё$gYŕ)yRgQ\Q.-JEzmү ـ./ݬ?޼۴bsO= 2tl TGJY9Q[?/i6A'u-(GLTS0/H;{Q^mDG,PeYUסYiHICـ./ݬT &ܸq#VXdM_Ss~~R(@tDi?ge.{J Mi![Q]ۀpd&#HzzE{RzBDYT֣GOJvfـ./ݬa= Kar/nvVt{}rKnxΨtgZ֭U#OsӷY6mj%[da}? C r-;;i /_x=e&7vتtjZP^]بHd/JՖzPVe93R~e-:ԅ~RkT:,FUq&TJ.X9@L ;˽Kǚz:=&2[`fh0gĄ U;Jɢ0=xeMJ>n f?KXa!D`zūavX*EG T#Pktl-]B|Tz59[wߗnVzTGi4{0N-ꌄjN|JșnHZ||onǕK,"8w,"MHk_\O];flg8ݱ #0~IĤhO2jDF;c+*yG:i7::ӧO ŋ $KǗnVzmZ\sqF^O7*u\0 ͹QxE?4ڒ-чZ|3]KѣGk&23[CW╷ެ|}73<=qu1{1npP'E!N,S*tvv ?22AcoV`rt]yG5r֩2>aKx-!zi1*(H=Xϳ/شܹZzI~6uN|>eȊDK;8 uczZ:,߼}[ #Tddd 5նyo͞|Εыf`I׹S78Xˬ](B=*kQIh[XX* ''GòAڴm Xf u qSv=ۭOԕٺ3 XueM˱"1>tWXk)_oF rx;J b.|g7>:l7zv'ߗ$"z8g&OO7+!{p=CchY œ@іh=>u34/):m835MپNU蝔cZLymۮеDU\[my^?цY"=rf\~ҊH>Y+uKpz/5ذ|6-VY/6$;+3cKrjCQE)VI!xpy~XwxLW=/!fL}U/rH 3#pTuurﮈ2Ҟ(uiF}s;"GJB,2P'%' qvt umgNi1sdtBwUtmgU$dĆ5NϠhgOdO2l|iڴi{MK/mt:-Jn*| &ox\.GDmkշ㍏į^dCQjZu]/IYSBjO/սx* ^olNc)/I'${@ɚ$U @@-kD"k^1񁢢"_oDn} /ɶU}EX(mZ0u/iË{-|YD}Cxqluq Y avѿ~heauǖtM*YO,J;རWF'OeZ }Oym<%|Lћ@n"=tV_󱂅SiS2ꞵ蒼(Us+(*m|ωjrK2ŵxYTu o_Uf}6-?YgU+F("$Pjs5yTM G[ftJnk2Ql9Ո@yPZ;1kmSJOxKt7ޒ^fgڀFnoD*^m$I$W&fTvQJT"sIVa T p4^yC?jB[F~_6JIUV`t/^Oװ0r-fR׫߶m({"|',_h3 kRa\SZyC6Nvc+q-1[*t%SvES U<Ӭ1,Gog~u 5P8cFB~fmTZ{ 9)|Mxp`k(IRֵalͷMK\iek5OW!AC4C#x'E!xzNqttaphDV򐒀M9cv-G{q@ꭣNp?^ImF^h3‚y%\ƒRcT_\;JU$5hcJ%@:Q }zlL;0,ڗ׷c,cW_[0/jđj$Dau~xc[7U ^@Rzծ)B]s'V#/#Ɇ]Lk)%|A_osgh<\d^FZmsCqN;Uz8>>H-^j(^_^HE]  =c ,J Qk;DxՆHMPa;^v\,+rm>d9|{*3XtxHibRlo\˦z %|\ϓzn3_oQs./;l-7aS TғbpE.ўtLīSnlf->F-t9/ [銊(l;wE"#f׀>Z݇-ǬWxam'b,͈ǎ z<>Ե!KTU 8ۺ 2ҹ$?*5ʈ )IzՖMYiޝ[JotMZ,1w>G[+Ü'>sSR@g.lT63UHMP+4v g`a*6w٪bOpa˵Ynʍq7c_I'fJl_YPf1[RkRloNNUc%(Hyzň_!?QX݊bR[u~'>|ctgvbKC&zlfqItN 4]>%џߐ;c#%M8Z\(O#7Q\\-UyHԡG>ZTN>7bϰ& N'{ C͚L"y]"4s~'/M!\^ۄSHIJ4hx~0DrTsNԠ KsRj u/~NmgjSnj, WCo.bM2\b%(mXzH[*:zPKhy$YFO`w}+~wmpe]3*jTMW)l]بpUӕ-]LŢa+/~ZͼὣeNe=;Ve#C%R ƚiORT]uHQOl6;mGSԊzDE 
3-Y;{E{GEXz&"}{ghh[05O>Op*7_Z,(m̌?&kU_^h0$M҉=O5 d$`E.:p!~]bDUS;/IJl[[i7ީdޞ^ ڸu s&b[س rOuԱϟQ1Q?R_\a1eFӓmu!Aca瞋l^/jxHGaH[FΒ'pQ[,RDṈ2 >#GԩSXz5VXhכqrr͍8{O[toV#댍j~0%l}E_آV;6%~u(,AbLdk{Hvtd!.ZCtY-;Qib+/_gi鞠7F|??@Ifzіχ ?@s?W_Cf*u-(C*;=ECՖg ZTYuJKFJ-/U<(zi`7Ȋ\qVޜ#3З*m`׉6\b(GAr(taC%ޒNP;;JrX. 5gUx0EE##5A1]C*u!"ך0ܫڭ;zt6sn&g?[#jΤyf=vq2{ϞAv|酏¡d67vتtjZP^]بHdz#pW|"gBQVY.d"uh*96u\|9X&17~^k|>36!Ab2Z1g%Ii6|mq5[oO7woqHj܌'O+tνu'v\RKiyay7.ʲx0R ־Iݹ<=-ـ?ݢUKg7BEju Ho*hA[z&}CUf"=ٱsj3TOYݔP 06~(ie dW:x:D빣,\$ 7܀cǎ<VZ;.gw}+s޻<i#@'D(yrf imItۡ&M]YA @OGc (G`?膫 }mmGLC`[/݀G&0>1!93٪{\Z?o##87Wr^zs`>=xl2Uiv}-]1<<_WO < ZO;w5\#nw-7w'T_Y+Ea;2ީ666Q ERmm3犾iKWr<}>n:_|1sOCqz^+zokmذ:e˖;::p= 16@b!?9њ^4<1ܵdXʒ|W6; 4Iy:;_ڊyo=`#hХלE|J¹ -"$}=u'|RHַꫯbӦMMZZ`4Jp<4EKmm0;5_܊x?2~K |uXk/wp ] ~jRUx#sR sJxs)!^xk|>qRWzd$b-Zf|OZʁ'N<2B۶msAX9ʨb0c>]8pҡjbW\q~0l-\hS> <% 9YxCRTRz66www1//O~S=T'1J7YK[AkM =kMU7SP3BJ5&|N gT.OXG:~J{AUDC';:$z{\ tՔ{1DEE v0Ȝt !W>za^Sk#<"nʴmqy9@!_ApS:WHEZUf|B޼yVMPmii7 /j*Hkf>s >#Oޑu3R=3j :x]RvU kk-o'ڰ>+RGΌ3VR]wk͛7 Ⴎ˞F^'WLg 9;)VWWtÜ }vE7 fo(ݫ[޲8]ґ &bir_,${T ZӿS!wvv I-Z{GU{?яFR=bF*?`J|('l{ O:G{ɒ%N{΃X]lR·]y?hZ^:Z^kr?ЦCa^kz j 宠*=哀o ]@= =ӔpӋRqFÄ;tzL4h#J%v5iXp&)]ZO~"{:0pS[_x9 9*q<|jƴW(Hr$7͟$Mi5?nF*%O M>Vƃ3%%E3˦3֨BEubVVpG˚Zpz L7?qf #|j;+ߚ }.4% g? X4=|VZ~.͂o bOINi ,yy蒼(ri!PN>AkDr~GTw:p ?qsl){z0:D' [w4c3ҧp n[z6|?8W/'ymFXp&[V~%ٚ&&ut. 
wnI3iNə7S?#;9oq: <u$%ߦ83qVANT7|HCthM :퀘U<jz;ɛׯW6ƆA};%VWA}=LJ _zeUJOWTs{5 ī-ՈΎ?ޞ7y@q,+^‘>~SA {o5|Me6;ayF̧^ 1?!pe-ȿ&{Wy_|{K23} BtD>;C#Cq) QX֍EUb֫3;})-C#8RTNOG^F Ǻ-Z>C '?%*]EOampΪ,& XVt|+O?|JZdf>lEߒc6Xo,L #*,Xuhgj=gD_ھoH?Qь7.e+3lڿ[oLg5k֜+>@ |vetF!F]2y  X(5"|6-:?.X?C'f^@ fQ"[Uxy1fcdž|Dj~qm^g)||䥪~W^WSU-ORR+4v g"=FP5PEf~ZĿK7*X I5;U =!2TJڱb1!'Rk&(nraiMu^g)<>f1׻lfVƎ5 /8lzEf޼oǪD|NC%OU)SVߎ7Yo]duƮ(iֵX6T%g(3#2 sV[m閒7oJoߪc-|uBc*I={nyKo}ꩧ4(2m sblåM"|:W4tåbXWAB =xԶ59Xضvji(;ZؘYlEs'7J~@ZҌ%vt!UIF(;z4;[2H6mˬh["ms-Cvr,.[_؁GD?FF: c2շUXb3n >"sP7;JUTۧls({ff[%={" Zd/s+\M+5͝s\ T70,W4cḺ_SCc.d2ҥKϥ3n$}ǁ~ZWp[NT()dNxNU #1/BXJ NZ/DJ-H[opNTfbYB  l=ClƌJF`\`,fLtIw}/+ؽ;2[nl $۱z]3ͺB*o uxUڅC-Wkd 9NGpMX[L[=cp 7iUrÌԋ$໓漛{kT2K2gECwC^#%mU5hq5㣰Fg-#gIi{ -Xi"m9lJ}K|Gw׫t|=\^íy?;b2)i‰$EbEnȩFݢ=iynbS+0j4vi""47⛮c'}\oV$wPPPo]u3>W^ipe_&sR={= g{5%۹n͸J*oǽ5H’Tdh=8]n%YiW.ES{' ҐbjI j́=oN@}v +8^J篆|;w^=>mf[,)mriM-(E|t$2 A=(]-JALEqE-Z:(6cs 3f;z%^_?#<3kEGyD_}U\}FPYY) ʤ\ $yr8r)K7meF)c3#&*)ݽ(~Vz #cx,ʪٍd$XˡJ .bw}x?qw^{ uV7oټy3!:,{qiM#5`Nzř)u );L(j%|JM]6]46QHMFwԅ WGgWhg3D8px? 
?0Q'SOz'o}Kd*/.#-Y.~|VFoZ+VjFXH0c"?8֎n1jj }CUf"WJҡі-%|hvu.>򗿠#!!{/ 'wuuᮻr`gQzЬ/^JsP}(oSZѳ[gs4>1)ړx8av1stl}|>_^gq3>L^:_uU\踯 "PgذaV|_zzd&f z%zPoMzv^'O5'TCiW~_ U:L@?**JœOdZ[7oϴK2BRo- Fv m}Q/F|EҚ=%?z(^/\?zxGؠ1z˖-^q[QL,^>TK{Hf AX&#]R+I[N{2Q{,% 8&_T[M55)ZM` ]x;uQ$˨V$kHz=doIw=7Mbx;oӱS8SB5ݦM<sp{:q &ֈ% K&1݁n55m~^^Ku`oD9F!I?$fKRdx3MfN> <g4o=:{zid-m%ٌODѕjkvG" U!O~,r$Ŭ1xs?M^=R>̕/M^y#DcD'k4M ls=K}ރGs|ވ/o;o|2d%ٔzQ#'AIw[૥@GF @ś),o:6&u2H3fWO-}pFuOăMOWeS{o>OEF8>ISn grB/_ ŧqIw%;xꩧg~W:Z$ʼnɮQHoP]#k  KIDAT=\|F233Kc39ig#zWtڑҍĪko|JbNYRp Q?^#Y MSzCu5Uv/#lvuEO0f'#CJw!@WJ 0I~*GwcKwOåW}ZEAVHp#0G:z,j0 R8:084"iެZ\ J"=zU;Gka0ANq ^N;9 籐x5Kǩ~d-W5HQ5R:?GTWg )6BuC#8RTNOG^FMAK~z z% O.Yy|-ѐxW^PKN 7v= abDrLMhԓ[u '*qŸleM MMMBg+~VVvkiٛ4kT]\]1+a:Uxy1fcdž|Dj~qm^g)Kw|䥪~W^WSU-SN w@~Hv=dF`ls}x$xMDrh}&εƁ5xP pպR6\*kRlP= E'Z>Xy|7.=3@_8VZ+0Vd!Lk;xT970,W4cḺ1̝w/ӵZCCC"_?4.F> n^J.3,R?"nM'pT2c~"tZpdbLG0DUS;/IJ6!v aݺL5QPPt̛g^ĮMid^y=cx7${ɬ HfFaEV!51 !4D=𪾵 N[.@r:`a%j: ?9:MXmkov80~VPMm U#Up߫SߢORU)Uc04 [k$׻ޱ7e@;>{ fX7L&GFFW YZHu+|XdkkuxB íbU/Ij>.?۬E,*sT(Z]~g1d>○cQRU~gg8r5rH Gud(`2L^\L6e5uq<ڏϖeaziϫT/I鿚hH~ݵ% /E|ч nS&sXqyՍ8l eKL[bk/rvY֖%^0?5z4Gz#rssXFt뤩\m;!ăxuƕ_^7k)UjTDL98ʯe6KdPUCɦLNأIcZ<) SbC:1_,vtNgJO_ߘ;"YϦ k~#eӒΒ;9(eZ֖S^4 M#3|y86Tuʨ~WNi7}YDz~!cӲIDoܼ1Gf$6m-wR,W޸1_\0璭L ~zئ3C]cع|?y@NT_2tʧ59,i<&뫙3ƍ[3$b!cTe3Y2 l9MT>ұ[qcH1#K'恰t~U~-S~#1y2nջU 5Zvِ?||[ M6l@>|!&?@D\3I+w2mcc?J)Qy2>ڳp8 7QB'T~oCi?>f^Gu_-0hB8 7AG?>w1z' ~)R7Dž) PM\/Fѻ ?v hjO=2o.x(v!aۆZsm<0Vz xI%}MFln Q|> 8: fbzES.oPXeI9 &(._Ƀ7݄ymBf+++,%9<Ӫg9H ~Ɔ `o g'$b*A!jUt<1|T̡̡ʜA ?A*T*FNCF =b| *fQCD+)U7&e⎰ x-v }?@!32v3gX0ōnU|TCw |3z;x_P!qkT.n 9tjȘgsxEy\b,P(>iZQ.3*Z+,P(r`:Pk0W Yт;3 >G.Jl~8F6jG?5샇Tʘ#>O8x*%bkXVCTa]! 
P@dtP)H,]  ~H3TA 0cWE^gU & /H?ڋ`2%ʱW"ގmVDpLH"BGL s @DP#hI$@|  ~DM3IH1@$@!@i& P9HH "(q4$  > D?"$@$@  G4H($@$8f cH"BGL s @Dg=j5IENDB`docker-1.10.3/docs/userguide/networking/images/overlay-network-final.svg000066400000000000000000001512371267010174400264160ustar00rootroot00000000000000HostHostHostKey-valuestoredocker-1.10.3/docs/userguide/networking/images/overlay_network.gliffy000066400000000000000000000412671267010174400260730ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":361,"height":291,"nodeIndex":195,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":23.000000000000057,"y":8.18899694824222},"max":{"x":360.00000000000006,"y":290.6999969482422}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":194.0,"y":200.1999969482422,"rotation":0.0,"id":193,"width":47.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[10.0,-6.0],[47.0,77.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":64.0,"y":272.6999969482422,"rotation":0.0,"id":179,"width":247.0,"height":24.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"oute
rPaddingBottom":2,"outerPaddingLeft":5,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker network create -d overlay

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":23.000000000000057,"y":81.00000000000378,"rotation":180.0,"id":175,"width":337.0,"height":181.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":8.18899694824222,"rotation":0.0,"id":178,"width":274.0,"height":205.01099999999997,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":26,"lockAspectRatio":false,"lockShape":false,"children":[{"x":25.999999999999996,"y":110.19640369599998,"rotation":0.0,"id":173,"width":20.88802989941042,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":23,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":121.00000000000003,"y":147.19640369599998,"rotation":0.0,"id":172,"width":20.88802989941042,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":222.0,"y":114.19640369599998,"rotation":0.0,"id":171,"width":20.88802989941042,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.svg","order":21,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Svg","Svg":{"embeddedResourceId":0,"strokeWidth":2
.0,"strokeColor":"#000000","dropShadow":true,"shadowX":5.0,"shadowY":5.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":135.0,"y":39.01099999999997,"rotation":0.0,"id":169,"width":86.0,"height":50.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":134,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.3867377051204244,-0.010999999999967258],[-90.5625488663018,51.31999999999999]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":140.0,"y":34.01099999999997,"rotation":0.0,"id":168,"width":4.0,"height":91.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":19,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":155,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-2.6132622948795756,4.989000000000033],[-0.5625488663017961,93.32]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":170.0,"y":22.010999999999967,"rotation":0.0,"id":165,"width":72.0,"height":73.0,"uid":"com.gliffy.shape.basic.basic_v1.def
ault.line","order":18,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":160,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":158,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-32.613262294879576,16.989000000000033],[70.43745113369818,74.15999999999997]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":113.0,"y":0.0,"rotation":0.0,"id":160,"width":48.773475410240856,"height":39.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.relational_database","order":15,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.relational_database","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#02709F","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":163,"width":88.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Key-value store

","tid":null,"valign":"middle","vposition":"below","hposition":"none"}},"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":null},{"x":196.0,"y":96.17099999999994,"rotation":0.0,"id":156,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":12,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":157,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":158,"width":42.8749022673964,"height":60.000000000000014,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":11,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":95.0,"y":127.33099999999996,"rotation":0.0,"id":153,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":7,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":154,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":155,"width":42.8749022673964,"height":60.000000000000014,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":6,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null},{"x":0.0,"y":90.33099999999996,"rotation":0.0,"id":152,"width":78.0,"height":77.68,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":4,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":63.68000000000001,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[],"hidden":false,"layerId":null},{"x":23.0,"y":0.0,"rotation":0.0,"id":134,"width":42.8749022673964,"height":60.000000000000014,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":1,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":null}],"hidden":false,"layerId":null}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":43}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":2,"endArrow":2}},"textStyles":{"global":{"bold":true,"face":"Courier"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.custom.confluence.c20f4a380e3cee362007f9e62694d34d947f28ed4263c0702b3dd72d9801532a"],"lastSerialized":1445556181238},"embeddedResources":{"index":1,"resources":[{"id":0,"mimeType":"image/svg+xml","data":"\n\n \n logo copy\n Created with Sketch.\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","width":59.29392246992643,"height":42.185403696,"x":0.4429050300735753,"y":0.7077644040000006}]}}docker-1.10.3/docs/userguide/networking/images/overlay_network.png000066400000000000000000000553541267010174400254010ustar00rootroot00000000000000PNG  IHDR|6b$) IDATx^TE 99 Q0 Ei5qW[A]]"DQA"f ar! 
ޱv[ꭺozԩRѤ("(g|kE@0(@PE KPϒf*"c@P,A@ ?K:Z(E@P%,hm~+}[)GrrrE PϪn(%w={e=Rmn!v]nٽw+NT#y+IJҪnMiX4]kXT\IT"T׉!sMֵL ?<3|cl޹Kf,_,+9k6˪Ųf)]P#+o^_SDoPԬ*mkI +B@ ?U{=#lvYHFX&_.[kH~W^@gr~vg-JSR~J`חzA`"rG۪^M9{+g԰Ԭػ`E/BRɄOu[}/ߏkVɕ u(RPC@ ?0hDj;~;gE"$Y䝿u hԨNR߫/SlP·AI$ U[QK&~)7Y/0j!~!rዓe*2p[O- %|+4S죓CJ+}}˓;_Hj9 x6Y~<8qL^\K*aͪ/Idž' w}=JXi$ ?4q\tX{eiנ<0),gleB1hR†~z$3c+wSfl}1 c"YwӗɈo;M ?u;mu '}roع|p1c+ HNǍEf߁$%PtVa*>W\{tǽhWw2$%uKfehVfSvNfyE=C52(p~dmDvk)[03Vm0>Dt#Ѡ¯[CZկi[7zժ iKF ߏ2@@ ?T̸p#Ȃթ!wh"[6 kKU'E;s1pU%sJJҰf5sJZ+m=YI|t\l J'(u}gT~E@5pI~ͪRjr+ pϾRO=xݶS8 ɯ߾3^R:}0`X@mVW-5qPK%|Ԣr(JL!aQO%0NM ? ;]4%|4"JO~@jQ.ZB@-ϴo~w6 (ᇸsjJd!,=V([P 6}((,/I ;wJ:uOի˒%K^zҤIT G"(gffm+w~wg+~͚5ժUf͚ɺu>|9S$''XU(mM7l +WVZIݺuo5V{޽ c޽>`c;~׾}{UKpm ez{3ؾ[oU:v(Ç7o^y믿mR}~ƌF?ägϞ?o~O|7<ڵ3;\l 0Ɂ!$2f/P =_Hj9rWJ߾} CwqKNAB^blݺUz%7n9sH?oW_}%-Z0P{1o+kז_xʜtI3ϔѣeժUr-[o%3gΔ?R\\l?_ ٶm)wr pB9s w}2diРԨQì< f& zg~G7Ї~hSO=Lk3)tMr=2dz>+}3w\|ԅџgN/$3rH޽{V9PO^J:Bf͛77 ㏆ܜ {^z'w}!zHx@ˑg &O,ׯ7Z'e5k%\bo¿  2fq 'l$B ᅲ?rm_|o&qIʕjԉ URϤE;;S>#3hԨQV3PO y]!'x$_WC$Hl,~ظLB,$q12 V8ϑc gIyBorlٲEFa6li_tEFRoذ ̞=۸m&L`$nˑt^}U#oSQQfѣ7dOX '|Y5~!m^:":CӦM3O/Ґد_?Cґ 3č_ G1?5 2իLw3 "Nِ>L$W2rӦM%䄵 L76zͯ9ŊO>1+N;MD>lSݨQ #L8g,$¼w-_L 8堽CF0z" k52Ë{eEL6#OR&{ XEgo7|SƎ+&M2{>  o~r 9= A"7J~#Ez37'Z!@4t?OFf-Cd:wlr?oڴ;$ ob=&-dC&zǍllڲ A3@C5MX^+~Oܲ^GBfa?ᮻ2 7`I cHwaeijX Q74)~#7Y\RG.],e2e}# Jb`sFgII#2!`d6sIH1+b6JYEty fO†=PIF* rwE]pdc߳7@iV$S*~HO>pԽ3? 
Zt@AM]vtXx3lĦ( > *Xq\ρ-rOVꆵ1NkRB@ ?(d\W hwF6OqyĺZ"dd5dEw93Z<^7X6ip6 -#^/.XhH45 *!ĤB!xVX뎷3VM@POY^,lnv!>!|d$,|`AtB}*mӦq e2p}(g_xرlT2s,}w5}Љ6r §fJ=/]|s)m`&LOԠAtk7('찿 +@s|S޴[h q$+t}F na{J7v%~J4;;# at2@ w\7 jxP9gFfU>B.ܻ6NXN0c+V v"/cw+K(gnߺ uf$ay S&hqO.bc}ɒ%F!8<8ۤoUFt=V!6<L#;.S4X1r/}„ &!|VuѓF({l%Nƒ%D6בkFGL%2Np 6٬f@jtINxBԛh~̰T؝&E@LLL⿙4t}.X#.LX&@`"@8+OPKjmpV:t0nٺ*wHCgniāQK>}>H",ڵkpޝd,I7jM"B*dC¢5)">C3"|믿N*ѧ'j|&V uC5X)3T1`Gb~0bLq(1YBbJM: O;.c|2ل Jq|zxyKC|tbpB޽{1&rpwX)gсpmڴ12iE|čP?53.5'Nu3.?B X|t":KL p ~1 iFDD&c{ps S;+3cBS~H?Toz OmI+d}H ٯ]ﯨfGs7ȄUw1 :V38,@&havޜzFXH?4Nm SvgH/#]xTsiרqxjJՌeOY%0i >?8d'7@.+ |·Ma {p-*NX*[V,UǷUs ٗ-|jƲ?k";c!p wˬd $}"E$gˏRZHݟ1rfY a*rRRzn1lذ~ ]_0=^8o|S ~ʺNƼ9rƛI+`/ %ؐy%U-9+IԮKv˕] Ûʈ˼ѥiMv2ov9bOy%}!T)%|BOeZemzfըW#OF+׾Z6 iQ]>aE7a֋[.bpju7db/Jfw2r iXJN >N*RJ ߋ Z:*^96 7 /0XѮR;Ӿʕrq*mgl߹צ[#OޛY*)Kw}ׄV8(زK:n!\ǵ5G򒺷mΣ<ɏݲ-d~^I?)#QFeMBAIezwZyd⸪PfsF'q=C7 jk.'p9nL$ R=A`2ǭ%q mk:S-i} y6L'ɓ4UKS·ꮴȤݴxV(V\>y Z1?>i=dz&XaryG4}=.&66_.8D>QV[+%|/4W5|wrؽWf.Z+%lΓң|`co&F5gOSG/m-x\~);P`wÛʞRygF7a|aDu1߭8,Wgt4nmJP{ʠ'*̼mG,]ݪ9:n%xœpxgZyYRI i&2A.xLR 3'l=*{Wά~ݣ{Zvzv`-iϮ5nvƺ[R[_yR C[dXI#|a·S5{I CnؤCz5bRgyƦr5ׄJ/oѰiDtK]$RkNhm^}F';ꢓ ioUwC }vA':4yC ҉ލN7W%GD8ب{=U~P))o{8 09ٸqѲ)ߗ.!U*\Dh[_ܭmݻM,s^zIݺuSY|wP%Aa+jw-ݺӰ묮ᤶ& Z:@^|6o ·հ^$eų~u "ws89]TT${lZ\_)Ihky6)vWk;Iz"s'bߓn-w ntWnnՂAoڴJ"Nm#֮wρO"|vhcyi*"%x~jc󲞲w߁{Zv.^ T U2|P6MXD!:qU ?q,!$l#|wz衉wT'|[*9^fp?8z{٦-8#df&)۠dG5|;bܴ]=*pȫz}.fwz[t{-eyK n 𹯖{kcTS>k3ɩrtoYK>IRn;皭gfJ6u_u711S·*jV0I5ϝi2e&y{zaٞO+6!(-t"u0''xut%3Zked‡5a.]h΋SVʥǴ43\Ԥ<e/&碈o VO ?ޞ;9Ft %k{5j>=[ ݶF{ʄ MQ kҿc=;HmrW֭Zz7y'-ʷg={<֭p&oiUm J:a߰u,HIǵ*]Mj $rMWFHri;m4:Wx{Zwm <CAA,],?Dރiw('Raƍ¯_ԫ%h+^J=-w~iظ m.HX獘&w%|+l3VCWU,ٖtO+`LoLEm}ӑ}U/ظZ [KsHͪ:M/_ZVZe-NvuVi=F-7}K ߲pX׬Ѿ!{.UvK^{SwClΙ3G.]*ݻwΝ;?9 60l`m +[S8]{JYV# S·*jV0̤{ZahТq?]6ʷ-F-asX;~~Uo@Ii'BN 8Kuf{O1wJ{dˎYqgŅJɞ]wnɭRU*ߴhFeݲn[lskZRr'օ{ZŖTұmov8![7~Cgmj^UrE2n:9c}Cv/)=`ϣ,YbϖeIm)+eMv/)WUIѮᒏlHX/rcF~P;ə_3Or+W̯ȓ7[s ?xugUHO!~CXAz!%OeGdƏ[m3ש*D>#tO09(sFmdt {JJ=WFm`]A!!% PNXFL\Yn#46߯g-;PsͶuO+# NϴiJrr߶[BZgi":3dzs?Jaٷn ?v=-!hK~ Tç:Qf]x.|P 
xrڳϐ_/+y.£KF5IJ'Y_l'||?YiyH=-[{3p·%țT)?e3mVytRRsWJjV5d_.ՌeO $dȤ`WpVV\پk؄_3O9SV?ҎmO}V mh&x]/Wy~!VlkidwXK²wF|Æޟ:&}`OQcm%ϓ tmL B"|^:}ԕy]ޢ|6A^: -5_x'oqk߹,oK6>R^7ޮ,XS)W2QF:1൶dٰ{ }cfZ%F^~Gs2D{JKiRfIE2y_Ⱥ<yG6+ kj^ٷq|6:1xM\Zvm{AB:СCK?)օnU }DO+H:Udپso\<{3 ]e %~|l/@ פE !%|^b= 9)[j·ѨPXu^= D{wpikv2sR%J7~ٯ &')J%A(I!|uz6rӶKZ2kmʷK6Iԥ=dR]һebM*'/! ʤTOl{Jk|-io{Ѐv\x:UMNM•I2gcSl7cYluuǐ&""w}nҏ֓-xg 47pr0!I{jKtSDTڻ/cKنiذ\xnx{z]yFa&|b@d +}=.&6N$CeE}~-,fT&kOW}|c3G:1tmV汲<;tmcݰia%|,9첧,řÊ}yXghdך]N?%ټ}d6'r8(,)CLl;,_wzY! SBE~Ǒ2( ʯ.Y7vJ"ٰa lT_j$ݝp&xgZYPXTrv*M % 0c ;`gS$|ƭGkF~_FꪚWB|sUA xf8,{crԨQ[ RF,qݽ=JΰaäQFnxw.3ZÊ*- %G0>ʶd]"xqKWxk\X gv\Òl 7k,RFT6&4y!}-tT$4]%KuY)G$r_^JJFA3D8vgeAK Wf3=$7wi$Vð E[Ԗ^@m,/MY%B-GFqw ;lQr(oOWsh*[ב,D6 SOuo R(`"EڡJvFH!|6XɊI6MVI`" 6*d+ZWYvq_=_Un`_;ԩAkN𩈗e EeVX7!8f6mAV\wƟ{dk~<#σWL^mY_Pqs*P>+3kzNN3ҽe-|&9{V'(Zu\L|W݅1 %p>X] e7Θ1c6)LCCc!CRcӡ'1>h6┕r1-e-r+sSL }o d7Q+04gfʂ9xwhTCjTo=ǂ q"|*?l಻Md ʕW^)~TT7tt]ڸf5k|q=iE#{Kerr kҿc=;Nubr`g߽1n{n#jwɾ2x3S9"Ur)#gl9N(#|Ciz!d&sϕӧ@L}ÇUi;z>*O>8l%_>yMi6mIھMi#*x @ )8-SxI&M?6~Nw={oQv)<{Œ/Yg%8G1!=SfE}:u2{ "SywU%C. 
;A:;I "ɖrBM>OD%h"9c+#|o]zAydرrJk/X=>iȑʿuꫯ ;XhW6c^[D;h4eojvΖTbx!}dτ- 9fS~'7Iꫯ6Ko,$ѣ'0I ~S;O}QSW^yEظUc-|Xc,#3dϝnɫuw=dnʘ`iA )mU4MiӦI?[Φ-39sΦZ>z/lt|:J.RCMnޯ) 7x~S`uzG+|~Ux·Vkm̙C8%mƠa{Dž7N7/4SO=>["sX8_7jgK-|&qFڸqO:O",l.Dw3)DwNd*..6[ƁݻHȭZ嬾MUް]H\'Gwo#whJkd5 L M0}Z~ l5 j4DFfحܴ `:2r%)ٻ?"arnA,;1{-wwxaf5ɲwꞶ~$&jHXphIw*&{*w܃hk eMwV~7I'ݼ&d~AF .?d?9EGY\~z~xa8b#zMapU猰HɺLJkn^:aZ1U XgCU6$# i8?@IDg+)]%z') aK!xnyTGZtȯ䭎Zc3XtzۋJ*j!gk ع#!hIo#Ӧ+ZhYPX,v};KCpW-,ᔯs~$tC~o"uz}z?X6O?Cui|nލ/[o6]E4Wյl ߦ36+~DAl=ka}xSSR*(t- Rv{yZ(}ڊԩj"wvjZӵ\fONM6*{ .Aid߱}`fp*ˑ t݌N *=~zsx2ZR$&@CjW4:=~3J #iF-WQ/7ߴ ͫo%|L #`>d*wNgiۅ*>@E@_w @ /0kqTcIvwszƲ"4l~hS[.!#Rkmbd~?iۦAuٝIOdIەwОQQ][D;+o%|Ruh <2a:3W"b@Ky|²kp-}êm*Y|mV:t 999q~暌w~^pF{p9m)qw쿜k#WR>V^̚5KfϞ-;v>}Hi?OܫgyƵl߰5c"0+ٕKOfָXbL:U'viqsösz)~\>3(%wC(cssڂ [n>5QFYŹf{cnN#ۚ2ŋ+ZjOS'V_~kLϠGۺf2pCw#L>].]*={4V~nhțVCg+5w~CS,#5e7V~~~)KzVGvúh6}oRyl7n5.~96liӦIaa#׋!O`c\_퇭"U4~lqK8 s=+d$зo_>|x\/fuwqGҙ3gywV!`w} :t8ߢEC C4:.;wGyD^_:KD1bCzꩧ̊`ɒ%ҩS'{ά %V:XL DIDAT0;wփXIRQ^ ߷ail?-ZH9ʈR=~r {'cǎ+W76_|o.իy殻f߁[Qڳg~A,TBX~}Vx zgLDT~l`%,|,}M!ccCx,5k֔g}XlaCh#G+3*3x^իWI-9k H qd ȶjQ ?/V6i7Iꫯ6.~m>zhaU6+TΓG}Xȯ sk޼yfp 88`f*3f&-}.kJ pm ^N@W(Ϧ-2+W.lb#ь?$_6:>V=UW]%^z!yӦMr 7XU8C`IH1sUQ #V#/u?jdxh܃ 2V=$f.=2ې!C~JIAh&x&VX5虞`ul㇏ #ֹ{H|x {&~Rb@guP~R<+ gp N} //Iuhe4EOݻ]K˵!ˀwNd*..6[޻MXRTI:ecl [ 8jncsgrGK #>MW 4CbFVÜN5)!oS\7be>"}˫"!+Bx-9Do*|ݕ{S 9mc먕!+&l[E0!wα@6/˷nB6<˪Sxtc'CVLbyk mkk׮IidwJ}JWAQQĂ8u6ph NVR&)Xm~8z饇UY'X^;|baq#mkBlģXm''?W,!~=m&VR\\ Q,|,jF,aMXzxr\'!~5Nl_Q>,y|!z%DY%p~6rVSf yK.F)3ZK!cnx <$+% ?Uvl+?.wH VʉdP~G8PbScc14Mz-RyGM3///fŽ Oa~zx IkXu5M4V/I z~;2]?$vL=U 2x%$v*'vIhÆ 6`8A,dnb8j x<.+ǃru+ٳ)J,xx _?FJ)!| t)/-_ W׋?ҲCSi%ujFn:˧ז?G}t-\J)J)?<:>5 *% TԔ~@&M;w޽{ ]doqC@ $KdM:#t. 
+;v"r&?d%|[C!Cf}Glԣ5rNIIILYG %xH_=xBԁQBN<, 3!|~Ʋ߷VC%Xz "@zubp]lC!xI=êWͫxB@-|Op.3#%#PkZJBzO'V~VC@ ?>mx劚@z`+gΜitz ƣ ӧˢE{F,&M@(jez@?oY~=o#);i…2uTU5kf_)ͩx@@ Xaʊ^шM]zb,=7kW䅳e6mիtxhRF@ oDX䃬%Eט?# }h(@=ƍ+MM(h C~Dk׮Y5ODK>ڡ)/] m}d7oΝ;;?PHDd&p]v'zӏTvmsXm۶5V>ehRD@ O4CP#`YQ3PV3$( T~<7HAA򋋋o߾ҡCs(K"J~!r"~ cBlDZ!x|ޑ|}4w\#AT|~fkY$H LnuCZs6!y~Ƶ͐@_g#A,B62:Jԙ~4%!y~mG?A,?P2#c"*-[4<|*I># ~A Z&??<ؕ, xtxq%Mֻg͚5E==ͯ:|Eg"g&%wPT >V>gϖN:X5jZWP _8>⃚ ؝C^Xs׆˗+D|6mT6yIl-EFUOʄF=|l;vX ,8F,޹sl߾]X hRb!C9A5kY^zfDZj͛%ZT#+.@܈Ċ+L M]%|Jn(!WRxq;o(g .󟒟o`r>a@f}M$Jく?|2d3`n/o&{>O>|\`qeUvmsj>0W/| 2H? >\8J?~x/)>9iygێN*-["6G9#W_5V;<^{|ᇦ~OM6ɧ~*?#gy4h+>!z_Qȑ#+ćpu駟.w,YD>c9c"vPnݺu;vs9_]}¤{رa5D}(/7\y)(`owZ}n~1K/ܙ8'cArWg-  :t?0pu뭷5sԨQҩS'9~Gw裏b9szr%F=S_MP· }74 '|?l mJB?Asϕ-‡p/"e/yꩧ}=H|l2@xG,PozMm壢n{>Huf;&c .|MT|lz|%ziEL}]3q]z饦NL_4였&Nhe"c"SfLZʴ ӧ >/CfLd^31sss-FYK&%6;VN1K}0 -Fc=&\ ŋ1{믿n& .>o>ęp1yV\)cMPw ˅X%X+ݻw7!foX!*ψ#)nv7HiI ϑ֬Ycn31R ̚5|<njc6A`||PwmzI{4| |Yq`6lfD'V[~2͘0u]g.~~b]R;CH!|&J[RRg7&=g` Ś!}$oԁImrJ,h}M -Znf2wÇ:za"§NLNb8+if|R|^G@ ߒ۴icrcb 9,hڶmk%ȑߐ> ^zɐ;ՏҨQ#3Q` `y~T"?A)~@D H6;B όu3A rdB4xaa: Xt bR4 u,wV`$bQ>Hf;s<#<2/_Wށ$VPX'Vd$&$&'1;VYLhc,|w20jU>+L)VuԑpqJ//r7ٌlϣ2 [d aÆix`c}@,; !v +` }tfd) QR>?"Bj7=r710!L*r:,A\X`<>+l;8ꪫ̳lذ+K5&mz cXnOM41a h/~ nNBj r2;ZB cX} -1c cg g5>t§'GbÐq4| 7eƌ۬HΤķêYS|([^zI'#2a a-MS3("I@Xw>&r A $ʇy9gP܈y'dEHV rJYLhlđ ;ȅĻh3dUcCH#AУ\r "Z❔L +4ʍ $&`ʢXddS2Y'>L9vXlʯj,cY,]&7|bYcL2v,|7sdšgHVyc87vA` 80g^_/es^%|2+6HƑ^u&*RMFbF]+j'R+_,6O> ]DyI˭?O'2b +Rđ;1*FYyxO3J:E @x!%p%f~ݥ?v" \@ӦM~cfSE %| "d JYLE@Pu ("%(gIGk3E@P1(@ %TE@ _ǀ"(Y~t6SP%|"d JYLE@Pu ("%(gIGk3E@P1(@ %TE@ _ǀ"(Y~t6SP%|"d 
fIENDB`docker-1.10.3/docs/userguide/networking/images/overlay_network.svg000066400000000000000000001307341267010174400254100ustar00rootroot00000000000000HostHostHostKey-valuestoredockernetworkcreate-doverlaydocker-1.10.3/docs/userguide/networking/images/working.gliffy000066400000000000000000000453751267010174400243250ustar00rootroot00000000000000{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":376,"height":241,"nodeIndex":152,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":1,"y":5.1999969482421875},"max":{"x":375.38636363636374,"y":240.14285409109937}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":85.0,"y":50.0,"rotation":0.0,"id":150,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":60,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":134,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.1159999999999997,6.359996948242184],[180.558,6.359996948242184],[180.558,67.0],[180.0,67.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":196.0,"y":100.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRi
ght":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.0,"y":5.1999969482421875,"rotation":0.0,"id":134,"width":73.116,"height":102.32,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":54,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":53.0,"y":57.19999694824219,"rotation":0.0,"id":136,"width":119.0,"height":45.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":134,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[35.116,-0.8400000000000034],[89.0,-0.8400000000000034],[89.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.0,"y":116.19999694824219,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":57,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":113.38636363636374,"y":116.14285409109937,"rotation":0.0,"id":129,"width":262.0,"height":124.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.386363636363683,"y":113.14285409109937,"rotation":0.0,"id":146,"width":233.0,"height":127.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":175.96785409109907,"rotation":0.0,"id":114,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":18,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":95,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":6,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":96,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":15,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":99,
"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":99,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.1785714285714448],[1.8600000000000136,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":97,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.178571428571729],[-1.4193795664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":98,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":99,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle
","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":112,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":217.0,"y":177.96785409109907,"rotation":0.0,"id":115,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":35,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":116,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":23,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":117,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":32,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.1785714285714448],[1.8600000000000136,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":118,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.178571
428571729],[-1.4193795664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":119,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":120,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":21,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":121,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-1.0,"y":175.96785409109907,"rotation":0.0,"id":122,"width":150.0,"height":54.732142857143145,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":52,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.7321428571431454,"rotation":0.0,"id":123,"width":62.0,"height":33.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":40,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":2.94642857142857,"rotation":0.0,"id":124,"width":3.719999999999998,"height":27.107142857142843,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":127,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":127,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8599999999999994,-1.1785714285714448],[1.8599999999999994,28.285714285714278]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":2.94642857142857,"rotation":0.0,"id":125,"width":1.2156862745098034,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.1785714
28571729],[-1.4193795664340882,28.28571428571442]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.3749999999999987,"rotation":0.0,"id":126,"width":1.239999999999999,"height":28.285714285714267,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.3928571428572809],[2.0393795664339223,29.85714285714272]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.7678571428571417,"rotation":0.0,"id":127,"width":62.0,"height":29.46428571428572,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":40.732142857143145,"rotation":0.0,"id":128,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":185.0,"y":143.1999969482422,"rotation":0.0,"id":130,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":53,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

isolated_nw

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":55.0,"y":139.1999969482422,"rotation":0.0,"id":147,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":58,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

bridge 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":62}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":6}},"textStyles":{"global":{"bold":true}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":[],"lastSerialized":1446315118663,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}}docker-1.10.3/docs/userguide/networking/images/working.png000066400000000000000000000436171267010174400236260ustar00rootroot00000000000000PNG  IHDR IDATx^]xŵ>lm&ٖeJ L6b @B5Bh' L34*[,ɲnKz?ʘ{wfwg=çOF;;?)3QWWWGF` 2,x|0#`P!F`ɂ#0@EmQf͜`FgEVW+7{AhNmU<'A4a`OBFH!׾[GM/fE<;I=rZCgl.`T"R{k鎷K+=aHV %#BWӇ׎?\(#0@ ed(A׽o$tXa;iO/mjQ=L^IeT `,ލ-8`<)/q;'w=ӊlWda2~`c)33v֍HGyqw}H|L`F 8" 9% 6CtsF1-/iXQǖsTT=:t05^Mk,,I~`c FLqRbð620Y0Y=>F!LL!dF& & 0!DY 7n@iqWe;7̤zߺP(,A1@0Bzk'̌ :awŤhN^djO*-21YÑs3@z `,FjCWדʪwB7nFg?cB<13=A1mٱU]ڕ/-bp$0QGY`//,Np|.~k+8aP61ѻI8v'1YAet@Y\4d*[O9"淵$ g& =ު)E: t##CɂF -``HΝdw0Y0YA6#LLi1й#& & w#f@(Y\YJqǝd!`,.;;q*)}#z ]ܗ=.Lu|j(Y80邀׆tr/*A5k׼86'>o\7ZQj]õ:Hە^|܇;ی#MEn&?Sr]K~R IcUNz5LF ! )]jt[;^;a"Ü0n0Fn}+d\F``Ê5 9F }``H=e0Y0Y8<"#LL3ڹ#& & ǃ_dAɂ"}F;`#ddx#>0Y0Yh2c,<$Zڹs'ݻױRbFFeffIEN5dωHdqF7omڴ)T/++6lH 4F_N(7VڴiSjٲ%5oޜ4i"d&%|iCX|9}״ccj4rgSZٳ'hB,81@ɂɂFmP>} Sz#dd_>)4e <ڶmf4+LLi pFK#G~ NdrJoh˖-ap~bp{uuuIව9jذa"؁Sz" 5G/2yD9䮪&*k.ڽ{ſkjj(dѡC޽ ND2bAB Y>vΎr]#dfZ:)./^LK.m۶К; K.)n)Wڰa͟?֬Ys , iԨQM;3Y0YX]hEEE"؄~ѐ!CB:@={9S[nwg7CɂB91̚5KL 6njëM%P\\L_|m߾}Fo^-!)``0E͘1&"eƎ+vr]`qbŊ:ԬY3:C(LLZǗ_~ynt&FM[*38ΝKsٯ7}& & Qj*9s\!:dѦMr8SY Elo߾)``?;;FA:u*3%Kc| vX8:4ր*//~JJJСC._NA":4& & dSd21YDF:dd5,`L&&ȈXGEv˃ddk:ha#;Sq=:4N͌Pu}8La("B L>Ũ6{:ھ+ad:xdeh܀Kg=cB<9 ]K;ky6ߋ5",tFpt0YDGzb,FjC݃n{kx^4[K:ۂ,NމB*m275̠_bPdB(Zϙ,%O1Fϥ+^\H[MSȡm-C:oK{j?. 
:)Z5mdHd!aQTm1Yϳ,<63YR,)mkZB db(uɂBk01YhLL0Y0Yh && -""24& & dSd21YDF:dd5,`L&&ȈXG$nmLHA("R La{){},YLщnz})uϢz^=aoJdYޣ5=&9t*+3ɏ}? d n5jA lK_O>#b0Ʌ&pV! $8gkc ݘ,:6~]ZXZd@ؐ EAL4B^8^.͚k^0d)F&8,,YE0V1Y0Yh?6CiLL0Y0Yh && -""24#dѸaL&]5ug}nϢI,jդjDݶf_ylb3ₘ,B,O{]̌ :AjlGҸ [G{jUe]/7wRS,k=U2Y k,pS\B|ݤL7oS^,Y(ՙJڼ&5yS3QMdIBte,0_.Kcz$eq es7E(A0YӴ@d^1jnʋ,xjBSV&hDo, :u`0񩅫 &pˏ2Y$t & ?>`d,y5L."]"m`0d,҉(,)m#Yq͚5@h7&?] $?v!M4ݻw?  !_c dQ! +VT }RΝYPƍ9Hm1336oL˖-ZȢGQF,xU&IP{%ABKɪgϞv&+}ASIhР8jkkSY H?)9mΝ;iȑ4w}2d=ta9ΣO?T J m}!q$`2Y " d$!Q 'dv#Gwa+tEK .]wE٢G| ]uU", .{***SOduVVsg+mRqq1]~Ž=uTAXҖ*b֬YԬY}H[ ã>J^zx2e{j,~-07̙3`jr)Jm5nI1bĈȓ Xdd23o`lo͋f05]c7\FE. 6z):#o hݺuH!??J袋` /SN"qGwy ?X{fҼysIhذ!O ;3I ѣŊO= 2qI|HBF!lh$oLNRI#jL/;~c.&e"wyGly'&``Ew&c:@FD&"dC~W, Y&ɓ'4Dm]r% L'X@@yBS&W&,d$Stă8 w4 zME1ơDfwDc',~S_Za"d$*CSc!# h8GLM!x05ޯ h H`:kѤI6{ ƍ' GyDhr˭Jwq(DNT Prm} P~DVJ缝 s1’ Oh&fGiFz ڡ u}Ec_ac7 }O# _~eY<ŒBI'D/?n6aB~LHXݣ^2:FPx>KF] Ϧw,Y`R/H7$t Z>96@0!1Bv0^6/!UfDcFKc*&gyF~ &`/a7&{8BZj\%tMBņ /3fbbRƊ;F}:#7'S<> VEp>?parY_,HF b ^E@И+NeNJH^'Aq$$A`&Z20af ƨ3&6Yv/˱]nn1`2ڈC6m_ux' XIJ[?L5dRiaAv'GI (&\₲\'"_U!vT[FӧOlV)oZ"t?++;/߃v &RJ0Cпdct |Qptz%] hn>>H|]k:0Y1JY`= [e2E: ͞"H@LJ8qbJ,o0YS.CP0?P{B[nL[ЩuC+&'gߔ>?|Sz,(#p;۾6Ig2w:tBb͉U/}Sz,&ظq*ꉉŒttt&1h0Mdk*~zd/9Xn& brNfеuIDŽ'K& =|ɥr2Qx#3H:Yd3ɒM*D7KT*Rξ$& =0Yy.DMxV*%p( o嗨t#Uғ Nǟӂ(/&CV5"ߐzaЅP ##F7 w(( IDATy@HYYYnz.oOod0?#CYcK)Ɍ6NTZ}JJӂ,`==V2& +g5V)([uюu#3$Y`=۶mHPPP@կtjɓiԩ?o\aN$0=$ >s B~ C=$$A[;v@+DTH.ڽfT~ w$ٰ(͆R tƶGaz޽{ˬ¿aN i|3"|T ^{ML2DSO =ʹ; Hh;&D~(L|[VKXy ÿ{1Zt |hAԩ 6UҨ@Wwݘ9%T\6:?/L_@yYdkgB$8}}n&UdIFJa  $ ?-O1%%U@]@T&vwNXucL0}!ڇ+>*>c*..޷H@¶|pE =I&H@&O! O%Cy@믅ѭ[7ٗ窓j~FB"G*2HИ$̺Y֭[m\nӦM OL\>K/0az#ۃ`DPRdQ|\ۖDM!,PLFd@wy &L@Vˍ bYuY3Lc>' x$m鐅$`F'HIF!R INkB~Nj"ES[D+ӧ)3VAH6maÆpT:v۪{(I`󛎣n;t13%;v2$=00+b!I~8h2R p#FZLR/!)h* Pȟ̷CAÁ?&3fS(&|"t,d(2dl{L0 {v8LXL1`iҹeaKP!VBML944A i0"d'"yl`LGle^{KL Cqe!+YLN$0qL2Xz5H$ZfbwH:mY{8 ?Yi 4 6&.x vޭL&2؟o KT7LQH{Qh:(6= IU` 0\)ZTT$n¢,)!*E9"UUv!4oF!%i`q`8,`]Vf[ͭuuDuT'g WNy'b! 
ʾPE$ʯ8q"B@5ݚQzF:(t^r.evZ223KvԼicw3VoAk*ğ;wlG-5q\;iMYQԲYS<-O;o:DaoIJr.+X%|{?rIyHw2&LgdvMOr*V^ZfeQ# GvwK?^@w\u4%^@s:(Za,t&!f'sa>}ᕆZ}P'@ C*=! ‰3[N@GiunzԨA&M:vs>/(HX tX?]='kU hZ4/ VEnBu0w"dgY# ણ{Aˠ8N8Y`O"98 g߻y+6/QL:e\_*pA 7/:O9/uAKVZ:>4,>^Y4Q<9ᰬCUA0p[mP6BBbƤ0Yp(죐x\^36zKEMY_չjQFQı}Osb zsE(5Y|V1YqrUwN8*pK!VhVv*1m4eT*4 d)bqxtwyaqbYth,_ՓIcPasֲ5fqB۽r,}]1YNG$NMJW,+x3qiM, ?1JVjՏsԑMf.dq˜B,Ӵ'*d|MMj GP t#PmFS *7;c 5q9kN?a`r~Z$Y YA]PK6һGCM]@ykKW:ndv(Z[A~] 5adj~\c?J% QO^TDzfQ8aSn4Eདd(J[m11q_n}IFzz8vT!<nrzۥcF|+U{3 8vDh__\h(+O+wGpFng;)'$T N|vd, tUCrvhUQL:zDo,`:]=Y5+uKiOm-=,zwS;ok,TZOsէJ,+o'( I\u[3NE!:!q^,e*~XF 2Ὡ55DS GJ軥O&[d*ܮ<;J-gyȘ`?-B#daضsU mWrxvfqޔ{p;2X[AZ.>t(Y>^?84LZ%UJCYVEG I**UYXMPNm| M+֔ӌYҋrEJ1,܋0inDZ.AyԳ: wY"`Y3XEG9]eTs窍{^pMP|Ʉ}3ؖAܱiiVi |N5̠ E::'uC 60r]ŪM崷~vHRʶC*Gvz,XVr&M:(sdauc;MYm\oJk+y L{H/ڱꕤKWҗs;'./,jidRUcMvZVȚEAZHF UTi3(]M3qAU[hEIf3+ul\M+JVQm]JڨrMUa$ r`Z`Иh<-˫,]R ~ef*^VE.ԺsbS*^Vٽkgj,TSqAݻud׹~㞊,6fTβR!eysʞ5x,<,dKVW(-T,rr:Sɢjs5^#څZR6WWSiZA];Sk )Z,ޠc%h( L 2ga1dRkUUtZ֭ tE[hz͢kԺȪ-մjAYEnzꆼT Ųg>ga 'z7Ok3Ug 4{qeef>yb#6A}6%krRD5540:gk :IYn2IܪإHD(+ XleH8 EV,^5\*H6y> oԟϔ鮉=]O&h:٪1,+´X+!eU-M'(۴ZlgYVVfX+Y5pnV(2G0*!a!n,X! 4*W eUTnz5YqM+1b:8+[N"Zqxu?!`%k7U,98%/LPFYrP#`DfD5Vrfm)OUx%{c Vr|Xq{wD:YA+r饗,s9`A0g႘i`^*&A7"f->rsjc}PWZq3 Txl6mᢠVSO=ΝKYYYne=~;#Z֡iBR%ӧәgIeee4|pz稰PQHVcr܆3lh\T6 FٳڵkGWtă /^,Tڼy3=CtM7ѣ>J_|q VV0R%e˖Q޽?kFw}7͟?4ibDfBT /T}o$5?M/"b[ YU-L'N: K {mҷ~K{.-\;<뮻(;;ńꫯӃ>H 뮻^q,O_}xW q2dZp4ʿ[NA6w}mh_N;wS_-ȡo߾d+\j5~Fd=,XL\r% ())6mj#WjmeJ+c8ŷ߼yANX Ojq>jU00iSO=EGykoF1/X@Lx.x֭[GoXaV?x`z_~V| auyiѫJ tI㎣;SkE]DCea:1 $4TQ{$Z Ɛ_J^,MTi`Q$n6lHLt˳2@'k3@Ux~)eff[oQFFm۶Mh p nݚƎ+H/_Nr ͞=jjji ^ZZJcƌ۷ NPWQQY,0&* 4Ɔ ;'VMJӓmJÈ0 pԨQM`U+(ho6ܹ;0);f͚ehj0+S@GvvD'>IDATS€:I~׮]o X>4h@h Q0O͜9PQ}exBd+I',&Mty {xq,t&LFIEMV(`xGZtye51oV裏): V,p4 OdgLL4&*݄駟,4  H D [F&tJzF|SLOYO,Lh'? ~iG00@n}JoQ_W曅 .oX'Kq!ݔ뉚V'%d D3o@}&)$ViȑQFXm)_I(IkBj dc8p䱚aΜ9uY}ئ̄d1ydėetD8#|UUUOC;n8mMQ&Dal@Xa%&ׁUzseԴB%L_ fPXcTKc}! YJ@>cUs=9 A+Zٽ{w(F$Rǎ5!^ / |0A! MH ށ:^-;m0kfzІXěTs݉H5 B__oTs:#c,<:=hjAvz _ ! 
ejk?~#&Ľ{Rfl'`ӦMIm񁌂Tab;h 1H2XV%8Y_h>p|GD_lV * 䞨rzˆvS N 6ɚA '.+Jje  D+a @F8c w D ?!{K)! 4 @4t&)4 & ϣS% LV s(0a|@$FTBuz! Фz2, 4sU RP!pn dȃebO 0iJ2<^91A&!H#;YRJQvlx q"<~'1v߱%@& )T(_L+WG.ЏT94a$ 2XFv}tt/dDˈ5خ]F4*CCʆ dX 9&Lw j?@1 +o Is! ai4c ''Ip vW‰j>؉36x$D('I Z9q8p&WNAZw,@ʎy)W $Tcontainer2container3container1isolated_nwDockerHostbridgedocker-1.10.3/docs/userguide/networking/index.md000066400000000000000000000021231267010174400216070ustar00rootroot00000000000000 # Docker networks feature overview This sections explains how to use the Docker networks feature. This feature allows users to define their own networks and connect containers to them. Using this feature you can create a network on a single host or a network that spans across multiple hosts. - [Understand Docker container networks](dockernetworks.md) - [Work with network commands](work-with-networks.md) - [Get started with multi-host networking](get-started-overlay.md) If you are already familiar with Docker's default bridge network, `docker0` that network continues to be supported. It is created automatically in every installation. The default bridge network is also named `bridge`. To see a list of topics related to that network, read the articles listed in the [Docker default bridge network](default_network/index.md). docker-1.10.3/docs/userguide/networking/work-with-networks.md000066400000000000000000001004761267010174400243170ustar00rootroot00000000000000 # Work with network commands This article provides examples of the network subcommands you can use to interact with Docker networks and the containers in them. The commands are available through the Docker Engine CLI. These commands are: * `docker network create` * `docker network connect` * `docker network ls` * `docker network rm` * `docker network disconnect` * `docker network inspect` While not required, it is a good idea to read [Understanding Docker network](dockernetworks.md) before trying the examples in this section. 
The examples for the rely on a `bridge` network so that you can try them immediately. If you would prefer to experiment with an `overlay` network see the [Getting started with multi-host networks](get-started-overlay.md) instead. ## Create networks Docker Engine creates a `bridge` network automatically when you install Engine. This network corresponds to the `docker0` bridge that Engine has traditionally relied on. In addition to this network, you can create your own `bridge` or `overlay` network. A `bridge` network resides on a single host running an instance of Docker Engine. An `overlay` network can span multiple hosts running their own engines. If you run `docker network create` and supply only a network name, it creates a bridge network for you. ```bash $ docker network create simple-network 69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a $ docker network inspect simple-network [ { "Name": "simple-network", "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.22.0.0/16", "Gateway": "172.22.0.1/16" } ] }, "Containers": {}, "Options": {} } ] ``` Unlike `bridge` networks, `overlay` networks require some pre-existing conditions before you can create one. These conditions are: * Access to a key-value store. Engine supports Consul Etcd, and ZooKeeper (Distributed store) key-value stores. * A cluster of hosts with connectivity to the key-value store. * A properly configured Engine `daemon` on each host in the swarm. The `docker daemon` options that support the `overlay` network are: * `--cluster-store` * `--cluster-store-opt` * `--cluster-advertise` It is also a good idea, though not required, that you install Docker Swarm to manage the cluster. Swarm provides sophisticated discovery and server management that can assist your implementation. 
When you create a network, Engine creates a non-overlapping subnetwork for the network by default. You can override this default and specify a subnetwork directly using the the `--subnet` option. On a `bridge` network you can only specify a single subnet. An `overlay` network supports multiple subnets. > **Note** : It is highly recommended to use the `--subnet` option while creating > a network. If the `--subnet` is not specified, the docker daemon automatically > chooses and assigns a subnet for the network and it could overlap with another subnet > in your infrastructure that is not managed by docker. Such overlaps can cause > connectivity issues or failures when containers are connected to that network. In addition to the `--subnetwork` option, you also specify the `--gateway` `--ip-range` and `--aux-address` options. ```bash $ docker network create -d overlay --subnet=192.168.0.0/16 --subnet=192.170.0.0/16 --gateway=192.168.0.100 --gateway=192.170.0.100 --ip-range=192.168.1.0/24 --aux-address a=192.168.1.5 --aux-address b=192.168.1.6 --aux-address a=192.170.1.5 --aux-address b=192.170.1.6 my-multihost-network ``` Be sure that your subnetworks do not overlap. If they do, the network create fails and Engine returns an error. When creating a custom network, the default network driver (i.e. `bridge`) has additional options that can be passed. 
The following are those options and the equivalent docker daemon flags used for docker0 bridge: | Option | Equivalent | Description | |--------------------------------------------------|-------------|-------------------------------------------------------| | `com.docker.network.bridge.name` | - | bridge name to be used when creating the Linux bridge | | `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | | `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity | | `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | | `com.docker.network.mtu` | `--mtu` | Set the containers network MTU | | `com.docker.network.enable_ipv6` | `--ipv6` | Enable IPv6 networking | For example, now let's use `-o` or `--opt` options to specify an IP address binding when publishing ports: ```bash $ docker network create -o "com.docker.network.bridge.host_binding_ipv4"="172.23.0.1" my-network b1a086897963e6a2e7fc6868962e55e746bee8ad0c97b54a5831054b5f62672a $ docker network inspect my-network [ { "Name": "my-network", "Id": "b1a086897963e6a2e7fc6868962e55e746bee8ad0c97b54a5831054b5f62672a", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Options": {}, "Config": [ { "Subnet": "172.23.0.0/16", "Gateway": "172.23.0.1/16" } ] }, "Containers": {}, "Options": { "com.docker.network.bridge.host_binding_ipv4": "172.23.0.1" } } ] $ docker run -d -P --name redis --net my-network redis bafb0c808c53104b2c90346f284bda33a69beadcab4fc83ab8f2c5a4410cd129 $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES bafb0c808c53 redis "/entrypoint.sh redis" 4 seconds ago Up 3 seconds 172.23.0.1:32770->6379/tcp redis ``` ## Connect containers You can connect containers dynamically to one or more networks. These networks can be backed the same or different network drivers. 
Once connected, the containers can communicate using another container's IP address or name. For `overlay` networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different hosts can also communicate in this way. Create two containers for this example: ```bash $ docker run -itd --name=container1 busybox 18c062ef45ac0c026ee48a83afa39d25635ee5f02b58de4abc8f467bcaa28731 $ docker run -itd --name=container2 busybox 498eaaaf328e1018042c04b2de04036fc04719a6e39a097a4f4866043a2c2152 ``` Then create an isolated, `bridge` network to test with. ```bash $ docker network create -d bridge --subnet 172.25.0.0/16 isolated_nw 06a62f1c73c4e3107c0f555b7a5f163309827bfbbf999840166065a8f35455a8 ``` Connect `container2` to the network and then `inspect` the network to verify the connection: ``` $ docker network connect isolated_nw container2 $ docker network inspect isolated_nw [ { "Name": "isolated_nw", "Id": "06a62f1c73c4e3107c0f555b7a5f163309827bfbbf999840166065a8f35455a8", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.21.0.0/16", "Gateway": "172.21.0.1/16" } ] }, "Containers": { "90e1f3ec71caf82ae776a827e0712a68a110a3f175954e5bd4222fd142ac9428": { "Name": "container2", "EndpointID": "11cedac1810e864d6b1589d92da12af66203879ab89f4ccd8c8fdaa9b1c48b1d", "MacAddress": "02:42:ac:19:00:02", "IPv4Address": "172.25.0.2/16", "IPv6Address": "" } }, "Options": {} } ] ``` You can see that the Engine automatically assigns an IP address to `container2`. Given we specified a `--subnet` when creating the network, Engine picked an address from that same subnet. 
Now, start a third container and connect it to the network on launch using the `docker run` command's `--net` option: ```bash $ docker run --net=isolated_nw --ip=172.25.3.3 -itd --name=container3 busybox 467a7863c3f0277ef8e661b38427737f28099b61fa55622d6c30fb288d88c551 ``` As you can see you were able to specify the ip address for your container. As long as the network to which the container is connecting was created with a user specified subnet, you will be able to select the IPv4 and/or IPv6 address(es) for your container when executing `docker run` and `docker network connect` commands. The selected IP address is part of the container networking configuration and will be preserved across container reload. The feature is only available on user defined networks, because they guarantee their subnets configuration does not change across daemon reload. Now, inspect the network resources used by `container3`. ```bash $ docker inspect --format='{{json .NetworkSettings.Networks}}' container3 {"isolated_nw":{"IPAMConfig":{"IPv4Address":"172.25.3.3"},"NetworkID":"1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b", "EndpointID":"dffc7ec2915af58cc827d995e6ebdc897342be0420123277103c40ae35579103","Gateway":"172.25.0.1","IPAddress":"172.25.3.3","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:19:03:03"}} ``` Repeat this command for `container2`. If you have Python installed, you can pretty print the output. 
```bash $ docker inspect --format='{{json .NetworkSettings.Networks}}' container2 | python -m json.tool { "bridge": { "NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": "0099f9efb5a3727f6a554f176b1e96fca34cae773da68b3b6a26d046c12cb365", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAMConfig": null, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03" }, "isolated_nw": { "NetworkID":"1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b", "EndpointID": "11cedac1810e864d6b1589d92da12af66203879ab89f4ccd8c8fdaa9b1c48b1d", "Gateway": "172.25.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAMConfig": null, "IPAddress": "172.25.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:19:00:02" } } ``` You should find `container2` belongs to two networks. The `bridge` network which it joined by default when you launched it and the `isolated_nw` which you later connected it to. ![](images/working.png) In the case of `container3`, you connected it through `docker run` to the `isolated_nw` so that container is not connected to `bridge`. Use the `docker attach` command to connect to the running `container2` and examine its networking stack: ```bash $ docker attach container2 ``` If you look a the container's network stack you should see two Ethernet interfaces, one for the default bridge network and one for the `isolated_nw` network. 
```bash
/ # ifconfig
eth0      Link encap:Ethernet  HWaddr 02:42:AC:11:00:03
          inet addr:172.17.0.3  Bcast:0.0.0.0  Mask:255.255.0.0
          inet6 addr: fe80::42:acff:fe11:3/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:9001  Metric:1
          RX packets:8 errors:0 dropped:0 overruns:0 frame:0
          TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:648 (648.0 B)  TX bytes:648 (648.0 B)

eth1      Link encap:Ethernet  HWaddr 02:42:AC:15:00:02
          inet addr:172.25.0.2  Bcast:0.0.0.0  Mask:255.255.0.0
          inet6 addr: fe80::42:acff:fe19:2/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:8 errors:0 dropped:0 overruns:0 frame:0
          TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:648 (648.0 B)  TX bytes:648 (648.0 B)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
```

On the `isolated_nw` which was user defined, the Docker embedded DNS server enables name resolution for other containers in the network. Inside of `container2` it is possible to ping `container3` by name.

```bash
/ # ping -w 4 container3
PING container3 (172.25.3.3): 56 data bytes
64 bytes from 172.25.3.3: seq=0 ttl=64 time=0.070 ms
64 bytes from 172.25.3.3: seq=1 ttl=64 time=0.080 ms
64 bytes from 172.25.3.3: seq=2 ttl=64 time=0.080 ms
64 bytes from 172.25.3.3: seq=3 ttl=64 time=0.097 ms

--- container3 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.070/0.081/0.097 ms
```

This isn't the case for the default `bridge` network. Both `container2` and `container1` are connected to the default bridge network. Docker does not support automatic service discovery on this network.
For this reason, pinging `container1` by name fails as you would expect based on the `/etc/hosts` file: ```bash / # ping -w 4 container1 ping: bad address 'container1' ``` A ping using the `container1` IP address does succeed though: ```bash / # ping -w 4 172.17.0.2 PING 172.17.0.2 (172.17.0.2): 56 data bytes 64 bytes from 172.17.0.2: seq=0 ttl=64 time=0.095 ms 64 bytes from 172.17.0.2: seq=1 ttl=64 time=0.075 ms 64 bytes from 172.17.0.2: seq=2 ttl=64 time=0.072 ms 64 bytes from 172.17.0.2: seq=3 ttl=64 time=0.101 ms --- 172.17.0.2 ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.072/0.085/0.101 ms ``` If you wanted you could connect `container1` to `container2` with the `docker run --link` command and that would enable the two containers to interact by name as well as IP. Detach from a `container2` and leave it running using `CTRL-p CTRL-q`. In this example, `container2` is attached to both networks and so can talk to `container1` and `container3`. But `container3` and `container1` are not in the same network and cannot communicate. Test, this now by attaching to `container3` and attempting to ping `container1` by IP address. ```bash $ docker attach container3 / # ping 172.17.0.2 PING 172.17.0.2 (172.17.0.2): 56 data bytes ^C --- 172.17.0.2 ping statistics --- 10 packets transmitted, 0 packets received, 100% packet loss ``` You can connect both running and non-running containers to a network. However, `docker network inspect` only displays information on running containers. ### Linking containers in user-defined networks In the above example, container_2 was able to resolve container_3's name automatically in the user defined network `isolated_nw`, but the name resolution did not succeed automatically in the default `bridge` network. This is expected in order to maintain backward compatibility with [legacy link](default_network/dockerlinks.md). 
The `legacy link` provided 4 major functionalities to the default `bridge` network.

* name resolution
* name alias for the linked container using `--link=CONTAINER-NAME:ALIAS`
* secured container connectivity (in isolation via `--icc=false`)
* environment variable injection

Comparing the above 4 functionalities with the non-default user-defined networks such as `isolated_nw` in this example, without any additional config, `docker network` provides

* automatic name resolution using DNS
* automatic secured isolated environment for the containers in a network
* ability to dynamically attach and detach to multiple networks
* supports the `--link` option to provide name alias for the linked container

Continuing with the above example, create another container `container_4` in `isolated_nw` with `--link` to provide additional name resolution using alias for other containers in the same network.

```bash
$ docker run --net=isolated_nw -itd --name=container4 --link container5:c5 busybox
01b5df970834b77a9eadbaff39051f237957bd35c4c56f11193e0594cfd5117c
```

With the help of `--link` container4 will be able to reach container5 using the aliased name `c5` as well.

Please note that while creating container4, we linked to a container named `container5` which is not created yet. That is one of the differences in behavior between the `legacy link` in default `bridge` network and the new `link` functionality in user defined networks. The `legacy link` is static in nature and it hard-binds the container with the alias and it doesn't tolerate linked container restarts. While the new `link` functionality in user defined networks are dynamic in nature and supports linked container restarts including tolerating ip-address changes on the linked container.

Now let us launch another container named `container5` linking container4 with the alias c4.
```bash $ docker run --net=isolated_nw -itd --name=container5 --link container4:c4 busybox 72eccf2208336f31e9e33ba327734125af00d1e1d2657878e2ee8154fbb23c7a ``` As expected, container4 will be able to reach container5 by both its container name and its alias c5 and container5 will be able to reach container4 by its container name and its alias c4. ```bash $ docker attach container4 / # ping -w 4 c5 PING c5 (172.25.0.5): 56 data bytes 64 bytes from 172.25.0.5: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.25.0.5: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.25.0.5: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.25.0.5: seq=3 ttl=64 time=0.097 ms --- c5 ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms / # ping -w 4 container5 PING container5 (172.25.0.5): 56 data bytes 64 bytes from 172.25.0.5: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.25.0.5: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.25.0.5: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.25.0.5: seq=3 ttl=64 time=0.097 ms --- container5 ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms ``` ```bash $ docker attach container5 / # ping -w 4 c4 PING c4 (172.25.0.4): 56 data bytes 64 bytes from 172.25.0.4: seq=0 ttl=64 time=0.065 ms 64 bytes from 172.25.0.4: seq=1 ttl=64 time=0.070 ms 64 bytes from 172.25.0.4: seq=2 ttl=64 time=0.067 ms 64 bytes from 172.25.0.4: seq=3 ttl=64 time=0.082 ms --- c4 ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.065/0.070/0.082 ms / # ping -w 4 container4 PING container4 (172.25.0.4): 56 data bytes 64 bytes from 172.25.0.4: seq=0 ttl=64 time=0.065 ms 64 bytes from 172.25.0.4: seq=1 ttl=64 time=0.070 ms 64 bytes from 172.25.0.4: seq=2 ttl=64 time=0.067 ms 64 bytes from 172.25.0.4: seq=3 ttl=64 time=0.082 ms --- container4 ping statistics --- 4 packets transmitted, 4 packets received, 0% 
packet loss round-trip min/avg/max = 0.065/0.070/0.082 ms ``` Similar to the legacy link functionality the new link alias is localized to a container and the aliased name has no meaning outside of the container using the `--link`. Also, it is important to note that if a container belongs to multiple networks, the linked alias is scoped within a given network. Hence the containers can be linked to different aliases in different networks. Extending the example, let us create another network named `local_alias` ```bash $ docker network create -d bridge --subnet 172.26.0.0/24 local_alias 76b7dc932e037589e6553f59f76008e5b76fa069638cd39776b890607f567aaa ``` let us connect container4 and container5 to the new network `local_alias` ``` $ docker network connect --link container5:foo local_alias container4 $ docker network connect --link container4:bar local_alias container5 ``` ```bash $ docker attach container4 / # ping -w 4 foo PING foo (172.26.0.3): 56 data bytes 64 bytes from 172.26.0.3: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.26.0.3: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.26.0.3: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.26.0.3: seq=3 ttl=64 time=0.097 ms --- foo ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms / # ping -w 4 c5 PING c5 (172.25.0.5): 56 data bytes 64 bytes from 172.25.0.5: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.25.0.5: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.25.0.5: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.25.0.5: seq=3 ttl=64 time=0.097 ms --- c5 ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms ``` Note that the ping succeeds for both the aliases but on different networks. 
Let us conclude this section by disconnecting container5 from the `isolated_nw` and observe the results ``` $ docker network disconnect isolated_nw container5 $ docker attach container4 / # ping -w 4 c5 ping: bad address 'c5' / # ping -w 4 foo PING foo (172.26.0.3): 56 data bytes 64 bytes from 172.26.0.3: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.26.0.3: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.26.0.3: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.26.0.3: seq=3 ttl=64 time=0.097 ms --- foo ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms ``` In conclusion, the new link functionality in user defined networks provides all the benefits of legacy links while avoiding most of the well-known issues with `legacy links`. One notable missing functionality compared to `legacy links` is the injection of environment variables. Though very useful, environment variable injection is static in nature and must be injected when the container is started. One cannot inject environment variables into a running container without significant effort and hence it is not compatible with `docker network` which provides a dynamic way to connect/ disconnect containers to/from a network. ### Network-scoped alias While `links` provide private name resolution that is localized within a container, the network-scoped alias provides a way for a container to be discovered by an alternate name by any other container within the scope of a particular network. Unlike the `link` alias, which is defined by the consumer of a service, the network-scoped alias is defined by the container that is offering the service to the network. Continuing with the above example, create another container in `isolated_nw` with a network alias. 
```bash $ docker run --net=isolated_nw -itd --name=container6 --net-alias app busybox 8ebe6767c1e0361f27433090060b33200aac054a68476c3be87ef4005eb1df17 ``` ```bash $ docker attach container4 / # ping -w 4 app PING app (172.25.0.6): 56 data bytes 64 bytes from 172.25.0.6: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.25.0.6: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.25.0.6: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.25.0.6: seq=3 ttl=64 time=0.097 ms --- app ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms / # ping -w 4 container6 PING container5 (172.25.0.6): 56 data bytes 64 bytes from 172.25.0.6: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.25.0.6: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.25.0.6: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.25.0.6: seq=3 ttl=64 time=0.097 ms --- container6 ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms ``` Now let us connect `container6` to the `local_alias` network with a different network-scoped alias. ``` $ docker network connect --alias scoped-app local_alias container6 ``` `container6` in this example now is aliased as `app` in network `isolated_nw` and as `scoped-app` in network `local_alias`. Let's try to reach these aliases from `container4` (which is connected to both these networks) and `container5` (which is connected only to `isolated_nw`). 
```bash $ docker attach container4 / # ping -w 4 scoped-app PING foo (172.26.0.5): 56 data bytes 64 bytes from 172.26.0.5: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.26.0.5: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.26.0.5: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.26.0.5: seq=3 ttl=64 time=0.097 ms --- foo ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms $ docker attach container5 / # ping -w 4 scoped-app ping: bad address 'scoped-app' ``` As you can see, the alias is scoped to the network it is defined on and hence only those containers that are connected to that network can access the alias. In addition to the above features, multiple containers can share the same network-scoped alias within the same network. For example, let's launch `container7` in `isolated_nw` with the same alias as `container6` ```bash $ docker run --net=isolated_nw -itd --name=container7 --net-alias app busybox 3138c678c123b8799f4c7cc6a0cecc595acbdfa8bf81f621834103cd4f504554 ``` When multiple containers share the same alias, name resolution to that alias will happen to one of the containers (typically the first container that is aliased). When the container that backs the alias goes down or disconnected from the network, the next container that backs the alias will be resolved. Let us ping the alias `app` from `container4` and bring down `container6` to verify that `container7` is resolving the `app` alias. 
```bash $ docker attach container4 / # ping -w 4 app PING app (172.25.0.6): 56 data bytes 64 bytes from 172.25.0.6: seq=0 ttl=64 time=0.070 ms 64 bytes from 172.25.0.6: seq=1 ttl=64 time=0.080 ms 64 bytes from 172.25.0.6: seq=2 ttl=64 time=0.080 ms 64 bytes from 172.25.0.6: seq=3 ttl=64 time=0.097 ms --- app ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.070/0.081/0.097 ms $ docker stop container6 $ docker attach container4 / # ping -w 4 app PING app (172.25.0.7): 56 data bytes 64 bytes from 172.25.0.7: seq=0 ttl=64 time=0.095 ms 64 bytes from 172.25.0.7: seq=1 ttl=64 time=0.075 ms 64 bytes from 172.25.0.7: seq=2 ttl=64 time=0.072 ms 64 bytes from 172.25.0.7: seq=3 ttl=64 time=0.101 ms --- app ping statistics --- 4 packets transmitted, 4 packets received, 0% packet loss round-trip min/avg/max = 0.072/0.085/0.101 ms ``` ## Disconnecting containers You can disconnect a container from a network using the `docker network disconnect` command. 
``` $ docker network disconnect isolated_nw container2 docker inspect --format='{{json .NetworkSettings.Networks}}' container2 | python -m json.tool { "bridge": { "NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": "9e4575f7f61c0f9d69317b7a4b92eefc133347836dd83ef65deffa16b9985dc0", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03" } } $ docker network inspect isolated_nw [ { "Name": "isolated_nw", "Id": "06a62f1c73c4e3107c0f555b7a5f163309827bfbbf999840166065a8f35455a8", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.21.0.0/16", "Gateway": "172.21.0.1/16" } ] }, "Containers": { "467a7863c3f0277ef8e661b38427737f28099b61fa55622d6c30fb288d88c551": { "Name": "container3", "EndpointID": "dffc7ec2915af58cc827d995e6ebdc897342be0420123277103c40ae35579103", "MacAddress": "02:42:ac:19:03:03", "IPv4Address": "172.25.3.3/16", "IPv6Address": "" } }, "Options": {} } ] ``` Once a container is disconnected from a network, it cannot communicate with other containers connected to that network. In this example, `container2` can no longer talk to `container3` on the `isolated_nw` network. 
``` $ docker attach container2 / # ifconfig eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:03 inet addr:172.17.0.3 Bcast:0.0.0.0 Mask:255.255.0.0 inet6 addr: fe80::42:acff:fe11:3/64 Scope:Link UP BROADCAST RUNNING MULTICAST MTU:9001 Metric:1 RX packets:8 errors:0 dropped:0 overruns:0 frame:0 TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:648 (648.0 B) TX bytes:648 (648.0 B) lo Link encap:Local Loopback inet addr:127.0.0.1 Mask:255.0.0.0 inet6 addr: ::1/128 Scope:Host UP LOOPBACK RUNNING MTU:65536 Metric:1 RX packets:0 errors:0 dropped:0 overruns:0 frame:0 TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) / # ping container3 PING container3 (172.25.3.3): 56 data bytes ^C --- container3 ping statistics --- 2 packets transmitted, 0 packets received, 100% packet loss ``` The `container2` still has full connectivity to the bridge network ```bash / # ping container1 PING container1 (172.17.0.2): 56 data bytes 64 bytes from 172.17.0.2: seq=0 ttl=64 time=0.119 ms 64 bytes from 172.17.0.2: seq=1 ttl=64 time=0.174 ms ^C --- container1 ping statistics --- 2 packets transmitted, 2 packets received, 0% packet loss round-trip min/avg/max = 0.119/0.146/0.174 ms / # ``` There are certain scenarios such as ungraceful docker daemon restarts in multi-host network, where the daemon is unable to cleanup stale connectivity endpoints. Such stale endpoints may cause an error `container already connected to network` when a new container is connected to that network with the same name as the stale endpoint. In order to cleanup these stale endpoints, first remove the container and force disconnect (`docker network disconnect -f`) the endpoint from the network. Once the endpoint is cleaned up, the container can be connected to the network. 
``` $ docker run -d --name redis_db --net multihost redis ERROR: Cannot start container bc0b19c089978f7845633027aa3435624ca3d12dd4f4f764b61eac4c0610f32e: container already connected to network multihost $ docker rm -f redis_db $ docker network disconnect -f multihost redis_db $ docker run -d --name redis_db --net multihost redis 7d986da974aeea5e9f7aca7e510bdb216d58682faa83a9040c2f2adc0544795a ``` ## Remove a network When all the containers in a network are stopped or disconnected, you can remove a network. ```bash $ docker network disconnect isolated_nw container3 ``` ```bash docker network inspect isolated_nw [ { "Name": "isolated_nw", "Id": "06a62f1c73c4e3107c0f555b7a5f163309827bfbbf999840166065a8f35455a8", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.21.0.0/16", "Gateway": "172.21.0.1/16" } ] }, "Containers": {}, "Options": {} } ] $ docker network rm isolated_nw ``` List all your networks to verify the `isolated_nw` was removed: ``` $ docker network ls NETWORK ID NAME DRIVER 72314fa53006 host host f7ab26d71dbd bridge bridge 0f32e83e61ac none null ``` ## Related information * [network create](../../reference/commandline/network_create.md) * [network inspect](../../reference/commandline/network_inspect.md) * [network connect](../../reference/commandline/network_connect.md) * [network disconnect](../../reference/commandline/network_disconnect.md) * [network ls](../../reference/commandline/network_ls.md) * [network rm](../../reference/commandline/network_rm.md) docker-1.10.3/docs/userguide/storagedriver/000077500000000000000000000000001267010174400206515ustar00rootroot00000000000000docker-1.10.3/docs/userguide/storagedriver/aufs-driver.md000066400000000000000000000227301267010174400234260ustar00rootroot00000000000000 # Docker and AUFS in practice AUFS was the first storage driver in use with Docker. 
As a result, it has a long and close history with Docker, is very stable, has a lot of real-world deployments, and has strong community support. AUFS has several features that make it a good choice for Docker. These features enable: - Fast container startup times. - Efficient use of storage. - Efficient use of memory. Despite its capabilities and long history with Docker, some Linux distributions do not support AUFS. This is usually because AUFS is not included in the mainline (upstream) Linux kernel. The following sections examine some AUFS features and how they relate to Docker. ## Image layering and sharing with AUFS AUFS is a *unification filesystem*. This means that it takes multiple directories on a single Linux host, stacks them on top of each other, and provides a single unified view. To achieve this, AUFS uses a *union mount*. AUFS stacks multiple directories and exposes them as a unified view through a single mount point. All of the directories in the stack, as well as the union mount point, must all exist on the same Linux host. AUFS refers to each directory that it stacks as a *branch*. Within Docker, AUFS union mounts enable image layering. The AUFS storage driver implements Docker image layers using this union mount system. AUFS branches correspond to Docker image layers. The diagram below shows a Docker container based on the `ubuntu:latest` image. ![](images/aufs_layers.jpg) This diagram shows that each image layer, and the container layer, is represented in the Docker hosts filesystem as a directory under `/var/lib/docker/`. The union mount point provides the unified view of all layers. As of Docker 1.10, image layer IDs do not correspond to the names of the directories that contain their data. AUFS also supports the copy-on-write technology (CoW). Not all storage drivers do. ## Container reads and writes with AUFS Docker leverages AUFS CoW technology to enable image sharing and minimize the use of disk space. AUFS works at the file level. 
This means that all AUFS CoW operations copy entire files - even if only a small part of the file is being modified. This behavior can have a noticeable impact on container performance, especially if the files being copied are large, below a lot of image layers, or the CoW operation must search a deep directory tree. Consider, for example, an application running in a container needs to add a single new value to a large key-value store (file). If this is the first time the file is modified, it does not yet exist in the container's top writable layer. So, the CoW must *copy up* the file from the underlying image. The AUFS storage driver searches each image layer for the file. The search order is from top to bottom. When it is found, the entire file is *copied up* to the container's top writable layer. From there, it can be opened and modified. Larger files obviously take longer to *copy up* than smaller files, and files that exist in lower image layers take longer to locate than those in higher layers. However, a *copy up* operation only occurs once per file on any given container. Subsequent reads and writes happen against the file's copy already *copied-up* to the container's top layer. ## Deleting files with the AUFS storage driver The AUFS storage driver deletes a file from a container by placing a *whiteout file* in the container's top layer. The whiteout file effectively obscures the existence of the file in the read-only image layers below. The simplified diagram below shows a container based on an image with three image layers. ![](images/aufs_delete.jpg) The `file3` was deleted from the container. So, the AUFS storage driver placed a whiteout file in the container's top layer. This whiteout file effectively "deletes" `file3` from the container by obscuring any of the original file's existence in the image's read-only layers. This works the same no matter which of the image's read-only layers the file exists in. 
## Configure Docker with AUFS

You can only use the AUFS storage driver on Linux systems with AUFS installed. Use the following command to determine if your system supports AUFS.

    $ grep aufs /proc/filesystems
    nodev   aufs

This output indicates the system supports AUFS. Once you've verified your system supports AUFS, you must instruct the Docker daemon to use it. You do this from the command line with the `docker daemon` command:

    $ sudo docker daemon --storage-driver=aufs &

Alternatively, you can edit the Docker config file and add the `--storage-driver=aufs` option to the `DOCKER_OPTS` line.

    # Use DOCKER_OPTS to modify the daemon startup options.
    DOCKER_OPTS="--storage-driver=aufs"

Once your daemon is running, verify the storage driver with the `docker info` command.

    $ sudo docker info
    Containers: 1
    Images: 4
    Storage Driver: aufs
     Root Dir: /var/lib/docker/aufs
     Backing Filesystem: extfs
     Dirs: 6
     Dirperm1 Supported: false
    Execution Driver: native-0.2
    ...output truncated...

The output above shows that the Docker daemon is running the AUFS storage driver on top of an existing `ext4` backing filesystem.

## Local storage and AUFS

As the `docker daemon` runs with the AUFS driver, the driver stores images and containers within the Docker host's local storage area under `/var/lib/docker/aufs/`.

### Images

Image layers and their contents are stored under `/var/lib/docker/aufs/diff/`. With Docker 1.10 and higher, image layer IDs do not correspond to directory names.

The `/var/lib/docker/aufs/layers/` directory contains metadata about how image layers are stacked. This directory contains one file for every image or container layer on the Docker host (though file names no longer match image layer IDs). Inside each file are the names of the directories that exist below it in the stack.

The command below shows the contents of a metadata file in `/var/lib/docker/aufs/layers/` that lists the three directories that are stacked below it in the union mount.
Remember, these directory names do not map to image layer IDs with Docker 1.10 and higher.

    $ cat /var/lib/docker/aufs/layers/91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c
    d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82
    c22013c8472965aa5b62559f2b540cd440716ef149756e7b958a1b2aba421e87
    d3a1f33e8a5a513092f01bb7eb1c2abf4d711e5105390a3fe1ae2248cfde1391

The base layer in an image has no image layers below it, so its file is empty.

### Containers

Running containers are mounted below `/var/lib/docker/aufs/mnt/`. This is where the AUFS union mount point that exposes the container and all underlying image layers as a single unified view exists. If a container is not running, it still has a directory here but it is empty. This is because AUFS only mounts a container when it is running. With Docker 1.10 and higher, container IDs no longer correspond to directory names under `/var/lib/docker/aufs/mnt/`.

Container metadata and various config files that are placed into the running container are stored in `/var/lib/docker/containers/`. Files in this directory exist for all containers on the system, including ones that are stopped. However, when a container is running the container's log files are also in this directory.

A container's thin writable layer is stored in a directory under `/var/lib/docker/aufs/diff/`. With Docker 1.10 and higher, container IDs no longer correspond to directory names. However, the container's thin writable layer still exists under here and is stacked by AUFS as the top writable layer and is where all changes to the container are stored. The directory exists even if the container is stopped. This means that restarting a container will not lose changes made to it.

Once a container is deleted, its thin writable layer in this directory is deleted.
## AUFS and Docker performance To summarize some of the performance related aspects already mentioned: - The AUFS storage driver is a good choice for PaaS and other similar use-cases where container density is important. This is because AUFS efficiently shares images between multiple running containers, enabling fast container start times and minimal use of disk space. - The underlying mechanics of how AUFS shares files between image layers and containers uses the systems page cache very efficiently. - The AUFS storage driver can introduce significant latencies into container write performance. This is because the first time a container writes to any file, the file has be located and copied into the containers top writable layer. These latencies increase and are compounded when these files exist below many image layers and the files themselves are large. One final point. Data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you may want to place heavy write workloads on data volumes. ## Related information * [Understand images, containers, and storage drivers](imagesandcontainers.md) * [Select a storage driver](selectadriver.md) * [Btrfs storage driver in practice](btrfs-driver.md) * [Device Mapper storage driver in practice](device-mapper-driver.md) docker-1.10.3/docs/userguide/storagedriver/btrfs-driver.md000066400000000000000000000330641267010174400236120ustar00rootroot00000000000000 # Docker and Btrfs in practice Btrfs is a next generation copy-on-write filesystem that supports many advanced storage technologies that make it a good fit for Docker. Btrfs is included in the mainline Linux kernel and its on-disk-format is now considered stable. However, many of its features are still under heavy development and users should consider it a fast-moving target. 
Docker's `btrfs` storage driver leverages many Btrfs features for image and container management. Among these features are thin provisioning, copy-on-write, and snapshotting.

This article refers to Docker's Btrfs storage driver as `btrfs` and the overall Btrfs Filesystem as Btrfs.

>**Note**: The [Commercially Supported Docker Engine (CS-Engine)](https://www.docker.com/compatibility-maintenance) does not currently support the `btrfs` storage driver.

## The future of Btrfs

Btrfs has been long hailed as the future of Linux filesystems. With full support in the mainline Linux kernel, a stable on-disk-format, and active development with a focus on stability, this is now becoming more of a reality.

As far as Docker on the Linux platform goes, many people see the `btrfs` storage driver as a potential long-term replacement for the `devicemapper` storage driver. However, at the time of writing, the `devicemapper` storage driver should be considered safer, more stable, and more *production ready*. You should only consider the `btrfs` driver for production deployments if you understand it well and have existing experience with Btrfs.

## Image layering and sharing with Btrfs

Docker leverages Btrfs *subvolumes* and *snapshots* for managing the on-disk components of image and container layers. Btrfs subvolumes look and feel like a normal Unix filesystem. As such, they can have their own internal directory structure that hooks into the wider Unix filesystem.

Subvolumes are natively copy-on-write and have space allocated to them on-demand from an underlying storage pool. They can also be nested and snapped. The diagram below shows 4 subvolumes. 'Subvolume 2' and 'Subvolume 3' are nested, whereas 'Subvolume 4' shows its own internal directory tree.

![](images/btfs_subvolume.jpg)

Snapshots are a point-in-time read-write copy of an entire subvolume. They exist directly below the subvolume they were created from. You can create snapshots of snapshots as shown in the diagram below.
![](images/btfs_snapshots.jpg) Btrfs allocates space to subvolumes and snapshots on demand from an underlying pool of storage. The unit of allocation is referred to as a *chunk*, and *chunks* are normally ~1GB in size. Snapshots are first-class citizens in a Btrfs filesystem. This means that they look, feel, and operate just like regular subvolumes. The technology required to create them is built directly into the Btrfs filesystem thanks to its native copy-on-write design. This means that Btrfs snapshots are space efficient with little or no performance overhead. The diagram below shows a subvolume and its snapshot sharing the same data. ![](images/btfs_pool.jpg) Docker's `btrfs` storage driver stores every image layer and container in its own Btrfs subvolume or snapshot. The base layer of an image is stored as a subvolume whereas child image layers and containers are stored as snapshots. This is shown in the diagram below. ![](images/btfs_container_layer.jpg) The high level process for creating images and containers on Docker hosts running the `btrfs` driver is as follows: 1. The image's base layer is stored in a Btrfs *subvolume* under `/var/lib/docker/btrfs/subvolumes`. 2. Subsequent image layers are stored as a Btrfs *snapshot* of the parent layer's subvolume or snapshot. The diagram below shows a three-layer image. The base layer is a subvolume. Layer 1 is a snapshot of the base layer's subvolume. Layer 2 is a snapshot of Layer 1's snapshot. ![](images/btfs_constructs.jpg) As of Docker 1.10, image layer IDs no longer correspond to directory names under `/var/lib/docker/`. ## Image and container on-disk constructs Image layers and containers are visible in the Docker host's filesystem at `/var/lib/docker/btrfs/subvolumes/`. However, as previously stated, directory names no longer correspond to image layer IDs. That said, directories for containers are present even for containers with a stopped status. 
This is because the `btrfs` storage driver mounts a default, top-level subvolume at `/var/lib/docker/subvolumes`. All other subvolumes and snapshots exist below that as Btrfs filesystem objects and not as individual mounts. Because Btrfs works at the filesystem level and not the block level, each image and container layer can be browsed in the filesystem using normal Unix commands. The example below shows a truncated output of an `ls -l` command an image layer: $ ls -l /var/lib/docker/btrfs/subvolumes/0a17decee4139b0de68478f149cc16346f5e711c5ae3bb969895f22dd6723751/ total 0 drwxr-xr-x 1 root root 1372 Oct 9 08:39 bin drwxr-xr-x 1 root root 0 Apr 10 2014 boot drwxr-xr-x 1 root root 882 Oct 9 08:38 dev drwxr-xr-x 1 root root 2040 Oct 12 17:27 etc drwxr-xr-x 1 root root 0 Apr 10 2014 home ...output truncated... ## Container reads and writes with Btrfs A container is a space-efficient snapshot of an image. Metadata in the snapshot points to the actual data blocks in the storage pool. This is the same as with a subvolume. Therefore, reads performed against a snapshot are essentially the same as reads performed against a subvolume. As a result, no performance overhead is incurred from the Btrfs driver. Writing a new file to a container invokes an allocate-on-demand operation to allocate new data block to the container's snapshot. The file is then written to this new space. The allocate-on-demand operation is native to all writes with Btrfs and is the same as writing new data to a subvolume. As a result, writing new files to a container's snapshot operate at native Btrfs speeds. Updating an existing file in a container causes a copy-on-write operation (technically *redirect-on-write*). The driver leaves the original data and allocates new space to the snapshot. The updated data is written to this new space. Then, the driver updates the filesystem metadata in the snapshot to point to this new data. 
The original data is preserved in-place for subvolumes and snapshots further up the tree. This behavior is native to copy-on-write filesystems like Btrfs and incurs very little overhead. With Btrfs, writing and updating lots of small files can result in slow performance. More on this later. ## Configuring Docker with Btrfs The `btrfs` storage driver only operates on a Docker host where `/var/lib/docker` is mounted as a Btrfs filesystem. The following procedure shows how to configure Btrfs on Ubuntu 14.04 LTS. ### Prerequisites If you have already used the Docker daemon on your Docker host and have images you want to keep, `push` them to Docker Hub or your private Docker Trusted Registry before attempting this procedure. Stop the Docker daemon. Then, ensure that you have a spare block device at `/dev/xvdb`. The device identifier may be different in your environment and you should substitute your own values throughout the procedure. The procedure also assumes your kernel has the appropriate Btrfs modules loaded. To verify this, use the following command: $ cat /proc/filesystems | grep btrfs ### Configure Btrfs on Ubuntu 14.04 LTS Assuming your system meets the prerequisites, do the following: 1. Install the "btrfs-tools" package. $ sudo apt-get install btrfs-tools Reading package lists... Done Building dependency tree 2. Create the Btrfs storage pool. Btrfs storage pools are created with the `mkfs.btrfs` command. Passing multiple devices to the `mkfs.btrfs` command creates a pool across all of those devices. Here you create a pool with a single device at `/dev/xvdb`. $ sudo mkfs.btrfs -f /dev/xvdb WARNING! - Btrfs v3.12 IS EXPERIMENTAL WARNING! - see http://btrfs.wiki.kernel.org before using Turning ON incompat feature 'extref': increased hardlink limit per file to 65536 fs created label (null) on /dev/xvdb nodesize 16384 leafsize 16384 sectorsize 4096 size 4.00GiB Btrfs v3.12 Be sure to substitute `/dev/xvdb` with the appropriate device(s) on your system. 
> **Warning**: Take note of the warning about Btrfs being experimental. As noted earlier, Btrfs is not currently recommended for production deployments unless you already have extensive experience. 3. If it does not already exist, create a directory for the Docker host's local storage area at `/var/lib/docker`. $ sudo mkdir /var/lib/docker 4. Configure the system to automatically mount the Btrfs filesystem each time the system boots. a. Obtain the Btrfs filesystem's UUID. $ sudo blkid /dev/xvdb /dev/xvdb: UUID="a0ed851e-158b-4120-8416-c9b072c8cf47" UUID_SUB="c3927a64-4454-4eef-95c2-a7d44ac0cf27" TYPE="btrfs" b. Create an `/etc/fstab` entry to automatically mount `/var/lib/docker` each time the system boots. Either of the following lines will work, just remember to substitute the UUID value with the value obtained from the previous command. /dev/xvdb /var/lib/docker btrfs defaults 0 0 UUID="a0ed851e-158b-4120-8416-c9b072c8cf47" /var/lib/docker btrfs defaults 0 0 5. Mount the new filesystem and verify the operation. $ sudo mount -a $ mount /dev/xvda1 on / type ext4 (rw,discard) /dev/xvdb on /var/lib/docker type btrfs (rw) The last line in the output above shows the `/dev/xvdb` mounted at `/var/lib/docker` as Btrfs. Now that you have a Btrfs filesystem mounted at `/var/lib/docker`, the daemon should automatically load with the `btrfs` storage driver. 1. Start the Docker daemon. $ sudo service docker start docker start/running, process 2315 The procedure for starting the Docker daemon may differ depending on the Linux distribution you are using. You can force the Docker daemon to start with the `btrfs` storage driver by either passing the `--storage-driver=btrfs` flag to the `docker daemon` at startup, or adding it to the `DOCKER_OPTS` line in the Docker config file. 2. Verify the storage driver with the `docker info` command. $ sudo docker info Containers: 0 Images: 0 Storage Driver: btrfs [...] Your Docker host is now configured to use the `btrfs` storage driver. 
## Btrfs and Docker performance There are several factors that influence Docker's performance under the `btrfs` storage driver. - **Page caching**. Btrfs does not support page cache sharing. This means that *n* containers accessing the same file require *n* copies to be cached. As a result, the `btrfs` driver may not be the best choice for PaaS and other high density container use cases. - **Small writes**. Containers performing lots of small writes (including Docker hosts that start and stop many containers) can lead to poor use of Btrfs chunks. This can ultimately lead to out-of-space conditions on your Docker host and stop it working. This is currently a major drawback to using current versions of Btrfs. If you use the `btrfs` storage driver, closely monitor the free space on your Btrfs filesystem using the `btrfs filesys show` command. Do not trust the output of normal Unix commands such as `df`; always use the Btrfs native commands. - **Sequential writes**. Btrfs writes data to disk via journaling technique. This can impact sequential writes, where performance can be up to half. - **Fragmentation**. Fragmentation is a natural byproduct of copy-on-write filesystems like Btrfs. Many small random writes can compound this issue. It can manifest as CPU spikes on Docker hosts using SSD media and head thrashing on Docker hosts using spinning media. Both of these result in poor performance. Recent versions of Btrfs allow you to specify `autodefrag` as a mount option. This mode attempts to detect random writes and defragment them. You should perform your own tests before enabling this option on your Docker hosts. Some tests have shown this option has a negative performance impact on Docker hosts performing lots of small writes (including systems that start and stop many containers). - **Solid State Devices (SSD)**. Btrfs has native optimizations for SSD media. To enable these, mount with the `-o ssd` mount option. 
These optimizations include enhanced SSD write performance by avoiding things like *seek optimizations* that have no use on SSD media. Btfs also supports the TRIM/Discard primitives. However, mounting with the `-o discard` mount option can cause performance issues. Therefore, it is recommended you perform your own tests before using this option. - **Use Data Volumes**. Data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you should place heavy write workloads on data volumes. ## Related Information * [Understand images, containers, and storage drivers](imagesandcontainers.md) * [Select a storage driver](selectadriver.md) * [AUFS storage driver in practice](aufs-driver.md) * [Device Mapper storage driver in practice](device-mapper-driver.md) docker-1.10.3/docs/userguide/storagedriver/device-mapper-driver.md000066400000000000000000000422011267010174400252040ustar00rootroot00000000000000 # Docker and the Device Mapper storage driver Device Mapper is a kernel-based framework that underpins many advanced volume management technologies on Linux. Docker's `devicemapper` storage driver leverages the thin provisioning and snapshotting capabilities of this framework for image and container management. This article refers to the Device Mapper storage driver as `devicemapper`, and the kernel framework as `Device Mapper`. >**Note**: The [Commercially Supported Docker Engine (CS-Engine) running on RHEL and CentOS Linux](https://www.docker.com/compatibility-maintenance) requires that you use the `devicemapper` storage driver. ## An alternative to AUFS Docker originally ran on Ubuntu and Debian Linux and used AUFS for its storage backend. As Docker became popular, many of the companies that wanted to use it were using Red Hat Enterprise Linux (RHEL). 
Unfortunately, because the upstream mainline Linux kernel did not include AUFS, RHEL did not use AUFS either. To correct this Red Hat developers investigated getting AUFS into the mainline kernel. Ultimately, though, they decided a better idea was to develop a new storage backend. Moreover, they would base this new storage backend on existing `Device Mapper` technology. Red Hat collaborated with Docker Inc. to contribute this new driver. As a result of this collaboration, Docker's Engine was re-engineered to make the storage backend pluggable. So it was that the `devicemapper` became the second storage driver Docker supported. Device Mapper has been included in the mainline Linux kernel since version 2.6.9. It is a core part of RHEL family of Linux distributions. This means that the `devicemapper` storage driver is based on stable code that has a lot of real-world production deployments and strong community support. ## Image layering and sharing The `devicemapper` driver stores every image and container on its own virtual device. These devices are thin-provisioned copy-on-write snapshot devices. Device Mapper technology works at the block level rather than the file level. This means that `devicemapper` storage driver's thin provisioning and copy-on-write operations work with blocks rather than entire files. >**Note**: Snapshots are also referred to as *thin devices* or *virtual >devices*. They all mean the same thing in the context of the `devicemapper` >storage driver. With `devicemapper` the high level process for creating images is as follows: 1. The `devicemapper` storage driver creates a thin pool. The pool is created from block devices or loop mounted sparse files (more on this later). 2. Next it creates a *base device*. A base device is a thin device with a filesystem. You can see which filesystem is in use by running the `docker info` command and checking the `Backing filesystem` value. 3. Each new image (and image layer) is a snapshot of this base device. 
These are thin provisioned copy-on-write snapshots. This means that they are initially empty and only consume space from the pool when data is written to them. With `devicemapper`, container layers are snapshots of the image they are created from. Just as with images, container snapshots are thin provisioned copy-on-write snapshots. The container snapshot stores all updates to the container. The `devicemapper` allocates space to them on-demand from the pool as and when data is written to the container. The high level diagram below shows a thin pool with a base device and two images. ![](images/base_device.jpg) If you look closely at the diagram you'll see that it's snapshots all the way down. Each image layer is a snapshot of the layer below it. The lowest layer of each image is a snapshot of the base device that exists in the pool. This base device is a `Device Mapper` artifact and not a Docker image layer. A container is a snapshot of the image it is created from. The diagram below shows two containers - one based on the Ubuntu image and the other based on the Busybox image. ![](images/two_dm_container.jpg) ## Reads with the devicemapper Let's look at how reads and writes occur using the `devicemapper` storage driver. The diagram below shows the high level process for reading a single block (`0x44f`) in an example container. ![](images/dm_container.jpg) 1. An application makes a read request for block `0x44f` in the container. Because the container is a thin snapshot of an image it does not have the data. Instead, it has a pointer (PTR) to where the data is stored in the image snapshot lower down in the image stack. 2. The storage driver follows the pointer to block `0xf33` in the snapshot relating to image layer `a005...`. 3. The `devicemapper` copies the contents of block `0xf33` from the image snapshot to memory in the container. 4. The storage driver returns the data to the requesting application. 
### Write examples With the `devicemapper` driver, writing new data to a container is accomplished by an *allocate-on-demand* operation. Updating existing data uses a copy-on-write operation. Because Device Mapper is a block-based technology these operations occur at the block level. For example, when making a small change to a large file in a container, the `devicemapper` storage driver does not copy the entire file. It only copies the blocks to be modified. Each block is 64KB. #### Writing new data To write 56KB of new data to a container: 1. An application makes a request to write 56KB of new data to the container. 2. The allocate-on-demand operation allocates a single new 64KB block to the container's snapshot. If the write operation is larger than 64KB, multiple new blocks are allocated to the container's snapshot. 3. The data is written to the newly allocated block. #### Overwriting existing data To modify existing data for the first time: 1. An application makes a request to modify some data in the container. 2. A copy-on-write operation locates the blocks that need updating. 3. The operation allocates new empty blocks to the container snapshot and copies the data into those blocks. 4. The modified data is written into the newly allocated blocks. The application in the container is unaware of any of these allocate-on-demand and copy-on-write operations. However, they may add latency to the application's read and write operations. ## Configuring Docker with Device Mapper The `devicemapper` is the default Docker storage driver on some Linux distributions. This includes RHEL and most of its forks. Currently, the following distributions support the driver: * RHEL/CentOS/Fedora * Ubuntu 12.04 * Ubuntu 14.04 * Debian Docker hosts running the `devicemapper` storage driver default to a configuration mode known as `loop-lvm`. This mode uses sparse files to build the thin pool used by image and container snapshots. 
The mode is designed to work out-of-the-box with no additional configuration. However, production deployments should not run under `loop-lvm` mode. You can detect the mode by viewing the `docker info` command: $ sudo docker info Containers: 0 Images: 0 Storage Driver: devicemapper Pool Name: docker-202:2-25220302-pool Pool Blocksize: 65.54 kB Backing Filesystem: xfs ... Data loop file: /var/lib/docker/devicemapper/devicemapper/data Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata Library Version: 1.02.93-RHEL7 (2015-01-28) ... The output above shows a Docker host running with the `devicemapper` storage driver operating in `loop-lvm` mode. This is indicated by the fact that the `Data loop file` and a `Metadata loop file` are on files under `/var/lib/docker/devicemapper/devicemapper`. These are loopback mounted sparse files. ### Configure direct-lvm mode for production The preferred configuration for production deployments is `direct-lvm`. This mode uses block devices to create the thin pool. The following procedure shows you how to configure a Docker host to use the `devicemapper` storage driver in a `direct-lvm` configuration. > **Caution:** If you have already run the Docker daemon on your Docker host > and have images you want to keep, `push` them to Docker Hub or your private > Docker Trusted Registry before attempting this procedure. The procedure below will create a 90GB data volume and 4GB metadata volume to use as backing for the storage pool. It assumes that you have a spare block device at `/dev/xvdf` with enough free space to complete the task. The device identifier and volume sizes may be different in your environment and you should substitute your own values throughout the procedure. The procedure also assumes that the Docker daemon is in the `stopped` state. 1. Log in to the Docker host you want to configure and stop the Docker daemon. 2. If it exists, delete your existing image store by removing the `/var/lib/docker` directory. 
$ sudo rm -rf /var/lib/docker 3. Create an LVM physical volume (PV) on your spare block device using the `pvcreate` command. $ sudo pvcreate /dev/xvdf Physical volume `/dev/xvdf` successfully created The device identifier may be different on your system. Remember to substitute your value in the command above. 4. Create a new volume group (VG) called `vg-docker` using the PV created in the previous step. $ sudo vgcreate vg-docker /dev/xvdf Volume group `vg-docker` successfully created 5. Create a new 90GB logical volume (LV) called `data` from space in the `vg-docker` volume group. $ sudo lvcreate -L 90G -n data vg-docker Logical volume `data` created. The command creates an LVM logical volume called `data` and an associated block device file at `/dev/vg-docker/data`. In a later step, you instruct the `devicemapper` storage driver to use this block device to store image and container data. If you receive a signature detection warning, make sure you are working on the correct devices before continuing. Signature warnings indicate that the device you're working on is currently in use by LVM or has been used by LVM in the past. 6. Create a new logical volume (LV) called `metadata` from space in the `vg-docker` volume group. $ sudo lvcreate -L 4G -n metadata vg-docker Logical volume `metadata` created. This creates an LVM logical volume called `metadata` and an associated block device file at `/dev/vg-docker/metadata`. In the next step you instruct the `devicemapper` storage driver to use this block device to store image and container metadata. 7. Start the Docker daemon with the `devicemapper` storage driver and the `--storage-opt` flags. The `data` and `metadata` devices that you pass to the `--storage-opt` options were created in the previous steps. 
$ sudo docker daemon --storage-driver=devicemapper --storage-opt dm.datadev=/dev/vg-docker/data --storage-opt dm.metadatadev=/dev/vg-docker/metadata & [1] 2163 [root@ip-10-0-0-75 centos]# INFO[0000] Listening for HTTP on unix (/var/run/docker.sock) INFO[0027] Option DefaultDriver: bridge INFO[0027] Option DefaultNetwork: bridge INFO[0027] Daemon has completed initialization INFO[0027] Docker daemon commit=0a8c2e3 execdriver=native-0.2 graphdriver=devicemapper version=1.8.2 It is also possible to set the `--storage-driver` and `--storage-opt` flags in the Docker config file and start the daemon normally using the `service` or `systemd` commands. 8. Use the `docker info` command to verify that the daemon is using `data` and `metadata` devices you created. $ sudo docker info INFO[0180] GET /v1.20/info Containers: 0 Images: 0 Storage Driver: devicemapper Pool Name: docker-202:1-1032-pool Pool Blocksize: 65.54 kB Backing Filesystem: xfs Data file: /dev/vg-docker/data Metadata file: /dev/vg-docker/metadata [...] The output of the command above shows the storage driver as `devicemapper`. The last two lines also confirm that the correct devices are being used for the `Data file` and the `Metadata file`. ### Examine devicemapper structures on the host You can use the `lsblk` command to see the device files created above and the `pool` that the `devicemapper` storage driver creates on top of them. $ sudo lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT xvda 202:0 0 8G 0 disk └─xvda1 202:1 0 8G 0 part / xvdf 202:80 0 10G 0 disk ├─vg--docker-data 253:0 0 90G 0 lvm │ └─docker-202:1-1032-pool 253:2 0 10G 0 dm └─vg--docker-metadata 253:1 0 4G 0 lvm └─docker-202:1-1032-pool 253:2 0 10G 0 dm The diagram below shows the image from prior examples updated with the detail from the `lsblk` command above. ![](http://farm1.staticflickr.com/703/22116692899_0471e5e160_b.jpg) In the diagram, the pool is named `Docker-202:1-1032-pool` and spans the `data` and `metadata` devices created earlier. 
The `devicemapper` constructs the pool name as follows: ``` Docker-MAJ:MIN-INO-pool ``` `MAJ`, `MIN` and `INO` refer to the major and minor device numbers and inode. Because Device Mapper operates at the block level it is more difficult to see diffs between image layers and containers. Docker 1.10 and later no longer matches image layer IDs with directory names in `/var/lib/docker`. However, there are two key directories. The `/var/lib/docker/devicemapper/mnt` directory contains the mount points for image and container layers. The `/var/lib/docker/devicemapper/metadata` directory contains one file for every image layer and container snapshot. The files contain metadata about each snapshot in JSON format. ## Device Mapper and Docker performance It is important to understand the impact that allocate-on-demand and copy-on-write operations can have on overall container performance. ### Allocate-on-demand performance impact The `devicemapper` storage driver allocates new blocks to a container via an allocate-on-demand operation. This means that each time an app writes to somewhere new inside a container, one or more empty blocks has to be located from the pool and mapped into the container. All blocks are 64KB. A write that uses less than 64KB still results in a single 64KB block being allocated. Writing more than 64KB of data uses multiple 64KB blocks. This can impact container performance, especially in containers that perform lots of small writes. However, once a block is allocated to a container subsequent reads and writes can operate directly on that block. ### Copy-on-write performance impact Each time a container updates existing data for the first time, the `devicemapper` storage driver has to perform a copy-on-write operation. This copies the data from the image snapshot to the container's snapshot. This process can have a noticeable impact on container performance. All copy-on-write operations have a 64KB granularity. 
As a result, updating 32KB of a 1GB file causes the driver to copy a single 64KB block into the container's snapshot. This has obvious performance advantages over file-level copy-on-write operations which would require copying the entire 1GB file into the container layer. In practice, however, containers that perform lots of small block writes (<64KB) can perform worse with `devicemapper` than with AUFS. ### Other device mapper performance considerations There are several other things that impact the performance of the `devicemapper` storage driver. - **The mode.** The default mode for Docker running the `devicemapper` storage driver is `loop-lvm`. This mode uses sparse files and suffers from poor performance. It is **not recommended for production**. The recommended mode for production environments is `direct-lvm` where the storage driver writes directly to raw block devices. - **High speed storage.** For best performance you should place the `Data file` and `Metadata file` on high speed storage such as SSD. This can be direct attached storage or from a SAN or NAS array. - **Memory usage.** `devicemapper` is not the most memory efficient Docker storage driver. Launching *n* copies of the same container loads *n* copies of its files into memory. This can have a memory impact on your Docker host. As a result, the `devicemapper` storage driver may not be the best choice for PaaS and other high density use cases. One final point, data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you should place heavy write workloads on data volumes. 
## Related Information * [Understand images, containers, and storage drivers](imagesandcontainers.md) * [Select a storage driver](selectadriver.md) * [AUFS storage driver in practice](aufs-driver.md) * [Btrfs storage driver in practice](btrfs-driver.md) docker-1.10.3/docs/userguide/storagedriver/images/000077500000000000000000000000001267010174400221165ustar00rootroot00000000000000docker-1.10.3/docs/userguide/storagedriver/images/aufs_delete.jpg000066400000000000000000001142231267010174400251030ustar00rootroot00000000000000JFIF//C     C 9 S98O>{" x K_6 'ws5riu㤞huccW9kP^^xVZ/Z^nؽI 3XY;; u_E ٴ*D£'T;3^uGHs؈l븵m,5k%C _qrr2cR`o(6[O(Z3\'1yQcPha}}eaP[[mYCATtԗ3 L۔.<i y jUjUjibw5 o@@;UZUZZXBP׍ juwAfL TW`kVʃ{`zY/1q!;ϧ6iZX gSEu OABz0) [c # Ҽt6VLu" m~ӂ; չt]ez- Bty`߂Կu;\ʬIm^k1-j6iZXGRoaB2[[d~v_s :p T?~>@,0jFd #1T`1lղ ^€US-Q6skQ׈B7{ؕB!wkl>_qߣz"WWjB7ӄ ]B!wb>0pE=2YN^ !?I#˪+5 oCz@c֝av buWW74>~5J5$^ͩfЃGl)fulʬz;zۚ)wÅCYt趷) ~Uvj3cxMؚ:'l-)GmoRxu?;+tx;Տ4c͉YTל/=۝-*ņ\65I(|3+`!#r8BG{'[Y,OM\>=e2c6>~yrP9.]vU[R>~</IR@ r߀oc5P)@|Ǧ@5:[-H/ zZ _ @#@ jG|/AÏ R< s>@76Vg 1y R<'C`oC@V R<]`ԏF` @i~@\-`dyhq\z<$ s+TcdZH @V@$ǐ4>bFMms @ZHi $@7mx aP)pRn0f@p Mf*+MI@} ސ_s*{honʵ}~므 8 /Myy4o}@J @k$C\oBM< A$C\oBM< A-puHk4Aln"\t0C\=W瞷废@"-p%sz#EZW8.P m`E\ta˲CVp P=_џ+t9_.fͧ$h&Dr)c &"@rR[@P@\M|R0@] Jt:!3 8b iPEEJ.P*Tr xY?2 0564!12@P$`ap&=ܤjTZb#HŪF-R1jTZb#HŪF-R1jTZb#HŪF-R1jTZb#_TZb#HŪF-R1jTZb#H߃-R1jTZb#HŪF-R1jTZb#OQ]z[#{uv$cܥnmFZ~ HǾIg (ĔިqvkZ/sw.|b]1w.|brvswz/c".v"-ōEm4DXխXb(W:^dt"Fk%A2dX-1"ceC'6s,YuL* عdkNʐ&UGMl#˛6Rى(X*NM@Cӣ2F2­#ꭷ}Gä hvj"fΊ#5k٨ڔ˅9GvصoqغiZ eQHRSaGٌȭ(\cI0 4+y(I*~U]r dv3+la.s QF2+m{!y#dvR?G=dφ7 ïhv^q\s80%PW"aYkRtf1' nPf ᇑzG,c<2"If !-bՠeNR씜tmCW[PRsxER *F͚wؕl7v1ߟ)"Zc:scͽDO\LKB+vR)T }wLie'/m1Q$c"V}| E6q7:I?(3r+`Ȅb+ηe @IXq͝/`JCxFȾ|1=?奌U_/|1_|] Ӎ8(!8dPq Uƪ?n5Juf3KUkZV/wUƼxֵFgFc=GQKeE-TRQKeE&b!jQKeE-TRQKeE!qDQKeE-TRQKeE),j4Z{X|;TRQKeE-TRqt"_1l`V[_cW/ơq{k` 0+`p5ʊ[*)lʊGc`>.쨥ʊ[*)lXܲ>dP364*[]'ljMMl;VF+A9m=\fݍ3aF&c>LbJ9-g; Yk$wgٝ?8_#G+?=o^1?eqY)}2g}i>Hﭑ+O[闳;L~p %m?eqY)}2g}iFnAo_o=MÑF/ - - -³(ë!M - - &2-~Uk_ZV* 
*Uʭ~Uk_ZV*Uʭ~Uk48_ZV*Uʧŷ^aoaoao2/ᅾ[ᅾ[ᅾMf]!Uʭ~Uk_B/!dy^]"u?_`iPً*-[XVk2Ҿk+m#s"OҜm6xmȴvi xPii*& t? ^g&$7x"lZ>9clf9ܫ643uPkcD[^;Y%{-1G;ߎ-ߎ-ߎ-ߎ-ߎ-ߎ)ܻ+ŻŻŻŻŻŻŻŻŻŻ3CoJW" 蛊o λR.kwwwww%sZ\֮kW5jsZ\֮kW5~8~8~8~8~8Ʌ٭]M.K4M.K4M.K4M.K貺,+貲&c,QtY]WEetY]WEetY]VO*UNK2PA~7ef= eo1z,+貺,+2 +2 +2 +貺,+貨v׍eOwNϺHvM>Yx0zeB/,oIrȸ+]V vZ2eka+]V vZ2S9Hc\Uqnź[qnFG,SUaeSt nź[qnO:jV vZ2eka Yܺ;a+]V vZ2eka+]V vZ2eka+]V vZ2eka mOri aiFlf/(kk S=<eH oj~(MRQxF:@R>c%PH:.vΫBef[ z{-~Ay/jpNnR֪|=^P?v :X.6j×fA&!ȴ tL Tp6Cc$qD1C!dm[Mk|ſZo^uUhW&CMpjDV+OʳenKE 1Tc;A{Xmsބ$KcȘ6Ʊ>D~;)JR)Xx}G]?[YvԚ!]RS8QMC 7{v}]6WAg#OMiW'Y}2g%(ԣ3IkU֫W#ZF\jr5kU֫W#ZF3"0[؜[bqmŶ'؜[bqmŶ'؜[bqmŶ'؜M㓭9GVW#ZF\jr5k:Xx-U֫W#ZF\jr5kU֫W#ZF\jr5kU֫W#ZF\jr5kU֫>uKpG_L _{ǿv'q~xyKbqqqqqqqqqqqqqqqqqqqqqqqqqqqq,3AI^(jqUƫW5\jqUƩmǭ5\jqUƫW5\jq ;D/fffffH'&|2[pa'XvDnI|/=@B\¬'df2u%4U-X+Qkt#g '8üzAA$BCS R\})/>?1fue2Vĥё&Vf[>NO!NELH|}Džg)՚?pXTtv7H+s6QhݣDdb#5MĒN ]Pjv+v]3m=ӾkOօr!߂r8Fʰϰŷ["n{xE! 1":rȉ!1_|K{3spQl}E]܉ez|6kNRw=.krgԜ!&H["&>iO}hFZ4ѧ֍>iO}hFZ4ѧ֍>iO}hFZ4ѧ֍>iO};8CxE5Ƣ[cQlj-E5ƢUJص[blZUj-VŪص[blZTHѧ֍>iO} uMȝsΡYffZT`J2r&= h3\d;7QȘר9O fy /ScڞNÚ_b+YLa2N~h3I*ۜ-eX1(اfOz1&C|!#M?:.|Ҕ/Qd(UѢϿ0#7p|9n϶+[uwZYU~%G:z -]ul{6_}]S]ӯ$c~m^h˯2ż&‰=[jH + ‹f=&|G'#Mp),T@e܊rd$Ip![흏2Z"yI̞#mSP(2FIIC&۱nL̐A#2/ap{q~{mRO;[UwC&iHP>4'lMDJ_#iiCtn~"»WȑǎN0hͯVL2ُCAdy`/!it['D Pl ^j~6".qttYhA"E=F] ~ۍEYd%:oV]l,txǢuch;8@Vf7twg)YE6M n:J C)m 9)c7}t<of+`Z}.ǀtKXeácQ[@ }DI~5\e' h-5c QN!1 "AQ#02Ta@RSq$3P4B`p%5CDUbrs ?Sg޵b1X}V#zՈ޵b1X}V#zՈ޵b1X}V#zՈ޵b1wX}V#zՈ޵b1X}V#zՈ޵b1X}V#zՈ޵b1X}V#zՈ5b1X}V#zՈ޵b1X}V#zՈ޵b1X}V#zՈ޵b1UX,֬F>jcZ֬F>jcZ֬F>jcZ֬J.<ؿZ,}' Eq3a_ceNPO־@ HخC5wwwwwwwwwwwww=sH7kٔřm +` j cMv0C[:RòS}crzqT'X,IH*!kmڠ{UT۽8vs0WM젙]MD-9&K,Sl &.H]LR=?)lEG'Pё'fz;V+eMTC+r ]G`OP2:9gss";yXL̒I靗5Ziy$:};~IkXl*YSNn-ŏp>2X~WGdyYS\^׸ B̞w+yuس26ܷ=e4\5SPX v3fsl_kh"d9M1ksG6&_0y#oEGg3xM{}Cö[wⰸC_C cn؟js]<6P,͊7wb94V!6Im1eՆelؿY| CkN/3?.u9/-vLo3uDU(2+|6%?*pkZsnX SS-KZ2mکl tN#aذW)eGZWuѷ-NnK <أ)9ej2EQjKl5ntł3=SuLE#ɩ$fY헲d*z3OnY]b%sZ ԝ}TB; fڬ@2X}HDӈ;3j݂pO_UNNDDQaFۢ6G| lڶx G4>9uTrس&>Om A 9x$h.Zh̩(<-T|ڪ,6 /'-SrS)-< [YbKKQ3ch]UIץ 
{~XzTGڧ\,8GDnWU9{6<k2SoG&YA7߷fy&o}8w7ߞQO>os(˝)S@{~#krSK4AM/SK4f4AM/SK4AO%V^4AM/SK4AK/`¨ ) ix^ ) i^~Ԏ(;҃J(;h=2wu ҃J(;҃J/FU4AM/SK4AM/S]oLlSK4AM/SK4\:,R,j*NWfՊyk+{5bu b+T. =`C^z;v+n1VvHi=+&Q-o==<Nq罟5tޮ<<ޱ;Ǟ|g{?Sn߇|gz_˝\F.0q,=aa vg7/0q,=aa wXexZ/7Z/7Z/7Z/7Z/7Z/7X GX--------------,IoT]-------vg<,=aa wX{0K6=aa wX{0sZ^n ^n ^n ^n ^n wfRid~01oGg:WdGD}6{1l<{>=<|g?}uKt3~]? R?Q'j nos`.eL|1 6#彳?.w{>k߇ke޴p0ɌTfPafXо:WW#@RX̲ZAY4LYy ##iBl.Y^cFvYvp4V0vao{\ZJ5\feL&LL)~V/~ڠUVHь7p~3;])XuQ~AJrME9}́kud*6:F Dq L rp$#ll=}ԥMnT4tܔ3L2WV',Rajaf\;ԒTIZ]i7Og7P]>z9,;؜MrdYCmXK,ltzXMG d;}<졲zpjձ*+1e}iXlQuFS ;KF˔yD{y/}k|V\˰,Hp9bCXrćvg;nW,Hp9bCXrć$8!,Hp9bCXrY!#Br)gdԐv/ʘ!bCXrć$8C5[{Xrć$8!S#_a_Zo(ǹF=1Qr{cܣ(ǹF=1Qr{Cܢ(D=!Qr{!D=!Qr{Cܢ(D=!T9Ϣގ_Pޓ.;u&^3F٦}.\NA%٧/j!Fcy g3Xڰ95w0ݛӚd8Ǻۚ ʝ[dҷѣ&=DܞkAڛO^S,66ta+v˙zCiDkC[V[V[V[V[V[V[V[V[V[V[V[V[VXܙSJmTP~Aڨ?[UjmTP~Շ.mcV[V[V[V[V[V[V[V xbjjjjjjjjjjjjjjjjjjjjjjjjjjM8k]4n`efn nQ ꬻyk֩O|J{*S9RʔTr=)O|J{*S9RʔTr=)O|J{*S9RʔTr=)O|J{*S9RYu/tQEQEQES<](((vxeJ{*S9RʔTrvk|oSi+Em4BV8{ORž&S5sK>v)ι޴tN'wTۗws44Ʋ ad~aNe̬.GIZ;<8oXIrhn+sinhjL>7%Xv8\=VfsDmxoYX5 by)iE8)f%`o},."J3Aq XKUMaռ4(r6zwrv$lԃ@ýſl0UmdlRf}gBdș(=-OY{rrH@BZwاkl\ޡw7e.} C e&D=䒦YJ\[j8cxJs`MJMaɈvk)GXڀZV顚y93$,e`,Xrkɽv, Z(y|o'_u U9U[Ly\ yldd.PɋoV 5:pW6Z*<uxa`o}~]|¤6 d|Sɕ&":hs77y^U6,"\2=9/y,zYyݫnpeB.NӴ=D]}_zxd멮#A&g Z9퉻9ƙ$›{﷥)0^ZLf_b7FϩmSFb2 )faz't06 .dS:v^ʀږR?B߻zYŔ榜roO N\ǜ Q U*{b:H J wGӎS=+mL%|\Y@mUF--7PGVzq #֪alFZ ӘeS+b.?5;$|4aOeOWK@j Wb:/isLMSsb13;6ܫ[$r72:G~Vلu)`UQFZ& g77e_>|/@k1=HP)u&[n7uYndج|tY&1Xsj9b׈c'/,;:(|GMQ53i϶SG gm)A@vKʳ9}+\d'.lk%yޝF;ɘYh;ysAPgAfmߊ$!.hR;JZ~K+OBt1  g3>O/ #Y~Q.\s+0:Kل 9wIÓY3sԎs晲gٝԘDULj1iouS+˷y9{[ 2GbG@3};іQ\G4d<;wVE"\ 9Ec$ 4'+`dP 7'ݜ$ XM+|djN8ɌihY998NH9v*w]%fǺ"ސd P Hr-7"F\9)1 ijb)q2xB)vpHP愺7< F=`Ɛ*DpjDpM^i Ia%F@!9.X}f̊;A) [אS gC&2% #9O2Ep 9MK⌜h@ZZ. 
&`B{:BLݜ[oJ;`zU%H9| Eu"&.C @BaN&gs6ᗔv†Th}f|;n#R `ȁiHox 4g xY!kJCYw#PLyEeU 1w3;n|Y9-XU9ʫӇ|2һ#`!}&6_IMlrMle/}&~}&6_IMlĈ ǟ6_IMle!S>lg͟6Hԅ =ew6M6g͟7Q*le/}&6_I쳶se/}&6_I@Fh(5izLz+o祔Bf ° }BރtKDo }wl4'qOK}wl4'qO-Lb'q]z85Z888ㆾWn2,Hh`P,,, 8 qYe]s%8〸XZ]{q7YLKL(݈*  @8T"M,0$cBͫ1Q)3!k@0 *BQ>ED 4@1u0{XxʉZ0HİywR8X>E[)($ú>uDk3٢+d.o'@@ߣ°11LMDBs0JqzWþf-5У~AI5@u=  KpUZlJ8]bfsA>Gi9LeU DdHs.@jaHc! `QXd.fSʁ"к0R?n5E( /)uG )k@p~_xITI/2pˤ؅ p.y@G | ]ܘq3g1c8q3gb}Wv}&7_IMhE{su}&7_IMnu} 3!+)n ~H@ nu}&PZiiiiiiiUMnu} o(EGXO6;VvYڳgj՝;VvYڳkם;^vx@dPYkם;^vyej2lt@y[Jaaƀ- CP(R4Nם;^vyӳgNΝ;:vtӳgNΝ;zvyk9y;BQp)4哀i™8L1m888= \iiqi@m<88R(dϛ8888884~ PuJq`ŏF'`(|GqA9}( k {A QFF%`, (`(MppD ]o@smYp$@efQV/9lr÷goʍbRFhP0 A~D@#T,twi}Di`=EIȣn XL H4T'7jeVU a !yV\:Ҙ[7zU(AB O;va%gU0J<8HcSΐt &1A ,p̨$DAF 0Ye⯕ H_sYU!blAjeĢ) M@ ?2ҸK@UÃn$|LD|m{{O|Nj5[Vf~UY߬oj5[Vf~F2 {mWE y{{{!Rz (\ 0,q3g1c8q3g1c8q3g1c8q3g1c8qۤ-qc3g1c8q3g1c8q3g1c8q3i31zn޳vݽf7oYzn޳vݽf7oYzn޳vݽf7oYzn޳v\lflflfl_X*6 3`̓6 3`̓6 3`̓6 5Xݽf7oYzƛu9ܢHbqQJa̭Q$Ԯi L=h%w-'l,"hx5A@\2|:؂,ň- 5]Nh9bX+* 0Bbo dK|Fa"V/Ir^u[6gϟ i[|QzKhs c.wts\;oKphp s!@xɗ19 &r`(b9G/MW0(hU)/! 2k+0R@Ca,3;"eԻ*AzE:2ISAb*(T&P*E< g ݟ>36%ҍ!W}EHCMNjHZ9o 1R i0o4Æ% 1hgτ5X MjF&.Ze,Hݩþf:       n23_9ۜk5s~ns_9ۜk5s~g?6MlSg?60mZxA*BZE=9Vit 0x@6Ax*~ECSXCI[#/S^+3q!M5lMkHn`P:n 0ʰ2vS ~I4O(2}P0N*k;'D5. 
*`lb X = :\ ɋeM}ʣ,--*(qb)UB9 }!$6$6J#ʼJi-$ ?YXP Ŧe4-%t9vE18HL?a g "0&y30L hX.B3/P]Q֖x{ݯKC`"\Awc8x(TJT`M:K#1m@Am#: İ}|Y ȅ j)_6@hrctfVHqJMAO*Ft31(K~@aPלRB*h4:?b8"1̌%_xW6,vDࡵ!儆tI*ͪKJ֤0b6XRX)"ө^W᳍I,,I+]!@ /DB 0(PlGu^=l$/?EO%ڌi6)W9 `izIpmZaԑ-Uh*3DMmf р3^LP4d$תcl;;UL,>NP,!1QaA 0q@P`p?!9D4.<˻.<˻.<˻.(4ywyw]ywyw]ywyw]ywxo̺P˻.<˻.<˻.<˻_RYw]ywyw]ywyw]ywyj +C*P7 C7{" N*5%[zˁDҊJ6m(QiFҍJ6m(jNv2J6m(QiFҍJ6m(b E,,,ת%J6m(QiFҍJ6m(Qq|( oKg^( Q C խס+nL$,^s0`W_FC@°hԧ}QcYzDw"ߩg_#$- &oc6\QilGaC 3E5pzGZ:T>65sjdP}<}B) WiyTz^D-3k#L)a8f8#`WCx Q5lXT:5S)b{mm:+½F4f k֍4hѮzBWb4hѣGOu(ߗ9F;g6OOdY?M [G V1'馸I/Pw0_4hѣ&>ӣF4~Bcf̔bi OaM")u*Zi \>:bfl$мj>hsnb^-M=Ip 8qgC>C>C>C>]~}Qc-JceMeu]uGV}w]u\s'{pF)o"y~\~}c?\!뮺j%뮻\L쿏W+NykHȂE@Xh傽ZVB*uMDƵ Q,xam57[ˌl,50˘3Pv_j,Lj0>/7unVݺ?WꮶKuK@ VAVT*7T%q-V0BOZ DLP e-RP- e,>LIVVY7f@SNϭ+-#q0GXxM+4l }h~ܭhc7^t="ժӁi{7_YE^osb 7iuN4pZԄIY}_&RnFGT6D{X"E=p#:rDRy]wB*%V]Ы% JhGĴ:%}5_-ٖQcX5BŐ픖+P>g _Hn]5BtZ`Cz]x[=h~ݴT'-?qrʽwj#ųIy.hϰT҈HFĺ8y?q8y?qb*筳f͛6Vh=v66lٳf͛6mK1ޢa"ziЀJɃ-EӨٳf͈3Osn~?[snQ#E 6lٱP\r ]>%<<<!Q_I彧[yoi彣ڍO]dI$ւBI$I$I$I$I$I$K'qȴxs,{vM*_amԬ.:SmE=VZ2'WRƊjNқlx"D3]@`5"+8;M_Ҋ!A@a4&YE5bnOpQub1aTP^%IC \**@bPNTL43H hI,@UPF |FNZRSl|I cM(1gφs33Fi:oK7>Mp ~Ti7:&s 7D3!l1imޫim"5PV* (m®QG5[gu9S4VcL| c^ҹ p ҄f] V76%#hAdO ܱ({=+gϠ2sx'?ljsx'?ljsx'?ljsx민"D$H"DBG#ljsx'?ljs|x`6m+<9xzMKϪSqSx2|4pÇ8pÇ8pÇ8pî^AC>9ώ|s>9ώ| AѕtN|s>9ώ|s ~c8pêzǘ:JTuF`@7+G@c[vqd@tjDCv`(;Tm==W¶9ƌ #gutaα '.ԅM +(p9hKq˔ځ~ef*ba~+z-7 Jԍ:90 w$4XRBSnj&Oӯp>L/ݔژ&=l:yw`nJ㢛\ {uc1G~+nձP#hA[5" Τy 3}u-ӓSYvy?5/IKeS]cD1R5V0)FL-أU,HFThWC{| SWЄ};t~wи޷KY+YX3ܐG: 3m굽-m=&#"AA(Vz@@P{Gf$WQ -4هBJ_Tb\ƪv}?}2*QVT_ Bf@\yʸ#Ɋu,-<{-<{-<{-<{-<{-<{-<{B[CdbX'bX'bX'b[IGu՜Xcŏӆ Fr1։k8mlİ`THޝ 5$ƉcX~ u x p]&KR_\Ռi(UU$4vҼ=G9ln&7JȽ-CI⒙m7 D.\)Zm*eCa]I[.!W:€bĔNp43Qt4EO6lVu#btV]I -ľtbt(F#Uܼ=^eƇIfuvGY1&C.\65Jial ucuhf  g9: ̑f5Y!FxmUaIet7Q1F=!_KtV of569Z l4C{WIY[,Ђ{5lԽP ,!1AaQ 0q@P`p?ỳq{(PB (Pruh˼B (PB -h0T(PB (PA?ъ1xw(PB (PAHzV,S gY2Uhl0ja]pXISylX ^c,e2Xc,eD#,t2Xc,e2XɡFP" {iirɲԼAc,e2Xc,e2#@#S!(h;Ѐ d=iN MX *?A^l6 ,ªCEa/E(rp'="T#‡FiPrr lIZ#}e*aPLݲ2OV15`4;kU&&m?AE@Zm~/U 
w')D|}ĩMG$bA7pMP>Pu6d96E*Ri? 8+a.' uP#ҩr<їP 'OKC`X'j@.,U])$t//j/Sc?ၺ, t~<ˆ"hSW0}sGŞkRqv j\0jlTh~sEo2Zpo8 Nβ (P|F&-P2Z(PBT-d.u(PB7P{ A@B ?rh)Ã0>:ѣF)؟iQB_;@-F4@-(PBJ4IY (X 5S:]NϯqTöX-(llK9.t ~ 2w :t2ft%9A!p?ٯzj")Ȯszwh5 zwh5 zwh5 zwh$?l<*qDV!9ˑlˑlˑl`)ȶ\eȶ\eȶ\dEM[}tĤAVW}}}!,0u}ظSzօ.E[.E[.E[-R$FuAC.E[.E[.E;d!ń]]}jqT奫c5XGJtYTK}?_~l+9 ysbAKz\NON=01 6>7O꺁hc A}9Ak>8ʈ[y& S%"ӟT kϨ2Z5Խ˕UXxIԛ00Ԯx!BQ͡k r32iNl6X&Hc2ZʋnpB$6.RHщ1 }?_iYy%vUh Kd)t@WӰ@ 57 ǯ= WcUA5jC'?%vtBI!m&SI9˵^P+F7|7o?nQv k2 vHU|T V3,x^ \%<8jRUYl]íf< 9 #gq ɹU iN@mHt EH iHijB=/f5+y^ Lh @w0c}JG^XR] 0Sw4Erh.#P1c1! {^&L2fGHrdɓ&L2dɟbK$j'O?C@P1^4f$Sdɓ&XLbKaaaȷbd u2dɗ}х֟tWqqqqqqUg/r/r/rHב:? ~˗~˗~˗~˗~yA,.躄  `Y?!$)%7*BɶZo?.k׽J*TRJ*TUk#(PB (PBG% ԩRJF@7`+=>*TRJ*TRJ*TRJ*tЁ~tUR0J(q]X `,X `,X `,X `,)^ArS`,X `,X `,X `w wHYwڵjիVZjիVZjիVZjФv**&FB!BiF~!B!%ID]VZj{Q;׍y9A # ,qx0#eC]$mgo@G\@P ҆Ѳ% @V6#DI)mSEYRcqte0Y\5T1`!Rz1o& 3>!t0H%p8yG/$D lgHpmME ~ t"_ Nas5eaAʑQ$59UiQZX|c Np„[F}j䃇O`<#X7=;}9=rDY@6LMԑ* 9hC0Ð IFcEA/0P 9VJ3^&EiRxgAw 4 a5B@SR|/H .JbyE &Q @B 몂@+Ƴk A+F7ځÈ49tPzBbfF],|$V,@_"r<5Vt!sFh°d(WCdG0(4Gi%Ψ] ,0~c3. EЛH,/C,C @ @ @ @4li1 (PB (PkΘ#Ox,,,C 2)#iH?@&iCN- F4"a3 QCAG_II .kQ_d{H@.B7]39Id@ %jyV "#O\ftFm.:EN:M? 
; 1 $꟝QHC>mdL/ʻ2OvOKD^C=]9*zYzPvrbLHr`藥~FĊʱ0֔1X7$.X@ ~<1&Uf D0GC7Uȼ[LՎ& j],8\X*H=8VIҢ+&`.)g.3'5$]2Xp8 ,KM芡̌k(ɰ D+r6@# Er "6VA5*.EcƸG`ST$\#0R&A{YC DsL'FrƄIËA @&"aU\tg|Ú7z<^xb j@DoV0Ժr+OB[ < <&)IBE6vGLd>gxIivZJ +֏^Hd7YQ @bw9RcHYB!!B#%k&CQ%Ѵ#jIT@51ae` >8B7IԄq@G ¥] v+!06P 35@21Me cե*jХ9HM^oz&)DkVJrc6zUTU;gֱ=DQoEWDM.+R.{"K%02S%02S%02S%02S%02S%02S%0 cU:TYY G3Uw P$(dmr !mMLEaXiƮY.s6pdjj8x.=̋ZV%7࿑4C7A\ӌ;S4Or&3WVBSЁJӢJ#I'e*-I'Oȕ\RBa_ ;^[Sp8qD ʟZfq9}Z%T}~VI*;u8tV.*͜pq8Y5!jsǩ.N8Y 8g,ㅜpՑXŻjWj/" TQVoZI)F)%@脊Q4Z`_h7WLY 4M땤_ӎ*SN(`߻kUC+_f4ilMӿ^Z#|rӭ{S7[NzhcNVOm;u>W/ٍ:w[>Su׭ֈ_8Ѩֽ];ܭ)wB|D9IOyj'osV͝qᏎᏎ͓yHPծ>8c>8c>8c>8c>8c>8c:8iVE.u11h=Ѳ3r?}&El|p |p |pZ9(ӽ))YNw2-7k~VOI}T|2렂 +fߡow[>SuuF-+ZinVK^2}#<4M4M4M8Vs9rxyj 60c 60c 60c 60c 60c 6&WL$Rn6ZvZjiHycc$0uL4M4MeZ'ál:ál:ál:àGU(lala{G9?qQTn*7xR6uwL7FqQTn*7☙Z3Ӥ# ٮ1 1G43Rw qQTn,7 ņqaXn,7 ņqaTn*7ENit^fE*տH"J-Q* Ɲ۵nG4J4*?}qEƝt\iqEƝt\iqEƝt\iqBsg#ܕZ5LoijZ=Ɲt\iqEƝt\iѥUJ)rӢN:.4ӢN:.4ӢN:.4ӢN:.4ӢN:.4ӢN:.4ӢN!KlI;a_ ~o׷v¾Nz߭oD|&[^=X<pb Y81g,Ŝpb Y80Xt1ꖅ1SY81g,ŜpcJRwg,Ŝpb Y81g,Ŝpb Y81g,Ŝpb Y81g,!VqEnMѢW9G#r9G#r9G#r9}PME+N/#r9L8ap723s#723s#723s#723s#723s#723s#723s#723s#723s#723s#723s#723s#723s#723s!eUvի&eHP]vK|̌̌ȼw%I;a_>׷v¾}oD|^nՑع^Ëq{/a8^Ëq{/a8^Ëq{/a8^ÅDzUd=L2CzS!d=L2A%cV){/a8Yb&G;!0P1@AQRaq "b2B#3`r ?ܰ[fm $_%$$ -/M=JwyJwyJwyJwyJwyJwyJwm';Q9'6Omz6j?oj_0鵽] }tz=oiC݄|ĝ׭}.V_=$GZ9+!+J!)J-}tyz?xHCu=ïjfNDZ_!.{h{4S!־Dl$dHȑ#"FDV%ows$dHȑ#"FD2$dHȑ#"" a~ȕ䇫z9Hȑ#"FDDq-Kwķq-Kwķq-Kwķq)Hȑ#"FDv[dDDDDDD]2""""""""""!6vcRD_~n6gDDDDDFFFFFFFFFFFFFFDDDDDEY=+6kQ ~' +K]6'PB|E 'PB|E 'PB|E 'Q~V4?'PB|E 'PB|EI;B|E 'PB|E 'PB|E 'PB|E 'PB|E 'PB|E 'PB|E 4\6v׍en[~$zIn%KwĖ_-޿[~$zIn%Hov&UؙWbe]v&QttO0!06@P 1345A$2S(f)Tִ+ZJ(wľ>4IUgT(ᙉU W )BfkD嚦Hz&Ý$U9NvRFVy&:p<:p<:p<:p<:p<:p<!RTNKѣ #T)Fl!YH"dh ֨͂"d3l-9QrN8մ {&X-Bb"hffNm9&=>x[qQ+Wd~N.) ~ʞs7,R)3ZM oưL -E<|B߲^*|3)/p)Ԟ*_APLTM$pjqϦc!;ycsÕٽPVy1CwӮA?{oV}nGVo ._Qu=$gM啚:d%Bۮ!edIe'=˟G 7xa nwR1nܒL 7xa nw0 7xa uuºA < nFӔeQVk-ST 7xab)Cssal9͇96Üssal9͂6J- nw0oYէԥ7 C!) 
o8DE C!Hp$8 l6n׹.[WE\Lk!Hk?N5 JӻVǂ+%5!Hp$9iWDrG-DrG-DrG-Drѧہ!Hp$8) _uʲUǮ/nz􊲲XeNG'JȞQ6 p {cǸ=1p {c 6Ue ۿlfJ)~qT1p {cǸ|:kbRѦep {cǸ=1p {cǸ=1p {cǸ=1p {ewOTXeu?RMcs>K5QXD~oě22F\˙s#.dě22F\M̱)kR 22F\˙w5ZאB+\22F\˙s#.dě22F\˙s#.dě22F\˙s#.dě2 I7r8ƌe 3\LKAtAtAtAtAtAtAt%Zny0 }AtA;n١6hqClf48١6hqClf48١6hqClf48١6hqCI%nv_Ӻ(Blf48١1GN4+OrRԩ|G*_>}ӖKxW×r\aˌ9q.0×r\aˌ9q.0×r\aˌ9q.0×r"(Mn{Uz8^z8^z8^zJAT0×r\$_$D   !01AQa"@PRbq 2BS#3C`cr ?WeǺ8PQGz(QZE⢂{!19/ (h4R\8[\8[\8[\8[\8[\8[\8[\8[\8[\8[\8[\8[\8[ B(PEAT pX+t"{Qq`S}{Ei +49 \@JTKn Ch Kwtg8Ԋ=.8ZoK[Á1"SxƗ$ Ot 0[}]/__Cihӣ/Iay!nfN=̺E0RFE=u |SB~pуBo|p3DA/OX/[Ç(RQbA~SfE=QJ~c*?_ʺ~c*.8?Ri ׋W^/]x1uU׋W^/D8sfǼmSڦ>*;qO4a ܛBɍ 0p sU( - .wP1٠#ѽAhOuVow[AhOuVow[Ѝ$^jc~z#hG.]ܤ F ?"Tw7zrrrrrr-ǧRRRRRRŷьwfp(8r?iq7ܷzo~;:ozrs՝эǜݗjswd 3l߻G)kBC49#h9Y%!~ƽZ6)O;tZ2#5v舔RڣڣڣڣڣڢM%뾎KP%ڵ* d8.pèї6u2zjjjjDTADTADTADTADTADTADTQBjjjjj}4_Y5555557E55555555555G nSʠ9G!w$ܚܷzY=;9w0sn^&&&&&&&&&&&&&|:CΡӋzfQ;)>OrߩIpPs~t[N 87TSuN 87TSuN 87TSuN 4H(zEN 87TSuN 87TQGnnSuN 87TSuN 87TSuN 87TSuN 87TSuN 87TSuN 87TSuG6u6'owڳuM [?]SoB{O}9 EhR5`k#X)H FR5`k#X):x!X)H FR5`nz=H FR5`k#X)H FR5`k#X)H FR5`k#X)H Z6;s ;: ߤ|FTD`u]*0^>%CT>%CT>%CT>%CT>%CT>%CT>%CT>%CT>%CT>%CT>%CT>%CT>%CT>%CP1Õ>%CT>%CT>%7-m՜m՜m՜=O ~SBP?T)O ~SBP?T)O ~SBP?T)O ~SB}'N:> (g6xg6xg6xg6xg6xg6xgO ~SBPiͧI?docker-1.10.3/docs/userguide/storagedriver/images/aufs_layers.jpg000066400000000000000000002413631267010174400251460ustar00rootroot00000000000000JFIF``pExifMM*1 V`QQQGreenshotC      C  " }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?(((((((((((((((((((((((((((((((((((((((((((((((~ |+Zǎ%4[=s'p;ş icgXSԯ$h[~rA;V7uj<;q$zg46Qn&f"LeXxko?ڒ>/ǷCdϔ6ݷ3vVR.)D_3*rRjߛ;y௃x7Σ557} %Uۍ˕8$T.ww/> x]B!xQ0ށ!BI^[iqƾm= +yq)x0<ɥo eG_G3&|x+>%xn JG=!ݜJ$XF#(pH<'ijWľ5y>$|L":}Y m,rn=l>~+O/.u{ȑ 2٣ Rq9cnf#iO+;|7i-&XŕvzZ34&O1X Pq} 4mY'nXtq?n w>Eկ/<}/{;i]4$?CL׊|?7+xjk(b.BQ4 ;5mM⏆>_xKDc}zHb*wul &GKAx\QqjwͨZo@bU]ܹS$ҼO %b|Cd  /-@6xbY$G媑. 
JW>|/~{_Ǟ ?]f_|Bj j0 c$xzgqK$?~'\/mRbOyC8~p5U}bܯ'앶g|Vn>(N?|.J- Sׂ~} <VOs|Oдe_ ۍ·}.Jhk wVqv&A'2Ȭ_Dư%d}}= B9|ioh!A}Z dI;sһj):iǣנ&hyoپߍ#$3IȿƩMc%lQqMoq⿈rx~$Nchʰ N]~A@͵w;":T澻lȟS嶛.^|y#!Vs@>H15?~ ?>_xMDg}z)XAjhU]/kϨ[濴ށ?tʜ1'G+?Ÿ a# c9#^X{+}rlt_O7݂II(i`m~ͷ?dN1zןj0I|#w39,~L_>$xUVz 'yW`+$9ȿjhtxMx=(eή/WWݹѠ@ƚ}ޚ0j߁3 dW񕯂|auڌRXo5%EED WNe+Mg~ 3ÚnkӘ1/Ĭ8a߀9 ao xYv[Ws}}=w~^$^~߇WVi.H-Ous:*/0nȭ'T|p>?xWծo FkE!t떸X$̅XQQ]պ}/7E4񿋾߳]}n["MshY_f7ZG$bOjؾG(+|:׎f|ex3mcð$7b]@@D%1C#g?6g&eԴM,-%/SFUqXx+iڳͷ0&~'h,reڤi(#hܰҴ1|݌~ :Vs#cOZ/4x~W_|KɊH"Ŗdfytq  W3*>5|7 ]k_!mOe>Yks5含G_/ 7*&DWϫ|Anm{M^5Di}j;"wa}~|ԴYd#QկrӬJT_z?j=SKj"o|KGmۭ"#QHdK"{z:x-nv}ƺ`_hɎ65.}zvJy~-~7|I?XX>ɠP[1 8'8V߃|H𶛦G/mV7-VWM' 8}z>ҦYbHU m<ٶƾ>]O?Xx3L OMFЍ6n[ҾH>;_.om/jO9K,6mpry wooh?|+Sއk>=hZ?٢@<\TO"\nU?Vm7ݟM|b%7$:Uԯ[M>H2z^2o,;}ۏ N5K QZfai ,H_+i?V_|hW:_ ~?h ]o˴|烵-"OxM"X{=:āq*~nyYgۏ!Po,76Tg8(|~>0uŞ,>>$ξWͽᣇyC+M] A[mϸEW*|C:sk6ܿ'R#M`rTMh[>+x-B/_(ۈ>ɠP[ ;ʼ.w9w MV&I?^#Ԛ4ߍwS_J?Wk.j__?k؏>4.=5wK]/Rj&R&m,`\PYfԯdWgoϽ~$/t&?xíGى]p7>Ԛ7^ox[Fq}jl%z +'/xK5v<;G[ߍ>To-GHV9 ςo?q߆?^.[+w3Y`^0:6`:)m>^^B/n'79_xn+ۻDwg8+/mǁy~'kٚ9_Z_ +@{]6Ě{⧈/aqeck-J  I~,~Z E6{=\MZ?ưbHI?+n'VmYGfxMr?t$o+TF '[fAwC[[Ʃ;! 
} ^x'5~{| +"iKE$q ,; p Ǿ4x?ԥ;3Z@֡(%X%yS?ʱg9TM;>ϱosj m*ʟjٻ zg[}?©Y$$3]#=O"5;~l?ncGOt .λaw{4kPQ[$ְ;j* /m>WdW'qrjB⥒M{R 1sg^ ڴ>!u{vn3lH >cB0 dk[Sm_>hS:ϊ 'MF[v1fTgbWF63x>x^I|ou=iv686ZrM?WemϮ.A _>"xMa}@ԡ NW~qqZ~MzniJi{y ~&Vݝ 2~ ͡(xƷ],kwW>󜤢 =oԵ [i"LcP.G~ٍ~_7Q͚|K彜5v:1*mZ[M[gy뾈~ mpOqǯ?ğq$z{ 6m*7pTu|B<5$|LIķI 68F[ҽ2T澻l>^[i1|OxGVjݷfvݼczևLM'ė/Ijku=,^EUVepێW2 [K:|QIJ(Y}->ndN%PūO~ ;袾-?`8iv*㨮P|3D'W9?=?O>02 <?=?O>02 <TS  sQz4s_~Z h~f5m4W12vdeR@TW;:şx4]S_6aKJ[|O"ɲ =7aok^S_]SO"64߈Zī|quj)m%om.f7LSvHݵھ_B㖍u'ltd?{22H’3YZ';k?T◅ 퇗AamwoFwe]1NxҰ-,?`jc gq?_hcv>}3ހ>m㧉<{#ݞ3xf>t]ИHAl.Kqes_]>Y|bku:$Ū\O|;4k쀼tcjzXxIR¶Bw&gmٍYyq<3O Zׅ x^#~THs<ז3HJMn*G(ᔌP~W_힋C.&m qGjVJ44x`OS }H<|5{oz&uf`c6’Hby9??i 'y |p5xZ 5JTXJF%r9 ^Cυ< C >"ZXiaNv2nV(<}O忉ѯmag|s;3T?b=G?xWկE>^|6aW$)$is9 /ٛx2xn?SIM;(b(!tvUOC˳0EQEo?hv/<ܴ[l~rs>WL>?i|iEo h6Ҽ;qSLF=֓i\%yh$I`]w2#(KZF7~ ڒe\۬yW5k__M䚥} [˗Ҙ=4bQvtNddV<*I7ݽ^Z/[3&5*#|Oо%xUNMWӯVZŒK?\5 zV>Xx gG:tj24mёzoj.n?5*3۫fy q=3w%xLi|7˧jWj<7B< v9xںWVN_/ϱ=>1xď|׀t[_~oKw77vaK&hÖXat*۰YNߵ~3=?ׅ~&_[F~w*Ctn݇ym[%8|o"OMkqVoa<3),+W ċeM6A{so?ŝ4WZ=w9OOhe楩M{}"L6wF9cYue+%w?io>*x1I3V3oI4 ^ZI2ܳ]ɑPd1lj|s9*yqsdmff7d9A7˷>7~ڠM&z5?no,A5_W|_/ukc<"ܖY$K;3Ff];,,)~zkZ{C7Zjouaij読lCs>+[-?fvn<(RvH9?!&mljys 8erN>Żjװ:_G'' IxWK2{[e-̑ѥ_5PR`}\W"?5/`%$w/~dgP|s{} l/΁}up;4H1i7ujm+=|>yEyo2I 4״φpSΐ<욀 吏ǸpO 7χvDzM_Q|Hd9|U}]r/X.^k?Eq?.#[Acۿ.mgvwcZuA9->ھe#H |ؔG|{*-ڳ;J+Wu}RoYx.TVz藗3o2ƌwA޲^Fi-t_%}Q6 .nzqOؽuZy^izM:Ec>7ؠM6zr7,@c1~㚹~⥾j<?m~eq0+s?`־{ef= uu/kfũ\ŧ|)㍤l>Q8坔k{>shgf$*EpzKᮝƚOjWWaӬWw suan!Wtc.GEA/񋯃߆^؛d5o c =>n{⵾/]|Fӿ]__D| N[ݎ1Kؽ5Z=G?)i_x*+*Akyqtg;)yQwg/<7|re/Y}kAy^tP˰yv 둏j >Geu^\dn9ľ_/'‹>|qe]AX}t;˛\m9LѡSt){g++=|W4qk45 #h6qy% ٷU Aۜ5%,<+zOE|Vw CS6#A-` vfGc\P] Uo{]sDog86 .޹9JXȿ43?h_f|cv2E_7/2'ۮ^k?_ _|`Ǟx _t4]VEy7 U&2~`J/ҿg~>0ֵ ARŸ.O~7w{(hkj#6G+P3~'-BMzDbM5QymzASV>kMJ#FG9ZmZo`yXtg_W|ܼ˾ug3ֿ߉j4yITya5K&m`&%lQ3E5\oix.陾5˛x`m19c7_?O?>Ϳ.y>9OJWm?"Vzy~G+c |7hW55%Ue&+%;w 1f,͚|w!3vu{ ׋t6I:(=ITp0 -ƗYx.Y~N0s晢Bv13Y? 
~0_i t/)&wqmMrwfzӸGc]/n~Ǭ^x&{[[ҢSu)8+0^u%{<>_5q .tMV;>-r)T-kKWV+^_|dO ^O}0]jW}Lk62G5+}VxO 剈yVLCH9Һlݞ_/I-o kZo;Ymk'X9Ld'( {/J~ß8Nƞ!,4G9 ef)Dᮟ'7t:zj|HC23<sO|߰ç^\ɦ<6Gdlڧؾ^kZ7 |!վAJ)׭VKTQ$o~Hm8p??S|4gXO|1o6c*s[E?wJV>gb!մ 뻉ZLM{N{U#ITԴφqCnkmJ/w#~8'`֞vz~gğ~:8ZοM<\-o*eXHF,+7և&$ֵ.wB鷦_7_m4ww\ăhǔaq^o^햺=<.^ CGk/*Njo~^'>ϧY*[ ٽSYsj긮|߭jψUq=G))gi+¼f V."Ÿim8??{{~]|E<]Ҳh77晢B;q6k^4V6w(Z5<"V-Ǘ kc}MkkoMQae6}3M%D c 5zc>l k;h5ݦѴRn&T5^_zxcEٹI#O e‘ x˜ GW|aoYxkm~E?. N $կ]|Dፄ& zbp|.D8lm܃#*}i;|7Y&՞_s' 8> [-CJҼNt;5;kU11c, S a1S I)Lzw,R -tĺ(x=I~OK/~ys. ג5rݵN1SAyRX>gAmfpq4H :-&WJ_/ϱaxnKQ>>)7NXO%16dk_21NJ$pO?v[ yf_/ǗtؾU+]/ګgZ %>~X߉\M:>30$M,C$>rF'|BS.>/M#Ki%a;!k@_-Og2/.nDss޲~|bH0~xdٯc &3h/]V{w'-4z~}#4wKK[_%K6t`јnΌX^cG'/jL̒I,qU7X$F U}?_ RP0a(h݃H~0Ow6YﮭWbpWH9?`־`yu?xytGuos~ӭ|^Ls|D}5%} ˘8"e8?wr \/c;ᦛ/4Cŋeber0ʧ E$$i-]ntO'IUNg魿?2h>>kִ?RLK7;ʌ] r(s(((w vO}g &U;eW9j՞I{wkg#VU3y'ҿ$lPw?gº5C@Э|^AD!a$xQ F&mVƀ<3 _ۛBĚZΝZғ^Tk*} JFNӯ|J>.x{1gX,o/'bc-LA)y|Ӵ2-=Z8ot\2c<#?t= ~`od_#T]#VI^K9{el~΅\<8Oh~ ]?~/J%ewgإG墠Pi\k׭u6LHգdKi TA=|'~z/~'xgƾ Ķŵ(fFFQEQEQEQEQEQE߶/π_msKW [ʑHeu 8U$Qc:OؓԿhZ֝qn4EZݣgcF & mAM{Myo{$ro59ץG}Jo.ho}.ǟ[rY][u>QE枀QEQEQEQEQEQE?3~Κ_F.KSU%HE&@y2RX{&Chgy O(X2yEya"q?:*tci^Wwk}ǟCn4Fo(O@(((((} Ku(]u;w$DEV> >7[m+ԫ{/eAӏ+]4ˡ཯JM6\v~EWzEPEPEPEPEPowo^6b]w#ɬV.dZwk_g_lD#x"ް*2 `n=kψd7b6%C˴g5Þ-K:Xn rn HӫU77mˎ*yҦҔJho|OgAc[,-^T8Vni:N 6V٢[ĩ*P09j5#4q̘yaZmY]ޕj tX.Ù^\}Y&򐿗˾p_?¿K}2U_.FH㜨Dc${bxO0ynm/.sְ 5E[9|E~ʬy,q߇Jj+YVzFBK7T:{/Ǎ&\.zg)NbrE*xOXC}f#f\1psڜu1p@K'NhäK'<=}I v=nQKw|[~'|(batnY%.7MsBñ`+ۿ!g۬w˷=q? 
} X},̌@q: Zp(/N->׽J5%V2\I5ַuU54L\zq׬A}3 W+;yjbL햹fA/9x!%Y$+ 9G+d\nN}̌Ht#un9v3rE*|*j`FA>W:{ 3da2I铞rE*h햹<_&OO܌9<Ùm=bI I ec{Sf'xkF tWr2-QT"g۬w˷=qmYvkU< sG+dx_KUK 20"P}+4]Lkz=qnp)lagv5|B~,h5Lc&܄wlwFl01Ϟ9թK NbwַݩFHjU,l^-QUb|O'>Z̗Z^+]9&CTx۶OY|GwGOVs[y0 qlu5nYy-wLy9>ߌJ:Qcg~$s2q4ѕ}{NI_2W;uq1_?wM5OwqͨMt0%9ZK6@"nk"ZdҲDqѣNIE7wwg<+khW2]dTFfUdWn2{ V' x ja ov_&?J #3W=mkEm#:3Oz J }>m <Ӯc7,|;|pѴe"!2w9~>~xW6/3RI\<)\3^EZaj?ʾyWz, .O-ͭiQm(v3Lh~.|Fׯ!{ibxF|d#t>[*M0*GWῃWEoiQy:En AEhE*|5ZN 0ףNϱFʄ^(gV_r<0KhoFiq l^3" #= '~.${PH:dG9D\E/k( }<~ ƥ?x9!͋~8E ԀjYP͎?ͤKqifTFi%:.0 l\hN֫/?E#g἞3xjms>?Oy.V ^+9?bqIoѢv/*/!Ļ6xBW=pq^EZM^_ryo|c-CZ i0q=#U*2Q~;mx7ޗȖuYK&w(<޾iTg!{]{T_ Foq p mBkA0'> o߰o웦Wk?h} :7Ll~Ω,Mqk%UOfVwթ*S_R> :=߄ǭh>c-[[03Ub0 _c~*^+}GĞ$<k:y{E fcwd` 57Z?$k2)B{30*} x?wh%7|}?_z%č\}}e67B0C}d o{Կ}K?f|3%v;wXbHR<o)2 ]σ:Aq4ԟYmmIl[ty7s,3F n9z߳wg? ot_x$|e ƗB LV7ď>$]~4Y QkشKMRU<%s.@$Z}w}˦#*F<$tV k~l O/ٌo8eMhoc|4iIƈʪ'8_o]{m|?OUOZ|WuamJI @B]cIvox9ߵoi7/ x7}⏋5x7M#m%ͼ]dj[S0/ 1U{Q[E}:-Ix{Rmx63X)oSJEv10K1V$~>+|?IX:dYUd2 c&WBxſ|Ro_ƉgFlϫhiGstW>[c.4#ھE[~|>[FZ_GG첁:jdȒ 0)jh"^ C%𷀵o+˭rA/.c5͵Qˬ ry/b߄t&ǃf%V)S#" XG]<|'㿌g|+;9< ե߈೻Mۃ:Ʃa&㏾?nOzLJ4;忷Z߇=r+ NHq{VGM/o^ ox]bm﯎.<edH=Gbg_?*/#<7fo8~Fs:|Ym؇+w"9gOy?n?>(3Bl^ k'g'lm;r^ eIWG׿w]G@t MўY,mJhҰi .0Tu Vi[k_ C5ӣ#6J>2 Wľ$YI;ͫ\nUt`r LWo O=A[_h |Ae֣iǿto5O~+Z-xzdؐyVqʬ.f+vٹ Qy>I}.YOäE=ݢiQndwλpJlqooï?G Cº`[E5導u$J xVh?] 2xJפmB=BySMR8,l2~>tze|º6ܫwzT:m?Di_O}.#c\aw0#gπ'L>Txرtm[ų.bKGhbXؘgiQό?Rƚ={ã_xVmdяWjڈB`2OVw@_r>Ck tc }ym SkW7̏0vr[2~_z |7kh]X*/!gd҄ۍ.z_H4o/zaz4t{8K3Y1C\$_ݟ-nK r-f>.ּ[~q1{.V6IlDԌQKOv$t2-rkU{ը~'g"oؼ5"s$kذ$v«H{ċU D<.% zw[|+ߍu㯆_$_x*mCv_)a p$ y^JFߖ/o/OK(ced6n ifmIM۾j>[C}3Y3;3G&v6j:7$K_ behg$vנI)5ƴj k}N1*0=k4/G_x}ލjzf2M%0e4ͼ5wڇ٨k ?PѼa|Y:nOh^!|C-S.^&$;>[}gGw?í_nCXg%t @A#wCO_xM5ˋCZP)--m;h#X%6 x 5~>\jSk_}!'xhb_OԅV4w*B QVG~? law "J>$3MVc?ßg:l ccc]u<צfm?-Z?Ⱦ~IZ7 ι+y2gGi B~4/rd'篥Q8##8=-/{>|FJƁ 5t7:ԖrE4pĬ4X,Ď^#[oྛx_#iğGq.' ;s%]KieUEE$>+/Scc_]m;'HԬg.kX\snGp2J"|j^7ռiV? 
|AhuO^ +FT2i5E%>le%Le©Q9=|G|JxR#ږV+XV)Gγ8=Ƨ֫[8 |Y|pg?xf0GyrZ$y_jH!WS#$Jd5?`?h>#x?¾ ];J%ԯ"!H\!bilϷW9wſτ~ז-4xg$o Dp㌚((({g'xSo}gU?lC=v|y>o=s2/ Ho|;㰓\ӖG;{$RJRXLkp wkT, xJƶh|DFΏ%s9g>x7||o=3E6 TZʾIe$l#T*Z\hn?)땽_nu,x[goְ%&YmO:|`daqCۯ[^/>1}MG,1>2& +{}ymYBٻi`:9C,[aPcsY:_2G&x+A~$|__ A#ePDUrX ?g/w~ W֝m8$vqG 0gYn'W7D4{KOg&XZOxww[^H^r&pQˌRkK'Rߵb7m]]LҗO!HKs=ݘsgK4x{}XDMH,PcF_>*=V⳦n?82?pߖc_pGOc;64'Ś7c:OF<;xѲ8CEcn'7 kO>8|@i;Eu|N,%\)GXnF uvmyƯ}ssjMB{TE`*_,R9|Z -5NF%c}d+yE4{KO޿Mg2s^]O7z9<:n\D\=s*ۼ(fzF.xwY4ߌW牴? *i2 {f<8ܱD4m>x^O~4MX(,6DɈ l6Ty i/~I-[ykM;K/xA*WX^99 տix4[N?/7v~7q-eZϞ6ZkjQOn.-U`Ъ_, K)`P`Oc]:K\=+Ğ?<\ֺL0Gybl4'Xh.0 |UK?^2udI IN8y/|r0y/OgQ^Sy W:v 13 ξ=ze_獧uo ԳɩT,, IT1.rH|U jUO2o1^.?Ey%h|qƯ5w }ZQkb*X d 2 |hta?/Z=*>35y>2mS-X}+lϔVܿ!q',!I /%];K-R  |݅FzG?/m?z?xWL|HK_M5m:O#ifBDHĢč\r>3~D_ /M`-NJ๒WT%p4ix щ,#Aw cXxZ3;Gs*ipAQXvZΧsmY*Y\ $g?dv8Z_رhvU܎Ϳp<ϊfտixi[KO4N`ā#wTߍdk|j%jMҾje1!*[*0@'_~7v~(57XWյkQ{Xغ[-J+%`汿h&ڃ5ߋg+_ihMa=ѓݓ#xm/,ŚXZ\0Ew;՚1+}zXF6?ukcVp.H;m~4I/-C,_0nNzۯ2@D8[IRvN|6_wjoQ.-Xh~z+;Gs*G_?Oط᮵^ U I#i޹7ڤ/ˌm$_?N߃8mco'%kmZVЃk%3%ŵv4Tt +>{{[RM7J3ݣ )1MAU9'j?7.aKk,t@$V?Y_#wh0uפ˯=o T?cx6k/a'^ho.'z$}BEzy$_ߍ\k|j^%MҾa CWvT`ZQ|\~0u&DZ +0FѮI[b~czMdZ h$Gh{ F.("V28*Q^{c?@-$kE6׏ib]4#a[x!NjK1<>@t3''/\|ig4{K__;C >h:?goR>>GV^cҲMGQT@U^k ;"'XXY >(K wn|'iK6ngW۶7SjP*m8*$~/_d~.4+cWT&xgA[rBӭLM{Ou;cs5ȿ|AQ/<1k7EٖQ$R;;G8T0 AY> w{^+]: "H:٭]Eox9 ߍo^7mAjMҾБ*` _/ab%wdu*0uK?͇nVV -kLx 7Uwi% ;,wg& OT֦ԼMv<맺KěU LF0B'/*z>NզE=ݜLY?z-5NKO 2|hPOXPb6cٗWοbƉzO1)reK)+`ڠ6M}/Gbxg3w;oo\?ý^]Чoomo-続wi^Iq+8 Oqέ|OlĺVejZXUfa^ ]FMb7K(ΟKJlUmbNN U|ku.?6i./MҌJ#(0@(Nxa?/m?c?KM\փ-!57gͧ6S,8\H(Y̺v0$*F 9`K_RxT⟋Ohc="7ѷx +3f~}~nqG_~]osGAW^$Fk٬+YRHVX"Д9@zי&_9C熛/~- ۋSPN_cۣg9XqFz_P<6ŧjz]0Onf) B.]meN*/σ[?fy&g[7s0… {^vAw i{ݺ,eK)|`a 7HlJt3)2 7BwIh0uKa?A5qg$گm.?whi6vOv>bcV\=~,vӴKXl@,wyaf`3p*KƗ^Ҭk{ &{Ӵ6ȦԢT98)~ OښTbim?s:mI#oѷ̡ف=AQaum?z>^ .'R0土ڷk7 `4`b]T u_puo?鯤67Wq[5Eef3Ig7Mu[ruA_hha|evO\qZRxU?VuE6lt^:űv?+G_i#?ϡ,RIV䮠.|\yqsN|kkMGOG//&X5Ӵ5P>H0G9~xNPV\=䖰Z31[2y+gQxy^K7 uk6}ᳰ{gN%2g 3mo4^/.IT{) E~^oįD<'{W<#imb[KF$yQw$l#. 
za|Aa" CKԬ.nmfYd ᗆx*~4o wKV[=/I3aQ 31X@Q@Q@Q@Q@|_A>;|dEω.f|Vlmuk0h(X = afHh;NS?߿pžW?'7Ï'ZYZ>%'LB,I=Mp??3XJ9]YEI53WtJ鵲>~9 {9G;NSOkZ~4c_"&uY}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkQAC5 -?kw_Y}ϻ?߿pžQsSriE?3X&u/ܿ)߇?8O}+?zƟQkWCoG.iP]"mo\Kj9[))Y}-^;B|H$3> ,x=O-x#ӎ۝(2(QO@y$ʊK7?Q[^o/Aֵϱ{Xsж;zOBƥat3NuE12"x*-أE`ǃ~oPGg%ԑ@U dM;p\Ehx:54kK4F omd 2 q%Pn*,+ m`sE8n3xOhq7[xw$HK Q^83 waZ9+Ip!Ln(#DiLe^v>g1EZѴ[}ۈ෶f=UA,}GW]>;[5bf˝c;1+Q['ķ.{P4j[m+oT `TSS<+M6Ts֙42ݰDj ƒT-_stV<Uj-'^RmpeoN(QEQEQEQEU_Sezu5/s,v3G G% .{-?gڮ#+mjo`08E{h+/ڕ^qrlnsd u4x#{[+Bc$pfIS$Ȣ ist (B߂~'[.oϥ4֓JC$s\(nH "? |Io TDOy۞<ɚJf`A8hP ²!,k**H *yUO+7CWuOk-kZmGl$"ӳ *CVѵF{!ck3F\STh 5QEm;]j}6i_$9=XxqLJ(B5^%_zw )|ǯnzt>m_Qa5iUQIةBI٢+c CkmXM VQYI wqR ?(hzo ?O?tc\3R%˫yʨ~^࿈^xAvZ.+Go+]4vyؠ;KH8/ß?Ɵ 4 4GKyZudFY QJW zeI=Ėp]L7|Ge'{d9goS~_w̡f֛ikhY{{y٣w6ؚg5zz4k4LqUbO ~+|5#6Xռ'%eȖ)"00R˒zm3 [\#^!l-x@_vvdG_Cznd<{hi",m™d0o={ARWV BJv墠E_ݦF8]>6zȼ6 ԡJt[88lfT۷O#=/c4 vOLTh:W.)lxǕ q^IICZOR5_Z)[.c?j-pwA]񷉾'i3#CoM>'G6Wl; _sν^:yfwId [R1,taRRzWtk[-OlcBc_xkV휶 ܈%fve 8\k[_jVxwC$GhF$@$=ZRl\ehթ]B(P(((({J|&ǚ}ׇlxLD5{m9n4f*w sTe~ řE-wt2E0#\W> ֭VxaNڤ I%zs+?i?^$kO]CizRh#AJ ncї3տ/!ƛODQjZzﭒ__ ?gUou,@ w GұK:|I}\MxuO(ʅTDw$=sY~xOZR%ǔy#)F zP;OVyRa}蒽n;3| % _|A-徛\_iSDgfFwpǰ"KË=w߳|EIŚc|QI':UL\1m&wg75i56 U',#5T\I =s[#x–z⋻=/MX!cdbvBѹ26]Hl3r7ۿdcZRitOfѷw^*O >+.söڌPژ9V8+K@W?/-O:g_n%xSihձo&XoZa3FH+ڇkMny3MpKyiԜmYڳ h׈u3^m`6 +wGıN3O^AIǥ{5{}ӡ~8>0#+x'kv:W\=+n#1a)m,1Ƿ@h 0-o5V,5#C\-\C  ]6ߚK&ԼOkSjI5IQmȍTZW_~̺[ ! 
[ yP |bҚC>kFvkSڇKoğtZkaחP'mC#E(B&sǃ͟ ߉ƫjZZ@"YEhlZ =0?~(UsH/|?˫Bm< mnSx8O#ſ |;{xWh5FAw9S-,k7Ͷ6x_HՓD"^OSuƼFo9<חz|D;L3F iP0(6w}dBPqPZmO-^OU}zӳ6pa7$yw=<'i~,՞e2OX!u-Amۘ۴7G OEk(|W_g-i_K$in.qdj_MiY֨r5tߥ${~|@? >+ƞoViL2z!ps ᔌbpOԺF@u-5S}cꭺYwLaܒmS^ִƷAPjR.R9 ##Hѿj6Y-C o! M#!RxیT%v._a ΢['?G% ºU'f/_G[:+/ ?&4/|.oZ/k(igsy=nY2ƙ%@C\sӚέ6%έoZZo*A#*"\.UB1a|1V֦m2s )wL,d\:<~v˯}DA?uhދGeͦ_烼7kK◎~/ .nkH͸hV5n\ W9 'Wl[|?ů5nY1ϚFR7b^G[5R;+)UhJI)9rjt?0$'J߳y[|'o^ݞQ+_}L 98umtwtLvv4mv KLPLIS) $bcvx\|*𕿆t?^jFkP=ԅYAڧoίMpַEoAдcsg^1B$VnSQ^wZ%6Zë/ ?ÿk>4o],sOX."@!0fy `MM4X6fSmG.:iȉcCv"gR9PhʓP_x=]u V[׵T1WUF8tev-4#ԕ%Ӣߕww_xⶏk'2xiP${ڠ҉ܒbOUݕb 8N xPߊu kVkSV{ULҹ62O@~5M| տWo_G/ß[#_ɢ95^poO NCA%{x '!K Cj(>g?{_LWRtok((((((((+هKoI!+هKoI!oN/'bCݑW (ry: ?Ïq5?1S|tyCu9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u9ry:u~~ho~:+(Es`+kͿS87Tzx '!K wc%I^ !Q@3K_ӟ+q)ksQEQEQEQEQEQEQEQE?%?%j 1u}^]Ls$t2I!̤ƫžo ~U_QN~7_kuE}^#s?7ƽyl.}o'⸦*Ӥߕ{gnw)Ч_[žo ~U_?w?q[hӹW/?SS ?Ox7? ;4io.__j~žo ~UG)Ч_[s?7Ə;4e!S OS O¿*(=+"s?7Ə??}] {)WVEBUoW?/?O|/?kOx7? S QӹG?[υw)Ч_[žo ~U_?w?q[hӹG\u?=+"S O¿*+GN~n?w?q[h˗Cn>ڟBUoQ {)WVE~(ioN~n?rSS ?Ox7? ;4io.__j~žo ~UG)Ч_[s?7Ə;4e!S OS O¿*(=+"s?7Ə??}] {)WVEBUoW?/?O|/?kOx7? S QӹG?[υw)Ч_[žo ~U_?w?q[hӹG\u?=+"S O¿*+GN~n?w?q[h˗Cn>ڟBUoQ {)WVE~(ioN~n?rSS ?Ox7? ;4io.__j~žo ~UG)Ч_[s?7Ə;4e!S OS O¿*(=+"s?7Ə??}] {)WVEBUoW?/?O|/?kOx7? S QӹG?[υw)Ч_[žo ~U_?w?q[hӹG\u?=+"S O¿*+GN~n?w?q[h˗Cn>ڟBUoQ {)WVE~(ioN~n?rSS ?Ox7? ;4io.__j~žo ~UG)Ч_[s?7Ə;4e!S OS O¿*(=+"s?7Ə??}] {)WVEBUoW?/?O|/?kOx7? S QӹG?[υw)Ч_[žo ~U_?w?q[hӹG\u?=+"S O¿*+GN~n?w?q[h˗Cn>ڟBUoWAggi0¡#$ WKM$Yb́s0@Lf тKGZTOw=wc%I^^;B|H$8Sj(>g?{_LWRtok((((((((+هKoI!+هKoI!ϟ(Ian-?#\nRXRڽS\¿5?k>iev&(9Ep8fFsW߿S}4&Ǟԡ#I5(oK;$JD!Ut5{sGSSҼh>=5LVo2IU f2>`;c4J]loG]PO^]gþ&$rAE2pW d3NT5fQEQQEQEQEQEQ]8}y17;G eT^縬 *ԼO^$by[fYJb@8`s0=jy{Zmz*V-㙢Z4s@Y%`( @$x革?dCǖ>мakWWS[9aGyc@ =ʎWQWQ3,-˓fm;3鞙,讫G[5 M<PF ѫfJI(5tT(I[j-K&H #=i)dPsڟodtR(^ⴅcA} ڣ{ ,p_ s+ٴ%α{sBFHbPHA=yO20KӔ\̛r%'EI-CC,q;! 
COk-{X%rTduZfdtWU~.յ xsMu9m徔,vFr3,GjԾIGO״fNW9võD{[(3Y{[,%X5t; 5qyg2=,1ôgb\u$ >;|.'}c}; a BI cnjdϢvGEu|'n!ԯ#j$<+}P.<.O;v/iݟLuϵjU)ph23Ğv'?ʌ$T/rsڹӮ$[{n2[o1{ӽTmooRAi5e"瓎;#]O1x6y&CN`cuy4$cq ┤tn5RKg4 I Ѭ2= )PrQۯzfz^3$ |f}k֐ k9efB2(),7:7_7luԵbGR 4$z $Ʋ8ˑt:+W- xATS[j2CJo0\Ryҵ$|᾿M"msL<;a}O͙v\`잔s!^vn;_q^}Mx_|iZW^Z\T@Gv. +c۞KoxRk}/,Fw<\*DFBJV-]W4_,]7npQEhr~:?| ^T'_N2״U5yyϮhNCA%{x '!K |Z(89b ( ( ( ( ( ( ( ( afHkafHh 7'Xkkۿz~:%F(/g2|)~lChz+ax%+[ųs͸V?2ZꚖt4K[>- Ӡᑗ;]`WCuhJ^rOo4XJQM=vKi{}/& n{],. %P:a$9nWuV;:5%ŭ핺F֦p<&i#bZ<# k:Mëvs)rrIk{6뽶iE;t}R>_z6oP^ň!aXԫ |gC|!xW_ xƗ~%[(G|$_m=( ?*_W=h5^}4ջo}j VԴk?~$Q:-q[6,>x#FQZBvbxS<43x{W.m~I}Aw{WcЄ3|d=/+_潟gEk(kψ>&"35k[mWVf xIg2F3ŃESz?#i{h~$xcPde{H%DˎY`@Tn"%1 zQcUk{>Qk < aUsxvX[Dm|`Πf~5o36_ٻӤq]xZ5f[v޳.\&q SbF>mtR՝s cԓ]-ݷ]lo | h~u뺌t;@"s"{ -kN<9q5ZV]{[Kl ( ( ( ( (>C/ ;y>"ux?s--Q$VpI:WU?i~cKO|]kYn#Mlh H_Ms_k=8)ASp+o?>{IgYxkw+sf*J78p/Z?,O_ xW^+uk๲[T$SNw|¼(r:STVLҌUk%gmv<>~?tX7g]4%1(Dev0W_chGvYq>y{Y%5};#y|%k! I'e'y(V'RM}[vKC?R_&?ݾgg7xWidž"Z.R6xStɯhد/ }>\u%S"}XkhW $-5Կ)BJ,յ=^.Mr0[C>c#wxE&o[yS9Qi{m=}avQY۫uoZr8t,^靽^?[|~+[VP4?.dV٪9 mqp}kxVƩt.DcX<@՘y$I5j}{_cbK>[j:Cocq:ͥ'6.m(b kǬj_I?|3t춖vi=̀7Rǒ\J g~>ռ ew"?X6|Dj-2FWqE\2:*#Z=umF3E7grv{]koV{6>oIMZvu|O-k>'çJ)-mRd.|vJ⨦qs[{fԴimM]fX(p; ܡnÏ:ʍǥJZ#Yg ˗vnO[sv}{W_v^tREup @F$c8w{ O"Ě|]ZgiR~ U֨lf7gvWa@((aQ,5iӱ4gVv릚k5x>uxHK[Kěk?*3Yc/Bk_|{ #<}kW2 ? A<#ozuŷXFp* {5qjc{YvM-}{LYK]{R.m&M+Q5K}%͝+x]vu +_om5|;!vԖKCĊP1p:?*zyTzikz>̗o~ X$|jYlk`ZI!!oAݑy |Xj>%omSڌtfF/8RFb:WEAl}«rhjeW}ϗfdNNI{mO6Ngk/-O?gA=|?o:>$-0=ߘF H w n~;xo|^Դ}Mesisk>Pyz098J)@?ߧjOEߴmߙhC5Ң Ԝ-Ho>^{W]qA+xz'aFB'?5AmxIkW_l(.#Xc54UPUQj!AFUV/-ow+R+Eԭ4AR,UEr0r_SEz0'~&k߁Q2}G>r_SEz0'~&k߁Q2}G>r_SEz0'~&k߁Q2}G>r_SEz0'~&k߁Q2}G>¾f?b/#[_ڝ2ZW"-:m^hf1~E繵lYFK)B*S$_XsvQݳ7`Zwh)4 {~dG~Qǘd1Wp}̨էj3p[6~rxYT$'%}3q)ks|?-N x/ZaյWx_*C|sҀ;+?GKOv~ %h4voZGj+ds'#Rq]~'|&56R̲CuM0 4hF|/&_YZ-k$?3n2{?)gź日xCԵ-:Pi5ΊĬrrF'74ZuMWý7Lpݡu(PK9AZ,|3}jͽF #d̬H4\_?h~%k |sk ڦcjpVt̪w@<((f,vo$f,vo$4+? 
~OċjēIn,瘢sς92WU k .DX4e]ts`&JgR~¾  5UЭ@6+I&48$9HF#b_|I1~ƭϷ6^Т̚k0젒VuG fuoz=÷ƛk+:[[gh'sbD@\|>~Ӟ)]u wKk }NPoE6ieG'&?77;ٳY_\7Os='M?V;wV3Z8#[ynH'#~mqNOգ0HS!vGp1/g/C:Npt-bNŔDѧ+"Ɇ7vOU@~Ѿ+|,?f񾃢:xu|ăDn, x;<фfi#ϟ/75 h:Ɠhgr?i7׫iW HI89\~ W_|#-{GM36XT!s fv &?4>2~ĚSxFOZ]ᯇዏ|>fLYlgi`TGV,ŐcgؗNS? }9-kkE+/>Ԓx/~ZZ<x/R'[+MK rqE3!k((g|vٿbg|vٿS 'ׅ|_DT?utp3Q]g߂~ uᱎG%/P<Ҳfns ?@&bR [$f%!Sq+ITH槅;=ٽ"u;Y{^t.i#Ax-FFE|B$q]?e |_j:ֵ궺5:ljw7*"4Q’X`mh56Z_{_>{Y|cW~ ,|M&Y?і}N_K#YVvvy_W%Pxo4K{i"EpV[%5nj/ 5SkvNomc1̾be2+>#8SKBV[_o<>߳`o|+gy6 3XÌ˭ˢڱ'IU`*ǟ ~|XpYOVV[ɤUuc*|JhUPSrI_Nxu, 8\n5 9mӐ}?8: }'nQ^owυz֥Ǎm/R X[т~$qƁ_?h /^ 7vj7+HVe"UV@Y֊ij/;ٯn|Evt !CмeỻPZwY7˵8C/pHA1X~m{6G9}=9ُOMuifS]@3e/yg ӞU MX:v}ǟ^PzAo |Er"mV-]c戌Fr;s^O>-[_5 'KvRp c'#R22z:Qwzw9+YdV>9xG񗉴 >6f?*uC'qq^]+9ҽvs8z+x2^MOӥ ^]5ϖN#h4xٯt{BQpP\]˹Œ]Qfj^XZM< g_~4OmVS& % ^jܢb'>\sN2X #9z>0Ѯl&Y_d!AO@j^̉a_OgE}!{7þn=*zW(R,V my¿x|]iC%>wcyݴv|,,!Aj#Z s#j К֭w+tTsyO h^O-gMAeG{eM"A!rr}N+Kۦ7Ƌ[ã?YjPD3,R#- 6y=o`WtO#(ؗOB~6Nڶx)u{p,/ـB>E?t}3Gjz|30wð!ɺcqJ.,y2F} }7_3K_ӟ+8B(g$gZ̒ φ|7*lKn\Ò:?1Ae[_ ğ /^jڰk=*Agß^nQ.tZϴQ"k+ÀB-]`/s{ ͬ7f-[iaKD5v*h_ne~$q xx%.$Oc }ާKO,4i˭C7&x_#be(<Ѱ.׿kgox?Zt|j/oI> $Ovc9@f7<k#*h6[0{P!Yc q*)EH_NOxQy=1ԋdx2B'|\6{oIOQnI&潼uRvyW Via^@Q@Q@Q@Q@|0`c$5|0`c$4CO7\ 'Jھc_;WEf3־ ן]^\,{=V%!XЇ&A/=k nI[VKJ(rK,j͈rq(eOSq7>m}~ϚO6klփsqj0>fcGxuj>$ԬAe⑤M&脂Uܑ.faʃ):7wm8b[n=?ߌEÍ&Ont&Sky&?1B'hQMCGV'ixO6Ϋkh %Lyd22@Z(TRٿ,t(צhK}ITx6_h:uk|~ evjK4Wo2IJIV Њw_MLƬӌgnψ?-]C^O5k.<#D!PdcWA>g3h} xt-3A^Ia,ed߆v;x^ig?ȟɩ]&޷ѭ<A~$q^wjwڭ!GX6;zϴQN5deNm(((((ۼ;Ih ?g _IOKϯkmYkvb)$y ~?&x]hzoŝ#MtZq$$4;$'_2QX;KKhnɯ]Sw}r_-[WP/Gil&y^R /#r?7]]iSyڗyfcX #h%N+˨1Z"X\MmI-m;ڋ>u/j[uK&)Y V]" t9r 94߃zm{N>nӠu/HprfpÜ+){kR:'tkuE~g|YեI[Y̺~,ֵBTB~Ilף|a_ f-<(i`dh.^yfRTrq_1G_paQsh%kۗg}O|k-z*K+뛨܃m匒z|Ċi+ZS9ϝon{~8´~oK/wf|ݷ<ix/<]irQ)KpFxc4_dj}m=cZ(:%4f>o xu8™1{f qC;v ?K9Zm7Dӣ>MH9i$JVՒi[E~~̾zm]C1\,coK$ωg=64:徛xem;NFl ʿg @jnhS߽롯ק6i+o~{>"h>-c`&iT`Hb<J[WBFg/ąf🄭ϊ$7c3$r"Lz_5R&#YfSUN˙kSIխ,.'[帔t^7zMw?~񕞃%iOPq ̍B@edU-j1_ ;}ۼ1D{7n+_2SxJ-[ri]mo߯7 ?ENjq⋛},[=ޖ֐%=BXkM4? 
}74oPXpH$O~w-?:</i-O4)No4B#xd qoľ烾&|@}:umk'ʵs\ ɪ]Cf0cME8>fq;zS(M.VnǹxIs_5cA>kwoXyblmV)CG/ U5oKaqq#xC۲HU>VTO''Ҽ=rYmkweʺ=3xkfoE?1~ ֱi~F[z`uR 4#*ּ~ryӍ$"jc%8%o;[~Z6.+FnNќuPѯ4n,&<1GܜVui5E%RTf:_CWm~,t҆?5 3Chwc%I^^;B|H$?J>(q)ks|?-N((((((((g|vٿbg|vٿS 'ׅ|__WI?i._ז@֧L@So1z8y9[7P}}B4KTGG湗T &֫g9[7P}oCGϿ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'E}V?G9[7P}Ͽ?|'__K?3Qw:_4ect`|$s` \9*J\ww=x\S^\n]= wc%I^^;B|H$j(>g?{_LWRtok((((((((+هKoI!+هKoI!9९3g^]kvNO}q(s^os"c_;Wa4%N2qե2U)¥KESqD3G>;?f/Y3G7N`4n?k(/?L~~#o}M?v'0f?1SqD3G>;?f?/d>9gsh?0Y3G7N`4n?k(/?L~~#o}M?v'0f?1SqD3G>;?f?/d>9gsh?0Y3G7N`4n?k(/?L~~#o}M?v'0f?1SqD3G>;?f?/d>9gsh?0Y3G7N`4n?k(/?L~~#o}M?v'0f?1SqD3G>;?f?/d>9gsh?0Y3G7N`4n?k(/?L~~#o}M?v'0f?1SqD3G>;?f?/d>9gsh?0Y3G7N`4n?k(/?L~~#o}M?v'0f?1SqD3G>;?f?/d>9gsh?0Y3G7N`4n?k(/?L~~#o}M?v'0f?1SqD3G>;?f?/d>9gsh?0Y3G7N`4n?k(/?L~~#o}M?v'0f?1SqD3G>;?f?/d>9gǟi_oYk_?)W =Zj/%_PØaiSg38o8NyrZvG㿴'?/J*NCA%xKQE89bg?{_LPEPEPEPEPEPEPEPEP_3?X>;I }1_3?X>;I |)P?IҾvyxX:W_~G㙿_?(1ֽ#_Ikkc$Q]B&OX'׃m GxIKm_ZAa9@8\|,F{JRFtR=k]OȢ>#;`}6-$npx!.,GTX ;Bؕq>韲O __i7&ҵ#ZJסm> 8$j4Z. .g4Hgr_^|&UCMIZi|&ѧ 2xUXncN@?_E/Pխ~um5=^jA*41+( w:Ğ'].#o3Nuk[-+χPi?h9 k8[k9HA8+kS~;|WG xWU }JPecX+IyYxXzdG-(ޞ[]msp("a6⯎m&MI&ҚGeբm d9&_5^iMuo3(L?g mXk?i0 ;2%x9Ve :W֑8Q)Mj(3 ( ( ( (qFksVWƞ$xJ5[? 
nlZ@A@;Z(Bn%f]>-4=R}W:gmu,*Ħ|lFTIq+l}?~W=tR,_} p}7z6LzNs.6F{R27rx|EXkeMvkLND"Q#Œc,<*}KTrI׷MzϔnP'ֽߊ_=Ν} zl# 7gU]6lx|3kĚEOK-%."[STBB*hfI8fIIp2IS{>1^:g$MԚΧr\6~X.3ط>2j2@xM`{Mԣ,=y+;\TڟOkYlWW_dRǚ&MWR_4yhDde yȲ@{1;_hPڷoOJ՚--_MC:$lg7Sٳ2+=@lw PIl|o*U_wOOCx7S& jo, o/<J5_puZ}y ՅŌVMtd $eVe2 9_UH״4Cs 7i v${%\}]Otm8vcϨi$+;nP(`B6BܠdPue^su^WgS^lͼ2,ealAA{C?|z:x>izRKi%6q,)LȪ[cau(uM+.+:ij N-gg!ϴ<J84Ԫeh:پdڽcsE}+f{?<7mqu=K{ZlӉaYFN Th}gZ>?xRV%FJ\4rJ mÁ?A6@Ymi(k{~~(ަ5 M 2Z/c;x7OR6>l}+N[Au QZEP 8 g WŚ|BSl z^R>8TF=<$|CFj|OpuLw pZE+ @q]G ix⤚ѱŷ8,nl4y.BnF/,0S/a'{kͦO_P>X5/#3KF_Addkx7ſw˩x֚t?ޚc"keRؓT4Do_<@WWbZ6Ќl-n.3A+ xe{v_-)Y}:;w{?/+¿Tf:_CWm~,t҆75 3Chwc%I^^;B|H$?J>(q)ks|?-N((((((((g|vٿbg|vٿS 'ׅ|_DT?utp3"ï ռ;c x.,.neh ,b9b!vR:q[>+~;V6%o{si[F6;~Rpǿ5U84quT/Ou=/F/G"[Gp0\c#yw>b1X7> jWI;h̒Hdɸ4'?1(T`HcIZO_vhV JUiu9P+ !?w_ N5z7nowqZG"F$kg6)c*\ݽv#|a^ρtojukM0dcV퇨x~0Ş|E[i,q=h$C qܧZ(QN X˯]]/ :_#Ft<VD:hbL;$TV?\k9<#"_ E}q\ i7x x{_8EY?zmo#&k5/ X|\ϥ[ɩqq,11fd!l\N+be&+t[mJώ)tiφ{K {nY%f;t_qZ$ SHxԾ;>%xV|x-. #up%0# 0O39khO Wt%I74Vf+f,G8?5⿉xgLJA=L*F@<\JR"޺wG|H5NUbRFVn3Y|wmvk6[X\j5&wL *I8y{8CeλAMKx=sLf; r^H!OAX5ֵ3j u gRYsy3K!`0aYTQʯr%Zn<^>]|',+9RO)OF8t;[U߈>gec~o8W@x)M&|J*}z}jۋZvV=톿<1k8i:)F=By%DI X'? 
黮'h>Gj8j=2{%y#[@wc K4R0J>YNNeOKumu^ sA+ikok<9n!Y("ps^/Ok^ u?ǐ_XOu4k ٠ThL2 6DZS8wmHIE輗$t/mkjKGYdLd[9o/zc`Wͷj<)x@#ՠ٬3xfVL+!9tQk,mVԯM[lc5F2U=VegamhimY,ThdC&\7#W^//+1f(gJۼ4͉x?xM;B>tE0 <2*g4K*|ryEy{8r/%]hzgilER\|7́y5 I=t?5[4Mڧo]͵ayr3Zz)qN Xվ(xwP$fڍiyp[&[,uf?0X5 2EyF+4c('Ϣ[άܗ䂿Tf:_CWm~,t҆?5 3Chwc%I^^;B|H$?J>(q)ks|?-N((((((((g|vٿbg|vٿS 'ׅ|__c|]&o z$2\gTPB:kov9(SQi輿,w b'Z2~+B= .#֟Ի?O;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/E~÷ Џo ;G;0kR;WG;0÷ Џo ;G.0S/__K?3[W{]/ tѝGg ]1ĒI&7 "絑p#u5fu.x '!K wc%I^QQE|?-NQEQEQEQEQEQEQEQEW;7C_LW;7C@EPEPEPEPEPEPEPEPEPEPEPEPEPEPEP^#W>G1&i@Й"QrRw3㑃J(ڏ8W*M:d"̙|~ǩ@੿ ?gO1$ƥ[>JW cd+|NAVHV#cWxw@4WhrxOR兯Ft T*̃(2w?coxK1M vZW˰C?tsO/~/|#u='C'{M/tf 5-&H6$ܟE oM߇fu;}ۯ5u&ʳ[o2 :3 ċ?Y4<9q]g\Ɨh mGpGLN?5>8>*t:{WWПrd (j(>g?{_LWwZoͳ޾((((((((Iџ7dQi*lu+Hc1#LbGݜ+j(h@|e.h@|e.>iD_9D_9j(h@|e.h@|e.>iD_9D_9j(h@|e.h@|e.>iD_9D_9j(h@|e.h@|e.>iD_9D_9j(h@|e.h@|e.>iD_9D_9j(h@|e.h@|e.>iD_9D_9j(h@|e.h@|e.>iD_9D_9j(h@|e.h@|e.>iD_9D_9j(h@|e.h@|e.>ix6]S0xByr%\foVq5O4 >2͇G4 >2͇_KQ@4@|L/t@|L/uO4 >2͇G4 >2͇_KQ@4@|L/t@|L/uO4 >2͇G4 >2͇_KQ@4@|L/t@|L/uO4 >2͇G4 >2͇_KQ@4@|L/t@|L/uO4 >2͇G4 >2͇_KQ@4@|L/t@|L/uO4 >2͇G4 >2͇_KQ@4@|L/t@|L/uO4 >2͇G4 >2͇_KQ@4@|L/t@|L/uO4 >2͇G4 >2͇_KQ@4@|L/t@|L/uO4 >2͇Xn~"|n>_|+#GVm&cڼ`;Wo~'xoמ/=CrFuԵ:sN E{ͮ|9OC$dERZ>_Jj-eimw7O =Lo-{1N ,O.}6MNQC'g.:;?]<#wt~xY!xY!Zo=U π,{π Z/ yJlQC&ِҳ/dk\~k[&$FdKVV4T2q~bJS1V ~H~|=d@H|`< 8H{ϟW 6vFٚ[$=;:hT6EfSy0 S [~1lkt|ߺ3]o=m)Ս[P>v޳;+SZ&j|=g3XG>ma2t4M@ |=`'砀'89]oWeSX9Ն6Ɨegz5*%roWdp3z  Ls3o<kEmg6} gC>poySJkl^ zpo@H <xQoPտppo< Lմ鞹MuǞMbb=`6:oOG>o k65\<XrqET_4|p2hϦd_S^@+ڪ4|su>I>Mw7T7>}3&]\%7.blu-o}3&әK-O٢ 
A&GR,yi}SYjxK-O>e`s^\u8_P@}}CI4ic1TkT˯_P[15J7aj6Swzn|>b{`w8_P@}}oEIT8_P@}}C.aZגń73ljثkiﯨp-ا1jl(aZ`{[_>}'}}CYp\IPf9bﯨp ;;4 8#9@P37 !"26`׺J,bts4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s9=EDvr=ƞ<\\3~TNt7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~ّxOCW'烉p'~@h@\3~9CIߵ0&9nx8 ߎp~/[pT3+T6\\RѪX?R0rK E2+m뫬`[UO*Yoăj]-{z8vi^8h%V9Z[ Eϗ)Ɇ ߵ0&9nx8 ߎp~/TwPqZegtNi'jGžD2I⹉]P3cfVpNZ,m-LTM)i)wvΔMNiM9N_U|5T]%>E4 ~?oU[yH T%"5|Z٩1jȧ5GwD ?[${7?娚 ]Ξ|᫮X7sV"'HxմM([ou ϻ*L|qD]KF4Iԋq{pdVֿT!"ۍ+}rT-SRWFM9N~{P=P߼&, Hg(Ϊ*ڈM<H o8?~;G-=N4sĀpfsn">JH{~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j"nͧI|dSsĀpfK/KG=QTz94|揜>sGh94|TzG=QTzG=QTzG=QTzG=QꅯY@\3~9N*SyZj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+JU_iE3knx8 ߎi^} L9nx8 ߎi^D1pxq?0@\3~9x\m@Z*΄ȼ̛~?n  v`eq=R5'SZ#ᷔ%#5lF5 ^U9Marm|)>D7V^ZT M2Zd4EYmF [;n:ߌTħI[L=+vDUD{5r춽=-+sX7qJD\6]9LT\ڥxu4ʹMx)CID*i%hlXRQojHINJUM"S[,D -=+o$۪XӸ*^)(.%BrӦ,B&C 25!ۃ!LLn*s%+d'k'I3nY=ǘyh]Zjx3\.&iiN ^Gj}%j|MJn n$?Iw5(IWEYXg>kla| +H? 
^CkDU1/hmSjF."UOo+ʴTchS&V[|9=krNKNrz󣇀 ĞV1.JIYYk2H!˘Ij:bTl3OiBC߅xF9nx8 ߋ[oH GOm (_xmuo"P0Չ2¤Fls&6Z+yq6N[0,R*j.8~ ۟H_sy_֦zS=B֢"TC\T<uSJDkI-rs?fx ;-V208oѴi+KY=2#4TbWMPȿ[/ݪ;MަѥU]iw+J]JY׀c烉p ڢDBX$>f3Laf1:ij FK)Y] *5Lbj=+N(oC_4c)rCFXoPIUko;4RIϧ(?O͊" uyLFu'&јM.(QƒGQC2}%Tsi)E<$Pp׀c烉p u-=~+~g^[${7k!{b*0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ D4ܷ<H oÚ4h [#-ze2ށo@F[#-ze2ށo@F[#-ze2ށo@F[#-ze2ށo@F[#-ze2ށo@F[#-ze2ށo@F[#-z8N3烉p u-=~+~g^[${76Vb#KA=[;rːW5;ՙ)V6#Q/FomƯ3F2f* KVV?+x51IT(eѨ}:5^d!^|:9S{b-dДbJȽdoue$ݨ z &@c +Q,Ȋ*KIꔑM g^[${76VQ4EѩyTʕKB\YZzma>můT7LKPv^~dQ'9%͝ѩhP*mNR1h̨DhrIQ@hܡn%NH::~olTY-s ΔگVv|$c-CJB^B"KXPe2Tg*˺\}+Y+q7l2 Stz8220QFrD $S}2k9SK(uDR´>傆JT} a|>Ayxr:"ĉjut7j,~un)>.L^(*VkWjμ#<H olxF9nx8 ߋ[[:rq .q-}HZ؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.]kueE^[${7Me0J&JL)Ҥ;O.?9QK>kb9nx8 $5(ʹTP9tG=h۩?6-#ӕ5Os lcj895 v$M7+lw9tGٟi2t2̒U=&ڥkjW/v^dSzR…KMv<za5Y@\38q-—LV[S3)͵W c*VK6g-_s-#s3m^ODu-r4H[ #gkmD\-ġ+EwɊɪY{1˷'e3d/& 9Ao/.Zuujak_sĀ0ÓdynGdynGdynGdynGdynGdynGdynGdynGdynGdynGdynGdynGdynG*9 zT)㥲$5SdC? C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C!-2UV'<߸W/+)9 Rm-mdĔmo;joYl-eƽtu#Y])/C2J1 MBŜP+vL<z[%mi%uP%9./Uȵ7M}X m^>K%R !"1ATU2Qaq#@BPR $3brC`s%4c ?bhPj frD^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нDΦLm! 
F9oX틄lB8i& ̠$ N7syQVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UՊUVʿV*W[*Xe_lb~UՊUVʿV*W[*Xe_lb~UՊUVʿV*W[)yOՁST@1>!f#QJ1>!f#QJ1>!f#QJda3z"b\7C!s'ɒX127 7Uq5 3KN6Gdw%aŏhd`rs.>AFɪXɜQe3W 5E25H1pn%irVxT<Lj=#K=:bVd2Uܘ ~cIM߇-b⽯Ăg.滋&5,J>HM%G$Dx?fؠ&&v81JΒd_N`+rdp 蔶_[eӥO>ž}4CLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*v*,bKSڎ [4Ic)댧2z)댧2z)댧2z)댧2z)댧2z)댧2z)댧2z)댧2FR2e#)HFR2SO\e=qSO\e=qR82-{$UELb[J6pC|>x|<>{Ϟ=y|>x|>|>|>|>|>|>|>|>|>|>9QiF2ʍ-8pOh٧H }UpOh٧H }7؆mҸ8i8C_|_m*^I9m9GM?5QͻujU&UEW-ǵ13#k5A'pP01;.h*`bIىSM @SO(ɜ*v)bSb=1بRAcA6(ųOdj9 5꩔.bX<0YY&d'DHrj%]d2$3 4̣")% nLIZMjlr0EG P_g-ǹ<v2!L9m~!GG-{$^!GG-{$^!GG-{$^!GG-{$M ][CXnHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9BIE!$梐sQHI9$RNj) '5BIE!$梐sC) '4:␒sC) '4:␒sC) '4:␒sC) '4:␒sC) '4:""ݛ5Mb8'4ql$RIMl̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̂"ܹeI c{4pOh٧HR8 G=GW8i8/*MTl\GJJ 8@m%*I-beߘ ]HHfM 0F12A(6"py-?Yur>[CiKs3f&3M\X9Ķ#9{:rޛb\2%@LqtRقe2m% r)D [8Ct"dUZ+n['O>{bS0+V8MWU u!s@@L>Ts's(w'l*g)>1`0(M/@HgN|@D7SG{7'k1DɀL'-yvՄ\Yӆ_~\a fx9*wg-2:(㊡ƕnKc-E%1b}Ҁ ;a"f('>"IsG⩰\1 C$Q%:L)q33RT.lg5of*@+M(bۑQL6" zj@PSMWnu&o-#4ql$p?_7OT<Օ-}% ~EBw9=:z7& q c~\%Oē͒A< ❝1MpMP2a1r*-/++YjCcL3Ģfd&PR2d"쀄#$WL<78blhEMK6@}L.k"Sd#06x};H| NvQcmpM\"^I3U Ȧ]YMTvڭRZNڈWX/™EYD M1Ű`:peѫp! 
,LųOd!~.[@CL=l #l5LC9&"+8>2ŊLKw svIH0~+ src="+aUULpcF9s]q^.y9KB"7na),erT"GoBR3PHky&{^.saxSx\s~!ӖP,cP5c&?Õ ^caUR2Kr 6=pbܿpyʃr0sCrb\]#;` )ųOd!,HjM S\6;2r9ACt1.Id(u& B9䉌O-R" H!LaB1 +LIIHa h&aa/e/%4l;6I!8W)y!YtKPItC16 Jvai5$̍-݌Ф W'Ί4ca nùKLdr@ XLL9?ύ6 4 O GnPV8rܕuL4~޶JA`0y; 3lgE8yھ(df.dtƘ a9 ѿ D',PPD7 H$UʪNk @D0uNZ]e3I'I{mS6qTl\Yc&r⥾$pA-{$ )Q7U ) )N>{>HdLR!ɐa0IDw'z0 - w< '); ԭt3R 0 7+vD[PLT(ؠ>\DsCyBqV*.p2PـýébM.Qn` l'o$&y,*pVª>,؆ck݃ Ju%ôĪ$br X'hs)uPt)5)OQv:J9,?8n=ד*S0ˁFr_'lhK]2(_wAg`P` `0sā T̫ @ bm6%D %kU2ȷ2#c I[m/FLfIM :jc#RR!O `Ϳw P2b5,b73@}"N6 n>{Au)v#mb"Q6$%-6llG?x -YKyL_ v߇-L8E r%/ "PýhrcO4t ,Rp<04s*j@ۨQhk_%5Bù>ٙKqWfpp,pfIY;2W0$Ę89a]4`ksO8i8/ڳ5L*Nڦ TOzś=IB1p![g S,SNM({/,9: vrQD-uRk! +w){+n1 9fݥtdɦAɘL@!JLDsi@ FGr8nT8Q\i;UϷGmbh&r !Z8`RV.S!@ RxJ)?T.\u9Fc|**}kl85˕:͔I7drGw4 jY3r )(F(&r*d*h'a Sa).JTe%RV(!U.- t8U0yd$7 gL%IU Bg*~J CzNOvv)q׌F3N`4߻2/vxP+s7 &moNdRwLb@^jųOd1q+緎;H)o()0oo@bſN 18iLǤԘf̶Ř*l\Y<~ϻ*)`VMZ8UJ\C "ck\BZ_Pk\|pD),!٢X O?r.G%ÞLXfw'UYJic8l%\Bc&7`-Q'RJ 9ơQv1`4''&K|| )QSYM (#eml7G$Wuyɦd'**1Jbۛ'bG~+ B^s&5؊bl9n'Ps+.eLfU#d69ȬfSxBӾɟnM\,G!A91!6F۱2':>`E^=MUtۜ9R!4dCLDc1yY C r#?~KVo*mRLPLA 9b_KT5QIyh*.ST((A) fq4ɦ`P?ܦL DsZ*t¸xs;HdeH\K1HmX? V8 Cg-ldp)1QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HJe M9 !p4PBiBO|&$!4! M>HBiBO|&$!4! M>HBiBO|&$!4! M>HBiBO|&$!4! M>HBiBO|&$!4! M>HBiBO|&$!4! 
M>HBiBO|&$!4!`l" `Ԇu,7xNS8!DltƹkpbZb eL8<;lX0=\[e`*$L u %-MBf3צ2/1 1W"S`lp8V{Q?D\'#^V2 Q6SA#a޼V3NN@AÒKQ@0k~")IpFJ&BP %)IRJzoEG@J/K}jSlo"m$^`4mɨ c]A@0 |7y<6TU(}vB,ʎԸHep2X$43=SiDV:&8($0!@l]ъ˻Kq$, W!#|qrGmm9‹nQPItNC.0:]c$QE4HRD5_*31P!2@" #$`XLpҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪ/zg~y=hrĆCB38NpdW}uN{c#b#e[#d7]+XpP8#A",#`7<8vL^ucZָHpD v9: l#3F;grơa?<_:=l8nle&tmCC"`ȴvKRNIo`05@c=?<ϴА?;[[[[[[[[[[[[[[[[[[[[[[[[[F3(X `,X `,X `,X `,X `,X `,X `,/瞏>)wsG7z?h[{х; $)swtnyl 8J%tP-s2 vڐ^?pIlGffv[F%#qNɆ~PS~K]˛] xMXF7 Wm=xr;X|n=1۠psX Lk2(č9wͱikX]0f q+ Cc0-k^ƹ6S~ o~M>)wr[fEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfDN3Mq{[OSw=7͏{?VJen]pf0s.^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^AyR?X+dTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdOaڞ:ok~שߵ}f\+eZj_'k ӵ.S0s'c{iXۣ}L\0 0 0 0 0 0 0 0 0 0 0 0 0 ,4!P123@ #`8S6VҪU[JiUm*UVҪU[JiUm*UVҪU[JiUm*UVҪU[JiUm*UVҪU[JiUm*UVҪU[JiUm*UVҪU[JxnjJ!Mu5 vhuEeKOC)m a\z\W']G<~-(?otsWnFkFkFkFkFkFkFkFkFkFkFkFkFk aoDT2K!,K!,K!,K!,K!,K!,K!,K!,K!,K!,K!,K!,K!,?T ˍD|ߊ_+OGύ*}`C6sz 0r<1ԁ&N)J8b#-/A#uB:yQ OWh2H/A!+ cX]#ل&+lÀfVg#cfNTD|ߊ\Hm%aeKwf R1Ve/ra8(2 HǞ dO$7LEM4XBZ*DKHs3䢖2Ylax䇆Pj&rX 8r,ʬ֥)?>7~)|?>7!='Z[Ykk-meZ[Ykk-meZ[Ykk-meZ[Ykk-meZ[Ykk-meZ[Ykk-meB, ܮT)ao-[ ao-[ ao-[ ao-[ ao-[ ao-[ ao-LWa d^ϊx| c|Sœi> ~Nމ ngN^DGs#vwvgq 8{Gd(Dgqzs#v~i[ѹ>|Sœx_>)/ŏ>"vۋn.]qvۋn.]qvۋn.]qvۋn.]qvۋn.]qvۋn.]qvۋX>! 
@y }Yo-eՖV[}Yo-eՖV[}Yo-eՖV[}Yo-eՖV[}Yo-eՖV[}Yo-eՖV[}Yo-emH<'>|Sœx_>)#OP(KW,+XsZKWij-]vZKWij-]vZKWij-]vZKWij-]vZKWij-]vZKWij-]vZKWij.W_14!1QAa"Pq 2@#3BR` ?"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&/ߜc|am : UxqhCRT6@1CVUVpJv ՐY!Ck#~2|{1Egv% HTVtc-s.<̇"ZZ,m"vwTW.Uv@ *]'72ZߏߜcBY !d,BY !d,BY !d,BY !d,BY !d,BY !d,BY !~yc1c1c1c1c1c1ckp^{{BB@Q[b1x_v 'KkJ&oAQF*BS R)oDbt!N*ۖd͇OR G, j[Q5Z.!QL\k +ԁBh&*PZ/-8*p^{{^cX+*Lmԙu.d|;o,T"quޤTpҪs."0VVBe!](}*QIv4"g-9b7\_>g~(~WNT:u|p^{{ ۏo5{q/n*hhhhhhhhhhhhhhhhhhhhhhhhhk e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[8yp\8Ti2nZGEB:;la=U!6$mX:/O#E̢t:k,WpU徫Qҕzp.Q'Eݑt8m/[~/+z"}^?x]zQ*DXa6quDZ$+4ԎzQKX 6D^ZI\m [/#ݖZT^ u^(Wj޿$a]8qc`c?p\8qt3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrfr+\W"ȮEr+\W"ȮEr+\W"ȮEr+\W"ȮEr+\W"ȮEr+\W"ȮEr+\W"5Îk>ϪCՔo,+}+n\:b4~U[r(/BEC^+qÎaN(-U/ rVZ%j}G 歟ⷥzt)Ґ)^IUo܆~Wla?KwS?CFU.?#8҃a`շح).vU-*\S$n4Uwv(C"J}>}hFI5)?kxnÎk<6KdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdXdocker-1.10.3/docs/userguide/storagedriver/images/base_device.jpg000066400000000000000000001331341267010174400250560ustar00rootroot00000000000000JFIF//C    C   G :-I (  myOZgK%~+mzp <)˻藍ܳmyO6C_JM|>vCl]?Z:՝[)|݋Se&e`kn/KmyOl^^^.W~1ˇ?9q?9q>.[һ^xSQdA2/>3C7䞌מH6+Vl؜}{מHT/$}{b236Iu‡PԪwfb} <)|>vCdbc|byOR{bq'>t3;ޛ>i*P6w_3\Sezk v?!6prxƟ9vIh>{x6Zkxn|:{rS!R55Vѳ6jyB²W%o- //9P)ƃ1bzChBvkUA+[.-h@C ́ ʀOO(Y%c:'>~C=V/7L>r1{ dc{he@շ>ߠ/޺o3yǟ.9pϋKgT ~rI.Bg~;#G>מkΏa=ЦuʉL0|H㮶;Vlجy}מP\lwЬٱ9o+mHT?$}{a36jTmм{x%{s)QʀO:bèY5+s 33?_^ {fU ̖Vl S !|1#v[T`>J|>r1 dc|vT ߟ@-o7i˯_.1@g}2SY;㞿sYdjWZ6כ^z?3}7YdEI'_Ҁ{7he@̽C3?)]sݛ׀͓E* h  9J|>vGdkU6|>vClcqwvTPpLp@ -:o_n@[𹦠ٙ06=oĻSXcvéBd'}Sk<޸L}H;~{)̰|{6R ]Mv.U~Ã}g+?Jbʗ^a2~4vJbH^]\;яlPzu/uvm%._oOw=={ruwtqgOw_.]=]<;z{|Ϧ_-<G͟dÓ)  =z0Yi0J͒67;VzkNFBie{ZՋ/.bp:M<8BTP?bp :@Q4o=I'k ?bp q2pr8]ty4ן bQ0L&`=DZk"&˞%z֖Z"`<4=@ysLūߌJ-YDrj'o m4vM&QhO:HXwUHu[4L+3z{ū(&Bk(N8ˠK4i =~@阘LJ& ֋BkhBkhMeLA+e̪z+zGP9=~jxtCӣyd+&2y0qVsZje^q(a2I~1h* xwlyO.@yvE涄f95&&fxCӢ {=3jL_^8凫Ay.}inh831rN4!%y 
QhMmjЧ;S[DZ5E(+0ӘN`w059|`4 z639 W辰6/~yܭ:_\}|:_84E)~]9u_qo@Ϧ[;zMұzZsv|}g=oz`>{6|m-|p;ǘL&& k4:Z4E9^G8k9@q հy/Q|nL_9`8<=:ٷwώtoVE>zuұzYM*wVmWJwDsPulg*/K)Vޓl7/VHRӞz>[yZҭkfoqvM4Ҿ}_ׇ/mԥ4^>h}{9?9 a10x:)7wSJM+^1 EixVf[R`89@t@q@ְntt bi])6z:M)6Hikr>pW/RL"Doq~~zl/}`r~moWZy?;WN EjM3?^7πlF1!5STUr 23@AQq"#6PR$04BV7at%`bs'utbqs5kw_a]} wu5kw_a]} wu5kw_a]} wu5k!0sS~D~m1-cW_T:KX-uyB *&#krwcg`cxS&ArI^`*F[.*/EO/ROD~tEmHX^ h&,J"K l=$EKk)Id(>R(&16f.cc[XLl5pñU0J`< ji\HGy1L 8CO?~JVDڰj<.*3WPޭki"U%}* ! B# Dlڀ$´d9q̇J?m+Z+\s^1"D~QRcpDgKd ƐNFCԮr:cqH^[~O\i+ױwjh=wS A2IŽN戇dHZifV]$=^c ,4 {J.ϖ 3c$eLrA Wt^ȟӶWt^ȟӶW^P660x$ U=Fs8ˌN%\/FB~.xXw%ּ~Ew`d-3] +g\hW/uY܇ZRs.Qr9]J'w RQq.LT^O Q:癿'08zƳ$ zS+IWg̢ r!+? ʌ[h1/߭67 V [¬6Ub;FVfaL[9YJT3,)G%UI1`~X^>0WAE)ݚQ6.o{#"95#D%{[JfZ㭥QEey`ʬZ7Ok ߜUfPl+s>Q'fܒ@6ABVJ9mf ++QOɐ-~_ ʫ^5A'}?Uוw^‘0՛S v>䵮\34[vCбW6D [@keµ1dzkzӝ"2m:=ܾw<].GU8qr E!SԧuTy{K= 9.`t>HZzGwTr3$d4ؔH,IUg2ж>`uD]M hc49y*ԜEsS7>Uܡv5A'}?Uה(}*T_JnZnjiBؤjnGM 4P`qc?|1ϏD0Doc}BCXMIWw=*@Jn @1tNDq<˷co80x ;D0FYp4H|[pFgGv= >,c8<5)`CE#;Ѻ-|\1]검#x 87bn_Ff?/,aF 7oA'}?U%MTETOd{ '0^Y[1k~QmN~0V MdWa5V+4g &r0|JRnT^ȟӶWlMk$(RaY|ʹk #aͪnT md$>jc*ݥa5]=.3{dvìe$F8afbn=h8۵:&ew+{+$qʻ9^>n~7XlDYKBUaCPDjiޣ-;}:/ROD~JĩS?!vr*oQz"N']yS=exZݬ.'phK0 5”uŰ1+YV2_\/23)/=km|e5+c˫~M=D iuإlK0s56|9ネ{CF~2!=(J!SS>e$ȣ0J gSnHײ$覉Ȣ~rU<Ԓ Cz=qilcdu ĬA`W AK;}j>y>$|dS&(h.Ic#+D8#TkkgᙃVX\9@FzV 0#~^gFcKh7`H\y@4OY$A;9Tf D,CP/sȔon1^|Q}l6Ӝ%E,*ZD)y? qDqwb"9&)a$4 6L2FV '"XH*s=Y O.+l)}H8eD1㱟*r$Z*[4q:rHPr(>D]#^֢ݻ]ɚLv7f5ݍ왮odwc{&k3]ɚLv7f5ݍ왮odwc{&k3]ɚLv7f5ݍ왮odwc{&k3]ɚL9Ճ"">OD|[6,XagR4q U!Xl""7T^Ȟ}?r90J)svcw eC3g5o.H#QЫ5hp@APBz %w;B0]-i1  -IQ$AY22i9D #}v| /r O,2iJ`ax>_m?͘Nf'2R#T&;%'QK:HaGYǬO:"dfg_^,3chlY 'Jq5Rl|D'Uk<,m"ҜDv0$οՈ;Tg.bͤb)!N"Ȋ*ox5QA'h>Z dE |JKal{uرLZӤT^HTlj0ij(m9{Mpm/#1bֻk]xT_o0\˙22p(IV>3CYElfK P6WDhVڢRe[/kxO! [yk#0Ts!X+77Ӣ(>DՃE(FEo֨xV*nhƌTAsrO %aWDK8hLJ-v! 
0"ԉ@35޺ӹ叐峁 #ZME]c8 _-#S +z\-ƇN㛚2g<[|/6Wc\!Ej'_ lI}:/RO>}X?x>_mg6r"#"˗W0W-Rs"ewM:C.'-raP*2IkQ30|Nn~'eMV,!Q7}X"p9Gx23Ud4AB5u5++t9ƻ-V s ,yVlۂoQ¿0~JܙU7T,rbs9"]Z-H\J`ڿS:NM ~Jhn/א]`Ȍ`FFޖ,+m:sP2rXƎPSDZHXUۉo#UKnVb["; -TAti *=ؐv5E>N\XnQWXpNt:ڳt}6ܲQ 36!IKc)D A H`5+@ϳwWL&Sɗi5bLJ%c&UnBiO ۹ (>DՃfg|E0DDc-boyjHud>ޠN{g4օOMRVV>DܮgY][Om.G NV1 Ml &$ǃ"BYjsNn;6y@{F6 W]?UOc֓s)DŲYS"N!medd/9F"&gHHuC2ԥcƸ(\6PhTNg_ }?Uה\f!S1U͋,ɢ>Bo35HUplĽ`CP:Tڽil+vzA aqKc8o Xzy:֞Tt%z #i\:1}a0cfo7qIylv `1ad2kfJXH{Ja9)r[ m93! W, W1#U_GJrQEP|H<љvMƑHVsr,I+ X Ha\bs|cҾ CQ+ Yȓpc[iǒ^{09}-EDy5 ;ɓI0I2%Kf,F]0= ɾQ+Pj(W> MrUĤI5>vR,RTFÅb!,}P{%O#79dve[B US5ªfX>_mJ㰄$UF&]ܛnknOiwǍMݹѾj/RO>}X?^S{p1tG#S4UE]/t^+MӧSvT5W'4UE];оBxw;ztTEM1SЊtzQ4U_Boӽ i+I㧢*i9X @y7po|p']Yŏ$SX˽΂r5YW]jQtj*wup]멕$0(𠭦l,8qܬ~i#nݾtCik\szjή@Ѧ#zj?&ԺJq^yϩCޚ1؈h"3P"/"8 ŽJ:')v 1wC4h4zχG(H, F6~I]_coON\&]D*0FԸ0!^Xqx+v ŽdzbS$5h@!˿q+ErwVXXogeutGIMWL Od^S6? 29c5Y"#<5,`"#|~:/>Q*5w*vMw=FFX{hBsgk$ 4V_6c)"9Zƙ2i]J碵/8jo6Q:%m#N;A /lB-;d3K"0Оa6W=в:jک@Ķ&!SsvXwTn4 ͯ؎N!}qu:gmi)*^ǁ)3rP"K= H2|qq\PJ8uˆ-]m;q}oEP|,;ݾk"@32L}eh-#8شyU;I{+[M=+:[FYVLtd9etFgZJ̚P %T˟(|a4GXՄmF)2K'cY̗_Fl30_Jȑg6˭#f~OD%D]ʩ]2{llVSW1.e1Ǣ3+F;$r`%YoIkRbX;MG R㓚`8=L{ ›:EX cjl4~Ar\r‚$)Q1jJݙձR,x4Mhm5LulekL턏HlBEP|Ϣ]b{O3@jűU3cE ]! 
c1IG#80̇e6桹8EKa-łn!1dK"4`CcɹF\Rrj(TŸN:ͫ:*>6-%m@A/ '?9e;:0R},[|/6WYm* I EFkxu;&bV#X޼\;CXEL)w2cUѲFZUS/ĩWd?f#WwKJ]1/ĉ(qԖ]Y4b+1Zȩ5ԬrŖKN޼\;RJ18U<%Fk]}9xڛ)n twnEFkx5;&bV46Ɩ7ɽ/RO>}X?Y]J-4+WpJS}KJ_-9!sRx?(E#֤Ez՞CL"sŪ̊od,UgkvVߺKJY(UlJP$ sFl?&4NޕՖAN:p܊YFkx57&bޭRuݿteБ\DkQUB"Ezo+d諴Ud(>DՃ&P[2y2 Q7#ጎ z.L0$s6DUYZTkv={~Z #7 {c1WoV7zC"T `wH 8**i|u/>ʑJX G*4v`{ =PBDHҤmuyF]l8Ɠ$8omٽ\9"0wvͻr$ b](Ów%"NR6(#N#m"4BħS}9't<(b|:h ͜ ',̓oEp)}"֝nc Z[/jiϔxS'}`9Zl<aWQz"y{j9Q }M7~g`][ɹEb+Ά@]{%=3VtQG@\Xm>I+ec'Z TbHaKmPG8GY A?iSZۍc动qD.ȭq#[5XސqӝØ/QŁO3& Y l,eY_ m@mڀ FG^{)3AXr*3͋/>eL, vi hpg ]ώsV~Gδ(r@g&#o_]$PpDʄCPӤ]Y a6x Uڸ ɩtďinA6e{Ȝ{ߒ#"ZJ[xuTY]ƏFHu;&2Y(e[1aNi)n`@{WԨljƗɅy*̜3vObm2qn;EP|-n\)7'~e,c=4xdnEcDWƆ+"!$k.ȁb ǣ`ctLf5dCϧ02kޘ́h%\P.J+O_Szb[m 4麟@4wQX>_mN{p`H$glI7 >PF6j^FVf-"o]Xxf 2Ų710ky&af25WҶxT֍9E_;$XeHr RR& wԃY:J!M]e4Ȑ5SҵTGi~>7*cAG縋ϒx,2$s0} wԉY _7$*kam+hy(>DՃ[R^rM¦Ȕ* w>PRDc9¥& wՁY:J!,.ND+_Kt}nk7*cCGoϒxjndUhbx9T;wԇY _YsڭI7 />XF&k^FV,*ZE)y? q@6edn 㑟l*Zi>hj<_JSZ%:9ϔ1ZBcaˊD^-b~eGkPٮtOf=Dkٮ[%@z"=DkٮtOf=DkٮtOf=DkٮtOflX|i^E=DkٮtOf=DkٮU< č+wW~tOf=DkٮtOfi8D=DkٮtOf=DkٮtOf=DkٮtOflJGDWv]']vuۢ{5n]']vuvrr5.rz⼑.nռx;:Hev0 fyLtH 9l3`B^+wxbu却,דTaP(^sY*|d2 !Li[X&5-l`6U"rPƺW9[f9)'ĉIQƢV@t,Q1TF$<f\ggolo A5ݚˎrXD22"4 |Ee^RI/\vnUDbS"$a@ǨQ+j$5Zmt*l0+<8Pzl1hmafTXkk8pwp9%*QeƑ)#O5"%Ed4*UMXR2bL}sk *gc,k[UAg}YoGi'2^i SL hշuΫtZ/Nmt'JML_(ƬzZ솪\MlfXr`ȊiSJc Zɲ Qaw]6 /DzZ}A*$ʑerDG|XCAaY"L Qyq˨sDǣϕw\6'19v'Bocql髲xxvy#ȱe!=qkoBZ2 ؠ-tAMv; $(}ⰭfMQTec I\_ی/]Ey w$~'2Qd)xF,*+#(HbY ]LZ [)w2L <9U) %3,ɱ4eUÐ ([6Xw1e^5ܨ w֋m8jx&_c2F;KDW':j܂do.2cؖ_}SQ4ezª`BV4CF;Ž,_u܌ca85f(y4v4hX=fH.授6cw|b#붳nG3U`x.mkRL bl;s'XM\b_! AMlnQʭK<$$ Qlv*߽#.g.Hԕ-_E_cceȊb E^)[1Gz:1m. 
6{#'>\e1!,FZH&VQb KdFq-f`j۴Gz8\ڌ<;4,*5Z@XoB@VEI!7+sj"3^|ޟ8g\vb R#gGdjAll6K%4fGbqW%*{<ѽ?}eFk 1č¿ķ˙jfƇBXzSAz<搵*?h>2 XXT3qJv~ҋ/\^MӤD3fMDGdgHk)~m|yut5)w-${l"Mڣ\2gl{Jk(ۯ]/oW(rAi̕ NCģTّXYIOy$9{|y` Cֽ)w1iiN)HYi_9(q3v@!Sq3u&iwZ\֝@<493RôsofAñP 0g&L%Pöw|n*c* dMr S 2J6UdgK;0U1l~ 1ےPTtAk] f7%a_lk(u:cw~TxIqicш3n|K󆘙 Z[-9ǥ<;Fsw/ŭ P4Qo\Fgw#gvXJ":m\IM{asDtMGK۞p*#dKFNw2l0m`%)i!RPIe1;*nt 24ǻkNtUiO*9C999T.}0Jk`!I'N ((MO c b))ԁ*ܣ6'L`J4[sA ))L܉#>s`K#y09wNhhQM0R`؜EVM .%et2ƊJm[S6qgQalA6uEͧmns SM 3&:@ޜLyW7jV\30 svS:&~g.u\3@^fz l/7kK7]7"Fcm,t[ i >&M$xqtX[0XN*UtQtr#r`IJ ?h25DA~Zmt5qܰӨ Z-hܧs6Zk תp>BA#,e}Y!xi;PlR&-hBR>7"=r 9#t77Ds*܂![Q Qe8im]Nl;>pZ:LY- @ŵo8h&mVh)3Z݌:n2tp `cM4vE5Ӣ"9S]T 8}-#ƿy; i/Em3| 4t P3~pg/ߛePj=M,DXwOX౯i42A՛H @2CA6Qb/x@QuI1kFLaeK*P{u1"Gp*3b:C߽*!'?R}y*AFb<,@ytT}/2r%4s!sG1(r0U lV±BV+e[ P[3:+aYZd38L Q (Gm O k b))ԁ*ܛH -Nh!24`Bd'L`bg"5#{~́.hpQvGĨz'?-#n{42@N|97Vbg!V[$XAmc4f&\;Fc)o2(m a k,iŀ_'1vҌ sܰH6c.Ņ.A~eSHqi%ۣUs_7 dJ(Gm3;,V eB(2克X\湢 6,i4RkGX:GbqV9#h 4XLC 6c˱a/hh5. 'pӾN۾xUnq$~g>gqAj88Θt1X5Fl} #Cmѷ?VI!4N99'Cs8Hp9Poڳñ-ǫ$) 8NM6igI@gT4@Rsr N }N79GyeTio1u7rI{s> -쏂4S{@[M -).'J;3}K}.*a84Z=. 
iL]Uo6C7x'xm.['y&tTio1 vokֹkea2 J9#ŀkG, 鋺TXߧ &:EUk]Uv7"3Rñn/b efΜ8uީaDr< I0n 7Xw&T-3z$h,C:iR)[qw'#EVƂat gPçʋ ْF+lݳu29Al<,NP;r8k3_v24I41s2g#؟^]1CXDd*U+;49+l]wCT0#%V 2OŗÁd/m9\ƾ,cc2Iѿ\PlD}# 0X z$Eꫜ=rwrK:v*WT'rmJd7Z&oV@0aU ShUi;bx$GvNh.EO@M%ĵ_nrj5&[$=uV*5&ɠ\*4dP٘S1SRXSpvEܠ_&0#h9nXuCD6{s~ N$) :ޛl  |Uv<qTJA%aldRH);8"\NV#n#ذTpL4b^cU/X[)wIT1ms*)N32遪J*0TI'=B~@ :\US{D_)RSө"E&J>U|m68NTt9/Rv)17)tJc)q`°ضT qQ@1L 1K_Ա8S.VN5"S1L4˦|K|S*NzGMtf`USk TqL{i="Y/RcYRD\L,}79lx2%ald0rЯRv)1r"\N Xe:n0ObbPq yF[6Z^c\_Ա8S.VNF3MPRKe1SM$T1Ff*>\ TqL{)q,TU1"E9*Xns`Wc2%aLZ/GN:郧3bs)lGل+Dx-N`)a+d !L0J V" a9+d<'0aZ#lsM`4A[ aQ AAN`%5Jh)+x-N`)a+d &0J N`%5Jn߳h`>ޏsDlVɣ4@d ܺ4I?x#MnDo Xؓ`eAs !5p?J֧;žBe ۈ\ yĠۍ0On9/$6vFwBe"E7~Pfs#4L5Ԙ2j5 9ٳ>ޅBک8+仰?GpDr24W] Ⱥ3tknG\ $19g_-;*<дt*)}ķdcĦcS, s+eUǚBvSSQsvbsU9Ts۳1cQS-n}|O, 繣nOUyLq;;7{y6=eyR}XsF,nQ4HvyFݙʣ&'Xyeݲ_ǓL%3ꯖ_h΃Q#Yӻo?%#%3^oWNʫ4=46:.T5\ܲڪrwfcRǚX-ϰyygݟ`Lg=CvB|U^S4xvn8Zfz WNv?N)A#޻D \MsǴZԎ>صjH{FR,)>2tTio+Eb!AODFкg2VIKjezڐ2 u[w8BxL{So #./y>?XpDu"i>i_}t %2aVWR rlo$͝:Yb\ػ1nVvhRV+MMLؐPi-!ĭEӌdiM JpQɭԉ}cw_ XMֈRGAf@qz;Ns9㚣`]VIOGեq2b+gjڦA3s_}uU簣ھ&QCѤ~=;Q2¯tVg>m?s]/TiVb)Ʒ~OY },MC,B2gXw1`u+oI@Z"635-BlQn]%UQFN4y/&3ǐo8!jcMiW T^7-ŷn6[0 ܚ"* kQ4?[*?c&鿧,Ǧ`+MbI$-kqEf9Wxfry6Ms/Q4KQZYq e}^AuYMЮ,QHk"ѣ)>6&L2!Cp:W4Z_(l:A$ΆY~wRS`.dZCfTBQ72Bb{Biy*NߊTܘQ:E\T!T\sO(O ֺLlZMoMprPJ($.` uS3bbTk=?0gf^bu_:LQsI)=mpQaU:ٵ#boWb`0*Esu4,&2c-%2USm&OO'ꁩ1y!^=}i)3R%: [-t%}/rBLqHui4wC~Oz5eZ2 AqFg8tQg뀙Fͤ) TT r쒅27.N-S8%*FXK2|^}lxŵ ΨRM0S%GAe폚l^6ƊirdWYOd:N#cDQJ)GUAL CX> ^[8Ƹ~F .N`~/|]8 )zlW3ҍ eZqLQw?Q259 !Q&x~ uS)%[@g:/2D(;5he5sf#JY9ua>J'5@ MbZP!L؟kpEYh^) _BR$(V] &)c!,;U|nc k }cegW\sIu4!Q/^Bz[p@ ip1uUuʤ* )3M.BRڨiw90kcs+4SR,3]Cxi"UŖÃV\R@Pt&gX7BQMn jCP)kI 6a)=|]8!\mXӞ-&f 4z%)fe84|m *村6I-CF TC =fEUtQBJ7'Hc_ڏ*Ldf6fP -EY浌3ail3`lkuX pC6!(5ӘwG AJ4dTTQ6=;RuK4oSAp@։KwrP* /c7-t5K;—h_X|N6GZz?0'[MoCE=Fzy>=qz/EY?g,EY?g,EY?g,EMnP^ dO ta !z搳D>=|naK n\aԀ]ìA`3DA^ :g൪̏$j[s ݰoSR$YA#dn΁A>7>g}cTGhqX/B d7aQzKaX̐дs &)W\A)F)[KܴjH@R+fҫcj,N{^kw~O_: )>4QՑNRԀi(jAH h a־TOHFҚ.R,UȶH։z? 
BMKdV͛m*lz| n\t6]k:p~̰,PɦpqAmC,+F /rE< [ML@LD spvVEsS?Hbx 'Jv :ԽZ˄=}@4Wӈ :*t7G t?qZTR4/\F-k jh=qwgl^B+uf`؆rP4%-nBFmjT-]ΰ6`[ hTy8?U@Wt3Dku1T?:pDF0kXjFZeVDE3`?IhX!Ƙ+kSNSk}xs%*uBt&=Q*1=]fh&tVcs):t&a`ZN;xX"zm Ny8%P܃%g й0v]QfQmcբ y}3)wHfJ~&ZJ M KE*.Y1`nз^4@ lKJJJt acWlZZ٨[hV,K%<ο]T_X L>"@ nOM`%GVZ ,A!E"VhiqBRrc&rq1Ȭav`pXR@Qw0˩u fg*ցVe.UK#[u pmJy>=|nSN /Bq9SOAY(oab nRA5A`t-.{)&r-P ؗG J:ѩ"&swhmFHXp : "lbWL[PDHkjfR5hd?a>ZfοR ^K%l  K40A4TJ4i0fY^nK9[9or<7F3pi}BSv%.Dj+BeR"gMı{PR pfRbҎ"(&\ +gC/>A*C>U- _j{aL1%_xʊhi2Ȩ)M{ب4]u˘$i=ZS/j:Z^~q%m{T`ݸ:7F SP?QJ bKȧnδ)}R2rt`G[KDeWK!hFc~9TGU,U6ª(QB^`RC-&Y.ΐni!IMx #UFaĴWWFH,ʔ!6i]J 0rssc(gᯧnj'6Ҩ^)SJxPLԁnj]fЖIkJD)Tϧ5YCwf7G`ġ7 m)ʱ)R\R5]/\bSP[-@;VK~g_x8S9:A ϙjƍDn mfWLFooiNRaP$ Inb75zorl⚋a,z zS|zxЭ/n >pn-% \uɦamszI峚 \(Bpzw(B BL+"GAu=}3<_]$,^(LS uvC`ؼm0RHZW/ŋ:NM5cAE(Fz" O_⁓H1Ch.)^uuCl(y4=_O 2ѧ|z-] @{PQt(*E=q:lBSE7*Ԧ2V{Rc_˛t `54EàpF {.U%u Q cU3<ALP[yy6Ff`JoI]Ԣnei/2GOљ}s mqf Hc@=A7yrkJd)JQi'CTUHOa/sax>!mlQg"dfx)DvOO'ǯіBE;s ~C PA5gpb0bwLvB-:3} e3<2}FaPM.ĆP^߽SD[(K0t\`i;&S#*)&A!ow9LSiag fi`}ȂaAgZ61ґ:3Tt!}BWpgMe݀ꕕ}5`I:3Vƒt#LDFʁ Z -5a75 qwup}/Ф"sN7hlUnWN nI?.$ZRx&~iP.°U,hG- ʩ}b)`,Eկ Ő Hqmk|M<;6W( 4q Uݚ"+.2T!qv`U. ѥ@H|zR`}TUp2uRE0qm@fLlRkTx,Ƭ-LA 9H)Qڤ-pN,QE L[Z>&jS-@0ʬ@xQ*R#ua2h-Vt?bGM[G7MS K%}$24r=mNK0ϽJN4\ =[#~'u0l:= 4êm4ƫ/.W977!T' k6$6AV֊\ R ֋tK(˭OבK(f קă&lѶ` bQ Ze;lH3)3פJP-@Q'WXTpbµfFc[K R\I Z2`HP b f"7+/@ ((:pKvXwk$eSC9M,!|U0TY%DW9gQ`S8?D>Q⩹ +ӽW50A+\aivEEJQuYn2`)blԝ}<7F7.qqU"k|ܵ&iQq9XU)P>j#S2\R)e5pI`ifJRԯUd/)mXn7` v5.^nF!^*3KO`aBsKZtG"[O3kO<#xG\@]]g|<#xG3>g|<#kNF>g|<#xG8P\\0Xϴ3>g|<#xG3>g|<#xG̥`B~Ru^g|<#xGOGB.C\V!¸.Hk3eE&ӵN ×U06wl%u:mrTѤ`۵Y] `S %:A n1F3jt8Mlh:ЈA/2dpr*@PT naYDPkA{[ܾi,Yl\@ꏧ1\Lhn )[dF CW lx ,"ܪC @UoM&+턶csvF c,*0J)hJUaij Q2PoXicB[z g@@TQ4mL$XA'ӏ8^JzK'rT7P+2=x{%|d h*Jh{;g><тi B|կ) Db`z 1 "xAUb4\צh .7 ~J5 uQ+XZ] l&jf]&u}V:Jz\7*U_M0Rs1Fxj =LASjxc?.EkNʠqP͔Դ_Fe6{LhZE'qiU."ޔp[p'j嬩79JHG'a neSxl b9rEZV~J#IQA>[ٴe9 \ɻ^uC ILI4ȃ 0 jBAP#jJM"8#|m7VgiAY{1ф-m,oaLJo<w9dUi1ӑfc-ݥ] #cT 5~D-ȥ0Be7*[ IH; zgʴ-kqjZJ|Ս BB!aCG{cARC (%/Gɣuvmgl! 
*RDGHOCn;2d N`#`Zz $Ш6 )2x-ȥgOYqUCUK:0 0-ph(++ \0(2<0q3b?I]B <-RXo@0ZU< !&Z{L@{!=i [;aR&kBN B.P&4~[V ,-R;b\h¡Ї~=IfyT^`hjTm*]-R=Ƭ\i' yhY>wɪЫnbL&,#pS/?3<}&&~@Ct"|3~&;z E',Qm?9_x1:T"^9h38s8Cj7)a'=mM~a '@*Bf+5#T.z=89K"6[okZ0S]r5x504_YtOX:[ v%>ٖv+B`q@X[N\2,`ۋj04Ml+WnbQM( p(ںt&h݀NLQ'!V 1h]i,ihuV\O`FR9ҎYZt}m m)BړSVܩ$>Lب9T4󣬳.m:l@ikUyy1{@)[d:ڣBh5jl>^L{7 yN2s/UX”ݠwLgUj MSkl10CjuVZ Q3VU'QVݺUf.ڎhTcU7$DY[`$ ,tq,e` YH/"Kڡ\qT^ʠZ]h9,<*j;FC]e[H夡ABpNg7x4LYNG8V.̿j%e+2&ݛ.֤ ]ڲ!`N1*H*Y-OKjoZu5"C.H0oghMV`s R}YauQ\tPY h9&ACE]T2/]IxgivKoI)i* PIg;~EE6ji'%e3.FZq*R90ed1nĴ!VnXd)j;Zm޵nXp țH M$\p˶Ld]'|O0a'|O0a'|O0a'|O0a'|O0a'|O0a'|O0a'|O0a'|@ =ĭiࢃYzã5hbY4s/(j 9Jl͢]3F+/% P5C9>'-0*fv6 R)tkḘuF"Tz[ڧ?JB}cEc`˨$\l?ơ[]޷{ŵVW/[D1MO&N?ܹr˗.\r˗.\r˗.\r˗.\r˗.\r˗.\r˗.\r˗.\rː5@/!1A "03@P2#4`Qjєq3 z+0QXyǬCY0Y=fwX4V`ǬCYs"Ea>R}> ev6cjgmV% IjfnvGbZƱG !QPC,~ {r)_ϴݠAsZ"mSfsVܷ+xL*MuPC+ t<8y$M0򾫫 lc0b|ǁ.;]fTnCAcx|ð,Ic@xe}@yeu ~c_U.9#S?l{(!ǰxǰE{Qa`;9k\{ j,cYl{h!Ʋ-11uyi3 rpǃcL; бlU.=Ҙ)• ^e5٫R5 I bV%?]@ +C@V'-=ZzZi2טre_KGN<+d^Kybr]h(;[륟v5Yc5lQuf@@S2W,ƭ\-gcT'0U-c|5Pb5f j85Pb<-gcU=־aƬX8AX1"a*=bj+=f +yq3 z8(Ova=bz+y=bY^9>| c)k$+[l ayly[UȊb|ǁ.;]fE);X6< ȳ=ێ?HǶ2VW/XbP;yZl6G쯼~oWX?1HN|uŶ.J}~Ƴv5Ց[dt= { ,qhQ`(#c ǰE5hǰAa9{\{ 4X'Y1>1uRs¶= { 4XY j,cX`x  AE6= {,q1q1Ov ,'11u,C}Oqs ϏlS@k$YX*^fR$NC[75<>ŊyJ(a`Ox 8+_5yj C_$xâ_$Zuirן\krek9i2寧Zik9u 9kfEn|TZ+$-}]~-y-=Zz_S-=}y-=Zz^|NZz 9kρˮ 9k5ʞvEVoUOkh팕GKv80g,>bRv2,~giZ>e}cw5}N|qcl*\v-=GryA'F4Ѧ4iMhF4Ѧ4iMhF4Ѧ4iMhF4Ѧ4iMhF4Ѧ4iMhF4Ѧ4iMhF4Ѧ4iMhF4Ѧ4iMhF4Ѧ4iMxkhE,f4YhE,f4YhE,f4YhE,f4YhE,f4YhE,f4YhE,f4YhE,f4YhE,fB~}qmm*׶-խe~#NQJ]c..qv~VG/+{+*A[w?rDjiUٙ5b}޲ǫUn /^V@Gog߼5,<]UTpM]u~Gog߼eN=|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)W_ u|)ݫղ4!1AQ0@PRa `q"2Bp ?ېB!""-#<2C  uQR]$hŚBlVfHG x<c.үch?]>T~T[//n=Uw^b۫B!B!Bk3%*Ő.t#2>e::vؕJi ,X!X!`!uwZw%ӭܗ\YE/;U-yc5SNXbXWDD""DB!*TZ -By%f+W B\2CўH2^ZmqW̭| V2Eoz !f,ec$2UC! 
dUHc61ᔮǃDžc5toӐҎ*{Kۑ~.hT*-9ޞUEP!B!B!BЄ!B!B!B:jSXNI]f**d,XL#J,_ay cc%!ٚ`^BC JX!X!`B  X,B`̪UU}8r*.*OڟXѿKTONBō[O+;_/r^܎v~ϞΞEΎE+|~.tt~aR:pB!B!B!B!B!B!B!B!B,6!B!B!B!B!B!B!B!B!ZaaALJOmS'ڦG馣ffp~ONJImUWgHfI?e=.̣Z/:ћt ۠ODA7Bm>Jɓ&L2dɓ&L2dɓ&L2dɓ&L2dɓ&L2dɓ&L2dɓ&L2dɓ&L2dɓ&L?-!"012 3@P#4`RJRo'|mN.ؚ;6U@ZlMJjh]BҝLw(>>iؚ-1k]Bҝ+MZQw>M8.ZP2G4-q"%IykHad5c F-UEh6Q%w<, $uҴb"ZId4tfo)'pJ#`7H@9{Q1q<-<3F&m) BcUkUF%eg4~:1W͏a`lp #l73cA=͎h̿( lweEQpѣ5v1ܔRJբYcE/0ٖ(ZB/8DpF) ˹5'rѫ ա쨪6 hw_4G@FpPSz\?#qiuF}M^ E1"Pg wUf ybUXQ/auwejjUTEybuX폨UOt+8N;c_&U*v+~.juIPUf" iކ/uy >ELQak,m+PS!gXx1;:#NGșԠ^ nr\˼K(5^q'v]^fd goLPv#0e{{(icmq..?xXJӂ &iq7$+7X+"=(d`͸dߤ]H}aMB<> TѦ?&4i8WGѥѤ?#i8SG3]N>#Gҏhҟhti8G3MB<Դ9 Λy Qꖱk 6 Y_vmJo ƚ;6 B;VRڅwé+&iN;jvm}N 5+KZcu;\xZlMNҍߵ6iؚ;6+~BbY;4 S-14CvWudi4 HP~Z NxJP4i~Y۰NԋDm$1h"FIZ`l)ZEbμ:H7JY8:j8ПdOĭQ"# !D$P#b8EfYui!ED0dSfGH$lR"NT³hX)۫*F\6n1wpH bHŦɤR%WeR6!tɑ"t TGo\cMeXb FOf0A73cA=͏a` e 'bV%hSMZ,L0Uܘޥw;ׅWzп%XԫܚRu;ԁ액A mǭ Vȵ:w"UowF^;a5FXŖ4Q<>ѣ ">-Lwnxł%;7,h k|KF6i z&;EpF$41r4}]f$hq>Uc1# ]ai)]SЧEWVCNcC z}{ٔj%Bf(2&UUTEybuX폨UOt+8N;c_MF5TVf]juIP%5 ڝU;aX/,MJ5TRfW}QMF6UN+P˰O{MFUTVf^X5:폨VOt+?juT`5*vCKn:]ǿY5ڕU;ǖ.U 8N;c_rV;Ԫr 6 Zo h`DMY9WߴU NSNɷkLx U,DqzIƼ ';]ѧM!5ef`3[+RiXCz4di5G6q$zkسHkK TѦ?&4i8Ͽҏhґ櫦؆uV44='h}`|HGh7Ѥʚ4)J~P"\~JRIR1J̫K*D<4}]f$hr5*X]1>z1T?uOĿK#~GDCa޾ESYY׼2{\s5k9s\s5k9s\s5k9s\s5k9s\s5k9s\s5k9s\s5k9s\s5k9s\s5k׳_;Dslͳg6m9slͳg6m9slͳg6m9slͳg6m9slͳg6m9slͳg6m9slͳg6m9slͳg6m9slͳg6m3wA_gON9M Q<- 6{??>z4>Q(լۤ! 
lj05O$ʽ?Zi"ijԫexg?KZԩO-$VI+@uO$ʽ?$~<;#Xܥl7A_fZYQJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*tҮgJ*E[YW5!1A0Qaq@P "2R`B ?ҥD6H%DUȕ&HD6Jd%\_2l:vd&cm >8 yFXB3mz|=s2jv[w-aWd$oEKMZwȚTk[?NIU%t,$IHm6[偮sjbE?A`Et#bX:dWA`E'Ȋ~ H~E1$WA`E/B) )zyxI]._K^~?Aά\ {$K${2dH/b]1'ԗ&H2^̟LIu%oyvؾBqyX2N ܓoBW*TO/织B'~&F5T9, :W̛j>ٙ"N&ؓO ҄ZidTSJػ;"Z]Ԃ-q"瘫aHdcD$DQ$$4IY$&+$$&I&5^lmV-}|G97x?;Վ^DR)E!ߠЦKd;{"YN%|Q"lr%|ɒ%d+&W"W̝&Jd*%|P7oRz 'rJ꾤ԒT%IԕU%j*;/bd%ɒ"dtę/fL"X-„.ڰe޼٨i>CMSwd$.yI^c WZkOqxoYYe\1л|0[pOJ~N+^5,RVV'I⪺.<9?Nx,4-yIo]WSqáyR656#j7djoȎ2d*\ۏ5iŝrY39j,HةG$EEdRO>'A9,%wQ&I!J$I2JƉ&I!$i"IILCMV5D$/d$V4$$Ɖ&I!+IHhlCMV4I2I R~!:{WZxjx^-tk*V]}mx ]:|oںI\hwZ_޾[^5㲾~zz{mrk;c1c1c1c1c1c1c1c1cټc1c1c1c1c1c1c1c1c1ٿf!-| אo7}= dr$%ބg5-d;io #ދK-ԥkr#ތhPA7vILv4 + @ @ @ @ @ @ @ @ @docker-1.10.3/docs/userguide/storagedriver/images/btfs_constructs.jpg000066400000000000000000001744351267010174400260630ustar00rootroot00000000000000JFIF``pExifMM*1 V`QQQGreenshotC      C  u" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?'9ufE9UծH$c޹g_C: =?z}:g_A}?~tۼtyBg>0gD /: ۼtyូK !};({3,=${w_Ώ=?x3πK"wq{_C瀠O^# ?d >_A}?~tۼty<y3,B<ȗCߧ^{w_Ώ=?x3߀"_Y 12g{I=?z}:< 2/~3 =OG_μG`ȗEߧԃ%_>N=OG_μGcd 'I >_A}?~tۼtyG?'? tUw ~~|ty߂e4;Ox|/}%O_7|c?yß_~˦>纀}%O_7;Owp ݗӿ^z}:'OsW߂yQ9=?z}:߂xSSJ|;Ox|/|tyG~ Gyc߂[|/^_}%O_79S?>?߱<__}%O_6ݗ:ݏ^z}:?߱􏞟_Ώ=?|=~߲?ǩgQ-~UA%'=s1t􇞟_Ώ=?x3߀ɞl?y$yOB/Ⱦ?_z}:O_μG%_A~{__۴ 6ֺ[@2G}zhڻH6]m?54_a>k_J|K  h\gPKyyq#mX!K O@!<]ZYz}ީi q`e] Pї_wH~?|KS ~'NZ|DEȷj6̾XZ[u8kܝ_x]t{O|ar ikZ=CsydIyTGŚ>ϯFo|_IuM?M:zuX~u % v7NA'o+ D.~>bmះo>8xg\u9DXh/KkiIeGi,MxaG>qAG-=MB` ̒C+FF=sy:a"SO q'HMMX_~^1Xy>dbQ(vX3uWyҿ172,t ݾ][bSNnqFd^X6~)ЦiomtAuqoET;42F* T˷rGYc 2 Tc?FN}?E4}gVil$Us;Szn $oK k];^zc!=FͼkIen eH>lȐpF*xSF|v]&~ Y&[r#dF.Ot\}O`Z*w duEGXtgO&j>=2үLmmq3ox[*LNڤ3o x'Կ~# |}K:ZjNVhؖE؉GycuݓI%39;\?O`:l?,;OGī? ިVxtgN|Ee俑;I$ʎHC`g;Ş^xE%4H%IZK[@pŷ>BjS^z?=>5x ǿWgq>S}&%۵簖k(DL7v)ʮ_Ǡ 5?<K: %ӮdC$MF,jO_3GŶW^ $^ ?΁i7NiOvܫrEh QOԢ=?RGm>-ncerJt%^0@m_מ? 
sV1#n?&jڔwrOdX@G~t?=;}2Xc?@'?`,=H ?=_}?O`H}2I>toϦ> ?}?8(?8(ϯ$>l~y?@A>O?$ _oӎ??N: ?} ?}1~\:Ϯ} +9 WѺ ]"5av ( HJ%?*HJ%?|YwM, :ƑEsmqmʲ>yq ׃UW'SH'c_M7[ d_Nd_N½d_Nd_N½d_Nd_N½d_Nd_N½d_Nd_N½"|+'IσSJ߲'8?|Wi+-?g/SC#4^# IH A|4]?~|3}=N[m\1ȣ%,?~:g ?~:o {ZZ}<^{ Ѭ4*XomnVyf} qjц29݋ " |iῈΟ5uŞk7zu#$`2h/d_Nd_N¾7>2Vq=O xƇxz^1ow/ڄK4h,?8'1[ x_Ŝׂ'z _5ݳ]q ;E3+1__D<__D<__D<__D<__D<_(gQh ƗZ cծSBM*v;ng &m 'ϧlG)_$|vi?_o:p+㠮Ck7]ozo(j u_pה~GIͺ \ZZ AqQXK2GڿH.]m:54[}o_WK: 54 P[ DJky#kia&*} EAi(OG$HY|J.Y[;B-OSx֭]sÒRE|F.-|;TVkIΈ6Ho?}?~<g9׊|U-ϊ_GԅV nL7ߧ?^z|o?n|Vlmm<'h]k8gԧ0i;`I 3 I⯎ 7-m7(4?}I !13y" #x܀+O?%<߱w4Km&+_*[8Ϳv?3[x'gk_G|Oڵ5ôZJ0'@-c]_w?`SS62j]M M5F̂46NeI'k? 'Ş$4&/-oڪ˨_;uoG׹mB,ŝ}p?^z_ۗ׿g_=<>q⏉>ຄʢPH'(![ilf䟼?8{}2Xa=X?@NL:t? ϧAG?AG?$lw?u?ӷ?=H;8(?8(?{d??lcϯ;}~?}??{䃧>ߧ>}?_rNN>x?$'H?@@0<'`M_Fzy r}z ( HJ$~xܫg#/(SxI?>^SH'c_M7E (((((|}cAi\O16sFѾAc*~ϭIwEO7(dr* >.\g( / >654K-ճ}v]@P?UxC=C,x ǁ-kY[ ÿmx 9EU *~?:Ow>i72xo:U;eವ;PLѺ_.$(VBs_V~=yw;~Ѽ#Q\TJ( ߧ3qQyI߿#p>ox_rTS>ߧ'?@O=MyhwooxO\;}rT}?O`@~מyh_=:'ӷ>l^z ?_4w?ϧ2Xc?w?L[;`穣?R8{wo?^o}^'*^|CP?_4\2X4^`[ V2{JI?C4K MSߟmIqmXVаv,~QNq?᱾-Dş8尊;?şTϳ֗ɵ-GE_c?~-DVywȏzG67ů(qI k'?ş<(˿?AOHE_cz Olo_Q5g?O8<|C_Ylo_Q!zDOŸ7,=q󆷳`~9>o-OmemZXz~J޺c,Aeemm-|O]'[9cm/v uX';Ἶ(QMN_rn!SRinz?>!H?$'o8'큟ӎt?_RN~ ?\!QEW~GOQ'[kg~hI_~`7?q MZ^bUޭyM[ն#mWj~~#/&}y)kCv_R0:OfҔpUegτᶾ.DW?ᶾ.DW+=?_q7ױ?_{3ᶾ.DW?ᶾ.DW(4}OmQt?O_穠yh?~?=;}2Xct?>ϧ^z?^z:{~>,v?d:t?#3'}^zt ?_?[IN_w?ߗ?.7 ߗ~O?Q?`7YsPRt[j_Y0\m.2RVXѝA9'{Hτf_"~)>gL h|%]3_>+yˬj>)8U i; HbFO> ҇+EW!(Vׄ|K-jȎ K3X UGrkS¬_ "n[PѢ$`Oˌ*tԣW$S ETR)?-澞|9+=Nq-YX)+»vV ÌYE/\>5%Z -:K(V '9RirIKOM-oKO(qMt{&_&4Ι}U:gǪiv?^a կSGM4PDArwUߍ 'UťѴ ~um!m${bT2 c]iգp7,~W֦GR)[.K}y;P?Cb[KTmiak6xy#oaq_`[ύ_ u3Iú >+MŻiz8q7QaO[ #AM㗇tyC{Ciƛh[l'Vxec\;$gm˙ݲM+y69\^ݯIEw's=xsf_j:Lvmiv3_it"uU#%S}:7kA!#3 ;@}gNWQ2O<*uS ?ᕼ-t5ŭZb[-ffTy-p(s6d{+[{k^ڐyVNQ׶Ӿl'_E+/Xyda\~wKȽK1W2tly?'מ^zֽxkw^q[`!cρu<3?J+w^CNJ1r}>??}9Dx>g<7#m@l0-w-KοߑVkқ'8hjAT'ȝ- ȯ\-~'|v4>v;V{6PA 끁ǥsoHK"z<{D\Ǩ$:4=G}^ BiOP y;'҉F>1$Z4=^)׋79,꒥Fy[qxv z2p1嵤SHR)%vO5jo)w 7[hrnjV._u?w&׆.lyco xյkCv_RpvvihrfW*U{8Oz5!2*^:҃"ˏ\дwǺ.j^ 
kiӿ-/594S)ԫ`303\Σ'|`6ιx_GoMA/1'].ߍrRr~~]<|OEA¼͝ 4E,HB"K1Z> xohk vXj_F]S7%tÒ+ƫ1ATq_ Uf{$zgu-n l ȶ>El _ x[T񞬺~+:Yۼ2 }Ě׆'sd*wp0Dgfsnv8N{''<qsF5?jֱ\p!iI*ģMJouΌ>ZM뾛km;gɞkO[]͢\![~rBK/~$W2h>5k nDS^7x9-_W']k[YQk5A[h|1<0o$Y>6hc~#擧&.V;D,VD$1bJT Om]}/G:4nU0A@|5x復In~-LdG“9p3%r!#VS i]&pզ7#NV_9k &- ?~eKg0vJ6}z\žG?\H?y*{d=>:5/䞿穮Ck?_oÍEPֳʹ_pq+?|[> M/{޿~t?OӾ;woϧ;}>rTP?Ϸ[#O?yh'^z;[p?>x>t_?v o}?N'?yh?yh?=;{v?d2Xϯ?> 0 ܀^z:q~d>:t>t`t?`18(ϯ${d??G?Ln _܀Ͽ_.W|x-[@>㠯K n?_ǩ RSl{>/Ԭ>s?ߗ?૑|*=v͝'P<^"e(B/|FN-9-ự6 e$ìʸ4uvֹ**S%jԔ4w=y=cQW.!kX仑Ԏ0Nqk7ҵ-.A`I NNy&QMS ԓݛx[kuKm2^I$xC\1#K g׊MJ+}JK̍km#Z+?SW[|H9\.Ú:5+KkWYodwdWQҸ6Om$g>[8+а5 ih'45"P5-BQnet†$/[˭x'T7q6sqX5Ē 1f' Bta\Sy%b^eΛbZ c>W?]oT |O`]O aU &o4~w S[?oa?o ?=3>^Gc'>nPEP^>/(r zOO`8ڜ'c_M7hrnjWUSxI=rV49?7e+xnsW,G#D̬!iW՟e֯gxK# LġV¿O㵾4IjekU9E&VeF%$Euk9/ZoM#J"Ff$$Q4J7\'M%Զs !Hd^auuE߻ϷP@]QEQEQEQEji5մ häswj& hZ^-bOPPHҎ!ERWRѬkԊ匚^MKM/C.&Y&шZbRg* ϥtuh}'JFFq  5f"&;Ǐ>h2i~%a4$L$fBH;G4_~&^#TH6rd=dQG*qI鶻zv;~^=_t^iS\=ٵX33~b9UF_K.µUpp9@^E/gUedWEjYꗱu%&rrKt<Nݿ#?y@ԃ?N: ?$FI_>t``1˷㠣8?NI>t~?>1}1ϯ@N>=H~tc㠠ܐ]coxN~.Wo#~>nq7;>ծivc;`A԰H?^!??N: ҍYҚM٧tѝZpNj5~>k2Y_x/[Լ]kp;2eGF%ȃ_^ϿIN?ъRgq?<~q? >?~~P’?A_G?~~PO{?:?H E#Ux?߿=M;"&3}G0?g*?f8+өLWҟ7^:,FZ]+IEibD%T*ڙ'$3c;}>}s*Z;^= pxJܜuWDʀdv ?!9?穯>3}^z??N:v,c'!?7O?'k<'`M_F@(+g#/(*ŝ~DoL?|/&_)¯c1kmn~ PFZ|/&t>ZS55a^i+? '{!uEz?{/ϙSp/_`Ou zja_? '{!uE{/M<?!2=׿G0?O'B?_y~~Bd{/.`OukU7 ?D^_]? ת(֫~?n ?2=׿_TQW?/#'{!td{/.^Ze_G/0?O'BD^_]~QG^0Sp/_`Ou zja_? '{!uE{/M<?!2=׿G0?O'B?_y~~Bd{/.`OukU7 ??bj~,t7Z@#M\P3kpW'*9-k+ʩ`Iw}m$z_?ߧ͈ϯ=@o&o=v?{|v?m;K^M{}Q@wVֿ뭧F_ӯ}SK?zO1>lv?C_S@9?_SF9>o~G?%[%_w?@~ 9?_SGO~?{LOϦI\~8>v'oϦ^zyAqQ[㠣>lv?N_w?}0[_܀? 
8(_oӎϯ$A??oo@_׾HN: ?ݽ8(?}?>> gϧ;}>˟U>|o;'׾z?_׾Hw $?xv=@Q7?u?_Hܱ>ܓ?N?>*w?*u?O {u_穣?wSϧ>x?}|ok|/T#ŏq a[8HbN?]F4w''տ<lxM6X=aԕ.-Rn4ֈuj:+l~=M|{ oBo\|<<.VqJ,w[r62#u>8\|,3ľt?&O iP[I t_6;YLD TkP@O?}o*歡X7~/WοkXAg|#FebS{b~e)!S'__8?N~S@9?穣Ϸoo,w?}~Ӷ±N1+#?6|9(+g#/Dܫg#/(7q ^^S#4op(((((((((((((g &m j(G?ݵҸ("VAϧ!OGoӶwO}^z?5|vkq_w}?N?$oӿ{Q@wVz_po?a_ӯ}SM_s@ ?yh?=??O`L;~L;}~~y>~מy@N:/?=I2I@NL:}?}:}^z/ӎOӎv?N?ӡ?p?>x܀ӎ qPOHy>u?^oϧ;OӎOӎ_ܐ??'뀧ϮG;?׾H??O`@ }ߧo $u'AԟL> (l~c?'oϧOx?*w?O?yh?=M; 띡}~Ӷ7OGvտ?#|U?uis>$>&`:'؆%'fg?(;TW$[m[Is^=ßOH|=?x k⹥_Gok: y(Ǖei$(/$ޅsIգj&,[6Ofŏsϧll44~oϦK>,~A?۶;! ?}?@0'`M_Fzy WѺ9>@Q@xw$wEON HJ%q7}N: zqP_rL(?ov?}$|~t/ӎG>'?S?G?ou[{rT?=??N:44w?~˟Uៅ?xx>֏?oϧs $xOp\ԍ뒨?@U?H>&;MK[97E/FSy9@[-Q CD0X}Ͽ zgş#( Hry'*(}4ȇ,>g۞%>k ͦ4U(;[ 8펃-gsoO_}@[K?T-_z_}~oO_}@[Sos4{(}4ȇ,>g }ş#}OO~>\mqgԡO6K W.@ڒ^k7gh +?4R#Tx5͊?J]/fz# ǸU}bqk՟> t?#'~ŏ]kO 65qW䒋NV2M]?AGo}^$?{d?;9 WѶzy?OEP^>>D^? |~%?@oM%#4oӧO4? 5kxncY1$duE 0EyM#gT৿e(*:ٴrJXoH? /Q[@?z4(.~wc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?yc2CǫB?ycŸU߆~/N+R+8[j8E,D$gi dmGNA_x?Ö?Q~C|gk0Т҇Sw4Z5k+t/gt_s['o<@? |vk?__ɮ#$ӿto(j uҸjiwvڷH>mO~LNL3 ϷA@?AGAϯ$l?>t`t?`1c?v㠣/AFx?$'?׶I'O?i?3?|u?A[/ӎ䝿yI뀠 v?}???׾H??N:8(~מ=I_Ϯ? q?}䩞>*~'?SG?SG?sϧ>x>z}yP<}rW*\< hھ?=w 7|15/d Q{5{&`Ru_V$࿄K#LG]8oF?ďY_,n(CPG../Z3HIsGT`O?9zylēW?鮟*FQEῳ?Kzsܫ~]ǥ| /.[ɵid`v,Җc@{&6jo)xI~أ=,G|XTU'Yxok+Bo(9٧T1O=~xK^mO-#k 4󻪳1dj02I>֤G >w ik-KZ+޼[_/ưSqA#QʻF$UmSOP1GƳZ4%7tCL})/j.b1U4}b:>?[Qhnw|EzOG|?>ci:gmr;R%֐I1U,$1PJ´ZM[+%҈; &/C_?x?Ö?Q~C|gk(ϻ_Nt! 
_܆~?܇zqW}c㠮CYN<܀N: N: ?_r@ ϯI?i뀧oϧ;{_׾H??N: :qQc+> > g#g\w??~O`O_מ'CwAoϧ;}rTS??==M=M#;?ocϧ9?to ?;`O=M|6;yerhOl} uDIte]rR8?=s $ߧ/u!h7;aL[?h}?~'m'?Ož7|.lZa{Keq![*0#tߋ~dyu3Z@S9ww_M-Ĭ/#c poz_dݭ7,"{ne+^}.ϣ|ZszN+귗~}Bβ[|ۉ A$U>~\Eׇm7/]>Kai]r#5#M%@JF21H8#e,*jpVW^}oƱ8&浼]ݟKko4j&-𾹫h=$S!36ߖ mt/5+nmu6w7NB>lqڼF\vjOuǶ{ew/8et}uuWvZx'žsqx÷$eMF?ʼjH۶7j0x{[`/My&8GR-^/@8YU5>u'tɥ;*885g$tKyOڒO| xNssJd(} k?oUɵ in&˻RO&ǜ7-cISnﯫ'.&%nދE߳πqB杌״_wdNdbW0;s\d=Y-IVj[z X꒷2Mݿ=[Əm1]4?+Nrp^1>ݦ>5`G@VcnkGDcR6kMWK79cw7M_K_,,~ f,:Mɢxk׋QsëS$tOoN: wo}^%s ?.?\mϵ_;݆8?}CZ?/(< [O+i7_$W|ֿ뭧\~@N>_׾H?A?ߧ~tv?$?I>x}H;}rA[㠠/oN: :_$?}:}> u{p>*w??ߧ??N:4v>מOӾ;?>x_%O_%N~'yh'穣~[Ogϧ1?O?~ OӶ ?yhя_X[%]15 {]Qw?ߗ? p7펞??J>sn Sx[RWԳ%V(㓅n5 ri1ӎu`qr!i.7 M 紓Gr,ѫ+rzN9\QT.d4{A#KyD2.yp?)G?/qNq}~WSMBQkկ6->B8g{n? !O?5_BE@;O_O6g8g~s?֟ )_ ƿϯ'D a=> Y|8g֟ )vܿ?\Ǽ~GW_? #>$/:?k ,?x(֟ t?0I[XobS?#c/A@?8(?}RrwgJ;w? v?}c'!?7O?V?r&l?5QEWϼ}1?KUῳ?KhSG'GS2ŏEWj)?xItOCk ͯ{hVVHe A֔fR3}9qe[:QIW: 77^f_]{ڴ{?oG~TQ_߂,{[tj?W: 77^f_]ڴ{?O4~GE-B ?׿YG0SQE~ï~ г}{.uooouG??oG~TQ_߂,{[tj?W: 77^f_]ڴ{?O4~GE-B ?׿YG0SQE~ï~ г}{.uooouG??oG~TQ_߂,{[tj?W: 77^f_]ڴ{?O4~GE-B ?׿YG0SQE~ï~ г}{.uooouG??oG~g|XDu EK!6[_7vmwP3."0e%Kpj$QEy[}ktji7?w]kujiַ܀?@ߧ8(?};瀧OϾOt?AOӎ1ԓ'Ccox?>xP?%OO>'?S@_4{w? ~_v?o^TO?qϧOמ4/=3?;XN~  מyhϧ{L??K~Nlu~U>|%,~}IԏL> ?\< oO?$}I@'> v?`1_܀ ?8(?={d?N_??}d~71jK-mL/LϠo,uWO5kp\з3ln4xzE1^~D|=f])~€5>ߧϯK |=9$:yPϮ>yJ>Uf?<?<_~t~tu?_R@_ܕ? ?Nr=}>|\;{b#'`M_Fzyr}v ( HJ$~xϹWϼ|~Dק&8ڛJ??!i>^SH'c_M7E (((((((((((((+g0vJfɠx_+=f#t qSbR?ߧ;> ?O!I}6coӽ?(ڷH>mSK~¡k_JᩥZS ?}??NOg#:s%LS>ߧ ?@O_מ=M;~Ow?ϮJ=:{?מyhϧϷ[?}oS~>tϧqߧly?[?=;}2X[%o}?Nϧ^z?掟=?~%X}8Lq`?}:[㠠㠯?]o8/zcBRQ?~\q߯/-Fy'&sϿRqQ[㠠>ܐ<u='[?<}>}>y>}^ 8(?ࡣ0ǟ:p+/*S Ex6žԤmmzCcɷkcԡ*c7Y7G +(o?)L%6fcj7/q(Z7UBSu ?_iw:hwm\c,MyEi>kfX|Ye?xUf/melY7/B dqF-2<Qx<`l}on"-g??w_g %ƽ{i|Nnѣ^Ы0,ʡe}ռe=^|AT^Յ˓=`k$X~|F q.> 5[Yq<%#ّ&dGCԐ?'CϧOϧ|1?G?o\^\N~=?4u?_S@?; ;9 &tp=rNCnOEP^?=q7I=*/(OLjoW.?>#IN^?INoӁ@EP,6]m?4׀!_JᩥXS ?J%OOϷ 9?~OӾ;}>۷?}?뒦8??o?=yh'yh>wϧ}3?NOӶ ?_穣>=M=??O`?c?>t?lgӶ?^z}?=3q@Lg;}~0?c ?}?A@O}N: ;!ϯ$O;}~˟U%?xx֏؏On?}? 
> 7^@R18)X|A\oό:5,cF @yy'JTRj5vݒfu*FI$ߑo}:}Gl_ui.~1hKxsyG31: #Qx~Ϲ~~c{?~VFHOOGE6ϭr?R?t?hQx?+[ #?Qߧ'?SMe%6Y#C+)ʰ<ӀOӾsÿ>|v?}~5akȬ<'`M_Foӌbv ( ŝ>%?>^?~D7Kx{{WڛJ??"O}?/^kT4{!7s%]AgiF5 2Uϡ7އſi?>xW?]?|?%tIF?~-ID_:C猿us/Ώz?i67އſi?>xW?]?|?%oG&?U 'O'2GD?:?އczʿ"/\tID_:ȗG}TloCW4OxW?]`\Ϸczʍ*[C猿us '"_~loCQ_?|?"/\tdKs>ޏM*67oO'2G4OxW?]?|?%oG&?U 'O'2GD?:?އczʿ"/\tID_:ȗG}Hk?h0vJcJƞ*m YdiUYYY F(G?ݵҸ+tM绔S\l_G?뒭ׯAWv?>t?N?yh?yhϷ{v?dC_9?;}0Ͽ@^z~t~tv?$ϯ$>tQ`?l;㠠u?㠣>lv?׶I;}~Lq0?>xO>?$|^_oˇZ?cێ㠯]o8nOHԎy?}{ +'? ' ?K?P?/~o~\k $c?3'] 8oF?ďY_,~(CPMO5:|z\HS4m#l@,I}z"HRnn>xw4~G09G~cX1VG<\:{~L9%m6h( ix—_azykQ?1*N藦u8ϥ'2{uTWenU`G?o>wS[-啾mqnI *r8e#s7iԌS.P%-=\=#OiMG?÷?q׀\ӳ{{g^?(>;}~ߧ'Aϧ5b$|'`=_Fsv?q&tp=r ( HJ%9#ܫg#/o({ j~>$|/&+kCv_Rژ'c_M?VhrnjWN<ƯYEswKK\J#$`{j'ʷ1C5? '&uo xI?4]GBk0U*6;Hk*AELdJt,՘QC)OL0*H ƨ(?  jWSZӪZ$%Hv@DO#͕#<JOTT(KRb@ni-ہA!EPEPEPEhGT;y]M!1A$Ŷ||c1|D}mo^$tp SY0z#p*y4s(z-W[ᯀ6. x^մ7!4|FlO?`>{HCm=ǯ8*K9m" $r)WR 0AC?~eKg0vJ#NV_9k &- so>\ 0OGot穦tyi<8M&;K?r܇_&g;@=(;k_Jᩦ?o?qmk\?OSK~@O`?}r~}~OӶO??=M=M?KdN_v?Հ@?yh?yAzo_}Y,$~_t>0? w? ϿAqQoӎHO?'ϯ??8?cp<w? ?A@ӎuI?}~=}x''_׾H?^ ?8(~oӎ_ܐ',ƥh߳0_Ki^TC-9V _kmv`}q뀿.W]o!h~hg^m`9_o?xO¶_ w\2&c-zGZ]n;`qy\{>?tsiCHxEχ袊?= Gm4?ԅ́m{>RǵyOzVCR[h|OaYik 6sl8EHRW^Ë ˦jh]Om:#FLg8Հ`AƚkM4.0c'+Υ5>w}}5vrk vo4e[Mok u/ \H&Gu $yG @lps&־#OO v:uѭΙӌr}$TP$ cna5I'f 1L=}\r̒I~gğ x='RV2_0^fXᣍW0V# 2Y䝕ܨbp@>o&eyyS]%0zԼr^KvZ$-gP)'}5jͧѶϤl~|O?[A'|$1,M4v?v2,vG]9{YhqO6 Ac@Bmt|ЫU}FC(`Hyzz~I^Md~˕[M;G[]R4A|VךT6Zt0lduu]H9U-4xGZnj\?ؗvkYq;_59 b-v45isG5SW-~g[AH|vˏ?~y䈼/3wϻvL{pqg5~OƯ_rqHxO?땟ןGEI<&y~~5?? WQGןR_/Ɵ_rW& Eς3gwp X}f۶cÿg~hI_~Z`O)s<UDZXi2_m J>>Dw|䃍+_ڕ~H'c_M7qW &'*Y/Drf}YR3nAs,$1G;gv <Ҽ}tEΤg٧eRѮkO/ /OhSؔuwY`2$0PX5XD:=7%:Ŷ1" -dbk{ʀ?~Uo? |+YKڎ[ :I(퍷. 
!(B1UO%.|NTҵOytSx|':^yz_{L{qj B+v7V9v:_@:K5w1VZ5*K%p<|={mKCXM;yhD 2o xܡS؀{TOfH(dںߩk A_k^]'zzo鶋*]Ko,!peq4jg'ǧ帷s0InCF1^]H]]JM#},ܒMCEy!EPEP#.c;[}Z ˯.^OjlkwK,/R8imN3Cǀt;uݮoo=E%6wc\`u60W֮5#xn?Úq7a9 I_sjztB}/Ky$fhmZB )b8]7?i_~-)t0E,P ;T '洩(lD)U%}Zit/;u7~05KvD24kr9~Ҫ\e\ 9bZ>]-]꺁ѵHN½rCcoiwV:7IYHO,1#!X 5;oi1zsuh0Tus`5 ]tMii$U%ꟲ[~*KMmcpfd\w+śþ$_G+O+kXUΛ]AFTy(%{k~o/ٶ$~"=֒ٶ(̐Vm,fUn-Fk;[+v,4rތ؊c[LЮ4}NMkXeYϗ#(8,8'~$l44~?{v?d?C~;`?`1=H=H?~t<[}2I2I@_v?`1}0?}?_rOAG_oӎϯ$lw?N'?q?܃A?ߧqP}O}_rN뀠ӷ?ov?}>|Aӎߧ$S?N?}OEwao*-,r,%>Sa %e_~?qp;I/G'מ/tߏ ? $0ߪ7i:0hQ2;zN.8\m,D~eԡ ^E8~*xX|+q+u8G9a^ͱ^30;12/'.x~O<˧(׍d$*2g Ew$ſ9cqCW3_-,|Rq=EE(K>g8Z+XDW'-(K>g8J+XD8>e_N<]Ǩg?/|whc߈xl??%a3_-E~; ٸU>_xE?Ez_gOkq57{wYn-gS a62⸋.IՕhtM6GF +UT)+ig? ua[F>u %5HcDUTaT ㏧w?o纒擗sq劏`c?C;}0 aXxOz+k|'`=_F׮PEP^?~Dܫg#/(oM%_"irV49?7e+|/&{Pσ>"8͠^u [N@Ӥ;7n!8&0J[y9OVWn/D~LQ^BzBz}f/eԿG^BzBz_̾x/Wô>5О4ô>5О4}f/^7}Kx=_' -?_' -?YAR_E{;CW KO;G;CW KO;Gi2eԿG^BzBz_̾x/Wô>5О4ô>5О4}f/^7}Kx=_' -?_' -?YAR_E{;CW KO;G;CW KO;Gi2eԿG^BzBz_̾x/Wô>5О4ô>5О4}f/^7}Kx=_' -?_' -?YAR_E{;CW KO;G;CW KO;Gi2eԿG$;G-~AhКzy_[t!.^zϫ5|vmg;KN~q_wpQEo}g> M/}`j u_pҏ޷_h?yhϷ@O?%C_?v?`0?R?8(?{d<Olw??G?l;}0=??}??N: ?[㠣< o}^$O??ӷ?o?{䃿??8(~t>'?I}pS?>x?׾H1qQO`O _}^zw?);?}=}?%N\:}?O`@~t?SF^z;{~>\;}rT_&8= ?_穠>מw9??>x?>x~A?ϧמ_מOu?cd??Ӷ:v'_ ?oӎ?=:@l:t> O}^z?8(NCn+Ȭ<'`=_FQEῳ?_Q'[r hI_H(Ӽ_{ozv-~̷U ?/^iVU孽aH_i h?Cq?Wp_ Wi>zA6@=bwP ?/G7(v}to_}?Oߧ@8nQ+j?~GC4o?A0 _O P/ _ZnQ+j?Mb}^? 1}}?<$?E¿֣ _ZOAt11{?O`A30]b}8nQ+j?~GC4=FTC#@7@@8nQ+j?~G> ]_^z>z?>O/ _ZnQ+j?޿?Oz?E¿֣ _ZOA<Fx@C} b ?/G7(vL}/~ .1{`/ _ZnQ+j?4=G @7p_ Q ?/]>.a _O=~G?E¿֮} G(~l? Mb}OQq?W埶['mBmZ[-f" vPO_C Mb}_B9<"J@98둞 B.c_G=Hw?@s~oӎ'I:t`?>?RqPqQ'?$>t2?> O_rsϿAqP>8('$_?뀴A#EAGb/ylG?>x ~_9_|Iºq}h76{Yfcb]'2{PSu/㠣o}_raS>?{Ώe_Aڀ&?u?O-OϷއOncCWOG2 KW{t7Ͽ$Su/q<@GW~Be?A鷹M>~_'G|2 KCW_G2G1}/N>}ߗ>+2 KCW{t*x Կu}M(au/{yMϿSP*x鷹ʞu/_a@yMϷSt_}f8Wn! mߑ?%O)ߗ=Onom?!G2{Su/7[?O{2 KCW{~T)qWO@u/}8?u}~?ʾu/_ _}~u_aPSc}N <}穨<@KWf? 
xF[ikNNR& #)43voϦK>,??:'?'?R ?8y?~^$=HO?ϯ;}0c ϯ@?N: ??N: ?^$ϯ$>tNCm8^Ec'!?7O?QEWϼ~D^>/(?$㠠㠣&_rN\P|SoϷ5ٮ.G- JдI{ :nBV \}Wn!=:`~~GA6>Oӎ q0KW~Be?A鷹M[>=I mߗ8!U>zj=~uO _}?M0t_}l>+2 KCW_G2 KW~B&7[>ߧ'?ʞnm3G2 KW{~T7oӾ<#o2G1}*? Խ5}1[?o~ |7Q6S Կu}Mho-ߗyh7[>ߧ*? Խ5}MG2G1}7?ۖ<ߑ? |7R_ _}~u_aPSu/v'o52{ |7R6L"o}O`-?%S>1j=U>sg9{PSg_CϯSa鷹HF??A\>YmIX-aicPD]K$ׁ]?׶I?lu?_w?Ө?`0r_w^_r8Mg;KߧN (< [O+s}O?[$Y?^do??RN: ??N: ;Olv?N_w?toϧ_}^ ?㠣>ߧ=I?I)>x?z[Կo=H_׾HOqP! _WǞ-:y&GjM6 Cum݉!Xs&_HGc_/? x⇃u?_ |Yto YZ=ܷF)s*Ω MV#l~_%XKy7Mѵ;[Rl(qɖa%# @?{>>>5{VNagniyDHq$@}W$W0x_(dO[B%\8@"Fo,B}7ڇHx^u-[mCkn6a]!qր9о?jۻ>:Ō]5Me{W,겨;THS(V#H :~x7|Keeܼm{w +Kq&0TG?`o|>/1xեn9W^}OT՘ʷ3°쌪6C5ن{#V^CN1_h7`& o<>Ynk^3 _hfҘ^] tH`eh`5|-WŚ&% &kas\#,R >,VQWH{]7ῂ#Okh~cSfk{"1v[Vn\w?e߳֗5>E
Ɵ|t[4]WI/ KkZ`yom,|寮YPR/'g _PgjROH\կٮ.RdwF0.lnM~|'K?,|Q}u6e%hr$q2&$. o,~8N>SLw_YӮT"h1L64O"sPn|/ngυ~!x5{? ;gGIyw> <%{Eˉ# \د }~ƚ{ I-+P^kͨǖd2BEN@,Z9+ ixG^O𯏼Cwv[\[!3`. (OګAlw< x_ xAn !JM4r$7w@ ו; yo|/OCNe]QmN_*#@ ~n=co [7%#ԗⷋ",p-骚m[q 68$A'|JCgj_~b}4b9!iJ#Cğ n>8wÿŔjI{Xi1;d_ '¯_xGJ>_iuFk>;ﴈٮ'EoT6 Lxq~ [ǟ4|1 i^m?Vf7FH ?M^yopů,MI23GBb84Cܑ%\`2(_OKw_ v?`1܀>?(?8(?$>$t=`0}_rN: >ߧ{d<{$O?>t?x?|b$<'`M_F9]#51QEWϿGtOQ/sܫg>hIO׀@;/OP?_rO_ܓ>|\:ox;}rT ѣj ѢӁ^@Q@|kCkǀ|w4ut}Kh]҉,& 23 bS_~WȺ5ڧ~>^ [\jRgZ8fXEh1bĎ@4̟V ioq~ѡhU3S1$lp(\@|W+? o59״ jZm]^sf(|6>|m7=s߄Oytx=OFe^63$eY@wI&YI@s;xcgn_j:U #S-ƪ/ms4"hʂ|exAG%>AG;j"Z1 r#qљ Gq_>6 /'[K9&Qk_X<{x?Ѿx{MRH:ckw]-h~aBWEc <~F熼MNG<3d"4FMjHIkbD\jj<]+F{Xe5>u%JyMHP7/]m~ҿ |MMO?8?u6QZ.ywAtL'fhT{_/e>(~^%Ie ooշI;Yu͐iD_h2GL/sSߌ <gh4NK+Vk3h4 n`o?ϋ(( ;?s9Yy?9RYӝ?/Y?c???qP?+$oӿ9O}_ry-_o@=(;_ӯ}SM_r,~~ԼasXG$0I H\@gePvX ɿTm+u?qQ_׶I%Won$_MYg%}6t]O?>{_M|U_%A?%VxtK?g'_׾H?ݽ8+Xk +Ym ?/HE_ܓ/; /xtJ8m?$Won_?P@:.O}{txYm ?/NA /tJb:.qTuo/!xܤg?8x?MWg%?rOIU&+3>@)&O<Y졲I+ Nǿ ?85$W_nO_?\x7L俯9?1RL~&f$*_n^Rx?MWg%} _߻+zdϯ__cI7LIM|W_%}? 
x'&?}|~?}q__+39Xk⯧tJ<g޾wp __W>$V?tJ9?&Qm =̃y>94-yqk F{Z8XH $I%|I&*/ /o ?/NhϿAqW; /tJ<{I7WMv_xOV4/Jk۫I<&Т4/@y{QEῳ?Kzrkܫ~[^ >k _i\w\uw?>$*k xYm ?/{t]>*u?Xk +Ym ?/H+s l!mF[Γ(d)RWc_ ^ r%V?tJ8`_MUg%wO+1/ ˏM~I+?1x/ ˏMI|UDş_ЄXk +c^?c_8l|J~ r=2D&+3RK?tJ8M//9sOPu_1/ ˏM<07L_$ş{Ѐ >%F/9sɿ_'ļ'.?7q?$W_n_O0W_gI7LX&zdC~*I|UD_WO6Ws7į&?^4W&@x_MWg%}?%V?tJ#ߧW_Vb_q"I|UD_'4K5_xo평xN\o/įǿ>{sPI&*3~?&WonMkKN\o?>%F?9qɿO /tJ9?%V'4?Ĭ'.?7}p#b&_cI7KK6W<$*onrA_cI7L^?I׿%Won_?Pqymyt~ͮ/lq{yQ$wP.YUc '{Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@docker-1.10.3/docs/userguide/storagedriver/images/btfs_container_layer.jpg000066400000000000000000002035511267010174400270220ustar00rootroot00000000000000JFIF``pExifMM*1 V`QQQGreenshotC      C  u" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?1-5??^  ~M2Exo3߂ o/_O^ #@_/4tWy~K~^ @6_/TW/z__ o/_OTW=x/x&dȿmeϧ@=ʊg@_/Yូ@=ʊgKy^?m:H*+[_ӿN @ /y,tWivL=%Aj ! [N(p+{|DVoNx }3M}KT-0D;&]7Q ?SPj^][FOqq((FYTNn{GxZ|Y>񟋼)+Ey:2 mL٘n-1$X#mxۖqW1_o[sOO&[_qad}4RL+#,pA`"^&+KRoڟ7V t,#/^4ln$2CWRx?þ"Mz߈'I,4";o5N$RX*p螥Y ^^Z٭m eM+$k;$<h) 񝼟oS_-~( 2_:-BeHnDbW[oW>ÏaӼC<>xLI/2Hc9??$ 㩾|h' ? w7$MLii$Դ?N?ui4 h=rȼOqxW]W;D[Cu,nSTWi;$Yb-?iwq ,x7̒ܛM܌r^IPp S ?[lOC'DkGL ϟ-^C90]7uzSS 6|WWIn&kA.~ }\r|?NLG?oÏ׾6kψ'! vZbUiK g6.~6>ڧ%*xcV>no>u? ^[\666byo hra|O OjZ&⸷{I?_Oÿ)?Gxㆁk2[Z|?s!٣KUC+%iN?xg|;Ox^=em5tծXx vvX?1.-'7Ǎ_/h ~tv3_O^υ;}>?>@v>oӎ~tv?$?=I:ϮN _$:[ ?=I:Ԑ? 
?woϧ?CP?|??O`OO?4v>~4w?9?Ogϯ ӷ> ş?E:qj<5cN^(+|?%sOmgW{JĮ]_ ?oӎϷ[`?d?_c??#O_{ ?qN:H??I_ϯ;}~> 9>oӎqQ?{d?L~L`}1w_׾H1>8(v㠣<\;S?>x?$/A@?8( _}^zt?\_뀩ϧ??ot?\3rAq/@NyhϯRO~Ow?*~= ^z3}=MGwwc}?N;$j?\_db )[v*p u s'Gl+_ x.4I9n]1+xG\??O`@<Z|7kơ&G&?M*/;(-~_eBeAu$g%?Ox}Vvkt"a7AGD?>=M[ gNSʞ!lY Yn#gv8$՝>x?_ޏ{o}MNj~I-4ުyv֫v|lf@U< '¯/+]{sxRUZ34!O< (#穠$'}#ú&ÚB-[͝4$ǶH` xC?^4|Fdž\xfVck&[G$hy,F=ÿ>oo,?#Vg/ڴ7_ ̚Ϋ=JN[m"3ဖGvsI;A2<6c'{plW0ʒ1r6FEefۿ??@@>m o xS%&ϋ<x{dؠӮY|QAq;˒۾P~L?^z'NKGE|F~h b8EhaKLQ,jM;Wgok̋k,wӨM(7|1}H5%D{\p ϛ D$|D.Q<ўZ挢/&$B,*}7W3im(&$emrqaKyBxWt]r͆}8LsM4q)gp_?$7kiz*璧o}?O`A{u?穠穣~}?ow??vO~^z >yhߧ|Z4h[[ګ~-ǟi/~oj(wW>$f,~xbω_]_@:.L;}~OV#;`?R_מ?[qc?$?ӷ?l;}0?}?_r~t~tv?@_VI:?N??>0^ >?[㠠t?㠠}_rN$xPsSx?v㠣>ߧ ?};}p? O,>}rA? ?ߧ'מ?_!ϧ> O>y{}rT_%OO?yh??}?}?od'Ϯvw͜Ϳ f7Oؽam\O&/?:MxE/[ۧPH73@KN[v>xJq?yh?yh;>|v?}?>t>tϧ v&^z34~q?dK?>tϧ;`穣?穣>ߧO}O`Qc=;oϦ> }_r_מ8(}ݽ8($>$~N}_r g?ݽ8(ϷoN: ;{I'S~z}~E?_x.?[{'?7}|vy t_!܎+Y&DP#¢I{ d4'h?*6$|DhÝ) 6~?גvcO_{zΜvM*FqCb]^_s$n͌dx_oY?Y^G'ʼ*4?c[Wo?q6eVDz7/-94{|b?Y:+w,Qe??὾1 l޿s_kstQ g{|bQAG7/-95tQ }o_9Gk/5qG7, l闷⼎?ܳ|CB̿&z7, NOoY?+]??\ŏ-94{bQzבGψ(?//\̿R [Rqj-i3ǒI\t-S}=M~7x~KyHB+WR_SJM=2;c?}>~SG;}WykƍKb~8ϵPEP^;+3k?WJğ Z0E;` X~uce=0O2F8&?SX7 v4͹tYZ)&Wx9QpSI2gY|E/_4_TugxMbS4gEXv<zW|чMT?fBa8'5~&CvoԑPoG8Cf ꭈKwcs.mڗmyC+|Zy5 f#vC'~o3I/׶Е~,$~##?=kh>oVwK<}g5d.I$].GL(e`fڛqΟ&4x/]Cyg${u͋h+:]mb^1Q_ p; _ms#׮z~ ɦ?h.lm5Fr7m"uoq,:#,nm\~YSa w[w?tWQG֧1!^gON>&\G[KhET6?ϵltsu;i~/]M]K1baE,][x^;?~+(Ӕ O)#|<~x ORiG? Zn4v$z͈?\½>>om)\ο%ҴXcu ͋Ho?o0d n1>^,ϭ~(%ɦZ?fݘ}zmۉ<(/?m}^/[ӭbum4]_1bYSfsEhO rxXO1wGa`Z0dd9b[|d|iu|.GKv,om~i߻(/? >>z_ kuEqjQRI 6! 
ʗݐI ]_s} 5VUr|j~6|-&Ύ_Z?Wl)^o2xA6gpmSbJ@ r1;U*sRv.DS%^U{Yn5{?%N\OS1{y\^z8?_SQǤ޻N&??:<b]6x/?vt^#=g-?JO`@;~yh?SG~;v?O?ו'띧oo;`t?oӶ >}~~2X?_OOӶ?> מ_q>ߧ?cd?_t>}0Ϸy@ԃw?cod?'N_v?`0 ?}1qP?N: ;'o$~:}~N|GE8|vy6}=Oa^Oӎw &7?>:y}?[qג@://MCnM}9"^U?~ϿO/Z/[ԣ];[݄D(r8+$Q޾&e|7oi5]WJcՙG3G;ׂJzoˎv3IJ;}i+Tu ( os}?FQ'Kkh '9_~/^\wwW'Kv2HW62BucM:*xRͤyuX^n6x$hأRTQ׶|&_iNe{tmB8doV3u<'x> "=sK\dx۶1Ϸ\4ܢm]^ϥMFJJҵӪ{~WK7/Ph3j׭Ͷu&!Hn^>Tg|`yc5{ᧁ;K7+Pu+jĐLgeW)UW-HxEL9odҞKR|W}J2>_'hϥhxšu4VHx*"ȇ ʲƽ o7hZtۋe=:a}w w9,ƾ^cvkuA𾎷?!ՐInu%mʝ=ri8NUe~)?G~И7w%vݝԛ^,|Z'o5gN-j$1򣕙pNp;Қ'? R|&}V4۷nɮGd­}ofVkfgؓho*B8|VյRiF}'R8eg<5ׇ-u>x|ȁ"]<oo_8>;ݕUdp;M%o'㽛o Lֿ)fhG"%;Fp2O$s~ +:|%_M,RV䍎v?|3⟉: Y:D11Y wDT8v\zjGi?ss_[(R,ҽ]u>Ύkh7k˦_t-׵W?4jv<|E͙mmO8+ohOoį:eG?(w +u?%hJ[?I6eGBwP߉_e_#K7S[+l&Bר/N<}+^j4)mm[ڮ7܌:ggW(hPm5t$B@@$n{ξӚkZ=u6-.xWQ>ʪ\,`g}+ M5淺K ӮLwN5)!nl,0up*G{X.uF~sΞY<746o#Jֈmcv䌜d5N++x ` n}xy{?Sb+y4}I5YjK-29xypk$W /~ow࿵5Z|+%~ԲN~dy;W=oK՝D}E~S|>~$j_?!~2Rm/ qjٮu"yOkmC6(kww% [~-h*|hGcH6Mm,e&̛ [cٌ1lӻ=^PyEqrh`*J8=='wzZ=\!Y؀N|?"d>$xm:> O q2Td1qq ؈Rk_9gׁk M.O񦈾"N-cBfmF3*ڂH !eܘG?X,/afE7^2RW/ߋ_iύ_|'w@Ӵ'Icf@KdڃgžfM>p[z&Ե!9 41vCހ~W7?y z֩ tٵ xԨ X#9?6gOyW X>[]gZMHBΓN,`]߷clOYhJ+g/h!h{Om5?DԵ m&[ɄKqws* OY$Uz@0/ï7~-cox嵞EWH.BA;yH,~p?֧\)7ߌ57ՖC4:g8Mam*@TxSom^|)ȢDu]$F9̹y;ſO|~#jc=d5h~è3j lHk|~d ~1$,I~5xw}W=edbӭo۰[cjKl˘Ve}7Y[tIyy͍獗R{IUh"iP((࿍gV}g}^O9IMNq\>~|=➡?|Er,> 8f2vޕ_xSWtA׾F{gSgs%]iЭ1H?B0R`"'k&];Þ,C/_wE|)o[a|-^51&BSяKxO|;q4:lhHCx5W?NVOXF<u}w^6N_"GE$}H́žm źW'"5=?QIk+䵸IuUc I VRU@`qȠgrwײiRxoQ[35f@*2v 2+eq|4h#JÕ 뎠%zf^8[}.=r-5yPoɈ ˅ /8{2ld̵lUm9pf?= ?yh?yh}[OӾM\v?gtw7 ߳G^x붮'itOv?Gz Z{潺??}ls}s?=O}?O`B_s@?_穣{c}?O` <}9c|t?lt?穣?4w??=;v?:}X?_?ϧy@_=H3q 2?'I?y?ӷ?  ;lϯHzy?N??_}_}zqP?zqQ?} ?}(umd??|o )(d?qx؝_nCc%q(|,;G~ӿ ?d?%EߧNuTj?4%+i+Tu ( FKI)tl۞j\1=\)Ƥ&eө(IN4?!Ӽ=yaۧf%#*0lHPx\cյ,>; !c!61]+&BMJu;^OMό.|2-]4x+G2HftƽF.jZi2h6,:Y%}.9zX:| {׿qWoKOx1|8 @Qiwcfq󎙪x\Sm[PkBWS̸j9dڪ_k2:pI]3ۥ;?=Oo/5-CַMepcstRxzM5ʵwۯ_=XVvعxo5sw(_FY<">ov?^$Úz~R/G Ek}~g$caK%?Frn4J)~ѻwc}>?=3?;1?H9__t-ګ~,ǧi?/Nj(wW>%bMvק9*]a_6YtH!Wo9sp&? 
H1I=7Fh}*yUv8۝> Okiq&p Svf@nI< Rg糹>ɦOg9mΒes-3ұlb_gƩ>$[+{ e3m)'ֺO_K3m^[:hV& Vhf:o]~VVl#)$Y4Dy·|(ko_ %zp09a&.~7;&ۺXF`R&0yѤͽ"+3Ce(xRukzShzsy` k4 1 1+ִ= WG&tZBNЖ& P c˷1V9=GxsE7,N֥6iU$8g8"K"l$a1'2Iþ|W"|'oq |ڊ6qK9|🄵=WJއZvŭpmKD*"1p*|\=SIӵ G7O{[鳼2@BeH\á5Eyb !;:[k O*:I 0gYd ol&eMuk]xXKU@͹!q9vi\P|>~ |'}q/֒+kK 6 OIOe] Km-461V"HG  EQEcx+OV}m̐][%u $EVVG5^( {k_~2K^M2*H:Ϊ@Qz~o^PJ߲'¿|!_ xId{DNe֊#aP}Mz% #z>ZL"O1#U_`q>0>Aegwេu r-vI4qI䑙lMze4|;/|/|##}q0-wx" 9euIMmx+w>tVjwq1P d3oQ@_s~>saykl]* ǒ><u{6@<Of?w|;?+Ű7S|:?F}?N?x_Ͽ7 d}~w o{'o}?N;?מ/~nzIڸ٧M/Xx/)_;@;뒦??O`@8??=MяO`Ogp?>N_cOӶ;v&yh?SG}?O`~?%L:ϯ??:o.?'?S@3Ͽ^z~t?@ z!<N???A[㠠>nޜt>l>lt>}q}3/oN: ??N: O}_r@x?ܐ>(:?\m;{<}>E?xnɜ}_r.𵏎|)h^vgsq9\?KP[~_'??ɧt&VgƏ7u4mSZ}spb E&0X c@ ~'?@5 Ԛv|&#pN},/S?\"O}N:}L}ϿRw_|C/-S E׿3?3 _ο_~ ?}3ϮA;ccϷ?#W+/? L :_S8z"<ϓ?}3ϮJ>ߧ'Dj}!g7?_(? ]{?c}?_gϧϕ??Z|Aog?:?@? _cWI'"5Ar3 T?|AA_OH4-P):}?ߟ8Mğѿ穣"5Ar3{iOŶ3Z66e}B0Caaw`8TUBv$ܟ= #gβZ-[/,%tݷov_w?N~r?4[[}WzxkƝKcy5TQEWJį Z7W{Jį :/_מeO6g\OɁ6v!<9J OOӎ=MFVwՏ;]G² [o(tq>[uD=PtF;T |_#4Vp`X7`iw?Ө?`6 7' 0(n͟A%?VX2ٵU mN,nwg=i/C?#dQ:lcBa|v ?#n73Miw? ?qRcu?G7$S[ae-U$ j6IݜAhCQ+4WQwNYM9ܮuRH֡<'tu)'S9oK _joň(Vautv^5Fҩ \.CSc BG]䂗 Y0 m `A5?>{Ju?G=m]ƍ]bm-T}*M~F.769[ ?'m֕I?-iE1R0Il/ӎҟ~,=OhC7+O+-[LΩ7ͷRMZV,)93"vHS }7\wⷀ?8v?iO?yv ¹WDGxmmP cŃo:UkM.|/+jO"=iV}P%vVt?$qQ)'0u?FTߵ7-kO~$H<2}S(@ n~Y?(`##4nkŖ>|WK公JU@I"hFvĝ1]?H???X{:#6oڇf¿[:_/ GGyۋ2[Gd$q~,4]5]*q@q_mv99<}>iO?ΧWGlccPo;wnlZ/0#y @l I-u#8Vk8bdEFҊW K t?ǴgSeR%ѴW#V)m6󔝼[U#k^=΁_7ɧihUX~цTۖR20 ϮJ'gScT_Tm\KU֣y:Qy >~T6k) McL~|R\Md('^z??Na?x-揢ⅎi5+)EB1:N2擿@O?O`Lqϧ穬M_穨7~Iow^<b]q?G^x붠5?D[ۧP돀 %_h':{w?O?OO>lמ{~>,v?~@N~ [A_穠~tϷ?c?8>?Ө?`0?^zw? ?8(ϷAA<}2I'?7?> L3?מqQ?ߧ>lv?$>)Nݿ#;<?A@?A^7}|vy6}}_r}[Q?Ol4g穣>ş?=_?t-ڨ(ω=3k?⽆wW>%bMvנ@:/2ILy?ӷ?L?_r_܃t?㠠_rO_׶I:ϯ?N߇?ӆ10'~t8(==_$=?}_Ϯ6>x?$zc㠠?㠣v?$pS?}?g^H_}^ ?8 ?ߧ?^zI?#?oϧ;?;}rT?8{4OӾ?|v?㝠yP }?N#O穠^z_}?O`??O` x?gtw7;}~wco^<b]q?G^x붠5??`[OonCkg\Wa_sϯ*v?@ qϧ4g?yh=1>?Qc?>t v'מ=M=:q?%C_}0? 
?yAԃ/A@?(?'O϶I;?w?Ө?oo ?܀8(AGo}_r@O?_gOg}^ ϯA?ߧ~tv?$?}: t>򏯎O~;<__r:O+Co?@ϧA^}$ɧ,;F>޽ ?yIp?\;{Q[rT?%N8y?'}^z?4};>x*w??=;qמ=M#u?}>Ls~OӶ1>lyh?=;{v?oo,w?WD:GUy:?E:qj(wW~$fM=w\a_6Yр:/?R0~(|F|=M}7E\̐@®FI'wo픲Į 0R<7>qQ|lأo/&?P~K~16h?ӷ?llQ%>'66Oߌ_M9'4 Ϸ+bkgIo/&? }'|ko?&8m_laD_v?ϯ$x66Oߌ_M9'4cq$_#㠣'y[ ?>67K~1gG}6{/Ϯ>{ƿbkA_ll-_r=u? ?=lo_la%9y$?_rN\_K~14i ?bkrG?_[ >Cbkb=>ߧ'_}^zlnbhϩ%?ᱸ[ ?e?;?ogK~14rcgio?&8S?OIo/&g#lnbhs{!??=MOIo/&<{) lX tُwom?#H]?Z\'M` = ׺v?@ qϧ{s6Gԟ=MGy{{itOv?nzIڀ?t׈?A`KN_t^#=g-3~W{{yh}?O`O?dz}2X_?};`O_מOӎ`ettϧϯR?[:ӎId?> v?`1AG?AGoԐ׶I;}~y>No}^ ~tc㠣??!<\??>x[|?$xSQ?q={'_oWEy}|vy6}}_r@:O3MCo>s^?>$ɧ, 7O<xYAf2~YPSXT%&sPZ>*~^&GM;gc%ċ$ }럛|'G_΍GSSTP)Lr.$9 ;?}w8&~ru?qG_I8F?>7;CR$߃)W{GCA?F@)7gK ;?3UF?>6?ࡿq# !|)7_GCA?Q3j`ߊCPahwܸJ")}2[.4sMoqm r9WF"O?d7g R*ڤk|D+m̛:Qj5iKvto'*Uc8[NL~ _穯>~-O ӣlׯ&WyoƝ#Kb~8ڨ(߉_e$bω=3k?׊o3ψ ڱ}?Nz/.#IVڐ 3I5_X&#:䃚*_!Q+moQ+-{4> 74Q 74Wdrϸ?~k|$_ k|$_ E4\??_ ?xWE ?xWE~:GM>?,OgW6O(G6O(_QO> 74Q 74Wdrϸ?~k|$_ k|$_ E4\??_ ?xWE ?xWE~:GM>?,OgW6O(G6O(_QO> 74Q 74Wdrϸ?~k|$_ k|$_ E4\??_ ?xWE ?xWE~:GM>?,OgW6O(G6O(_QO> 7?8Wk7QxRhdYaI#u9WS ~5g_"Z_*; 8ϠsMT\npY?ֿ^z_CL?ק$z}?O`<;?v?dw7 ߳G^x붮'iuOv?G{ Z{w5Mx$I=?厃ϯ;qϧמ=M}?O`X`?w??G>_}^zw?_oӎ˷Qlu?d?ӷ? ;?8(?qQ{dCN?>?} _܃ӷzqPO?Hzu?h?^=oϧ _׾H3?AG_oӎ_ܐ??>7|vy ?|N\(}|vy ?dd&"_?߲Lm on~%rnD#>,{ ީ?9q?&~h??ZJWI_ҧQEQEm|8Mį/[j+8BqTq\ՕJrQCnj^0o>:=r#V`.U-ڪhaq5֓smoˏ2[[Xcp c*k&F%tק$|?Ս\HlY NT*庋_2=穣>n٭-y ^ȈN#>M?>-۟ZO ӣd彪W4[?/PEP^;+1k^9*3i?A7> gՏ&+}S6%\GDe$p<~Y>ɧx;$jҝ}|K GR}O>;mF>5b^@|ƴ%eܺ [Xe+@)=2zcKjYɧ6qiݞ9 Z- Fs+s(ӳy'>"h~MwojRiVHIΗ# xgsPy"i|cv}w1e+{[KJ4ncޠr r*VI]Ey?W}9/Q6^Enf]Awo 6q-s楯Ͱet1K!ss0XyM~mgEF!EPEPEPEu\|i^ C(YBȞS9!u4MOMWWsAaos\/<DE˯$ 2*.h=Rߚ+ψIz懨iMqI&S=@Ls18t*kYz^Տ7ᘢ}CSI<ϲ(y$v 0V$Qstsv_-߷SS_㕱׼u/A 4o&m1dmw>3?ys6_dϧys٣M_/Xq\W[ۧPH? i%?cϯ??N>&yh'}^z;{~>,v?d~>t _;`>=H?Oӎ?89L?l3{@y?ӷ?l:}_r_מ?8(?AGo}^$?$ϯ;}0?>0O?@O?AN: >nޜtǷ䄌w?? gϧ? ? 
?{~tc㠣$?yI뀧ϮG?xL~|v;<n>ߧ 'P_%o.m.o+k6AW@0~_L|8<] ?ߴ>u5VڧMfvV h$AFH959}ܜۗ'kÍy?GG.;ڧ_'?;}i)_%JΡEP^!k|yx#ݎ?0p TvՔe9Aq4Uj2kV6tƪ-?f?öėR\XOţF;m g5kt$ 7­+jj2Xqq]o\.qjS^9{9O$SUk͖QI=wKd\[Z{~W=沧eN+Mݥ%$ۥݏ?f|?rZ冭y6lg Xc#NF*~ *ŏtjzGrӯ#KX4Ч5m[ׯ=>klt#ZLvB 8Yo>uQܻS-GuwxjC]H/xR^u}WIV]'DT+,eQbƭëAG3#<%zd <`=+Z*ir;4մozoy٫̥ʮw[;iMLJu;^#Nݿ(`L呠DByrH< g u20aR> # 9X'i?ד^Yib?Ⱦᔴ>$Y}(RheP,Wxf_ # 9^c:\ylR$e%ڼKe$t|=%wWmg v|@~ɖgO _kڍޫi,u G@8.8~& 3j:pGM;)#Rח4_qTʕYGR}nO>g|g7m'~#5HbHP剮b >`^V7#iD9鶾 #4;/ Ǣ[R$p9*I H|@b#9v>oOiTb^_~=xk|4O𾃥VGWiں<$2m}/Ӽ;W6oxA:,|' ALF(%$3յo fK\f!6(=sڼ>Qi8iV䌢|{ ((((zi al>m̂8ͬ2ǁ }H6eφZ}<:ԣ5lY;F?*ǽx=\ZGOI];o{G>*𧊭|; #}D?$_ԴPzO/^Xt=cXu}6]Vm-nͳJfolq"n=Q^__k%o{v^ :M&x{O6Af -@u~ w5Mg{iǶ.>&o|?z"Yigy䶱is"FC#9>7M nLs [-"0X(mYs:W̴R,Žmgtm/7>53ⵏχzvGvڍwmY"AoyBHi$Z߄?<)ښELc.<ًy!OYG_F5g_"Z_*d_~%ؿyy}gu}pY?}k|d?ZϿS^^z<q~Gy͜Ϳ?l4ɺx'jf7Oؽam@ Ckg -?JNۧPH7? i?^z;{c[%L??3?G?v'?h}>ߧ?2Xc1?,__מ>_oӎOQ_׶I?>IN_v?`0?} ?}?[㠠~tI'~@~?Og?}>|AqQz{ ?_r@> w?8?}?ϯ$?㠠㠯sdm!מ[Q:6cx~oo?ghzy2[TzecGݹ &ݔ)=NxLilW280FZQo_hæt\?G:gE&OONSY*}Lii~7{r=YOKaʿ$|_E}7{p~LOZæt\@7?'4NiGųL42`35Qx"?(dB\I2+͵#Å8n_9W5ՒZ^y~ :q>~,ǧh/Ӏ=WD:?[ڨ(=3k/׊\a_5^OaX&> 3j^%l~^x~i\њE `X#vqUNVlʼ\+vp})kaV}dԿ}?% Wh~_?Q_{Õ(&O=G9_O}dhc>E9_O}dÕ(&O=GA?Q_{Õ(&O=G9_O}dhc>E9_O}dÕ(&O=GA?Q_{Õ(&O=G9_O}dhc>E9_O}dÕ(&O=GA?Q_{Õ(&O=G9_O}dhc>E9_O}dÕ(&O=GA?Q_{Õ(&O=G9_O}dhc>E9_O}dÕ(&O=GA?Q_{Õ(&O=G9_O}dhc>E9_O}dÕ(&O=GA?O5g_"Z_*ȍUJ<'{gUr! 872ӪpWJF׵Ofy!&yVOӒ=k>K|?gׁ^Q2Z;c#vOc?szym^<b]q?G^x붠5??`KOonCkg %_`÷?%ϯ??N _穣4~?{?K?_t>oϦ?^zg};z{g?~tϯ$'I;}~NϯAמӎϯ$lsϯ3oϧ?_ ?A@?ӎ>'Ϯ(:}>?>x=A}ݽ8(?ӎyI뀠C7 1\C7}|vumJOYbv0?璾?N:߲W A?_מ~O^;}>?JN:v?'穣穣~[O__/=;{}_穣[%X?__~;`?=MSG?{gd?%+k=<7cNUxş?E:qj(wW~$fM=wV%bM`<`c?nȿ׬_%y> 3j_=k/(Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@SzTE"^<<`d?kφ/%񖒿5HA̒bBӒTlb ʤ3=`'?/?ԇM={2!n5=bo.$q$\[}XWn$?G7m5}X[g L>VS펀9#R>~l|GoV>0i:֠ZM$%£ Z2>s_4ɺx'jf7Oؽam@ Ckg -O}3?+;`÷? 
מOL??K@?NL:O_܀?AG?@I?ӿ?> v??A_܃㠠/AGo}^$<N_gtϦ7p??y?܃~€?;zqPOׅ;}q[>x'Oӎߧ$?}???Br=}=}??䯍E?_xModϧE??xM?_ܐKP[~_5|O¼Нc{>x*w?O4g穠Ӿ;woϧ;}>z}~>t~;`מ_ю=1/?w?l;?w?@?yh?}>O}O`?c?@0}t?`1v'?@+o=<5cN^_'4[[@Q@xJͯ,${x~.|Jͯ,ߧ^2~?g?c~8="^<`y~OX@(((((((((((((*]?_ʧ/u@4俰Y?? hOhi@| ]NJSA^^K($!}i(0*XD2]k>?>3jzrGɟCm߆) Z_x:phO.qm|`{Rd@Z7B=3 C;xOA^O j^Ul;노6e}{{͔Ϳ[ɿ` i:ffc-Pb-7  efIv7?@/~?nzIڸ٧M/Xlv?$?_t?`0?>}^z?qP{ ?^$=I@??? '}_r?8(÷ ?}1xPr?[?{䀞{Ϸ[㠣'o}_rN\;}p[>x?C$?[?@L>7}|vy6!ϧxOQOs^\+M?o7vwpY<$0@I$sE$`9cI{ 2wu'=G![{F@k"Vk6 s#ğ'?Wa3MTc~&|#U?zVꖟGo}?Nߚ?_xgҟۯ _X!g{#.?[}yP?=Cğ'?Rub_Xϩ!g{ ^ǟ{ ?>wK2 ?0' ?>vܿ-_l5 <3G7g' ??]r3oϦK~huaOi1/ ,?!g{ ^t>5 g ;KӻDхe_yGz-kZMi]Zν%><󸏟,^[%J֩Usaޫth_}N:g?dSzxkƝKc?_\ZxkƝ#Kb?(+|?%sW-gz ױWxa_6Yo3ψ=c?ȿ׬_$ 0$w [Nˑ-+`p@*h_giƚdpMZ=vT"!7J7ҟ$?oðw6-JGKk32 UK,@<.Q;~"׵&wcT`yO. $!j|!.hr\u-] ݭrs$ 󟻽ys6_Հܟދh>f!I |Py\/$n/?o_f7Oؽam\O&??:Mx$+-Mx$\aN`>מ_מOoϦK>,?`?_܃>=H?~t~tgLϯ$?>tc?ԃ~tqQ_@_rA덠n<c㠠~tv?$?yI뀠 v?}??O}zq~tgמ=I;}p}po -g? ?|C뒾7?񯯎O޽oӿdB̒aΙpk~jX~'$~j N׌?O?"_޼i44rݪL۳E+S((aC_I8IwrHUzU$p7(Y51eGtV<x_K-ZuYTDi*ڳȣ__Õۚ_QZ>hM^jZxLyX:w4JJ)Z$厭PF DP?/sڀq_M.d?X[0F +_x'Ru⛭Va\#:2ᕕ8 Mo4mZ}sY82aRѨHLw?t?L|d~,g4,{gګ~-ǧi?ד^@Q@x|Jů,^;sWmg ~ 3ׯ8>ɧx;$joyV?oӁ]'4اuZRש+3ԟp})i);E}Q^#'qj,#򯭧@%D!gzKXT:n^EsTfV=C,z-/7e 6sӵa(b3F (4PEPEPEMgjR2[pѣHeʢ:9''Zcuy4ֶR6~ݸ(3(ս@x}^ |cv㠠~t' w?)8?}??|t?H3}N: ;qQמ&??> u{}?%N뒧_}N:~Hn~|u~x%_^}=MxW'o' `ۏ@49?f߇vPɿf #@0^*Y¢XjJTjO۫ƞ5]~˿m5+m.ǡNVeiE̓SZu9;^ifRK]K5ф9! M+= qoMﯟ>tBo4K]SVv:׉n3P0+*wy,T4? 
[q/ºnj&Z}YèZ<<xgTa߽ӵlLI0xkWĶ^b{$'+LLiԻiKk>eEyZӅ=VZll?~w%N&k_][k/\_G@谓עhixw 7Z֥PK"fC6vx)T:?u.Q4Z^ nb@U)_ v;K-VkV۷z/9ErC޲[.+O[7g?<]b t_66x;3##/=e5oI:3Zzkym$U-qv0$|񎭨Sۑew \41-'5{V1$ׯ4xM}#t$;q*)58?hv o{43{LҌ%ڵ[dֺەi/ڄ4u {w59_64'Ho&}1.BCf'd~"_4VkX +KHܺ[a®I$MndmZҜc52Ӷvl}7TL֬+׾WIϋ5?.+;HQ mK.FbW 0q]&:09=#гϯRߧ~s[09]P?E#8y_cȞ_ YܵZ"}&mGěV݅ |M ۃOӵ+]_3G)Ϙ?'%Nj^+{?p@w?SFw'_XEM;)#Q_ HAAi㇕,9=]GR}S} *}?>o|N!z&n8KRjN.u Zk -5_ů:ڗluHVQU#3"s+#QֹjKyykw-*C3AO[~.K2N&敭=?y$Wl.æen@lQOScEume$dr;]k^𾗤iݽnmtFhv1fw 9f sԢܹ~S (6zb}g/{}>-ݘ;#f*ojb-[_ԡ6gE*'~$v~4ufI<7:ȋ$RDF Nݞs[ hx~ ;Y9mt[ĸh"h:g^\W#8)Gwmu~tG~㯌5OZݜva `Jw(Rߋ?m} ԴN MuXv3I+!u6+񦋭k&:EX4Clmc2'MQ2P3kXɦvFYmhvľ_e OAt o xcŌ^Ooe37Q1tj=cgç+o5α4i,f7۸Q0Fx"|6|%.Ilc"QuA3U,:tZ_/ƞe$&&k=[cտnqo?+ ֤&yUQQs:+$Ī?ʪ{x=v|}}׌5~}jk__L(2H%VR@`@7;+(#mՙe@-7-ZҜelUZuf+[owu﹉?tퟅi/_M~WHb?J ~|_~gd9#ֿ?O`<=`oӯk$goϦI=&ݎ)/` ߳O^x붮'htOv?Gz Z~{t^#>o-?JKvG?%cϯN0?  ?yAN: ??N: ;OlLw??`1_}_rqQoӎ{d?ӿ?8?}<Ͽ$c?ߧ~tg}_rBx?$> w?)O@_^H?~tu?۷};ԓ 瀧Q[O%O뒧_}N:v'?h?; >%֝? >eki:ww1 D)$nFF*AGC^?x_%@*-4{=7OO;[[xSdvFQGEUP1|@wYN*??N: XږE?VZME=O~_5#z=s.@__X|^NZ~hex5:ד(W3G~̞>ɭcoV RK;KE  Rs $i~h5l,|ocA_qiBZRR5ݝ־?@\N5"[+]=>?=lLw?wÕHؿ}WyoƍKb~(+|<Iͯ,GW1k?0 x@ c?n>ɧx;$jo{U?:ό-߉?f]_xf^iՕTRHƽLq)RKK^'M-ߌ*o&dONek?;o&dONe=;>_%3DDD_*o&kI}+?>*; hDD_*úu_s*; h;o&dONe=;>_%3DDD_*o&kI}+?>*; hDD_*úu_s*; h;o&dONe=;>_%3DDD_*o&kI}+?>*; hDD_*úu_s*; h9Ck¿DU d/(w,@T_g +[+m8Fꎶ0?B'6e5!:kEsa3߇ ?6OZ}%8~3j]G?SC`4w7_=HǜͿ㠠 ߳O^x붮'htOv?Gy ӧr_?t׈A`KNv?ՀO_מ?N:u?ϯ$>$>t;}0c ?yAqQoӎH&y?OA}>O>w? 㠠qϷoN: ?}3> 덠ϯ ?}>|g}y ϧAG_}N: O}_r@^N\N=?rAq'O}^z1ԓ>w?oӾ?\;S?=:OӾ;woϧw}~zϯ;{3c6>~߿=M4מߧpߟ=kc7϶sy?!?@R㠣?NLw?t[L?_r9__t-j~GؿߧN(+|=%wW-g#ثǼ=ω? ,ߧ 7?> 3j:p+4~M[~V[hRW|G?֗Yjmf+k{0<Rxc?N|#fgO*Y27C)^z{ʀw|3C'eG, CW i'^rN@) gN#?^w|3C'eG, CW g?N|#T3J?Gz{c_c?+=} , CQ 7?2xPU&?ӿ$=u 7?2xPT ?Up ӯ?oc7=w|3C'eG, CW gׄ^Opc/u{;Y2od ⫅?l9_'{c?wy, ?UU%m AW~fki@R\o1a4?/O*6xGA0GL?ֻOþ!ZzUX"Ap @:$\?8(?qTw7sol'oϦI=&^<b]q?O^x붠5oE/#P -Oaӎ$G?'C~z}~?> L;?܃㠠~tv?@ ϯ$O;}~錏Ϧ?y? 
'܀N: ??N: O}_rN>t?_g>5yY}z{(*99q]73\;xQϩ ]{䃨?㠪qƿ<ƿO _=I;ZqEƿq_j,<Zqk45Ǐ}ht>oӎ=I?f1SI ?_kzPO^Oϧޫ v bT5?j-w?{W?}5ǟ}4h4>W?(6G}Z~?2Q >_k(?=?^z >_k?2A@_}?O`Uk?;x12P?j5Cc v?`1_܀ ?8(?8(?{dWykƝKc?__t-ڨ(ω_ Nkث|=%sWmg.w?)O@?(ϷAGo}_r@=I??m':ݻ-l.&]cb ~^ O^ѵ x0\KP.1'N(G?o$UkoT5Cc8-c'O&3\;x1i?2A@A>_jfw Tk?|;{W?}5ǟ}4h^z;w2AG3\;x1}yZqGƿf#c_kOfv _P?L >_kfv Tkj5Cc_kz ?1?K.?<~Ofv TqƨoϦۯ K.kzjn,cܪ:G[oPy?O2IOl_w?oO-`4ǜͿf7Oؽam\O&??:Mx1%_>?t׈\|^? i%?t?_L???y@מϷ[㠣@^$~~> g}_r_܃}?N: ?[㠣>'o}^${???ov?}?+.xƝSJ?uj3ǫ-/O H|3Wݵ PN"$RFTn1|~-ƝSJ|dj-GxOAMnubdNex䁥@[g*y/(ƿ8ΉjC1%KϝV9F|<ٓ_ޫ6G.< q.Z,^iD GICpa_óqx~ mx_mRk]f}#.DGچFQ[ˇIo?>7]I,onvIlo Lh$zuh m&ƚm~&[s :V6tr-pWl_~?񅖫;]'#Y]\PC#mt `8 D'|e'ƏGo_x;-oNռixKĺ-6,ܖ&Vk.v\?gs/?&_M X\J#6ف C?u*69xlhexcW-Ӎyi$ʅr}V);eō\x>/`5.$0g݆_8w2/~ 㩼Yq|>eڤtv?ǘڸOIOվ9|sq_xU]IĚH#Z=T߇^׏E 'h7gHg0H$ @D2-G w%֗-SPi/d{ /#װZ-51&B&Wk:xῊ,O>5v Lw#<4LS?`?'|S,:~,򈥒)'urd7 ' 7-[v~/_']hwVvHu=`;ghb>Ǵ_?৿ UH|e%嵷Ni닍ܤrGtNhEi  x|uo׆|1q]z狯m2O@{^#r,B@ϖ>PY5K[Q_⷇~-Imhrkm՜A$7/b7"B0zoG>|K5|7|[i|5owo F#˖aCj~x xO@zfp ΓM#Gi*mԮ \ 3^4t; [O dD1hq$F|Bۿz0L|?:I/d fS_6or&D1l䁷h|xe]反M+m:=gVG$FTnv;*7K 5/QG}ZgSrþFD7#ed|•GSτO^t}?UFu4\~MiL<<|ܟ cZsF=cpjX;Cr g3εiy⫽#z`I㾹%($!UWǨ~o<]$Z΁}0n$sʨ6!sm\>-UyLgѾC`pr^q+|P/?:?\𭿇>"i(դӬM}ojJKj| ~4'_x<ᏂZcyi25ֶn1w4k>A<c^ mhOY4imoo$!f K6 o {>5oYX;6~eշƑ*J 9#?m7?|z޽z'5)}FR7xbbD02K Lmr66q7u.MyZs *%s!ځe2[h )j_#T'o ^iSY+f7y'>KNħC=cռWgxѸVmkOD J?q # u&75k_3ܺlGoes <;%t%Oiv?ϯ??;~;`ryA~ <=?=??l%}~0?> t? 
?}:qQӎHzrI߿?ӿ??9_#_4?/FjH^qTQEW|a_6Y*]a_6Yw?Oӎߧ=I?_rN\;}p9q?o|&WZ蔮OgwYq_7 *DtQE㿋/Po 2xvV5Hm[*vDzBHB־h٧ t ggu Xx,ɨEx.#6>yaJ\rৗ xCc7῏uwլW|v']#˲#dsZ|?xޣi:M.7ץԙSM8%lepv6#\h1񕥥}:/jwhZ l2&BA [P?^mM{^>tO AjҤgU.nbS,F!37oLc> ^o1/\] Ļb7(f'#xZ>/Yj^.|#MGCϢ,QD+<qk,w.o'ů 'o~φC/ G{jwVZ˺Iڭp2F{dG`wK :$i릛2o*kKw{iqj @wGʏWl'?e-;NXZ]9oX(B s7Nk>~ FPlZn]=Կiψ~"վ7:>k euڽۦs|弗>^GPd=[o'?⍽ׇHuINl`;K!ݷuO(zO)FIRt{XcΡ-\92HeBJb?Ɵ?i/>+[oox7RzeU/g5;6g"B0+7>4ٟ 3c[ Ҽaga$i?V=[+bHM0aS$3:L4U}*M?KKacy48ǭX(pҿToӾ3/dzߟJS^ٮtoϦ}?_r#sӎ?~tyM\$^?<b]q?O^x붠5?ӯq_nnx<#Z]x/21mA08>=; ?A\-Jj_c|qX?{d}~*>/XJj_cO[$I_c WoϦ_܎T_/?ԽϾA z?08㠣<<- =K̓?<@:A(>/X@8OR矣)c MKuY_׾H?8+ %5/oaqӀ(c =KI"w,%kZ7z#[!p 2t<8o[$I_g$c MKh.gIQ]^4_[E?L?O|q??%b? %5/cs~4xQ"h`1ogr3 z8SR8q _'rH.?ŠgppC?^+|qMK) zz<6?uxOgF(sG.gIQcޱ|qz8SR |iD (}響 ƟOc8/?Կ^I c MK Ƅ?†/7~W1$F(}3<Ӛ@/:Ч?s _){@6?tx?O |i'QCLFqoXI^? %5/csגhcO?E388ƟPLqoX@8SRc@8OR?.cIP5-G~&xoQԼ7oQoqҢF $ޱ|q|qL?=@: + ~8SR?=y&[9/?ԿW?nSz?eA/?Կ?t? ʏ?z8?o@8SR?=y&:rs _)|qXIyQz?<O[8/?ԿU0?r|qL}[$Jj^zxoƍKb~8ګ rqx;wvHU}@Q@x'6l~x]O/?eRmmo/!1FH(NqLs o)q _'9?W|qL}[?08) jv%TeA82:{{ 🍾!xcšnδ|͈8dH/?ԿJ Jj_cA@5i|B~ŽB/b $/1y/-Jj^~!Ч).=-/_)? KB9c@_){b[ I_g$ oZ_"/8?hB~Ž+' ㎿*j_cAO[8/?Կ K?S~Q K ~’q~/@8SR?珪|qL?=@5/Dȧ).? #'-?XOR\ ~z1M ㎿*j_c@k'(? G-?XO2\sY#? ㎟)_g?oX@8SR?P oZ_S/%Ƿ9{ޱ/?ԽǶ~/Jj^$-/`ȧ).? -YOc/oX@8SRO-I_g )? KB^ G ~’k' %5/3x|qL?=@5i|BOR\{ӗ K)? KB88c@_){󞼓G-Jj_c oZ_"/?i|C~’5 zď z?ec <@$vokc>HⶱcXԒg,[^1?w?nW|q?O[?1ɠ>O}N: oX@8SROޱ|qX=?'I=&qg %5/3xqrdQ,\=ЀwO^x붮?|.tO Bx&BBVS`AWa@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@docker-1.10.3/docs/userguide/storagedriver/images/btfs_layers.png000066400000000000000000002075571267010174400251610ustar00rootroot00000000000000PNG  IHDRe;aj6IDATxպoK: ф@,@{/ykɊIᇔYD3e$?eYfeYd^/geYfe^/geYfe^/geYfe^/W2,2^ڰa7nڴiȻw"1@޼yѐǏ}ŋ C8aقHL j LR|WLg Q QB;!zOAh޽K綘?~Ptwܖ;&f1 &hHZ& }Pfa$\ amȊ 2=dVӗrڵ߾} /#{+/ e*1Jd( a!N!r!4],z5\(DBBN!"i'Ĩq샰]"B-HB qA[/=U|M9ݝn- qC <~1#d|`㫃Kk֬X/-2#U]޲jV-ndu[ nWyʐiB#tB,0 q5BXuB(4!0”;!hf 'Bt˝)D.bC̜BbZ fi!D ®]/oCMެ;:!@Z  ? 
1PSRb ž sxK kБР2DB!SR̄ D[T Bb- aoB$FDr |zuV!)&fJ˝fHsg 1B\mAF@Dۅ r"hCZ>Ctk I!FBb(~6%#*L!qB/1O5!V 1[{f :>B !.e F0 ҂P[  a턀 lm!ڎV}Awr i҂/!q͈A,1[tBvv`{,}in> {X?8>q*SQe&KiAH'"ZXb@D~mAthXC1J CbO)m':8!RR<@ьȴ$VLMS;MԝqVϫ$>9!l3*1> =YGה>5ӯ>:+?P"qB8Bn":];!.CO! ӂP@Aĥߓ:uJwZs-F B aDt9$a4<&ρb i(,6d*/}[f|ø"X[C4B;mB6$yיyv!$~m=¶&NP;F]BW[[NHN>^"QR0iB&z ^!ڡ J# NOGAjIϝf@̑4BHAaOR N !d"D5A4` :@$Н77o7x<ФCz+pejItB\E;wd;@W 7&B(Gϟ?ㆦppe֌'N҂X!h)y')|\CPCDo#N܋H&5 G;U'Q\ xTC&V1\"1 a%@a^(c1!FzqM%{o$1 0pO#-ޒfm){˿%,[ `DAO/rXs˖-oܸQ5kXPQO?t%UQiӦիW]Ցԫhs1=z + SŸo>ÒXM9묳C$q0`+K,*E$~$=%kyw- I-IVfR0vgi$ݵ9 ijQjc? bf}4%)>qQGZw QdZ ) dAhQtw6hI@4HlHLyz }ZASO=IˤW^킄BdV^{)Qo%I[{~πH_~ǁr-|Aгcǎ/2YO=Qx ۮRmO]OD5,㴱@@)[p 9ؓDl A޿$. QN;{DI~~.c;V D N@ fzdΝ,<,IQ'Cǀ:0A4w;H3<RՖVDȨcrw"HzK]tOj șț[z%*:fOvOJWD'K#*H/}VJ˻gXO6R6$'aepQ_+ETwwzG)Qcʕ+ɬ$*N*y{a<{:y\,-)?1(] [lvK.bd k$"$ֈ s1 JIu5eIB(%{z2rD]*r7ȟQ).jE&!aTG0百oݔN73ݓ?R: jArfq!HXHia 3Aʧ :I#HRPd66ȩ| 2~a$*LTyޡ+l۶?ʥ+|ه@xw_3X|ƻRы/xu}s /pw21b b"7pÍ7ğ  Qv1(9<`ko8}~vFKŋKwZpw]UD^x{!O>a^{ ARdҥsRt+G`I9쳽lE;YRbҫ4oV-F9g}rLP rjl~ CCx駳}DMv~QVbjޮn]ZíƠ9nVEhY z@AL;\^R>bPaw )R25H(Jم@d3[$J$IIQN#fAhZhI@t<ݯ - yK0H<%FDDJ rNn / l./M<{sa7gY#eD3zIh?#9쳏/p,1χO:$T''%{'.4X#{,j } ŋ[$g}"裏{ۤ#+@Cۏ=XtN'|q_>HdRK)1=^g>7t{m!#8 ~wt=BZ2 T?A-Ĝf"Q gy&y饗@F6$` 8RS)K)$}brF#$$BuY"rC? 4%!+i{SK~ཀྵA{S N@0Q*0$)H[7'ɰ: "v56|u 5{MmDbOϏ †cVny4[ϓ`4mYaƺ͛7u駣%O/ӍR t,Z4Aycƅ &h>G5:, RpHqٲef?J1(̙~22'|b"'aZ>ɿh#'5N5M grjg}D䆯C?1gԙ[@s$=hT`= l'=1D(5H;92ִJw}_|q&4߅[x7ɠ,D݀pND'ʅ=.  
\@irBJew+J{gmE{ 6~ze9Sp8bA{8GV\.ل)N˹*?-T/T@N}0<(+iX' ƒ0F^E,%DAD !pX(E,ޅ W2C\q#8hI"jNT" Ie?:HB\%Q$r apT-!JX̘x3xȜfy]0[A)GJd?{spw>5r33rX3_roƀM}dښhS|nCl4~.~SKh!^# .39&!N2QȞ^}E0L0ĖC,5Grdib$@t6Mf7$\H2>:y&q)Nd A N0OUAtDs8q"[@>[b|a5_ ېȺ1UXoؕ| )eH T>|,dH~{-YDUnTfy^& b[ih^hgHV yK9v& İ 'd\EWBd G_X,'HZ2D"v!H9Q [= P63Sl|q4a: Wj25`)p'8ff`{ Xqvb[yX ‘|Ar=iD"`{S}ݗyLR+.j袋LؙAeFN5B݅+c~ck"AZuw 3/uFu2)>F+ RTq=l ȉV.Z9hfY KDec_ϲȪi4 ;h[UaoZrN͞x붼Zڪ&+ ȃ'ƣp4T',b,4F}Z01Obzٸˀ(M ?M:vR?Mn.L9ܒ5k(JJmMLz.Y# b旣C^ZMK.FHʓ73|g]wawG֭r_lQ "]-1@)< jY- L_&[O-+lc.YzSp HFnA    zlyb>Z"n0$ސٖ[9W+=IS@ D2h`Q6 <ϖ=,CJr-a$e9iܒ-O#T}՘>C(Dr`bt ɩxqp20e8;Օ:&Vb:kE9]Q)A:gL *IHg)1l|끻>8(Cwh ࣂ' J "3=OS$.$\N9iU끔r ?gl$O A*A C"8}HOh:O‰1_vuw 3ZjܓPc9Rj<~)$``z̨<\dn5+"LO[l59? j\Bc2@ 06=cy@B[Y@4JhXKE{%M!ٞK@%vX!bOT/|^_f8|9) *3& Ѿ"fs!'9Y˚@6Hx&sC8B13LГ9no~%Ե teu?D^? GI*cړpK+ag }euHk#H㰳 xdǫ'}Dz:uɽdsg^l!^9!y2Aq)L rwZ% & ™C1 :]5N0@< SE8lp|^ 2W쿾:KlOe" 1\zDD;ă@sz5O!L3$hxLBGXX VD)|:<w=T <م;$S!$مڍNR~^.NWMIhȼ )B^9Dm+YA )=J>)2MY~??YOZ͒v%x^|TsV2pu9$W'!|5{eJ/ݑ5R6[[5XxU;o! L* }@B= BZ/ʮ0 YQOJ^fC(y& AW  GA G/e_{'R {H "ձtux"MBRl]s3;SL;,Zih")U[ I3pېKnsG tRկ@8v29T;r @8{·+8.UQͩO!a(@H f&$9:>_΅ԓS']o𱇨\C 4> R~:rH/DSrb)c>f\nA엚C1Dcvh6B8f$)< Zԓf΂h | t.CȪU!ZGwBl{>&>dޓv4u4 @ъt r^W|2Z/*nz]9#ڋx!2:p0DVDQQGah KAdpm zBN5K@x] 5Ѝ'sE}Xx}0]/YՓ2z{Ovdۑ弇# D'q XAh\ՁA7SŎ$ #{,BLq9{[{o[uoƃTR/qoo!!9D9d#HUt< 5vsa |fV@cS_+ܔ]~\I y]!!邔sCQLtCu%cH{ogޔ҅5:BV H@f[uN[VT(r899V߂De).ԗPWy|2vFC"204%X,=|on*g|}zZ|{un31ʹwr.|m>dYxn y$J$GG ¬@ BHWg!+D! 1vs\>"Mp" $.&ol-Yq#C4+80]7v.*Gŧ"S^n(hNqmMJgδz>(7۹J}o:"iVO172L|ɔnk[~;˝Гl+$Ƕ&YP?egZBOhqi0[]Ȧd8@Dg<) t bbMymW0ku;WF؈A&. 
,'U||AH*o|u\lC:]ig+RBR?Yɖ;W5@ jX+MQ00,ຆ[@Eo|cztt[mAw!*@ɊaVS˕AzxVŃB3uRj~Gu <D1| B(Ul{Wq+df2/e&Iw b(lt(*h#PBzUuݫ⮍۝;{Ϟ޳gϞ>ym:%244Tg8ee*<Axy|y@HBYb%uUmEv ѹ9w=0g]bwwجf _ICAdAgv#foJ<'vzr$Wq3Eß^~{L;dҝ~m6 z[*vKulBo5MO/m¦a63uKzAޔd ܰͭ-yMwO(tc y{N+ǐ2l;&.=p&%q}-cV*=~8fnpcr oklvMYHmB̴ª;'F;g'J.}<-n…Y &qGxzFa}d迯'"aweqJaEiaR"˫?}Jgl:Q^ӸjInp=:;Se3,ߙ\Q02(|p×<G62:9Y㓻w˹z:l:d<&oN+[@iq*/niq`'3ZTlAT>p"(&e@]Y hAfBRaA18Arj@)ngGnx28{THB'6f== <8;j|\ܵzW&UIԮ=-KiK?/ J&r;ԧ<=8qڄ.w5ww/Dzw:*G!NZ3Юn=C{dfddzz>Jj3PC]:zT HS !{hVKy2\tk=sږ gPƧ``x_e0{:|iW7ٱTFӾ]|Xƿk :\@Zn_' !äI{E0=sܘ5*8ɍ ܳ<G.t薪\09%Z}[*%dikja3f $9ʪl(D]O*_b5QmDlW9C3Cf^aԾ]>7ʗ *M%H=|&,ءA5X 1/<^YBA ^-rz{M'b3"\@߮ũW٪UDDEcnz塮&8c;~9󗷎 l!} ĺy@"l̔j$nD;C=hA3]>v+O_/>@:[o;z߻R{p]w{9(z oI41lᢘmv:L ͱ>v_JVWG};ʗ\Ck 'ڍRbKoNLvBAh_3 Xpʔ)cuZLY3|P/{zΎvA5}'k #M5}}}_*_2I5|D,X}:cjD- XEy<]/r:Sp`D}p!t IB I;{|4oP/sڎbmSDPfUgnT=\m]GcMr9jB#P'9Jwr- 2g/Ćv ZQZ B*2Bq'Q80 }= K/SȊb`T=X`\IJ-# {[/wM)TM]o-&+9kVXi.$c-ŋv!>.`\Ug]O-ׁX_0 0 a&% @ФM<cw$_:ejf>{]0V!6h m'V_&^c7cҚ}%a:3ɢ;kNJ]xr› 7>_|hHc j͆~;P(y%zh]w ٜX~,7XDNO4pڈ"J\1axױ ugB0d1HV@d{aM] I3"[BB* Qlbbb'*fpemBBl0%5QCPOˉ11!jGmi= P&Ҫ7ɆZi=ZN")7Ji"9eDũ l{/WK{gIYY;=6++˪?҆]nHk$ֺ*h=PDAxRo>bocoRݱ>z_#l>Uu݄h}Ңk:_$3z{ftѿצO'O]kK&Kjr(T~3B+t8$rWִŗ?&K=6ゲ~B//ϟ?υ5aP5zRH X7jԨ_ӟON2$..G;98._!n-$$F׿׿?|e0yꩧ/Ko~?ϗF!,*$qŒ(ۅ ~wŊw~'?[~wW@v ,Th>V :Ƴf/gB?@t/>z݃`„ nM[/GfݷXC-nwrCYnd Q]NW=7) "|m[ O_soI]2wQ7DNwWCeVٹCmM> bOL5Jl|sa4rUys=>2P@1| A&Ko^A3tKO8}s=;>|>ˣC|'pO?A,q~{=;v_,%[UJ#l,3,D WQ^A(e"0JK@sbT5[_- J`U&&N|I}`3{Sp χf#5#\h9+[s]qe25lLj]am98ݷ@ #K[rJGzeL+<: JyKjˍUpTAFdbΆS_i; .בq*֨5RtMR9#ekѤf64_uKnuh9W2woë+nq'I-FU^{['3ud/}Pʗ kdc5Ejk'Lzk/m"̙?8tH'vXdə3gpbgP&q߾}HH?III4RB+*&p;l,DIƒ%e˖EFFr_rF yfFSL6M>wV^ 2/5F(}h|K.;/8NY;IW6kw6Hm۶q#')E4~a9,% y<$9E%},:lmy^܂] "3=f :N Bo|5uVgUA! 
m  ɩjGYsKJ#84d>zK67\/զ"k:{/gD"RrihT>$z&$ vDCOUOynu;\{ 5'kZy3b4w _V7ۺcS}wOԈRJkxpV7 #:uUj&or1#(o㢨giCKa%_26.r^y_I?!vuUTwM}׻pEu_(2MWK/=DHʕ+yG~>H9 pPn_z!ZuVY%&f͊B ?kΜ9999F[y./3޺u+Cb8ո_TC$'&|dy9r %l--܂Ҟ[@,|OSt jwժUt<ځ5+x"IQT'Oz͍#؁.U{շ%: +)OdlDNA b2.“ BYQ.І'+ B)'̂}Ebh 6 C;GrxU4_b,l%lQrB~,g?8RT(*\$J~2;֟u(R#! sB 4QYO.)\ kp,J_ܒCK:$ NSxʴJ=$z"OrfV%7ӼrTqEGI^Ÿz8 \ c2_ۚ}&NҸ,  Ph{_MM]Y|?Wƫ]z=Nğ{x-*VFsxGIȌ7JYE":g3},/,+| ro z r`8nxoS[P {T{zV湠(̰\$w#* "V> ;n#FW/} ֔l_|ѢE֤/"PWUӯ{q8rؙnw};OeƳJ^Y#iYb{dMZf)>ԧxoyMcRHwo~Ώŏڔc/-[F> ?ִS^uB#w߉-i`TB* ȝ 2+Pb"9FU$嵬W@AY\Y(B-2E2qmYzi=mHyI|1842s*;/J45y&$+kJTŢzW>(f_O ԱO1j vif~…^W },򗿼}vC&-,/|+)`/>|H-ZV/TM2@k_#bY=77brsGZ#[dVm].kh ,Uhb0ٲCth:ʚ]z}bgy# 1 Z. u@ 4N`cƌAwK%q dž/84*$g@XF[zc :hې\=pC:,f υ*˫!% P  %ۺKH3-H!°Ud𽃥b e\\,b((bhv#1߹&Wo(}\L7dawJۻ yrCƒSksCnI˗ſӷnOHho78C(-_B֭ &2וIԮl7 t -Wѹ@DkD/]PЖ&QXbqa,")brX6?R|?P*2X#g~{{w!r"fAj:Sŧ*@WO "dw^ D υf3qG&zˆb4zKhr$c"_YYJRS (]v"`NcuKAg&Q\ca1MYf#v!S*eY ♊Kʪ3w^nlkn%scv걚nf-#Lrg \sSN.&_kfȽZUP^bv2QpP}tT |>2_b#|[a9t q02#>9%_ 6L?bv1 K^4$dɅ?y_ &Sa f>yS> |CdIV%1)n;'3( 2/zW!<4'>?hD2}E$9QܹsQ47=yƌ}NftwuZ<yܖKQ`@Yޒ(*YhW B x d{YCHU ęsWK7gR,r'v(bɬ$%HEPԉ3aO,Eb+~A @3س`:AоXHϐ{쒐}1ÁXJs9;}Eܪ⺎cMw"D"U3Dv[YźF+Zf+Pes }^*qѰ//}:а4/$hr*{iX](>j+\USbf颼zw u%7IuףwizIqKˌhŐ9KԤܑ0L "bc k<ݰbʁ}ǘ@2s "sW4u)?%_f456Mk"'ZMկ$%?DL[E5pq+c'/i̗௦֎fW;:s4Ɩ_jWR;X2(u3HB6  @3 B ܬv  _!]BY@H}@yŽ8/tqݬ,5c#/'⦥ ]Bs+ uV[K&[JMj"2(}=cR7t6'-c61v.[$ *MM ԇ1 6fF~(qV" ܦ)L_@(hY&>@ :fſtvO|VZDVS$Q$?lM)Z/DXuS~ABNiA(h U Ji>Ys>v$dhή_2i8kαШWPf|I֫yVNYl l|9vcܩ,7vƱUR Esm)k7I|p17P 5a, 2_%X/lTu!ɗwO<}_'+j*[>1ID[K/%Q9q~lѾ/EC~ʭJB R (2 … Lhu 4VՂX{i ; !.z525N^0ޜ/ͱ%k2vL4;Ȕ|i) "]FDJj j]?2@ ^Ax]e썀PjIMVMbQgOr%}$%ƥOEVKqM+^"X"ft=i[cO\iji`lECZ3+0..ħV bp!߀pl€ jAFx?Kq%^R]QNpKGL[z6: v%LeCֆDc? 
||+#5a3=§dUכa:3yMkNJ,] =tDrR~ }  Q EXK}"v};;eT^I$Zz+Zz _.uzQ3kޞ甙a:×7>ZN!X[ S؍TKy"`cud оׁ #|k-уAx!Ŀ64s =0ݤė*͌<:cZKI&q *_]ٜ5bNDx)+oLDzU4fkI_ ՂPE$ xKgs6諲n1eU{")Jëb7Pf(q@@a"Z-7Ь S &}h3csBUmHױ8*B+YUX4Id(BY*_29+i KMk`⨱ċ9u{Vǜ=~!c7vz1t£Z) BhQX8V U,!1gwCŧ-:"!=e“"R4Xq92߻𨄌p9r}ӣ$_G#V=\tmT#y& Oo|IIezcM' +0IYs ٜٸxN6+<[ǤMPs6tr=\lB *`ĉа\dW=0cCNd*ό_i\G>Hгq؋V{dYMTWc 6qS o`' T=QB%x7|hi8Ёbګ>6 1W»]s^h}D?1zʤ ْĹTr/.3j^/-~fgD-ܞTVQnx^y?ɹۏ]̹WH<1gۂmD$=E;A{RzA3#@MoMu#:7- zE&%%!_O;]f _@@ʀ1@B)琄e|)׮Zg{`yՏ^.k_2Y@8"JtǜL+ $h%1(:knzv:v<@HY3QoR3!n!DX=U5|DgljM@ /n!ɻ po_Uh1'm$TG?ǩ8`=DG!oׄHb+vINe{kO ꬙R=>l!>[!$!áPr` BIX.s!άAbkSA!UD8B/^ړc-FǹlhB87h&!\B*6)+vpcZKk.EwA8@8( FFIok!H!a~jr2 EZP ᏺ3zB!xg0z $'[AA4B!dMJ cmm8 eO$g 1Ah=bj .5٭I$ B(E7@Hz$tP]~ 987AHBضB)w+" O>,Ũ^k^nL M=39Dթ Cx?R@;ci/Hh9u|a ׺B9z IؓArpCԮ}?s A@!!; A Bj{ES5'H8jҦ!\ XY$Մ  _d5UBo޼aj>CDّ`ݨ _ abib!腭74>I.V] iAꚟ iaհ@ !d‹,HhLP?.'!5uUsq  H39¥!$\ 3@|Hv>yl= * b4if"S# 3|5GAx&ZP`$1 S{Z] 7'(33$SAx($/_(5 'TM Ȧ6*v = U=\{nS@$_= {=$K 9h84$Q!щ!\-{I= ~y 6.>U]!MDF_' ] N+vo߾uP{4O$7@kf:Ct{Θg" h0_%#$2ʼ }] ğ glK^K4s-DK:CC|M( \؁_X/IsB5 !!m!a@n%}ty闇q}5_csy1'|.reؓ<%r1S71q1Z7 Iԩ@&t[C{k},F'-On8VeUM,:*z# #2cik5ףXzn2Aևjߦ\ϲoP*biWAٳgc#[ 6a=&avص܆bWHmpV{#&6sy@q\_bOT^6AII$!3^,L8a㱴4 cH{, "33?^A@8UbOɳ_x8! 2ip&b> gK I@'&U _'aaRs^z(~<Hӥ!񭡐Xa] !EK/BNBEEaBbBH IFe PHY`C~j j{BNO7''b"x C<\'Sc_מ4w} i Ķj!mɏh_D!oidt $ aOB/BЌ $F!"',p5T! 
!$4!lLB^W@CK+sB\|*%8TX@B5H(0Q*' :+v2sH[1Ky,L9^7UPGk?sk穀k${O4@xݓh3|ЎrcO cIړCKѣ \>$kG2{\W租~WmzLJPL{ C1V ze |II7_~ tĐ;r0dbl$ ,G}= 8R1~WHn#@ o&U:r9wCxW9!N7ȄbO WUu"8c"D=|@$(Jgz-O@nZr<.Uǿ_H kC/!w4'u2-?i=osHؓoV$a@x ³ OB M5!HP ~e= wq UBxE^ <HbأuܬReM'w7!BE!ZP aI"ebQ_?kJ!|W}I|[AOC^Hn ^B6=!\\o93DŽ/ևT:QuVr~͛7%KsW;թ |^n{8$FACH4%?,$~o@LB@T4H]ONbx,p]>\ )pr-\ <[@BmpAx)=[p00!|= Ňplj+r  '!^4w+TƳ<n1 bT~C鱭S_~X0#!r>&d[!m2!< H jnȰ꺇P>|iF,_3MG29TCxW ;L BMF!1ۧjד;Hl/P{uIE;W B}!ܚK."K^c%Moc$n/!ه>Xm;@H^p~${ϟ/Y4'1Mg{msϯB:8$0"4!5a4!3, l!%%u} !I@2j(B)+T@0!TMgh/KJd?'@&smsZ_}uhѣGLl4#҂۬9~&i!B&k(>x{?'; TIX!LB|(JoB͐BHBQWjO%ԜZk?_9OqEeM~JPRq p8NT( dE "Ё%t2juk2Z;3[]Sޞ϶vzm}; ^^<SNu(zή"M2 ph&aU0Bp8!Iǿ历CIۺu+K D}[еkˀp=%-=i}&'񅮥?^l9y$m;z|,֕ɔ=q I!.BB+ !'" S~!?2 )B~izjr~nfqa~2+ rfrDvb!71_Z"qˎ[@(FH!2RB& Ld:P 2Dž@!@!!E-ƅxX/=Kt|IXZ_&ʘuҽbǏg,c-vUL45T'LT A=C$C bI{!2%!KpL@8oKNl 2pR>}o~KOꍛ{[z9v;R]`Ckwϭkzۗ?O2yݙ)/s\j1 ICKA!@ҩ i{~7vkA4u!=wƳl^a?N!TNe$|:hMdr׮]nn.$L|3gxc/:p-'$ѓiny8$H>9XmYZ\dn{oUkW8j繕o<=YџQ3O=j YƄ|+k~r_>MNg{o,>G? ˤ/YGw^j,4%K.]K NK6Ǘ h;vdk;ޕEr|bcXc=իu PƄx#!B4 Ɗڒ- A~/6\vqvvf_ΚՇq4~Ac>/УG"M *4Sv?q…DǗ,ʶC렒rq~ /}HKUu/8?g/'Gg}U<pbwM_OBޱY]>GEQq~2 %1x | ѯۄvm^p@peI^a|sjo|v2&;R2D]0}vT]0s_}ZZZY29NJ eIz+DC'@x+Fߩ=U%!f>%@ n]U]ˈ٘ bV\Sh,YsϗX)sCo%I}LyN AeKI[, mxη]}UlO*)1AyUL>Wy=\/=;4![6DFЄP@I(*|hݳU.;{YiXfo~t;wB=WQLz$ʕ+t=5a =^7{r< ;v[Z)E) wѳBND!# sr'!:- ?x2k?[󏧦gsPe I)VKF(T4(vHPi-!pmD !'K!Y!ܩc/)yZ'VlH{kO>UST@Ş漡!Q}5*'7t)%at FV\zXS* \(ɇqaRC3K!|S&D<Ի |Q ܳB»۶m#~B&dangɍ/+^WpdLm'+%B%dDU_Mhز`/]}|IޱiO§rkJDw9;Z/Deˀ@p%Hנ,"Kڴi$L}{!7L{Sw/N#'NkP\}d*˶6f}}5&]nK/Nnl:vJcT8ɜYS]N \.(397nğoBtb_Wrˊ_NLN}]ygyW7$ʉ+ydlooƧxk/]r=/fMwJ>ׇ,ҽ6L6@x\ߒ?\?}=+ݰc S'&4۷og|ɇsd*^eD]7>[Njkk'V͚e,%g/U/Gl}2I0\쓢*((m('bc 15>!_!6ʚ&>/)Iqn>R'!e%%I+Y5H5~_܁MNfHCk7YWY,gOOo'^LםD@tb?&,I@4?Iֹ>j/_sƩD&|ڱ:c!ڱL:/iYԓIuOR}x[+t·予LBf\ʗHyr, A@ʀл@be ;瑩ld%tIfRHHH =dB 0!ŀ+`\p 7&.KeK$;'&7^wi^Ϲiî'ֶ!C>kJR1|2L*Uk(+H5B; j艗ӸQ|I2ʻ}N=8Ӂ瓏%^O N=eo$^ɻM|d0 ;C"Q#NWB)A] ~]P D] Wd&-KwOtM7 ME6E^Wb ] ԟ,jMWtSIǑ*M 㗱@;#nTeD3tdD_ɇsO^8.`3E>4dKP-HP dF>U嗼Uy*y\}_^_GL3/\⥣W9 '\v;xE\s_)S$TʝR$ֹ&ErI*W_K|ةd=W,:Q^ A+]XՇ|R ^ 
6ZF ^*nR%ƅ?%!@%ܩ۷I= (1GJ_2.ש6;jz-d0O=skRўV7Z7f ؜sGrMɿ0kum[dO94k ,\5f~H]m,g1dׇ>?V|j>jbۓB r+Xdnn'R>P%?tvejzs@OY2LSzO1ER>*9~%w.O#X?t,'/=,&4RIřς"\,>YZx>{*਀})|}t!lN!XVː yŠI{"-;[_zT`y ^eDD$rIT޷i^!/ߋ7O Ç Jh幑J!ssQBÝ;w2Jg?zV1j|ݱ.  ؎!mT`Fm n}7huL$Re \l7|/c~I 'b&\Y}ů^q򫈭m CTI,1L4SnLqQ!br^$ھr:=rO}BYO?S無@y/=7"~ [|IX8àWl_1/:)/l$DO.^r݋g/\b]`]mӋυ6Fщ#׫33grJ1Õ[9 z74Mf`j3b@ܮ oD߆2@Rû%Fsijw5cꍃzwUY*ڻlGbM$W0?|v^6CJ c{*QWP PJ0%~~.N>Y=^f7 2ʃpF#8e6 [;4S@Uΰ@V%mv .m;͝)"+G$'C.=l2p]̾cy0{/x`Q mґV#bG( y0?L}$wޒԍsszطAsohTClȯx_Iyvn8"(nɜ;K}Ŀk lL9ݖIFY~ &ߋڞ!Ԍxx 143'&n){c)sZ5yhҘuȄrYZ"> ˳O-^~K}^ 1koZW 5?TSq)ޛ>3;Kʕ/Nfmp&o6,/G0Y<=`3jAM9֚mS^Pq`=cy9A摹gzPJ'A2^*WGSߖ-[_;JC>c@_z,%Pq.` {x[`\BȻA-̉͡N !-W\`d@] lYK|H> @E& tCFvH=V9)\#=bv0c֤5^.͇Vq7yqL~r&?6P k}}TFTjXh}ltLuE2\Yt84w~9aJmCFaOzϰ:a28&MGܬ<.OM&^|h2=ab|rssVh V /W\aZ4t2dg5Q٭ѥ߬H~ɬ|& ."9w܇ @˧/$AD.vXP3\~'.(ξ58c5fKR-h(j/iih)4Eס1L;\"@qsxI&}Ű_F+$|xvOeM0y&w7J:»+*AJ(Y퀲%-UBePR +tg^}^z_x?>ksϽ+_[NJ6Ev>90' d4gޖԭ8!@@)2ɵ.ݔJC8'N!Qǯ(-I(`_ONrGJW./tT潠 !m`?X5ߕv` 6swa0r{`̀91= +I~݁v%ts]p 8lrq'䕉j)'sH`I&.%*>/OO ;$p .e3ޛv<h_CY=dt9Dθ`+{s:>'_Dӛ.ZU}po?q@4 q)8mL`^|13 PuB({]Ik'X{Kg'T~O)!z•;àM%wMqN9;PZVA61 n8%ѳ)5Qq) 5q\+ 쏌zT a|y%P'A s{Զm˗_~7,׬Y믿dGoyz񭣅%M8q!/iA:Q^C`&]"|o6)OHD1葬A" ޜ($M|TQpRxx"^jR|Ͷ~!C$亚>c.PN 4 Y-ӳ@&K5u:j^B@ ں% vKyҬ8/ ^;jrhc]d*;x8Lᬮr*>tUy(ќy{LhT]?pfD fa Bl>tw@I$ ,(2/ w$S LQK sTP 6'xf7!붨hk,?ɌMEl%\UxYݑVՀq`8e>.[037lj$_K[TJ? -@Z[$gzIvI_J+4\Lq DPwL{*Q^JhJ}KJ{(%h$q I<D*oԿdFZv2$iq ̾}@) m:78fڦBHfP'^J$AϢT*Gwt.Ix,7^$i50<ݕ=gb(@^8I: aW SL;H ~'4J8`$АgQhJQ*%o*K>+: d)7Ũq2,;v8W>U8u#)٤7 0N3?w 󯮞?6rzEY>7d_9Y0XR?GدwB ǠL"n|+N&8cS 4QeUqN(IH&,ç< .K7o<bwΐj~7xqD%7,T[8'y@d*XF0B)Pv@Щ9)$G?#*"1I8]SWl+x$ _GW N#JA2@Kf&KGd_WV bJRoQBo%֭[? =xo+րy*!:77\X7X, npCB&" a$G g & "jPD10cX5X ݒM:x-"^4fAĤ[:&8i۲&F})j34XhY=-=пGVU-P,%Z %ޘ){n֓,g?x_@&Iwtsq ry⠌! 1+GxJ0H~d诒hHT*|!$zw|dڡ1.LksiR#_o,a12u!KP(F3-Q %Ff=db|y؊/Tm|ި_7qPd{,y$ǮzJIb6?>WuhtTi%Y.v<σ"[Fj{e!T %Vty՞4dh~!awa]Jty5 ˪*oƔWS)KYOrGʐ&a@BARi?drs j\0! 
e1JA-!d"3[&ۆT{(ӫI{/hjqS_&''^xKPO6ْd[+{GA*JhJqI%)J`(}~=(C wj׮]'AR%zL;z(譇oaCMY[CwBR)O~} PLP!.t_X\_>+enm-W4RBV|.{1f*.Inm5| vʮIz>Rέ#ǂՄ ᔞr7]KLHr0R`J $x]D2^!Rê@()w9$L2BJgm^\4x8;&-"{τ#9T>r`_+,B<)+.]WIx(s^*_`& "J|Y0>K~]F '9Y<^~B(Iڔl&jtPJ"&J,?=4(ʥC!ǒԤn`"玁c_Z&mޙ{Q S-JQC{J%U&F_x5&oP%79FV js8 *E(-`AêyOyr2,/y.q?ɏ͉|KP;2J|"vu5>5C#SKvՠt=IjLAnTiӦlNPyF>\i-~WB*b}2BԱC<Մ! =e VF!8ȶ-wnO{%L;]?G>QƋ`Gq= ^lR Ȍ(&R J@ˎ򱝝TL#9˴=^zN`> wm.wQnnæ=z~(pkv-@ $i ! !]vf+BxSCEއW}6i|9ydճ,ݰ%4HA2SW@u$>RKu$)!p^Bpt!Q 0!C'k f|7ܱk76ԓVLfI '_T}e˖Q +PI/uO{]5іtGs[ItWu*#-XW'v3Qg!vq8BXuc!|\y啌/-^/{rͺ 88qU\7N{챨$ǔU/Ӟvh@Adق}TIgi5Tuį&QBBB!D@8r^Bd8.Bum>_R,<sO3mǢ]~2ѽXlM"Il{VXZGš}^|,V)rw(԰s 'Ith,tB=| PzAd۲W Q jN#$['mg/:c R٫ Z wozU3E;Rj]P3.w,NËO­mN8nܸ1铤v}Z ; >$]{op꺈@t X\EUBZ?7}cb"ϐ]zt8mbí+1,eX Ȟ͉,1ek'xWyRDle Dd3?&TT"Q*FB,QB 9Zϴ D@Ej=R~}nF4M,^u߾|8%ƹ'D{xR\{֓4L{$RK,fr" $cްv> aI/TAȂAF\bΆCxX!"ϰY Ç3!{| ?zof۟;{s&pchϺ\7kJJ*jDӨVU1r](COgۚ=b+|4ix-]X6&!RĹYӃSLN\gc!7 B*, a% sUs-u9sd!ڣ! !<?ԓjJiK:˨>-L&#[iK[_Μ9bVa'0%4]+!j@Є(:Ýt1W9 P!JHqT;a!_>  5fω4pBkt ߆j^sV$ϳDs!ZFfT/F͉2m/NJ4%z.0h>H\zTQc7է&q@υC1i׿5K1H6Bs} A|Bx+dXw8DUGo d?ɏ͉ 1SziӦUo*CjB%]w# AB.g£!(C NbwK[H!?*u!B* !<B4 <!ؓ @۟`w!fl؏!oiK1۰ RכRSBIHV\ث 5{Bk8Uc':% p pP]p  jZ}Qm g[хp2>ضH;ORrK[~,m 2 IQ| oe0t| BL5Xf@)8E2 !E!:#(]l wۗ@ !0D!M7'B@bO9Zq ~{J,@ сIt!ĩb{Q:Z +yhu铤-mjT{NЇ>U$? {dׯ_Jg E\5֬"!- 'Ws%gjPB <i8B * G DɥrB !<B4 ) i낰@FnBF8ņ\F'Yr%4ٯ4}K$L޿oWK$?ƎKMjp|lR.u:-Z`R]C! J#OBhPB DBpvĈ}c! a9!rTB.y < ѩK³@xD"*7B fGGGTNDhj_L[W@Q6Ya/p:ع:a„OO@ 'ݖmN lFX/HYuR!@Cp 4МD 9BL!MV>iBH j(0wܨeh$'$\3]Ǧ-/'OLm4u5װI駟~S,= ;RO2i$Ym4*'4 O#Z @h3 VB!2}9/؜BtvA9^ZW+B1S<QI/ɤ#DiKKF̚/~X׿~OFD~5ȾGpv!Tvԟ%]N.Z"Fr%R?!<+Ba>%ϰXkN IJiPH\pq!BM d~TB L NbSVmNoiK3G]3d Ѡt@$&ԩ y:UXVtFr =q\ E!;)X(X}LQ?Bhap\@f!+Ī$CIkr! DrT"C/WZEaQ2Ɨoi>+ڰM7$ =i9;ʄGn i> Ģ_'_'څ_% |@= qɘ8ip!V 8KH @۩8-v*SaLL4v71a!@ ٩w7bpk7n]$mIVZv1O}9o|«YYf 6&͘ԲdqŮm!˥~5B% *B x $!KyW$"E!<=rۓEHI?jRnBd,]ZT{G]$mImsۘW)tRfh3!an.n BǗ"7Mh3f3LBȮ6x@ȹZ1;b!<\OP TW^y%}!я#  `!D@S jm9l!KbmazjQǗGCT-P`=x29fbN=F5e͚5A(@o 4\'):I!m c9I,$J!3vuJCM!. 
yBw=9]!RUB P !^Br?DT+B7o^T}Cngs»f˴%{x0m 8pb׾F?$%4_(]CZ9.F\B J . ABh<4Bx#'!aÆO~儣 06T›96c.T~!jȅ`Ty%KO|iKK,q.QO;4Lҗtg~_G?:qDbnL }K0!5JVL/Uߍcb#+ !BHt!!N B١f B\M!<4.x B(%B(  g!B/tQ8@H2{$[sB%'I$ƖƗ_hd̉ӃR5;WY|gw8RBc)/T(TJ@!$?@ؘa`|IsKHN8i <p E!2K5B(H-.'Yb>>CWP)S* 60l,srW\q,CP^ao 44D4'!d@J@i!*ąp !m#KC pT[a)8΍jKQr|WiKK [o}8TO [?/ЪbU0SLQ s']`jO\ 4 Dk N!V:įP ! nh!t=¹0d* ʠR|l3&u#>AށPҤϵQpյcM)޻! m52«j+7$4!Z !^!ppCT <%?BQCT lHe \x1Lz4:u*Iڒ[v/K^f%\j@:[(3e@#C]iE@d3V-?S :qH;_WpTO<*75h K.Օiv-@,~S B\ ȅpy !3OrJi:d3/kߒ> %_KXc'?OӟgP[[(69AK?-ros'ʡ2pEr!\8R]#U!= !puĈ})*~Z Lg p!@|.ṅC(B&M!q!a nB[SQǗxM$t/ӖPZOsl.X(%OL ' ;iKmc@}b\nilsY#75tP}\%U׫B3m!T"B8"]W!DV"AH[ Dɒ B(Ts΍Ol2>39&]Ǧ-/'OLG;Q|r $ +"oIE} '4iEN=q>O'\ՑZ :B+d܏/N8pȐ!BΟKNꂐBZBxCvEN!*@82OBI9!'I[_"ʬۜRF SSHZ##(ZNO A/Jb~@*ɄBT&6Vdm2>he|Ip!|%@\ +ĊR?~<Wů1b4a x Մ'?ZM2m13fP]?r>`rG>9&SCS$zVUN0T[%6y/ xk_C@ `WHWcȞ]*dW#9Gv]לW(zM\yհe׾}<{Ӿ`>WW댲?3S 1to(Aꓐ̕?aV'Z2)VDGIB&-DG F~z C!Oa XQK9Ѕw7l{O^?\?Q;~`Ǩ{w^}OۘO_{S^uft^uw븩 4ZB 7@w.i%u-˃n$,dd!= M! \G^ȓ9i mO{=šmo&&(L:͙3'> v*ܸq'ٿo3_:ֶӞvPTrI :I Y<O O@|ͬAr{kR㘵g \u!-!BV,D09r]@Gkyn77.7o$$(P'ahxƘӯf=b 5g+Ja2.-Jm:9$>z gp[Ӟp Vp"{Xwq|ɧz*GO:$zVxdqc:1-][2'A"feD\"Jj<S! ҷ)"OH 6P xt!};4а?3=y}/hY{iɤ5gݺ@_B5x%Az $Ϗ:y9!ƗǶ97ߙocŋ}uqpH2vXZ7NX NON XN%@UQ.B\?£BMaok==t+g]Ѐ K.x` O&ri<NYO`G$ɗ5=ueRZ%r͉4Yr=b%\l$R:9!NeKK }P]#*vydxu\BH<DZ,D]:K9'! aS1 :^g.Gw8g ²[]GRTf!_TyyQmNd{?5i|Cyޡp,A$_庲6LݨG"2ȕ(B l9 p3BHB8@ AZ 8~w¿" /y! 
?6}kL>#u?߷/f[-sR8/}/D_=/EWaBQ\Cc)"س%vENX/@p"m/y?9:}s\ar̀[)h]s/9> kHstJiiO)Sqް*CCOS껑@U @8 VB6*Bx x'uv[6y~;>W-k~tת{t ]]U;[LX":ٳά/MG&'nذFyRI(v *'$Թ"U}he!~mk2S /\p^B[\oJ_;z >4vS('kν%0wދ7[!/ vVsN'Ie{_2أUoe t| Z@hJ<BDwL.uŨ(`!pcѿR#w#}Mziˇ7yM)rb' %` => bf%# -/ӞI m2 a&Ʋ05fNμhiO"".BxS^z)@6nz_fmSZAw-'(<E'aٜP>J뗵i|I#YM&~> IjtI`JEAFBO*1*Q5J!F@2*BÇc?U_wl&3 U] ƗtlQc)6'EX|l%ŬzN`Kzi*ːj%ĵ&C{Q4F\P+ ȹB\>~LE-b@԰a'!^ctWxCÇڐ`XR"'ݭQKl[#j9QCTI/ӞƗSNtUoة &:"h752yJ|82<5W޻{_b:Ťyt82O֙ST'9;xgxG-23c>ްK:$;SyDPH#W+Bo RQYI2M!V~Ygy^A߾}ƶ+SH{__a=ŧ Dvzcќϻe.@<4,/_2emHV/zpN_A;?\ 1iі+տ\MtDZkOg,hhYbfHb5'RD+wɃ[_xp?p;j#zKyMCyJο#ܟP'@(ԝT&y>mCx:" ZW2 %Wa[)h !yi {.BxS 䒅Ɂ{M '9qwM][Of$&+omzoyyIx )HLǸް{`plY-*dU[63hY`0u8ws)r>fj&Ӯ=y rU>0nُWOQ:Ϯݎv*w?62NwPz 31vң~0Y]FQ]ؚ!z2a=2oF='bJx#S=N~'!ܫZ"\ 9]q XZj|\:9rsK,Ke/-,?^tH)YCrFgeoBΡXage;S.K䖐ӿ\.dI^좭S)+#eoNOBz *_J!,BE8 Q-vP &M}bsT  ɚJld~XS 8UJD=wcX_(yfwCrܿ_7fcPar )1؛?yZS?fUσ+1ṷpdq4.u+]&'Ԯpʏ_Nyo[@zAAD+ \*4Ϲ28 4\dr\"i6Q̒IE8"QQd2F",U=&sbxNf({H+vqJ'?15_666n*looɌmw߬wy};3]TӿKeНpgT``T%z5Kq%dWhS"DévK*`w9[ rpΤڦ-2F % .-7ʀ@zmyef+:󶃹tvT/zlqCz?g~y# ~^r {ˈm xփd~j7Ή;&ٟlD%CJȿ.k*aZS 3ԃP$zCIL'S"އ?Dj9]R-aM>r'$2Y{b6%uv̦vdIX3j!#՗/oQ)砮O%p2Ր4R^|\IZx這dUBB,s"E -?[Mk7|K;wI/]aDω3]U"m]j8Uei677f$t|h6ñ 5ͦҝ9Z2\SO2 ppZp[[xl~B{LA|wNtz.ӕՋ~\.wSƹnS- RuFܝx( aꚘBB7Xܒs]FJCU"rKgQ`|q^i2OMI?VյOܺ76m+MK^[=" \A3ڷI}ry``i[sis~{)R򪿜MbW~Lޑ2 (㾴9fhBȁ@a2 ͣ_:YXPce~~>M)(Pplpu02zM6<4įIJ )`> JC%d;|0Ir묺cU$! |A_O:^Qb]d4L}K鿲K\N㫩PU#)Tqci˴F?PZ}s\Om3̼kXq9%a1\?ng11*TsGXX!Eػr~K b ҫvn4C+T ̸Yp< , h:pD '8[BHN\QI_V"$3X;>'k/KJJx Ao}9Bll }S82NNpiDq7gIKN ˅?e@o %~*pʼZc,`ez&w༣bdв; Xp6/*+:1}ؐ\65н'_ 7&V UJhJ8]S/i 駟޹s'Kݻ޽bڵh={|{6QoWViGYn_E0@>l`e!(@YjqGP,D ͹0 _\.XH#~rE (Soޓ;K1x= `Y8 ,c'|U@G"%E9"@lR W3ȼr)xjZT4mrZlW$򋄄o[߃&uO\=və Aݿօ"D7o{3)<  Ikh# J ;z>)Z/;: Y#>L+% WrLo'UsT!hlEOVs;P-a Jpu t8wfȇb2,5(%rBPUC=ZXTMAj) Lk7W~;w? 
⿥݉a#V  JešJ(򛔐U;PUI TBXvJĪgM%tXzg?<3/=s>/O/#W'X*7y,l&t^;|k8N;o`Ɖmn* S9Sh r 0r5"Y\r:pfw<`$&$h6= 9L C/Exb[?n2PGz^ɛGAY{I^PIú E b:(Ryy$̖>Bf{&/oP+{UM!GR٫lW+z-rAM=}̚XڜQ;H0Dy;)'-5CHdˤsD^~yzC XG1B"=kHrۺL,a-9c` ϓtŸN_`Mmx$4@\.TZG j'vyTmI7hC6Ar$>6@dߒ3Ys4~>ez..[l\"Qjk3o[&~#$ j+Ӏ'?Dj:$Rk;}ȋF+oRdC _eSBS7J0ϊ_=/q]O^ OŒYṵ!b8]~"̰3b0?w0=kW8$`6LDB~PY;APIn(]5*IOjDiF{@/A:i썸_}5|9xv*-g-$ |"~OTkX@@Lq@r`/#`e |9uk-/ ҶO"+w`M^.,3Am{ .B.u9t,Ki'dP:Z7YQ>>2in-Q]p;M&ɓ8.TJV' _Vʊo޽/u_ A+q=$TۈߑY[%49;9nfhl1D@%5ݓp/MXz ˟@k:r{CvG A_똜=EաR!a!sXIڣB"[0xK)%K L)~y? O'x%AkfZ * '4RB=B PpQ*%hAOQ+h% .Ԫ8` !<#90"b"$S !J'fح"xOOҐPEqBz-#mC5k`3E6#>>¼^N8m1R ulp&/(U~;<]A Ȁ]٨!K +P8(B*I9zpB@NY+%g,܊=-cJgY<`&' sdBexNL5Nߚ_! ^__IxϡhIcՆ "f} zz02b5P" UCd>{JMΊl`H7y㜓_AC!@/1I0$&zx|WvJq;0ô~w,h1ڝ%Gdp],i 7WX Ҭ3 qnZJrغb g -cX"`ڕ!j4|[bڵ!Pf{rx)2MxYģ'!':#2y<'>'c^xr6}wܡ"%`9-!J3i`iZI&eQQJXKTE xKlR3K`"iIWVS(uFf Edqm<5Dig3dX h`C7.s+ >UoJS$#[4M6 db*'?*$;ߨ+qT6PJ*T`7MJMcP18j `z^{( yz<ڳ{w|f5;;wϝ3s朹oȌj6Ye|_vtt`N‰w+AxxN@8P^Bg SU?qpTl۶ <}'OB[&rD%G [tu2eNO ҍШ"OU Q~(IpԒn;h[IUw||9U_yy$Or;Wyq"e郪u'pkk"[G 'BVCE ݃KbL@e$-+ݔJRM6?W#:2{AV#( ;o_zVuXLNt#>xF^2:2.nJE@*XFMBBoR- })s:C dɚ 2!DX,~ķZup!U4$!5Mx^s󄓵]fPzkf53]B'A"dO rKKqv-6d97^k4pOA>DcI$S׮,d8?heЗ ςh$m2m,^(UQP\S"*b4@~?.GC*/L BKG6oǗa>6_`UfUNF]8t eeGJLI]FlMx1eHa|I#\;PoKޓG{R!Ϣpb@3hrWyBXyͷ~Le)`#.*'PžH6 r^r!KĬvo Qfv Vy4jD!<c/?W% t)%u?x"=ZZZ.2tǗ=Lޜ4%S!_dkTgOR$>Iٻ{5ݰn X2)3dbSӑ`cjR ܈]W%P2B1 \!I>9 klR}7}%S҈=DA'aGjv !'}h7@hIPPdҕ+8a|h7upePBeUḿ&ӗ!v럯O} p *6k6GƗYuvC' )$c%oew|ߢ96X͉(giw29D  DFk(DH_:N''Wq[;n7ɻݟMscbr +'w6- 뗙GBpr $`C[aa>w1Ea<M&Hшe>6_5BHߨT-M{>o!fH3n..̋+'U9:J;'AeOr D0 )egg'ɵ/}IVC$b5h[#i*b+l 99QX !"fkPCYB ՄpA(_BG 驁+w̐lh?뽽DeFEfEN84L""'M%'"'XF d:O'|bCx@8'_)-Dl!0;rzΣk?ݲwoU/XFӥ's'5]"q82ԟTIkOlVUO_nxĚ)3o_:tg oc/嬶6;zM Ak!܎*hkk;±`G!vXil%K>˿zw[hյ;lݱvimٴ]lI[>c mݭd:R^2)Mݙ@(O~:ѐ]@(Dj.)i dVo[ں~ǏƳԿ0Vpmm`ԟ[:U5yr\W}ɧ!d~tD-P×y\).a8:63WQy4}R~4:;plq<=:^(?+/<c</<{0#@F!) 
)āV!OCfV cY d2Ss TD!ELH+,/.0V[ja%rSZ_^~:ؚȒ_Z,.-FBReicYOOjhhD&D?KB͔\yJ!, .!@ZBH{M,ԋ6Bz8CdÁ#!Ф-]'h%ٓt~$8֡VZ_j%%QuG @!E3Pg!ہ ׁMïw B:@bf@4GBp\~zR&o/H$[|߿h_R7ҜBA >@pg@C A&BȢ!o3W9 hEj W^-_~ELb ?ׯ<~y?GQs;Қ˔6Q-}cJ|[3҄p !A:q6\l. dWBrB(A8bw!H|@;!}9cyW41X{{?F{qŎ$K˞Khc5{;]C#1%WM4agky:T@xD! (Bz'Bx 0@RkP 3&iB(Fpld2.G Mw[#|$޽C嘄=ׯ_cЁ [j˳w> D@RVp!1@xWBC>!4BB( |t~ ­$_|u41}):??9a7CJѓ'Op+vZ?}_3cIǔfE{ Ç<.р4ҡ10<Č}.剛5糞*<nc B 42s@[C h;qI@=5ǔn>-P!q$&0;y ]o@x r w ͤn<H53Ss"!Ջ/>|`l lb"9JkR!T M@TakTu؁<(.Z1,%zK"CYܧ f q<@HZ䬛J[ l!kb$,G{mlbR'G@߇ ɚp$f 15aI ҡkOBfy'@W ʦ  ! qv2s!a !v!+ d,4f@&Fb3Be9 K :* \mOA%{-W 4!A>bp%אG؁LW˞FJ3>0 {i=!;Cx><:!"Raw!U Ro9аĕ\7!v1D/KI}1!b9&Nr|g daw!e:v!4 *Ů&_BƹahjW 4!&$w"eWP@x.tE!WW@(H>,H@(&ڞ! :L\mO^SLHt" aBޅ qL0A*vA;=A^?|?@&Y(땘H!r \AHE7$b>}|ИB  c>0!Be{L^:?VK&9dZb0Yb۵'1V̻m<&\w\v@xt!%BgAȦ-D !B(>w|eg.[#.w! c AAO@kfb@SRKo߾]B;Ml%A2D%C8nB8BCp?"~P4r؁HeH6 2VHdH()B]! ue R{O>Y IA&AY !/jjH|R@Q@u%Y=$+~=4ƇXzpon!n_ք#l5G@jR=_!!)(X~,Ǐ2W3:LSwbœAp\ZMɁ@$/\,ooo>LJA19ilCp=Xys Un|}}U@8gk1A\ADZ t鞶 oVdlӵ Q Z?$otmI\.*쌀AW :*n}!V̪_^^]}~tń8$H AA,6Z r ӝ &^5oZ%]Urǧ dcYpN" LWq1WGyJxtnc&, dpBAnY!]8O k!{^K===AnD&_ H$P6"Y{9@8. B!Wg>;4x&Zb؅Cv*  NA$S1W #.XB0#9^20gm%S2B%N1DS.mDڡ/q$H* R)9Vk5΁J5%j9[3i9Wxck4m8 @DAM[D a8 &N>;R tE|'3[, AdlAHOj{VyiGHةA?vh2]9_C{GhES(^ȕ*qDz*4KUg] ޯ5Rrx:l򤾗& m7ArD>}%yyՋ&%CGTG oVGYyEY5!ͽs`K)S(ҏ%-Bu8E`DDvf_j-ŭ%/kJOfvңRJiCu?4Wͳ1)|-I:a[v2F/yib>.ܚ_C/dq]Q z-:8Oĵޕsm64 hZy(QJ233333󮖙×Q/D,G*Y3nWWꩪ/9gϞ7x>۸:c2Ok48\|B8_{wq惻"{]ŗDwzއBOCH"v}Hy%Dw.Dg:, <!F )>qE\n[p!?(T},<:/f׬Y#磜/̘ MjՐ-<^aMCp^|7^z=Ci~wG[oT@ k2J'>@_ Q+nc믧*J 2:W^yBB1e]vw]mm%T#ШX?s ;Fl+y%6]h_vuǎ SՆy ?> Ds#~;GB"N즇'7!衇P8ĒR֘Q&;w7SXj\4`5֭SBr9K/E=y\sÞ+#UvϮK."pžf儍['k, -̈~<ush 84=!zŗ̸uƅCADDYQҟH9Zdz"9~ Iŋ]0!KpjT/.nZ2@D7E~Pxzl / \̮q.F)!s"&F 45>FQfO. 
T> _wuNGyƍyZ7>:/7Jٰ蝡۩ŚO.7A&!zG񈊆~_AݑG"8[󃗔:pxy`juCGm͛7ZG_+z]%ov驃2NI+䷲8A Ghj[ٜwβ',X4 Vएa~N$\jnln s2ZD"$]"qΙ{Aф|ꑶ^Z, |jŷb(d /%L%4B:.hH@A|$fURAj(8(8vIؚpH:!49?|KR7 ksr-/p.+#,voRF~$T=.^ 49FAC!誟ζvO6Wu=:Nsdcd]"O?ڠFImP2L RZO'䡊8zG̃ 0/K_JԔfE$%FFl Rax_',?|_]?IKK/e* JPACRoTpiAAa_n$Oګ4_J2kNU#n $=s"#?0 u#Rүr24uMaf$AMMd{hd,_-k w-/hQ3nK.|'97RTe/a^"bp "[xIv`|je+*IKA]F~6Mz}&`%B x ` AqB%՝(lx0H,hLTf[-k/f檉=njXa7-!7_Ǥp$^rSM% T hF}zGk&q35Nw2 >$JDB 2C^xizEFG~>J]"jN&'fKw2L]6pBCGLiP]k<SsSqPDgZ"I /ĉ'T"7جt\kV5cu0A"F1h> [i PR +Kߚx5P3LjTzoذ!(ͻO8̦di`Q2E]>9 ,Z̋\5Gg֬lpiX$qJCqq̳lxFc$ EƤ%7Z&)dH#㈠NHvwKHCNe aV_jSǹ`cΝ`S7#oxN2r,AjB>K&'#FDif͋h!n z<DOTHjhjXJ>!;ىk:kzE{om/}S֞T/B|Wn $ш0&DbRqKtQ_2qH#ڿ2h'8E*$,|Ɂ̟BBa$KFRbjJjd SoݟdTց8/sm4->t8nJ nNT+&I!.T(LJ:][ŸZI|ߋMbAI+Ō;9݆dY{}KfHlbLn,dv⛪2{/;̵/w{-{B YJUuRqt.~7:S sDXӅhGgXqD!+nq܄ B^B;?"/KH_$,5;R _~?ɍ:Pkfi7.j7\.; N!rBp $ [yu0[?NrqIۄıB |yjR#;@K;/6{O>񓊽:x3O}G< 6eZ~ƅʂ-WHIQ0X"|ER(>xwVtHC^K WDo,IcG]Bε)i'smzC{#UKIb@_~X_#u1*}ZcC$f)$DsrLb:[:BG` A NM ) 9"Gxp GBrM:YV' !}qipIIcϧkai'Y"0LdD7Bh.yAUE^n흸;e[{#Nx.d!+Gːw uNӁ0Mq&]u B&'! 
"" pW 쉮0*HIj+p)0Oin@Ě@iB{RjK ҁE?ؙ?Woz|j Ї'"?\|,~!J ҇CxMXfkҖ3j٩=uV!nDga$ӄ|M=V/i;5U(/ o$+ m4.EW'/!Bgq"m7 Ky/&Fr'ībBXRB'jo8B!*}KF/ POS"RUc~%ĥd^sy\ ɵbȓ+vb?IDH!pH]}rk|,倈"l≭33'fS3N+Hn o> BnCĺP|xz˥K35ܨD3%.uӁd_އ;J,wzK5z^fB))sx qivY׿F)ɪCacvBgq) @| 8BүBCbh _V(y)Vχ8nȆ8~8za"F>lY=ܲe˖-[^nٲe˖-ܲe˖-[v{e˖-[r˖-[l-[l-[lٲiqMXIENDB`docker-1.10.3/docs/userguide/storagedriver/images/btfs_pool.jpg000066400000000000000000001244031267010174400246130ustar00rootroot00000000000000JFIF//C      C L SEer⭏keNPഀr_y$5wёWL3e\{r9x`o/06>%=`̬G/x3Xή2P ǽHzQۀ9Oѯ6zXF;+sϾ:@e-(;2+\nB6Wf@m?= ِ?5m|xO=?+ ~kuIm|x[i 3!&];_@n羱+.#L_2>XevgM=,PK;>.OXm?=!e0hp(zni v/QY|[@m|xb-Gra\־>l̂?~Lj s[5ۂ31m?=!N:կq=Ӌ2P@?Y6MebU{'1 d1#5妑Ik+J5Ag߻65=sP֕cb<"ޞ~7-vkcN,Ag4Ơm5Y=#_I25m *^];/"܋8E[Y,Л׶:π|#\BoQSîvceylb)[掱eg5&f;6tj$eEWm@2!KbUl1Gm|hGP, u~A0ԭZ"Í\RJkEHmk{MΥv󩾽bKC>5ϊڑ8 ( p2+UBU9礏IJs@?םxtAНAsP& _vVb5NJMϘi}ɨ>u7յ<%y޽F큐 *UeY[0!ZqӨuZ Qjf?5~ϴ3֤`kb Q؋Tfvf'&+Zbp8L6'NN-?G+)Q\Lw/'9\_5d> d"#I6OXvm6wI$ҙte7vEOYVW;vL{n5d'v-hڹ@iթCB7idԽ2n_f][K0]:6/|#ةF@͇E%հ<ӫSj&o.42;!B F*5bE? 4N@CC'gW)bvZ9'_7oҷ.BP%v? ]v+qSiՁae c18x#29\c5g2ID|t>~8< ` 9@|T sUn;Ϡ+9f!.W[YQXad/r|qsXbm~|,TZyXaF:)5+?~XT• 2ą7zjuVbPN?2Ff.&|Oy|9] )`F8.qIÛeB:jueB:jueB:U|}K&gsT8+! hʄupC XsT85茲l6OT#?^=}gdgʄuǤ ;o`gyu:b-G>ųGQ kíCspu>@볂|3v^WP:2F0qKa5nU N.y]@변ʄuƽ,:!;7<Q7}gg'}Cf켮uѬ=Pt8ץ[0f7@}gg'}Cf켮ueB:{jpT_mjV0_JRz5d;;'Ak+m9UPϺNDMD~(?Fi0RȲ>u,6|fɒ'U_i=W6Y4>#c|,8aAqmki;(Vvu͆>cWUclrXtώl7m ]sZZrXtώl7mǩBGh8ioZni\=`Z>-Ud(ɰR2{(p`V$I&L˗&_}{$>~>1cË#F6^<9}{ ){9a> sejsJpa:= gt{)ȞƉ7͜S'2sJp3L2(o8_5 {t{)'@lr@?y}sJpɹi[a:5Π2یW-aO4 {yt ))=HSNȺu0sJp׾9_W9@ҜlrAcԅ?4_L4 {yt ))=HSNȺu0h{֊Mch͚ڏSr ޕp3 {<5~+4 zceT#}cvx |uU/oPz4Pw|հ ˸<ŇXx΢ު/VX|dɗ6\e1Ņ HcGGou{? 
={&Ts&͛D\>񎁳s 7Dzcekkg|(,OclÜMg69`(Xx@ه960`؀\>񎁳s 7D |VuIlCA OKz]pgڤ1RS v|>aϏ$[k BK /GFTye͛>}܀&LsfϞD2df~||_~8  036!124@P"57#%CD`$A^=w.X|mEh"HDᝀŔKZ#lFF4 9mK ؛Ju3HL;tġj*ԩiڻ0Wm~!߸!k7'|ebB >(^+䯪]ɝT%yL~=kPۑOY."Hm݅o܋" jv̢7+Pw%v0噛YLe;Ɓ3;e~ۖ+5'IU I~w|(~6=i[GrpcQ ܂K'ͷvCFK>B^2Xeu_b}uV@u~/eA8*IS8&Eۮ̈{3<'>} [LW䥪3*$l*6yx.l>Sؗ]W);"UI5ՒOE5EJ vC'X 322Y'/SbE}G7:o # B9SV8WEw+[*fFΈ=0ϴq?G} /GCq<3;e~G p-RÈH劅H vƏǼ~;kXރEUA7 ~T.YETmѠ4~2x󡗿H ^Hd2hبkʆI.$$Ba's:VcoD=QD^:uzҽq<7.(B }j#)"&A۾!#?u5y/*ɨ$} /:k^9rqUEWrz{[fVZ6ֵ~d'0t2:XJPkg*Z[GJ[I<0yq w)$dTˆ>]HioMT|K1E/UJ} /VuA Y(k=x)gq "j =x)\:7Slժ)O^z{)ֽ\[ :|G~Ǖ^^2x1:vLmbn[F@7ݖc͉=E,a-Am!sSC$OXEth$Ubܙ'k5{>ɂ6:H#yBtTȽ8bUY*'YhoהW]kڙѩ6Cxm?~[VQzA} /N>19nأE VIJ y'\hBKd  ЃPA .S#z++b*#iVƊ~V}t iul$3oSˏ ~ {!ibrJ]۠qEq¡m SO"*I*T$f ꥜H|\>ΥcJ(xYz(,_o(ӽC}MኯH{k/' Ng_}@};-PǛ{uH>%?bYP/lY?Aqdd=>89BtTȾQ_nwծOjgQzA}QeHq;B^֣H[EY""]qysr7i- EPȺ,C'0p7 Fip ,f?Ⱥ`ʨ2fղ'l|Gp_uo^PqQL9@Cᔌpj8KKs^Kޮܫ%1%Ը~P9ʡܗdg=.A2E[D80"nTyPFr^S̃={_zuM*@33+9\unK,ÒT} c&뭲޽Z!v\,\M)`Ng[kwn}ϵc)1xk־ 19Ej}ϵrp*lExy [kw?*+ں x [}ϵ{>g[kwn}qj/-!+'PT%Zr18u6Dv}lk`Ev*)#5vM=lk`[s^]VPc}lk`R);Y5ܵElk`[>}lk`[IE١ń_9)O<^,g:.3aTM9E)Ud8͂ǃ a4efXՠs{ JƜ2D^TrLi#x)*0Uӂo&q(q8oMȩQRβ>R23,2o"Y gvJh*J7DW&d\ M-.gL^V8S@CT[,QKN 2-\2+XZ/ I?Ѩk'TV;F _-0OwvyjS k'9xӡ ݍ3JM9i4 5Ǥm29XSNֶ~id}*7 E2c@e #!0ݯcp>{%++tz}HfRI cܞHx`gb/BND\gpOhq #"#lU3, j''dG6k'ERj97sCeY0wh<"3[ D,#r(y6%N4c!EfеNsFhv@)êE> _ul\9dA]Ցx:C$c$. 0ʕ}XdTwis1,C`9=1yXN  $#⸰!1۸n :i_p O#R5. $(q$~m-FK(#b)&!%L ,~@ñ1kL9Nm isϱ|C#2 INRU< <>Fcy$Zof@:z͍QKI',&! AZ /uoZ5"n2!x^Q,3ky g9N|oIb >?}|@P9S 9ۭ%+ +tz}HfS8'-O.VScl. WمifYyIXEäL$7!@qshg!v9-eC^i!KdkcZ’`%d9(RI`00P1YKw)G&n ' +@lC#QrxNq/q|4)+xH'k1 `ɲ,ҲyRPxFdJdb_tUo=!+q?RLS>㞞-!I0u^z\zի TqX<%j 1l6kݫԌD@~>zmͰAޣ+/JuDӧJ/kY`ZC;Q&H;IOOɧM~& [_C(O d( *-yCz>DU)|TXjӬV7DC|őP }g~> }\z~)vou|{?t_@H&;|{q;Zޭ۷Ǻݾ=,:I %"ou|{J‡f νfL釠Tn`UAڎ]Ug'L4?JLC Hn%# {qt/uC_sOv?J aVtt%"4I'hlJ!He m~^ia a߱_hȇ4)DBOEt駻EsH_ b.sMg?eޕD@58J?p*8EQu*ߙ=wr:Z:!s2iQ{W?ϛ $ T]:Cń. 
/1,@kfw!D;R+x-nrV_]/v!D;M]Hf_}oCvg.c uHS+)u:+(ÑIj/2ZGlU=OO$7p ֋F[`SCJ$[ A6=#1uH>74Ă ޫ 1:c,셉-Gc"T\ bM&¢oY2jB #ؔȋ;o )AY/fȃ2z(IƲկU!YO!&b>"?y cJR]mս6(Oh M'X;zϲu'78rr/2j!O&,nMJwPCBL'~yU1iFь~\\$ٹ]xV=+4i:U,Nd%)' 2Ē6{8t埝M&!+yq4ɦ,< De ,}6bC\X?r w L| /5ۭܞIWE\9S )?D??Rfϯd뢼 |4&^Pڇ;gM5N~٩ ~4zvD%}@f} $@SI)RXh֗'}dy<+'=f_MOIPUVkfpV=~5@ڨu׸jN O((]>TdΟ*"Om] !1 "AQTaq#02Ur$@BPRs3CSVb%45D`cE ?d c5\c"Q{eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCVT? eCV4UMqZac>]n\֭\ᎂA+dtJGA+dtJGA+dtJGA+dtJGA+dtJGA+dtJGA+dtJGA+dtӯ; *p: +Ga:.'\(z9Tu -70[}ONOʳ~Z-c.f ;ꚑV"dFC?pw:sf]AnU#jh.QţY+&1U@JUN Ir%YŎ2^hEq.*ByŽhֵewq[EA"qTEYh 4%Y8 nET3CM#W & {Ɗ&YFbnF)t%yX38vdT,| TϻuVUqtGQI␄w]u-= 1}ȕhAaSFzHZbn{ P;ul|7xwvXh W*wi<ۊP ݺR+drT5MfWՍ!$Hr"*7q*b4ܺI^RZDEp5j)\ZqWFxXp}t|?Кh)$Ugg\) Sipˡ*A'p\Φg.70v%z!d jva{t>Ju]c%2Kps`ikJz)[5`^w7^gWGE&}Ym2%Nu*:=KB:bkh4`&]ɡ.Vk" G.F.c4ĘoJfesI{#!NuՀ3r[e^V`%mQ-k1&Վ `2ĝ~qmx1kZm͸zjM޳lxwvr {jxTmU.Uhդ8nZm&V Zn7q6>EV 0y5V.uVe0$YWZNbUhZѷSU-h9>5V ۘ])S ZR%O2hg߆8#\UlBUN‡N*qVMG.*H.u4-Y Lg2#L o V+$s֚,T ;dꚪ8#9kJ%EY¢hJ/VK 3=Oh5-Yc̄R; /;֘#k2(KG UѯV{RE7B-2\)V[(ir,NP)8{Q|lxw~CXyElOͱa͸I EDuKv#_ tq"\$Ay.Z=D*s5sDUzh4URnJ̕-7qa!]}{c^Ye\͑HRTfPcܧqfWcuؽlxwz8@ wwۜE(Uب[t+zHt/%z.G% UQ6W4v4jDf9Kw'6wѴ8jVs4^C@w_NKw_Mwk<çjӘ HRPcot`7[:]cZQQ VCda[0"= &"ev=egd55,[.Q,$;{'0F͚H"TFjKO-yKݩ\5*x?G\4;Ū6‡Nƴ^-5hp\GfZff}90U42V}P -H +U2Eiu+l|xza3oݶdLR" yEס2opKXG~쵅luoÍi{ԀG\L0Tf =Βrҍ$D;R߫2.&^XqEz%^lxwvC~irM5gY㌖澅q~nY\GϤ Y\Gρ< ŔCg@38MKx\\ es¨W>*es«6Pd}JҿZR 6CTv]es¨W>*es¨W>*`@ %Qɰze3:2oգWLɯݙ詙5=3&vgdT읾їӯKѵJ_3OXm\f{'~Pg+}dw6g$K&mOez2JɋS^dũ R錜Tc,ףaLKG{bZbŒ`2JɋS^dũ Ri$zE!šTw\ז3nzbWY1j}+Ԭ>VNZh”Q^h])b~m˂k-y#agt;zSJĕ#߮9P)S B#v\=(\k8{v]aX+~X8 \Ju؟cüòZK^H՚49<::v oSz9j2 .&QZagt;zVn}匳 ɽVtaVNhRc^+~һT璵%pHT|d$Xbz믒c˦̸Ԯ"w-Yؙb@ƗW~;u>Ju[.jAY=%c87 RfKyHP_p~߻ 6__XCXyE_S-y XWqՂcbCjq.e/>;ĕd у4wޗwˡUz<]π#D9^=UxQqx3{1.chڑ][1[ĿwomKKZoՖ'&qH ^w=oX60}Sv6 n?kJ${l+Ӎ#>'/Y<7;. 
^%ԵVõNy+_OW~Cx[CXyE_ap?[1[ĿwaOĵSvc: <":ҷ.U0ȭ܃>+C+1QVe6@xsf2Yj7`FRe5iN[-=,fVe˕ĺV_dY&~ϭY3}jɟVICjՇ%̨ZLZk's9ŨُJ*U&&n{+jɟVLZgՒ$ˤ lZ VO` Z*V[obm-WՓ?mg֬k>dY .fWBVsqt՟{.ޔRUk&~ϭY3}jɟVLZgՓ?mg֬k>dY'Q8cWx@q6%/f䰝EKx~r;jk&O1wի&O1wի&O1wի&O1wի&O1wլ=I"q$HH um5֬i( $}Zd}Zd}Zd}Zd}Zd .f΍SR*R."5jɓ]jɓ]jɓ]jȓ]jQ:ě]c߷"nv5d.d.d.d.d.d.d.d.d.d.d.a%Hn'ʹ a_yuI OYxj~~"rFKZl+ImeX7 wv ! jHng%Xr{ޭYeZ+:j'i3lY߫6QrjQ{n-LZtq& {4'9I,4"ZݐFVTݺrlAeMk+ܝ.{1#r;{oll*P' KBwPrC9? Ȯ;rjS:bZ7)55 YK?6eԼd0eڼR;nء2:m8p}1TEtp8Ӡ V͑gedbQϴȲ-!mjfJZQX!);dvt bU)dŒ*ܞbDzʎyNٖڌHޒxunT$BIzU$d;{VYY6zNZ;ګd};=X-M!nQ^֘nK|Ld֓nլ܆\E{6_Vh&A}[ПݻyMɎKĕ` ^S C5ɰ[ǭx7VS)d䩸Sh\"l:hlPOqx?8%Mͅ4RղorOe橸_kD:Jf䶻5TZ=g"WY]#7An|`DL VSMsi5]i*"KN5߿rD`V6\-wl`i[ rUۨө).a$]VMn2v;c$kU6s4T$$+z.i3lYGzo`7* 6uGO2h@ h!xw(,`Ҿa?e}UCh|*$E3N+1!&*dLc" HeǞ+}"s*iZ +v%U]"JyBT`ݫ%A*Ium캗/$%iYɹ&l%ZΉv\mwtl"8iBNȳfsըgmm_- 7cǴ\4 Rmgwo&`y0Z}q][? NZ2JɄC;6oj?jKܮ5TZ=[gWCVG6b`[IlkB,ܠP}~rWZM˩{o0xn~.gٶ C^fJvVCH{nOJ102jM8oRԽVH&%0sɐcyU_4ǖC|M9mmV'qծ~ҜRmrr +Ҿa?e}UǴ}T4&H /Ea!7YA"k鹓&:Eΐ s0$+VRg0՛0u۫m!v"rgFLYy;ѩ\_7)VDZVmD5ch.է\_h%Kv]ٻNvde5_8A +ƍhӪ3uYwuKNWrOtUD*ntl5je֔ZjuZ% erSgv%iMg<ۮ6V"/5e*Δ%[@$h룥8zrڱIY2.%ZWz$Cwi!TYÅ_0Zb?փl+&elJN\kvV:pM^66iW⧵18RZo3aCw Vk짱01wq{c0Zf;yמqnD UG ='q0&_]VHTH^jK!y#:O^<ߒ欟;6klկn^0ih˱ UQhSrH% "Jb՝Q4V;h~f@NM:j-JaoifP}rZ/{:1-D ^:umCLH_4'.!Hi۰\I3 i#0*.'Pc//MY|U= k2"uv]R ?ʲEL" ‰ZELVMxVY)_=Ju;V= YKy%׹tV%|5vyIuϓ`]وVM6?+!OLGFm&<Օ2>cm>]Ԃ_=%JLBqW_!8 ޴HR Ψ"4Uӎ:mL O}v MkAEkE5j\.՜-bM&6o:C6~;< [%=cgS~;ue<ʲCHΞpӱEf/uGe. 
ٙ͜<7.VIC 䘑_Ru.׹VV~vH9k%sŪ5Ds0PqohJX}ٍHW oUmEuIB[\lpor4sI KʴVF }QV瞰0?(5IFLCR E4\ &8* _Y6kt,']w|Zhjo~qIŬ`Vq y:?n6Jɇ@1x>R9 q!J˹V3f.w= ʭV$4xaUTYGmƔꞄREM]WCE$t_NY6.˗2E1SէZ7Ԙ]Ʌ#&*liΤMA|㟺$[BMu-OHx, wp<=)a0n9Z2T'$m*ը˯ȵsutkߩ`Nm\lt'_Z`AVnJ"VO>z%VS'#1K ⬜lRsƵg49"VO"||;ʭy˄Ք)^9XFdd/r/Y_Y/'vRrsVKӕhCI154lB8fǎBIJ_0_gXr)+&esCVKﲩY0žzHԘ<^[ӹ*ʔ(^J$5fL3Ni!?ҬƿYjgU7EXs`U7EY >jkM'+!1A Qaq0@P`?!KNK&B~+¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾¾>fE`@%QEQEQEQEQEQEQEQQKځ )yhC ֤Z1@'A-ڨC(]2PC(M'nZ:CaQTf.D3PgLLR9Aʳ8V[%X$ 2(΄p҆Z3PC-I?QH+(eӑQC>lMxRFt spYV&i̷ԯp}pLDkP@OJou~Nh#g*`eF$_ڣEQE^bAFD&H biwNw;f'Z؝&~u̠B̈́e44" P=- ٷ !pةް ]@05yt6= ;ս;g|T03 I iO=RU!KDEb:KЯil`*=?Nt^a5&.jV5JOWV:^|ȋz<Nq1KsI,vÒqzT*e52Oi+7ʧ"& @\QeRWv@@֤-3*Da'uCὍz` gKxyB(Q{4h1 PтPn67 rMQpZ`@(i) h5,['BBf^n?a fR@&A2aD裸g-y|Uq"]yi\y@-=*B[zќMmSz)[KTEIOj*Eӹ >K3kѤX*a&$-C=;οE%,p#>9bmC9"@rbYNFWK d؄8.7}jO2۞7( 9l3+mfYA䁿*T%$S*TAk,[(4Mv)Rb2/f-ҸBGrm{&ЭԌfTxPjc6g[^7 o$y3Z1G1;w8b9?#kZ4Kt ZJH|? H*AS4b'PGIֹ/ #Fii6NWL ع LvMu)#\vheKi +Q'YyE)qfαM4MN~>E&GKOdGE#S{ڜߐrMF4D fQq@7M@X4k}ZP&:BFjҀ艹/-.)}CF4hѣFIP` q/.ppMXX,F#Rˇ g$e b3}Rwڵׅ-I[?zDhvkQXOױ 85ӆh\D#蓠,Gˌ5..$ 3,ԛsY#Ң'(xlDkV%is6#+!uҵpy2߇ɬրզO0w˜ jvRZՏ6bLZGBIXI<"f p$n.1}Bu6]cނce$8/()Pb4<1G7q\Uw6#ڦJ!cN?OP<2K !`fSt ?b_xujQ~"@gW}*f>㏩.i=s~sO V0mY{Jw w*MQ+ћUbe*PNíd7PAB.ԽozʹjLʐZmʖd4ԼGEAcMYҽSO>ZQP[cN]O2>#VTQc ZÌ6n}J40TGjZ15/~-SbJvDh <%8?!PѐAϏ;5*T}D4-NT|v5+qދQ.ިa,+{M>mKoD=bA{?C}WHA}Ap=PM0(d򦂴*bբ9otc"fةfYa^?zA=P;V+*2j&԰-Mr0~ak7l`(ҷ/!zt@qk<vYʼ^^^NN5 u$-sp%RR؄ɑ @J~ݴұ_qōCi6-&x"GU{*?t#N~Ozޒ@jr->l~8;X+`*#(=1-O,piO[9#ΰONd\^tf0j"6JkF[0Ո8Ό44⻓cfcD?f!4{Ш$LiV .2 V_Щ'_5?T~O VSL{(; -/6RTp# 1o5Tkڢà`-NJ`Z=b&ɭ%"E}Ң92 VEoj :X"1Bh< ΂Z6O_B8QZ{SYkQ.;sJAjaSP+tط8ߕ]SCW: a4z_4u uR]{wZv?T}EڋWXҤkSX+ڦ"zhq*j'T6=;!VlL薙=Rф=|W|N?A2 YFD?PnC\ rYǾ9N/7/%O@? 
p.U`g/>U:X@+!1AQaq 0@P`?+%u,o?S&L2dɓ&L2dɓ&L2dɓ&L2dL]J[jڰgO         $Tyc "X!?מ Ӡ;F=,%e@SsDw)#V* y]Bީ躷e (NEQZ!G20` dB@_uBQ> iY dY(' HT{E@Ud1vpa[A[_'LG)z:uӤfdQuZr jB2!^F|HfAW#A[j%#Y(ͥw> 0~A]8l}bshn(f"U]Ր]?*<7 Fꢤ!_"n5LpcYnRCl)tb欁fx'`܂JG c+$ 9U &(~Vt+8+!/(c>24Ղ0?Q`|qN]юSI"2BA/: =6X† ?L̰UYrB9]"gD~¨acGentϖ\5{& ;K*2<_T0z\la;G#\:|u¤l'0gekQˤCWCMǪ{u:3(2*g6FiHD اUV}2mJ̈ QG:0;/h ޏ= 3OF,O1gPJ-$"AZ\9rZBAJ'B06-&$X.&Yƥ(d )Yap!,Bz/j2 ;)x{V(^C̑5W8xy`5ī B̻hLd*F*)$@O{D,NcHIv~3$UNg.@| #x)AaH诒34QyA߳>|yK\펣Ԅr;-KPnS.N1hL6&a*F+Sz&<}Gdn}Y@z׽cKkc'qAfe.$4~z"!R+R}=iphȖ,Xyzun'B<.Jŋ#:~Q?HQȱbT`^Ș`)ϥepw+6c"R) 8GAdɽzѾApLčG#ׯA kj%Oe"HmO4 1(Ҫ&Dz~c>FǨKy%_f"(^`{ƾT& BY`j {Dbi-gѱ 7+ Q)s{bDHKpNgX~:V{r24JX$3EYF3I`HGruvq<j[IY h?ׯEⲾ[֡FDO.Y$K48f%'ҞR`{*`䅲hձe3*9}&]4 1> ng(RS8mP[-/C3n o#D $0ZF X~qb B w=  5'Lf')yXiMTqL"_* mHq$X_ D} hajCQ %q &808[ 4v R$M,ٓ9Ehdɞ2S2u(tYAEy$n9 :ɘisU2WPG#Cp d F:` !PtT۲Ծ]Z``^H' h'AD8FQ"V*_K"_MWa[cb UWq+8hrMGfU"G]",]cq`M @&jѐFh&݅ӍK =Q@DY2qAɅryr$p6tKKs[\RdK8:]T 뻮 ځ=l F`5MҚV NYЀ(Dr8hAЄT`CT?O-%b}13N%8 U" '\HM`,fP,MDX=4cwD2vcANQ ?+E!pX`.sv24!29wF&͒le%Pw bMq UnYb7 ?^zb(*᭺ k)&Л.g4M$dCX`BI'd#&0 ՙ9KpF8L8L$QK0hVf:@2c6sLwsA<*.t!?g6pyhzO!x( v+)9 ARH`@mSf:%zvMQ&q^P$[ƀ0`&#}y/R+2Ӈe(_i kJx3b2Ű*했x(QC\FpN$2ÿ0Ju h( .9%tc71@"~9nSyT ҷInHxX,5x!!QYhкl[, o&P ^`8IB CQ5V!s[X 8h>+X.1Xa`v+a?xGp9. \S]W(HG5Q$Bb"n-+0-jXa!=IJ!蜭,ۂV^Q(ɥ.Q*4d V4][ j+R!t&Qs;\gHn@M(=]ayoۊ&Vo/ 1:` k,2U@q_V7ˆz&°~.} >pB4}>C S@pg#Fl5H: 4eۿ! k5cWS QJ,-]T zJEm d!jS/Qc%cޘ7@ oB#0| B MA[%7xFZ7)ڐ?ndG AKښ6zTs<+;Adk4XXG(Ǚ/{;q9r r !밖6HJje؈LD_ U"^#Ms%F)RܸL$-cz+PM`d Fsߏ> Կ!!hSJ+Ź?1qNBtsb!0R]"h(BG3jЪ*EQEPĻV&4CR)Gyb9UVLPB2$rV[N8ޟ#+z ):{iKMj PT~JgYNHeq#&dQ')h+ Z¢|復>;g,CdxJFbQ!F)߆IԦam3W:ú3= VC ~/2rh%0+Up01M`|m 5ʞs"P;/S=!Jكޡph4%GS7& BB8QPByOγPd80r!7 U: hN|*XT=J!NȐnx{Dg@ r&2 _$A A!N }G?4~('0=xyQ,/`n2skG%;LTJyXpU3@̉.x=*)Z83UkgKʛʡE%ArFD}/@m#9ݦWp: \GM0C5 !1502634@PA"C`#%Bq&d^%Mw~A$(/C!B[s yɢY$Wmt?!i903Bˌ%KQv`yg%C< }Pg u5X(<83378. 
ɖQdrsD@ie9*|6C}\ygskЍ#g^R )eȴ tRb> @tg rJ1a dGK3*V:(ǰÔ2ARW'&b,""GR^rrc ($glygskЍ#0t'-5aˇ+E43dȂW #89AR RWS22)5([mm.ͯB7 Kl&9J~DŽѩםeӲIߗÏ=iX%q᛺[ \g"bɆbH7,AIVJrסGkM\.鈊RzD*YK;84!Hq߫wYƘ~ֳmSt3f!ǂ?ՐծZㆫZסG`2F;O - Wut@":xE9WWYz'~_?Xuwe.WZyAGH2xDWZwK-WZikjAGY7-d!YKV+R,,IJIZ/qԸj\rcθDNJ+R㕩qԸj\ruA,Y'~_?X'tO}%S ;^oigÎ|o^p ާDW gd;q?["{ėy/n_ؾY#{HS=s of="c; ~IߗÏ8'-C$|,3u8ŧגV8סG%W\~k5~K%2%"ՆYU} /,OU['IwYf)qͯB73 >J7zjoS"+3|8 ,+fPMo(Q5hܕ-@=ݸ{&Mo(Q5kyD[&Mo(TCi-!֔W;mSd^ԎZsژyS lg!WT⓪lBba7[ /g7!]Fí[  :NK_ok=GL&2!t5 !6!Z7x+f)|8|4O!|1oomK{UMiV7QS((9i{^KO}70oM3 xbFVr1}kؑAG1nJ-ӧN|NP˘kɚ"$ {ѳ^9|{om~ tOz}/JUx܆Pxm[a<>@XIesb'q%vCRLwn~N FmEeG7R2gg卽G;mB׽͛"?k!_,O77o-s7ctDKjϊm{}cIpRSHF9bNc1b `y<9RŌhs0 XcƆ3ǰO>7r lq "Vifx|xf_9+uܣjV2}kr92dMU<$;16G[}~]}b$%hkڌ!nͣޜBq9NdrӖA+r|}]ScʫN+ J:9],X7Cyo^8{׌yX<|jWxW=xW5̉jA[y+ O[|M+)OEe1Y_2&mY|ʧ^A;Kwޢs]J]MOv=n1n9U O0ex_Ux׌E߯ԫ;!1Q 0Aa@Rq"2P#B`3 ?mJeG*9QʎTrG*9QʎTrG*9QʎTrG*9QʎTr*PnP5 CP5 CP5 CP5 CP5 CP5 CP5 CP5 BLm?B,6j-b5:5l?e?GvSFoQ(`0 ?ɚF գMG*TJ9) qSB(2eml!PCk2Բn6{CZS-dF m4`^}+ ;DJuT\*B>ߵȊ-cGT 49-޿ZH"4wQ!BmW 1QQˈvZs1pGv!5*9 ;n>i"9Dsf)[mQ5h袗 [qS$c_e r4]}ӍjF U(AުpS5:. PnJ\ 8\7&P( zYA2إ\ gn XPnJXQru#d3 )K k}˙`RbJs. 
ֻjSasm^ɾ4]J Dj];Tsg˳՜+T%_SE0-d|8#Puj2%uSBaqM_)eż?CumKPIue^&-G.hum\mCc\l}Cp#f vZ'Tks-0VmTí}ƻ)}n5ؚ 7~ T˓(曊.5j2kF_&HK&6+pUQC H)QĩQĩQĩQĩp-|%H%*8*8*8*8nRRRRM{ȮE7zo7QQ;7"r*7"r*.;'7QQn7lM0yG ;^ZZ;,_m+zxlM0yG ;^ZZ;,_m+zxlM0yG ;JVz_cQGCoDWS5atr !ieug򷧀7#<TئkhD3Sg.#eW=7ҪpO5fv᥋{x(d@?'pOOQdE=OdFC=N{'h PP=dG#?F=C y܇Co#?O{"Z֢upO{'=ɕ&e?)ޅdG#?O{'=#S{JE0SL0@lSՉ7(L0SL4w `) vQͽL0SL0SL0Sq[T#K X <(LrӬjS aUgj1M&=BVUbE;S<7-9Qʘ*d1*%dT R@<?EeTPQJmT LCPA2?k ~vl.{#NO ;[_P{wd+7oTה("  ppih-j :*]@< MB\(a:>QyGGzCj(( օޢ@Mc}|Ǡ^q:ULw(P3t|򏾏 :_ʯЍ, B:>QyGGzC6N *MIRnJwTK #NjMIRnJwTG_7}%I*MIRnJ[P1x~A,FCE:8NlJtf ( .ƿE7RPg}"Ʋ CZ}BOuMw1֜sQIĴ/Pq)7a|ْOU(P(.5ޡ`R*-PCޢ2JD S>9`,C(GS!L0`E "(寥*KJbI*"((QT`0S 2zA*]Ka8(`NJ?9=D*!Q xxD*"r?3 15!024@36PC"#AB`ʽŖR0=cuDf#7QFn3uDf#7QFn3uDf#7QFn3uDf#7QF7UK.휑7ٞڠ7+&y(pQG7n <y(pQG7n <y(pQG7n <y(pQG72G}9J?>NBǾԘ!zX*+J%G ;Uo&sagSTsXVLn6#YMM+$[r|cN9 ̣5}@qi#HdVZtmGCz3N5W-D!!C[lTQ:Tݺe1l+:zpaquUw;Xb O0לTjehKq` X+?L垉=39WtkȌODW* g@IG{&B>A-$P _+ Ac#dLUW iƕOz uRgi. %>.(衱.+XM>^sSv(d>`$֭#N5J(h z[XY.,԰10,XqY`Ɓ=~cwM CL5UV*C H`.ƗQCX72s!=GC] |MPʙ JgOt6pɸaI}SBAa9^'?鑙 xxhI%"Ex>fNT3uYDlR9R,YD Eh Bޥqr}-GI[9fZ5imlD{M=VCg \=-e,z޹7gHx1 轖kjWZ_cpq$I_wKt}?04I tT% CZ޹7PW`RyϹh`Fxi- fLbFŚң{dmLЁ˜? 
7rn(NWiL\b C7#%;iL\bdbUPs^ɴͦ.m1si"PPyj-5p `ۺ6ͻ3n &gV& umAwPclt.NumAwPf,R ;ƛ 7rn!^ki +@2ę xon/cgp'o}؜6]doOt6pɸqyD/f-DOlŏUpѿsbrOpu={&aҿ /Ik9%oOֿG=V~*' FG͉)?eKtJR%|KtCg \oZtBJ/ q&iDz[XY:7 og6',>7 Y/ :I"5o ofdɻFU_6{[ l(N >A"7a>y l,)rn1$sZ`P&n3vO~h%N)և{Er)H+X8x\ 6P6e CP OMKՖu2>&F`dxPζci%$MB-IRL"ś)stRr ʏDW*"S98bJ6m˼ۗy.n]ܻ͹wjl i\=fܻ͹wr6D6IKӊKNg״LGBvo%u)][^vGrTWzj^dd\VնU0vͩDG+v[fJl,VY5u*)5qw ^9] |Cɫj&w6j'sn~6gPK{QyF`GTCj2MWRۓW/ն{_}}'7KqF̼>8di+5;]v~ QȏƖTi>[YGKì~OGߌgXEo`ui"Y]'b!KC9cU.:;Z|/k Ip޺=j^c~Gzd:P.kaÆ:PŭtȰ{;GڿQCNi} uo]fi/a?29fku9C q́ T)fy%"wK7Tta_>i>[YGKì~Odd xU]u@@rU]ugEH$R6y6y6%2/ny=`)]^eMeMeMeMqdiI *l*l*l*le`O3,fHEosShl\{1^$#׽{-5łrY(`G I霊 C;Ug1j6cUY#>J8غ"IWL`kW SFK KH59p0Y$"`*%fe)N Ih-tw%Y%*"&T ]4jR ?蟀 u4DsJU BOInrP_E%%k3XvI6N=@CZ\MT!FO b\3'r,΅ Fe*Xw>f#UGV"`{y$s;{{~Q+~zDt$^dPC($>o;C&?KS?$7i'hm7|Ki/[jn[ZVPL&w$grFw3 3|HZ1y g/$w$w$v&v7L;tn#7Iշ3Fϓ(hhGF=¼y#gp.wp.w#gr>wP'/$9!1Q 0Aa"@Rqс2PB`#3 ?4 P((((((((((((((((S^R)E"HR)E"HR)E"HR)E"JQnX.`vMz+.fZo4> XtH0LP(on<ukqVzOʃ hHCp($;f5QP63:E[6c)Ksؾj /YSҴW<|)*tUc#.ߏ;@ʗHTtUx\~_PCD;|i f OK cjn&FmFo''ZOei>{IV'ZOdԽZOei>}O4㰯^\‹Qs >0\‹QavGq\‹Qs^.َS5sY^=z5=;2_ =QW5lg[#ӳ/ίَS5sY^iu3⴩rXl3g[#ӳ/ίOs\ͼW=|&cyGM\gWAaz:lN̿:c YGuܬ0BYtJ=愈VQ~=Ք{]Ud鴮^o u1Fi@2i)}{a5|jS>Sx|43LuOiaa$ڄa3L9a3LBHÈ$r^3L9aҔ4 S9a3LaKBL9a3L9a3MNࢪ :+^xJ Hqqp@T@T@U\pPPPPPPP =ՠ)Ԉ¡*7H[VjQ87%NO+CBqwfqn0* F.8&@)UZ{+@RQ)JSf?:YȦOcJ Χx|4ǂ 55E.M-.)i)E1V@a'U@H\U?_(W1SUEUڔ :)Gjz1jGR꣢ޤyFgƯu+z\όu:[FgƯu+z\ώ=`ZѼ+[~m ߸#S#%kopVZ pz hvm ߸+[~kYU< :GM*6SC@wY[4!8^@ ?*vzi̫S4( kn$璔9ξ쳼)tN VfT}k݃Iwt?Q8 { tBơRQ: 'D V$U5S.iCRjxzTޯ#DEOoVhѩ RwrBjo / sî5zTmآVrE( t*294JB'jg*TDQiV"P7LLLQj% Pg*j!JN)BD=&(aFCD=aFa15docker-1.10.3/docs/userguide/storagedriver/images/btfs_snapshots.jpg000066400000000000000000000466761267010174400257030ustar00rootroot00000000000000JFIF//C      C    j#aL@aS3 baDrKP U&t2G ԜIJEKr?\ ; "u :}NA0n` oC߫=:4/p"u^VI6]bUT)&U)`r:P:N|Ʊس<Cd`7D7!©nwPʩj0o.ai[a6IEQQmW&_` ͒ߝd uK6]7ZdϞdH69mϝ3>ts@{~ր[s1sɗ29m ގ̀9m;H9mFr<ΨQZ"|y5S<fsy#D2?`9l̿th~Rٶ6r-*WܛQxe?A@ s* J\I&e3slFߨ|3iA[*9IurcaeCK4_&KO.8)T)\o|Dx 
|P_Jy:;>;72ͼm|*؞>9J_i̪ukz5;QRUREbx6ޙp| @%XQk60X8.q6Uxj2ݎq =Sn4WceNN(oqon9 E@/u1 `)#:@$% !ʀ:mMƇ$P;μr˷.k:n+[Nf>e:ͅMGŪ-`l3pj<Tp$:w@_vvLL;r5o_r!QEz YؔݳU^טe9~ETLS]5D+ XέUHHХ(% Bgif0Սu2^:aVN#?[j.^GYۀuCwjusn[)T:k óPDTS2:+CABHD?f#m=<G&:o\ǻ6{+3CrcZ˛g%[ :6iUQ]3VvJp9!(IBHOj % 6 @ 0267P!81CD"%&4`ڟa@dd5 Mtk]@&5 Mtk]@&5 Mtk]@&uGZ-yr HlYbG)\fPD XCiO< %Y@>-2,a0,fF$|LQN!*0^ۋ.`e bkdMG.IOs-If2D,9A'sH6RIia]}C܄￧A8e;{Ҥ.3 &V~{YBDX ^^LYhW\&g`jLq%HxkWtKT^uR۴|F ,lj?V2q'3Le__Ț_ɝlҋ;,v[t,HRv*v[Ґُܙ-U-߀"k &uJ/[L&xE5`,]GR!bS,5 گ"k %EWj*[+8o X{^|E}}\3McaA1yQ/yo*vƷ]TBsWgsAA]?$co.p3>"g%)no9WOgSvUt,+tgy\ ;yK#J\ld!7Y98-1Op#y<?yVGG&g'zD=wqb^H[?,K=/_ȚǮLZ I~i\e‰ 52)嵴w b?'L7M_I'7/"OriuT5[k$Y"[l0$:ȌuƢ]=i("fYvo]OK [TPH)6K6‡pB[3gF? b~QBqb$\Vw&Q9% {>czOIak~\Jp!3M-ԙ:vt/K/aA"Rl:X}4n@YDldlAʎ$P68j)xHyO&aR4N`f !q#mW[-zLEyms0K-kUٔdٍ99٠{{, :~ (6n9anͽlm; 10q[.ӄbHl{mof% 2Xffd6dGc]ٸfFq"&˴3Lv]# hg- DZq - ˙TYD̆lv@?fgAl g?eeuM@7OY]S~F? VWTߤkэnr>Ћ&.b>|mU=+{XQQ 7 ^ ="~? z1=&p!.U< :k(ՙTNDm"k=qOY]S~[!1W[sND="~? IJRo{v6zD7}[{2CQ1F-s"dfJx4ԁړAQJʣZ/lw"A#cYx/dO0TMk ƀxòNk1 6)n4N 7"Tyƛ&`Ӆ1nW_^ʫ Q(Ԩ̉6/޻!O'쬮HTVPZ`?'VWTB{vm1ͲpmX?(`Fbm%))45 4ӏJŎ `)Q1\FANHˬ&w+,uڪ2rDzkPvfNLjEoۓg8PcAדWpק݌mkn$GܬwaSTቆ7!ḲTTR[~lb Jga#DJ챩8f8`Uhyij=qYa&gݼ)~ч/or!1Tc g]5Ƕ|:Vmm0qM^ U'+,tuω:цaJya.s{4WhYy;SVxﺯ} LPzorGi-Rm0/\@i\3dQu)gn>L7 $e:G£xbGGt%ǷC/2[wy 0[pr@b:}E$Ԯ7̂P( YDz_edi1Xs n0.|D9 4s8DOtRva,)՞*RpV”?tr|YRIBr|XSm^Pc¬!)ў)M>,_O|_9LJ)BQaӞ)MLN'N'N'N'N'MN'N'N'N'N'N'N'N'N'N'N'M'bq52<5#^uE*zr J4X^RBq[Nqښ ZC.ܣkӫ4TdBQHҌYQ WfCd}\ul5bWNUDlɸe/d}^S:8ړ:c9m}^fT#3ݧq?b;3"j0yӄjP;j5wS4*cۜjK%27ƭ #M~QI(צ?(ꪼŢ5znb5l%6XZFʜۿ<͑z5v3|juHFI#W算f-_F)f#V[JrnwǙ5yX(iYTuDIosG'%4}&JMLn=RBmdKUPi9TLiUa,OԿf2wUJ2)Hty*R~ LaVHWm?%jg٦epI% rzRFhW0SLC ivݱK3vKEg,!^Wۏth)vWM 4h+7>+iO)W4 'bI2K]9ƪRSWT!<ҿȟ<ȖX)9Wm8K,Glh>͌cC4ٜpb>Q' ;<ԕ==[L-RI&LR^\t'4"S_m{cC&)S0!&4N:OC6tjEۉӮUH5E2KlY \Xh2iC|0Gλ]6Tb[sĚ[OHBi߹x~Z߄}_s{ bP1+0Ut2'|S%?/-1^i*hDO$%YqڛSn 'E;6; xCcN]TSCcu!/ Y<>%q~F^ʋk$3J ESS埂Xלʜ:9Q$d%1=Hnb$5P4闳%r4q]':2ĉ?SHcc1ftG1cE[z;ֵ&:ֵkZլ<Zֵ@wd{>թy\l 6#~U(qh4)U * n ^(}l8I&8cGV[WC.İ0o=8d)`ya+$$t w'a)%T~o08O-D"1$<0%?&k 
<=d ( ` (I"ϥBe˚mQO(OȄcri af}t6SDc⣖3pRС:%MHIBsP;}i `th%$  ޷"26-o`q/Ҙ,nlbx@u8![\5d?S8Ye*|DŽl,_:iKP8G_0wΣf+_1#vPn|05PP^"HA+PkXeB*bH89@ HJB o< Y\ǭP4qE??j r60^*K j ϰcXAʟE|i9C22q@ :2ᬎ8QR 뱙'q:˨Y'L!>}U? ؄M V:ҁyDp0+pZ^.F-g,}(+ ƨAo/IPJRy@9| 6-0[:M6P (f`>w3XzJ QuMn5z5]Pv;)el@0y9$ (g˞fpWHU0; pAtW;8@a+8rA`*%_E;+3 @`|axYZ`5P g GUA 55shjbXT5-}K@,=Joo 0J8؇7I22+M|# f>tNyY}& B3"s2׆ W4!HơnNpqH/HĖ[/]I^M2 jlwPׁ5P+>}Bve+!1AQaq@ P0`?-QȌpRGFSǻ;{ǻ;{ǻ;{ǻ;{ǻ;{ǻ;{9ge(~ܲ ~.xL4 ²Q(ţ%E*S |!D7B,jZ]tN 1̤'ab{Ƌ >χ tINa 3!E = <ċb!!(iOշ?;}#!%Q .x $3vd:oBB_aD pXU&-Pj &ib}"K;MǬtrL"{ȹ&`(TIL0%2M&-_7 ?YWk8 l*2^YX5PXA'y;gcT'b9E@ncտ 3$qD<pZ u_ %\cZ' ~b%H &R__*`# !!$L hD}r5u*@TVhˆQ F<`ʄjP51~)sh8VP@V8xA2Hx)ZY7?2+ExRuRMHn>fr.o/ԤiA[[:0`8C8M zmtUXl @kkbN} nU"~̌=z6 O̬v~K ILd)5C*15ܥWLSnuFiCbʊ~h?K6yVТBri`BQvXj@YQ;]r!;]c$.c{CR(Lu9yn[Jʠ\Ҝ0#v>"M E6 l%¢ h tﴤ3EM=YN"0 ~0.l p"A#:JP6@=Rpx΀)O^}C|ՠTP.$_]ؑ2@7` KW̼Oa% ĕK# Z081r''`V Q04\~tsEGj .ЖvE@KP$@&TeH BeU`jE z:[^,c0*V 81@X`PX0L*23DEag:C e~IY=I ,*a5\}411*32laŝq"ˇ63f C\hBP]`IŘL7Vj\x38lO[PညtdfA)%*0C^A8e!r=3y:lj44>#@b~>+Al0G4[=qahmqqĴl"t:kb'#thE9l|X ARܚb>$6IheBf"-O- (W6N׿$Py@^~(agh4kn\~jϬij6 Ov p*@<}-qr R$.B t*pCh՝DQo#E?t|͎%\-}ߕ G0'L #`FMsҨZ, lS܀ h$Y @Hm`vV .0zN "lelj?47 Xh &#nB}sN5 x]JV!0F4.墌nEOxB3 (= & J# v UY<Ty$+-R;t C#<𝂠j&FpQrƢDiA~'uOeB %; 惈|"惎0MS|#:WDwN76p+S B__0"!@ 2501B#$P`ӧK q&v3Lgm;hD&v3Lgm;hD&v3Lgm;hD^z߶?s~I@O*OGq*{bk^Sq qZ 53e *4"nc3\\a>މ4h@Vye[8/ӜwUX=,2ا_!'HVYuu'i9mz0u*`M pf0 _agA+*e%z$1eBW)xdO>CoT#Z&o}15m}6sնumAն[y&Zն<|SV[iVsն[xaVm䚶ӟjMKV[cumKm9:o/m9j?zȚ_*sS(jyI\|DaVU|W;,H ;`ïmHP\_%bMaV­#Hv\xU|Vj: {m"_W|RRX'bNeks7 hJ; ¶fN7m[!3[R(']T0⊤[ܤ{jٲQ~ުh_0vks L\UL]W'}P8H7o-{1C~3 PRFRymV>q&,d}X2qArUmǺuܧ\YJgwM+´Zj;"7QQSB->'()qϮ (<mP͹}0 ( PXg]q( 0x4"@c0w%oONY3U0dC_)&/Ȇ+xHsز"DDROD dY0}VDQOT~)&!CBD@zQ7đ$CUD1_U~(?tq;>UsrTRLET\UqT7-S7]UW7%LE$UWIsrDULwU\ܶRLQqUW7%<!1Q @Aq"2aR0B3b#PSp ?3SSSSSSSSSSSSSSSSSSSSSSSSSSP _-o-_- 7<Ԧȃ~˹I>ԃjo~`_?p1qe;H' + !yo5'F5D&$@⌃WB,ڿufz'xm[IXZ>ʯ̦֎^Malf[&72/X=`9tN kšvkDaqzu㖯A!zh@J&B~  DpB/MWSEQEQEQEQE8QEQEqSEU:*ATT_TܪSTANES!TtU:*9*N/xI 0H>N~߂&20{{ߣf--u2m 
*&' 0UK>ܜ UW;0BcU.n@!Us O96j^8]jC?T/.UGX։1U ?qOuuv|jI)_y>e!T8.U,;`>˘" QeaU,H?IݕS2IG=_v ~:$4``4 II0w0G};%0w0G};||w IS|w Id)P!0 ɀLLNL LNx&L'rST੉ܘ pTLyj,iiyM'wԘDn[,#ɉM\`( FX8!hlH8Di  DJImb͓ v|Qx?ҪcY'6,C Uaܑ6`WQyAU3o_Sҝib6AgG.jM48!&Z1 r\tr5s rx9i98;ӀNN Úp'zp iÚpNFI0!" @12B045#$P`N[Ez;:|7"Ur%W"Ur%W"Ur%W"Ur%W"Ur%W"Ur%W"Ur%W"Ur%W"Ur%W"Ur%W"Ur%W"Ur%W"UY/9O=!G}QEeeFjv E5v 6R6vC"2 JE",&vAӮ+dd qt:>UiirN[n/L6DDĻ|`!8ݢh`.(?\y?bKzj2 >+HKtgmaH9g] BjiIr?e]rDM|H/9MC*$c4_pu73'?H?؇?@=%dɽF㥒Spӹ;!"-#{SW_0ڢn(+7dWMV{eWu\uOqV/mnl/\}:UjU|WU5\|wMV/궷v-V\k~~N-}֕toP} hTl܁= Nŵ1~'phǦy0Jm9$_\UK+6Cp5xMH\؃sېNVߺ5-N`9(Y}/-VHq0˩-UFb[ܞe"chsݦ9KmopZ`j (#~(*R}D*zhh];>>d4"NMrTF(67pܪf#8˲'ǜLtl~R0X~. h Ş0V;5&(.qwJ2qhOq~֔E ζ K[WZ۲K[$7Qik~|WK[6-mǐk~|K@شM-mCZ--nϊknåih--lNJikn㥭Z--n~V4\2hMloL]̞󛈦\ 3. Y=,ˉf\[L K Ys2y$ˊ_\٢=Y3.-.pP5ۋ."8A=)feϻqٶeůOe?3j)͹ojmVu'fD&.*d-z|\c@#": *FݞhH' Î-Wqͦ.:8.ۮ1%&.[k>PCkz:f'leYI[U_M,Ǜ-7$<-:IfT"FD˓.%gJlT]Ծ*\X-H HݖBӱgt87pyyRTX]O!EiGy3 J(_mhzA,&uu_l7\8RzCoH}<4y{sHģt$Jf<#I{+Ep+4Д-a(+XE"R"%`sJ""V4XEJ%`R*XLV+TE"a)X9!1Q@Aq "a2R#03bPp ?r((((((r2ӳvh&="#zEm$RDHI(hAOT+_k&$pP@||B\ˡ< k@'v}|>L/H[+uoh/r'O! 5n4ܭEw/9LܾI!]_挣DeG4F8haDhG?=mPAAAAM!AB?]XkaӊXuXka5UêX~aXghݵ>VHއr3<\G&' o6}4mC=,6&h5wfXmolTLyf갃j-cU2aouh9mEM5aV Ls X`97IwmD>d Xq"nu230ϼwl\ #'6 ,?xdT&3H濊iX鶼TsN0H3٧#(De'8lSU!QQq(LJ854M=SJiz&ҚciMM)i蚚z&SSJiz&docker-1.10.3/docs/userguide/storagedriver/images/btfs_subvolume.jpg000066400000000000000000000734721267010174400256740ustar00rootroot00000000000000JFIF//C     C    !Lp!bZq1٤P &=/w>kP._#Bom Lodָu#뗛79]\VɽXE憦@$ YY5뽋VӯkͰ=㩬I;5w۞N{vT_4C,P=BCc^n!uULpq1mmޚ&pړy|?߽ئMvo:7csNЯaǣЛM8ad+^q⏲98dz|;@q1|ùN ;s;5@q1|ùՉq7cs 7cs v6:b@@q1|ù0 aCK?aqM8a9ٚom!ǥ0ro溴7/w0wUdZ=N@;z}Ԩlw 8g:19Ḧu#eJ\k +q <<ӝ͂y{0LۣfߓwSǀWmMDa[&zۣ/|E}TimԪR.Sǀ:tp. 
TbTK@e 9( >qnyA^RpmHz'8d/꼞p]_k5,IW8ط_{G+yF7:/3)P'E!<W |sVCmo&N8Ɋb.X70W&x0 ^ؾWҚFo_n9Y`]b=`n`M̀pjW *QP rX \|<A;~-ܺfa);NK^˗ n .m٥f2Ykic+rSk|v»ŘSC,NK^˔%e4rݯSǀW"#u&NK^˔CNK^˔ '<(@OY9,x{.P-J9,x{.P rX \NK^˔3b:OKt rX \ҬnjzO/%eH v$!f򂜖<( gSǀW rX \`ܾ/y靀SǀWyًNz+yG@+ rXT5y&jdWz r-Yߠ{Ǔf3z rMTM[Yo;}?˦8D1zF=M]>GhN"՟^s3p gNsj_O@ gCsz@\􏬍4UQ_GEF&VnE|7}wKvF".>Pmc~PdG"x v9Na>0M>8.k +9xVr-cAǬZyfl<rѮƿM/^>b:޶WSy6WװC-n 0=uy@ZXTEwWm6⬼3'cj̨tsjϽl퍟^3'cj3'cjuc|=Rp9L3'cj=L=Rp9 IZͽ= z r-AuU}>mXgN"kB IZ3qL=Rp9 Q¶d03'cjcU{d IZ3yH'0-i=Rp9 рTE7t`3'cj#uO_9}S꓀1ȵ>O{e|z r-@o2H꓀ʤ7#yͬ9 0679@ 14 28!"3P%'ApcɭrYB_n 4i'+rϲ^1J7[Ǎuxo7[Ǎuxo7[Ǎuxo7[Ǎuxo7[Ǎuxo7[Ǎuxo7[Ǎu,)wE4 .i1 UAb 9x͵oPȫGQ'n1sXhL($i?72BUe-"sR\OƵDlGjeJnH ɒx m_Zai'+r*Qc+\kW,\#Z ̽}ocO p4\sp lY$\,˔.p F\v8̹s+2lef7=3~LŌSe w*w/+8l9AhqdžZVNuM6.i4~J PGfSȟ4ܭ$vI^j 2{/nMwNVszO& `px=Jzr e/?CKȹM_^Z|7OE_)JNKNE2JHc0E쎕/h!)]ၼ$'qq5p:9".J9Kt 94SMޡ9[dtȾkZ3}բF~Mޡ9[dfkp/鲖zK=lRFh\:N4e,zK=lR[)gYe,zK=lR[)gYe,zK=lh‰`Fr{7zofE_&Br?"s\*)\]gu]sxW\+o7u]sxW\+o7u]sxW\+o7u]sxV)ԃ!+ xH6gYˣJq8G6䉊0j !@)]qCNm922G'NQ.pRF@#JkG\=)0B% lQԐ-;cxyME<.wNVc ڠ<Ͳem0ʱCw&Na6/)C#2Emwp1dK%/WdC:F9ɡD.YV<n!&y*ڦP@>jD;J / xL♸yW $,ri[+BB̢gvѻȻHȎ LԭN|HVO[{>p@05S킞GFxyME<.wNVڷ?пw$ | BuiAIAJ@ىgOadX&S% D/!(/&k01Ӥ3U2Ů$=,&;ɫ1MH)T#*+ " Yt\!)iy IyS&M]^D,$e1578Լ Z#ZFRO)n S`R((.y K[FH]"r˦PMG4ʶX[,r9VeU*clʶX[,r9VeU*clʶX[,r9VeU*clʶX[,rO n 4svIK 5::eUUnFU[VeUUnFU[VeUUnFU[VeUUnFU[VeUUnFU[VeUUnFU[VeT JJ.zĚ(ƛBr!egɖv5#/Lq+JýBr"F<<$@CqrG{ yooůiOpk/$%ۆds~cw}?$t?%nf cb0+be &D6z$qB(E4ǫUQ1vP˥Y%i֚চ'ոٳ.@Ixʘ~|@Ri%JJd(.\S_ &"˄4lROd\1 e%E8R_Zai'+r*Qc#~#ISu2bI yIccy jEsfgYR i"CZv?LRݾg*Q puS.,dH=Ʈι璆V~F 8(fwr'JFPELjT!0zȾ\CYtc62@ ]Ȇ@ջΙZTvE|ڴ 'wNVULJe2ױ'T4jl` P")yPǤ'XQ' 9] TI2iߓ epaw5C|{J?BG#[X:޻甪T9dSH,.t99ɵZ uW4ڴ 'wNVULJL>@{7znE_:xy?Dbyg% t!4HZtbrwV|dn ܊u6RglKLU3,scj,Mޡ9[W⎣O78мr ( i^ f9IW9kgGU04~(1RmiOqPȫGQ'}=SmdUQZG([!04~(1T!57cӚ866){s5-~ΨLNT;UrdDQV|dn ܊u~3+DҘʴS;#UC2ɨC0\_Zai'+r*QcPdO=զ Y=ƛBr"F<<Kyo<̎8hэP ) oj,ǥRF<<KyDvKt51a*g(q֦`: 0 R78V07S8cdsD0~(1ujk1L>@{_:xy?tZŕL7V|dJu~?-ڙ[S3Ư|`PC1+j!Cz:ÓJE4;F%J[ڴ 
'ULJV5E#.1ZsT7pf*HJNбN%?4rܜGT8hBĠLWPb\Q9~L>@{_:}?|sBCs + D;ӤM45 . +DqUզ/ Y=ЭZp1Ki(xZZ4(`дMSnݓz;(m{%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%A[%AA3?ACEϿP  !1Q@ARa"02Tq BS#3br$CPUVĂ5Dcp ?ij5]7펅?lt)'cO<3h`03^3Ó#u к]Cdst.9P ln6G7B#u к]Cdst.9P ln6G7B#u к]Cdst.9P ln6G7B#u кk)GP:yOBx>oTH'~E6[҅ O'_?(SOry6)'JLދ Budȹ&-e62XnmQԁ=\|O(ILiUw;3ӤGs{כ JK9|a.qBq 'pm A%9EB^lqGkʌrVP#tE]6( O4U'WRRT:q+2}'N`9U*Z]*JD:YK`dŬ4:xM[7Zz'ZHQ8=abzGq)J 7)o)uEb'=0HO4#̸ GOJQI( aC:C 8̹%'+Rl-.۟JtOuWJ\ZNLP:*3u @Jlm719vGqN 8}ê5W)d ZMah?s0BR4`8=eq,;푓}z- o ZYØ-V?E(Xoi^qH _}QNbV/5f|sk+ ~Zyo¿'姆LDܫKR7q JeCKɃ~|!(/JT6R3u(⣍)E ZcnsFUlSJu%Л7u9L:s"=+_D qY I{TK#$F]ZU>䳪۫\6Teb6#%B3+_L0Ys,¼CJ ǓH@6[.)%@f.^l9*JckuĞ.0 -%V ĕ%C"] b2PsSnp1JqYIJuamHVrNS23sry?g=SɬH4EknT"*2JKbmxb9<.LJȲ U +9P 5.k.ӉmsɳI!ũ,G@3@*x3άF%9II4\u":׈aq#;JJ2T?b#!:ej, a -86 qJ-A#/*ScNh1d^y8$OozIF1kƗ&"YfUhS&VZi)%Մ):S탉 yТGQj)BH̐- 6EI8I;s6?6q,䬄rRҊMSܿ{O>wSJBu_Yf9$T'\8^|O9Y@mL³Vts̸…JVoKe$sy˘7Fe%,!jqǍ䘙qɌ-߲"yQIvd2鹸q¬\p*QRst+FuêyN+͋\W=|P{"\OΡBP%d LT+>W=|P{"\EBsߧ O*~T+>W=|P{"\EBsߧ O*~T+>W=|P{"\C9ԠߊChiE"=ЧG=fcN?=Q30+ .-]B'i+tBaġ6sl^6sl^6sl^6sl^6sl^6sl^6sl^6sl^6sl^6sl^6sl^6sl^6sl^6'+[fjSO H(xfxYu2nU}ھ(}N?mlp jnq7S·,Qs KخRI0K& 6\(R,f N4\dDnNM8P!Yg-R44 l,̴p`M(g2H+d7 H&wSZ=WN74 l:M%e̸tĽ:QeT$obE<ڡ9dX5ҕK%Hq؛~K"e BO.erv8/ m.OaI5D2UbU9*67Ӟu%cĭ-~BħW>ЧL)s4+J /8#&dӤGiI 48lgM)We63|q]ώ+vSc>8lgM)We63|q]ώ+vSc>8lgM)We63㉅Te󴵧:uЧ펅?ښY j?LK2b+'|TvDȝR*R"wJWdNJ)];+'|TvDȝR*R"wJWdNJ)];+'|TvDȝR*R"wJWdNJ)];+'|Te*NegfgjVyOBx>ȃګyK=g7>R*Y:/:GaNo.O0RGWJ?e|߫G%~90$'Hb"jWn׎?Fԛ[9n׊:<y ;,0.qCSe\q([-YH5!0Jn iM扚lc-4{+6h\қeӚ"ه]r1T伃cziXP19Mx(!Y-h-+l6sx-ՖۭMbaԤVna BQ,!GT)KSJDzBL΋-S{ =i7AC aC[Þ؎-dhϟT9&Ԥ/Sx\,X&JxkÖ|wh~)Ī3k`XbY^:? 
~Kbqj&W*-,s[LdT’Ό .Cs "3SWBB[(Sm$v^)nQ'79 eXmvG)NgH&osxrh2W¬\lPĘah:R J* ߌYJq=,ܵAdfeEk~P6]->M^9]:Uearm(lt §H># kcDҹ]RPm/hNqJScD IĪQ9SÇ5yGBx5 s&*uU0![6 ڦLV0=`D[qd&fCzۅVsKz6Ubnep}ĿugjJUzRuNlʊӌuAeD(~0 J!]949tc\:♗ uMsC8ad6 Z]$!u:TT'C@[ U78\yizaEGqB\- 莨oQЧ F:?Qji$ސbosI)[-_4釒2t\h\RMP3n5aK(Iuq}\|O(SO?ʞOyGBx5o!TYI_m MU-%kQQ4yVge->?j1ғs 1 PkW(i%HY7W\d*T:KlVZ8  u7t'8%9B],Y uh vVt?yGBx5i ) b[c=ureƲE6L%.R+[$&X^5OVb3DBU-8ϟqUբ컉#Q,a qǎع&$Yg[kJ} W-& 2WyW:GˡK(NQN$gě~MN#|!OK&ͱ\EACp$"\a |N?L|W+7 VJїayԍ?l+BGQ$5 5Qi>[E85( yzk텯[UN.lyh_)fpzbqAC^Ϟ6͛}va鉥 D3 8UOa,ڰy 7js @u@p>]b`yW9ǫZt|2zaV"'&6Ɨ'^&_Oʱ.ycHW}wG߫|>qX̮#]RX%/4:λ>>cxIJR今d淳G,Avrp5.z!Ni- xٚxP"9`u(%}X-5 Ky6Rnb֜?Y*6X/LƵĐ1g .u=CجZ0޸(VS-";Pu&\PI2-S<jN5W*1Jt,:Y-BW'[)l NTIJcL7,Th_Y5#N8B")̨qƵj>=.TаFVDH^I :FDZVĐ5g(ۮFǠTKV`G&ߙіVkPQgDAYD,V"6rcDB̖HS/#ܬ9PdKS%a[yTd5S +P T [4[l MNe:wقW[Xɉ*kP`LY>xiH8P$hk(gZsR7,_N4gCѸ4( g*ڶOF{eڍ"xJW+\GцO ,Ն# bf:P~ L Zz#$ (:Ϧ~,@U5;׈w^!޼CkZּCxz;׈w^!޼Cxz;׈w^!޼Cxzsa4ʂ&6^,^gK~fx6c߿~߿~߿~z_c-cʼyGz;הw(^Q޼yGz;הw(^Q޼yGz;הw(^Q޼yGz2JUCpMeU5_WU5_WU5_WU5_WU5_WU5_WUHw!%Hu ԷO ?O}=}e ,&, oʗ1[Dg 9F H@5!"F*f]. ?2 !hdQWn`'y2@h:Pg¡DI\™Ax*IR3t;BCA,0Ց(&J) Y)p]-">|^@J v%@^ڼRԊ'3sɫ~ fq)tNUbbƢ\&2QV[-]0l4pU\I(k'#P ,|=FJC{ҔC9[#hi+4m6[4"UɨGrK'H Z-Q&7$8 _pom }|^h(G  HLg^9q޵gX ‰AlԄdkC+ թЍIG1!uQV1@^>$X g=bFR~c'"*="/h*.ciS@Ȥaգv ʺfZ`wv( `{b mqw_j}*x#3E't1Ǘ.\f -ׅ˗.\r˗.\r˗+Hj$N%}}}`IeW0Ç8pÇ8pÇ8pÛ4P<$hN+=kdY3;sGJ^+sIG?˥NQZLKfT20ZH#GZ-C5/]JilB%p׻PF,[4S0!d!w|5QHdbuLwn 69TԊ uS1d#zX2)Tvc:Ij*Nȗק^#oW 1R1 Iemlrk'_M)L *>H^D>i!| 1N߱="o2X Ne:mK;$"?j0/X1z#NH2OjsBPʮuxQPJG`NluhQ 'dez<4BY#vyt7ySftӧN:tӧN:tӧN:@Lb2W3ƑG7 e hT.lOWھ6qB`9ZVӄ~AS~zP=3$7l%u8&ث EJB-4VI@Z hd7ru$qBE!APk!OAŮbb Dgԛb!Bw/K)Je:SnxXۛQ=s(&GzLHxcEVYQj#RS{5@)l<"!42׻& HCC?@%}È?ziSޏIM&~zm"{Rԭ6hnQ/M^@cCaI#|,eѪv0PB$=mJ(ړGcrlgDWc7jܮuCCw56X|@(ݨְ t" )A:s\*LرPf>3& ԓ?@%}&yD֌yZ&51Q'Ww0͛AXGbX C7z>"B3:YFGCth`(C_DYmHP^nT2ܡXM٧+ k{N8t`lQv`G0Рg 韋 WھŐ+m_C9WJ~m}!BT@D)g\d3d [W4=SUpA:Fb(sׄŐ+m_CN֧v) ˹Yh$+4eeҭHCg䶯g䶯IEZv: wړ $k}g䶯@©@:hYu- ,qPbo%"qR"y#2ftkQO[d [W[%sdͧ*Y|WIc3|xc`t"ûF%$ZRD~Y? 
Yyks Wھ`oID%SXF((DVވ ^"6dKg䶯X:lBmRQ;t cՊJ m[#V2N< E %p,G)DAH<yu~$ 7O%}ֳS+_Po2Kj8&P@i %3ofkApod3Kayi"Ԭ&uYDEֆ$D Ƒy-VKjՃ8Ő[Wk>0ʭSu]FIa](%A"1)byT̊Pv*>#H!/;~yKjQA4"A*D> 'P%~k(U`*~gz ~ߕ3nƁ oQՙd|ϟ>|ϟ>|? O"0@124 !03"#5`p%PkS""߸ŷ-qmo[~߸ŷ-qmo[~߸ŷ-qmo[~߸kcd2cxF8Zw-7/l~cJ,Ty}e~ԝ{F$++%R L4,PEPk6RKHA*LunIldh1qf=YUk0$yGY1}18U}߇~yL!WNB4q^:1b=@Q=AJ hiL-x+ F߀%u/2r?;#K&^0G%ty'b*T#3\džSʦ:,hޯÕM!\ZFس,cv暌v "+k*q|uү ** F R4H@_!EKu7̻D^dL߾1Nײi![%n[%n[%n[%n[%n[%n[%MO+ &kH|-X[꭯Umzk[^VתU꭯Umzk[^VתU꭯Umzk[^VתUWYO4u"@4ŤgUP|χ.?~ nNjNqV:%cN7U +qJŧQVɅai\_&[ǐq ff@H0ƥ\lǑ$cp)1c(BSa)Bퟯ vNe;!9t+Ao+?5HqAyK.nwhHQܰ 6?EL):v_xi*6#Kqc5VgVHH8;tv56%;⽐RoXagFSS1Cv F,9eo tNjNv;c bIBB!ESn,m G41NvtB8%8l1c0!Xb1 cDqzUܹq q+AnWI7ߍF- +c!о9q"WI7߆vRL-Я\TE@8Ge~ԝ{> IV1 9JM\-WǞ}ړou#WI7ߑVV5n䵏v@re~ԝ{> ^fk-fUheUA8٧N<4òP2y&K#ؚgF29ړoux2!xZJ=\ĆJVR?WI7)ح}ړou}_K hn"Pf*6)1+Ao .䢛tKN(te+4vcj6L%b*,hnjNN6,(QY*G8j[Afw`VhF,yA}fus% èڿړou }6]e~ԝ{> xmSnWI7ׇ>yسԕC~C #J+ZŋCW_!V3OBnjNuEQ3i9kZe;ʞ>bKļnW'UN~9D- bt{p>@>1|((((((+ly̧y̧y̧yh#hx' m/Z8J0ӛuN{W|aRm&sI(tay&f8Ŧj a^eq,M,ެe#g`Һ"lAޯ#O.[roN&wX: LoTm`y@HM[aֵfD9ϏL@⮽7 o&mkɴ]W .cK-EΫtnp^ˑ-w)ojqW6S1*+Xۏ򘬨Tz=UUGQTz=UUGQTz=UUGQTz=WD X*TRJ*TRJ*TRJ*T#w6l{nRI-R->Jk"I^nLpX7,*XW9 }wˬPs@=½FJ܁.O-GGGa֓G5ۏm*X]KgJvm]SzvmA\gЧ غ}-y,*85&ABH򐺢-'VX7x]ܮpT5EKcۨېB@a07FL 5F,6>A H0-(A(D6ϐ pۏ|ck:jll L[q.V=oxeǷ2uo!|{~ vQiKLǸ^ U+2+2֬@/KS;Jhx.cW9׀l(>es2{=?"#'|;w|{d) 0^bpOc7'u.X8u`B66 oǹ( =F ã"?ԻG RJNRd_.6'c؞ŏsb}v taR 6OO1lh nB+~F=K næ<*oUN`F:]1!0:WBr ⨛Bǵ;a[Y@ @ @ w0@124 !0"3#5%`pP-ŏn2uꋯT]z^Eת.QuꋯT]z^Eת.QuꋯT]z^Eת.QuꋯT]z"#Fd&".J1 I##.7C%Yk1iq3,g֏u`^9 bj/\˛^LO5OȊdu,e-Ղs/FмS,=i14KKFf,_%]:lhS#%]5!]ڒjOS R\yp2}hSɟs'L^ˋ'/dZjd;b&&wfwmN;'w/}d&2g'?t_&>WvKA-KA-KA-KA-KA-KA-KA-З##Tn腤na^a^a^zXWzXWzXWzXWzXWzXWzXWzXWzXWzXWcmbXb+Xb+Xb+Xb+Xb+Xb+XbeNr#y44444444444444444444444444vd |vBMNUǏ8m')'WɝiN7u,407kSc# L:!%` KBٲ}hS3*ldaͫjo~vZ>7#I8Ih"CTc'b蓳eNe2}hSlуiU-/&oOʕz9صūE ghygT7G^ɝ)l.#eN-$`"]ZWS[&LxZ_2}hSc4`O?k 4%;OTfZPОo ~lZ>7L{9m=tkZ9͊>W̬V74^[M,0~lZ>7LBNj%>IvUN-.tSYy$vH<}өoxmXϭt&^!oy/TiJSMhK%'b)BASjL VS,g֏u?ZW˙/)-1v;8qK2^`̿h&&GbP,g֏u?rr[?Q#8LJӮqZHV 
QLZ>/HӿyV_+#ۺV;Bbuk-Yml[f5٬ek-Yml[f5٬ek-Yml[f5٬ek&fU1?}; !1AQa"2@Pq 0BR`rbp#3 ?keBD$H"D$H"D$H"DuA}*Usg[ǓGiJ RzWV]zUx~-T1ecO1J2:mh.]Z3zRcӈh-~Ebi|XkW$Fλ":MַR%*=m^cYZWVZBҺm6GiiiVk_c :7߬2(Ӂz-.w# /"~T^u5yWg^|?g[\!B!B!B!Fc1c1c1c뮛F1c1c1c1v^a' E+(KGGj~*cUW,Eͭ]mTؿdocker-1.10.3/docs/userguide/storagedriver/images/container-layers-cas.jpg000066400000000000000000004200751267010174400266530ustar00rootroot00000000000000JFIF``pExifMM*1 V`QQQGreenshotC      C  ]" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?((((((((((((((((((((`/~ܿ/+ fm?\}"|ϴ_jɜy cgͿ[;@eQz5yφ?']?Twncd(#.jMҍ/?{oSJ+?ڟ G7IxT+VL,` Iny* |mMa>Ϳ[;@eQz5G5/j۾ޚ>ٮ]lW~#tR_M%ȣ4lds+k3xĚ׍,uIh˻hR矛x|:m:V!b1J[NQ_8*a|9uOo7Vu`T^č^s!/? O-E4ۘY/3F`-AG8"m˚os}?Ŧ*jKRQ;,/k^4&բ. !H[g~oʯ|*a|9uOo7Vu`T^čQpKھm.vlOqk[>mim/u_?']?Twncd(?j%ÿ2&KRkhZ-2;p%y΢;[~NXg#nnoӔW? ~At_~<=ջ]CԢck3xĚ׍,uIh˻hR矛 ~At_~<=ջ]C#tR_M%ȣ4lds(ܹ=67ZkJ6 O(j%ÿ2&KRkhZ-2;p%y¯&~|Ý7W6hOsunP)EH Խn[zj_Gfu߶GQ_cK~%K|G{6;K"Ѱ` PqQ;,/k^4&բ. !H[g~oʭ\*[r6+l}9E| |mMa>Ϳ[;@eQz5yφ?']?Twncd(#.jMҍ/?{oSJ+?ڟ G7IxT+VL,` Iny* |mMa>Ϳ[;@eQz5G5/j۾ޚ>ٮ]lW~#tR_M%ȣ4lds+k3xĚ׍,uIh˻hR矛x|:m:V!b1J[NQ_8*a|9uOo7Vu`T^č^s!/? O-E4ۘY/3F`-AG8"m˚os}?Ŧ*jKRQ;,/k^4&բ. !H[g~oʯ|*a|9uOo7Vu`T^čQpKھm.vlOqk[>mim/u_?']?Twncd(?j%ÿ2&KRkhZ-2;p%y΢;[~NXg#nnoӔW? ~At_~<=ջ]CԢck3xĚ׍,uIh˻hR矛 ~At_~<=ջ]C#tR_M%ȣ4lds(ܹ=67ZkJ6 O(j%ÿ2&KRkhZ-2;p%y¯&~|Ý7W6hOsunP)EH Խn[zj_Gfu߶GQ_cK~%K|G{6;K"Ѱ` PqQ;,/k^4&բ. !H[g~oʭ\*[r6+l}9E| |mMa>Ϳ[;@eQz5yφ?']?Twncd(#.jMҍ/?{oSJ+?ڟ G7IxT+VL,` Iny* |mMa>Ϳ[;@eQz5G5/j۾ޚ>ٮ]lW~#tR_M%ȣ4lds+k3xĚ׍,uIh˻hR矛x|:m:V!b1J[NQ_8*a|9uOo7Vu`T^č^s!/? O-E4ۘY/3F`-AG8"m˚os}?Ŧ*jKRQ;,/k^4&բ. !H[g~oʯ|*a|9uOo7Vu`T^čQpKھm.vlOqk[>mim/u_?']?Twncd(?j%ÿ2&KRkhZ-2;p%y΢;[~NXg#nnoӔW? ~At_~<=ջ]CԢck3xĚ׍,uIh˻hR矛 ~At_~<=ջ]C#tR_M%ȣ4lds(ܹ=67ZkJ6 O(j%ÿ2&KRkhZ-2;p%y¯&~|Ý7W6hOsunP)EH Խn[zj_Gfu߶GQ_cK~%K|G{6;K"Ѱ` PqQ;,/k^4&բ. 
!H[g~oʭ\*[r6+l}9E| |mMa>Ϳ[;@eQz5yφ?']?Twncd(#.jMҍ/?{oSJ+?ڟ G7IxT+VL,` Iny* |mMa>Ϳ[;@eQz5G5/j۾ޚ>ٮ]lW~#tR_M%ȣ4lds+k3xĚ׍,uIh˻hR矛x|:m:V!b1J[NQ_8*a|9uOo7Vu`T^č^s!/? O-E4ۘY/3F`-AG8"m˚os}?Ŧ*jKRQ;,/k^4&բ. !H[g~oʯ|*a|9uOo7Vu`T^čQpKھm.vlOqk[>mim/u_?']?Twncd(?j%ÿ2&KRkhZ-2;p%y΢;[~NXg#nnoӔW? ~At_~<=ջ]CԢck3xĚ׍,uIh˻hR矛 ~At_~<=ջ]C7nggee{_WӮ'#{z\KmkS(ࢊ((((((((((((((((((((((((((((((((((((((((((((((()*oPG.OF? [}fvW?o>>:m%Pΐ9# B)5+KNlpQrTehm߅Š(< ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (^㇂-ox^/^XR ZT-o5Xo *Fx((((((((((((((((((((((((+axRkںP^<"[/uA$(¨||V4x'}sÞ "7޸E9^Ma[G^QOEZZw_徧 c q'>(x^>KvoK'T!kIb[s3%~8n~"x7'>bTocaYeD\2Ey{?:m~$iƁO,%b]"f&HaԬḷ;{E,l$R2HAkP~TOO/7oj\^Ҽ.n&yCZl`Y $}:/?aO$G5_>!6yZ\xgH}^kf7e `>/"؟S |1&>:g{)5[\k)KhԼOjU,qZ }h_S/ػ?-6}u CJ_5$hx:]АE~,?p_0;_ISL_xPL_.E@=O=ĈDFUa |}5~џt_< b5UMwKm?tY$Nq+Gca@' 8JSo6YˤW37lgf۴d9Z*+_1okk-NZ&*[>\inP9g_'# _Y< x^ LY̒,L#˺Lmƫ'ῇEZ&j+>%!l v3V| _ W~τt-P=֣p$6Щ $%G>K?iO>~?tzz6=fvf9#auk) dHH AR@?Y(V ijMjGd2M\]K*ˇUtxdBa߁K}AӴB_;Ȋ6EႣ~Q_rg/س㗉_!ev\M[AI,̫+nĪ"||h~Nx'_ßqEIqV[c *ϖ)eHjċ'N(|5 ~0 ]j~ K}&潙^TV$Hбi:g[c #B$(gH*:pB 'y7 -|{ֽtK8nmЫKpd=)2P;œU?"Gq}*Wv_oأ,;M)/{Ko>'|CQh2j2Zqxfde;f$LIߞB!g[_@`G ~W/ty_ ?ov)hh/*_ßٷW<n#L)f~9WO#4j7FKjZk ՚X 7bY Iw/wGD_j?GFAgKlT/=(Js+ͤۤWS <G4ǟ¯R/h+/|wm OeLv ׬W~F9]Sß j-4^%,`IT.EqB@(((([ya&xaAj񟊵1N$Zf/)..>MH& n[?OٯZkmsPtymVDKKQ8"'tWdzW_G࡟guIח⧀f~T0hm 1y3fbo~$tKQ?>0bL=|+mWj "3~|6}毢 >e_6 ౿_c߳/|Eěek4w:v&nѹ'q1(_,gǟ'}uhŕ]: Ju8gy#g3јn~!jY?~gVĨԗbkai#f5ڰfu9}^-~ޞo-7suYa]<gHdnذŞM0-F e@>??Q'dž.i:]*T,ʦKXG$bQm!b@?aoOڤ~??MIe ϱiI?{ccƾZ6Ϋrxi4ln5~rn-@'*$#ʆ+)J/0-~-[pKvnw [~~?K],º_n>lW>O<͞nWv$iU4Z_{s__᾽/eߌs2Zxmk1򤑤)8S;^6x{>4kfմmCm3&2 o!MRC/#?/kj&mtK\V23Kʔw_x$??Pk%' ৿71ςW7wH:[]IoݻݍN2|_ ! 
3?o5Oō{2‘A t~0;ٹa9o fũX|%>lt'{>-%$+M*n` T8J>%|b;N񇊼9;McPIYԡlV4}56ӀpkOD|qj~>7N[妗&,8WOvtMXoqr rqگV>1Otk#žHtL-f[ȰBLFs~_e_>.h?2|@񭿆<~ypڹO[.gyK)c"j 5s OYlφb,c=7GѴ;;rOKe),0ܣscP%0%xw Mw ~񇃧9vƠւD",_s* G{^K;Rz.tX-h9p @*++#>|3W*\~822Ub8$?Oo~w=)|a{ß$S5؎r$iq\ +wǟ!-"go>Ig^Ěάkomp{2U]O ?~_w<7^ oGH}rLMݪ*9CDJIܸoƭo#$o# .Gvqq{xY6F9fڧ@?f+]E<7m6=SN["C L$z9w S]|oFox^4 L-o-M+#u*qȯk)Gτ_oG"KgPL#SO%0GR5ۻp@}+ozljPm i:|cLQh7w1[շƛ0Xyvזyב,>һр*pyu1.1xF⥯fW!ڵRuYmrżք8*d 0#9K2?Dζߴ ޹l5? i\n|8.7Ƿnr~bG _9G|?M3{ºoПT|;/kDPG{߳|όt~,aԵHu}D\Wq nZNvJx| sy9k,<_#<,!H&W8o¿Кdž|#yAH hZFU31(eP<;\|B?{|o?auSln|/?vMO/}E~Rh(O9Iw%w>ÚmKֺ՝([[*$oFI _:,>cn&ႹsQ_oߴ3'ßٯ?ltAkLjuK̫kco|n!W̮0 %G'Ɵkߋ/>ѩK'!V{xk5/kT f?EPEPEPY,/:ǃuj>)т֗P F+ y & sH!/V;gٝk 1 2IzGꔒ孷ۯ[~8(}jrt_)_Vo|7w#fM| ;寏)7aDxg65΋-ܚ.(p6p$`OW$z[?h+[?h+[?h+[?h+[?h+w<SgR\bn-kNF  <(B#? oh6u4$wsw\dqW<8X9㏈J8؆vaF/gǹ ~|JR<oc>.Q4|=Z.G_j6mS9:$>N"k/&0W/co{¯ovI!tl ?chE[ xllaK{x!@~9ÿ $ht mBd09T_5/9;EPxx&ZGhz4X6Nwg h_7K > ~?#xAlcyfTfDmUįoc|$x>"е*/ tBVѧ1>`L2xO+gIc~5S>6|1oj 7W>#InWr&J4h!lTG~>-IZ4X$iMjo~?>-.|3 o>ď/}틟A_Z_R!DžiO隄^:"~֟A_^ x xvQC$1z|oc.^oXAxCef(gd ~Ej<O_t?6:# hڅC 2InaYDEBoBgwO+<׿~B k௉٣t aOڎLUs_GquqC1h!n OQ!|s E-#iWa /96ThH __ b/'oz_>2~Qx`[␎K$MUg('bHNܜg63wY3|F|dς*xH74 IRI|HXGn%eE8D%&v=Lź_>ͮkst]] Cy{iiL?+~o:x;ᮏ2=*Xg>ꪬUFG.Kx^*F|_Ѵ˻UNu'{yVhT\_IyɇۡU߲>i }ů|Ѣգ|C^mu OM [p})L7m*C{QbG3e џ,1XP570m紌[؍%>`P^ğ&suhqAj oYO0!F|vGywp@?lhQ#C'moxJ/ NƟW#?ۉ'izy ~$xSI|cF ɨ2clrA622@Wƹ:Q\_id?ɦ/,jO,y&L87|c (wž&<cxKUvɨo`C;2{άIʊwE[+O/;O4~뙮+29Png4mrJmZ/ 9/o(|7iq]],,BI[`cܒh8پ~:մ-a<йI:$E~o2;d 5IF k'nՌ]]JA'*O! |@3?? &ݤ7zPmoubQjO6_,jI-~90w~\~%~6Eϊ-;<[65Vov~|\닺ČUFI' rFi?kcO[? 
uv'!vݬrcWRA`]w1fet־%e1YMhGG\L=6~56.+rS&IȎYZ5$}Ҁ极uhF;zjOaq'^ܰ/خ 'G"MW3nj?g.(zD:LV1%$[m alWfNXݏ8<]'"ou%Ls0x%|ǜ/J ݶ$"7C#{ 2{qN5Vxd5G&J@ۅ;UG _$7´xMmeHS|b2Ug&| _#>a2Ji_cOp>#n!Y|D dal"`$z$5'$ ycoj+OeYJNXnp2O_To"_7ÿIчTi$۶T K"د3r~UoK>.v~{Lݧ:_Krd8Zc>Puͧͤk}Uc:-ώ<#(p1; gEMCÓZ\xz .[_ڼjд*G+~,~> cw#|Q E K$b;$*IHu?W G>4SKY$ ]F!?^[G#'ͨ2C oIlWF?o(\~qzMq_\=qxNoLo ,.IS5 VmG[`.$UP[jUTP 9'w1F|l*?:G|ˋͿΓ#~R;8*ضo*?6O ^+>ѹT = >?wlK'^fUf:;)#9GHM}F+#_i̚S|n4UOymQ;|Σ"g"HgчFSʰ ld?O|&]𿊬%u; v\"eSC)  7#G׺k|^A[ƘV>cnao'`XK|=,A$Gk;ڍ)&-Uu+J'.=~Vcҽ.f~6x{_>.xvx^)N|Q, wr" u =;OH?֕eI0f$Px.+D^W&>]Z LJ ªrHMnU4xQaenDd_X?E+xgX&Ѽ*=݋q27t )p$hẀB$T2rK0~Q@>ӵM-yZA -ƢIpǠW ™О.d:ᖹxBivl37`^PPwI-E~Xf?e/CJ7=KMq3Eq/*A5?Lj߳i.5柨5ՍZ]r,l$A+ۻ?`,gkO&ozO,qw ӫP. Tw&J(Uh|?e?z ?'4=mQ{nud|:w5qA os$~zO31㏊===oC-o 햲dI$? SGE|ŏz]k%ˣvvֺ3ڟ34v(vA c%Oc _x{[īFN)iPh3 wy$Daq"/Fl |JմMFVW0^&GDѴ}%WԧQğ31`ʤ~Q@O>4xe|DM/tkžūy,-lS":0\}g__oM/|][Z ~ݩKrybf.6]䃖8.k ()v|Cgx_PO{'M~nH̬FXW/'ǂ_׃|9خtԾ:jxGHŷj7m0XOy+Fm,_?L{ߊ/_>yixNխZv ͔t#8 5~F~Z+wͫxOA𝟅=?P+/& %{ ` o~^-oo?h-NvWunW* "$g-nVuO'o|O < ? :r_[+|oK(bAgݕUTIM~x_7ďt߇6Ǥhtڅߗi 4w;A£&qg6A :9#qF( Px( KO 4>/xW- cTgMR};X܀YWBQnx#টN_ïhG%:]-9tO0<_3g~Q@axUj𿈼AOitPˢ!{P_׋?xO ?~iE&fL۶]mZ[k( =xfgOZXKƚF}nE$K5l`;x'fuxT4 i^$Dg(bK`n2(߃<]ſqu[/TCzt˥]%LܕUPI@ŀ+٣dߎ2h/[gu GJAZLTY쵾 *rf4e~~aۭko:ng%s}w Sđ8 |4AܶEm\_B)Ǘ– _W i4vO,29kk?% 9jœ|@GGx(N|w -)I.F&7JvŸb?2hSvM,otYXVoEF ~W? `|Ek&&?ž&[ccB} EPEPEPY,/:?t)C:B\H+ 7şeXn}R6r0WxZhE}q^#}J5o;w7c}r{Fߩ"./.MaF1_rv8 + 9⏇?_-u|gPXndxY" XI8GʊO@o'_j~uo'_j~uo'_j~uo'_j~uo'_j~u~|C z=,vh}s37'?s'şouPYOi֧[&bfh\Wo _d?wn5kiO*K$J:+6Ԗ)"d,wb _-ץ[|' <#62AuD=3هEACOFjw;\xvz@F7+IZ/Y#6$tM3VGN[|pn$JBdH߳+nZ1~'X@nyضeIWb)$D!(־'Ŀto>iI]WKEu8. 
#T`v99|sey:w|<|]k^$|-4T2\jQĠ!G>f1${?W7x4ށ ij7a$Y̆Ud}"8T@5hV>,2Nt XBqPW^-9~ A"lB?1tȽ *W'ů Y~;|kva;deYT;pߊ<+zNkV.-PWV_b_,/'w^#G>*-6^x3¶o=XQՋ#n_ӯ'yѿkO ?~ _~\>[ʿ]OfA^W k~|+tO[!n;,{ wF1=v^t Z[CXU*F@(G_߳M~uKǫ:-l4Hl Opc:Xgا<🆗Ɵ]u7 }ylV2"z& Yo?ms߈>x⿈$|94^iJ4Hؓi{ZH~P 3o.F?d guZ{v̧++c*'O^>W o~ZODFeqqs(bXP 9H_g'?C$W.<zs34%sMz^4zmhjlVF$m0q@?j#59Ws ~G?|3 xsiIдtvw.!U1 $k:O3hâ;Z^NMM>}kebƧ׀s`ۏ(KtQu8_g d%'G󑌎 G_Oឭ3"𯌯4h%&N[Ρs̪RU@"Ϭ^3 ?FpgmVh>(dԭ.rN~|#+߀ ~|yhKMvXaU@y$ ֪{ğ^w~bZĂM!H1( f//6[x/|O!iGd=F=So~Wkh>4ǥiϨZjizktI$`0x\,'6?-nO'l{;.-|#Oc2K*Bm@}kٿ֏Jm Wo8ԥu*;*M߂^ |'{ xóJɥTȇ(AGjo4^:n%Z(08 (U#տO[$xf Wkdi& d/?6  <-oCH4Bĸ "( |{C?7/W<|Z:ce|>?PǥDvJ.}OwὟs;;++t8qZ]ou^NQEwQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@8O?S„?rn1 ?e};3_⾎Y_t7!-Soy-tm/Wb~I]Z_mtc֪+G[o^.QEyxQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEWW/?o>?>2-5۸o"Ӵ[XYf-2`E¯$.-R*Jjtzyx+8N2qqwk| 'Cgŝ{ú?izMтX{ >bmu8ۧZ} _?'!ۮ>n3_3n9Uz:ܱJ4K|XlG4ehko3Q?,>?fxGKծ:OR u.8.z 'Cgŝ{ú?izMтX{ >bmu8ۧZ⊯}7V߽Пy9}ݭk8i7(OC\}ݏgfsPgA4:^t /zOQq9u־b2N4[y؎hYZ׿M/| 'Cgŝ{ú?izMтX{ >bmu8ۧZ} _?'!ۮ>n3_3n9Uz:+ ]K^t&;;Z:~O?:u٧kαxgԂ|b1˯^|pPYg^4^t`,l]q8v־x_i4{'긞N_l}ckZWG4RQ/us1u nc*a!Np sO?:u٧kαxgԂ|b1˯^PoHվIo €# H:5}{↟ᖋ}462}Q)Z 8<8D(|,xwG|}/I0[kow}A@M.;t]W6a‹$a;uF~m?_GQ^abukik߮b9gg{+GO[y#PgA4:^t /zOQq9u֏Q? >,fxKnśPPm9:U}o 9{W,vkXOɿE?V}?e6XWO?:u٧kαxgԂ|b1˯^+QtUņsEVi#Q? 
>,fxKnśPPm9:U dX_a ?q+v>џq{PXnXE]ZZ@؎i7Yg>)Y|;~>>].uǻ>>\q]zOϋ:t٧.fg|bq÷N_[No`m{?Urg{Z>q2o,/Q~0O?nh|͸V=O΃citZAs^= 1Xr׭}qEJadw{>iiۮ)jգ~_Oϋ:t٧.fg|bq÷Ni7(OC\}ݏgfsun3_3n9UzI~#bxgR|mղ5[ CY _xu=ӿ~%i>&z=N˜[F?v*݅|/iVW{$]Ho譓}+3‚5ZZʵ>e<&.u?{Ngjz(uO]7F mbϨ(nk2o,/Q~0O?nh|͸V=+x7,Rw-{_#XlG4ehko3Q?,>?fxGKծ:OR u.8.z 'Cgŝ{ú?izMтX{ >bmu8ۧZ⊯}7V߽Пy9}ݭk8i7(OC\}ݏgfsPgA4:^t /zOQq9u־b2N4[y؎hYZ׿M/| 'Cgŝ{ú?izMтX{ >bmu8ۧZ} _?'!ۮ>n3_3n9Uz:+ ]K^t&;;Z:~O?:u٧kαxgԂ|b1˯^|pPYg^4^t`,l]q8v־x_i4{'긞N_l}ckZ?~>M /ioc/ǽr>)Y|;~>>].uǻ>>\q]z(X2g-;ue<6#-VvVuK#pPYg^4^t`,l]q8vֺm?&Iv돷[ێc޾r**|׿]/sIV>GO΃citZAs^= 1Xr׭?>|Y׼;>>v-7>&\su(s{km- ';~Xֱ6a‹$a;uF~m?\oO)?Ft8>w)-iP,J^5 䳨+*C>mȞiF뤀qߔѣĬ$(ͽo/u}k9j<;JRݥ )Coōw7izUɂY{ |b냞Îi7(OC\}ݏgfs{ B oa{7Ofw奋T$Z6= 0,%gAsA%K} p4>YZ>)Y|;~>>].uǻ>>\q]zOϋ:t٧.fg|bq÷N/ ۚ[?Urg{Z>q2o,/Q~0O?nh|͸V=O΃citZAs^= 1Xr׭}qEJadw{>iiۮ)jգ~_Oϋ:t٧.fg|bq÷Ni7(OC\}ݏgfsun3_3n9Uz~D(|SwX|}/V\w}=H'}(_\QRXe'E]ϚZv뭼xlG4Zuhߦ>Gνi&mY16غpӭu_~>M /ioc/ǽ}ETUծ~_ u? m|? 'CŝúzЂX{A>cEׯZ>8D(|,xwG|}/I0[kow}A@M.;t_\QU/ ۚ[\O'/w۵cm?&Iv돷[ێc޹_Q?,>?fxGKծ:OR u.8.zTVFIWw斝o2;+]Z:>=I~#gM|m{[ֲ[i\/8o\YF=c dX_a ?q+v>џq{ϿPoHվIo €# H:5}{↟ᖋ}462}Q)Z3,8L:+/z̵.],fxKnśPPm9:W/ ۚ[qn3_3n9Uz~D(|SwX|}/V\w}=H'}(_\QRXe'E]ϚZv뭼xlG4Zuhߦ>Gνi&mY16غpӭu_~>M /ioc/ǽ}ETUծ~_ u? m|? 'CŝúzЂX{A>cEׯZ>8D(|,xwG|}/I0[kow}A@M.;t_\QU/ ۚ[\O'/w۵cm?&Iv돷[ێc޹_Q?,>?fxGKծ:OR u.8.zTVFIWw斝o2;+]Z:8D(|,xwG|}/I0[kow}A@M.;t]W6a‹$a;uF~m?_GQCabukik߮b9gg{+GO[y#PgA4:^t /zOQq9u֏Q? >,fxKnśPPm9:U}o 9{W,vkXOɿE?V}?e6XWO?:u٧kαxgԂ|b1˯^+QtUņsEVi#Q? >,fxKnśPPm9:GitxP.}oOxW]ǃYPN-|_?? j=٧e #;Q¾ ~_o+.J$7ʼnVɋ>^_E~/zM5#3'Ԭyu}]5>)Y|;~>>].uǻ>>\q]zOϋ:t٧.fg|bq÷No 9{\O'/w۵cm?&Iv돷[ێc޹_Q?,>?fxGKծ:OR u.8.zTVFIWw斝o2;+]Z:8D(|,xwG|}/I0[kow}A@M.;t]W6a‹$a;uF~m?_GQCabukik߮b9gg{+GO[y#PgA4:^t /zOQq9u֏Q? 
>,fxKnśPPm9:U}o 9{W,vkXOɿE?V}?e6XWO?:u٧kαxgԂ|b1˯^+QtUņsEVi#(wkU/5U3CU^guwnHhVi$nb9(8T p-Wm+:9s[GQEvQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@6Eތw du(/;o?_ t95Wֺֺ^4LI y DsE}7>=xNmí.5] [r{NSuV_UWg&]IM-pzY%ӂN.)Yl{(S邊(((((((7zh^!kܴv5sx/zVIk{ƊH\]%%>}A_ NSV_U9v)R\-Ymz8zԂ|In_sUw3noEӅQ@Q@Q@Q@Q@Q@Q@|&3?¯|IJo[DtL<` ߉'}i^".u{\;K24%u?ӫ>Xɫj.a{ hp$1(o~G#~bk{bΛq\"l\VNNk=NӻZQ^yQEQEQEQEQEQEz2e1Sk 9|%4^ZZqx2E$11IϩO߀9Ol t"3ynO@vko9OcZ}}U_Ufv5&ڒw{6ܴ>g)afN 8epMP+O ( ( ( ( ( ( (8ߍt? xΚ r|PB#:dWGq࿊-Zzx\[M%)"Aqv`/9O+aZ}VObhإJmrgsI>c7gR 9'uPm}ScUϴ{uWʟNQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE|~~ο3*Wf?UUf_/\.DB((((((((U?hjU|~_3*+_ ?c}UEW{EPEPEPEPEPEPEP^".u{xwLd1R#~ۭ{Tx^@6F qZ_?fm㟌j<ߛq>K,mxVGθ 8d~o\~ozeŠ(>(((((((#>eZzNki7XYR Oj>k>hvkmw7u๒76`rYl ֪6SAO] D=#񁂃(5}G[ ׼z?uϕθ{8OKU¶qTP;ҝE'Q@Q@Q@Q@Q@Q@Q@|dWc>+?k2,####<׵1Я/iz?x[Gz|Y j6/u n3Fwh$#̑TcrR:lqՑ \^N[% jdOrIz9^;baz>K_Z/wwxؖ'^K(ꂊ(((((((,>~Z-宽涙yeŕ YpxTΏֽ톇kjֶs}Z #so!v%Npzj873h?> xZ\Lnh?(1_]'p5{Xy7^k\뇾OD4_~ +o HU)Q_"}PQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEW$~: CiWTO<&K׸3ęSy^/{jOrTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{h/I{kj(rTrT梀>/I{kKOx?t˝G^_l;qg8=+ _ڃM':@oc֮oGTk'.4t`U|WFPkn r*E<e<Gc@=QO t{Wþ0 :Iwz<$~ʋR|y)&Rp6?/Á2ʼc_ŕ3,X5x0JHEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEW/{jOF5'y@sW>5&Xب8c.>WWuч{na^N<7>:Q q?_ru>ӯE\xo|uߘ|EVi<7>:Q q?_ru@?}_s(/9_:Gՠuk ώ~b\xo|uh:_qW5dž\1?<7>:WδQh/+ߘs+Z(N} q?_rk ώ~b}Z_>/9G5dž\1֊>ӯE\xo|uߘ|EVi<7>:Q q?_ru@?}_s(/9_:Gՠuk 
ώ~b\xo|uh:_qW5dž\1?<7>:WδQh/+ߘs+Z(N} q?_rk ώ~b}Z_>/9G5dž\1֊>ӯE\xo|uߘ|EVi<7>:Q q?_ru@?}_s(/9_:Gՠuk ώ~b\xo|uh:_qW5dž\1?<7>:WδQh/+ߘs+Z(N} q?_rk ώ~b}Z_>/9G5dž\1֊>ӯE\xo|uߘ|EVi<7>:Q q?_ru@?}_s(/9_:Gՠuk ώ~b\xo|uh:_qW5dž\1?<7>:WδQh/+ߘs+Z(N} q?_rk ώ~b}Z_>/9G5dž\1֊>ӯE\xo|uߘ|EVi<7>:Q q?_ru@?}_s(/9_:Gՠuk ώ~b\xo|uh:_qW5dž\1?<7>:WδQh/+ߘs+Z(N} q?_rk ώ~b}Z_>/9G5dž\1֊>ӯE\xo|uߘ|EVi<7>:Q q?_ru@?}_s(/9_:Gՠuk ώ~b\xo|uh:_qW5dž\1?<7>:WδQh/c_߇OZ[y"MO'?)ZPjǩ'Z-(_ڃM':]pOOu/%<&qu|RXkK: ( (>?֏ş[nKO -oƁc]^W-ļ op(((((((((((((((((((((((((((((((((((((((((((+5'y_U^p:r#'S> ((((((((((((((((((((((((((((((((((((((((((((((g%#ѫ_`W xV5k n{)%QErW PɴDgRYkibΥ5?ξ_kwIg_`PEPEPZ5ƻ$@[ɉxs^X?kK%N4Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@~r/o+j9R5|G_nWuلxEWaQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@ xV5k JF}\8e?B(S j6,_K-wUm??YԿZ"ƿc]YKMc_I, (( [&oK1/.kൿkvIy^/>(((((((((((((((((((((((((((((((((((((((((((_"ԟ~W/{jO>毈0O8?(FmAul"O_]/ii:쑢D 'N\[??P_2C,yǚI '{u~, K֭m6ul3n*AO^1ҸgyٷFk~brottuw}zƿ?t;>wf/cgp =Oú>)[6z}iѹ gϵQZhǥ|YNOc^J|gG-AqhK0nm bNGn^-YwL'=4޽ w ~G:ZյK}Úl;a;`=H#܌^'y%ō;!Y%mf;J6@=N85iO'~nc8wg,Ɣ~jߵox3x~-Fy^FRw=1ָ?/'+O0#)Iem kz}"πP\lX#YJ6#r>ҕ;QwhquOH:֩\Em&%qdxEa]c?7Sx/4ͭZ}'*.;㸯RWO/i^3E;ql%,N> ЙS~ -VYG[$kAIJS#p=!&JӴRֻ ra[ߵGM)\[A(r`? 
Me9Ek|΂wKռmo}jzXA)B2˒OS8$o Lro,'ɔ,r|`A F3өjOxoR6 ݤh $q 5q>.X4٦[kY jϐ9;YK+cPW%>]+Š(((((((((((((((((((((((((KLVxė[wEdXLW=@^+n6&zIy.O0C#  *S{H틃TWN#VvmB(_ OW>ǢiC;sŞwoń `r=m|ş*hfMeu1YcWx#jX>kzfMD:4rwo3p̩{~'uxW-FrM 쬼9={)E$>^ēh Wm,2k),)'_ڬ#Cp7dF9z|=}h2xNo=YA F[ɶKZ^O +( φR7_u~:tBM?tb0I4eev9cBx6 ˪ d3^Sa\iwqZ5G)$NQȭk߉$ԭkRKYHlRm7kqJwFix+6_RštVHЎkSSjͭ5j)dz[RRʒI_4;6k6yk3Ld qmK5]2,$khDvW}ϢlZ~q[JNHuFl?nOo|S >x>//;اN3`3ʶ}'[JPF\:$8'条xS&iHvxKTTFũNKIi}6|(>ZԖd2A:k^5IW3P*͜pN8WG/|=|Z}ՍʂkyZ)<2yٻ;`9Ve?>F5{WCXum CKol6jD2̯;8{.yevII=ɭa _]Y52$9ǶjtB)ւ5sٮty>~:4Qk~'dRNqpI=`:בx7O~ MqsCn\+c8Ց"Fyu}qmĭ+,Ijօ}kItr6K[\ FMKU+BrIʕ}~xƺ$Ӭ;VQ1X$txmXq)uhwg'`[,_2;z&%%y4B2iԟ:OEK[ĺ T׺Me"$w^W܂J@<;ʯg "Ud~!˧Fq10kG,ij1'5z:jW6-w N8BcfgVV~=Nq񖩥hukB$sC+FFzjZS[ Z\7ìZKTs17x2:nu |AIL; ck9$^+aiWZ5G),NQEhk^<AV>*0G=s~:|o>-W~qsgu26(ø=kWCgɣGRu 8cncHOᐹI@5_2;K..y^I\'YrK\ ]h (7>Hj?)Zq[~ zQ\m??YԿZ?~"س,E/5ƻ$+"ƿc]YQEQEMc@I/+?b^8\ןk4 o&%_Ӆ}EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP_E? _"ԟ}_ەva:.q>U}ZLK[u G Xt]8Ϯ*yǟ|9}7Mh5W7z/x=R<4RJ1GxeŜ0à,ctXLpA/mʶ:|i} R輘^y I'\g\g|Hfn2ߞbzA{~EyuaW~(PG5J3-_R:.}+((((((((((((((((((((((((((((((((((((((((((( φR%H|{IiI̓?SשC1s|#hЏWB"*Җ+B(((((((((((((((((((((((((((((((((((((((((((( φRiNQωRy`|wE} /G+? п _Mu}iv<2>?g?_/&.?GW7B14³ ?g_/&V~C>L_`³  /G֗`ɟ#+V~C?Yobh3d|E} /G+? п _MZ]&̏诰?Yobhg?Kd+? п _M7B14}iv2>?g?_/&.?GW7B14³ ?g_/&V~C>L_`³  /G֗`ɟ#+V~C?Yobh3d|E} /G+? п _MZ]&̏诰?Yobhg?Kd+? п _M7B14}iv2>?g?_/&.?GW7B14³ ?g_/&V~C>L_`³  /G֗`ɟ#+V~C?Yobh3d|E} /G+? п _MZ]&̏诰?Yobhg?Kd+? п _M7B14}iv2>?g?_/&.?GW7B14³ ?g_/&V~C>L_`³  /G֗`ɟ#+V~C?Yobh3d|E} /G+? п _MZ]&̏诰?Yobhg?Kd+? п _M7B14}iv2>?g?_/&.?GW7B14³ ?g_/&V~C>L_`³  /G֗`ɟ#+V~C?Yobh3d|E} /G+? п _MZ]&̏诰?Yobhg?Kd+? п _M7B14}iv2>?g?_/&.?GW7B14³ ?g_/&V~C>L_`³  /G֗`ɟ#+V~C?Yobh3d|K|Ien*C.Gҭgh fkNwtz8<+Š(NibΥu\/A&ŝKIe-)ɬk5%}_5?ξ((ൿkvIy^/ [&oK1/.h ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( "3218zE>)j|5]5ŕԿalڳ{m8,9yp /(_mzf35bȖ{'i+8.E"~>8ϊZR"hg% (Zg-4o3F?)Gg{?ϩh|gA7H?>? ֣}KE| ? 
EѾ3JAQY3Z+hg% (R"?R_-F?)G4o3}V~AG?k7H??|gAj=gԴW_Ѿ3JAQ ? EUkQ>ZR"hg% (Zg-4o3F?)Gg{?ϩh|gA7H?>? ֣}KE| ? EѾ3JAQY3Z+hg% (R"?R_-F?)G4o3}V~AG?k7H??|gAj=gԴW_Ѿ3JAQ ? EUkQ>ZR"hg% (Zg-4o3F?)Gg{?ϩh|gA7H?>? ֣}KE| ? EѾ3JAQY3Z+hg% (R"?R_-F?)G4o3}V~AG?k7H??|gAj=gԴW_Ѿ3JAQ ? EUkQ>ZR"hg% (Zg-4o3F?)Gg{?ϩh|gA7H?>? ֣}KE| ? EѾ3JAQY3Z+hg% (R"?R_-F?)G4o3}V~AG?k7H??|gAj=gԴW_Ѿ3JAQ ? EUkQ>ZR"hg% (Zg-4o3F?)Gg{?ϩh|gA7H?>? ֣}KE| ? EѾ3JAQY3Z+hg% (R"?R_-F?)G4o3}V~AG?k7H??|gAj=gԴW_Ѿ3JAQ ? EUkQ>ZR"hg% (Zg-4o3F?)Gg{?ϩh?i?Asɪ, FְqHP#_PU)8nuaP~K( j6,_K-wUm??YԿZ"ƿc]YKMc_I, (( [&oK1/.kൿkvIy^/>(((((((((((((((((((((((((((((((((((((((((iaRHQ@$3S]:XiG (31$p~gk+?yp-K[#-Lj$LpVs`p/~ޟPmKכ\h׳ ;DcqN<'Y?wXZd>&DVØ= 4Wo챢Gkko,Mp>hnٟ0ҴW/{jOF5'y@sW_omgZkͬt~?^8(ϟ ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( r#+B(3 j6,_K-wUm??YԿZ"ƿc]YKMc_I, (( [&oK1/.kൿkvIy^/>((((((((((((((((((((((((((((((((((((((((e}RԮtk (fcw5U×{o|Fk$pI?+|]k펡ԗmlUe9E葮7cssl[߶  u <9qsE ̓8 pVo|`43+_yc)h \0Ih7|u_>畈#'QE^pѪ/I_P%Y":Zk?DG]8_yyW"(Š(((((((((((((((((((((((((((((((((((((((((((((ܯ=P+_ڃM':]pOOu/%<&qu|RXkK: ( (>?ɬh5%zLK -oƁc]^W-ļ op(((((((((((((((((((((((((((((((((((((((+'8~xJ]o'Mss3acQN$*?O^ t'M̚y:d@㧏,'K);o1{bA].¡g "lM]ǟ+BM-lmyq"`唞I'z⿉5_ x&եtxJG4ec@ ډqW(QQc۫Y,dhhXd`pAAWt)?GRq};ApzS5 ˯{{ܴ"S#g =W_aS>$SVݶS*t9|FMrK-T{D @d;ٳ}˳յڢK}Nh PWxv]Si}unn5NmF8my;R8Գ9rko>1O 㾹MirѪȌwxeX/=Hb e~^2ԴcUmTgEJG}*}IthR+Ws5/>!ѬA֭mnif4QI\V($_Wxu KM/\F6ۍiOd`=e?\hwZ\:>Gp#Mt^y;a 0: WmNZ{5khl!#цD$ݼ`vk~ռ%iw5Ryr)%xukڿj K\חv芑@tW,$l06Fy(X፮niN#lLv,< 5Ҧ'(;[>^f'3igXLy#ՁU8z!<5qqc{>(e^\3gʅj\n-៉/[z.+h^fEh䌕eaR:+uڻڞ%:cfv1)'sY^n>8gP4Mvr|*r(RB)I&Mu=A }ʃU ÷g,n1ʸ|z={+Vϋ'&E66-\ruI9|wC[m^ ,Q' rs aՅ()זMm7Gus֒J5KTnKƷLxnyo ]kVvȱQ8[ KZė.(_,^nc\M'ԯcNPu:Ny {Kۂ2"&‚jƻ_ꍩz)xwmWWTq6;{[if6 &pqq^1ֽkǏo>;|&f.;w-Dpr3ȢSuQNOC(8Š((((((((((((((((((((GM"31dVX5+>wA"sB̤ P¯ ='KRg:+ H=z"?5glt'Rixn8Q^i~~ܐ$rKh`ɂr{UJ\TiC&6#g%ۏ\aGQ^ɥ|_'Ƹtٮ/ri!ncp/;{ Ѽ_[Pb ƈ) \ܓ5ѧivqڞEx^4V$-V}eܖ0oqʅ^!*EGtq1E )rpO7m%P'0eF9KF֌%㾟}%x6+uzk{)%88eR84ß]_h:͕x4RGd2̠ ײ''e? 
ᖻ[5 Oۉ19[!yy~3x^4}{P=-֩ 62(?8MaTNWھj&hԼK4}$+}HPp={C#7?:̰EiUUff8 j}O@M+wI8+NH#~] <[&чu/`@ qxgƝ3V2ͨ[F  _aOSSw{s7?W fkSҮ[ƷOј~^Rd7(Mek8q-*}7g:ռppJ)&[ c-SѾ02w}+c(+ܮ<_C (h+?~"س,W PɴDgRYh_kwIg_`WE/5ƻ$(((-oƁc]^W-ļ op?ɬh5%zLK ((((((((((((((((((((((((((((((((((((((_??$6خYۖݍ;t 95[gϿ"T>͉/oQB\rGpܟB(((9R5_E? Zk?DG_R_omg /6 Q^Gv~曨hwMe3B\Fq>gJ<7 k4[bp~'u~SsᕄZğZ"Χm!PAX1?M}Zҵ9]Rݶ\ZJDR?{vl ZƋ9ͥV铴dbL']^^x$Y%n!$PO|9xwvhTn#FJ?gSkZYg0dUcKgTVOş'o.R VA([I75Z|~_Qdo JZ7Os#e]Wx|I[ 3)]T1 0`мox^5SNFku$*$)>ǫxWK,O0HVթ'?+_~.x?Z/e6r>&C?kERj>Wel0Ґ6#y$p:M◃(/Ӽ?i4 /܀`tV9`F="1q~f 5Z[䟰/5#-ELA=7k?qxkJ^zNWj~.PJ1ܞ5cWu=3^w{`d)pX{ƁÍK~,['/~=xQUmVP&(a 9˵UuQ+m ?';ӭ$Rӷ\w;A_t=+Ͼ=xƟSM67OM)#z|_¾$v$Ědʊ~`9۸|#Q1rx}3ź-鴻0KFnSZ~+׵P[9Zve%%8a~?{i ]qLrE.*̀OW1=|^mV:ͻ]hZ" US;38#$ݻSnmh}kxZni,^p 6|(#,F#FGk_~5[I<1{T{:ax~ lmcZo:ɭ~rȠ<[g_]Wm b\|(./?:9/~UmrY)5$w5V/C݋L39 9b 8~ڇGӼA_ ->ZE[9d 8+3GKۿB浬iWWdKg<{=}ivr[oAA,v< =vG'UG%9)'h;1|4l}'Cfa$7s?8lw85ooBVWnI;M 2e`TA9bK|-zֽh^o/6X>,|d n[JWqwpi$:;^Ij%'rI&ק>5};OGIu${7nAtۅ5Euyӕ8ܓ^y{5{@@Cͣ./"8!rxCAuM#_m'wsc|U,YjwS) Y1=O{_|!QIn%mc/޾'?].im; yb8'EgէSG(~kgt$= 𾴺đm%\ 1s+`v{ĺkɚo^>^VřQ^FI9n/> \x^Ng Џs~1]>ʹGɲ8 y4nկqʳ$ڶ?h_xVNGggjR7ŎKO^osOx䜌Z>y'{+xQ]gQE|G_nW/doQ\gm??YԿZ?~"س,E/5ƻ$+"ƿc]YQEQEMc@I/+?b^8\ןk4 o&%_Ӆ}EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP\Ogûxɷ䷷LqcHI~Ҿ~\쬢#Ȏ1<(Þ^%ͧ:{mphvE|?#$ ~$Xh<|w>cy>=_~<75|;熬6.q}.0e*Yo+gڍBeov' ( ( ( (G˩뺦ۅ% u4_E? Xγm猵ʋK?>#lhW\k}CSʨ_L+?K>5~~y S USoۛV Ki1_,B@..j* 6vlz׳|?o<}0ͧiN>*fZ>7ៃ?5-/zEinA #͒IEY;Xꐅ5)7m6[ݫj6evcpt/{O;hbL6{?!Rq꺶g$ yo̷ "k}FM[aey ԧȕ۷`+Š((((((((((((((((((((((((((((((((((((((((((((ܯ=P+_ڃM':]pOOu/%<&qu|RXkK: ( (>?ɬh5%zLK -oƁc]^W-ļ op((((((((((((((((((((((((((((((((((((+j_ڧ_ֵټˀɦ6'%Tdns‚:?k~>mKTu/Kң}^zr79t$?>|p_nm|7o0Kbr,nEO*ǫ{]{/ظT]2Z)29빏#?? 
>w|7>=?K\*//+#Vv xPֵm_KKglگ 1vgʏzXh۳TM𭾩kyr[h5@yCv/_ZQcFn )޶éyPF//'ȗ1{2_~?knsZk/"S\"+Q'F_^fO:v6ݯwͻw3>ǠP̿ y7Z5գ2}[u1!|^pѪ/I_P%Y":Zk?DG]8_yyW"(Š(((((((((((((((((((((((((((((((((((((((((((((ܯ=P+_ڃM':]  65ׅՏ51C~7Fhtn}>[}F̨+Č_7qnU\mT_r]e63]w}7_kwIg_`WW@_~ aiiwRaq4qFBBx(NI}kC tۅA֦-d 1!I yOaUPWoe$Fάg h{InӽS(C (>?ɬh5%zLK -oƁc]^W-ļ op(((((((((((((((((((((((((mᧅk:W]=<˭CRHB$PS|9?/~U[s /Qk((((((+~~֟j=WV^׮x$Aesm'Ѡa#猺hѨ2HqpRMex?P^h%2;iJV $d@QE?Oh/_VL<ǧ't܌ >2[j^,ZK;F ch k\|g~|XkJ/˩6pmqLmvvSfI0]xmfycoZZ)@22NXxGO Zo:lB{hWjԒrI9$I$4nGm/OCmmsԒI(>Ҧԯ-te5̫Q/3&-Q_+|v|5[Zxw^7#6J{Ʈ=ͣ6R;ð+uGE9@kχ2xWr^,[ _n~"M7ῆMYˊSQqt6@"@})?ƻ2j?[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"᜼g@oɸ?=;os8+rn.g/&տ(ɸ??᜼g@oCVW3g/&rn.i>[_"ܯfOsI(HPA@b!_P&*IDZӜ9[o Q!t+{XY]"'M\/-{%Imf:~בb#)[7 _ݏ>1V;J[ebvvz>篈Noj:?+zI徸H$;U!(;P$zkּ'!TC~( j-wu &nry  .6/;ɽ[rz.YhJ~jxᙼEsGwF 6-pfq4`6C^_ [\px UltaGW՟EdYe_+(eo\#vuߵ쳣?5 [æ5Of¡aQ#vhea^jݥM?+Ֆ\[&qs麴6GZ>0Tk+'-yFwZ\"a_oW~ LC[{lߝMz+8a=kQ_t|Mc@I/+?b^8\ןk4 o&%_Ӆ}EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP_^0qf~Jms죣fezbt{$+ʏ_4o#W׋>+ڷIӮtnٺ^gGr#|,~p@>?_|Da1ez$@Imy "&]R f~ )|ig¿]i7g"Ӡ›o$nX!}pZO}5/M◌{[ƿO0|iFp3( ́'ր=[\|u>(|Gx:}2IQx|ADYDb(o44om)CW~)նnyoo$o|722~iPf,W~#IoBC)m1@%QC?|PSooڇZ|x kǧn1$*F99p?'~Ҽm/=yXKtIiuqp#TvH |\dWEMZu}owR5/_E20mwpLY:"C J$An'w_3þO{B >MJ [1VO/l;IQK E'CjxHt/Rcey{w[/$lh; {;C+|`ShkkXixXE{<}2L#s~+~ѺFO;/UŢoh2 9eMvčUQ.4'~|:MSoI>բ:9k1$ᄰqA VW/|{;? 
|_I@7ukKYdIJZH|Bf!W;i Աɭ+‘K\@irEb<ª "x 7<~]/3mπE؎}=CԾ+C$wWQJm Nd;S3|{ce3dŸJVKn5;/TC"'2BB5|8~?Zo֬u xW_c/ԯ-G#jЪ^,a6 +X _'g (e g7a3XDe +Vm#MJSe [feԑ*yg (o'hΙ 2h@ kg fxtW_|7ZW~%27m)#S 2\+@  ?4?m R#oin-Wkm+_[Hۘd;md<'$nM??භ n~oį ]l4=Ex]@fK%aӭmZy%}<` ^ Ҽ8D<J|Dtl־ %A1o= I#܍ޥ b9{+~6o sPCot!b*C0,@>?ଗ)⿁~7xWLMwCXZtuO_BY0Qd>iޞ5J~? gǏߴ:Ut'OQ$Ԯ$HªL_ >4_ .i>6E~ϣxDs\l2yQ- P>(ݍҾ*V=xw C4D5 o-knq2(HIPP\?%;yxw㦙SKkG1k ծ|I<3 +mTur?L۫ Z|YbQNtII'񯍴#Vx|7ͯF7l@^XQT20챱@_ି)xP_Y[i(Ķi܈^[@@1f 8}_ _ 1?.<+#Gv+PӴz)V[,!҅ >7~kUU'cj=CQ,ft2K48rK1k柎U <]joǼa?/gFŞ&Ӵۍ?y/S/z_+=~غܚ'l/Yk_E= GO}-h7nTEV٬\^_HO]6cVfhGPKuJ)Yk4OO5XuO]jz\%ψ%k[x cU?ޯ? g~MF1!+>GL&^@& A:j!D6GVN[|=;:D:~lV֐,0=T?Wh((((9R5_E? *m7þ_ccgg'XqM/QX<7CT EUrHwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF 30hG, t_U{HwF8I}?m+PC+ ;ҳ[2Of-p?Oѩ\O#\}.s}^wG6;smu,F6kVw,9\ÿ>VjU<[f6jiN2-嶒6nM¸2-F^y_ҾxVhhכ ~tG #a>*kQ5J-"I)*&Cta־ JK8IΤݚ[gř%Z9Ir+Mo9eQ_k4 o&%_ӅqY.]CNf|>%Oe0\:W-o¶ȯ&q}m8;((( ( ( ( ( ( ( ( ( (WEU| xº<;oޭmZ u].9'W2ĎFU@!ԌPUEPEPEPEPEPE?m?_?io|#վ!|9&ɏJI´ALbB [EPEk U @_,|QxsH|9g楨]̮$rKmUBI.9*Y BQH*HQK@W72CWtЦϧy_L0`Q$.6+b=B((((>!|5 h^+}Z+YފEdoV>v09u_y~-4 ~5>i+~̟mغCCZUY- O h\-gui]d Tp|.?tOl}hM$o'UQϥuPG% [WozxMon0<ԴR' |YYCoo v#(*F`* :TPXc;Śே~jzM>R~XcW'k/C[׏Ş*C*&xVPR DD `cP?Z? x}އg5i7v֓ _MBNKl}/:}KR7+i=s#+;J[PIS8cç\\h'L흧$nˌy ,DS8<}߉ -BWO l#.s,aN _k!ih yPx7~KM޻q wsRYdҴlWeT;n͎I/g|Ihޏu-%U(]:(i)*BGJ~%/ x3 ]:?OHkGwl(;I󿆿o>9G;🉵]mմo ^g$E';Mz%Pw+c jZ.ӵkY,b?UM|Gy^#nXdD_LƌYiTPo<DM4 #O`> Ң((((((+5'y_U^pob;fk)? 
F]o?|QEzͅQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@}ы|9<ےO-|}_`|3o襮\V^AQ_7P_hWgsM_QE|!!EP_@8J- %TQo_Y?~LEս(?8 ͇s^T:ű96) w<~~2d|jռ+&C8ʷVʏ`vV/)1Co-3=݁A Dt3xcmĊ X(#ж q5}\|'-cU݁rO:Bzn{ӗyPe1+?r6F^gxۯ-lsvdX7=Ԡ#ˉ.ifvI2~\+ Guh[g?|M־*h:$?Qmb#_JH[o4-җB_}'ϊxr8'KOpG2m22FE~d*`owoO9+Rn4oo< ,bnʯ͂ ?Z&{f?G?,,|4K]Y-ؿ*r$F;oEMCWBv8X༒%#1rHį5}G.״ wH}귑ZZ'YU=M~OGGEvXNwiXi)l!Ywk쟍~oë|:m/RO?m{am;}GNMhB208~,Bࣿi&=O Bo xL;tIⶒKmAnKJcl["+lG54>; k-Vie%3^%3\">l]06}w1~zm m6FiZ>[JvH { ?Hui|J0 ##?7ݭ'{\*sa{v:%yX|6W'Zo[ mĨ׼;UbO"/8䌲%מ:}Spe?ط,?+K^񝓭3y-m8#E{R;C>2|mh>ۥ܉)THz)e%+WiW[KĿ4[Cs.;ui~:x¿O ϠYk0Ih¥9f8!f<@~)?b?[# @~_uIgZ*S~Ο6wůѬC5%%Uj]9  [ĝƗDk5qOg}mmF9~͹ g_4՟s~>&3Ծ,~,tBOb Svق8o|}~.%xVeLnBykTd~~߷_ڻO,OexL}>}*̖-1*EtW.o?4=wϋºouXM-N%tgX+X|v W.?>Fw_T+ IB(rӮʼno''i#~;?|OQ_~aQg ߵO! ~'Eo.>/$% Ag9+c,y4x-*J PT>XlI?'jږşrī_~(~Z~> ]y!c`,i|m.Y.z0+ 9^?/$?V5}? x]Ŗ[ [^[xSi]]|v9_iu|ol. ݵ)XulWpT8/ş vkŗ<1+UuG -}y&WQ?!5_][Mȼ:և'aeVy6!DbB>~/O  _rx'Л@(lܯ9Λௌk5lu'x'ñ 1w9*8,U? m?f^<'>&uo+8 3</ tGICB$i0~3;6|kɉ2k2i </h߳W k;Hֵ I-i_Tw8|"ּa?Kf_~1H ht&!Kvys5 .O:xOgŨŸM'3ewB$U҆XίeŞ8⯈E`dLnmWˤǬHS؋C|aZWG^f( I`hYa;Ծ(xGŚK xg›GX1QWp +[6/;\&'-I# u$q&R)xit~@s?̎?#9%UrA>\?YRyx'FB?HĒkh((_?ο 5OxzG-%;\1 2Y@3}7ƿGyr|*_WĖ#6ZbvU*@IYً'Z|MIdKNXԴ/<*݅R"Sb/GMEU*]iW,D Jq {m&'M=.|{u[g3<% &&ް|?(j Zo|K6_^(n> Wp+_U< v0S>-xgƚ'Y)(-ўrD`xt{w++_m%|m }_03MX2? >.ռ]}N:UusdDYbAGpy-  KB_۩۹SFc%쩓5$77Vo>t0>$=M{A㇆P9/~{u?'$'9pl,^ ?,+ SA4hi#FXTbekXdWއ!~ؿI5KS[iKêxko={DhҖIğ6elp?8Z?|Iou4}.Br984W-L_o׼l46I02Ķ-q ;=?<7d\jcgphT$aKqpp| ş3iu ZOO/o8Γ<*A&$$|G?Oy92?Q$*v-e= p:~ 'c(tC0o*tIWFeLWgAWϥx&|%= eyu-JCo|Erdvm1U~W ?o-/u6?6/os]LCm`7*oSK Je}ml/`_7Q+EJ33_Yٗ@}|TtH$4;Lnt(c߽Њ𽇀?Yѭ`u|4 -kbY%X'yh3xkJ~/$#?Rw}s/˸E敢#fܡq@n|~Пm$/^mڋ8wdvǗmw^7 =1~m~֍xRyӭWx-Х. 9'|K".j&ŗv|gBO{"I,#I R`I+ kğN|2okzw~I+-#>Zt$*B.Ḑ Gg26~f) %ሎ5f!X]+𞥭|F"#)n eH_cf@/?|[|MO_xr@/7.yIS6.ܠso_:]Y%5 B=f.\̂,q'4Qbψ(iGc]\VZ}w@"K졘۩VVmcE(xgƞg. 
7wHH{X8|* w^-|0f=CX'}nQ֡ _c&Ak ѬMoc#Oĭ+Y׾n'__Mywx]!Q'0_1c {}8df$?Vu^ M)yAcڨ||*3_NW?_o}jOu#S¿ >&x1Kh6$FNlcePSy_ӊioR| :%Mo򤔐`_7 2iڙ\}#t?~ \Dȫ$l0!?Om_~O7lQ߭x౿ |Eu(O&F>v?ȯVoǛ!xmY\W$k]V]y>yG=;?Ǩisb(-_m/ۤ,$U|xמO֗Y\9b| oGܦ@W[›N]8;Yu}[)1'^.$l?|?67tσ?H_/}E_Wwǁ~ x]OHDkgTvmZ-ťP?ݒB8((_"ԟ~W/{jO>毝k)? F_EWοe7y>h(Š(((((((((((((((((((((((((((((((((((((((((((((g$ K_ )k=l㗡P_hWG?!迵_Lja`WZwǔ6sǓáE z<9sL޶:>[_}#hs%6 =D-+? =s}z,yzϮB9a{g˧$zfP y!s2q8}?(?o!Ϳ?x>qo?7[֗9wQ4?e'׆e"ѼY^Nk}wTB 9# ,iGZQ儮yaq8J {Q_Kp|{pid?lz j1\b+y: 3jhpQF )g 7쫮]V( (Dw+H=ɯnL(F_syCMUux{5Ui]b$Gyc7**(??L?_~  9Ŀy' 4* GI$+2لV`oM|Z6c5vF TPf5W ^k/|UĞ -4\SťjGc(WVPʌJ`u7g I|L%|vo@]ũz4ږVIe@tPfd}be8hk ~$|'6.Q4O}6fV]Zxk<݅)67E HP_)^~?Lk^fB?5[d]mn؁P;~k|MO1mGxi5"'x!\5@dd+-{{gPk} ˫hc,BmJ)RZ3*~߳΋%;gۣxDѭ4ZVnF {@? rԿlo_/>3OVzmVb[L3?dq] Ywx ;㟊=?cq}T𶟦[q,n,R9BO9C'~=|=`?|U)?ZNGiS ڬsDcIQ2+/߅%}}*MSqJ1ܼQ@િ?h/{Yy3:=և$剣[|-Xr~Q(Ϙ>,{2>/֍)o/(-eCa%?g W1B)*\6FmEx5? SٷvO9c/n/1YuOxb)ϖI$*}٧?~?M?&u>k[Ş"e4 JL/$B mUVmЂ4??u1'Q?,~xᥕyjtWʒ@a)9%?5Wm66-sk$X%1Ek.8Dkko|^n|GVU4qJtXwѲF[h$'Lg?/;'Fl]%;unrrXKh8W#ךgͤ{I x{+9]ͼG*`૲efSTPKy㟍0~ u׎nw_ɢidx r ["2Nk(6ι7%dž[ -NH.LڍB]Iq(s$8!B GO3~^?d |y hDfK!!821FN+h.~ s'&_3j7.mJ\>DRP8_3?|gk◃|YG6ğZv"IRH 3+(  -' t'efGy-;XɹHOtYr{OM?M.O+<]ሴ&[1vAt+ PF|rO@/p~o2hAq CzeidgdP;^3Wyǿ"xo/g^0ҼѥwWa3B9]:g};eo_9gOW_XMͦM46." 
4itgw8%ls_>:į?/_oO6npBDUd`Qw(#(~imB"}?D-'aP4("+0=鿰4(HǨxux|%\C/<ŏ|25vl5TP?_|yğox_o*Mg64M.I%TXb{ /~sKg[74?j޳x?Kb^ vͻ(㿂?Eo )MqGmtzp7c=c$J)̷2ЃN(lmbAD =_n:<+4OKyoֽ"_H~'+{>~k|eNWX;mnK9[?/ƼOg[8ub>Gش~3|\Hl6ZF|P]е޼}gmࣖCk^KhmVٖnۆL}[P<}9N,WzM}i6}OI3Mʑwq'kdFssҽ]'↗bhXO=ѓ<:%tQEhrQ@?:mx'.5eſ>(kMn#5\+rrKS/?_h[/.{Dd(K[ YEMxv7_ff:gjKP2\ U-ŭDT#Zia8[OUgEu?~:7?O;qgr:j۲1\Q[W^o\xz^;,؛L\qNNkk?[cԵ)n,Zt7n;`p"2rr.WS|3ge&.K脨Yc< Fqo?+^!`un6$dax7mR?&mZl[)nQE{\/A&ŝKIe_ڃM':@[RXkK:?)ɬk5%}@Q@Q@k4 o&%_ӅyMc@I/+?b^8\QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE.C_oO$QNGTPhF^y6 mHBZN֌ $>}º8~dqG?f/m'rтm/]9(vʜ8ů%ġO]zikAk;Ü+86GTWϿ+ŸU:lef].i>[^QEQEQEQEQE^pѪ/I_Pz΁c;CoYZ_[B_&~_W pWFkZc$9_JEcѲtRT| Oٺi{<;x9s9z?ß@xGTPׯr>%03^EvGx3 \ QElp>7࿁Z_lv0Jc[u'%YyI=xkgTvrM+CsܒC$vs"RMNz4.7A499+ sӎr -RKK!=Rgv*Г.WZMs5nW2I!2O'uo O3 Fҳ>:>fp:q=|ǵyo|[sj َ1Q}I=MD ˻2fn,i-g%"-m{{i_/5kg|%$fs8g!'ҼwfyqjSgoK۝(x9'Ax~ Ok8B80(_?٪/ŝVdIeA գ)@վ#|r{.:L#΃EשK1JH]GHŜO Y?'nYmskg;ƄzG9c4X0 O=넫z}| PKDgkWyS{ƸOSܢI.UX1' "|G[^'X|=|8ֹ9%#~oA"O<~-,ͅS ^b<k'c fO/4j2H'{wtڋ^{MJr/Ng=\,5$uO j=O3K*#9v1ے0OCGTӾ*yqxnқ)IYa& ugoI.>k=BhJz=~k4~~:cAkwFKt,h=xb*F{'6Vw>OCNS)X^sH`Yz 7/VobH3r=z?_^KY״[yZu>wK6N8\gpM$VFU`^ꦕ ߮gcvڧoz٭!UbDA9#p#a cW|C3xoĺE0Mj̮NLj28޼/6\jWN,-RTddLe1JRז +xṈ;7A?zOq i|3nlRq{@=rZe&$Ο [Y|v3ܱDxƱ\kڵ7ҷ$f⟧a/зY~KNQXVi1BG- 諞g |CjͭK8yC>/,-2_Eۀ`0?t8;w([Z`ck<́t4JˠrJyw#Ѿi(Gh$ M _%\1pEs'Iduk[Xu$a 0G7ƾѯnlv)' ⽣z㯊;Rծ$t]11 v('$gҦqqRh'M;Yi3t"%UV`z+̱5EgQNCAwtn|;[F* fI5QEFaEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPA|O-[N{;=BQ,!G7 {ϣ~I ZocI7¿kYI.ut:ISOs*ee2z͊lfOҼCq +euYܫmhu`A-sw X=ŭ՟hI nO9Ӽ{?ůZvErE[ş˪<RZnJçRi[[̻qif.зZrG(Gct4~&ryc}AWdK>k bnV df|88ʐF9> י\i7V]Kkq|2͂A 㸯lu OkZQ }UO<x$(Uq8Կc-ֿ?^oO γw%yvLP^nY9Ǐ̈́nxu ++Tgvw+O 1WNi..,rǩ}O _|LKSt~h% WH}ZE >k7.lٔϔ,p}+Z<Ы5΍ ew2xӫy%:mH%?ݿpCJ ].uϒUkc((+M?`o}Imq[#~9zQEpW PɴDgRYkibΥ5?ξ_kwIg_`PEPEPZ5ƻ$@[ɉxs^X?kK%N4Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Uִ[?7f)#E2 TP0C[wLrmX;Bc-‹?}&\iUoC4dIix s?-լ<.SQ?^8[{xx .[kztw]]F%xdG*ÂSG'3+xO:mZBHLl(/> i uqI+;/={uřZ;kCi;`V$ck7QǑc.m? 
ͷ^VE?~z \7ZOm(ZKkWIWkIr#םREl*HC|>ekwVVʛ$wm\ǥjcI+E`*VR6<)K']_Kw<T};^{eEBvm[3^KET^YWWkU|kg$OZ{oGĖo1%-%RG-xJZpw}K⦏oe{q[nNҸ$qOa[:qHVB#[ped{漾^6\v>=⵴vڌc -mSˈОI8@ui rtSp 7E>Uk[B}ܹsҼ_Uxž|#MCح4A 7Pq d6YmuKKA=Hg(qW*sss;~վ%粱t4M6! fqEym'z(1KbjT'sa2?tm&Z(̛[YNxXkɂ<aC1$ۚE;+ܞg뵯ڦ~Mbq,IbX8h NRTпk>;qkf!('gX5kHDw ,p 8̄gזQQ졽V;oum՝:m՝Ԟky w 2wW5jt6VOy.㉖P@yAk TM_}[" Wn>^[Y+'O?u̖v:qQ#vI8q\*SFU (9Š(g$ K_ )k=l㗡EW pOOu/%j6,_K-yoKMc_I, &quQEQE|X?kK%N5Z5ƻ$@[ɉxs@@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEksh>+ԬD$N9FB$7'gǍjTL.Tn }B}GuksC27PF A_س ]I-t}~4``gŦް369GLì=׊>&k 6x;WO#"Qσڲ+:ŝe7ÂFzxco|?kֺ}A$S)85~ ( ( /I_W~r/o(jc`lU|\Hl6Z|g-+>l(((((((((((((((((((((((((((((((((((((((((((((+M?`o}Imq[#~9zQEpW PɴDgRYkibΥ5?ξ_kwIg_`PEPEPZ5ƻ$@[ɉxs^X?kK%N4Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@xwG5ѼS {muhǙDxSؐñPu [C/Rk^\;f$crc .#D?f_W~ݗ pB%G\9uK:ٕ*<2Gc_س|3ԵkNs:i+J1$XmC+ا g'x֞Sb(/3rHN~_PEP_E? _"ԟ}_:R,k)? F]o?|QEzͅQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@}Im?>7mR.+dzO/Cr( j6,_K-wUm??YԿZ"ƿc]YKMc_I, (( [&oK1/.kൿkvIy^/>(((((((((((((((((((((((((((((((((((((((((mo%.G~%s&̳|GcS2rZcZxN/ 5kNql~҇KG7B*Oy?Kx?_VWP.P 8N6@}&Wvu;]_I][>áV~P~o'ڼ;K( &讽c^O_gAsfx=5̃ϋգ< c/#I=9R5_E? 
W_CqoE]˒@;$ŠɧtEJq咺<sEԨa:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(Sd t#oRm>:ʏ-Gk/7G2?\*(S<6?5f^e%5vOELJ[SNQEIW PɴDgRYkibΥ5?ξ_kwIg_`PEPEPZ5ƻ$@[ɉxs^X?kK%N4Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@1f Wz>j]ys\$Aq _+߳|sTt>Ky[OH#2=p:j&>v>26-N>Cf9R߶6de7_ި$z(GaK&Pk5u=Jj scIH*xn9^^[㮣V~ke1\;+7Vea]߇4F sZd8x[:QA?X6?-mB*S8 h;0'_9G5lj?C2+ñ}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Kğߙ$+hð}r3Կ:Q qO̿r=;+3=Mk1K+1C?P[Gq9ٽCc|c_`|3o襮lE8+[RMMܢ+_ڃM':]pOOu/%<&qu|RXkK: ( (>?ɬh5%zLK -oƁc]^W-ļ op(((((((((((((((((((((((((((((((((((((((((((+ :_ĝ'Ķ^w֗q6.-vT[ cR9R;GK BAAk;E2{9ǥ탣:r-l rŢo\08 0OZƏi. SA^+,K*GMz7G|Q uk ^ v*Ik_S^jFJڸZQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE?&mZg$ K\e? (Ox+?~"س,W PɴDgRYh_kwIg_`WE/5ƻ$(((-oƁc]^W-ļ op?ɬh5%zLK (((((((((((((((((((((((((((((((((((((((((((9R5_E? u?X6?-}_:R,0{LJfνpO#$J?F^ +[鬮)&`8< ּ=CX_D!.!YOW#~ 7-ⷕҶA'08rT6~/#$v\ r ?>G+-£yw}%\2ÌG9=+ڞm^NwɰQE]7}ݮ{ukf7\M 1w3ITQEQEQEQWtO >&ht>PFKhfԅIJ跚ZZ]XܠuϪnQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE?&mZg$ K\e? 
(Ox+?~"س,W PɴDgRYh_kwIg_`WE/5ƻ$(((-oƁc]^W-ļ op?ɬh5%zLK (((((((((((((((((((((((((((((((((((((((((((9R5_E? u?X6?-}_:R,0h (:G[&KQ98ZOS$XjSX-S pï j?|5|3]\i1YyciHŒ}Kdko8T9cYN64,p[` 知sJRo ѧN^My~K|,k];RԮ [Yv/R[#F6{GߋFwNyaC0(Jd2I; x/i,woZ2IԒ08==_ ~)o╍ׅt>onVK(h%:o.Zv=_h?n49fnH] |x־w#mpna |T,<3/tmxhQXT6@ 9=+|9j9usku%R)6FF3M1EJm-x}mI I#uÂE}3=^/þ:_j&կkR7Wv\[;o*G63Wb,`RS4{\=?!?j#?|6ZZxmqfw)†'I_;A\1,|MB틩 V #۾osx M׋=sVvkLlgVck7PAzTQ{I$,#;` $m8^k5[ꚵqye?<717[:~|";p |ҩ6+ U۞=߫<_:>0_Aj^031 4i&LE=RyOMJltM 4:XCɜl!qݫ;?4Bsua1pk>>݋M+;IcUm4x $❮gFRy5+> />N;95Kc5ͼjCv-u*ORkG_|GIEҮ/#i2!s{Wx[]KOT:Ʊ RLWt?.UT00sr}_:#v0nnnEͅpFFKؖLjZ OZm%[|㏄"n6ejHYd\nRW>_kO':laY[u /mIG5[C{w]klKPXYSw'ֹnTfxTIg˻Y2"-ù8%&UVgo>no| ɪ|wEׇM麦=ط`=9Ǩ89ڇ/tO˺q *+J3] V*3\K7hrRp0O!3^) q[1%0P~bYxn:Eny!]AIkwl?1Wq q ?[kiG;ȥYuKeeaE{/χ~$}6mK v.lYy.W#nx^}xC/ma5nj+8!=[$MTf3F0|h|In/kd#3Btj$8XGR-R;H![eQvfrǿz~ٺͨo÷r5xwZ~iwHlFAw8_\^}.Dehlfw"ʧ?@+ Ÿmus/PY#]v(W}W K&Bk3,iR+NoR.7y@^% 5_Y?oywwOc Rxm|oaB/!\g#(=؍lr^=\R禒)}7웡[SDeGXnS̒:#d/rIrOP J6wf`$ik/oN*G鯲z7k_l7";2&v6#koe^Z]—ǰN $ćŎ &{o ;?5ř1dۥf7_1^AAډƙo!Gp0ܑtIۮEf9Úiϛh<0(>7mR?&mZl[)nQE{\/A&ŝKIe_ڃM':@[RXkK:?)ɬk5%}@Q@Q@k4 o&%_ӅyMc@I/+?b^8\QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE^pѪ/I_Pq%"kc`lц?3[EW|QE!+]{ipq=,{ibKYV8Tn??xr{|Ecz_CruƇuoGtH:9xv^ IxK.,coX++6N 89&Sot|뮧|*gmD]xX_XZd(RkeGE'5i7V |oEV2o qF~QPtTq:ZJzGWX֟yk}lU2B7)#/o.7/xjИV$e-/:w-8:7w_~iZ ,|_^^dyU]8iYxM_K:ԛR- އ;3WG&6OJMZy|'|du ]{^mj@߿>.>ezעxg^LW8UTNMy*zݻXB|Qe2t6g%\I0 l?g]An$&8#*ƒs]fŠ(0(((((((((((((((((((KXEqXĎ*d'W<ῃ^0&ͭDiwds~5m#̭s|=eN\ֻ; V^-PP 5N4@,,uGq]KCwZ7,mE[#w"7mR?&mZl[)nQE{\/A&ŝKIe_ڃM':@[RXkK:?)ɬk5%}@Q@Q@k4 o&%_ӅyMc@I/+?b^8\QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE^pѪ/I_Pq%"kc`lц?3[EW|QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEW )k6E-rGr7(=ibΥu\/A&ŝKIe-)ɬk5%}_5?ξ((ൿkvIy^/ [&oK1/.h ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( 
/I_W~r/o(jc`lU|\Hl6Z|g-+>l(((((((((((((((((((((((((((((((((((((((((((((+M?`o}Imq[#~9zQEpW PɴDgRYkibΥ5?ξ_kwIg_`PEPEPZ5ƻ$@[ɉxs^X?kK%N4Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@~r/o+j9R5|]ZȾ>1zƯBF}@#󯡨)|UmO>#ܢ>y?_nQG:+(ߐc|GE}E[O请(~A }o?>#ܢ?'tW۔Q r>?_nQG:+(ߐc|GE}E[O请(~A }o?>#ܢ?'tW۔Q r>?_nQG:+(ߐc|GE}E[O请(~A }o?>#ܢ?'tW۔Q r>?_nQG:+(ߐc|GE}E[O请(~A }o?>#ܢ?'tW۔Q r>?_nQG:+(ߐc|GE}E[O请(~A }o?>#ܢ?'tW۔Q r>?_nQG:+(ߐc|GE}E[O请(~A }o?>#U,$_b|; n0Tkbƭnuk<m (;_ڃM':]pOOu/%<&qu|RXkK: ( (>?ɬh5%zLK -oƁc]^W-ļ op(((((((((((((((((((((((((((((((((((((((((((+5'y_U^p(((((((((((((((((((7im2HCkddRӦ<}EPEPEx e>07@'ݴF:P@Qn;_cmssw>oϋ<|I7&K,qbIe #R%.$hգQ `WE93Cß> N-~An.RBkItV/ɺ' €~QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEW PɴDgRYkibΥ5?ξ_kwIg_`PEPEPZ5ƻ$@[ɉxs^X?kK%N4Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@~r/o+j9R5Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@ymCmcI]'ڽR[irXb+ҫ uGfv5-. I%p;ۯ,CG.u/᠏Lܖ7o- $*ٺwzփR|;?|-ӮE[|3HND/ZQYlB>~C 9㗆^~̒̓PȐ" $FHg?lğulj#jRh~>x}Vu4Fr2`2)l.v|9~o Nct]I:԰}М,,+ L.p?oľ8Gwox[l] SN{IIYƤ}Io/췭8qk߃~&ZZ#m(%~#} 9 ōKEhmoVD%4 H%,>Ph_ҿNk/ ii=|MkĒ=Ʃa$1]LC6B]Bd WNVmԟ|a}qKHdv^b=$BvǒH}[hԟku^ R?`?x /z_5}8-ޘI8TM[$z){ƟN%+#|xKđx/_״xXRЦH-gg,a9'G ⏍G(xRȼյoj(ɥfw" QG@+k][Aa'?jZuѯ{{2e7%FUl5/]}|w?_X]>-uR+lmJ9Tm€ˑYs7aǞ3k4V rm'@gK+f1̓%ֽuDx ⷅ>2` -|EKk~ƈm72gWfT?oko8_໋ I\ڽt"aʏ]1FRB@@Y:4^3n5ýGL6o&M#6s_/ ussu$iJd`|*n\l, *Sa @yi Lj5C/#Iڥ }b0~׊7kK[u8Ry !mF'+_~M~ n&xvk=69#, iTԁ_+o'}ş]4ZZXG {{~簯o٧ᶓ/xCB .Yb20v2"1AXq\Ə)?/uYx~ծ{rmDȎ8ܴ7ETdN(??Uğ/e3&UWGVSGSr 1COE~I/ #[8h~p0@>ٞ1LJ{ѧ;;v3FU- |E)Dğ:>&\=O2y6r@{H'gsGk!e*G5|2(m& VTDW<8=7w_),|L4>5WK(5bX}KI2RT|/,_ PटVl|7WtEޒH:—; !c¿Fz|-vY47 b8jKDLSGSTݧ[E5(Y5:/Ԓ?b/|Xd`jh"QI;$bVA*>&#ѿm>0x ËCx~9ԗ薊H~z-MSzӼs&*0ybT!1(_}B ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( j6,_K-wUm??YԿZ"ƿc]YKMc_I, (( [&oK1/.kൿkvIy^/>(((((((((((((((((((((((((((((((((((((((((((_"ԟ~W/{jO>梊((((((((((((((((((_Co?Ɖ ෆ]\nf$I=3%If6,O͚??Eo L|9h`\FZ5_$AE{<x|MHw(ֱ7h0 |v.6=K  -C /m{fBۭf*q$wk_O%콥|/mc[gt5<Ԝ,\wfnaoe'AP5F$ͭG x+J(:UK5UWU@0y'? g? 
/F+.n` M#4ҝLc?o[_ ?jV1!|u?I0I; ɭ_ S /,;|P ./yTfyPI!Y^E'7^>$CkWPɻ2d ݶʮsPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP\/A&ŝKIe_ڃM':@[RXkK:?)ɬk5%}@Q@W7uXJ;%q%ǠHW9[ Bf ڴ.&4RgT &?U*̗ ƾ'UcK; t[2X|ze#ͺa'Z;8U.Odڢ(((((((((((((((((((((((((((((((((((((((((((u!֭s%ę중%D— 3_{Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@_mU_mU~Q@Q~Y<1mMIu&qw$:G76 ^EQE8z#"O޸_?EkxŶ6P;o /uԾ"Z~|'": k2\^8,c ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (9[ 'OW~\gkֵ΃sn.Z1iVKTn_|Zפ!SMWaݱ8M*z/i|8>$|`+?BDG(#|#gaN,)Ɵ\ԭ|U2~׬N!ǖG@+4nm SQ_)Y?Y]BO]x{qoKv[Hc@%tGO}aSqt|C "4REo4Qn*9Ѫ+oR^~<:o}.𭡎f+q#\ެ%:3*՛'r boAk-fP^8zͭK- r%dvu P  +7៌_|a4i|!&qrE4<1GRuUtXOtOZ׼!:~3 ùJ;A N_sJr?xm)[iIx o>l!oBֿfI?։k[OwtCl" >فo Y/2o5mSzN'֮&KS9mb+)n'֊8??h=)|;W>M;KV*#*aUDRX /ux֟ v-JQ(2 $$@W+>Rk651aFW Y1+/G~9| ~%x/]G9KJKT6WRՕ vWgsE_xn 𭆍kq@WH.Er.Wٌ3t[=M71f6P$oCʶ+POEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP_s? 
Ok`n&w&-R5_k&FYVBdlnYUӕR'/xt̚jzt+9~}=W #Oe/4}^*@$^yi/-@;澷eS~>:▗<9nSEՉK5rI}N6o4ே|o7>?|FԵS]( wVQUbL)8XRt7Oخ|LL2uX=M0ɗ$h ~˿I׾|/iow躶I ˢy_,7ײ~_L_ࡋ]~ח Ԇ{ *3!,@'^\E/(S=Ě _c^${kvh|5ÿ_ mn>WuLJ5=oF K l~a !~_K_|9_xVҧӾme7K)n$%3|3 j;k yw4W~&W9i, 1>kܿW?iػQ?ۻ]^V{Mve,Jd^2geσOg^"R Ev3 ,h J(~hWK|d_Z׾9I=nx]=;dVf1CH0ܬ$?8)>@D%uk[hW<*Ǜti1D"VʹwL'Q Y u~!|2}>m[÷PߵiP.Jmf|b?;~( >ָg|wq?|oKǮ[Y^%7pOy~WͼnU8⾋A::׾=_ Ð6~&>}|yg{8x h?ڷ{/mɵQP3NDg>~WSG<7ᮓ% .Ss1Ӯtf3yJepHZ4=Tc~$??c4@k['Ց, g<*٣C84x35 |:i?ۣ|.6NOB 3=>-Bl+^_1]}c_;g~4xe|t uO'Xk(>_6Y# $ ۲|ª|+?</߳ھqgMq5&g`1Tq$L}4o}KLգVk oiW=( |'=ƚ햣JEoc);$BC ±ƫq0@|/Ɵ?`&f⷇GakWi\R6-33Ďۗ68Kbn$^.E6v++]E-̇ ?c~qm⇈xxt{]xrQok6gh";1wcU@4WW KsWlw4xSQԍ>[pPDA:&JHaddjb~Njz|%ޛ?o)r#|y1o"?\vz4ǿ>#y'U' :~mVsGu ]P ח72IѴ4i*8Qo ՘'ּ>"Կtno5/'\Sk@?ۧ UmxOڴg!%I2}&|u9{_IO7kmxZ~3mVGk%/#"Ld~Kh/Xk!״;^}mbڤ .# s ӸPW?k ~%5~З|1_xri,#Hyayl`2@FG|+t涊ZN ʭݐYs|m_ &~!?>xNM6xAMPYXcUWC K~_ /4Y3ME7$rmҊUpH[>/o6iVݣ\H*P) J,6|' 3q3J/ZiV^ӢSH2EkP$g \dծ|+mPmĚ!Q$D+42#RCP))Di)_#x?ῃ~ >d{X$I# IȠwy9AEo՚w3uMbύ^. 5|Njg0bI$y$9 Qڛ o+&6>iɦʗApQm<`Z/Ҭfk[hYG(31$x[^eǭ)dJJs=Ycc&< $eGmuFF0QeDf@>+gI#x^k?=a6g$'m 4дh.ƊE T}߾/"~_I~ ]?> 4/+G-#Vwc޿n ~/|h<]῁>6~QԒhk'Y9Y7² uʗè㯇?Gt$#FlaԯUPORJHd'}*JĞquPkh6 7sgӊ+/  a_WOj Z^HlZ(|ݘR7F\byF# >";xO J6ڧZiv0UpY?J+K]}ct{/FSWfkt{㽗rq8m6wdV?lL^ ~%,^3i& Q3Nw{r\7$qB PڔWZTe+دf\ 4M=!K%n$`d<=+8~(k+_-ԧ6H>⸖\!ET25C /&Լ}cxnY|Co,7.#7-Z2cp @'./N~mOkqCJ|'#L_1R7"\O/ W?h|i蚄cAq D`H=~%~?Nx'[/LN~)SHʫ&-UP x}5~L?KxG|[8e +rl?k?o^7|[w${Qjy*W0+!wdo'a|.HKjֿ^(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((_ >4(?h/ۭ /-,U- 6Zr,!լ'9chaE|I|MYuh˻It[XE _QO 7_/_Izϊ,廵env71ISr(¶N>Ϟ?y^fxGV|7⟇'vҵM:UZ.|$Kbчf?tdzqHY fUoP?OطkMߍ4X9Meȿm-ohPwGϥipYɬ޹%nZ5S4I&Gh#/{_ -^.$ogwG{k3i͛3*xb˟9)ޮ3?!~!!hJxǞ)iz5/I"2ϓ j}'qmӎ_x~| x#Gflt &M-Š// wT_+mK˩7z%ڕaqj#4CUE9PX_꼏 '|=n[Kφ5-[l5X.-oBC,n n5SoV h/;bҴlбbC ,@$ /ğgFUCGCnHG=^cOC?7߈ 5V;/XK&"#IE ~W4?O=|Oᯈ]dT뚧l5( ݗFd\vq@Fob?7+_Ğu/t{v"[N!VtL6ʸfc7mO:3\/ ^]«+^hs''Ғ51#70ߴwyi O]&PP&sfthC"bp) <u?|*0մk۝JO,rlreA-~O6"|n4CV\v $j̐G$dy6*޵~:֑|;kq]| |8-mYO0R 2N = ~|#"<ςumGwt 
[[̒՛'s^貣+(ea"?37~q4=[XĭKMӬtU(&ҁ|<̥yMB8.&k;_:2z7]'G  .y|EŞS*,nu'6VZcub,GtEc'Ŀg+7=Ÿix5/ ı"DUbVe]NA>%|.oxw|WAviz̈́Ww#Y}Ay| C35Ӽ?閆FEM N~8b%7doσwڽuȼYu ƈVhZ35#pU#_.^ Ԭtr!3J3@"X>c^O[V_ŕΩQ+rF H'Sռua X^SzaDqMr%"VbPf|)ŭA>!|BK ]>94Q!ED۾6D2rq?*|Vփ]7ZkKRc]JE|,><K^kM+wZ=ڶ oLXeQ>cx٧3Yi>'nm-|KWi4G I M|9jZO i|iK]^ᅞ*.#m$yRv_?&s㖵j_Y[ j—;wNCUY2g7G#װoO? 'v6DNt-Dk!@~7_,>,i4Ѧ-J]HvW~ݬ_[H]St̷~QdhكG/u]~ WunE%.2ιhLerN]_g/v~ ndV"-H#nU'GPxڀ??m'ß_*m׌?3kI#b ,L$I)Ĭ{Wr}T+|_xoZ uX[i)ko9(khk%YBg_ ._cz*6-* :[ȊU;zoOgw?~)x;m;¾ v2CFDR 2O Gͪ_ط?goD_Oi\ߊtfI500K!3 :o<# ?iJ/U%<=* ÿ<xkźш^++ ,ʬ(8`F@=c@<'᫯g\i?bDe+y*#۴ o"Gq}*Wğl/efO++ xKM<;[%֩kic(T(#EPU0c4׃O =jmHWbK\\4HiIf%,Ky#W$_7ࣾ#gʚ6a̫7`H:,FI%DڻIt?g5_>/i;hmi%HXo3t]3Ge+>kGIIJoۣxG:2'O~ |`п'?)krϩ}PDb)'x_?軍.mSo;Z v]k! o*+\$*Z*Kuᣍ %9û]#zzχaݜv r1'^Fc_W{׀ j=/஡J|/_N?[̹?m͉udh2=|>|IishO(A»Rš hַMa rч(ŗr7u7X~ |f߆a|bx_%S2a#yV_ٻZ9 Wþ$og~$x^v~Q*]4s:HտG 7_~߱߅ƪo h&9W |r;A~IO W$P ӵo6z`nZޤ U`8+GVg'zGrkt?aY]KoW@k"ػ/\%'ci \@cNqj (cf^ِK:#xϨ@-[:_lZ>W@k":Ũ.꿈qj o$bH u]CD_LP] 5wA7jP] 5wA7jP] 5wA3코{i}t:MA NduuUqj %"5mMq;iByz叙W7jP] 5wA7jP] 5wA7j˙P] 5wA7jи]Wyٽ?-AtU0-SGZ_vf^E]$\"M]~M嚤ftsˑxV3Ũ.껖j2MK+4&ϵpwgͫ3Ũ.껖jחw׊qj 嚤fwzص]WrR;_lZ1W@k`"Ũ.껖jbH u]5Hqj 嚥5׻ >#dnkM^,W@kYl; Yʽ_-L,h`;_lZf~x|~y}dj@}2uK3Ũ.껖jbH u]5Hqj CW@kIM~H +P] 5toqPRK'gsvu`W@kJIbM0&_muWrŢw@c% Z=:w\ٕ@jYHvDžX vWps+U܊7Ahyꇠd=?אּ/*kӥz;j؝?@?CP0of@٦/i NP PڀmM~L K®`ov_v .|B] '~SߐGߐGߒ cm$\ytK(ҏ($qKג]Q[iy@y@y@y@d`Y@RcvbS9im6X-V9u[*Jlm;);)J |q:V #gE ^v+|['ؽYV<|v3a6fm[ڳ4F4G;xkpUe38 2 e[ڲYV[@ r#ڲYVvmu)b glüP՘!vhK@]3 7U-Aq5c+q@;V@ P Ҁ#@ܰx0A34p<><y9@9۽ 庐L@ k99!P 3! 
zeAq@= t/_9?fuI#n3[*f+:$K_Pbu 'V\ :pt/^@rQ}(sp@=ݎu^k *Ȓ nݪspgHջ\ض6QDڤ-acϽ\hA\~A\l|xWop  e~9,,pM}ϟq'(a;t#z˻F=Ĩ/Lp[k͜˻q,3XYw`=P˻]D` x 7 =P˻]Tݛ|$9Ώ ;fk7TvODsSyJ+9e݀pMe,q;53Z˻]T(!%?776@058P !"$1` %(2p8gBE+qҏӑgnZAKTݞK4ڞK؞K\.֞KT,ԞKW(Y2zssc+=4վ~s-;=Һ֍jQ=Rz/CU_FSzZ {m5=Ҷv˕jX?Q6sk=Y{`g魢kS =Io7qZz.Lʼn2!#7SRg +bŨq*XsMғnSN"?)ZNMT,_ͭ*-DrAM֟Ym\׋%/eS۾!Lc&J(NJѸ_1I[|Cy\}5봆cRpXq*oBy5gd !F:vyH8_J^]-<)i5ޘcu{/\ccqy;# V^DwD)׫IgCW~eaU)ī(eh䏶ݱ5nYmhU'Eu}ufV婶Wdx^m2̶D=4^YʭIrSzRV!$ݴ,$`r$TT&v*Hei.EQd)Eu%RT+K*{>E*Y [&ߔUR&I*n #*YO+K6ш-͚v}e>6nm9zCe2[۴O$]8nG.G͠ηlJQNEȈd͏M/uqvƤ-[ll5,zH\.,`UT5)!plG5He%* $.B%y9bH\ JjԐJbܒJZ䒺N%u0:\BYR#ī5FsJKHJP:oz$.#^ H\E3jTr(%uB%uNmd!qg7a_ZRB*VP2:3.yq]',笺#G\Jt]vH\MuVY%fH\OvVY1VG\$p /aѥu^=-:H뚼z]l:RG\Щ:}%M <$.vPYIG\RuYU#*:h.הW"fG\Xraq#,鰼Ւ_jI kYp4ůEQϨs9}G>QϨs9}G>QϨs9}G>QϨs9}G>QϨs9}G>QϨs9}G>QϨs k㹛#]d@]H  뗔;BP 8j# idm) #U7 ر}ױd\hqchɩ>9Q$A95"U)V3$ Jथ1Tk ijOۉ-DiF!uxIe^lpqz|Ȁ27AޣozC4)"|JH4ZLhJur֝ӮշHgT\JfMifeվ2>ljn+rBIZwp5?/Xppqz|Ȁ27Aޣ([饺FS wD/1)0*cڪ !GhhRЙ <ǒK qyI-ڛ.6q,~ʒ ':Z{xVQ2MXu-~+zۙ0iDm =2?TuI)j֬vKQTVi)*JqmUlmZoߓ687gh./OfR;wߌX+/'Ԯs(6\+So=0ReLv&7>na 6 j֥I!-eA@٩"m**Do&ڢΊo"'LjǦ[~ת5焐m (Ndk6X䪳E$)MOW`k.5.+~NF䙷ELNIkC 2w 7oɛU~xm{̈ )t;ƣ.\Ҫ(ݽGSmzbxj>Nk&j-W>WJw¾l{'niwJֺk>QW*~NQAün?ozxW헆ѻ;Gqz|Ȁ27AޣozxW헆ѻ;Gqz|Ȁ27AޣozxW헆ѻ;Gqz|Ȁ2$=Wa`$I & 2Ld00a`$I & 2Ld00a`$I & 2Ld00a`$I & 2Ld00a`$I & 2Ld00a`$I & 2Ld00a`$)dߓ687gh./OfRröDfFhDfFhDfFhDfFhDfFhDfFhDfFhDfFhDfFhDfFhDfFhDfFhDfnt@ߓ687gh./OfR;w{mZoߓ687gh./OfR;w{mZoߓ687gh./OfR;wsR9X^.ւR֮˭毭~/ZV r\9"SY"̩MJmZP#+ZϻݬCMyV=ǣmsm5:o*_K}w =Eה Ȥc;4>9QdD֪zݒR[ <α Y%A*]l 7+{INZ,hƇ 7oɛU~xm{̈ )t;olW*qt6KK"i=Z9oHkRacA ЊASrs*B1q3ez)EƳ i.yQLZCżmqPe榎haYDH$6^z9 "5T\ zfoH7 Skp\Lw!umZoߓ687gh./OfR;w5kEZkAA=*OYSz}S$XΓ|a+Q"i,W/-2Up^֟t(ӔJsBU(iRNky+WvU+pô ߶Ip@PO9)uMYm{GN'XTqZj$NΝAUfݵjWQ:RmM[f_^F"@zmM[f_^F"@zmM[f_^F"@bG|>G N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -80Ӄ N -8"ͰEp-3c/ vvrve#ګȤP߉\kpI3YE%'`{2{BLlZQbz!;j̒YqhUH9{Rߓ687gh./OfR=^ER>ߒ5vꬨ.t{%%g XQ.ey͖D*J%P$' Yj$\qʩFۥRD ފ۠.-%"H;yNi,l9،SĔ{K~Lhݝܸ>d@]H-$=f.m-u1kVڑj 
*,^i.RԒ^#a2-YCkV%gu%d!-&!V|=/+'ct+Hcf7ʕ5}oɛ\gMo{ie تU+2;P}Ku/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/ԼGRKu/htU~Sr#]M2JU4E*/SkezRq>@ݛj]ڿ[YJA\qwjmnd14V'm8Q欗9w5nY!KuR:GQ5ϵ2xCAi{rhLoBXkL uTݩ+ғ`k@1 [IINn7y; \%RTUth=l7E [KUjMcr9} 0]Vڪ-Itf! ;o}+ܧ\JnE=GLͬE4iFT !1AQ"2@RUaq#BSTV03P $%4`brCcps ?I11@΢>r)P*AQSW@xH-&ʢ: A09PtL-o¢2R"ao @b0xU>)H/ۘɘbF&A}>ړ_%b zn H/kTq#S)1 l?ۊM #bېQSfTO#bېb)Au.6,Y *7s`:vQS0 nu.?@>Ba6q`QLC QS,ijCKQS,z)ڭ`PTHb0(|\/(TTHHf=J^P򑣢f@5J^PTfg6򇖢= T'46 /娩}Cmh娩SĻmm|X|-EO%kokj*jx@[{_QSE_ Ȱրċ` %THmim%-EO2N©m%-EO\hB[ 1.bP 9d9H Y Y!6@TO^JP`m[fae䨩 (C m@NݸrTTHM1Ӱ۷Jn'p p=|ehaHKj2x;X7).O٢ݰtIq\y3r:'b\@6riYP ъb;SQrHlBR."1K5hъSem"Hc\@6Eb|bl̼b\y6C5R9\"S˝@EIv5`̗<*mo%%ؘ)'е2aT. XfT솛ICEMQ*Չ)C.K"HɄ: gv=ȃW?Zyjo.f4$OΉc[m|ɸ;(8Me#B` img;ҧBs (HfsmF04[K!lm, anMɞv!T Ō9rM8p.XS 2'tktssZ,?UGX~U7a>oR5]h S5T1 S AG'>z Jr<{DyF*>>EpZ2 YUW QC,(kfiGkq'ְR S{(Ďv0ˆ-iDcSNEG#{ݼ)Hn|Q(r) ܈e);'O # 7jO|1UY4yGT&f0Is:Q>D(.f"U[f NQJ7Ҷ3ĺ(N0\9/˶3I'JH]!\,4b&/7C2XH*$32 f&.5dnsW(r6a ,̨- Yr,m0ڬojF7zgqkl %v c#3|و7~\䩓Dں"̅]i \jI#|)4~tQSj?9N )h,K̢ElW^rTkZ\V֦y-j:YF͈͋\40l.1(VqA6-M #6=#ř<(m5HvUdSLJ_}9*%٥\*_]'`N7d֑vDq1`WǙ@B׵`\u44UD}w'=MGOC8)p9L BjYt;Jb5e_9p[c މ83Į!>l1b\$I@\'pOTݽ(?" 
h)=@FA8 a ts{CZ?UZkFYH٩@IQ I4ZxtA11LίN't`laaHiSG h" $% =4ЭI^(u6<;|E|_^:d<])];M[c ALSCZJEM#L4Ĥ = 0(EAਂEU)0.@ iѳ0կ)Mڔ;-LݵBoIo E&`8J;usեmv=6Z4hNdɋŃܴLne+:djgtHN't `޴YŽ81\`)l6-zg rPMŰoZ.W.8< Q"mzw 9tcͦ\+8tqiΦۚpδ]yfoaopw0`Y[@#%h δ]ƤXL`9֋tԎ$esp]I.I _ Z.vv,fXvhWBm0'oZ.Fe{&Bm0%; ѥ`*SΧfhgvki; wO`fӶ xv Ϗ`N/VM3#6h 4bKxvhgbCLm1/oZ.C:\i;)6u5:Sm3CgZ4DN}X[!y֋drjM`/7jZ.sb$g3δ]bRm3X23δ]~gdzw xi.X9E 0-y3aPok%mڀ(@%EG#\_/(25n+ >QB6MEa@F$IaȐCZ9P1F}C}>}C}>}C}>}C}>}C}>}C}>}_rR#Yɉɴ.hq8 '*xBQg꯰~cRK&y5DL@9*Uǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺\zWUǫ.q˺(4qUH O?ܣ`+?c?U}r?rd_|U~'?}3W?k+xqх2e{gliqʥ7ş'K P]Lx]TbQ@(m0ԃN&M@XFF>g:DJqrÊ0rSbY@׳A+f5`I(a/B|񧑏ѕr7"ᲝFYP̼AC %p57Mk=Fq(9ѵ03}YAtn{\G>ʒUc3 x~`>|Qu[9@Jm'2 nT^f"3rP!qBad@,6'aqB7 v^SJydo.ƒteMv/5`Rl&|HlK(r Tcr&1p Rj(9QfR׸F]58JH& QӘZČb R9y"0,#TG{zn)NqΑ~bJ0Hʉp7R /0Ε!Z iO\b3m*q},.,|Y&4cS*!R&9~SI42yoUW;72U#o<$n^jmiK0J4q"3XP/sFrgX nWP_ nz4;?~|ɏCn!Lu$]v i2N6 Dn)4@@$@E[].cǥ`DO{Ө.`jJVLLr znܬcHw.}xmۍII]CxΰLv'D\vuz'aO[:M5݀ a͟òȬ +5 pOˇQ0p@W_xz~Ux^׌?O˄ou8~zJ^/Ƽd=Aj~\ $ۧw|NvਊlX-A?ɝ~^2W~~4].^.>`b~S}1?HTLO*o'7EM"b~S}1?HTLO*o'7EM"b~S}1?HTLO*o'7EM"b~S}1?HTLO*o'7EM"b~S}1?HTLO*o'7EM"b~S}1?HTLO*o'7EM"b~S}1?HKꄚ:18X.S}?ƢK٢(Gd_|UiUH2†ܷsXzcQ}a9=G1>sXzcQ}a9=G1>sXzcQ}a9=G1>sXzcQ}a9=G1>sXzcQ}a9=G1>sXzcQ}a9=G1>sXzcLG bsxZZrhh)m6y[ UL6ʡs>HMO4Ʉ(/*<2@?R18vG4{C@q7(a_岒Tc>Lti]Bk4%luMꂴR <<tQ%G8H≎{ְܶ8b>E+`~Y! qͥ(>,_sX6Y5}9s#+^TM"&t?%΀y|MLcM H^AM\eCa>-&;F+E (\?;r؀[DFQag" S͘^_}J_ ?j|3R/Jq/ӆ2UM3f |z;|_ Gptp*ϝ W"a+J*&Ofڦa&+L" ZHU=_s s(p*;"eT`ȻG# L2.QCK*X"bݱ`#[h(TOA5Q1qs8TYGQgiˈlP^/ƾ/"&ԠщRM3%^% "X-\Uf"mB0uWc)ۮP`&Zè` ZVrˬȎSH)rs^Wt}l\ƙ3Rwo[*E/tTE^2 6.#_2HX~ɿ ʑ +vp-I99_$fѲ*X@|YLNrQFەFmD3_+(,'"H51PZ2L>r(ӑh!" !1)C WY 88xT? 
G$UtQ6(_e=vN#p'͘ޔ1O fILl6Y CśŸX HaD <> ?rd_|UkRa)jN`lbߓSFhO`{rlD7+4|19u4$ؠ[LB!L.c p.kEHtlU/j9_*J)&XOf츾k8#Bb [)nSr_mGR13∟,5.ty<%rb̦(scJȝ2l%ϗq%fĤ)~߉=?HyPVO rhk'ؚ*vɦTJ75UOdtCڕ36mX~| y'eVD9|eSdenc-0?(.gg_LL4IPS,#kJ$))"3lTPa`ZL [Rd?9JnIXT1M"J5iXiqQp7h c7ܣ`+?c?Uxpq1; *v ,[9&E-z1o 5,j{l)z9G8vecbyЍ^T@v4sf]5D)E1ӂzGhЎ0G5DьMrO9R> ?rd_|UkRaSᛇQ0p@W_}J_ ?j|3p?}3W1NQ Md uH?I$Rn7TDAM &uH?I$Rn7TDAM &uH?I$Rn7TDAM &uH?I$Rn7TDAM &uH?I$Rn7TDAM &uH?I$Rn7TDAM &uH?IUFդم(8~zJ^/ƙ9\Q:ȪP F0k_g{pUnnQ˼0@D tWh徱y-Qv)( Ux^ }')@?ː8DjYfR3_'NILnK~ <˚=w-P"  Q睩i < [0ZT4+J*QTM;[-XBZc/#:FJ(-=-Bã3ʧp(3LH/V&? OLۢc$M(4{uɲ"Y]A"?DsMPi%F|1 2۶ v*ɲbb 0ԫHVzQRHa&T#Spl`C)(yG0U4~Q,c͇2{Q0p@W_zCrsLˤ4"lSA勄l- Ia)0Vvl!vF>3HS1#J<,"tH$< ZJ5R@GRS# uRP Z-/I`,;.9LMS}[G/-)>ϴ ` -(8LM2-@ Tߔ1wՐGQꑂ8 Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.} Qp.}ԃF[&{=p7/7Pڸ[ZNc㚞A!RE@T' ,@BJ?{>h;4t6@Lqr Mޒk'c Dl\E(d"5 F'QvhH"C p$c]:l賲U ؂%DɓbM*1"Å`d5|aT#iCuI\F"$. ѮO]h13T `^s=#~0 l(ajeVɦ/n?+ ڀ{p:ԍI&JS\s< ٞbntK@iZ'需7X.>w82M6:-\V\6-iYtGHŋF 3Hl%%k"8 Q'RT(78 c%ef ʟGӧEi g*Pp[ oKE]9#ʪw  _>AٞcKE|vmsa8ɕw҉\ɩ\$C e1ֵ%v0jRn:Y"db`یsԣs]#"Rj[$9c\@6Pj T6*/}BFS3[mJMRUuJ_Tܪ"`Pcm@y?f|܋8V76/&M̺5W()DQ _>ZI#@d/. JAF1lOiM5ňJ7@gH¶è OI02@2[ Ĺk*nrG&L۽2JX6l !.5ʄ:25ۅ8.Y1911T(*2=::28*br"0b-h?-ڪfӂg)S`*0+R$p"jɨ]::N28P2s6FS-#4)LDJ/sX3Xi)&ŏcoOuF$MIՙ*-#h̩ tta˙nhI,ƘXT?͛rL-`ʚpMKQ2~TbYLb,Sl amp;T0{kC&C^n L`*\˴h}Yv "D=@t$# eT1BJ:̂X-KGɫ)&^1 c W &)Yݱq_Z0̿j{mc#q*w8֍Œ Rcq3"0l[j:13N6 *ilRJl2;]|&.s8+l Z!#$GdUP)X(goFLߕz3a3bl"QhlM9dPT9E%Ք]:Ѓ2"? #4H^.wq ̓\}]9a aE u?LBοe%-'SPNm >zb.0d \LR (l4Rmb| 1lP B &䝈Xl{e_EgP<&eN-UjjQL8J2LދIUū8 T6*T~Ftn$[^'d4ZB3P OX&6f͹U1H"tǨ**-͇NƄhE⛆gSԬs6DK"FM2ȈvmQ+}vǾ3[ <>(=?L\8{^86> r]Ia2۞R!Doش5 %3׏^%EM$Ze(K POl;o བ}n2@`6) /` h?e4TZ47Mrqw8p\Xw9OTcYDP(\ {^y)i2BƗ:dF2IHT9Dx.FETYB.[~t6 ,i$+4%R^L)x)'.8;F"r\hұ@M-SjuY%_\Ԇ R:I*gK~OΎQ Cp1aòEa*ge:pBr$6ˍƿPU$`X&>Oi &1 p FtoFGKdBBo~qaB'B!jFՈY (!p`ALC Ja 4(P{8Oc%~? 
Hq@.6N mf ˝ԔZ DL8 du*l\H"}d!bŋ,j: ht&J7Q96[tP,槻rd+U F"a*CxPfڦ.iό"ilTj%b3m0"%WՍaS-@@D7EРynLntD %\TFEJ1h`[ͿF Pr,򗘠%8e-ސM@"CF\.4ڋ4̲ y >A\Ghf g++JN,#5lJʈB: XJfϝ,‚u?&uؾbe 'qx0$d h%#BfW-~A7W`A"D|T QcD$H"DϕaD$H"q H?d(E$H"DM@!/#D$H" [$H"D%@&a,P2A|3"9ȎDr#G"9ȎDr#G"9ȎDr#G"9ȎDr#G"9ȎDr#G"9ȎDr#G"9ȎDr#m2#N!~}OM~%-asnݻv۷nݻv۷nݻv۷nݻv۷nݻv۷nݻv۷nqn|h_7w?:ުޛ|?:ުޛ|?b2BRhT2s^v4VN;:J1:CIWM@hNxHV`ih72˞S\ø @,@3=CHF#m$$\qp{ k Y!G+rY,:CE>:ުޛ|?cFv!GEamiWFFP t/p] <0l<Q@Uhn~Z IkjL O1A5LK?at,qha# r*w"Q@J)A](rG#A=*pzzmzëbw?:ުޛ%7ˤIgp @ @ @ @ @ @ g5&ë%$lٳf͛6lٳf͛6lٳf͛6lٳf͛6lٳf͛6lٳf͛6lک0*W[oX:Vu{U6,Ej;(^ بV< {вmFbHd4H@`4/D1qx: p:BBAidzr0cxCY /-b8,B9у,xcLz|3G۶( \ՆE$:iMa{ډ P^@PgVєFSJ W3l=,u{U6,ǔt룆 J?O1r:F L?0 hFeɅєTq3)(1Î68pFlAP(HaShaefe )s5@.0Z<EP)#Nrn,@ -d({'jj*CT*ĨFZoPD"B$iztJJlȂF rG!fbd>f=pzzmA, M $MA>=6%$S8gb6fχ e!" FGYz[ k1|!OXFLPtT3Ο"QƲi!godIAL5xL4Je F \cp$-?21v୍^7ڕF.Hn}U"  "L^/zI"q5Ӕx)n : zq #tj2^fP(1p{yWmp␠`RaĭI*g.gCL~^UoMJJ'FeQ0 oX!VDܩbp 09.qB('XX`{p71du=@#@&nqbn W` d19"GF㯌MeˆxF 3GNyX+cͨ]O4᪜cWne:|MyA4 aT;^0DNAVT >ä%;pt)C̡ Dpu`bMq?po!N/?Q%.?Q@Baս?0go54A0|~ѣ<=Hw 6cO3ë)&xu{U6$wIjep"1 @ @ @ @ @ @)X-|:ުޛ#')%`7qqqqqqqqqqqqqu{U6$wVTc2hP<Hp4^hedhD)$Y>ۀ2:4"Y!Օ,[*Y*\{ʲ@wT`RBcgSEI\lt#ECt#pl.LÂ*jKB4gz0Q#T&e&,.AS $PS` $4@X2VDj-at Rs@1Ϫ , %(͜\7swɠY'{B=`sc,-^Բl :ns"ʥ1 q1%hqJ݀ބA)~KAEB' B &W`LcH0k1Jfx`2QNBf J3ëI;&K0,&f/H<?$-ȡ\ Λ4Qj6deXfw0t!pOÌ 7;h$-CeVAx"M#rJpHT7m1dJ Z f9C9밀jᅊ#PA [[A,TM]TH\.vSTNAchP` úIcyXCP0aqԣW ֢2(M00\&,d, 5!l TEY0R!bdNRnB rqL&r@Z*f&P)gO1`"FFe^0 4@P q\`;r)cH1\44P!0@"136 5A#$2D`<6zy94򆷸5M<O-zܺykO-`ssiNm<ͧ9'6ZZ֎]<]r寶_m˧ȺyA&dMXa}yZ϶yZ϶yr}˓\m6W}yeeƛO,tÍGO,<tGO)PC4҂RQPSJ*TN_6 js|^?>.VP͇+59vYxt8[sIKUӞF+]:V/t<^5N(VӦiEWNm:e<*$CLiIɤ:q'&ēm8~BH*qw*qw*qY X Ӌ qaWN(j m8uҴ?RmuabXhďpLI;Fe# )0y?3YJD$ZdWa"# Cm^L1 .r8~H_d"Xge/{UHXNkqvf-۔/[qjAg^g#2`{UHGϧٽx*ݤ|UvwOOԟSzG_k>աCf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Yeřqf\Y,X<6;4;-V?4`+++++++++++++++++++++++++++++++++++++++++++++++++(86.?qm!rj)r-6hoOoVp-;&I}4a7 m64ch 0[1?f[b;4fu]4ٿٷٿٷٿW]@k u5P]@k u5P]@k u5P]@k u5P]@k u5P]@k u5P]@h 
p<[Ç8!1QAPa"0q 2@R`bpSBr ?'s''''rq~L_'2q~L_'2q~L]tdk2qu8י8'''''''''''8ߝN7bq;[[[[؜z؜z؜z؜z؜mnv's8۝~/vx~^zvx"wY1mmh[kGs}[-w1iΎ-Gsrw1h'srw1iNN-)ť9;'sb'LOGŧb1iZz3^ϣ1{/ɘJ=(Z=(Z=(Z=(Z=(Z=(Z=(Z=(Z=([FbbRZQ硋ҏҏ}(Ҍžf-^f-^f-^f-^f%}WWWzQU.-G;Y:Inj:s3'}, /q𴗳F q5¥ږ{~QOs?uSZWjp̡| (>ߟ$H"D$H"D$H"D$H"D$H"D$H"D"Zq=xO^TuEZuBj^1B_\QۯbDhQ,!OT^ׁiZq=x$H"D$H"D$H"D$H"D$H"D$H"DB!B!B!B!B!B!BMAcvƘRv,AcZ#x¦OhFUV/[}:Ml$QS6:=GT쫚zy}s1c1c1c1c1c1cdzg#c1c1c1c1c1c1c?WGhQ{9zf*E #aTWe*+/FlWBCx w܇_syd_GG1nپ%w%A֢d|3C !B!B!B!B!B!B.1c1c1c1c1c1c1#o T{i4AYTՐ^/A*~AQ֣I٧_2$VyljDZׄ!lB bؾ"D$H"D$H"D$H"D$H"D$H"D$H/5P04@!1"#2 6`3$jř.IvG7= OnC H{5=q&%$Ěԗ8Sڐ/5= MOjBH3u}f3 iOiOiԠ:ڔ=ZR'Fʔ=5ilg a3[STsX)*9 {G=k{9p?{INci1aSC!XT,*{MJ lIP\i* B}Q4} )kP<&/s4} )+PnyG9Y5MH }u7<mF]\G&TJ%IKTJ#3#J%Q.MI!.MIrlW&#KlI\`Kl"칭(d7Ӌl2&lWD&Kls]`ӝ͌=.ɞR$6&RHɰD)TJ`+VE!**;MN'O}UZjM"G/|\h`qdٝ =Omĥ-}ZnKB\"iea[[5iNVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenFVenE,.xĿ_{K=~OW/K@e)3" UH[m[O(l "Ms-79- EG %uUJ`+3\E],"l[(ti%(c>rVߞ_ؗd:p-C#[$KvŢ|W:"Xk\a'eKTS!pqW=QdO79d:Im &:݋Oߞ_ؗO~z b_=%'. J ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0 ]0bJ߁mihFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfFnfEUxN瓡 Ӽ9{z6۲$ 5bdRYX'R#Lv&X5M|$mmuR_7ٍDVf%Py1H"3S9d?׉,u0"z%t/#CTDiJ;Þ4 (J~)&)|)yM%!ȮrV%)ZY]o2A;͸D"L"fg= N4LIS<c~qoEq萑A†eA\㭮;ƅ~(Mё%81\D8pl*}0E1Z@ALC*\HOޑ஻ҽ+ ڧK*i C_I"e2;"REyKJ)JZ_ˎcim&ѯz'L6I G}9-I*gͣq1n&5Y's}ӬIVÊてn_)pK +ג!jIjI5-_%t K"]zsp%K&c9/35ȇU5-_ߥ?zGJt +x)t,eנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzנzMQ82=ll(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b6(b"]FMJt +x+V⛑3_CTxCq6 "avhG FqŔmh0 SZ,UNwL76 moqW$ S$gCvwcL6ʵ1 !QԶ)<zWY4Tr%Z.zmQZnIڎ.}U-SZ; PskyƖDr%jGyΩ3cR[n%r+98DgքiF%OH[*6J G)U^1LK̍ou+HW]^S\zO!ODPD丢5%_$)F_(o! 
ǚ_$ ]By~RY).+ѿ%m?qބ&c褩mTH H-/EzSx+'Oޑ஻Ҽ?zGLt#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]5#]!R_8?5!Q1APaq"02@BR b`r ?!,z% { /B^h!%Iy^h!%Iy^hBVBXt!,:K%Bá aЄBXt!,:K%Bá aЄ {bB^ؐ$% {bB^ؐ$% {bB^ؐ=1!,z=K%Bǡ cЄBXNAXې}\܃V6R+9]襎WX|.)WX Wov Wo{X~`R WXھR)_WE+QJTRE;WY)]gtRuJ;wE+XgtRs)9{襆wE,3)aK VY)YgtRoTReJ;)Yyuo?򾛫cw/z,v_YǺY3/=gw_*&q{V5Fw,yf+|'s8|'lT* BPT* BPT* BPT* BPT* BPT* BPT%unE,~gf-{fGeHIr 4؎;;Rj/ +7a+Ob7HequoObWᗎ_E=T-̫efR?r;KeGb $ﴝ3|orwn6;]5k2 $Y\X9aCqkܞo{Y/"y~yo3Ƞhhhhhhhhhhhhhhhhhhhhhhhhhۑ<BZ"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z"Z!nE3,Ogرi%v};^.J &:B{wb)ׅ#r!8*q N6ψὤ invvd|Ϫ/7ЋO;'ȳvjdm=HMll"Wj?{X+;-oS*-|rDZ{'[_亓NW^dԿn7w%owX,W[׶db;Vܝ%%~7a%aRov7w,Og~+y?|p::::::::::::::::::::::::::::::::::::::::::::::::::6~v " " " " " " " " " " " " "+?oNϯftgۙk$Hw{ӷO2w߅ܕ츓[6 Ig}շ{9;.6&v-NҟOkhwYq$ߥb_%}^={>Eӷ|_ȽyEa"w^bﵽl%6}7bGj-oXf_;ct0^|u"E+pXY)'vݷ\+mRps6c cǯgȺv?uNϯgȺv}{>E&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&$\-؃ 2 2 2 2 2 2 2 2 2 2 2 2 ȵ~Eӷ3.^ϑt͊jۿ؇o3{YȚٗ{Ja`Oe=\ţ 6,/i.} +"wx[+_wAK' xN7)5n~m0ٽ#5t!Kl,m;oc XQK \ewۮ+u~}^B}|pϣpmw2}ЬuCa,nN2lIܓèLjIؓC$;~nIܓ~I"ۙNϯgȧkQQQQQQQQQQQQQQQQQQQQQQQQQqdocker-1.10.3/docs/userguide/storagedriver/images/dm_container.jpg000066400000000000000000001445531267010174400252760ustar00rootroot00000000000000JFIF//C    C   q S)$w/=z8/Ǘ_Y׆G'rc\?|}~4Mծqks1`>˭lEy:,ܸ5YjvpevZ-݄>Xt}ZUlq\[X\o>]68Jn< +"Kki|zu|ūGvu+kלwᄱ)k_9,v%,S|u<9TFʵ9%m4 v!SlRNne~5`]OOڗ$O =gN{uiz)c$Ynz_An,Ew/s817YWӷol`zxJhͺؖ8+%stY`^nSݞxWll##/|o3&(9;b=q82m-t8^X` ya+l >@^X` PpiGdje ^@ sOU6ݽl yIF>#~Z{T/۞WWVzΞ}6,).@{a4ԍ{~>6U {a nh@^H<-jof=@^N/Mf}<}`'rWAfJ,)_X e+ }`a2hWfp +{"ExJzۭ.Y?[Y5Ճ޳)e` vϻ#[cjouk^[VgEAtmao-Oϭo@Pl?5W#k294}a21}ww͵Gv矗Nhi޸=;CٲօHR|cyEs(>V,F5"msUFֲY`i}=;XV͕'e}a }ɬ/y65 e}ӛ:}Vv٘=eՍ+S`?5@Mt2 64>ߐt0J$8͟_ٶPWnI>E;"Kdd '84ӎtOp~w qmF;[LP[ۂ.??|)ΰl T[{ûkv󾱒qYa]?ڧ"MEÒ xUc`je iST[{W ?; _aZlf9m5$Dotw/ϑ+rq9d%EK[r ߆&ӷ^7 ٵ?umPU^V=?vLc^vUpںԪw}ZהB乎=b8~^@,GW py^@,GW py+y ^@W I+ W'` 
*N^@T;y6=Jh'xp}Dj3{@B&3ӣՓFpRF`{o/[sgt,8{VMJm {{{ѻgjɣ8)M?Vvw(ՓFpRF`{hMvnoL@@Mvnmoygz \FzֿXt|";VK@[ө=i3:>QJhWFpQ׫Hɠ- Y(4+ȣ8(դR:~qBxUZ tM3FL3A66ju^oǁ}\3p^+}P W|@9 p*L Wz#v;J/i5Ujy e.9u06]Oٺ-T;VI$0,=}Soh>Ћ*C~vdT}픗n>7XZۺTHC %jPﲑJvP%%‹BGwh7B26]o%-TVI$f ҷv[vmmټzl/3?VYZ[Ez^WEz^Vsvy,=w,K7Y Kl]k}ۺ)֛uK6ipZe)iBUԠm=mP6/vuz0 XLH  GH-K?>lC\ٸaS-ݙi߾Kދ2 է*/ ` It6C7ȴqF{XYٱC "z<rhppy$ztf-TP2-}"-YPzo;+2>S7"}_*׷AZ4pB4 oK='ʽ㨾>%>2ě.mhCIZ9Lm`DNB~@ˋSunB^VD̿ʟd)\sZg m_ RĊRf_a48K??)qsP܇ hyFf ei+r ~V-&{rbXrWkFTƒDXQ.MP]dZ>B>4 Lgr>AIL%nm#8vAOk$O4l"Ns_8B^ZF#j Al>C83%Ŭr"8J,xL)ĸ;1rVXV!YȀ v Ų:!dH ي<`,zsJ#bbNϷv!A-@] Pq@7銋{]ChM_bYb] d((Z49AʗdHҀ#cÐ,Na'>uo]Mv5ձkhVh?G'GڻtruG']tru21룓N9:룓"\DN= G']truG']truG']tru%{2nSZ-ky|F&r5ǡ25ݮTЉ%=BFݣ=ځm25yCe-v5uƷ1FH]$s]Mjrf5Tha1\^@Behrn5Gv5ǡ25,ߛ!R"BS,khU[n?ڂm25Epu4$r!j7[57fmb.9 1.REz5d;fK=8es<@f3OmTXb7CU rz r^is&sOɖfF˨w#r75{ߖS.SXT1[ӧct7PPPP6_\`\4aLf?W4;Vu#LOZKEO0H) HHN=E<7pf@WK-I;#ߩmJOf@W>ؑ7ŞX'8-}tW`RT&{k twՊ% '䱦p:|>>J:8q1I ;0b.m&*2zC 8L@ C|%t^ Az/]렽t^ Az/]렽t^ Az/Yc@ճhCso}V}U 2|sϴJOa[9T7>g>*>9?YÙe#]#]#]#]#]#]#]#]#]#]#]#]#]#]#]#]#]#]!]!]!]!]!]!]!]!]!E͚@adw w 4k_pWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpWpTzU(sb|<]puG<]O' B"w1 6H8*SU:C1vSz뇋QE F!OzҸH™u.e1mAcVAޮ2J|XSu\<]puQqf<]jd{8aNxE~*B0HPM˶*{xP0DX뇋.x뇋.Z8:'q?6yWvӖU䥥Vq J|{*FxŲp!V<' hAw2/@23v$xoiR:y/$e!›v̶N֑'צ-pY{)?R|D; ̪>BcY'(ܙ*df\`8ސv}(E+GcȖcś6 ldbMu?U\OͽgCi{S|6'{+Bxo@uD0-B@{*UP^OOYnivdB;[^ymkZo$[w^2Fq>xCx.ֺ|&lZ[ZuoWȇɒ;}̹޲I 7eYݼB4yW4Z#Z'ɋ)! 
|xSͲSw^nyU}W"Hke r*)'ےܦ\rU5=i85^}YςaVo˲Nȸ Oste 뾬-Yr~NVi)q5֫AnGy㻍 HXe FBSn֌~1SH'&%xxJ6@[i&#LBw wfz=eE#gզV괍wzoԗyNFB槥DɒHpXݼ#YGzBƑmljN/uwb(Yu܎W.Cw +kChȖT"]'AUJ^ k_UX3rNYgƶNsogV4侙X4[ y(qYN )Xݓ <0z%!gÂ[aD\,wU(xY:l@ͺUc!b\iߵz҄]/Kv BDNQo &Su  pa7;Z$kA*Ќ FÆ5T@cV٘ @oHD $ة [SP2U)gvpM!vK8B #$ u bn7 CN&1V$XGqq\&EWF2#`$fتO|2^wiŪe=SPa ڵݒ6yҔ9Zej:VE;+\s;+\s;+\sy_*<8԰;+Eȸs/vVZekvVZekvVZek%dQ(ej^VDհ%|vVZekvVZekvVZejvH#H%^@>WzzzI ]^V3'?$gICAoD`<驱Ǘ k;4-anf3h&b]OK IOߟQm>,Brnu;8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳;[8l㵳VeQKwݮ2x+2KRk+eʈ%X|еPrH%>;jd.@#ф^ 1$T42DiTRW+ V)Z *EipRJ+\pRJ+\pRJ+\AX[<HȰl+\pRJ+\pRJ+\pRJ+NDȦY_y&qXZx)Zk V)Zk V)Zk Vb[iײD'ȈťkkiňtYUTRW'"Tz\3dvĽ1ᔾzq 2iTČY!#v[i!&teYciS4OJ^~%VN)J|N4-|qreȻrpf]ױH~RC۾se4fXx&F^񲵕˼;˙Xb2ҁzYo՟+N'ɛ+Qu PsQ͓;$B0Ư8$ MRw_ũ~+P " qI)A(mKo‰C6@}b1:xY-6sN2G߁?yfӇaޱ/uxdsof鏐b*lO`hd[RY[||Ļ:Eؑa"mҘFT%a]'H[a-׷H~Y#W]_oCH^0:!>Z0ZqG |k Sj镧^-'w&/pHĽFy@ǝ `lHVkԿ|vŔEΚB<ۭu9ME*:_y d SO3IR-dHi 󕴷_~+QRgg_us،KsFH͹::mB4"ȦZREJ0x>mr i/$Ley0;.GQ !"1QAa#2Sq 0@BRT3Pb$`4Cr%c&Up ?MCa) ()_7bh#MAPC4V^GdiV5^i<4c4yJF l4qO $ݼ'fnMV؝hNNlOupM=p-$P^nNXXnW֛I45h. PH8Z'V&s4BuNnnE؝XnWbj{#Ii4QNAĶ)M iګMi7\.% &ZP剨&VjMwƐp5ZXFuĬ$9FujtjƓwXti7PrNi'[J! 4+6 F] $ oW>!Kr][ՏH %[- (޴| BUFqSD5TR>!DTw$.$hh[;Oj| BЪE|"CD[폄HnnHJۭWV  Bޱy+TSPV KyG$-NUXDKVMSCYUoZ>!  
-*rG$6ZMAo[4iO)u4I%S> dQMWVJ:rHQKi7+iwTR(QK,!=ЄBOt!=ЄB *fdA'⺕2EYda?X#i'dLP d8L+ZMOli0]AI-.Msp=:G04TW4ڭw5$D'm8I b% ,*B0d»5iX-ZG=fgaԶmWD<%,fxK3"YgC--*-R\/f%7,g $a@Ԕ ,.Р Y]Yz*0eE-REiJij<%,fxK3"YgDIRZU@)mZfӋiP[n@GġS*HKI=ǯ[4kUenG?˰'HM蔑 0& zMlI^2ַփa&*[:M*)Ef=2搕uХJa ZC3F -zq-:E5WT:}#(6j]8`oFҭSWޥ=,tI}Bj'e璗Aءj Rb`e&'> UFO*?["{aY5P%G"~ YC8!E5~Xw ʛ2f JBPrM=:Fݻn LZ7bRyc"Z8 ҥ)j`۷T\N3W=[\'.1Q3]DЂ>[,^-aY7yL\IqeQ(>rhƴõBQE!xD7m=Jja|ٙhKV~._ j)ن=&URBOfRtR19},p.[h+34eK(q&ep?+H~oG}rR/):tNLMB׊P)Wl7#JUBMU12qEY3,'~ RN3ĄC„aľPUQz6 YQUSh32(e?hɵjoڅm H3ʍ *4< }s^I6&VϢ\l*&нmmJF%qqB(U!,r%_L%R5鲰qJmۺ$e\J(h m(d[0E5cr=D‹%G#Xm.ZmZj OITY}u4I)Z.T]Knu aMi$m ?_+B3r&d>kpǫw 72Z4[,e4ʞMs׬n>yM]hL+ZWp ЕβJlS12>p*lDSض70J'fB* Iu#*cB[pӛe3뉇]ry+5qMBngՊ5] ${!wpdqm T%WD_.wJ@JK%eTU:)P])E -L,4l vuv!4]s 8+t)H9*)h󮠀!RvTBܽ30 DU,Sm)Ig6D0[$fve]2j9GĤD|*;?E6I7K}/U4Sב"+ -}iwUZҰѽ++ V%Gjv ʊ\QJ jvCu]R9B\Jh@lւqbDĚ\Xo]Q3cra0e6C5wܢsWN MVMi qO].9B|ɯѭzCe*J5f>F\H]*q3Li)lm8\7%::Q 0eLiIŶ^SªS\ęlm E3=PbOӭqD3 "YXqnۄJBME3B3//$G ]4>ou.b,}P+6,xkM5?&Mrg\Eם~u(qN[BcLæ4Q] Ƙ?LitƘ?LitƘ?Litƒ+S6e`1`1`1TA8 ʱ`1[k`000000,T0 Hh@~Ib*)dLѢMq{fm8 U+퉉RFxDn}36k>M=37E36>j}36EbbfSotL+EؘIl ƙDSotLͫ%:OdL͐G&fګv9&fnW n]3N+%8Hfm]!6>M=37G[36$LLͧ6E}13p5J.W"q36z@K3t*sw|Ĺ/\3IjEi7][1M34d4eQ쉙%W]nqJO&&n2I&鷺&fՎu&ș! K6I9|Lͧ6E}12 H{bhTmϲy8W]~w 5Jy~m,u֟ysO?WZIoڀS"z/ZIo&²jÈef,ӊ(ӼDfK-KWˋ(Iw/(6O|t&)JVUiERp2VĊu팭J1.R:B}5jQT [ss6%a"q2s.%7-@$ԘTBV] /nQO?_;IJ8TiWfⵡM#UJ0+rߪ)HFj ˺4ij϶>[E}O0z&mLԋ|m)ƥi'U-Km{6T]h_ZiSO m8зUykN͑8M%rRf%%̳/d0]L¨#89A0k%8٪T* K$58n})M/8'҆Tӫ{ ɿs^CQdV ’͵bJq-$-4:PĞGFE7G۳/Џ!ekQvn`&J.x;MɦPBiO&@|QGt!M>EĂERki.1KiQMɲo FvuzA.8+jw\umjŪ)3v{대*HwŸ(ѫn0isy!Զ\Q[FoO*ͬ H9;&)nUiIPI&'ܗ:Bm]rIMB4h$(Yہ=tM9.- !DWҏf/\Hˈ$%bA ZēKڞ%ť&vI9n"Isj K705M![ne=Uܒ=RϡҀJ|K. HBi_J?. 
R*"zo|xD3'=7gOM<"zo|xD3'=7gOM<"zo|xD3'=7٢QUXoiG[F/QnoiG[F/Q@c 0# 0# 0# 0# 0# 0# 0# 0# \Yx ,Ag <Yݏh-e0Y˪D,Ag <Yx ,Ag <Yx ,Ag <Yx ,Ag <Yx ,Ag <>Oͽiz|)=+B>7G&\e7\[rwJ?~]G/y&YKMS@]aO9⬣1֝-ZM.ABiu,9B0WALj ϪVm [eKB?8QN:hLuyRʢKhƓB9oQ;C@'D4,aƍRAIJXe{٬xIʐUm[CR|4:9І!Ԍו+Q&-WϏp4ko/[MT 7aQ~}ySoLDaUjwPDu0ӸPJuD o+DG-!ТUWfUGN¡?G=R}6=8Aÿs4aO4%YhEGY(q o([m$B)*"rPEYuŋJR!V90--M v9VնJE5PDj!@ a-*R=cH6 礼1q/G+2RMݔ#SP0kmݑ/$ 00)]y,͎ՄZ &&7Q8y/k%!9[&wGNԥ+vge9,鰫U4(LfTT릔0Tvի oV;1 fJuD+CUudL<ӭ+^PYٞO ODRF0߆:>.}4m4UqnWYݷJ:HQ70ܾX-ql<+z jU_'fPmI^P _&aR~mTӬ퉗멗KKnUJQe)!*sR~Q2ZFUDֵM8K*RT=F&5 5pʞY)*өUuղ&q<ڄ^p!M<:҅KI?&^+t @ϯXkH˦J^ ْtL>ܶLj[I쨉)<_%֠50ԪeKnk8})gs&m} -KB R.$ǬG=R}")2omT"q7:ryꬅgZ fÐY, TmڪbsE`OҭEN4vd8^r |bi9P=iG%؝EUaD{cr5V_H}JEXHu;M< RLrx'*laAN6!(l*NðBW\CS;^6zBr)/,6u7B\-\$)Rv:ٗx*80p ;]E˸(ufcCVKKksy>B>~i-ksTA}LڷBm3*ťzjS+ULAyu]}\ۏ\=4 U Je|^, 1C Rw y$-mJ@~kZ&JM8RZPO.ˢNQmڏEUmgٗ] r$* 'rk0h]x/*gB-ag !.L-O:ޡ^?ztyz9B\{ Y#JH.=q~i yT =4dR[mGwPnX|zEy\uv|6(OHDJ辅P5}3Y %Il;6)bHj*JSFaACG(C)ejclL#"aG(D9B&0PrL#"aG(D9BJ[oe[0PW&IGq0PrL#"aG(D9B&0PrL#"a}8 1}%G(D9B&0PrL#"aG(D9B&0PGON'!!>|bo?M ܒ!2jhU־NC3Xrj?|J#oQyd uO\6i/yVDĶ-6C(fby6)E֣vnSM!j*BEr#1 L-2-.}Ft0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e1j)B5:D~q(%Ģ?8G AL%&&U5G7&yBSӬϤ`Zp2X2>Da*Q4Vy |M[D"g 3#v|K4Өue6`rd-2.OHpܼhY.UY қǸ|2M!o!_$@_04C D04C D04C D04C D04C D04C D04CH)ie| t؟9 7| 7| 7| 7| 7| 7| 71^IPBo!K^plkahahahahahahev"-4+*A\ܕpO HB  mjl;M2ꔶo7m'JtPlR-W0J,ҘbX~ jmRiǜXj3u29?9R+;EQSj %\}Xع)&ҔVbn_Le vTdaM1/>]hL<ֆqƃh;fKo 6" ۚR}x\TVE@t\(ӉRV)&VwgkK2UvnH˥ Q4Ɛ z1=>JYPjo^ʰ>Gyԟ`m4e% JӍ4RT z^qA5˵exHfݔ;2{rچ¤cnw=ȬEi HaxV.MîlM6xw ޛhw"`%Ȧޱ |))P(niq7+,~},o2(';Ms=F ]褸3OQZjP @W[ey"-QklS!b ̧o_\LJBmG)$\Z xO+H!6&LM0UĐ%UQ*UDBMg~}e UZd4u4Keof[#֣$ӓ F!CTFȉyV4ڑܻ!.4QbЍQvLK0(a-y-n(CrrXFջo}RҎX^ ?ђ9ZSmvZCgL͔]VۡR"Vt}t"]cݝ{jvSa4^)H+vXG>CFMKLI&pԔmk <E:w[J{aѥ8\[Z{1U3Az]K&FW})LXb`*EjCYUZey< + ;51VmIXq몖CC]0Ei'o{J3qѮuܛt>#n"Ԋ%Y(mI!W 
#.mJBc()ku\fQ/1)Қ.]Hv݆yhڼDP!U݆yH8(-vCo4ܪԅ%Hר4!m*Rl(ר4l4[Y#ʮR!B/h6=⬶Pp)hMs˪%sI%EUq.%:sGGIG؝ RkN͝!(fٯ%&&]N0㥖nؑyNXP%^R%:dɥU)sO Z7TtZ AU'x(vObc凅ZI%;_Pw"Ni +ZպDk yRjJE_'TӑT ]a›Nۂ:¯LMț+J~IL2ĤPkStH%zVn"1 ˶$m󊶹ߔH~ґE)&GoYҫAeD&R ;Ewl0wYN葙["QR # $něMuI˜$rmHUEBbGٜvn^ICbҠWotb#ܶ>$c`ba}{Ji .kVJ1%4S4-mPU7s,'OK%q&*$^NaxwĔIKbG~F$fŘ[鹯I9lIu Y*VT).j Y"%6%\Y 5\E;]ge*q46=]JZA mE/vh@L.Ipg"v_"v_"v_"v_"v_"v_"v_"v_"v_"v_"v_"v_"v_"eʦVuQ0'ẑ2KI&ՍQCL؆ 6Co(PNpDjTi`tNDNDNDNDNDNDNDNDNDNDNDNDM2\>lSnCx1.81.81.81.81.81.81.81.81.8LK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxLK'1,4IJxL 8sO,!1AQaq 0@P`p?!S321xv 4h3TPdÁ"-GϏ TesPȶ<_6ND$I7d4DEXAHҕzʓZW'&JokV1KCDE49mEf"9Me14EH)@ D^+*:9`>JMD&SPI1xv9Z:")2)"-YʓZI"g$X8Ҋ5KQ.f(ٶiMpe5sO2$F}R#:҄ XF)S!s3|߽+pNmF첃s<9GBZYGY4D9zɋݪ!s Ha4bR’x$A;Ҡ(VhNnmFP]`ъrhSҨf_Z3l՚x4$ #1zQ,;bʯSx"ot 9j"4D1zQ=PR ɏu!֤0@=#_W5#_5)•1n6WDq7ֺX`ui8!# (aX7`~˨`Xm$Z%yXd}ht<(@2ybik L|jи-4ƻRl`Y]x^Xvؐ+v5ڸj]v5ڸj]"qu (=$ЬAJ*|1o\kE 2ZuHD~c:F t5ڸj]v5ڸj]v0cʛ_S L|7dDZW䳊HGK E2[SQu3}oZ1ISWqHvz 9˺4H,^^`%M"h"]Pm C@%fVLȳz[loh-al[JM@4٬1ì^R /EAbQSX0@ ٝ'=__$6derв'Q%@]z'ĀffPS|q}gl|>"$u6L2[j@z[my5]_u]-"f)yd=KV4lY)]錡gNSI1YF#JrK'IiҊ+2`XxDoE@N5(z) S74z1A&jS̛I~6EHs a) LbSn7B"uwy)PYAddnXK8F0q-ɢWEB$16DndJ4 53nRo,TF=DOO"]Jw&ھ;đXtsR˶0"QoY-j XT-0^"g|$WSBU))lFr^􄸎#i֕w1ܐ?),b?6׌ܾrW 2 F-=0tҴRq _5.1E"B>=&E8#M5h@/zf7DCBZO*Vuag|L &K7wE оA.2 C91TY-RBk o7\2f;H]r@0 hP0bEa,Kh"wX bGց,.- JmntwIfj=,cjpʮ#jst! 0Ger V@D !fkP sVsCҴ/<e]GHLif 1cք Ei֡Nr;~^k(aI Zc֧FrͻJƑa ߲(@B҂Vnҥ<Dtt-6Vn֖dx6jG*iC ڍiN"efZM1ЦW*ҍeb:zЭf-T,s!H7!G/GڃjY@' Vc&vG=hт?r~_k'OL 16P&)_'OSˬ&5hf!)Z-[:.$x/yx֘1JVE C+,7elMhnE]FASNBX/B-HM'-G|YlizsTSG?/}]ĹB^NUγH բ&:Za96/Δn@5J Z.l:qZqH_CW)ͅԸUeRmPC 7IpFlA_d t)3JHFwfтY17b8 2"3jBFԩ mFL2p)D-Z_oQ6澄SI6 PwnU cZ-3@`)3N(#WI G 1 %qdO?/x)Q_2݁b*oOKa&6iR# "NP%6FbKKAߥLE3Vz1#!ָ }? 
2*@c!%xeq}ՙPMq_f4rB[QGɉ4 9CPԩH5"1z܅!Z$"4 ~dQ^oK*P cJ,b4a |&uI%@=-iZub0~w$XQ~u0zʑGLeЫ+8bZ}_I9PʠeA=r`Jp~~jMߝ|Jyd?zS=O?zS=O?zS0%L/_~^~/Ǐ-z_[?~<_wpZr/:r/:r/:r/:r/:r/:r/:r/:r\ǽs{1\ǽs{1\ǽEXѱ -s{։cZ=c޹z=c޹z=c޹z=c޹z=c޹z=c޹z=c޹z=c޹z=cޠ(?MF"D1ĥ xDՌ^MIv,C}Jg55 'OݪjR9?b%BLCK <"LH}|"nlnWȡ$Q@dJ}+ #1 ZGEsd<9RIYfnϥ=w)p"4W_1@fq>RI|w@N!cxMLOZ$&쟸f! j0 55q *ܹ]sz{K:hxF?i14:g`jo[x3@T*RIE3 T;-xTTzW2ehЃdZym@g$8&$ZCw%Qg.ػ)hА#oO(0Vj\ >ԒJdo֝,RY3ddEL= $/(ִT4V*ü>'m_KeB ެ1S~s\MR(y3$ZNruuOHfFi ?$3$ѭYjݖX` 6 s"t,ao]5K ,,iX&0PЯkCE&Jg1Oxy9E]#Ƶψ LSCd2,hT"t)0 8P̶-eQ̈́쓭91NxY"ŀƗhq"㼴b ÙI !cCIjQ]ĊmXH,z$-~(HaeJM@2f*2 LPIpb:[LEE&.ʭ$X9XTHV&IDR;aFn4(]bRiҤ]||bɭff;YO9I҄, {kaN,g, bQ;nE@J J,Ź噆_of YwR@ y[Cr:)XiI `Hh%Ћ(IҲҸ-҃=Mv[W煎 ڰMY G8Jbcf_KV~ǀ!^ܬ]ha$J1Kh= 5O&^&Rѝ3%߲-B#!Ke;"w$pj!uFoaI|'&Kҝ(!KxyW &VK!cΙ4!%V |-=W;}+4 2(8.A$F,b؀NVCm:QF҄U@t~ϒ0\ )RFL߽`j!ZP [ J"ehxnDV)0˦)q.fN 7On$gYRђ&AmS̱~+V "ؽʼn ]cc!j}u?D*7@Y*$‰׍khRT玔%DJ!9Ms'S(/2M\#ҁgTC[II )K֒"Un]>h-(!^W ż}I` "ZLE|AȖRx Ƅ8_Tte3d^-$8Ҁg OoՒ&E n0̓OJZz#))қ m$z|(y[3Ne @xej(F(7ȕ90zHYdLl /SW=EHFN7Wa2\mN#i8#@@tD@IyL< nk0L(/>ON`;#IE1mjf!ڧ4jKޖ#]糥8PطHEb0hPձR ڇ_9+M9a ΗjWeP&%| 0bH3`zx,ê jY:;g]߼஀mKdQL[2OX!,&#O<-PfK N!I+oS7p.H)C8~ Q.8$/^.j:#M).4&YzAC1HQlZ&[(K\aZ2V>$3\ϐ<_)](5.^>nX#z^M%exJMuN,%^#i@R/E{Bi$F6ҍ(ϒJ<3A\jjֳA_G+%>g9X.o8EҽO@ $}/LA\p, +GU`4/\RdN.DVrF "LR1ԴhL,ȸ`LX$sCզr38(lK|V"ǀzؕZ%>ȸ u ($uW-A+kUbY7~f +8ɗI) 9 6su.L.Q}i8`cGv}w@{!FcF`&2^G6zֈH$޸-fS:5$ĵy$=k~Q2If2ްeUQ`S9Kݍ+B&ee &|nSmᓍ#, _E %`(`JI,,cj} V#޸z=#޸z=#޸z=hQQ TSlt%9\Gq{\Gq{\Gq{\G< X4q,~꺪CۄMu#޸z=#޸z=#޸z=B53R'AҸz]w%޸z]w%޸z]w%޸z]w%޸zT9k\sq;\sq;\sq;\sq;\sq;\sq;\sq;\sq;\sq;\sq;\sq;\sq;\s3I-!1AQa0q @`Pp?[%\h6%M۸tBtX,Cʦ@ A0*Ժ S(@Cwx=r" k%f VÅ($ipkDKs:JpYHq ^$ĝXl;:B"!!DKZWX)OIyXQ]No1\bMMa+hE4ZxLBIJABhE}Uw> W|Rf4#dXŸh - "L| c>\kۼ0Ikb^fqRkwLCL$kS{Q:_Fe Nm:{l] 8L!k dhW7DjurĚPSTI #MEB U)55 'UC6Z*+pit0$ii/h&4Tlĺ+BBswNz `6O6C"E^%$t@#?K.SɢA ja R8Jxeg^ uf 0l 55Fl  ! 
cW*p"If@N7XmFĆS6XÂEBd7w( (73Xa/^٨9 g YYY^„J): ̹`JhӣFҼh>B׏'{ *;P<`D{Ęr9)19("g&xH";0CPO)JkTXȸh7S⪔GaBgP׷g7_%lNsJ?V[ Ǐ#cF4hщ1|{t#M/GH1ok219SS)lc?:{ΏB oF4hѣF&XD2C>TADBHżUз\ͱBZ)g<>,㉚ވ٧ q/77elKp-1f҅_4R;5#~Xj{`Ao?r!eXqתԯ+HE6PogD( c5yuI޷z v Qvci4Z!\F)#)^$% f#5ߡ#N5ya4nvB0 )x=3 bCyȀ:%BPVBD+I{TFţil!0p;NN|uni:%PZ˵C#JbWfyHlC<]  |tlG{8F^G@RPGRA .I-H?`k]kF~Sћ<x|,b"S"k| XWs_zљF Wԉu.\Z^",9݌d ~1 M`Ϥ 1@( Ib0܅(P WLzĴC!=l`cRѬ 84lXDymy A21jv76`8; hȒYI8AD, S8l}QYYQ(Sa)!2MV _y_=րE# [D!CHONJz_4%=vmOy?*?*fLK+!w3VFU]\ 97UgEgYYD&].rMGQ A4 * E{/pYFMJ u-D*vOe;ZT$2"K˛bnA[N3iX8+Dw}U @g22ty(-f)LjcU3αQvdj59DYj%:JrM3I˴Gêz4VZi\ W! ׀3j4w`2M F%! )7SeùP <9^ƗXzP8K o\CjqKƕLt9#ϥԛp/>GJͻu:t.= 2rq l9Kt9; 0a3G[Ƀ<*$ICH|ᖏv&( Pn\?`Pݝ $)8&1dx9I֯PiM'it%ۭ`Ilrkv+@4yy'p; "rvy*^8Jqss.L"_f(7 ְ#@GJϺ.lJC.!9xk!\9vh&1V**'Hw_8ą0ah/E ,퀟YL( y1VۀB ?& 2`ڻ@H9azQ̙M;ƄGJ`)8ٽ *RLD`t bȐ#stLf\ TYd\1|t1! eEo,\(<˼]vKLNπʐB8W^1:s4h#HkZa=*@71ֱF.+.qT)HMPDg.=}hLF wU(NUo ^'½[{PLP_9c;J  .*jD'D [˃(CKGO|)Jjlȅ-Q9RRQĭJA٩qRӁ]oMeGs gXACD GXXJ&j=mĈQH YZ`i"Œ @40@%\6A)؏g#Tq!W<"c,RwN^eLㅉcjnF+FKJF8h%~l*+,Xbő0'G'^GiUעSܠrVސmdNiӱ PYf<3@" ۘ0PIIRZ9].1/pƐ@JbhV{>{>{>{3?<3?<2 ɗ:~y4%S3~yg~yg~yg~yg~yg_<<<#$z] Oޓq,دP+ ] eDX'48|M 4$KFґBծu|"*^-"j%5r" 0yLJ;ל>&#J7~Ѕe9H0h 턳 }hh@`68Cs!8O՘ Oz=s6>?g2,Dqp(xXФ'thAѨqΥ e(B.y]}mU9퀆L2:5iXh4'ح)&Vb`iC'8lTbU7 GpFƱQ1XvqܠrMl-:ʔ9hZ\ bSS8s7@.l}dr 7fֳn]}]MS6|ֆu g5E0|g(c 8fƫ5O&)EVc8k?ێ] tK59c̯GsE@YoΥGR`Ijq i)C'kB0Hw@.4, ][k뮺뮺뮺&c\dm򾚂Pmu]u]u]6Gc]u]u]u\D/|ۜOyy9Tg5pTlj^Π2Z#{ĔdpDei0t1 ) 94@"ȨAlgR!a(Pojj@ [qqq_Ž 'w"T)G888*4۱^SAYSfüQ3~(cuяREQTLؚAhhND@s±ư<[+1&V\ k6%gтiV~lB\[E'E_Hcѹ` P{D*%= T۰" 0^#!@5w"]7|:r0O1ivJ<}A3_`ɛ<>z !AW`$| 89^gWS$J k.kSqP2OTIv /W&A3~J Ph"4`GpόOtw_5@jLႍ4IzD/J4J Lc|S'EPN eed+E鍧 a")TxH& m Qfrc]A$ !#x(_,LE,%i3hES9{G܈-J*؝zj*3sUP'rѮG`6Q0LDN8"_$*@DH f =sC]fH~ڧo=a]O6sϾZ%.~ρTͤ^ nE>*V*Rr8P{uq݂'όmAhBeOx(9RQI۔Y1fGg^!n27^@oE7aȜ'^=@Qnd˝ƤL3&(Bepi A{jToF  ,Th$uAf]MT~1( {nTt>'uղD \`"44huWP Q+@Lw܅ίv'{8T\7P(?|bEJٿdxiHDžۨi2KȅBsv"?ݿ7*⏂Za ?Mv0#^v=l?A'3U[cP~Dx?8Lp`q&kK,|\Unbe68Ysa,wb E |0cn-Hq%qbلm 
zQn,Q;mHc6'w6ߗ'ܙ٣=zl1H)=,qrx[hSp֜oWzEiVZEiWGf܆;l;C'=V~U*ߕoʷ[V~U*ߕC,Rߕoʯ &V~U*ߕoʷ[V~U*4`=٣{?#OO/c/.#GW+| -=/YeraIsV1Fͅt]9Neәt]9Neәt]9Neәt]9Neәt]9Neәt]9d~v22={"g[ h{? ݌RR`h4JCW@+O%4j~v22={k[$O&zϴaIb"qzA&k6gP\POigY}igY}igY}iIrVlJ`DigY}igY}igY}FK֋/ߪ?i՟ba?.zz+~o:^?_7_{h1'd&d;;dn߹&ܛKI\f#H\W/ t~?o Ծ|c/Ksg[B/M#ȴ7.H;DՃk.OQoTFUDzި-zި-zި-z'f=2zw-zި-zި-zި&*ȷ :L"Rv}zި-zި-zި(W2M!jA+7v&&ߢg(Sf~)y;?_Z|__4 r1ؙ8-˛Ey1IDΣwrL!BEQ)Rxwt /Xf6~|'?lpvgOOûSWo,Y,ygXY,u:ŝbαgXY,u`YH&,u:ŝbαgXY,u:ŝb΂؄+uV8;ZggXY,u:ŝbαgXY, r/ lϓɉdIJbY1,LK&%ɉdIJbY1,LK&%kQ٬Yfe5k,YYfe5k,YYfe5k,YYfe5k,YYfe5k,YYfe5k,Y&#wo: !QRa"01@AP `qr2bBp ?e{]jՖ4:EtXߍ74ydYf4YVYQkwjUI[MVYVYS S :wjՖjՖiS[T¬T¬Oe5eOeT,0,O[OeWG'xt9=tOZOWHuǤrzS^r{:G'uŃ=#9=)9=tOv+O]rz%9=tOZOWr{:G'u'uǤX:9=)9=tOv"6ʝ!B{Ԛē̓/$$$$$$$$$$$$$$Xyyi~<<<<<<<<<<<<>:;}1}F+߽hǛEuӷݝkbUk[OZ" OB*TU{ǰ\ hmp-\ hmp-\ hmp-\ hmp-\ hmp-\ -vX6y_뱏?w[;/m_:VR]j24I2_%T9c͢]yQϻhzWzя6_]?w[{~}RE{=ygY.KýYեNXeکL2S,}L2:tWS,}L2S,}L,}L2̱2S,}L2S,}L2S,}L>*c~1Qyyݨr#+#ڈ+Dc~1Dcv1ȌoF9WڈDc~1Q~dcݨr#+#ڈ"1QF9_ݨnF9+ĮeF7j#Dc~1"1QF7#WF7j#Dc~1Ȍr21Qݨr#WWk϶>O9to|EezOO}>O=?W^g>'_y+/3{ګ#?r;#?r;#?r;#?r;#?r;#?qRR%O{З=B^ yT洛o,aq,aq,aq,aq,aq,aq,aq,aq,aq,aq,aq,aFЗl~ ~VЗl~ ~VЗl~ ~VЗDHLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLO{ -hɻhFZ2іdMe-huh &4-7UOrіe-hFZ2іe-7VȴdDhFZ2іe-hFZ2ђmQyN_?Vtϫۛc/pw;O]ڽ^v2w n}{s`e?o)UMP̃̇bi /H<\o !r׉ ֲ<<<<<<<<<<< }7M{$7M{$7M{$;݄7WQ7M{$7M{$7M{$;ޯ)s]_봗ϛb~Ͻ1;;*Tw|X?n|W6]vb_[/jub|ʗ N1p~?whm%_>+EI|F/_ d]k dLɐy2&A< dLɐy2&E d]~ dLɐy2&A< dLɐy2.x~LW _ dLɐy2&A< dLɐy2-/Wwc1c1c$M&{ɽoy7M&{ɽoy7M&{ɽoy7M&{ɽoy7M&{ɽoy7M&{ɽoy7M240@123P 5`"!#$ACp[̉4218VvvN/̈]tvBIe \YݺmI܆Yߨ+ K31C#VhdC׫A/kx%fEϹ` {z'WdP:<2nw!F'w%av}{:"Wf(ewggF^2u ވff)A˫4!V;]Ȥ叿 @fvv;z'Yv=2;ڊ@qfb٥s z vE ;31J} 7^ v';Y` 9w$hCs]λw:u\/V0--J>1-Kf%ىlĶb[1-Kf% Ff%qwىlĶb[1-Kf%ىlĶb[1+FËUŬ}hc|I=-{yz:w5`jXֹz3uZһJ ¬ DHQ[ŅalX[Ņj)hY,- bذ,-FŅIذ,- bذ,- bذ,- b:=QqaETKzކw!"ɪgjzŽDs awCTD]{;QUdUD5vCTGj➨v$ȫ rjކTE=QqaETKzw"]jjڞ$;j⸣ڊ"$LI IކTE=QETIV>f0,욨v.,(tOX^N5I Qv\QET\XQUvu5D] 
QaqOT{;QUfdU5ai;ꚨ.,(tOX\>õS.~޴4P}ͭ5mikM[ZjV֚յ5mikM[ZjV֚յ5mikM[ZjV֚յ5mikM[ZjV֚յ5mikM[ZjV֚յ5mikM[ZjV֚յ5mikM[ZjV֚յ5mikM[ZjV֚յ5mikMQv7g|B_YX(4Ȉ[ebUVV*YX+ebUVTT&e8tbYX+ebUVV*YX+ebUtl e`ȇLMձVV*YX+ebUVV*YX*]>x̨x({zo/Kt^o5wQMPCj>Q =ay՝Ľy]呁x*nqHD ._Ki-{?}G5CG@N.SwC#sd@WspRAfrH0dQ2xdooZ_{MEo?0=-c>)(|mG? $9q_Ki-{?}G5CGra\TJɅra\W&Ʌra\W&Ʌra\W&ɅiF1&jxɅra\W&Ʌra\W&Ʌra\W&Ʌr&jxJf-a\W&Ʌra\W&Ʌra\W&Ʌ_"L4j?GhS{Wc{Oc}Oi}]==v>>`~?du:cN'X~?d1=G#{WKoo}]/mt-"Q,gׂ}x,gׂ}x,gׂ}x,gׂ}x,gׂ}x,gׂ}x,gׂ}x,gׂ}x,gׂ}x,gׂ}x,gׂ}x,g {m{m{m{m@ (QBP(E (QBP(E (QBP(E (QB*P$T(E (QBP(E (QBP(E (QBP(E (QBP(E -dZ`MqL 0&@0L 0& !XLE0&fD |`L 0&`L 0&=Sh0&`L 0&l Cˣfy>}s7נfCˣfy5OaZlDG2`@{֦wSg)ע3qv>hS4TQ/w1˴}9|.{& ytl'Qj3*0R0$GV H rJ PPuAB}p>>Lϗ=}zad<6gf":=j%i>}iPZ*d.#VY@GD' h63\Ȭ;5F`yǗ=y6=}92+!Meѿ%**&G^TTTTTTTTTTTTT>#֞*?PSSSSSSSSSSSSSS zT Y. X?docker-1.10.3/docs/userguide/storagedriver/images/driver-pros-cons.png000066400000000000000000003164421267010174400260520ustar00rootroot00000000000000PNG  IHDR}csRGBgAMA a pHYs(JtEXtSoftwareGreenshot^UIDATx^ Ź_o< aQP}DA׿DqKW1h@sנE+BDPQUAϷ95}ggzOWoյU]oTEPB!B!B!cքB!B!B!чB!B!B!чB!B!B!чB!B!B!чB!B!B!чB!B!B!чB!B!B!чB!B!B!чB!B!B!(`„`փjjnA^2IΨWT:K:_˺jԠ6@ $?`@ !$ȇ>y|t}\wzڳ%dzTT.]T~TUA찦 ljgj[f8%vRjYVnNul]@յqhq@DLYO_Wz[5TWwi_22e€!/ !}rՁjӔ)jrJO[QW_BPAp[jϩͻ1{ !?Wv=O }HT/LY:龥IF `@ !$Tkbi!@(-+S}5ÉjAr$$0~0{ }/}lb_]ѩJ !B`_@!C}2 >]t""0qf+5`yj=f0y*,h!5Ǡ9RdOm!B`_@!C}2\}4>h_|p:ppS8S˙n'-rWRlAHBHjiUr":{ׯW<µoͻ1[VYtU8S<%)<&ث" e!/ !>y&D)MGaϙ!$Sk zO!'P%%%^3G3f}M֭Ւ%KqILWeժUxNF(疕}8Vz_k6Roo@ xa@ !$'?|)BmܨJΚ/8ʇ,sVf0HP&1lݺUܹSh"RO?5x^z%7wHI'FyF᫯p lٲ)e !^fr5\&Lp4h9[i#% =Jo˹jJ9"s kXZkXbBi^Ow,A(PAYƈBqݨ{? |JM˚B}Aa΍>Tp./ 8/`Zy3=5PԄwpwcՖL(3^& n9sI1o~u/x׹S5=ޜE f [Pvj* R~WM(jXfΜiB^ƎpZImnRL[F,ܭ^ٸ_>X @ ?|L l}Lޭ{֮kee.NٜY~.-ț+ڭlR }g#|k?Ŗ|pf? 0' ]ƗLsC)DlH<2ͅ^zGYd"o )ռy^xwۢZD0 ΁^ 6 _ڍvjEww=6Nó `֮~E'ڳDŽ=k֨9MwJJ7{ tp].mkTӨS'ߦ^Dy{w]6hN1sՁ&Pv&m n?}ͻ+nΕwo}o6{ӡET 0*,w)i_~eRjСz? 
/`BvaBB!R{XtQ&H !BYmu8lܬlB@`\=: C\z x!rgRwu/ 6PKNWOyIz_XJa9 `.'Ϟvt~&'(-~!YkL*~sPY4]DZϮ yL==]0%B5ip-#"rNX˹XPU]оi^ҾEVlPG,ҎoQ 'rE_~F9J:ԋƾd}{CrNtM"$WcǖB}'Q\u<ԙ&mW@ rTo);/Q_Pr~2)VP7v9xx>ʩo9jِkx@eLaΖjhrӄ2@Yfr]FԓKҁHAڤʳ+G,ǖUG[uVWWD^kJP~r~2Y.v6MĂkUWΝxpa~'-J\nˀu59KT%Kr=hr\ޫ3$<˽' )/QT۲. n ue5kVL.$_`-bA r.PNؗ-ҏsJ $JWmbv..7V~䘛4,<^rE{ kƾDnk>7l};yQEXc (wx]Nŏ0%X& ݤ^ҟ,HB[5>ܜ:RXWđHj5/PԵ`xꫯܹsuаWuÆ SV2{He^j\ZW^Hef.'_*PZG9wPs;H~\lH/36KyҮa;ueF%Q> '$"h;( n}H4 㾽[nd]=_To<@92GTȍH9xx>)Ho m+eJ)j1eS*:яq_@?pJ7%@`t^v?Ol>{  H*#ڡu3_gn?ϛVH,F/~HE/#SP~;:M"}uZY..4n'?i2W%3uKK<.% v=NUMz2{jء|qƂDlzYF^܌MM8n%v^vk׸vF@ ^ ~e4VV5 q>‚|`ʭEL[y꓂.NDX@/cQ%7-SΕ+Wu[K/Ddd…z]i۴ )~Wz Ou:CcKžzs\3\qܺޖ4h!NuMH_h d>#ːqt]5hTAmYZa2\5G ˠ|ܫakXSg6=J##ѹ1zuz- }H҃_u }ҵ J_:X's~:%#$o&TM[LHg͟i1bu5]~`3f ?!qe[ÔuU7wjM^ݡ'mlā?hL,+ؓ]'[!s+ba-e K#^nP/wS2ȋ |#Ndj]̱l\EeǤ8&r)'x~ 2>F P8Ӌt 5#k9 Q2)EεHu6Fz}/Oq1ھC1rToB|Wā{ôon_ >(J꼣O:|: 4Bȼ/-|p]\;Wn"iBD$gV/_"6 -yCm)G}tL<,P`^y}ZT+m~ٹ)esW<#Q9>3D|H@YI9@7޶INS[~Q4~Oaq'[/mn7'{XPԋ_$*ʸtۗċAx{TM|a R>.L=ӥl9O{g ұxȐU/À?K_JmJLH6oB4=z[ <ӉrK΋}eqQ?ҏQv(C]#u2J4BǦkc B y@}t.sv5L CHǚqbi]6b1~̀wo_ 2X˹XP˄"}7P5o1n9S[Λ2VYrA?[{G0@o{f\Wa,iְ6bd>W{ݛJsz2gG{-a.?6mxHu?4cc^[w2  S@#qcr095!Mw>*V ׾q΍eFUUFlkq ;0z $}u" nwqC9A/1&8cD F`vl#ԩSM(4qD^3gVc~I# uS}/'Ą1/xdNECKK_"}o(( AxK^ Dukb\a| %\n}L 4dNdy FE!d1/R&xARܶ,E: aNؠ,p]FO K~R;t"^O%r#@a&iwH@YI6. āCmȔãy|mBL|lݮK;+fa( y[FF]Km+ IR7 m@d+пC֤:ȣ既k~ zdoؤ.l #xuf-$xrMtV b L~LvP|ɏzP[݌Bi}K*d CG !*?-} >a/ ACxH[u0*Y ,r9r-1SyN 2r4ԩx# Ov^q,ЂX Ϛ5z r(Ack?~}z(f4nx }>>gܜE|3#.k!`doBQ`d-AƒN f0yr> еkWկ_?޺uk^~`[=p ?s"2s7P#8Ojv/ ^MpAx!p{)=6=6sQ8y{sԸUxĵno}˭[QezO^ܟ 6/b|锏eډFɩܵK>R8|3*;<AHVV Sf$¹ (8 +d"O P[^~VACHcn enɎۄ.UZ?,q'֙ ^(8st2U] #v n'>3Tio4A ""-AmPR}ȉѾ`dvƛt{1pϗ{u^}vɹ3, H;"+ˉ408re *c~,$_ڀD\[2 >NE; PJ 18yG'׍^c-bri e4emᆱר+HFWkqtGXl7s>\pqldQ12? -Fq:+`_jCf25˧Fe#z%a( `mL \ l|hn17$mMm1"uhY=sxtģ7;}|Z!F14uj]xgh#'1X_:] W]u )/PҲeKɠCŌ3\?( '85"^6F&{% szp[K?kNIOm&պ(_yt b3z1}V슾ey\ TZk'ǵ7.˽rjkߐ9[]V^xDٸnB _:Pl>k)kle-NDED^$MaOkoM Ly_bJv&v*^$;nN٬QpXR",0Jy(aKP x$̇}t"EN[6,Xb8KV4FТn=)ܞh__ҹ/! 
SG`2 Am98%$zN %^uo*caw+*]ڈa$rM+#2^Ɣ&{Tu 2j #Đtz \m:yrץѯ샂2>~4p&#olop&2OuXH|y21v2B_pQ7$0}4G1pyxe/r_`.?k|A/`/wo ׄT\؋2`]붩Y<: HP^#shjfVFFAi&۔Œ%KW_E蹹vlc #6`SUUe.l)/ـq! PbRP/[gV`/'H]$zywSzHŰA'pkoXDVPoCH_`"rwPT\+Ud`/K$+͸N[ū]dT26(#預ėbxW# 7=do)l:g/wYw\Tx .nϴhgn>!lv_, 26,,&xC1ZbpE6. XNz?\ehD܆5?%5>`#qq!(H#8T>Z F;ywٕ)`w|7qS2Ĩu/=P 摁K5eӯ x]Nѣ0}L1dazKt]a#ds ^y^0"`e0i98fsݵݜok%S#dO< Eqˋ/ZjeRz;S|f|3z!{{A_Ï2\rJ_𢇉z/ebOY+t'MimrQC.l쉇 3(2 [-tvBF  fEF6d*P80#!\&d_wwqQ ǖkn : .*Zv5CQ㏛ `&\Pw01I6BA\Ϣ-m1v?{If( JPsKdhk˸ڸ9r0k0LaMk6+W E9.`G S^SRpYػ931f<7N-%ǖW^1gS 3#|@H^+̵ѯk&6yq͠c7Yuόc#w7a*~臗g^` կ5-.2U-y)q&#vܾ5qGs"Rhݸ Fam"5aqd|>/ 7o sGH#7^lɐ7=ǍZs@O]נeM+f2(2dpvkU̧BP%DyO>` 2J0~CN '#v"P <B@7ʖK'"ψ[yA'RoV|B_\3LNEG$Ȼdc|dr-ƨfgo`C9U<)rշhK 8R[gN7}s_<Cwc.sC6&2ŭ.%u2e ܀zκsq%{./' Uˑr IGNk1!~CԫG"k7U+ajF?:>ٗ0Qť/xyqJ7qb%$ skĭTEH΂cy8UTo/z!9 B #w—%~?b.gے\lc SUFu?}Qv77{,7JWgg[(-DVds *cY4haq 8d!^k npy2(l0)'Wσ"t[P(۝ 5.2:u{ ?J1avs_v+='Q5[w6^f [5cӠD^G#m@CQ?2}ˮ1n ܒ/m毞aBJg&> n0B H+I;ZqM>vza&qAѺz0 >9{"?;xWMH{unLr\ qO=\9l5Ӵh„ z9`1Ta`e3̃yE^l)8(c2 搐4 dnlgO,uUb";Ʌlxuas{ (.l9AAEN[|| ^ E!D%/H}-dx] %߳Q\zq-ܒz/#"wX=N(/Dseqi H@Y)+QHMDL\WvT }=y- {P8iu7n-8юdtJ4|=-u{^tr.uG&Rm$:KyE%Π#R![f#(gL(c,.Lʼn(q< ,:Z197J:ϙrg,/~S1JT}z_ q}CɑyY׹"o ҅$6; 2(>"v}^EZϩSiUU`~<(X@aЁF1p8F`O6&G9h`őW^G8yjePG''%n\:~Xx)UԽ@JڰRR suXFe[9l6eb24($6bظQ0`~7lCψ݄='͏ A ۸FW>M \Bnn^[wWSu͟u"3'F ~G Wy̐?'H- oöw}rOe1$_(U?SSo_A %?m%ҡO4l)5tPJUW]eBF;rsq=N%# 0 /E&Y~$AEam =!26S㎍>/W/B󓹉qď@VQ"=AY8^^$=X7(qg s! ̺ Oq7`"!Bp҇pMkmY}EbbG'9^S>:~KuP2o ('Sȯ׵-u p ҆P&( YpOކV y,O(86{(gQ(.XPs{*yqQDm:ۤ.2I:uK{%yRC?(|xfȇ໽t9=J@N<6LrշF8zQC@4YwEy\n%-FͭiD$}/5"7Q NPggȹaU5I+p=w* Adu$c;蓒T3HMZac 0`, FR',Z5Ï(/kA\q 6[ԙ ֯:8ywsd'׭"9Eҧf4p%6?Yp.u0JL0l)uW{~`x3fLЂQ>0 nr9眼q\L(L/1Z/1x6^(Ş;^^Z႗=(b q">U#I1-/^ّ@٥Ӄ$=X'^Dқ F/ f]9 @pM0ȗ kd-S拱 ~)&Em G8ǖ{VH; i}'>[||(PGP(AVПH6uTzo|y,kqȯ_-˲I>0dʏ+u=ʂy} (O>_ nC kϷv)!/߬lc? CH) }s&Q[smR=jHtZgA nmxRIsx!"R6%M>i:} ![p? 
\&oՇ@nyvցM{ ˜Y^^sn[##TRm_^e◠W쓱2se`9wK2}A!m釐f;Q Ϗni "c&A JT8Tn@)m 9m/P+<>`^e#Fhe<ȓ}6PraB6n8']|{΄d1žFmzYM 3ejj7jÆ0}$.ă}0,6LOkl3׌# h#?fytyn9l5˄*>47nY%>O~F@`,ؿ~[6`upˆ}9ڜKK&ic(8ä9c$nξشH,ܛ}2 QXx?>?vqPȯҊx7k:k罱m2h!}ȓ؏ne#'bm&T35:[&hҰ<}T[E:s1ۈH' #s{fC;餓?G>l<|G7ߌSh?ItN̠At8'NMΉ's߆H~OH=?Q!OsO :7PO:^dS6#NPR@!%#Vf|>_C奘s|"?ƆbJ=M>BsO귿UeF`t n90h$2:-#.]< MasUW#//}B%Er\ad xg]6d䒗!/ S$d̯k7 /4!Gϸ1c u  ,bAh >p)hCw}qi0gٳ ~zda&1)d$_?8YLEo}L'$A&˼l>BHOnBcCG[眣Ŕ-W{rz+2Pasgב0u&H$Fn.?~6]cCܩaʕ+}R 1͚53`DЊ+02 1?,Xn 6R_2f?P6 BHOn1hPA#zjl2Lh5BIc9m4ZT-٠ RuJuaylxXKCS}Kc z)}$iߨ$"8 L={KgB7|v.NR1s|O=ypc4(B2EYT3i<]ƍS;̕5$}y:ǥ:x>^An˹=-R#Vaj9WwiW89a9(ْRi2܌ɧ]cBg郹W +Fad|_0NFL^agB7|v.>R퓏jUZVfs`mQXVVjY+o74GΚ٠G7lsjTۦG-m9!'wBYVnVpQsPWP |#o@5,!B`_@!C}sՒOWU8!JII#[WZfcՖԣU=t))w⧀)>ڟNM@@pҔ_ e‡!/ !>sfRho~8e>#OlB}C&^-.Cf)\E"]ݰ e†!/ !>i^Q.|tW&(WOu{Q1]jպq;lZYMI:%u^NyfOAn$j#}E, b !B`_@!Cdz[vjƍf!$>7 U;l5{!A>xSh!5sHǿޫ/߫6x -뗨I@eq#e€!/ !}={7?֎olBz-[7ݤ:|Ө.S}=]9UoKIDiG~Έ,?7C<oo>߰_-~PȯܪmPQ_ux<8l_? ?O:p@m{mϫ]kОիQBH6m۪}6ÇVg1cCU׿6l_*nW7nI}c[FB!B!B!" KߙB!B!B!LB!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BH@!B!B!BHPR !B!BH?_Oϝ_C{SRjءjԥj֯j3|j1h9($ȇ ljgj[f8%$9uJꪖmU&TǖG T]gf}!B!BҠivxUrKHz@[QW_ᙀK lCUµo_>6%$<`:թ]FLB!9TPoD/vRwR+$t(:4*Q=U^O ,-뗘AH~}!ēxjSaN2FiY:C@%F†_/Oc mFxSFGC!yʶU/GVUS JsuWFꈆQ.P6) 2/ d?Qԡ ̀ Pv[PvIv[Ք'}ʐd_3̞pчBkTj)LZ+Qa#uEfO8P6)<2/ CiE-BG\qJ .)h 0<5EHCVxC!yԾ={R\?,_e'}!O1b}!C:B tMSPiNr7?nC%Fmaڷh!yö꽯pHB0'f EL:J}}vS6)>RBH|AnT#Ԁu봋+PvI1J[4YVN€#}s^{5URR%KĦuֺ|&Nhw"g!n[W0e#}!O*4'yն6[ɡb%h[XS >$عgZR>4NPOp z}G޽{͈# T[lBbׁ** LYOm_iK x / |zW괪*g-yJNm.D[D'i1x9J ma&Y.7V'3gO6{#HwL(}|}D쵔aÆ'xBZ\U3rHu뭷;Os窯J^vm6޺u^F.RF:\H:kW77[ɡlRBH|=FӸ^k\7ܠN^\7)|*>ԄSdƝO=eD'oڷ"Djg&YՏav|o~mrpvjJFXL~7W_G$3`'֭3(;w4!Br#`S_jv.mܿeZ{ mOz1{rCt|<`uld;vleƌ\=1\-~yoڵZr< 3g4!.B"^?7!}Y* V26yΆl\׵֤ЪS{ռ^4;~O6:FT+;7|D z 79>d-y>;g |B|,zmۚ=Oj0!Ri 3!4lpCϭӆsPAJ ~_ٳg~~ӟ0!?/ ѵ0BOSu2y*_o>7tA& Άl|3Pe_0_G_ vRK& P? 
VTR%Ӷǖ.3wq_s5$e LP<B!;y{ M KH.ٲe# M"]k d!ѩ4ڥMx)6/حm_aZOɫ;O* WMr͚݇أک:*FLlj{Ib ;?K+^RH @ΝUyb6;9Kܚxu'/_ ,D>"Xr w|QܐcA gzq_/hszȹXp\sEZ>o q4`:{b$Up׬Y2l*ﲍdCFzlZ 17~!8GZҁs>rі)^rrHC[nȩ\#^a}9=^]oڥ,fܞΝa˄uGŅcN0Aa{9o~ؽƶ}~tȹe Io17Α佐'c0\s5fK?O&O<6l*))-~wCww`9̘ԺukĈf;nĵ<"8.++]0% vƅ`lOIŸv znv 4qل]/'N5z.X aFN:X>ІrtD] F3B!B{]a=F?0q #d r{ܸݿ̙Zr]f.7'ʩNƮ_l:u8!++6qWm_:^t:i{ zطCo>7qjYZa<0'UuҌ{HsiYkέ{Mg`m6vwK'ń k76HpΤI'aKt=ؾ}^KZq.ą!R^PcZIs>}*a<|H#G nyGǥL^ zk,=O?5!:C yB{Ay ,aIFq]3!>#";LmO}OFRϜh#=*v.zaRr NLX5%ZC|䘊j1oGKn6ԕb-6Ms"r8q-7P#6,ke)Mbi>e#GLPO+_}͉Ϋqn27!KYnǓؾ fG  >/ՂqEOI?߭F[Nt ڗYG#B2WsC#-KR(9O_%\1ګ\gK+-X0`\ci !#ߵO=yD?eӔ)z&xx׮FWqݻu| tq/2$vw60^PrL.ꬳ\ BPEYt{Qm81gOAzrsK4iC~I}08pp55 b1 ?ұ_2GcS.$nǥkZQH@%TN12GF}qҌ:g\BіpI#@ʵ6 ƑOqzݫ?+^g`_߽YqH.<0&2?{c HO\ .kʿPX0ɷ+u} ҉I"~nҁ%ԾgAqҿ9z锺 m!O)4|7&T FQwy1%/FM>]͙3G=sHG p_ۆ]jc`O|m~fBtB9} &4\"PX)/처q>:X}QK0,\p9! Ki̘1Zam! 8 ' Ϯ,G@Z'@!=Xz[avy1#C݀-[_z-7d [0vy^my/&̸c5$/v |1 =_1&:>L_JLJy('i 'znwp(:֏_i$j|-=c`JBиn2C> 8 Ayp;%_JM:T!nk ?*k2҂!"vރ \쉥:-( w#_}J~1 e֡QοYqCfpҍtU^IS ԟFې}ޠTG%:TRWmԱ} 6㸛, BlLIqRz>Agr@ w C>%M@VPKfh⽩\Id_HD>64a}Q=ݗK,DRKګʧw{2񢿴:/˭5v~\gׅ|vSkÆ_o(me2^(_?:D}톇R*ˠ|S:=kw_' s(mel{}3[ػV 3@ >aN@qx ]עʰb$B02 C7@{NE "`F Fx#ҏK: /2V6vNwh08C=IP9c}QgH3#i'z,Ȅ]wumdO1c?3{`#PM}K=kt\g>tNQ&>?yjҠfwalCUOwhJa.wހA#`Q.{ 86B(ߩ~ E+lE^j"c=~AH\A`| ছn2!^~e Fs#7؆ǫc4tZPR @ Ƶn _H =H7?0( K7Lԋ7҄FTUUIk:ʄR ./+n5^eV\6aɅݮQ[&xi 4s`KwT-E(:+;,8.U [:?*W(|iA|i }(j#.'"R/(r=pƉzB}SMOZPP H˔u9zLDв <ȸȻZPJsGȱtI_ף|`"P_H[IzJ 6k;EV7w*2|&yPn8(tඕ6|Є:ʸ5wKW|pZ Vfattg/X *1z=vb ʻsĻkRrn0b$cݟgTv r ;z@kmiuF04Fȁ.U: :>F޲L"؆SlQKaa PSȸ'04b(݀"^P8n܆jB5ټ&ԅ^hBh$1@1=nܸF#lCI #m =}G] Nw-^xfcK `x^z%G Eni?HO@A[ƕz™&J mzn̈́"/\S3efdaae\أj35-^ ={W1/P @郯m0Y>רNBe2H \x?,PKUƥqBE]j#(+?}(wʨdǝt/P _;CcEe-,,8Γ4̠efpq\{;tQ_(k7P蟤qS+}Q,hYWԡȅˬڂ(}v GBAYF6тoP?"KoȄ(l : 3RPC&B?TD P6"hg_kHx۲hY|fp̐Du/ؤzukŨ#]>68%! 
_R>C+gDZA`PFnmUn7;^JhXhl<"۰A݀#+)3R:U~)(/7%J깛ܖdx{23]*Ǹ|?6{V6x0蹀f͊+G1DNm$`t D `Yyg(߽I7oAq7f}9U5|bLU62'ih\HFGm*ܟ=I2wk |n fBX\Al믛PԀf``$ |\JtE;0,8 ~(4wÞX;dr ytT@ j08Nw2G3.'aʅ];w4!dިɩ%x4뛢/H.[i D⌃(R1|i+Z6 U&A˺{5ee6ܧ_{|@6Rnmw* VYن@EAV:tsU4S}On`8HvFm(H~޾z n}>x~(ٕ0UlQO RZV nU~2C^~I50.H`lBn=d/aKiT[^6;UOӆnI dJ.F/w6ȶ$!q[ @noST v~ pGxҲe,v(L6+vEC  Yqa?^?6h yɷi#Q?~o(̅t/a}~q WhIF"ftgqsfvKwV{&gZ&m̲ƍDAOOcLȝtT\g|CS|Q/l=Zq9 S.?|DG<~eqqK?:QvOzH5w{)lw#BwbWMвө{-rKYCE!F7{24clpT+#„yu 3N҆%+#|[blT}fBسv^7H`BWF @F?<o$5S>$1^rwzӤ^g-Ӫ4fu& )ArZa]:thF>؄je}mF-Twv0fa(Iv:u"pARX{8?wK+ cWn6\~_[77C:Ps)(P.ctfvK {׼Cag$E6y睧#ax: +a%K%C $JPzݴO&FNsr W<ر`^G]v//wNi sk- <;| 'FGn^۴j(gF56h{i29ܩ{tdF-|}!QMU"IUU<ұc ]a>tIք \䶈*@nj^(9ߙV{tIgn!'~ʷz ӰزeKJ {Mq%L `b(090]'CE';-~\5瘏VI- BQ?(*7O%۫ (s? |kv˄vusYkL!vFu칁D˚z_;2\Z5iG$2I\1@鐢,rǽuq ~!Q60:C5Z]=%WLٹhQ~Zhn%='azLH?~4(ΚAXǔ]_- z(1L?šk.3wnxu%KTg] 6^BѼc|nsZ,ŵH Q@~rDS>D&e7B` r$-`q3Ƨk1.ܣ{%-0x̑#T7)D"U{9FY#m kgC`ʸ4 (kēȵ^Sb8gl%n9sRzIpڲjݸ_~Цg7Ĝ>8mG'j¼8NآW:2e$M̈-u3}z'L(#$0G\a4J]?7`F41>30ce|=N~L(5ŒP9LN=&̸.rX[r5=|y<n{9/$_*_yL܋ \A2թSajI 5A; )߆K~1zq6܈ j^S|$Gbt/a{xw/{z4 {v}Ȗ۪'zO <_ -lT2L|Fx1,(?2_B}I><` *擔 -JU;l$UUWka)fd/1/~ 0w 8_BxڲUo17>1"fi7jT,r-k%}6H|rMyl2)ArSO2/ F\L0!9uN7wީQHۊkuQ+l%҉BL[y =al:͛cu;B5iЮ"(sQV!j]NQ|jܠYx090P\X^7شHam7tqO4K #TƇ+̋O#;IL4`.smè)W l٠uE 0\~&Tńbo22裏VS$kk -o ;,?;v$v`II&K'|1`tܪh ycFt 3.'aE.1Q&0`qXUWn>OF`ݙaϣ2Cí_i PZ_ռn|RvPRN?7J4!<y> XL|UlUtU@2ct}6Pb` gO(۸&x@7(׌wyD_#Ɩ.up |@Ѿ8>J"-niYn}lOH|?HtTֿ:Tn K 1F`ߺ6l2E[crG}^fC _z_`pÒ Α4r>¹0Js~'~ҁ}nx;n3ȣ#[@Oo]{s^>YӶ6M&C/%UYL/$_c m(|g̘6wqt$I^c twhF;Vݐ,G , hku9nAϾolPȫRݟ|l%&K%B';+_2[6H0He,s`<fofeߘpKHuSBe=z>0*(|)P&12IgСCc;iq%6N0~f+0{%ڤ3oPxM a ̵*qB:q{x&g}fBA>p\pk>1c-ڑ[vw'PhLz(tqy 8N%viekbT C *16퍾$CA"JavTߺP̜.'8Ƅ幠R\$#C@8S=9'x1Se yuEE[7O0ב_e=|ٞM2s[()"QWHb4< S}H2ҽl~olHVoG Qڲ@ѧ c|Kp?!>q"S?Ƅn#qlc?]򖺗am.D y6c2/$"}aF-9OGp׳~)Y qK}}!MNR},^xs\?y>kӟ!Mw]Ri ڧ6)`{?*h&e^THiF8> 6 }E02墋.Rw٣￯>#'b0?4[$QCl Zho߮&i"(O((O=Tռy_'|m%aq ${ uv}?#}_\ΉH(7~~F#;#Qp J-oUutEزK%@*mҋn0: ;U=[?eF ?{~WH"R2qyA#lE0|/0DF?mIf" (v .ŋP5.e{n&'/x9Ɍ>Pڿ L+̸@6%cr?O0"窫76®Kh W^ߡ_dP6)n 
h)N<UH„s.Ʃ~[v)$H-= 5 V佫r( |^J ʹ=M#!j`lI8| ~ >Z(e>6}ep"N+ 灖-[H'mXBR#AY<\z,?7E9F ͚Ea݄!@=J\:z} 8Fy\d}!Y/A Iྉ&*u:s%؇c0.%20’7|SB1K_RWX?BAH?`_@!#8UiY db#TǛo6[ [v)$פNzj(Ej+0dZSطC,m=X`Zb{wdӬQ+uFp]pCH9&j;Cw}: #_cXv]b0THl[zک`\4g6S]S*P6)>RpOq>wZr@<`dn]uj=l .)h |}w:T="!:ꗧܭzs*l!Ya&/lBċ^xTlY!qD?K q9F))( v7RФ|Z yiEHNY ( -tmsyMfOU>cB7}SM\a?D|?WtjA8@FQ754{C ?`_@HGUɴS|ڍ%=duUIt쒂&ĶЯSCHv^NyfOCHsWR/ 's_Wz].T PxOٓ: B a>#"lgJ) _pc[B$mCյC&yU5t˔CHy|0r<zMj#uV"JԻT8)\PH=ݷTkP6)\BH|&]VVGϪ~۶f/!SeKe8-_鸱K Lz9O]תIf/!RZ:iY˄K7*|BHy'xbջwoN0*^{5uyQo19̺0a5vX.0im))]XX_둺dP6)2/ dL~[}jW=gj(!iԥ64G>\:,UQv|"mPA WUM(!iݸjҰjߢ:zu㵁FBs{u~w{}$?mQeMS:f{!EO.!Br >B!B!B!E?u!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)h!B!B!B)J"0!RzPM߸_-?W˞C JPZGui\GkYW ?Ԧ9(\BH~x|t}\wzڳ%)WO5A5E5O>\4 }!$"^SSU++^(n ]藍L@ $t_~€}!PLQkǏW+WFުjmJ}!T/LY:龥QF D_~ƒ}!w-^]|1=LiчBHAըE |AtjTfOzP6)L R/ x/GR**BH20v*4B))%ԠlR 8`_@!A1 >]t"#lFB!\t S/6|I6&)lR x`_@!A:1\}BH~J\uc9!!аCuNFf?V!^S%%%zYdKHfx'nZmٲ=S |zf? BT !$?H?4e >7?nCaԩjĉZNH>q 7GzéP3gܹ@O2\СCz֭j:_@ +t@  8a_@!Ax&D u&tAm1bDL鶔aÆiUU =#GTz: .x櫯{WSeΝZd:W_ $1(Sa&TtUs9:?Qsf-a*f2 B }Bw̟Q>̾ն6[myM(x ,ƈ$(ɺuL(%$<#&T=#S_ބ+k 7}~}\ҾB/Ayg53{ 7,AXrzS- XR(/`?Pxߖzà:7tm`dO>sN!OnB',QUUM3{Il3NRr6a„ oժ9CI&~$PPt\h0K.QmڴLh62e;#6*"%Ǯ&;4G%&MtͲҺQ9u2M=40 "~lPhBumJ}}ސ o.gȧg΍Čy@(OM폷ϝkBѨsg5B+ڝˀuTYTٹ3Ϩ/醑ЩSZjB٥OM)C;2c U^^_~yd;+WHcD>fΜiBJ]x&Y hhh¢EL## #8s=jG\Q\\@j^W/}[}A6QnM=6y*;yeMɧdG4.b? h)\iAHT3TfP͜ع)ڍJu3{Nw^TiHoA׿={v?BK/P]{6@BxǨ##H}SS8i#>'|*)'~w6f-j\~3Uo^˚O}Amwo>ٓ}nRL[F,ܭ^ٸ_?/tC|bo? oTeA.j#CwkJƗ\pԩ MHҸxsf0ΛG aMj7AVF>(~I ϖȟFXI̴k7B 0T\ ;=*#@wB Oyh>i@s;o_~Z6= _[;̞R˚O}AmSitM z+^dd?ڳDŽ2Ϟ5kWnMo{u847:f4!m`9zmՆPeMj/UI%c)~ԟ'rJ'xB 6,6lc?;!IÜɘ;wn1cƘQZn1q-!dܻ,v=ؗzW'_)izOz;d=t;Ӄe͸l®'҃x=,X`BJ}&ځN:)нEؤt;eѭ_{q1cA "HhcvH`- ׾Ozn2bS"B!|`CEzx\#&>`LQ\_2!BrKF?G7a…ZݺuSW_}ur`qi8MH/"g5Bnm -JQ(PuiL4I~ƾD׃۷뵤[!+qat~VܧOO%="Yz q92PG2f\ zA|Pz뭱 jMթS'Fiڱ#,"vYڤ0,J}8֭d ̇q4Fk 0/? 
^[9dT^RŴվ/i__]J ,ZӮ;D_v|ؔ_JD PNklաȈEAuH։\P &A҆s&Uwkl#x F\,^R>r.]N!m#n)ƂAkӠ 9^䃋\!gvg@6r 8E ~܎#7 3hX!Ηs|ٶ2Y֩ 3ޠ;&r@<8PN؇{<]F^2Vv!u<֤:?%ƖAN w\ǽ@e{zK2gtGtiqu)g;M(Op\o>$hۓt8&ruȧVr鲑t?nu%</YOg\ ;66!nd㿬P9TYiBt͛4,c_Y`gQ'/_ ~؜#GPz[DSNa\ i\QCy>U5+\1Ӧv9Zɟ9_Ҁ MEzͭ:ywOE\~A^6oƄw^LAӧOWsQ=\l2qU[Al ۸̄'/BoW&Li/Ph']vYLkqG,þϨ%(~q.8sbN̥E:}q- is rGx%H'@!=Xz[avy1ezHdukVvgBq/yĽލF }8L8{zA ^'3sRÏ_D7N@S9qc4ҵVjۓtEDb:}W#ipn^T_ ;N5q =քAiM ib+$}EW\aVCX 4H!JbEmh7t +NK{QCt{4p5~x5vXbhF {ZB \/i(k}U.0̈tFŋ3H38F~ȗ _H]7X/$su`AiP^(!ݸ2Q/RHuSUUS Gu #/-h? A7,52Z\$zyă<%.3w^x%!>i/*6e*x3X #e1/POPZ#dI|8]/R8_&+vpq6pθ/ƮjIKdX@qX rmlo6tG6tQB|t(~d~F8G|qihή&yվo.'!#k(>$"[p)8)?A %}K%l[n-T`p?H?`I" Hi,o|Wӧ;(ڷ1F~X@rP"#x鎃e=Xg}88rY@ZPX  _|_ ÂN/"u6VZV}y'bA!iv>}Iʸ m{/e#Ϣ}2OlA\+%PR Mq_5g oBJ}G&T{8UTJ x'xiy~^j/ (oN ^:R67^tE-/)wvkQ%'GE|zAۖUp/sOH_d\>F7][%w% mt 8CVE;"8ggF\xx5\۩KF؈!eLG8>3N[^DIu\y ԷL-'A:6;/hЦxP6u<A(#<:ƮL%K;}DЮ vZ hbāM~=P.r8? !#9-\"@! >(Ӆts|j{2}vy΅,br#sn-ʽ:aԉԵ%/!ס:4ڹBt0o {QbǎF)plN&c6VລÆŌ40B!]p)a'k#\1hqR{כ=J}_&!6pu^}/TPv =:/^Zk >qcZ>E 10I`  ;]>N2{dlWI!(c^gP_݄7ŵ#HP&NעEL(۝_#ux`Ǔ3[n1!oCN\Ȣ*SJDlCU0X(%ȳq@Fۅ7{"F!%=6aD# pJt m;+V}IMao>EDK!aO4PǾZnOV{)Mí⌗>0/sدeNܔx dfLNi徸jA>NhRk(>D#Ӿu~%. ۣcR(l}nqddzE&ۖA:m!FG"iۢ֩p?6퍖AP[}e츓5ken cnm+h^6dȨ9(/ȉ[t;zq#[ E(A'U!v"¨'n+?+|wKY0,N/0zPEbX2~ Ay~0E |P|c:NĘlWZfNl0憤 dϵ#ې7AéqXw8AS#Wh|4@Iخn;n.lnɔnJs=ׄL[dYɾO4ϋ},Q^~FAi| N\喍=Va+ Y^*O?ԄM(38q5Z ۘv톑: Z N0xlN:?ËYۑ&p͟DEY/zp[ i^y{=/yݚƿ ^/ŢrK)6 .@~w%Q&_gLNxw{k29sqSXhP7Qx/Jלڽ#Tݿ\n+@&^}t:m3ȏ y-SmA &sK}@ȗ0Ɋ]Z>0Vœxe xՇ츍-Gs0P?Tzi]K,gbrq.O'(%ܞh{_ҹ/{ a|~OGRgxqKdx=o'": _(V'O!e+pM6c#s 0"SQw` Ⱟ b  ,=l^;]2:)@f&m<5Q~vK.1|:>Hl@By87ϝkB2ׄ9P^Y^6MO8!6ۧ+wClGSEqUn./Ϸ&=J: ny|1cKJJd kMeRo\V/;w4أ/le~N^ڄ2C?f̑x`XwAhX$?e~1A.e^݀E/xEL? 
{D"t*M> (g ӊT'˽[{~1g\dk=]yצGm0b٬o>PT C[jA۠^6s%F4P_x3 &B;!Cai% ޗMΐ}aSs3Tad^N ^I6!Eh+0#l:y6( q70#t0þ.0%z7lwkGÈȞ(..{mdBٱ`^6t7=l,qH ^Nv~ $,/^xaFFr?PH00bYFخ.ln~WI%~XN0 y睧ax: +a%P1yej*R5~شl҄Iv܍+„jxG&|@ |p&_yBQ/{>!lS&"Ş6%zi}AEqPtG!I(۵H޹`T ˹^+nۛ)%H~s-m@D0>>">󂠎du< $?9vNT.}.q#DnsR/F l]l*=79-]ʈdN<x% F`L迠Kh+[1ym0mdJ5gBJ5hN1,0bRİ${`nymRaG"[ůq/F]5vıqd/xYtIN2ex;TQfNX?$N7U &UUUrx<\Ak7{T zɄ \䶸6P~1oΜ95*# +%[lVg&po4ɒJ;]\ڮ0O"m$_yBIgO&%r/C߯|: |m֜c>#!ag;)e2}h}n/_涮 c|%"5!mJ(| Nh?;]y@_'_(uLC]YI1ڌ؂B gBYgQ:~eԐ 1 2-G5V&ڹP~@{ȜFIaz;/ܶɽ%Q Q2ߏsGBnDk7K|!OŒر883L(GDň3Оd$="񪫮2b$x)ɒq#n[?/p+큉qdOn:D ^Pess1וZ 2 p7$' ]trUW':L|j+91tS>]C5Vv$x)CґAFiQ7 ]6FfDePj qJbSʜ*b9r:<ږ.og@xspTs`\j#rҹ/;(gX\[}n:3H6EqFK2B.9=%[AhQ4*{֮kTwufRj?v[D'C~m_^t|KFpia{ܶ>;緽^xK/aIM,}{\1$ bTKLK=bs嗛P5"=Ge>&IOKz!&?ބz뭷L0jg?3HfC@:q{6{&NBa=P0Rq&v'.lnh|bcZ(xƄNRch#/1xxPgO,uuk4KF> @]>1-r/r'/J/1SlIsr~["l%9C!k!^DS>׾n? /o/grxe <y~ozR 7r"bqA 겴H J?O6 <2?Fvf#NB=trcGza;# HˢӪ1q_F 9qY_oyԈ!#`T1yFy\T 倲niT`][Jޜ}n:3ۈ+l1R|=l_FI6q&93V^koz :O굍G}sN^\ndQ48e9Mك2ns}&XM妃=o[k_Wz CP 7\M(~RXrJΨQ<]]nW'lBJ}`mXK6QN(lVepB:?ڽ{$"e!6,Ȫ-ZPc LTm>~)ڞb)/>xBL;6riH`Rvo'7p,&ďɸ 2ơ@|5.F;) @= ,Q G]eć(_|Hp({ܐ/5kE49˧Wu $j#H|r ڿp8?ڧH`=G!\sQq@򠑓l/E?PO }?Z7b%6 ̷DB>? {(?':c_Jȑ?BI=AI6~mM:xukP&2YwEy\f r>x$RNcW*9n [{m{^~kKɛ=&sE:pi6}/ٮv lcd>EkLF_sZ?Ba͍8k`9uߺuLoSysoyUU׋ܚ/,ÇčujI>0D|c^K3>0Z!2r6jd}`BZPO\q/w#I bAY}[kolEy)DJ\/P5aÆp'M1rP#tĕr'Pyl`LGy N0$lV bX+4~rnn#l-cƌ1! ?-ZdBJwq&}r1,^pȳ=^ bmX='x ^/}PD|#V=9po[1ůWzvĿƁ٥Ӄ$=X'^(Dқ F/u-Jq2DYv| X ˟Smq"@^i!Mύ ImYܯ]|7yL<[&<niMFU:#K[$5hv9K{؇;F·H|^#dk Hp+L/F?UV~7H'$3P. W!O0`?^-/6ҝjJLu*mЋDiAr1Wک߯+u P/@A_ EMH8wE:@ݡ{uN:$]n*ۖ1K&yvց 7a>s3 9^sn[G#0r(죐716+s3@}I҇,9RmU"Yt|Gq/Y*폃*o&'/܇#S_^}н9+ !P{f 7MShwa0F~~r7.؄8\al=Z6bD3k+;=2rQKShc UcKdBK0qrCUݦ'=۶5!TavrT7n>82gNV #GW]tQWE3f#5[o5[QF?4[HB3[QwqPbw| 3vXPt8bSOU͛70O>3'Ҩys;.@? 
ALV^v:EY7P{: i09 ndykP/_|qӧOwf\ٮ e4yNl6K1B;EK.Pr~װF__dL2%v>(g7#tz](Q,J5q!UFGɧmT~6P!r7%)u{ԕ_d(dqۃMsa~7 !$?OMd#ų9G^nHpsF[a8[L̄qJ[|T.B}PjcF2pPi~\29Q D`h2(rCu*f,Hd|nܐ|n%r.@H~?|>FwHv] kKUf&$ Ke > [Ĺ CW8TGR|/ pssqJ|WҲ2EAc#Ygbh!B!B!B)-!B!B!B$4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!4B!B!B!%ULzPM߸_-?W˞C !$++QCiեqկe]5zjPzh1*>]m;W]^/1G !٢^=հCըKլ_?fpb s4/ &nkʿP6.P|UnˁQBSjYVnNul]@յq(!B)Dh!58)tk. Q(lPLQkǏW+W|掷ުj|'PAp[jϩͻ1{ sujQB!48o?.p7=eMꨧ:gjSKHQZVyPG/ $=2.7lJ}x{HVh(5ě8B!чxjԢJUq"AH!>OmFuo_VUT=B# `ni¾p]~jʇվtHFӯf!Bwh!|.`""sEf+8P~vEfR@|W/ $|m0<5EHC!Bgh!ڥ۠9CH?Om7qx ~OHgNɥB2C:.z&!9#~2!B :fMj)ÇB 瑋*՞SravA%/!#GC{)9Rm9|h!=B!$_ѧ3e>"<zǦ)S8Q;!E7?n̒J\-y7fܲrzfB!+Emٲe;TYY*))I'Nj SB +Q?~Y;~ Bu&~a_@H .g~ k z!B_|}F3-0|13l0u}Uj…٪z|)B6RooP1>'Hٷqf+1 A/8ʇ;lU+_j!BH>ϛ;0#Gjc .??jJ~j„ K.R77!R(,9BM), = uo׾˧gE uZUj fOn3:'/_nR|MF_@jo].۸~c՟2C~d'[ΜźX<'|"B!HJ`<Tg*o.l.dВ%KpT?~;v4c sv3<XmT+oIҺ%&OWk @t}\ufk1xpk܄ (TLŚw<]6a+>Ԅ@&kfrՖL(|8_ag͞(餧~賶|۷;V!BGR2xb/0|}NeYh )sN ۥ۠AL`LrsYիy]mQGnM3˼65q.I7߬꥗}5Jv릚o%{tٴ:L7uJ*.*?$oĤ =[P5mB.!BHvs8Gs9DŽL}}tN& ba:`me^˚}糢wׇvV6mar J VGy[=l6M سv^C16^>'%+Lv^mofyj:uwzM bMB!b#!_| |gOfm>UTɴɫ;|/R,}H~iٛ9jsYwڳDŽ =))Qs4=Amu|\mۚ=D.gB =N~aK3{I!r*n !BHznپ} kG <8I'dD89&WYY׺uk=GH#9Wd\dܹZe}B!B!B!$ 5Zmݺu&{.`1cy;o֭m۶u"#>n%<9rCrIjx 8YrzS^s:jZjyCApqaƶu܋{z4i457=7yz?HﺳE>PvAtY#v=q; z){GK~k~8O zO@:~X5qQ;M3G9K9X-,5˜].ܣD\ą{:/^/<#Aҏm,v9In0Z7.".F=u &HB .c6r.Cs)kR-g[ve2wueh(s7p;n;ej_km@< r>[Qؖv/ O۸ rx ȾD2B&Q{i ?Ս 18\~'vBV$Xcp`mru׹Sԟ2C/c9IsYfO<؏הc1VW_bcmO72u(dyxañH|AO!BP>0Q޽{0:t0a^>hWk&cygդItUVXc7D^$GH|8̙\]y)JѮa+'udžT]pD}զAt_ȱeg3ʼnShEq1Hr1 uCz57c`cWthT PZ^목0%"Se}0~X_Q^) {P(nM9z)5Rai,Xmͧ͊C  < ̣|Nd줓I˖I4+)AtlЮ JֲU5Ksf }2(-;yjء٣t\=# ^Je';,kQ;)-,m׍=LQ#$g\36h|&.Mϰ0FBYak Ofԓ֣ 0gF"/n@{ាk-)ҖhȖMҥz ڍƅ΅ ZpG(s u.v< z]@~¢EZ0)(Q/{0'm Ѿ>hB&!^`rUI"]'N?"2BJz_ioNÏ凼0'נ;nm.WȊ}]HH'қVL@pvKUjWcOz1{_7ڨ~:A݆zm/};c蓪s67nL ē(NOWÉ\s 2uyuMT8؇cneSkܫߵA}!BHm#%ȋܡƘak׮jرz]]z饱XR r(h~p/?R , _(rJsPC̝z^)s@qp'[~ov 
|QcÍTPgːj˾*λ  ҳtA{^@5r޽u^p ۲")' eÞ=u }Ԝz[u\X\ree`I+mD+g9ǒw & &N/o3Gc&Om *ֲf ; *x5u}_o:޻HE&ŵD4<>QXb1ؐ'$&џb"JҖ9wgԝ2yusϽss^--y ~( ZRcTH3GCJ6mSWԹ <|^}.mSбi{~l#j)]7Ѿsh_;餌iMOl@uh;̌jdj)Pٱ{2M| ;?kQi^0{v=&[ ;6DG_߿˞G9)FShy^ǟ~]OIt0/_<όrk}2 }FUfPjf>lW>վQm{@)>,۷or=3dtK!|%ҼE:0lh t,&@e;ӝfygm#iH qzc]h/ -s`|}9Sx>ھ֫cֹ)0}skmSt i_&ME9;3Hx6Ίy_mphNAՒ֧҃^O"a_=mه1ga^~\J/؎`z7CX PWퟕW0p5l0{n>ރXzk^pYʜ];mGR;j}vB | y5>utu}ux]Þ/%cgOF%|6>AZE}1GH{џ#\۲BxQGm~5#U::6mO^(h2^o&״&RFū??5n[SzF~Q(K&WՖm֌w68Q)Պ_sK[{αe7>zQ4dz|mVeϣ9[P@# hyOy_cQsaW>l HOj?U.q嗛~s9١Nd{_׾e 4^_Z3']aTHնHʢARtxK&s/r J笗k2-/ll_pbFm&&P&(z:עZJ( 8C`e|a'MlZhӜC!T秣G>v?~rfi?#([CP=DUUlPMFLD6Z"~;`7nT;@_Ž=i>HV&SSc'?7 M!_Xl~+Cߋs;v+x "\תv^c)={B|% 9b)VZ7ǎBNXAI02xgѰ+hƒW\ɘ/9؎>;V8oq,Y߸sޮY;NfephSt2z"wrÜ9sl99slKZ#G dƍEr]X 6EL[xTD?&rlH\?>-FGSͦ`&۟>sxkw?g W CKQNiv~JYvl!O9,~UF[ {}ûq]k,p3$lkzT֥s"]ʶo]~Muh `͒lXf^lNYT5r͢+ؚi@@l~cS=^~/sCpH&0$V\^D6ΚJ;PMxnYeǙ--;6qm^Jeks5:nv5Rߘн4 ˯7&^|ϐR(nYz^l9oFg\]|Ŷi7Ynԣ#GԟmgokAM;5oۤSstsixZ,m)ܔ1?\QYfgرwަ,^G5S_M}vuFٿVVQPԼ?xCl6ɨunľ bn ?4}1H~mzu7:5VТM*|ko-G}?#x>6:}~H8:|szmN=Վ#O6)=)+ѹЯ27&}+?R)U)^{6=^T0aC.PpU7_V..N#ͼI$z=zQ>|:73lHv/4/#롏ߕ"Tw`3as)L<ɷk=rooXY3WҨI75&>e\o?Sϯ,m2]jsji衹P_`]85VgMO]_Zۖ-:qEcyb||)_릲rjǪ%@M%W5HV&}8 qRkriWKO'Teğ:%w*>醅ȾwxMr 6ǶrCi3>'A;+߀~SZ4z,^ISf@R͟A=u6ብZ?Æ 3&M2ӦM+-b˨}kX ~k^,6׏j lyE\Vw:S}yL(Mӝoٯڤ^H33X=za?=aԉ?0!oizhmD ?Ǜ˚EwY5DuWCQ5 f=R@:|ivm=zʎP('geH!>_unxU7';*yMuG|v1wX|T+g_pl)0+stn/{Җ%kٱ_h憓3ʼZ+ra??{Eh_>—/ȎDh}C/tDmS_:_|{/b tͼi=?=7v:sF3^.z[}P''C=J֞' Suӭ[7W_yħZ&է0_l>9}}L>S;|ps{iin99EH Ӥn>ɚ~HB_G.9ܸծ.<Ӄd#z@E =׃|=֯S2?b{u%c8цaR E]gϝ^;vr@]r-@$`u):Q/жgil>_;ya ';/U>:Dke#&\4aeiԵ+%W\ rt scѴE/!$:oڞ|ky^G!\ߝd^X^ >N-\}Qvmv _C&r$:g®wxC:_y?̌4s+]ɽTKŜ|h 0 L.C۟ukyѹ}s<:?>4ocU<{:` r~@qQ=π fiuuEek~NC]UfFF5MAEo,޹Xi2Sb?a/%\?/ͪx;wt|rVXu뼄Q/*k^/̿玵 MIر)6'QZelhڰ+:*[•*((}8fW*U\\lÞE9tM;tSΝkz"?S̉'h,YbLb.\^3]Sydj'VpZ梋.2ݯ\_y3:kE&5l3/ . 
c1MYt55o]8=6\2zhIH *ZP% <]T~.8)dj׈JHϱ]>pSn21jԨrB`ࣀ&,^z-pJN}Q]>`CoQsY _ H?!no@eR T^z˯i@MrڢrmFn;5{PQͣkS:PoX`+L=_nw&;1t=SquM4ߌoǕAu׹Դsy|Mfen5DF͉=h5̠w6] @UP2-wlSSbnAӧv9j/ua6EEnn.k>5}ٳwTY[M۷GFP(Qf3 |Dyn @M-@]dW6>uS@R೷IPCީyƦQTglƶƊop^f*V1vm".(K˾OOnP9TG^exPz-fvn &P>Oiq 0jiޏlv\WLk\|.ۓ30ssQ>u-5|jBm1~vzP_`-lh+m׏>jyٹz תxs:w\.wa%3uy7Ȟ8ǜX2ԫ[|>kWɝ]ٯv9Żmd*ԵIӮAݲҾ9iz9 {b3oi{Yiwے%UQ׮~vYަ!I'r, sgnG+2_/6[VWԵiiڰ9e7sXIPJ8r B<@}@ >y B<@}@ >y B<@}@ >y B6?z5Xa:#g׾@͒%1~c C O6lπiwү|8iFͻp_!wthjY?x#Ԥo^K T){h $1P1i^~=f[pp_#w]l>>@=R pm!AUӽw]w9 cuܪ_7{Wtkw 4ٰĕ@C9s̢Et֭K/d.\hysAe͛6m]FTHbS_j%@J{ Wvkhz4cm]ͩ=+l`hQG}\#\u0:+M=Lcus5:G3ǹCбerxR-R&={9t  ~,&ifGjAt>yH*[v5R +6kK͡j)lgvs;d7BR1&4TeTcm9po_Jjt{LnT|m۶zj><2~v̯&2@b /6^,aRg"jFd{msڧ~ɿldVAy];7ػ+CxvJ֘3ۋ7Ȍz~H}gon&K]ɘSN9I ӕSP /=]~ĉnnY?l0ӦMG}曓NzXԤ]pL&vEEEui(,,n·_NᗧFO5&&L"R1j([Vh4i$[V@sr߾}ͬYl9$9s^zٲ(KOW(H1c~,]"Q2p@[Y`-xbKAQ0,[z9G}\ve\׿UEK_%ҷwJ760omzɖ4c+lhNh[6ZM|X\mf閲?]txc[yn:a z_sff=W7e=էAcHi?fI\R3yP3}|gmλu:M){nu8Ӳ%um3k}|zvsA>Tϙ:o=З;̽ 9iz_ƾvOz]q@8]檏 ݧ;736ǽn]󟕥]|e;o旯ߞ߶d7M߭e>W<:~}>{|>] Jaǎv拋/6ۖ.lb.\h?|UZO=e>l1|tqMΊG}NKߒ4=7U޼'[cK/5-x[,0kK-ܜR~F ܼi9;ݭnJE܆wޱ59:\4;ZCItӇW_]3t9߁Zjiя/΁߮޿aL3Na?oڞyft]oʹeOYϫiM#Mӳ6Ӧ;KC/<| 佹&_E[7 oivf{ss#}slS;;V#lM_K&Z-bwq3=4m¾u'T FۿTyZ_"moyiPݦU}N:ӯkG6`\3/z>ُಱW?k1m 4?52znt^)Y?ڬ{GYZ~GϔϘWM厚EuD0H9C\)uVJƜ|ɮF8 u|ࣀ((\Q@SBi%!K/nLji} |i-?>hG}5?=W|ߦL:Օk>{DXr4kNhJM͕FȬu|^=:(X=tD2=[>}Ȧk_!؇A]@B_+O65M;&axJ{@&EekZ&f}ڞj!ߏ 9:x':N헎isKpbF拓jՃxs+k'>rH6H}"=8XڶϿЄ.~۱|-<Q=CD,+~ ^LG:=]zP뷔gaٯa{((!T-\kǞ~9W^^1;ȟ_}iޫ% 3a2. 
r}GϛWny=C?)z-"cpeޯAK&٠Aa 4mnR~xDcRua9۸qttzΩ9~ٱ֭c X(s ^[nIz)9&w|A{ $x.Z>*(8hަI68 .5œzv:LoO}GaG҆#٦PDܥMOsvF!g_~d)GǢҲzO ~-L\uqvkO|̹Q@L&yA蓼"[l *W 6g [R"32bD/b]@&+W5nTHfԴifN[3^Dihݺ hTIjۤ4o܆= .\hCKAM0 6'^{+ORy +HJT#~9 FWmUׯ*z_AWʚ6#/w4z60z2~]n= { i}틆C_dשN|T: s{-m3'Ι^/o|{>C@Ktbiy^GItO>ׯu-E OW_˄ ꁢy7xGFF}OeI^*TQԯ)vhYGǫV#G*0) m_tnbQǦ}mgZlٱ~)>-ܸ;t;3\ɘ/T޳B_S &`R@JBoFh {5wy繒1|+ESū /1gu+!=_4fdaNPEM>U1z+/x(&e`IӃ\9}0 =eGԃ`=üL~U^Zٴ =X95lRGޑaIt--ܖ֣k{R=&i=X2!ڟDǞ#~_tn_.r/&^Kj=^&r]\ Cnz,cK#Um#6(W|-9Ż7x~5zu`]F~!{o jeS sp#6IE'[%αou}ZFCbS]XZ$oLsY T wDkf)C4hY5%je?zԌ[m6-[ͱţ`bOmNyQ(# %9Z.?-+XuݦMf<5G;jft:idzJUǦ8 <:d lYǛ5j MIlhjzLMo-Bj"7Wf.l =JIGt|ز~m$;C;|p/+cOwaN:ύY.5d^NXOIP^fOҤQըxMvS\?OW2梋.rpO>qp'ZMJTkBtAȯIWVES*x= {xm|-r>|haU}= '隨 /===Q>[׾+߯CHzq( #Hxlm6F1*r<=P>sfهߌ =:Ew[WraSeB3χ)a%%/ח־ɦtϵ|ϟ0sBm/l|T 'cm_یU8_1уuP=# Ћj:.>|h &reگ4=GcDZ9^`+ʅWɨ%)~エ[UcOX |bG^ Y>"f:"]>pvk }b*/{LiK ŷsBZ.Ҝc>{@V)ڛ>v`M`#ش[lR\\loX|[\veTՄ 6Hn{|YA~]/\,8IDATW~8 y0tYK&܇31AB S c xe*?z,^Hf9${=(x*lzi~".?+2fAkwٔ7WG|V8%z6L+ ԃjufY#}=ko^ToO<> 6e4'D 2*ܜjԄ}Y }O$wbe8_5r͢沒>.\haK}>Zl-J) z>F"v^Je63wZ|}ɤl3;NU.G"grX֮uMM]|̯~84xcj|;vW5r'&Ӿ 3}unIS0:V;խH֨h}~| Cg:=Db;a8;Meӯ;r_ˮT%㛇g*hjv=VMݷ_wGe;x[={*XTݾxF<gXdCom>6UA5VRHcn4I(5떎& 2@<7ﳯ͔䜎ǮP:P)ퟚO 놓E'CbL>SW4K 6M :4k5Y>lW-==JsJǗ.7~xW6QAM Ԏ:{=Q-ٲ|͒z~e> m5q`:g" 1Vk[%kZ+a뭩cءܒ",.G|-.`?B}u0[OXC~= +w9!pWAq6jQS?KozO}+O+;+mykyÆNPּB}rC%/xQ n|:sͿ젾zaݖ\){o~?ÆQϗoyد}\6}ek`kgɵ{fъ*% }>w$J`'شۅ&`qڴif޽) |^Sأtnjc̙Sn_9\l`n>=S? 
DJ2P?=Ζ!V&^eyC}sRɵR>,R,h~.z,C` 6=klנz|^t=MpX}iJ׾ 3?=ڇQ\#C~ߞ.s ZTqu,UK7׸Ўô0_K_|'Teğ:MTEkP."{7t~d6xuyӡB5~+}jҕҧ7aQl:O^0mD4RPҿJvͼxM?ϮTrĚ,W:}_ozN۷-֓тM2pς#BjV.+7wFSQ݃_BN-]0j+^QZ~'Oqa@,]'Lw$!J8}Q2fRmLu EeH?DVͳX>-KxJŒ"K6T;E:c|TRu..6 }:W>m|7;ӨK[xq6$맦NHͳm˖qP̕9ێ+o֭qDžaEVHn֧B)1l3J(^#]ɘJyDm֬woxfGԯHNT_)=8F&OVow_6p܄N:ʕJT=}q K\ӴL¤^DZԏKlm`.&wG|h0-?:UdTK@}C$w?~ijh癣G:}ᯫ~Nitu_<էIϷz ^gc__}k&HuރD1Mz$#F\}Ni<ݫY{Z5Mfz=iknR!>tgKݷ:{Vei_Pjl[kwyj \vU?K8X-h nwev-B?n@EzٳeӤ]o hGq6)di B(h9p-~V&ԧBF:GގI;eK=F\s&Sp=>DQpFM9n*>Wӯky5UIdٱ _:Ŏ,[`褲Ɖ\fEqBx [vlc4):?Tj$C.934BсWrpFi .:]l "({+Ej?vKR 6q:;_ՄikJkYHtKNnnn?$-G}h@hd*u_B=}hiOZ ?n_we`.\D Oڦ_!xr:>,93؇?O0lz0Qr[Z_M5~{ Խ* 6lϗiܸrW䚥1e?R+Vkm,}jyU1޹ :|?=CbSϷqkyд>;SsgHna}A+9]n|^;($мo-iLiiOpCKk~|CYmbmk;$ L6|:vrL6mMU?M8['M6ٽ#;]wѵIk[TO%MfQc_Q׮NMU%)N~̆ ww3/;O~ak~SWR ~ 4tKӡv]Z(O|[6} M_;_V)Z_li#Ncft t ? fsM\ J_O>+ufРA6Qm/^l"gr)I9])lՔQ͡` #K OAKhjGic*ԷH 6 jTClRդS㺑9z?nzzpAoKv(iZZ_h]Iɿ`&?Z ZPU]yNE 9jKmA:~߻쯠ǃΛ-㩬yz-^@:A!^ {k-.l3IWqD YSDTLYmҵ >`wa!me#>X=9׽A!\mHtS<%%[gWoOT)lcε>lϷ۔_xf͹eYJ]Xa7ٔ; CL)l1E4=뾖yz-=kCA;VrKoLx<iIAMk?JO\$gk:~?:/O KV=8o<=Lh 2Mi afzh "Ck^x!z{FcEJ弦s.9to8iӴrN Q=wK~x>fWFk[ssԟِD!((Ҵu_-{cg>ּi^0+16mNO]_dqzW*/޹ q [_nFOG}o˄ZR~w{KrR yAE,\0ڬ'M]&xVrA7t;Ty qSiOr:-vmrZ t*3jر>.}l(v_<vung͚jk>ٖv.<}&m g= /v TI6NSoI{ @Jk̊wSriK.1*>gZ^? 
qTJm.Q#Q0+ZdAiVw^z2MٳmX!6QP"G."W*uyR8goDK0j(/!0oBvڗO?=1"L2:l:g:M/A\449/mj-dT?+oڭI{ @Jtt`.vJ"~O|0VtTz%l}In*\߫~}Pōp+R?GGS??69MXi5lMږ 42>( TPUg GCl@<]&7aoRPGaȞnn*>A󯹏)VKn*!z|m9}N=>5L!5W`W]xk@G//0~ uۃ)S@Q u Bڲ:Nމ/6%vYb.\Ȧۛ~_~i4jķgnsԟ՛vsP[Hӽ]Dҵ=liި)gmX>5ܹsM޽mYMĭ]֖5̠w6ۇ|73ئ(Ŧ݂/"|5_ӧ@v|5g-^ymTY[MRڊkqƹR}ox&n @y#gPU{fw9'R|>`ZoMKޒ{$)O fӮ]i-2ݺusSe;lWy^A!]+^=wP3e;`O/~3jTP |:\~N5~x?5~PԵOOws@"!NJ :4osMm塀tԌK6jl~<hEo>HdF5VGr{l*@eн&|H5}Pmmc̣_n7w~ݬmZS_`-lh+R=@/W{m3_?Yvfn.Dje:^{tuNFnnڽüKf f n.=73?q9dW7;?$ A|\y B<@}@ >y B<@}@ >y B<@}@ >y 䁂%\jfʙfOk*P)kZ5ng4tju9q[W!U{6n~> @'t962E k*^dy'av:󏺖?gcvHj(D7H SRsL:n Qn {~xͧn H ٻCn@vw]w9@r>/^lڴic ̸qc1X׬YVnVoMȦ[Ww|M䑉'{ǼnNwu'otǏ`\ @.m IC#G2T{0tٰA 7|9,_TTdHAoFeN?8gdɈ#LnlK~ suk>O)bw}WwKΧc3 M% }2 ])>3-rSɅmWARѣٳ܈ɓ'z])bƍTs938()(t\tEvք{xʙ >Z+-ʡOn}]7j(cƌ1_-3p@[V0s饗r<7 pST:jZli)SD ՘4i-dZ;裏.̖($۷oC7@?NyaMJQ³lРAdԩS]zZu+~;RY V:< ~ǺUC/|dfؚ!?7\?w}ѾLT"Q3d6/0 ?W2_~K.ĕ"O^Tt챥5[x WvJ(:_FUPfof߾m.4S?c;Z4qv{v% >QBxU+jZKsiߘ";_9rC'qڴi]ʚYM~bυFpEz=ouGۦRF":.?jve/N,[M5RjQ|3o(KP}sX1ߙū?jỳi~Z7׎6ha >Q"XA’lX~}tA?2MFرcm+/}ti VXJ.*|SFKq(TRk<+:oMO& ׂ[t\J+6l˶c9ƕ"U;[ 6.w%Z{3mQM 3gΌ`]$\2L_U|r裏f9ڵkСCM>}̒%Kl8k r,'~iZrW^)S;j„ ej(0_=@蜝x≶m{~_~esr}5馛̝wiˢCA?~m碋.2;v46l0Nz饗iR$)O_SF/Q'=ŕ9RhcS@BǤmtÇRzy;wv)򁏮?lat.Db:cg͚e{Zw.;/؜e\ ( }{& ۇDABSNpL̖{([ҵ۱BY\S"woa/9Y ;uqӆ-oO}W 9~iX:6GΉ?6e0?έ n h?f$ѴkԼa+;yPPۏ]OL>QHHUơ քуT顺js$bRqE`1+PHKizĈHy],BxAB,Z$k+^Aj Gti[jC@0l뮻zSͦx4?_UlPMv+އ gvAbkeh{Ϣa1]") RԸ=V( TF7ijx^OyώOڕ+Sj7nn8ǾW۹m҅v:N- dKվGǫs<:o_Lھ+ZvpRh_oOU#՟O>/m9+|C> $^$eA~ߗD5)(Q]`.՜`pAQc aT(^XpꩧRـ"][r-XkLJ7NJ\z饮>~SW3b?(P!u-|o,$h?ji4ٲj.վkuwsľ/DvDԵ͡vc7sdҾ8}`aA(W{G# TWi>5)tY.uk޼+mIG}d>Ar+:dMv  ˗vl|_}WJSHf"Z۷֭C 弯4R3[{^ n=X㣲)\;[D5^< jgZ&Ҿ~M"^KL!UXF0)p4Ȏ~5_sP3$Z!m8;hY: _5zp+ܜaQ"b>n}bkRdɼާN BIɷlK'Rͣ#G"p*KNloݒ gq`9WW9,Yb+~Mߏʣ)x`^Mà~^ D58TKǿGCːAd7_*#I˄W!]3bǪ] :G°Ry>}~;ٱCTQЊ cB;_GUKwT  WkKo߼h[5յ=H&'ݚ?-[J_ b!hZS֬D5sdjhTi~ۯ轧9L(eZdvW+X:y m۹ŎpvkCiK~|7eP-F-\)*bˎѦ'QjrJ+I&EeR͠`5eJi?Mf dQ#FiӦͤO)<-X*QnŽ%ސO0MORb-[tvXx :ݣ|y"cFIC&X:ԯYҨ~;ּzC&~&ty4_(S|Jb÷Aǘ V$v3oLP˿mq6MspFG}ityBʡO&5)*[ǎMeDX6{+S5`;]Ղxy;66tдF= R=\)uk"Tl\rE#@ī 
8zfCN"ᦚ<; 4JSeaTkG6l[k6OH[dvp}_o׬-Rp6҂{"HHw{z)>֤J*ΗL"&?'N`ngu+uQG1+FV$ @go:OoTty,ZG0  $WA^ٸO<ѕ:u]..߼Yӆ'_GMw5Iءi|3aL$PBohn,G( kL}ym-Y;ώp(<ɖOyώLB+Yk;lz]SV5lbϧ?o/PSzc5L9[V RJO5)*"vI… DYd+Uj6*4M)49m9Jvӧ+%7Gp\Qtj)ǻRJqq+&˖D}T$B`[ ~7Ut<>|W?zZg݌pwAQ~jUs"0AaKXI 8͟$'HMs}>g\͸k 2?G\CB_~hǢZZ:z}@3<_'(::\:gFI>;mor' !Yu4HMliӦ Ν7n83vlC+ҕ?Y)"Qh?PEj*X^6lXN+G2W"r>!p/F ZW2fԨQOt=Msu׹R}YtM T '?fn<ކ3{^`[C-SM_m֧Ol gaIv{v?ssvH ޫN7|['}M֤j=_OnRjLL?G~7:WS7+7c$;|pxonMǠcǥkPN!z(pfMxX{~pBSmnJ5O> 8ЖI>K" )|AT+g-tM;tSS$%OaǢwS9SmW^+xSձ_z5%ڧ G=z(/ wQ3hРrӿ;-~]N@%;aiݢqX27]vuS)T g/=vM0!nWug)o:29?3o->Q^J5.u- +\3ָ1. \"khlLB(f{ӢLRtϝtvT夃EAzTcGL[BܚE ht:-dkJM?G[;^3y)ks+aYA!h;\laOAK1Yc ոR'>dK J[r? $ }\uEB=`׃T3&Az*jkRpGG=0TYs1zrS)(vTkQs9TK)u - O=? ⊤GM%Q%:礲-^yҚR :TEnta&+T|}?7ėy`'͛G;Oj(PּG}Ԇ>+va՜Q9i_USd֬Y3ΰmOuP  >l.2۽\)1ms̙] պ ,psߕJu!yK$I;cӦMA$$kB.Ʉ]3f1g}+UO~/TeǪei"T_73sKZ KOe1 j`NMĐ{CFȔ>~⡦Otk,m.H$iM*_Fpj53tӮY7vJvk>7jmNq#Au饗ul͟|7uTW2kkF_9u ֎Qh #;NdꮠL]P٪W $Gn/^rOQQB9gnG+2_/6[VWګMLӆ--:gHd@O<@}@ >y B<@}@ >y B<@}@ >y B<@}@ {K2v5e& 71K1+qul\tlT`z6kNٷ)*Z/p][UK̷[WWԵjδ,kܯopi\{5vodNdMl̟o-YbX^kرiZriSTdj^߹vf^1ti`~۳i0֝f-zٻNA]sl"StpӼQk77v_ozꡇ]\b 3_rߚۻ*%&~ӌ?Z+0\ԹSqxmZzMٽ.7t9Ɏo'N4 F u[4o_t j5u6stWƶPTggO)'9@Ls@=ݳ; J5@ը@Dr[Tf|PԔ>2Gʦ]sUt#O ~tJt#AUS3c$7>5F盿MFnwρS7҇OkTǘ/0Λ}Ej^7iި3UNFߗ_ۻ9Pҭo'ssmn*5?]w?}MNͺ*ٶ|yn T&jٴkiS2BW`SR}Vs;ٻ^w>rmݛ6wZ6{wrsQP9vrjPɒ9r)((H{7l'k֬1ˆ nŋo}e/**zOnڴiSf'wǨq:hAQV_LOǧeѤow/j6ݓ馒[;i݇k^|Mʒ44D)..v>,ZM%]ݻw7G6gvs#&Ol0&YnJnٲeTc:*xږc)ѣcǎ5{|,[L>}OwHKIݜ̾~Mܜ!={c۷oܰBA¤L.X',v>(UN41*f |jiÔ;S5~~hٲ# ظ-6 |#{φ:ǣGv7Uرr+ʒaZkfn*lXoOj>쬍$VK.qHӫW/7={+3{l3qD7U;蜪$[nq%$]Ա}>mMA%|#e5ضyONpM?xDcTbld.}ʚ ߫9>OǬאS6efԬobM;-jh1CXӾN2?{}a~РciñeMM}[<5SGeۧvZc#;~͉23vtcEu}Msy^m{>/O=m֬Q.W_/=z]]<͛7O"]oTּD1h}> _uzM6a5jйe&'x˼_ *$vܸq6ݻ/M =~ϷۮM Jƌ;֌9?5B37`۷-ꚉ}'Zߤ㚘`HӰf^;} l cү5/S5fԋOǬc'IN.mziUAMieߙ0`sɏemhi髿_O:h;<ݹ{>{v?Ӵj.z*k=a'SnW*yVv7bv_flX$ tuꗜCcߧ%Ǩyn>t췟+EBC'N4M8͉4Y3C"<`ÎMMNKCqHtyv=Z֫ӤV?r-eާuկܜ4|}kb8rQǚ.?a2=?+,~W3Yj.^xnʘ'|2}pw];Ja#}16Buqf߿6nOC4G+޲SuоmhuN}sоG N:x=ڧOʼoӵA+Ÿ?3kWaF^iq6 A ҒgNl_S ?l%֭aްaf; +4_sN8O t6ڳeK~gv^v[4jgD*r~<2Ҳs]W2}Fi֭[gC5 T,\04hAjeEZ?mڴ 
}OE$ E*I& ~tGeڵkgTM<پ7% -\lsw["(QÜTM[4}q2sKyCn^ӆLZh|[mY~{pH2 6(qoo*?ZneLI7aÅmiO̗kr&q޹k#nN$Yv-+lfm#8ގg.\n]*_F nm=P)et?uNu-+9|A~>i lhP+7bҥ㎳;,;Afo~cW]ewȧgm_(8IuA CӦiߌNT6)h͵-rxrT\h9m2zZ D!?l˩RX4>-]Jς lALJ*B4l ~f͚e&LP;QS ZE";6L6uSUT>, <4#S h6z`WJB_:ņZoW2L% Vm\njvG4)>FtTJz{#O<=ֿǫYPF Ӣ_?;V(XV5f-&Pkˇ_Ti_F&i>M3|x鯶y䑜4ּysW*Kۺ?~z4neS>-(Rdtw-U_f5fjfnȽ95T&/NjqZj괩ir.5'ǷWfs~fn<>:Pj|RY5J IYkߞ rPPlmٱѕy_x{ qoqmY\)M]k}eZ<Ł>r5]*CΝnʵLOО8HJ;ͺr)6H4I$Q&ߦgЉrPPdڵnɪ/QlAvɳ7>ou-74Omqۦ!d[:|pj^WB A6\Cͧ+Jp?睻Kk{SA H5~pc6.$ޘ!K^#|W+T[Gt;ٲ 1W=?vJ鰂ѣGH-ß~z!Wy9WJŸ3g#}UP>oNhrGAŸt50W_n6{)x8:dj&ㅕ;CؐnE͈wDMMm҅A!I6|]z_oP_=ƿ;3!8UFM)1z|wޡsvbh_-zW;GRy JvZJU+jBkƕ"5au榪jC)SR͓Isyzϳ_6ܹs?W}մ{eCa.g=w7g5L9yT35^ ud`A|7ʼm,]j̋\QEj8GO^>hv\t-|3v / 0Zvg˖͸Ik\XhaZJ&ڏNvo-7H_mP;ٳg۲ScoYKŋO+WPlqcx_>|9> ie!$ZMyX#76sm}>ه-#a2Ӽa+W2߼JM^lA? `tdjNEXGnƒITR斯ܕZq+U-Z6ng :f.]L㏷Dijȵovv]aڝuoc;| S8H_-[fa۬O7UFB }j[RW-}JOJB`saO>+U_ O*bd3JL,~5Q hr2~.괾I ^~e[@R͎ z+ETj7Ώz ^K#_$ rIH߭avBRgү)C/ &eMk|0¨_ƹ[QȎ܅hIA\CPF=uk]<v8 vjki>t6J$@9/ǵ4ĬX)hDcûڱ=4Gϛ *Zۧ]w_㏛ˇMi/ j ڷw%PYRz=rh͙#FfD`Ȓ$gq+3vؤMYxnʘ /,4U'NtH&P^F Z@;}}拓" \iـޅOҎ4>hiR=]%-ӦAAx9E]~O7b(پKŴ5vǨ}Ҵ1aPei0VGU ( x86\3y׍KF}(LP~jۏNT#0};hw 3 yAt~+0QŴqz;O;_ctGh|ǹٷx'\jG`Mɵ'>T)O-])\>f̰(ӟ5~4=ں^˨ii7?nZw[nph5_竇*Ӥ(ٺ`-+d}O{Y߄+t<,h_Wxו=&lm6OEOU߮jW> }ɓmYAn˕M66xJ7Ά,ޕMe;)ff 7ՠ***jzkذav9]x #np%*lVn6/iѠ= _}} N4}˄7z{[̌umآ)ѴX4tehfo1 峵xGI=|=Ud?RO{ ¨ h6haÅ-;6yK&>52_iEW9{n7T$~i}t|c4)/Ѳwfg=Sfݥ!iTm۹ŕ"5ߞ׉gG@L[++ڥqo&%>:8|jD>>$7Ԯ 5b)s 6ThDekZ&dּB?yZ9;V㠏O>{h{Ǣb)\2Zֿ;ov*=o5ϗ~6lJez~R=v~**#(`o WUXE"M(8m9-[.MEBڲ3iRiP9}5}\[gtM;tSSEF1cdoc_!M΋0MϤ~yDO0x;c\Dۚ5k*&TCJ5ą(Je>0v-rS};qTv4՜dz1%)P>%]UW$QM;HXGM0؉GoiDZL_~JlС]WR5mڴjۜ_kyeٵfSsu&={vn&j%59H!I ULFJPͣZ*!fΜ96Q@n{Gmm5;wvHMP[ MRyԴu-/՘ұꨶS֥M:G:Wտr.*ɫҪ~A/NS |qfo.#;~ߖ~hDrA}HԬ@u>T5}|r^ՍxjTǘ~tmZ#3\bP=(v/M QEҲqD5^Ɠ3mfʕ=b7btigQ׮n,? 
H)$4J+Z{֔"wͶO[W Z4ܽ6cs{C/L;ۛwS |"Av[C_FrBͧJsđk#fڢPy=|LG]ffUWzM]d~ 7T6jV])t>yw恏ue)gh_9?Ss >=5}Yٶ@>|ԤuVaƒI湹]c ԇt\7';_l=۶9@v5PU}gyw]|*j{߇O*6n[g^q3{fn.]};hN=3';V4_xYf]n.Pq N>T#>@ ?/i&ߴǬ޾Cоaiװ)lZǜ_=So=ӱq[Tʙf̷M;< ]64u0?ܯiո{5yEnde|sj;HEMvLBM"ӰcG*}@#>y B<@}@ >y B<@}@ >y B<@}@ >y B<@}@ >y B<@}@ >y B<@}@ >y B<@}@ϰaLAA9ݜ֬Yc_rn.@xv(wa(ow9rd41b28q;v-:<# .>}9mo߾e|Wŋ͙gi˭[6>i۶G!~kӦ)((GmfTܹsDƍ]VK{KrRTTd&Ol:4~|W^xo.Ls=Wf=fѢE )8Ҿ$[ -bCQ>o9tM6Qxzj3ai%\bG赗_~ٖcf<@.E裦|裏QnG1cƘ;tN98^ o&Nhr1/+5uTW2s%ɋgԨQv9{-'3رm9րllǎEP߾}m9/c-@6E=n:sWr"SLc9ƎQ=~z;|o^lobȕ&Wʞ$LIQQH9/5䂂{Msg c( ۿaҤI.BW2fԩviXN>dWȽ }.6>nf[G6/-{gu+s-R;2vX;=}t;-i*,,,Z۷-k=&@eGbMyߢe8p)((Cvj Nra.WPPjlӪU+;5m¨ٳܹsmYThҤIf„ 6R OeBcƌ1s̱R<{nj`o W/dZMMFZp ;|ͻUR#MTB 7n+sy@y jӦYnM@ 9 jTĉm#^zTB xusϵe@nZs>y B<@}@ >y B<@}@ >y B<@}@ {Kr;Nmƺ楕;͊{m{5^?1]1}[5C3sV@ Pv|<|ffF!Ш ]P:J3x-=0M'l\-jw@7;ͻ.Aj _)56`fj .yU/1,7n}\TTNۇ}r[6z*'O 6̆'2w\\6mcȑ6ؙ>}Q\\JwyVlcr B<~2x6nh֭cNAرcɓͻ"/xԍYBB[siĉnNye4C9Ĵnږ;wlUMǭcyj3Ҳr^]n @}W-Zd˖-0/.3T۶mڵkmɚѫ,yJ/J5wP3wHK|&ng2E 'JlAu \ >P"}/lAͦ4/^M5k֘{{Tfܹn5Ui&>ڮ[eQFEߣDҴ2>8֥sDߖm7)Kx~ۊ.moK֧dǒ*u_]WѢlWm$q)\9رc)b5y}-(Q@AMiy-Ϛ޽{ܤO~su=ٳguk~z;c냢u֙O>ĖE( yԾ{*k^XC~vOe}*T[&h-2kEٲ{YUMN4+quUB"}ג%Kl0m4pB3i$;(Qan6 ,Ϛ5̙3dž 7ntK&1nKѾ)ƌc55QihЫW/;?H!SmO0߽"dCǢhs! *ڷ\vev:n/xlzSS֭1s裏e_۪ٳ`mD5o6ܸbM=x)x4}qCWGd'6a;VG<<  Dt\z饶رk(h۶- aJھ}MM.)Ѿhj,|$+pǢ$/68˵oю1)緲 P1>Ȫ-[ڱjӇOu*XK&]~?Zgz|p œxv\.5T&拓GlӠfنP߾K3wPgDuly~\ݭAI8}q~y_:4h0~^{: ͰC7\f["J v׭ZMwu%Sx׶P0&fx6Xyk< ,rIHh| ͦk %Q)-Ǐlm[cKۜwb3}ꕬDou~NДxr}ѶQF٦~i(PpGԌ_XXVZB=z}ˌ=̶}M|\Z?Zs +wS7^ ^` L?nӢi&;O}hӺ2z-̱-eΧ4ޙSa?mK:<Sk)S:F[hMsHLԶgm}}S0}Y5L.z!A\?OٳMK/dV^!4fTy ~_0a}??  
dTRm`$LPͨK`I`d ;oҲex֬YIۛ3g x|ψ#8~+ / '娲o6ulYxLM%k )$ߦ}]™}0oKO;GILl /4eWкd8oϷ J},:ӴOꇚ>r͒Ggqᆂub펽V ٰ$(KMPNj<|QzA-fښ]nNy'6 }g[s9E<:wocS?lZr|_Zse YDa7S0M;vKD͘Mlr[mnzM Odlu~9MIӚ-{=7#!Fwۣ}vWnGׯsGA1eS 5}5l3/\ҥywTe5}9jH>$G 'HE} LD@ΌЕ-lhW }ș]%_,ܼµoX`ܢJB _j9S'll@>ӄZ>@ @No'l⦀R |T#"*@ T?(n0WcsynOf]dz>ԇt5|;|ARmc̣_n7w~ݬJmӪ~d.}w@U]ٯv9Żmd*E&uLw˺fH}UVE552A}J@x5JTJd$0GJLN9DL7@>/1|""a&nêbdQꪗ}ߦ9ܺK݅9@$@$@QAγlyp{O5mKo^+ujF&'77g6:thqi|k9$julY3LZ/<:sy6 ; Pm>ھ<f0_ f?_mr}ć[ޣsVw|%>y饗L.]LQQ ~IK[|h{+5kܯ@v~ܽuWc:ޱ֕}on[n1cMii)++3sεޔ)S=m۶Yzo%%%}n ; K]Dw[tMiӦܽ{w;GzK,semڴ;! BT PyGN=TWB>6lJ@J@w%:.BK\tRSTTd'+H{g̴]޽SM/7o6&L0]tIפI3h 3o),G T^(pرc߅ ǿa}{,{cY?G>5Q'y'yTRŲ(ܸզQf?ʽλ!6EïqgW:;Q O4ײミ'nnSZvY0xmH{θ^K]&ϟ#n5KB;2;ysMSKL1_E-Z̚5ˬ\ 8ЮWu:w-؏ }QSZZjϧ>F׿eOo w̙Ω:E?+ u6l=S!_ˇB CGt.S6+ ҵQUUiӦ.Z{9{/t\UwwΝnrY7SmYu3˕Z'; 6zy˜1c Y]G>ѐL-V=ʶh:E yr83v;;&q~SoZh4/d`:x+v#j Q"ny۞mG}w-@E]o?(\8 T)Iגhd… ]ɘЄC?+h#Nvu 7RvN (Ċ%DRÆ \[d⟥ڵ-%݂ R 2ΫZ \aPn:'ݻڳgO;_d'/˻ TwnZUP|iz;o|ivZ4 9򃮣<u'oΣVG @ES`j[nW^)wmGZȺu\|Јj߾+qF;ßVZRau[0E]tET=(x9 ;ҵRቛ. )-|rwO©hQ 8A7|ӕ{1Er)lQQfcjßhi ]ɘQN_o Eva|+e͜ ϝZ%e_d~Nk+OZ#PwW@}@ׇvV. n|XQ B$J |sd 2]ѠE`Dϱo߾8m@.n-e| \\xHzFiϛi&uv *8J'pa]F]%U6j, m8ZM'p{7iĆZNxVj?S ˗$]z-It-G#fڴi6sVeNB!SxqTGs纭fΜipJѣ7e*. 
4Q8uԍ‘aaϽ3Nѱrky8 OK̓ U8g&yI;>-_rK'U8vtҽPsD+4/L-1o!⻐%\eŊTIiÆ TXana(x(k׺RCz~?˸*㬳βs7s;ﴡRq2ӧm٤HƎkҡá&8# A=,^W޽9wsٸPWZcC{z.vJMwg>j٤Grw?so!ض|~sq۹>ބL){C-8|p#ztcȯ~+;%'xC:V1`*KIs-B_~v^U| t>[M\w|Y* MK?Tou!2 ጷcWM\7{ݺus%c̙J.ȨV3Õr~?\kg1= pSnrM.l-;RlG>ηCPq]mKm9 @E[VԤ4 vԲ‡!W_}uG-ƍ+/!C Hu1Z[E1b9˟S Jsj1PRbZ}ڀG<k~kt׫PEݦEeig-Sמ ۏ%&O9sQX_sn" MѐzQFٲN׬=jRᄁܙ ~ԊeUSmWfqhE>$(Qjҭod!7:^'ٹ Ͽ=B}Ӻi չuӦ1Fji8Zayƍij}?L}eClvTC>K\|5Jغ&}h^E݁)luV8>#R#>(x?C4 `h|?>hljR P,Xace;n.]Ωõ}]eg?zO==ϝ뿉 a4ΐnj)=ި;w1~fyύ7ޘؖyg7fˮMnpFh8>Mռ[Q+iof.<} iDЮ;m9/>CE(]}+CVhcʕ65DuS}@ |Da>a NkY~|ǯxF:t|'nE!HF\z ׫]wuu!ꧠC׳T] ,Hci?B!L@&|/{3WuLH>Ut_JJJ*>ѵSߑ>TX !~1El A ey׍Qć9nyώ{>Ec}hM T_>hhUָBe=;mϡk9hY7l[e ":kYV4>:յЁ0PytKG݄c{iiiuGTeZǛL6z-uo3𢟺AK$cVX*]ҢE WuAKQCK] @}ahGvC-QXnҦ4 Wb=B _t1|p3i$thu׸'g&80^?u7~`zw[>t 3Ϙ3f`w& g 7(owSFנ^c+ԩ}[S}}> jq "15V/ )@Ail/ᖀC_NÏZ$ |VO*jaS\{e_8tڨ'ov94VO熋GԈ>@RV5`4n-b]T n9$@~w^7m_crٲkI ۘ-.5va {< )}5 )j9a> @> @> @Eer";ϲmf̙fy t`+uEu[2Z6 :w6M4ZswIAZg6Mj=ٽj[B3G4-PM9$jyIyk {`sF@(1ì< ;Q{q4wIA,-)薪> J]ݽ;-|Zt|գ> l>}{AV'je\ijիT )j9TMS '_n`6>[PP0 ۵#AN%ުP;'A^~kn @}Xt4h/5uŬYo?a5\?2s+ltW2(ѣ||\xٸq[m6WvJTy\ ˖|O0dʔ)f֭m۶fvӾ}{E׏|7Cn1,Yb4mڴ;Mr׏A*' x~{}\ @}'ФISTTd^~e&L`S4hYt5k֘ɓ'޽{פgym綾^2]tI+W}.>Nk;]תR_SNjڼys^CtfltNjAˣGpn];o}v]իZcOnzqG~;w6C˕W^im믿ކq,^ح1u~[S.n&Mp}*k~ mf3f̰å[e˖r&~8~Uآe{ou~=N:h{=?=Kh5=#v].^ܵiiܹq6߿۲+fͲӪU:1}0) Ph Æ !?>v(\mWRRf̙EYu6mܯU瞳Baʕ+]hsNe[nntOоs3D-|裏 [X6l`{{k2f;L{k<'yuu/7O+ۢX- 7Asαӂ֭c4jdFصˬO3ϴۿ٬4uFZB-pȎM7i^/*hD9|ijβ/huչN6 ٹ;>]~ulmtn]?-~Ч;4mԮ޽{*8F6mp{O+VR'ڹB[KPTqٹΩcǎvYcp롇rNꬺK1 Ň @>נEhh^_n񶬀oNAճѽPyH>G<Z) EŎ뮻ߔ)S<|d=of{֔;{߳۲m*FΎrE}fՏlCE)Yv5yMCk=k =j+^wkI_L>%.Ї|KwfpnNs*hٳk׮3\`A!CyUkР+ -ZD%gϞvd;O6ٹszEr|⃘].Zd92M>-`7zj#s8OM~} q|`vZ;;-u4]o۷w&j 碋.r1` FaQؑ+b . ICj[BᔺS]yӍѤ[u[6G'曮T>Pc-K{ԪEm"MucS/m]"g9S7s['q-a–a m6;[x>ky'<ҍSY L2Sm~yUgv>n)7qc ]fQi.*Ï$SLqCTou\x;vpu׭[7W2fNT>QeqfNw-rs"W:๵\ԥ\].4~Ԫ_[g|Chχ/]pK")}ט,ˇ6 B4V( [0CzA Έ#\tl.u2SYsj1[&W^}ڀ/U2j5^ƯIw Ug!*(lMV2>RKNשkυRWk'O(9x`ThHO۹N]N׬=EuRᄁLbdzەY*A|(# Eԭkˡ+:t4;x9a=4|aOK/P_+]7mZxZ->yq;y4y>@ST;2(**3"f}l}<8)ݻweOjh`|+պ뮻R6B~0hz}+m^LukXAq(JGu~W릮5zuN++} -rPѿ. ϝ =K! 
wB0gHT7RWto8?-Jp1~fyύ7ޘؖyg,͞#/W #4O/M| Q+w[cQoZ\0g iDо-[l9/:N1^^{pE!JؚC-T^QLיNW@Ut_JJJ*>ѵSߑ>A!G'(/ydž MݴEsv̟oǽ Q1> A&./G4N*k\gi)޽9t-:g&M[v>Ԯ_|uֲh#Z}uka\!˖> o [rsƿ9wKՃsH Pw% wM5TP0, ȤNFҏ?6skP0gJ@vFHZ(}̢U _G%> Ns9GOhBհ{wAR5ſiһ[ }\iծmMdZ V8l7ϼ{f'58^i 5>Ձ={G5x|UZxQQ#js~VcQQok%%6ڳvIQukSY3sJN5טN {< )}5 )j9a> @> @> @> @> @> @> @> @> @> @> @> @> @> PTv+***r%+˟Cƣ@dm> @> @> @> @> @p37v܏&SIENDB`docker-1.10.3/docs/userguide/storagedriver/images/image-layers.jpg000066400000000000000000000637471267010174400252200ustar00rootroot00000000000000JFIF//C     C    RaMj ,=/Ia~78OKƸ:sltyVGrZݾC$dERZ>_Jj-eimw7O =Lo-{1N ,O.}6MNQC'g.:;?]<#wt~xY!xY!Zo=U π,{π Z/ yJlQC&ِҳ/dk\~k[&$FdKVV4T2q~bJS1V ~H~|=d@H|`< 8H{ϟW 6vFٚ[$=;:hT6EfSy0 S [~1lkt|ߺ3]o=m)Ս[P>v޳;+SZ&j|=g3XG>ma2t4M@ |=`'砀'89]oWeSX9Ն6Ɨegz5*%roWdp3z  Ls3o<kEmg6} gC>poySJkl^ zpo@H <xQoPտppo< Lմ鞹MuǞMbb=`6:oOG>o k65\<XrqET_4|p2hϦd_S^@+ڪ4|su>I>Mw7T7>}3&]\%7.blu-o}3&әK-O٢ A&GR,yi}SYjxK-O>e`s^\u8_P@}}CI4ic1TkT˯_P[15J7aj6Swzn|>b{`w8_P@}}oEIT8_P@}}C.aZגń73ljثkiﯨp-ا1jl(aZ`{[_>}'}}CYp\IPf9bﯨp ;;4 8#9@P37 !"26`׺J,bts4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s4s9=EDvr=ƞ<\\3~TNt7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~߫x7 ~ّxOCW'烉p'~@h@\3~9CIߵ0&9nx8 ߎp~/[pT3+T6\\RѪX?R0rK E2+m뫬`[UO*Yoăj]-{z8vi^8h%V9Z[ Eϗ)Ɇ ߵ0&9nx8 ߎp~/TwPqZegtNi'jGžD2I⹉]P3cfVpNZ,m-LTM)i)wvΔMNiM9N_U|5T]%>E4 ~?oU[yH T%"5|Z٩1jȧ5GwD ?[${7?娚 ]Ξ|᫮X7sV"'HxմM([ou ϻ*L|qD]KF4Iԋq{pdVֿT!"ۍ+}rT-SRWFM9N~{P=P߼&, Hg(Ϊ*ڈM<H o8?~;G-=N4sĀpfsn">JH{~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j;~j"nͧI|dSsĀpfK/KG=QTz94|揜>sGh94|TzG=QTzG=QTzG=QTzG=QꅯY@\3~9N*SyZj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+Ƭj+JU_iE3knx8 ߎi^} L9nx8 ߎi^D1pxq?0@\3~9x\m@Z*΄ȼ̛~?n  v`eq=R5'SZ#ᷔ%#5lF5 ^U9Marm|)>D7V^ZT M2Zd4EYmF [;n:ߌTħI[L=+vDUD{5r춽=-+sX7qJD\6]9LT\ڥxu4ʹMx)CID*i%hlXRQojHINJUM"S[,D -=+o$۪XӸ*^)(.%BrӦ,B&C 25!ۃ!LLn*s%+d'k'I3nY=ǘyh]Zjx3\.&iiN ^Gj}%j|MJn 
n$?Iw5(IWEYXg>kla| +H? ^CkDU1/hmSjF."UOo+ʴTchS&V[|9=krNKNrz󣇀 ĞV1.JIYYk2H!˘Ij:bTl3OiBC߅xF9nx8 ߋ[oH GOm (_xmuo"P0Չ2¤Fls&6Z+yq6N[0,R*j.8~ ۟H_sy_֦zS=B֢"TC\T<uSJDkI-rs?fx ;-V208oѴi+KY=2#4TbWMPȿ[/ݪ;MަѥU]iw+J]JY׀c烉p ڢDBX$>f3Laf1:ij FK)Y] *5Lbj=+N(oC_4c)rCFXoPIUko;4RIϧ(?O͊" uyLFu'&јM.(QƒGQC2}%Tsi)E<$Pp׀c烉p u-=~+~g^[${7k!{b*0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ *0¬ D4ܷ<H oÚ4h [#-ze2ށo@F[#-ze2ށo@F[#-ze2ށo@F[#-ze2ށo@F[#-ze2ށo@F[#-ze2ށo@F[#-z8N3烉p u-=~+~g^[${76Vb#KA=[;rːW5;ՙ)V6#Q/FomƯ3F2f* KVV?+x51IT(eѨ}:5^d!^|:9S{b-dДbJȽdoue$ݨ z &@c +Q,Ȋ*KIꔑM g^[${76VQ4EѩyTʕKB\YZzma>můT7LKPv^~dQ'9%͝ѩhP*mNR1h̨DhrIQ@hܡn%NH::~olTY-s ΔگVv|$c-CJB^B"KXPe2Tg*˺\}+Y+q7l2 Stz8220QFrD $S}2k9SK(uDR´>傆JT} a|>Ayxr:"ĉjut7j,~un)>.L^(*VkWjμ#<H olxF9nx8 ߋ[[:rq .q-}HZ؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.؋6" Cb.]kueE^[${7Me0J&JL)Ҥ;O.?9QK>kb9nx8 $5(ʹTP9tG=h۩?6-#ӕ5Os lcj895 v$M7+lw9tGٟi2t2̒U=&ڥkjW/v^dSzR…KMv<za5Y@\38q-—LV[S3)͵W c*VK6g-_s-#s3m^ODu-r4H[ #gkmD\-ġ+EwɊɪY{1˷'e3d/& 9Ao/.Zuujak_sĀ0ÓdynGdynGdynGdynGdynGdynGdynGdynGdynGdynGdynGdynGdynG*9 zT)㥲$5SdC? C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C: C!-2UV'<߸W/+)9 Rm-mdĔmo;joYl-eƽtu#Y])/C2J1 MBŜP+vL<z[%mi%uP%9./Uȵ7M}X m^>K%R !"1ATU2Qaq#@BPR $3brC`s%4c ?bhPj frD^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нiޅE{N/j+w{Q^ӽ ڊ^WBz;нDΦLm! 
F9oX틄lB8i& ̠$ N7syQVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UIVʽ'[*l~UՊUVʿV*W[*Xe_lb~UՊUVʿV*W[*Xe_lb~UՊUVʿV*W[)yOՁST@1>!f#QJ1>!f#QJ1>!f#QJda3z"b\7C!s'ɒX127 7Uq5 3KN6Gdw%aŏhd`rs.>AFɪXɜQe3W 5E25H1pn%irVxT<Lj=#K=:bVd2Uܘ ~cIM߇-b⽯Ăg.滋&5,J>HM%G$Dx?fؠ&&v81JΒd_N`+rdp 蔶_[eӥO>ž}4CLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*jTESTJz*U3TLU5Dg%S=MQ*v*,bKSڎ [4Ic)댧2z)댧2z)댧2z)댧2z)댧2z)댧2z)댧2z)댧2FR2e#)HFR2SO\e=qSO\e=qR82-{$UELb[J6pC|>x|<>{Ϟ=y|>x|>|>|>|>|>|>|>|>|>|>9QiF2ʍ-8pOh٧H }UpOh٧H }7؆mҸ8i8C_|_m*^I9m9GM?5QͻujU&UEW-ǵ13#k5A'pP01;.h*`bIىSM @SO(ɜ*v)bSb=1بRAcA6(ųOdj9 5꩔.bX<0YY&d'DHrj%]d2$3 4̣")% nLIZMjlr0EG P_g-ǹ<v2!L9m~!GG-{$^!GG-{$^!GG-{$^!GG-{$M ][CXnHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9qHI9BIE!$梐sQHI9$RNj) '5BIE!$梐sC) '4:␒sC) '4:␒sC) '4:␒sC) '4:␒sC) '4:""ݛ5Mb8'4ql$RIMl̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̆̂"ܹeI c{4pOh٧HR8 G=GW8i8/*MTl\GJJ 8@m%*I-beߘ ]HHfM 0F12A(6"py-?Yur>[CiKs3f&3M\X9Ķ#9{:rޛb\2%@LqtRقe2m% r)D [8Ct"dUZ+n['O>{bS0+V8MWU u!s@@L>Ts's(w'l*g)>1`0(M/@HgN|@D7SG{7'k1DɀL'-yvՄ\Yӆ_~\a fx9*wg-2:(㊡ƕnKc-E%1b}Ҁ ;a"f('>"IsG⩰\1 C$Q%:L)q33RT.lg5of*@+M(bۑQL6" zj@PSMWnu&o-#4ql$p?_7OT<Օ-}% ~EBw9=:z7& q c~\%Oē͒A< ❝1MpMP2a1r*-/++YjCcL3Ģfd&PR2d"쀄#$WL<78blhEMK6@}L.k"Sd#06x};H| NvQcmpM\"^I3U Ȧ]YMTvڭRZNڈWX/™EYD M1Ű`:peѫp! 
,LųOd!~.[@CL=l #l5LC9&"+8>2ŊLKw svIH0~+ src="+aUULpcF9s]q^.y9KB"7na),erT"GoBR3PHky&{^.saxSx\s~!ӖP,cP5c&?Õ ^caUR2Kr 6=pbܿpyʃr0sCrb\]#;` )ųOd!,HjM S\6;2r9ACt1.Id(u& B9䉌O-R" H!LaB1 +LIIHa h&aa/e/%4l;6I!8W)y!YtKPItC16 Jvai5$̍-݌Ф W'Ί4ca nùKLdr@ XLL9?ύ6 4 O GnPV8rܕuL4~޶JA`0y; 3lgE8yھ(df.dtƘ a9 ѿ D',PPD7 H$UʪNk @D0uNZ]e3I'I{mS6qTl\Yc&r⥾$pA-{$ )Q7U ) )N>{>HdLR!ɐa0IDw'z0 - w< '); ԭt3R 0 7+vD[PLT(ؠ>\DsCyBqV*.p2PـýébM.Qn` l'o$&y,*pVª>,؆ck݃ Ju%ôĪ$br X'hs)uPt)5)OQv:J9,?8n=ד*S0ˁFr_'lhK]2(_wAg`P` `0sā T̫ @ bm6%D %kU2ȷ2#c I[m/FLfIM :jc#RR!O `Ϳw P2b5,b73@}"N6 n>{Au)v#mb"Q6$%-6llG?x -YKyL_ v߇-L8E r%/ "PýhrcO4t ,Rp<04s*j@ۨQhk_%5Bù>ٙKqWfpp,pfIY;2W0$Ę89a]4`ksO8i8/ڳ5L*Nڦ TOzś=IB1p![g S,SNM({/,9: vrQD-uRk! +w){+n1 9fݥtdɦAɘL@!JLDsi@ FGr8nT8Q\i;UϷGmbh&r !Z8`RV.S!@ RxJ)?T.\u9Fc|**}kl85˕:͔I7drGw4 jY3r )(F(&r*d*h'a Sa).JTe%RV(!U.- t8U0yd$7 gL%IU Bg*~J CzNOvv)q׌F3N`4߻2/vxP+s7 &moNdRwLb@^jųOd1q+緎;H)o()0oo@bſN 18iLǤԘf̶Ř*l\Y<~ϻ*)`VMZ8UJ\C "ck\BZ_Pk\|pD),!٢X O?r.G%ÞLXfw'UYJic8l%\Bc&7`-Q'RJ 9ơQv1`4''&K|| )QSYM (#eml7G$Wuyɦd'**1Jbۛ'bG~+ B^s&5؊bl9n'Ps+.eLfU#d69ȬfSxBӾɟnM\,G!A91!6F۱2':>`E^=MUtۜ9R!4dCLDc1yY C r#?~KVo*mRLPLA 9b_KT5QIyh*.ST((A) fq4ɦ`P?ܦL DsZ*t¸xs;HdeH\K1HmX? V8 Cg-ldp)1QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HED] R*"n:QCqԊTEu".t7HJe M9 !p4PBiBO|&$!4! M>HBiBO|&$!4! M>HBiBO|&$!4! M>HBiBO|&$!4! M>HBiBO|&$!4! M>HBiBO|&$!4! 
M>HBiBO|&$!4!`l" `Ԇu,7xNS8!DltƹkpbZb eL8<;lX0=\[e`*$L u %-MBf3צ2/1 1W"S`lp8V{Q?D\'#^V2 Q6SA#a޼V3NN@AÒKQ@0k~")IpFJ&BP %)IRJzoEG@J/K}jSlo"m$^`4mɨ c]A@0 |7y<6TU(}vB,ʎԸHep2X$43=SiDV:&8($0!@l]ъ˻Kq$, W!#|qrGmm9‹nQPItNC.0:]c$QE4HRD5_*31P!2@" #$`XLpҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪҪ/zg~y=hrĆCB38NpdW}uN{c#b#e[#d7]+XpP8#A",#`7<8vL^ucZָHpD v9: l#3F;grơa?<_:=l8nle&tmCC"`ȴvKRNIo`05@c=?<ϴА?;[[[[[[[[[[[[[[[[[[[[[[[[[F3(X `,X `,X `,X `,X `,X `,X `,/瞏>)wsG7z?h[{х; $)swtnyl 8J%tP-s2 vڐ^?pIlGffv[F%#qNɆ~PS~K]˛] xMXF7 Wm=xr;X|n=1۠psX Lk2(č9wͱikX]0f q+ Cc0-k^ƹ6S~ o~M>)wr[fEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfEfDN3Mq{[OSw=7͏{?VJen]pf0s.^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^Ay^AyR?X+dTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdTdOaڞ:ok~שߵ}f\+eZj_'k ӵ.S0s'c{iXۣ}L\0 0 0 0 0 0 0 0 0 0 0 0 0 ,4!P123@ #`8S6VҪU[JiUm*UVҪU[JiUm*UVҪU[JiUm*UVҪU[JiUm*UVҪU[JiUm*UVҪU[JiUm*UVҪU[JxnjJ!Mu5 vhuEeKOC)m a\z\W']G<~-(?otsWnFkFkFkFkFkFkFkFkFkFkFkFkFk aoDT2K!,K!,K!,K!,K!,K!,K!,K!,K!,K!,K!,K!,K!,?T ˍD|ߊ_+OGύ*}`C6sz 0r<1ԁ&N)J8b#-/A#uB:yQ OWh2H/A!+ cX]#ل&+lÀfVg#cfNTD|ߊ\Hm%aeKwf R1Ve/ra8(2 HǞ dO$7LEM4XBZ*DKHs3䢖2Ylax䇆Pj&rX 8r,ʬ֥)?>7~)|?>7!='Z[Ykk-meZ[Ykk-meZ[Ykk-meZ[Ykk-meZ[Ykk-meZ[Ykk-meB, ܮT)ao-[ ao-[ ao-[ ao-[ ao-[ ao-[ ao-LWa d^ϊx| c|Sœi> ~Nމ ngN^DGs#vwvgq 8{Gd(Dgqzs#v~i[ѹ>|Sœx_>)/ŏ>"vۋn.]qvۋn.]qvۋn.]qvۋn.]qvۋn.]qvۋn.]qvۋX>! 
@y }Yo-eՖV[}Yo-eՖV[}Yo-eՖV[}Yo-eՖV[}Yo-eՖV[}Yo-eՖV[}Yo-emH<'>|Sœx_>)#OP(KW,+XsZKWij-]vZKWij-]vZKWij-]vZKWij-]vZKWij-]vZKWij-]vZKWij.W_14!1QAa"Pq 2@#3BR` ?"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&"b&/ߜc|am : UxqhCRT6@1CVUVpJv ՐY!Ck#~2|{1Egv% HTVtc-s.<̇"ZZ,m"vwTW.Uv@ *]'72ZߏߜcBY !d,BY !d,BY !d,BY !d,BY !d,BY !d,BY !~yc1c1c1c1c1c1ckp^{{BB@Q[b1x_v 'KkJ&oAQF*BS R)oDbt!N*ۖd͇OR G, j[Q5Z.!QL\k +ԁBh&*PZ/-8*p^{{^cX+*Lmԙu.d|;o,T"quޤTpҪs."0VVBe!](}*QIv4"g-9b7\_>g~(~WNT:u|p^{{ ۏo5{q/n*hhhhhhhhhhhhhhhhhhhhhhhhhk e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[8yp\8Ti2nZGEB:;la=U!6$mX:/O#E̢t:k,WpU徫Qҕzp.Q'Eݑt8m/[~/+z"}^?x]zQ*DXa6quDZ$+4ԎzQKX 6D^ZI\m [/#ݖZT^ u^(Wj޿$a]8qc`c?p\8qt3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrf4ܙ7&i3MɚnLrfr+\W"ȮEr+\W"ȮEr+\W"ȮEr+\W"ȮEr+\W"ȮEr+\W"ȮEr+\W"5Îk>ϪCՔo,+}+n\:b4~U[r(/BEC^+qÎaN(-U/ rVZ%j}G 歟ⷥzt)Ґ)^IUo܆~Wla?KwS?CFU.?#8҃a`շح).vU-*\S$n4Uwv(C"J}>}hFI5)?kxnÎk<6KdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdXdocker-1.10.3/docs/userguide/storagedriver/images/overlay_constructs.jpg000066400000000000000000001406001267010174400265710ustar00rootroot00000000000000JFIF//C      C   fC~N\ԿfCB/Zue֫6>yΦ$:PQUڧ/*}6+?4⊲So~̆'b.U!x^?l&drn lͫڕōu셉s%=BG -֞Z?FrV#ty- {)c3nS3Iifo}ўVZ'3rf]7yk`4g.[p[j}3{74ϓxa챦7V?X0O3L7=Ϊf/%6G*EvO9ۊ4Il&*9`4s@nX ͲL\),Q Obϴ=_|$o{{4s 2B3bhnc Kby%GDٲ 6)PNVk q\4kŁs7ʂtR䩔>a~Z)aC9I~?>"&g9*?P-),{c_pUw6wc_Ige[t 9^yx'6\VM0=͗ 83dܭ}L44W66,>9k^u4NZ%zA?IƜ;==+w/oo36"Vm`>lnvdj04sAOs{'{Ur}9^_,yU~t F-u.=\_I?)2gzOg7L=nL4s_ēiEB\{uWst_wd?Mi7PUl6 Fo&M%G6 x[ E8 rc@37)m2d#ïme{̬3غLG/ɞCC7i`4sB3|(_sh Kbx;i3fIzTȥA:98s|A{L447ʂtR!2~_>C-M@˟|s"{{L44#6Hw&G.\E]Wa{ Gg3!(78Ys~4`> {6ˎ? 
:|}`>VZ%|.8Q~ˊ+*2VX1iqVH&|YPi$*6d#EUz]̕mPjJ6|"i_B-mȻAD"ܛ uR#~Y1kv a ` 6Ii|)?zY3_\.r+:"9z6?ОD-\X'y#P/2-LZi={j-2N^ԺjOޟĕc9wWfGgv˻7zMd~Sv#B:Yh*?xˊ(<1 L9lo&Mp_`oGΪoGb(ˠՖ 8e2_G2\1.I@~~yp츣0X)) SCg_>kTg) SB\@z>l6) RX,,T p#   RX,,Tб z.p S@>p)M X)MR#R*KRXp#,G XO,I`4Jh_)MRR44JhJhs0_   @ @ǠØ @+@su LLL&S> 5M`k  5 L!6HHMuX@ 59 @pM`8'Q 5I'QNkXNiHyH$ 5IkX A6 u_(u  @ @50 R  "#XY236 !5CSVWuw$'0789ABRt%14@v&sFQT`r (DEa h(9/ry|ek}&! :{qؽtdN6=lat~iۇ-.}"NN2tL 滧Oc@)nZ<^T]$:a181c<=.VkEVSGn]ۖlaz_5]8sشvB e'+*et{=owJ*z`vF-"NKZ5eR70FGsnA^ 'cV["Վsܻ8qj΢i]r vl 5IOd}XLrjN٭#Qܸu-ػyo$z?Og~&QT-IY.ӪL_f3PR|]#%k@]N6/^ckpI>";U׈rbE0h #P~<__5:|CnSa-) 6=Ķ[bAt㺼J \/(8mUj[jXl([eF|7ҲDsAqRlNP)QWAE:VRk,> rյľ\o=-=#"KȮĘU:3IN}"x\GqߨE\7m)qyD=/L#;PZJo*[cD|E60&Dx+7-[[ODE/!DwM@;N3ȀxȦʆSnzxzbb7%?xgk欐&(r 7N򊸪eAi'ͼv+ Q-} vCWgE.6-kc~L };;v ׺kN8苒5o.h_YLNeeYiQP7= n=v[м-E5[7DV4҃AÏhM޺uGalЛmZ{sIQK\ sۚ`e]6IPPI[.h?+0gEn=ٍ½fk%v/Y]KtnSbTqlj]'unS׆X 9n|et]w{DjfR0cBqpđ5{:qUu\,!j;8Uº&qe* 81s!e0WB(MA ~t*$[=?!DeqpjohIbRP ެIV'e.0(֪*:B(.CGTV;rwg;pcw}^ՕeLtX;^ڣeDBWk5ۭuWBw⬎輽_PEqS]^w}ͦ<(fgWC(-+Y#bF4ˮ ZNՒ'ƨƞey_1マ\FHuU!S[Vj[iIَm8ܬQgucoʺǻ18T(n-ltmeIuSX$[Ap!S}>v.|/'uAvZE}jQ,k&:tm%*x \Qo^+iҪX):\վO;]"xi|m2F^Qt5l:v*]/ aLnF@>L{K%vSmj.UQ|I :Cs}$"D.)twҜ9g뙑U/Ni4;}2L㗣VCGJhI!mYcD2-~_l\$8AtxWoPgLJ"+~~EW-B5 jOA/oB"?/^//Dv^^^^Yic`YlPSWMHad,o2pyv /5^kMӎ4QJ H9vHO~"6ҲS"ɖHu v\D3&ip(ZqyYr| Ъtp;\o'H^2GexQ=mx_V׊E5TWjڱn@1;0w _p{Ӹq2vKXʏ EqRug d,lWii1+$?MY+0V[;Ad%=J(kWX#2pc!eE Z6jJҮ"ɈQn92"6tZ,r2C2ѿT;"{vYsbF/ug 義;D(^[;%⋓SSʺ> d2ZސĿw?xgkf8x|wYwE)T-%Tu 9޵9k^eV>1]iFq]ѕM-.qٷؤ~I7-Șbdl8㬽R/rKduډ'\+\hjIb븙nʜɕKZn.@GQ LiM,TS͢|58qRlQmL~E6U; ͷػD`kC_6=~K4M{@/.[*M=:1FJiq-CL8pF G.Ā\6|YYy\hژU$*و˟dK%͍lke@ -㫈1J6UӞ D7ek9p#]XYe⌭J]Hɣ:5(F=2% x ȟ;pba KWbX}98k#5SK{0(]GNilJB MϻAƟ֮T~E : R>|+ eC.2)Eɝ`iؔf~&㉽ѓwCŜū47g$E2jkNvm?Hn*'MLz;"SpT7E5D^[܈x{6ᶾ?8ҽo+3i-9=]'vr%0mM(pj!v Uγ~K4b[1_dlc~9 5!k\։sD582ϛ1RJ}!Yl̆|8\-vC/?xgk9/\g+phWhҗo1-劔Q"TݲZ qɓpA0-gAMB0N4qJns|[2Bca~JCFd\=6EEcm7:jۤnH4q/ZK:>tVzCKjqqf~&WTj};mS]qMcq֎.JGݔf:oj݃ 74MH>9IojuBH.->62c+@ܘM!pH]C"Y*QYЪ*XmK,$NHޞ;Re 
v٫6xeqdMxJ+3닱ԬiGG7JЁ4Zo0t)vɷohڐ|p^z5OݠǺDrKR]O[n= o& ,KW'7hHN]̮@L۽lXwCqeҎk],96˜,Jw8bbfmM)7lRc`80g5U5!.l+ǩ )DSȍDˀ[""%OƦ>__pOIrjXbWѳ?H;@TH{5IֽӐo4]Kb64RI?fqOd9PRs|[OPPb%U/]y]9E V]pTΙJ0!ЇZЪw\J!O H]0@tGWLn}^d%ȁ5d{2G}tDdAhzi l{hRSDzb&^)R!-.}DG7>&ضSb"M5x@߉O#t՘WsTGd+{'Z>m-Y3uHaIa{ZjÛTO;=L"4j#ϩ{R!X=q2@l9F5CX s-tCLf. O5ZF{EßΚ(&srhuWS/L* U [Wj酨Q8+~Q:歮֕7n3ryZgMe m}I:Db=YvVZ.:x6~q1gm>u_d0ưش꾆-֥$;3)M#drޅ؛Dkq**nQZOhI3Q\ u|ɬaV%syrI?;,4JΛv`%=9,mR-o/[yAO&LylnMUʪ3'g~;]E;;[P꿮ɓhO'P̵`Y{FRnץ'rT|ޜ` Ǖ)"-oaG>.ZD_+IY[p0whQ,*-mEC|O+&T;ਢQvd,X>ૼI-K-!WnQfҤԵ)/ƅyw? ~y_'n`;'BGv3GTb.EJnbV@9[٦=_v@Fq<_q(\~h7HMB]?)&M>0z5{Lȴ|::`L4G8S/7Kt?:d^.ʊi!G'SrAB.tOJp,fCLP >b"NH%>LH"~TCl싟KJ&ɢB6p7ܩoS]r9Mws{ʚ:F{U +5*kTp7ܩoS]r9Mws{ʚ5*kTp7ܩ~ûtfnE{9Mws{ʚ5*kTp7ܩoS]r9Mws{ʚ58u~a[Đn94vPoS]r9Mws{ʚ5*kTp7ܩoS]r9Mws{ʚǼ;=iA!T.oS]r9Mws{ʚ5*kT2cox8F]mgIpF-P;qBD>`"~nH˥DNev>b=Tm&" 4NOt":[3U em ){LC`#K RfC˨#b.P䤸vesʘ㜱AH``63c'-MuקlRܧ7\[*-7>XrS*8yv":ۦB..:c8#7ه']9,˴%.jH\/s/xkIOā/iR(ّF Ni2iۇ BdۇOb"QCuekxH>=Hh]EF}= {D ""$T1Ҋk˗#[WqL#mkyߛE!b_VE."}OqY+?Hr[[UD;mߌANIn1Ѫb*,\r"[]]DK"[n]oc칑(fe@/l")4TFt3_~]32#"=PU![__yaU;"cUUЅk&; #3/ikMϻAƾ)S+8P2,=^[R.vN)8\]w=x2=)Rڷ&pfMpW|"c&/L߈VE."}OqY+?v"rɎdm} ϖWؿr|w#?2-Wg xq.ox)>%^RD"cć:JhMQ:rLKƹn_nc^VEOkw?xSʞq7çR2E͜-ę4-f23e}WY; ڍ?8ɳNZH8O . "& Gt"g ȓ*#s AMW.#eHwx\ˎ9oN NkCݺvڔkZQ?\ڗ>'1 ڋ+[`PVgߒUn}ް7JyM8m3Jojp! 
O1|gk>^s/xo{9 Hqb"f tH.*T)1QM'ê@OĞCT ?QC"r\?qD9ic*56B-qOI# UfdF|VSPV-#Õ8@{H}pw??LRè-RlEPN@T}/FZ$dMJ#jo:֔VD @M"DDKl G_qCwWO?uZXk"x8j=mV+ G);5dfN% JfĬ죛kژ蚎1ϴSTڗ6ʺ:˘S=`kb޽h9߲jPk:'C4ʩv\iwjf %t9Vds\Хr"S*E]Ld~&=1N)6+M%&"V_y[_SX9O+忑,[+RVƐ#\(Jĺ|A5 n¢q=(-:LA4 O`I`z/`+bg=6v[7~c!wiula>ڹk&<%Nh7aͭsf?aQ-kis .iHwjzLҰUv7QqM?Kf>d_u]6!Y[NgO뵚u6!aIWݏwvc*iv5.lo4 T${^Gk{@!^=M֙F>fR 38̶k-+|${Jl͒\?B1]k]|VXݳ5<8}?9[O#Y^_hw)'q)U.43U]>o?;eD琡5S\`~Ű{mrzNeReU"#k+k_1m>ۿ($յb~ׅ壱@?1}7ye \qʺ+ 3#J9SͿTY)mjrOFg6ot~\{F=GK_EHB%9q,eÛN=2D ֽj^ˋ.WPT݆ȊR_a BuۭЉu7Q\g,2GYw>b7Z2oҔsۜ.grmGXhbv^ /-NHBɽPŦRKDWR.z~~\ӵ`ݏ=%KnYveecԓjKv-Vñ[ |SG'3x \7>XK)E@"v&{T/s \qd&xfx'5]9lնxxղ˙*ç$E?G9SSTPSm _N}}0҆A~FD3-A}"UN7Lt=?7OOJ-Ώ?3/l_mSGu4p\:="-\vL*hk3-uHi&q.azgL^z~ b#gb!O6f2ræč|ñ\cW*-jEm $b"DtK\8P!Q)p͈r?"C}-éPfC˨"Ǐԗg׬?R^cK^xI{k+`u%y]5}g׬?R^cK^xI{k}/mzϱ%h-^M[\]P ^lC:u?ӧ^lC:u?ӧ^lC:u?ӧ^lC:u?ӧ^lC:u?ӧ[vb(aNŶY<~>Ǐԗg׬?R^cK^xI{k$3%MK8.wt/ރ~wEg-];8n:Aqz?t[vquރ~#4%P$܅tϚg>ǏԗgfW]6cH 7nv$U{E$UQ& "" $t1ҊKȋ*D@CwO:fs"QBWږ=ڈ&_DRSi{_*|FtPߗL̉C1UAe:xpn#Vvc/u GOK3{y.f{Uu-cZ)h%l\gq7>X)S+8P?EoT-vH)j*2Բ{߲6.9-%L7,<N3S1<|ȸ!ǂ)D% xGGȼ9D1kH3pȋL}Sٙ"?AB @D=Txs8gDHy ؈}qR Ho_+]y /zm˒Y*53?p_ ={Dpz|̋-z#(#tȴ_?". q.b/>$^^Ug+fA VmgiWxw0lyq٩H]A9Tz>X珥,ExEMT,8lЪ6vI,3d,. 
& 'tETd@TGPL4 DU!_;\xDc-$؀H!p$)Ú|~K Hh~lF@rEs_1mvqdSɸ#Uh|܋I'")y#DtkDBUKuGIC`*d 1>c%""H8t}Oi:>$aOfdJHtOt !lj^nOj9~~eחZ/oJ]7Hy$mW;H{IQ>kٙq]Ob\9Ӏq4 |iCm,a^Ј l[m"IhnS&} âDDqU~GPT=!fD1jsvi;uݬJ[cn*Ydc]ei.^ݿDXVdנkϦ}pw?9zTʮH;E-rRmF,XdIG T~D"kԁTu37Xp1&D D82%exnZ>r"2k|y9, ֬mܶUަuT$WtWGЄaywЄaywЄaywЄayw$b!-WpVS}~F~F~F~F~a}\{S_ h”J`yӾ^_5סססססססססססX]Q ʘØ)>1<5B0B0B0B0Fq-S`/BrlG|ݡ\nޣ53-hzP9,5q ̷L4ŋCT3 5]c=fvCm+z>X߉yZ?#z,DWjjDg.v6KC㐶0E^Ce&0s?8{OT -%H}0@`Py|T˞}^g9 jO ]KT=] QÑ~ϑhzfB[1.B \b&_eOhf%`U2[}xIaO].S>ʑs״&|>Dj$"2<8]?8[|^>R-K[iCṊ;uP77Um%MVT'FKmO_]E)~mtRSL(_Co<^CtR-K[jܼ8on׫pt :&ة_3ÏI/m^/e)~mtR-P^nowxʞQKj}i)~mtR#=Xv[˭JͶZ-٪nReIi̸!ùxGtR-K[ka>2+M/ YzL<-;رrP>dkuŠkrM)쥮oݾaH| π"Sx9-J&|D8H}-ڭvˑSF>l"(h)=KidZZ/#Gu Gpoɻ{EA精*EC|O+dNOb|y|'{enp$ݰdإ녍*3jyHs7pDp1s|:yM/ rcDL`ڡ~#!vLo)r`{,5n/g;7gr"BvfQIS3~U.#.H_0*o~ BmI=$@OD{Z[GgGX ad=dj5mh-VliYgfRxCbvBKK !+'1C_N.nZ[]ܳɚ{zbHsc3A;F.ym/vIsG 'eEގ:C䈗#Y5 L#鲖+q0))[H-]R*m.Ѯɽ}1$jmV]8|ƞs1Գ ߋP~AcďyPzG5S4-" UҫvAC RRrWL4a[g٢?D,.@35u#r38.dIhTcTSVw23\iaILP/b71r1!c^=*:fpo_57QsĥK.T6&%A~ ,e*+=wC]ܪ}+j=?QNgWoܩsjK1 QPبOUMZ_NJj&}t׶IdqP(UQJi6Z0EnNc2:21m 7֠>^Ï|{M.efzE7t.R&C0.c5z2ßq8e~c-6n7I8zl]=wvV|EUwhۜbթ{څo&s^UٛZ+GWkԓGn,v]3[v DV_T׫+6ԕC;=r8-qh?dYɍmgb=,6'CVv Rj*dHdȧ?~bJ Sl^Ti͍^?{D-F{OV*cJI˥tl6('4!K}JKMϻAƜUr.ܤ/voP+C_J|4>Wب2% U|{젨Zh9DKmåz2"'FG^o${6s*ShN/~+J42SUudF.nbuevwqdG a> $ûi4m$gVKC*LXOWH& yVGD$&\;#qn=kb~V3&CS15]'8U{X[w&/"2"Yi-UQug}\OS{~js&1zh9"j}FWH,„eP*e:wtq\ql[wN33!+U,u/b71rrKQM < {<*6'ml cm`8ڙmlC-H9 NTWⶖw\hITPowxʞj7[ӆHJZ%M*lNf#ҡr_Lҿ6w񦿣kkMrymAg[ D>v5o˒$%żTC/*ߍA'f1<JۋNХˆr ?;+ksGؑ80nGHMOx5OvF@s&{ 5vZ*+H/-}U=&cb[=Q@8竳oIMK.7qЪ@UFLFWG1cK~jTjٙ(F *"{}R< M8y7XOTep̑L]z+dG;0&1{esb>a3*1)P۶v[df% 5xo cG+U%V^X>*4Ĵqc ʡ D:ծ?fY4;Ke$"<|ȸNJ(#>D[ȋE @L"BDEtȷ")m4|D|u|\~""ఇL>;e8 $x7/S{~jDiRE-X"zX5FlV z^i`שzX5FlV z^i`שzX5FlV `gdEymٓGc!AAx ^i`שzX5FlV z^i`שzX5FlV z^i`שzX4bJ+uajlYKP!5QNxFOyd4{%QY-z.kitK^KZ]=ר,FOyd4{%j3ךD.?%K>v5DQO˹o%T܈t-WGb!rఏL>;X擉yRIN[}mϫG 3eMؓL'7L@Sx}cPT^ hJ<=zr @':<x݃t\Oju:ϖXgwW܃i`Ӏa9 2b )#%,iN>BBcû)zryl"NsϖoNNS0ȟOۇ:yb$E:=7K<@(JM̹hq :1=XA0Hg7L@S}~WKXOxnȧRף=*mw.?6Lp&kd 
52Gɚ#Cdw !fH3]$l?6Lp&kd 52Gɚ#Cdw !fH3]$l?6Lp&kd 52Gɚ#Cdw !fH3]$l?6Lp&kd 52Gɚ#Cdw !fH3]$l?6Lp&kd 52Gɚ#Cdw !fH3]$l?6Lp&kd 52Gɚ#Cdw !fH3]$l?6Lp&kd 5emn׫n栓 +]Gzwzuz^AD]9<5>hȡ(g;=73E(( Ptya!}1OAPNuzn&$%;9(Jxz9 "{uzmÏw;t^}>KGE7Wߟ>ZB MliiDE3LW@@[!YBB2Sr-d¢]O敔QRSQ袞pT !"12ABQ#Rbcr 3Caq$%DS04&@ETU`5s?EI˃ativ`G1U% ژ!{U&W+}[kKo_K&6-ړkdtƚ0őxMז/ :cL۵Yk鮰9g4wS$YxM=IJzזT鮂bp]L۬rl'dSf~Zװ ژw^W.ܭLp:/*Bެzە',:FXZp Y^YQtMѬvխ9fgu:_M ',Si.8SjzZ[J\~R/p ]\$DkYp%^VMk&/Sh~X~Cc͟(.t U_ˍ%&djbW.앐r&ԕ.>Q] '+X|ҒFi"W_J^jZC4J{u"m'kUJ^(Y6 垖lKUbC˅%16Y&í>Wio+tӵwLbWuVƿKy\]fL YSWi 3`ʡ|WUzUc0$?zlf ʭ8U>Xu?j+tUķiګI?,T*kT?jl1o+˵W0` >M u.LU|&a+g,|SWUzUM;Uzi3>WiS3VjKگ&Z%fQL*Zh~N^eC)^t=R(e-"5$CHh|S4[j[m =̛d^VCK31,^"˕mu)f֊52f '-^6EHE= 3*&%^+m[_WRE+tVw5m~tقE)v p̠>ȱ2/]|O&WhߝX$*ˬ?bblиKXzFӦfVD苭[kwE)W\NI[yAiWDKXfUؖSXnP"V "~R_K5vD+-ɥ~*ERrI./8zEٳm!d|⸮^+bf4sV(oXjbbPM׉wuly6Z /D읷XDV6*H.ݲ{_'iyE^ 8/ )I+i\,>m|㎊b"9 z'g3G'qT'r᪢jky1/Fɒw&E"}Ȩ?@I{K@(+rn n\uqp܇>];h6Ff7hjBe\JQsl4='sϥFo 齚UBw(ժkAdǰ7[ҭDMѯYW$Lޮ9W @ש+^Wz!f"AAL0EZD?>[w[=EfE%Sw2 Yn=b,@dQHh?5lelqU$U]@Sӭ(-Z&U\T|O[FM_&ȆViԥ+$Ҡ죆ۡ0Ӎ=͐MLcAQnIݜs#Z :it HI9 z &pV]&ݭӰNy=M))nBNcyhp4uUXV14tE#Z㖪DCZevvNbD\у˚2SI7F6ÆʒSxl=HЍ31An_F4@v^VBAidgS&MDdfG\%LUQE_JT'>H5I6q -8Xf~TUjae]fb_{\/1HL<źGrԑRnifme&-Vή*k(\naiA:qU+"÷.u/S}lu o%keziz^MҞ$*& &vM3BɱGӳ\n)؈IR?LNʷM]y qRTZT*ӳ{IK0LQbU8𠾈 Lmr. 
YUb|e\~2Tm?JOڞa,u-.Xi96N$DE\cfiͪvRrnԒ2(w"K4M4˔6T K2*C(M 1U|rIDo`)NKl4JKV>%I))ucdLq5iueWy)`6m99X!̢KPvaݒfcۙ;m ;yĊf藟]dIUWLQC'R~Yn8.93h-eE.CLO:Ol͔,4kٛ`M &Ԯ-)-A*(kJjd3K=6M2*?Vxfs.S!-(i4+HɔJ)_dSӼpa%Sj[lꚚrvHU'6&eٚ!}4kscݮY:5NIn/lDtEAi {}ط{QE_Iq5u#f&bRv!BO 6Vn~bbx%M6r7[5n' ™ܑNZ݊+ƺ9C`"W\,|op*8"&&&,:9Q0OPLLIY&*= o]B2"X6N\yIi퐚iBrVpVPFi+]W4RLWH&E͎;ˣi瀑N .*$R weRN 4j9 6HhF#khh o9 bCUS\ lDsPp2Ѵx !ݠxh)ɯAP˼Ky6вdI5$QH?@HzPWuv.oPU>nD7BɒiVIh J Pq3*x"%gdRatDl g()Iyl}Rch)ٹ}rUKHMkSgL-ӥY"}Q3Չ&9c7*ٲz(Q#.¨F HYٺA,¢/SEMNݻaѫ4{rݑʨnڵ\74)ܯjDu'm 1 OҜzOUU=3+.,­UF3sBIiW "i6 Y zm$E>ЄkNXdwmpv$؋ "%@gz^ٚ^~fr-DXJb*E}9GR UU UZOFNO  /&ٺ=ID #`TMZW3H5yyMVq&g Rڍ&dZڊ:y~8K`RڥV6QInW`T6^1\זUTR9H?8#gC0]zE+"e}M Y3BNỊƢU7,ٮ<{kT7N:/+J6Q{bٺAnqYD*jhHnXtjƽcfh9tn*-vSv_l\D4)kU?pLm:Գ2o8(T6~wgYƔ%f%%N_r+֓5_W͊ +]B{QO87 Ѳ)'sn5D"Ypu4m1/9ˤ;]S K̰D 9US\ Zώ;&V4C6(+Njb fw4XuT7./#\%$pj,f,"OiӞ]wO8 z\\8BUY:0E`),*ż&;fs< N*\5TRM\\O/*RzޱvTtYT.&L<tSL<tSL<m!.ղLFՠͨ$tSL<tSL<tSL<tSL<tSL<tSL<PO.+lK-zeڨ.GL<tSL<tSL<tSL<tSL<tSL<SO/02(Ynw;./Yuce6[gsdg.veWWcLPT:&-Y & jī_4RpÏ7eHq}diP.d! PwSFTQu3`mg}͘j`UioG x@kP[h)O%>fLp)ɹtn\Bi9s<.:(kzCUzc&H;&%PjIs9mo|Qt"+yb"Yytm0V}`]7BsQK81/B˄{E"w"Oi&_+vN luTR]Mp MXk><*뮀[)dax)ɜpU ܸjy dI;]֢ATi $_}ز+zˉ/kI $*4Xlxlxlىyi%ݰD+Yu4xlxlxlxlxlxlf%9U6%Ǒ=\Yu,ILc2:)#2:)#2:)#2:)#2:)#2)͘a.DS+[cǶ:)#2:)#2:)#2:)#2:)#2:)#2(]\̥F}1xlxlxlxڙ6gyYtD].$"!IȢ*Դݮ ZM]2RWÂS38bNUE-$][&JId7d "Oi%-x ɸ })q:;wu wpu4ʙ;j]QKMs9I5C.o,YhǵN*i_ɳW@P<Ŋ֩ǁ`;f;EPwS@LO< cQQ/Tu5y lV-Ir/͎;ˣioD;莼\̿%9Ndr]'*`ifQI^"^)RLVA5<jEYy}7"ӟqbGrbNfKe5u L]Rfk`itMzb¶. 
"(8u91T"ZjkS HYytm09=9)GbL@)IllZ]DxFm=9CDȸ3qU#1T$@H ,V[;S\DLu- [=jdb;KNQWH0۞djGoebno̸iwb )-$JjnJw Hiik9 ͆as4SO%$3ĴW'FJVF9'F3x/ZˌO]'K][ X;\yNG&RyUN5d6wߊKkdZ:삸UWw33sN8ܸT";?df0AJYzM_yO@l$ܼ$hXRS |hQRRT vO Y6U-,O#V ?O8C&gܘy%yBĠhNWܻCA_HCj"J%$HE+GN,!N%52JW*'rQi!eMG/wʘ|6}t♧&IHvxV Lׯr2qfeʣnv)j'#L9CYvnZe$pԂ1qٺt!*ٲqpre1E- sɉ*JhJ֊~mSK0;Dô"$)+HYo*,J17)7(sہꈸt3dȨաdBϣiA^tv//p*9}%&ꚡM-5L˵>n۱yT%ef^P^hY+X$r۩9OKlD{\7Qa?ꉂ|#m J:BO* _[  }yT'mYBaBU3k߄ujC^5}Ɋ.EtEA]L<vPWtv6_g&i^zbu617)o'ԋ#<ГmSiBfR^MXDCiqW P2C/F*v,%V.b,DjVOf)g%BY;FUL޲Mg.I9]s42ehj)&+wcmFJrUjBƊY$_(Q:41D9$`VYlG1.QC*lK#$BM֗H%(8%"@ri=}(K[QMmtu5. Hwĉ*D:į5]ڍvk]7CWmy}ҒmIN%2 v.5vA[Il&~P neTr~@Һ/_Ļ#K/)5HVsm6h{U*\"w367Nɔ ug{z:U&V`,⡼xv #?O8@)'E]m}]Ҳ޻!yWWmlo˴cQvֶZmo_vzs%FDƫ);~_]_JHT]+$2OqٽTĜ Y[1&w ֛AsR4\LzΫtYL#evTnvі7Ur2lTasrч(I QP%ybg4iYlJgPJ)ީ1)*Շ͏:rf&h㒝6Ҷl2l{SQ=B 9WS%fE)@7'57*2nYŴot3X2>_}ظCd_W٤ּ7]t8)8ܬ[p[ ȰJQ?H:HggZ;B8b)fŜj«0#ĕxXݱW=D$w%ydS~!1䘣&ٙW Sl_"i,&ǓNv &'faemFDZQ1~:_{G6(+خǝt E/?F˄i]br* LK@uTs,`s1*E-gDT+y1-@(h ) Q?ިWq%$pj,d,"Oi 祐Quv/0T'{>n&)y4@멠 d]ADT\3;.p*'r᪢jk2%EJZ/[.*G$^Sn-EJlŨ׵?j-bֿC%!YDR6=5׃z4hr* ZlPWuv/9y'䌒Z8똠8Ui̾˵: r+9۽+%eAk5p-X*ż"Ӯ= USS\O%$2MwZ͒(܊m=G6(+دyAP$R ,JI&@wzȪp˺)6YيNȿFʾ#Տ~Gfݗ%r)Rن]8rM0d5f䯅PB- }Q0MQ+yS1"<+ke'S.9GM( - (㏕&;9G;[nә #_4RpÏ7eިq}dI;MƢ[$Q h6xHARqEq%XPϏ^k2G-wyt3#vPWNw4_̑K.?W=,)'q{G($PTi -x rT#)o.. xGdK2S+y\$Qk&^ zN n᪢ \\O$dIQ|:Jjؘ a=tĬl-%5t燏s]9殜WNxx3HKul"ڵ'<<|Ӟ>j5t燏s]9g2]7KX,|>f 3t_/cn7KX,'_]9殜WNxx<<|Ӟ>jZJr8i~&+Տs]9殜WNxx<<|Ӟ)"4>)J-IwGQ|gwGQ|gwGP@g殜WNxx(6dvJFÖ Qx, 8d^* _Z}K@(+;Uc9 VL/-2EaSP39/IT-5TR᫋䃞L5j9U"EA6gh{Wv9U*)|\_g=2RN4q[ˆ;i灴u!Q؟%ä$v) 5VD璪bl%i]X\/%$2MwZ4"w"Oi{(+fU{./v>w&Qpd% PwS@LO< Bb["⨽T3 ܷ_p QݴTE0MLT2(Y*pfچp/e.l7V-?Y"OG| &֛.?!;KKl]<4<Íɪ. ,"chpRgMlX%șSx\-#BW_U W}鏣Dr* 1,x^A]S K̰@F5 X= s"OYq%S?07!JHh٩i\V +YM45+ye,}JM39YBw.)nH1"2RN$u$Po{H /ih{v. 
T#TR㫋:;M֠e(*h 灕3B5.k̐@GVjKf8v&vSdp+^۟)TlJH;3̻+4+@*Iak gQ2m7Z+K[kW[ZU;%N1Xwgȿwav??xǓ'yp%M#>AiHCBP W(/&$_sn?l,;+-SKfaie]K!i+gnvޏ)NtmI6'\3*ݓW֯ܳp8u DƢ,*]Mr eK6:".C@X{v/PK89)'q{Em($Ti-x [ꨥH)VEA@L0 (+hQ3;/PUE.H9.%$2MwZ$QB?@Y-6(+د_2E/JvL$S#Y&)dU8zMHˋ'-(6IzuP T,|~>s?9'LS 8 V:}>AxnTU_o(;;mJ%uW|~>s?9}>AxϠ<|~>s?:ک *QAxϠ<|~>s?9CoF&jPlMDG}>AxϠ<|~(gk*i(Q҉(Q҉(Q҉(Q҉(Q҉(Q҉(Q҉K833, &=hnVJκq#ׂC٪ \\K$VL5j/lG"Oigдؠ{bx ..*Y2RM`).;pTvBqE-1䂙Y:5NI.VM(n[D{\7Qa?ꉂ|!'[_NSQܺ70W-03 :DDZ lV1\]) IF[yyHfBjӌ\d;芃6x x\^2E/ v\%$pMƣ"ݢ* L~&[m?1.ER\P6:)o..P6H6LpT@L灒*A]U ..%} &JIdQI8ě3$rmXv$SpAz{Wv.oPIpB#LJ2MƳ@ɶ%.8 'T +E=~yb4;+g$fextY1f,礣,Ř2g0^H1[HbY1f?bYǛ ըD$ɨ{FY1f?ΙRu)#S}wRs2m㼗7yļQG8+2 9&_VB+biX ^b+7QK.W"dI4C$k4 `Ueˢ*":fSv_l\0o22L~UqOia$CEP]3z Ҭҋ:l*ݢPԩ_{.^Byf j%4^  ;E *Jh%f(#i*6*- m ht&y6ZERxu, >$7H2%EJZ/[.*G$|NuUEj[(;-Qn+P7%IS3G ciOYa3)c;2B QR|ӊ꠴tV @ x"9 7H1|ee,bCXhay6jkvE%ZnKi&(M$rh"b~sDP[.B[DϊL)t'".K^nKgyS0_/i19GQt3&ޒ2ܴ(FhNdHئӞb&G-LtXSĵ [RS"_"mӴ(4')]FS2'DM..b[Ry]naY9L,Ӕ*)E`TaI<ءZ=ai& ouAvcEʄmIΓ꓀nPU: 4(٥@BK}ZA^ I;$qoYNwE7S:1k)9j6Zeھy ӊ7W-٦q}bjL\h E1SKU2SRl!qD5TETx(V"o5G'6[Gg=%Oċ_W+p9M*w)|\_g=,)'qi_ d! PwSFfTS .g2N9(e(d>3tmZ-J3ĭ0 LIJW>oA@ݜIju>lz^eɦm6ة⻑/o^f(]+!ed͊ʤTД2C- u/Jܢ2 T(DIK91eeV/HUc8D+aa72yӴUYȑ/!}Yvp@4jd(-h.ۓsDo=H4a)tU59#SO/6&dtk0ZGq,5/{/&7uPlQMZèڝ鼫=<&nޮ?MKNKξ,<*XTf"HV|S Hh$Z2yEss Y.$߷'B k\y,_tR:aa1;AT Yk7bXikjM1wqUƾnߛX'|YmUG=b"R*W9M WE ~L V"q#%1RL).q),ȼU=%֪R Į<'!IcHJ<"VVԕ>QnEYN>Hꭢfso.Vj_ItoVRwtrڐESpV=%$2M74Q.Pxhb^r/X״q%X#6 špU{uTR㫋&%.]]@˶;h< VPpQKu55A6L5jݲ?J:^Z:[X1t+N}kbůpW|#奻7Y|cŨ?F:\Z6V(]% }_:`uc`? 
nRK ZԉR&035P @`2$ڌ[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[ϟvvʥK[Kv>q\qВ^se-((/aۧK-5W1 =7rV{66~{?W_]h|gMܹv~Λs;t]WWWWWW6msHۂK\>Cc:~Ѵ>v?\Cc:n)s :Va!Ckϸ>D0a!C "D0a!C "D0a!C "5wcK\p!C "D0a!C "D0a!C.!kM._UvIQ=n.Mn.Mn.Mns*1I, @0a @0a @0a @0a @0a @0a @0k+_4 !0PQRS 1@Aabq"`2 ?-Ž_{q@6 mdk Y@6 mdk Y@6 mdk Y@6 mdk Y@6 mdk Y@6 mdk Y@6 -ϲ}n}Os쿷==hun=h~:s쿷ϣ;~f,tuz{{^< ϲC eC eC eC eC eEs}C eC eC eC eC eC eC eC eC eC eC eC eF ʆʆʆʆʆʆʆʆʆʆʊ- 䡅222222222222~T0T0T0T0T0T0T0P3pn_[<|=[~.}X{`W `W `W `W `W PC`W `W `W `W `W `W `W `W `W `W `W `W Ho_50+0+0+0+0+0+0+0+0+0+0+0+([7pppppppq쿷_ś}:,FO7>y]Z\}Vg]iq[uo?kW,/weqW;э̌ndcs#ț6tcs#F721̌ndcs#F721̌ndcs#F721̌ndMF721̌ndcs#F721̌ndcs#F721̌nd],-ϴ{7q~>YF/'/T T T T T TQn7 T T T T T T T T T T T T T T T T T T T T T T T TQo_%))))))))))))))))))))}ag}x^6M/ڞo>7>y_bb(AkAkAkAkAkAkAkAkAkAkAkAkAkp&3P05 @`2$k|Ib^d#2̄fB3!Fd#2̄fB3!Fd#2̄fB3!Fd#2̄fB3!Fd#2̄fB3!Fd#2̄fB3!Fd#2x$}?i3~g0)Ru+xW)ıɗJ+fDy|g0/r무Hx㤈-{<[O 'D7歿g\mh, (,᭞9 (, (, (,롭fPYAePYAePY[M (, (, (,κT8 (, #FD-(9^nQ>^z7av(/UshmsAf ;yqoYAf 4h,YAg]1M;74h,YAf 4h,YSླAf 4h,YAftŴ 4h,YAfQ9 Z=TwHVcƈWQ>VcƉˊLr>}#~$W#a]9='Ls\7cJp8f3Nύ#qi4p8f3N ӆi4p8f3N ӆi4p8f3N ӆvs>n ӆi4p8f3N ӆi4p8f3N ӆi4o<ėy~o7y~o7y~ov|htA҃Nu.9J(:PtA҃J(:PtA҃J(:PtA҃g ƛ4(:PtA҃J(:PtA҃J(:PtӾG=Ϋ}?i3ɉ{(>P|A(>P|A(>P|A(>P|A(>P|A(>P|A(>E(ZS3P01AQRS !@bq"2`a ?OWtEs+\DW:"ΈtEs+\DW:"ΈtEs+\DW:"ΈtEs+\DW:"ΈtEs+\DW:"ΈtEs+\DW:"ΈtEs+QmiNWsOrJ_?H]oå.??ANW{ݎڿ{GN&j{GN7~۹K:K:K:K:K:I'ne,,,,,,,,,,,$ju–u–u–u–u–u–u–u–u–u–u–u’M߷쥝pppppppߋ7IK9ɺ_1t`azs?fܢ22222uu!*!*!*!*!*!*!*!*!*!*!*!*1?~ϲtʈtʈtʈtʈtʈtʈtʈtʈtʈtʈtʌMDZD:eD:eD:eD:eD:eD:eD:eD:eD:eD:eD:eD:eF'oQQQQQQQ@|y_w# '=# _^nx~'=(Orɾg+|_r~ ( @Vt/쀭tk+]Z @Vtk+]Z @Vtk+]Z @V=tk+]Z @Vtk+]Z @Vtk+]Zz9xM>`Fɾg7ڸSSSSSRIppppppppppppppppppppppppSSSSSSSSSSSSSSSSSSSS^ӏW[jvlj?)NWsOrxӕ\8aiZl)aiZl)aiZl)aiZl)aiZl)aiZl)aiZl)aiZl)aiZl)aiZl)aiZl)aiZl)aiZl#docker-1.10.3/docs/userguide/storagedriver/images/overlay_constructs2.jpg000066400000000000000000002457541267010174400266730ustar00rootroot00000000000000JFIF//C     C    N$O8p"ABAD쭡0[+hZV 
R=5o&,UjʶDՙjYY[D_^VfV^4Z4J3[t{,k~RDDJ%DbQn9gx/:WWWݫN/YeXLL&ä#\wXyB-^oHX:Sg=|8yi\OPX~HP41w`5qx/6BgVBz~jS[+z[t[,k@9H o :yJbU󷢘GhDL-Y۳ͦ##\wZe }+_~Hҏ;Yy3I6Xs=_VfZRf_D/>7gkne q`:̀39sIVȵVdy>_Gl8Y=(dq̖oVs~w~w^oսb":<\|S=}4o'g# {[LӍHr]U؎= q&qۏk`{:;y՘LJ& 󧢴~lJD{$kds.(ĺz<[`s{b[ngMs VG|ݞC[8 Vq~w)z/I][-Y>~ܿ;{,k~^d9Χ$`:)Vfk3[D[GIFp>K;;y+z6OcJ&{-zMp '>К"՟:z'VF+MYX|s}՝;yLLE0|?})쑮;Ty{:;y/b_k|ұC??fc\o&Ǎj:]5q2r~_1eS~w{|;ns=՛89 άQ(&Y._= kx/7^oսk ZOIƷ0U@Va1(L&'Ξ^AOGFpQ@x9 [BԛR# aևKFual:;y?J>v=IhBk3[+niea}ww "@Y})dq`̙i>XSmH>z>F]7c3YҋoV<.߹5W>]MzZ?ߎ{.Gzg;ye["|m졮;T{;u- VUy:ǿ0@=eb:]5uOg3VJMwSZ(,l݅v6XssYJ-Tko:z*uO5I0˰He q`\es󲶅6<9).KFu Vs~w)z/K)u>|} ߷Wgsps 0L<z/cq::M5s2wyF/c=L ߾ֹG+C|ӍŹ؛x9 [/I,V_1={GD>1}&t(k~^d6idktWNܝ{o_c|ugx/2ԲޫRtNj^}:a\Vc$Dgsgx/:&<ޓIF Q@ |~[/I,ұxmGMp U0^mmYʷVo/h}~Kt[,kؿ p xY Ϣk(&YE0"QhL&dqk:;y;ֿJ/ҋj^);[}+,k~mK+zVmKoXb_ek3Y& ИMePĢИMfk$a5Z ΰ^u%["SelUjZieo[3YE:]5s["ZԴޫV-~w~_Ji8Y`ǦH6=5q߅+zMkps -e3{F}a3;E0'#\w)- o /|߆;`sޝ"`Ls󲷪ԚuI7[6츛7KM,vPoU6Y9AٞёsK qeoU9 ̥&i~h("Q+z6=5n&ium("Q'_ʋmKO ;ñ  ! `9{=0@64x t꾃k-KW%^6g ƀ- sQ=5{x-巖kejDl68u+˛YI|S7}<7l5lyf$0ޖ:8n4l@@ n5-g=>5cX&@@ $|ߝ6[9@ t꾃53S寷phE.KM"WY E[}ş?e`}jn0ϧH4l sU ƀ-}W,0eǐbr"Ly΀Gqz|Y:׏9qc\{bς ޙt꾂eZj@El&%1sU ƀ-`g6[9|)dGo=@<(3#o ; t꾂e#:mY3G ƀ-)yb|qjd1΀Oд8/0{7&6[^z޹}!BDJfsU\6$Gl@V@ 6[9a@ ?3Ԁ- ?r@^P/V`D+0V+ J+?Clmw󋥼V@ V>~iѯV6ӔY҅O$9x=)5?f:|9Ҁ  ɀ1t̝ztr/6ЇxՍϿҀ tҀ+X(r^Pgp3+X}(PKxX(P\z\sc~n Vv'Om3h@foybH!]-bx)e(PKxX˨M+L&]9PX.Y7P|0}+b2$ϸ<`z8Q@g~oV@ V(` V@ V(~pŎaKxX﮹zy$\͝K7W34zG_dKxXszӏwC$>f(PKxX}w9a }srgPX.Y7P| + #Ϥ}~=~}1ky/LBWx -- - -- -- - [J Q6\ڜpEF#DY=2ʎMibS7ۍ|XȽƦnĸgq3PO45U]D5E#aM9'⺑ZmJ|Ic=j3sk(mg'd7N!o2>Ƨ:ℱ -\kR$RwU%n_6=8?߭{3Dsjpy7r5EPZΝV$%;n)u7`R%9J IYѥ2J+Ƣ=G\ =Z!ݑRM΂)%Qr!ƭpˮ>R\>i81GSUXkfe.J]e;W"tD%Ȩ>G|cyŽ2ty&NE9ԵM|:p&T%ܭHJM12%!B(ȡ¹9/l~z{`sdҰ̿&$o%铼#[ԏMjb{}kٜhbpmN?G/ldR5$qIj8Q8qƤ5$qIjHQ8qƣ#ٔLK'wNVElNͩxy|8#g(_}Q~nrc)ŭ2tay85vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv$VElNͩϤ6]> o%铼#[ԁƾHQ6\ڜ~_qogtӘ4ufӺLS[K~~JieJ*}<B Ĕ.N%[?MeJk %铻;́ƾ`EH5~2EoP\ڜG81< }m=UKeW[4@iQ]].ģHmS]Lt!fJ[,UdRMDM$hNvpVW[*:>8O^ioFKEe@. 
M=NΐI$Ď|k]ݑS2Q#~-0 4٬jn%W'KDQ˜{K3(2M<mRb{.nl*T^%-Vll)sFJLv3ܚ s19P2)jY7g$V({bpmN}${sÞP9|22wta z8ݬ"ȴH%"^n'l)Kr-D]75;iyn~L[N=(1XB\irQC'[W9֝J3E%NsSܞҩH= =B'B.3DֳTҬ|J[EӚ &؜+S-  ކӄ@_vg$V({bpmNaye< s~?3'2[zd q_3+gM8W6>gɥ51|Nv8ӱƝ4qc;iNv8ӱƝ4qc;G|7N!o2j:F(h(h(h(h(h(h(h(h(h Y־ElNͩxǣ>G|z&15;S54qN N Mjv`jhS_$8V 2Hw                                   ?jȡ¹9ÈH\"/ M۸N!oR9C9C9C9C9C9C9C9C9Cg$V(bpmN?G3l83|'/zK'wN}[8yM8W6>G|cy_-,B\`Y5|:j l8}I~I)R–hbJa*_NecOm1ܥTؖn>eф@_vmy&Oz;%QԤ[֍ /Aq%+Nvy~f3NrC]LМ@q,r:"/]C!)3+g"' qGUQWzkkny6Va.œl̚Gxmڟe"*>:nZids!S]U)EPt۬1\@q CV DŽeC[zd qXQwvCMgՌlK+~=-L[?^bbFIw=+"|AGDK'硨fܵSm'8I:R{)WHm`pҖRԙ̲lf6Yːͦs;..S1ñ9u57.|1zg$V4M8W6>[> %铻;́ƾ`SJĚ6I~TKbEQIY[Ƨ٬9!i6K՗^ATrAƨ"92QQs9|=Sq.e$XuzSf[f~ymTdOW#[u'nz?"p({bpmN#G|8_x[>ZK'xFw}[8¹9½O:/?@j8vQR1oy/LBޤ5kF~2EoB' r-?+X8e`vX0;+X8beoy/L8Ad n̍̍̍̍̍̍̍̍̍̍̍̍̍̍̍̍̍G{؜+Sxq8#H&c/mrCU5SCU5[A%arW?U}̑fHq]9l(frOS)I˧:n+w8};qcw8};qcw8};qcw8};qcw[aGVlNͩϤ6M($EȿYFڱoy/L8Bޤ5kF~2EoQ6\ڜG|zqFË?GŸ2{zen qX!fIyf=1r~j:WPz{7jG(p_7?a|۩:]-!5-|\S\^oPgu[YK[u1vt~Z|˓ynNe?"p({bpmN}$̽ky/LBޤ5kF~2EoDsjpy9[w\_鯓d:u!CYd:u!CYg8~pbŽ2wt y8ݯDsjpy|xa#;sC;{\nhvohv湎!ۚ GC?yŭ2way&S[rqƛ4q7iMn8qƛ4q7iMn8qƛ4q7iMn8qƛ4q7iMn8qƛ4p~fp/qCsjs%'ioVj ZP5j@ըVj ZP5j@ը)i~+.-%铼#[ԁTӟɡɡɡɡɡɡɡɡɡɡɡɡɡɡɡɡɡ![gM8W66Q$لIoRA[K'wNw}[8y=8W60<>qF~ex2way8ݯ{&؜+SI3l83hw[HIS CԢFfY4Z!s) :_6lF$Ncfe9ޒU:{(vJ I[8m¹8<yxǣy5,jbcKku Bʢ kk;ގ?/%k&MQI.LkneT3VBS5O.k487e 3:yuv3 ɰ^<'/=oy/L#w}Ž]R'ʧfzT$cBWDuQK&ʅ-ԹO' ݄ra:}1\"N%Ge7KAӤ- Lt_33 NujOSBNT:&lx~:X%;+VE]n dȡ¹9g1s̼*a)u2H;9c~i$E9!&P? āƾ~ebi~2Eo^≶' s?FË?G02wt y8ݯDsjpy1<#>y~l+4#R3333333333333331|G Q6\ڜ~tql"i*_/鎨<~O"uǦ%::IF+De%TWt ?WvtyF?j%Q#*[kJ35Wb. wdq4MZWz{Õ#f9;K'wG5&<%u?PC '譔n 9]KBeI9jfWĎ+Jc]KO:Nq _8(yWc[ˣLD}M䮱1K8<5u(N9Bw/!Dsjpy?i9V"%[7HM&u)q6 R1"ïˢW cԜ?<geOAz4#\$;}:pjr]Qd9%3jE. 
Ug|zK+wOx,b6?ˋ9.ST9|6GzO Rɢ;>3%6TbHz7tj<0UpSîd14({ n(.rl\@L|j,D08~Ig ({bpmNcjG4^1xb/ ^1xb/ ^1xbΛMo%铼#s6%"/ ^1xb/ ^1xbx"//qD]vj/L^1zb/L^1zb/L^1zb/L^1zb/L^1zb"6 RoL^1zb/L^1zb/L^1zb/L^1zb/ ^1zb/ ^mSNC!1QR "2@ASa0B#Pbq3C$r4D`s ?ޞz{oOm=ޞz{oOm=ޞz{oOm=ޞz{oOm=ޞz{oOm=ޞzp?Dޞz{oOm=ޞz{oOm=ޞz{oOm=ޞz{oOm=ޞz{oOm=ޯf0مF*8مF*8مF*8مF*8مG.Q <*6aQ <*6aQ <*6aQ Tl£ Tl£ Tl£ Tl£f0£f0šPGD=f0مF*6aQ Tl£f0مF*6aQ Tl£f0مF*6aQ Tl£f0مF*6aQ Tl£f0مF*6aQ Tl£ W?ϣ~ً. r2(ˌ.22(ˌ.22(ˌ.22(ˌ.22(ˌ.22(ˌ.22(ˌ.22(ˌ.22(ˌ.22)k^}CYoU1e5ִLƝ6WIB72?)_&괶O=W;D}eHgUmzfAvNVSMfL*Hޞ[~Ie(==VOv U;FŐ;)VHWu*;-zI`[KHY15[6h+⫤oVyx1s{HBG͖Db֫Md&RL;{1c:MCmͭhd65k4gSxΛ>izkµG%wh~o^[ )Jc:u(Z9vPFju ,6@:ܵքԲi2:'2UZP֫%ҜkfIK(y3_j6٪俒\ܤYvSG4bxG5%@_d&Oca֊vR[e(ȲtKePj08pW:<s{״'ѻ^é7,kj,[َ&6FtukY6)a %%!hϣ񽳸i*#ַou1?lۛ"4rF^}<xTJة=R{'R}%ITO=R{'R}*Ob>WGfѼџןv]S4~cc~ٿj$Vޞz{O}=ޞz{O}=ޞz{O}=ޞz{O}=ޞz{^hϻyy{ߡ}fMyo!o9E/m;HR)ֵHWD@hZ=^7-Nn<ĦOkZoCppc~ٷ5zw|޼џןvM&ߖY_ %l KYHEEۖ ([|@R>aM,T~YӪ }Kh 6t1 hK]gXQl׍7z;n jPZٴvY;qNjjQ  +sWn Wv#ښĮ`QibuP}X{o*kHY z{YV<i p}Q%^N.\mSa.Lau5v\ƽ*mg^m:~gƛ߈6ZsQI-V5Ŗ$gc}yo!o=p<ϣyۚsmZ6x1D(k[L[_Q3d[]S_Lca P!ɲ6TmQ$Rchoc܃k&\u-A6MFբFi&9P/5h7STgeR֚]4gݼsͼ=áyf&yQzj3x,Wך3C獤((DADADADADAD 6}f:?E/ҥTJR*_K~/ҥTJR*_K~/ҥTJR*_K~/ҥTJR*_K~/ҥTJg^hϻy.ǃ.Qv]%`( EJ.Qv]%`(EJ} c~ٷ5?ޜӛzsoNmͽ96ޜӛzsoNmͽ96ޜӛzsoNmͽ96ޜӛzsoNmͽ96ޜӛzso^hϻyS4vc~ٿjzF^})mxnh?3[30).n֦:KIre\~VϽy?>&YGPcu\Ա_vaus{uUjsu*ݺCJƝOqˏ(޹bF[Ъٵ>ygGjCǨغϰtRyaڄ a3N",_9>.sDѨQ59լf1`I0?Dm[i!$ɖmzTY{NgM_j) P;A+6MӵRC iֶϽy?>.?mtx6\YAoZ[Eߗ; [onTFD {t` u;:]jf-V !;fZ"Vu ɷH*B5u˶j=`)Sjͽy?>;7{C;v5 c¡ <*c¡ <*b¡ <*c¡ <*64kz<mVN:DN:DN:DN:DN:DN:DN:DN:DN:MKyo!yIܛ-ɲܛ-ɲܛ-ɲܛ-ɲܛ-ɲܛ-ɲܛ-ɲܛ-ʬ2,IbLՍQÉG%8pLՓط7bpm%+RJ+RJ+RJ+RJ+RJ+RJ+RJ+RJ+RJkyo!jTw?lygG5zy?>;mtx6_74gݼssw+fDӮr-~!,i1؜8j݉nmM)?5li6l=w@O {E.};ZQo\YDm:m^QQ{%)P餮 v,ֶ3yn^Q@[8FJ5-!?h6]i(D g u,?kۦ[6h LE؄4uWZcѣSGizf޼џןv]6yfQ!HgeJ{"SPn#5'ĩѹ~ "֭JGH&8|9)δ>u#q=ķ%HY9'iOu:S[\UOwSeך3C{1?lߵzy?>?n5#r!D"B!D"BOթc6_74gݼgk] ;QQEr<*(𨢹EXTQ\ 
,*(QGG(Mk:>Y:<ڲ*?r*?r*?r*?r*?r*?r*?r*?r*?r*?r*?r*?r*?r*?r*?r*?r*?r*=yo!ڒ^9}r(Q죗G/_e9}R(Q좗G/_dW>3nj6S$L2D$L2D$L2D$L2D$L2D$L2D$L2D$L2D$L2D54gݼsͫLiws·:<sWך3C=pygoڽD޼џןv6i]7(9: ڮD'<toR11RƩmEρOk^#}Y>>;yfFѕOz'e޲YL{Q=)Oz'e3޲YD{Q=)Oz'e3޲YD{Q=(Oz'e3޲YD{Q=(z'e޲YD}[/,c^D"B!D"B!D"B!D"B!D"ZD">?ꦄКBhM 4&КBhM 4&КBkY:<(z4&КBhM 4&КBhM 4&Ѐ6|/^:zaM9Ƶe.A.kK23exkZ3ME5Ŀβh^:d1:S isYi>SCև*q%$׸,J̈{S2- t?lb,kHLiëQ(fJ@vP ?εN$%u_C_Dֺ,Xԙc \_<<'?œXߔ96 @a.[2;kl5Vw?0 Id"GxZ*A#d*tBdm56;.6$eKO=Lt9S*%L&@]֕S YC4IklkZc"m{*AR3,ZNͳc$m+pc~5Y"FDb3 lSC&Fq;u)&Ĺ"Sɑլ|X| ;PL$M2;_hQSHV&PUH?=ϷM)Cu#4Y~7X46am(c 4L>I?ENc6ya]TԙfoL 3xL7Œޙo u&=Œ 7iC<<uSRgQa0ftxL7{0ޙfoL70ޘo tk_0>C=)f¢T3\T3\T3ܢ l*)QMC5)QMC5)QMC=)¢T3aQOrl*QOrl*QOE6(¡ {SaPr{SaL{@e+'EpG娧E6 (E6 (E6 (¢ {SaPr{SaPr{SaPr|*)E> TSܢ kSܢ {SQME=)f¢T3\T3܅+!A1Qqa @P0`?2Aբ"YEYEYEYEYEYEYEYEYEYEYEYEYEYEYEYEYEYEYEYEYEYCAm$p(((((((((((((((((((((]R*.]<C(/XD?d<"YOYa'ZeD^d<"YECa!xeD~e"ECo@G^b'@ȍ #x/  -"7BHH^-2#x/ -"!xDoKH2#x/ @"!f6FDy&<9)NL̙ٹ>DKu^8%"&vnO DWa Hɝ"fR$ xrL&J[LԵ˒ a#] ,Ly4Ɖbk4 ɵAÂbS̎f5FԏgAXU;Et|^)sGʪUQ7|w*&UDܫhnO:kB#YF&u)VvJ=9ר̝B "D֩#X̩*>yve5n}#;݃BJ&"6vQ%S %  _Y"k@A)b䥲Bk׷d/I\gx=veU2bzقR^D4`H`\UOiQPd&)  ^;H'AW//////////////////|{LOL>sropL&]xrNeg\reg\L&]c-?-ɽWc|?7BO2oUU7VCȡU. LVU1UQ'Uhqe\  *Z` E U CR,_(,{QTٙ Vx]LUH S:M9E-xv;@Tv C_U7ΤH#Es9Mw'Az=bh>7B--YJ]"(ɘ`".jJ"b|fr$A=34GJ>]20^/@- uي5!"IMF"D >bd#fڵAjKGs-IgXMEa.w'Fs]iAE>1)Wa<Wl7ܪUTMɕWl+3rr=^刪GJ+1{LGEWXWX*xKvoThB !QX#Us6ho1*a3Y8h'aw_LC;+,N0鐍M%FBH9Dt Bfa_Q.PcFL8Ӛ+ tY(.D( _MUK(t#<$Rغ2s3U{'u0JU1.0Ʀ%rLhpUTIz%+"\W%'@i>Ԙe\^)ȓt(GVUoy&R%*zrdVd')TÜYf:v߹ yM/𦂚)³TGQtWE}"oUD ʪIo{!.Cɺ|_R/H~E)̤_auR]ԋQnUTMʦsvrn)⛊n)⛊n)⛊n)⛊n)⛊n)⛊n)⛊n)⛊n)p.as<9&VuY%Y%:XKeg\I΁ȞU`RYVJ(aYrV O7U NY}^}9oUجޫY7ت)UTMƦ>WޫRroUج'2z(GVBa<ᆌ.bvh)lJGT{LLhʪb'с;JB"֘4(LZJo Z0m!<.LN&BJTn=TE7B1DQ3V'R^R R%[Qt S$uR\Yན!5K,ꗝd dp"dT !R0T]ňPB%d*#2byjBu6?Ai>.|#ӓ7ܙ.M`ι+2eg\rcÓ}ṪaWHPo3*BM\Q`?= Dq @E|ؾ!- Nv`LtJMXR(T]&lyVd6$4d%[tUV+""roUC$]1ҫ:&E0,3@5wFR2]&깮Fq>)I%bZYu h|25xh#Rڠy=&o*)@tD,jW`UzW}j=,j2af9u }zgU7VCȡ*&]op48&7]ʹ&51UQ':C7ڸMuةi- p>bޫLТ۲΁0vbVl. 
Bc!q+ȵٕ VSo2QVt0N1A%Dv(iX!WDAA5@ f+q Ƙ>b-egD#b"I2\ ȼUEN Y ЉPdhq\>, DU"RL.{L!*N%#ʪDm9PʪeCOw9L*z"OL .>U9L>s?]ɕWlܪ NeUDrefoU7*:3 Y&"I|%=ʦEfN:]?}y)TC+*ȗ1UQ&48|W&VD+:e\ok0X0U[BqoK8XwiKcSsx+*hGqpOE=\pOE=\pOE=\pOE=\pOE=\pOE=\pOE=\pOE=\pOE="쪞압r~0IvnRsR|̙ٖٹJ6.pJCUve>Fnf;`|`(kPëTE GA9<}i_wܙM_|z/F ftg$O#\r.Eȹ"\r.Eȹ"\r.Eȹ"\r.Eȹ"\rG&{L>s7srnnUTMʪIn.JTQ"ڲ*&SGrwТ0wy+ \ؙY)Tf>͉P%"hpV1KOVDcE'=RIaWG5TkY)H"fFXv5d23Y$j렁J8026l>wy+*v`arU"R^&veU1sLUґ)OJ[?8?|OR%[QtĤ^. EJD Tv~yr +?8H!sYe5TYM5. Z+0!ۑX,&Tu(Q@J# DL9|5 0 {LOTa<5ǧG1Y<9'U誨NLzz~\!R|1ɽWoKP;nO@etzLL>CuتWb\a. bIbNcC748>oaI)7 P^ x/^ x/E"fI!3}\v,Nu]O@iIp ChʪxH) x!*8|ѕT'iTH^*:  d>C ,bEh$C dB#G|1W>iDyUH?OPI铡c1c1c1c1aF*gy:f+3zKrJ{UQ7ܙYv\v92[!WS{S{~AQ+}ئ=dz[E0̪hVds9E"{)HDR'=Oe"{)HDR'=Oe"{)HDR'=Oe"{)HDR'=Oe"{)Ha=QU7VCJaYWCcS +cCI0YTwFB_s*"ĸEe\ .>압r!C=U0<9) fLU"z;2ԥSHxK|eWAٕTʆ㚭v g0(+wCȹUQ77*&onnRt-0uCdAEh꒣8O4h E.0QT&hX'Q$Hq]>WHݱUQ77*?΅L$|'-D n6MsKT8P|fAJÚDYDnt'Eq- dҪ0ZKf15CJ  GbVC < iq5wU2zٜT.n=* Ch/?@e.jg0JU1H >͉LJXa:E )>͉)l0ث+Ue|3 ߣҴgA" W^Eh++ŮHd㣦kZ0._f-!BoIB\cwusGVyfeRS GТKֈDC 22Up"#cyIYqDԈL{幞h']/jD?H^jYTHTY"*&LvH|5Mz":4≯Ĉ8FAd"-UY$>$*ӨEf76Fs!24{$'C13jfL+2d'qÒxKdYfcL8yA'ƗIp(cxD%lya,?EUY,Fc *AQtŘa4DJoῖl2-G}"ʪv*&SaWl&nUTMƄ*&)#z{_Ӕrn57bsE'`L CٕT?"g)bR^ٕTq3GfUSĉHlj%-Ua(E佗^Ky/%众^K{/#k(q/E/P"GЪ888888888v`OTaCC'p48²%rLjbD#G{#GW!d*?w!Bǎ\K*ƇUDWp48&VEe\V+\ 0q\cC%ԯoB8r !#G{+qW! 
#~"ru^VUYr!C=U1K|LʪbL"n[Kim-[Kim--p`=̪R"vٹU"R6`06)oRɝUL\̵)TaP^[Kim-[Kim-[Kim-R4*;3'L>s7foUrs2iKSe=d\2C`Hϲ/TR %RZ8@-g%/(*gv tqԙ&KgdX#Eb: W3zuyb$MIUSzoUQ7ܪhVfoUQ7'<7™b(EUHX*` !1$sD)HU*sS!xZH!d|(hHKAO\f;Z3YdE* I=+L9a l $Vf#]!Z0q-3|Z.pޫnUTM73U{>R@\J[ء|%8\ʻ `e] ZfOJ@fs97B X*NCG}$l3ZDeUյX_rVg',Gj]`J3-4(Tl*+̰SՕq32cCI\ D&VD1KOORؘĤL D8eUQՉ|Ɗ2B]HV,n#^#ITl-'HG#~$EҌfG<Q[:"|9YAmTy_(h"ƨuBTWBZX FFL:Yj2E]$sԟBedJ[oy&<9HzrgfǑ)G-燬 s=c՘qn?0B>xz00r̤Iz2R)nVe3GRl=92RٸIz!cE]-8>_VaŸ| pÏX3-`-B2R)nVxwuI!)b{,/eZj6Z.cPKa-RX~ b7,oeKa,RX%ZT=,oeK[`,BX창k{,?eZ{)*6?+!1AQaq P@0`?!-r0^yyyyyyyyyyy!th0yyyyyyyyyyx^0Gb\W ^s!]W xL漒 :Zm&nʺsd_D7aEݩ˽bB䣱Ooƻ[~ 5ߵvmS*귒͔}/݇%NvUo-:l}݇%z[jڸVmn7-vk?jehmbW/-Yz)J-ZL jb݇%zS|W ֍-]&r7sAѮiK)q\ג[!eN[AvP'uqM1!]W N/꾬k-:5vJme]W%!Aj䣳΃vUX+pCv_5<5g\+:vfy%u;Wk]Y6XUk[-SvY: ڙW5#+pCvvFn5\+zvat_x9~5vmLmd,?݇%mΓvU{?QR9({-_³n vEٿ\rk?jYSe_V5x 07%m: ڙMwW}0?`MznC/. 2+uQOyhj`rP>7' OQDsyvm{x"rx fkL/ (GCKTS09)ey9p F%>g3z{]_"O|ʽŐ#a7u޿|P!QOAWUk@J{߯픱}^"NgQu9<QOJq &w  c(0GAwNWMwQ?{W/7LS8'IAwNWM~}jQ?M~=mӂ}mԜ=mӕR|߹G_kھS{]ס}=nNgɊzۧ+nܣ6:q&w쩳TPJ~>'pb$&G5!ڤC s8-Ԁ{Â֩;#up!٩Ք$|1%D6b`8H'S -o tT61 %fQ@$V(lTKDBFܦ& !B2].[t\+@"0 W۠^X@t(J\$ܚ*$ód+h,Gky~b#VxIqɵb`rU$1`rUɽÿ&aky~bdݺCxI"G3jţW߄ϏybсOO_k# i Z p bHp@.PSag @!Ѭ ' u`ޱ! A6gLmсTZE~ɟ 0f|e <F=!1,bqBIڹ"dzK)65 pBrQ)(ABĘ ɰ6K+1o$AK؈5:P9 JD!+ H ;p. fYyA.W0ܽ<"_#E̓ *D RMB D{ u~#!Z ht XLА5ouEO Hj=&rm@d`rQm:>2pX vF%1>ro@KPDPUmrx"qMɆ3 n5P ͰvFTl1f|eUs^45vF%U_?wך=:[ڶk 6LYyG5+vM: ؑ\P 1];:c1"4˦!&pɕjCYivZ~tZ/ LOMvJ,ѱ!Kϳj "nѕw? 
ԡ4_#o/xlƽ_}~R4lo EMQC䮃7G(;v@9%n"w; #F^]~Tk4j $'rm@k@{FSf5hSjM3wk@?_W6'{ֻu AѢPP'Z0-R4lou_s٨{F%j3w lLd3@F pHC:s}LsC5EU' _rm@;ϴ+l7lYGG>nTZF.,~>Gޭ.daA l?t_Q@a'4v%ϟ(=-Jt3>Kd.L멵d7׺}t^{Ok>׺}t^{Ok>׺}t^{Ok>׺}t^{Ok>׺}t@T.;6}l/$9<0lec`cxjmλڊhz09)GOq'Mÿ^N~Sɨ0-R6A|$t۝u6SCOBN}O&ꛇ(\:Orx q㥿He=h0/R O|z{^.cmY|BETzGH7?LV}ځh [09(M{{Z_h ~z~97@ !qrxBj":xNUӼځ S09(Mn;KM~Fro@;n) uZƻZkj>;ohS3 PwȐ tlY i N ;;34rrq& .ě(`&, O/W@/81.&bxµ" *."TKNOO5[Ȉ1Jn'o"5HFˮY^]aqH7s97_.la3k9<45F"0-R{3E'g$ځ1գ09)'ce F%7k3z}e(iD%9<%8 gxTa;lDֲ8QXavY A$AFY5`IMiAB fbyʀ"G.8b;??8DͩwIBD^e+ .%Ń|#ЙSv|ZGtר}/P^C " CGtߡv0~Z !u|5@>`"| Ez B;?0XGQ J;s%- (!M(dL-0jhuY:VDc1DG~'0piBzPD(=G-vE$@PEpS.h&&2WcFQ.@KuUiN34T.ۏt6mR? c-n^|F%Zro@){]9oM'L}Z|E#M_P ck& z^tL鏫.) o3j^O؝JC=a$Sc,Jv`6$$(%2nSÁ..rmH3!̝ToareHs9</`ZF B0_xz)ۢyC$jy"qAu4AKj:QFvr@MsX[pg\֜2h8%u3H+@DTaT.QPw%8.OD}FZQ<{(k$eF]Qj=jp}R%6@dz.Y D!TL)mn60-RLqkF)F%^& 7?.7G3QrxlQm cxOy6BJ<4AQ`rQro@s6E3.MGy 6L432!ao\ujGy!@ZNS| wZY5ĽƫpyN}ڊǃ|f~(pށ|YvmsmtaQ9!{,όdξMJqښ=w?&8JqQ5ݮM 8QE -Yar(w96MBjf|eDwĽƫLR\eM_? 9<-8]ߌ3RV'ݽʖp{97L[_p;l=߸Wv~Dډ8JvcSǯ6~vJp=BH\AfW4>]W32M~g& A09(oA֯WRH@;c97BP%{Tvuҙ5D]:782SW~ɝv sz+&qz㾩+Ag樗}:382S̽l]픱m.Ed7MuJ567&+3ڋnJ}aLfsKJ}bށsy(+f]=NW4?_V5677{& n`ZloD^w?#?";ɽdRl ,C n`rSC5&j^w?" [h4 f3@L&h4 f3@L&h4 f3@L&h4 f3@L&h4 Io8T(~/R9?`3LZY T6p2SVMF%bz74PJށ(Ѵk&-1֪?ho$[ɵc@`rQho /_ N)A`okvm5{}7,D?AN_<)P iZ$5_%5PmSD?ANS9<c~ (;FWaf||ʫM7d`rQX?. ΋J,ށ|KP6 6d!9<E[htRY%);?3j \:>2w=]Ǝ.#z:N-ŅiuqEC-i#V18B,0`MPc ў;HXRu6A%4ys4r 2"QFonrO˶pX z//'7M8n{L_**D6p9$"jAcy G/V ܎.Aet(tP HUhԇ=ِ"^觷`~HloϸLaLbBOR+. 
>Ij9Fݟ$Mporo@/muiON1j>#Po(夛P-Z0-Rѱ!+Nh># ~wNzsHѰm=T._.7ƄKHa o6C8"4~b !6GALؠ;ȂĘA:ݞ ]` h+ NS ajjTNȇA >Ek$p2f>#)6.a~ 6'w?KU4 !L֕GR&Y` mm#Jz)3rJoc!]ڒqZt&Lp^@{J-;'wQD^A BlL.C T m g1TM}d"K*Ɖ3y.A2:~џƫ@6ꛇ)OH7T#0]Zrm@09(MnTZ_l[J5nz6!=t9< 5MNt96T@6~&1[gtJ"gz_Tn)-ou-?}-Z~>;g*(Q4U2!gdߑz[;OzsMo}b/sxDo{(_6N!$n]0PZ:8\W1gȗ(dH@9O,9-)5V$+OU`ꝱAK -Sׅt u_@)śK<3X9M`w2ՂQ$Ge׬ i0>R xH22?L*x5[@D)֤w|`hF%FG]n09(ɽw;emӓCcMv3>>ITv]ӓj3V$zLg{&Ct`rQ":z/vGQ!wNOe>>I~|)8Aj rx 3?$2;|^ĽƪNrȃ* |FT/>\:xBMlw4=OMÿ}]:)VC/ MuJ^9ie4=h>#%}ށFw<}^/NOKĸ.rx P8S rӧN:tӧN:tp$%FUHDVNj{'㱱j}'Oo@?-ۢ/qE;E"P#0GŽ#m{}ށ sF͟tr5'cYc&t96Z09(6ŧJ"e$}ވ sF [~GVH9<Wݬv7Ժ[P4DtiF<09)7v9Ƭ`JkG||7 7ԓ92x OժVjpОIɵ:`rSZc0_ABV0`rSZ#Z~@Fֳ7[' ijYcx@>Mqx8 &1K!SDVGϟ>|ϟ>|ϟ>|ϟ>|ϟ@d"e?DNy6V<0EE7Glfs.keL.YRɵZQ'\p DR_(p>>TH=[*;g6 oF=Grx 3Y8f|e}\{#C8zq;׾vy!ٶ]A*޽@y Qez@c^k>4;Wvy{lژ?r#Pܞ ;8ge bAxC 2Z"%D@ YۈAmO96T٫G`rT>M^u7wcX'8J^[_Q_8]ɽÿ+öߓrx z&ɵpcW.ƩKj=@sӦ C  lP4fEpp {o~E609*SׯNo{O:ZĽƨdSn#UW0G6VꣵM~g'׋& SP 6ҝEسx>ghsKQknU ,z&A3>2M~g&4@J-śP6V=d,AN6[G:53k!;cl1os͚ =vw?KjmT[a]g J _ZO|]PfFrm@hYQ38C`rV\"w;*Tn;rx髛;YE!u9<[сjC02GE~E/_c0X:&U0 ҙ7=AD29F],UH+F*vd-yNb 3E<9ZF-WjځpсQVgWWl6Jg97_= tk2_rxn-4L6gl;VGrm@ d`ZFn$u3LdhA_?w뺖@ l-tJMg_=S"_&-{gEq//ށ1զ9pi~#/%(}7%(ӈ 'Pi)qv޽n&Gd \0zM>C(Zn?Xc ''GUt\ex(ĉ%9fݺĝkW#FpF :t$PPf} PqoP3z1Ֆl qA!&ځM]{FPb5t;}X=Mw򂽯z~S_v@xC &{& cր T6 O{5JK GZ=BoC[eٴCjɽ$*|ͧ +7'w?Kju_T(q/mun=& OXJ?ho ;6O09*z Qco.j#ijzrm@,Dv8o)5]p=Z҃09(i2G=Q`44)) Od_UȂwh\̢L_/ B_/  7@ N1SSvn|ށz(Q~f#:WQRD`Kw< x⏻h(QB)Nȸ^y廛P!\Svj)EC_'ݤ`rQ!aD%g';)]R.Xo][Jkz)뛇)qz~R_|E#!rx h`Zb.|LwOO]w>#)&=ӓRBh_9"NtQDჸ8V],9QX}7|^ O۲'r=KT~z}{tQG˿l]QU?›h\`Rܡp@!XSƛ^I{'#=76`xz-Iv+D3U* * * * * * * * * @*SGc{ŕ"2Z =n}h~FDsMRjmrxl/dv96CB0-Rlӱ wl`rQ,Qt4E;c(':񕸺[}@Jy"wP6guZrxhkUcx@|?4ww/DP4o0}jGyɵ-`rQ#ݮw=o.F01z58A\>2Srxij|FSřT>#UTM  ԢdNO|Yv#i7ۏt"g^ř?#)Zf= F gTqP&@Ct<=Q e/q6O96VX| M襵 oԧ7~'_k+MJxIm}Fu^;huԶ^Jsw^SDv6>TZk>ӖչO}N (Xw{2p-OnH,\ 'RJn5@rn:tC= T wo$BV?U5aܠ :ϸd[[x0-Rբ\r~a`fOTv=o uk96Cf1EWٲ"09(qiv5RUE~g'9yac(rx }Z6S' J0*\~ѩc4RhX;Yce;6;il!$A`z5VX0>lnSh>J d7S@'U:lt)i*{k/Yɵv%6&>;f$RnvMw?#I OtBN{Ld` 
MZ4%:uXYS+Džr*ph+Sp*4^Q t*vP&'85p@aM)B.SN`%D d~ ! vt%.vm! _ ]4JC]. ʸ"zsl]cD7)`vF[?U*g؛[P]Cµm#k}^O|]fu0-RiQ3Y[d45EhgsE- ԣud#lk4^2'rx n=(cx'S~dtc{ֻ??cv.;59L$Js3Ƌq'P ;R5>_ۦYi*zS( cǢT]+,Y 2sZ1jO…kgA.U:T,~*=TC !Id]T鐰QTШ@5 ^4PPMi-Iw?* =9?`3L[_@ݬ63j揃JĞ=Q{觯({}`^MO;#CxIy['}1jfݨp@OPM>ߴǯ)*)O2c}OF{ǯ;##oC:)y[ɵk?Kj29䧱޾M >3{Q>>IUjEXس>2sg{ N\ދKP7]0/TJOl0Y$;V 5`;#*6vg#kٵ)[iD3mB`^8'LdShW+UE"_ȯW+E"_ʯSkxZ @;AQW8@;#WMt:[ZkuAm0-RKPw7PM&u Bn7PM&u Bn7PM&u Bn7PM&u Bn7PM-uR}lI|ށSՖl EM*0-R4loN{Q}ootI{(+_WZo?4^O|ǭ@Ś670yhhzq%^w? ԭSj.O7=pvKfӓ[(3b,!!boWrmEԛqF%m 2Ǧ8Bv]ɽ:ΣP8]^ӿt]Rv"oZFӓjcRQjP{fmM%Rzebk _ίWu:_ίWu:_ίWu:_ίWu:_ίS.i%Ũת7`zrm@;Ig]M7@`rQXܳ>7?. ͂$9<ߗlόDPjY@DkJu3>>KdA2Oyɵ #,uf|eL&0nC$g&  q ~' ԳDkJ3>>KdAՑ=& fzglw52 HRszv,,qzBO3LoxڟuG`rPYPb~=giR M4\ m~wE OxٔG0} tӺ3j2ZTE뚳INEu quYe Z!7Ƞ۠їdZ =閗 #d} bJm7ޮm(u qu]ve.xA:Zβ5k(zecx=6KLrJ6) 7atB_Irx Mv/>\;HUk]wǭ#7'lwZW7uGg'0-R"MWLλځMZ䧊 =Sc`rVOtށ}Sp^6uDҧrx %0-R>Ijd3j懭`rUb67$ǭ`rVO&_#(ChIt뽨=h># 'nmOW{O|Kb=}'׭ͅຢL5w$MRxJ&=R@{E#=Grx h T\mp>Ii.#wOcPl~09+PTNMw7P|EkTM*{'u0-R>Ii.#ځ9AD :lwAmSɽ$wOmuM*{'Kb1j x$I$yP)J67$]5~j-ZCkDIꛇ(+SiS9</؏_k"xo ~.vl`rQo6-Q&'ؼn8Mo ];OPJ4Ny멵4yz V8\ͬF%fv] kUk3ANQj3xM 5 7Gk)qw?+6mb09(7M胷b8V-G~C ; U0돰޵#ځ l`ZѦwqogtJ5Lz sMk}~RkR;OxDEv7 ϼ>K{IwSzk:# b6c&jGy!>DEuޤ}ځ "DkI DK6mb09(5g'] ն3xʿprx7i-[o ~4 ΀F%͔W6\g&A۱w?+kFfvFcp/4ɵTGH#\-Ak4#@^n֤wb}4o I& ǀ7[aG(`^w3_'8Ajʹy!M*)#*6g"䜛P!c Y._˯u]._˯u]._˯`(jXOqf||&OW<;2>#AK]~DteQ`(n9<4U3eόn=ӓL=O?WTtDvm>2F9<)fH TY$oWrm@ `rQ#m&gGU\]hu]._˯u]._˯u]E8 Dƥwvݺ]QzHi7I* VgQIr7+Fj<uR0/RL+ݽrg'L`ZѼ$λڊhgtB# C+P۔t@X@ G:NU,Ϩ&WR5uD})9c.P[Gj#wf=@[H`s/t@EH<#) 5CӈTZ ReF&1ImT\ +AA4vdBbŚ]A4 ri%kd{E#ޡR'& z)/Ǐԧ-IooJw4zj;O0&7?&Mό?3j!Jv%6Pz}o` gQש9<4mj@cho[_ ,;c{n-.M)A#.ר97@Lߑf =0 Yt) A `b~o7$'$q[EAȄwy@cT.A@sTw#p@bU,cF8@|*lל,ƌ TJ9K7pRTf L M ]lFi$YƊ'hKoGSTw`Ln8L6[2~g&BJ l17Pl~zF?9> ͢.i(ºO Qdu^lF%8ls "#)"rxno.B~rW.iU1ZE㪂gT<ײv>OgV֗u^J.OievzK<ײv>OgT"B֧P{j F oGQO|E>#)jf|e3YȢQJ~tԅ+=rgm+-ke랴 _[+׎k/7{p`~/rS#N/ 镖7eǣ`? 
}^9`}ٮ:ܨa~_=N^s]켸 'پ͚c(xG'{2}S#^Y{ٮ>^s\/|2jAd$A YYoc]/.=OzpTo\/X7r0vxh?L{/.=OzpOUp?M@xUpޔlan~_w)F/ l~ϏF>~U5[&'fȘ59?+!1AQaq P@0`?Jy w+|w+|w+|w+|w+|w+|w+|w+|w+|w+|w+|d`jIv$pþW\;p˾W\;pþW.\;pþW\;p˾W\;p˾W.\;pþW\;p˾W\;p˾W\;pþW\;p˾Tð7@G.#wN7Yx0M5߅r`m.%@@nSqp,d@9r}L-Ex0|p` htW_Bq@DSc˞ˀF#yTu8'tv@^ L'T#Beoё'<RJpt6q'qws Ttf:'̃μtvxKԱ0aA! 25VOe f[;j"wxRwfd! buN 4f(d~J E96 fGAT ǐx]^k35c{S^8=Ye]@hwAm?`t$6v(,rJ>#Ae&FZa LײǍEdM9M|n2fcw$]8c.0SCK6D@l=,.`݁uqτRa9p8OULDcƢ5#ſAD,/;cdw$]8̻LM L% ~A`b \:; cc | HbL3Ƣ싁lmp7ٳYk;f`Bntp+C1 Y줝 Bu=3ModW@)ؠA1@7J:DvC'/էQ>@Lj{+{zWy6ng=4ϟQ ͯ@ }7U 65cđ4d/HZ2e08|=ey)G R2r.Kr)ݍ;Y)gS^d!ѭ,xtC lnX6GQ9EP)wå8W<̇#`:j5$E"GͱK]v4i1~0/ $},<%؀) oy/0 D6 K Դ1V D-F.﫧@00Wb'`vGٟ6 (ӷ{}}BP h1HHH6 \w ͚ D,C +> a@B.%+;)@d{h\8%ٴH&؉[ 4K>00]A`ܮ3` | GL6:K]7x kDaPͦ2Cf?Rt^#(ty% tɦL b` @7~Wrխ@.A.e# ;p[Z%J-N~@Xw[OƲ%ceDEb'E@ [ /pdT Ѫ 'gE6X[扮"IMK Njh)Sz=eX%|@oc{6lBסv,3N䀆;xc;w؋!&C$.JmPEULomNz5j6HOu[p{$seA/@ T{&7#U]y9P{[ a0iPd1(2af (T Bʤ$>d fdŴG aiU,XMLDx$bHn=VR6w lPIO䟄Pܭ7Xp*d}69j3n |) "j AW|vbnǰqb5:\GWœИ7®֔寞wCɀ,5H"Y"h[|ax(i[~S F2+CO`ŠMJr: 0r'YA\@6䝒1x`fL{26@FZ3%'<My~ "'"W,˳g1{lY)N(; `Sr3w:0"*&V${IHGWap5xzVHel4F/i40%6u@ac3ΦB*N;6> IA`\h ]ȃ $p,wLSQ,@ sRB1 Cg!?tYHC* "L70IK9! BI1 u6(Bǀ7Ln?,gf-G00p)Y0j~Qlp>z ^NZGOi4CCЁF?(7CsgL igwНv?gV;z{.0@kg_uB3v"F  >T_!L?v&#hӪMGrXy}ȴ  p HJ"keCd=`x;L65c,H<p#@T Jl\${#KvԐ3)p${Ȓ@|]ADd =ذ"f&ȥh&l 'I3!sԑ[)p|@|݉3 @?ynw` c2cSib}{ζkYT"2?s*N cĀR\?!@pbu~e2Wb3˄w.nDQwsRPMj{@pv ؏EL,ǹ`vp| %_$G0z2"H:%J"HP&e 6  JНA2oHF•L?`)ٲUrbs|S J}XC:;v h `as90pCؘ݊;8Zp5sSX_Bvwl,++. :`r" aY- /_P}B /_P}B /_P}B /_P}B 2!k+1lTHE}'o } IsPF'c 4xtܯ ~3NAAOAIk&B܍ۀd>$0":+=:f!zԏEL!+(̡?CjNSO?TSO?TSO?TSOԢd$ag$- (c g`RA8 pR=!RvPv`DSpAD8oEϟv۷nݻv۷nݻv۷nݻv۷nݻvh:+A]ʠɔ$jëӆ CM4'f}V8 .v?+0ہR Ϟ#|=xV|Jhbh؀pv #,6Q]+D@GRpq4tw tq#QlE($ʂwX2?v6diD.03dB!z@" V?(LHd)2 (PIwj8NMhsͽlj ;LG`aHBw^a7OvJ6]էP }EB_#4kǚӇat7\3iT\eC2[$N ʜ4t,م5a7g'zF0$3˱@IBN BP!Fag dD "a)'k1ļ(PAhP`{f@b? 
a&8Ľ9D TPBL$hYI͉2{ ~pI16ր 4%1@a YN-9@`pUnQH S-Œfh Q"NjC,a^S\`X"C?n= {$XX2z LrY,r i}gCIҰPF)fą$;hat){4Ar8w h@ qa Y5aoĐ  y(@1d"Wqc20ڔ~dRNy[gFlB@;D@!7\z!;;ZuK߳[-sqԁSGŽpw-ba>lti+7Wώ:\^R'WnG͔+4b%bjS=)<VerF,Nou' W?.`wDs(qcQd 90CKم.@v`2"Ze6g5^''̘N! ^doPK"Ƈa/t/pC?3л;#`Ĝ(kr5?&gDb%.4to`J1&0H8!44yyP@j2xك`@ƒ.lRȌ-2+v-c)yEyp p@K"kx  ɆQKR)@7Pdvj9H-5~fS4 89a ~ewLǬ>g%p, (˼ӨS#x-ԁGl[!`7XP#_Sz`3KjFu |G%Lx;[`PHoh~"gf`oq; }@v #ܶTB ɑE1p% $QZl MV)iOK1fBH-.Av Ml?(& :n@E7Or+wht' [@k~#쮞p"f(BvV<(CՎPq$k$ `A+Q%d9%32_EK:j#ܺp!`R@Z),f]P~'S) ~?BOЧS) ~?E J;Ct%4Y/W` `sȑ{)tiv Mg}L64]1~5xQ],raͯ#?MLGX%fSk[Op=֤էP%:p}ٮDtρhu5A Ʌ% AP_jT${Q> <!.B4f#t./$%9IHo)+eyRX&J3 |>C(NP^pL"g@Ru$ME,UocFQ*hb)[f$br{9@RĚΑt66`*ؘ[)BWPsK1WJi")_B` 0 vQt>:JTa${[/ X A{t }Y {UD4 87'}}}}}}}}}wFŁgWO@؈!9v8-v[#ul4x05{+uL >3)r6du$c@* jEUL&9.$@).T#MykXہn^`u8.v]D(5DAKLu4йɑ0I "*V6c ͐%Ȍ_ E̬ c$&weOZ$cq5'9UBηp hnX=9Є! 0蠛X86cCK9/[n0)7"G1h C`h 0`AfˮF}?cPPcSmJB]@-ϸi+|i+|i+|i+|i+|i+|i+|i+|i(uO@Ďʒp 3ec F<n*,uó>BV3hO?rD^=Z G @7bmgރ;0C?!D=<t{n0{IhA1sԑ[)a[i 7%;7GY( J췴6Ob P)7!Lp@l(3v8a@ٲ"Kɳu}_6LYrI$ʒM>e"E/_H}"E/_H}"E/_H}"E/_H}"E tvo*mҎc;2LfFltw usWp>߫"TBOvu|p( t.=lG&MuP4UR<=IYa,x0!9H)mL0Y* 5w"-c/li2ir ɤJDӆ΢Fp!UǠ7N1 ѱ2njkJ/6F,(wl`r#Xq:ο[:@pktACg`QN#X@EBz =i1~i@#L8u Lz,raX$#";h:"Ev ^ҮKrFp!1Ț, vGD.ȃ@bp!v.bppPHyDj7T ?D͞D6b@ 2Eus[GxHe*ja|(ݠϝ[)S*֍zGz,rSQ? Dplb `Ȧ:@jgrFU FA@A*`]w lX`1$:阘8ȕ(=eCԈ䨙[fF`t щ+F rVUάD Y^AKFj+,@X &[B%,4 %u2[ը\ѡO41]]˺&l`uQX 89 !Gu#ȉ3&P* an0N *f)fleJS%:+=BC,6L0Y'nXR[ @1$eq'1CZz8WLc 5F09e. 
]:+wDB bnar;4#4叨e`e@SRq@TbkceAiEBp9A丹ȗP@aIA<==2y"l`# 3d"'&Ah2d~9)@Lu2a1Q@ 2=\FRa;14v{b#P"BgOs>BZ0 nx6 (nPlFuJլA<Kko-`>C4#ԑ͔R Ap&h N"I4r3NP1v , ib)6\C@eh*)BtHs62Wm0dML`5L3@@)($iBnFGty :cj1A6d )0T+麁l&@1& L G[a-'0QNpN 5F趸ԾɁr:%pY4-3L;1bu y#.g&&8rBh{T"`8㪶Jh i܈!ndREg=+etrEr{bס8;{:X:dC#@-@)<4lQlͯ b" ΢pFF `6, !, -$X ݈ʠɔ wpdmH ˼>dC1sԑ[ M(0觼*a n1C&WliƐg#Ag&Fנj;ڨF%`s-2فֆT V:ˁ,Gbffp`u41k pO؊:)"T QwMfd3Yu 903|hvh(Sw=#]:~8Od  eρHSNjSZA!Hǀ7Q0Hcp(NHׁh6(Pɕw仹!H"[\˚jDsLuK]%qdֺ@O:$Rh&y$`ri= dAjn)0b):"I6p@: ]V6dOjk;0^Ʃ:rr@O iT0(p4#Fm.-Y2]h0'u-jbC!Lo 0bC-L(X,D㭻ԇ6| ?ǏI a8C- : }l!by_|/~?K}_|/~?K}_|/~?K}_|/~?K}_|/~?Hx!`) d-Iv6dLotD7_׺@;lFuJP~KϨ7Ϩ"rS xgaNh!ib !bK(PePdʝ,>rj-^H4BA8,OmgA|A*^(Hzb)d @@@XO6>rd'"ց Z ;}:dy;/n[ EQ Y8`c+|ޔv@vC F͜Nn?+v@i;Lmk +K֜= 67Ne&@tF;/n%!1Btp ](2 XsȑŞɮ,w 6 @6_ ݀2)keٳN&ɄPum& )ɺ#P?1 ] Y_&$& #&3uBı)E?+4#@](MR/'J#F  ڙlp!b(\xh`Jf"v3Yk香dr0:OZ?[)&Ak"fdSJ~;f3:87LuH/"ADsúp"( CY˛Y#) d;h,H:i$mٚ @l*>ԦM{@pl #18 Pl&4Q9E`E"$V98Y(6.&KOBFPCg \۪Ld StI)4I!&i[ PNb  !D̰.g&Zl hM!o&2 hfb#ܶNBs4u@ #nmAm@C*CSAv)U( I(j3SbL-48XU5$$]S\:"7C# i\0&-DSiWcKRFFI D?ca h20%6 ͦ:{B݀Od^5ib{mQa@& YMY@f?f)^fF*j Pf,dP<"$b$c<R ,q;.(^?؆%E8]"a!ZzDќ8htZdL jP`T@&~.vDYqJY%&PVL!\b\6d9@26XR gB&Pp&2Bƛ ĒcUI .X.\0 %Z\؎X "fmԄ."Їԏ ݫD0qp-}Ob$c< X@(7Mn:o-nHB;Qlx ZѰZv>I@= pv vuK;Q<_Jȯ/`E17@ "sԑ['`./@tJt_@ &ranw/.@"}T)נQѢ aɕĿHO@ %i@lD dh puBԙf<A:;IX#B4Z!Hг Nr%#gq'0mȠIQrF-XD^CdnnZ5H6p!9,t XwM=QC S)tH."Dh,Œ# !#e:U"a2HԸ%ơ4 r!7a7s70jvn! 9bs-bkgw ><kjEUL){Tp@" Z#Ca*v@D|fUBŅl^zׯ^zׯ^zׯ^zׯ^9$(\A=Q"$0*IoYװz ٲeq{(2@;Fq(tJ 0 uBz @n08f!i"?ekP*!MW1$X‡A8Fx -%FTd䭄)u|աeiP".A3 YD1ISZ.Ä :rboT65UP:.@E r$@ Y*ۦGO\K0nh)tɌ uc'i`H=xnT >|ϟ>|ϟ>|ϟ>|ϟ>|vP&rW{ .EPd֙j9HA5s%`y; z$VĚu:4A@pv@ègZ+2DE޿gNiעXz4$9cz&p!&v &xl,"ð! C&P_b,g&fRER%dM ^K°?aU @Ybv(227A!p${[2?BΗfX$Q[` `D AEOV)8! 
)p${vKrh?w nzo:fcN{+`D@ 6;Lt #LB(C&S$˨YɄ K(!e-MՕ"G}u4@30K @d1( LWy8s#wK5-14ƅ@g,R"ˑ?6 vBpmv LwH ^L|slΟwSF>+8w%w܀t[.`_dwcftע9;03`4w K>_aCbmOQ5Hг Q=H>D6(F$4}OeixE%>N +ѥom,j}83wqA ,%HƐ< 8We5 KCX0 q;tB\i&@dt(ӇX C#hIfhۋ G unH-{<4 @:6zs 1H=]#l~U&M1.D DP50Ҫ[ 8  Y9,eB> mYXL Z!DD|9(TV@ a숍 4Gn4CIp[24C0"[89| 41 pQ@fξ 6( eԆ78nSK:=AWת  s< 9:&9 Q\5QSۗê ocsd*ǧ[aƁfFBgh eߧu쬋T-&7 {.D 3@Btlk*}7M`b#5:4JdIwc,BL'S!"˨YɄnI(n]P $[ S#ے^d"A>(̆,hrX`Bj 3pdBj8\)A$ap,ҰJM,3 E4kh~-V1Z=q4] ̌ `!5FXJ̄aۈX @a0v#ʓ[^ߐG!fçB$ha0 >" 8eٳR,zuE?͐ # 7 M:APɔؠ</' $2'8O r(D H/ I @J:LWX|-`%썇#A`39d1Vy ``Ady)`-;w pESQp@Xv B40؀ G~ C P0Ppu n@ -0:bWuɆfg ` ;c$'P^7Jg X$؀Ѥa#;2 J}L-ru,EK3Ah`Jf*(Ӫ@A{}*.sܶS j{)&}T0Wy Pqny6<{셐(PɕbC,g4\!NaAv`\>I粼:K@e J&ARLuؠB%A>?xnOt.e1$IjlfECԙC:.#%.gig\IN60 K&.>PX\1.2h"[[ҋ,? ,l)]"ÄߛJYqnhH $$5$8 NS-1!@LJ &0d2I6M+``U`Й0?MVZpLe:s}:??%B:Ξ \A[,ù'UgϨ^s-Iߣ^{L`?FOЁ; ;0~C6ܶ^Z4Oo`@mg]j>zQ0B: _Rz&640?ǡ`YR;76Hٲ h..hCUkaq A3MKL:زe$HF!oҞH >$'gp.tnߒG&W&9'RΐGQ Mz@CNW=b졹_.x f$!c2y*D#Sc(IסH@`\ wh ^4Y__p} /_p} /_p} /_p} /_p%='hK=@,xtڭܻČJ#M85gEwJӰ az5=n ˬ' 26YlΧH@[=zdGy@֕-;H 66gՖ|O`x] ΧhLG(7Urck)Lj$(F,?]ܶN`{1 1ҏzIr6p!J65%2 Zf `џ!Gz52Cjt`rBv RĒ, "ʠMYw-~`{1Tch$lT)0%`J=fl@nl )]`t `p${2_+@; <Uy\r'\r'\r'\r'\r'\r'\r'\r'\rtHMm)f;MVYV9B~RGo٭uP p%#?/RP =K˃n4}=%"CHfևdU$q? B0B}8Rdh"ex*! 
|(7Z|,MwEl$Nw+H =Ȫ8`zFP!C9 Nq/.g$tw Ǡ Ӟ(bb,Y5#IbCʠSSQRG@v";E99Zi N C\L3OF(Bbc@,xCՎL)L܌a0V-4] [1À?񟖉HYQV90QE cnݿbھ]aPb_ kOFnH=~RSND}rYH~䎣c 4˷@DtuALH@#_p\ S@v`/|ѹ)F͜LsbS@Cؚ`gw 0lǀ7PGrR,raHC݇В(P3`r/"op:`2({v ɹ)F΀#)Dt v@Dz$dyv, 4;0PnM#(rO6NHRGaDcM`^ ɔʭ:[*p=[ %:e5魎`Nq@:\QX PnYM+ n@w)ndPٟQ \`L"qt 5]!7\敌'UDy?!w)L(v ;#U3d(I,03CH2CKY#;E[d2,@ @z M56U !1z TtDt6yF(B=$Ǡ7Nhs8I4nX DSUL9hl"t^c`4i9C<͐MB`L\#hf˄rc enymd )0T$c 1`r,7G# Z t iP#f)s o@h"'QPCF @3`1ba>@uE5c > (E|=IN!P =XQ$l@/L\fL >Fa磸X`hxpƢɄdȢB@.h Z%ͬǀޱynmȰi@#QMG/rF͕BBbA m lP&zI@n33F᥊P,YL%1 @PdUͬŔvbh>GkOpa#]]lkc"mD.XO(wMèx1eD#wqԀGk-,@iQ$޵>Ra a[W<p4rC?rvЍԿOxYeѽAMQ2v+CK`uy̺ sp ] gaob!c Rm  /`[`%) 岙I"H{ 4/ SL9Ni Ltb$c C݊bl@n4, ib4q;)B\TPePd WGW[̏`P@dn;%PVxHb 2e7pY#BL-o%F ]nDDz=e2,vr*n` `$n؂I2a@UB>C-v C6D$Cu 90Ls҆@_C`%) 2N]ܶ[2S/kAkB7twDw.e;W`ԥ:}]k9۸P.FX3ıIɅԉ0 hQmn$ef_#@1X;3YYNLPf @ #$E 6ub&XcD IY\'R:EGG?+Y"\XB{/n0N.{&IT9P?=Ŀepv"@hyǜyy .O? .On\@g@@dƦ`:$s'tbMA*;MH mD=ء 41v a 6xŁnf,X$.EA*Yrmd,snf,X$.EA*uHn8VF9܀/z=do#Xq4 t?Ѐ3sEg;Pi$<e= Rf맫 kT4,tF!BFVT@!,؀qO@@vD者^ ^B!OV Bg08ֈ!tJXa `_`#V ;B#\q؋ 0pd<]]2 I"Mb3= џTwR $4%7&OLҖAq) Lh:r5B0<t15:R^f@BL D ;YLÑ"p\:#84C!2p&2ӂ"(Pɕ.g&Tca 7 p${jgA{l-t*u"v8=4i`fg {1f PXlӢdI`2%Lp Oʍw$Op"`^D4hg!Bĕc^7F% 2sCgIՊl p;EsKA`"M$ r(/nR7YT|`K0 ߳1 LsNp #q&arwۭ== Ad!b&@;22ehdd\$hY-z &an8F.l͜R 0iSqPFmʐCI{)'BЀn: ; 5MM!Ψ # fg%,)6`psYvrw*ƫhkY=m_abb!ӕ/5[qY)X4>KNbE،j減Ȭ˚"z[,? !Ü:O^d!ѭacCcp(@q5@xE"(Pɔ;Y+ 2#Eb@#ScLe J'P!^ML_} i3,RH(pQ SNy=e0V:Kh MhćCZlp!ؠvXװYރȍOdcEN BjA$9agh`X-; es)0z V}AI%V5TԪ0G1I͜kxIRr,l9agh`X-; Yb0zӚoG"` .ktlk(`jU˱Gfc 4MMl_`"A-B\gnG#tRVys> \ZJ H q3YfB6pt: ~l HMEYf7ͮ(Ʃ(g "?ߢ fh^l!0t[e"2rZkuͮj65Iuku?] -ӳ.3 Jؑd0kT(7X#!lY\,v^g8L;: RC'%pt;mpmR`cT_1 L+K`*V( v jnG"ix64;:!1it@V!8ի%py7õe lFJKͮzicTsTOYn.3`7X:`)+>|L:M!U[2j Y jg gC .W&Dg",] UZcTh`"@AyP01@!02B4P "#$3`0>KX-b䵋.KX-b䵋.KX-b䵋.KX-b䵋)ښ6O3Z&{f6pnXV U`X*V U`X*V U`X*V U`X*V Qpg(?G蛻?? 
H0r&b1b#U^G>WFV%i+IZJV%i+IZJV%i+IZJV%i+IZJV`4CTd݃k(G=?B蛻?>ϋQ7w~V<0K-@SFbҕByI傼䙡*+_8x+ zB hbME!'$4Um ~w~Ĥ'.fC(ILm&1&/d&)7H1|AA& Mi1~rY,GfFZ<QA=E4L4N/WG R$qS q<EV4EYing)gc%/0$O`f)! FZ{X|ak/`F:depε̞&E2Zi"HNi^Fd{; fl<~X'hD1>mZCkCAbMvk[/aI f{F2$alL}P!`< O(G!>dq B!&aZBjxij~Hwx={` eT7`r -qc؈Oo~&"|¥sjW1\ƥsjW1\ƥs *W0\¥sjW1RUMR K[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[O\o`UpfyfyfyfyfyfyfyfyfyfyfyfyfyfyfyfOQ`T?OVե,5iaK PENDE?oEm[iEm[iEm[iEm[iEm[iEm[iEm[iEm[iEmOO#w=F8S^ocq=N#Mg(?G蛻?? y8z1h`º(uOLjYBi*ڜ4JDu9 QKRQ]GE I9ѱ?~"`lݟ?Ro$LfS;cl؉le+&xٸf.0h$iz WMےW pD JdQ4G$D45kbP2Ī#xj)`=30hOXTG y*jq:qh▬`" o/Gao HF<ݙYyYTGEF$y,LOq"B;ckZvbK7Qr1q7#(٢,S3Fn z0ގ1G6C3c'3[óWIwf.,~"0iVɚ1FplՖh@cb/X9dn >/Gao?D]ɪG:Ѿ;\W2̮es+\W2̮es)gn<_ZbRğ&]WSlӘڮgO R1ymwdfKy\2F_CPHI68KVŞRo$mɾ1I/7wŒsE^P̩'\ #Ԍ\h(bjޘXF@i#mTA N ń4oo/F&-|6qk y `0C\*rsTv LL 4F,vHFKWf$ LocQ"x<٨q"?4ocԓy1Ba&OïK,KL%,eɟ//F|\0_C|X6ͅsa\W6ͅsaP F|\Y&O~#MuσҪjq=AZj +PVAZj +PVyH^ޱ8%KP-BZ j%KP-BZ jrs3us\W:ήus\W:ήus\W:ήus\W:ήus\y9 !1AQa"02@`q BP#Rbr ?0mle,f6Y͖le,f6Y͖m.U.B!B!B!B!B;lЛnUߡueͩWVdo, 7|rQ5mWx05xҙaW]n,uU>Mš$֋Ӳ֧RcRV0UZ|FfQMe$\| r|[*au|kY)U6räR aDo->-U?nٲZyb:JKf(C9CPr?s?~xҞT̽|Dk/Bw$<ޣux2uL;~΁t{vS1;4;ǡǀ1cl{w1غlE-hFZ2іe-hFZ2іd]*ĉ$H"D$H"D$H"D$H"D${l. CԇR=Hz!Cԁ=H!wL!B!B!B!B!vΪ.l{Ex!QgF)],i,UWԳ+J֞il$,]C_&9_܎UG6WS{tkc7G=Kc;p]ze]Ԡůqi\ݸ!,7*7%j/wUBzReGBQJ*]vXdMMB*כU{b8S\'uKzwA+yFC1}vN6 3"C9hFZ3;upc,ǹh|KGĴ|KGĴ|KGĴ|KGĴ|KGĴ|KGĴ|KGĴ|KGě}΍ĸq%K."\Dq%K."\HD2ZO^ZO^ZO^ZO^壭|f"D$H"D$H"D$H"D$H"Dلo;l mLjSZ׺Qz%j\%u| DkSwĚR:VmU܆q$!]|&=Zr"š+5QV1UX1:Ji\r^T6% ~$H+ sFֵ.dyEenk([ kժuS;(o¸/74]v$;LMi4fͿ)ƒ:ڨ 31c1c1c1c0w3gc͚ h`Չ9MV+\"W!U=*c*9]Y0sf0'NYHSE۷EkWyyWyyWyyWy 310Yx&c ԛxaJ1sI3!7y_"Q3BXȑ$d-<ѥV(jFv4Ѝjs14|/'hU3. 
Q90^A#1Ġ”[/{'}S#i<~ݗt;N44gewwtҞzG+՛b@ÍC[Lo=9$m[$yKBZri'23IxO E(q2CLuƴ8JO"dW٧d2@)Q)0-|HL";ȭ}bϒ/B??=:H/{'x'kc8H/_1yfhK y}GR MjjjjjڪjjPWSw|.8㋎.8㋎.8㋎.8㋎.8㋎.8㋎.8㋎.8㋎.8Od?ͱVgS[5S[5S[5S[5S[5S[5S[5S[4^RRh}````````````````````````````````````3,6%'t_ ~Qd/~J{Iu^OGbX`Hx@0<+%DZQa0Hub91 B hjkD)HɆo)=ʯɛtQ Xe~Tc rfv%3WlA[ms,RHIj܄j1-r[pSU(JrhK"(ZYgǺ:PL<4mvD1Raҁ#d 6g^ť3ٺrЇ1'6@+zSOS_59<[D,ÜB0xĴGf,\| q.>\| q.>\| q.>\| 8 mfe#5GMr:k\t#5GMr:k\t1ʶ啾573u-\Aj WZ cяwo5Re[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[e[dؕi?fxa߿.+xK? O?i=N _THL85]G8w)4)Ԉjw,7O5U:u DjёHd -:SOS_C4#(߳-2,(i1! K,[H*3ԋ$djҪY3f _TvR (OfxJ㕱hertpƬ8 v,>m`ThW!3uu<ĜY1Lsץ=:0B g^PFY¬)%x(^君 OJ$QxH^o$! re:_RZ0)HQ? ⴷp8ՉNYgWY]euWY]euWY]euWY]et0IJiz}GɎ&x^(Wx^0Wx^0Wx^(W@PԳLXm]٪٪٪٪٪٪٪٪٪٪٪٪٪٪٪٪٪Vݾ_YǪҖWW'=UOUrz\W'=UOUrz\Pյ,[ӽ ퟐrBBW! !\+rBBW! !\+rBBW! !\+rBBW! !\+rB|3~3-Jo.+Aq? OOi=N _T+!8Bhm-/dpT?}~ZϪ{0L}*J}%@>PrT%@>PLZ MfP-1lS3B8__z 0c؇F6'wq\muMAD|{o_FTaQFTaQFTaQFTaQIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUIUI]pN 8'pN 8'pN 8'pN 8';妭}o__I$& UI3B$>&#SIJgjʥ?g`dک;{'m5άXګH7n~&<ja=؛ Gce6޹6 mhA|{#bxc2ک;>IW¾"p.1P jPhڔ͠ 37|QOM`0~ګp|X4lrCբ H TRH TRH TRH TRH TRH TRH TRH TRH TR Uo&>D|;v@R aRY"QiL.Mt 0 ?OL H٬P#߇nO*(Zڲ+Q0Ïm8$@0ڲ+DgD{o-oPkcz2ڬ3\AS] .i3&~b<^*y.XKC~~炤# TϢ~};ߟEN*wS>8};1l;&>D|j    lcK   xAAAAAAAAdocker-1.10.3/docs/userguide/storagedriver/images/saving-space.jpg000066400000000000000000001572611267010174400252140ustar00rootroot00000000000000JFIF``pExifMM*1 V`QQQGreenshotC      C  M" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?fU$XZ4 js,5zo>E|+{}}YOÏ/L(4b[!ixd(>WϿ4Xe [#¨  ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (9G|Y`kbBQ7Ww h%CM 2yW?ן!oSQ/CЭ|/34vke3Oaa7G,îo*hG}ٵ '00Oaa7XQʃϻ6dwT : &9P{Yf,îo*Y>#]UbG*k>?uMQ '00Z(Agݛ_|GC ?dwUE=kOaa7G,îo*hvm : &?uMV-rͯY>#]U|GC ŢT}ٵ '00Oaa7XQʃϻ6dwT : &9P{Yf,îo*Y>#]UbG*k>?uMQ '00Z(Agݛ_|GC ?dwUE=kOaa7G,îo*hvm : &?uMV-rͯY>#]U|GC ŢT}ٵ '00Oaa7XQʃϻ6dwU'm> ._߬/`c7/H7t 
}Xis\YWpI<ƚmYOw%WgHmk+c(?s?Ck^_2~QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEs??ן!oSQ/CЭ쟍G|Y`k)( V_U &~qW~_ 4!Ե4-]NE򮵙UA 6lFڥq^g[úu&XV]Ics8#co9t? *sL_g~'wz׆|#qȼQYIIH=b$#ܠr>ST|#5ƿ~q_q؋@pQ,C;Iـ$3~^*]j?z;Zimk vAG.pY'uۻRw*H1qdpЖyEsڲUz+O{id|A׮Eỏ?K`ޱ,1N_rezbe;?|7ܖzOIT~Ǯ^̳\K@Z #\M|A[ik-͝ǖ}FT0![(0Vll}R[x2Ɨ+Z].%iwq$rmxHhUS!9ʵk]0\7h.^;{V^x|p !w,٩~ʖ 6G\КTe;[qč6TfvU5&kgPfqP j1JP"žAƅp1%6l;mۤlO]69^t '!7:_67?i{ TRLa##vq^w].MK]C̀Ӵp fG wFbp@uO7(:m>kQTbQEQEQEQEQEQEQEQEz&ԚCTӣd#oV9g%< ?e? ^^; [umj0-Hv es5ˠMq/ kZ֟q4cXdo~1j>;[_]QM1hV$w^|M|IB9{)qEmµ' hkOũOZ+k?]׿_7G+_лo&d}b[_|IB?Z{4s SkOu ğ./gV+_лo&V$w^|M=?ŢZ{4µ' hA- ğ./?]׿_7G2e>1hV$w^|M|IB9{)qEmµ' hkOũOZ+k?]׿_7G+_лo&d}b[_|IB?Z{4s SkOu ğ./gV+_лo&V$w^|M=?ŢZ{4µ' hA- ğ./?]׿_7G2e>1k"{D{Oۚ?]׿_7_ra8qWa%y|4{7NK1JƯ׺WP~5ֽҾdP(((((_?| My%#Go%,1';13+ L_bx?:lOGΥ(13+ L_bx?:lOGΥ(13+ L_bx?:lOGΥ(13+ L_bx?:lOGΥ(13+ L_bx?:lOGΥ(13+ L_bx?:lOGΥ(13+ L_bx?:lOGΥ(13+ L_bx?:lOGΥ(13+ L_bx?:lOGΥ(13+ L_b'/|Q6%U j :+7/=7\nޗ[Gwk0RlN@# iP3H? yw-o}sNg8Mu6vZG p;ױ|h;^臯tKVahlZ<дNvROa3E>ۤn˄+񍋎^:|)ޟg#o? /uMx& &5~"1H*@~eu#Z|?YD5lClE{l]޸A'^z֏s؋K{_cm?l7ס񅜚>qЊ}(c9`c5o΅+X񅝾 E>۟)I @u.\M(/ +}QxL񥏇n\yiM^g\GG/y~_t/ z,pLl.)MM&~V89~L|Ԍ⽬ykmGs/O.#OkxVx \x}KG{)[&f?&>@8'/'+o;?VM}_ioleP~5ֽҼ/;36QEQEQEQExqW+~+YM6y3pTn|c|ÿ஌-V76 x$ڶj>|>C]GXg<=' w|OW-dz|>C]Ab?_OP"x'(zO?_QK??,WG3 w|OQ;EI'+?iwH?~>C??E~BGM. \_ׯx'(zO?G<='Z(ɥ+#;EI'(| E4r$?^?EOP"!h&wXg<=' w|OW-dz|>C]Ab?_OP"x'(zO?_QK??,WG3 w|OQ;EI'+?iwH?~>C??E~BGM. 
\_ׯx'(zO?G<='Z(ɥ+#;EI'(| E4r$?^?EOP"!h&wXg<=' w|OW-dz|7Mc[ii9XXr E~+QjǍ>x|A_tjE%fu} ɣ|5nD%zuyY&ŻZs??ן!]7M5_g:}.Xd2/H2]⾊$wş_5zN|=ZF?ܥ]56)p+J?^>3}mӿOS*5߆uEWO䛫my)Fr'W?o [~$:CK𕯉cƭ4{ķ}{kt3N3:(?P]{GR|A4_z|/-V(kt3ZK2]NC]:$M%E?Qe iz.M7uV%Esf>TpE,mC;+>2xKԿ*>oû˽cV![iak`n mF\"ڛqwYz|KOkx xW>ͥv^X,pʒ&A2XM~Q_|)0x<7oH wie%̶P;-wK6Zz_~QI+ tmbS wHtxs,e0[Q_|j5a,<3kW.ך Z4_qZJ'd/pl?g/'υo<[MVx;]]WM$8yn7#>WF$ /?Γ/5rTu(-TlX-xBK[:lj>45rINqT>8K?'IPƚ÷p"\Ie[aW^ƞ"5}>9Ğ0"9v*1Ϙ'fznpil#_JƯ׺WRG?O.<\!gxfV1}?,gcnF[= z>g GVgAe>g GQ#?>7GQTe>g GQ#?>7GQTe>g GQ#?>7GQTe>g GQ#?>7GQT~ݟ&G O!]g]TG'^.<\d<[jsa- 裓WοSۋ?<77$]*fsg]bt] (NZo/ѝ\M6z+;jdG.VA؞2_I~?q4 [k}wZMQݽk#*"wwF ,Dy#UQY=uiWZk^>!MUl[ ?(<цc{dʎ_)AN uVw߭xLԺ?_ '^^\FSAFUV.۰WO7^Cºw"Gk["1%VfVX ?7lƺ-?6I^궷KMOEs򲟡W^8n|7<77tCD:G C$,|UG\tout=ŋxvML:Hʑ< -cd`~{>kx⾘|{Ś[=-VaQi~K*pc1\o߂> Xues]Go$%`v˅@=^qf_KhSh.L~"߲*{E PW﵋NVKKHe2J2[.IϮ]_{慄/ ( ( ( ( ( ( ( ( ( ( @[Ioq;yhO_V|\+[㧂.,4 ?ңcmͬQ:9iWݹ+9i[X|7oo>Ni>_M/PBVǡE3[sFy95⿴/'oiYWmm\1TB@%B@8:QUpcVNۯ旕:N?sWФz8z<[_Bs__=~>?Ѿطc:,FkbݏJ$wş_5jF|=\nu]?Soٜf¹V#0dg5Ə#,5zX4/WƙZWb-n,>F]QI`T7jҏ͌KwҾ'Zλf+ BتG,d xo&nk?|Qu`.mBf+i7)1?DQY'Ϻo;^VIj4,u]Sĺq>v5]aAIn·`o ᦟ{_uCEZO??K __'kk>%Ҽ[u^\OpŞA!Pq,Rk~xg>~"xA0[[7zw! L;s3;s v'ܨAcyW^/A :L:@pwK\1>`gv &uῌZ.fh'񖟣6;hZsut-/fb #  `K \.--, MK77Q#3,y@s ? 
>(Foi%u[=HDKKXc,amEU*(BGY|;~A\%/Wusi2aaipĴNeكReOɤ+ƽ%վ *U#EK@1^EQEQEQEQEQEQEQEQEQEQEQEQE]:&GigTH <]p9¼N Fxu( kbī/60 ;i O׵̾_S/if_Ix\_(*hآ=߳$d97>/6CHzr5gǸH횧j_<-^xXZ'L݆b%'<8;(Ծ/E >>ZyɼJV+ho/evG'<k/s/]|yGPk<"#6Ǹ065_|f_ me6mio$Z<(wnŋmgI}By37f4˫nqh'^<<f Ԣ7o|#\Q;}xbKDQY|HbS`!Bd5|K?O>"~yAn?ۿ9]!x>g[?&]J>0%:{vf].2|1O|Uo|Qri~DK7*^hNŶn` 'j?,5|+k_3N/}<>@af*9Q|G>pXJ|m|ٷ9G+_ q>r] ǿC5MI`kmxм`&-!|xǟh}ѵŮy{s^~/Ziz<|1ywsl_Q_mhAV݁#fZP|N+PYſ @E_vEu|dDi>x.w6c۷sOOC< 01m;IO/ݻPx5f֒Iy~$2p'Ք-0ij?wRx%E )0nQJe8`WfA'HFek7_$yj'o5/x:o^=j+_xHo2FYHPi'+>hyϧX/1y3k%fm۞sP:o i3Zu Y|%~x޿\iϯA29tuEXR_0]I,ݞաq㟊?K/!X|ZDG|ҩenۖjyZ:7ݮo\]ȹdCU5xKRa_6I|0;ϲ_sG-h^34?au/ [o0y{āmA9#SԾ/=..q}f%)_(A߼$`m뚵]|TZm|9qOv 2X)߁4Y_osG>Oéߌ!j̒j-ۣoגV=ۀtȫ׾:<dO$BAUF$6# .qVӮ~)M-F^CnMx%CyF0 \sSI~/x[Uks-ĦH1^,/|iϢ_\ѼwyTjGVL L xxT;fcAt˯,ڴ_7dcrs*K^-y&K&)X]SG[-66{bYc/n-POV\Ivlނ>_.h4u Wǿº[Yh [ZfI&7'"=+ۀ3Q+k*%m淅ln./rNvoAÑԗ^-ݯ[wk4sc/p/+^ 3G9WWfO}=_OyD-h_7_9WWfsį x7?v{'v9o3G>ݯ[͏eo8sį x7?矉_o\?Nrf}=_~ˇq矉_o\??5 5?{'v9o3G6??=;ɿ?5 4Þ~%kqk;?Nrfle{.wÞ~%kqhݯ[wk4sc/p/+^ 3G9WWfO}=_OyD-h_7_9WWfsį x7?v{'v9o3G>ݯ[͏eo8sį x7?矉_o\?Nrf}=_~ˇq矉_o\??5 5?{'v9o3G6??=;ɿ?5 4Þ~%kqk;?Nrfle{.wÞ~%kqhݯ[wk4sc/p/+^ 3_^~ΟuO>scuY3%3C<U'+}=_S~I];&[<~ zˌxOKK8)7;=ﶝ=C,FkbݏJسMv?!+ӫ>~4g?CgJu'_tREf֒!Xg$29z3GD=|׫N-J}#IZHI͹F `u{VTs?ۧ~)eŰ~ޟaj/V>Q v^i2华7A*@΃֨wů &sykz~:%[UY{\/OQj֟~ x_ƗZqKC%٤lNT1'@Es?>4xGIWBDKs@F]FI+ֿlE'^-3Y:FQsbG*#cd EylмxE5xfku aE #s-YmĭCibSsqMuK+|!_J+x⇈<E<]8m5$itbERWp5jZ\^^O m4J#xI'5'Ꮞ43^ҼQj6Tn $H`Xq((((((((((((D/6.5/H U0yI?ߑs{]x5jfd[Q|'TL?n*--qV؛{dWj {x_w%WgHmk+3((((۳D_.iY{_w5"o}y,=#qj| E5$Y889|\mխldne>\BI8@I82󯨭+C:ٲhP [FBI[wTG޻QNxp{}Vܯ?Z+gkV,x,g(Cݴyklucv9d,F*#J$39^Im[R +Ᾱ-5d+"*2s\q\`N23B{(J6r[ꅢ)QEQEQEQEQEQEQEPQω[hxIlvq%x%r>}s+بM9%$:oupbk{i5 CPhO2Q;8N-wmCRm>Ǥ,׬-^8wI(S! 
$Rbo_PO/}ZkOMxdSS>Fqz&9ѩyťA ~jN_M0^~i\%n{,FkbݏJسMv?!+ӫONgGD=|׫zVnu2]gJZIڴ6 _J|h;^臯WS4o'o]6cTZQ[|pA I1եGON}W<5/~ÿ%Ğ)<iۓ=ԖS,JsPI|FG foكWMNK.B+5 vç!*3g1Қ !%Q>|g_ڟZo|JUcYe5ú]mX:(KQp٨E- s?`z ?^5$:;cI淎OR-4 ʬI0ŷCD{tmF9cI#n˸€?*?cυz>$~ǚC-&M Kc/k[Jڜ(%k}FY$ȫ$SG 5 A?yYSzfo«K> k\O;y, ܈ RN6g\œtSc^E5O@-=koC]h_ }Zek7M{6=03Ho *ʨw5 O|'GiOx[OZjW:rۙk›H%1Gu0FMWq'D0GncErN`dُe&~~?u{ưkZ?w鶭}k+H/[%Awl5ҵKeƙnZF1y~D˺ع'85OTmRPEPEPEPEPEPEPEPEPEPEPEPEP|o5oCkXt XK{mJ!).`m[l4ϵqo?)0&m36P (&#eMO.F>UZOvG)CC;36;G^Q׿?ho(bUU;G^Q׿?ho(bUU;G^Q׿?ho(bUU;G^Q׿?ho(bUU_k ׄ io]ت_qȶ$ ycU=j)읨|%ŏ0}*kNV#W'"o}y,=JS<|*N~hË\Td[h?A t4.M=7T,5;ũbk2yn* ` Z}hF1冝{T*U+k->HjǍj&}Kuk8^ly\u7,`Ggopz>[lL̛JO _&Pߍeiյ {]3U $y3A T_27$`ѴRXe|fom=o=F7i-FFfkx^ь[C}cgi]=.VHbYb2* S|/Ek/ '5ˍi$tCp$V2ʼnRG qtz8{6JU|a'- Z[ǧiQ}ʍ޼S"E<kFAC<''=V6ٛ1n~i|b3+P 8I֟eS0s#릻y +s ( ( ( ( ( ( (1v6Fq~V~'Rơi֟~Y:Fe++X3?*(TJ-Ce-_[4y^~|}>o k +n?vX9*zvmxĖ>| 6:F!r!J@,|@zQs޲xtw%n^ZExon|}ɴ/o-\%Qc$`*mU->0CZ1:u$h(gVVH _;ъQM8pHb]տ^MO>NhuV(5;o&ԥ爖r%Keȯ5]/]Y Z_]I,rk)uV;s^pP4.kZ5Ғd[A ~jN_M0\y~g_b4o-脯N1?Ѿطc:t4~4g?CƭI+m~,D$}PPFr3w~4g?C׋&q^#j9o ΰba!Uӕc)An]w}"Җ][K⿈-F0be.ܐZ#|Y >g7cߟ7%v6vm;qOy*=}n9g#\.jm%k.}Ж9n^pHh_ٳO_6 gA=lΛG,;E+"Ex>6i!I|W='IԻ>b77;@hKJ_)qf5ƜJ(P v d`Oz9a_O+tWUI,Of1hrw ۀ֙קˍ xC[^%\7.Mڲ# E;2@=o_hf?_t>O|H-)![\4ܹ3898 8_n"I㥼 m=q[ceQZ~z}tUgĆ-uFIsP;p.zQ{JȾ^ z~'Kş!G1O0x]tp*{=CxǑifmn_!T/;_v_?|'_O+ôGG⏈ͫCsM;-8?!^St٧Դugĉ-IaIEDb,TG);N=({JȺEx654Y"84H1e*XK(; mq֬,隴'SiQE0;yBcltExM񖡮/}%Ƥ+%"{Hڅw:U='k L~,,:4()*BwGp~4rÿ=o]z:^ {5[şO'+u+HPw:j-6OǡY [GzxfBt ֗^&W`š܃n Z9a>zWjOZhڞxCiqak"O3v|vQkeƆ<[;ahy$K8 tajuT< wTq?._u [A{|EVhԣG?{#EP|/տ?_5o&J>wTq?._u [A{|EVhԣG?{#EP|/տ?_5o&J>wTq?._u [A{|EVhԣG?{#EP|/տ?_5o&J>wTq?._u [A{|EVhԣG?{#EP|/տ?_5o&J>wTq?._u [A{|EVhԣG?{#EP|/տ?_5o&J>wTq?._u [A{|EVhԣG?{#EP|/տ?_5o&J>wTq?._u [A{|EVhԣG?{#EP|/տ?_5o&J>wTq?._u [A{|EVhԣG?{#EP|/տ?_5o&J>wTq?. 
&/?+P|/տ ^aM-2<'q,{W&;N>X=!>ګVZ?O#?Ѿطc:,FkbݏJ9G|Y`kfMվ[2j릟'@Xk4g?Cί&Rݵ e-KDx̛loBҞ^ZߓJx"ݾ&K$IFf*C>Hٿpy?[:I>9-n:j êʀɯO_MbÐX.үuer#˷N׹ޓ*Fb4Xm;mR%K$01>p`>GċxHn%A>l^bXSvTn^E/i"N KmDWaHRV*D +66Aʒ0:/2Wֻ:}@gq nnwsڽz=:Vg߲{kq >&eZ%]V$7Y͛0600Oٳg''>hv1f(*W`ݴg n5Q$?ӽ۫<@Ckv:[o,ZO%ޭD#l.ܐF#?Y_϶GV/oq6w9^EB+Zߋ3ȵ1;I__J3KkĒ_dwImmP1޴_l=|I Ǐfٌcm8ze{I괯{vm=Om=][G[GCu+yEۀNFi~me|Oi&[# |'3m{WQGKwKmkD,Wҡx^Vg,L$3.v€0zօ :p HMF1媩VUKQ$?۫Sɴ?M[Zf*j[$w:L&B"dLaI<K{oi'W$jo-+?n %W(v__:ڷ4=~%|O}-eWb[C0s֯^̟l厶< VfܶA|͹|0f3^EC.ݺu"\G%ʭڜmiiľP*cݔ8*3m[T[W5&mV&ؒ!a 8l1^EAJ]}~g~m{-?K__ikXؓnVpʛ~PՍcXm[[үċ5 H 3$K<#9 j(R۫*]j5ņL,!K9^8^EAZ]}wy >Gįku%U^NTnTnNjMsSmjI~#|K]6ΖڤJ2I`b;| z{Y t~/+٧q Q * wG^~m蚽|J|ڬ \<\>HfH?)#רHUۿW~% G=d΢_l 87;9N׿d>$|MʴKWk]V$7Y͛0600(v_ff߉ON<|47)c]?/vќן{g#xMS6]jbMoV{{a &%N}F9}^;X͂Vriby5#cxe[pVڪn2Fw]_fP~5ֽҼ/;36QEQEQEQEx"o}y]?𾹨h77C!\98* A(m=g߱g7_BWWşh [ ^_:~s??ן!qϦXKgkyMm%-r*Op5GƏ#,5z? / 勞q}1p|gmc]gNT'-^oM)ˋWs+>pFxI|Q9O[ԣ׾#\u?_uhrJ ^kf? rrh(Kރ[H}x?MbÓjqyǷ?yp"?_P]'⿃zo+jMⷺAe}uksQq5yKmTc&kgzׄ~/x/u?-hy {A$X!p6C_$W/LIW!{.mm|noW 'RRO"@Kx 9O/?^4V?2hG\?nѲ @d<Z_j=cĿ |N2Q݋@2I,Rvste/؟ )GO~oZC=g[S6`W%>*;;G<'w@ּ[Q7\Z±[\])HX"v,Fm?3I7jZ\`|VV.c#so\g!.{O]x:< KwY&ggpFghK<6 CxFo'&T66*푣4w+^{\s|`NxItƫωI ]6K=4.˸/!,nPO?: g<-CqquH{K8 nyĜCqw߉7xr 2 B& اl]g,u}z-n)xG񜺟Z/2%"|$;I?`+-/MO`K[[xWlpDGeU(Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@nu+9s^Z"WUMyCQ:ժV?smݙ1,B3?M;Lj5n.P/ p;W IkӚIXki[ߌ~)Z'F~-63;z׬Üf&5o$o,i59okk[?G9okk[?@FMB 6A _9Üf&5Üf&5o$o,i59okk[?G9okk[?@FMB 6A _9Üf&5Üf&5ǃ>he8y|mnx9a!!%V;xc3R~ܰo̷R')G#S/O^a.WĚBFV6kp6W$vƒ=vjy$DG`z>#x[_4 mQX6B`[~Ny g5\=v5oݷ(σSŚ_.5sRӯc!HRvJ `|?O]LMZۥ j炡Z%xr#ѿmxma/5 MkP-6G/mѾvy\jڟzL ֵ+^&~w-'YG1+;KG [;/_G׾u~ン{g~~,ռ bv}hKbkD[.V=5i >ொIaw; (k%-gX5s\HSfӢkUtWyyGf,~xoMͬvwW,q¡Fy{樂Xh/wzfc+s ( ( ( ( ( ( (^zLoGLڴ5)%6$G` ې54W5iㄾ>tZƳ !i 8䑜Vu9hua7Klqg=#Oe Kx|/yi$2"bF+Qg?N֤/5kԈU[]L9eWܻR慧v7uaҤ/1>PJ>OE`ů xDּ-czڌ"wEU8`H5};G%u7s~Κeɴw:iְɥiΗ2;GgҼZkڶgmqiLd2;9n < ~iWsMm4.'%xe*AѼZiZ֭-anw)#*2jygmgsk;oC/yxOyKRYYcF+!*OTq5u ߶s- V ;X@e5Oǿo(txcMӯz3Bͦ\~}v,W5_rg_KO3_y1~-?>!\xWF џ[~şh [ ^^cɣ|5nD%zu|h;^臯[F_M[k 
NE-R_0Z)Lm5WƏ#,5z"'lڦKx^.ݘʛ.tiK^kBkGm/Ʋ,J$\pr3i|)g7yy; bd|'jUܹ~YF*wC6;d#̪vC>&8Kt= _KZ:m׷ȩ <0%s)#MEkm+}4.BK) ȄlHF<{⾹C޺nzz@擫^xLMVnDOL[@ߵ}7V>Ś|zmŵY7Vn>pFO~qGg(:uz=zG^׵M.l,76cɃg5^WOQ-iI3CIvnԍ7AڏcS_\]7TShizLW%9?)`9Zl7t:Y۔tpAۏ2'8W(:uzUwV~NXVɺnIۏ+یM/Ux_T֭|iM mǑ?38Ƨ>C^wS(+iGmirR]I1@ٟzmcck:^y 6 n"f)԰Ǿ(3_zzu6ÍCמŚ| qfMlB18=Qѿmυ^!YL$K% n Ngκ]7Ey]־ ƚbRYAtR]M:9 zc\~xjIZm7*!D2#q1 Ƨ>uQ^yaEDŽ#V$Xe'3oʪǯjпmυ^%umFƚeŖ\ʩ(ѻՎS<zκ]7Ey\¸|ƚb^[% C6g!X杯~ <1%t׭J~ 3" +pxW }Ԩ=_ڷOoim0Hy\Fzvl+=ua=6 (Tr722yOsWM^V?mgg_iث{nK_ns]gM//gkFIH;F\a:nVƧ_\WV;l~ oiLaO0W;v?A~ucc-6h6__IG١FUw9^@fQO4{)pB]]7Rk]ǃ' xLm4nK't.U'8\>xoC5+i:2\XL)1݆#(5?}סW_"xJOi+!oUx;vz'wڵtۉ;YoRAxa8WGgt/nuuW~ y=5}>+;Y2!3@擫xLMVnOL[@W0:oC(8~9,5#n- ɺ_*8;qR2{HMPa,Ϝ+8N3Gg^~ǦWH2Y6 -žhm6`浬?mz-rƚdN40]l6-H۟ktO/Cw^]Zo5Ⱥv+thVT%ɎHSڮ{?¶&T$zY_9!t j {`wQ@Q@Q@Q@vț?t_k_,rO'z+7><Yky'3nuUI;xA"G]'2&74'2&74'2&74'2&74'2&#^g ݒybg}t^0m7  揰bۜ`h;?<#5Ꮖ,tL]ý7+}+@~B4M_2ۍ39eq ()~yQwm;?\=>Wa&4WؒbeVNG nۍ,A`R8Ycܸ21ҋóG?v<οY#^ NQA8sxwz(ɧ \q??|'G7?<;|W=dg8׏n>xwd+?i3??Og7?<;|Q _2E4'}3d(/C?> _2sÿ̟E~CGM?qx/C?? O"!&?qϸ?~sÿ̟E|'_QOA?^? O"n>xwz(ɧ \q??|'G7?<;|W=dg8׏n>xwd+?i3??Og7?<;|Q _2E4'}3d(/C?> _2sÿ̟E~CGM?qx/C?? O"!&?qϸ?~sÿ̟E|'_QOA?^? O"n>xwz(ɧ \q??|']% |`A=KšŎguo$ֹڒI*A N_KO3_y1.//*|lr^"ĪJk[~şh [ ^^cɣ|5nD%zuy'Ə#,5zRZ^"Kk)Ӌd4$0澄$wş_?zmjo-Ѽg&Q?Hǭ]?zbgnm>̒M=b. /lzv՛Atm6h~%ěךԵ;Z>ƞ2|5iڶn6"0'4¨sX +3c_׼'BnøujjMn#+ʲ[xPyUOhy{vjw~$XxjW[K!}6n[ 䅦R=lq2W\xoYCx^xGǣX&qhib;URIu"7vटk7ឍž - .f&-0|l  ?gw񇎬_i\ZIeu#D̑2^o*/߫њMe-n+$з|#la$Vݛg>"-~חsc(?s?Ck^_?T_+[`avhb8+o ?f:ho ?f|_ECOmho ?f|_ECOmho ?f|_ECOmho ?f|_ECOm7><οY#^ No|o|_W*k[)i$ 9O) xK6ݎƖqYCeQԁu`Z-NyNu0!vo>MW;OM#yX/0A8?`f4۝FZ4᭐E#7l@c${h9[}F+ƅdmshnX<+쭫M-rK8J+yK5$!B s]gc ڟO;Y:ť-FUPȈT M_Rj.{|{};2<)thukH0 f {YYQ?gJy4iOPQ}wk]CrC/:peK@U(F쟝?fᯎi'V^kfBd52pq0Fe50< ooWsN[V[{5PT]LcW_OƿMǏggjV*bI6u*n{x=JZ_}~^eE-}}kM2k_Yx9 MhXl(O$zwl Eu[]'XUӅyqKoqr$6_%`,@JnwkV_V+a}m_{z[_KO3_y1~-?>!\ٗWF џ[~şh [ ^^cɣ|5nD%zu|h;^臯_L&cqHͲe+GD=|Z]RmjP#-^[[N-/&I`}pqZRת9vgyj}=H0G3 YʙvwS}Ex? 
FG fńzZoH4: o}5Vne]C;# oI44 cSnmKPϵYKXa"BXrd%͓x^-+/a6u]A隇 N5$,Ȫ *q%;/7,wu_KԦtM[i\M3[&$+" &ײQ@3 u╯Fiis5bnŔWSȖ&0g^+~ui47^$o-.T-ɲY-dh$!\P| <>/kVZ޿ oS6hY8U" o_57|'Ѽ |=FH$<ij3I&(((((((((((((߇Yl摺1Hm]xgH_ h:XЍ$Tr9t'3KCh%Q L-fg>"d/ >4 4 " aq^@?+c(?s?Ck^Y":OO'z'V@?(GI^Eg#/Od ?2hQ@? ?Dt ZP~ܾӭ|Ccv#<+,ggs ,܀U$}})vț?t_k_,rO'z'/4~qXhVz][E ^G 1>һ?|GWYPZup\:c8eFy< ^~Xۖjә߽@?jOxVRNغK۸|Eڏy{Q 0yMG<9n/YUIVdg*F9^kYm7; 񿆼3cM rn|{H$ȑLeXd#SI7k?fm[$þ[T -.1HAbb)8j϶n}}OxSM-nxi C+26ٔy ?_|go R6PnbX6y! $ Nz\M{8{|l0]\3Z-mԆXCaZvMJ^;km$E*„l)O-p@p1q\{8A;?ƛK\Mpɫj#qs=I,m, T:ĐG${*=6՛9|H{kw:f;X2+$ܚ|pfak[r[o]WPbN hv'T|]ߎ4-WTZY'֡66)pRH~E~xƺ?kƥf( eBSA*$e&רWGL u_KO3_y1e>`Y&Ż?g7_BWW͟Ə#,5zz&_K~gj#b\HmjLCЃ^H? yXgb8`pw8]{E0ztnq_*;~zMrKRɼN \/3э~1PWO{Cqk QYO1wg=U?/s!?ě)| teTkP+_)f v'b/n-^]Cχ xQxP%K ,@vtQ%?_.sZR +(]xv╤z?wï5ŢI|L!f~) 015]ZBgC$QB31@'Y/d|#ug:熮|Qo }k]ilwBd9/_y'şXq!;*-RUu-=7@RMRs;{PtQxLoQy]v淵~Rִ:mQVQ^G ^9 '/u::}ֵZ xf9qmŊ@=(_ʿ+K_.??*W>wIǓo/>w}.ͽ9j5-4߇>ߨ/+e>Z7|ayb1kW^ kV_iz"R 6`͖`*埋CwZ=ϬX;ke=r@h~?OoK%J g+}HΪڷHh,gۿ3՝|ewu/^{{GNxL/n(XR>RgN#dյJ&htӒܒ0M8 ]RF7K˄i"Q, ʹ r@3Qο~?W_Ex.ksVbx^ء-)vmRk*zDß^j;j0KYJ$!>`(apI8dkߵ_5I}4Zԗ&wohc2{Ip+ hZfmw,4'h~?/~Ǹ >%[Gm,³j_ qS+ +*ŀU=  Yu6I<~%ԏ*7HIon:Խ1-:MPl[27;g;}|c'[/#goi\rZC2ı':U~w-~3|U\E8hf$i|NwMg7[oiX8֩pFO`$A7'N=k\;I.&Qij+I$9UJdq9Qoi/~G7{s,^>,Qoʦ}?6.7_F6\b}GA#ʹX~76N5QG_ʿ+JݷR_7|? 𭝚h~/|Iڕ׀t;}6W!$Ly#gB;b(_ʿm33|Uºß[O " J^O y{pwgb7H#\[9Lmp[F6NylFѷhʊ=U;[K%o=NOX6]Ңi[>q'oi+cDO$>vFt=ψ,>}Q_p Cnvn'sG:U~w87[j_|9n:? #b<@kþ񇏾5~^4è>0ϗjM}ګž 4=O^YkSMX䐰̗8888^ [6Kbud4 Hr9R?y}Ou WA|;UkgK{h6IK. $)"&V-A G AkOs-Rj*ѣ񶓦ë]ir\,ro)#| Pʠ1XjV/>^4è>0ϗjM~4u-=lאb?}-*F),8<ς~>?kɤX5ݥɂby>U;˿;U$ڕ!{_|/>^4è>0ϗjM}O${u2K >VK[ӴiFW)8XfaW+hMm][jڥƭF}Po$EmeKĻ**32&J_wTp=?=?ÿ5_&u W<)w/^j6" 'Ө8]*d]ƿ ?U-<[~wQqd=K5#ѿ Ư$z7+?/DF3G%XCh0W= jGaO_Ho,!4_?f=>K5#ѿ Ư$z7+?/DF3G%XCh*>(|5,on> hΙx–KK20x9b+?F98?i뚧k? 
?~[,L:eީh'@JlbE} Ư$z7?/DF3@e$<#oz쯬t=#T՗[Jew{}&(aUD{qAC )ZԞ&׎Ax\x'U3ia^;X%d}c Ư$z7?/DF3@Pk4z/9mMmꍪGekih{ ʶ̤;7 񷄾 i0?Oo |KmQZzs.B)xde,,qP"}Y'_!|dÖzk'RGGټϻujGac-gWY;=yxW2i%=%eSxhg0.9p=Oi?>>~:O{SuH$O՞%6y:f Ư$z7?/DF3@ |&\#[~pA+[߱O,I.K6Kbҵ[خ.'Y^oUa pZ_?fK5#ѿ~Z'׾ O~x~ .5lO휒G4 @ ^a Ư$z7?/DF3@Ey%XChW= z}jGa_Ho,!4W_?fK5#ѿ^a Ư$z7?/DF3@Ey%XChW= z}jGa_Ho,!4;36㿲/ÿx9yxI/[Eeo|\Ƞs^@Q@Q@Q@Q@vț?t_kgq/t5𽅮^#i=е[/A O /DF3@Ey%XChW= z}jGa_Ho,!4W_?fK5#ѿ^a Ư$z7?/DF3@Ey%XChW= z}jGa_Ho,!4y_g:E+xHt?`' <+F$6WV#?/DF3G%XChᏅ~:oZCC:NԦnr Fga-eI¹qҷ_?dk?z_VO]'CO \jci3闞 kqyx.)Ploַ}fq2;Qߴ8Z_Ho,!4_?f>T|P]Uc஫oRK "46E*KI6+5~)?/D[?*Bt62I`gXxY{%XChW= |x>6xz'ƭbJ]^d^m$InOT8`蟶Vo#оX?um#{DWV3Ҁ'd6f*O_?fK5#ѿ˺GGį|K߁lGK5#ѿ Ư$z7+?/DF3G%XCh0W= jGaOO覮#_Ho,!5OZƭ{څ*}?[IoNW8yhb4o-脯N+o- .ֶWB'ނHUl ]QEQEQEQEQEQEQEQEQEWL |#age1νwqÓ0|+si% H]BOKy<- En@kxwʣ>[Y%Ѿxw^>UwZq,@!Sc)h|ka?|_$jZmF]ĭg (684Wg½> ;A{mwKKW<R+X%e;m}r ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( +~#~ݚMfDW'U~fy> ńE{ .|r9ſwßa.]XPҡ2ijע/)]ز ;?(Kㅯû__]x'A$zA\O!xng4?Hږ?ڶͧ]Y.eGG$[}@^<2(((((((((((((((((((iV+ளi j|I:ſ]ȶ˨ڭp}ᮛ7}xvg9koǩE$B 2#/h[הP6Go_hRj:1Fy-Kb[mZH٥bk|o=F> 4s>Ζ")Zt|/- dG}~Q@ ?LmR75 xoŞ >]kO. :e{kpH̜ (((((((((((((((((((((((l_W1jNko4X䒆S GrKʁOړ ⏎iz[Ý7)yT[GIpV32obM7?\@ r|`Hk>$еO i~>׋4o}I[y[YUHW_e߇%|:2N1bM@ϪI 26H}eJA6?_h0AG|"ڧ1͗,|kkW9K+lc[qI&A*3__QEQEQEQEQEQEQEQEQEQEdocker-1.10.3/docs/userguide/storagedriver/images/shared-uuid.jpg000066400000000000000000007536471267010174400250600ustar00rootroot00000000000000JFIF``pExifMM*1 V`QQQGreenshotC      C  D" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?(((((((((((QfǞ*<4 Ů6cvPI;[Xtʖﱮg."'ي~c.8$ޑIgtފҜbj><#ACx{V4{iC<#ACx{V4{iC.{H,|ѹ⽬/f^(ou 5oT<);GW/O>2M}}{M֭-\l8 +> hniT=6*"-4хQXQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@|]_1,|?*wLV=흫$=+,_ckk4˖Q3`^"̱НE1m}Fef_? 
cխ<ϦGwǟj¾"5,O2y aPW馚OS ᦧM?FQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQ_$|gQeΟ{Pf[ω%#˼Kb?y~geGm~fgge;XÛWzZW||w#օ[eldmQ`>VgkPuM {.XI[u A+?:WRevesjxÒ:ӧVQz]A%{^͹%{t֬sZ?L]7OMCtem|XR g⇍"Uǵap kAͭׄ4P>ɣZ>~w<8~xKMis m]ae d_x<mԴ[8ld{|_ۇV&>W̱~[_OF>\M颋m;2eTf2Zj<Mjxźޡw46%pbx88tu_- o yz&uV)ps`s_/]|y$!O+_‹ Z!P0K`aqW3W(CZrM~_\ xNjPi8$4hNMk˨itYf͍rF~E3dq ~ Z#O⯈QoEٽKÏֹ%F}iZnQ;bik~9g،$өN}% -}G/.G1GcMRm&uF {5h:<Ag/uq+W_ ?[shJ$ ̠NzSuncFT,w~-qx_øcQQ[7MSԗ*Eok韱> skX_׫''xh~02U E$2vkˁ&96>xkBڭᲶu2LŶMsy+g3h:ϋŽmk,N-: ΝԩJ2M^ײvє?(3ʭ'UWTyFQe8ᾒ|?kSg^䷱bFWx5%2.Df$l"ggïCĚ~7ҮoӃEj\ɘ#eH~\0ȯSU=HG+ɸʤa7 8e{lU={9qm(Ҝ񔩪RFnNև?5(ndyr?l?ڻ |QxEмD&6.0J2\_-}7(45QR?ʳFXiԧ>IB>#~#צn6muX]:# =mUk 3 8~/^ȹմ{ B% MfP@ϧ=)M7Ym#*;?B8W/Ug( -pqKH¢75t؟RLtԬdIy/kCx<[?xwS uϪpE~ xDŽ|E U5jǶ^&5; iȷrv8M|n;̮jf,SrXtj=W=9KGB$@C+ :Z k?McmzNcf=wFװYg~&|{+=s"܂jiiv%Xsi e(~_W˫8G%u {']J+c]-[]߇oV@1$i7~Ps^,_GM% ̑GuG¾?wxg52}_7bxMCQO(Ưv~;ѡRx^ѳQ^xKD>fi}wZ$5̧vѝ?EH<˱U_fQ^yQ@Q@Q@Q@Q@Q@Q@Q@Q@]f_x7V!Til8K"3 dW¯.%ռ{(ۦV7l+dsw^ x`y}'զլf|oq[tS1洓k_D{գjqٯxzƱpQ-i$ <ٶ א|B~ў/|{}.U,dRV0{?:*Rs6wmxRBYE*Ik-Us~MsGv}^߮+#ƶ/~0~.;]H++Ҽ5?ƶKx=bi4;w [l$ʿ3VE!y)xMUJIh;Z6okKѥ_?3mu( -]l*N+֡ƺeEI'_.0sTyFKڏ>ӟgVnrPX )yo?_[Ȟw6Fw#_SUUMY_vz";_Bf Jq_;7iz-ѐEzhJtU&)GjOz}YB!SJ Qs+v}5.o R֕%ӈKV~渏o =xǾ8N[mA Y܍AWÞ'?,[Z4Kkb3-WI'cyw<+G q8:Iu{|O)9wm:k>~7.4-i:<H1>l8KL uq^6;O+ػD; bh gj~X]lCZ-_c^?a0os=rrlgճԯ1x֞bChײ-U:ujVAn0:q>['82Wc\1RiAK{e|I5Ru(T+GWx 7WZzYmdGN+<𶯁xCYR&bCa7 xVѾۻ36}x lWomqy+Θ^$WW,JrE䔣_gyoKtO9G{HBi?_gǂ?d7F8+I!?Z|k+eCFFss_^u߇-^/UIeY~lqޙU|;5 ʹ~[8%I+eA3KoC6hQNRI)Gï64;yJuifߦ ׯ_[ }Z Eh4a/8>d nzkt6 +Oвߤ6YZJ%XɫUN7+]Hc׼+4g욙rUl3׵x7 O$w hƞq8 x K2"c ](>%WQ^W{7g7ǚO(S^k:ayk}YRE|~3-a.*#plxT](lEW Q@Q@Q@Q@Q@Q@Q@Q@Q@WO{?YhE4"ݜٓ+k1%p\I];+].h(C;{Du>/74pĚtF,&ߎ_VOړºׇAE;EgIVIv#r}: v^FNNn+]J>.>1dsn*j&ܒM۹T~YX3ucWU>*xEoX?2yeYмuxKOZ吱W$8rj0G5T8^OlaU޶ovՕy|^1O7).gytVwwkEۣkZ+ՊIG oҼZo]+m2|hş5kW>e5JKGVLgtnT');b1e)3ʭ7UW?͸q½k\-ok%̟|Uxvյ 0\J..㷑r .FA_=kvMGࢻ)׫FZ6v+'u%IZqfEqҦ&ڽ(r O㶸}|ghҚF_Y'+jzx#j^2&6-ޞq&s T`1>NAy& -!~28T3ҾVxFKylqP*m=C q6+曕R6R]Y}~麣 ,~yM'=s} 4_Divz§ =q>4k}>&F1 ܌Up)<$c/H̷.&҇ni~3 SwZlۣ+<P:t_n\g[Y@qoҿ| 
ȭjhs.9Iaݗ$;>f!=jx!IyxTWTMOrߤVuNъ:m/_?.#dHeYN?CO_i?#~(wIqmopg8\?>k>(邩,b3qǽ|f?};.&K>*E+(}sR.{{9BxKszuJ}gEΩi8p{Coj4a5E}2| o|K%~}ƞ)v MdQ^'(m>IrZB]BD׫xocVu+TV9_s (2 |&_N25O`((((((((((,o,'ŏfÑ~EӧAragSom#v}?Qg%wuϫG7,'1ܷg._S2sWu&M=Gs.6Xuf O\+/M-bM^֭'s4 0HRȁՒB#8<]aΔdkݥk[vOyL/¬M$UOW;;&XW7CiTĊBjۯQ\-tԵMrIK;$pDH|oD$0sxD1EZZz;}1e)M.NQ[mޫE~x?s3VɅh^\[*{yB 5\81zm~%Z~.~3xbִv/R㾎T^N,ܯ@N|)I}(/)3˭7VWe܋?͸ƿp}O_ ZbHm"cFa!*?yJe?7IH|5I>&|3c|%ŚOkibyd70ۘPw/yr9{׀MUgO]\Y+1)b*ㅩS;URQ}ևm\_ -LTK^mgk^M۷S/_o}葜[2}+[x5i|_x=$Qr[E| E[4uڮ >\O)W?G>45^x_3bWi!G| YS<&Xy-Ԅ5OzYmpd<ڥXc3G$)%NsѫjVI߱x?GoZ| Uf7Wzak}3TyR܈g4!*rҾw(E֫]ghz-?qLR.+)9+rn2XTe!wɯ_08(xJS?ov>z> ioR{xf,χTxO\M_٭ĐMPd['ߊ2Ⲽ>)*u%QR2S)kxMi~΍yYJZ~=wTv5kh^>Yjm6#|VYF~ֿ=_'u ydY GT~hf%cg_&FBF7C{gøL$"H&Q}T*+zs|V^&8.y(Vzǂkfh82!77_> I7foJUB~7Wb |Rv:>h,W yLq_;)*So4N3ӫƍh{:mo%v?iy ,4m`RW|ñnKj-k$87)`bc@X3uSĺZ|TRI =<&*Key}#p538hQ-_+_J+\T/Z>WӉ{gV湧}沞;_pw ~`|j,C婈tڤ'y7n_FH xMNj62}YgEr9E$c<Bpd4}[ęN`ӫ_lܢ+=(((((((((((/ ['Ö]?^Ex]WtW(ey@0F F.,SMlۉ7JA(>cs8__Z0WUjNۦ+Gd>)aSARNGRTW&nVϟOM_ fE%|P9[p[jqv\FZ;Z;SĘǓ^*7+ro+52Z¿b S<2yv]h`)}mw3HFNq5*YØV8LJ[c&|d.n(|7[ S{N֛Pkx&[GO9B6b'5Xס|.[ ӯeWJMP˺y'ӡ,Ux=jTmox2x(MiNJmq@Y,37_OZ- ߭ګ}6Q۰,wA_/C[MOWGZKgkRӵFqp_mUNm}&kccl`m;vaw^uGuE Xq^iz_3 ][meD"׆x-{:l^:??u?>SS(ϢmĔ%$W_]~B_.Z[Đrb_MD5,Ƨ$.Ks7~ |kCo024@V~K<3ㅄde2=?}5r##bT-{/(WOC!xm]~O4k1FrH{:Uv7Z ޥΚIőH>մC?\W}?8cFIJ9+K]:>Ok)%-ϯ;WC >#IlfhĊ2FJF|s厇_6\Ǣbc3$]0^Tެ겿3K &ݛl*sO^o 7RXɆG+{@lOo_j# ޸_ؖf7Vr#z=k?EcMrHs޾7֔%Mvo夔вϤaՍEN/Ntޝ4w~3 F﷜*-k!?1 Wk2FVVT}dj3[Z6GKf 0x<)]6OOŴ?/~>6e|+TC!9ԩfb[t 3+m1xXo#o%O;ZmAIO!mBM> @و?b?'s^5o'N_)6C.p_NUQ򊵾 uㄡ5:i+y{3d4v*K:Wߴoۯ=ghX6s~j=FxBnVXF9=|m6?_h>j-ٯ{JNu#5{u~YU94E14~G1MHB#qߥzg>IBYi%`?!yn/uuo7jpnٴb^'{_'qz?ΒOZEOODOD>eRH?c[Ĕu]~Eu |}io#A[3Rд FdobRiGs;~ |kCo024@V~K<3ㅄde2$ }fˀe$Nm?]ϽcyE't'~6.{{w9R$=A*k RAM$bȿF$jO !ªTc+Ǿ*m'g}RJNg(?τSZF C7{onxsDu-;Y1"L6c͗迅:qؾ 1x597ו'?Lb+If-ʯԶzV;saj#/k<[;}`>W%E30wZŏvkcX>oE63)?xx3õ Sݛi%#,{*cQiӋӫ7M ~BdѼg OC~*́9Y<7x֡ QقL&?.~ |cMmSqm.lᶼY`;GJoQ 0ݣ&J-yv]+T ld$--["X,}߭}kxo-5[l,ٛiO;hwצyA>qajckJvww?Ror<OBHT칔Zr{=6>֢+ࢊ(((((ïhv7P)S5~ԟ *~߶W<9[tizi5q\ΐE;{=+| 
?Rh%la-{U,vcI=+ *WBZn-Qml3M4xFRF:M*EIrrOG%ʚN鵪dK߅ߏ?࡞⫁gA 8nm>Cc9%X#e{ xZ#XyNm);~8Rz¿P? זE*aܐFF:^FnX~۩RFt?JwҩuS5N ҔޖOF~A*5 7xF0\0RjM0 iYq+>?q$=+ ;  'Ab5 XRT>d[GtbB`u"`n]ǐ{nd-Bmi*ӌ#m(ͤRNٻNrRԕINT⛓n:ɶ}ҵ#~#YCgkQ,"Wa'pz#:-x1~:Śx\IQs28kϺUS+^ifhߴw+LPm{ھ- q=2H$b@f-9KJa/*qOW?/ Vt޵*ԓ~w<z|kO JtE["y?|E2*ۂ}k)Ƌ5XCsln$3$ŰUw pA=|* a믢?m6/¿!xod3GНuw6&u`1p?e7M?5y?V[< -n3GN?/͍Xf@{Ƽ/Ǽk5Uo#+2£ۖ_>cO~x~<?›N=b)?Z˿_[_*z)U"TDD6vJU(}iG~OOەi/ di668T5<6ob)%Yh2ky].P~"c~)]>Lچe8`G?tcڷl̀I'sǯ+{* vGȜ^=:߈/[oϦ{9sG{֒Ǟ7{m)W?kd?Y>7yR\DzS  9HJ2n7=z+Iۥz_xX>o$jMėZ<.>`Gz.z}Xtj7'*10jwxqZm*OϮ|I RbE$Ϟ?$ KG"m3RTʶSk<G7֍#=BXYC'=_7^53P汵k;DaV\ssT.~iڛ6==F7/otNX{w=_Q7ٿJ_[\;JQ_W9SmnG?<'1^wX/i;J.G #>>"xk$Achtۊ9<A^kھy%[}BQG`_X1emo8J+@|n}-TrSo7L T t\!%Kih>W x|i7pPJ3_kx n ]~@Ѥ|?}M%r%Κf( kE1neo~u`Gz,?,oiiڊQRn.ֺv~%Ƭ6xNN)+7un5Ѭ{KX?JV\¸ܡBNzY qX V፴mk% QT7,Z۵O?Og~ 'Fӳ{̗frߕ:|}Cq{4"Fa vL*?f JN9oqaBxg/WҾ;]£ؚ^C}*;_jIz7ң$1{Qv9_8Iw>_i!oFbo‡@Hޝu_[ [#?XՇ9hfPHqYY<'6kjL2` 4yut=Qmx,&NmsVR d\iBUFpy ISֺ Qz=[-_Խ攽ⷒh|;u|jI~& %npk9O1Z|֎Y/jp{‰mc+p~M0ޗpO@ ,6&oHԝ9GOz4Y5s"t3>L=W ȿl47oi#?FL}2c vՋmEӵKK+OZ]䲰9SJ|9֝9SR__QIUmӋӦ?t<X5#n?v Õoһ[oY!9nCS|f6:zYkZwܵ3Ht~tOg kXo4֬[$d}zzx~&*-1H^e/?mE~2x wv\m\jq"qĊPN*T#Q^RMOX}TvB[G=39\oPxA;O*;-. #/O,{8OƾFh5't}.m>lXz_b:us2 JsVݵiYi?(d# ӌy!%g;)idҽڿgT+N+}s =䯏55>/Z5/֒9Y#HRFEP`x|1n<Z*#7#pp8YAx9b0R7"QWi]kkjljb犫*I$웲M'{ZtyΟYοgYīa1UoF̛ ?j߉LW|'X |_[*qY[ܩ>6IK-)Z]>?h?<_"[_2dQy;A}x\f} xk0\?bu | qJG_f8j4"Z0V>c9,^SkH~L#6yW:x A^!lpD;aھwl+77>MU.?6IO4U Fn'~"HK,i{eʳ~!& h._?w3I7<\ۢ qh_¢Tu;lԵN SRL~1J[~4?WNW]/MZ*ԷҘ,VUA$MFV*]薚57VwlʻY: Z}o:٨TzԴ&_VxYeuce9V|ԪIɝxqTߵoB&۫=?[֖锬WW-tbnk|i,м9pC0m{$tv>vO7F+oOlmZM^IkŸƟ[Q:u.=v=+=O} +erpMHx$# yF2YOkFzI'e{X\EH+KYGO8?z5X,|Q]>HS&e= 2{_? 
|[o46oϏ+LkՔ =f;uMR 5Ps4D0X}zw7'O3|~*JuRӋx{7kys?߈.&s̛Mz&[j5PՇWEŗ!b1YEԒ( 9i3h&š $3FrKFzk1zR /LVXJPzvQ׷7E~+ Q։jͮjR\uĿp@{'ƿڗA&gou{^6u_ =+3osNNI+w~5tBrВ3ni]~EQ_Q@Q@Q@Q@Q@Q@Q@<pzjr8qvEvMpW>^%#?ڗCzxN?9in~6~JdC.m'S}:$^9T#݇ h[S.ORTa-s7[$kzA_+ؾ:soISZd𵵔i&2Fxkjc}+Z?n_teF~8湱 =E{^KoHr|Gn&ڏ_]φ=KcTfOx G$ں@['ՍĐNkPr½(NNͿ?Of6R(/?N)uMv/&GKA/KáwqW gQ_jW'%MV+ϲՕ%#~ h]G5Xfs*)}Z;9%.oneٴGg{/]M[lluxys va_w?#-ɣBU7qnc˒7Vק~hwG5|1RO2{vh_D'$ڷwG5{Y]w[3FxO/*?Z]pvt?iǻΒOZuqBQqBTGCmJ$+O8捯dqlA1P+3a*byR[~Lc9r'kEtwswK?n{d2wҼ4M6kҙ7,C5xÚC4]"HaV50q_f^ O\В;knEM4p>5b$*E٥oke ՋZZcMM+e`Cj_vβ0רF;5CwaBx=6|G=S딩g9KgjV)*kޢgóW/ETg▧<-meI?_گ|~֣l_9l~cGCQ^ג:ܟv9skRcw?YSÞ%?E%$vƬxI!oucq$T#J8Sog4pͫT=aEep8xS] yu|Kp@:]~Yns8U7DsueF|{l#*Q{gƹ?٣{ʿJ_u?{I}7=˵{[smWSV1_^k0o}rh/PM൷<_羷r5߰.]_;xwLm;T̵ݚ32z?6]^W]Q^2>/ʇ֥t5ݢy6OZt_7?Vu?]Gz P\z P?eP?:-J?vxB[!m.X$s~''>͞[1CZ׾ǷJ̱|;.[^;ۮ]ƹ/,*G2h3l1"eH t=L$tR2Mw~E鶺=ݾYA$byq_y}%횬ݻ{ǧ^φ'? +S4ZU/7yṾ&t5o4m/Oim_l;Hتf*x9f4=I%mߢ>'>_ٹF)Ri%wm%n? tc'5˘o4U=cE[lIIW^qv2*q~_tiiF\W^Xj{ji4i4v'}}Mu9?=anR ko7oN)0_ D ¾9^$; _|Rlu_$-N@U%Q_'QEQEQEQEQEIg \ֵ/eKP Z#և~_W$VwӮyݯ{G讣*GΜ֧u7y+\0==x?oZ%TmiI ѧyHt}%P{exFulJN7c׼~ڟ[O/Z;_-n{{8NZ(?č[EtmZcorL['G'shڝ]m5>MMVg DKx?duI5>]f_K}sx$s,p%T;Vj_سHZ+n.CRzi9o {%WS1j׌4TO_XX jSX DWw? W _JO _J?֔Qi^chq7w.w@3uٳpS(sr^w2 fX僔Rw7-msӮ?GO K4ZQ̿,$HERGs|LkiVھwbgUTr=+sl`pە$o謮Gdg׫'59JM7d[:+ԭk5mj[7VTX]lIIS^qvWF O"_5ѧ٧tFlW^Xj{jzTM>$%_:1i<]~㓯Hm}ߤq,}:+:= WeEqƑӅ X8wFG?i#[䵽~U7߇~y/"G۴QEEPEPEPEPEP_u2?q\|UЛWWp?#Vm?Ow"Ο^v>5?ޞ_7mjf[nҁrkֿt_/ֱKB<ǽPVҥ˹.3v]O٭E*r΍u\W>!¾n>-ꗶG[(E*NX zlV/}Gqd_Q,wqyz7ңƤƧ@Iu>? ~:e պ~RCo;B΢[6#K%ui~e/o;m\lnuY"}TkF$q^b5 -WG^ ?cR> y>oqͶmiZYPq25SppklӏpuӔ$5+%'uv_e ҴE5)!#`v ٛ=08XQz?5Mtiqc5pu4i4O } "OOA]]q֤vXީ?ݡ4+SAՎ, EAoWz/gaRk_'X¯JG Q_4QEQEQEQEQEZ/U~oXOS*@l_eտ7#ʏ`|1}G^qZZ_RvfiVaTS=~P_7Ao$3JG]*ǽOOJkI˩/6 y}z=_fNWUeso^O? 
{xT :un趖Q[|mhMo [fw!eu /vF&ONVLV7~<8^(#G{$ pU=9xjIkZ[w,N$2H8SJonJxΖ m_쿱H *؟?hYtϑ*ں?^.ԵJҼ\2:؅$Oβha4l/WK|TSBw'nZn5("#)ןTSk7é2=ݪ=<L-L]e+^ײn׳ٞS:˗J1nfu{_kSTJq[sxit$!Yv]~[ tE-Yql\{.:'Cs&[Ub>'%+{5Zhֲ hKDPLz'#$ET7Ԥ*?XwrH$ET<bw?Q+C^]S<wNH+8DNj =QEhQEQEQEQEQEQEQEW.(r]rÿC"R ??6u woE0?}Wj،5yʌM?73G bxN+E*M>4(9U BGnjtYh/M%i$LRv݊OE5vxßzh%ǃzI_ҞgΓEg"2:~M{fCO]GxlcIW;ؚ_>ck6H0q[^1v{ƾ~?&L?}YfXdw6^U<[MÚYnX#\v8\JY~?:oׄ Ӵ>~^wF*8/͔i=}OOS#üF7t O7u??OoD{\-sp|sj%Õ!oqDM?y$1 %SOD?^Ήs}}2mnϰG$ p:o^2I=YDyy9mCI>i+>T3Rj)ٽrܚ1ŤO޼n+m]'U#_CO&Ygމ+ >YYk:+In틍oqtGԾx]#k|eoDmodJyma0`~e^;:g> S.b݈pbTGUxrcU܋!?ykQ[JҴ+ռ[4E&y^)ϔBG>Ɠ R9 Aӕ):SWU=KvqSS[J*m+]=ihHIa31bPdPx'˹;=i70VoCkqo^il2$CH;wp\'P:Ou[bUH,.]$?3>F@2K:w: sOVt[E[Ɩb3հ[>VkO ys\xtqo&{0,zscڱu1S%K{I)k][IuGpQtԛouZ&ݚ]T!imqjji>JbǴS_/?1wxh=&5 "vy)Bu__lC{R5N)?O|W?Sj( ( ( ( ( ( zeSXwdU Wg&߷N}ٿٺQJ#<-\V;9Q״g&c(0XlV!5w hݣ_׿¹X|C*!h@O΋/ɤ $#YN۱I5?hXsoQV~?zxQҩ+SY~軟UGOɯ}y? O i*sr;^~?',mfwF?k}Ҽ?.`ɇUy]'_I dowty\&|' o(ooknoG*F45^};E}@ux/u׵a9gu:0nѕȣi߿IysW|G?vzYk ęɸ=s^yGq Dr; 1ӡP7/_dwqBVw|WCMFu2HsD}5xPĞ15[9g/c*13h`7M8oy;7=|%JR)OGdwW10Fdh^P4'_b:u2y-A+]σB=W)w8Կ`1x8׭ov6Exf[+;W3l>#0USImZ=wcL2ZjUj* mR*WM]+Yz|W`kY|x:cfūɯ]X%M41ȓmD36#Z_ 3}4]Z6gEy-,6SRsMimi~(կ`R$[ymGTgz3 Sy_xTt\۵x+z_Xӎ%J¥jm'g8_h}߆D~L+wpsFd#ϿOJ]BC$qw9*VMWxVYkvJD=*C f9Qp*¿t3OVZ&I{rH'';cLJRkkuX| N:rJzT]_:8?ݬOvhR$pOQ'˨#r~[Ɵ&c[)X=T.߁|3XMubsdv%OVu<](є;IJ򒾛+Z~epnZsnhHM[Kuݫu-|N~'6kj6!lc3O:W.%4/n_jWJ􋄷}u=䞔?V&X~hZmщ"021 zh)AqwXș/+\ܢj/cT~WvҾ͛b ahUuef;rݲKI$ݨI4S=9!㆓_/O O>&7?e|X?}'[J-;p+ӑ?i_0UOѪ(O(((((+*.k_O?^2 A<'"ap(</3\i1p'6Έ#^ekG?wRm:`qI[}! 
KA5x:M[nJt?.qʲ"x/Uq֯OZ늶2_USRoIz+7)oNI䎵z?xB:oӒ/?ߗ)鸜Ė"ג9o+?1~fWϟW?QϏ|b?sU?ه _Cgu;kb#Gׇx]~vkd hlypΪBjӽzy]69Od>W%\/=R~:C{źUOvg=1BoxG֮l簚ཌVpAc^6goqS\I;7_#rZػN^;M}I+'wn0.ŽdhMyDk4'_ұ:Z GAQ׬ƣy(gd`>k~c-{+4hȎ܍$jWK?J9ovOGp6 `VЌG~.Y8du+.+<A΁,5;%"V !3g8WF_:?aSRLNNvǑQ:^!ԕ8׍k/2*uv˲-ftO+}[w V#xt'.WV|TI7>YM0=YT.|]X-*nz|ǭ~gJeNRod{ukߡSiU;7-#y6m.VvԵ7QnUe(C23=*M;1cf+4ܳJmtm~QD_X&O>d_MJ|_&Jd~&QǿMԯl游8a ;ڨrk5 ![iWPZv432: XLO0*TmrGM'|?[+(u(9(OVivQ_CNgi1Z17, `D:)Qi J̩8L`O?Zw}5=1:M*Guҙp+1.}jfˍjZ\)ӷ+NAWʱR^꿚?2Ɯh3|xmυ5/'J ")-I}claW#XDd D?|Wj_k*ݵ&)h^DҘB@'ֲ,fE՟{JO][+D1V99?eEUoyLR)6i[x>O-^gs$I#'?Vp]Ly݊IBy=P}H?z.mc"5J(\*%qP? }5+jvJHol7_cyJJ~㼜mxt~y8#*o.XF|ܳR!no{|m-2H-{Ym2̌ Csץq"׼I{x&mZXHbC-Ó|@Xc"56IpY!2ac?v -ޥQ+BGDNg|/bgsNvVsFRI(o\dpbT+)I7w:qm7 ɫ5M yBjtCM_ca֤v֤v#_P_L S?vӤ־g_pk n&7l\Kjg4XUQE QEQEQEQEQEWl<Ә_9W-.KDfAy `=|= E^u)VWv]>K ]whSFR} -_Sz}O[cDABsʮp{}>׵~_:̕-5V6+> iٞ gB3J?[l&ҿEjхTK{=3~8ɥck6j,uVDb^Zgo,kP!Z0@?ּ6F>d*)R2㯲ܝRZ/"ӭdEP5ǖ"`jnK0ySWf0t':sF.\\-zNON0YŽ~؛㜯f*>d,9R>Ƽ8chdȗ>ncОzVGu_k ²T!Jd?f]WH|E\B̲-RC¿X1NRqmo&~x6YzϕM˙*OT촳iosfx[S b7&[TdqB::fuo^]i[^9'{'iibDa+M%*WkO|L]# 8`8#=isIh_rmtUjr$*Mlei>).ETg?Z84 [;p;vz!kؙ'Vk+̲5ycs'oeCP*f^S 5r<ҒK%fz}|p8sjueR<ӟ$)[wMTrWfƝ:ǚfֳZ/ e,7jM,O?mm,ro$=Fù1QWljڵƫZ6r}]V>dq& ­64zisq(zbx㝼g-q9*~ѻKJNsB ]j۱1TGҔI'*jeYT%i6OS~t'Zv35m?iS>_ӏ,nݖJsW]켈z7/7//I"?߯eI.)wLI'??uH)'\M-|p;TY`?;L+,J(((((((8R-֭ Z 1X+ap'>c_5?r-OEtWx&tρ'G h ,?.?jZ<v|tr:i!;_14,?8?A  NV"_A όgw ;f2z"fgq~yAz_;#fV ?#fapp̷GI=77?OC?}\-w N*P듇 LN&%~?Bks0-#*5?B{_ßs~wRUC+N/ N<#4Exf;W~#F]Ƽ7~3akok G2|~5}axoH^/Ky$}~$^$bj1޲8_']Fjnko64wyK pe$k.?=1㻱1J$rGV s^cpGInUt>'ZZn$#ye7qu>o20ZH>Kǖ~ `%n$tb9E0x|-Ub#h$$~gYf[^X\nwJ z]>?npoc˱'[5u]O3 m2 ˍ_cOI`OkD}iZ=eʽMYmm#vۏ~+156yw<'ľI$.~xecVF0$5>@sXvrgAnS_ֿJMJqϖ7~s> 'Nx.NrW龔Y>JbǬZOқi Z?cvz#ψOV1O5[7ҟezT~W.¸Z\O|QEߡEPEPEPEPEP_mjPkVRӭ^rlø 19 va?+?UMhgWN􊇇xmwTuCozxR5-s ;>:|9o?Su_}OJ_?jȎgxE˟Ѕz~~?'\_WxE˯Ѕz3} bj3a_3LE8Hp{?Vᚾ{_7_–:I? 
UIu15z%i/% 92~1|1<3i:omW& ,dV<` rS=cC/#-n"8>Ak)ATzr+5wk_|Wb)::KG)|2N.Mm}&ɥS󒜷t{Vj λe㩼/#N𞼭.1'@er>g\0/k|:ieQ3F;H~Og?kp/4cTOXkZcMtI.k-~rFܲW6k2Z>Om,k?h/Cw]۫.<)^;hm܀ܧI|AmY-{nj~pԝhknҭ]J g4n)5hkU}NC&X?xG̋۳ e2ĒH1ǩVk\]Ly< >וC15Mb*kJrwtHyeM#6QxfxSѬyK3Wt/>fK ot)_B?u3έ?eSQs_u m/z4S=dץ8~N?~7f~ p[>|Bjzݯޔ-4wNz~t?5T + ( ( ( ( ( iZ=7\Fo[Մ1I`B9W_?6S5/k#"|5Uk޷?1)'Oᇪ5bIJO^uj+ѿb?o>=_8?bO4cMީ1~! F⦆W|C Q{w|MGRj'և"N_u#5Ose_Jr ?}H-[SI+еCSOמ|;oӒ4 n'3+=~}d<}| VB|iÿBWGT?'fD5G?&Y5E@>h랕ퟱ_m܏A_{U$r^]+2Ҡ|+p?GCRWw?aᦅ_i^Mnpu 6;"~Y=YUm]m>?2##=xo 0Qʯ̬ݯk+Q^;ר+)|-8+6c4&OJr:tq[}$|:fٟ};zliphxF?'@er>g\0/k|:ieQ7F;H~Og?k/f4cTOXkZcMtI.k-~rFܲW6k2Z>Om,k?h/Cw]۫.<)^;hm܀ܧI|AmY-{nj~pԝhknҭ]J g4n)5hkU}NC&X?xG̋۳ e2ĒH1ǩVk\]Ly< >וC15Mb*kJrwtHyeM#6Q|=Kkpmhd@R1b;V |BU}/Co3 ݏHz=–8p^Ks VE4[OkexΌÊ1MP"nVSTѹkn{hI%y3Fg5sC{^6bd%'[֔+ZyFHPu}եb*J-l&$_zo5~Zkǚky}I?QԿGz]>@ȇV^2ڔ!B+3\hz;dz?U5q!3?KGTQE EPEPEPEPEP_unX²ZjW_f-LcEpq،g_Z'T~??\&}qgנk:> g {'MOx4j_ȑgknSڋ'K?M#j__3F_&_U yGy} OQT J8}~R7q_O:I qko.Rm]-R[Z>HcV/}2qJ|_&JeX~1|i0<:΢2`2s\O~UͫCwVȊ6;t#kץ/Zl:ƍqkq)_7 F}=ֻ=l}NGنTrvOIinx~c,ޭyqnѤv?/WYdlqS;`~57^='4S–ڔQwB:΃!G5_|9С ~'_7ɰ5KgUi%+ͥggv?a¼[QSSް-^>jɭ>x?|U.kVQ<#j" vg 0̀vY>d~F]Qִԓ$Ϊ=+~8?dψPZu˭?}nЪ,#B7!aSpkm4ץp-+8I|Vkg~7fX VYM^hT IʺYݥh$ХMc^=bƨs 3J4ޤHrđJ}O.O %_sʣꓡDnlhj~-JyPmؿ4-ƥy}vn{9@_}.7e&NٮV?ފ9W|?5|>@Zkss'ˇ>ðue7G6N^`_ ַ>$dt]IUj 봪/[/vӖ?5ݮ}%j0;S/KgJRYg%cU+?K? 6Ыѣ(蝵K{Fm:R' -vos)MJJDW? #ӿOi֝/LcW\?Z5gvՇD+8~WM7~"E_ S WռW/J+xE(((((-GP>Q6BȟgT5_ufgN0p+k1[*R>7_M=j V珉uf}SK{o@?I{?㢽9n}|1J#:K+UVʱ??C<'mz'xi?El^y}36U~9xi]2*8y~L!u׷Ͽ+O{յ/b|)?NO]kWmOQ OΙ]oS܇)Yc\X]m"ǡR,r0t ؟/%R/?둯NlsԽ#z߆g?N}[=?0kÿ؟OI>e*)R2~㯵ܩi)uU@2FbWz?4{8u2@Șb>^QjqJj($7*W=3,,=iIIE'k;jx?/G4upќ>$Ci*66e,N+[C MJ}5`ӮX$dCLs)88~JjZ,d@]?yD#!哞 9g50V; /}>gY 1$Y'ki-toO'M$mTawVe]8S+1E= ѸQ`pA#+ڻQ&o4oj&'췱W `2nk[fcʼncENVW2k~mX"ae[w/g$ᳵovTFG930mJ|2HTyo+ÚWZ4ڻuYAU/%? 
$W_-F"]z7,`\ݏmע~Nj_֯n.0ys)7mnx,G?粧6}zٯ8agC^UkRQ^JO>bT{q 9ϧ{W.O}fGKnfzW=561,`+ X?~:aO,PDӆoc/v7`7ϚJvT&_:O0NK c-+Tm~,7YXN=C_qo.m~)2 ea{gC6ڄM`܀*{[Q%׎~Ik5t]~ pPmqm+=-k9[٪q+:7ZmWv ~!Ǧ:|at֗ CԨeͧ|nrlo?ֽtNizM3Q cCa{Ic.@^ l<7qF+b%*(Yꮮ> G;J)Kn|Qִ pݫo z4TZn7ч֍x՞??Α XI:l}~ ܒS ?s}uy\*9\k%z^i-v+:g)Ad&ߙ%n?GS_֢iH!;Vo|{FԵ+K7n z|VTh͆TThAzoZm)D-q/+"VSR/z?ַN# 4)}i/4ɾ/dWݟoZڕ[w& G̼`Vh)꟠QE!EPEPEPEPEP_'j}5M+HkG8+}^%.>Hm4_SᕪE Z })\՟ڿ֡Vjy-iY"n1cN{noνE ><]PU {wquȸ{Ln wIb9=9 d}# :Z2DjVvuϙ/%ۏ0`}=ڿhrxww+7<%}]v7Ҽٮc]GPO}qC |dڇV%ޜ4}E}N_0EḮ|U㰪D)4p^O: \|r]8]_=;71oiZkkydɸqZÏkysk0H!d3+ ?β &n϶UcJډ.sK]a[(P#$wrnKnIYk_SYכoSk=7 dC.~m?P4uc|ǫfp|DM KկiK` 2O(rO?Zg+a~QS701^&GS)W EUuu~VxDckl$>5YTʩ!`27 nnw}/*[Wz~Gv>+Z4޽{Iտ8׆xGVvֲG$gcD؁^ okRY]UiZ_i]N8~h;^ItROd+Snꞈ?%gZo,mJ6LG4W~6@ֶ凓&c WgXL9IǞ1ri&WC%J'8ß\ɨ$gax~Ė_[0!ᕆ AW|8+|+=rLK+Oqn!ߵynmKᮙ}}pdK4#,Oi<-.f|ϓ?}־{8x9Qf]H)(TI7kC,ӊ2 f# JrT%5m6JGng!UzjŸ,SZ#6w8v$OAk hcC:r4RO[%S[xy(ƵXy%K~Garn"2en?GS_֙9|B=nTF"2 bЊyJ0esvnM-ݗCe??}ҘZqYҚO7֜bSOܛB_8 \|{[Rcrz|caJ?-e4_S(#(((((+I`DJQb4<7&#G>$8oJ-?֊oʴr``nzx}mI~Cgdxfa{;]S ';顶 t[ƍݑl"̺u|9$y !5#%ݗ@ʞr+ \hSD(/?nɝX#I8 |Jv|ui"2x *F҇=_)<i&h#(S>U:ÏM"ڸu)G8zEIo|8ThO]I ׷M:wU#w+}|-i+-y w>-| O<+.Tӏ.e?־e[߈m4]=k^,l_ƥ2Ah 稯~ YRFdp\LXyFJ3z^_ M7NMYHKi^.c ye܃km߉nGq:V,lP [@XM@Ȓd|r1[޾֯f:9|8kyyx:Z (;o}z#M|o \y3LLXϨW$z#M{,,ǚ(  Qzv)zRGKzSS?Zo?*yUV_SSO+{8ٰWc:¹O|Uuozď&MWgXL9IǞ1ri+~?:j9rNjNM-dlĚd, 2Hdaѕ*ñ^v8li0&حII៥zԎL2 ωZnY|p2G^~r2Ν7Ӈ*rqkWV*s4M^kC3+VԴ9mk<`Һ}ྋ[#SLqxaC}oxTYjz6 MH B=5A> ڦ}͸N$`2zzrZQ|J=otNʹt0~{UEӄ9fܢfkKrO^mqQrrWS[>jѬb&3=1/ W1îX4Tڬ}澺cc-$zXBg%e]֔RLs3?ܨv'w zZtp?(sWB)__ψ-?!bZ^=|?Ide !I }Ԭum+Ěijfb})NTFl 3q_uP&ӕ9[F֞ͽRv]7׃LDRPwuvm]e}5`&Ȑ[ uk.|}EK9ǽpýK)[4fSL~c# ar_xnTk u̵1a$tΥ^|3U+$d۫!"*4h*mD|/_jfmݻ${ Z|?[yqn @JJ2Ҏ?f0T4B.uR#ƶ[ԡg86m&^o>~> G~"i7Kk[+{4!, ωtyѴXS4ֳ2/HF^w RKm#pLheq\ގ~GnsdƦ+Mۚ.i&XPbRO{t n5_VV]wRAmxwz7Qh )#=qώ kNDx=Tf]e,&!ʬ)J{Ϛsrjܮ.-hvmi+kgo Y HW<Zƙ\Gu`$HeYajZ1kAqof]ۜߚ8$[%҅jiWݿ6[xZtc)#MBN& G 35擲2JJ1WwNJH3R7k"uD,"ӺO'/8c>?ZiQOO־_~'- /G Q_' QEQEQEQEQEZsYʄ5/_u;`kF35όS?oցA8wFs}ֿocv#xGo$Nhk|A߅H]Bn\}IdqJ8'Vu\[H LxWXvQwǒ߽Π]]ND)p0 
uKxj%RwO&mB(8?{|se6Ǘě8w~k4˪y$z}^:&]6=B8>"YM2X<\+3U|ǁK:ʥ煵]GQѢN6;hmi2EC}_ u/ ,wZ6`b+?dO,w/Z0+K1>gHKXi^>RG2~c? q1vq;u Gge\1/$T{s@bՋHp_n 'u:نTTe-i7vrf"Jq1m/Vg.ghE7R2Ek|0^#Z;aq}MCßfב~ξ(Z}\12"9ʐpAߐy5 \j$o _Tteu;y^t@u36w0][>Y"pUui~mN6| pdV QWdp9-VCx 0+tK-nvHpti*$GSs EyѥFs(wJtQǛ~4?y/QizHh!FᕇU?K ASJ=I%JG ^oaJSRmߵ4?3Jխլ[NUX?Ү85tgNP՚ѧM=gR_j3_jb_? xǺ^_xEo+Iw{~:??j(O(((((((+ ᴮ_}=n^LJmێwy{1u_nF469o|4}[0S7%xbUR?93 Þ4]$G>"~2h5B?jQ?k?ZA}GlYvU*?k=gJkh|_#"^W^ss4'O+![kig1sҽk(0U Ơ38CCHoi@ Nyu;a5|qKp3b,uHK1I5?"aѫEut3˼CΑ׌  +oHp6uq)9Szg9mm^q^rґL~CIg|~jT;dU=z7y.uqٮg M͵v':iW$ld4}`wFrp8$6=6jV3kq| \NV<7LIdX|^9͋4~Vz[+5ӽ,#B Ÿ,WUcu73wO5kf >a?%I2%}wM?LM9nf׸Hd>[m*nNXzdfn S:18jJÿ#r ʫ*|MM?R7(%~ ɥ.kիX;W*Ҫ5 )~n-ÒkdӹGu?֏;M=:A57O9U!yZdjTx{SޭyR0Dy( ZuKV-HZۯRsnO%IzFgԥ%*2j4sr(>[q؊Ⱉ%hh]^XYSnG.VᆇvŬאl+\xW|KJoq=o:'/tڽ[-藺}9|3߂k~x ĞE[⺳x3BC#88,E\\Ji Ú5iZVI#(fTsZю1:*UgSIniMx_;gjobR;7+ ֚& Q)Tv=N1zUM:,2ZZiDpHya^w7ė:Z׊-C[#ہ۞F9* mGR\k*U'jpȴl5h$oCXV_ERN2ʣJ*Ԯ;9r8vIx|AΩjJV7}!?{'7hpK&k bg;Qw:u'D M5Wپf?$cFeX`k9C UN4һMZh^%cs'<j"!Fn匓ޭ5+m'GUQpѾ?O/z7ֿLJ/#w~Cl_Ji?No})\Z_x?ܯޏ7{#=|!Urzf7?|`Vh%M/{M?@3?(((((࿱xJu4mĮک&ݸw8#_Uʐ+~ܖTn]CHcF LWճ U>Z3|W*uZ#0Za9AsEQNz[ѳ"(/f8ٳQO!jcSJ4)~4xeK>iq*Y~ҫfh0hۂ/%u93Kb4R8ſvx?3=+־*ٿ#Pj8?xI4;'^WSWԱ8/:RT4_&}qZWO<<xW0d MLhWb9xSk//)lt]?zOj: xIaop񬱵` p=+wysώcWÎôy%⚓o TsePV,OξP_|GI.'1&x9G;w|Tu_7x@?{_o4VSl5*ՊuuT^'t个gJi:b@tXg>n3MyhͶn ~=k?b˸,uV+}0O6MA7TIixW|q=y6SI2iso'x]VO㷉kזqT@9z+I&F|qiOHs! B3)Kn5_ ya5?A@pxZe?Yܲ˥qI1L_},|\U|Nh)yrBYfS(sBPig-Wu&_tֶڕ闫wHzm)]0E6Fs1X~jxaZmC񈼬|dw qXn.,(|1mm&0޽7=޽,-ZR,ӊ冎IJYsoB'*%)sMŸ8tkC>>x?:ľ}S`g J{[ͺڎ䝋gITZY՚VXeՄrPubIwyItM>i>KҭͲX${7>Y2Nו^I&|11QgBw=^:uɖk@;gy?/|UG=݅lT 168'\A L JR*m07+JQKUcK*5UJ?z)ݥ7 2m=rxnk-\N`<7xmt]%f׮-vrќsj|\{6o G0!,3!@bzW qxbχ{ŷR~Kڰ܁ *xFI)WZphF4nkUʒ5e\n0ѴS*si6ѹ6ۏ/_IM%";nr{G_[G4];P ծnv,y'95o[߃ƒ(>bJKFRцr_iZxz97)+=9%ecÉ1q7 %F+U~c;9;^ͻ})J1Ѿ'_9]Қ_x/?֧&}d':JWo@GJ?-}䩥o麇Q_gQEQEQEQEQEW`~xnƹ4ppf/?n;oݐx&+ TAoI5eݎq[œWձxܪ^7O_a09=j+Z]/PPO,S\ߧB+So~2Ū@O˕`3)kl(&xce?Jb)G7ħ$U.=c0ua\`߽74ɾ!D[FVT,;^Z<"UUwx G%W^mGۜl]Ю9x#!磊qN*םF<:nGc#oϓ CJ? 
㸶o^KNhk襧zSTf=skMNoZÿg<)_k>$Y--M⨀su$V>|tj=Ӯ,T4.d$FsC#k_Nj//M<3gS۟A@ǫHV7 vpFzqz:yWElA'4#sF,+|;4ӲZ??(T0YP"U Q愡uhN"|(4-3@ռ2mC񈼭5csdw qX;q5C2m&03v=mj8`8FW,4vjJWJͶWzq#UƟ5GR1RU%.zlI:xWڇ? xE&?J{j j:v,\d=prI4)} vM}_ 6ܖ\ _=|?Mx?wmsnAXeSWT橩SnqSZ^ϕ&֖ktufc0/V?Zua*XiIwIzt [` 9~x7Tɤy>X3q!;ܞrMz=׎~&#Lhzj[f6 zלx;þ"C7ZlsX_&5& L`aQ)`6Er{EqRZYf4rn^1%Z1IM-қ6oGUVO|P|e[,Gn c@q5Wog…̄g999r5>0OX|m]BP۲fBi@bzW Qbֻ۩{%mX?'ԁ :J7RJU֜9,t:3rrS.WejrK}Q"[ǞGIFaEk>`~km9MS+E?I#xG[R6E7") +?nNM0\\42bOmȎq_ӒSץ)5KwM1y%U~:{*3Eʆ+ۑFOw/qxi~=fT Ѱrp}Ez['DŽY!WTxfj׹8`7/%\o8nc.HұȒYqROΑ+U=g*&ujNаέh*A><3,K1'OS]Աfg\P;VI-\ֽt[*IoE Yts/ƒ4zSv+>|gWֺ&Ok OWwf<ڼ ſ.?W?N?|D}aaVZ*K._TcnUN_vom].#㞿ZIBc`WK]qID<)^`iMm_ܒ$AI^SMbT*9Ҿ;]ã}ǏŝKEkm|9t>rr_SIٲ@֧;B6D\A#>Ǧ9u;槪Mg5LJHwnt>qGjoꖭ쿒ז/ٽ Y}vfZ9ikz/Ļxo Aq f$Rg^j`eS<']ms/x{K~v TbHHvmWw[[="Xll86M8˝AaGj0ȯ%>+#py\,FoJWЖ'{Ҵ"_h5MKXil˧#v'|_:rxWח 96ʼn#199z}Bd+<ȑțY*.GF7 c*:is\JNi)5%Swx#aIa7ʵTb9I^mZm#Dc1 jH+Sc?%eܤ1}'S֝-dvCD?Z gE| ֟@Ŷl_K?VaQEpQEQEQEQEQEWo*kK.BcG9*mY? Um dt"ߚ{5 V7f 5oKwzc̴oV /[~yx9<n_N 0OZ#I6i_;x0{~nbbFA [l>"iZ_ >f3Ў9,U7PzlHdb!jvΛ8 N/;NP FpzVzWrInqUyT?ѮmU @ + [/s\BV2\6 Wø$V|RZFq$w#טi}|֭.HXJU +ѭ^O Oe!߰hQnlZв7m #Oq3{q$H3wvǒIO\e t ^mqK^nGN+ZG^ =n!'E--:_Z-joܻO 5_Ə3mxC֦C_Z]rlrZ^Cuϝ21F98wȥS'b1\g6x99Sir*nZY4Ե֍<pnJIFTߴTںo[ӎxo}SvbEJb׬x?uulԵ =3rd`^~3Z=ıjQ|NUeB}1݉ -4hlgNA 3#ᲙP\GO~*|r$9N-Y6ukW="̱y>lU (NqPa(融Q4/o m&iD[mK~Dn !o:lju׶mmDn'iV1|Ǩq98kosNMVΫ) # 8 sJSTqXRUJ1qmE8BNsә;jW`R(N#R:M;VtH?k\]ROK[S݂`sׅV~"x:Dk%XW\s_%[;]Py?Ayn^WsEk]WH$ JrB8[qWԱtiR9%=]ۍ֟ l|8ogKU(Q>kIFvvՎ~: xd.Eϻ>GU>O?7\mLι#1ӵ-2:$3ՍeAz~}vokZjZYBۖp3 b8xʕZtBSR=fԯov_s bG*PFIbaNiV;8Z뙾X~z>ƨgۄym-&rrMǥUKnW 5Con\QL byzW5˿|5MKh$`0@#٪[OlR 1{ tABKIY9^n Y%b׻~Mx|5KJ$i%4mGiVXiv%1cEXOD?O~NbKd~/:sm[zݱ=?y?caN?$j_߿|AQ`v#S~7@W7|9r?H^(rQ_GDQEQEQP2YI,v]: 3 W [PoqE?|}E|?~8~(*+ 5&oxXw<ݸ2Hpf^CӕƱ÷k\L'>3ڎQ\Nd&9A;--&]Y> )PHpF7WO1ߋscMW/OJxspsizqyhXn<=5dkߊKC$/ՙ¬zr8d;|}ȩ^[Y^~_EZVTNc^IoE99Zgڥof8?L2DIz||(#e<v2#5ߴ[k[ͬe+yeݷ/ìiwtJBѓ10'vrvgZ>'KLE"S rĒٓy=A|JkG <<Ǚf՟=uunKFS¤RI{zFSho-&snJ 6? A*euu?شAE!Fa6CZ? 
-GY }4p)POOg5~~W>BX M+Zn4coΣhMs omZ Vt99:t7z%?ҾbwKB_] x+{dWo34{؟ƺAK_]j"sue&h 8s El=LZ7="ݟi-쏵0xjx'R JnJ>K>O~#z7d\M#{ß"7kJF۪H+|e7Gy#`&dm<=GlWy>}Idm+Rrx9NyX27; yn'8*mG][[<|@M-kuK<A^ jokO'5tho&h*r;ht[Mz_ӏ/]?Vo}OxDI'~/K{:l/_|.߈T,>mJI9KmJ$?1u+uޥ K?ޥT߅ Gqk\4I3R_/N?#1JXǐ)/.?Sc)t`'Ҙ 9QœG_:č/3YMfIUnu8-m&y%D]jŽJ[Z`fycϮ?_kљ[ˎyD~dCJ 2%wFKYԚnWLn:̼`9p~^ s?r |QGi{ZTQEEs%o%ӠȊ2?@MEy_?i<V|E*#H бB?QXw/W!SnشW?G?.xRfG57sۏ9@{c/$Vet=9Xk\;zAֿ?ˋ \L!b:c=Ϻh_魶Oi铙k'RnEՓ:B g`kC|~dc7F4%} Ǐ8OG6gQkFFC-s[߶OMWt2MYL* #C oUdYYOy55iH8vQ_ӕіpVoc$JI>z‹"9&[Y'c+?#^H|5ֿe{VRg[:q_κ ~} <:6=qjM4$-;ZC~mi; WiPj>~ | uxna$R.0,I-7sTpΜymYYwW]e<>Q_ L*K.T׿OWo;yZ>Yy^!kW6|c$¯kWW_Oٺ4Rmnx]?,?OszşG bsW묗s%ҿH>v6:0hugC J_NoS+د^.z|7tT%зHⷻVEvC3H @'7kOID֢'7VZlp2iʏF 9s|1VŪss-}֚o\u $䭳ɳG>3}Elr?<9!β-3yFo+b^ t{'=6 ymFFASv{?ǟ^)gԞK/JLfR^u''0(ssພGp/ryտ>uֿgXݓ߰V]Ѧ_Tx3WFkfܧ# hKտ4ׯo8{_oItǼ7˩2>T@)`P?ePoRI1)mD&:u]߅tKw @A$_ Oџ?—CMq? VY7uHS?ZVoVSgTEߌ[ԨŽmk?71x#A56dGD;zz//_xn4i$ IxʁvOZ S Wߧ:3/ڧ}^Χ5EWQTJm+Oi$^P;}7?ZEyk55/О:hF/RduC/S^n:v`}%E|$H{sؑ$Dža~^8fej8iELxS?q~*xP'׵{IG(~]^c馎v ?Zɿ1 \kL!zA־զbM,Kb:^y}}FObxOb?iUWem|K'Pγ$Rm#z8<ۀ994vu~ẵLO/?Hď *Tc )u:F9/:G'g#4I-ձ^?}r~㾂[m2MB>DP qeFc'EƝfؗV)8dXГ^EYF{ |/,-8FϪk?o/:o]Yß U& n 8/_Zt;K]j;wR3:ZGaAʀN 5~7:M 6Yh7dӭ9 sdZ1$䟩%㇩4;6,EI2ib!VNP?i\i.嵙d9I8ܜ+Se'k;I.Fc#k_֟oL=b+QJ\CekQۦkx[t'_#x*i*_藚S?|sxbMKi3Q-զYT!#rAa=+66sMEK{6;ٴTMv<{$+׭|O``x =&Mt?gOJKz4 CZV#P_ƾ(x{ş S1L5줉 M=y<1[ocj!lRz{~q]nGqk44Gc‹sE R(MsZ|7 *M^3+7nF6 vb~cM|yR9YKV^WW$cM{y/+P|7{){.zR'~,?H]ûHQT/ҧ'? ->iAT&_֒eԳEZ:AyoQy\aQ@ m}E-{/Iu?;tցDց [|-ֈVo5}h>bCmF?Ng{%SU̚5nQv"ݬCs _/k_`/ WǍ𞹶;)Fv};uHAC AdȢ+ (8ZFPHQսQ^oq< 0?岤m^{߰g[ѦP:v+^#Eb,9bSׇsiZi>¢#joj kcqZ~2xSrnvÎQ|{+JM7W ,5T }ۨ~^}M/>|ψ7f.4[վC9]Q+h k-ܾzijWp<ךjG̿)z͞;uZ]F̶:2RʙCMz_ݴmF^Wn~{,.?&K{+Io&lhd OΝ)4]Te?iS E.N~K{[K0xn1ֽgXyu7+sImV\36c׏JiEi? 
t{[o56Y%ei Fq\߈ `x?B }MuiZuѲbrWa@8^;V3̨^qڶkm.ReҞW_PD7<;⹖kUh0Hdcp$Vf3n-ܴ eMu@?ұ~x775ƞ6{rU]3c[W$GET5[/GUe5A#6^ulz% MCŗƏ 鷖vwφmc4zY;Fg>GVLӵkiV2W 98 كg+0x^|R7(7ku31Ln/B ]{Zri۩]V͠$I {'7@."2_9#WMԬlK|'q<ԚJo|ƁK~x__?J.)FU"WJW':6!nF z͘K[{UKp?:w W Öj"IJpm>FW}2Gmo g$qNt+xcD@GNjxKSBERQݹ=]޻GEa00TE =k}2bnOkO`]3Zl_CJT)r)oBJPMZo_^.9cY#meaXzJ-㴵$8Qp=?y .>?QVmLԥ}.Fxv 5WBVYݦ3~/7 $OJ6RoORuZ i=MzbYn8ݘа}M :wh[I6cF9ɓ|'ԧ.(ʟI7#uXYTs[4r( ( ( (h_s^h]ŝ`Уǰ(ڟ!Aq,b<~!kPM&N5\},HY*RNh?m6Q~xSuz39IghG4$Ҳ:5=|H~#xkDEKt)VTe M>}ok-À}4Ҹ+ £Uϋ q?3i=.;ˬ!$Rk)( >׭ ,Go÷?noA2|qځsM1+^X]j-mgk=ρ5Asmڿ)O[?ezAe统G|ϫ ֺj$f7+լMyY?ff1%9k[/ἺLڷܤ{s&5F# ^xCƿ=?uoڤ\Yᕣ$ 1s+2fpV`պy5D{Y_][JjIߦ$e>NamK~2LZMcA"hEm"ͳQh6*RnXvuZ8XQѤkTy|jI>j{m9 IZxKO%̿ 5Ԝrzא|M֐s+cs~$Rumgy_Is!$7  r@58:N{ky:el2ylT‚k0Z|R&dݯukSX\f3B vgv}z$CVEJ5^ƋE+E#NMm:Jyw:Z/$mq؃ӡMt6|QoqfCӉdhẞJͨQ6sz3qKWR&u'-Ѭ~0MһWEK\G[bK{K릍e`CX܇AL?Yߙے]>xO̿E?XO¢\u w?NPoLQYo(">|K/EU}WC7қzsŰR[ [!,-->EƋ?ZQ>i7֋naoVZ[~`o<_ZDoJ}~c7|GBğJ&?14عGAt.R%fZ&S''UW+{yS~(߷Ur !2\nlKqWw?42I/Rǂqøk+H'¿q6u4^ *(|(((l,: aEye̒]-t9E[>>S*(Ƙ$`D{'|?pcִ#_ݜ7 G4O$[ W0 ua}7ftd +~!&Йolu=C}*Dx1ޚGZ .Kok K"] _WM6Tg5oȅiH ߲j_F?Ǿtt M1e!޲t0+?hў>`9u[[O@5o\7٤3-,ױ-3׹Q<8KCI4X52˼n񎆄%MT㪜 C7ᵳӵmZ;{]mm̂=f $}[l>0Ip0_z[cDmu XIa$3A2l-)S٩ITvoF}yNXj^䟪EKV)]j#~cgoGի>(~o?qj}Kŗ^$b٨E1pr20^Wf g4ٵ +UGCǑ?y:P#.ou=,z~2dXԣ/akK-.{/~"ݯso^ izwFok:g/V_U}%D?䚫͞{:I?iby??ΒOZeK}BnOޝ źg:-C꺕[zvt߅2W/Qå8IB G_CgdY=N*߉bbdC{@QzzBnUs>6,+.mRVEnN y qDKX14T-4#JET$ 붖q2+ty'U|!~(^t=^*--d@%> '30*y )x]BIGdC߉8w7kf0fA ~u o7׉[/5KXJi։i ]JchC[!~W湏Y&MPۖ-/~͕slE:4'19/CG>#*x{#C|fvNq9$W[v0œ,q D_U~{ռp!MwsjFOvY5V}]wKF.񖱫Ivl g {}574 .$s\I<,Tv= 1k҂S~eqwEbe(=1I??[rfaa -kf)^[6ٴ8>iмӫ䏶EP1ijꮬ0UA;|2It'lO+Ш۰3_~ɨc?5|MŸi^Gt-4u zcqt9Fx3JV*][+R2^$nH,|Pԩ'|?/gu [%[z G~'ÑXYHq 7^$]oǾ$5/)a)Z%{W$)wr}+鏃N?mMZ+IoO|@{_>5di5CnXnvtY6W}ͱ 8Оjvh|/+r ϴ_ |_vy:Ǹؑ_~oڬo pā~qVVK5M4%宷`>??uy}M =__U56qhĊW0~m4ƻ;Yq\08k7}J6G1^񏅼ɴ~jBzc_>&ѵ/_cy]'oVTWIڇ?z.o.v[I%H{dG+MCmNo뚄0X9~}:X:jnVMz7}7g)K6JIFM>Mȷ"ooq]8-۰I_lEq01ff$ryXN'|Xo4"+q<;o ~3J+.KKZ(1!`1@<د*Ps>!$\w=iF~9Oe\9 ey(J-jDޏvvv|w&T 
$MĶd^Xq<>|9K^'U|9"3.=yoOOj[E6^0a:^3Gh3wwi'k.dxW%-}$ȍ5kZ~!?FKY0<`#8_v"+JyCIz&]k m:T>- 9#=2}ku-nյm9X*BB8ǭz '6vߔkbzy߇C_xuo CJ5@F{2\,%jt)Ե{'{.qԫ(St^zr1>!xKRO%Vt7p*y? Xn}~' 5r7o Z-WkȮhH@Ai^ rk?o~&x3]kXZ&|`\&D+b*#+N25I.ar\?RX .[{߯E8{V1cMxo[-{U$#M}V[#gP?:'?[=c)N=c)_i~--H>*+J#_ODODEfvԫIeC©}E?ZI[&?6EuD~"tP6aVIMR$8]¾%CI$sF?_B3GX[Fڅk& :up3XW&S/D 9+*?YQ>6CBPnD cfwm3VĚ57̂H)- =͌~7፶qXI_8m!rp7 _G/X _x@B<j@i7b5EQ^-㟄(u#lh׶ѩǜBkk,KSM&ޚ;o?PӆHI>S^y&u|/|ko5ԃKąԼ輟A_eI`ʾQ|j tU5!X4QK\ (5Xg ݡ|F?]2ҼQ͵Bx_F ;/{xtH8g;;G(ޮwzciq+^R{-eW>u7?ұ~s[CL@ʞ>K*:u>߾onT?G\~UOMtoV_U$Wo<'l^hJQLB+8uB-_$j<YW8U?yWt2Xh_z{`׉hԭ#mϢquaGJOšSR5 HMy(1$2J'j+1^GY:+)wEY1ڟ\xU׬Z[g)*WsnRXCx5_~5_|wxxKR/ilGrV7PV+4y*cO6qKo> W 8JPXR~{{.1|D otxVm]/&ZD?hҟپҙ?WT_vk:cݦ5?d5?Dg7>"oœegvů+ab864ǻNWԚ'U,~XfJV4/u4m%$mMb38lF9Hֺͤqsojw9c퓀>{ |M-)T]ZSml^h9MKoJ|Iu,6VNT#~5 ?:w&yb/. q2<{/6lu,mJke *b͗scFk߃?K?_锇3eXkll 7 KnX&Qq>RyݭO 7־"Gex>-6bԊo?J:h6iY>Z|@֍v>[]>F+hV$*j40x0ɹÕ~ #':eTlLѵ8G Q*I_]7So-}<7(fS짨K};tKA|;F⠜ҽ%8 Wyg٤,f&NxF/ɨy|=OdRkW]TvCөib5w ((4|[|gֵf/UZ Ҿ˦ʌE4]S + dHCqb|3k~">uko 9^~P=7!jDx1~x̺捨i1>=eO\_皮RMy/ cu8hH6ۘOv|#>vVk 59( Hczc9+<< gPW O~Xs[-m%REsN3ۚJ]~ɗ_JlD=Z|A9χ:v 捠+AOէ!̘f85׈~/0 rt?h''8Ҿk:*hk{iPnkeʁB c$_f+X:NpSOfݞ&rf~_ӽ$Ԗɫ+uii*|Dx|?"4zT/W yFGO8?`\D͵TI_DʟWXIa#o -Y{s X >>fA#^}S XU'R+ݫw{GK曧'䏝~.kVm) eO>i7guVnIa˫SOR`m>ԙ# lG Q\/.|%n^X2<0˰H`G=z'=lbUi?j͕[k?}Elٓ+ѫĺRjQMϠquSofҸ{ {=KSoHde- ep?ax]vfd3xX֬`HwWR'$$ ?Nץ\t~<7&Դ.Ah-[7( f)b(F4w8%e}=̳ [ rT-uuR~n{l??߲/"%?urZʟ5/G?CҢ}M|TS>d*:x,?_V_[R5 9)nʿR‹U_i]DJjlCE})7m_%4mK{1UTIR'ϜDSn)_u4-?շ[6-k.[Eg' Ȧ=,ֶX]O|M-74]?,qU=bOΚ8'_|Vmy>k>3-#)[ȟZ-}4yo.lmh~뻿e<1fRRf-e>}/}/%n/b^#x\ʖM%K6{w+OI/Zektp~oǐ&FN_˕}?a|qXd1TfI<ͻw5OEeǫkw*xmZVc1pi2>= Z]X[?p׃4ح]- X_oOf.m[0J/yt|+|RtKE+[d>Q%<*JJWNMWMZpzp9O.QE"((((( ڮgY=弃  x~ ([-Y|@|sӜWotMiBVdSYG~ 85'5>%emCI/6%*á޾x:ҥV߳#ofv/6[}'%mWwW:uֺǧZRB$xD4 |x~fL_bs9QVI/~OC/|UX?I4spxa< }a 0>hkkj -iG [xpN@:W'":G_q?CMxωCxW6vV汔foøE*q䤹ѭ9z;j[ErGv3iY=Ơ DEtӼ5C<#ogq4wV2'w\W__< -=ST,6al[Ϳ|?)x~AWnGa:%a EyFZ6pCSJUe($5M=5{b%>=O?kĚ-_YZ[[}ĶyIrpW9| #Rc9~0nZFmG}guum'͕ wmط'gEDO{qOpEY.[]J2Wj-}GszJwZiy+)f 
Xn<o/^9/.hǮ[BkUᇩ$䖟wn]RA=k=[S'h-9> cˠ|CRXir$?a4_o;MqἸf+>Te}=?MMQauz({n^ Vö~>G,.Ӗ6)I?3r39[.T~.ߊ>pUnQs+'_6Z4ڂ; vVH;6f9jHsMʗ>:%hɹh|m7v:vXf}<̨W׌ 8a*N8E%/pρ#G >^mտ6O?KگtyuN^0I\^Ni>xCӼirS[K K:Z #Լ7 4F Ο=PS>+~$"~Gx Gh2ؐF+S'̰OBM߻w|N=,,N6s^j_D hDÎy'ŏ`| F<[TAxRt4OEu j~m_+~,lc)=R>_C,߇q؊U('GIsˣZs9B-7t>v9?0թ·=yTg$ҳZz7?. 42,xkVxFimDdO* !(˪QNIkTzj>J|{?'׉4[Umi>8r1:FX0Ps]|3a={:NTQ[X<`Ne*9x(C6;Tog;H7jӿ?-ҊWm->#%ᰘuptչ9ܯiJyEXF8iv\Wf>$?7^hi{oc7j lI8= ~r~)l3HgL=q_i~>6 {KAҭ<{0{%hYHA">ĬRTJU+Vmoѝ5WcƬ+sRv4l[rQI&Q֬yl0^ֶ::>d20n;W*yۮR^s[^\#.я\>;RI->ܻl#Ҥ;EfܣEo9WN;livB>vms5k/|u{wK]m%s^ o5ukְ yQ(s2pTItpK_+?`xh)ҧe&O~~.a[j~zyֳߺ&y̏­} qqh:g-d9-]rD%Ff$u鞙ڤM}] +?/Sϰt_ӷ~x/ǖVׄ9[ uC'M\v=~#~A Aߣj%?26fqk([WmXֶGpNsK-Inw??^-H~ѸA<+(1 }y n'XO~Ҟذ m`_s|ao<'J93,P^;  |#Kx*n\mJ>_KK5f2&:T6[G>h;W _ʢIrh慎,O>0ˬxbAmb2ʙ<=+KQm 8}Y Pڷz%;Ǯbf5_Q.4${Wzp:WSx9bpXuxZ& Rha}J6ڊČWS?㗁=׈Qk(`mդ.+gWȊw=EtƩ_hƱh"40 m &I‹Ҳtb.ȯ?tqXe%Rlel218tiR^򾶋GM&eǑn-lWQٿO},T5+yH*F95kXurcDq=|Pχ_th]xCMv#b*j4wHIm'pfAy:s5$ғZ%&e;BY6*Bj-7ۚW^lo7ͬϿ&2~2++E3H8=`[ k [;gOze`i8lWefRޱtn;$]U-tQ8zG6NoZj(tO% ߕU r, Z-5.F8j.V]͋IfX!Vbc55H-dGX}xɢBhaѿa ɣ;Xani}M{{K7y&z*cs4W**qw-[O e`!lW<|}b:i A'2 .OMQ>'3~Do~u|$[¼16~>_>*x^-^ͬ$ 3]q y9 Z>t&OY O ߆O? 
9t.pqԻLWy(`9˼" eUV̡J&}3vaw^?k~>k;{&{$mW l72d~YP۬01B2[_cq (wsaQUS,}|׽/2>!qVΛOx\$t'{!;.}!w}>s{Jp<8>.X'6_FMvx"WiRJ|vpP1DE 8 e8 7&gFڜPƑ4.)qN N \ƊB.)h.EP0((((;Hoh9qGPPk"~ľ^UEl;Mo$g0SSM_og&Ywۜ0t瞕cWRkX\ZvC}6Ghn-zY"-KM][ςxrxj/{>ֵg o TmKfKkw#Y4 ƚl>YmoHVb m|<Sc^jQRKemyQ%Gd'é+kuh +'0Bx/JčyI琖3i,{U1 }fKSտv;uM4˲8v3;Ҿ{z~O<\I ljs }G_H}cOE[J5+r}Kf-/ m}6H/x^F OFxo?ݟxB|7ZesXH yW[8tT2\I7('=fxx|rTuErEVfF} )j@-o(85__mF[KK#ݶ᝘r3_k|EC|$G7ۍrU7E Z=c᭍xºn!;IӥJGaU;)(KGfV(ӡNXl~[:*N-UwN> ?kT4{6y\0*n2Tc;_xws^=55Fem~H5KTӤ$29<g^xŚʬ`"1ȸ@#kr)*n\8/HIt~y7UGV+9&>}E1S'խldθ,78Ɫww*h؟xDc՟ϴ%Zm2ٶY7jG ;~rnshЦ{I#ԥh(@r)rXjTW4i0-۳MIag{-RUk^ :vNw^G#@} ^ ?ps3 \ڰ$osxg1̧˗aU]I/o=Sol΄>'\_+;m6mrkΖ֓%^I$' NM~|9']Nr-AJW|8\||<[kPkAUItpv_8o#Vq҂3mr}gon|`ԡ}',8_;{|fh-dnc۠ Z XqRIMIݟu *+lF#إ Y*cSSh P4*KQE ( ( ( (#h6du 'WLe|5()|7 O 7wa/.1ڿ,lZOi2.6?8t9_сY>!fQuXaI }cȱ<+/{>f|op`Ogis^x{"r[[8--W-<gt?5߇?|[ú,4IuG!kKƙf<mCoA>Re,Y;A^8KOϨ|9l&b>OqrT~u+3KRIFWvWoݺѿ&ύ{erg5]/)]Ewd;=RTX<;c#CK!eÞ?f"^mo]JVˬ+qӤ92`m~cyxK/ žO0[ w4>v;=B $sqHwb Xe'}n 3Kζ(m5kfu~7_HwJV?pj|b{Y>c$SL#>gI< -o?;_:|_߆VQLJf׺l[%@9Ru\SA[(NX|~Y:JM]jfWA^ 3/T}aq eZ(Df,l +yq3_#t +;ƎaHxȯew/%7ċ Vm17I%WAnD8/gJ/ʌRn)9 ѭSJ/k]$u>}<>7sn{Iq#+mmY' =_Zx^I1~/r=xm.,ŸX WPx<(ƛ5ݓ+Itg;QGV+7|-}u|Uyj72yE X~mnWiȯңn@n}^ _/hY,ci̟a+V4]s+~c>\ R]I6<&SԵ8-<r?@I?*/XV)T,Hٳ4* s'~:)jʓxV`ֺ5 ׁ9.|;q⋈|n鮗>_qa`Ы*4~þq&"*bӥkvkiFF'l +폄?NO4V''>"X89~m~Cև-=m4=LcbH~ l%^¿4WhVGeiU'953KPf񇎡[-tk={ e{+>- >+$ė W-uGѢ,SuiOclͳ ,89.\o Et&1bHZ%;m|MJӜWQ]B?jxJv)B7TNJ<Ƒ4.iBS/(QEQEQEQEQEQEQEҙo|sŚO+W_g+̕"O%g9ypׇg?(AFsa>ZQ~m/MF?cId5 ;%hoQ`Q[OCnF쎡IJ {x\jT?k.^vk7kǩ1+_|E(:#WsO<aaI,IJaum8ljXAҕ˕nf%R6[=u=4Q<'ck6Ԭm5 whY?_ٝCQe%>g?(A8LciJu~ X2=Z&O'G?5ئG^#c018&R×o_|+7CtG?uci9m+[ ;䈯F+8[G Zu m$m7{4?6o eZUr)Ӝғi%ti]uTJе-[T4>= =M{wï&ƿ/7-5m W ϱD>!d=7A5 j:l6-GqBzڴ~ ݟ hoCUݑ5^[b] QٚΛ_PK^]9bs%;dfcY,Zk>i[k!Yc< kɖE{?&WR,>XN| `?E{E(#WsO<aaI,IJaum8lj6n,?gQs_݊pOo;d[| WZmyMV=#A𵏆4l4XXV({**⼧?_CQe%>?g?(A6XVB Q骶8.Ox_Y CNe 8&p>Կo?!*nK/-_.~\{[som|뛵vS_O]Ɵ? 
$ˏ@qM}ũ(Rrm0.JlwZzzPo/?!*nK/-_.9Dԣ'JW[W}{ FmVJP"y|'?0W?$CPӿYyq06o/?!*nK/-_.#9rmKnW^hQ?5w?o/&*jwK/.>Q6ޣ]RQtwz۶0¸*Uz?o/?!*nK/-_.9DG.Ź8Rr/@aQRu#g_^?cId5 ;%hoQ`Q[OCnF쎡IJ {x\jb9=/{;vmgι]_=zR/>#iAyeȾy_A,3 {g[Vji̺5\Ԥ +P((((bO"V˝V;eVHpT 8k& q)ʄqqz *Qs5[+矊S ږ ?nςݮ >j]\w]Y|icMm-˩[Gn0 cv<|Zq~|~ha}ιvϤZژ֕ω?)]CKi.4ۙ-eh"IRnG?>'6}Ǜ൵kvO:.մl\T{E2eUC {Usv>&k}JPqke*ï> ދKR{in]N;pXöy?[ h/C-m-N?ڨ~UO?Ũ|Ɩ:ݬp/^ 9 %7WVcvVFr|QSѧ)JUT?6Ě Mo-a`4׷G\oO_2~?Ko烠4S-_}NSĊ) /Hs_>(u ..d3FIRnG^W cFOxw|uIUU^_C4hGs7{}gk<+'>z^ꅛjNW9 ;ᦛE񵽫k*y"b#c33^3SᮥZ6xIO {rzτ"_}kV\D0 1&ys+S-#V>gxMCš6e%RK8Wᣄѫ:\m]xZ:V slxm7AU"Ҵ-KV$<İ{ϡ*BOS^ K [H7y@i@|_W5> |uZ^i5XN| `?E0?WKiwm̖z}Fxة*M88>??-#~<]C|iW繷qv6VR( k'~ZxzwKo|1gi0±FTWgYw]|Z|icMm-˩[Gn0 cv<|]^.xwCi2ZYf2|lt87-}zmn ԕD>S?>x'6}ǻඵkvO:j,Vo_X[Ktvۅ/XgbU[_hsgk/]m+v~xSĺqψqi+Gf^W y|R<_>Z۵;A'VQz Tl\SBcjMT^]?3轴+_c |7T[S,`px*OG?]LJ~#ƛs%Fhة*M8 Pk^LL+=/援R|W {|R[Z۵;A'VQz Q?૟k07z/,uIYu;Kh`Lw珗򬿳1^|~Ɵ8_hsgk/]+vOĺq߈qk+GadQ6*Jt28৾A?-#~<]C|iV1qRD5I7ˡu\w]Y|icMm-˩[Gn0 cv<|p'|#u .ÿMVȣ4lT&dq(Y>5T2Wk6*ڔW?৾A?-#~<]C|iUc |7T[S,`px*3^Gʯwiaιvϧ(/_ύwm&k~)<䷗䐤x+,NZ.7\]7FWEW1QEQEQEQEҔwğWoDŽ'??k7:E֑OqybI"+,z0`k[*T6\V2hC|>6~/\|K~?{˘GlL< (99#Q]M$}~G<,2Qm{ov^d55ھqn, 4mir-ݜя.U??~|*{a| 5 \}^[Q>Xߥ/}egk{붗:һM{6]bM Ɩmoi:nnbK?+ N~.k%n?e}pKu#V`C< (99#QֻڟO\vNƚ"-\0~~oE .m׽w2Jݶ~XF_#tma/ _^Ŀ?xbLn#H_w> |񷋵?~/}kk+u?7ֻM>Q\R=ef dlaA:Xz\U[_g&灏xͧuޏ&^4>";[g)SqEn_'KKXm_..7>r2WֿF+k:ԖγI 㫁Ӓk>,4|AekKc/^-?R+폂?5^$|"id9@'dF>Xߥp^ZoysH-)'$t+n_, M.;h]cLknyvrW_'<9$w>87MY|2},";˯~>=/ԾÚgv>կd|֨g,oҼStG6oysH-):u12rI/wz=_om{Ce(?OsDjoxoM.;h]cLknyvrWW~>xRkۖV Z|J;[+;[ޏ]w+ݗM҄|1=O~#1-ԏ{Y:xPrrGNڟ |D㶊u4ƹ hǗg*zrqJjJ_=uϦR|>6~/\|K~?{˘GlL< (99#QJ9N&NI%G*Y*-mӶ3J+?ڟ |D㶊u4ƹ hǗg*z{W|mK_<9x_onZZ@'jq*?qUK۩=wu{龶u_?৿57}>8%v[+0S#gO NHWqS>|YþO\vNƘ"-\C5MvGB0nO]|s+_ |/~0}kk+u?9]>Q\R=ef dlaA:#d^{_ im{RG?_C eOFOW9, W[ V8Ԛ[jntRS9S;~]EW9QEQEQEQErikoŸ>u ,J$0ƪ"݆9 '%;lޮ}NlV*!;m.)n |,|R <'+iW(wo$?j?|Yg m+}\%EJٺs+uWΩK[kwϱSXVt+}^٠Xa1""DGzĺz_߇:\w2-^XCHrW:+Ѿ+|F4> <'+iW(woI$\WZi}{OO};`"uiA$Zr[=5Ym'M1pq60|u߲ڋş~[J_i1p<o8'9?\/4~۶%#|8#l f }J9mc9lү۱㲼i^+S?mMxGⅫC"Xҙ=W'JtۿXy6O$Iapƿ[+|A 
<'+iW1ǔI$?oڃş~[J_iQp<o8'9?^<#Ju*V=׽ͦٞ^eVB6y]+[? .|6>$Coh[V$ ducL/ &}z;9 vۆ(ڣⶻZ߳0Iڪ#b[Q=^g x#_ym8*٭jo݄Rmymt>;вǑtͶe"W|5U|{,SM_UiM'u |= w$ekkpvLotJ,|NWf? 5%u-@[ ~OWq.>W}J06n.2o8_,a)OzPsWI~tiizvj"HSP+Ym_gk]B=/Û.;i/b,!FoJtG~'PjMƶwoId)N[ߝ?=]>jq)([}!8G_4~j?Yg m+I}G%Ep'oPK;KEK؃b&C'נYNnߝ[_>\$g_:UJi?|xKOWۿ&nQmt<7N:I%Q>(Ԟ+ ŏ DŽVLK }192n<4v]oӹ֣>Ikmy]y>o7%"8㹑l f u:W|UJ?|xK5OWۿ&.nQmt)U,i{H{ZuװMOO};Fl_2~jO|Yg m+I}]G%Ep'pZPK;KEK؃b&c'נB$!Vϸa?g?NWSW?~%~ҺO{ SI˛[];cx㤲UcIϋ0Xo Ai/˨YZNx}9ߧrhK[kwϱ_7%"8㹑l f u:W|UJ?|xK5OWۿ&.nQmt)ZK-\W]{-IOO};GQ_1jO|Yg m+I}]G%Ep'pZPK;KEK؃b&c'נB$Hh~um|̢=zrڔW?~%~ҺO{ SI˛[];cx㤲QKj|UO4~m*8e[J I!ݺf Rs_QQuM'gfݥgmSH*6iut}{GQEvQ@Q@Q@Q@!\ʿR/7uxg\"^iu{HDe%MpA.qyPz0Y*4wE|V")6V]ݿ3sƷɿ0Zŝ_ B.}gw\[C@5 97ҏl- νg_^ּ!otWJn.a8:d]ٳCkߝ[rhG{[}\QLi_dWa>=7};\gAŸ _Y BBNq 8)^ݻw c"K[}}NYFQhn!1ʁԏ^wо-k~Sk%4˸ 3FAz+?iC&l½-?+1{nf7mwQ[-/߷a,bry'ݻw:~~%ZmƔ.2}wFk|*&ͱgc<r`fOl {΅g㯅>|#qrU- 7ғşCYOu[ү.[p09W3l%Og}*G2d=+ ͭzw6[X k:Qzv_<F#'»;w2gD2,IWI3_oi_iO}v* ru$/Ri[o}/|suL֥U4*V'o{85ݴg>VM~+_n=ZF0%`˶Fcu]_k^/Ÿ^)_^ķ0aB2s G2r4QKhϚy|cZ'N)u/K7j%d񇎬mk33k.䦽o瀞4;]ɗYyяc5 M{[~W'cn< r.ŝ_ B.}ew\[C@5 97ҿ8̸񛩍Q%^ݹlKs|nsIJ.otd})φ:.ۏgcj)?n z?BYW[ܕү.Kp09?) 𷿴O>ǻfcvqk+aͪ*Ź5z}F('JS--;t8_gAŸh Y BBNq о,~SkEt˸ 3FAz+?{Og!έ4Xrs9krݼϬSοi_dWa>=7};\gAŸ Y BBNq X 8\o˹ K[}}h43_%|oah?u?|) {UKqs# ?Je~ҿɿ|! {OJ}{nf7mw(^yb4-/^˹`JP5G?gA¯ Y BBNq  BYWu[ү.[p09S̗~^!hG{[}p(e~ҿɿ|! {OJ}{nf7mw_l-w΃g㯅^|!qtU- R8\o˹o(ǒZ/߷c+l- νg_^ּ!otWJn.a8:d]W,W7c?oi_iO}wl(\ 0-/^˹u??gA¯ Y BBNq  BYWu[ү.[p09UfK{?i ~uo?Q+W+ 𷿴?>ǻfcvqk fCcKtO zû]=#vNwʯ>,.NZM.*97RrWWMtt(< ( ( ( ( ( (<_ C^cx/1ˮk?c2L!W>X#o >|/syM7Cn$v;Hc̄xO"?5EouHCx*qU4 ƥc_;MїUL{` a;;޵W? G> ~BԵx%#B>S<3n\YȬ)~Û?X yѵW2czgI$^ѭqv/\+ k"3gO^UoN-\xkKKt{ԙna$V0wmO?lߌ 2>*skC"𿇡+']d3:d*w'~I+(? !im?tk{_圞Z [#.;D?j #_ۧR_|?|S?~/}䷖pC"& N6ce~TJH \>U;?5g#g(}ko~!u]ϨgHXkDp)؋,Gt`w 1~|Zt??YTCeyhzv+O6ACcOO~){3MY"c,1RRx k*w?o_׼|zUu?iqiƢO\"FbeT9yy\cO_Wk੼I |WmdHu/Klwm4ʨ 1rIZ?ڏ7"̿5 i&5Vrx[{hs "}+o.ցLcyY|? 
_n7!%yхo0]vH+i&_E6-g_:Ú=Ŕ҅[@pU?zIYE}_~}C@ӄ^fk9<דG5-?e_?߅%W-7ǟ>Lsj}1$٢jz黢XQo*\G_3gÍGƯ_;IOon7Kssu(eb&v$~}|]iN~b /G_|׃>{awŃ WQՊ˴揬UNOazMY}YS/ EC ,,/f4k[q!K*E#?xKψ|' o? -eԢ[`RT$s_?T_2xwG~x3z_:Z. cG`̧kz![ Û;_ /3b,4VKmFC$,U~_0W{|ܪڟ|+ͺEEax\>#4fn>Uy<W+xy/ykı3E[x-'ФB I#~=?W[AW%]¾>jkzׇm Oپ^fEK ]5IyR-U2*w'~I+(^^I⏅?xWVn]BBdĒmxV"7`9bI/cƫGEN-GxK߇%|i&ilk-./%Y%e}} Ox]FuKK$:\yRXԟ~24?= axg_=f!( e ddDp" s9G\nnUCW 2Z|ACoWW~5+ۻwY--{w<ݞ_*w'~I+( <&7F\GY޼9#f~> W>k{}i2JGnd*nn+|?I>)j Z%Ѿ\ȅJ$UIwoecS}^Vq_r>~=~^ >OJ&mQi^/h/Cƫ Ӈ6R8> M+|7Z-]. `l/myo$ slli/Q^_cg1g8xw7?g6Tt6ͬ#t?}XltO_ȡ !{|ܪ'x^ ' j[ɫhFXlvFFpNH7Ia9([/[?WW܏ <;Sh4=~?<}8,-T#EI7.ATJ~S_W5o~[Km>%^tmI$epW͟5}c&9.<;3xQ~SSuӜ`g*5O:+{GV A-zAGqhTW՜W܏|k>xO"?5EouHCx*qGg6w# ѭ~ryh7'lk{|ܪʷQk6/V:4V :4WDFԋp;WU_ jW_[Z}].#`fZύ?=E}]ٗ5Rrn Yi_zTm7]Q^aQ@Q@Q@Q@Y? VzL~ i>^,4{CP@c I< PYPHU=(J,焿k/hpWex$ּ!/mi#'/6qY|>ࡿo'VI+AC$3IεHd`AdzKZࣟT_w߇^?ZNͬxXiu=Fck$QDG¨P_c"?oŸ߆ M⏉S75KmhK(,F`teHTcӤͿ|o^C=F+Km丕bVeVl3(' sTfEgo|LeG ԣH!xĈn 3КP ۷dŸ x37VEﭥ NT*;xWQ~VJ#/g|&Zگ> X`ʺ.tUÅo8|5Լch𶐪ךtn` 1b(_Wx;]:{5A%mv±LĀH溟؃?}xVmZ4BЉk47A!Q\|E{BT? ||pMc5J/4=г\4VJC9 Tx/_)|@'oq6[]٤o1.ɥʑ[~׾$_5/Cl.$_Ml&f|n$`8_ <-m?^ʣnU$inpχG~%MOK]6hoT4K L4jbk+ğ1|>imo#9W3_|;#ifDHaܳ?K~ROWofs+9"k~>y>m#čW޵m:~er F"G<),NT 8td$A<]y/icqc{ yvASO`r+:<7`׿e,:xV{g[II~q,Abq'; ;]WGtpϦx[KԮNo|ԕVBW{ҿl_('b/4_^l񍧂4'ҭ;[ۗDI.LRιdògR>=;An.{Bx 9&1G]6g)ڊ+Hx᎟%?j:W~WShe!0Q1) -o?𖷇FWؓM61h}G[?o+K+ݴ\,r~g?(A=Z !YT$ rpkhA] mωv=|K U9RFS@sʿkW ֞hZZ8R#qAUm-.xliV 1,Oa__j#59W{VZ !Vȷr1GnHςS_?a{6f>}xڎy!kĂFF`1_xï93#=K_8HU5]&qwk)׫;!g4?? DL?A$rŔrrI5ώ߳a4/54ZסD4ѳ#J@?lφѵhCgq<:D"xhfg.&xR}OQ{y<~y"E$W_w=_7G}[Kž+I`NXmQdF;猋e= M=/ ;M/MVxI/ؖ Z[ NUkf (CůzUxGSZt}_I55'] VWUe9  w.2aIukҟ햗vHm`IDSV*p?#/oq&>/h0O?cO>VpU~[#yX_+73mB6}_,|K g?:~xI-S|Iiwm)NV#▼.`PSOzs|b_\WQXj-JҺ'՝W^(ࢊ((((bυ^㦥QxZ~7ho' s2< KBj>Pzk,4irm$ #?xzץxjQӿ-?g CcGX:g|S(3W6UlVS0̅[C G\ʾc/ZW%3r )l$f$`? ڷ㎏QOsqDMXtV$ܬQ3) oABs s_?;+k^wี[`$R˟pk? /fzͯ|QPK,1Dm;*l@ٯ*~ԟ߳3~ i~+XZȲKJK \mݸPٿ gZK=ğ `f0logilڼ~Ͽ_9?x5?Mxf+G纕bG?3d 6?੾wG /Y}Mi7R"4/pA۵/O-|]ߴgOXԯ'y%,~g$0;Ky&D(9$zQP&߆Oƿ hwMgxf;e*8k$erwFT &| .|z״YdmEC$ZkAʹGZ?' 
;vᧆm'}~7j f+K9b=[ ~?oj}=4u- .IT+ ?m m_XH=g iTLIG&*s>+??io'oڣ'S-+_K$V#`DӞ5V\ɀKɑ< mۏ,i~?VZ gLh&xT2POۻ xgC%a?_4I5kKP)U B*୿ o_^">@{+v}+ǚ0Q?YՓs ;\p?6~w1P?j{b;φ|;h kk[qgsa1 !g"^$#u/h'ްB}9dpOU^C~/ χ?GOk()<ٺOWU 'x# |-oxT+mV?z0!&⧇~8~i}WGg;-+OKM\Im2'XFRe"dTGcZDOEVd-ӷx߱/ SH^ᯅ۹TlWń hjaѰ& `'?|UjoX|E ?l͵66qPNHր?@<]|!_joÿ?m÷Mex᷁%ִ2e{0AhԎT#?l|2Zmi+z_mZo-dXgeI */VZ7 r-kPiW%ۧ }#Zx֬?ɯDi6 䟜)V--xm~~:.luxgѤλctp]/pO?e/-EF_B+>PxoQ$0Z3,'.Ǵ^ 8i-:X-$Z Oi\@Tߴl]K 4)+i~Ae=m;Awn})$2$M// >2iZmvzvoK[x״D_`m)C/ÿ*|;ƽ3Isޗt&KV%yd{(behf* )?+^ǧ:ݔ0My$9 cH"C &gê|Wm_hmݭ`k%ēKqF#$~.η|8k(|A݁{ m'kਿ?ڞ:4ocEˀyKv;ghg']g? A7vkiӸ Z“Q?Z[{M~  ~%垡qu{avzCpEvUl' g/#?f6:vg~sjkq[-omoCb'?ޭcnuM]6ח=\O=hoKZ%ZXè]Dr6FG"FFR>_7~jWp^q[&;m/= }@Cyx¿ ek^6%'t~׺##+׫_5??w3?-ϕ{o3O6p++\V+mJNXWOKQ\HQEQEQEQEW?Sʾln7ymW_kkX?^:oirkmN{9cz=/mww}qUMVC./-i.'G,p@xU)g|xz|m_>)Fip$S]\Ki"$! 䁚tv֬|  ֱxil!2,C21;~1~ jm[zWme6&/4@ZX85Zf )/=oC c^u_[GnC HQa@5Ś? &kg,-us(;GP;PG ߴ/_>k:lG)`\UT,OܿQO ~k+ odK0[Gˑ2IUs7];nO]| :мsg'W&h#L"74P22z8JsTxw~{cx7[P;MN[$vل*7b<f,ߏߴm={_> H֭$4/< 5/#ZIE)E J lm]>į5K|<زPܼa<} @$)ß= =X>{xW᷉_u hd q5KQ"gvUG zX&[6աK2+mw %kI |1rW4W =e4 _x^;oOZKZ$- XsQ }_> e&/o|@qK:$qV+P3r<~G)L|M[B7z'}ڮ[ļM,&fV"8UFcuS_5 73@Fo0[oO0'?6Ǝo ᇃD;h5,%外Y/"^^_QO'S(i?j·/wB']db8xg_+wSዩŬxkf>nm>A> MgQѭ(ƗaQ߉O|;/c?ٗ ^0֭?^:]myoF8f؁1 q_~參;]6:nmClvơv +S)1oO.%]~|ơ[YAሠ?k*2?[6k ~mhy>Z}3"te?ax7;2)>(nu{4aaG+ #fW2ar }c5ZF48FY $/"-[پ6? DG7HLƌ}br ~̟qk+FoTҴӪW_m>J}a#ݿ9\`?ƛ߁+\xKxzV7၂^2̿)uc\? 
P/Zrq]H\+K3Wg?_]ug.%~iu@oi>oP֝,| fk^<%\K]3n)< #uɱ0$ |/0NZvVͨj^ Ai4AF2FFx+/G-Yà { UѴ .-.09-*`~eW%;^&Gw/fԛMEggjϱZ%~~[O kSou[7=7@נE.kL*m,Dn@!IcWG ko?c?|u/~ ٣D%eRbo&8I&f%`rqmۯ-G Ug9i>+ơeΚv%#)@?^"<}sCN|ZWWMvo \70$dȠƠ8]f?!Ϊmחq ζ,-on#VD$FG+柆P vjן7T|ZX4KeAoX;(0)>x^~|M.OxCS?GN#,[HwHte, q/ 3-w߀#-76;mZྴ ##DV #k*s|I/kړG~^ е6ȣ<`4@b|0l<, _Xo-K$1KWIC.#H1d7wU/:z]Re+4ݳʍAv˅& ??hyVCĚ*]׏wiKar!e5OkcZ<\^>i šZy6AWkan'x]%߃V3gQd^$/a*?,)$CF$%~oڪ f]r_Ac \%Q[7$/ qa5+G^ Mami|A_GW?L?ه?&J?}j_lҟt1n)NV#▼.`PSOzs|b^V)J\/{9÷;K_.[\QE#>~ 5(f}VowVAlF3y0fQ_,k[_+?gOد>#Bg\$k$B[Z34vJ$mcz#1aY cßY " jxÎ+N=[|^Sx|33-wLuu ?e[m.e}ɸp}:|y3|jG94:ya⣭6+d_;1/6*.udx64r(P` mN~!|t"~ο7|;!^ދ-.G>ldˢM*edgWwi5xK] Y,Rա6Y7N|ɱ*BG~ÿ?m',#7~ݦ SV&мj ll#n'$+[xkz u^ii51i`vo7abu~VۿſW~ j|;O+ HkM2[%M,")c"q.b2r?0nڛoO 'Oh ķV/#͂ݬ6aB2/|on>.|m,~֎}sKwVg8?[(I/OI_~*|Zy5|K7:Bi;mRj;/ßR߀Kpj_$_?ğXOxZ;YHpybQ?g~ Bi? ȡ%ݔvfKl.?"aM'!|~;c,>*MKe|Ap5 Slc>T1 ;~\0 *|Io~|Pj>x[ EogL}V)<$~5C (=koaG^Ircӭ.e8Toʣ9bq3 ?⯀mZ='_X[[l0݋XZٗq%v#8)W9ϏߵO_5wqg8lWokPwoiCY@*w_oOo¿j INgZTvE$#|LgܬX"i~xŞ [ mί\ݷC쨌 '|-]7ׁ-,ۍ=Z-ZTPðn|`yOm,45eyWz`yElSkG_C,z6:TZa H>XWyQvPD@?5i_<] M}AM]^^[Ҁi Slw7.TGAe }5#NytTDثX\8~fQ?D? AI[gtu&֢i ;7@_?+?_▅Ṽi׉ rA u$ B6h#$WGm/ rG$:_>u;g`}F퍁g Ӵ7k^ˏ|1EJ}w3D.UUOּ#s9_%ӼmoY4?jv ے91qЊz~z_1-O=s..mr<3SDݱs;W8 KmZ=Wχ<#6u.eYY%a̧b*wR[eߴ_KC[u4v/V+mii02C_1%) ^/EEUO8F똠\j'Zc[{?hlخ9K|1NOr ~>A'K~|k/4U6obUyNv>M@|~=t" MemWH͟u|~?~=t%Z RHHʟ݆_WYrs}.Ovv{GQEvQ@Q@Q@Q@Q@Q@Cyx¿ ek^6%'t~׺##+5+<hZo6h:~hU _5??w3?-ϕ{o3O6p+wEuOxsO/7ދ46[ܧwIks/[z8WZۭޗvwmc*7aPx;~7Pּ!UaiaІf ,ǒ !O%gxO7yk^hfdC^7Y}" 'U5w|IM5&gV++]O<9H# dηqU>xտe_-,>c^;${o#uHc$^ZuYveoD78o/7KiKs{G:y~>:K'z6ύu~T4Ŀ|Nѵ𽎏^ CY&W/8%VPʒ*dH,0K;&,~|\ῆ> j k7Io.Bm,x\BßjLJ+V ԯt2ThcDP$(]\d?0o[ýSVen}ZV2q@m'|$rVݡ ~# ʿt2Q\Oa:o)Dk?? 
)G/:{YjsUȹg]Dyek4|8O<3\j>#ғB[~o'&ˉeڻ@Fp+ӿOHzo᷃] $KQQRi@$gU܀݀4(gϾ$gVڷn&;ym,P+1Ǔ~7;vQ-6tKcA-vy*q.9>Wi_~ۆ.wA=~u_t{JSg⦯oំ>#LD:n-,JIn9!C翵b{>$Ggi$\]Ky y#Fœ ~X1^þ:🆼ieLt59sS+!#' |ӿ>H?iG91W1mܠ@߲%?ohtk!M?MբdQJĬQUa hR?8sƟ>\G_Ea5ydu;fD{._ts-lsIiO+oAm=UT)rj 'kWAѼ3ve'$$QPga|n>h~EG]ZSė6Uƻf\Iou>t]….B62gϿemu"WtK)x_l:qeQByqP>S NBa~.~hBʹ"9.mY64DW(AR8(E >|ICѼ_}oj,uBI&)1$;)*+FRUfe q~~&V?]3GytQ&C yo#!_8mث߉Uڗ_Zkxl]F3]iBbV(UBz.eōMż"6YNC)gh$ெ?>,K7 dY}o#_1Dme5#ྟl__`^F7]?~*(ޓm >`naXMo G Wia~-5"WK=* uXE4a$Pdouau,7FO)[Itm 3H!#Ϳ?('>|% |V>+)|1o fX7}V2ʰrwt+hZngk5ͭ+47# pdo{yj\]Yr]s4n}s@_?B%/Ԓb |-u/n@8g~W6> n=KƏuoheK p@eWoi>x/c =>!ѭ%wgF19^7&&on?_R߱]|粏'ZSiN>&rY)wHr\о x?i֝ ƻkG ..D,jPZ-|NѼmK73:Η,p\bGV`ʌ &??%#4x*3l|Igktq msxǩTc-GI|5gxAm?x$—Y,FůN ?t/R͸yT0x<|C𮡡xJuW{K?PK[\xGF jo_;xf{"Y4 kfO܃gQk X}1o $*Xڰ,{+Y 'o~  / iaIt,lûH8bUDvbz_(u,  xNH^ wVv03NLHkܠm(O R>'|!oMFeQ`VN3m־~%|**{CaM@4۱,ݎ|Ã9moee](((((?h?¶ }S//܌w'ݧؚ?`ߍ?|axoc.my<ºEh:F5I[5?2ƿ~}/xC@ӄ\ik6kHב.G4x+ğ;kِfi'b~fN^nWn;=>n^eS7Լ!W{Wãj?61դOsE.dRy'_O!~,|4<;xڎ_ϫxDf1F{wfH . J_6+ 㵻_? MmEjMp~2(rkKSa?6Zͽ??1|ˑ?N/dbWR_z?*]o/Kƞk[N׼MijQxI<i'2cVA1~ଟ]ᾗ7_𽏊 2N-4rHd `duS?~">/xo懤KK eW=|"@_? n^eSS _ .~⿅#xW h ȆkYۨch Ȳ"18t9+mS?oԣv\$Mpm*"i7 Voяm/V>Ockwƿ~}/xC@ӄ\ik6kHב.G4U;8M]I}V^\c~6׿h_ĨZ=JH% it"j;m t}tnҵAτ>VӼOqXq6cimVv`)cs~~G|q 2sy}!-$^V6+ }^vQsRJKGG Q({6FDb=CC1u t=:k=,{Wc6F1v%%Dڛ<-gf\> -uu\Yi$񽘐Яm/V>Ockwƿ~}/xC@ӄ\ik6kHב.G4U;8M]I}EF|7RAAݵz=5SvJO:ď"y3wF4cP9[zp_~$xC_̄f6_dI<;6_~8~ x~$3MF!oq E&p\{>U()%w%?>>&-m;FjѶhgHBF8W|a!MC1ԴRH8 fb eո*>xZþ?aχ ~4P`,$t}Jqmմe-ϖۖF N}?~ >:6k^1nq !dWHVjȨ PK[}bWr_z<' l>W{[?~NzI]△-`Pzrl|eq_\WƟǿ,G/#?j_Cmx{~V!d,kJG8{_}\ժII6g{{QEyQEQEQEQE/S~"M/߈ƙ'HMxv|}kw1]_|V:מVmh.Y!é=kj0r-vCUtҜݥ}S '#^xR6Lz֞8$dvW?:L6ٿἾˆ WQm̼_<+f=o~5faogmCHA8$ X KNJm練75R7{js? 
7‹ڟK|;GmHPڂ(Y$pf=o+ω nWV Լ9 hVo"tIQyfm,c0%N ީx?\Ѧ6ڄLG*Ho/?fUG/?n+zُz+=zkivUHoߵu}CG >x?>MCRkE[{k{-5d⬮ĕ*_O/xWZiYݴZiT:ZĂɯVk;z_|; sY[cjN7<_Sh~ .o6-*JC +&@ܠ|iZ|DC~(ߌv^%յ3y4fOX}>X;.#VR e~y|' 3>y;vy^W׾{_)ߋ;3hkPΐqHӒ+-z[{饇:j.nk}~z} /Ï_oC.?|?K}hֳ?ўuq|sv߱+?ڧπGISj+X) @~%N ީx?\Ѧ6ڄLG*Ho/?fUG/?n+zُz+=zkivUHoߵ?ڻ ;X/jUMQVuyUl,k4sHy#2xƟ[{O ~?]xZo/|Hx^/ Z[i1tn~5oٗ_>*_]xCH{**!\,9H;W;d_#T %A_ x_WDذb<(>hR6'yl~;z|EsF[cjy27#u"?Ἶˆ WQm̼_<+f=x JJN}aqU#uKo~?8_/twY~0x_'$h/t@,@WocoymM8 rؾnOe+w{[ahayym~۟?im i6sOZW1Si0/إ]EO3? |l)???e'yMKC!u[CĐ&ok#vWsG_j7_7Q?*#_ٗgy{ǽa5 />-|'QzXI`1-9*rGX^æFmVyex𮙡4$ɿğ `_΂/6Mw|WtgnfX>@ˆ\vcG]lFHѤ}G.*NؾnOe+w{[ahayym~۞y x#^4߆[hV_:uhYHvRzPP`[2[xOKoa_i%&c.H5A=@ 5ߋ_x_vQ<\|5y5;h͟eVEe7m_)ߋ;3hkPΐqHӒ+-z[{饇:j.nk}/ -[ MҾ'xW^_~*x}>˦۽$U0& $d% l" mgo?A㿏U}S}τιgamuq"~Lm- けT[_ D(gif^ݞW<%%'NV{h[װUk׾EO.x_Kkmnl ,w@1 X^GxR}?s,;hf-{Swgþ756!|*/C+>ҿs?>j;+ jw fG-ͤeQcV5]VO;{^ܮkl/ //?կr#fc:+o}HDim؞-O~ 34ohZ:oKXD*yUUTP5 />-|'QzXI`1-9*rGX^æFmVeZ>~ ^iwV7gysHԭA)d8M{߂cCv Fec?JHk˕`p@5? x,oM5]>hm+ "h?&߃d+_]ҼOchzasm6WjCijɦ=О.~i4i/]:( ( ( ( ( h_X?^ׯ!xZ{)X}aCr 1w_\W?'GV|oi6Ծ"i:4ᣤomydY%K%>S^Uz3qI+֏9rSܹR[mD_>Տ7iEψ5-/OKcR8V@OxO=a W#G,Uҵ̈g-ᶏz&ce7w},o;ny*ǯګNC?g-dюBB߆%2򔳪Tqo#| >1|@v4ftMukiL 5|N֥?-xGޥVY Qda_a~?S;x1[+[xȆx˝h+|kg .M4k_`mTEg8?' 
E#^QC)~d f\]Ĭ~#Ilg]#~ `x#Z5 ׾:j{/#xW>TN,H>1S-o)¨*2৾H%bٶyqq':a.G=a W#G,Uҵ̈g-ᶏz7콿#iǝsv_6Nc<-߇~7iqx^8F_iR4X p|㏎PO/x6X_Xxr̍D1t γ*w/O{ {?_H^].gko'y|lf*nsl++_S\վ8}kk3@l.3@ڧh$OjH !C{w=e.2#3Mzg=a W#G,Uҵ̈g-ᶏz&O#M6 f8MV{yrğ`=Sx^:ɨAK +wyw,jE\S0cš7Eb/ ǷF>oBB"2w^Fgj{ {?_H^].gko'y~?S;x1[+[xȆx˝hq^>^;~f8oky7o#;~?|3?fJ ?z2j~+YC$l$` ?_ ~5xcqÝKVmZio׌;Ig^;B]z==$ZG/.U͵ˈ< wzY>63P7y^v9QsU},Oگ.<''~سYϿU]v쬮.tGV 珩?࠿oth'O* @J́T*?Y I55{?t5;/[A\=ug7+aJc৾H%bٶyqq':a.G=a W#G,Uҵ̈g-ᶏz7콿#iǝsv_>[Ҵ/XۓW_g߀zeXI}T5vkX$H~ʾ|kg .SOW-#ifqĞdpi,Jo;ng(C[4%qn&V>6Ы=nT*s$NO4/vho#z5yVLvQٶ _^Zù?:l&ZO*Bh|d.w/#e7w}Y1Suvb?2?<x7֟-UҦ}ú1C$͐!+򭺪>7Gb[xO|WMeSonH{wh*"~@ SOW-#ifqĞdp?%_0GLmVJ?2!#|=?Wߙu-Ax 3;[/4O{Jx Qզ* fUFS-xWĿ k׏uV{YC*%|kg .SOW-#ifqĞdpi,Jo;ng(?NهAXnL;[ȎI@S $&wďlocuxwu-O>\m,I$>PA'|kg .M4k_`mTEg>⯇g^%Hlk?i)dE"j"WCؼA6=)6&{i<肺)RT_Fu gZLu8~T}EWzEPEPEPEP_#D8xgA4ׯ/)k=b5~ק&Y5|1wx⅏u cúƣᨴ0%4e܍دK)I⢜y_i捬3j\[/c+?c |7T[S,`px*OG?]LJ~#ƛs%Fhة*M8 P|kdדmPUEg?n_4د0%7["K ٭r!Os$p!]0b$'a'_ ^-ִ}<]x޷z3]s(0#=> i0C-mN?ڨUχŘ|Ɩ:,vc&;lV٘f>U{+K .ues?'/c| o/~9|:6v,٢;h?iHY?0c\kio|MhE+$|'ۖƍ)kw+XÄ{]~.xw;i2ZYf2z7_)~|OmKH7gmknP5ZuE.դd\T{o>1pRjw|7:K_<s~)x:`(RKy-"fF|' GsRk|U៏WCF0-žj%NP we*ÿ> ދKRkin]N;pXöy?kKiwm̖vEb7@#Bn^O`y ATu>=lC ~MJ]KlX`kK8ʗn8gs)Wg~=~ |15MnX|/qoqz]-F(P@¾=> i0C-mN?ڨUχŘ|Ɩ:,vc&;lV٘f>U{+K .ue|{`|?૟ tw?HKΗ m_ۖ8V3(ɯ\~¿3a?fþ(o &Gnc)%#VQvOĺq߈qk+GadQ6*Jt28৾A?-#~<]C|iV1qRD5IIˡoßo@>ho> xS{Xsun*57E\2f5ٿqG1Tÿ~ЍeŖԼ9ӉAHB+;{_X|;, 4&e- 1;g>_ʸvOĺq߈qk+GadQ6*Jt28,o+`GQYOsO),ů4 k+vQKȤҮRZ{t/p;m$b~?Po Kwᧉb,>(Zh$M>unrS ږ ?nςݮ >j]\w]Y|icMm-˩[Gn0 cv<|gn#Wm⿴R\^}C 7 ?7~(k_W/<}>mRYbGm$ M .J!_V5߱ʿR/U31q!os/_']CKwnd(%Id oS ږ ?nςݮ >j]IeȸS~}"c$E_)L|Ffrji'kw'Z$s>P8ڼPmd3AD% ,qV;=ǀqswfoE񥎩5K.imX, Oi_ix7=NCnv1Z&.C{g/Klt/|D3euWhTac HTB@dW {|R[Z۵;A'VQz Q?૟k07z/,uIYu;Kh`Lw珗1^|}W*\oxPWƏ!Dk 7֚&/u'&77Ʊ$QpQ1?i#_ uAv4P;iߕAs-(f ~%4G{6KYZ; "ѱRTq@B2{wo kCe~!6v\*%+6kIeȸS~}"c$E__RO xuVAi?i;PO"FePTZ_?ן'ih;_ j4dj j$xDY .k=~_~TFTt i0C-mN?ڨUχŘ|Ɩ:,vc&;lV٘f>U{+K .ue|E?hग़nG5F|=wԭ3X[ʲcA?p k w[>hW2!fbEURx+vOĺq߈qk+GadQ6*Jt287[|1IKEgm.}XǭZHL yIt8#IeȸS~}"c$E6OĭJA 8gEY"uL.1#_ >#K_? 
1JP%EB;;y9$O_']CKwnd(%Id Ost7̕{Ͱj 9|;r_.ի>*O|~#jZG'y> k[vh$Ӫ/Ov?&gfwt Djq\=G 慈aG=OE>"ԗ߮=lF.c?y]ۺqvUQEQEQEQEQEQEQE4+SRִ?ZXk&m#̒ P+eNUw|!/? O-E4ۘY/3F`-AG8"Ԣp&:3qOLjV |S(f5X[Ejivۅ -ݳ?7W0麿'yB{vhyL*/OF{5KYvpp\~cK~%K|G{6;K"Ѱ` PqQ;,/k^4&բ. !H[g~oʾ5QwJ{D,j[z8*a|9uOo7Vu`T^č^s!/? O-E4ۘY/3F`-AG8"Ԣȹ8~K~A, %wo.?wfoxZ񥎩5VwmX @<~U{W?>Λ}Ǜ'kv$j:LWT*km"wB_~.[;i1D_fZpEw?wfoxZ񥎩5VwmX @<~Uo8ƹV!e85MSVzW?>Λ}Ǜ'kv$j  ~Oĺ~o/qudQ6 j29G7Eƣ~z[ eXI(M{yu>ck3xĚ׍,uIh˻hR矛 ~At_~<=ջ]C"x7Tկw}Z}YFY96.N5ߐK+IE:k˩⿵?wfoxZ񥎩5VwmX @<~U{W?>Λ}Ǜ'kv$j:LWT*km"wB_~.[;i1D_fZpEw?wfoxZ񥎩5VwmX @<~Uo8ƹV!e85MSVzW?>Λ}Ǜ'kv$j  ~Oĺ~o/qudQ6 j29G7Eƣ~z[ eXI(M{yu>ck3xĚ׍,uIh˻hR矛 ~At_~<=ջ]C6s0C n2?S.wʭe}fὣȹw>U eߍ:/?xW"BKg`ܩnpaG(1X؉sדkҡJ1[QEsEPEPEPEPg<CvbFe6}J9r˕b259swOkz+2j3qfsЪV ^??o l7d4;&wgc WwAd[ O<n_I,?{+my\z*^\msD+ڟ$s OCaa_F솟 a?u*9lz*ܔY]laT\U8<x?¿Y?Q 9~V>ß?g?'^&8'%VW{ga\T]8m|~۟WK#?3D9OzW޽z9=/kwmiȹ_>h>~NMw?`߄?&niwL-R^Y-IՕϙ~ڑES-~ ~ gC+ %\}emO+\79swOkz(cRrUewz۾,EӍ-?~4 h?L<+ݐ[']-Go7?$:~;ˏ@s쭷p}ר#{Ir}ܿsD+ڟ$s OCaa_F솟 a?u*9lz*ܔY]laT\U8<x?¿Y?Q 9~V>ß?g?'^&xE𞧥ꚮkSm>+& \{$q9-IUnWN6[h6'0#vCOӿgyox0wtO`߄!L.>Sާ^븎Oe%rCr.nWϚ?ϟj]Ɵ7? ?~;1ԫ}רcrRueuz_GQqTg_߰os cIdtG&wh)[oS |{S^#ԜY]KqQteO?o l7d4;&wgc WwAd[ O<n_I,?{+my\z*~^\m/t9"e9;5i~xWc!3<OJ['޽zf8%'VW[>gjDpUN6{!-'???$GOgyqr<pW?I9-IUnWN6[h!'0#vCOӿgyox0wtO`߄!L.>Sާ^븎Oe%rCr.nWϞ :O4Wa,gd'đ֜a0 E} ElMjn̪8zTEG%QXQ@Q@Q@Q@y$|)&w? 
~3]\rYev,;d3KI$^EiN^N'NC?-??o79<^~/^4uK-,?iŻyc 飯[5dwOip>%~^5յK.o/~/^4uK-,?iŻyc g  cOG5<6y{f([%ai_#z7XmygH.-e>_W5SXo[T2\72}ɮ>V_Wn^UoDrϞoA!ǝkyϙlgPz7XmygH.-e>}EXfl~ƝZye_4׮Mc῀umR˛Oqpwh1&>xOE#w?>gݞwc5ך~n'>)ޡ- H"H˒:.ڏUv\O6ƞk]z7XmygH.-e>_W5SXo[T2\72}ɯ-Zٍi~:vE'@S$I%pޣ}bzV*9g  cOG5<6y{fxkU_zTI,_̶u2pkU\_6?cN<ĿkZ:|e'o;eW<'"C;?`s3;l3͇{|_ֺ:Nbe埇 o#aGe_4׮Mc῀umR˛Oqpwh1&([s;}^yU>xOE#w?>gݞwc5C_ֺ:Nbe埇 o#a]}bzwOip>%~^5յK.o/~/^4uK-,?iŻyc i~ѭ4:OR%hBhUUQS*ipRL(̰((((((((((((I%cI*$+$@> &]yj)ypX.{~ߴ7i^j[kkɬ[ʪerT|õvQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQTXxWG5K=7OC$7S,0£3G5a[S+ƍ]XGowֺ}d0ucK9%4U7_sGg'@5$VsJ[lhb}_j h-SŚ毨$*PI%+MQHaEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPX?h&kym(dF=CVA[P5_ׇo|Q }mGXYs-?l/~$*-j;m'{2U$,.CU/۱__us]u#SLzq~"O|Y6; }nEG)>w,_reUO)eUȊ{h˻"sY@=Dǿi[n%KyH3ۥve2IHѕ@;t("3wH\K-ve>t`)*1<4[`׺rSgul.uAɹ?گ`QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEWſ^7`KR5>gP: jy܏zWU俷»į }jZY[F4aE٤D܊?fQkk?+ =ZfezCuˌ P^(~ӚGsoO񦉤G%V ^HY),y:g$|~$xš XA4|ppQT*I%?Z>m~ϖ[m,|[ᴎ]ӡQDpƃ2%@HenM2QQRPQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEWSGK|"RB7-r|C(kzk9WOn}{ywA937a(֚}sO4mWsb8܇$z 2 Y5"Q_jn}.~|YZ񗈴 q V!!€ʂO~|pq#ܚZ֑vdc Km'>-kڇmnw)^Ut_x QEpQCcz~͟wYWZ:?ajcHË4gq,GqBTQEHŠ((((((((((((((((((((((((((z+kZ˼S$NylפQ@⯇x௉4Fuԭ&v;nRk1|#{AԼOkg^S%Sq??m|I=Z7x _6yo8C_%E)·m$ɘ~[o0W~ It-n^sndz~W:M[WVkZD"i& 4k6˸;t EW?oO]?cO]`_IW?09((((((((((((((((((((񷂴 r KG-esUA9G5~4#~n/ ߖctaÏ:?c|m`%x-GZkGR)ӮQu'R; _x^o=znm2G/fR;WQ_K#\wҺωnGR6wkrM PV7O"Q3@p٠ۆG 41QH((((((((((((((((((n8(׍A_H_VvsRU cNf&OA_܆gnOOtb:G_-_}Ղ5trݪn'۳߹ҋvE|'{`6 _zO;&f@}$]}g׈$fu=]IVԔ\((((((((((((((((((-#L Ÿ>`$>rݗrB?T? 
?mgK_&l5+9*>TC\VǺTQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQSIWNOOn~ݖ6O&8:6oWn9n(@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQ\?~^xƚզٌ&lDq Tp{hI8aK#UE$xrk7 5LJE#5&cmwQ$0,#g4d8 [ %~,u R="Հײ"kL>s-"Eg}şT btVV4{09[nN5>Zb/[R_Ǡ:erwƬNJgl1X߆hzVcii|KK 6ª"U*J ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (/l~n|u7H|6 }̄ȠPo/ ߋCoJ _ܬ*1 =~]pW{~ 8VRV˺4l ez\?$v?W ]|IJΤc< aUnc޿j8k~:g_I[.#X@1,Z7 ˪~ ա%XchfWp#rq?ze_[#Ş#kknG(" x_z Ain>$kwi}.&RLfS_mh:t{}?Kt4mk *:*{W(QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQETWPvs[CŽD)*A`ԴP/^x\M۴$*&w U$ucZtv::sn8dSOO3ˤY_:aWa<ܑ_>|U𽾷kK􋱘,.RuwVoizk r52NI~??~(GýST4XN5 I H5뺞ZW \íg'{DMjIe1dA_:;zIL|-C?"TuQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQET_juxSĖ1Zl֗vH꬧ 9V@8nm@ժN6%߇' FJ?Y.szO;}kwgseG#S WFa/u'C{70?ҚrK`) ((((((((((((((((wXљUQI q4nW]kt$Me7BÕA׌o91/-ւU׼ku=$b  ^G iA_IcM>c2Dɤ葜9@%dqJ*|F;\k6N/ۆw3kkv;፽xæ? V¶:h>0 8U;s"~1RuSj=|8E®NI'֨(((((((((((((((ꚭMy{qogglIAq(1kڧ G4 7ĝ~,M2AtA =Yڣ oPM4غ˥q20"8!6BcVTu˝z*{/#Xi)`[+W }+V}S_VaΉCl?ْ們b#T9roߌ?XϏw [h0*tt:Ѫm.G5~?@|[G4xO6VCW';6>_7 ֏5q+qӒ}묢n"W>u %nu3XWj+(aEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPxF$?ۚ6[oIIs"?azk;<}V1mA&U<sx7Wd>Dھ[j31t j߇//_4E&˫\XGPbk`_x^MWfȵ/]J012@~o_C?O,/KǧmnUKX OF-Oj+v|NuE~5xfđ!u<H*J@ \v-4Փ>*6oI?fmwB3ݓriXw=z(EPEPEPEPEPEPEPEPEPEPEPEPEPEP_\'O- M;GۏH`OQAcv+*d@ ?ӟ oF'yخe奏~ $>ſ)g&KZbd/?ldw^YȻᐺt\o؏mgt.ٵm;~/ U9 Hx9GQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQER0_?.\**_(+q[Vx^#><m|RKk/ _Ⱥc1g 4-lQE ( ( ( ( ( ( ( ( ( ( ( ( ( ( ~/|c_xՖk'*ݺPYۛ )?[^nqȋ} Y.Ü~ZhחS}º\ZvWEgI9\qTJ~5 dԵ AJnK/֟N#f!cKc.٠iX# ~o?@wafVkwJQa=#\(eӿ`p*FQEQEQEQEQEQEQEQEQEQEQEQEQEQEW?_OFgY:o̿}r? 
gvWO7xh|*X"[V]_Jǁک1g&sAs gŏ ߋ5'úLzPX#'Q{_~p|7>7/*kJ=k篆_KCD>*j~ikY1yK_~G߃?￰28oMyVXǕ .?j'SLEYZ8u bmY<';"z T~7Hr[%Լysq\aM u%O9V+6(;|77i:Vf] /QQ^C ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (<պ'h8ΓM!x_ks{KHu4HXә} 2?2FI/~ pA hm[{Wo.3 xMkGԴZ;;tw:GSA+W}^|~!>gq{%׹Ibݗ#Š((((((((((((((((_ 1&skP(_ge ?-d#>,k޿/sm[ +y?*@-(0((((((((((((7|xq7rl1zQb@nmoooyEޱ6BOjz |qYwrx]7\)dE*J]%N"f2 u>8[ CnV^EKs $c ~bJqsAE?þlt=KEkgi8rK$I$7(((((((((((((((u}"kᵵC$L#G%S@Q_?\?~ӧj_ *!d d#!'_ x΃Z?5S'#['jvڣ wJEHV&#`x'÷), SBLqs)^rs^,6Tf&·9]ܰބF8s_?g]<5, Ѥo O_%N 'Kվ>;kWd&b{12~ dk8 𝍶M^}R*uM{0)QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE>0xVCVHY'f#Y~guozxfE՝,v9/ $?1P~IzBUuxʂ]VAX!@rzWwm7W-ēaM063LQ' }w ~6ntkMմ-/mx'_FFX{_~?@o~Ӫ|6%I foMvJ>9&e]H #>տj#̺x>9p]گ{ cd8d/o_e Z] \i3LzEڣZV yQTXxFԴ=KOA-լ43J>C ( ( ( ( ( ( ( ( ( ( ( (?0No^ hfl.p@$Y'?Q>7KoR|¥8Y1шn>45k n6he@ʌ0x A5q~)G:/O4/D墀lR@FVg?nIPO.ќ4uҁғܥXp½"(((((((((((((((!/؟,執??7/5 >j  ~WJr5N[;QHaEPEPEPEPEPEPEPEPEPEPEPE E٤_?ljvf~(@>o~Þ Eڵiq?0z( ͅ?z m>ҧȌDg9\s>^.]|Fx(bյ ҋ?3+ C0۷5#xK?cZ-mjݘ]۩v%I5[?>ϠZi .kJ'_*,*J ( ( ( ( ( ( ( ( ( ( ( ( (9?ko?ƅKCxmˡLLȠF/9k~*4i6.  =d= k$E5VɋXbOɵRQ[8v||PMO(#omY-,WQ@UAZT((((((((((((((((((((((((((((((((((((((((((((((((((((( Vk[aCJE#YOߵ?ߵj^Go)r؄lݩ /d }E~!j$vqhZC$ךI:p݇8g O7ͥ|X_a+iW8rW.z?7|ըZl{mIPI媽Ic|C~)xbZֳkE7)q 22;lW?`o{ {|=5M[EQiD^GX@'rIzNHa7{D,5%Դl=#Xw~Q\>HI&kĞSvȿz6eaZxGxm;%mF=<>6'].}MHj( ( ( ( ( ( ( ( ( ( paSZ?aEAu9=B2!>E~!ȿ<[vgN-5VhdjVœ9(]Hޣo힏ZH,.`.-!pÂAA?୿OHm? c'yt+jpc-<+| f{<M)+ᅚO$Xb$'#e(,Fҵ;W_ni?8>iK߳ؾЎKrp/]to\x6ދwi1`5Oö:麅żr\ VP^=XgDĻETQEQEQEQEQEQEQEQEQEQ\G/^"/nKpm,k۶Qݘ +9xN^chdFk˹DPcǰI M|*~˺? o϶ t\\/[yZ6ᝈAܻ3oGzz-XWOJɤhq5Ը ȧa\ ڧĭ]g겛}i kx!E~]rdp߅a;{Moċ{OX5gtf59~?|6i+_ &|ɋ? 
-l }N"*QR0(((((((((((o;W/GESA$H'>.G'{i~&)4bF&+!n+3:);>&Ry| BD'k7'O;χn+yRxe%=FGP"7;W~n0LC\zOX{Hzӷro?Z>r11iVkw+Нq =kK||NMxqoOs̑2Gr0PWW/߳F  Hq- #Ov_ك D[DՕv}KX|X)hQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@|X-~~bZ~$mf9̊[}E~.hO|~|EqO~ bj?@_~ O PV9w N5yW>I'Uq텼i2]Fxp` \(>HaEPEPEPEPEPEPEPEPEPEPEPEPEPEP_-]c[Z閿hgk:6̓_[WԔP/FjOL[_7ß _딟Pb~̟ / in :Sl57Zrֺu(=`8EPQGi @)QEQEQEQEQEQEQEQEQEW1]/:=&o;ݾtHOzHSv5/o[ƚ/^: cE(TU$TM4 ;xO]/jo,cWPq db Y>7qij[[dY$qm eE5R%%5TDUT`(6$:(0((((((((((((' ŭ~C/&riIᮦ @\tpžOJkT_zʲg[d]F㑎FkbyNgk߅ ,aC(d/ݝJr2 *J ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( +~!A|,x][MuZ}ȅU/LB3x З{?^%O~4MsN폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^+B_U{폂%^ד 'ok_5K?{@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE~uߍ\Ӵ5|?O~4MsNֿd'+]d # B"ڎG;6k/|_&-gO|'_Aҝ)F_>ͱqӥJQV^X&w4&w5u:ʏs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o 
WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w4&w5u}N}c@?$@?$Q:ʃs?Y>/o WG>/o W_'QG*g?']M']M|ES?1X&w5kM/i]"E2Mmwr1w` W}Sp\v(F:Cc+cJFt>좊+т(3ROٕj?@(((((((((((((((((((((((((((((((((((((((((((((((((((((((('?릹hk /h(?-gO|'_vk?Sl:|??͟q/|QEvQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEWݟE? N)K_Wa/weO(`\(+?!/}_ 'oh((((((((((((((((((((((((((((((((((((((((((((((((((((((((οؿR{kvேbIƏ>4ugi?ξGʧIAEWiQ@Q@Q@Q@Q@Q@Q@"{?H4kCЋεб1x>', +l@l~Q=7yjq+\ϛAWC[3 ?]:c07/Ϗ} sWObe-QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEgOj_?RŘS,(?W ( _5K?{_fWB_ ((((((((((((((((((((((((((((((((((((((((((((((((((((((((/h+ؿR{kvൟM?)u}iqOx6~QĿ2PQExA_s|E`uψ>:6yLQ%R'h_?gαsVP>w+#%0QˋqZ]_[v=!RmrVRӯGkٞ2~MKↇk>ckKdU RHEaP^qOפ΃}/%ApA'$W_Nw#oxijiswh֖vE,P~=O^f^*Q"NU;.e^Mfk{z>?d'ž=*RN1&F2If8Q&;O/6AtسΟg>F&o5%{Y5iLl_fƯjq7#4í/(EvZHo|`k}kC>>td\\ّf߉ |GcŒ~)tMV.SeYNܤ3mO?~>]bA_RcS]q#6[q^;m. x;<7}Hԣ{Vlvgo{Rg=>{3lfOJ:N[wWd'[ MC<7 n( PNI|CJ~ ~ <' ? 
]MVr6rVgT(KxW:ahmNk%RBzҽ{>ƾ4k7<$s\`VGw + 2qjCC"x^ ]4voşi>mѪkW)kn 1Teυ0|lGx㸷cyPwsizXyHĎx_:~.gğyxP]6֍th*;C)<H9/.y$״_ëxåmͨNڑmb3bw}"u'E&.zvqZZA5ԋ 0…UTrXM{6١xRMj:bek˟mMF|*m+Aj^5(m!hbDQ?ƼG|]Y%mdn$$ e`7U*xxalW5I[KZm=O?୞ϊ]ڵMcOk8wyJ苹Q`2H<^4|| _GW<@"P7͙%-!bB3^O׮~jٳ$1FM0*G bRK/Z[ﮈTl6>1RJֻ}Z1S|QZ)~*Ÿo.GQq{!+BA@(| KGT yn3 v?f{w%߂GOGm5(dYfH ~raMyOgH𾫨x\՗Z`-KiUICĨP001]Q<ʟUC+˛kuշmOi^ykq}ѓiH~-Ka'^5վF.Bar 0oଞ/홬b4׬IcU +G#h˒y,^S jq_O%Y:2|^'k.|O_fž!3;Wjۿ(xQM^ܤG HcV2B٥dUو[QE*QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE} y.C{Kۯ5 J47xFUVck(mSk +U$N 띥\rZf>/FB 'AN;cީ>;xo1Kä\&CqK{oHEVQ̖].˥spt[kl{ws1Q}UP4ЭQi6*9+~ێ_p)&M6Q|w<:Ӯ[E+a*Us)Wο-uO~!9H1F>ub=M5!,v^Ͷ~yӟu _{Ʃz+A"dkywY-+8 QH< x<;|Һ{YjrSQ,M$͟n7?>0G~ 'tΖA: X1cK7>D~XC{og C'wchFPRv]43c擤Mե{j}ai|X >$0uY$g6vk{$k_ xe[}(·TT]]44gEJ2jͦޏ{&{7_(?>7=ݴ9,XDFX$p8WOcŞ*+_xEho vru]CD|?%F>l5{"p*I5fݶ>o}[_4?>!jR6'Fկ; : ( #w]dz{|mԞ 9R(ͻ#\ ŴRw6k#xM 0H5i-#ڨ[s+$' OH^9y0#;_A?i,mD>gU($v1gb\T_ -o={K[[kWH.Ğ%м5vٔ6B#x85jJÁż5x׊z^hOzmy#o O'ADA=2k:_K6&>՟ج%336%r( '>w42v7Et(PoV۶۽)K_WugOj_?_2YeQ_0~QEj̯TEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP__)=]5;C_pWGt? }@ k?Sl: Y4g_ leS+(((((((((((((((((((((((((((((((((((((((((((((((((((((()K_WugOj_y|4{5#*?%vQEEP_ 'ok_5K?{@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE~uߍ\Ӵ5|?O~4MsNi?ξ?ൟM?)u}>_8FU>_J (O((((((((((((((((((((((((((((((((((((((((((((((((((((((+"Կ|'_v/o+0wG_2YeQ_0~QEj̯TEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP__)=]5;C_pWGt? }@it+v=SLJD-J{kآL>e쩪|3>&Xi{ikW?O_'v(cRKWls_>cآ~?K/l~_Q%z+b??/OE~QG?? ݊(?Կ}=EO_'v(cRKWls_>cآ~?K/l~_Q%z+b??/OE~QG?? ݊(?Կ}=EO_'v(cRKWls_>cآ~?K/l~_Q%z+b??/OE~QG?? ݊(?Կ}=EO_'v(cRKWls_>cآ~?K/l~_Q%z+b??/OE~QG?? ݊(?Կ}=EO_'v(cRKWls_>cآ~?K/l~_Q%z+b??/OE~QG?? ݊(?Կ}=EO_'v(cRKWls_>cآ~?K/l~_Q%z+b??/OE~QG?? ݊(?Կ}=EO_'v(cRKWls_>cآ~?K/l~_Q%z+b??/OE~QG?? 
O"ΓuoDdN w}7 {Zn-8_ꘘ=5kQEygՅQ@|g/p+?!/~QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEO~4MsN_)=]5;C_pPE|{n{MkxCoopQ4g}껑N=TW}zX\C/'I*hmLآ 讏O7t?7j~Q_Q&O݊+?E~G?!ӟ?v(z(?OsOE?Noآ ]?MڟW=c9ɿSb'Ct?7j~Q_Q&O݊+?E~G?!ӟ?v(z(?OsOE?Noآ ]?MڟW=c9ɿSb'Ct?7j~Q_Q&O݊+?E~G?!ӟ?v(z(?OsOE?Noآ ]?MڟW=c9ɿSb'Ct?7j~Q_Q&O݊+?E~G?!ӟ?v(z(?OsOE?Noآ ]?MڟW=c9ɿSb'Ct?7j~Q_Q&O݊+?E~G?!ӟ?v(z(?OsOE?Noآ ]?MڟW=c9ɿSb'Ct?7j~Q_Q&O݊+?E~G?!ӟ?v(z(?OsOE?Noآ ]?MڟW=c9ɿSb'Ct?7j~Q_Q&O݊+?E~G?!ӟ?v(z(?OsOE?Noآ ]?MڟW=}uq.CoKpn>[H6ʮGM; 1Λ5qWq[=EW}pQEWB_ 23ROQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Gt? }_ߍ\Ӵ5|egKد,_.c=]}&W?/肊(@((((((((((((((((((((((((((((((((((((((((((((((((((((((sZWi_'W?GO9sŋ+\o%z/S(?^ ( _5K?{_fWB_ ((((((((((((((((((((((((((((((((((((((((((((((((((((((((/h+ؿR{kvlIw_OegKدgUQE6QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQENk\bJ+'5رqvˍ[#~EEP_ 'ok_5K?{@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE~uߍ\Ӵ5|?O~4MsNw/MCI.bIlIw__>lߢ (Š+޿b?ؾ?z^ּA/ j1;KlFovf96wq*F|=J*Jw8o oh~`Fϝw7k??l+NjP<Wvm7_0nǭ}=|~~~F/ ܵ^Ln*#ݸ1.s_6H> _OrƼU:iuUCԮmØ%w_^sQ>e%Ouo :谈iwS sHpI88/Frkn|c_CY>+|L~}~φ4i:6ȄjPZAUUm烝xqI{׻%w޲*M tӊS:ݭn՜ta_'6쓵ѽȾ%3>þ$Ӧ9Oo.22#!`H ^%o6[յ&TPSkKA"Il0zKV0)[-0¦yp<7hBgO~&|/xnMmnPKc'䒬 pdcJ0 w4ԭ:Si}vc[6q`&N-5->c=Jn lr2I%÷A[:g|nu[Ȅ˅b7UԖ,9uz/߱? jkh\]sO[ FݢYW@#k: x?IW87/.u5 u(/fd B|+?d#o~,kxIt 8^WAn \c G7k2VjQQ$6Z6oq!Ӛ֋k~?]ZZl³ PA"k1տgCO^%%76 _)wՁ B+(OKu~3Υ7|+@b|+cHЁArO$UUįuAͭDgz 7*ݷ箚]3 o٧U<-ht>|.488#\oߴ*MB5\J#BqG<(jƿً/io;hzs,s"2o ̀Np[fAƞai6 4}JƟ57k˛ -ēD+!M>dvO((;+k!CLд+ut<zRJ^/<̾0|[6>;-#]O( gszU'G xXvgciIqsk{otfa pNFxp? 
4x_o 6M^k)]:]EQYȻ8n\%'WN5Q|kǿ=H?z_ :~$w=%#$Hq~kxŶ ys]\8#4Ŷ6x _q<f_~ȿ_^ȼ1]b3p$ N>Eƫi=s]&_4hw22"YRԭϱ;B*u\e&캫~-jLS^x3J1'}ޥ!OcӇ#޾wo_|{yii!d?vHp݈ ƞ:j~^Q!3ܻL˒6q_Qou2㗄m<f{QyČ}|J6鵅Sz1qpij|OEWaQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@2Ğ>>%ux,Ufv+ y?4-qܼ Z'6(He8$å}~-a|1? 7H JO&zm'w;Bp02 ko:6jY-rөQpk'ᣄj I-zyzG(xn+c ç{|K0srpp8&޾}\f~'|u] kO,_D @*|G$,U*8C?f9f:ѶO|AaKEռTz#@Wn.rq }s⯍l;6Tֵ pZf rI@X$}~' x?)"O]>kIy5iX #7̌rʹ(?6&Ok??i& Ytw!wu7oZԣ$=ERR,鹷xwe]&W<~)}uɨ$~aӗU_Ly;s[|7nk^`p0#A5xڨ񽦫疕mx$GOXT;WZæ|u𿈢xD p|FOr$E;VR5&ӺJ|=\4i;ۢc$&s5$Ұgo^CTƞ$_B_? 싩g/v31b>}Q]|Isg~ŅC"hZNj _Oz=~W6_[kTT)UJ!A |%@5Oh/ oO7?jߵw}hYӍG[IYZ߉ӈԧ.~m[[K>dZ/y#-vh/2R;|]7oPM R;6&jRR<ٖvAzY!~Ui|3t%綫*Dht0;N~^|*9^S[7KҡGX]'$~~|@~έo1i1N6$|?94~ğ 3u?-6֮#[=dX~`׷|nOOٓo!>_L>}dM-vgv~tia*'h~g 5ZUikO+%Ws[q4vEL+wgk#/Mñ_j^m"i$ٕu!g|'O~Waq%͐W!% "oN>j^_ÿ ![>Q5)%c+*˴ Hjs-R[X⓿G7G]ExƃSRaHږ- ħ!U <䜒I UᏋVw~ _Z+jq c$3G팒v$ 󊊒I;6}w:Q*?e)5iY)Z]v:.] .4_~<Ρ{ug=_2 ul6Pyω9C_>;/tOq*h_ݚ@K#P1,*ıpF7j ?|=e5_>߳~ohA+}u;[z+yWԿZҋjOYѵ+/izw-żhn+)kc_ڕd_W&Ek*m4AϲfGě>q9+ش?*Nwy,3xOT5-wO!O!!%R5e8׵zrSV,mY۷]4lz/0Gk}$wz"k#7G/_CXXOcBAzL~R>[/,&H`u=1N_oӵ^|Fb{7TarrXp1:SԥS{i1ˈBta|;=/Im5=s fLi?xGWRmwiLoWw2,eݹ#8*cKƋXlt{f/pM"HQŒWiT?|5wC ׮0D\ @z,11~2A[@xK#xO_^߾uoy(0m[c*]p)Ӛ(M V"º~국Ex2cڕ4m3/:b7~[!T|fx^V\ 8QGeU ;*_W|C޺g^1 }dx\p8'hkk_/۫+k+;]!\yeD1g'=;dчU9GWO%a%K UrGT+նW7EWqQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@A+ q7]W~ulZ_Qy|aj.#*I3RAp[9$Ǔֹ?W/ǿkV}%֮Zd3 %b2|O5imSܩ2Y9)9m-n|_i~^пe^B/Gk̖>'0 6e 7*X%_Н{S3|\mx``oڗ>9>KɣjGjkFJ#pm9VID`jaURs~䟗[w]gn-|Vw<ʭsesXȱWPÎ)!> Xl?C}jky7$`:n?*V ɸ`IѿाW>o"1&-_ x*Q@d 8!I¶x!A 6+%"҂Q_zuxf)Ғdj#|F.NsMJz.O]|*g?2Ӽ?/k3\ZW\((@ If!EO;F7Q/GVK]`Fp_PC=Y^M௃| ]FanIXb=A_6_u~:fEQ(.R2p1J9yl򱙊xwgQɫ4]m_u;}fF=F=BU8< J ! 
k/,(R>\GbN[}$wǓ\H>z]C]^-qwsl"?l~ Z[]7G҃.YbO.7 ETRyl{C:upj5T*%fW~y} fs A.9|eOmeaYIR;" jl6bS#"Z [|MIkeY9 biI|B|:N1h+m[4IXJENTVO&6OW՟O[w⯁~?x߈~0=?\]R[hgpP{>߳߈?Xq3Ͷ94;(j]PTfk4`m>bX⿵?ej -AiV2rqIcǮF.^[opV҆?k9eiYk{tC#ֹbWNk\bJ+\o%qOz(Tx(+?!/}_ 'oh((((((((((((((((((((((((((((((((((((((((((((((((((((((((οؿR{kvேbIƏ>Nɲ3%~lW?/MCI.b+͟WAEW|QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEW?GO9sŋ+#ֹbW.7=lOEWʟVS3FP.Ჳe}I&HSQ^lRj1WlЯT^6bx|?58 yՁῲ?ċ}p~J?}k̾OaMϜn9,O?F^qk+EK)Ǜ/VzW| 7l5vjI$KG>`CWPG _WΗS]mN|2F~=QEaEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP__)=]5;C_pWGt? }@'bt?6+ &ˡou_I +>l((((((((((((((((((((((((((((((((((((((((((((((((((((((+#ֹbWNk\bJ+ zGK'+Oׂ%>2|Vqغl J~GܷP{ ZrXxZ&$6Ȍ;kֿ6N 7h򗝭ev߭{8Ju126K=?~%*෷qp@ܐrp@8[+-SP|?t~l[%}K*>S_GY8V".޲>i]b@$7j_ 0o be&NWX"JS|՟ŽKx^Ys@ݹ*yx=]xO5>~,1 =袸(ŢCX!nu^+5 !*8Ref擄.Ѷ$F8#5:5g)iJv_4}j\UIGEWHQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEO~4MsN_)=]5;C_pPX6]{$͊f[_ڻd~&ѤPH<"H2n] |St׵RKngK]YuKhNU/-U/- ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>)K{h)K{h7?լ}3:+rTrT ?~Z̿M>sZWi]9O_[^>dZxo؝=dDc$ƽk V:J1z3ru]:ahvF}E?/ -4BM}V8[Ye0Jda_f˰51ӋGxDpvrvk M_5\[]oM 
Q]Hv~단iTMÑ5Nkˡ8J\=/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$e@?$:s?Qw>/wVG>/wVG+G4W_ZM_ZMS?0X_'i4i4}N}cE|e@?$*_ß4ZO֮R[H+TRRgaOjOY+`e%WgU7+o$:o"3q1_Q?#>tQEQ@Q@g޿j|3o_I5}_ҞȖ}8~ +B(((((((((((((((((((((((((((((((((((((((((((((((((((((οؿR{kvேbIƏ8OړM7K_U;RɲFcSIkŠ(X(((((((((((((((((((((((((((((((((((((((((((((((((((((([sc>qW ]'5}3J⨩?C xz$XiW'u?z>9~Ϟ'oHn(>5ٕnюG'ӈ9ml6od~ WQw>T?QЯ?_BޣèЅG+?X^p_1?)#_4MpkuE|~ҿm_~7gOcd5ɿ?ל?c~OꏴG=?jG:uyO(5ɿ?ל?gs# OWӤ^ c/];ŞoO43_^dOr8F¿T~'5')]_b۱AC0*M%o (Š(((((((((((((((((((((((((((((((((((((((((((((((((((((οؿR{kvேbIƏ8OړM7K_U;RɲFcSIkŠ(X(((((((((((((((((((((((((((((((((((((((((((((((((((((([sc>qW ]'5}3J⨩?C xz+(3ROٕj?@(((((((((((((((((((((((((((((((((((((((((((((((((((((((('?릹hk /h(?~#ر$_'/o5?$~ zq?( ( (h| Zĺō~Lۛ=ɀʨ9T;v).ԟ,y5M4۲<^٣5C/|Wchn`Y_3cO*:e +a#+o/S-mvºmqgj-"g=Ovx馚FYR#RݓOKwg_n|;[W akˈW¡*P@ɄO GWǫ M}]ZGi͏;vtW#>ʟ )~s|^/hy:·y3Lr72pmhCIdG*7[ /G?S>,6߇~4.T. ,+?g[Si־%Ins0)čG=8XT-좭v{y(^4/q' CX퍋{FLnge_ڧ1G/<'j4FmJWgtFr0g0܅N3*;y/xr)dտ+v>|#^[tƷYmFY 8'8BWG HJpFWi<5xww1ͮ]yMpA!To)(eS#U !P~QSB9frR<=Y4ݵ_cz+#YZD?XgYtРePf-!X5? }P^'" "tvb x;;Fvx$(m4տ*S8FSIo{ O|U P[8Ps[sc>qW ]'5}3J⭪|?gh(Te|g/p ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?:bIƏ'?릹hk ?jO6_,jI-~1WI&ōOI%*^ej~ (c`5|Bzh|ئo+$vA.ʵW:O? 
N=>O:IN KIM I85͌mRvS%%^ߍ]uSi4/5_9};R%xYs$q>F&$dI_?dj%+,MfYk`@An~߰c~+b_1_Mi&[UrV 9@^XrV:+P̱S*wmϥǢ,|!ox㯊OɦiӨ1M#$vqx+Ǟ55˥T֯TU呝lmhRexφq-A,`*:O|em/nۨm@Fʒ6n39"(*jdJhQ r6=gO1X ëawQ8uBHyB/ڣfLJ|-&hZ^#nKK:ciY#!^>_| McCx1j, Yeʽ|9/</tppL¦Z~k%F 򶹔+;_O՞ȌoZG']ҶGa'v. ~\;=k'y q|Uu>cH I*v\/F|9[K;o|P҄$@6⤏?54bP5]+uvJJy\TvFtWJcDdy ߌnv8\W_V&7>>7wgc0yaxc~v8 _lH?e ծ!*.tO\嶋*I=?xG7C㏈Gw$1̈mF; _ rkh}CZa< zmз:#a*ѐw25߲/ExWXִI+RQDh1p @bO(J-9MC'zu!%C7hVGwƿ#75"65׍?i4qo+x{‘Emp.FY%]I> 6Yq';~~>2IE)$Sʤgr#9257/|]]?xwUŝ Fnu, Rs7mۥƥ*UMTkYr{(Wğ>!|9rw_CnM~tzgYQLPĖ?x?gQп1OM'xRQ6f$gh3¾H ͗cOCU^kw]׳Hgl0*y_ڛ;Z"5I-u%I"oņͩv6ENYM(KM-GС9֥i-]w_*Wmm|M?j֞[dQfڙQQH%~vŷߋы`v%K&;Znψ13Cx'oN+,1Fwq\W~*o~Ϛ:,F\I~m8SE>ySQWn~C)b+JuyjI+]6už{k"(E_@'WRWo442[.?F_-h`J~о>q?5)#ʴ lε9IY&6qXn|d)җ3[gS0oM1ȰںhY <* +{ {xwLh~tdXeXL}cWT>/9~Ǧ o{ v6pד5t=nN0"bc7G(~ xI/yy7iL.ʬdcx5'u_Rh+\G"xq>l3:٬Bfۍۄ@I_C~ (^x /KnXc\rKnp7rK|ܜ_W| SE^Kn62cm.<`t*ԓZZuc#[ NzQ~친Ko3 On<#xo) ؖKCUud;7.N7/I]7ޗ^hd~r$NN@9ZL]|p,5񵯄zڇ ɴlzr⻯) _Ǐwc{ۏ j+$~XVVG'VXZQG ]{=^gM/h߶ޟ?4罵bݩW\d>cFJPJwa'w?fs$_<++u eIuJ *S ?b߉?M/sJ[,dho Yb6p! 
g8?aS|cz 3AOqW2)RWk Sr*I1mrpT oz|VtjVĽX޺cq5/3+4f~%.KtcqYO ^W èh!NWq x6X=.>#]}{fxɹ6Rc?bڇ/þ կk-I3UÇ=zj$'uV,SEJVݥ;ZwulZ+ +1>MvouK8DRY.Ar?O-{_9gD ^~Ͽ&e!%%bRy%pC>{">I'_}Rҵ9Q9RT=Ԇ @/tغzR׻ۺ=?7xύ'Oo:oyqwB Ko'ۖx[Z>J ~}2o5b.9野$W#?a?kkmwq $C8#G t_{ "v.)Da$f$gܱ`1f@5;'M\kiĩJTIfwヨ>[sc>qW ]'5}3JN?gh(Te|g/p ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?:bIƏ'?릹hk ?jO6_,jI-~1WI&ōOI%*^ej~ (c9Hf"1VF 9r*:( hi$xŚjiz:yvwwob 0p7_j5It-fhx;IU82cKZIss]߹%څܷ-ե5[N|y7sIbIMRN*v >#zzzywvoz+TV 0<XUo1&5;QE sUՇ*A+oTs6"?oGW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_?u= QU Ov|m(bywa+h}?1_3ݿu=]_(_rLooywa(|m(b&>GW܃S??=]_g(ůz~x[Kֵ]:M6 Ri2CGa#dsRoNkgY~х1-}O(Tx(+?!/}_ 'oh((((((((((((((((((((((((((((((((((((((((((((((((((((((((οؿR{kvேbIƏ8OړM7K_U;RɲFcSIkŠ(X(((((((((((((((((((((((((((((((((((((((((((((((((((((([sc>qW ]'5}3J⨩?C xz+(3ROٕj?@(((((((((((((((((((((((((((((((((((((((((((((((((((((((('?릹hk /h(?~#ر$_'/o5?$~ zq?( ( +C^񟊴ѣ[Z+8L. 
dkf{VC 4dX]<˛7U/uhߛ<86xCQ.e͵=|G+̾'('͡xG5XT9qp*p@e$<Մ*a+ӏ=H4V_yQETvjq[$8K\v:EhB!W/G{c Թ$٤iNQsm-ݴ^E^φe; #KR.#M+UQIWᎧ_Ttuw2I{ei!玙XE'jŵc+ؾ?>.xnWM gN2 BA0&|_ 1~0o;ɓ̈LG:)(d SWi85;}%Ewe~s 6p!,P6;wgVOدoxFM9Ẇ&' w @@G7-Wtgoch (lgW=P #7Wvr:A"??JT5=ZԢtWIO xMz-ƂO&xFFQIujI9FQ, (Q#K|/qx3GX-.Y-m̨WȄdC1ZRb'bѩV\%w]E{??>xGR׵JkTUhUM~aEUQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@W+|@)7Y3ym|u #h0љU*s&RKviN~Dݕݖ˻<+</v[4C%յJDbPq9U#y;GV'?U/-=@Πu%\vʒ3Z)(|Dm&o﵋[W3͕ @9:QoN=YTJ/<7?o3|Qwﴽ=G+0wUp722MZ5)K\_fŠ+K_o,!/%"GTF3 YYp 8I-S9ߑ7eweo?d`3HR7 x[7i2d>%RDo97RBml@7joW2v=};v _#Fc38t/>k7/|"]Ƈm&Em{{e88e$pjcZ-?ux+%*e9?3Wps_g?*u>,/3v(?l ( _5K?{_fWB_ ((((((((((((((((((((((((((((((((((((((((((((((((((((((((/h+ؿR{kvebƧ~ԟlXZb{)%~}_Ƨ0+>0+?fٻ? xA\]NH 4O,%A<T^]?hmKIRuE&QwI/BXV8ܣwxhWƕMnI|bo6MޒxwDt3*4pF sX=Jmӳ.E'mJA־{4 [M,3.bb3:=A#|`OjQEMG%ZڒOz=[ m-VP]j2)0Zp yaaDR0{׶`OSW675x;I*;(# !Y,C47Ė&[[iZe"ABR;^vx'?ct? XHuHc,eF+DIs#rOEƴKeo>F*F)4&֮bM t⏅~i\!JH|p-T|i?Ӌ{8>lK&y FY~\Fυc?|0t~c ?GSIռUuK 6  EԌ{Os~w'2!C{QMe6p_Wߗ}m|Z>9#V*"+Fϔ[uX97I:^ii{! 
Ŷ]cb~T3.`AьJ\Md))4Onݭ\/~+xni&bZģFA % [};gK6;c{䉄-c g?.x _XmOϳ-k3v1 ^{߉w|AcXo<7mwV|#SVJ0'{ᆅZ-ɿMY'm-C~?g?3/[Xa߈-FTѢROt=9}.ƭGQЛŚun:,<#MD\$MﹾA!H^+߉4){O E:ngԷFY£]FEz')s/nn繹lJQː fRdmSaB9ͪ\KvKuVw}2>%[|%ku/MYI-DuVE==|߰gí?>/xOG&Px=뜁ݿ!SAx"IO^56yKfZE$D+VyN4wW&oZ^?!4e*3.ni(ŴA*թ(ŦmdoFjq?OAu;'Cv/ Ec\1ɽ!1F>4Ɩ?'ơn|8eۜ>ϔ'ۻ8n~|s2hw?todM%1@Ɍr<۪_.u] %I'8}2sߡ[$ZhJou;)qhO>߳]%mJ=wCΗ}SYm JVW_K_ ^4w >ըbY!#~g.X$9d [YMHx`ӍO9 n @um*Ano!WxdxRUG_s~|k Qx2;[ElnW3U 6݁'c.RUaMpoDVfYƵ4i։>s+B((((((((((((((((((((((((((((((@(o(|Eo E ]>V$&wʧ<qpHhJ&4j_Z[Am 6N;$<}cPo?n\xqg=ܣSIw|{1W5hvݔڈ7!Y˸wdw> ëVzLJ$NR4/)&$+,GBY@'iٻߴ' 6:ח$gɉU; %+_ 4(t?LW:SjS"dž2ynakEwgrOb$%f]-Yߥ̟)$?e>}z>YY-aJ0'.XHsf$/~ U ;-+Qő0<2[z港kW=͚w  WNs"%$Um  jI>&׾&|DEԷWR 7NUhY 4A?1;\!獽T~$ޞ}~H*ή#'.ivݵV]{'x_w{B-+ek at .:/&6𧊴?+mr:<~>ZNd_Ne;J$Ϛ.m%}WyE|_WgYu=^߁OEX<`6:w wSAU9'|v񗊼I-޻a-5 .I =IٱH84[+YN+iXx+m(Gaz_Ɖ?g)πXLѭOuوWQS?-CM8P-L9z7\]kCsl4՜aB|27z ?n%տ' I.B9R0)|O.T?lo'FWHw]v6!?w(1^I.T?l88Nmn]ňF?T}*_~IhE;ő?W_Pl?d)9#=[ vq?DB-;$u|2-P{l(σ4C].b%;QWlk]+<5GӴ_ Km#ڷ罻܍ꮻ'$JSJi溙*R)9^Kd+[7CZe9?3Wps_g?*||/3v(?l ( _5K?{_fWB_ ((((((((((((((((((((((((((((((((((((((((((((((((((((((((/h+ؿR{kvebƧ~ԟlXZb{)%~}_Ƨ0+>0+?bA.Mhct3a<:!20C2#詜-W7f> wWW߅^89\}Jw2E&H'`pGko |4?i: 拤b&Q}J]dh%`X!Wy.rbA8X&]uTͫ F<6kͥE| 6/:o~&|05 KIR2yxH<"1]@ϟ uOuXywƵ*|+(%W,UK*wl~9aa'wK}O6N)AE4ʹsԿdoڧX~*/4Pj:|AyUqzŏ+:Ě$\1M>C$HܫPq_)QU<]k;MDx^A572`Cyq,X~ <+m~.'m~4KA,H \;H?U]'Qx-GY& }&$;.d\?k7dVO%`.ʫ1 +)+ 6=}Z+}iucU4tI$Oum}[fO[j w9&vQ%K ls)$C9'? 
[NgY뚽ޡLYE #p 3F{hB02f+STwI$ (8B((((((((((((((((((((((((((((((]Um7[ռ.H&ҝV*DdlǡAf_ ־x/rxO{n|IA3okj+ xwm硄̪a[nu~Gx㇃Ѻ z֋am,ϙPvx/85j?ٳ%4}7PŹ(%LIyhXx;/}BeV QJ3oEܶ@d2I3}pN3_E\[gi?ʹk}]ծ;D|ҹokwź^*cwê}VuceqΣ릋= N_OxvV!=F8㘺I3nn$<+Ӥ߰B5:&ٿGPbvϽya1j᩸-i済թU=f3ۿ`/SC>=7K j^(׼ BXդ\)# >U (W?o~w{ZY4pUBnbK,N1-=]޶mZeF<$]^]'5}3JoNkgZɅ<=W~EEP_ 'ok_5K?{@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE~uߍ\Ӵ5|?O~4MsN~ԟlXZbړM7K_Ue?OϸQEzQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@wß+Je9?3WEO_~gQ_~QEj̯TEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP__)=]5;C_pWGt? }@'I&ōOI%*?~#ر$^SKOaEW|`QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEWw-|9LҸ[sc>qTTх<=W~EEP_ 'ok_5K?{@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE~uߍ\Ӵ5|?O~4MsN|g/,,8O8Uy`xԟlI?FQpc?~Q]x|d覣m{>gPJ2kTvahm++~Xjݗg࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~va?v_i}?'?JŇXbjݗa࿚_zOvahm++~XZe/޿wX??JŇ*(֭~?~ Dhm++~O k|>JmbPR*C2@Xf$㩯*eUj_S,&-_QEyQ@|g/p+?!/~QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEO~4MsN_)=]5;C_pPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPES~Vկ/KeԼ#f 
'⏆~4x" ͞_cv7扙kNXAEr|N<º;j ڦm+bS0]@;W Al쯏O2okgvj%$'=h.)Cߌu?wڴvzնӆ23. ?<{ #E,5Z 6pb+&h)p>) ..eL|b\4Q_ ]XƟ|>KxL.QYWV7|gwW~8|z.P]OԇM5Ə~dXUZgwv+4(*2/?4? xP|= =JvH]; +_I|r~?5x hOkë\[w6ŲVΓs2e3 ;Z=W]_#7_džW^֭H`s+<IƶuA_k?t{^f#$!GN6(;7]'-~Ow㻫E<>$#Y`ʎG_ZuMRDӧ O<8@2YI ~}ܶW'rG'tx ϐA^⿅7x" /]@-u]PP(&dmᔃ": +uۋஏ m|[y?ؓx56n9~ú_?x_Ý?U-xx%ҔiȄ2H˷Ho_s?| 'ÏV:LJ^S3PNt׵܁_[R$T _gSC 5iv1tߪZ`wkG$s +);Fd@ __%ß'wOCsW/><,W?ErJ{`e_xBV6~? ?QCŸ?jQ^񗈵<;m1Br$r"Gyoki,Å]f` J_NoZ?>%_iڇ4ύN;F^tRo /C'mL¿Wů#Up'GZK;L无ψKF2 dd-FXã>=k׾%=|/if[Y'{_>s?)?j;/? iO5&3³IdT*ʤwuį+R|;x'R? uuqvm1V]HvA u/Սn'ӎwQv߻î3ןC@&a_۫ܿbޟQ}\7459mMZl!lXlWo >\j/^$-x+3Y\xzjZUͬӍ@۲ʌ̀y  Av>ڗ2<Z_O[Zocy%BP"8o!PMiVz/֟Zw K}'85arf?>1K%jlg^ ;LƒԮ§s D~/Z´>ªף|C퉪o3ɋʙ<ϳ͏9Ͽiio >2x+_ƽK=i,g+7*|Z+HbWyKo|L|A[h{MGүb$ydIu2Ѹm8(x\¿q~*³r}qX[e./ y|(bAaL5դAeܛIK7/մM +=^xRmnd EnD'2%Æܠc/9w$j='pLmII 0/c/{/Ÿ |Seޣ˧iW TCI33JR}tMWg׋[~/~/LbH@CH!L })lȥ<[c~Ο_>}_xC%gf7ex" C0o^|qo|!z5?XЭŔ~u)cFvq, [|Z}6%ǣä:hcl"$!>V85gc0|3߀u_j;`W^")!I%)po ">$Ѿ4٥~g%O=Sy `E_b ~]~Z# W|MA4jZ QuTVw>Y#X<`/:#xCODk:(=[7~1"BQ.7*oTZ5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_jQ< _9G5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_jQ< _9G5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_jQ< _9G5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_jQ< _9G5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_jQ< _9G5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_jQ< _9G5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_jQ< _9G5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_jQ< _9G5O(>D/ ?xֿr;+iE>P|4_ZOQ A'}k(_+/ğW1.?_,%&k{MJ|C ¬O'.N )iE>P|4_Ze >x-2m WMj+kK)(-d͆*IFG_N |?AcI}A˙EBX4HU2vѓa A'}k(iE>Q\/5O(OPuEp ?xֿrjQ< _9@P|4_Z?>D/wTW A'}k(iE>Q\/5O(OPuEp ?xֿrjQ< _9@P|4_Z?>D/wTW A'}k(iE>Q\/5O(OPuEp ?xֿrjQ< _9@P|4_Z?>D/wTW A'}k(iE>Q\/5O(OPuEp ?xֿrjQ< _9@P|4_Z?>D/wTW A'}k(iE>Q\/5O(OPuEp ?xֿrjQ< _9@P|4_Z?>D/wTW A'}k(iE>Q\/5O(OPuEp ?xֿrjQ< _9@|g/p>D/|/p ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (>agw.kCmt0@];$sH݌1\9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ 
u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟G9K_ u#WP/֟^,~ZjRkrG%ƣ:K+ÄQBǚz(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((docker-1.10.3/docs/userguide/storagedriver/images/shared-volume.jpg000066400000000000000000001373311267010174400254030ustar00rootroot00000000000000JFIF//C     C    icW|-71hb=xSJ:+8D얿L2Oujެ\r]i1., }^-2Zoֈ/X*}D+ Vun1SÆ`ؤYF;2L~H B;cX3R5gF,_by契gY{#f?i3Л}sÞz}S`/BT.=Yy+qˎx![|xn}cGW8v?>hy.{$c:# Alx &6aY?0 O xq^-m1A=Z@]I n&OVl5P쁌PYvHa'|za5dbEo6UmV]I W']6Ƹ}x.-%Br {#G~ʹ lq(Q Gѽ*Ҿԛ H/kU JZD-dZvﶙ{ (Q38Nծ T1)j`1}r[.X>xלbVa9T tjeOD9%9cܭsH tX~9>Z.SH=;t`q}-dFxk/7kCjdXAzpOb KU|.#J2[.3{ϰ^gE~|Gqĥަ-,Z% 5 Qu@a KUmW\gWTQvHRhe\ V#^o]2[.ZMkOx1qNH'%-UZz5jH˲@ }Q!j30si3y j/}'%-U]|xCjͲ iר> }xn}K:ٜ)y"b{UfUd]zR_eyR.yݮ S/4wz((F~PVAtŲ*D1pXuekw7`j|3<9<l<ҎOa R]Z$mzTeg~`R}g_ Ҁe"kĀ}ަwzqhy3dԴЀ̿ 1|zcӔoGG{pXt< y<^}3yK@/8 gȴ ߣnĺ,yfAi;Z}@~Qv3~Nj@ygU&qտ>ޗߞ` YV=~[hkB֨=cD^0+oط@Z@7G++l++=y.=Z@ 7Gxe/A^y Z9/MQK溓lcJ<-5kT \˱w{mpQ͚16Ѐ>0QЀQ-GNքP kB֨[Gim& -jy5-jy Zo%ALuЀQ͘HЀQ]@kB¼[G8w5@ h@Z y(s.kB֨[G h@Z y(s`Z@J<ݮv h@Z y(i=": h@Z y(lքP Z@J<# h@Z y(߮ Zo%@rZ@J<p h@Z y(U VքP !Ş;ukTm]J<,Ң֚w+j) ~ h@=..uci驼n y(ڝ9t+h@{sjQ1ao%@:6dkD>QH{ō\~)Hz?i[snV~q ZJvuS(wc8޳连E׿XbGHo\kB\ OM뎼{z~z@:Y$a\kB`8󲦵`=?-u6I޺|׮5ѻ5q[mg1&Ѐ/~||AX-u6J%׮536bB]o*yI,{mx5?-e<;gfB>Zg|eb VYdSΞirb-@ VY t̛@mxToH6Yf*yo&Mb 0SΞK6+,O:xk`<meNk VY t bSΞ< 0SΞ.Ⱥq VY t1jeO 5eO uѶ VY t\i  0SΞ63 VY t< 0SΞ'6Yf*yCH VY tc6Yf*yO׀+,O:xrn\yeOsx*yKǬE.ss9η(`Ue7e&u,,Xx~u|tȻJJVҤI0\7:}i 5mk|i!u.lG9 t2&]-)pD=:=#AXdhbB Xݕ9#cho/JǬ%Vik 
m!)_nmRԥQZJmm5XpU-×bfs]=!Pt;0&:im+HM~ҷ:DsEc.DK+ i)ɉsAW5i{mFG7`Sni òW hٷ1ZHZA2Oqi.b٥2Q"`XaygpP,vd@u]Q5} sVxn: QZ6SQ>Z'{H8vLRrfr2@-ԙSDTJ(ZH6Rm۷Cƕo|Αu^c=ϒ,!REtMԊh&Jmu2۝_"&E) ޶KjYZV'\:խ6aբ唘,ߧO#5# n+NƶGHb쉑Gk'C8Qlj\y䖄:HΖ(x8uD5^Z '&V`R6Ip@/:^6VeG;*-]IDUXzj~KWMjڿ-]Bv*lˮ7I5)IBvVM~k{Y_f~k{Y_f~k{Y_fx[N:{wظBؾK$ؑYKۺy,Za_KôobK5,/Գ[ؿRobK5,/Գ[ؿRobK5,/Գ[ؿRobK5,/Գ[ؿRobK5,iBAӨtxi~vGeԾjw'U^}oS%6=ۛ=j hV++k{Zvxb<_ֱ]/XcK uM0DZZ<Δz]I ‚AWRCp!PUԐ(*Hnu$7 ]I ‚AWRCp!PUԐ(*Hnu$7 ]I ‚AWRCp!PUԐ(*Hnu$7 ]I ‚AWRCp!PUԐ(*Hnu$7 ]I ‚AWRCp"}<2]vJ0bX.%q.1VѨJK!lNo?;7o;CʹҞeB^B<)V!I\G1KCyqiBwFkyofFkymOˁB?z#>AׄI)v&zW9fuY+ʿLL ~*{c_U~kW_5{v+m_rs u"DدFYpca7Fc|[ؾ(}9bq\ڡS**HmzT)WJZܬ+'JaZ ԧ. !Q+qwZl,4#`qks;EL3 I."j+;|_MmtlM8y)b.c RC'IE!kn᥼}Տ9G#W{yohH"GC^ 2(q`^} f9- nFj÷M[*1X qj8Ѹ/rhG4Fz.yZn-k{x7ٍjJ,s uc v݄ߵu}.l1lѐ&*Z:h]l% Ysyls _61-@d1cԈv+)/#l F?oO0A_d> Q-:hFe`#;sxy<2)  $bv3ݞhP5d6J}A#ݑ̊f>4BNa5p4)Cf3T.Mv#x,Ƨsυ7 $:>X"9f]XC{< X‡ixY;ӟg{eڝo <;Om4,]XC{<җMNN깘 Wٔ8 -7 /;6R#o؈hIUg1蛁a\ i2Gw瑘_yi`]r'ىx.|!=kXMV]E=, Oozo§# 1g^43Ya9)w瑘_׽{֔|k%bcpq 4D8)9|;Hdy |h$2#j*=imMnfǫ_F ִU FOg0[dG?"fʌ2c|[ڸn$W$z" c#ƌ9˫svG覌9y8 iՏ9E#jojmk_USM Pr [M. |h.G.\+8wZCJ%T:.C*Ajtg@Ex1dXG xJ%($ gGyZ+ヌ<6XչjMav^s uc vMuMpU6@eFs,h܂R8#ꞯu`6|!eGc Ow糒񉍽gU6)B،7fE6=:iz=? Y{ "d|G1MYh<=Hv831C{yo@b5,V_nGK'.wqs5ɢkuz.r9 7ӥ/ ^} eiA#ݑ58_Fi#YHOvw3k8{EDsԓhYb]gx2aLٗ<ӽ<NJTDf$ذCGOBDAS9˫svGPx^b>˳O+DE ;vN3+Sz{a⨘I̋^ >]XC{=hC;dd" ҸW]w瑘_s.tNa # 'l ixrl~bs ucvQXUFc|[8^zJS7eqy~ i)yIՏ9G#:9fxl䠜ֆS*Cr1"Z8eb0BZ! q#1-rBAٞ ~]ҡ|A#ݑ䖄:*q [mEF2JfpsohX]hhh}EKBi,1͊/(H#NIc.NfiDz2eBƞta37`zJr"AR +9<l0dcY0iK6Y69 j`LS<NJ[8XKxӘX6Qvy,[Qy mmmĮs ucvy 9xgAZ~Ӈѝeg[kIjF*0 e2q9h qT:#+ԧ]\E*).)ܽ{Qq%v[nsCiPM[Ox;èRQ2؜wM$;Jfcc-*?=z?q it\@zqnG1ww,[Wƃ+Arue|h:ܲ4nY_,[Wƃ+Arue|h:ܲ4nY_w,2JIYCY7wUfH0N*40G5O#Y.!2lFnIM<mm/"ɟdP60Q+"- L !z@ND:KJ b B(}IeM$~({B8[6y8\ݯ>]XC{<ߺ#| W~,9̈D{py mg>ZYMMOɓ%(u%sz9}(f+ܬ5 i+PгW4c?U]u%']hdfn"28 bWdٍJq)-SVUYՏ9G#Ox]-M9>\R`D$6zLc[yW,X9vņb&blCq? 
n!04]MS8hBї8cO#jzǯ%~m_ZaQ(eۍ) 4\d[_ڿ|Wp_rs ucvKTFhPku䙒K`z[?h)Y@#$,tIz)3L%ᡄ:+MCIj^U^NYC׍r-2ZMNOG(02h oHp4zBѸ *rm{gf.>x{ui|`pmBք]]A%BKQJmiW9ޱVP2LYkZ{}P]?C$YP]vt?.:] ·Ԙ ǜ-uզ9˕['CNk[cvZ2ZlFaxDE90 ]v"$& 6sw:n02.38^<>xZb$! U^#Z b ƞ.kV7f'y:/ ɖCq#1-Qs7XWE*덣2_*x"X~R"fx)DYf#7!1{o$r(t.]8܊%X VԌu7vs ߺ#z!.![-:ڗ{]-2 Fq֙ enֱ|HJX$%u_/ ]kĄBWZ!+bֱ|HJX$%u_/ ]kĄS0B$Y_tBD;A˙drSY $kMX.}Ыݵ)S\-jyCZ=\9}("H^s:Z7 YlJC3.H݋AK %$c4Q6,ȿS6vs ߺ#[Fɺ6AE]rr ;6]dE9WA_7G{y9H2?C|$: ?C|$: ?C|$: ?CF GѢ:Yg(VNY5(OxV͢[y{?Pb^t z2g@Йc?{ٮu/]}zUu_*/]} c:@=J]Lgݜ?5=t7sBrJĘ^k( L2IbEF;$ !4Y,5b܅:(y9gvJyMDIߣs*<&rFt{1.]>Q[mk@U W~7j5W_:vs ߺ# F0{nm#&s:LZw94 #^El+CJbr5$m8#<$86G^=z\JnԬBЦ=&,C84 laX ȑm=vBlZw/#=ӲԚZ p ^?6SՏcK u[#<K\C+ͷղ${5!JRUkYJV.fe!̄ ]JRFtVF7эi̅ $5:#IV%эib莊-O%ċ-˺)rs{Ԕ,Jbqw:bVqM:\mB'Y`dQǦ+(q8vEzb"=1]GȣLWdQǦ+(q8vEzb"=1]GȣLWdQǦ+(q8vEzb"=1]GȣLWdQǦ+(q8vEzb"=1]GȣLWdQǦ+(q8[2|)l8#6_IC7Rd 9Sb2d^Z\&壡B0rFj-* m\kJFM)m77HGy:^F^״šji ck6-~u{.ʲcxyH]+J~jzs9hJe-6h׍m]?O(1(h53ݣ_}sP:51@HvU%O s?;vܩ~qYt7 &xj^0d8-VCr qK0'BXK8t AHsҤäkwqM~-."4Q`KuS"a43N>—[qRc,K3 w䤆HՖND -(LHHuv:Ylv6Fa==ll@w"BROɱqUdMd,=;?(5*Sl9m̑6$ t \lsP:qiXg}1 u]Z|<ϪLy̞JalOl fD0jj]3<2T[]7"'dn71r0j,S(<;Eb78 @ECŪN4c |v5!TF6<) hZ!򆤔3w>~-l뮭uؖ3))#R20')ES!1AQs"@aq #02BRbrt3PCSc$Tu4EUp ?wn,B#e8eM\uW% > g0 VEE`/'kXmkƼI4Ea[m1i57ZB0fW"H[6T 477Lf5sVśx[:[NTL (mԩA'24ms;-"7g*ah h LyzaJn}$ȝZ1n F}S\[T)M@M,ɴ¢USg09!eK4nպz!9umsFɸ1kn_,ȜE@rW0ܔAM$ip 6LBq7^8*Sm nڇW6uKÉu ZT0Q$8"3TOdviYciNӘiRT…h.J̕*- obSE1(1baŸG~.>ml5RPůEEK˥Z k&N7Ұ[ŗ eY] .wECI0ɫRAmĚPBBlbQpHKS[Fk:(lK% _(m6Pi@CbT̪)EB-ZK2]tHnPz"ƣDWvl_(2UqhXiZm“~qt)Bi6*^rH ])hUoᢔl| ۭ 3ZR{n'3G{cVPM}-FZ4~zTL.7ފOr-) F.&<=1Ԫ#f0頛}T}&mܼ,Y47 +]ǎ+gAؠ> 4R Q. 
)&`Ѩ.&JsLy1ǓatUhl~qaZi*Jh[D~15 ATHZWqNBIAZF"aYF/wQ&W.YmioSmHfƴ* B1>B.s Fa>!dߥ'w3(Qׇ4rlQ(OXq&2!_1]Zi6VsVyb<X,G#yb<X,B(D\:ǡ$0V}s$)2ڣ39+<X,G#yb<X,G#yb<XU82Y쉌>0np5v%G$PZP鍑鍑鍑鍑鍑鍑鍑gz)te ]rPjHliW4pF=ODlF=ODlF=ODlF=ODlF=ODlF=ODlF=ODlF=ODlF=ODlF0Kd~W{ `q6#M|dv*ȟչmzcP3YW=GvJP eʶ(!cm8.j VP4|k6bfw.4HLe-]lc:J^0e)H3;ZdhQSUq?ͫs1R(eMQ9Iͤ (̟)HijSBTP_i;JTxD8̠B q]Q:d$$Z9ĦK|kLU*EFTL+%IJټַViDibV0d }.Vz j/ڰ@&cy3Sj"Ky qu ]A\9"R:qJ;U۫UVU{[_[Cve:eWzrƽv&AA#5;*#*oOҷ1V^hhiMT $!Ly1ǓcFX5|wZy/6+Ne}͗85q_H:45*D%Y}6h.)UBSx\ :znzh}0|5PU, ަNaGܹs^;f/ǂ*4?"Xi礊jRhUD !T̡<A{1JhFZVtdPlK'Y!wCcF_I4T.%7G\&4`w gNaF묙7 ,ub8 Z:r[s)f;R4@pN2aH YKl wS@XCeobꩾRFat)~%`}ʟP|cP}M_8")oa)#;?hR"de\ĚZDSFIFNJVi5saevjyhi*VueI.)UKO ʒAQ<4%&UDκ灦U0Tw4ȘlRcJ#9.N_8׾oH#3aZdM_8",sJ<9'F,̣J?큝2=B4=*R?0S?Nh|=IՌzfҥˁu5|w{<8aCY?"4#)7U,Q;2q.WMn=b{ ;v_8׾ N-s^˚V>NaG9…DK6M f)Vf| \ zH̗ūԪA̤*9s^8&éKMxKJ7iR:q;DY,˗M^x=^ fkʥLagШi0ƔML eпwY85 *l6e_dQ)Nrtt5|w{<> _FɾF fQRZfGCSL1kLAV}c^;jE)F#YWb z"Fv5M@yE*RJ‡"45Ш$ >g{O%A!M`@HW7Ʊڱ|D e:;=׿i(5|w{<R e*<zeSӮ( Q g*բ0TLtouQ egdܗp9֜P0s,+&r$B3XW 4^`gsZ wK~@IfCb] RPzkkenR!-K\zi m&H#yE?^^ )Jˋ"*,>4ŸwF iLK !JaRQ {c N&alBfΎsrY%^VoI33%2gVeR+2&ԘOF1㋵0N8W)w.xuRD"<"ŕнĤ`2W\dܚP[)GB]>&ǨP e+y9t%#I:c0F^O>4<+yEYkDX׋8 # K;JvV2ƽu lT eG*da%2nIRSvQ86[R=۞`}M_8",ķiGO :#ZcG,MwQ Lycχ|>ǟ<}0;t;c J̿9q|M=iti^60wiZƴ|yEiOn”ZC n( TOMQDWOhQ!<Ed(+0E-8I)w>^l(̢G,NMƉ䔒w YqmC= 6k'5/W4y^u(R!Rml!*:J{+ ο.˃cxi&|SN_8׾F3k?0,tW|ľnIٿEsƴNaGMN&k[Ǘ1\dljZŶ2تBVP hR1#{V-(Kʹɔ ہunڑz(rie hnLe%l0͜hI(iĊ1y|h8WYRJV8yC JԚbH7g0mՊ%:53A)˕5nmک&62MU^uXO<4c?XO<4c?XO<4c?XO<4c?XZ͚4bJgB\ZBL$Yy{ 80SZ{#CμۋEj)"pJ) s\PJ"r R)gr @DjK<"2dv̥es()rBP}cCqqjɾ$A"0`8+uj/_ bVt,xI7lҴ*0x )S4 s:q;DY&_,N$hwCedI 4xU$ yU2&e-(S?2V[y Z&AnR /NtGX~ѽ ]m72.yEeZEǛM+U%rG)|96Vݫ!M4d{R~YsV:-0䤴Қ¥iMYC!i0mn|y@iZrjpWHSvt&,-@hhb^RQHS"ڒRk61=_y^CKp;( _ Z 厴 J)OH.NM4bZQRx,4TեUoy:<QKcCvR[6wSD"c1ȘLr&9D"c1ȘLr&9jZV7S[updڭSSz|a|m1(g665i±iX\錡jXmYވdBE*d94ڗ|Z =fi̔9Z~(z]2]BNSS u\bhU*CS)u/QJ҇,eŇA@ֵN!>U g0SIqVqK+*JUg\yeAQ4`0p]!VJ YhQ{ t;FR2̿9q|+IDD*ȟչmzcQJ kBA^*LT7m&[eƑ3,- O8[)Z\"I32D)TBoJ)R8ҥy(@YBҤ 
ҤFT5|xVLtY)GJ@fX׻̣s/Np*R6鍺zcnۧ6鍺zcnۧ6鍺zcnۧ6銃a@lj#*oG" R /Tue C4>ь]uM)BBj/xBfd5LhK ~qxZU$(bVa|qԺ4>3mě.9T:ٸac{N F?(ަO ɗ#r?߯ruj9ӋJHYZ+-!fBK!WC%aN&]eRhR A Ȓm jU,rkiC'w)oF_)q#b#b#b#b#b#b#b#b#b#b#b#b#b o Rx%A2%F*׮;1%de1cJj*4N5Ÿܺm6ӾUȹ J3R"[]Wb~h=hc 0t(,)mZчgfywnqG +yO03LNL wfku77_m(AQ*Yʱ iOpòHRU\tufmXQ;Xymkolj `=$*3qg<}ԻrUu=u)PLrHCM;- V dbWq5i o&T7rB\R­Wƥ)C qN)]"ˬRB'Zޛw ' 4mZ Bh{V%bv[kQ%%<@)T+5 Y2aE-R9d1fݑJDI7IiWpmMjQlz%Y;(q  I 'uyAAVORe sbmMZ"7,^"$vHPVXXraiM @۩y q5 ZMRPa($jQݯ J>J4JԻ SB("AyMw%l*B4,[&tQ1Db;B20SX:dѠ4@ lpME)_zR!u*a;i0T*^Є*m Ib!֢L*y MDN5 HV7FFDJ OD8?EgC>:iSLЌIϻIW 7+5S 3.'7MIIK{+.eh[Z!@(+,>gMmV"ы"=eIE4Ybo4Dp6 d{-$$%.K5^ ;KJ+&"%88(ߥE(Zi8JDڦ޴ qflNn"J s4 VAs_\nH%<)*)Xkhߧ@ W*.Xt|Puңj)$^PK)j"&x_E_ˋTȍ~mpŠyR\Kj eA@Wi}a}a}a}a}a}a}a}a}a}a}a}a}a}a}a}a}a}@S<dBaRXKoGP렜7UǘN+ 43ϫ֯fbZ*Ɨo^JYrf%?ɸ,/hjp5t)=B (PB (PB (PB (PBH.(D-kӸ @J̡]Dr;cX=,Y@au햻._`bWݫL`p*3)L=iqCLe&y91:W}X/.|\ֺn@!XzZ`hc&D4ɾ@@9>o|hGmwgLkK%n=CI0'UNp9WAn\&PSl\($,]/="Xf4{5HI(zu)D&# vnImU>G ޗv۷nݻv۷n3AۜPz=b:b9TGhRˀ hX4o-Y ^sƋ{?֏{λw{u`GۭPg2{ڱ@5}wz2kR~bJ%tVhށ#b ?L"5ҍ8P΅t?TpweTzE)zJ;a̋˒MY^rkMN?wH9 5v SjC0Bbx ,\Al[k/-?A{%tSrЙlG^*:4}Q?E:g=0ղ\"䭠Q=PpvBQ l D:ѼT#&;m7[  F!=J2XaA tߖ.\j5 Lߛ 4neS]٫GcuU;F mgOc,nJy Hi4㺩33"ʆ9zR ْ 'GJ,l wj1LP%9cC)Mj<Ěn0TٙFsFX/4O=(*?4}L\GBdi>0@^rR-3A2 y0JL)N &H |#a"Y0"3&3D4aeO*- I֤scVҡAQQ""ç₵^2Tn!&"[0uNv>EYY4xn*)[ՋJZXg" :Z D'IՉPmRp+";/ն虍\@ * o)+\F؛P0\-F+n`h΀,jLֱ;PglFdII09t%X;F߼ch)J0fÊ!yuh:"RGLޔ r~& CV 9/tκ}NѷNTi䑳!pŊ.ѻAL qxdu4±` O{Yq$ƎWU}NѷtWҏ 7TrmAc :++媘:5܇O~?Z@Җ(hk<p!v$*)$`P6 "JK0L# X<855HԹJ jb@ jw__|o][i?_Sm9jũS%Y܄M "k ,I& 4Q wz gQ p!bQɂ(]~|Nv6ˏNMbsX,_26 C >ܸ o!;F߼Hj1P%7i D֑N "YjtH$z_(MJz)dЖR B Oy'xA!CHP\,VyUU=/bph3]kh"ȍ΋ʧv@:`A#fK Q \[Izwp'v~M}P^Z G柯6uhy%F c oQP3Գ&BHZ^3"A2RHj@ VHI L1tD(R&$C {)% ]jO=ЬHf>҈,r,(pB`N<~ẻf߄Ƌ*+wr*T(PB (PB (PB (PB (P>& {0ex`ZCW'%`=_MClj<>u&m!˰u0-P۱$7)6"8F$Xb0bsWps)ћ&9𦙁ƻe\͛9R:X +"mK1%* R5 Lܧ,ƒV: "TR^ҭ(%2Qh3dX &njj!A$pBxW`ipkA % Չ& z6KkER ZV. 
D:T#$ `V5DNJ EbdHdKZ&)Z ,-B D 8&&S3rr0+LyU1T #(qfb1L c zwݩ̧)@k"!̗D\f&,LRoV/44%gW#S&@QMhk EN02<\ĵ0 ,OQA-q' E)3 }ЕĨFNW*XC3y'B0"1vEsD,[E^'"ިE #KKmqiastkZ$.^4P-r?63@!14 02P"#5A$BCQ`%p  2ZJ§xH>-MZFr-9Ϩ݁Uecxoհfr.Tp.rZkoOׄXF3ZL2 pZJȉنD23lᱰ+ p[`,J:F?8GX;}odb#fpX|,9p0/;;=>aȘgZ/1>voTgXc`pxxU5+]^k¸v-Cd]AkJ[NBX%~5 ʕoKV%4w⦆!E.hyOT>Rҟb/9sc9I-/8d{XRM&'$pҧ`XױrGh.Ĺ!# cxd?0hrC-Z9hrC&A"p{r3c٦3C-Z9hrC-Z9hrC"=uS6C</7~#]ŨH[%V2YlM{>22222222}?&k?O8.,"ȸ.,"ȸ.,"ȸ.,"ȸ.,"ȸ.,"ȸ.,"ȸ.,"ȸ.,"Ȍ>Ԋ_sZf~5Yt4^ϱr?EP?OmB4N^}ǞajjeMI"S$~=Q{Hլ/jc9b{Vϡsu7$ @ q8 dlMV-.?$gcR=r-r-r-r-r-r-rM -i\GYPcceqlܬ ?=i5C-Z9hrfdҮ{Y~Z=Wa*6hhoe?}m&:z8ib#Ɗ\|ldRK\3 im ]xJԮJWL(w^^d{=I^ح9Ss&át)fIBHhCv4 AQ[#v>k%ٸ%oh5wE͒Q+doxEfY_^LZ:, f#dJSbtkmp|Vil;tJƗ3Aeo5[V|>}[Dž8<(j͸}9q/1dXVmZ zֶ ;[weUm'kBNg5Gw蟑-$vzLǙlVwv>vJN_d;YG;j#lxt5< ^vf5¡e*#\s9k?@+Åd:]{\94GA`' 5'Bsns׎G,DPp>Ľ453p7fz}C鹍w\3,wg/A/gcz}C WWۦvqewg,nv? Gw;gC|3GÙol4%ѱަ FoX}7cK-kPzprxf3v? Gw? (4'xKd{q 04%xXW2slї-"p;zi{;;tJ$,KcV>w(]kP$oAwrv? Gw>K .&z95ٓC?LO^^h]քbp$ hh?~@(} s`=4%?H xcs=4%+KZ,ᄹ< B{2鍐8(Avn=4%؝\sF&iҸ-"bi$s `hvRv? Gw>GKcK-kPKcK-kP)˛tA@; zi{;;tJ1H47vv? 
Gw;9re?~@(vv?^ĩUuV*X:_`|UuS-H#EKcK-kP|m 2ܶ_5;d٭P+CW vTt,ɞ Hj@ b,Sto!Z:ǃ}6Rv;6lX]~lv aĮ>7Gw;9sӝ_Vdw2U؊k$Zxr^xArk)!ղcXQrH#ŭ`k:q8y<|5qW \|5qW \|5qW \|5qW OU8~ % ɳXUffnL:a6k o7%$aFӤrУ;tJ=@/A͘[0]n i0a¸G):r|1ɤXyX-4XV,l8ֳ-2E>N~!K>Bz3F ϱI;tJ=@/XHt de1!/A֕HgK'NV eؗ<4a=-D±gLV#Mr?%kPmcͮgWA  c2tx!(ohmJ$t`bY`ռ*ى}O+W\º0u+a]s W\º0u+a]s -UyT1Wjc)9ĚƺĹ,S%$ŭM#e,I|E/%$"Ėb-Z1?E-,|Fy䉯+tVKrFwD\J% 3I:N!ttmKnVO_fgt4RVq/jcعb{V{V#Nph̗r-x'$ej/nY;l*zyZ4TY̏0GCgt4T7X^%,/ axK X^%,s_,db7xS1{[ĩMlK~[gЍ9 sIsAZ 3A|Um'G{zIwGt/nkH%H֫y՜o/A6f>1n.cqs\71n.cqsˆ $Ɗ-:e27H-5$6*IUZWOgt,Kg|Sd+K0duY[%YX,4ea=_d4GG{zI{)yxrjF=ZˑYRpdKhejؘ5}wx7Ky 5.Y^8i<12l+IG6ŭ?`#({d@mmW>%mm)7voZsz'wA9 6k\inc g2:ӻuLx, Zo5Dcp_r&I].<LDsZjq0LN_(fP!UUI]cZ.VmE$\6(rjW#H?/1-vqn"|L78m (-m%/V+sY89JƦw汝66*|(Ke.JOآ;_bQAآ ,c#`33ŧ0Bq^1z/\똽sb^1z/\똽sb^1z/\똽sb^1z/\똽sb^1z/GWO> Q!1@Aaq 0P"2RBbSr`p ?@Ǧum cVЇ-j7e[( F0&,:`% 5UϪib0; >?'㸸O<.#tZ?awT.61Nh *n*,BG DU*7S6: {*'#Mܘ8ݰ&0p `p& aX8 kSQF9ّqB 9-u튣#T 5-j; To滕Wrih*wq}_ĩۂo?mH^{wpAAAQ*84!(    !_}%nF#3m,'#/(D9L}S>TϪ`0sdnp(J%DQ(J%DQ(J%DQ(J%DQ(J%ܽO-5+5zJ})⭀;G.۔ݛ_O TpR7/62~TLPI(/٘nYc46TSgeN>ʝ};8*vqTSgeN>ʑP5%\_Q7605U sGXOOڣ^!yCYAA ܾ]ٞ,F&VZǝj5iR7/6C H^Pژ8@D X,h3&0p Lh]wfxxL }?(jQQ"îXnFGjn*Bi2 _L6k #Aos^ҟ;XsG0QyiF GvAȃdT/'2EDa<!^kOn}+ o#NNHvh"Q/;Fp B툔+ tb464 EVXvIs4k'GG[:*@'& 7}gL$*8NRF ۻj?)_!4uծm Ak荀^..&cYU I?6SOhTGԨQR>D}J*#TGԨQR>D}J*#L#:c?6# βuSN ?ÅKᵊZho-E*$ EVuN̦m0+mi*Uȹ橁ꙝ7 *}if8[$ڣ g*6$c[%k/9e3~`f̧m{DffiN1Toy;3JdD#شcZ3X?o_ģ܏j,dn*Bҷpհ!NFlٞTՄÚ` s*fFW6Zh."$Fdlכ`V@0@@ƫ# Bh.%Ց kA5l~upl7XZ$70**!f+|;yo5÷v_k|;yo5÷v_j4m=r(j7!0C[o!/(U"F.mXTR*EH#wfxiSSFj;δk#o} :!k<0GP@r( xIߙ,F&Z^A`Fbj0s9L}S>TϪ`0s9L}S>TϪ`0s9L}S>TϪ`0s9L}S>TϪ`0s9L}P˪1?734@ !1P"02A#$5BCQ`p*Q$e%/ٙ$E Sfr\qXX%RFVݵ`;+\:;u:eJJhR\#&OFNsYE~YʼnΡ4ם7Ы@QDҁ4_񞐼h[D䤒V/qh υ)&|b_)"m/`:S4' HFvƑ#F4i1cHƑ#F4Zi^)ٶfQ2qbD,HƑ#F4i1cHƑ#F4i1cHƐJ# B:(3)E9H$̮j 8iL6BA@q~@0smƦ)i^I3ѣMSc)le62MSc)le62MSc)le62MSc)le62MS`Iz5)ƾ-*I)= &XY=bHS*]+#h1pH1ۨy+  J/9KxRDDV/g NJ#bB!RYC6>;$G{yZ G,mA1/* >c.iGRR9999999)O4NKrSJBDB%-KdvTd46-I)/ f f f f f f f m\rj<3{HIJ ([Gc3cm3D 0“JSKJ-g"ܱ 2 > yƕc6’Z 
b^Dv9u;h:uːpF/#;\>޲;[RhzTqHflTAI)i%%iyE5N2̀%5S՚e B"FR!>wl-cv'è( g)PI6+$Avo@4S/b']MI[Gc3c/z GN#e4:u~H%XCqM/4=E$yɇTz$ #voC1[؊ IĐ6[f,q&EeYzd/^dv3:; )VN-(JJB,,-k钔B (?9R^ͽdv3:=vo@3/D"v&Ҿꦖ_޲;[;\ }^q>Be^Wgeo޲;[;\ }\Z} K #N; &^Gc3cgk j/B v2Pj#N]έe[?ehyέe[= /hJIfB?7gVDz-uHq.%зQXpg,%Gc3cgk nLdv2^Z$4$rT#j=ڼL{Y.0Kfm d)$XIhŒN#=zful{,rTzf3RdDԬG7gVDz-uHd"5}zful{,rT59 v-A]zful{,rTIY(lZ82A(QfJВQ2N&ӌ&2/&Y. uI+uX ^N(騬Q&Y.oYέe[?;fY.f%cd-M|Ҡde7gVDz-uHdF J?3Gc3cgk`_oYέe[>?x#~AٞS8rÔp3)9LgS8r|Sdw ޲;[;\ }AFe 0%!~}+ )v7dAOU&J|n%%s'i 2< 8x48CzfulvjZRWSH-y.NSRIISu |]Rٲp+ 3fĵ%̛5%iJRqw"HQAZk$(jKm->d*x繗(!YMDdFMDdFMDdFMDdFMDdFMDdFMDdFMDdB!Sq\:qiJp-R6S4>cVyTsT=(D4IUjyDQMSA+27Vbz>QTj>VН[>3HXXXXXXXuoYѓ"raԞL9*Sʜٸn|ҡTuDjԟ_ch (uÁt2ˉv$ UQsc1|K||BTJ"2;\ }fRҟ3z \_u–ͷҥa.#i4mj A1nzTK).(A[Zѕ J66"N-uHl(ɴ$w2ݱMVH%,a53qyi q$pqWiKᧆp"3p>8|p>8|?Kh?^h3!$f<ҕRZI.%PufBwu( |v?AEQEQ?NL+Ixgkd3 /BN÷pM l  ;a$Mj(Ä%SI4`a);Kwszful}me/K+iT?$). .~EpD`d3$ ' >!RB#;\t#-:MᶝJsJ_Yux7Tw7rҙL㋎8㋎8㋎8㋎8㋎8㋎8n2!q8inZ Y bq8)Rx)*TVpZ |7v;Q  1 ;ainxnZ̖Io`Nħ?G;dv.eN_r /9|A_r 72)¥:R'jbťe5Lm@ieJ 6th]'4sht飝G;wM6E4f#;\t#-MqREA&G}5R޲;Pa.7$> ) +TRqJqDqOI*@G X, 2!b"^;ainwl)Dńg"A%ĨR5R޲;ˋ? 0ÏML;t6wIWY&Ć ,M)+aߚB['wEF7 +OsdV$5MWf+`i0p <{ibc$"5+IN Cd ]niSuPv >9@u}!F9@}?fEJ(P?N^9z/@r^9z/@r^9z/@r^9z/@r^9z/@*{_Ced!AQ!1@Aa 0PRq"2BSb`r#3ps ?OOyN 7w8eGsF_Q]:LH+cj=~?-1A*G '{xGyx~#1RwwC" ]F'\-,s^}E;\}8n?ED+?fC>0T@1r}o`.'/ L3`xKk Ш*z4K@6Eb}IOXl_f {1<*fЍT'=%R:N& G^U#*וHq!.-`0005&7+IxGTkw *B~% 窶9Ì R9;E^Xnh7 7uPp@TP>@TP>@oM*M#:g{d>3݉nVk;h((? Gah$ pCxy*ONT]G Zx*W~R:d4L4}BIs;n4Ǫw=U+q]UJz$T#,&ynþ?#Bl1MRGV^cƉHfxHw̳v'8&k ΫS56UZ5*j{%8u([l3 3Yby*W<ϳ,&ynþn 6V*@Tg0&&F0sH"lT̆aX8@kA|=AGyaeP)g̱|Ι<T\Ho6 ֭YH ʠh35q%l#1t; ݆F45#jz \7zQ|jPGPM|+xT3*Ff*Y.M Jz P$'<7aд!>Тhl3v ,R '^CGaYgMt=QyneUi v~QM !<+3v ,&yneqa v}*žKwO#x,vϼm3v ,V d; ݇>Zd; ݇? GaYg33v ,بV!dP0B'5Cpf<Y6+tngf<YL; ݇? 
GaYgylS3v ,sn<7ag<7ag60`4_MED4_MED4_M*cyne#ب(QEjUDtO#lf ^A`JlU+Ca& Qp丹a / ߭xB!{9CTU!aM#詚B;gKhGo]kY4 bPy5I:Ur$Bv?_#axH8K# ^Q41ǴfYR**cy&F(@kU?[%8 -נ}Oak3;||{2j1(֍PFÚ4w扄Ud&Y<0 CnJܕ;*wrTSw%NJܕ;*wrTSw%NJ:63Gx(N(ŒnhB c}B *B ;Rjgzݣ~qS 3v{wALb'|G[ϹԳڥD)g^ˈ5-1<`"pOxOxOxOxOxOxOO2ķ1yHgߘ);2j?z7鞉M3 -TbF.TbF.TbF.TbF.TbF.TbF.Tb0p0k &_aC== 1z&zo^cO Tͼ*f3o L¦mS6xTΈLb;2j31f!ӻt<}9\m~1$=XL^w$ٞwhm]ݏ75=_2;2vGr&yÎwyaZoixZtx M)=mo= OXf8gr>=A>hjԓs&um~>R.6hM ZX]د%bVrcr~Cmv<S}b-6vByz:sJuz0F~rݒg63YB5)WlP\VF6_^:=8@`Ußd W\< uFܟ9Gy(FP=y7} 9O߆*%оIC{vW'gHonêh=N#mхW} ^ۆ![z"j|U[O~}t[+Y6D1QOɇgfޚuU]w[>@=śSgf Uifãg-]K+ Nu `ϣ\XEL"صgy离g_}2Vzo@J}ݣf~$c$ϬeY$O'|Snq"q6:4r9jiS|ϟ偗WݚpO 51ngBU>shrgvWfTOMoUsrgem>lPzmzn='sesջw7Bݧ$FF9<~Ȉ# wjc>F9c <@ouD;@tLB`:[Д% G8mcF-6"HuJLP&  @` 0JL`&0(% 0 @:,&SXN,6a貍xg9LK` # ES:;@0NfD&@N$U:}: jksY7٢PL f$u ` 7w_˭h;@~}rmĞmPmq69c,r5cŶPm!Y sB2m@ZmPN@ePmUj c<"dL2ίPm|JY#62[6ڀ*PVϸÀ??>籐__qga}|LJ=J@@OOvt v/ʣ}?Z}[0~zy5vu{ |۞u&y,{ni>7&zo\gkR);6ja6? 5tF"i@ -A4|}۷9=!ZvmW}^ Qj<|u۷𹬿>e_ݧ?&xY\/Q^ߥGx纷:|G߮Gqvy&6o3uhR6GH?.;\ZylCO4wx^ϧq@]BT1㏋G5hkrtcQ&}R>_36'^rG7˜;R듟`l!#?9gڭ>2϶K/ron@G?iS |'>ai{NR:<om:7.olv9l%XcCF& iŵXbe2Ę&57HӲ@F.& cWH (۰cC(LSS8qLֈD%`!  HH H"@  D$B@H"@H$ $HF "<ڸ<ھݤg;%@ 7pvWճVYkٯ~{n{ͫ.ͫ.c ᩀ.]SgDά#g1t Ow^;fZ4r;0jf gў Lr\+`:s*@1u}pr:| 9ioۻCn𝚸@f߶xOˢg^RF3riZx0vp9i w`r۲ѿ<0K {rՔ6h-??K e3l}ypYv-<n ?3 2 !"1345@P0#%C$ApDvo/&*^)>1X~'xu#6oCR/ggBf1bر |TUrn%g_ "}?wf!}Bg#O/&lxĎHOE,i"6vQ3%"Dtgq` 33pVi]qn<#m(rco)3'vfZb4(נ(J:318{RaTxyЋn?KğJ<+r0'&yHe;?pg"ۤQTY"Ř<)iKa≢ixj*{W?"484q4 =+,wӗo((ݸa/kQw-17a"B:$81chpO&RD8ͥGWc#SMގhPըŬ٣#fm)?sr/_(Ϩ|~7 `?&>< ~)0|DȂi3/.xF0E*}W7""IN;;4yHXP3uRx9hASyxsxϏ6'HL()}p}hyM&w`QAG<>hL=̱ߑ^&[W>}I*y"jCf!f||??|tأı,,*%R ^%G?Lg}OhH1͗ p^7nY&cԤgy1'vOg'\‹7$??xͿA_90Ƀ /R3acIQ1PGQlsqqJxFW[hGYF"N-SWUjVh/% 1Fyv3e)4b3$t(k9.o.Y4H A#ݓemnT~_4K% :p;2}2I˚2ĆYJŠ=$w㽯JM:,. Pss&h#Fbh4' r8MOD弚*[f{SXN  J.I*w&0&ADXR$9noyrٹB3Q Ak偬 A=ӻ7͸F; B=.`5" N(=Fes2{'8J*XD-B˧w]N2d!=7͸GW; D8tšΞ;ˋ-Jl[~>|7^>WdkV0+M8 ưhjUz+0HvrE/#a lx9 ـ=\sxJhc`,|2 XHdffn g"G٤$\mrzMxcY+91!(zʝ3Ssy=LD Xl|5݁`MBpsbwE? 
O^~@>3&;/+Bd,zKph* pp cϢσN ^U>i!_/>}.=8{е+ø)H-@XcUb8`K~tB5h0YL'1ڋ'+!rzh>K>4n(sL[*(]X8ldF:T@5kQvbT(jrxCorxXB&%G,aש+ ?&mXs%+T+EjUgQՀ"H½Bzx@Syi3B-!Xh!?#Q 2\~MNBVR1ԏf*S SaAgQ7l+%d\"cI1P$_fyV}~jD>qUyBGBPj\bf[8ÄVH$WZC- !(ZP\*nxǑ1bՅTѭVTɉ*Ҳ7&Iry?џ㩔@F˖&cDXYkZ%BW͖ǚʳǞJMrݚzI[caH"#zozdg8;?&g26Hq\YO℠p–cU1q#ڵvP{KbinG@q9RcJl'D$xF,ˋ|Bm]1*k+l06 [^ jpcn9hKo'k妳])չcq!)-ZXs'UsDmHNsfRaH(#͇iW6I23brmPKRD^8ڑ$ӈ&qV(>C#b9Iq!)-ǰ/(@R ~$qc9-r>/!R6ɱbIr_'6KٱRY2c1G'##W2Pc=^>#L,e ׷*8|#8Ck*X|k^:yUi4sRG9iie/cqlKytTrҧ&K&N:Trҧ&eUsRZE\`,jjٖvqoW-y&HEhG!#= طb+^٨|=ɒ̓mWo|Yjkԇ.ֽSkױO<l>]κVm֧ev*6,HB@vgn\Ijbt?+ZoعGnvk!>qr[Փܭt\隭3憝\+0lҸ߭g7P3䃑}<]*>.2'W@yW/Lw#c^9̓ͱw{V]o7L'U, " p|)Fy78~elo|8a9b`8 -K+kqV!Rֽ6%\،sJ1`)ZOkY\rqu-\rqu-\rqu-\rqu-\rqu-\rqu-\rqu-\rqu-\rqu-\rqu-\rqu-\rqu-\rqu-\rg _+XVaZµk +XVaZµk +XVaZµk +XVaZµk +XVaZµk +XVaZµk +. Q^GMg/}GyU5|?#}}Tw `8`6DE f#G0iۉ "*p δ .-dDP:06kע¾7vA#7Q^GMg/ KA5fL!RXvjX9E\a!]U؅ R]eXf<#xڔįZkGd1 P8Q~SYa#7Q^GMg/}GyU5rn. Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Ю Dc#}}TPp\]qwp\]qwp\]qwp\]qwp\]qwp\]qwp\]qwp\]qwp\]qwp\]qwp\]qwp\]qwp\]qW#$o~8ӣUSY!$$lQy^[GYZ$X9zvcny;F3qJ0#Գrz"F % D$H20kG9UDR-s9W퓋Q*F?Lp5w|q]Jjs[9n[9n[9n[9n[9n[9n[9n[9n[9n[9n[9n[9n[9@,SY//5k%~>>k%P8+YB5LZST֕f!0uZU5f9+ [zrӀ]6 O]jrk6c* הimX+ujZ+ e!aF?yr"H=hN'b6%+#hd9B!L8֍Ќ@uMzv9UάƲĄ?ܫdUFl?W!'BOWːediX7Q^GMdcx7Q^GMdcx7Q^GMp0"lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lcɎ[&9lc"7Q^GM_Hynynynynynynynynynynynynynynynynynynynynynynynynyn:s%L}GyU5}_}GyU5}8לC"S#GL-xS-Ts9 "N[)E(2iSdA鞈|a =cא4:کVw j:Vj'!E0A&+SY/7%܉fh6.ijg؄F S!%d{—Ae>QJvcwB~>>k%~>>k%~>>jؤz {~ߺ~ߺ~ߺ~ߺ~ߺ~ߺ~ߺ~ߺ~ߺ~ߺ~ߺ~ߺ~e^4o|wm•CۜgGSqe*6Ro"4[1ٲ*`ȎѧYe+d dUlSj?4|(ժ>7Q^GO!1ga^2jЭňn;06QVā W13-fm c>b5g<6'pֱxq[N ,`׹^&6֬s!7Q^RqI9AnAnAnAnAnAnAnAnAn4VEn4VEn4VEn4VEn4VEn4VEn4VE;$E ‚(-‚(-‚(-‚(-‚(-‚(-‚(-‚(-‚(-‚{5~돞K.P(K% rB\.P(K% rB\.P(K% rB\.P(K% rB\.P(K% rB\.P(K% rB\.P(K% 4!{~;q L>N4?5D  !1"AQRSa #2@Pq03BCbrT$4cp ?܃eW Pwi*)qٕ ;;TgV7[>G-9j\jHKG%+?jloªZJy\veeqcRzaU1zT֕M<;JZ·LJ*oP8Îd'Gwݙl`;>IL LF| y[reO+=٪S1yNcЦ-vrRqvd'f+{(Bg]~.n[/*3k?U! 
s };YMaIS N |06u)!.r**h6TU-iY)Xq)]8yâ:ylQb 6pU ;d*F;.H{~G݄EG~aow$pm[u ;'r9aGBr;. ]Ԉ?07s;$pmGu?.yvn!tg QɏAㇶo)wo oMx5&891n{ڻ WM|_̡?0>~.>7Y1mЃ|ԛ,j4$NCǜnLy,X-w..̮hR6qW.NվWTWf]|hbelLsIn坻3]w..Wc jpR6p=.Nԏuh<d3j'@rzB=}rR_0yoρSz P 9;QuJqg!*kymG<>g<Jt #s\2xbpgh(sL~)/|Jv1ⶇ1e9ǠYSHpu^j֔x'x[E>oeP[H ]x*S(nj*y˱s?QwT̿KX#5SqUC hr0ˁحrIQq hNMtB罡0^ج-dsTL'5Sq7' MDBkmI/ғ^F`<^jxS=uTb&3At7b2fiރE*y U]-U3?:VnvyP+vdsMbe'CN'qo(yF7:<mΨ(Lb5}73_6 v9 Je;KǴ7 /9/h꟩0ʂ!32hbt" NE@i?£,2%\v8<5?6 ,\ qTd9ap*X1ge*vEp[ۗز\Xkr:[,R1ʺ.MDFeDPHXYm6W>';ƹγƿY>an|5/9qc0+ |GQs6K8msOGsL\!ml"⬩c}-7wowȏh}[>aPV=+ i0<D3_dKN,Zad̀?Ba^)O*EȲtWg r?댓=!Og_?k> ;1n|<_,&8tvg9C)Hk^82k&w:c'66@p۵5kys&8t}C{k!RG*HY莅L1+qIҩ#,g:mЩT;hT7 2[ L^U$aJsS0 =Bv]RK TMKc.MOz 7S;\c ~N*>,VT {gsĩ#TwnK0*vqqsvF-q4H悩:E$ ;#H0^-uI ,9% 4wrľ`$/{`9%O.a f!{9%O.a f;zRb.i-Т\rlQ1|;XJɺs?4s6Ha8O!쯁?2|c;=E:\Xv^Ԑ1.U\j5WUƪUq\j5WUƪUq\j5WUƪUq\j5WUƪUq\j5WUƪUq\j5WUƪUq\j5WUƪax'GNP:=Btz 'GNP:=Btz 'GNP:=Btz 'GNP:=Btz 'GNP:=Btz 'GNP:=Btz 'GNP@!x_ePpt/uߝJc9g[#\Tu-?򼖹,05,nX?;uS-NiҖ0IC@:`O@Nyf<?h(͊bE2j7_ec{) xFC' 6cshCmޜjK$.`;ts(3sfe{JDp0 E} ͹eBN suËdb"{-6 GI6UBb$nāк}+=/ǁ]a>_n< qhL*4U2hdTɢES&ML*4U2hdTɢES&ML*4U2hdTɢES&ML*4U2hdTɢES&ML*4U2hdTɢES&ML*4U2hdTɢES&ML*4\/ǁ@7GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGmsA_n< f8:_}ۏxXKq`yB u* ݗ!B󐠑!Dmd '(^;`#j=^$;lncXݦ(^;`-BFٹmT:ß/ǁ]<T͂d-T9HO12`# x9[B\ٛ{u6|Y&L,f.{ u@z^{3, 9%ŝ8Qx0;;t#qFZ֟Ht p#<Ň;6gdɄO Pg \6L|E-^TIg;mН9pt/wx#qqN6ަ{uԯsFN4oArnyZ^_־jy1fbL-G})/,•ks7Ow'f{Df8ȿ}+}^\Sx܅}ۏ\lRM&TnIU$۪mI6ꤛuRM&TnIU$۪mI6ꤛuRM&TnIU$۪mI6ꤛuRM&TnIU$۪mI6ꤛuRM&TnIU$۪mI6ꤛuRM&TnIU$۪mI6ꤛuRM&B0\&XY5z dah̕TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTM;xUV ?{[=+>KqWS78)[Qs,Bc^K؎e bs罍hة p83(X_5 LWv)[ |li2ztv`tW|ʤI.{-1t`zaO&2FR 1Jzn6L1ޭ{T,VM7\2S]ᴎɥ_n< pzM7 7 {U!q%(OŋS;+|U)X ͡Cŵqʤ kk3)K~|W`sb:!&w̩{.3ӗvSbv6 qk,grJT$Ùux:_}ۏt/u=_n< kDvQ;|TN*'oʉDvQ;|TN*'oʉDvQ;|TN*'oʉDvQ;|TN*'oʉDvQ;|TN*'oʉDvQ;|TN*'oʉD܇KqS\Uso*\ʮmW6yUͼUso*\ʮmW6yUͼUso*\ʮmW6yUͼUso*\ʮmW6yUͼUso*\ʮmW6yUͼUso*\ʮmW6yUͼUso*[</ǁ]Oix:_}ۏ9aM}Ӛx4T݊&Xȅg\pfM- q8H{Gy69rs )#,s8w=c"_4Z Vk~exxܝg@ؚK w%6 :xx2 
6B):xl6v*?R'+?ݳ}/idt.-> d{t9l)m5,#FF+T]Ҧ/ΥŀwA;{%,Ef#򪎅iKi~'['oeT "u 묢nYZr; ֍u]-LV>TӼG5ռ"#Fe%A}{=YDcM0H?K~T h k3ǭSF?GOI  `4QNRw1tAJVì:)hXhwoij\Ui?%-.Z4\eT4cAJo((SF͐,Gv_`!P7{u()_ ֩o=YHۤTf ~0=yzT#J ޙ B}QEv \Cu%V7kW胨1Ud/ʐߣv|&Ϝ˱%o:wE#n"#I)↬.~jߚ qJeȻxΰo5Ը:.uW<AS>xTWgd/MF5H)zFf}-tRW|{Rͱ_ҋUnjPx$30.h jgU 3Ws'j>1vea4nx]AnĤE`G\ݿp ޏYШS n%;imZ*=)bW(CA ][Z5 ~kSN4{@:xM.d`b_-V(mA<5к|B )P ߕBh 4z'3H)7_kbW>Si ;ao ;] kb͚GB)z2 JY4_6|'|&?fܳ]{FxF&? #(IQ[W>yhv&j:rRS,z[kQnE߈86 $  Ne@3]~5/l:.YǽV uZQX|:VYxd r_X_#r]O~"%q:*A haSA8zK=/1; \KeK,|;}/\S_B G1ͪvvֆG7֏ :)4OFH n)aYNpU:^Ҫ?_8݉bF/Q׿.1,j^;(|2êJЕE>7E;+pקx:̴Ƀ,nhƦ,TKh: FN[2s1 # |%M]?3k}!i×xNCƉZ;.(b.fmA2tAL|RݗPJF&C jI`gm;+B1[So:imcT/&M]?3j}!k!gX ]#5L7t.E$' 3:MLjUZ9,qvshWF}4'(hW`#!buY:Cnt^"{ZMlyN2 r|& |DžIeޑiUZ&ӣ7ThEv:kpKؖaD6'eMtBTdCyXi3|qX=B.ᗙçHW##B ZOli_r֐z.~w/'yU.w!di2pm"a?of8֒K&B׽Bmx4tWKfd*{yA|K"QD]*);iX`Y&_MU[/sʔNӮ<ʖk [x?E V >5ajW>O&[Σ#%jh0t^I`n IEt^+RZ?9oתfӚPfj39wu]K竫b0W*<4Њmq" `Qwr.ժȥF(!/=,M-Zv/Do%+zunel:/9f..[Ho>W&i644UsR)O|K2/A\IYS!=%3ΎbswWc5zҳ+f -SOiKR[uz;M=>Iv|}:qU+a8zcjXCŨџIcʘRVHڷjE ӝz 0u*&u[O &,QJ'h|:B85x1ۣBl j]zBslGRN.IC{3U|ZPAN,m/=gWE.vRҩ^Ԝм~ {5U:Aڡ{56A]ڋF-&P\i( >O)q oe,[Z9^Wx|HxaMSwͭ@Wl=CWhNI>zCʿ[sKOܞmo4z\N='ࣧd%jl?VP&j=O,ؖ/o5mzMZ>>d'?OG"?A<1p|?ڌzTiptrl+HcOSJvW^9SzUCUV]9k2?3JR/(]%Ӂ#8E#0d ltӴ(279nss79nss79nss79nss79nss79nss79niׄݣn888888888888:vq{6m(QiFҍJ6m(QiFҍJ6m(QiFҍJ6m(QiFҍJ6m(QiFҍJ6m(QiFҍJ6S}Gܝ}GIK.Ug螊zpŌ/ xpB7( Xe6[m0z)44 g`.Kl0AkO7"}f/Xu;۴q{+Yˮ5^8s[ 5Yl%`P/=t u4@YvݒΪ-Mcp4މEnQSzCVauP!g hJ Tm0|n'F|n'F|nÃè㳎;8㳎;8㳎;8㳎;8㳎;8㳎;8㳎;8㳎;8㳎;8㳎;8㳎;8㳎;8㳇P۴qDhFP<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<= ->ݣ?8ݟ\%U$îk!ZsI4= +Yk^nm@Z^M'[ {x61ׄt P@f:&'' A3CR"(vv>wg|l+IP.nmdǍN::cdm L$I֟v,)SZGVE Ŵrj'\ uBkF3RNv[}:1&aqbTԧ6UWFq}|bĀlzt0Cdhw?dUt+۴q>$"7Ƴ6[K:wxz,jO$wDc&h󊆤Gݢ(#\zT(`v:}N 8'pOi>ӂ}N 8'pOi>ӂ}N 8'pOi>ӂ}N 8'pOi>ӂ}N 8'pOi>ӂ}N 8'pOi>ӂ}B8;~s+ GQOWYs߷hBb3m;.Xrd,u D h9}o8X"(W_T gKm/;d5x.F;T:d'hp(aE *Ɲ:wDJH~ہuf"tntaOwH"t]T@f-Na{*N]FeEܪl>.|N۴q/iy${Ubk%B]X-5WjZ?XQS0AIAS=sKN/Љx,c.M.4 kpr4M43cW30^,n 5J132ط@KRFv"xYxk: 
qM,ѢhaXi|N۴qN۴qN۴p6ri+ri+ri+ri+ri+ri+ri+ri+ri+ri+ri+ri+rio5 XX  fP0[0o 8lSO'vUt`޺7C|kEL։ |J%i:ח2ѩ|6i҄qm\i|Lv:8 :3H`ŝ{]`G.ZKX]uG_AZM4; kmipCdX:VBn*]ֻ@~{\aC(˚f׬}jGNjb *b˦gGS _id"Nq9Nq9Nq9Nq9pg &pg6pg &peNҦ tNq9Nq9Nq9Nq9Nq9#,-PoZ;?9> Nl8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,ೂ 8,zѹS}50 QQ`֟@50"ϥ* _k,!1AQaqP 0@`p?T!+~"་HbmkKwK\zS|Vߔv%MX~gĂS[r%ܩ)n%V !8-OCO\#) M1_{YC)Gڱ-O[FU؏ijůYNM:^QSHe[g9`{\B2v!ūp@ iF6-7W XJC0.TBX& V뉇hC>~iX2PA{31cYYmz}jҿ /f @0WwN~0IaJ@ShPvҿƋi6~*Npy]Fi ׳Ds "XQGx8XwVpL ư(|A=ũUyhLimnH/uI7ʄR[["qPݑf}@:t6(i 8=Hu޷fZqEDPZ{B ڳC@)c{Lч[pe FY4A>atjAKp)D.Sì?nO3Y+)( kJ lJ줁@TPD@ 'jV"8&d~hrʝ޳tCRwzrzDŽJ%'> iտ*;.)erc 1,T2$+d5a8 ۞LB;Na{f<(q_%S%Hbɧ&cFkAQ V ƐK!M`8-eVuV#Е|p6!v*)ᕊ4X0m5 Qt9b .2pm~Rz=Ixt+ dSkf{,Y\}f!-XJOEVe*f*H;,@7嵼=7KsZJx!a- n/\ 8S!geܸcL@fZp!|RvVґ nR]Ƚf앥,HKiGyBSiճ|>nӜTIFfˡ詚DBg#mu@B7ྚ~;ûT/g%*{M+ k\ Mh\wodF^QΜy7awa%_ ahIxvE!MVs$0`)Zԣ +w߰ FՃLfs]]؃nej2bYaw!#l 4i7As)HS,z ӡG9% U22S->)HR~4:: Z) @M*J!STٮ#-m*ÊzJ8bëhUo8p6ς4k ͚# wQNcZEýumEǒ@QֶPhXQˀ`Ayd)nԛ5QM bu>sr.iyǤyIzS*@6WI B*=2?mDz4|odo!mO]]`J,kHbS[&t \TZ{Y56CB3hZ4՗9{Vوw ZӜJ ֬B+k*&JazWFQ"2ZۣQw4Xd]Ny>~EN;MuzS}ٞa,O!GSh6rF hPfX~49tV~[%snzYm4\:h&RțbZ-,RE,}30!CN2&dec$ٴ`5bXDNi<8+f#/O@?2͎WI-<^}ro zj:K)m3BL^5.~d9 1lA ݊sX\ ۴Jbcy#f"0A:ng+_%Q:6֙ʡH3n֜c hUAPQ/ "gQ$=wrqw̽ǰuUj`BĨNad?7:Rdb @+_[xؕagNZn}9Cr gDAkRӖV \2ZJK~`Xbm -lgRLGR+^?`;*8!ʊL9i/Kh,aA/6b;ợ=[E2U2n6xUJP)m/f3gQ&*c]=b@KfZp@";L8;hKg".LUVN4h4V*ձE@\Dұ@K7 uR$y":. r(134(/MLuWc7MqGA*дr03Dp:dm2}4fh4LXoQSPN<")C3!AvГ%Z$0?)o!u0:hXUo~rJ LQbD:r90Qgݏj _ 820:fLGP {E,iu8}eZBo&K\A*u! IYxиKDEgwWxo-K6~=ž.0ЙZҽB+Qo-p=_GN5r;q·-WAgc @7Z?M=21F1Khi3E9n("ڠPi#}a4l;.bmJ&nIAy@AA`ٝn֛3p٬mTk7!H +^[*11k .Е aYd_J F( %kxA$)`ZFBx G &\+aU5XC*gr![jZ$Qajgx#M>YZeQmP:0|N09]l@UumVJÐRHmH1`w]9b$T'(uz >XT"--ꠏ7 ,>`A _ 52,EM =J & e 6))q1TUYx0^iܝ/ZW& n`1 ieXCzH:LRXjW**/4nŊ08ޘNLсsORhh.zya]8-Z7 @TBymB*4ZV!d% 9ݕ}:O 3C2Fd-? $lK?Y4-tq M, ~cTݮZ:ñ s8oG0*5tiw wi޸ڿ }j/}N{" VngsɟUzcלAI2~&n wADz6 5? 
Fȣ}E,׌Н mz>%o|uX8G@ شjJY6\sU/S{ V#czf$>  Q~(zf5QOw$Ukq6K~Y/]@@&E?p{0;,z?wOFصYt)W?=<Ŝ(6G(!Fza4.M@٫嫍{-=z~K n2$tkJz@ K!7T8]&0k4e 'o?[O{ZֵkZֵkZֵkZֵkZֵkZֵM ~4hAxGywxGAAAAAAAAAAA _jiNt>'A: Nt'A: Nt'A: Nt'A: Nt'A: Nt'A: Nh~_>_r 8e &* .:*e.B6ų`h2;n)C">Al(8G2v0Dp&84mVQpq_藗].) pޙt)JUq>юZa@@۬F!z}CTe aR /bpNEO3<ϴ>O3<ϴ>O3<ϴ>O3<ϴ>O3<ϴ>O3<ϴ>O3<ϴ>O3<ϴ>O3<ϴ>O3<ϴ>O3<ϴ>O3<ϴM؜`bՄBMkڗ腪իVZjիVZjիVZjիVZjիVZjիVZjխ_z}0#뿥EȞUڗ^sؿu (`rpe-iMIJ+hr0NIYFf-f,z><wirM \󎛖ÀpC lkhUB:wZqUDɘoHC\_UaY՞*(]+PQa)cSaY!ܩ}9tA/_IZ,8Ag+H"X Si!u,s*WSv-ƈe w" b\r߬J6t+KM.J*B{+OhmHa=Oic>u~w&j1*c,VU3itbE@8}kƃQEV;сA^ZnKb+^_>^_W<{-<{-<{-<{-<{-<{-<{-<{-<{-<{-<{-<{-<ߴW.bM(YygywYygywYygywYygywYygywYygywYygywYygywYygywYygywYygywYygywYygybU*֋]O/__//띤,3Xi!ڂTF+2, Q^Zq k;xI\RE9t 2K(D4#Aء=)O,P"6a}19Md(/:J]L:PDDB-hT7E˦[ `,4 ZnE)JKiVC 6r~_y~_n_S.K\=.RUJ:΅i(2I3SMK#OjFgJlX 1$T &$(S0Ш9h,T\)}IaR` R)U,UEb1H/ܸp@]Eiܹ^e%nmAJ Avm1]B HAFE*6ѩH '^_֥VҎ%N*QJ8i_YG6q(GO//__//AAs:ׯ^zׯ^zׯ^zׯ^zׯ^zׯ^zU TӿZZБsEuB.kW ^u"UVFX;:ɖxJA83 z2+KHm @*E;Sw!4x)qo, ]԰&º(9X{IzRWz[SwGg_Wl2h@Uq/!Rg*]f R”`4ҀR=G-פ KШ]!~.’AG$7RSRJآ:]La A/Qfļ57n*&JflH(-bbMḀ̈ɀrtuz _AAA@E|N]|αm|NW_M=6?35PGOM|o4L~87]eD .y+AAAAA(,+P#k-k2U=k0 "9׈xIvAyivAyivAyivAyivAyivAyivAyivAyivAyivAyivAyivAyivAyinQ_EL^Q SSnP]}7P[n d5!123@P 0"#5%4A$BCDQp n]%~/T6]lMG>W]hѾBv_3NgWKY_J~;јYkMQW_}@ n]%~/He.&/ Z/o]\>)SuFeU#aHP ,+?fG)>FucR4H؋m\ŲAOr10+JD5[MK/q_LJ:e9-.+FlmMPEɵO7Uli­eVkSoWKoٿ~B GF%5 6ybi3zWQPЕ-æᥗ[]и.J?oR_ƫ.d"0/pYeo+Ǫ (%?mA_lwoSھH ±vرU jrǴOYc 6^n:XPI+H]%ZTͱ7c:Zr,|q7Wd{nJTO]Q$un,X*57c rS+2Q#õRw|u} SUDհ܋!jǰYD+QB۟:^9ulǬRhA cX/Jsەr[3;bX+\cV* 䱲5ʫ%7=G2놦b1QKհ0q˼*y:cPJ75W=q8d-aܺGUqϱfR+0G>lLj1hv@#$ #+"^"P5A{݅l JȭnN" 'ci쳎ʮȵ&NNcDX>F'4^< azu_8l;=}| '2j4ŧ#ƬSrV=m{ ?}q+j_Tr:溗J0/W)ՓM' 1RXȏgM'&cagoǶM&_Qe8_rS^PU OH)Z \.TK^bU6 {M>2VvyJczd{ kCL:ے.Uis2o#G6WUKыf=~0SOi +֞>jy>w;#P:`04􂕦+Y9Yh( U(0񬡯a`i-jWG -N5 e9SKH ^3!v}2y^7{WuYUʝEg2W6Gtp q311)i2y{{ce{XI5e9SKHM<׋]s1jdzcez23o;k5ESH>{Jl~3 OM2NWÉ N>7(Z X6]'2ZX c ̴h3- X .¡I̴b),]JaPZX1ֱ7G*C)̸h),UJqq`f\W(]'2̸1`2 V'2̸1`2_eZ.i].b/ .]˨*ݜq.b n2Cn8ٗ1.ݾ.eˮvqrmmKAٷd9 
[2-.n5[2-fݐZtױnȴG5vÑiWY~}5b!zM ikqvO?iZs:M&?OMm3iLf6ͦm3iLf6ͦm3iLf6ͦm3iLf6ͦm3iLf6ͦm3iLf6ͦm?c_!?:($4$#)fg(-[ӹw2;mck=bOцHT*UHe$ژŁ!+1C(a^Vu')e(%BI1F1v/bO1uvp3gp;w 3gp;w 3gp;w 3gp;w 3gp;w 3gp;w 0NSIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIH뵈v7?, pHtԝu'@,]t린:V >𑮐=u#PH+j!G[c}AXTT4O:+BXT uCzoS.#ii$mQsXY?  > p>(H4mi}E>hM&Ѧ(A )~s>EUvzȾ&aZ̺FE &U <9RrIʓ'*NT9RrIʓ'*NT9RrIʓ'*NT9RrIʓ'*NT9RrIʓ'*NT9RrIʓ'*NT9RZFG*=[M Q1[yA0r2?'AdpY8,N 'dpY8,N 'dpY8,N 'dpY8,N 'dpY8,N 'dpY8,N 'd:};:Q>Ѹ?ѸpZ4}A;uP~5*ńMI"e mH:m&%0 c6T}允X%6mv+>Jزo}AЄm4lNT+5nԱX]tBQU~бzUiSqjNPh/}A;uP~NT;6N͓d;6N͓d;6N͓d;6N͓d;6N͓d;6N͓d;6N͓d;6N͓d;6N͓d;6N͓d;6N͓d;6N͓d;6N͑6 xq$Iē'N$I8q$Iē'N$I8q$Iē'N$I8q$Iē'N$I8q$Iē'N$I8q$Iē'N$I8q$}A?E1 -@V 5 bhͶ0QCNQW7cAg `Q?ѵnR ׮b]SSC@5 o : D;X@;u@wC>P:m'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm'm%<!1AQaq "R0@2BPbp#`r ?~pRsZMI.74AfW2"|Dzt`}p;! S+EH0u2/Gn#9:AL-n?Bq8=S-;昈 o27>cr0>NIDЦF{#X Oq [U+:<Ħ p`FlAqK,\Bu Y36.OO NGWz `Fl\qK*b02O.$db"Mٔ]JʽFjpFOs|k#pBL-\&bE?与)yH&ZdfYB sjdߚh7GM\>] qxD]Hz?^cG 7Ż:+H75#uvGiwFnUn叠uE,>7 D\==ѹwqFvGq\FĜӱ{iQhK~##w68n`;_'f(zopBa'!(Wbd0t{KdO§ #D$ȲlH,):tw[lwx rн۔.c:'I$@L"$2ځ~Á9W?qBva.;!xt[$xy& AE&fJ20wO)90!^ Bn tL&0M+E b1Iew6kx,+C{xr`T&ؘM^W[ b7F{n˼t.ܬX$$ fr& \MOtzo+Y}`v+oo.@A\ 74. 
?-*+9/,=B ӡ1iPdarl+\Ml:{c{~עD8"`: @1ءNyct84YY6Js6IAØfT XiITH3N؍3j=c,:>A`ll")ܡ"5vtI :'Q:gbxٟ=={~עD8Q0} lPR%: , Tsȉ긆iNc}e%ڑLI>źc[:F=QVrD )"4qva; Vps51Lc%Mă:\$&'C05Oӆ=F%1EiwG 7lOjDA3$[#0y0^Jy`")H?L!ikj s`jGt)0@@Yw7 !X&&Bᆐ[ $vI5"ZiTRa:k.)kA`4La9A#6ߤ^z%11EZI#OjDA3$[#0y0^FX H iMdEdLuDD4R|14OruF$AKGg-q GjDṔlRTݩf`Ųo0s071xs lΞ.vzEߖ's~4١05h ܯ|qAsD 2[@$vH w %՚ۄjr;@QzEߖ'sM~48҇B4nW>ҟ%w޹|VwVw / I'5 q9 ,>$l'[nIs!1)jP% apBHpUp>KNdQ S)ڙKPE-!ߕ|-PEp3*>AxnKB& 7Z@ ƒ>$pZ[nIsALJi&(-FH-XE"9d)۩\PNʋNdǐCFCO.97*H8qBKd}-4(@AifA+014%q9p>?Qvs3[!q$f\& = p "%¿C`qD*| ZTCCyv>3j=8dT\`@1kb$Ըz"^r 7Z@0Q\[!q$f\&+x\3&!qp @.(Oj`yJ!!e?H;|Ӌr~;AmS k<1Lޘ)`Ҿ邿 oL0i_t_S k.׫yM&+@xU4S <hA_Lh |SE2 ;G7`r@FC 4ِFm#M6d=r@,|hLSD)Ʃ88qu0N4NgkiSղk'l|DϢOiOOiOOiOOk4Ο6d2ȉ9ea^\DI{+B`\Q\/V ȉeoVМd')) BqCAd'))BqC1)ƩN2Q8N2jDD)Ʃ:Gȸq%84:cTLӌkq%84:c N4uT* bUaZ?!(+Ю\(U *            !RJ*TRJ*TRJ*TRJ*TRJ*TRJ*T(QzfhV3EEeލ$,B6t_趚*QUYIFHA!D>tt):GÜ`1)uk2bi"B!D"B!D"B!D"B!D"B!D"B!D":Aih뾩 MaAAAAAAAAAAAAsyO) EFoFUhQ3:i(XVg5z&(j2G[iQk((((((((((((( @ @ @ @ @ @ (:.AU>z ~G^6BDޅRoXY?UbV!X%M9RJ*TRJ*TRJ*TRJ*TRJ*TRJ*TS6!4P 0123@%"#$5ApDQSL/.%R//Rѧ dm8U\'GD6FbQa|"lIb(q>/nãEՌqgf:LW<"e^k ?1ɛĪE%\:H٪D➤N.crcMV6wvjq0 hܘUɚL.(FLV7&$cbrf G:G!bm$!aҞ..r֚L!aqg hRz0։ɉ5XBRz;?,/bwgviB,H\t dբխ HEi(BYg^5!vm:t e-zu8@2ǫRj6lxҞ,jMm[xҞO?f=ZAg,3cթ4y`gd'M-~߻r̽%ge,qS5oݗ;&mG/, ]I_`v-ԝEb]E6pH`!ơ; oW#!ܤ9F {"pa"y:Ny:Ny:Ny:Ny:Ny:Ny:Ny:Ny:Ny:Ny:Ny:Ny:np xN0: .q;]ʳ)a(K HXHE ZE#--.ƶ3K ǓvU^ uI;l_'r7 ׼vcg)Ȟ3b' ,;4$*aq2IPwGy3"v#gSws^O #;~@L 5{J4c~L$N(d3n/+a\=:?Szu.~p172\<ˇp.e̸y2\<ˇp.e̸y2\<ˇp.e̸y2\<ˇp.e̸y2\<ˇp.e̸y2\<ˇp.e̸y2Ubx/?;!1QAaq"P 02@RBb#rp ?:Fai''5&̭ BЇF\ꃘ 푈i<``#$-+y<- 21o)Z0A}kҫBi|W@3ZF4s I1p'5&UM"FNA̛LD;I``# -+G0<Z7<;5 h"Nr/p9(4HZf- +Jgѷl8-<JGa[]~/6 ~ uBA TѲ+l*H]ayG~cz(mB֞Oh ZTѬM)qlHð@=>Qf8 Y?#BԣPmQ&h%H;i;oZAhER-pUN}R+lNqq#QЍ(XtN67DGW{5`bNiڜì0D7^""?M6ߟo~#pγDHFN6{ mDZ Li!I3\ 31 " ^oBݱ5GLnQBȢ~:5fO7Z-z,&A:{T$lnɻwbBGRW;ucuÛ LhX i,6Iv2OgޣB;\ށ^z5~>e#jx fpN!V"6b W$@B؝-)m{/&B;1~!~#[W\먈$[ؐAF`KEL'ZL@Z@m݂2Oe!]v#~_Wz+^$i1 etwEIlqfN)/'NՑ,o3/LMZ9[%-(Ij 8|Z@ŭ Z@9дTV-/l3exx-h&9MM-$LcGq蠏&&=8! 
WZ֚ɽb2SKv9-p`-xi[@w`]%֦gղ8zk r%EM$&2 AӁa qŤ ZڀŤ-HX/1IsC -ku&Mu{/eniQ4(Q]}n5g*Oc ]$\$-O@q-c\EuFg(A$4ߪ`phλB-OSe mU>LA`3- i d&vو,j"FRa<΄42.oiVtAΡGq"ۀތ^uCeN(-*aЦj|v7WX A.ia )[-0K=ff ;MM% "V].$IrKƖU?Ysr<;o='U#q +JoGUJ;ҐXr D@6j3E,:0%f[!f-+K71jA;IRYEٶKIqh 9ĊUT:Vc{Ǫt(MGBOEJN/H {6@23.6j -4Zu2"$M"GEvUi\5 PhX$VkOo|WE;P[g2Z;S˛"Fհ;wWAShLWeMHM%i\D/ i=3h&x/ɈGt;}چ-SEj8R#ja 7 Z"ւ*iN?e-]&$謗SZB"; t7+duiq e 3\?mՈޟ y"S!{MM@!A۲`(Fz#O|- PgU 9@OѸ \Zfba^6hAn*B#e~>g"^O\M]d;̭$XMN&~C \@đ)&C6Yz0 #~B5G"Z0=V mz p&"h +42V"j%[_&Kn&-p[j25M+^*8ZZզDa9Yp -+M6E@"i/&hnihL^4\94!04n|:)oMAgD@$L)P5͘A1VA"o59`w H̚l>J4E+F^ 47H5 Z` >BF0Jn H"=Ti02%}X9Y縍R=nN<}ܞ}O4rq<$Ow'}ܞ}O4rq<ȓo`N5S%MhkJ&'')iq ƻqĢ{ i 7+ġCN% 7+ġn8,PPyBJ-MhhD**!4Q6Ej*GO)4爯m|=~}HFD"!jDB!Hġn8(7*7-PPUyByBySMZ7iPU MMMMMMMMMMMMMM؛T&؛T&؛TT&؛T Mܚ#h)DpMܚ%4Wrh DM܄B!D"B!D"B!D"B!D"B!D"B!D"Gt#jPe$g`:Q@ZBf#-&lN՝T^eF(f5fw\<:wxÌ'SqN)8SqN)8SqN)8SqN)8SqN)8SqN)8xu?T~ P3@g?T~ P3@g?T~ P3@g?T~ P3@g?T~ P3@g?T~ P3@g?Uxv'p{E,@Bi ˰A)JPZd(BؚOe÷u찔^F xlQꆩp2V޳D*"f6a؛yqHu&"fD榢ڬhoTDm$"iL"LGǟ;'lW״DID!()Sމ?zlN37(؜qv|3w_+P|,I Âa0pL8& Âa0pL8& Âa0pL8& Âa0pL8& Âa0pL8& Âa0pL8& lA*w'hxO <'xO <'xO <'xO <'xO <'xO <'2wܚBhV -(E6 tMA$ ɴ @LRuD&ز/MF%0Mq$1M3)kQ0=y+%h[ ȥE]]vZ6ɪo6׌l me-؄ffCT jnhN5B̈M$d3!?e&m.N 3(fP̡C2e 3(fP̡C2e 3(fP̡C2e 3(fP̡C2e 3(fP̡C2e 3(fP̡C2e 3*0qO8S)yh?!Jt6@P)PFk_; 6yCBS$GG9\7ls. 
{1WlEO]rϦ}K#k+Yn(Gl[(Z  fɀ+Ɩuik+iou z k+WVm*Լobו2Gædnt~_32o;,-آ^ڞDp y\T-S{7oqZb{G[#r[ }@2YZcYf99yx ^sᇴt/91Xrd<|csxgGfq7Cހkϐu{f?Oؾp+o7?zun&@kύfŸV-bƿͨkl #Lxs8S{Oޠkύc \^k|~|b`dw‘%tl`5px$ߗ>з0ZfO_4gߝg7tvñו> ?#^,-٩pov}:+V\[ްm~ c+_l| mp׵`Jn`K$ c+\ӧ^\ӷ~;Zf/!7;Fñן$LO_L,^Gq۽zǟ~:yuϟ^ybXkϓtmEGTԿT$MC, >{^|m33Rq޵kr[֙1}ofƓx;.<,4n* c>O1rsc:`e1s}wRO'\\ldO^W9q" ]Ըs)q`5pyP v^}anK+9S|uXvݽ`5psO~3^簵ܞ8@ZfY:sA=G:}˨65vfRߕیp-wRߴ#:iGeakM#%:K#.;?a/fٝq2yĐ##7`l; y{۷wӀ Կy7gh_%POmYñן*[xYrQ2`ٛ^j_2s{t^랠g}հ`5恠Fˍ͍鏟Ő*j_k2X^X#YQ>y]v0x:xe2H1 <$Nn*^j^@Yd_lHP޵ݚ*Z7蝻XR:[l7/$`7lfzvγ3dUz^jU* fgs{^@<@=`ʭSUCH{DwV\󋞫Zkj'ZdÛګݡlU[T6F}fVmU׵ڒrP~s2vL+bZ ‘|z;K6{Uz[5jϬZj9z=;VHY*֨ukU^˅7UW/6hc:Km5?E@}(>oP>oP/ &`<O鿊5@ Ϣu3@}'3cnZ`MQ}':MP@\-@ vx6,{{/@66mYڳ5&рkp<=P,Sc2b)WdڪJz72К[BԴE3π΀ 00ͻ/< 0|-P@tڲQjՙɠyP>nRfjdz&P~;\P}6S&)3z*'y`}~ &0o[5-K-K0@_#yۼ@ &&VLmZYE(euw-YE(e՞&"@G:юDL8XCXN<Ե@:J2c-e\;Q c̥tnū(fkjК8CEdZVoTpni uٮЀA8T ۼ:5k :77xt6 mj"Ք^Vu`6ՙ5EhVU: &w3!6&tn^i)|9p{-5fJ`ʮ倯:} DbrVps>(" |[ީޫR,-K+zY[ާ(L`m@/1OE@ .hv6,POE@ >` jj<<нH"@<usty?z~~Of<нH ft @x'/Ry=zdG4@t 8ހiiv&6SlcmSBڦJ^ 'm̶y9u<;sn;cg_o A>`L( q"=nֲm `5 v("Y46t|PѐdƝur~ӬgMDڟ% f4GE |ORfOfZXd w]՘FveQmdtv'2 $dۤʷcgY*^+NKč(WiJ*БTBʐ#YtAc4"+AH9%Y*ZBb_AtAtA$LAtAtAtAtAtAt֎„3:3:3""9??ha| ŌcU|Qi`go" ݪ~ !ԗjVSycnYj ´"Q&]B@2#"F'B-,*-,Mу;QE@7ʴ[a3Bݤv!->Q)*ytmt!y[6F0N#6VR㩭a}9rߗ__a)c58ߩ꟟MN:'ҏTDXml)ID1jWRwMN6*R߮MQxbG[˪!O z;Tw]N6(RwԗTB8k" 5 X%ۂ9r &QS*&VAPDYOI J ,: j+M.yN5U)1spODܻR!k__e*Z?U9&3@2*lN$dK٧txJ>i/nLӧҊ3vuu$Llβ`Ga9lt03ދwdODe?͚DQ2gNhIx328zx⳱.D9uy9!1) z<3.-#ĞdA04+̓ƤlF%?̈́PBQN&Wo=.>f_]/c'[~3K&.|1xQ>ovgK7wՓKWǥD˥\}N=,wՓK1Wǥ},rM/GX݌Fe}Xj>uHov7 zN7@VZ.]G>9uSd)upu]ՀwJ "Q_,]G>˨sQvB澣{^JȕB?pPfWPE'mͫtt| (p τ9#_/]WG՞| * yܑ5PdbO->{&ԑX&c⾾#`3QdtEuc;%.V`}}$R4?u$p"0GO5o\dKd"::fyjUUlM-.l}ŕi5?Ly ݨM*J {\޴2V9y; vN c v^IcRFOTYw ٧ Jk *H `8\wH[ -FI?ER+KbJ?i")K&G!`Uajgmct8 l^cZ[{ZtxREMZz܎O4聹eQ,ܺǩ-7VA[uqڑ HG.`8yxӫBEu W4mR}QܐO00Jr-qYL :k:U;X|Wh3օn,mZV6\) +'wN8EbH.ű+7jN9&{x6=Af/(EbT$V/N PY1SGbک{h$6/N PYGOs"j;1֍wPI1j(Y>WR@9^<>TgY6jWƣWvǰL5UzwZ*83vnttRᔀOšsJ@3kƊ` *GejOE퉷oص.>TN '_TSrڗ+I8V+=HƲӸ*Бy 
٨Vo\c-"+h$OS^`pݞTy1HcG5clNq(:rԗM;ΎCfQJMC#^!B8خsnuhrJ]Jril>4 AIK*yƟOXzvzzѴ2iZd#)JNLc:ֲF4x| &cgl5 lQڱWqdqƪ~9lx7>WHڪ泖jI*kN8IXn^\h-EtEl_Jre%c%Qչw*W+9xZݽSVB#:蓃VW:رF͒UdQQ/k>?~oSG,lvz2sXkn3Cdh-/_*8"kQ8[9B5۩d,\$jld <,+ƔCÔkam,jsTrnOp?oTmQ5vbS7Ei+$Z6ń 9dDm py$|K3__dU%-.O+.Cǫ"Q*êl4I֥)Ҹ{z ܇Dh,LGWVXs)Ki ePЎ!ijLBMڞ<E4=ߓ!Xf,s>i&w7H5i6y|#,5ao_ k@pJ0FmlUCta9T:GEK_tQ`D_OgO)#7noÊ/ۊ'_b&qDEK\qC5v$[\bɡFb2= ^FM5(fsޭ$g>hb2Xƥ #i#vFK>T+<7kFI<'S[d/J 7{vI,dĜ"UU^*{v=|k7$V%U#6'6$xE#H!/|ʜ$Y"Q+dW/r5Dv} tUOl}U~誟l}ʪtUOnw>eU_9_쪫dYx?6jyIl?aRe9$"B3*V^p+Zǎ4=esy,UP[]P *#HP2+;IX瑖d?V:"%[HX]Ba1* Kaox1\ՙHJK, [[,H-IP׺Z#kѪ-6CHfc0:\LVs1hh]$>vu-`?͚4I/¬`ž9hB=|㢅p^wrO1QڃNa!F#╗a RMϮL:8 Y!B`Vk/_*ѹ\X H[Rk^d=q`sFVA'(;"94̈!,90َ^xJr+)#En eıLMtuJ5='QG4ǣ"b]Ren/ ě^>)"v!On#\$St4(d!Ng1k[ Be >7 Vs1{ݵ<>XǞo;kBs1GldNm{EK\2 (5vc]כ :MѨ"F%vH#ҕdžIYaw0 v &e]ΐ<2JH6e=vg1!N^ %i":oq`~ opDSU~pRf{ʾ'h,ΆfL02D^)=x?7GbAlsFUڛٻf,c?S( H)P;J }LM}EK_ml'o 6D+QW{>5ɹ{ث73v̪morbqybVTTqo{[=x?7c}t,gUpU19&qitsxV Uq65*A+}Y+>ْ+gȾ|?'7G[୆^K6R¯˧~礕Nh\ڝԯ9(dZix?7ݩR9>*"hG&=⋝(-0Ó1/qr>mLܑ*QW1x#/Z*ɨ7rU_ֱ8zOr2fs#/_*ڀ%X\I\$5}ߨ6oJ;ɨ+cG+dRW53lߌuvՓPPVȜUu%z/ ub3v3Q=VMC[zqUԕ1nQW=MEZM'TF3%櫛xq#tr1|8/kTVձ__dU˘+'c#WlIrH10VN*N{w#*/FkdY`:Cc9QEPb9W;kpL"|N1#*h5sHk9{¼R`iFJ6Igh:lM\0ܲdqrWÂjWr$zW/rUl$LdrrywC%S1L,zr[",dݰ+Ih̊3!aKӺ;z$F+GVYP@1DvʸOfs9gRd  4d0Z1b좀8%w^46pm2Noڮ8Sk;0lMkQrk̯/_*Ӛ72ղKnQ $I㨣lzlc%ilY$I k$W:h|qa;|eF~5:dOe<%L VVtCD>tCD>tCcgC˹z!:!(1d oD>tCD>tCD>tCD>tCD>tCD>tC w؛*=ѫc#Kr*WY-Jdk4XŖqI8=9U7E$OFS$5d,Ec&LHp> & 2HHX! 
\͎Aֺ.qSBlY`Mu] Ϸ_G4hDP>D_m.\N=5[Ʀoֹ2 6I RqAzyCmp8xzx9k/-G,Jv,}`rM nd+bcdYv隫 <5@opM3O(U EUR%0 Yf4m8:߾Yҭg8$t@쉑EXR̤ MMhk@NR^~k;Yg24rJ` q, 9 )kZjmo#jIk\!l5d4:+ `pz' bEh$EIQ4&c/Px Y':Td0Ƅ5ԘXca4zz&BE.A jU&tct==?I5 U\HDS՗2Pn/ORd0i]+(zIg':*kyVy ap#z chCKT4@Va7Pp Xv41ֳԘhcX^^^^^J&H^^^^^^^^^^^^BQ$JBBBBBB}cDQŠ k]i6y10piÛQ俕cH뎣 -\갾mtẍSbdQɝ$Rd*Q42(JT)6)2iNuՂKSF,pRE(4j*?A8H|HW$Vh l{M:ʊ tj$u&V֜nAo9*܊+BJjמ09."uu^asN4b TtMTV1Ul:5ʉXyp\a4jQ0wj\ֈQs*H9rEPQ5R'yYQYEhS[l\mS+4*ݟ7}a͉Zەor$%z֑*UJIR%ʻ$%z֑*V'I%ʻ9"Oʿyiʛ-ZĞ]'Zj<%vܴeO>KvrD]XUHTZ.i>(w:W5E  !bjJbGm UR(FEPD剰RiG+!5SRW<JM4jz )pҩEzAϬ9ahSL /(h0x,+uykXш^ 'M#*$+VKX+TD4HF̣8_ 'Tq=FiUWQPރ8Ϭ9RTWURU$Ԫ:JV'IUD Qw&}s,,L:*J@$Iz_} p}$h$`M5c"<&e5Ǝ6%X3 KWs/g3}dQh_[: EDعү4۬$]uAD"r4%U%I[I#M9} Qp>hڹ*HNziesMe]JHdid' ,gnI&$ITʱ>L2󍨩(Lؔujk N u{7(KmR.qb]jWFSnuE))\\8i&)S$urC-*95e]qEG="56o+n&e?U1&.#9Mm) @&-9#襫_l5S^Exey/4ZDUĵ'..q/EmrDE,KTgO`T=I@9VFs}9EQV+A{|07K159s C`:ӭ_K6 D#7Mew:Z'ҋ^'HW|>S մ_U^XN oqnqF5#NS8Ӵjm+לi!D͝'k8CƙQ1cǘUFL^q&7=ѦqTYoq cM"wHӸ[B4Z{ wƝ^qpM`8Iƙ!oq;MZ91%_ti;{4{1]-42*Q{ q[>l=D$13) ҃W!t¢*'J ;^z) +ң?H"0~) +%gjA чAHQQH40h_xa8u#gu8$"+chEJO"_sc{B 4TxYz'+\v0nM W X$Cm)&70UW8'*F@\\-HtS6źlOXH6v WDҨ7V\ FΔ+ FE?=٦6IjTZMM^IunT0 W[Rp. §nh}!fqNu"q\)`$-&G ˶dx&Y P*)'41PFVۿ|=H2$L]K]"eU<_u5Dڦjֵ]qR Mv$[{ :S2,,u}Rllnf̮uKUˋ ͡U\#յVB 2V$D^)[L4Av']mE--U"nu%We&i\&ֆ껉L,H.43d`q8c-phA*9/4Ӏ<L(kMΑ*' JT bRQNae!%rF,9UBE-y\yDrVeH5KU}0qؗEUJUbWU"P8%D]>J*JtĶ*Jg݄1-J*J'g݄(;:b[UbY:-%oXvQ+ڪ*&BDG;7\!ʦ%=p[P&¢&j 4T*e ^c9WdpHlʛ90 YpC%V$2E&XP:b\=4v%P|A`5-DLp!]I!Fq`]3Rc|&;J4cһ؊ն4{xN"\-USe^M| w+.LAmM|./o|]LL?4 b3L/FZJb.+j؈L:"ʢKgKO4ڶoQye$]zQrӖҊWR\֑5kSó" Ɣ3@s(7.e 1-$3s!*%IKU"\qiUlBds]B M;%,2,JٹfQ"}ќm %cG, =tbN.GD'\MϝJ@& Nc_. ߣw.l*ĕ٢5'̪u YR1S?Ò(L: &/o||蘕UV:X"YK~Pִ;Lת I ŨCDFUPH IdJh ;f.PV.5K_q/2䇌8_MZud>^#[EPZj2M. l.,e˸[ [ [ [D)Smriu!^LRA5ֹD!:TQz2>H4(CHy-bdA6&zD*CKXƐhVBHxS"-w665+tHuSaaˠҐZʝKb:RUXySURR<ЅBRץaD؅ZÊKUsX]X]X]X]X:Qky3<%ԑcGق %z8+Gl$UU[x8r"uw iQMCUl7pq>. 
h|cEslUJ$7x7xvؗT *zg$l9vn2)hQ">[uTJDS8cENXH"jAN؝y w!a#-"׾4ҩAoa4Ez#H&"WPZX|m+VUNsEΰfOb\cO@։y jHmS"\6C*0үRCj .JPa!NzSR%ʛm *u2MNH*k:TԆUi ur,K)Z^HeRRR6yUXġS%KĩvV% %,JdJR1(],_bP"T JBb ibP"XġvV%K(\-<+Xy'#%}.B{}hf뉖kJvW8q5\:1i z1=#%9JjXq`=Y{`z 6Jp% `sSpu$o5ҡ=^,K1Vu5#-zaU xYpnR$ TZ i\8΍3B^ZLITDŗuf~+ Kܠ\' Wa"MtJsc9O3ryϪ*r,H ahlZ!V%V[hޭ9ְܰ!WA˃\ Q/=/uL,_ǝnym6T;7L|2$zjwJѵ_[eo67*W n$K DN( JĬP Q*%iK2]cnP sE(u7L ^O/gW=i .HIƩ ',(ʜDPܺĺJd6 SRhLhqsBԔ{}UTBAfQU}?W8RN%_0:`OA$!%C@I )/H=Nê0Jx`,l9%[V~͹Z8F G \(0'QR(዗sF K8aA.N%_0A z9’t*чiU 7669FZBnDZTp}rF#wn*JjT%+T?$Qu*6.JDU$0HU!4rpĺH*&AZz׹y*bDĦ؞?Gd)ϭJqK}mv TY%Eil!ŭ׺IkZ,ӇNXh}.n D2*^n%J3&QJExR ꮼ }vF# $W8 pKb}_Mq.c&q?GedCFcI nouܲ r6ȑ-*7cJi$*+t=Y'$M)8LP䔲.E{VNg ]5]tm8Hzw665w5\P1ň5mCUHeE6ĕĮb%})DunrFޢF$W9HFu\hh K^8 XMF"j%ui=5 j+k>cr:2No.YW|(i0@\ni}z6Dڶ삾UJj<ᛮ8[KolNL5$ڲ-qNg]aiawuA2SDi7.@Zũ.۵Ӄ]P"bחq8&6ٖkyp6Vr-CixbJ-&fn) _| #x8mL`hx%ʉD.T$UU>o8Nrfut`OCprD#KJ&^&˺%?[[V*K]EU۪az`Z/Zv\nP%j5Qs@ EJ+4jz<0&2X@(*gZ­-E2R]bpG*jɫ'z7&!rU\J)pd"$Fjwy#F5smVԉBI5IJRۏ50R&*UXj^ hX֑$"CD2]fy5 U#F5ƫnߜh,*J j0bHH22 XzcF5VÊү4p!ED YmĀD[s6*hX$DH$eJ\y1Umi83a4Z٪0!jHA!42.1kbV4`C^i53UlhІqR-bU^,HH\5h9Uh*uR.IXqNpa8O-Tġ*rW2TEPsbqc1@UAm ۜ IUn! -aaS%g t2smSc5ΔQRKǺhG5 ai,Ll )J8kP1JxթrY V ~Q#4e +i*k"%^1%) A.՚R l>g(JF\JČ$e"Fbd^DXzʸw$I=TK䫍zDhy$5Xл)3?ؒzql3_HTt(~̔pOpuK4lI=g߳U+3?ؒ{X$HU|(IOI=ؒH{bIKU%K#39bI7*n'rD|䣍zDEm[d]TXzKe"Fg{OZc"HTԺEO'JGN'J$Hi'u4>D$\9cs"KUn|E$4D x"Fg{OYwHē,X$HU8 mtbI7ƾX~Q#3'=Ve"Fbd^O'wqHzyWNڴy$5,h]lI=G8K$f*KDIO'G >Q$5Ė6i3?ؒzcfD$Ö7>I/Ե_&'$_MOI=ؒ4lI=ijR$fsZ{bIҩ,n|$+%m;$f*+j"'ēַƺXĻ)3?J$ݤI:B8(Ttƕ>*}T4iSҧ>O|#J`F>*}T4iSҧ>O|#J`F>*}T4iSҧ>O|#J`F>*}T4iSҧ>O|#J`F>*}T4iSҧ>O|#J`F>*}T4iSҧ>O|#J`F>*}T4iSҧ>O|#J`F>*}T4iSҧ>O|#J`F>*}T4iSҧ>O|#J`DʝMϰOy _S\yѢZQD3мB$+-%*,_ _PG^HBf-ys? '<ܖRIė?_bKI|c/~1%$1ė?_bKI|c/~1%$1ė?_bKI|c/~1%$1ė?_bKI|c/~1%$1ė?_bKI|c/~1%$1ė?_bKI|c/~1%$1ė?_bKI|c/~1%$1ė?_bKI|c/~1%$1ė?_bKI|c/~1%$16޵.,!1AQaq 0@P`p?2(! 
!]9lR򁡀\c7IYs;Ak) kk4XmrpqeZү~zW Ŭl&  XT" hἰ ]QmMw7 rmȸ*76laWR9@vljQH`B'FZA0Ut#6:d]тFo$V4#$)* /O͛pRyC5"Z%hvuXzFBgΓ@ZʖsC©szsh\UK^!,-a/%etǂ-zăTPX.c51aZ a#6N뀜`(@Qc?#á]WDYfHD 6@+>0XiXl#-¨ 4CCGȣӘD6>H4Yqθi`:Şl Hb_fU. @Y K&~޵x8NPgDT5F!6WK6O eJV70} >$xEѧP+;PQ@3;t xby|\^@d]~1E"#!Xk\@XkOb ;׼);8#[VUTԷi@/+BShX۲3]m QZCtP5n &C n'eM6y 7$:(|9Ѻ8م-Ti M>d6%d@&"i7a > ReXTT 2p80m6VJaPԊwVNm{PB Z 2< 4J{iWYCuT9ۇ&8}b?Pq`%9΀`2 1Haw!NV&@NB-8 Qy2 f"`|pi/wmhq tT@$sD|*? fs?i(*7aS [A:2&A!Lv 5ڼJ~]/&@wCg.<#@A6eoBdP:f:3C Zj <0I%[dU:( C2Z h o(6KlRt+|Wmbe5 &f&D$?`.P1i&L!KMPrM0ޡ09D8}D]-)݋+w]E3T[‘:Ǽ3q@EEl3f˛l8&Qн@lF>\S|ʧɮ&bYFnio]7(`рјbw4ֆD17 :w@1CM#(EBWrBM_$irIr@}z' l hB?FHTB AK_91g8x :LI Q:?.zxʧDKOZ~_%e_.A Su@C]>8^"P&CRc\Yٙ^2/t0r>Rq{͜Ƚ;`V\9D{2]**Jdfo. ygŸ+Ri3Tgx;w3gxFw=ѝ;w3gx;w<V3(JZdP~0-rێoYj <'׾uU56֮) 54>8Q,Y H{œ `WR#A*:i.@+k-֛!nY'p4T.E-y7b0G֔3?kB2s?lhq $P-Bso9 %lBJ/H *QҖ$mcPWO_>6tr,h3V;EsJS`{4bhAz@“gkXA4C)AXGu5T}JQAyjY@0KP}nD uP3\@@ }iD&DRE(&s݌>ӴvdD8xұ2E! H A!dG!*58]XoX @s%XtW+X(yDŽd8aFNӁ&  ဋw0Xbp;FN}~)Qc#P&0swƙT$g05¢^U E L(a+P2D< Vj߀(F MƮe0B { e^A6I*FNCF3h1l$ƙpQ $Ǘ%":Q,qv*-*To9ЀE,694JKG><> !x8(23G $)E 8̾< 6YY$vf_f& ^Ii&Px$˴w:1es<ΔY}0|beD3Was7L )R0-3ıh}y*G1 Q`UbJvPaR.BP||حz3y:-dG^R(~!GS{ $!;ՐCcGH2B7&;X0?'$tiM&=tHG 0buk MAwYRRO D! 
Y e!"*ɁDCRM*@6b_ڠGfɚy5*bgצlg(C(`4j:CPxx=`@AA@iqԝ)faB @P["+4q\[c7TA)2tKM(x"J!Ѧ3].!mA rjJI 'yP0!P@hqq0hApQxE%gנ,̈́=n 0HݥN"%0“#&D4flYC2ZOT))h;!De^H P,ȼbˤleT K.Mϧ) 0ZΨm_.A0$-]<9љߣ}ߠc!:3V~в(ǩ: 2BN2"@UbFIڣpnTs!WF|Eb3׏˟^l D,%#"\Odr ef !3l]P5ArE,a % Z 'ZPS,ZAϬ?tA؝c bXjj!{EW`` $F1׾V:'El/h̷R=4(.؅Z; Rt:V!R 3HݡՔBJ )@Z~"iD9C )OK"DPdF4s}X $ lҨM̶j(~҆ ͍&.}{1Rs r6$ɂvMI|bRj`$+$wMp A0,D0ft@9?RP~N+☍4(oxp5JVE'-1 y#s`xsZƯ|1 rLWdC~XϯG~ ㈅6(0I8U{Hd %^Hq*$`!Z.aFT\/xUFHA{)lCxYgY/ ?1r[tEapXLP&6@ "@t2y6(}zWX0$H9"6dF=PA@ PJw-Q3L!@1ei8}ą:ɡF#Ɗ[\350c['/3-3'(G)@H{KTo=['fE=>ŽPCE̙R…f'El([F[<[IF'Fl?x+2  ;,~諰pe1ےo9K39ә9ә4.E颥s;3s;3s;3kl~liQLc `9OLքwNgtwNgtwNgtwEʛRLNwUYa(YYc-WZ V>i^Sowh66}ގ)zs0j$b] s$UH \74*;@p0N|QxK=r~ AHo6_!dM@tiAMãQ%j-R@5f8L#4)8]I6)wT)l(uY #$5/{hNma\ƹx쬘&*^6,"a :%a']bS!r `^Ҁ Zt sH-?0 KCG62[C !h@xs0P' "NyUOYGݾJ/sа Д!`|.Ch#zDx͙Y,H9T u??ԡ/EbZJ9@󅠷Dgxα8:")DE\3 ca1g3phu-::1{R^]Z .m3]HqacX t$wa8_:H:e i!bit1Ya>&1MHλ??WAs}o5qY$kֿ0Iv'A`0}@I ! Z,HPFQwf1DPig/ ^rF"Qԋ$8g`γCQM^JLQCX6,`3%Yt= j<5аK E!w??OX,/8;v$w+(>pIw@BȀЃ=(^58A NFD{a\!RrcAXW]=l0H=ztuPlgrN'%g3y 6z&@n9Y ._RLc¸"GZ̓ 20'^[df<eVcCѢcۣOlW  _TLEBrG[ =4<>Y_XrNBʅa::lrO,Q1ZNDynа z %6lٳf͛6lٳf͛6lٳf͛6lٳf͛6lٳf͛6lٳf͛6lٳf͛6lٳf͛6lٳf͛V0cçck鮟L""FA4 Ty 8=z (+W@$hb( Kf MZֆ[twzq5{v"ћ\O1J6x0|>zh:|_ckckakժrGʏ*>T|QGʏ*>T|QGʏ*>T|QGʏ*>T|QGʏ*>T|QGʏ*>T|QGʏ*>T|QGʏ*>T|QGʏ*>T|Q;^'+!1AQaq0 @P`?!նM{k1 a2nп_1?_ ]L:6ؘf?c4xVYJ6nʽX$phJXn)0 287ީem>Q8ׂz:IU 3.= N:]ܭv2;ھ*"GW-GMd<pXMQdYd0oתmԚ3Z?jB̠b5 ~ yb^ hRW)X:5sBz ?65 3`[ ? q9ZS+_  gԏ)ŃhB4VYXzW Q`T84Gg1V+:yV擕}psZ6M zz 4B=p9LW ꊄ`jsKɎ-*~Y-g=8p ,U#6X6ʆpmM>7O9ZꎒOM;CΠX E[,ANFa^uca!KAd/>8>[}]}h@~4ŸH\"}( DBnV2 k[@egb0Db5$En%zu$|?'¹HYj<=aJz0^{ {V3086sYyL@(†$RxxxCvC8Ni 'UxaP7GSRZNVTk?|_?|_?|_<%"95?|_?M?4>BmC%ʄ-/XJq4Bkt>"ѽyG}sei,7(9[V1 _hnP:f= }At爚X.~{YypXheIf0E+O9ӵ[|RQ7 ˅(=*π ʄ1tn\A-pp6m* x.k9\k䠖Ly}^Ao&vZ]`:RF,%Qa2c a$,_@!GH{fPth->CAh!)7@* TtVmMQfscHG(n>^,<6U`5 6x֥zw~BU(M$ہD>)t|?_ M$*z#YM!o Ow%!?dMyBУ5t1h>uD߲JN?(XK.ʽ/{g7W}%tS gH=I<GɄ&۬Qڠ7)!j|Jy/P/* <'^03c^(2?; ksƻ >EgX@јNOSW_w p~t x S!{ ?K!LmjVJRƿg?Sv*ƃp l8zZu:[WD }h/PqϞfMC-]J? 
wdZ>8FY/NUE%oCjt7Е&n(l䮀Pll `2ZK5$OL|T"P^(d /5z>PRr_]8kel)9/p̲U +|yNEMyFu]Iq ~9F†opb=_dz~: $ߋXu@dFFw_}|[ۓELPr6b>+24{r,uXr ,D{76:mMn̲_M)pA AVըߴ%ztp\z0;E";) cu732Ja9A#f 1:kS QwTiPgrQyQZ tsW n9u@5g{Emw2*YK牨nfpWe#?qplLup r_gF'da5K9(F nzerYOT ΛyW~OW[*EGjQ6~ <>eطzq􈉸گ+~IAX 5 `UF"yUch&Wcң(tN=7/y`9jٜ^/!k&.a6%&^Z-~]NĿ%d'^_{BEkO&5UQVK_(uG(v" \XܾY7c`µAUX\@̀k#DjDž^TnFr)i8:_¡q4>l/Yw̺ iܻ/_85*/N|0@l~u"4-~ODZюǠ w}rIA" ?fz_pvw=8GX_{b6mXlEzP(.xUzQm-Qs2 vdp*UzI 4)N|%|Ng;Ywg{;Ywg;Ywgy:|6Oa9t"!V?S&2(Nȏ_$Y[ux68z~Y~CsX^ԣ68ge㙬[/6 ltKI#k( f(Tc%d#{A. ' 'fcn|fjXVWqi'_=Ht y& -c%7Q~ }MSi<1S{( DxC&eXtŜ1U Y+rIb?_,C_'/TO^~O1xϛ⸣ȓ-q׉b ܶb˖` b.긌Xv=ByXH=D 72W2[PF(;wgQ, 3:ސ=傊Rٷx äc,;xJ-n{R=xlӪ**x{dK(p%ai@,fEԨ5PUlc=Hf`Sܩv(0~O6>ED#ok-*څNovFl  YzU\249m[-E.3)*BE,^p M(9MfVU$A߬Z{%S0CTu:%4j6"rΞUe*M&g EVZM*i5xEl\Tŭi#%ULB Web1^g>w|O|l7{1a毐l5t]`=N-Fd0Nͳ)-VFHљ sE3k5xArb rvMXLvjsԛK!+ B&(e1/Kp i/,kOw!!pYn~/u`ZqWQ ^C8DPt \( (2@p E ist?7[}n%l3V7Lذ}f[O`@-ܳmA^QN_o,Sc)3aݳHtn͔ͱ:qK*%ʳ%yb:T8YYLsiBXuтI#aݝﻍ:3wN YWT|Jr/u`y'L99aV`%%20TW7,f \KCKy2X̯CbX+mFhv}r &G9Զ8 H1Qb.#{mU矑M+IW 1X[rkXoBg!^̲2K|BmZe8Hl,~.,ҥM-t5w.먩zҨ;nc"zU๿KYpoŀ5+-gXU~U;kiRô>갭XOˏl}̯m\ _38}\Ag0GNTCq*JX\⌢ ʄ7 FQ?TDDžFKlccr}v^)5~9(rw h9}!n-z;N*PdG;E[f⥪Yv,믿ZivM1Wl 4]0SLas۵o_)*ilc.0p*4U~^&f4A(,9Ճ}%\O 2|.X Z?^:|>0/+f!BU?a\ejvۭ #aEȮV8 #UGؠX@@4S ~ne?GrijNF,m۩9Fբ8b*sdZ3$ D|yW+Gznڙp_S@ ޭPBO {~ORmWrxy| 9rwwٷkKvbRjGdf<2޻yT  q'ZVĵn 5Z 7m3__4Ï2yxZ'AGDt<"%[P;hC~+Bl&FVtj !ID_׳T* l#[pIM{*odH[/r0_=/Uu~lz.Q)MR+/*e$ ת}_XȞ)!ê1d_UG5;ãKXT[lI CD7u(֧jC`*(Ox]MWKJiR3#Ü:=R:MaI1bOo_ `'ȔurQGVf:Qׅz\0D E$(״$4E#kR*SFPlt ^-p!X6w0Peb[QX:QF]x_Gg/uebRGccYZO+}*h)-\t;{o@Y@NYQp2<oT:F#OP-b{L-UgT(l !gP]flcnnA _AAA`,PFIb:aA#r.A dƋ!4   .sAVh6rNr7d_?onVv>}d2x*M_ %74ћ_*],ւȁ68o(Y|Br|j?dj TV2GfDcɊL습=@KG\K'vn#ve~ιPstE;X35%ͩIU!O?VˁJroy2K=byWTx`T/V1Λn 7.]0y<YKR.3QAS0RVb+U`$1S &pGd0QpP,Q{uJ;SEwpYCb^<@2LXxAHV{ȯ'=scd/ 8a -(ݳ0uSÐJ~PP.?p1B\5 ϠB;@7 )1/iRA3"U|>M(!2x032eO<)H'¸jgI k;>*66ſ(T CaiuԪNϬaָV|*`xX b#2 x̾zAO0zEl|PXŒ/"& tE|>)dS b#`| o 05r*;uz@yK472bj)ckW |!<.W " E_aVm:[hRu^*"4dvE;|{ö{E}+1.QW̻Mx6]  rm32 C[/j'жuT٘ 
(*H˱,1QQϏ/zK3MYabc% x} j?T/\2B=#XۻLRƻ8΂7C~[5i-Wx13(E]ʼn/Qn q迬6e˞r|3 @peL؀(s_`@V|]l2 Ʈ]: v}Qڸq𧼅gG2'Ӿwy5DwyהiS?=BsUaYiKp^st'be^:kف6j#T.yU, KiyiKst@2 P]6O@ 9NUZV88ZKP@]yʴspbpž3lBok sz=M(Z_OwDN`l8cateu8qO1ɧ'3Rllֻi ݰyo7\@c57|<eޫ!UPuͣoYejl&h,XuEw_,o3i)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))e2LS)e2οQ 0r (PB (PB (PB (PB (PB (PB (PB (PB (PB ,!1AQaq 0@P`p?#y]jZ#^QKm.B\C=""+/.DKP@ՐTJQ|k}˦mLe1FNftr yeW Z2^&/XWɰ(Bw.WKڋ_`xкG%#H߶RJ*`!}.]cgMxQ lq@xM9~r8VB[0-XPI@ؐ"JBV+_W |@zISF;ԐH< vkz1q>}!ސ`v*~T2h..j4䈑!!`qdAwz!:c@iPMkYʫ6hkBP\]vZ(ShQOB0"$ֻ | / O%I_j޾Ս.2"8,t V#TZ ,(a|kLٱU͈0^-Bk7Z \7 Y|C uɧ+#-|AI^qouϑK QųVf5us?>ġF(:&y9#Qk=b5ˉ1`\ \lFq+f؀FCRVTߞf!wj5x׀6`0EJa z);kơ\т'Kt1-t. Я<)ع:a~Pr}ƤľB=fbХ=9}9_y:vliep&iH"PZz \OSvO1hx%.4LÈmv(iOqV7-=ƂF#!ivՆv=8bel{,`y6X=3{,laom@*s/g[KSj\]G]dmфD%G@MJ2{+œjC#t/ނ R}AAAV8X߂ *l3oޗ9U7e񈡣X@hFgiޘX> B&nnOZ>+(je%ud^*43l#8ɀ4:#wW&K-Lne0 Ku ~(Tz$T DܮQHD;o寥d!}sɠv"YA,Mse%#Rԉk@ِ[li KzaR]"= 977,n'Bl3fI+=o4R7F`gzlbvЪ)_> `jJ%Dn.gͧKqV*y2h{0@}4k\f<Yb%X!D9be'"3 IҪIP{?qcAt$7J Ϛ0Ft+CRPqC_ AULmM(a7vA Bܺ|5 ߬t^C\9IaFSW9C$& Ά?}-<һ7bڠ㑔T9n[SVW_f@V$$ cB&Ĭvj~@CaKF_0w"8 C|*3QAɞ!x$Px;x RxE>ػJc-iG&9 Ze@]  hxBHHfDr)GODG+:[61 πI"k+zOgC7qyX-㉲NnqBbw6"6p߭q;^cc`)O`dfVawar&]0b4ruѸ^Sk݈&F:\] QRU OH bz=<$F֤1S*fe&m |y*vfuEK%ld~NE?8e)oAXm(S\[82t4:g5k5L7[ܪ6!, 6jS_p}u`?x-bpJu1s qn-q fL,Z*eMˋjW=OB`^%~oݔ<B5fcӠ{?wNJԒ^K:xvPc9cW:X[5S;4UE)66`]!l% cl^[($8Wt6(NDF!NLM)qȍ {ineJ'2Į>Tp*RɖPFVW3P"_ Im0-dG &BHfײR! VЖWBk֮LRZ&!w^z~Z!#.G_xKD QN<7m(bs$}ĂB9Y& W3[U3CI Ph+ vl;tƤT,9-fT1l7~/=jM!ǁt09W.AsQш7@!ک<*lB U]ۯ "x"[yM B|E -na06t+l OrG~`w =h0:]ڜ{#CJ-03Y mnϣ&aTzRɇ:_D)Aˎt~XLV_ `c$6k5B  t`0|^_Ftu<놧R-aPÞad.!`$`-:(_[A?W!bH`cn\BO frvuCD ; 5?ne'~SJ`ψ?V[n9alhRT[9_= @]!C9.qA:! 3b`MιкED0nh!zEc0 z`#6B} #u`@O5Ӎ Wiv[zM_-ӻ3л~Go'Q![6IUmӫV`_՗A/mw5B K@*5MR * obtjqF :vX"Թ2F ٩kSr,T я$gEm;p=^CJ_bV&/98-XӅfFz]Aޝ"A;2u>z=2Һ~NqՁ@ hKSIM{vNh )Vp3Ab6?F(W,-S 0YvޮNo a L'"@lOr.ΰdߑɾ`<{`\˧9Ogʰ d_>y;P<'cA\TS?}'!([ tV|z._'(G~0=4)u_ͧΎ`uS .(żG3 . 
˘0 [rX[?S(ynIt'S {?֧{k{z~7̓ r`6[E, &D>B,#F⦅CGVM`u1%>4IF,a|XTRq 􆈑1$4JsZRɊ [!lH Yk8b h89n (G\L1lBeLuQ&!@I`!@.0X;g~io+7˶YXA:҃6Hn˰f*G o= IbȆCB5/v(,pT!m6nRQe$u/>B"S3!?$ycc0D)-8ty](Z !Y#B|ávsh1*;yTk3' R 9@L.*2 4Pq=gFJ\^ #nBUeetIsPƝFܚ{JIZ0=.8R0&t.ODW4ri%(.ml "sVv6 [7|_ہCx."{I/qmf0 C b`Ba #NZ(3RDcNh{dz|0nk4( QP lF0> Tɋt TKj8phNC TЅ>G]Z܍lg?h:b¹7fn(qˋلH_<<3-4s`U=D[H׍̿mЂ#፺Ln6fe!=o7Rl0uO= l0lTP['|>.⇪(+(z@!ͅz0'wz@v z@Xs'NHv"lr $ub7.=48̙Y{~7!{!M.nq+YxR i,Wp/Yqp9= ބ8p4GͅRsA2ڍ'l a NJpJT¥~cX #,^ wˍ=ŏWmN`P(m_c:e YYm1wDQS'k0Vm7uMRqYl,7H'p7)yiZs4{۶i 2lSv%D#+E P;Bz1{~78o;#rUz 1gMlNNqO@Ì#%\G+sS]:%<6|kLjM&jmQj,:Ah!<-KeoG2-u z!dߝ^w@g$?S,5^{`[4v%A_s1J@˖UXAiqhh"6Q$M$&@GI@I )\3B0TD#B&$ ͙v"ɷ1qYPzI=OP)3m7H3ޓzp0 u$ 2z[FC\# 9pQ㊐9NEAO~m .y 8:zQڔ؈ v.^9<S$ъb h$hFG@hިQ$.h_'>&0Մ^}B/T+aψΧz@z1ZGTΰAiќδz-ѠF^nKR>o= =\L{uqF0Fm,pnDtq#ј:dzsX a9݀=fI;/sz(NwhϳqvlP&J91^y )2#>gə B!@;OfFQĄ/cNԳGy;6鐶>^WÒq8 0d‰TSnC [DxZ*"3Pcй=x;OqB#W'_ŚaIbtj 0z@e_, ߬p6jh=13QRm ̼@A>'])dzɉ KPRA@4&ew&%₄(  J429[[JN.Pы"ߵ~#  e&?B?ͧv-9}AY#,Єz@с*}8@Y}:a 힔!o[#h07> t)w`cP5My*])<Q !|1nu ;^o$yv:FRxt&dW5` 1ɉd\\X>&M@U4[ь6ў˾i: } k [ NgS+D fV`W3gZ Qv2t0XhNv"B=W1v@ (1po4Eǝ`u#qf6d[@; CkbWƯ W3y2k-P &r H qLK[Ec _JW̌xoՁh#?(X(cJ~䚝fA5iE..Ccbonkd3{rxB @!ț ?X[?tmM[-> n"o2C%lx# r7w3T-)QQX sFۘE?kZ_87Inq).R6eC IDH!0 7X$iR̎[+dݢAIHRGQXg=u$ia~ku%>! 
;%#k`}ԗ7R-v-(^g͂l:<|9C|EP@!&Ȼ{~g=H}}|o[U}s+u|0phb1}} Ȅ[3BHj=m ](n $Ayu`Q#ͨK2$8Ӱ\b1 h@&|K<`KZJ=y fkʂl`8.dR 5P(` EjY3>p?G%il|`˅ͱ4(α[AȀ۝ qm/VsҲߵ~88z%@GL^ tčP0m!2a*b1 |Di CQS`QUlS BG%eF":|qn?a2@`W /I0J4S@( |1a't!%"XtiycSʡNz5p@bi/k06' ؔA8"R {_&Dd+CNl @760 2tJs`.q#0 b(4jS7Xs7ф{+θ+#0X4~u:6?͠" =dP@䱍pU7 $ol}-`O$#ot \ @@޾H*ۍD9oe,S *F5gBG`@pODh"8`oe&0j O0Fjc)nsȠ0ʧdy^4GӍ@j5RprLO";`;odοoi3v fm/B=aAmͬh_([y(/5 Hy` 7L1#Jdv Cr Ś6 320&g,S@$C`@ HV@+ FS$ ȓ&79)` (k m`@Q0̂ ^FѨ2p xxH!٥jSǡVP|",w_wq9:e" i?풇x1 r370^\f]W?)ȧ p{sd~l-sRI屧S>юk+9Uqq83UŸ}\ y\E~mF+(k7vA|8F%&0Vz 7P1x"dq Du#Hm^dgklg7F &ޚ#㎽J]&ZjͤIPRA*a^u 6x=1g(&!?ZA?@dFyvdU7Zc@KYBm,2Ll$ӳWbCph*ȓ6[LzJ7jd$]1S[JL̨Feow@8gJY!EYT;\v\Se53ZC Ye~8[9QV{v{5*"m(<[ fH&`E\U=I1GW5Fj(9vSm1}~[jG 1t!>~ƵP)C0Nr,&+&6'đJcs}5wѶ߿~߿~߿~߿~߿~߿~߿~߿~߿~j\o !4\wFT&O.Cu@ ;Cն3"}9ßQ 2'ךS86D42JzGgb9Ò? D&&$'/@b DnG`[:PJ@ge1YPCkmmNtNtNtNtNtNtNtNwzNwzNwzNwzNwzNwzNwzNwNtNtNtNtNtNtNt~p @ @ @ @ @ @ @ @ @Ldv{31!"0 5@P234A#6`aqϮB9}2t)7UEr9nm4%JeV nm ՗>d8ο.~ru*g%2`G^n$[ $[8YI._%' З{r/%!?j?lP:z w/:ա!ơ @`:zV^tx w&дP~ISeAtw$I %fe 4h<TC##$Mp'R|KńBXLiV6*xnt7X~FiQUN#f?+y$B猻"2#N4 rF7QiBpGiH6iz-',P@$Qr4ԭE "ȩ.,0#o~#v|nGr"2`k a;wThN F b)qbAyz|9ҷ~_sc0 oy[>yyoz`H-/ɯOB ߐG0[7؏BY;[Ѣ2DCS,5v˽ֳ̺8aWO:)~l8?_ݯq]qV**OZi̾8k嫟ry-r9$2Xݩ;^ħdZ$TcU:aZuhZF: OE:QSBiU'Q)E:QTLU o`ԦFZuZF*TQ4L0/vQH)/nĹQب"Opn ll ;y(3,QW~i`4y$YV(FW^>JA̪6u$] 32~#59g`wF,5*4buG+i┺,3J]B$V.J$K 'dш쉚 d"?bp̊Պb}7ket$)(w<_?&,X,*$pOm[A Em#BmK6`,ݵZq[6ͧ"Z n ٶͧ:n,Z z0gVFFiΤ[Xշm<3;oaYm,XCeV5͞k3+B*XiWR\E/=}?Yi<}?B{yw'_'ieB{vǙaj7Xc HWVDa%a r #G1ITjdU9bwAITj$4*t~ǰVc%QUg@(eGq4# 7X*ίhFyHW8UAί@$jwq4Bտ04: GPh#cƄ$huBOeV;Nm۷~yc~ݻǗ|w۷1v=w۱ gA@QKe"ȅ؆HiC;S"b؊BRG`eI#Jg(0%HblgObr˗kJY/K%l); 2XD231IĖH=b*Y`b3!WW5]ԖY[}: LN P D8Jx8Ywg9}ig.fc`̞ekS5x{gͯd8οخUx;>A}n̒\oK]rs/<5.R/'Wbij<5>AnIg۵%V[^5"۸4m(-+L<ڝ:hp)-M+J5 J KSAl;lt-JҍBҴRiS6pе+J5Nm:iG+#)ؑӶt6-+lt-JmvphPvm{ 6;{۸4mZVۻSQFiFiZaxD`S[BiZQZVj^?iA$(Ձj>_/n qk1zN U*yC#kĄt%P20C#82@ۧ($gD #biO͘ *FD $Y -C6,1F2(MHTl+bW~6w 3 ${I4!RqYOaLvOza&]fc^+Oe!I25(#k.3,kDB Î$ *IJI] h՛u/ :n,f6?$yFgvG@ l;65X#$%8:K&Q"bq-;|ՁZĶ#Fȱ 
6gfFs0QQL**QԤ\1D :=mgJcF[|u]0'Gk[h]<$z= z]\߈6ެoBO@?@X[Ž7Nzj>-0oVU[3$v(HXHm ^#Dtԋњp5E\[_U ~"tv\BޮFoEt[𓣵-jASz >6}İYOYZf?I< ,"{RpMN6ٯjz=̶XjO> /"v7YՉa6fx+f2+NfIyoCe?]T?W?}'i.jV {hVg D*6{\[yKٔ&c `V[R-?fy/e(r/,91? LuF-2\h ukftޕGuP Ir-l[azk~d''k~d''k~¶ee)s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3s?<3Y{2u{7!01@AQPa "Rq2SB`r ?cgYZIgocur6k׿o{N}IOMNvW{]dɓ$T&L"2eBEBdʄ%cؽ1E|yǐ,C.'qQc8u @DDHxJd d)) S S @@Odgh?,v}wq,;g㯂/[b""GdD쀅.vvEr###a! ؁ \9OZ-s:8ukc{;1fJ~}=7կӍv_\8s2&L1ؙ2dɓ&L2dɒ̙2dz齅Y㿇 7֧T.{;+yo[{[ojD""$H" "$D@DH["!ȈDvDB#"u{j嘬NvNؕC2wCxJ%{r/2WV*q*CШdO+R6˞e2"7kd#GЍGdH$GSBŜYM}DDY;\2[$HdHɌ!"C&2CЙ2dɓ&L2dɓ&L2dɓ&O\U6q\Qfş;,_YxoNB\Z^%{6'MKY߇ 7֧|Sz~YiF ,^^=H߻B6O|-?>m~il!؈،DdD/y؅;GbDFGb#˿qc$L-dH>D-KgJ^߇aB߹$R$"!G̎lucfddF˹"]~SS"|IdKPfNby2%kdJ2v2"\IO$Mt%ΡȈDvDeGdD";"!ȈDvDBcnFr 6,tRS}=7Nd^שNiݴLRO=CMtw>e=w,|N_Mn$,>썢yhJT^ʃO]wOmlN'Cbt1: N'Cbt1: N'Cbt1: N'Cbt1: N'Cbt1: N'Cbt1: N'Cbt1: N'Cbt1: N'Cbt1: 3!01 45@P2`"#$36AqkpC>QHwdlSiY.!en]L4a~Ÿcm7?vQӍ}MXF[[{ŭU OԘ,Roo.PӐ85D{}DϨ#4}=EQΡ#3KLu ?oQ C5#5˿Q¿4 8ko_P(.9Pu΢3Q$&c5Wġ,V9};tT QgUSV Ln 4DҸ_?#yOyZE_zfð8hyR\F-vHmZΦU 9T}Q09!j*l=ٵn=%6{7uX^޺la7uA~fG T̙l0B;DȔsY TϬv 7ua^܎Ȝ3sY W]fͺj?I ӎsOO>EhJYFǑS> ‹-Y],"E%E$TUeXs}1tUg1!d)a^uժԂKe$y[mk U\WGW+[j\Bpܻ*,v*T (uUJg۶Vh ԡ3Nw@:ia%c,ƶ\v~6fWc>,^.rKO>K_xs򢨩O҅6#Ң3_lqrf9@j 4gDWs%?S8b=4̠@ڄK+R !lS2j2L D劀dlTa@,]=j6i Ȕ@ 4 7VL8G3fb&)a0_UQgb>@ ̑bk1.Y@S3%2DOėF>`'iԗ&ƸnHb{ep1/RL_ia&F`ep5ș12R~]%u'8LG{đD=}I[R?hGXi1 V2T9kCۣEu*VgZP.@RR(m[68*U^[f.sүzґ4B~]&G`bNkZU*m9ўU8)1k]|[ة[BE}K`%Pk,M5Z'R!'ej{ΩRqzҸwb67+ʴdxRr+6s>)ԖUc:H)E굙PaSJIٮSGBB,%ɫ +ZnzZZ d ]`kC^-mFn+U5ϊWv_Yhh_t^4("1A*zaN5H1*58{cLS^kq^ָiA?mZWȠE}oZlXܓ+]5 fP֋6;me~1"xzjyAr|5_"jb,GGJ֍ "Z z?ml\ZGSZ蔌vt~ڙKFzGgj =.ЦSAڎqqZϦtl`j~E[P":ұ3 ]} WݣӻWQfڿ}\;G>}_X; e:tuoh>\?eְq???B1 sōzȰKU!HUl4QgĻ5(:ߞjԪd}㘿ձF )_~5-I(pjh[Ծ󚊸SX\ͪVwܡgR "ޝ,B,/o!2E%#+ 7rp̕L@k!)ʂ7rX~fJWʉXz{-/I3s%E`zwmd(=B̖J!koLK6>^Kf`ŽBWmydmd(fB$W5{8fBzWؙ"ȅ{ ܣ8 fJzWؙ",8 fJ WDAdp,?I3s%E)ɱ#}B |~&C&=?(6 }~Qw@xY"A?˪Nx ƗD Yl4WhlսF0aH.bb{LA@)(ܙt"[T"lR-ZЫh3GKȬ ɛV(FeM 
ҩ^a=9ҁdJNYX*׭)[UrzUXIMӪ?C90.jXmlOSH}f:볛`XVZ;Y)mӚP6npFKE;nY%Кnb,ەە%+ՇC"}DKqĵ,MwWϽUWIJ܋x[R֡.|*͖Y,W5徉]@tJ\IN_&~Zc) ĶkWʖ9Ћ'\!a*Dņ )) `\4nLd=-]hڰ5+)CPp@y_XscV"]=l|~Ξ#+)OH_%L=оs0!9O[p5$ =t"% < 19GO[R8rq gO^a}=x偧0kGtSSPR-~WaYq5 z/Dq gO BzwN`O}?G.AWwJ@"}=lcWigO\ϧD$zzܯԒ&t/Ͽb_ jl > Iث 뙭 '4HXjkZi0ON}d"*u%8 r0,ks*ScNFXY ɬ qIcyOj~omw?j3Kt6>'hͰq+Esh15rJ1w>QklM`+"0&2%Ǥ~f*˚uۼ_;uG `7gb^f |0tB4oTҝAlЧCҿw/ҿw/ҿwc;7A                                                                        S |8!10AQ@Paq"R 2B`#b ?F۷*i9}wATߩ+nQ5D~0,;vF&;ɣ+MżSFV%3 x=~əL4ea♔,=~ɣ)xe5Ǡ+ꘞW+0DbU? vflilDSz3Le/l5l0$ŸHE28w!0 *ڎV_jP x\-\\!hC۷Ffwd:\dAO{@NJr=W)DD\# UvAU C1pe|䉿Y:A9 ?u\{dJwৌ}+NA1=M)piOleiXOFϚA7'!}Y:?X7 9 ˄q^QP8Ul$^ 8~*'ܠ?J eӁ9pS=߁U ɭw ɼnCg(}9S3c3`&uL}TΩ1L}S:e5ngܦe3BõӒ6k rߞwn mS0,<;`ABGz =X#ztzN'G\W)ꪲt4 OPt4jBrNUAUpSPPUPShzAGz*(!.Ub7 y6vDf5\I7Lg5՘O 1TDrXQd6ԛ_iL~rTlssvc23 A5E@0'rclE*VLUf8'&sbe}SRvI9HDW7OV,Dyd" T67)4X?&&OwhO/?X3z3y'b'c# sj8G?JNe8mz{';gݞKP=UF\W|4y,EBxp/%Q" gUfH#.~jLEIjI3}͕$ !Q*)&Mʦ h r0l**dDLLLLLLLLLDLLLDLNF7dzlwu܊6 )橉oi# ծr)|MNz_mOU}.~/*ޟLј ʮ)!TJ)DIU9()@y%9\F!8Uʪ␪T SBUpN) UpNJ$"J#*TBl§4ܦnGT=BU@>~@O U@D'*U=SUQ S{źSzURP6jתG置,n;q૛o\#ʹ#^{Ȯ]C:/iׯf}xDB<:B{L"~|\z79y]`!p,jU?42n٫\9v9쪚})Skz>_<9jJ>>>>>>>>>>>>>>>>>>'docker-1.10.3/docs/userguide/storagedriver/images/zfs_clones.jpg000066400000000000000000000547631267010174400250040ustar00rootroot00000000000000JFIF//C     C   B 6="Xwbހ O%! Uaq}-~Z EH AcR8n0~p8IG@KSZ EH~$q,^wEX*@GGy|/h,pA Nubu0yH|F[T9ȖR(n05g}_-?]7Ac6 :Q /Կ@Kf,Ȉ=NZYk4*uaU^Хv>&'@w6(za-ԅG>wV Fv8`مACSR wqUj8ngO. uqFD hO. mrCWD ypSk kB3p or@, bH:>)7Z0M/~v(9ض]˦7c| Q?-ZzN+Zeq U&+:nWmLll_]>Ub r$^ajfȯ@FY$])BE`1Ǘ?Qy-v6 }3Q_#^&==_5[HV;q02ƀ^Bsʩvc^(! 
KoR{$Đ +}Ɠ쾵}aZMk>E*Sg `U[+栛6qU: +=2}aOhlh RUg5fA3m=^@- {41^,|>~uIkx Z g=/iv ?koذtV;ML8- M~ΐ|/  @ƻ2 88P@ ~/.-N Jqt`m k3<7f*xSi5,v@ۺY<)^][w@O v~^yC n8ۂ;K›kwRY.-UC~<)^q8l$F46t C @*-{.;@5}I8c=`4k͏cI XLs,2Bay,26ξ;; E=x#N"Ӷ8P-j\*wu=SeI i}(]_ZvJo}w*nyT NK|؋͘ۦ; Ai.y__ZvJMIgqU Н.-;c; ^ԩ<#@urTWW;&֖"<|}e>rL%hc_cVm~8l?|&.8eO'O`]MͭY4՟8݅q]L ,8atu0,[jX; 㩁Y9: .+\ӳkg>  !"07@PRUe16 #$2A34B%&5`8 $1 sm9ï^sbWJ~_1+A%?\į똕sbWJ~_1+A%?\į- ppF`Ih~NIC }Xwp+m1+A%?\į똕sbWJ~_1+A%?\į똕sbWJ~_QXIr:B\,+yǔY 1-,U | .czL7c$ӐݝSG˪VspS{*i |Y3N{>ˎ=Ә6{oe}t=sߔrGFHJ GfsSr,C d,-2LwJEapGՌa]1mDv٢e482IO ۞㧝M{u0ĄgF:Խ! Db=l.>Cl8T;.@ pC 1<*87/F1|u~P=ˏaXHi8F$160r\u7 ׉,4s  5f%.::,ɷ}FF\ Ġ eGvX! qK S9>:.Fӂ2?\;M* a2 (ty\"0POn/&D?a`H8W}~:(m9n'&q#܋jcB*qDt0ۊk*KkR-|+9B(V1WWԻסixTcV'Dw==eIM n+EXm̭Xk{oNcۉ@L>'*X@L:%b]dygmoϿ[~}rߟ\->m۳:l". U$.|V6j3 Hpۏg9moϿ[~}rߟ\t1@Yf8ܶ->moϿ[~}rޟL#(N#U2xeExSVknϓ>^0 >fxıxgXJsߋ)Zݭ%D&;I*oY%&j77 [1%(]+D !f;ejeR$~?p+9DZJTfY޶?"C۝:ጫ).DNGO$xc_rvHǹR"D5O 8PFJGw{8->ko&1ݺY%k[Ju.7ƶk+!w|R?TO[*ςaF. Hg`!HNI$,P`HNd;D f .cTSs7 #"$4)@áW󄹊e랩 oͷ^9UpOgӣ.TnoIW(kTW}x>W[;]&IHI-KE[ Xܥ @*ktRJB4.İߐR`Ff"R*X<&hJkY([6C )cPI"3]Ӹ>-17J> 9)y;^ʲe a$|;c,N'~b93͑-T?X5"-$%)ÂAmA<}tu- cʧLmMDm凟prPmS% Y:Ӓ2qßw0lR<k?^݊ ofXBMw^(E9&24ᲄ$ɣDׅBc̘/9i6L L ؉2`:.Q.4+ƃ$ز2) FI#/9jO~=T+5`"!"B om"A:7 ,fn^H2,hR`\8m ib:_%&Xyn==]VZW8›Ugm?]: ykϨɕ#h0AX[7%wsT?$#̒+۩UG 8nq.a"jQ[D+0=6H`zm;%ӯuq%찅/VPBԜ 1"QO OsѮیc+WK}Eo a$hp2V1ѵ'krewV˟:FRR2!8J>38Q8q+F9>)LYv1RNlaѺV&q q/]|Ϡ7g5Qat֍6n{%ґ/I}wⓁvjO֊c'.-ɱ d| GySKIH0 5T 8{mhLjktcdfz2몽莏=Xrei{>um9^*i#qmyj e;o80A FkVu~+y/ 0͎3(e.wB}*#W}L,epǍDuHPgI\o=ħDĸM<-Am y:?2k' 8c|K#؃9G㞯<#^ %+sWs)34η[1 9& m ^XSܧsS#l'8~{h~66qxe ʅ+|Tb *@A0#2: |C<>Y!*_ ̺יu_ 2d^e~.˪]yU .Au]W̺יu_ 2d^e~.˪]yU .AtF[paHB(m B?/Q  !1A "Qaqt02@Pr#BRb3$CS%`4DETcs ?U)pqj]5vF#Wdj]5vF#Wړ QѵPs8v"#Wdj]5vF#Wdj]5vF~Ku݁G7f='Ї#b?3v84H]ۘ >Ŀt7VOA:Y٣Hp^"ư^i&-ODtM?#=iokj:wq4$F·6P3HN6 dx$jߘmbHCǿ-=M.ĄFiF}x—-߾( E Tݬl5,IhCi-ǣb C,ZA٘Ua2mhï]G o(Pv(xЗꍬm46h! 
1e NׯB)[i(YBʁVX.R<~ZzaqgIlS`䴐p~z-gb:mO *abBv\nn{Tͤh62voF6uO^2 @\mX!:]B6i2daZ5oA'~Zza&M?#Õ,Y~kpeuz T8]U{^کIoU!M뜮;,hmvq|{;Ϗg:_sXϮBgP=bK 5sULA֢WFkpor-ӿۉx̨IVa,9O@+Pƙ`1H-]&h1(h6:arO׊6)q\,94$Fr/:Fi`@w[ }ƧHd"-9ǿ-=Mxn'VܝhģUWDmζ>_9;^H9ZޥiZ6M?#Ĝ mHM.]e/Wd&I p[],\%]["ߞ¢ \lm#0j NuUX]vTOSx?>>* Mku tm5eB\/z14홗.}OEIeå:}; aeGw*vF xl}Sh|0[m4є=5úUCi5"h&=|VÛf tһ2WfJ]+%vdْ̕2WfJ]+%vd̕$JM3Y@c'Q[ƻ2WfJ]+%vd̔$ Qsaْ̕2WfJ]+%vt7S7U= ^*4h~VN\Nq2kk|`Y͂O0#lK'%S;0XjU0صcDto~& eB'pb3fQ/~XxɉL;[PvǛV dVuf6mɉfssbrQǏC8/mW)X,Űۤ-'¢$]_ˇGư5||dp6m5|,cqVG~„8N zð3vw zQmamIXUoG05py8"H@xM-V bŴpnv4ߤXH^ X?|!O%bu>HY /&YOקwǏ'.D4gV>zx((%E7 W?O'*Fe:ƣ<)${*?1pFva}^ő 6Scc\~Cc[O\-, Xs\6~!Ыai'x_UR:uưLp~aĝ#JF,BU~ljG1Lf9Xj5cR`ܞ_>F`>t2}?E0uڙs.n;zɢ$4ʖ:6sdeߚi$Ml®_Q HA`6M#{&E &^ 2P" hy5EKc[9z|O 2 6b0LK,N7V.2&6m]#e:[*8Ӟy)$"m&0ne?§Vw+sym ?#ƨlIbrw҉o!\ܭ;\\" k¦H7EN Y۹  K>] X }Pôua"ң_U>49(<8nq*Mb镼['bIunhLl<>?i}>Te_Rxxp\Tt+ +}#QڣOv:r]Oxc6'M$w%s XX"o!ob&c/0Esk}).fy;L$ K Vcd9E`K8Ť˻*y}͵S쀟5lZ~9'ow>6EwTUofY:,uZIMf͇nb `Tqf$',Ko[ʸyBTpn4nykFemZQ$}A+1oX<2JǦdmjxXZyMXcUO;t9,y>oϧpC@]7֍aOAF3I,TtP˛a) +o@;A34hDn\)<_ b p\vS6ޓQv"bgз_j7 8қD=C% BkO 6ĭס`&vbxj| `!|^>R}d ڷ% s> )2~fDBXM4vɠ 5cmnb@[k~WC򯻉~56kp|:>қ|)9?1{LuI ?#0Г߅>1:L-kL3pVpj_ F6uta6kIf 5nqқ[I\ǿ׺~GF;#*g؄>H/l+70姯8d*}G1t=} χ14RMWxWlY W]U]U]U]U]U]U]U]T95Q`??+!1AQ aq0@P`?J!1r    ebX B|I<   9nU^6 Āj:k'Iyթa MHJTDRɨh1"dk\CNc|GW9GI{4FnK@hWL^5U(bcFNbI {XyxOЛgЙEr)|ʥ$x* v]} ')X!$ƭ9H u j Dڷ$Ƭ>j'ȶkIIjnԄݞ`;2Y#yH`& 3H zXiQ ]JeBjp!$f $&|*0\R`\a<rvȐKEnȑq aB- 萨ܐe梒VL#/gY B@\bcO/yBTyi! 覢V0C&b p6\H'o3%s3}F;x ws6 h[@n#dn$> KWDU4N_cHSd'j44Q^386DsJ6%OTlLЂ |atCtE m(ԥjDk8"P@ୡ8^QU2^E5cu}cM!95f!6nRgZ&JOíG54 .i:EHy *8%q,%VOg&hE{QΒ8-=!mmmmrP #(Xti˫,Ô)X2̳Qq/BA󄣷\eK)Pw%vQ@m414FXl"2T66LԾK3-=$ ӻ6<W'a7L{7Bn&ٗ+.Efh]b5*;$ nXޱ|b੹$#j( ަW6S&tlD[`e! ]i4#@6vZq jz!Mի1Iim-2c7[Β\EeiU ԉ %%(Y1Eta[` aJ8Ȓ<=Br8Q$BI^5*[)Uq "; \ݥ͟j+kM $`4fY0\R1xDRC'(BKa8@m\;T> M@O>-H1aދ2FMs@Z2\E%B#g[|9x,c7Fjl)b-D? 
6Mڽ84aukEaG:L,F(uj oJޅDlaBFvPa։BKtDn"8\S'Rk` Vykے0A.&@AClEp_LCփ@4a_qJh>1(rDH-2-"3mCQd9@czy6۬n,0LdA f0AB H㈼6lHS L_Ĺ2,I 9"-@6Q `oIČ H3@frE݃CE@M(,Ae-$P AqzcJ^(|K+aoʓ$/Ha8G1.a㎉M/9=TͳSW|I%B4夡I!p!ރlm*\ٴ "N@ CyE q; `++ɬSא MS 䔃dOgؑ75XDL XF6lAˍF>1/5y)_{gٌ-0>0ۈГMH5X7KNleO'pGo>2AXZjuGT>bճ-7*(xE8 8f`mH\Ng4$Vq X kEɩ_}>5t,gp:xvUup{ו8Miq[' l\=lbHo48gGC Qn9/y?rʅRBTv|̐!ykR@ZJZuvh\!)p9֍rd%@ 1F6- 6[&t:*D:5jH,wB@n|Q.HzuD)kBB<\ oӕ`rX(Б("^H:KkdJ)YIX( DXK@ t9I&⭓Nn}/J(ʬOYC⎐Rb31; !d8cׅhYZ]I甀G4+ l©MŮ?ʗFo~N~vz̺4ע2o& = YLӰ@"5 i /hUvWV.XiTVS&C*SIlDUEQN:V-Ԧу ؗMb<3ޝk.:6eVs]~7ME%n嚰)U%XH3ep,[-RRRaQN:kj{.R JJXSK.-AVٗGZckkgъzDSWnAF6l&v_/t|*ZJdӶ*3n諩د>~s ]g09.avs ]g09.ŷX- avs ]g09.avs ]g09.:eM⏧p39p39p39p39p39p39p39p0sNu_2"Jͥ#FY셅);NU=I.f5Tm`[e `.6IN虈F,d@f!P`m!\v-aS<Y0w@vn_(Kz6T-J;]*-rbUYApabUQip`-øЪXl=qbbZ 3RF7Q^+J,$J"%B1箿G;_U[gݕۼi].%`l61$i%00l-<^$U޸`qY =P ]Vw*r;u{@}.)A^B(KͅڒE:62"t$0II`lSjWӡS,1v,ӵb=* tft8Eo}W}=u{@JHbvI C_h]-)k.ʣMCoer{YwU[c{_h[S S"ӱjNr{Fk> e&]zu{@z6Fݹf ݠDBECկQ羟dJ$>&;;BUnWm11 2aIhEY-gBq Y-gBq Y-gBq Y-gB%?_5!1AQ Paq"02@B`3CR ?MAAAAPAAAA r{;IXlg2&.~~ w({ Fl58bK?_;Qy&e`{3[ǰ 1j$VehhLǶja,50>͡d«@IU2z72.s-B?A+DZGx$,zt@+F\A0E$<!@ Ԑ@ , &[=kN1da&G[( GIQv~1Ln`f4380B/=Q|< ިia%Li$d|R-hj# nOtDB5" de `2U1Sx+u|Ug kY*`xY]U&H"$Q2d[Z>o$L(P40+cy!GLVn%y:*9%`x!m'o)ecv&GКBhM 4&КBhM 4&3 hM 4&КBhM 4&КB087Ц{ p<$<O4`'y B0=р$F7} M@0 S#! B '#! E7}>> 6WF@NFF״ۖϷlDXN [=Kpnsn)@"('#u @9?Xý9#]a^Ze93#G=y'QxLf n[? rz[i Z4`חF#j`\So--# _۷,O2J tb\v-3{()0!aL(#.W;yO)sIdt~z54Xbɽ`Y+Smי^Okk|LB爰P-Ce-CW{%Ρ V=lj`E&p&6dt~z5h]6O{YU@ n?ĉg 9 TVR%`JB鵎z MUEM=*B;HߧT<+_Gǣ]z(E&4T_=r͕Up"" 6Vq8jBVVM%}0F, N]n "6B鵎z׷ӢY! x@0P'NݏF %id0"<^`Fu+ٓ.sn2mK&{"9Qq1$%B&revi=t S%9R!4LmQ(rI6z~z5ј41ڸTFq=)a!k7+"\}+W TD Ghe#E6%:s`Nl ́9'6s`Nl ́9'6'Dp_Ӻ<= ?9'6s`Nl ́9'6s`Nl ]tea)Qp{<=yp{<=x?T?ڻufY)~a!&Hח*BʇԆ:x-,yjc'W,<cX1`fYDH*dG#U^#5aPFK?g`I(啵 vcJs<;<1%̶^4.eP4Xj m$ [5+L S[/Y`Ȑׂ̗L(2D+OЃ`+j Yϙ\**χ\:m?}w^-ο OI9Yef۪pQ$v[U[ XTXGI!]k dk:CO\)+@SHCV:tsxh]xTj6=ZRd"TWeh[ ZʼD!EY2dn mk#cPڮ"ZdJ&7V)6ꤥMyC4.~\jf8?cB뮤yuP)gQB @C+ J啭>]jJٞ^ ;4aėmh]%ons.E%uVz.~\xZ ? 
É͖VOk˘tї!Wr%|>D 5 8ѭzkgzkgzkgzk`Ү3""?4 !1AQ Paq"02@B`CR ?(aQEQEQEQE9NEQEQEQEQNٹF| xLc/ k`o#%I㗒oRZh gu!AL} | XD_.ZMW"}rF DO-&uOJ˰xi7O%h p>Qb<{p]ٟ5VHQz s=^=qsXvDSqN)8SqN)8JJ$'SqN)8SqN)8'&Ԧ&&&&&&&&&&&&&&&&#\>  )S0 M rBSO%IтHG4` 0S`<0SHO$`5t\ۈMxڋ!v~:E3e} osQߐأ&g5֭dAb@_ |&jzlypM֩0׉䙪kN0蹼_=@|! } P0)Xbx`hST4 uO*}ЫsL:%D:svB9wZ0EgFK3rO컧 Xo=:_Կ`qDP;?= CG07Y jen puo.^gD4A~W@ݽcXj'puWaߢWw8f7l?V4>moe}gZַTi_yc[) _Ob1Pe2/O4ʀ*>Dd&>D b@SXkD:+-zo 7Duj{Et<Yi&n GEYaL y+pb讇@ ?;ʕէ޷Ocqwahc |c^hguV[ -8:V743z{@qr8讇@ O;x!dX.`@"h P*t`qU@ yƸ VMP VMP VMP VMPDU֤P @|YF/ `]&ߟؐ .ƢTtx89tWCɱn[62:3ҡgtg$l|Z&nG#9<ݥ >:+ˇ6;PtđKaWU7d#,ŝ3|^讇c.ڭqwXLoLp`;. {[/-Й D:+ˇ;VJe;=*w[n^"@ G')M@ G9#9ɠd̺v o5Y4 ?pQ xMyp@ۻnˣ^TYbe_}q;=!RP7+0!_b;4 uֻ}u]$EQ0\^/ceemB/u>Wuyys9nc(F5-_Cwhw-B6y#ݺFvk739~`=Ѱ 2ă&R@Q{b K+j)yJon9H%iR @[4'U_f:s:3~9ntQј;m>H%iꄬDR*N<7:<r $;@ c~vl)JL^:+ӀS~;$QZ*g* W֐5on9/TzN`2ks/vl߿nݳ)3q놽Zt^Zu4aZmj%|J)BO%m@ i M>uBT;5!}@5v:X<-{/jm M>w@7Q{䢔R4kawGH0X_@k+k5ݰ#Nqd݌*ahVW`VVvwmn Z0aŋ!Ǟy2f˟>P:57  0@!2346P"1$CBpAD`^/,(7Ec|4X 7Ec|4X %$]?Me**89hPy5ɑ"LG0SIbjekzue.VAlц4eb&kc'+m@,DQ"TS%3U6Z棐RQ_\EAjF 2G06]Wii J3&c-Id!ΔK~+,^3Y](RaɊ)>"צ4ic#Oѽ*<. ApK48q\tsSK\:- 8e tIT#l2Ecՠt_ L A7o犯UsKQϤCRVU$nUm gE]"D!oy>h^KڈCq5IH6=[z:]H(y wr>#7KSR;uvcLi14ƘcLi14ƘcL3y7ou mxPpWI8u' w!ΡԶK6N(I8XQqNqdߗPv/xWe>Tj(e7P"Th(.P# +bI2 SVar沅nJ01cHETљd2q;4RS3/cׇ FBiMXKk'aECY3fMj.8eFoπ_ $Y3t2n04Ye|ppf,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,"; 9V~ɸ$kZk (rQeF˒%.J6\l(rQeF˒%.J6\l(rQeF˒% !'ޥSVdahnz?c \,O ψx 2i@[5t,K2Cpz1Ǜ“.PuXBhi}2㚑/r1# 8kG,p+o&*Y*mH1HoV>E*pSOwΪ !WI'XFjEF9$ZqcQ^)Ѽto"ȧC3%ρc|U%)TL]##Pl4-mWWr*a_ѻ\y)?Ȧ]8:/NQ xJ`'.ÖIHUP(]'s,\01r!C,V]C=Ud Q$E"A Jƨ"3BkUS k+ri8aVvWP'푤be8f\[c#7k7c'\R ~wEyvЗ-7p]lFS*iCEʞҼsTKdQeA<+!Y2-+ JO Q ڥIbKr>ؼWqWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLnWLY+BdjZQG (k ɷdۂ2m6VpܛpFM#&n8L*C퓮בÏ͈k; ,À:Unɷdۂ2m6pFM#&N ,.M9S>nzBڬH9({Ǜݯu.Ǜݯu.Ǜݯu.Ǜݯu.Ǜ1KSζ5lk`#[F5lk`#[F5ll3_jzYI͠+@V6mXch +;^~qs}+@V6mXҺA&H@V6mX sJqc;Oؔv%GbQؔv%GbQؔv%GbQؔv%GbQؔv%GbQhtkGib2NȲaQ5PTj#U@F ? 
W({Q$sKq u #U@F5PTj"h…4`K]<݌fp_J5ޣՉ^G*%&S*TF cawڬ8@3@`,qZ&u6!0=oIVc8j ,4mG fc# lJ-JyiT pi^o6 T_J5ޣՉ^G*#eKyC+.jARyܢ _vJ-N])wl a6ap|~ƜJ :RFt%Ft%Ft%Ft%Ft%Ft%Ft%Ft%Ft%E[ uM#yP=BY+i5 |1l7HB]?n7HqI}"N_Hi:B'Tӫ_3XxY KgGƞeQ/GHR!%SHqY=! u}|! ƣPWC!J{_6ebꝕ =o%j4{ÐNBp|UUJ^&gT+ p&*SUgBBc+~~ϵƩպ##aλ9x$-n,Wl*@IPeAoNZeRc,,7Z }Ώ25"ˇV`1ۙ#c>]eq8Z~v`?S N>}97 / E-u|4r(hS+ôK}0Samc/a/WPP}rK> y*|Ʃ\L`"ziR !1A"QTa02@Rq #BPSrs$3CDUbcƒpt` ?K{ʏɆ*YbZSDW̧ډjO_2j%e>KU|}Q-U)^Џj&YRx{Tr%鵥 80(Ry=Э]6u[7ܲ+Lw+Oroۗ\{^b\R@vWr7wF/ɪ=-be$27oe{ZfOn&؜:m[T{X=-_ãm yuZI[-+jܮanVܷ͎qjZS='jp7,7ܲ+-La]ܛrtkL[q5۹ë]V\ ;#[;X&Bf},JQ\h-[?,VչVۺׯ8FxG7+km\:WGM̲*˸^j [Q9pN{$'0CQdogлh.]B7 0aME\_^e =Xq72Vm:վL]EÚ %LIo18]vee5bmio'-zzfMa"^jYJՇmVρ|b#MTԕI-@lUӰv8;T@^Yna/ |miUiJ|̌>qM).%$Ybyǹ*F$%ner]p[|EmNKh3Lj6y.d)oIl${T}&IS3 Jfq8HҬ朁saSMH7*ڦ,@ZYRNaMRAp-hbڢ^モ5s 4d4RUpsبjKnr >FV_$6Zgql [%\{Td…á f@*$EɦQ/-,aHF!G")?S*Ϡi/jU7 ]Nm&*صr[ =s-%a@xhRҐ3,P Mrq Fޔ6a  u!IXI:e60 Eڭs6ái)ԥXf|pAmJ\>cjLӚ Яvu2H); ^ϊsj5 G4W[- rbvl9ڥ[ziB/ޮpnaƛ4ïUCV*)X\Oǽv^&Bi!^&Ga)̩S*BN4Hy M3+؄:BM3+؁L !g[6̰3}g=gK"eU-NG{:ZRP\lS ]_ L .&^Q'_ \HD,=Me-Tllon653!D*b7;bE!Sw77Nc"^iIZVQN9Zpr0+۶)m&sVk__H5%w=ݑNd{/Xp2e唥Udk:H&n=Me,Ҵ 7bʚ\4*\\P\,}LdK>`u2UJHK*,q%fj/u/iW}T>S8*[pu9?GWaflXHzgIG\U%qWtu^QzgIG\U%qWtu^QzgIG\U%qWtu^QzgIG\U%qWtu^QzgIG\U%qWtu^QzgIG\U%q9+5^lnBVOۢS')ri=Q')ri=Q')ri=Q')ri=Q')ri=Q')ri=Q')ri=Q')ri=Q')ri=Q')ri=Q')ri=Pm'p˜ $ݹgЯՎ.;w;߈|+cEaف4"\l)6sszb9LSg9l=1M)6sszb9LSg9l=1M)6sszbUuI, 29/(K/y_OTU}=QWE^Wy_OTU}=QWE^Wy_OTV$U’8.lmS ObS/y_OTU}=QWEZTԐ3hJ1&Jڽb+ꊼ*򾞨zaU_OF_s6_tl@^'?CI0H  K Y͗Ĥ6%\ S,4&%\ ZtbrT0~|>)=I5ٍ.Ꝭ1K~#|, kO 7\%-1}\(kߘSE+mVBR%(Zu\v?.Dɕ܎i4bz?*諞z?*諞z?*諞z?*諞z?*諞ta-ÝLq]cv(2cpE{{QEQgǵYE"qE x3'P==}j(cڊ,8 P) T KSѣrM`F1EE{{QEQgǵYE{{QEQgǵYE{{PdfqxJ#/s}Mvm%9ܯx9aI[#~`{]V<\~#/_ҿyew;ZW?0s}J?.B> j(%#,,sL2 @ @zew; Ǹ+ xG`3oOȑC-s20C-s20C-s20C-s20C-s =+P MWT6Tyv}_ rajTtwf\<\ \ \ \ \ 6 Shus+")'m<E'= v<2ҽ=9[) }I67J4_isbEe~T/_tx 3M *<;"-4ZYCmꐅ X%Z3|kwOL/+)hb |?9#P=m&%ЙXДwCSKQjk<eRƒxMm3tЌr6EUq}Cnc-X#D⇦Cɮi)Vp*I;7'TZiih6 ]R1o劂qeYM9:RqbC> v<2RM)*Y}KPޑؗiyo%I6* bJN]NKbZ>q]ǀ]=1*3 
c`Gp7F]ziiJQe-"L8p'; r]V7!vW.ˎi'<(i H %a:E"\)/H&{rTBҤ(()'h##FSG$!HB2=$7|\2}C޿@Z8cO*LYJ5 ɧq mW|D5 P줻jT$+oh" &-y)6 TS52ێʫ~iDn!4<2Ł2$p2˦P%8.pu睯 3p RM:X2^^5Ʂ+?waʓ3 ,).RJl&.riL1kaRpΦaM:BVQY iE00h ﶂ.rMM=Mq*ݓ(@ZlxZqp$qusNHrkZ \SWު`YVc9"Y٥%7..@b;̊D2)ȤN";̊D2)ȤN";̊D2)ȤN";̊D2)ȤN";̊D2)ȤN"IT)]  v۬Z'jLW1LNLH6-ڸh?clA l!LZHՏy SROڗ 2s[Rơ_aɮMtGrk=Ó]f{0D{هaaپN 6Ź0Glkó}f{0D{ه_aɮMtGrk=Ó]f{0,nF>ґ ^[8GhB/~,Vۤ8[IZv J&(g@ )&.̀̒Yj[j񘒘nz]ʽo{AB69rL8[+}7JY'C^L҄ <|uDyƑpfbmvh2r)M{Z#mɢH4>BǾt4mq?5:f1S)œWXk6C'lm8exmNI cf;b8E>msaFٕx'U-FuN1NXMpbmse@q$M Ĺ`68jߓ)#q$%_ h^̗6(s+[h~TM,+h3*_sAaXsu])EIsaPl|uFҠmdV"-|Zg JeݕN.!L] HCLڒ\x#8[[ R5jzHb䜎qP/QMIBIB|@Ree!R"WU`!m'2KeQb`[v?-zbZLЂqͲJw‘&`b B RjLh?tVX?Byʗ_T{1'T_]E_P?8}#g E7TR$ǍE*@xָS~3?(jYQJ'ĝQЃhſ1bu6]}QXeHrhOiGl":u.EY)}Q1+.fxN&%G"QvK0qk'k6A}V獶L+Rla? +H?R&T_hzPzE%2U5´ Nl8bU/ZN,Uf/h&3'+eߙq"48l"BukhQ)#X9rĬ*ڕmQ+2r۲ͩA25CI@`]ch+9J' JXQ&NJMIޙTN(16gM"AڙǁGragS(+jyuv3FHDU 25G.P(`Xc#Fj>˴wy*wÄX;E8 ?J Hwl[OP|$Dۓ>kJN?Ϣ*,p51]%N'=^ ikhpMPSqΦd b.!"~#]ZMv*pk\UX0` 3]Cǵ}>e`@I>s5/ og?#̧Ÿ]h K'}=Lȷ$qbu02)1 ZQ.Vtfbr{!] :CcT%qQ큚lfP^59?%r_,Y|ai?h\-CթH'쫤tBCITk[S1.mz+;a_# E2țGp:[|/٤Kn`Pi’RΆL7Uzej\Y|ݎ{;h!ZJ6l-2V{#$T3)EhYSj%+eLȺhATQ *ѿO;q|Y-(:*Ɩn0U^*d e t`WE5y@vՍ' Q[Gg_hȵaa6SksgKvIsP>_KF$nOPyNe9SNe9SNe9SNe9SNe9 S51Kjw"D~  .ȉ&2@0(Z3JH#f:Ct< vН5W! *te-GjFK(NBɋ im0"kt؝`\P.f9BrSDzw1YNUL جz80Fqz|Ugf ^,) (FYcSśSK =;ueeeeeeeeefDeJ1|&Iwu( nCVP4%spu}8,_11t /Ƙq>S=-λMr;wErȉ Z)C( `t;w-1-2AWz9. YJE %TQ5w?i~Ӽ+[4n{'|χK,ƓBx6j>8MCH;S(=Ç|)ׄYSnr;wq!@=Y+KT4`ɔYm -N GZ U `!ԕJթuh*֮ |\"3! nZV-bb!Pmհ(hxF*5v-.s|cߗOv . ɽ"ƿ=ÄFf# kC:~+1=9:)>®V@O$e/OR+7-sc7V Z;v?yX_+0:='A:IzNt='A:IzNt='A:HO"j+'f{N;3vgiٞӳ=f{KZAIrMFt0jYJo=f{N;3vgiٞӳ=kd=60!8DmiEqd0*%q.]!UXXq:XA3P/[Oq^ǹY8m1aXhSl~վ/4? 
j\ޠ35, 13x ,qv䕵w%cH` 4F\A!8"t*VNf#9,@4-D@S|E W+4”^LϾZN˰cQb̀u:΢ܐmc2?% 9de؋MJo"iˉHH qp WX9:Z>^6ztAU6ݾ$3֠+P6TNyYQE&MA"YтglQ}ƥ^*U@o9sNj`@R(Z(Rf BRRiYΠ5a_Aڦ2]U@tb@RBЫvM7 NHz7qe7` 4sZe.TKW/Yj3usVz' 6r V.Z)+gL9>N iw=.{e>e7yK:bLo=Q~v3Sl)_m^{%'ȳ [įo\Ͳ) Oil=iל#~%pU@k:/ SK+]Po9R;u~fp` jvb:×< fĸ~wYRzuzk&a85 6{ ~=%0`]5@юL 5y z#MOʠ hnV/ui"ޓ}j5ͷs _ 1b^AqJ,k\;@o1B1 =UzxX0BdW]+ zqH\hbۢ^QwI,(c?.|N2|?${M+Ϥ_ ],;N'v{|Uk]&6IMmmth@ڻ/+­6IMmmtk]&6ԑr~[n,:ap;g6V K1R JGhw?_Q&?oVk|Pd>Y!Ddm #dh XJ?3rzT;!4r _Ӹ۟k06 foAK(1X9H9lۦf@ni$,SchZJ> ?>Ick CF 5 }qÓҡFw7[s oy[~9(o7raÄ]}C,??K(\o .M|d&\g$xs3ay,gw?ſ;Gs`b5F1#\ksb5F1#\ksb5F1(- PAռUEj.QuZ]VUi;YMj.QuZ]VUEj"ŪjXqad98y 9K{˧_Vkqp{l&LŘ#m>߃#^z# ֖OamI8=-49Z!Wݲ}l;LK/-x,Sdd׃"Ob/D&F&F%|%|'|;{>Nϔn7e[‹-X V?wEZ[_]g,bN`7|yJ}hă h׎?foΊ<2Ǘʤt&F6[YKRj7Ѱ%cWC@bĒPۤ{PjO}7'S␴bS;V0( ľmYތ+alabm,H,51q24#$eJf^k`w+9L;V-qHsy?,FŽ{b~kkr`z5´>qOⳍ;};pu}ӱ(((((>eS*TʦU2LeS=*TʦU2LeS*T|wbǮ;G?gl6w_[==Ҍ\y2=W 蝻?&C  v ?aɉ]gL;[[1lbtMIlHc|:sz(X])ZM(f}<b'c}wr G]9]X"#vїtSSSSSSSSSSSSSSSSSSSP b7*****yo  AACwAAd/߮;G  @wgq6E0ҪD5 MqO-2`@#trZQ [M#!Ueխz,w^Z[:\@OO. 2~\Sˋg-Ŷs 8gѸDq_Z"L3bc 0;q9sNrb]?U(Æ8F _`qpW-1urشo˧?4ϣx!f}vHPNӌw c6\NN{@@\o>>ٷw!T`nܧvehFʀĝb hp68rF$68Έac+Q˿3'?e{rP%'FfJ.zi8&mH;~\~U<}Fv.ߨ̌mUy3ڹW lSk Zk .Zk .Zp1ߘ^L#҅݉5(Gz8]Li Bfk&a. éw4]&$a)@CwHPŞ暄 :S҅(G\/^$X]E(QF5Quk]E(QF5Quk]E(QFbcYGQɺU[UU[UU[UU[UUC74mğtJtJtJtJtl6sW.]+xee{6KřJ7\:F6sC꤈rT^UǙ?1Z$KqnqY! m0\-BpBMCFc"rTZ"9dWlBE>(ʘ؎2u?YJNnare_y\ر)EDŽs>#w?~G~!BR{8s,#6@H'g@җYW?d\ʉLdJ3amRPIohw{ @#W.OOʿ1'?d$b-8m& ^KیM͋Dwܰ7[,#Qt.eӌqN2]8˧t.eӌqN2]8˧t.eӌqL!HWخ-خ-خ-خ-خ!nX>cX;$++b+b+b+b+>tFVZkVZkVZkVZkVZkwโ c~C3R{WO\k%Zk%Z bj? 
brbrKY-dKY-dI!˸O w>_$3ts]UO~GAyd`a R w~sb<'*C4,X5q4 Y䎠Ayyw>_$׸#(k"QAXaћG VT1E~sy.F-emVemVemVemVemVemVemVemVemVemVVZfo'd xf--------ҏ9il*/NJ`3A#;eZ[eZ[eZ[eZ[e8U~ ‹)?{}36\\4 1^~0F?9nZ~@.HRm_G )y|>b8Kc4,HfjB5ToG͵t-?]a" fӬ$]a"li.M6po[6VUs5"Cij%ZOA11z~[1V͏Ү͵&o7IQv$rwNg=1 7#Go|YØiXQZ׳ 栮+-GeV߃O@C$2Wxb ~[GG6 ,Clc( c ar3sVɈSH7,ccFa-U"|J >9/]IBs8}F/HcvuR' |j}}?JPG/{/ґV~`/idRe+ YVzY)O-O[e[uOB.:s2s.3:j6m]6ia66 p`7I6]IMm+9BGOo{+SPԣAU5.8 !1QA 0@Paq"2BRp#C ?Ș:"tD'DN:'Od蒪pU-KRނeR>@U8SU-RT.[YTU-Nʥ}_*|/},?vyBoucFCH[,b@EDcAd%@0aꉳL?dlSAF}cMyFCc`U#l08 hQ:3챞G>o{g,8'pN 8'eݐq6ɷ &6ɸ&ܦ67&67۔& &S aL)0˜S aL)0˜S aL)6#:@P*T U@P*T U@P*|^]n>]uDvx\2f`m凷S>>]LxaR=2WG>AC.L}TLy& g#f}" ggjoƩ`˟_!&8@J3w0@'h2 0<7 h `)7gf2)cӚї>Cot66EporD7d  iNџe.뗀t6F w4Xޛ>b|n7zCtLAyџdbN*SʩTpU8|>UN*SʩTpU8|>UN*Sʩ:g\ɉ=qLLLLLLLLLLX)EQEQEQEQEzQEQG(((w\w'"(( u0((w\rIb eo fW>́<HiPy6pGt;,F ˧.y&cLӫfّ ўT07۫>.9;.sy,f&{Lram`(q4"<}D8۱Hgn3MN# sf>KN]"t D8Vo݆`鼃N\|]9tn!GRZ"&[8/m{/;-6y M 斈_/O4?_gؗ&˵#ߪ^f󏲒#HLLF~LOj-i$7˟|uLeS*TʦU2LeS*TʦU2LeS*MtSvtTΊS:*gEL詝3tT\m4gEL詝3tTΊS:*gELϺ';:`{xc-sSbM*OuL)J'DP*Tᯆ8M%SPEP|T:OT|my#A'0SI$@GǘWoM;0p[gIT<}~ċL IwmFcN2}Caa>ʜ'JfU0T4jeS*S:*gDâa0tL:&3tTΊTboP*ʓ=docker-1.10.3/docs/userguide/storagedriver/images/zpool_blocks.jpg000066400000000000000000001227271267010174400253330ustar00rootroot00000000000000JFIF//C      C M S:=y,Kɢ} W >KB۞xxAX<=Y}yԖ8:û{"|-_sZ_Gq;C'%9x#j4%+-Y):uۭ}lOFՇ`s|mp֠PlL_hֽy 25o0}76Bi;Ti&|vd~>_ׁg䨳|iG%|Áu[soFhpN9{͊㓎N9aA_*6omFG'2soFh}@֠P$soFh2 av& j Uf;9`;QzcՎZU@j~kءpksoFh8XkPWʨՠ}YsoFhVhT 7܊XkPWʨӀ69ӭztM6!cƀ,5+Tv۬ȑ64ӭX;l5EX9x#j4 B-n l4n?>@֠P@s:,Q@mڍl/cb#1֠5= Tۣ:ԖP~BeGzy/WMɀ,u<Ԓ'|e.VS'pɖ])Ϟu*m5n8O4ڳcUVS<}v|#ZثV;[DiWzȲZ~|h&xRxQ&)9Vy8ڮ@C!ݓ|"<)n3=[=*HƉG1Q,uş?63Dw yZ,hȦ2j-ZiE^@XvװZ!.!YgMA"?4^3a*45u+7g%fEFwp'2`Ыd6'ob$訛j^n{Zَ*%Enm[@.R>#ⱘʗ#Ho I?>S;{9<[/zcKuSVuس&z<'~#fzoEVk9 Z"޳ݓX+@v'3$ͣ&: 9-m-ێo1`0X\6'c^?/ז]OdrYlc5l;Ǔ?|:, e(yЃ,yʵ@פ|ϟ`,ڼXA|ŷTFLoi6th;=[`$r[EXAX+[>]svǵZa)`6X4v~ZX@FGSh; X@h;( 4`@מD>H}AF t0ѬI0$dX@Ad09b͏#6( 4`@JycAhիwa ]LtH{ D>_YZP7|0N렢ӇLGAbsQ)古aVGVTMa0тw]:f:;JycAhիwa -8tmtv/_ySO 
U$RyoBڽn*__)H~}竭A]\5u=>uM&SǏ8p$L6lrg : lR1bŇ,_ ?Ǔm'sk&\˓` HbgA}M~?Z{LRM.GS0~iP"~g>yxYڎAvS.}dY0~iP~yxYڎAvSޠNmSb/d9u9ЬV@쬖`m>Y 1ɋxcng|…7>)UL&ML1Ǐ8h_^}dɗ.\ȑ"F;  6!0123457@P"B#AC%DR`庉~25F#\dkq25F#\dkq25F#\dkq25F#\dj'.T2rQ VcbK+WW0a]\ºus +WW0a]\ºus +WW0a]\ºus +WW0a]\°f,hDAP1hwS9gW8v{]ov{]ov{]ov{]ov{]ov{]ov{]ov{X7rm-<'˸#Ȍ5Ѯr& Ycdrp蜌o\dhí.Qiʘ!EOz>HYs(x#,=佭7o} G6K+]Wxjsۗ,<(l ]n݋4 Z;ewWx!!]1K=ji{*]O&m8̓5/u[1lU,n=0Ki>:<*/:q @|ңc"MIm}d\`X~uNIqpBES7s `5С,cΚGĴ9JO"&7Ű-`#fR\3hsP,,fal|wɾ=Gy]8&.,75zV|:*(Q҂/ Q ]/_ g%XD#JL**/G7Mk$HQJM ,!rQkR74 EUYDH0TZZӌݠv1#f"+bw8ϰELפ̌L1wu'qҟCDq%-,Rl׭ܤZ3yLfW5dl3c`Deo!2T%\½IN,Rd2r7NPSY[faXRD@<Ȫl-̐{0NdL$n]?PP1R0jؤS4àY򪒓:)ؑő`[95}œibA˧|D}׭쁻&h kiCĈ `VJѪ_ hɔZÅ *Hn% ]sv&z8@c,Hp͛#歛#PfVrS p fӾM">lNYTjgI(c#@DemG6R[tn/6=@Vį1*QzU$U0iجL6d9pGNNT,t|{#ȏ3%1I "fu'kkdd9Z GMqvxYiPvh]˶[odyV'j%G)?9x=)<ͼz5$6:풔jջ1؎m͜ʯj1喕Sf)lGQn6ɯ|ΣߺI᳇Q܏htDK٫O8ݒ5N:M\ۧdO/odyV'[9ۖQAԩ%ch_摨ZO |Ȑt!2Ym|ѵ6?!Yh}c&Ȓ=SךZ59sseҟCǭ?_3UF<Ҡ:,=:~#|D}տ҉wIVG4+9B4Dw7)|cߤq|rND Kt;sZַ5E#bF69$#ҟC#I-:A[wN"fA_H1R>.H:Rf`۳pȱH̹T'Y)|gRȳ4CX;l=ޒ4NBۼui:{l|A#퍺WlUc G❴Q5TS'e-K@B6'$ SPW>Y&$L>}#{hA}60C',tI"ll>}#+OgOh}ADdb%$҈g|.ɫF3Z0L/SXWXa]bºŅu pj6Ṳ@f ,+XStRU$YioXa]bºŅH 1 Ȓk8'^æ+.߬XWXa]bºŅu ,+UE+ qxaxY<0QrQ^M~yg#٤aGfb7aŵОʼn;D,P8s p/A" ؍0A@!` 2ζ|pÁ΅BX@eƸU+5ދOrؚ}[N.YT-s8LVP 9HLye(VAd8&ʈV7w 2ڒ!99E u"XbG+a< 9;; i29+v 0O~\L.ih~h쭡;oNvݓG7Wm_6/VR_Oe ٳ^Rǂ lvINA PN13zq > cM`M3<]!ӘUEyfxUʽ_j{(N!i$0qY*XMciXΗl쌽;]Jd -f}8܃;c :BQ,Wmkcn?̴'o2נwQ<" W (,J 6Oа@]8Jˡ˰5TKc!la4:g;3٠d!%13ʟ14c|--dk{뀷;TZcBEsO𒁙g :g?]:kD*7?)$#qm#ۊl+dя.x`MCTxr)ݬޕ;aJ?\< Hݚ})d°}߯nksi4fv&pk!Pc]rş5/p0( W *mju;+_ ?=Y7^›6T%(ØblWͲfHlt>UA7n侜; !FX%-C+$k^ǞI2O𐻘LM8JˡN7d'lΓ^ޔbȬOu;ֱ9275,]ĶޮzXפDMVrD;1U=>kzLkޡ`e%c#6nٳ^Yk=o&IReMڶf8A yi1р%)%7{oMNX &o0},rF؉OHao93On6@Ie]3ReRkHW{@ڠ,tB`+7}%6$5#m}#IFzW,C} T>H6q=Ork>i=~ҔJG_g;ۥLi::|B7s ߆_yVN6q^y4sV8md}\DZ(=x"٬yVߗZ֬?UEBOgyq&}A;Ù9d䗲L|r) ۔1.rIWl3+,Y&:^H"lLF[_ZWQ2kk?UEBOM)ed5 4ac<;țS>[v֚G'?&uW֕8"2 ѿ\To˭kVy%{|ߡ'`I_S oi{DiP-:$ Tv+eu1&NTvZ[n;x|rD;}Q;VQ_I,Dq;^͚X}[H>#rVުjE/u栒ދG{غ}n\N} ͳP%DcG`(k 
05^{n-Ax(+{dq,[kٻL?iߜE2aXeAV8B_Ґ$;?k}/OGV*hf?B.sЦVz`Y큇-s:i(t(u9z]r6 `ר']XNbN1&N0f_N0i_,u? Gs4Γѓ??Gz<)/G68y %GGT>{_DXoX  !1A"Qq#02BRTUar$34@PsCVb%&5DS`c ?渞Xu3wucʼn1ĘbLx&H)kuKb6}OQae[4މyx!\ډ ]#rSIBӦUmqGTD۩CBul}Vu6{WZÓsZZSJ̿.ܘw=yiX#I|E7 Y@qn6|(uOJn;unikMtG:%~i˒XUM:beV4bT[S=@8i2Q:Vɲ֨-eqzM1C/-Yuu;#U!e~`Do2KR5JRyBV-fVhuk^YǙ*ۚbeF i*:!Umڬ0P֗mLJ^МwBů. ^aʪDB֪ йq6I3{6gu,|^hdf*L jB.)SRD8 j}HoST eFc}K12E8N#a,P]B.B*TQSNuKHexLnh&I7zc O)(]YAjyY9 NBlCMTs$Ȧ:OC} e>}')[O sj] Rwn5ˠ]EX2U^*#leQlqisAqbދA% q&qMh<wp$>. .mrH[:wb{o2ہ6_^/u&iFrze/LEh2mpYTph2m_MRk]]/\􄄄$$'5!!!:gb{/֕D"\x.^ص;g_vsXR;ϼya^ۮ{sMsEBJVg ma]qa˒)dZs?~c}Y[eNXrZ4"Gv1\1^&%Œ˕Aw.l>Sc 4ߦm]. V^ŏ] 8P17&fUMNvE.N0qqaNfNŝ XY" R&cI;6P29eeZїuT4( BLMKVHNY>fH3UZM˩`&d?$W&{X;REwIi(?Z2n`fY0 qjʘ*BJ$LeFD]ԗX!nrYUMH~9TAM*>OkU{˶7Uʸ\`_vͧ=䴢b91;:>O9VzOBMВVkƵ^.,²mA!:G<ۘEM(X?wMb-,?;"[&/4|͜;6omk?r']R4Tc<.bd\bEvC12*Hʶ i.24pfZ0ʾ DZ/Cbm)]]6%vE 2t6YfZmЮ0&Q{hެIChdYb-~EOw 쌭bh\BySfeXQvݱ$Ndk H;ʛ3$A:-f.EwM>3 :L}2]hkL'mi4 lLTVQbFxL іfuC]Qm /%i֪ՄuVMT;1:YFI$ZZGeZ%1BI:kIkn_KxuM* ZJs Ӕ,,Keѻ$M-6eFfXLKdؕn,Y 75_6Re?w5ysS.'Mu463sӴN6conx1DZvb-]"i[gvdo~g6/'Deċ;gb}ɔ:LN1&trNiEJŰtѭf,4(Q5KM.m7V֊qo+Ji-sZRQ;ba*BNc`ʻGRq=cYS[F* Wb}*G'h1[bArf;ߋ?+*^TW{y,QN}ʎ-5u:#/$žWQ8t|6=~1wBM\2@R׿ %( aS׿ӥGf-qPׯW<"[L' EOԢܯ,\02/2"a|I LY3TȽ]>0&(W7E<"4Ԧb6M6fťwQ&HCvYٍ8,i__,XR"S\wT_8hKα[^5ilt ܉,u&]bꏊs􋚯EAΡ6D\uUy+w:9(DX>r\P_LV%&C(}s*x S}5i7s [Ue}'38CI>/ݖ^tq<.T.T.T. :ӍltLk̻>[*"ɳʖcs+*[6]%Dw;L}d70-Y ɫtrrrrJ9ws ^Wlp싸IRbD]iNSm_!/$XuvlHɱ^Y2P2gf3gi;L}`3~;2EMJ+HTNݕ,>M1פLSZy!ŕazƋt+ u/{̛.s'A&^&j?Gٷ{}ik uEv#}yJ4j򰱿{ɬ)^akXr.z8'*dGֿ=vm;^s]W3>0{^fve/Iz;څ8f-z% `JzŠ تR-M\ui3}*` λ9^^ ಕ2&EUM6L յǽE.:drk{[(qqHZ"$N͢:(%]HXoQz'퉖NîKI^e ӱ1ioy/-s["?304O1:C窦^$[QS kLf͋]&fa0M1{+$*9zض[~^'[Y\$ke0i|+[l)Eט{bلH%|1w.:69OlJ&mb/ Eo B,XkBǔbJiʯ*~.m| a"֘7X^)UX^953+ne= kb*"L;ִ.}ċdf+kbe= 3nw|byԛ*E֘H. 
`'2 ' Z2j^mV/RdZDq$?ebn)ٖOt9D,!1AQa q0@P`?!.C1c1c1c1eo9O'@"%~kZֵkZֵkZֵkT,Yz~Dy `IB h9Ǐ% y[H76QIOdB XAa£YX;Ìh6oBU 뉛@Xe,w+ƽ MB(DJ0*pQ~Vp212XW_6<|T@Z&'H6٫ jdC)4 `L"qדjPx2g QFl"t*B`>"YnD[7AJD`1 0B[A}Uz~M)SKBB<^ShB`T0Gse(D16fa*B͘q2Fә|=!eGׁ+ɀ@Zh!{ZPw$H1U퀰-LG x'Ƀ|d` n|~G 2 jNq#|)'CYM h#Z8IDm?`xgĐЖnBc jWZ3)AD^`ıu:3d‘Ȉpȳ"&|Nwq7$JUL$Ԓ@EmO{+2-`h πn"B ]~6@~Lk X@ AmDmNeP,"+6:+K\ _nԁ` &^P^4qZufx{+2-4ĉI8[P,5\>Dfֵ %AR^C<8 G#ޢY|,Wd[FpuBĚ qb@5_nE%^i W-X:G,%yUF$PDuoqŴ#sR╺XEr4K=E` `b^?Y<` Pqu8 1W8M$V~]F'ߊNQ~N~N~N~N3 THssssp0e~FEm"l6@׈ D^"mMM~J~J~JAB>cSSSS`#))(7$d"Yȷ 4"a[nH~ CHn6[n3IiܠEÐ nݻrhyROfcx3\7-:rƩ-Љ5xehg0h7 @-Ol{R%p.,ԍhLzke=Dψ3A@*\H'@ʑ TcQV $oS%ޙv2=* 1^O@R7QmV5 8S(.e~FEmjtj ksղ(BDYUk xL^P@e.cbPZ / 2&*]oAC*Z`\Ԙ jo X`,@t 5ձo2_nwi{n ?*  ,*S_" Z4:Աuw>2!  HT4FX9A`˔tFifet-A\O#$S  ,yQb0VubC`ed&ƊT@3,ȱ僰 ^0g|5RU0&c@rK *@*anDMJR+RxN9t+”S0娇XC؋}' #CDw?!JRE_(2;ߴ~>h ė @15^)-{;ߴ~ӽAډ@h[I6g{wi EHiŸpsN;ߴ~ӽN;ߴ~ӽN;ߴ~Lkn yz+ ϕ]JnDlA󌶃hN̄E $SbetcD\lPQex,2pX6.+_P*Nqe?!'*5^lIJ.&'HOg5hyAj9l!)lF.sn`Icx:S/PBQU\"H6hNTzEr"Up![ðp*#}Ae,-Dьqi!|03]ڵ4s vfXDn9Vd+0=}˖0}lErLOŒ̼L92 ױ;Bp^SlT**bՠ ,{!mF QX°'4H7>wlNzqyuکXFRkSbt] G) ( 6 `:~: 8-,T@pZܮZ6Z8ʼAE6jj|Y[mcs27oWҔ~QiF1W kWkd`w#o1t# 5< 2@{h06<+{qEs)ʈ`lAث.e ($q:m*y;Z%M5;&HR* /ö&{%nPq, PC).MXsF` ~9CĶϔl%fǰUrlleƣ$\ ( v9^QuC=Q쑊M` dmcs q ĿTȷ|]){~o F^t T]Xah+U)xhځd:S`+%h`p!CdT2xn𖎮ceq7ʾgB-{G|7M@!,g`y; i"77x`{x&f*)^YYJ ]; EɅ><ϥ8vQh]Lq< pZE-t˅KJcdxNw փ Ŷ䁝 ˗t-(]`4Y.,F~Z@ QN(|Σ ހp29ޟ3Ȩ X#kԣ2"x;ɰ:Ϡ=G#8"1%.*O^P3Β9(S*mc^]-  aKC`r'!xyt)hF1|IW rJ3s yaA! 
NNަ12e%g :5[vP E#m&+7>byPxcb V c ơى n:X`q/ |>qO [ tܸ3 }_`|{E8ه.dB#MyBG.z\Р~֑qϔ8xؔǹ\F^ʳ)OϹOPBő2C`"zBE"LO掦2_Ϲϸ@{?Kz$jLVа7a*LG8bnf^h3HFU q ~S/A3dR&^1#߇bG FF&Rmqou% B* 2 f Ӷg/Qa@(j#,{6 T1?K~̿Kka3į߮Dh ׆cËˣ+Iv8~: _w"c8㭼C` ET88d8-[A>+[ V,?PKF``Ҡ`Ow<;c ^qB}>pz@忌H#*2~0)v z%5UCb2ïbs'bp6>L ?NEלC9<)9$mJ1`gRpPMc2W`A4Qϖk^Y*+c!L D$OL!y2]"$Ru=Ls.b֘tEͳGT}n]~LdΈ?s/a_ݳ-wo;ʛ,-g;XES (>\3;/]p?n;Bt'X!E7L;N30Q,!1AQa q0@P`?{6xYg+v< SH}`#$o:f=CXI&dbPC3 zwreKs)wкGI&yx C5dkfY6bi6:` a*iuDӏK:.1j6VP05(cbTe |il\{ЕG(z.3n6TMÔx&rr.UڣÜuʐ LV3#n쭱5˩KqJNvesqXau&e-Y.9Mʀ`BU>ht@F4%M?&l:b 5YʐbSDrUi.Pi1 p$֙:37SEQ*jn N_P&]e f Cn ][ZC0&& 0)Z OKsFP8nQ'>n$@K"vpT  'צ|E@bTnhtPC`Gctg|XߐB/_Z^њcQ~p~{`Հk^>mHH daD ^sPī.\4y!s`nTSCu25tp(0#pRo+mf8NT+;Ӓø ɤϋILbq /#:,#,6.(MLi\K7#*_Ux+v/ÃXкlNs)9Uw3uprgXHjbpܡ؃IHHWk|0ζѫnf j/.XHeV<$ZF.Zjn0!ml 7h`Q7Qw˵i,pkj!#D+xiV`m8(c+DL <s1$Tj*p)""+|a؂e/17| (d(dG2Dqf;a*`txl*P@:O'E:+9 !=!#[0A B@E)wD/jTР!|f8ͣCG= 0"?h~C'`2~_w˵i,p 0%!l!@zYÄǞah+Eh4"Q9Ar ;p)=`! @@,TBR/U6ռjq ;*BH U38hFBG1:'~mH%B lB$.}۽]Ic/R[ u[GEVTK@X%dBLdDL4,ؔA!*hz)Hv#7e|Ƅ&. | g? 
I=*éu5O <2xdᐁ#^/n/Ӈe?;]9'\!'CIZTS;0bq<2xd'O hL &(` `g""_3Y;*|6$=$_qw^ h*XyB1ah:G!,@b- g& ZWFi9@0BD,ԃ/ArZ$@H :isp8GXn8ѣ=R!~edp}UMEc}P CDeVI\HLW2@, k M| b [JA4~ j*5enh ]iBF֧'Ȅ_:i x_$(jAP,V Q4IB#q탰`5P0!iVPaT8(Mt#H,ـGQhIrLRxq+ʋ]RgJNHOe>(r oL)E G'va=yFj.8 K !aaQAd#1puEp=}uָ6Fgר=T\I6A` 9" : [ [VeS@g54$!"4#䈭G/d:y *roN2d2 b(A׃^j@59 mԁJ>N$ ## g}9Q4 n1$I'ShMK@il(!!A~ҢU(p׺b@ԟVH.ADb0EA?uzp3RAӁi@9*1 m_ ڎNpA\#WdtjX>kP@ ⬦P(HUZ.DT9k\T/sÐXy P0א"=TrpgND^6!`e{mqI@yW[zpZc؀l!iMd'hdC J@N p|ʤ`[n6df˅>؈O8l"'&T*x wxPu5h#(b".]f*d `pVCb[vZ̔pŻh Ȋ0&   L6a0 KƇSȋ20OlZ GđJx.m98X>oͨ]qU0"w 4o.H%aV`ј_H5 [LOŠ.1m"[B*= "^l@`2 /P3QHu1vorUG}4ߤ~'#.ti vǼu# ~9u+k6}fm.V!]TȆB;O*e dYOe+!Du s荥8o+h#,؍!h$djEBV_$ Yuo#@QNlTVDHRpya=G%W㾧 'cY7؁S0pJU(ʢ*@un!"}'3hl'9B*9Ry wW*l(ns&E9֥ߘ0Dl?ά_^3x$?3ʧf5Gź =M p r=_\`7QH b\!ʿތQ9r ;5&a'%úO8G6?#UC1<py# ä U*pH(ȅ6`1ȬeJ 3 PD9;tr>τ1oCc<0X߹O/~ m;V{?3 15!0246@"3AP#$`QՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍՍ/N,UXH%* apc q818pc q818pc q818pc q818 U;֔s#Itx7ҶJdZWmӬe+Dj m²ZJaˍk.Ј%HYm-R's)U8 -b ~Dd.L[ZhIm3 Bl2 z 2#ҵf6R%#9ޏC!Dn |q~dz8:BZE0ۯQK%8'KJ.,2bVWjK+{vo3Ȍ]+CR.+$RȝWKb̳+cǏc˛")RXɹ9ve"%Me d-it딇gHUd<î^*RS>uk:[wf<˘ڸO2^9w]gxI4짫ˏNf(nqb;Dhm2Dl"͟LJjmސ2$!g6%bHi;fL1mt8e̺Y5,f"BSEma5 KȘe)R9Y4"K%zUKZ'%*x2F/4Y]Ϻm-¾SOE~\/yБO\$hʗ(7U9'wj:~K\iW\# Dq% ǭT]Qnkލ + n J(u_͐JtKK"MtY,YIe|lZDUHzSª{݈Z9E7W+Bj\Ȼ;}72Ֆf DGRDZoRǗBcj6\#ĜtϪ[2O险VTrJ7Jn{2zjT74CmﵘW=iͭbTa#ίWdGPN_ZA+bH*a #zʱ˾m|koNj9CJ[AxʞDDmRT1ᲖȻ;};L?_R/_Sb3kWBgm?Qǯ޻Dh˾m|Ȼ;<ыyn9Ʈ{b$x8 G g#/˾m|D!Krywˌ9op7:cՙ Mvҵ\5a6{"io@n"R^j>q' YJCQI%Iuӷ4Wُ\_w.1bް͠)˦IӒ"mi̜VJjCaڨ򥸷 ݈͡aU -'Ȼ;y$C oRjYIu~ 3\TǙf6_kzƦ#Ƈ9#ЎoB8R_Yy#ЎoBq-%h6NzG7ބ2y.zQ=|}G]ch漏Y M'_b\gȻ;}yT$F a-0c"/+~THMal9Ø[ ]2S;"!M a-0b%j_˾m|ב#A{\av]8yw]gx014?3N%gcoN]ch漏Y M'_b\d?*<#l-ATN Z.^abi]lfR&K2ޜ?S$dח}1~#F +I1غq.^abi]lfR&K2ޜ?S̳2:YC QE`k^]chǞDk껧yƼ/07wH"!la*j>_diIj*"GATc6I[ik&:H<6ǗAGU}#M * K6(d[y1wX6Bǖ1}VZۮŋš}Yǖ1D:C;JO jCeDĥ~epftm8?t&fy6)lAȃVjI+#S)[g(~Lv.[CĮ+HV5* @U=:N4u48~a'=nx@V%|6_$b(#kQa#4AKLcoHcwvB$rm%#Ğ\b5GȠZ)J^E $*!IBdoTɪqO@n }ÃK:}| `e@CWUVԻ]x>^#NO=?1[9G|U±gd6bgsg>6quq)mt,}n%Y1z4:(C2iǿXF5 DZC}cwço6r<~Z;% 
.-͆lfͭƇ#Ff*!ִ߬ClTvn+q;Mu6Ն|MqbʲM[AIS ؟.ǼKbiPcZӏ~Ԕ8ƫ,ԉiӈǎ;Xz#}A_3A]{Zq!nI<'T4.P2TY%7_'_;i%IO"[FgUwr KodCdCdCB1O ʫl.[<""""l۟К59}ς֨W,CDIu+m!*暸~n`fFZ,ipq7b+x$ԑE3KmKKXkTk4%( YS6s۳O&dJ]_+Ñۿ~iKl+hLafyo-sE:ДںunҥGlZ?)Sj\J"Nר yԷoShᤌ_~*:Ʃ>s,oR9+M>[qtL&ȲruN_l̥99#¨,P,0Y.:Y.0Z8;o? AT5)mڇeEPNe6$5vqQX&v=U22aOk]TV׊mYͩ,^jgLvEsgDk-gk<>Dh#CȫCNS"6[8 Ǵ0S_+`vT 6;}B^᝝ڠ:LKP\_ ʂYcU."0L$QE.SJQmh)Ի4fƻ"F .mB?sO֊)\QZZhZ#٥7)'$`hΙRwkR;YUlR8}ZQeݴDYQ_9N?(-?(?.hN]0r*O(CUP:9Y(~n m  7h,-oٮ0U)r"T qb-~ U s!5 u 3Rn +Mf|RۨRnp oPܩ# 2F#\AS0 jKxkOT),w>;U-˸eB e;| "~"]sOZsOjRh}~J>OsOVP, @IkPz)mqS6d Zjs7T-F_ }QQ=>ߺ?kZMiۈtzX:ق7~@}ko|eMZ )GZSv1~n~$R:=ּӫuRXREP5ޥvYQJkm6lRp 3&Yi=Zwi~`Mo6(TpҦMo6)F mTxSmQt|{-[-o-TӸ4%M;SNSjTRNJiJwĩq*!&@Pz4%M;SNTӸ,ϯwt|{-[-onV/0n\t^QwEwU'[ :E>/.GEtZwZԭM7^aVAF44Rʗz|T1  EԷ1򥹏?7Ks)p h7)ncKs)1G o)GP0*Si B< GB<#S/~eÊuTЦ#SizMOOOOOQvVǺOOOOOOOOQ{QÚ7ۈ5^~JeMKtC_m((((((((sL0TSL0TSL0TSL0TSL0TSL0TSL0TSL0TO P.x)bO 'vjaLSX(-S8nd2~AS*eFzDС)u,TPO.S(0U.„EC+n߱3MFT'ۄ?z,i2[:'6~MS P y aF uC²JU&x/3ukwދ:{(QQC#U#5~7CPqP{ԮӓisQ#z/ n|XB˓oguu7t>4b/~%>2Mԓ)'pRNझqi:)'pRNझI;!.F^I;w$ I(=ό}z߉E5 wTe ^1 FR֚IU,k!Th8Bwaƙ@/u+P\;1(GJhj|SE @L'x)bYA,mZwdrXe\ۂfEk^47 =T>-((L*.TJ)_S%GN RY@*UOQ™ d)p,Tp(G `)B aFOUPʀTP!FS%GFJ"2<ԫqjV[[R{?;n6wTĪaN6uHCJ9qdJ63 = ȸmN\gNy aȩyRQ}ʼn/92R֯IZ̶:JV۬8iWQ͝ cҗ2W%)?k` +]''p$䵑fc3"q)U)Pf}Bĺe%S("PuUkqNWE{bC'MSbr;!Yd a>]qKlWU+~ 's=D%bQYr%Kh8,%ĥK%cx^_Ͷʩ+L2\Sْ_ZW9.4W[V7"#UVYx3Kٿ*ڦT!Ūl-&)z]@>*Kr[Q _`Lgqm\݇2?uCȇ/%گ-Uv9UWc]%mҦ`Moo]Uv9UWaݎꐶ!Ϝ6UWc]UvfDGMzT]@>Oo#fKw]D$b[n{9;r/;}wcJbOՏV< X5bc8c F"OՏV< X5c%fD⣑)&. ]؟#[rݞrhOo#fKw]D$b[n{9;r/;}wb~V܏'emʻC.m? eע>s&r$IS~ʤүMiYm}h>"Lu|xJZAgU[V;ϱWD:Lϱf3.$8zƶ3{cßcÌR#OqeQI;ZIDiQUZ'*IqT4H#D"!F$hF!LƉ$hF$hF/Kd_VW+rq+M+rq+rq+eAWN9]8tӎWN9]8tӎWN9]8tӂ#mf'ȈZGU]$+!H<4g/w"|9>A#q+L]>ЅB1e-tnmZ`3Xg-8Q$WW%e[ԙG$ːz1vHoB{m;GvK/O."孫Bz-U|QXe_}iGxBCi G;Y-jƼ+>r[{rbp df.(ӽdwmw~D{5 0FPW:isZѵY*~ޅQ]".Κ5T/ccccmo)Igq.|s|s|s|]:ī+obD -⻺DX\GtLJ̤8/ )Zy"27Z&'L/kJO9s*O_ك- EODf;.Rr ]ňܖW+H? 4gcgb(p7+h_bu\/VjmYR:HԪX+XR@dVo4qTN3:-kd g$X{E Zέ2OoreL(i?_UCsi\ 5tV'T}>. 
Iޤ9~@/^:YlCZ_fA.lHx,e0Tf P_(;~XbFڟT3q1q:bQжQHD®oZa?: !1QAa"0@Rq2BPb#`r ?QEQEQEQEQEQEQEQGNSD{\A?OA?OA?OA?OA?OA?OA?OA?M<.A(4.%/P*hBogzcx]4PUgG>^mIM!utۚwdbZh>T\rSV͈3fBjF?A;%Bs u Bs ջV;58襞r<)cP?{. ՗lGU)D8͈;Le1ϳtښQmlPmhQ^(]b̷F:KӦvwjy5أ4Tl*0cNUڈv(6mN ks%<9ȅQIQh S[pWjiU5FuvJass⢀ww~n;2hqEBpsvlm*j@Շʱ›yqʹ_Z~?ƩUʙ.kWf~k}N͈vD;L ;N?>H-tkբmoQڍƊ+Dr?{.q fa6}pdhzTp&-lfJ&")rj9D_50 56oSb;q5Nĸ ܨ8a$KzٳE;:U?ww~{ϸ} 7 ?CO=ޞdJԛk+O{k2ACQ٭?~O>n}8QL~OZÚ 媗\*Miݖ #ҠݧӾ( +(coN0Q4eS_Dx Sn*c*۷և;5U-f\9&bOU+`3ZJ,֘o6:}S6NU9tܵMgֺhpqpE`gjd:Ӏ8 Ws_*x)SJ TCO*x)SJ !A.<O*x&P-?M1QwwO@3 Yo)f (`3Rx,K7Pvzʋ'?P)f Yo_w~onU>uZx-/.:-/^S]M1QwwEEPiQwAh.}oCs_cyV KAwWw'P N7{c~5+gST3#'C]#9HL? ަy3'vT#VӻbQ=ETQ:qO8u{>:(QESwMST[κl=Bbbbb.MzߦC☘iܶ(^bs?R@3'bu\.S=X^        w)p— \)p.RK.— \)p€RK.RK.RK.M!QB8TL)۶ҀlT0)|۴!RP6R ~SJP. sPJP)EQQŠ8EuT"OQr'}*_ƻrM'OPx~Ӳ}u kG|ZIݡ'׸)"TQAPHQ`}e 1N^Q懧Kgu^c]x'(FPPQD{SWyGKM.yAw_aB]WGKM.yAw_a6xG4=>];Ɯ &SMѷro:*uTr;~@5S[Noj U0mm uTxSPOeJê},0:wpA]T"hz)HyQƚc\xW*ZkSÚApACPTJ'U@(BB* T42&ָjpL 4ohel4 rj('}jj_5 FeTMj8)paG TLP.TR@*PJh%@*RK.T1Q<(L)T.3Adocker-1.10.3/docs/userguide/storagedriver/imagesandcontainers.md000066400000000000000000000570601267010174400252210ustar00rootroot00000000000000 # Understand images, containers, and storage drivers To use storage drivers effectively, you must understand how Docker builds and stores images. Then, you need an understanding of how these images are used by containers. Finally, you'll need a short introduction to the technologies that enable both images and container operations. ## Images and layers Each Docker image references a list of read-only layers that represent filesystem differences. Layers are stacked on top of each other to form a base for a container's root filesystem. The diagram below shows the Ubuntu 15.04 image comprising 4 stacked image layers. ![](images/image-layers.jpg) The Docker storage driver is responsible for stacking these layers and providing a single unified view. 
When you create a new container, you add a new, thin, writable layer on top of the underlying stack. This layer is often called the "container layer". All changes made to the running container - such as writing new files, modifying existing files, and deleting files - are written to this thin writable container layer. The diagram below shows a container based on the Ubuntu 15.04 image. ![](images/container-layers.jpg) ### Content addressable storage Docker 1.10 introduced a new content addressable storage model. This is a completely new way to address image and layer data on disk. Previously, image and layer data was referenced and stored using a a randomly generated UUID. In the new model this is replaced by a secure *content hash*. The new model improves security, provides a built-in way to avoid ID collisions, and guarantees data integrity after pull, push, load, and save operations. It also enables better sharing of layers by allowing many images to freely share their layers even if they didn’t come from the same build. The diagram below shows an updated version of the previous diagram, highlighting the changes implemented by Docker 1.10. ![](images/container-layers-cas.jpg) As can be seen, all image layer IDs are cryptographic hashes, whereas the container ID is still a randomly generated UUID. There are several things to note regarding the new model. These include: 1. Migration of existing images 2. Image and layer filesystem structures Existing images, those created and pulled by earlier versions of Docker, need to be migrated before they can be used with the new model. This migration involves calculating new secure checksums and is performed automatically the first time you start an updated Docker daemon. After the migration is complete, all images and tags will have brand new secure IDs. Although the migration is automatic and transparent, it is computationally intensive. This means it and can take time if you have lots of image data. 
During this time your Docker daemon will not respond to other requests. A migration tool exists that allows you to migrate existing images to the new format before upgrading your Docker daemon. This means that upgraded Docker daemons do not need to perform the migration in-band, and therefore avoids any associated downtime. It also provides a way to manually migrate existing images so that they can be distributed to other Docker daemons in your environment that are already running the latest versions of Docker. The migration tool is provided by Docker, Inc., and runs as a container. You can download it from [https://github.com/docker/v1.10-migrator/releases](https://github.com/docker/v1.10-migrator/releases). While running the "migrator" image you need to expose your Docker host's data directory to the container. If you are using the default Docker data path, the command to run the container will look like this $ sudo docker run --rm -v /var/lib/docker:/var/lib/docker docker/v1.10-migrator If you use the `devicemapper` storage driver, you will need to include the `--privileged` option so that the container has access to your storage devices. #### Migration example The following example shows the migration tool in use on a Docker host running version 1.9.1 of the Docker daemon and the AUFS storage driver. The Docker host is running on a **t2.micro** AWS EC2 instance with 1 vCPU, 1GB RAM, and a single 8GB general purpose SSD EBS volume. The Docker data directory (`/var/lib/docker`) was consuming 2GB of space. 
$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE jenkins latest 285c9f0f9d3d 17 hours ago 708.5 MB mysql latest d39c3fa09ced 8 days ago 360.3 MB mongo latest a74137af4532 13 days ago 317.4 MB postgres latest 9aae83d4127f 13 days ago 270.7 MB redis latest 8bccd73928d9 2 weeks ago 151.3 MB centos latest c8a648134623 4 weeks ago 196.6 MB ubuntu 15.04 c8be1ac8145a 7 weeks ago 131.3 MB $ du -hs /var/lib/docker 2.0G /var/lib/docker $ time docker run --rm -v /var/lib/docker:/var/lib/docker docker/v1.10-migrator Unable to find image 'docker/v1.10-migrator:latest' locally latest: Pulling from docker/v1.10-migrator ed1f33c5883d: Pull complete b3ca410aa2c1: Pull complete 2b9c6ed9099e: Pull complete dce7e318b173: Pull complete Digest: sha256:bd2b245d5d22dd94ec4a8417a9b81bb5e90b171031c6e216484db3fe300c2097 Status: Downloaded newer image for docker/v1.10-migrator:latest time="2016-01-27T12:31:06Z" level=debug msg="Assembling tar data for 01e70da302a553ba13485ad020a0d77dbb47575a31c4f48221137bb08f45878d from /var/lib/docker/aufs/diff/01e70da302a553ba13485ad020a0d77dbb47575a31c4f48221137bb08f45878d" time="2016-01-27T12:31:06Z" level=debug msg="Assembling tar data for 07ac220aeeef9febf1ac16a9d1a4eff7ef3c8cbf5ed0be6b6f4c35952ed7920d from /var/lib/docker/aufs/diff/07ac220aeeef9febf1ac16a9d1a4eff7ef3c8cbf5ed0be6b6f4c35952ed7920d" time="2016-01-27T12:32:00Z" level=debug msg="layer dbacfa057b30b1feaf15937c28bd8ca0d6c634fc311ccc35bd8d56d017595d5b took 10.80 seconds" real 0m59.583s user 0m0.046s sys 0m0.008s The Unix `time` command prepends the `docker run` command to produce timings for the operation. As can be seen, the overall time taken to migrate 7 images comprising 2GB of disk space took approximately 1 minute. However, this included the time taken to pull the `docker/v1.10-migrator` image (approximately 3.5 seconds). 
The same operation on an m4.10xlarge EC2 instance with 40 vCPUs, 160GB RAM and an 8GB provisioned IOPS EBS volume resulted in the following improved timings: real 0m9.871s user 0m0.094s sys 0m0.021s This shows that the migration operation is affected by the hardware spec of the machine performing the migration. ## Container and layers The major difference between a container and an image is the top writable layer. All writes to the container that add new or modify existing data are stored in this writable layer. When the container is deleted the writable layer is also deleted. The underlying image remains unchanged. Because each container has its own thin writable container layer, and all changes are stored this container layer, this means that multiple containers can share access to the same underlying image and yet have their own data state. The diagram below shows multiple containers sharing the same Ubuntu 15.04 image. ![](images/sharing-layers.jpg) The Docker storage driver is responsible for enabling and managing both the image layers and the writable container layer. How a storage driver accomplishes these can vary between drivers. Two key technologies behind Docker image and container management are stackable image layers and copy-on-write (CoW). ## The copy-on-write strategy Sharing is a good way to optimize resources. People do this instinctively in daily life. For example, twins Jane and Joseph taking an Algebra class at different times from different teachers can share the same exercise book by passing it between each other. Now, suppose Jane gets an assignment to complete the homework on page 11 in the book. At that point, Jane copies page 11, completes the homework, and hands in her copy. The original exercise book is unchanged and only Jane has a copy of the changed page 11. Copy-on-write is a similar strategy of sharing and copying. 
In this strategy, system processes that need the same data share the same instance of that data rather than having their own copy. At some point, if one process needs to modify or write to the data, only then does the operating system make a copy of the data for that process to use. Only the process that needs to write has access to the data copy. All the other processes continue to use the original data. Docker uses a copy-on-write technology with both images and containers. This CoW strategy optimizes both image disk space usage and the performance of container start times. The next sections look at how copy-on-write is leveraged with images and containers through sharing and copying. ### Sharing promotes smaller images This section looks at image layers and copy-on-write technology. All image and container layers exist inside the Docker host's *local storage area* and are managed by the storage driver. On Linux-based Docker hosts this is usually located under `/var/lib/docker/`. The Docker client reports on image layers when instructed to pull and push images with `docker pull` and `docker push`. The command below pulls the `ubuntu:15.04` Docker image from Docker Hub. $ docker pull ubuntu:15.04 15.04: Pulling from library/ubuntu 1ba8ac955b97: Pull complete f157c4e5ede7: Pull complete 0b7e98f84c4c: Pull complete a3ed95caeb02: Pull complete Digest: sha256:5e279a9df07990286cce22e1b0f5b0490629ca6d187698746ae5e28e604a640e Status: Downloaded newer image for ubuntu:15.04 From the output, you'll see that the command actually pulls 4 image layers. Each of the above lines lists an image layer and its UUID or cryptographic hash. The combination of these four layers makes up the `ubuntu:15.04` Docker image. Each of these layers is stored in its own directory inside the Docker host's local storage are. Versions of Docker prior to 1.10 stored each layer in a directory with the same name as the image layer ID. 
However, this is not the case for images pulled with Docker version 1.10 and later. For example, the command below shows an image being pulled from Docker Hub, followed by a directory listing on a host running version 1.9.1 of the Docker Engine. $ docker pull ubuntu:15.04 15.04: Pulling from library/ubuntu 47984b517ca9: Pull complete df6e891a3ea9: Pull complete e65155041eed: Pull complete c8be1ac8145a: Pull complete Digest: sha256:5e279a9df07990286cce22e1b0f5b0490629ca6d187698746ae5e28e604a640e Status: Downloaded newer image for ubuntu:15.04 $ ls /var/lib/docker/aufs/layers 47984b517ca9ca0312aced5c9698753ffa964c2015f2a5f18e5efa9848cf30e2 c8be1ac8145a6e59a55667f573883749ad66eaeef92b4df17e5ea1260e2d7356 df6e891a3ea9cdce2a388a2cf1b1711629557454fd120abd5be6d32329a0e0ac e65155041eed7ec58dea78d90286048055ca75d41ea893c7246e794389ecf203 Notice how the four directories match up with the layer IDs of the downloaded image. Now compare this with the same operations performed on a host running version 1.10 of the Docker Engine. $ docker pull ubuntu:15.04 15.04: Pulling from library/ubuntu 1ba8ac955b97: Pull complete f157c4e5ede7: Pull complete 0b7e98f84c4c: Pull complete a3ed95caeb02: Pull complete Digest: sha256:5e279a9df07990286cce22e1b0f5b0490629ca6d187698746ae5e28e604a640e Status: Downloaded newer image for ubuntu:15.04 $ ls /var/lib/docker/aufs/layers/ 1d6674ff835b10f76e354806e16b950f91a191d3b471236609ab13a930275e24 5dbb0cbe0148cf447b9464a358c1587be586058d9a4c9ce079320265e2bb94e7 bef7199f2ed8e86fa4ada1309cfad3089e0542fec8894690529e4c04a7ca2d73 ebf814eccfe98f2704660ca1d844e4348db3b5ccc637eb905d4818fbfb00a06a See how the four directories do not match up with the image layer IDs pulled in the previous step. Despite the differences between image management before and after version 1.10, all versions of Docker still allow images to share layers. 
For example, If you `pull` an image that shares some of the same image layers as an image that has already been pulled, the Docker daemon recognizes this, and only pulls the layers it doesn't already have stored locally. After the second pull, the two images will share any common image layers. You can illustrate this now for yourself. Starting with the `ubuntu:15.04` image that you just pulled, make a change to it, and build a new image based on the change. One way to do this is using a `Dockerfile` and the `docker build` command. 1. In an empty directory, create a simple `Dockerfile` that starts with the 2. ubuntu:15.04 image. FROM ubuntu:15.04 2. Add a new file called "newfile" in the image's `/tmp` directory with the 3. text "Hello world" in it. When you are done, the `Dockerfile` contains two lines: FROM ubuntu:15.04 RUN echo "Hello world" > /tmp/newfile 3. Save and close the file. 4. From a terminal in the same folder as your `Dockerfile`, run the following 5. command: $ docker build -t changed-ubuntu . Sending build context to Docker daemon 2.048 kB Step 1 : FROM ubuntu:15.04 ---> 3f7bcee56709 Step 2 : RUN echo "Hello world" > /tmp/newfile ---> Running in d14acd6fad4e ---> 94e6b7d2c720 Removing intermediate container d14acd6fad4e Successfully built 94e6b7d2c720 > **Note:** The period (.) at the end of the above command is important. It > tells the `docker build` command to use the current working directory as > its build context. The output above shows a new image with image ID `94e6b7d2c720`. 5. Run the `docker images` command to verify the new `changed-ubuntu` image is 6. in the Docker host's local storage area. REPOSITORY TAG IMAGE ID CREATED SIZE changed-ubuntu latest 03b964f68d06 33 seconds ago 131.4 MB ubuntu 15.04 013f3d01d247 6 weeks ago 131.3 MB 6. Run the `docker history` command to see which image layers were used to 7. create the new `changed-ubuntu` image. 
$ docker history changed-ubuntu IMAGE CREATED CREATED BY SIZE COMMENT 94e6b7d2c720 2 minutes ago /bin/sh -c echo "Hello world" > /tmp/newfile 12 B 3f7bcee56709 6 weeks ago /bin/sh -c #(nop) CMD ["/bin/bash"] 0 B 6 weeks ago /bin/sh -c sed -i 's/^#\s*\(deb.*universe\)$/ 1.879 kB 6 weeks ago /bin/sh -c echo '#!/bin/sh' > /usr/sbin/polic 701 B 6 weeks ago /bin/sh -c #(nop) ADD file:8e4943cd86e9b2ca13 131.3 MB The `docker history` output shows the new `94e6b7d2c720` image layer at the top. You know that this is the new image layer added because it was created by the `echo "Hello world" > /tmp/newfile` command in your `Dockerfile`. The 4 image layers below it are the exact same image layers that make up the `ubuntu:15.04` image. > **Note:** Under the content addressable storage model introduced with Docker > 1.10, image history data is no longer stored in a config file with each image > layer. It is now stored as a string of text in a single config file that > relates to the overall image. This can result in some image layers showing as > "missing" in the output of the `docker history` command. This is normal > behaviour and can be ignored. > > You may hear images like these referred to as *flat images*. Notice the new `changed-ubuntu` image does not have its own copies of every layer. As can be seen in the diagram below, the new image is sharing its four underlying layers with the `ubuntu:15.04` image. ![](images/saving-space.jpg) The `docker history` command also shows the size of each image layer. As you can see, the `94e6b7d2c720` layer is only consuming 12 Bytes of disk space. This means that the `changed-ubuntu` image we just created is only consuming an additional 12 Bytes of disk space on the Docker host - all layers below the `94e6b7d2c720` layer already exist on the Docker host and are shared by other images. This sharing of image layers is what makes Docker images and containers so space efficient. 
### Copying makes containers efficient You learned earlier that a container is a Docker image with a thin writable, container layer added. The diagram below shows the layers of a container based on the `ubuntu:15.04` image: ![](images/container-layers-cas.jpg) All writes made to a container are stored in the thin writable container layer. The other layers are read-only (RO) image layers and can't be changed. This means that multiple containers can safely share a single underlying image. The diagram below shows multiple containers sharing a single copy of the `ubuntu:15.04` image. Each container has its own thin RW layer, but they all share a single instance of the ubuntu:15.04 image: ![](images/sharing-layers.jpg) When an existing file in a container is modified, Docker uses the storage driver to perform a copy-on-write operation. The specifics of operation depends on the storage driver. For the AUFS and OverlayFS storage drivers, the copy-on-write operation is pretty much as follows: * Search through the image layers for the file to update. The process starts at the top, newest layer and works down to the base layer one layer at a time. * Perform a "copy-up" operation on the first copy of the file that is found. A "copy up" copies the file up to the container's own thin writable layer. * Modify the *copy of the file* in container's thin writable layer. Btrfs, ZFS, and other drivers handle the copy-on-write differently. You can read more about the methods of these drivers later in their detailed descriptions. Containers that write a lot of data will consume more space than containers that do not. This is because most write operations consume new space in the container's thin writable top layer. If your container needs to write a lot of data, you should consider using a data volume. A copy-up operation can incur a noticeable performance overhead. This overhead is different depending on which storage driver is in use. 
However, large files, lots of layers, and deep directory trees can make the impact more noticeable. Fortunately, the operation only occurs the first time any particular file is modified. Subsequent modifications to the same file do not cause a copy-up operation and can operate directly on the file's existing copy already present in the container layer. Let's see what happens if we spin up 5 containers based on our `changed-ubuntu` image we built earlier: 1. From a terminal on your Docker host, run the following `docker run` command 5 times. $ docker run -dit changed-ubuntu bash 75bab0d54f3cf193cfdc3a86483466363f442fba30859f7dcd1b816b6ede82d4 $ docker run -dit changed-ubuntu bash 9280e777d109e2eb4b13ab211553516124a3d4d4280a0edfc7abf75c59024d47 $ docker run -dit changed-ubuntu bash a651680bd6c2ef64902e154eeb8a064b85c9abf08ac46f922ad8dfc11bb5cd8a $ docker run -dit changed-ubuntu bash 8eb24b3b2d246f225b24f2fca39625aaad71689c392a7b552b78baf264647373 $ docker run -dit changed-ubuntu bash 0ad25d06bdf6fca0dedc38301b2aff7478b3e1ce3d1acd676573bba57cb1cfef This launches 5 containers based on the `changed-ubuntu` image. As each container is created, Docker adds a writable layer and assigns it a random UUID. This is the value returned from the `docker run` command. 2. Run the `docker ps` command to verify the 5 containers are running. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 0ad25d06bdf6 changed-ubuntu "bash" About a minute ago Up About a minute stoic_ptolemy 8eb24b3b2d24 changed-ubuntu "bash" About a minute ago Up About a minute pensive_bartik a651680bd6c2 changed-ubuntu "bash" 2 minutes ago Up 2 minutes hopeful_turing 9280e777d109 changed-ubuntu "bash" 2 minutes ago Up 2 minutes backstabbing_mahavira 75bab0d54f3c changed-ubuntu "bash" 2 minutes ago Up 2 minutes boring_pasteur The output above shows 5 running containers, all sharing the `changed-ubuntu` image. Each `CONTAINER ID` is derived from the UUID when creating each container. 3. 
List the contents of the local storage area. $ sudo ls /var/lib/docker/containers 0ad25d06bdf6fca0dedc38301b2aff7478b3e1ce3d1acd676573bba57cb1cfef 9280e777d109e2eb4b13ab211553516124a3d4d4280a0edfc7abf75c59024d47 75bab0d54f3cf193cfdc3a86483466363f442fba30859f7dcd1b816b6ede82d4 a651680bd6c2ef64902e154eeb8a064b85c9abf08ac46f922ad8dfc11bb5cd8a 8eb24b3b2d246f225b24f2fca39625aaad71689c392a7b552b78baf264647373 Docker's copy-on-write strategy not only reduces the amount of space consumed by containers, it also reduces the time required to start a container. At start time, Docker only has to create the thin writable layer for each container. The diagram below shows these 5 containers sharing a single read-only (RO) copy of the `changed-ubuntu` image. ![](images/shared-uuid.jpg) If Docker had to make an entire copy of the underlying image stack each time it started a new container, container start times and disk space used would be significantly increased. ## Data volumes and the storage driver When a container is deleted, any data written to the container that is not stored in a *data volume* is deleted along with the container. A data volume is a directory or file in the Docker host's filesystem that is mounted directly into a container. Data volumes are not controlled by the storage driver. Reads and writes to data volumes bypass the storage driver and operate at native host speeds. You can mount any number of data volumes into a container. Multiple containers can also share one or more data volumes. The diagram below shows a single Docker host running two containers. Each container exists inside of its own address space within the Docker host's local storage area (`/var/lib/docker/...`). There is also a single shared data volume located at `/data` on the Docker host. This is mounted directly into both containers. 
![](images/shared-volume.jpg) Data volumes reside outside of the local storage area on the Docker host, further reinforcing their independence from the storage driver's control. When a container is deleted, any data stored in data volumes persists on the Docker host. For detailed information about data volumes, see [Managing data in containers](https://docs.docker.com/userguide/dockervolumes/). ## Related information * [Select a storage driver](selectadriver.md) * [AUFS storage driver in practice](aufs-driver.md) * [Btrfs storage driver in practice](btrfs-driver.md) * [Device Mapper storage driver in practice](device-mapper-driver.md) docker-1.10.3/docs/userguide/storagedriver/index.md000066400000000000000000000030431267010174400223020ustar00rootroot00000000000000 # Docker storage drivers Docker relies on driver technology to manage the storage and interactions associated with images and the containers that run them. This section contains the following pages: * [Understand images, containers, and storage drivers](imagesandcontainers.md) * [Select a storage driver](selectadriver.md) * [AUFS storage driver in practice](aufs-driver.md) * [Btrfs storage driver in practice](btrfs-driver.md) * [Device Mapper storage driver in practice](device-mapper-driver.md) * [OverlayFS in practice](overlayfs-driver.md) * [ZFS storage in practice](zfs-driver.md) If you are new to Docker containers, make sure you read ["Understand images, containers, and storage drivers"](imagesandcontainers.md) first. It explains key concepts and technologies that can help you when working with storage drivers. ### Acknowledgement The Docker storage driver material was created in large part by our guest author Nigel Poulton with a bit of help from Docker's own Jérôme Petazzoni. 
In his spare time Nigel creates [IT training videos](http://www.pluralsight.com/author/nigel-poulton), co-hosts the weekly [In Tech We Trust podcast](http://intechwetrustpodcast.com/), and lives it large on [Twitter](https://twitter.com/nigelpoulton).   docker-1.10.3/docs/userguide/storagedriver/overlayfs-driver.md000066400000000000000000000337611267010174400245100ustar00rootroot00000000000000 # Docker and OverlayFS in practice OverlayFS is a modern *union filesystem* that is similar to AUFS. In comparison to AUFS, OverlayFS: * has a simpler design * has been in the mainline Linux kernel since version 3.18 * is potentially faster As a result, OverlayFS is rapidly gaining popularity in the Docker community and is seen by many as a natural successor to AUFS. As promising as OverlayFS is, it is still relatively young. Therefore caution should be taken before using it in production Docker environments. Docker's `overlay` storage driver leverages several OverlayFS features to build and manage the on-disk structures of images and containers. >**Note**: Since it was merged into the mainline kernel, the OverlayFS *kernel >module* was renamed from "overlayfs" to "overlay". As a result you may see the > two terms used interchangeably in some documentation. However, this document > uses "OverlayFS" to refer to the overall filesystem, and `overlay` to refer > to Docker's storage-driver. ## Image layering and sharing with OverlayFS OverlayFS takes two directories on a single Linux host, layers one on top of the other, and provides a single unified view. These directories are often referred to as *layers* and the technology used to layer them is known as a *union mount*. The OverlayFS terminology is "lowerdir" for the bottom layer and "upperdir" for the top layer. The unified view is exposed through its own directory called "merged". The diagram below shows how a Docker image and a Docker container are layered. 
The image layer is the "lowerdir" and the container layer is the "upperdir". The unified view is exposed through a directory called "merged" which is effectively the container's mount point. The diagram shows how Docker constructs map to OverlayFS constructs. ![](images/overlay_constructs.jpg) Notice how the image layer and container layer can contain the same files. When this happens, the files in the container layer ("upperdir") are dominant and obscure the existence of the same files in the image layer ("lowerdir"). The container mount ("merged") presents the unified view. OverlayFS only works with two layers. This means that multi-layered images cannot be implemented as multiple OverlayFS layers. Instead, each image layer is implemented as its own directory under `/var/lib/docker/overlay`. Hard links are then used as a space-efficient way to reference data shared with lower layers. As of Docker 1.10, image layer IDs no longer correspond to directory names in `/var/lib/docker/`. To create a container, the `overlay` driver combines the directory representing the image's top layer plus a new directory for the container. The image's top layer is the "lowerdir" in the overlay and read-only. The new directory for the container is the "upperdir" and is writable. ## Example: Image and container on-disk constructs The following `docker pull` command shows a Docker host downloading a Docker image comprising four layers. $ sudo docker pull ubuntu Using default tag: latest latest: Pulling from library/ubuntu 8387d9ff0016: Pull complete 3b52deaaf0ed: Pull complete 4bd501fad6de: Pull complete a3ed95caeb02: Pull complete Digest: sha256:457b05828bdb5dcc044d93d042863fba3f2158ae249a6db5ae3934307c757c54 Status: Downloaded newer image for ubuntu:latest Each image layer has its own directory under `/var/lib/docker/overlay/`. This is where the contents of each image layer are stored. 
The output of the command below shows the four directories that store the contents of each image layer just pulled. However, as can be seen, the image layer IDs do not match the directory names in `/var/lib/docker/overlay`. This is normal behavior in Docker 1.10 and later. $ ls -l /var/lib/docker/overlay/ total 24 drwx------ 3 root root 4096 Oct 28 11:02 1d073211c498fd5022699b46a936b4e4bdacb04f637ad64d3475f558783f5c3e drwx------ 3 root root 4096 Oct 28 11:02 5a4526e952f0aa24f3fcc1b6971f7744eb5465d572a48d47c492cb6bbf9cbcda drwx------ 5 root root 4096 Oct 28 11:06 99fcaefe76ef1aa4077b90a413af57fd17d19dce4e50d7964a273aae67055235 drwx------ 3 root root 4096 Oct 28 11:01 c63fb41c2213f511f12f294dd729b9903a64d88f098c20d2350905ac1fdbcbba The image layer directories contain the files unique to that layer as well as hard links to the data that is shared with lower layers. This allows for efficient use of disk space. Containers also exist on-disk in the Docker host's filesystem under `/var/lib/docker/overlay/`. If you inspect the directory relating to a running container using the `ls -l` command, you find the following file and directories. $ ls -l /var/lib/docker/overlay/ total 16 -rw-r--r-- 1 root root 64 Oct 28 11:06 lower-id drwxr-xr-x 1 root root 4096 Oct 28 11:06 merged drwxr-xr-x 4 root root 4096 Oct 28 11:06 upper drwx------ 3 root root 4096 Oct 28 11:06 work These four filesystem objects are all artefacts of OverlayFS. The "lower-id" file contains the ID of the top layer of the image the container is based on. This is used by OverlayFS as the "lowerdir". $ cat /var/lib/docker/overlay/73de7176c223a6c82fd46c48c5f152f2c8a7e49ecb795a7197c3bb795c4d879e/lower-id 1d073211c498fd5022699b46a936b4e4bdacb04f637ad64d3475f558783f5c3e The "upper" directory is the containers read-write layer. Any changes made to the container are written to this directory. The "merged" directory is effectively the containers mount point. 
This is where the unified view of the image ("lowerdir") and container ("upperdir") is exposed. Any changes written to the container are immediately reflected in this directory. The "work" directory is required for OverlayFS to function. It is used for things such as *copy_up* operations. You can verify all of these constructs from the output of the `mount` command. (Ellipses and line breaks are used in the output below to enhance readability.) $ mount | grep overlay overlay on /var/lib/docker/overlay/73de7176c223.../merged type overlay (rw,relatime,lowerdir=/var/lib/docker/overlay/1d073211c498.../root, upperdir=/var/lib/docker/overlay/73de7176c223.../upper, workdir=/var/lib/docker/overlay/73de7176c223.../work) The output reflects that the overlay is mounted as read-write ("rw"). ## Container reads and writes with overlay Consider three scenarios where a container opens a file for read access with overlay. - **The file does not exist in the container layer**. If a container opens a file for read access and the file does not already exist in the container ("upperdir") it is read from the image ("lowerdir"). This should incur very little performance overhead. - **The file only exists in the container layer**. If a container opens a file for read access and the file exists in the container ("upperdir") and not in the image ("lowerdir"), it is read directly from the container. - **The file exists in the container layer and the image layer**. If a container opens a file for read access and the file exists in the image layer and the container layer, the file's version in the container layer is read. This is because files in the container layer ("upperdir") obscure files with the same name in the image layer ("lowerdir"). Consider some scenarios where files in a container are modified. - **Writing to a file for the first time**. The first time a container writes to an existing file, that file does not exist in the container ("upperdir"). 
The `overlay` driver performs a *copy_up* operation to copy the file from the image ("lowerdir") to the container ("upperdir"). The container then writes the changes to the new copy of the file in the container layer. However, OverlayFS works at the file level not the block level. This means that all OverlayFS copy-up operations copy entire files, even if the file is very large and only a small part of it is being modified. This can have a noticeable impact on container write performance. However, two things are worth noting: * The copy_up operation only occurs the first time any given file is written to. Subsequent writes to the same file will operate against the copy of the file already copied up to the container. * OverlayFS only works with two layers. This means that performance should be better than AUFS which can suffer noticeable latencies when searching for files in images with many layers. - **Deleting files and directories**. When files are deleted within a container a *whiteout* file is created in the containers "upperdir". The version of the file in the image layer ("lowerdir") is not deleted. However, the whiteout file in the container obscures it. Deleting a directory in a container results in *opaque directory* being created in the "upperdir". This has the same effect as a whiteout file and effectively masks the existence of the directory in the image's "lowerdir". ## Configure Docker with the overlay storage driver To configure Docker to use the overlay storage driver your Docker host must be running version 3.18 of the Linux kernel (preferably newer) with the overlay kernel module loaded. OverlayFS can operate on top of most supported Linux filesystems. However, ext4 is currently recommended for use in production environments. The following procedure shows you how to configure your Docker host to use OverlayFS. The procedure assumes that the Docker daemon is in a stopped state. 
> **Caution:** If you have already run the Docker daemon on your Docker host > and have images you want to keep, `push` them to Docker Hub or your private > Docker Trusted Registry before attempting this procedure. 1. If it is running, stop the Docker `daemon`. 2. Verify your kernel version and that the overlay kernel module is loaded. $ uname -r 3.19.0-21-generic $ lsmod | grep overlay overlay 3. Start the Docker daemon with the `overlay` storage driver. $ docker daemon --storage-driver=overlay & [1] 29403 root@ip-10-0-0-174:/home/ubuntu# INFO[0000] Listening for HTTP on unix (/var/run/docker.sock) INFO[0000] Option DefaultDriver: bridge INFO[0000] Option DefaultNetwork: bridge Alternatively, you can force the Docker daemon to automatically start with the `overlay` driver by editing the Docker config file and adding the `--storage-driver=overlay` flag to the `DOCKER_OPTS` line. Once this option is set you can start the daemon using normal startup scripts without having to manually pass in the `--storage-driver` flag. 4. Verify that the daemon is using the `overlay` storage driver. $ docker info Containers: 0 Images: 0 Storage Driver: overlay Backing Filesystem: extfs Notice that the *Backing filesystem* in the output above is showing as `extfs`. Multiple backing filesystems are supported but `extfs` (ext4) is recommended for production use cases. Your Docker host is now using the `overlay` storage driver. If you run the `mount` command, you'll find Docker has automatically created the `overlay` mount with the required "lowerdir", "upperdir", "merged" and "workdir" constructs. ## OverlayFS and Docker Performance As a general rule, the `overlay` driver should be fast. Almost certainly faster than `aufs` and `devicemapper`. In certain circumstances it may also be faster than `btrfs`. That said, there are a few things to be aware of relative to the performance of Docker using the `overlay` storage driver. - **Page Caching**. OverlayFS supports page cache sharing. 
This means multiple containers accessing the same file can share a single page cache entry (or entries). This makes the `overlay` driver efficient with memory and a good option for PaaS and other high density use cases. - **copy_up**. As with AUFS, OverlayFS has to perform copy-up operations any time a container writes to a file for the first time. This can insert latency into the write operation — especially if the file being copied up is large. However, once the file has been copied up, all subsequent writes to that file occur without the need for further copy-up operations. The OverlayFS copy_up operation should be faster than the same operation with AUFS. This is because AUFS supports more layers than OverlayFS and it is possible to incur far larger latencies if searching through many AUFS layers. - **RPMs and Yum**. OverlayFS only implements a subset of the POSIX standards. This can result in certain OverlayFS operations breaking POSIX standards. One such operation is the *copy-up* operation. Therefore, using `yum` inside of a container on a Docker host using the `overlay` storage driver is unlikely to work without implementing workarounds. - **Inode limits**. Use of the `overlay` storage driver can cause excessive inode consumption. This is especially so as the number of images and containers on the Docker host grows. A Docker host with a large number of images and lots of started and stopped containers can quickly run out of inodes. Unfortunately you can only specify the number of inodes in a filesystem at the time of creation. For this reason, you may wish to consider putting `/var/lib/docker` on a separate device with its own filesystem, or manually specifying the number of inodes when creating the filesystem. The following generic performance best practices also apply to OverlayFS. - **Solid State Devices (SSD)**. For best performance it is always a good idea to use fast storage media such as solid state devices (SSD). - **Use Data Volumes**. 
Data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you should place heavy write workloads on data volumes. docker-1.10.3/docs/userguide/storagedriver/selectadriver.md000066400000000000000000000211461267010174400240330ustar00rootroot00000000000000 # Select a storage driver This page describes Docker's storage driver feature. It lists the storage driver's that Docker supports and the basic commands associated with managing them. Finally, this page provides guidance on choosing a storage driver. The material on this page is intended for readers who already have an [understanding of the storage driver technology](imagesandcontainers.md). ## A pluggable storage driver architecture Docker has a pluggable storage driver architecture. This gives you the flexibility to "plug in" the storage driver that is best for your environment and use-case. Each Docker storage driver is based on a Linux filesystem or volume manager. Further, each storage driver is free to implement the management of image layers and the container layer in its own unique way. This means some storage drivers perform better than others in different circumstances. Once you decide which driver is best, you set this driver on the Docker daemon at start time. As a result, the Docker daemon can only run one storage driver, and all containers created by that daemon instance use the same storage driver. 
The table below shows the supported storage driver technologies and their driver names: |Technology |Storage driver name | |--------------|---------------------| |OverlayFS |`overlay` | |AUFS |`aufs` | |Btrfs |`btrfs` | |Device Mapper |`devicemapper` | |VFS* |`vfs` | |ZFS |`zfs` | To find out which storage driver is set on the daemon , you use the `docker info` command: $ docker info Containers: 0 Images: 0 Storage Driver: overlay Backing Filesystem: extfs Execution Driver: native-0.2 Logging Driver: json-file Kernel Version: 3.19.0-15-generic Operating System: Ubuntu 15.04 ... output truncated ... The `info` subcommand reveals that the Docker daemon is using the `overlay` storage driver with a `Backing Filesystem` value of `extfs`. The `extfs` value means that the `overlay` storage driver is operating on top of an existing (ext) filesystem. The backing filesystem refers to the filesystem that was used to create the Docker host's local storage area under `/var/lib/docker`. Which storage driver you use, in part, depends on the backing filesystem you plan to use for your Docker host's local storage area. Some storage drivers can operate on top of different backing filesystems. However, other storage drivers require the backing filesystem to be the same as the storage driver. For example, the `btrfs` storage driver on a Btrfs backing filesystem. The following table lists each storage driver and whether it must match the host's backing file system: |Storage driver |Must match backing filesystem | |---------------|------------------------------| |overlay |No | |aufs |No | |btrfs |Yes | |devicemapper |No | |vfs* |No | |zfs |Yes | You can set the storage driver by passing the `--storage-driver=` option to the `docker daemon` command line, or by setting the option on the `DOCKER_OPTS` line in the `/etc/default/docker` file. 
The following command shows how to start the Docker daemon with the `devicemapper` storage driver using the `docker daemon` command: $ docker daemon --storage-driver=devicemapper & $ docker info Containers: 0 Images: 0 Storage Driver: devicemapper Pool Name: docker-252:0-147544-pool Pool Blocksize: 65.54 kB Backing Filesystem: extfs Data file: /dev/loop0 Metadata file: /dev/loop1 Data Space Used: 1.821 GB Data Space Total: 107.4 GB Data Space Available: 3.174 GB Metadata Space Used: 1.479 MB Metadata Space Total: 2.147 GB Metadata Space Available: 2.146 GB Udev Sync Supported: true Deferred Removal Enabled: false Data loop file: /var/lib/docker/devicemapper/devicemapper/data Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata Library Version: 1.02.90 (2014-09-01) Execution Driver: native-0.2 Logging Driver: json-file Kernel Version: 3.19.0-15-generic Operating System: Ubuntu 15.04 Your choice of storage driver can affect the performance of your containerized applications. So it's important to understand the different storage driver options available and select the right one for your application. Later, in this page you'll find some advice for choosing an appropriate driver. ## Shared storage systems and the storage driver Many enterprises consume storage from shared storage systems such as SAN and NAS arrays. These often provide increased performance and availability, as well as advanced features such as thin provisioning, deduplication and compression. The Docker storage driver and data volumes can both operate on top of storage provided by shared storage systems. This allows Docker to leverage the increased performance and availability these systems provide. However, Docker does not integrate with these underlying systems. Remember that each Docker storage driver is based on a Linux filesystem or volume manager. 
Be sure to follow existing best practices for operating your storage driver (filesystem or volume manager) on top of your shared storage system. For example, if using the ZFS storage driver on top of *XYZ* shared storage system, be sure to follow best practices for operating ZFS filesystems on top of XYZ shared storage system. ## Which storage driver should you choose? Several factors influence the selection of a storage driver. However, these two facts must be kept in mind: 1. No single driver is well suited to every use-case 2. Storage drivers are improving and evolving all of the time With these factors in mind, the following points, coupled with the table below, should provide some guidance. ### Stability For the most stable and hassle-free Docker experience, you should consider the following: - **Use the default storage driver for your distribution**. When Docker installs, it chooses a default storage driver based on the configuration of your system. Stability is an important factor influencing which storage driver is used by default. Straying from this default may increase your chances of encountering bugs and nuances. - **Follow the configuration specified on the CS Engine [compatibility matrix](https://www.docker.com/compatibility-maintenance)**. The CS Engine is the commercially supported version of the Docker Engine. It's code-base is identical to the open source Engine, but it has a limited set of supported configurations. These *supported configurations* use the most stable and mature storage drivers. Straying from these configurations may also increase your chances of encountering bugs and nuances. ### Experience and expertise Choose a storage driver that you and your team/organization have experience with. For example, if you use RHEL or one of its downstream forks, you may already have experience with LVM and Device Mapper. If so, you may wish to use the `devicemapper` driver. 
If you do not feel you have expertise with any of the storage drivers supported by Docker, and you want an easy-to-use stable Docker experience, you should consider using the default driver installed by your distribution's Docker package. ### Future-proofing Many people consider OverlayFS as the future of the Docker storage driver. However, it is less mature, and potentially less stable than some of the more mature drivers such as `aufs` and `devicemapper`. For this reason, you should use the OverlayFS driver with caution and expect to encounter more bugs and nuances than if you were using a more mature driver. The following diagram lists each storage driver and provides insight into some of their pros and cons. When selecting which storage driver to use, consider the guidance offered by the table below along with the points mentioned above. ![](images/driver-pros-cons.png) ## Related information * [Understand images, containers, and storage drivers](imagesandcontainers.md) * [AUFS storage driver in practice](aufs-driver.md) * [Btrfs storage driver in practice](btrfs-driver.md) * [Device Mapper storage driver in practice](device-mapper-driver.md) docker-1.10.3/docs/userguide/storagedriver/zfs-driver.md000066400000000000000000000277411267010174400233010ustar00rootroot00000000000000 # Docker and ZFS in practice ZFS is a next generation filesystem that supports many advanced storage technologies such as volume management, snapshots, checksumming, compression and deduplication, replication and more. It was created by Sun Microsystems (now Oracle Corporation) and is open sourced under the CDDL license. Due to licensing incompatibilities between the CDDL and GPL, ZFS cannot be shipped as part of the mainline Linux kernel. However, the ZFS On Linux (ZoL) project provides an out-of-tree kernel module and userspace tools which can be installed separately. The ZFS on Linux (ZoL) port is healthy and maturing. 
However, at this point in time it is not recommended to use the `zfs` Docker storage driver for production use unless you have substantial experience with ZFS on Linux. > **Note:** There is also a FUSE implementation of ZFS on the Linux platform. > This should work with Docker but is not recommended. The native ZFS driver > (ZoL) is more tested, more performant, and is more widely used. The remainder > of this document will relate to the native ZoL port. ## Image layering and sharing with ZFS The Docker `zfs` storage driver makes extensive use of three ZFS datasets: - filesystems - snapshots - clones ZFS filesystems are thinly provisioned and have space allocated to them from a ZFS pool (zpool) via allocate on demand operations. Snapshots and clones are space-efficient point-in-time copies of ZFS filesystems. Snapshots are read-only. Clones are read-write. Clones can only be created from snapshots. This simple relationship is shown in the diagram below. ![](images/zfs_clones.jpg) The solid line in the diagram shows the process flow for creating a clone. Step 1 creates a snapshot of the filesystem, and step 2 creates the clone from the snapshot. The dashed line shows the relationship between the clone and the filesystem, via the snapshot. All three ZFS datasets draw space from the same underlying zpool. On Docker hosts using the `zfs` storage driver, the base layer of an image is a ZFS filesystem. Each child layer is a ZFS clone based on a ZFS snapshot of the layer below it. A container is a ZFS clone based on a ZFS Snapshot of the top layer of the image it's created from. All ZFS datasets draw their space from a common zpool. The diagram below shows how this is put together with a running container based on a two-layer image. ![](images/zfs_zpool.jpg) The following process explains how images are layered and containers created. The process is based on the diagram above. 1. The base layer of the image exists on the Docker host as a ZFS filesystem. 
This filesystem consumes space from the zpool used to create the Docker host's local storage area at `/var/lib/docker`. 2. Additional image layers are clones of the dataset hosting the image layer directly below it. In the diagram, "Layer 1" is added by making a ZFS snapshot of the base layer and then creating a clone from that snapshot. The clone is writable and consumes space on-demand from the zpool. The snapshot is read-only, maintaining the base layer as an immutable object. 3. When the container is launched, a read-write layer is added above the image. In the diagram above, the container's read-write layer is created by making a snapshot of the top layer of the image (Layer 1) and creating a clone from that snapshot. As changes are made to the container, space is allocated to it from the zpool via allocate-on-demand operations. By default, ZFS will allocate space in blocks of 128K. This process of creating child layers and containers from *read-only* snapshots allows images to be maintained as immutable objects. ## Container reads and writes with ZFS Container reads with the `zfs` storage driver are very simple. A newly launched container is based on a ZFS clone. This clone initially shares all of its data with the dataset it was created from. This means that read operations with the `zfs` storage driver are fast – even if the data being read was not copied into the container yet. This sharing of data blocks is shown in the diagram below. ![](images/zpool_blocks.jpg) Writing new data to a container is accomplished via an allocate-on-demand operation. Every time a new area of the container needs writing to, a new block is allocated from the zpool. This means that containers consume additional space as new data is written to them. New space is allocated to the container (ZFS Clone) from the underlying zpool. Updating *existing data* in a container is accomplished by allocating new blocks to the container's clone and storing the changed data in those new blocks. 
The original blocks are unchanged, allowing the underlying image dataset to remain immutable. This is the same as writing to a normal ZFS filesystem and is an implementation of copy-on-write semantics. ## Configure Docker with the ZFS storage driver The `zfs` storage driver is only supported on a Docker host where `/var/lib/docker` is mounted as a ZFS filesystem. This section shows you how to install and configure native ZFS on Linux (ZoL) on an Ubuntu 14.04 system. ### Prerequisites If you have already used the Docker daemon on your Docker host and have images you want to keep, `push` them Docker Hub or your private Docker Trusted Registry before attempting this procedure. Stop the Docker daemon. Then, ensure that you have a spare block device at `/dev/xvdb`. The device identifier may be be different in your environment and you should substitute your own values throughout the procedure. ### Install Zfs on Ubuntu 14.04 LTS 1. If it is running, stop the Docker `daemon`. 1. Install `the software-properties-common` package. This is required for the `add-apt-repository` command. $ sudo apt-get install software-properties-common Reading package lists... Done Building dependency tree 2. Add the `zfs-native` package archive. $ sudo add-apt-repository ppa:zfs-native/stable The native ZFS filesystem for Linux. Install the ubuntu-zfs package. gpg: key F6B0FC61: public key "Launchpad PPA for Native ZFS for Linux" imported gpg: Total number processed: 1 gpg: imported: 1 (RSA: 1) OK 3. Get the latest package lists for all registered repositories and package archives. $ sudo apt-get update Ign http://us-west-2.ec2.archive.ubuntu.com trusty InRelease Get:1 http://us-west-2.ec2.archive.ubuntu.com trusty-updates InRelease [64.4 kB] Fetched 10.3 MB in 4s (2,370 kB/s) Reading package lists... Done 4. Install the `ubuntu-zfs` package. $ sudo apt-get install -y ubuntu-zfs Reading package lists... Done Building dependency tree 5. Load the `zfs` module. $ sudo modprobe zfs 6. 
Verify that it loaded correctly. $ lsmod | grep zfs zfs 2768247 0 zunicode 331170 1 zfs zcommon 55411 1 zfs znvpair 89086 2 zfs,zcommon spl 96378 3 zfs,zcommon,znvpair zavl 15236 1 zfs ## Configure ZFS for Docker Once ZFS is installed and loaded, you're ready to configure ZFS for Docker. 1. Create a new `zpool`. $ sudo zpool create -f zpool-docker /dev/xvdb The command creates the `zpool` and gives it the name "zpool-docker". The name is arbitrary. 2. Check that the `zpool` exists. $ sudo zfs list NAME USED AVAIL REFER MOUNTPOINT zpool-docker 55K 3.84G 19K /zpool-docker 3. Create and mount a new ZFS filesystem to `/var/lib/docker`. $ sudo zfs create -o mountpoint=/var/lib/docker zpool-docker/docker 4. Check that the previous step worked. $ sudo zfs list -t all NAME USED AVAIL REFER MOUNTPOINT zpool-docker 93.5K 3.84G 19K /zpool-docker zpool-docker/docker 19K 3.84G 19K /var/lib/docker Now that you have a ZFS filesystem mounted to `/var/lib/docker`, the daemon should automatically load with the `zfs` storage driver. 5. Start the Docker daemon. $ sudo service docker start docker start/running, process 2315 The procedure for starting the Docker daemon may differ depending on the Linux distribution you are using. It is possible to force the Docker daemon to start with the `zfs` storage driver by passing the `--storage-driver=zfs`flag to the `docker daemon` command, or to the `DOCKER_OPTS` line in the Docker config file. 6. Verify that the daemon is using the `zfs` storage driver. $ sudo docker info Containers: 0 Images: 0 Storage Driver: zfs Zpool: zpool-docker Zpool Health: ONLINE Parent Dataset: zpool-docker/docker Space Used By Parent: 27648 Space Available: 4128139776 Parent Quota: no Compression: off Execution Driver: native-0.2 [...] The output of the command above shows that the Docker daemon is using the `zfs` storage driver and that the parent dataset is the `zpool-docker/docker` filesystem created earlier. 
Your Docker host is now using ZFS to store to manage its images and containers. ## ZFS and Docker performance There are several factors that influence the performance of Docker using the `zfs` storage driver. - **Memory**. Memory has a major impact on ZFS performance. This goes back to the fact that ZFS was originally designed for use on big Sun Solaris servers with large amounts of memory. Keep this in mind when sizing your Docker hosts. - **ZFS Features**. Using ZFS features, such as deduplication, can significantly increase the amount of memory ZFS uses. For memory consumption and performance reasons it is recommended to turn off ZFS deduplication. However, deduplication at other layers in the stack (such as SAN or NAS arrays) can still be used as these do not impact ZFS memory usage and performance. If using SAN, NAS or other hardware RAID technologies you should continue to follow existing best practices for using them with ZFS. - **ZFS Caching**. ZFS caches disk blocks in a memory structure called the adaptive replacement cache (ARC). The *Single Copy ARC* feature of ZFS allows a single cached copy of a block to be shared by multiple clones of a filesystem. This means that multiple running containers can share a single copy of cached block. This means that ZFS is a good option for PaaS and other high density use cases. - **Fragmentation**. Fragmentation is a natural byproduct of copy-on-write filesystems like ZFS. However, ZFS writes in 128K blocks and allocates *slabs* (multiple 128K blocks) to CoW operations in an attempt to reduce fragmentation. The ZFS intent log (ZIL) and the coalescing of writes (delayed writes) also help to reduce fragmentation. - **Use the native ZFS driver for Linux**. Although the Docker `zfs` storage driver supports the ZFS FUSE implementation, it is not recommended when high performance is required. The native ZFS on Linux driver tends to perform better than the FUSE implementation. 
The following generic performance best practices also apply to ZFS. - **Use of SSD**. For best performance it is always a good idea to use fast storage media such as solid state devices (SSD). However, if you only have a limited amount of SSD storage available it is recommended to place the ZIL on SSD. - **Use Data Volumes**. Data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you should place heavy write workloads on data volumes. docker-1.10.3/errors/000077500000000000000000000000001267010174400143615ustar00rootroot00000000000000docker-1.10.3/errors/README.md000066400000000000000000000050241267010174400156410ustar00rootroot00000000000000Docker 'errors' package ======================= This package contains all of the error messages generated by the Docker engine that might be exposed via the Docker engine's REST API. Each top-level engine package will have its own file in this directory so that there's a clear grouping of errors, instead of just one big file. The errors for each package are defined here instead of within their respective package structure so that Docker CLI code that may need to import these error definition files will not need to know or understand the engine's package/directory structure. In other words, all they should need to do is import `.../docker/errors` and they will automatically pick up all Docker engine defined errors. This also gives the engine developers the freedom to change the engine packaging structure (e.g. to CRUD packages) without worrying about breaking existing clients. These errors are defined using the 'errcode' package. The `errcode` package allows for each error to be typed and include all information necessary to have further processing done on them if necessary. 
In particular, each error includes: * Value - a unique string (in all caps) associated with this error. Typically, this string is the same name as the variable name of the error (w/o the `ErrorCode` text) but in all caps. * Message - the human readable sentence that will be displayed for this error. It can contain '%s' substitutions that allows for the code generating the error to specify values that will be inserted in the string prior to being displayed to the end-user. The `WithArgs()` function can be used to specify the insertion strings. Note, the evaluation of the strings will be done at the time `WithArgs()` is called. * Description - additional human readable text to further explain the circumstances of the error situation. * HTTPStatusCode - when the error is returned back to a CLI, this value will be used to populate the HTTP status code. If not present the default value will be `StatusInternalServerError`, 500. Not all errors generated within the engine's executable will be propagated back to the engine's API layer. For example, it is expected that errors generated by vendored code (under `docker/vendor`) and packaged code (under `docker/pkg`) will be converted into errors defined by this package. When processing an errcode error, if you are looking for a particular error then you can do something like: ``` import derr "github.com/docker/docker/errors" ... err := someFunc() if err.ErrorCode() == derr.ErrorCodeNoSuchContainer { ... } ``` docker-1.10.3/errors/builder.go000066400000000000000000000076261267010174400163510ustar00rootroot00000000000000package errors // This file contains all of the errors that can be generated from the // docker/builder component. import ( "net/http" "github.com/docker/distribution/registry/api/errcode" ) var ( // ErrorCodeAtLeastOneArg is generated when the parser comes across a // Dockerfile command that doesn't have any args. 
ErrorCodeAtLeastOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "ATLEASTONEARG", Message: "%s requires at least one argument", Description: "The specified command requires at least one argument", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeExactlyOneArg is generated when the parser comes across a // Dockerfile command that requires exactly one arg but got less/more. ErrorCodeExactlyOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXACTLYONEARG", Message: "%s requires exactly one argument", Description: "The specified command requires exactly one argument", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeAtLeastTwoArgs is generated when the parser comes across a // Dockerfile command that requires at least two args but got less. ErrorCodeAtLeastTwoArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "ATLEASTTWOARGS", Message: "%s requires at least two arguments", Description: "The specified command requires at least two arguments", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeTooManyArgs is generated when the parser comes across a // Dockerfile command that has more args than it should ErrorCodeTooManyArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "TOOMANYARGS", Message: "Bad input to %s, too many args", Description: "The specified command was passed too many arguments", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeChainOnBuild is generated when the parser comes across a // Dockerfile command that is trying to chain ONBUILD commands. 
ErrorCodeChainOnBuild = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CHAINONBUILD", Message: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", Description: "ONBUILD Dockerfile commands aren't allow on ONBUILD commands", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeBadOnBuildCmd is generated when the parser comes across a // an ONBUILD Dockerfile command with an invalid trigger/command. ErrorCodeBadOnBuildCmd = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BADONBUILDCMD", Message: "%s isn't allowed as an ONBUILD trigger", Description: "The specified ONBUILD command isn't allowed", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeMissingFrom is generated when the Dockerfile is missing // a FROM command. ErrorCodeMissingFrom = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MISSINGFROM", Message: "Please provide a source image with `from` prior to run", Description: "The Dockerfile is missing a FROM command", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNotOnWindows is generated when the specified Dockerfile // command is not supported on Windows. ErrorCodeNotOnWindows = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOTONWINDOWS", Message: "%s is not supported on Windows", Description: "The specified Dockerfile command is not supported on Windows", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeEmpty is generated when the specified Volume string // is empty. 
ErrorCodeVolumeEmpty = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMEEMPTY", Message: "Volume specified can not be an empty string", Description: "The specified volume can not be an empty string", HTTPStatusCode: http.StatusInternalServerError, }) ) docker-1.10.3/errors/daemon.go000066400000000000000000001337101267010174400161600ustar00rootroot00000000000000package errors // This file contains all of the errors that can be generated from the // docker/daemon component. import ( "net/http" "github.com/docker/distribution/registry/api/errcode" ) var ( // ErrorCodeNoSuchContainer is generated when we look for a container by // name or ID and we can't find it. ErrorCodeNoSuchContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOSUCHCONTAINER", Message: "No such container: %s", Description: "The specified container can not be found", HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeUnregisteredContainer is generated when we try to load // a storage driver for an unregistered container ErrorCodeUnregisteredContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "UNREGISTEREDCONTAINER", Message: "Can't load storage driver for unregistered container %s", Description: "An attempt was made to load the storage driver for a container that is not registered with the daemon", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeContainerBeingRemoved is generated when an attempt to start // a container is made but its in the process of being removed, or is dead. ErrorCodeContainerBeingRemoved = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CONTAINERBEINGREMOVED", Message: "Container is marked for removal and cannot be started.", Description: "An attempt was made to start a container that is in the process of being deleted", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeUnpauseContainer is generated when we attempt to stop a // container but its paused. 
ErrorCodeUnpauseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "UNPAUSECONTAINER", Message: "Container %s is paused. Unpause the container before stopping", Description: "The specified container is paused, before it can be stopped it must be unpaused", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRemovalContainer is generated when we attempt to connect or disconnect a // container but it's marked for removal. ErrorCodeRemovalContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "REMOVALCONTAINER", Message: "Container %s is marked for removal and cannot be connected or disconnected to the network", Description: "The specified container is marked for removal and cannot be connected or disconnected to the network", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodePausedContainer is generated when we attempt to attach a // container but its paused. ErrorCodePausedContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CONTAINERPAUSED", Message: "Container %s is paused. Unpause the container before attach", Description: "The specified container is paused, unpause the container before attach", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeAlreadyPaused is generated when we attempt to pause a // container when its already paused. ErrorCodeAlreadyPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "ALREADYPAUSED", Message: "Container %s is already paused", Description: "The specified container is already in the paused state", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNotPaused is generated when we attempt to unpause a // container when its not paused. 
ErrorCodeNotPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOTPAUSED", Message: "Container %s is not paused", Description: "The specified container can not be unpaused because it is not in a paused state", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeImageUnregContainer is generated when we attempt to get the // image of an unknown/unregistered container. ErrorCodeImageUnregContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "IMAGEUNREGCONTAINER", Message: "Can't get image of unregistered container", Description: "An attempt to retrieve the image of a container was made but the container is not registered", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeEmptyID is generated when an ID is the empty string. ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EMPTYID", Message: "Invalid empty id", Description: "An attempt was made to register a container but the container's ID can not be an empty string", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeLoggingFactory is generated when we could not load the // log driver. ErrorCodeLoggingFactory = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "LOGGINGFACTORY", Message: "Failed to get logging factory: %v", Description: "An attempt was made to register a container but the container's ID can not be an empty string", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeInitLogger is generated when we could not initialize // the logging driver. ErrorCodeInitLogger = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "INITLOGGER", Message: "Failed to initialize logging driver: %v", Description: "An error occurred while trying to initialize the logging driver", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNotRunning is generated when we need to verify that // a container is running, but its not. 
ErrorCodeNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOTRUNNING", Message: "Container %s is not running", Description: "The specified action can not be taken due to the container not being in a running state", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeLinkNotRunning is generated when we try to link to a // container that is not running. ErrorCodeLinkNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "LINKNOTRUNNING", Message: "Cannot link to a non running container: %s AS %s", Description: "An attempt was made to link to a container but the container is not in a running state", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeDeviceInfo is generated when there is an error while trying // to get info about a custom device. // container that is not running. ErrorCodeDeviceInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "DEVICEINFO", Message: "error gathering device information while adding custom device %q: %s", Description: "There was an error while trying to retrieve the information about a custom device", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeEmptyEndpoint is generated when the endpoint for a port // map is nil. ErrorCodeEmptyEndpoint = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EMPTYENDPOINT", Message: "invalid endpoint while building port map info", Description: "The specified endpoint for the port mapping is empty", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeEmptyNetwork is generated when the networkSettings for a port // map is nil. ErrorCodeEmptyNetwork = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EMPTYNETWORK", Message: "invalid networksettings while building port map info", Description: "The specified endpoint for the port mapping is empty", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeParsingPort is generated when there is an error parsing // a "port" string. 
ErrorCodeParsingPort = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "PARSINGPORT", Message: "Error parsing Port value(%v):%v", Description: "There was an error while trying to parse the specified 'port' value", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNoSandbox is generated when we can't find the specified // sandbox(network) by ID. ErrorCodeNoSandbox = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOSANDBOX", Message: "error locating sandbox id %s: %v", Description: "There was an error trying to located the specified networking sandbox", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNetworkUpdate is generated when there is an error while // trying update a network/sandbox config. ErrorCodeNetworkUpdate = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NETWORKUPDATE", Message: "Update network failed: %v", Description: "There was an error trying to update the configuration information of the specified network sandbox", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNetworkRefresh is generated when there is an error while // trying refresh a network/sandbox config. ErrorCodeNetworkRefresh = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NETWORKREFRESH", Message: "Update network failed: Failure in refresh sandbox %s: %v", Description: "There was an error trying to refresh the configuration information of the specified network sandbox", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeHostPort is generated when there was an error while trying // to parse a "host/port" string. ErrorCodeHostPort = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "HOSTPORT", Message: "Error parsing HostPort value(%s):%v", Description: "There was an error trying to parse the specified 'HostPort' value", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNetworkConflict is generated when we try to publish a service // in network mode. 
ErrorCodeNetworkConflict = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NETWORKCONFLICT", Message: "conflicting options: publishing a service and network mode", Description: "It is not possible to publish a service when it is in network mode", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeJoinInfo is generated when we failed to update a container's // join info. ErrorCodeJoinInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "JOININFO", Message: "Updating join info failed: %v", Description: "There was an error during an attempt update a container's join information", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeIPCRunning is generated when we try to join a container's // IPC but its not running. ErrorCodeIPCRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "IPCRUNNING", Message: "cannot join IPC of a non running container: %s", Description: "An attempt was made to join the IPC of a container, but the container is not running", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNotADir is generated when we try to create a directory // but the path isn't a dir. ErrorCodeNotADir = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOTADIR", Message: "Cannot mkdir: %s is not a directory", Description: "An attempt was made create a directory, but the location in which it is being created is not a directory", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeParseContainer is generated when the reference to a // container doesn't include a ":" (another container). ErrorCodeParseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "PARSECONTAINER", Message: "no container specified to join network", Description: "The specified reference to a container is missing a ':' as a separator between 'container' and 'name'/'id'", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeJoinSelf is generated when we try to network to ourselves. 
ErrorCodeJoinSelf = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "JOINSELF", Message: "cannot join own network", Description: "An attempt was made to have a container join its own network", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeJoinRunning is generated when we try to network to ourselves. ErrorCodeJoinRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "JOINRUNNING", Message: "cannot join network of a non running container: %s", Description: "An attempt to join the network of a container, but that container isn't running", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeModeNotContainer is generated when we try to network to // another container but the mode isn't 'container'. ErrorCodeModeNotContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MODENOTCONTAINER", Message: "network mode not set to container", Description: "An attempt was made to connect to a container's network but the mode wasn't set to 'container'", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRemovingVolume is generated when we try remove a mount // point (volume) but fail. ErrorCodeRemovingVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "REMOVINGVOLUME", Message: "Error removing volumes:\n%v", Description: "There was an error while trying to remove the mount point (volume) of a container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeInvalidNetworkMode is generated when an invalid network // mode value is specified. ErrorCodeInvalidNetworkMode = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "INVALIDNETWORKMODE", Message: "invalid network mode: %s", Description: "The specified networking mode is not valid", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeGetGraph is generated when there was an error while // trying to find a graph/image. 
ErrorCodeGetGraph = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "GETGRAPH", Message: "Failed to graph.Get on ImageID %s - %s", Description: "There was an error trying to retrieve the image for the specified image ID", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeGetLayer is generated when there was an error while // trying to retrieve a particular layer of an image. ErrorCodeGetLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "GETLAYER", Message: "Failed to get layer path from graphdriver %s for ImageID %s - %s", Description: "There was an error trying to retrieve the layer of the specified image", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodePutLayer is generated when there was an error while // trying to 'put' a particular layer of an image. ErrorCodePutLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "PUTLAYER", Message: "Failed to put layer path from graphdriver %s for ImageID %s - %s", Description: "There was an error trying to store a layer for the specified image", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeGetLayerMetadata is generated when there was an error while // trying to retrieve the metadata of a layer of an image. ErrorCodeGetLayerMetadata = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "GETLAYERMETADATA", Message: "Failed to get layer metadata - %s", Description: "There was an error trying to retrieve the metadata of a layer for the specified image", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeEmptyConfig is generated when the input config data // is empty. 
ErrorCodeEmptyConfig = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EMPTYCONFIG", Message: "Config cannot be empty in order to create a container", Description: "While trying to create a container, the specified configuration information was empty", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNoSuchImageHash is generated when we can't find the // specified image by its hash ErrorCodeNoSuchImageHash = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOSUCHIMAGEHASH", Message: "No such image: %s", Description: "An attempt was made to find an image by its hash, but the lookup failed", HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeNoSuchImageTag is generated when we can't find the // specified image byt its name/tag. ErrorCodeNoSuchImageTag = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOSUCHIMAGETAG", Message: "No such image: %s:%s", Description: "An attempt was made to find an image by its name/tag, but the lookup failed", HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeMountOverFile is generated when we try to mount a volume // over an existing file (but not a dir). ErrorCodeMountOverFile = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MOUNTOVERFILE", Message: "cannot mount volume over existing file, file exists %s", Description: "An attempt was made to mount a volume at the same location as a pre-existing file", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeMountSetup is generated when we can't define a mount point // due to the source and destination being undefined. 
ErrorCodeMountSetup = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MOUNTSETUP", Message: "Unable to setup mount point, neither source nor volume defined", Description: "An attempt was made to setup a mount point, but the source and destination are undefined", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeInvalidMode is generated when the mode of a volume/bind // mount is invalid. ErrorCodeVolumeInvalidMode = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMEINVALIDMODE", Message: "invalid mode: %q", Description: "An invalid 'mode' was specified", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeInvalid is generated when the format fo the // volume specification isn't valid. ErrorCodeVolumeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMEINVALID", Message: "Invalid volume specification: '%s'", Description: "An invalid 'volume' was specified in the mount request", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeAbs is generated when path to a volume isn't absolute. ErrorCodeVolumeAbs = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMEABS", Message: "Invalid volume destination path: '%s' mount path must be absolute.", Description: "An invalid 'destination' path was specified in the mount request, it must be an absolute path", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeName is generated when the name of named volume isn't valid. 
ErrorCodeVolumeName = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUME_NAME_INVALID", Message: "%q includes invalid characters for a local volume name, only %q are allowed", Description: "The name of volume is invalid", HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeVolumeSlash is generated when destination path to a volume is / ErrorCodeVolumeSlash = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMESLASH", Message: "Invalid specification: destination can't be '/' in '%s'", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeDestIsC is generated the destination is c: (Windows specific) ErrorCodeVolumeDestIsC = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMEDESTISC", Message: "Destination drive letter in '%s' cannot be c:", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeDestIsCRoot is generated the destination path is c:\ (Windows specific) ErrorCodeVolumeDestIsCRoot = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMEDESTISCROOT", Message: `Destination path in '%s' cannot be c:\`, HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeSourceNotFound is generated the source directory could not be found (Windows specific) ErrorCodeVolumeSourceNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMESOURCENOTFOUND", Message: "Source directory '%s' could not be found: %s", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeSourceNotDirectory is generated the source is not a directory (Windows specific) ErrorCodeVolumeSourceNotDirectory = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMESOURCENOTDIRECTORY", Message: "Source '%s' is not a directory", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeFromBlank is generated when path to a volume is blank. 
ErrorCodeVolumeFromBlank = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMEFROMBLANK", Message: "malformed volumes-from specification: %q", Description: "An invalid 'destination' path was specified in the mount request, it must not be blank", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeMountDup is generated when we try to mount two mounts points // to the same path. ErrorCodeMountDup = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MOUNTDUP", Message: "Duplicate mount point '%s'", Description: "An attempt was made to mount a content but the specified destination location is already used in a previous mount", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeNoSourceForMount is generated when no source directory // for a volume mount was found. (Windows specific) ErrorCodeVolumeNoSourceForMount = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMENOSOURCEFORMOUNT", Message: "No source for mount name '%s' driver %q destination '%s'", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeVolumeNameReservedWord is generated when the name in a volume // uses a reserved word for filenames. (Windows specific) ErrorCodeVolumeNameReservedWord = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUMENAMERESERVEDWORD", Message: "Volume name %q cannot be a reserved word for Windows filenames", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeCantUnpause is generated when there's an error while trying // to unpause a container. ErrorCodeCantUnpause = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CANTUNPAUSE", Message: "Cannot unpause container %s: %s", Description: "An error occurred while trying to unpause the specified container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodePSError is generated when trying to run 'ps'. 
ErrorCodePSError = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "PSError", Message: "Error running ps: %s", Description: "There was an error trying to run the 'ps' command in the specified container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNoPID is generated when looking for the PID field in the // ps output. ErrorCodeNoPID = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOPID", Message: "Couldn't find PID field in ps output", Description: "There was no 'PID' field in the output of the 'ps' command that was executed", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeBadPID is generated when we can't convert a PID to an int. ErrorCodeBadPID = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BADPID", Message: "Unexpected pid '%s': %s", Description: "While trying to parse the output of the 'ps' command, the 'PID' field was not an integer", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNoTop is generated when we try to run 'top' but can't // because we're on windows. ErrorCodeNoTop = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOTOP", Message: "Top is not supported on Windows", Description: "The 'top' command is not supported on Windows", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeStopped is generated when we try to stop a container // that is already stopped. ErrorCodeStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "STOPPED", Message: "Container already stopped", Description: "An attempt was made to stop a container, but the container is already stopped", HTTPStatusCode: http.StatusNotModified, }) // ErrorCodeCantStop is generated when we try to stop a container // but failed for some reason. 
ErrorCodeCantStop = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CANTSTOP", Message: "Cannot stop container %s: %s\n", Description: "An error occurred while tring to stop the specified container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeBadCPUFields is generated when the number of CPU fields is // less than 8. ErrorCodeBadCPUFields = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BADCPUFIELDS", Message: "invalid number of cpu fields", Description: "While reading the '/proc/stat' file, the number of 'cpu' fields is less than 8", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeBadCPUInt is generated the CPU field can't be parsed as an int. ErrorCodeBadCPUInt = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BADCPUINT", Message: "Unable to convert value %s to int: %s", Description: "While reading the '/proc/stat' file, the 'CPU' field could not be parsed as an integer", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeBadStatFormat is generated the output of the stat info // isn't parseable. ErrorCodeBadStatFormat = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BADSTATFORMAT", Message: "invalid stat format", Description: "There was an error trying to parse the '/proc/stat' file", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeTimedOut is generated when a timer expires. ErrorCodeTimedOut = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "TIMEDOUT", Message: "Timed out: %v", Description: "A timer expired", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeAlreadyRemoving is generated when we try to remove a // container that is already being removed. 
ErrorCodeAlreadyRemoving = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "ALREADYREMOVING", Message: "Status is already RemovalInProgress", Description: "An attempt to remove a container was made, but the container is already in the process of being removed", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeStartPaused is generated when we start a paused container. ErrorCodeStartPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "STARTPAUSED", Message: "Cannot start a paused container, try unpause instead.", Description: "An attempt to start a container was made, but the container is paused. Unpause it first", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeAlreadyStarted is generated when we try to start a container // that is already running. ErrorCodeAlreadyStarted = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "ALREADYSTARTED", Message: "Container already started", Description: "An attempt to start a container was made, but the container is already started", HTTPStatusCode: http.StatusNotModified, }) // ErrorCodeHostConfigStart is generated when a HostConfig is passed // into the start command. ErrorCodeHostConfigStart = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "HOSTCONFIGSTART", Message: "Supplying a hostconfig on start is not supported. It should be supplied on create", Description: "The 'start' command does not accept 'HostConfig' data, try using the 'create' command instead", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeCantRestart is generated when an error occurred while // trying to restart a container. ErrorCodeCantRestart = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CANTRESTART", Message: "Cannot restart container %s: %s", Description: "There was an error while trying to restart a container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeEmptyRename is generated when one of the names on a // rename is empty. 
ErrorCodeEmptyRename = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EMPTYRENAME", Message: "Neither old nor new names may be empty", Description: "An attempt was made to rename a container but either the old or new names were blank", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRenameTaken is generated when we try to rename but the // new name isn't available. ErrorCodeRenameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RENAMETAKEN", Message: "Error when allocating new name: %s", Description: "The new name specified on the 'rename' command is already being used", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRenameDelete is generated when we try to rename but // failed trying to delete the old container. ErrorCodeRenameDelete = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RENAMEDELETE", Message: "Failed to delete container %q: %v", Description: "There was an error trying to delete the specified container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodePauseError is generated when we try to pause a container // but failed. ErrorCodePauseError = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "PAUSEERROR", Message: "Cannot pause container %s: %s", Description: "There was an error trying to pause the specified container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNeedStream is generated when we try to stream a container's // logs but no output stream was specified. ErrorCodeNeedStream = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NEEDSTREAM", Message: "You must choose at least one stream", Description: "While trying to stream a container's logs, no output stream was specified", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeDanglingOne is generated when we try to specify more than one // 'dangling' specifier. 
ErrorCodeDanglingOne = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "DANLGINGONE", Message: "Conflict: cannot use more than 1 value for `dangling` filter", Description: "The specified 'dangling' filter may not have more than one value", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeImgDelUsed is generated when we try to delete an image // but it is being used. ErrorCodeImgDelUsed = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "IMGDELUSED", Message: "conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", Description: "An attempt was made to delete an image but it is currently being used", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeImgNoParent is generated when we try to find an image's // parent but its not in the graph. ErrorCodeImgNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "IMGNOPARENT", Message: "unable to get parent image: %v", Description: "There was an error trying to find an image's parent, it was not in the graph", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeExportFailed is generated when an export fails. ErrorCodeExportFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXPORTFAILED", Message: "%s: %s", Description: "There was an error during an export operation", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeExecResize is generated when we try to resize an exec // but its not running. ErrorCodeExecResize = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXECRESIZE", Message: "Exec %s is not running, so it can not be resized.", Description: "An attempt was made to resize an 'exec', but the 'exec' is not running", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeContainerNotRunning is generated when we try to get the info // on an exec but the container is not running. 
ErrorCodeContainerNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CONTAINERNOTRUNNING", Message: "Container %s is not running: %s", Description: "An attempt was made to retrieve the information about an 'exec' but the container is not running", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeNoExecID is generated when we try to get the info // on an exec but it can't be found. ErrorCodeNoExecID = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOEXECID", Message: "No such exec instance '%s' found in daemon", Description: "The specified 'exec' instance could not be found", HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeExecPaused is generated when we try to start an exec // but the container is paused. ErrorCodeExecPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXECPAUSED", Message: "Container %s is paused, unpause the container before exec", Description: "An attempt to start an 'exec' was made, but the owning container is paused", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeExecRestarting is generated when we try to start an exec // but the container is restarting. ErrorCodeExecRestarting = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXECRESTARTING", Message: "Container %s is restarting, wait until the container is running", Description: "An attempt to start an 'exec' was made, but the owning container is restarting", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeExecRunning is generated when we try to start an exec // but its already running. ErrorCodeExecRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXECRUNNING", Message: "Error: Exec command %s is already running", Description: "An attempt to start an 'exec' was made, but 'exec' is already running", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeExecExited is generated when we try to start an exec // but its already running. 
ErrorCodeExecExited = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXECEXITED", Message: "Error: Exec command %s has already run", Description: "An attempt to start an 'exec' was made, but 'exec' was already run", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeExecCantRun is generated when we try to start an exec // but it failed for some reason. ErrorCodeExecCantRun = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXECCANTRUN", Message: "Cannot run exec command %s in container %s: %s", Description: "An attempt to start an 'exec' was made, but an error occurred", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeExecAttach is generated when we try to attach to an exec // but failed. ErrorCodeExecAttach = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXECATTACH", Message: "attach failed with error: %s", Description: "There was an error while trying to attach to an 'exec'", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeExecContainerStopped is generated when we try to start // an exec but then the container stopped. ErrorCodeExecContainerStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "EXECCONTAINERSTOPPED", Message: "container stopped while running exec", Description: "An attempt was made to start an 'exec' but the owning container is in the 'stopped' state", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeDefaultName is generated when we try to delete the // default name of a container. ErrorCodeDefaultName = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "DEFAULTNAME", Message: "Conflict, cannot remove the default name of the container", Description: "An attempt to delete the default name of a container was made, but that is not allowed", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeNoParent is generated when we try to delete a container // but we can't find its parent image. 
ErrorCodeNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOPARENT", Message: "Cannot get parent %s for name %s", Description: "An attempt was made to delete a container but its parent image could not be found", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeCantDestroy is generated when we try to delete a container // but failed for some reason. ErrorCodeCantDestroy = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CANTDESTROY", Message: "Cannot destroy container %s: %v", Description: "An attempt was made to delete a container but it failed", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRmRunning is generated when we try to delete a container // but its still running. ErrorCodeRmRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMRUNNING", Message: "Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f", Description: "An attempt was made to delete a container but the container is still running, try to either stop it first or use '-f'", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeRmFailed is generated when we try to delete a container // but it failed for some reason. ErrorCodeRmFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMFAILED", Message: "Could not kill running container, cannot remove - %v", Description: "An error occurred while trying to delete a running container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRmNotFound is generated when we try to delete a container // but couldn't find it. 
ErrorCodeRmNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMNOTFOUND", Message: "Could not kill running container, cannot remove - %v", Description: "An attempt to delete a container was made but the container could not be found", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRmState is generated when we try to delete a container // but couldn't set its state to RemovalInProgress. ErrorCodeRmState = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMSTATE", Message: "Failed to set container state to RemovalInProgress: %s", Description: "An attempt to delete a container was made, but there as an error trying to set its state to 'RemovalInProgress'", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRmDriverFS is generated when we try to delete a container // but the driver failed to delete its filesystem. ErrorCodeRmDriverFS = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMDRIVERFS", Message: "Driver %s failed to remove root filesystem %s: %s", Description: "While trying to delete a container, the driver failed to remove the root filesystem", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRmFS is generated when we try to delete a container // but failed deleting its filesystem. ErrorCodeRmFS = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMFS", Message: "Unable to remove filesystem for %v: %v", Description: "While trying to delete a container, the driver failed to remove the filesystem", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRmExecDriver is generated when we try to delete a container // but failed deleting its exec driver data. 
ErrorCodeRmExecDriver = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMEXECDRIVER", Message: "Unable to remove execdriver data for %s: %s", Description: "While trying to delete a container, there was an error trying to remove th exec driver data", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeRmVolumeInUse is generated when we try to delete a container // but failed deleting a volume because its being used. ErrorCodeRmVolumeInUse = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMVOLUMEINUSE", Message: "Conflict: %v", Description: "While trying to delete a container, one of its volumes is still being used", HTTPStatusCode: http.StatusConflict, }) // ErrorCodeRmVolume is generated when we try to delete a container // but failed deleting a volume. ErrorCodeRmVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "RMVOLUME", Message: "Error while removing volume %s: %v", Description: "While trying to delete a container, there was an error trying to delete one of its volumes", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeInvalidCpusetCpus is generated when user provided cpuset CPUs // are invalid. ErrorCodeInvalidCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "INVALIDCPUSETCPUS", Message: "Invalid value %s for cpuset cpus.", Description: "While verifying the container's 'HostConfig', CpusetCpus value was in an incorrect format", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeInvalidCpusetMems is generated when user provided cpuset mems // are invalid. 
ErrorCodeInvalidCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "INVALIDCPUSETMEMS", Message: "Invalid value %s for cpuset mems.", Description: "While verifying the container's 'HostConfig', CpusetMems value was in an incorrect format", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNotAvailableCpusetCpus is generated when user provided cpuset // CPUs aren't available in the container's cgroup. ErrorCodeNotAvailableCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOTAVAILABLECPUSETCPUS", Message: "Requested CPUs are not available - requested %s, available: %s.", Description: "While verifying the container's 'HostConfig', cpuset CPUs provided aren't available in the container's cgroup available set", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeNotAvailableCpusetMems is generated when user provided cpuset // memory nodes aren't available in the container's cgroup. ErrorCodeNotAvailableCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NOTAVAILABLECPUSETMEMS", Message: "Requested memory nodes are not available - requested %s, available: %s.", Description: "While verifying the container's 'HostConfig', cpuset memory nodes provided aren't available in the container's cgroup available set", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorVolumeNameTaken is generated when an error occurred while // trying to create a volume that has existed using different driver. ErrorVolumeNameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "VOLUME_NAME_TAKEN", Message: "A volume named %s already exists. 
Choose a different volume name.", Description: "An attempt to create a volume using a driver but the volume already exists with a different driver", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeCmdNotFound is generated when container cmd can't start, // container command not found error, exit code 127 ErrorCodeCmdNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CMDNOTFOUND", Message: "Container command not found or does not exist.", Description: "Command could not be found, command does not exist", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeCmdCouldNotBeInvoked is generated when container cmd can't start, // container command permission denied error, exit code 126 ErrorCodeCmdCouldNotBeInvoked = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CMDCOULDNOTBEINVOKED", Message: "Container command could not be invoked.", Description: "Permission denied, cannot invoke command", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeCantStart is generated when container cmd can't start, // for any reason other than above 2 errors ErrorCodeCantStart = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CANTSTART", Message: "Cannot start container %s: %s", Description: "There was an error while trying to start a container", HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeCantDeletePredefinedNetwork is generated when one of the predefined networks // is attempted to be deleted. 
ErrorCodeCantDeletePredefinedNetwork = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CANT_DELETE_PREDEFINED_NETWORK", Message: "%s is a pre-defined network and cannot be removed", Description: "Engine's predefined networks cannot be deleted", HTTPStatusCode: http.StatusForbidden, }) // ErrorCodeMultipleNetworkConnect is generated when more than one network is passed // when creating a container ErrorCodeMultipleNetworkConnect = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "CANNOT_CONNECT_TO_MULTIPLE_NETWORKS", Message: "Container cannot be connected to %s", Description: "A container can only be connected to one network at the time", HTTPStatusCode: http.StatusBadRequest, }) ) docker-1.10.3/errors/error.go000066400000000000000000000002711267010174400160410ustar00rootroot00000000000000package errors // This file contains all of the errors that can be generated from the // docker engine but are not tied to any specific top-level component. const errGroup = "engine" docker-1.10.3/errors/image.go000066400000000000000000000011031267010174400157650ustar00rootroot00000000000000package errors // This file contains all of the errors that can be generated from the // docker/image component. import ( "net/http" "github.com/docker/distribution/registry/api/errcode" ) var ( // ErrorCodeInvalidImageID is generated when image id specified is incorrectly formatted. ErrorCodeInvalidImageID = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "INVALIDIMAGEID", Message: "image ID '%s' is invalid ", Description: "The specified image id is incorrectly formatted", HTTPStatusCode: http.StatusInternalServerError, }) ) docker-1.10.3/errors/server.go000066400000000000000000000037551267010174400162300ustar00rootroot00000000000000package errors import ( "net/http" "github.com/docker/distribution/registry/api/errcode" ) var ( // ErrorCodeNewerClientVersion is generated when a request from a client // specifies a higher version than the server supports. 
ErrorCodeNewerClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NEWERCLIENTVERSION", Message: "client is newer than server (client API version: %s, server API version: %s)", Description: "The client version is higher than the server version", HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeOldClientVersion is generated when a request from a client // specifies a version lower than the minimum version supported by the server. ErrorCodeOldClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "OLDCLIENTVERSION", Message: "client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", Description: "The client version is too old for the server", HTTPStatusCode: http.StatusBadRequest, }) // ErrorNetworkControllerNotEnabled is generated when the networking stack in not enabled // for certain platforms, like windows. ErrorNetworkControllerNotEnabled = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NETWORK_CONTROLLER_NOT_ENABLED", Message: "the network controller is not enabled for this platform", Description: "Docker's networking stack is disabled for this platform", HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeNoHijackConnection is generated when a request tries to attach to a container // but the connection to hijack is not provided. 
ErrorCodeNoHijackConnection = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "HIJACK_CONNECTION_MISSING", Message: "error attaching to container %s, hijack connection missing", Description: "The caller didn't provide a connection to hijack", HTTPStatusCode: http.StatusBadRequest, }) ) docker-1.10.3/experimental/000077500000000000000000000000001267010174400155425ustar00rootroot00000000000000docker-1.10.3/experimental/README.md000066400000000000000000000062441267010174400170270ustar00rootroot00000000000000# Docker Experimental Features This page contains a list of features in the Docker engine which are experimental. Experimental features are **not** ready for production. They are provided for test and evaluation in your sandbox environments. The information below describes each feature and the GitHub pull requests and issues associated with it. If necessary, links are provided to additional documentation on an issue. As an active Docker user and community member, please feel free to provide any feedback on these features you wish. ## Install Docker experimental Unlike the regular Docker binary, the experimental channels is built and updated nightly on https://experimental.docker.com. From one day to the next, new features may appear, while existing experimental features may be refined or entirely removed. 1. Verify that you have `curl` installed. $ which curl If `curl` isn't installed, install it after updating your manager: $ sudo apt-get update $ sudo apt-get install curl 2. Get the latest Docker package. $ curl -sSL https://experimental.docker.com/ | sh The system prompts you for your `sudo` password. Then, it downloads and installs Docker and its dependencies. >**Note**: If your company is behind a filtering proxy, you may find that the >`apt-key` >command fails for the Docker repo during installation. To work around this, >add the key directly using the following: > > $ curl -sSL https://experimental.docker.com/gpg | sudo apt-key add - 3. 
Verify `docker` is installed correctly. $ sudo docker run hello-world This command downloads a test image and runs it in a container. ### Get the Linux binary To download the latest experimental `docker` binary for Linux, use the following URLs: https://experimental.docker.com/builds/Linux/i386/docker-latest https://experimental.docker.com/builds/Linux/x86_64/docker-latest After downloading the appropriate binary, you can follow the instructions [here](https://docs.docker.com/installation/binaries/#get-the-docker-binary) to run the `docker` daemon. > **Note** > > 1) You can get the MD5 and SHA256 hashes by appending .md5 and .sha256 to the URLs respectively > > 2) You can get the compressed binaries by appending .tgz to the URLs ### Build an experimental binary You can also build the experimental binary from the standard development environment by adding `DOCKER_EXPERIMENTAL=1` to the environment where you run `make` to build Docker binaries. For example, to build a Docker binary with the experimental features enabled: $ DOCKER_EXPERIMENTAL=1 make binary ## Current experimental features * [External graphdriver plugins](plugins_graphdriver.md) * The user namespaces feature has graduated from experimental. ## How to comment on an experimental feature Each feature's documentation includes a list of proposal pull requests or PRs associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR. Issues or problems with a feature? Inquire for help on the `#docker` IRC channel or in on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user). docker-1.10.3/experimental/plugins_graphdriver.md000066400000000000000000000137411267010174400221500ustar00rootroot00000000000000# Experimental: Docker graph driver plugins Docker graph driver plugins enable admins to use an external/out-of-process graph driver for use with Docker engine. 
This is an alternative to using the built-in storage drivers, such as aufs/overlay/devicemapper/btrfs. A graph driver plugin is used for image and container fs storage, as such the plugin must be started and available for connections prior to Docker Engine being started. # Write a graph driver plugin See the [plugin documentation](/docs/extend/plugins.md) for detailed information on the underlying plugin protocol. ## Graph Driver plugin protocol If a plugin registers itself as a `GraphDriver` when activated, then it is expected to provide the rootfs for containers as well as image layer storage. ### /GraphDriver.Init **Request**: ``` { "Home": "/graph/home/path", "Opts": [] } ``` Initialize the graph driver plugin with a home directory and array of options. Plugins are not required to accept these options as the Docker Engine does not require that the plugin use this path or options, they are only being passed through from the user. **Response**: ``` { "Err": null } ``` Respond with a string error if an error occurred. ### /GraphDriver.Create **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" } ``` Create a new, empty, filesystem layer with the specified `ID` and `Parent`. `Parent` may be an empty string, which would indicate that there is no parent layer. **Response**: ``` { "Err: null } ``` Respond with a string error if an error occurred. ### /GraphDriver.Remove **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" } ``` Remove the filesystem layer with this given `ID`. **Response**: ``` { "Err: null } ``` Respond with a string error if an error occurred. ### /GraphDriver.Get **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" "MountLabel": "" } ``` Get the mountpoint for the layered filesystem referred to by the given `ID`. 
**Response**: ``` { "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", "Err": "" } ``` Respond with the absolute path to the mounted layered filesystem. Respond with a string error if an error occurred. ### /GraphDriver.Put **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" } ``` Release the system resources for the specified `ID`, such as unmounting the filesystem layer. **Response**: ``` { "Err: null } ``` Respond with a string error if an error occurred. ### /GraphDriver.Exists **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" } ``` Determine if a filesystem layer with the specified `ID` exists. **Response**: ``` { "Exists": true } ``` Respond with a boolean for whether or not the filesystem layer with the specified `ID` exists. ### /GraphDriver.Status **Request**: ``` {} ``` Get low-level diagnostic information about the graph driver. **Response**: ``` { "Status": [[]] } ``` Respond with a 2-D array with key/value pairs for the underlying status information. ### /GraphDriver.GetMetadata **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" } ``` Get low-level diagnostic information about the layered filesystem with the with the specified `ID` **Response**: ``` { "Metadata": {}, "Err": null } ``` Respond with a set of key/value pairs containing the low-level diagnostic information about the layered filesystem. Respond with a string error if an error occurred. ### /GraphDriver.Cleanup **Request**: ``` {} ``` Perform necessary tasks to release resources help by the plugin, for example unmounting all the layered file systems. **Response**: ``` { "Err: null } ``` Respond with a string error if an error occurred. 
### /GraphDriver.Diff **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" } ``` Get an archive of the changes between the filesystem layers specified by the `ID` and `Parent`. `Parent` may be an empty string, in which case there is no parent. **Response**: ``` {{ TAR STREAM }} ``` ### /GraphDriver.Changes **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" } ``` Get a list of changes between the filesystem layers specified by the `ID` and `Parent`. `Parent` may be an empty string, in which case there is no parent. **Response**: ``` { "Changes": [{}], "Err": null } ``` Responds with a list of changes. The structure of a change is: ``` "Path": "/some/path", "Kind": 0, ``` Where teh `Path` is the filesystem path within the layered filesystem that is changed and `Kind` is an integer specifying the type of change that occurred: - 0 - Modified - 1 - Added - 2 - Deleted Respond with a string error if an error occurred. ### /GraphDriver.ApplyDiff **Request**: ``` {{ TAR STREAM }} ``` Extract the changeset from the given diff into the layer with the specified `ID` and `Parent` **Query Parameters**: - id (required)- the `ID` of the new filesystem layer to extract the diff to - parent (required)- the `Parent` of the given `ID` **Response**: ``` { "Size": 512366, "Err": null } ``` Respond with the size of the new layer in bytes. Respond with a string error if an error occurred. 
### /GraphDriver.DiffSize **Request**: ``` { "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" } ``` Calculate the changes between the specified `ID` **Response**: ``` { "Size": 512366, "Err": null } ``` Respond with the size changes between the specified `ID` and `Parent` Respond with a string error if an error occurred. docker-1.10.3/hack/000077500000000000000000000000001267010174400137535ustar00rootroot00000000000000docker-1.10.3/hack/.vendor-helpers.sh000077500000000000000000000072141267010174400173310ustar00rootroot00000000000000#!/usr/bin/env bash PROJECT=github.com/docker/docker # Downloads dependencies into vendor/ directory mkdir -p vendor if ! go list github.com/docker/docker/docker &> /dev/null; then rm -rf .gopath mkdir -p .gopath/src/github.com/docker ln -sf ../../../.. .gopath/src/${PROJECT} export GOPATH="${PWD}/.gopath:${PWD}/vendor" fi export GOPATH="$GOPATH:${PWD}/vendor" find='find' if [ "$(go env GOHOSTOS)" = 'windows' ]; then find='/usr/bin/find' fi clone() { local vcs="$1" local pkg="$2" local rev="$3" local url="$4" : ${url:=https://$pkg} local target="vendor/src/$pkg" echo -n "$pkg @ $rev: " if [ -d "$target" ]; then echo -n 'rm old, ' rm -rf "$target" fi echo -n 'clone, ' case "$vcs" in git) git clone --quiet --no-checkout "$url" "$target" ( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" ) ;; hg) hg clone --quiet --updaterev "$rev" "$url" "$target" ;; esac echo -n 'rm VCS, ' ( cd "$target" && rm -rf .{git,hg} ) echo -n 'rm vendor, ' ( cd "$target" && rm -rf vendor Godeps/_workspace ) echo done } # get an ENV from the Dockerfile with support for multiline values _dockerfile_env() { local e="$1" awk ' $1 == "ENV" && $2 == "'"$e"'" { sub(/^ENV +([^ ]+) +/, ""); inEnv = 1; } inEnv { if (sub(/\\$/, "")) { printf "%s", $0; next; } print; exit; } ' ${DOCKER_FILE:="Dockerfile"} } clean() { local packages=( 
"${PROJECT}/docker" # package main "${PROJECT}/dockerinit" # package main "${PROJECT}/integration-cli" # external tests ) local dockerPlatforms=( ${DOCKER_ENGINE_OSARCH:="linux/amd64"} $(_dockerfile_env DOCKER_CROSSPLATFORMS) ) local dockerBuildTags="$(_dockerfile_env DOCKER_BUILDTAGS)" local buildTagCombos=( '' 'experimental' 'pkcs11' "$dockerBuildTags" "daemon $dockerBuildTags" "daemon cgo $dockerBuildTags" "experimental $dockerBuildTags" "experimental daemon $dockerBuildTags" "experimental daemon cgo $dockerBuildTags" "pkcs11 $dockerBuildTags" "pkcs11 daemon $dockerBuildTags" "pkcs11 daemon cgo $dockerBuildTags" ) echo echo -n 'collecting import graph, ' local IFS=$'\n' local imports=( $( for platform in "${dockerPlatforms[@]}"; do export GOOS="${platform%/*}"; export GOARCH="${platform##*/}"; for buildTags in "${buildTagCombos[@]}"; do go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}" go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}" done done | grep -vE "^${PROJECT}" | sort -u ) ) imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") ) unset IFS echo -n 'pruning unused packages, ' findArgs=( # This directory contains only .c and .h files which are necessary -path vendor/src/github.com/mattn/go-sqlite3/code ) for import in "${imports[@]}"; do [ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or ) findArgs+=( -path "vendor/src/$import" ) done local IFS=$'\n' local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') ) unset IFS for dir in "${prune[@]}"; do $find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not -name 'COPYING*' -exec rm -v -f '{}' ';' rmdir "$dir" 2>/dev/null || true done echo -n 'pruning unused files, ' $find vendor -type f -name '*_test.go' -exec rm -v '{}' ';' echo done } # Fix up hard-coded imports that refer to Godeps paths so they'll work with our vendoring fix_rewritten_imports () { local pkg="$1" local 
remove="${pkg}/Godeps/_workspace/src/" local target="vendor/src/$pkg" echo "$pkg: fixing rewritten imports" $find "$target" -name \*.go -exec sed -i -e "s|\"${remove}|\"|g" {} \; } docker-1.10.3/hack/Jenkins/000077500000000000000000000000001267010174400153545ustar00rootroot00000000000000docker-1.10.3/hack/Jenkins/W2L/000077500000000000000000000000001267010174400157605ustar00rootroot00000000000000docker-1.10.3/hack/Jenkins/W2L/setup.sh000066400000000000000000000244211267010174400174570ustar00rootroot00000000000000# Jenkins CI script for Windows to Linux CI. # Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable. set +xe SCRIPT_VER="Thu Feb 25 18:54:57 UTC 2016" # TODO to make (even) more resilient: # - Wait for daemon to be running before executing docker commands # - Check if jq is installed # - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version # - Make sure we are not running as local system. Can't do until all Azure nodes are updated. # - Error if docker versions are not equal. Can't do until all Azure nodes are updated # - Error if go versions are not equal. Can't do until all Azure nodes are updated. # - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" # - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind # - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP # - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason # for doing that is that it mirrors the actual release process for docker.exe which is cross-built. # However, should absolutely not be a problem if built natively, so nit-picking. # - Tidy up of images and containers. Either here, or in the teardown script. ec=0 echo INFO: Started at `date`. Script version $SCRIPT_VER # !README! 
# There are two daemons running on the remote Linux host: # - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon # from the sources matching the PR. # - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted # (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376). # The windows integration tests are run against this inner daemon. # get the ip, inner and outer ports. ip="${DOCKER_HOST#*://}" port_outer="${ip#*:}" # inner port is like outer port with last two digits inverted. port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/') ip="${ip%%:*}" echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner" # If TLS is enabled if [ -n "$DOCKER_TLS_VERIFY" ]; then protocol=https if [ -z "$DOCKER_MACHINE_NAME" ]; then ec=1 echo "ERROR: DOCKER_MACHINE_NAME is undefined" fi certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME) curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem" run_extra_args="-v tlscerts:/etc/docker" daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem" else protocol=http fi # Save for use by make.sh and scripts it invokes export MAIN_DOCKER_HOST="tcp://$ip:$port_inner" # Verify we can get the remote node to respond to _ping if [ $ec -eq 0 ]; then reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping` if [ "$reply" != "OK" ]; then ec=1 echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node" echo " at $ip:$port_outer when called with an http request for '_ping'. This implies that" echo " either the daemon has crashed/is not running, or the Linux node is unavailable." echo echo " A regular ping to the remote Linux node is below. It should reply. If not, the" echo " machine cannot be reached at all and may have crashed. 
If it does reply, it is" echo " likely a case of the Linux daemon not running or having crashed, which requires" echo " further investigation." echo echo " Try re-running this CI job, or ask on #docker-dev or #docker-maintainers" echo " for someone to perform further diagnostics, or take this node out of rotation." echo ping $ip else echo "INFO: The Linux nodes outer daemon replied to a ping. Good!" fi fi # Get the version from the remote node. Note this may fail if jq is not installed. # That's probably worth checking to make sure, just in case. if [ $ec -eq 0 ]; then remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'` echo "INFO: Remote daemon is running docker version $remoteVersion" fi # Compare versions. We should really fail if result is no 1. Output at end of script. if [ $ec -eq 0 ]; then uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l` fi # Make sure we are in repo if [ $ec -eq 0 ]; then if [ ! -d hack ]; then echo "ERROR: Are you sure this is being launched from a the root of docker repository?" echo " If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker." echo " Current directory is `pwd`" ec=1 fi fi # Get the commit has and verify we have something if [ $ec -eq 0 ]; then export COMMITHASH=$(git rev-parse --short HEAD) echo INFO: Commmit hash is $COMMITHASH if [ -z $COMMITHASH ]; then echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?" ec=1 fi fi # Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not # in the right directory where the repo is cloned. We also redirect TEMP to not use the environment # TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which # will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system... 
if [ $ec -eq 0 ]; then export TEMP=/c/CI/CI-$COMMITHASH export TMP=$TMP /usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p fi # Tidy up time if [ $ec -eq 0 ]; then echo INFO: Deleting pre-existing containers and images... # Force remove all containers based on a previously built image with this commit ! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null # Force remove any container with this commithash as a name ! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null # Force remove the image if it exists ! docker rmi -f "docker-$COMMITHASH" &>/dev/null # This SHOULD never happen, but just in case, also blow away any containers # that might be around. ! if [ ! `docker ps -aq | wc -l` -eq 0 ]; then echo WARN: There were some leftover containers. Cleaning them up. ! docker rm -f $(docker ps -aq) fi fi # Provide the docker version for debugging purposes. If these fail, game over. # as the Linux box isn't responding for some reason. if [ $ec -eq 0 ]; then echo INFO: Docker version and info of the outer daemon on the Linux node echo docker version ec=$? if [ 0 -ne $ec ]; then echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" fi echo fi # Same as above, but docker info if [ $ec -eq 0 ]; then echo docker info ec=$? if [ 0 -ne $ec ]; then echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" fi echo fi # build the daemon image if [ $ec -eq 0 ]; then echo "INFO: Running docker build on Linux host at $DOCKER_HOST" set -x docker build --rm --force-rm -t "docker:$COMMITHASH" . ec=$? set +x if [ 0 -ne $ec ]; then echo "ERROR: docker build failed" fi fi # Start the docker-in-docker daemon from the image we just built if [ $ec -eq 0 ]; then echo "INFO: Starting build of a Linux daemon to test against, and starting it..." 
set -x # aufs in aufs is faster than vfs in aufs docker run $run_extra_args -e DOCKER_GRAPHDRIVER=aufs --pid host --privileged -d --name "docker-$COMMITHASH" --net host "docker:$COMMITHASH" bash -c "echo 'INFO: Compiling' && date && hack/make.sh binary && echo 'INFO: Compile complete' && date && cp bundles/$(cat VERSION)/binary/docker /bin/docker && echo 'INFO: Starting daemon' && exec docker daemon -D -H tcp://0.0.0.0:$port_inner $daemon_extra_args" ec=$? set +x if [ 0 -ne $ec ]; then echo "ERROR: Failed to compile and start the linux daemon" fi fi # Build locally. if [ $ec -eq 0 ]; then echo "INFO: Starting local build of Windows binary..." set -x export TIMEOUT="120m" export DOCKER_HOST="tcp://$ip:$port_inner" export DOCKER_TEST_HOST="tcp://$ip:$port_inner" unset DOCKER_CLIENTONLY export DOCKER_REMOTE_DAEMON=1 hack/make.sh binary ec=$? set +x if [ 0 -ne $ec ]; then echo "ERROR: Build of binary on Windows failed" fi fi # Make a local copy of the built binary and ensure that is first in our path if [ $ec -eq 0 ]; then VERSION=$(< ./VERSION) cp bundles/$VERSION/binary/docker.exe $TEMP ec=$? if [ 0 -ne $ec ]; then echo "ERROR: Failed to copy built binary to $TEMP" fi export PATH=$TEMP:$PATH fi # Run the integration tests if [ $ec -eq 0 ]; then echo "INFO: Running Integration tests..." set -x export DOCKER_TEST_TLS_VERIFY="$DOCKER_TLS_VERIFY" export DOCKER_TEST_CERT_PATH="$DOCKER_CERT_PATH" hack/make.sh test-integration-cli ec=$? set +x if [ 0 -ne $ec ]; then echo "ERROR: CLI test failed." # Next line is useful, but very long winded if included # docker -H=$MAIN_DOCKER_HOST logs "docker-$COMMITHASH" fi fi # Tidy up any temporary files from the CI run if [ ! 
-z $COMMITHASH ]; then rm -rf $TEMP fi # CI Integrity check - ensure we are using the same version of go as present in the Dockerfile GOVER_DOCKERFILE=`grep 'ENV GO_VERSION' Dockerfile | awk '{print $3}'` GOVER_INSTALLED=`go version | awk '{print $3}'` if [ "${GOVER_INSTALLED:2}" != "$GOVER_DOCKERFILE" ]; then ec=1 # Uncomment to make CI fail once all nodes are updated. echo echo "---------------------------------------------------------------------------" echo "ERROR: CI should be using go version $GOVER_DOCKERFILE, but is using ${GOVER_INSTALLED:2}" echo " This is currently a warning, but should (will) become an error in the future." echo "---------------------------------------------------------------------------" echo fi # Check the Linux box is running a matching version of docker if [ "$uniques" -ne 1 ]; then ec=1 # Uncomment to make CI fail once all nodes are updated. echo echo "---------------------------------------------------------------------------" echo "ERROR: This CI node is not running the same version of docker as the daemon." echo " This is a CI configuration issue" echo "---------------------------------------------------------------------------" echo fi # Tell the user how we did. if [ $ec -eq 0 ]; then echo INFO: Completed successfully at `date`. else echo ERROR: Failed with exitcode $ec at `date`. fi exit $ec docker-1.10.3/hack/Jenkins/readme.md000066400000000000000000000001401267010174400171260ustar00rootroot00000000000000These files under this directory are for reference only. They are used by Jenkins for CI runs.docker-1.10.3/hack/dind000077500000000000000000000017571267010174400146310ustar00rootroot00000000000000#!/bin/bash set -e # DinD: a wrapper script which allows docker to be run inside a docker container. 
# Original version by Jerome Petazzoni # See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/ # # This script should be executed inside a docker container in privilieged mode # ('docker run --privileged', introduced in docker 0.6). # Usage: dind CMD [ARG...] # apparmor sucks and Docker needs to know that it's in a container (c) @tianon export container=docker if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then mount -t securityfs none /sys/kernel/security || { echo >&2 'Could not mount /sys/kernel/security.' echo >&2 'AppArmor detection and --privileged mode might break.' } fi # Mount /tmp (conditionally) if ! mountpoint -q /tmp; then mount -t tmpfs none /tmp fi if [ $# -gt 0 ]; then exec "$@" fi echo >&2 'ERROR: No command specified.' echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' docker-1.10.3/hack/generate-authors.sh000077500000000000000000000005761267010174400175770ustar00rootroot00000000000000#!/bin/bash set -e cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." # see also ".mailmap" for how email addresses and names are deduplicated { cat <<-'EOH' # This file lists all individuals having contributed content to the repository. # For how it is generated, see `hack/generate-authors.sh`. EOH echo git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf } > AUTHORS docker-1.10.3/hack/install.sh000077500000000000000000000333141267010174400157640ustar00rootroot00000000000000#!/bin/sh set -e # # This script is meant for quick & easy install via: # 'curl -sSL https://get.docker.com/ | sh' # or: # 'wget -qO- https://get.docker.com/ | sh' # # For test builds (ie. 
release candidates): # 'curl -fsSL https://test.docker.com/ | sh' # or: # 'wget -qO- https://test.docker.com/ | sh' # # For experimental builds: # 'curl -fsSL https://experimental.docker.com/ | sh' # or: # 'wget -qO- https://experimental.docker.com/ | sh' # # Docker Maintainers: # To update this script on https://get.docker.com, # use hack/release.sh during a normal release, # or the following one-liner for script hotfixes: # s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index # url='https://get.docker.com/' command_exists() { command -v "$@" > /dev/null 2>&1 } echo_docker_as_nonroot() { if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x $sh_c 'docker version' ) || true fi your_user=your-user [ "$user" != 'root' ] && your_user="$user" # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output cat <<-EOF If you would like to use Docker as a non-root user, you should now consider adding your user to the "docker" group with something like: sudo usermod -aG docker $your_user Remember that you will have to log out and back in for this to take effect! EOF } # Check if this is a forked Linux distro check_forked() { # Check for lsb_release command existence, it usually exists in forked distros if command_exists lsb_release; then # Check if the `-u` option is supported set +e lsb_release -a -u > /dev/null 2>&1 lsb_release_exit_code=$? set -e # Check if the command has exited successfully, it means we're in a forked distro if [ "$lsb_release_exit_code" = "0" ]; then # Print info about current distro cat <<-EOF You're using '$lsb_dist' version '$dist_version'. 
EOF # Get the upstream release info lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') # Print info about upstream distro cat <<-EOF Upstream release is '$lsb_dist' version '$dist_version'. EOF else if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ]; then # We're Debian and don't even know it! lsb_dist=debian dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" case "$dist_version" in 8|'Kali Linux 2') dist_version="jessie" ;; 7) dist_version="wheezy" ;; esac fi fi fi } rpm_import_repository_key() { local key=$1; shift local tmpdir=$(mktemp -d) chmod 600 "$tmpdir" gpg --homedir "$tmpdir" --keyserver ha.pool.sks-keyservers.net --recv-keys "$key" gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key rpm --import "$tmpdir"/repo.key rm -rf "$tmpdir" } semverParse() { major="${1%%.*}" minor="${1#$major.}" minor="${minor%%.*}" patch="${1#$major.$minor.}" patch="${patch%%[-.]*}" } do_install() { case "$(uname -m)" in *64) ;; *) cat >&2 <<-'EOF' Error: you are not using a 64bit platform. Docker currently only supports 64bit platforms. EOF exit 1 ;; esac if command_exists docker; then version="$(docker -v | awk -F '[ ,]+' '{ print $3 }')" MAJOR_W=1 MINOR_W=10 semverParse $version shouldWarn=0 if [ $major -lt $MAJOR_W ]; then shouldWarn=1 fi if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then shouldWarn=1 fi cat >&2 <<-'EOF' Warning: the "docker" command appears to already exist on this system. If you already have Docker installed, this script can cause trouble, which is why we're displaying this warning and provide the opportunity to cancel the installation. 
If you installed the current Docker package using this script and are using it EOF if [ $shouldWarn -eq 1 ]; then cat >&2 <<-'EOF' again to update Docker, we urge you to migrate your image store before upgrading to v1.10+. You can find instructions for this here: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration EOF else cat >&2 <<-'EOF' again to update Docker, you can safely ignore this message. EOF fi cat >&2 <<-'EOF' You may press Ctrl+C now to abort this script. EOF ( set -x; sleep 20 ) fi user="$(id -un 2>/dev/null || true)" sh_c='sh -c' if [ "$user" != 'root' ]; then if command_exists sudo; then sh_c='sudo -E sh -c' elif command_exists su; then sh_c='su -c' else cat >&2 <<-'EOF' Error: this installer needs the ability to run commands as root. We are unable to find either "sudo" or "su" available to make this happen. EOF exit 1 fi fi curl='' if command_exists curl; then curl='curl -sSL' elif command_exists wget; then curl='wget -qO-' elif command_exists busybox && busybox --list-modules | grep -q wget; then curl='busybox wget -qO-' fi # check to see which repo they are trying to install from repo='main' if [ "https://test.docker.com/" = "$url" ]; then repo='testing' elif [ "https://experimental.docker.com/" = "$url" ]; then repo='experimental' fi # perform some very rudimentary platform detection lsb_dist='' dist_version='' if command_exists lsb_release; then lsb_dist="$(lsb_release -si)" fi if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" fi if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then lsb_dist='debian' fi if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then lsb_dist='fedora' fi if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then lsb_dist='oracleserver' fi if [ -z "$lsb_dist" ]; then if [ -r /etc/centos-release ] || [ -r /etc/redhat-release ]; then lsb_dist='centos' fi fi if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then lsb_dist="$(. 
/etc/os-release && echo "$ID")" fi lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" case "$lsb_dist" in ubuntu) if command_exists lsb_release; then dist_version="$(lsb_release --codename | cut -f2)" fi if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" fi ;; debian) dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" case "$dist_version" in 8) dist_version="jessie" ;; 7) dist_version="wheezy" ;; esac ;; oracleserver) # need to switch lsb_dist to match yum repo URL lsb_dist="oraclelinux" dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" ;; fedora|centos) dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" ;; *) if command_exists lsb_release; then dist_version="$(lsb_release --codename | cut -f2)" fi if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then dist_version="$(. 
/etc/os-release && echo "$VERSION_ID")" fi ;; esac # Check if this is a forked Linux distro check_forked # Run setup for each distro accordingly case "$lsb_dist" in amzn) ( set -x $sh_c 'sleep 3; yum -y -q install docker' ) echo_docker_as_nonroot exit 0 ;; 'opensuse project'|opensuse) echo 'Going to perform the following operations:' if [ "$repo" != 'main' ]; then echo ' * add repository obs://Virtualization:containers' fi echo ' * install Docker' $sh_c 'echo "Press CTRL-C to abort"; sleep 3' if [ "$repo" != 'main' ]; then # install experimental packages from OBS://Virtualization:containers ( set -x zypper -n ar -f obs://Virtualization:containers Virtualization:containers rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2 ) fi ( set -x zypper -n install docker ) echo_docker_as_nonroot exit 0 ;; 'suse linux'|sle[sd]) echo 'Going to perform the following operations:' if [ "$repo" != 'main' ]; then echo ' * add repository obs://Virtualization:containers' echo ' * install experimental Docker using packages NOT supported by SUSE' else echo ' * add the "Containers" module' echo ' * install Docker using packages supported by SUSE' fi $sh_c 'echo "Press CTRL-C to abort"; sleep 3' if [ "$repo" != 'main' ]; then # install experimental packages from OBS://Virtualization:containers echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE' ( set -x zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2 ) else # Add the containers module # Note well-1: the SLE machine must already be registered against SUSE Customer Center # Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect ( set -x SUSEConnect -p sle-module-containers/12/x86_64 -r "" ) fi ( set -x zypper -n install docker ) echo_docker_as_nonroot exit 0 ;; ubuntu|debian) export DEBIAN_FRONTEND=noninteractive did_apt_get_update= apt_get_update() { 
if [ -z "$did_apt_get_update" ]; then ( set -x; $sh_c 'sleep 3; apt-get update' ) did_apt_get_update=1 fi } # aufs is preferred over devicemapper; try to ensure the driver is available. if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' ( set -x; sleep 10 ) fi else echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' echo >&2 ' package. We have no AUFS support. Consider installing the packages' echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' ( set -x; sleep 10 ) fi fi # install apparmor utils if they're missing and apparmor is enabled in the kernel # otherwise Docker will fail to start if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then if command -v apparmor_parser >/dev/null 2>&1; then echo 'apparmor is enabled in the kernel and apparmor utils were already installed' else echo 'apparmor is enabled in the kernel, but apparmor_parser missing' apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) fi fi if [ ! 
-e /usr/lib/apt/methods/https ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) fi if [ -z "$curl" ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) curl='curl -sSL' fi ( set -x $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D" $sh_c "mkdir -p /etc/apt/sources.list.d" $sh_c "echo deb [arch=$(dpkg --print-architecture)] https://apt.dockerproject.org/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list" $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine' ) echo_docker_as_nonroot exit 0 ;; fedora|centos|oraclelinux) $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF [docker-${repo}-repo] name=Docker ${repo} Repository baseurl=https://yum.dockerproject.org/repo/${repo}/${lsb_dist}/${dist_version} enabled=1 gpgcheck=1 gpgkey=https://yum.dockerproject.org/gpg EOF if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then ( set -x $sh_c 'sleep 3; dnf -y -q install docker-engine' ) else ( set -x $sh_c 'sleep 3; yum -y -q install docker-engine' ) fi echo_docker_as_nonroot exit 0 ;; gentoo) if [ "$url" = "https://test.docker.com/" ]; then # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output cat >&2 <<-'EOF' You appear to be trying to install the latest nightly build in Gentoo.' The portage tree should contain the latest stable release of Docker, but' if you want something more recent, you can always use the live ebuild' provided in the "docker" overlay available via layman. 
For more' instructions, please see the following URL:' https://github.com/tianon/docker-overlay#using-this-overlay' After adding the "docker" overlay, you should be able to:' emerge -av =app-emulation/docker-9999' EOF exit 1 fi ( set -x $sh_c 'sleep 3; emerge app-emulation/docker' ) exit 0 ;; esac # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output cat >&2 <<-'EOF' Either your platform is not easily detectable, is not supported by this installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have a package for Docker. Please visit the following URL for more detailed installation instructions: https://docs.docker.com/engine/installation/ EOF exit 1 } # wrapped up in a function so that we have some protection against only getting # half the file during "curl | sh" do_install docker-1.10.3/hack/make.sh000077500000000000000000000214341267010174400152330ustar00rootroot00000000000000#!/usr/bin/env bash set -e # This script builds various binary artifacts from a checkout of the docker # source code. # # Requirements: # - The current directory should be a checkout of the docker source code # (https://github.com/docker/docker). Whatever version is checked out # will be built. # - The VERSION file, at the root of the repository, should exist, and # will be used as Docker binary version and package version. # - The hash of the git commit will also be included in the Docker binary, # with the suffix -unsupported if the repository isn't clean. # - The script is intended to be run inside the docker container specified # in the Dockerfile at the root of the source. In other words: # DO NOT CALL THIS SCRIPT DIRECTLY. # - The right way to call this script is to invoke "make" from # your checkout of the Docker repository. # the Makefile will do a "docker build -t docker ." and then # "docker run hack/make.sh" in the resulting image. 
# set -o pipefail export DOCKER_PKG='github.com/docker/docker' export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" export MAKEDIR="$SCRIPTDIR/make" # We're a nice, sexy, little shell script, and people might try to run us; # but really, they shouldn't. We want to be in a container! if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then { echo "# WARNING! I don't seem to be running in the Docker container." echo "# The result of this command might be an incorrect build, and will not be" echo "# officially supported." echo "#" echo "# Try this instead: make all" echo "#" } >&2 fi echo # List of bundles to create when no argument is passed DEFAULT_BUNDLES=( validate-dco validate-gofmt validate-lint validate-pkg validate-test validate-toml validate-vet validate-vendor binary dynbinary test-unit test-integration-cli test-docker-py cover cross tgz ) VERSION=$(< ./VERSION) if command -v git &> /dev/null && git rev-parse &> /dev/null; then GITCOMMIT=$(git rev-parse --short HEAD) if [ -n "$(git status --porcelain --untracked-files=no)" ]; then GITCOMMIT="$GITCOMMIT-unsupported" fi ! BUILDTIME=$(date --rfc-3339 ns | sed -e 's/ /T/') &> /dev/null if [ -z $BUILDTIME ]; then # If using bash 3.1 which doesn't support --rfc-3389, eg Windows CI BUILDTIME=$(date -u) fi elif [ "$DOCKER_GITCOMMIT" ]; then GITCOMMIT="$DOCKER_GITCOMMIT" else echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' echo >&2 ' Please either build with the .git directory accessible, or specify the' echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' echo >&2 ' future accountability in diagnosing build issues. Thanks!' exit 1 fi if [ "$AUTO_GOPATH" ]; then rm -rf .gopath mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" export GOPATH="${PWD}/.gopath:${PWD}/vendor" fi if [ ! 
"$GOPATH" ]; then echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' echo >&2 ' alternatively, set AUTO_GOPATH=1' exit 1 fi if [ "$DOCKER_EXPERIMENTAL" ]; then echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features' echo >&2 DOCKER_BUILDTAGS+=" experimental pkcs11" fi if [ -z "$DOCKER_CLIENTONLY" ]; then DOCKER_BUILDTAGS+=" daemon" if pkg-config libsystemd-journal 2> /dev/null ; then DOCKER_BUILDTAGS+=" journald" fi fi # test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately if \ command -v gcc &> /dev/null \ && ! gcc -E - -o /dev/null &> /dev/null <<<'#include ' \ ; then DOCKER_BUILDTAGS+=' btrfs_noversion' fi # test whether "libdevmapper.h" is new enough to support deferred remove # functionality. if \ command -v gcc &> /dev/null \ && ! ( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -ldevmapper -o /dev/null &> /dev/null ) \ ; then DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' fi # Use these flags when compiling the tests and final binary IAMSTATIC='true' source "$SCRIPTDIR/make/.go-autogen" if [ -z "$DOCKER_DEBUG" ]; then LDFLAGS='-w' fi LDFLAGS_STATIC='' EXTLDFLAGS_STATIC='-static' # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build # with options like -race. ORIG_BUILDFLAGS=( -a -tags "autogen netgo static_build sqlite_omit_load_extension $DOCKER_BUILDTAGS" -installsuffix netgo ) # see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) # Test timeout. 
: ${TIMEOUT:=120m} TESTFLAGS+=" -test.timeout=${TIMEOUT}" LDFLAGS_STATIC_DOCKER=" $LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC\" " if [ "$(uname -s)" = 'FreeBSD' ]; then # Tell cgo the compiler is Clang, not GCC # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 export CC=clang # "-extld clang" is a workaround for # https://code.google.com/p/go/issues/detail?id=6845 LDFLAGS="$LDFLAGS -extld clang" fi # If sqlite3.h doesn't exist under /usr/include, # check /usr/local/include also just in case # (e.g. FreeBSD Ports installs it under the directory) if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then export CGO_CFLAGS='-I/usr/local/include' export CGO_LDFLAGS='-L/usr/local/lib' fi HAVE_GO_TEST_COVER= if \ go help testflag | grep -- -cover > /dev/null \ && go tool -n cover > /dev/null 2>&1 \ ; then HAVE_GO_TEST_COVER=1 fi # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. # You can use this to select certain tests to run, eg. 
# # TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit # # For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want # to run certain tests on your local host, you should run with command: # # TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli # go_test_dir() { dir=$1 coverpkg=$2 testcover=() if [ "$HAVE_GO_TEST_COVER" ]; then # if our current go install has -cover, we want to use it :) mkdir -p "$DEST/coverprofiles" coverprofile="docker${dir#.}" coverprofile="$ABS_DEST/coverprofiles/${coverprofile//\//-}" testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) fi ( echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}" cd "$dir" export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up test_env go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS ) } test_env() { # use "env -i" to tightly control the environment variables that bleed into the tests env -i \ DEST="$DEST" \ DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \ DOCKER_HOST="$DOCKER_HOST" \ DOCKER_REMAP_ROOT="$DOCKER_REMAP_ROOT" \ DOCKER_REMOTE_DAEMON="$DOCKER_REMOTE_DAEMON" \ GOPATH="$GOPATH" \ HOME="$ABS_DEST/fake-HOME" \ PATH="$PATH" \ TEMP="$TEMP" \ TEST_DOCKERINIT_PATH="$TEST_DOCKERINIT_PATH" \ "$@" } # a helper to provide ".exe" when it's appropriate binary_extension() { if [ "$(go env GOOS)" = 'windows' ]; then echo -n '.exe' fi } hash_files() { while [ $# -gt 0 ]; do f="$1" shift dir="$(dirname "$f")" base="$(basename "$f")" for hashAlgo in md5 sha256; do if command -v "${hashAlgo}sum" &> /dev/null; then ( # subshell and cd so that we get output files like: # $HASH docker-$VERSION # instead of: # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION cd "$dir" "${hashAlgo}sum" "$base" > "$base.$hashAlgo" ) fi 
done done } bundle() { local bundle="$1"; shift echo "---> Making bundle: $(basename "$bundle") (in $DEST)" source "$SCRIPTDIR/make/$bundle" "$@" } main() { # We want this to fail if the bundles already exist and cannot be removed. # This is to avoid mixing bundles from different versions of the code. mkdir -p bundles if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then echo "bundles/$VERSION already exists. Removing." rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 echo fi if [ "$(go env GOHOSTOS)" != 'windows' ]; then # Windows and symlinks don't get along well rm -f bundles/latest ln -s "$VERSION" bundles/latest fi if [ $# -lt 1 ]; then bundles=(${DEFAULT_BUNDLES[@]}) else bundles=($@) fi for bundle in ${bundles[@]}; do export DEST="bundles/$VERSION/$(basename "$bundle")" # Cygdrive paths don't play well with go build -o. if [[ "$(uname -s)" == CYGWIN* ]]; then export DEST="$(cygpath -mw "$DEST")" fi mkdir -p "$DEST" ABS_DEST="$(cd "$DEST" && pwd -P)" bundle "$bundle" echo done } main "$@" docker-1.10.3/hack/make/000077500000000000000000000000001267010174400146705ustar00rootroot00000000000000docker-1.10.3/hack/make/.build-deb/000077500000000000000000000000001267010174400165755ustar00rootroot00000000000000docker-1.10.3/hack/make/.build-deb/compat000066400000000000000000000000021267010174400177730ustar00rootroot000000000000009 docker-1.10.3/hack/make/.build-deb/control000066400000000000000000000023701267010174400202020ustar00rootroot00000000000000Source: docker-engine Section: admin Priority: optional Maintainer: Docker Standards-Version: 3.9.6 Homepage: https://dockerproject.org Vcs-Browser: https://github.com/docker/docker Vcs-Git: git://github.com/docker/docker.git Package: docker-engine Architecture: linux-any Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, ${apparmor:Recommends}, ${yubico:Recommends} Conflicts: docker (<< 1.5~), 
docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs Description: Docker: the open-source application container engine Docker is an open source project to build, ship and run any application as a lightweight container . Docker containers are both hardware-agnostic and platform-agnostic. This means they can run anywhere, from your laptop to the largest EC2 compute instance and everything in between - and they don't require you to use a particular language, framework or packaging system. That makes them great building blocks for deploying and scaling web apps, databases, and backend services without depending on a particular stack or provider. docker-1.10.3/hack/make/.build-deb/docker-engine.bash-completion000066400000000000000000000000371267010174400243150ustar00rootroot00000000000000contrib/completion/bash/docker docker-1.10.3/hack/make/.build-deb/docker-engine.docker.default000077700000000000000000000000001267010174400353072../../../contrib/init/sysvinit-debian/docker.defaultustar00rootroot00000000000000docker-1.10.3/hack/make/.build-deb/docker-engine.docker.init000077700000000000000000000000001267010174400332032../../../contrib/init/sysvinit-debian/dockerustar00rootroot00000000000000docker-1.10.3/hack/make/.build-deb/docker-engine.docker.upstart000077700000000000000000000000001267010174400332602../../../contrib/init/upstart/docker.confustar00rootroot00000000000000docker-1.10.3/hack/make/.build-deb/docker-engine.install000066400000000000000000000013021267010174400226730ustar00rootroot00000000000000#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ #contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ #contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ contrib/*-integration usr/share/docker-engine/contrib/ contrib/check-config.sh usr/share/docker-engine/contrib/ contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/ contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ 
contrib/init/systemd/docker.service lib/systemd/system/ contrib/init/systemd/docker.socket lib/systemd/system/ contrib/mk* usr/share/docker-engine/contrib/ contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ docker-1.10.3/hack/make/.build-deb/docker-engine.manpages000066400000000000000000000000131267010174400230160ustar00rootroot00000000000000man/man*/* docker-1.10.3/hack/make/.build-deb/docker-engine.postinst000066400000000000000000000003501267010174400231120ustar00rootroot00000000000000#!/bin/sh set -e case "$1" in configure) if [ -z "$2" ]; then if ! getent group docker > /dev/null; then groupadd --system docker fi fi ;; abort-*) # How'd we get here?? exit 1 ;; *) ;; esac #DEBHELPER# docker-1.10.3/hack/make/.build-deb/docker-engine.udev000077700000000000000000000000001267010174400302032../../../contrib/udev/80-docker.rulesustar00rootroot00000000000000docker-1.10.3/hack/make/.build-deb/docs000066400000000000000000000000121267010174400174410ustar00rootroot00000000000000README.md docker-1.10.3/hack/make/.build-deb/rules000077500000000000000000000030071267010174400176550ustar00rootroot00000000000000#!/usr/bin/make -f VERSION = $(shell cat VERSION) override_dh_gencontrol: # if we're on Ubuntu, we need to Recommends: apparmor echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars # if we are building experimental we reccomend yubico-piv-tool echo 'yubico:Recommends=$(shell [ "$DOCKER_EXPERIMENTAL" ] && echo "yubico-piv-tool (>= 1.1.0~)")' >> debian/docker-engine.substvars dh_gencontrol override_dh_auto_build: ./hack/make.sh dynbinary # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here override_dh_auto_test: ./bundles/$(VERSION)/dynbinary/docker -v override_dh_strip: # the SHA1 of dockerinit is important: don't strip it # also, Go has lots of problems with stripping, so just don't 
override_dh_auto_install: mkdir -p debian/docker-engine/usr/bin cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/docker)" debian/docker-engine/usr/bin/docker mkdir -p debian/docker-engine/usr/lib/docker cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/dockerinit)" debian/docker-engine/usr/lib/docker/dockerinit override_dh_installinit: # use "docker" as our service name, not "docker-engine" dh_installinit --name=docker override_dh_installudev: # match our existing priority dh_installudev --priority=z80 override_dh_install: dh_install dh_apparmor --profile-name=docker-engine -pdocker-engine %: dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) docker-1.10.3/hack/make/.build-rpm/000077500000000000000000000000001267010174400166415ustar00rootroot00000000000000docker-1.10.3/hack/make/.build-rpm/docker-engine-selinux.spec000066400000000000000000000061761267010174400237260ustar00rootroot00000000000000# Some bits borrowed from the openstack-selinux package Name: docker-engine-selinux Version: %{_version} Release: %{_release}%{?dist} Summary: SELinux Policies for the open-source application container engine BuildArch: noarch Group: Tools/Docker License: GPLv2 Source: %{name}.tar.gz URL: https://dockerproject.org Vendor: Docker Packager: Docker # Version of SELinux we were using %if 0%{?fedora} == 20 %global selinux_policyver 3.12.1-197 %endif # fedora 20 %if 0%{?fedora} == 21 %global selinux_policyver 3.13.1-105 %endif # fedora 21 %if 0%{?fedora} >= 22 %global selinux_policyver 3.13.1-128 %endif # fedora 22 %if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 %global selinux_policyver 3.13.1-23 %endif # centos,rhel,oraclelinux 7 %global selinuxtype targeted %global moduletype services %global modulenames docker Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils BuildRequires: 
selinux-policy selinux-policy-devel # conflicting packages Conflicts: docker-selinux # Usage: _format var format # Expand 'modulenames' into various formats as needed # Format must contain '$x' somewhere to do anything useful %global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done; # Relabel files %global relabel_files() \ /sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sharedstatedir}/docker %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \ %description SELinux policy modules for use with Docker %prep %if 0%{?centos} <= 6 %setup -n %{name} %else %autosetup -n %{name} %endif %build make SHARE="%{_datadir}" TARGETS="%{modulenames}" %install # Install SELinux interfaces %_format INTERFACES $x.if install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} # Install policy modules %_format MODULES $x.pp.bz2 install -d %{buildroot}%{_datadir}/selinux/packages install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages %post # # Install all modules in a single transaction # if [ $1 -eq 1 ]; then %{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1 fi %_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2 %{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES if %{_sbindir}/selinuxenabled ; then %{_sbindir}/load_policy %relabel_files fi %postun if [ $1 -eq 0 ]; then %{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || : if %{_sbindir}/selinuxenabled ; then %{_sbindir}/load_policy %relabel_files fi fi %files %doc LICENSE %defattr(-,root,root,0755) %attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2 %attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if %changelog * Tue Dec 1 2015 Jessica Frazelle 1.9.1-1 - 
add licence to rpm - add selinux-policy and docker-engine-selinux rpm docker-1.10.3/hack/make/.build-rpm/docker-engine.spec000066400000000000000000000164371267010174400222420ustar00rootroot00000000000000Name: docker-engine Version: %{_version} Release: %{_release}%{?dist} Summary: The open-source application container engine Group: Tools/Docker License: ASL 2.0 Source: %{name}.tar.gz URL: https://dockerproject.org Vendor: Docker Packager: Docker # docker builds in a checksum of dockerinit into docker, # # so stripping the binaries breaks docker %global __os_install_post %{_rpmconfigdir}/brp-compress %global debug_package %{nil} # is_systemd conditional %if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 %global is_systemd 1 %endif # required packages for build # most are already in the container (see contrib/builder/rpm/generate.sh) # only require systemd on those systems %if 0%{?is_systemd} %if 0%{?suse_version} >= 1210 BuildRequires: systemd-rpm-macros %{?systemd_requires} %else BuildRequires: pkgconfig(systemd) Requires: systemd-units BuildRequires: pkgconfig(libsystemd-journal) %endif %else Requires(post): chkconfig Requires(preun): chkconfig # This is for /sbin/service Requires(preun): initscripts %endif # required packages on install Requires: /bin/sh Requires: iptables %if !0%{?suse_version} Requires: libcgroup %else Requires: libcgroup1 %endif Requires: tar Requires: xz %if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 # Resolves: rhbz#1165615 Requires: device-mapper-libs >= 1.02.90-1 %endif %if 0%{?oraclelinux} >= 6 # Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper Requires: kernel-uek >= 4.1 Requires: device-mapper >= 1.02.90-2 %endif # docker-selinux conditional %if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 %global with_selinux 1 %endif %if 0%{?_experimental} # yubico-piv-tool conditional %if 0%{?fedora} >= 20 || 
0%{?centos} >= 7 || 0%{?rhel} >= 7 Requires: yubico-piv-tool >= 1.1.0 %endif %endif # start if with_selinux %if 0%{?with_selinux} # Version of SELinux we were using %if 0%{?fedora} == 20 %global selinux_policyver 3.12.1-197 %endif # fedora 20 %if 0%{?fedora} == 21 %global selinux_policyver 3.13.1-105 %endif # fedora 21 %if 0%{?fedora} >= 22 %global selinux_policyver 3.13.1-128 %endif # fedora 22 %if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 %global selinux_policyver 3.13.1-23 %endif # centos,oraclelinux 7 %endif # with_selinux # RE: rhbz#1195804 - ensure min NVR for selinux-policy %if 0%{?with_selinux} Requires: selinux-policy >= %{selinux_policyver} Requires(pre): %{name}-selinux >= %{epoch}:%{version}-%{release} %endif # with_selinux # conflicting packages Conflicts: docker Conflicts: docker-io Conflicts: docker-engine-cs %description Docker is an open source project to build, ship and run any application as a lightweight container. Docker containers are both hardware-agnostic and platform-agnostic. This means they can run anywhere, from your laptop to the largest EC2 compute instance and everything in between - and they don't require you to use a particular language, framework or packaging system. That makes them great building blocks for deploying and scaling web apps, databases, and backend services without depending on a particular stack or provider. 
%prep %if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6 %setup -n %{name} %else %autosetup -n %{name} %endif %build export DOCKER_GITCOMMIT=%{_gitcommit} ./hack/make.sh dynbinary # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here %check ./bundles/%{_origversion}/dynbinary/docker -v %install # install binary install -d $RPM_BUILD_ROOT/%{_bindir} install -p -m 755 bundles/%{_origversion}/dynbinary/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker # install dockerinit install -d $RPM_BUILD_ROOT/%{_libexecdir}/docker install -p -m 755 bundles/%{_origversion}/dynbinary/dockerinit-%{_origversion} $RPM_BUILD_ROOT/%{_libexecdir}/docker/dockerinit # install udev rules install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules # add init scripts install -d $RPM_BUILD_ROOT/etc/sysconfig install -d $RPM_BUILD_ROOT/%{_initddir} %if 0%{?is_systemd} install -d $RPM_BUILD_ROOT/%{_unitdir} install -p -m 644 contrib/init/systemd/docker.service $RPM_BUILD_ROOT/%{_unitdir}/docker.service install -p -m 644 contrib/init/systemd/docker.socket $RPM_BUILD_ROOT/%{_unitdir}/docker.socket %else install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker %endif # add bash, zsh, and fish completions install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish 
# install manpages install -d %{buildroot}%{_mandir}/man1 install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1 install -d %{buildroot}%{_mandir}/man5 install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 # add vimfiles install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim # add nano install -d $RPM_BUILD_ROOT/usr/share/nano install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc # list files owned by the package here %files %doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md /%{_bindir}/docker /%{_libexecdir}/docker/dockerinit /%{_sysconfdir}/udev/rules.d/80-docker.rules %if 0%{?is_systemd} /%{_unitdir}/docker.service /%{_unitdir}/docker.socket %else %config(noreplace,missingok) /etc/sysconfig/docker /%{_initddir}/docker %endif /usr/share/bash-completion/completions/docker /usr/share/zsh/vendor-completions/_docker /usr/share/fish/vendor_completions.d/docker.fish %doc /%{_mandir}/man1/* /%{_mandir}/man5/* /usr/share/vim/vimfiles/doc/dockerfile.txt /usr/share/vim/vimfiles/ftdetect/dockerfile.vim /usr/share/vim/vimfiles/syntax/dockerfile.vim /usr/share/nano/Dockerfile.nanorc %post %if 0%{?is_systemd} %systemd_post docker %else # This adds the proper /etc/rc*.d links for the script /sbin/chkconfig --add docker %endif if ! 
getent group docker > /dev/null; then groupadd --system docker fi %preun %if 0%{?is_systemd} %systemd_preun docker %else if [ $1 -eq 0 ] ; then /sbin/service docker stop >/dev/null 2>&1 /sbin/chkconfig --del docker fi %endif %postun %if 0%{?is_systemd} %systemd_postun_with_restart docker %else if [ "$1" -ge "1" ] ; then /sbin/service docker condrestart >/dev/null 2>&1 || : fi %endif %changelog docker-1.10.3/hack/make/.detect-daemon-osarch000066400000000000000000000010701267010174400206550ustar00rootroot00000000000000#!/bin/bash set -e # Retrieve OS/ARCH of docker daemon, eg. linux/amd64 export DOCKER_ENGINE_OSARCH="$(docker version | awk ' $1 == "Client:" { server = 0; next } $1 == "Server:" { server = 1; next } server && $1 == "OS/Arch:" { print $2 } ')" export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}" export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}" # and the client, just in case export DOCKER_CLIENT_OSARCH="$(docker version | awk ' $1 == "Client:" { client = 1; next } $1 == "Server:" { client = 0; next } client && $1 == "OS/Arch:" { print $2 } ')" docker-1.10.3/hack/make/.dockerinit000066400000000000000000000015671267010174400170350ustar00rootroot00000000000000#!/bin/bash set -e IAMSTATIC="true" source "${MAKEDIR}/.go-autogen" # dockerinit still needs to be a static binary, even if docker is dynamic go build \ -o "$DEST/dockerinit-$VERSION" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS $LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC\" " \ ./dockerinit echo "Created binary: $DEST/dockerinit-$VERSION" ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" sha1sum= if command -v sha1sum &> /dev/null; then sha1sum=sha1sum elif command -v shasum &> /dev/null; then # Mac OS X - why couldn't they just use the same command name and be happy? 
sha1sum=shasum else echo >&2 'error: cannot find sha1sum command or equivalent' exit 1 fi # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another export DOCKER_INITSHA1=$($sha1sum "$DEST/dockerinit-$VERSION" | cut -d' ' -f1) docker-1.10.3/hack/make/.dockerinit-gccgo000066400000000000000000000014011267010174400201000ustar00rootroot00000000000000#!/bin/bash set -e IAMSTATIC="true" source "${MAKEDIR}/.go-autogen" # dockerinit still needs to be a static binary, even if docker is dynamic go build --compiler=gccgo \ -o "$DEST/dockerinit-$VERSION" \ "${BUILDFLAGS[@]}" \ --gccgoflags " -g -Wl,--no-export-dynamic $EXTLDFLAGS_STATIC -lnetgo " \ ./dockerinit echo "Created binary: $DEST/dockerinit-$VERSION" ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" sha1sum= if command -v sha1sum &> /dev/null; then sha1sum=sha1sum else echo >&2 'error: cannot find sha1sum command or equivalent' exit 1 fi # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another export DOCKER_INITSHA1=$($sha1sum "$DEST/dockerinit-$VERSION" | cut -d' ' -f1) docker-1.10.3/hack/make/.ensure-emptyfs000066400000000000000000000023711267010174400176620ustar00rootroot00000000000000#!/bin/bash set -e if ! 
docker inspect emptyfs &> /dev/null; then # let's build a "docker save" tarball for "emptyfs" # see https://github.com/docker/docker/pull/5262 # and also https://github.com/docker/docker/issues/4242 dir="$DEST/emptyfs" mkdir -p "$dir" ( cd "$dir" echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 ( cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json echo '1.0' > VERSION tar -cf layer.tar --files-from /dev/null ) ) ( set -x; tar -cC "$dir" . | docker load ) rm -rf "$dir" fi docker-1.10.3/hack/make/.ensure-frozen-images000066400000000000000000000043361267010174400207440ustar00rootroot00000000000000#!/bin/bash set -e # image list should match what's in the Dockerfile (minus the explicit images IDs) images=( busybox:latest debian:jessie hello-world:latest ) imagePrefix= case "$DOCKER_ENGINE_OSARCH" in linux/arm) imagePrefix='armhf' ;; linux/ppc64le) imagePrefix='ppc64le' ;; linux/s390x) imagePrefix='s390x' ;; esac if [ "$imagePrefix" ]; then for (( i = 0; i < ${#images[@]}; i++ )); do images[$i]="$imagePrefix/${images[$i]}" done fi if ! docker inspect "${images[@]}" &> /dev/null; then hardCodedDir='/docker-frozen-images' if [ -d "$hardCodedDir" ]; then # Do not use a subshell for the following command. 
Windows CI # runs bash 3.x so will not trap an error in a subshell. # http://stackoverflow.com/questions/22630363/how-does-set-e-work-with-subshells set -x; tar -cC "$hardCodedDir" . | docker load; set +x else dir="$DEST/frozen-images" # extract the exact "RUN download-frozen-image-v2.sh" line from the Dockerfile itself for consistency # NOTE: this will fail if either "curl" or "jq" is not installed or if the Dockerfile is not available/readable awk ' $1 == "RUN" && $2 == "./contrib/download-frozen-image-v2.sh" { for (i = 2; i < NF; i++) printf ( $i == "'"$hardCodedDir"'" ? "'"$dir"'" : $i ) " "; print $NF; if (/\\$/) { inCont = 1; next; } } inCont { print; if (!/\\$/) { inCont = 0; } } ' "${DOCKERFILE:=Dockerfile}" | sh -x # Do not use a subshell for the following command. Windows CI # runs bash 3.x so will not trap an error in a subshell. # http://stackoverflow.com/questions/22630363/how-does-set-e-work-with-subshells set -x; tar -cC "$dir" . | docker load; set +x fi fi if [ "$imagePrefix" ]; then for image in "${images[@]}"; do target="${image#$imagePrefix/}" if [ "$target" != "$image" ]; then # tag images to ensure that all integrations work with the defined image names docker tag "$image" "$target" # then remove original tags as these make problems with later tests (e.g., TestInspectApiImageResponse) docker rmi "$image" fi done fi # explicitly rename "hello-world:latest" to ":frozen" for the test that uses it docker tag hello-world:latest hello-world:frozen docker rmi hello-world:latest docker-1.10.3/hack/make/.ensure-httpserver000066400000000000000000000006361267010174400204030ustar00rootroot00000000000000#!/bin/bash set -e # Build a Go static web server on top of busybox image # and compile it for target daemon dir="$DEST/httpserver" mkdir -p "$dir" ( cd "$dir" GOOS=${DOCKER_ENGINE_GOOS:="linux"} GOARCH=${DOCKER_ENGINE_GOARCH:="amd64"} go build -o httpserver github.com/docker/docker/contrib/httpserver cp ../../../../contrib/httpserver/Dockerfile . 
docker build -qt httpserver . > /dev/null ) rm -rf "$dir" docker-1.10.3/hack/make/.ensure-syscall-test000066400000000000000000000003661267010174400206240ustar00rootroot00000000000000#!/bin/bash set -e # Build a C binary for cloning a userns for seccomp tests # and compile it for target daemon if [ "$DOCKER_ENGINE_GOOS" = "linux" ]; then docker build ${DOCKER_BUILD_ARGS} -qt syscall-test contrib/syscall-test > /dev/null fi docker-1.10.3/hack/make/.go-autogen000066400000000000000000000033331267010174400167400ustar00rootroot00000000000000#!/bin/bash rm -rf autogen cat > dockerversion/version_autogen.go < autogen/winresources/resources.go < /dev/null fi docker-1.10.3/hack/make/.integration-daemon-setup000066400000000000000000000002331267010174400216110ustar00rootroot00000000000000#!/bin/bash set -e bundle .detect-daemon-osarch bundle .ensure-emptyfs bundle .ensure-frozen-images bundle .ensure-httpserver bundle .ensure-syscall-test docker-1.10.3/hack/make/.integration-daemon-start000066400000000000000000000054621267010174400216170ustar00rootroot00000000000000#!/bin/bash # see test-integration-cli for example usage of this script export PATH="$ABS_DEST/../binary:$ABS_DEST/../dynbinary:$ABS_DEST/../gccgo:$ABS_DEST/../dyngccgo:$PATH" if ! 
command -v docker &> /dev/null; then echo >&2 'error: binary or dynbinary must be run before .integration-daemon-start' false fi # intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers exec 41>&1 42>&2 export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} # example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" storage_params="" if [ -n "$DOCKER_STORAGE_OPTS" ]; then IFS=',' for i in ${DOCKER_STORAGE_OPTS}; do storage_params="--storage-opt $i $storage_params" done unset IFS fi # example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" extra_params="" if [ "$DOCKER_REMAP_ROOT" ]; then extra_params="--userns-remap $DOCKER_REMAP_ROOT" fi if [ -z "$DOCKER_TEST_HOST" ]; then # Start apparmor if it is enabled if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then # reset container variable so apparmor profile is applied to process # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 export container="" ( set -x /etc/init.d/apparmor start ) fi export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one ( set -x; exec \ docker daemon --debug \ --host "$DOCKER_HOST" \ --storage-driver "$DOCKER_GRAPHDRIVER" \ --pidfile "$DEST/docker.pid" \ --userland-proxy="$DOCKER_USERLANDPROXY" \ $storage_params \ $extra_params \ &> "$DEST/docker.log" ) & # make sure that if the script exits unexpectedly, we stop this daemon we just started trap 'bundle .integration-daemon-stop' EXIT else export DOCKER_HOST="$DOCKER_TEST_HOST" fi # give it a little time to come up so it's "ready" tries=60 echo "INFO: Waiting for daemon to start..." while ! 
docker version &> /dev/null; do (( tries-- )) if [ $tries -le 0 ]; then printf "\n" if [ -z "$DOCKER_HOST" ]; then echo >&2 "error: daemon failed to start" echo >&2 " check $DEST/docker.log for details" else echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" docker version >&2 || true # Additional Windows CI debugging as this is a common error as of # January 2016 if [ "$(go env GOOS)" = 'windows' ]; then echo >&2 "Container log below:" echo >&2 "---" # Important - use the docker on the CI host, not the one built locally # which is currently in our path. ! /c/bin/docker -H=$MAIN_DOCKER_HOST logs docker-$COMMITHASH echo >&2 "---" fi fi false fi printf "." sleep 2 done printf "\n" docker-1.10.3/hack/make/.integration-daemon-stop000066400000000000000000000014411267010174400214400ustar00rootroot00000000000000#!/bin/bash if [ ! "$(go env GOOS)" = 'windows' ]; then trap - EXIT # reset EXIT trap applied in .integration-daemon-start for pidFile in $(find "$DEST" -name docker.pid); do pid=$(set -x; cat "$pidFile") ( set -x; kill "$pid" ) if ! wait "$pid"; then echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" fi done if [ -z "$DOCKER_TEST_HOST" ]; then # Stop apparmor if it is enabled if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then ( set -x /etc/init.d/apparmor stop ) fi fi else # Note this script is not actionable on Windows to Linux CI. 
Instead the # DIND daemon under test is torn down by the Jenkins tear-down script echo "INFO: Not stopping daemon on Windows CI" fi docker-1.10.3/hack/make/.resources-windows/000077500000000000000000000000001267010174400204505ustar00rootroot00000000000000docker-1.10.3/hack/make/.resources-windows/docker.exe.manifest000066400000000000000000000015051267010174400242300ustar00rootroot00000000000000 Docker docker-1.10.3/hack/make/.resources-windows/docker.ico000066400000000000000000013226261267010174400224270ustar00rootroot00000000000000 ( f ( @@ (B(00 %j   h.( Ҹ`һeѸ_ҵUӼiĀ̘ҧmxÔĖƗǘtΜײɋ˒հY/T8p4ŗ}͖۽Œˤ&+дRFӾsSȎYӨXְc˒ȈƃƅLJLjȊŅƅɌˏͰPv`XV“OŖEȚ5П~ÕǘɋֱصطÙ vҥ*v}ƆԫٸҧҥԫֱٸھϤǫM⹊Ⱥ”o7̞ĕɎѥ|o.ʐg׳ΛӪ۽ڻڻڼۿܿ۾۽۽۽۽ڻ۽өɭQ뾏ŻiŖS͝ɚƘɎѶVҹ^ԭ5̒ԭٸ׳۾۾ۿۿۿ۾۽۽۽۽۾۾۾۾۾ٻ۾Ϻp溌a&͝}ĕɏԨƅGԩԫٸ۾۾۾۽۽۾۾۾۾۾۾۾۾۾۾۾۾ڽڽ͝Ÿ1仍tǘ5ÔϜňÀϝqԫٹںۿ۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڻθl A̜_Ǘʏ͚0Өְڼۿܿۿ۽۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڻձť>弎Õd˝ ĕuҧhȋ6Р״ڽۿ۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڻ͛￑KϟѠʛΜ۾Խl)П׵ܿۿ۽۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڼć ٿŗ)uʐѴSΚְۿ۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڼнxXժ ÕѵU͚SŁXӨڻ۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڼмu{țoӼhӻgϱL͛صۿ۽۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڻѾzǙ&ŗŖƈȍāQѤۿ۽۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڻćǙ!ĕʉÒ ɏ״ܾ۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڼ̜əӼs̖ڻ۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڽڽڼ۾԰"{ʙz̖z7ҧۿ۽۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڽڽ۽۾ۿѶV˜ØØØØØØØ——[ƅ׵[հܾ۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾۽ڽڽ֞ƜƜƜƜŜŜŜŜƜƜƜƜƜƜƜŜśśśÙ—Ƙ/zƆȋo״۽۾۾۾۾۾۾۾۾۾۾۾۾۾۾۾ڽڽܾʦ%ØŜśśśśśśśśśśśśśśśśśśŜƜƜśęØѢĕٹ͙صڼ۽۽۽۽۽۽۽۽۽۽۽۾۾۾ڽ۽g–ƜśśśśśśśśśśśśśśśśśśśśśśśśŜƜƜśęĖC“ҧٺۿڽۿްƝ ĚśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜƜśØ٫ Ǚϣֲ״׳խӧҤҥРПССРРѤҦԫկطӷN–ƜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜƜƜŚŖ,”Тں$ײ ʑ Ƅ!Ӽn ҹaҹcгRϲNгQϳPβOгPҸ] Һf y!nj!Ҧ (P֟֞۬ۨěśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜƜĚ—gsHohͰQ pLәd_ޮ٦ҵF×ƜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜƜĚѢĖ9 ש Ԧ ԦТ ϡ"ϡ"ҤΟϠ,Ӥ&ש9:Ɯ)ܫjSעۨɠęśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜśÙɛ+”^ĺͻ׻ۻۻ⻍仍二ǜǜǜƜŚĚęŚțʝ̞pʝ#™ ̣߸K Zə 7Ȝ[ƛśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜś—ĕHĕĖ@ĚśśśśśśśśśśśŚĚƛɜȜęa˞ [Ӣѡ Śř ŚśśśƜśŚŚƜŜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜƜØfɚƗѡշƜśśśśśśśśśśśśśśśśśŚƜƛŚʞǜ&ϟ ƚ5ŚśŜśĘŝ 
ĚĚŜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜŚ}ĖØŜśśśśśśśśśśśśśśśśśśśśśśśŚȜǜƛśŜęŜؾ[ڛݠgǟĘŜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜśܫŖÔŖ;ÙƜśśśśśśśśśśśśśśśśśśśśśśśśśśśśŜęɢݦɸǫ̀'ĘŜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜśܬ Ɨǘ·ÙƜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśÚ֘zbLD.LE1JC.smUƝ ŚśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜś—Ϡ ÔYØŜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśŜ×ή6oF@,VPQK7TN:RL8QK6gaMPJ5ǵדÙśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜś۫ ɚĕӣӷśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśŜ×׼WhbJNH5VQQL:TN8SM9SM9SM9SM9TN8TN9QK:IF=RL9_U4ve*r"UO8YSM90SM9 SM9SM9SM9SM9 SM9SM9#SM9ڪ:ڪ;ڪ;ڪ;ڪ;ڪ;T^]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\al}!! ###########%ܫ٪ڪڪڪڪڪڪڪڪ6ڪԧ&;S""#$#####%ܫ٩ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪ@~#6&u"#$###%ܫ٩ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪҬҬ!ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ2޶l`կҬ;ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬӭIҬSҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬuҬ Ҭ$6ҥb"#$##%۫٪ڪڪڪڪڪڪeڪڪڪڪڪҬҬAҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬe ۳ ҬqҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬӭ< ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ$=!#$#%۪٩ڪڪڪڪڪڪoڪڪڪڪڪڪڪڪڪnڪSڪ5ڪ ڪڪҬҬ=ҬҬWҬUҬҬ3ҬzҬҬ ҬҬҬҬҬҬҬҬYҬMҬҬ`ڲ  dC0wҬoҬҬ4ҬҬҬҬҬҬҬҬҬ(ҬҬhҬ0ҬҬ0Ҭ|Ҭӭ6]E7t!ҬҬҬҬҬҬҬҬҬҬҬ[ҬCҬҬCҬdҬҬҬҬҬҬ#"Ң$!##%ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪCڪڪڪڪҬҬ=ҬҬAҬ?ҬҬҬfҬҬҬҬҬҬҬAҬ6ҬҬaٲQ)hҬpҬҬҬtҬҬҬҬҬҬ ҬҬUҬҬҬҬmҬӭ6K/bҬҬҬҬҬҬҬҬDҬ+ҬҬ+ҬNҬҬҬҬҬ%3"#%ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪ:ڪڪҬҬ=ҬҬEҬBҬҬҬkҬҬҬҬҬҬҬFҬ:ҬҬaڲT.iҬpҬҬҬwҬҬҬҬҬҬҬҬҬXҬҬҬҬnҬӭ6L1"f ҬҬҬҬҬҬҬҬIҬ/ҬҬ.ҬSҬҬҬҬҬ(Ui "%٩ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪHڪڪڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬ2##%ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪ)ڪڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬ ##%٪ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪmڪڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬݭ #####߮ ٩ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪ ڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬ$####$!ݬ٩ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬ#u"######$ܫڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ 
eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬ& ########%ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬ###########%٩ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪgڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬ0!$##########%٩ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪ%ڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬ,{##############߮ ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪҬҬ=ҬҬDҬAҬҬҬjҬҬҬҬҬҬҬEҬ8ҬҬaٲ~S-hҬpҬҬҬvҬҬҬҬҬҬҬҬҬVҬҬҬҬmҬӭ6J/ eҬҬҬҬҬҬҬҬHҬ-ҬҬ-ҬRҬҬҬҬҬפ#############%ڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪ ڪҬҬ=ҬҬHҬEҬҬҬnҬҬҬҬҬҬҬҬJҬ<ҬҬaڲ V1kҬpҬҬ ҬxҬҬҬҬҬҬҬҬҬZҬҬҬҬpҬӭ6N3%h ҬҬҬҬҬҬҬҬҬLҬ2ҬҬ1ҬVҬҬҬҬҬ4"##############զڪyڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪ>ڪҬҬ=ҬҬ9Ҭ;ҬҬҬ^ҬҬҬҬҬҬҬ9Ҭ3ҬҬaٲL#hҬpҬҬҬqҬҬҬҬҬyҬҬҬRҬҬҬ ҬjҬӭ6K-\ҬҬҬҬҬҬҬҬ;Ҭ'ҬҬ'ҬEҬҬҬҬҬ"#############$۫ڪ2ڪrڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪڪ^ڪڪڪҬҬ?ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬcݵ ǣҬpҬҬҬҬҬҬҬҬ~ҬҬҬҬҬҬҬҬҬҬӭ8ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ$!##############$ڪ ڪ1ڪRڪlڪڪڪڪڪڪڪoڪUڪ1ڪڪڪҬҬ;ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬZ߶ذҬgҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬӭ4ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ΢$#############$ݬڪڪڪڪڪڪڪҬ Ҭ+Ҭ'Ҭ'Ҭ*Ҭ&Ҭ'Ҭ*Ҭ%Ҭ)Ҭ)Ҭ%Ҭ)Ҭ(Ҭ%Ҭ*Ҭ&Ҭ&Ҭ-Ҭ޵ $,&,*',)(,')+&*+&- ӭӭ.ӭ'ӭ)ӭ+ӭ'ӭ*ӭ+ӭ'ӭ+ӭ)ӭ'ӭ,ӭ)ӭ'ӭ,ӭ'ӭ)ӭ+Ԯ **',((,'(+'*+&+*&/ҬҬ,Ҭ%Ҭ)Ҭ)Ҭ%Ҭ)Ҭ(Ҭ%Ҭ*Ҭ'Ҭ&Ҭ*Ҭ&Ҭ'Ҭ*Ҭ%Ҭ)Ҭ%Ҭ$$##############3ڪڪڪڪڪڪڪڪڪڪڪڪڪ2$#############"!c!ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮ  װԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮԮ ԮҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ7!$############$ TҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬn7ծҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ:!##############! 
ҬҬҬҬqҬҬҬ|ҬҬҬҬҬҬҬҬxҬҬҬvҬҬ<p|m8ծҬҬҬzҬҬҬҬҬҬҬҬtҬҬҬsҬҬҬ|ҬҬҬ:!##############ҬҬҬҬҬҬҬҬPҬ(ҬҬҬhҬҬҬҬҬpt Ul6ծҬҬҬҬKҬ.ҬҬҬbҬҬҬҬҬҬҬҬҬ:!#############"_ҬҬҬҬҬҬҬҬTҬ/ҬҬҬiҬҬҬҬҬpwXo6ծҬҬҬҬҬLҬ1ҬҬ"ҬfҬҬҬҬҬҬ ҬҬҬ8 $###########$ҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬ2$############&ҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬ'$########### ҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬ֩$##########"w ҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬ(!###########ݭ ҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬ"########$!AҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬH#########""ҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬܮ########pҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬ?######$ެҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬA!$###$ ҬҬҬҬҬ~ҬҬҬSҬ-ҬҬҬhҬҬҬҬҬpvVm6ծҬҬҬҬҬJҬ/ҬҬ ҬeҬҬҬҬҬҬҬҬҬT$##$.ҬҬҬҬҬҬҬҬTҬ/ҬҬҬiҬҬҬҬҬpwXo6ծҬҬҬҬҬLҬ1ҬҬ"ҬfҬҬҬҬҬҬ ҬҬҬJ!##)ҬҬҬҬҬҬҬҬPҬ*ҬҬҬhҬҬҬҬҬpu Um6ծҬҬҬҬKҬ/ҬҬҬcҬҬҬҬҬҬҬҬҬ1" ҬҬҬҬҬҬҬ(ҬҬjҬKҬҬ9Ҭ|ҬҬ"ҬҬҬ!ҬҬm<'2m995ծҬҬҬ&ҬҬcҬMҬҬ@ҬyҬҬҬҬҬҬҬҬ+ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬt?ծ"ҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ 2ҬҬkҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ_:ծҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬHҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ\M3$g K/eK0 eJ/ eJ/ eJ/ eJ/ eJ/ eJ/ eJ/ eJ/ eJ/ eJ/ eN3$h J-]s ?????????<??%$崓$?-崓$-崓$-崓$-崓$?-崓$-崓$-崓$-崓$-崓$-崓$-崓$-崓$%崓$-崓$$$-崓-崓-崓-崓-崓-崓-崓?-崓?-崓-崓-崓-崓-崓-崓%$崓<崓崓崓崓崓崓崓崓崓崓崓崓崓崓崓( ѴPѶYҺcѸ_ѵWаF˒͘СyÔϜѣٺdv b ҆ÔСҺq ɋ1ϟWӦxΜֲضԩѤ֮ٵҾw޾ѹtYEΟ өҷgП6Ԭ|ײصںڹ۾۾۾۾ۿԮǪK㺌]+ѡѣΚ'կضڻ۽ۿۿۿ۾۾ڼлs ܻ^“ŃشϞ;ֲٺ۽ۿۿ۾۾۾۾۾۾ڻڼƧCj“ϟٵʓ)֯ںۿۿ۾۾۾۾۾۾۾۾ڻֲ)GѢ”ѣ›*ҥطۿۿ۾۾۾۾۾۾۾۾۾ڼֱ!ҽeȘ ׯņ(ծڽܿ۾۾۾۾۾۾۾۾۾۾ڼۿٺ*߽hɛطΝO״۾۾۾۾۾۾۾ڽڽڽ۾ݿ̯LؽQ|ѣmں۾۾۾۾۾ڽڼ۾ԖĘƝƜƜƜƜƜśśĚØ%կٺ۾ڻڼڼڼڼ۾ʥ"ØŜśśśśśśśśŜƜƜƜŚ—pvѻlհ(׳ԭҦѤҥҤӪԮƲs•ƜśśśśśśśśśśśśśśƜƜŜę”jKa  #WʤęśŜƜƜśśśśśśśśśśśśśśśƜƜę羐B“Õչ׺㺍뺍ƝƝŜśǜƜĘ}ÓSqL˛ŚƜ×ěśśśśśśśśśśśśśśśśśƜś—jĕŜśśśśśśśŜƜƜƜȝJƜ×Ы#cܾOƝęśśśśśśśśśśśśśśśśśśƜƜÙFƝśśśśśśśśśśśśŜ–Ա5ɫvЏƛśśśśśśśśśśśśśśśśśśśśśƜĚ~ÔƸƜśśśśśśśśśśśśśśĘΎif[F>'80ܼFŜśśśśśśśśśśśśśśśśśśśśśƜĚ~”XƜśśśśśśśśśśśśśśśƛ̔[WHLE.gػMƜśśśśśśśśśśśśśśśśśśśśśśśƜĚj¸śśśśśśśśśśśśśśśśƜܿU`]Nŝ 
ŚśśśśśśśśśśśśśśśśśśśśśśśśśśƜ˜:—ƜśśśśśśśśśśśśśśśśśØڽP͍xʤĘŜśśśśśśśśśśśśśśśśśśśśśśśśśśśƜŜfƜśśśśśśśśśśśśśśśśśśśę×ęŜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜĚ=”ڸØƜśśśśśśśśśśśśśśśśśśśƜśƜŜśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśƜƗSM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9|3śśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśśěĚŜÙkm^,UN8SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9oȝǜǜǜǜǜǜǜǜǜǜǝǝȝǝǝǝǜǜǜǜǜǜǜǜǜǜǜǜǜǜǜǜǜǜǝǝǝǝǜǜǜǜǜǜǜǜǜǜǜǜǜǜǜǜǜǜǝǝǝȝȞ–hSM9SM9SM9"SM9SM9SM9 SM9SM9SM9SM9SM9SM9SM9SM9 TN9 gY-},,V]S3 SM9 SM9 SM9 SM9 SM9 SM9 SM9 SM9SM9SM9SM9SM9SM9SM9SM9 SM9SM9SM9"SM9SM9SM9SM95SM9pSM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9RL9QK9QK9PJ9QK8UO6[R6\S5[R6ZR6[R6\S5XP6QL7PK7RL7XP6^T5aV5bW5bW5_V5[S5UN7QL7PK7PK7TN6YQ6\S5\S5ZR6ZR6ZR6[R6]T5ZR6SM7PK7QL7WP6^U5aW5aW5_U5XP6RL7PK7SM7ZQ6]T5[R6ZR6ZR6ZR6[S5\S5ZQ6UN6QK7PK7QL7TN7[R5_U5bW5bW5aV5^T5YQ6RL7OJ7PJ7WO7[R5ZQ5YQ5ZQ5[R5[R5SL9OK;TN9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9uSM99SM9 SM9SM9#SM9SM9SM9SM9SM9SM9SM9SM9SM9SM9RL9yUO8f稈v|j,eZ8NJ:FD;NI:TN:NJ:GE;[R8~k3/+Ƞ)ҧ'ת&ڬ&٫&թ'Σ(ĝ),/q2l^6UN9GE;JF;RL:RL:SM:QK:GE;PK:pb6}0+ȟ)ӧ'ث&٫&Ө'ɠ)+/td5RL:GD;PK:SM:RL:RM:KG;GE;SM9i]7p3/,Ü*ͣ(ը'ث&ڬ&ת&ӧ'ɠ(*.m2\S4HF=NJ;TN9OK;HF>LI[S5u ֧OESM9SM9SM9aSM9SM9SM9SM9SM9*SM9SM9SM9SM9SM9SM9SM9GSM9SM9zSM9SM9zh(ɞةةةר%"!%z0Σ'!################"""›)]T7g[6Ϥ'""############""֩&pa5VO8*"""###############%ɞ r ˠ۫٩֧\v SM9SM9SM9SM9SM9SM9SM9jSM9SM9\SM9SM9SM9SM9%SM9QSM9SM9SM9ݬڪ ڪڪڪڪ%##""#####################"Х'ڬ%"################"߰%ˢ'"###################&ݬةةڪڪڪڪڪSM9SM9SM9SM9HSM9/SM9SM9SM9SM9SM9ڪڪ ڪڪڪڪ$##########################""###################"####################"%"ݬ٩ڪڪڪڪڪ[ڪSM9SM9SM9SM9ڪڪڪڪڪڪ$#####################################################################"$"ޭ ٩ڪڪڪڪSM9SM9ڪڪڪڪڪڪ#""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""#####$#߮ ٩ڪڪڪڪGڪڪڪ ڪ&ڪ%ڪ%14334*&"%#%#%#%#%#%#&")4&"######".1%#%#%#%#%#%#%#%#$$3,"######"(4!'&"%#%#%#%#%#%#&",43333337D Ow !"##$#߮ ٩ڪڪڪڪڪڪڪڪڪڪڪӭ̪#ϫKϫEϫEϫFϫEϫEϫDϫJΪ) 8 H E F F E F E I%ϫFϫFϫEϫEϫEϫFϫFϫFϫApe" K D E E F E E J 
/Ϋ1ϫJϫDϫEϫEϫFϫFϫDϫJͪΫ٨)p!"$#ޮ ٩ڪڪڪ$ڪڪڪڪڪڪڪگ ҬҬҬҬҬҬҬҬҬҬ$\ʥ5ҬҬҬҬҬҬҬҬӬܽ!TҬҬҬҬҬҬҬҬҬҬٯ  "$7 #"ݭ٩ڪڪڪڪڪڪڪڪڪTڪڪڪҬҬҬuҬnҬ|ҬsҬpҬsҬxҬrѫ@ rspswrr$^ɥ<ӭҬqҬqҬwҬtҬqҬqҬsӭ"||vtqq}ooװҬҬlҬqҬsҬyҬmҬpҬtҬҬҬ"/%="!ܬ٩ڪڪڪڪڪڪڪڪڪڪڪCڪڪҬҬҬ~ҬwҬҬwҬ{ҬzҬ|Ҭ|ѫD zwy|z{z$_ɥ;ӭҬyҬ{ҬzҬ{ҬzҬuҬ{ӭ"}{y{vxyծҬҬtҬ{ҬzҬ}ҬwҬyҬ|ҬҬҬ#߯"۫٩ڪڪڪڪڪڪڪڪڪڪڪڪڪҬҬҬ|ҬuҬҬuҬyҬxҬzҬzѫB xuwyxyx$_ɥ<ӭҬwҬyҬxҬyҬxҬsҬxӭ"}zwytvvծҬҬrҬyҬwҬ{ҬuҬwҬzҬҬҬ5 #%ڪڪڪڪڪڪڪڪڪڪڪڪڪڪ8ڪҬҬҬ|ҬuҬҬuҬyҬxҬzҬzѫB xuwyxyx$_ɥ<ӭҬwҬyҬxҬyҬxҬsҬxӭ"}zwytvvծҬҬrҬyҬwҬ{ҬuҬwҬzҬҬҬ"e###%٩ڪڪڪڪڪڪڪڪڪڪڪڪڪ.ڪҬҬҬ|ҬuҬҬuҬyҬxҬzҬzѫB xuwyxyx$_ɥ<ӭҬwҬyҬxҬyҬxҬsҬxӭ"}zwytvvծҬҬrҬyҬwҬ{ҬuҬwҬzҬҬҬ!###"&٩ڪڪڪڪڪڪڪڪڪڪڪڪڪ ڪҬҬҬ|ҬuҬҬuҬyҬxҬzҬzѫC xuwzwzx$_ɥ;ӭҬwҬyҬxҬyҬxҬsҬxӭ"|zwytvvծҬҬrҬyҬxҬ{ҬuҬwҬzҬҬҬ [######$߮ ٩ڪڪڪڪڪڪڪڪڪڪڪڪUڪҬҬҬzҬtҬҬuҬwҬwҬzҬxѫA vuvxyxw$_ɥ<ӭҬvҬwҬyҬxҬvҬsҬvӭ"~zwwtuu֯ҬҬpҬxҬwҬ|ҬsҬvҬyҬҬҬ( #######j֧Dڪڪڪڪڪڪڪڪڪڪڪ9ڪҬҬҬҬҬҬҬҬҬҬѬ"UȤ/ӬӬӬӬӬӬӬҬӬܵ xҬҬҬҬҬҬҬҬҬҬuҬ!#######b*ڪڪ%ڪ>ڪLڪMڪ@ڪ&ڪҬҬ%Ҭ"Ҭ"Ҭ"Ҭ"Ҭ"Ҭ!Ҭ$ҬP HHHHHHHI$  iHIHHHIHGVҬҬ$Ҭ!Ҭ"Ҭ"Ҭ"Ҭ"Ҭ!Ҭ$Ҭ !#######@ڪڪҫѫtЫЫЫЫѫЫЫѫͨ,+ !!!!!!! IϪAЫѫѫЫЫѫЫЫѫeЫ$"######" ڪڪڪڪڪڪڪڪҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬӬΩ])86ЪҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬҬ$"######!5ҬҬҬҬqҬrҬpҬsҬuҬrҬrӬͨ^);qqvsqqr2ЪzҬ|ҬvҬsҬqҬqҬ|ҬoҬnҬҬ"!######c ҬҬҬҬzҬvҬyҬ{ҬyҬ{ҬzӬͨ_);yzz{zuz3Ъ}ҬҬ{ҬyҬ{ҬvҬҬwҬwҬҬ"#####!ޭҬҬҬҬxҬuҬwҬyҬxҬyҬxӬͨ_)<wyxyxsx3Ъ}ҬҬzҬwҬyҬtҬҬvҬvҬҬ3!####"KҬҬҬҬxҬuҬwҬyҬxҬyҬxӬͨ_)<wyxyxsx3Ъ}ҬҬzҬwҬyҬtҬҬvҬvҬҬ% ####%ҬҬҬҬyҬvҬxҬzҬxҬzҬyӬͨ_);xzyzyty3Ъ}ҬҬzҬxҬzҬuҬҬwҬwҬҬ="## {<ҬҬҬҬtҬsҬsҬuҬvҬuҬuӬͨ^);ttwvtrt2Ъ{ҬҬwҬtҬtҬrҬ~ҬrҬqҬҬA # ߰ ҬҬҬҬҬҬҬҬҬҬҬͨ^):4ЪҬҬҬҬҬҬҬҬҬ;ޱ ҬҬҬҬҬҬҬҬҬҬҬΩA*"D Ы_ҬҬҬҬҬҬҬҬҬҬ&%"$ҬҬҬҬҬҬҬҬҬΩ(nxuvssszwzuvvzwytvvzwytvv|y{vxxyuspq|nn=|~~}\???0`?o}o?}o?}o?}o?}oo?` ` `o?o?oooo`` (@ @ŀʍ̮JֱٹsӨڼX /ںլ0ײnص׳ٸƌ鿗㸉չʹl?Ԭ۾Ϟصtںڼ۾۽̴au%կկgٺ۾ۿۿ۽۾Ĥ?Oʚʒ״۽ۿڽټڽۼʮNѻLɑ׳)ٹܿۿۿґ×ƝŜśĚÙǮ_ϟ7̕a}RāZҡSftͪ,–×ĚśśŜƜƜŜÙﻍR㴃չÖė”ė\ÖˣԲ3ȞĚśśśśśśƜś—|IĚƜŜƜƜƜÚ̡amkdëRțěśśśśśśśƜƜØuÙƜśśśśśÙϤ b}~}ԿpĘśśśśśśśśśśŜƜØSM9SM9SM9SM9SM9SM9{8ŜĚěěěěěěÙ˥ԷIʡÙěěěěěěěěěěěĚśŜgzMI=UN8SM9SM9SM9SM9SM9SM9SM9SM9ǛǜǜȝɞɞɞɞɞɞǛŘƛǜǜȝɞɞɞɞɞȝǜǜǜǝȝɝʟɝ7SM9SM9SM9$SM9SM9SM9TM9YQ5+6Q ê KH<TN9TN8!SM9 
SM9SM9SM9SM9SM9SM9$SM9SM9SM9SM9"SM9cSM9SM9SM9SM9RM9VO8m$k%n`1XP6QK7SM6l^3s/,++~,r/rc2[R5QL7WP6QL7fZ4q/,+,r/g[4QL7WP6QL7ZR5qb2r/~,++,s/l^1SL6PJ:ZQ5`V3PK:TN9SM9SM9SM9SM9SM9SM9SM9gSM9%SM9SM9SM9SM9SM9WP7'ޭͣ*p2-$#""""#$Ө'~/ZR9.%#"""#%-[S9{0ҧ($#""""#" }j% ֧ݬSM9SM9#SM9SM9SM9 SM9SM9SM9SM9SM9ӥ٩ڪ#!!"#######"!$!"######!$!"######## ۫ڪTM9SM9SM9SM9SM9SM9SM9k'۫ڪ۫$#$$$$######$$$$$"######$$$#"""""#$%߮ةڪڪl%q`,SM9۫ڪ۪ .ڰ R۰ O۰ Pگ R0GROT<ݰ9خ T۰ O۰ Qۯ J/PPOS2خ E۰ R۰ Oڰ SݰB! !* G ~"%٩ڪqڪڪڪѬѬBѬѬѬЫװZ"!uϫmѬѬѬΪW  \ΪѬѬѬѬ;$߯ ٩ڪڪڪڪڪ?ڪڪڪ۪۰ ӬCҬҬuҬsѫٱ]"zvw}!mѬiҬҬvӬtϪ}[!sv ^ϪuӬxҬvҬxҬw'!#$!8#ޭ ٩ڪڪڪڪڪڪ ڪҬҬAҬҬtҬrѫرZ"yuu~!kѫgҬҬuӬtϪ|X!su [ϪtӭwҬuҬxҬtҬޭ "$!ܬڪڪڪڪڪڪڪڪҬҬDҬҬҬѫر`"!rѫmҬҬҬϪ_! _ϪӬҬҬҬ! C##%ةڪڪڪڪڪڪڪҬҬ(ҬxҬoҬoҬw֯1"`tnx#HϩAҬyҬnҬsЪf- soow3Ъ^ҬuҬoҬvҬWҬ#!z###"ݬڪ*ڪ+ڪϪϪ Ϫ~ϪϪϪ̨\$U""!$zٲ ?ΩϪϪϪϪ=Ϫ#"### w%ڪڪҬҬҬҬҬҬҬwҬ|ӬЫo j|y"ܳ ^ѫӬyҬyҬҬNҬҬҬҬ#!##" ڪڪڪҬҬҬҬtҬwҬЫk gwv"}۳ XѫҬtҬvҬҬLҬ! V##!t#ҬҬҬҬtҬyҬЫm iyv"~ܳ \ѫӬwҬwҬҬMҬ""(ҬҬҬҬҬҬЪo!g!ڲ IΩϪϪϪЪCЪ!2 ҬҬ Ҭ Ҭ ͨ'   !pjir*#? P ҬҬҬҬΩ;qtL;qtL?RB??D??(0` $РΘδ[׳ٹ~׳x s۽ծ ضjٸصʔ鿘 縉۹͹Y"ڻԫ)ٹ۽۾ڼнxɺs,կٺڻٻٹ{ͷklƜ ˜ƜŚÙ^ LŜ ˥ѯ6͂xչXțЦ ęśŜƜƜś—Ėw,śĚę–ȚֶBe˦ęśśśśƜƜę SM9SM9SM9^T3yƝśśśěʟDΫ%˜ěěěěĚĚśŜÙ aP*ye$UN8SM9SM9SM9SM9SM9SM9ÒȖǝǝɞɞɞɞɝ΢өƛǝɞɞɞɞɞȝǜȝɞʟƚSM9SM9 SM9SM9 ]S3-Xs`~~sZ"_\S4(SM9#SM9SM9SM9SM9#SM9SM9 SM9;SM9SM9TN8HF>~n$}q,`V5]T5w.*((*z.qb3QL7ZR5s/*(){-fZ4NI7gZ4t.+(()}+gZ0WO5zh(]T5OJ:lTM9SM9SM9SM9SM9SSM9SM9SM9SM9ko& ը(Τ'""##"#$,ŝ)""#""ڬ&-ޯ%""##""٫ ެqɝ TN9XSM9;SM9SM9SM9Ф֨~٪ "#%%$###"##%$###""$%$####$۪بة ԥue++1JSM9SM9m%٩ ګݯ"լ dدi׮mܱ J!^jmKҪ]լ jخmܱ K!]jmLѩ\׮jخnڰ I$ + W&۫٩uܫ&٩* ڪڪѬѬѬѬЫְm" lΪѬЫկn! 
lΪ}ѬѬѬaѬ)/#ڪڪڪڪڪxڪڪ٩׫ԭҬ|Ҭsѫzװg!uu }iѫuԭwҬ{ׯi!tv |hЪqӬtҬ|Ҭ[ޱ!!F%ڪڪڪڪڪڪҬҬҬҬѫ֯n%|!#hͨ}ЪΩԮi%|!#iЪҬҬҬdҬ'"#%٩ڪڪڪڪ ڪҬҬ)Ҭ-Ҭ.Ԯݴ hݴ vܴ {ݴ Dbw{Eܳ aݴ wݴ |ܴ BϪҬ/Ҭ/ҬҬ"##!3"ѫѫЪЪϪծm$ "mΩ~ѫЪЫ]Ъ"#"٩ةڪڪڪҬҬҬҬ}Ҭqѫ{װg!su ygѫtԭvӬӬXӬҬҬ&"# C"ҬҬҬҬѫ֯q" pͨ|ЪϪЫ\Ϫ"!R!g#ҬҬ*Ҭ/Ҭ1Ԯ"'00gw}B"  !!!!]!$#ҬҬҬծ"|r~Wb,14?#33#3?#???( @ ֯йjծںٹe ǨJɌطQضϢ濙$귈๋͹q*Ϻq@ۿ۽ѤºH̙|УԪ߽ѭ5ƗśÙSM9SM9pԺ•×ț*㱚BɝĚŜŜęݸ&uJG>TN8SM9SM9~ɞʟȞ˞(O̠ȝȝɝɞɟɞ•ȕ‘SM9TM9VO7 ACD} ̮~ e\S4$SM9#SM9SM9SM9SM9TM9PK:eZ0X(n0(ʡ%ȟ%(u/wf1(ʡ%ŝ&|-sd2*›&ˢ%ƞ&z*m x=>C:[S5rUN8SM9PRL9bX3 %##""#%##"#%##""#76P-1PSM9YQ7<=Erb)Ȟ ר&׮[ҫ֮ d n zaدyѪpd ~eٰkҫԬ @+ת#3'~ޮ ֧֧gޭ4ѬѬ_ѫկt v!~oΪ~Ыws!pϪtҬѬ>ծ߭ % ٩ڪڪ֫ҬSҫ}֯`raӭӭsefѫgҬ~Ҭ3 5$"٩gڪڪvڪҬկѫҬwЪ|߶c!} rׯgҬҬ^ҬҬ!!C#"#ҬҬҬЪ߶r! z֯sϪѫcѫ!!%" "߮ ڪҬҬKЫP߶:!Q!E޵[ܴ ݴ Zݵ "!Z""a#$"ҬЫ߷ bh '7?(  ‚׵޾ ݇ܽĠ-뷈䷉ŶzvDDBVO7Òɤ,׿_ѶR™ ƚ•뿎Gub& e{c h>5׬ӭ ͨ^]jӭm;*!D!!ҬҬ 3 vƣܬڪ֯ ""docker-1.10.3/hack/make/.resources-windows/docker.png000066400000000000000000024054231267010174400224370ustar00rootroot00000000000000PNG  IHDR6 pHYs   OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 
0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3-:8iTXtXML:com.adobe.xmp Adobe Photoshop CC 2014 (Macintosh) 2015-06-03T15:00:56-07:00 2015-06-03T15:00:56-07:00 2015-06-03T15:00:56-07:00 xmp.iid:86b7b718-e25a-4c13-a683-ec7891e69062 adobe:docid:photoshop:e4eed24c-3e3f-1178-8933-fc93f3906ef0 xmp.did:5f849bd6-ddcf-40db-9cbd-a97dad603daa created xmp.iid:5f849bd6-ddcf-40db-9cbd-a97dad603daa 2015-06-03T15:00:56-07:00 Adobe Photoshop CC 2014 (Macintosh) saved xmp.iid:86b7b718-e25a-4c13-a683-ec7891e69062 2015-06-03T15:00:56-07:00 Adobe Photoshop CC 2014 (Macintosh) / image/png 3 sRGB IEC61966-2.1 1 720000/10000 720000/10000 2 1 400 400 T cHRMz%u0`:o_F IDATxb>~>!>>Q>a>>!>>A  1>  ?QT.P>@1T.wP/ _p@?gP/gh8G`o`0OH7 _OT.T.T.T.T.T.T.T.>>>>>>>>T.T.T.T.T.T.T.T.} <1  _y }kr iti> zhz ?< 8 e < UP   D G3:*  YU '` `$6`(` N00$`N0$0` `H 6$`Z$``B<`T*0`(G?QT.P>@1T.OnL.@K *hT.` >(XT.p>8HT.>7`?(p8i% V&AT.T.!T.T.aT.QT.T.!T.>>>q>A>>1>>11T.T.1T.T.AT.qT.T.T.>!>>Q>a>>!>>AT.aT.QT.T.!T.T.!T.T.O <iڕڼگڀS.;I?1,PV&.HVQ ''hڭV&o!ebddV&Oy9@6-|OkV&uV&NpbWV&NfV&HV&OcV&NV&xM_gV&M<CV&L9/V&/kV&K +V& WV&K V& 2V& FV&0   T. ?   Q>P   T.@    1>p   T.`! CV&          K/>V& V&XV&V&s\ QV&1lF7\@%V&C:1&7" V&A V& V& V&V&9MS9MS 9MS9MS9MSV&9MSdzM9MS 9MSP9MSL9MS  ¨V&9MSQ /99MS? #Ǯ}9MS;=/X9MSzdz + ̵ κ ̵(?Y ¨ 5K(:!ʱ&7-A #V&9MSX.dz9MS 5dzz  ҿ+>X ,>4I  ͹  +=);!ʱ!0(:}V&9MSSz0<dz9MS>dze *<1E(9.A ##3"0  !0+ #V&9MS^hTdz9MS>dzK  ,(8 .$'8#2 *(#!(!fV&9MSKRA]dz9MS`dz4)*(& .)!ldz9MS*3:='}dz9MS,ma5N ! #   %$dz9MSV\;                  dz9MS.JJ:dz9MS(<@>dz:NT/=?=Ʋřf Vg8 g8Kf7 ki: h9E ad5  k Mf700w Xg8ֻ: DX Yf7yզD YGQg8kӂI }  h:bPE  c Tg8d&0*n Tg8F  kg8cқQ d  i:cE9" t yf7G $l rf7_xG  m{k=< qh9fn: s}h9A ?slg8.4w|f7nZ(( x{g9q  j;w ~$ g8a%g99-* f7! 
Z g8>'3h:R'*c4B| 6$  h9oX   h9j P '  f7#e ?#   d5qT 6 &  f79( - %  c4EoK6.IENDB`docker-1.10.3/hack/make/.validate000066400000000000000000000017041267010174400164640ustar00rootroot00000000000000#!/bin/bash if [ -z "$VALIDATE_UPSTREAM" ]; then # this is kind of an expensive check, so let's not do this twice if we # are running more than one validate bundlescript VALIDATE_REPO='https://github.com/docker/docker.git' VALIDATE_BRANCH='master' if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" VALIDATE_BRANCH="${TRAVIS_BRANCH}" fi VALIDATE_HEAD="$(git rev-parse --verify HEAD)" git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" validate_diff() { if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then git diff "$VALIDATE_COMMIT_DIFF" "$@" fi } validate_log() { if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then git log "$VALIDATE_COMMIT_LOG" "$@" fi } fi docker-1.10.3/hack/make/README.md000066400000000000000000000006341267010174400161520ustar00rootroot00000000000000This directory holds scripts called by `make.sh` in the parent directory. Each script is named after the bundle it creates. 
They should not be called directly - instead, pass it as argument to make.sh, for example: ``` ./hack/make.sh test ./hack/make.sh binary ubuntu # Or to run all bundles: ./hack/make.sh ``` To add a bundle: * Create a shell-compatible file here * Add it to $DEFAULT_BUNDLES in make.sh docker-1.10.3/hack/make/binary000066400000000000000000000037721267010174400161100ustar00rootroot00000000000000#!/bin/bash set -e BINARY_NAME="docker-$VERSION" BINARY_EXTENSION="$(binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" source "${MAKEDIR}/.go-autogen" ( if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then # must be cross-compiling! case "$(go env GOOS)/$(go env GOARCH)" in windows/amd64) export CC=x86_64-w64-mingw32-gcc export CGO_ENABLED=1 export LDFLAGS_STATIC_DOCKER="$LDFLAGS_STATIC_DOCKER -linkmode internal -extld=${CC}" ;; esac fi if [ "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" == "windows/amd64" ] && [ "$(go env GOOS)" == "windows" ]; then # native compilation of Windows on Windows with golang 1.5+ needs linkmode internal # https://github.com/golang/go/issues/13070 export LDFLAGS_STATIC_DOCKER="$LDFLAGS_STATIC_DOCKER -linkmode=internal" fi if [ "$(go env GOOS)" == "linux" ] ; then case "$(go env GOARCH)" in arm*|386) # linking for Linux on arm or x86 needs external linking to avoid # https://github.com/golang/go/issues/9510 until we move to Go 1.6 if [ "$IAMSTATIC" == "true" ] ; then export EXTLDFLAGS_STATIC="$EXTLDFLAGS_STATIC -zmuldefs" export LDFLAGS_STATIC_DOCKER="$LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC\"" else export LDFLAGS="$LDFLAGS -extldflags -zmuldefs" fi ;; esac fi if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ] && [ "$DOCKER_EXPERIMENTAL" ]; then if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then export CGO_ENABLED=1 export CC=o64-clang export LDFLAGS='-linkmode external -s' export LDFLAGS_STATIC_DOCKER='-extld='${CC} else export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we 
cannot dlopen in pkcs11 in a static binary fi fi echo "Building: $DEST/$BINARY_FULLNAME" go build \ -o "$DEST/$BINARY_FULLNAME" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS $LDFLAGS_STATIC_DOCKER " \ ./docker ) echo "Created binary: $DEST/$BINARY_FULLNAME" ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" hash_files "$DEST/$BINARY_FULLNAME" docker-1.10.3/hack/make/build-deb000066400000000000000000000062201267010174400164420ustar00rootroot00000000000000#!/bin/bash set -e # subshell so that we can export PATH and TZ without breaking other things ( export TZ=UTC # make sure our "date" variables are UTC-based bundle .integration-daemon-start # TODO consider using frozen images for the dockercore/builder-deb tags tilde='~' # ouch Bash 4.2 vs 4.3, you keel me debVersion="${VERSION//-/$tilde}" # using \~ or '~' here works in 4.3, but not 4.2; just ~ causes $HOME to be inserted, hence the $tilde # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then gitUnix="$(git log -1 --pretty='%at')" gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" gitCommit="$(git log -1 --pretty='%h')" gitVersion="git${gitDate}.0.${gitCommit}" # gitVersion is now something like 'git20150128.112847.0.17e840a' debVersion="$debVersion~$gitVersion" # $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false # true # $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false # true # $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false # true # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a fi debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" 
debDate="$(date --rfc-2822)" # if go-md2man is available, pre-generate the man pages ./man/md2man-all.sh -q || true # TODO decide if it's worth getting go-md2man in _each_ builder environment to avoid this # TODO add a configurable knob for _which_ debs to build so we don't have to modify the file or build all of them every time we need to test for dir in contrib/builder/deb/*/; do version="$(basename "$dir")" suite="${version##*-}" image="dockercore/builder-deb:$version" if ! docker inspect "$image" &> /dev/null; then ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) fi mkdir -p "$DEST/$version" cat > "$DEST/$version/Dockerfile.build" <<-EOF FROM $image WORKDIR /usr/src/docker COPY . /usr/src/docker RUN mkdir -p /go/src/github.com/docker \ && ln -snf /usr/src/docker /go/src/github.com/docker/docker EOF if [ "$DOCKER_EXPERIMENTAL" ]; then echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" fi cat >> "$DEST/$version/Dockerfile.build" <<-EOF RUN ln -sfv hack/make/.build-deb debian RUN { echo '$debSource (${debVersion}-0~${suite}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog RUN dpkg-buildpackage -uc -us EOF tempImage="docker-temp/build-deb:$version" ( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) docker run --rm "$tempImage" bash -c 'cd .. 
&& tar -c *_*' | tar -xvC "$DEST/$version" docker rmi "$tempImage" done bundle .integration-daemon-stop ) 2>&1 | tee -a "$DEST/test.log" docker-1.10.3/hack/make/build-rpm000066400000000000000000000114051267010174400165070ustar00rootroot00000000000000#!/bin/bash set -e # subshell so that we can export PATH and TZ without breaking other things ( export TZ=UTC # make sure our "date" variables are UTC-based source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" # TODO consider using frozen images for the dockercore/builder-rpm tags rpmName=docker-engine rpmVersion="$VERSION" rpmRelease=1 # rpmRelease versioning is as follows # Docker 1.7.0: version=1.7.0, release=1 # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 # Docker 1.7.0-cs1: version=1.7.0.cs1, release=1 # Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1 # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH # if we have a "-rc*" suffix, set appropriate release if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then rcVersion=${rpmVersion#*-rc} rpmVersion=${rpmVersion%-rc*} rpmRelease="0.${rcVersion}.rc${rcVersion}" fi DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) if [ -n "$(git status --porcelain --untracked-files=no)" ]; then DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported" fi # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then gitUnix="$(git log -1 --pretty='%at')" gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" gitCommit="$(git log -1 --pretty='%h')" gitVersion="${gitDate}.git${gitCommit}" # gitVersion is now something like '20150128.112847.17e840a' rpmVersion="${rpmVersion%-dev}" rpmRelease="0.0.$gitVersion" fi # Replace any other dashes with periods rpmVersion="${rpmVersion/-/.}" rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" rpmDate="$(date +'%a %b %d %Y')" # if go-md2man is 
available, pre-generate the man pages ./man/md2man-all.sh -q || true # TODO decide if it's worth getting go-md2man in _each_ builder environment to avoid this # Convert the CHANGELOG.md file into RPM changelog format VERSION_REGEX="^\W\W (.*) \((.*)\)$" ENTRY_REGEX="^[-+*] (.*)$" while read -r line || [[ -n "$line" ]]; do if [ -z "$line" ]; then continue; fi if [[ "$line" =~ $VERSION_REGEX ]]; then echo >> contrib/builder/rpm/changelog echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/changelog fi if [[ "$line" =~ $ENTRY_REGEX ]]; then echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/changelog fi done < CHANGELOG.md # TODO add a configurable knob for _which_ rpms to build so we don't have to modify the file or build all of them every time we need to test for dir in contrib/builder/rpm/*/; do version="$(basename "$dir")" suite="${version##*-}" image="dockercore/builder-rpm:$version" if ! docker inspect "$image" &> /dev/null; then ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) fi mkdir -p "$DEST/$version" cat > "$DEST/$version/Dockerfile.build" <<-EOF FROM $image COPY . 
/usr/src/${rpmName} EOF if [ "$DOCKER_EXPERIMENTAL" ]; then echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" fi cat >> "$DEST/$version/Dockerfile.build" <<-EOF RUN mkdir -p /root/rpmbuild/SOURCES \ && echo '%_topdir /root/rpmbuild' > /root/.rpmmacros WORKDIR /root/rpmbuild RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS WORKDIR /root/rpmbuild/SPECS RUN tar -cz -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar.gz ${rpmName} RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec RUN rpmbuild -ba \ --define '_gitcommit $DOCKER_GITCOMMIT' \ --define '_release $rpmRelease' \ --define '_version $rpmVersion' \ --define '_origversion $VERSION' \ --define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \ ${rpmName}.spec EOF # selinux policy referencing systemd things won't work on non-systemd versions # of centos or rhel, which we don't support anyways if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then cat >> "$DEST/$version/Dockerfile.build" <<-EOF RUN tar -cz -C /usr/src/${rpmName}/contrib -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux RUN rpmbuild -ba \ --define '_gitcommit $DOCKER_GITCOMMIT' \ --define '_release $rpmRelease' \ --define '_version $rpmVersion' \ --define '_origversion $VERSION' \ ${rpmName}-selinux.spec EOF fi tempImage="docker-temp/build-rpm:$version" ( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . 
) docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version" docker rmi "$tempImage" done source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" ) 2>&1 | tee -a $DEST/test.log docker-1.10.3/hack/make/cover000066400000000000000000000006031267010174400157300ustar00rootroot00000000000000#!/bin/bash set -e bundle_cover() { coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) for p in "${coverprofiles[@]}"; do echo ( set -x go tool cover -func="$p" ) done } if [ "$HAVE_GO_TEST_COVER" ]; then bundle_cover 2>&1 | tee "$DEST/report.log" else echo >&2 'warning: the current version of go does not support -cover' echo >&2 ' skipping test coverage report' fi docker-1.10.3/hack/make/cross000066400000000000000000000017231267010174400157470ustar00rootroot00000000000000#!/bin/bash set -e # explicit list of os/arch combos that support being a daemon declare -A daemonSupporting daemonSupporting=( [linux/amd64]=1 [windows/amd64]=1 ) # if we have our linux/amd64 version compiled, let's symlink it in if [ -x "$DEST/../binary/docker-$VERSION" ]; then mkdir -p "$DEST/linux/amd64" ( cd "$DEST/linux/amd64" ln -s ../../../binary/* ./ ) echo "Created symlinks:" "$DEST/linux/amd64/"* fi for platform in $DOCKER_CROSSPLATFORMS; do ( export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION mkdir -p "$DEST" ABS_DEST="$(cd "$DEST" && pwd -P)" export GOOS=${platform%/*} export GOARCH=${platform##*/} if [ -z "${daemonSupporting[$platform]}" ]; then export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported fi source "${MAKEDIR}/binary" ) done docker-1.10.3/hack/make/dynbinary000066400000000000000000000012271267010174400166140ustar00rootroot00000000000000#!/bin/bash set -e if [ -z "$DOCKER_CLIENTONLY" ]; then source "${MAKEDIR}/.dockerinit" hash_files "$DEST/dockerinit-$VERSION" else # 
DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) export DOCKER_INITSHA1="" fi # DOCKER_INITSHA1 is exported so that other bundlescripts can easily access it later without recalculating it ( export IAMSTATIC="false" export LDFLAGS_STATIC_DOCKER='' export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here source "${MAKEDIR}/binary" ) docker-1.10.3/hack/make/dyngccgo000066400000000000000000000012711267010174400164110ustar00rootroot00000000000000#!/bin/bash set -e if [ -z "$DOCKER_CLIENTONLY" ]; then source "${MAKEDIR}/.dockerinit-gccgo" hash_files "$DEST/dockerinit-$VERSION" else # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) export DOCKER_INITSHA1="" fi # DOCKER_INITSHA1 is exported so that other bundlescripts can easily access it later without recalculating it ( export IAMSTATIC="false" export EXTLDFLAGS_STATIC='' export LDFLAGS_STATIC_DOCKER='' export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here source "${MAKEDIR}/gccgo" ) docker-1.10.3/hack/make/gccgo000066400000000000000000000012011267010174400156670ustar00rootroot00000000000000#!/bin/bash set -e BINARY_NAME="docker-$VERSION" BINARY_EXTENSION="$(binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" source "${MAKEDIR}/.go-autogen" if [[ "${BUILDFLAGS[@]}" =~ 'netgo ' ]]; then EXTLDFLAGS_STATIC+=' -lnetgo' fi # gccgo require explicit flag -pthread to allow goroutines to work. 
go build -compiler=gccgo \ -o "$DEST/$BINARY_FULLNAME" \ "${BUILDFLAGS[@]}" \ -gccgoflags " -g $EXTLDFLAGS_STATIC -Wl,--no-export-dynamic -ldl -pthread " \ ./docker echo "Created binary: $DEST/$BINARY_FULLNAME" ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" hash_files "$DEST/$BINARY_FULLNAME" docker-1.10.3/hack/make/generate-index-listing000077500000000000000000000035451267010174400211730ustar00rootroot00000000000000#!/bin/bash set -e # This script generates index files for the directory structure # of the apt and yum repos : ${DOCKER_RELEASE_DIR:=$DEST} APTDIR=$DOCKER_RELEASE_DIR/apt YUMDIR=$DOCKER_RELEASE_DIR/yum if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then echo >&2 'release-rpm or release-deb must be run before generate-index-listing' exit 1 fi create_index() { local directory=$1 local original=$2 local cleaned=${directory#$original} # the index file to create local index_file="${directory}/index" # cd into dir & touch the index file cd $directory touch $index_file # print the html header cat <<-EOF > "$index_file" Index of ${cleaned}/

Index of ${cleaned}/


../
	EOF

	# start of content output
	(
	# change IFS locally within subshell so the for loop saves line correctly to L var
	IFS=$'\n';

	# pretty sweet, will mimick the normal apache output
	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' -printf "%-44f@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|sed 's,\([\ ]\+\)@_@,\1,g');
	do
		# file
		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");

		# file with file size
		F=$(du -bh $F | cut -f1);

		# output with correct format
		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
	done;
	) >> $index_file;

	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/\1,g' >> $index_file

	# print the footer html
	echo "

" >> $index_file } get_dirs() { local directory=$1 for d in `find ${directory} -type d`; do create_index $d $directory done } get_dirs $APTDIR get_dirs $YUMDIR docker-1.10.3/hack/make/release-deb000077500000000000000000000111341267010174400167660ustar00rootroot00000000000000#!/bin/bash set -e # This script creates the apt repos for the .deb files generated by hack/make/build-deb # # The following can then be used as apt sources: # deb http://apt.dockerproject.org/repo $distro-$release $version # # For example: # deb http://apt.dockerproject.org/repo ubuntu-trusty main # deb http://apt.dockerproject.org/repo ubuntu-trusty testing # deb http://apt.dockerproject.org/repo debian-wheezy experimental # deb http://apt.dockerproject.org/repo debian-jessie main # # ... and so on and so forth for the builds created by hack/make/build-deb : ${DOCKER_RELEASE_DIR:=$DEST} : ${GPG_KEYID:=releasedocker} APTDIR=$DOCKER_RELEASE_DIR/apt/repo # setup the apt repo (if it does not exist) mkdir -p "$APTDIR/conf" "$APTDIR/db" # supported arches/sections arches=( amd64 i386 ) # Preserve existing components but don't add any non-existing ones for component in main testing experimental ; do if ls "$APTDIR/dists/*/$component" >/dev/null 2>&1 ; then components+=( $component ) fi done # set the component for the version being released component="main" if [[ "$VERSION" == *-rc* ]]; then component="testing" fi if [ "$DOCKER_EXPERIMENTAL" ] || [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then component="experimental" fi # Make sure our component is in the list of components if [[ ! "${components[*]}" =~ $component ]] ; then components+=( $component ) fi # create apt-ftparchive file on every run. This is essential to avoid # using stale versions of the config file that could cause unnecessary # refreshing of bits for EOL-ed releases. cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" Dir { ArchiveDir "${APTDIR}"; CacheDir "${APTDIR}/db"; }; Default { Packages::Compress ". 
gzip bzip2"; Sources::Compress ". gzip bzip2"; Contents::Compress ". gzip bzip2"; }; TreeDefault { BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; Directory "pool/\$(SECTION)"; Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; SrcDirectory "pool/\$(SECTION)"; Sources "\$(DIST)/\$(SECTION)/source/Sources"; Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; }; EOF for dir in contrib/builder/deb/${PACKAGE_ARCH}/*/; do version="$(basename "$dir")" suite="${version//debootstrap-}" cat <<-EOF Tree "dists/${suite}" { Sections "${components[*]}"; Architectures "${arches[*]}"; } EOF done >> "$APTDIR/conf/apt-ftparchive.conf" if [ ! -f "$APTDIR/conf/docker-engine-release.conf" ]; then cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" APT::FTPArchive::Release::Origin "Docker"; APT::FTPArchive::Release::Components "${components[*]}"; APT::FTPArchive::Release::Label "Docker APT Repository"; APT::FTPArchive::Release::Architectures "${arches[*]}"; EOF fi # release the debs for dir in contrib/builder/deb/*/; do version="$(basename "$dir")" codename="${version//debootstrap-}" DEBFILE=( "bundles/$VERSION/build-deb/$version/docker-engine"*.deb ) # if we have a $GPG_PASSPHRASE we may as well # dpkg-sign before copying the deb into the pool if [ ! 
-z "$GPG_PASSPHRASE" ]; then dpkg-sig -g "--no-tty --passphrase '$GPG_PASSPHRASE'" \ -k "$GPG_KEYID" --sign builder "${DEBFILE[@]}" fi # add the deb for each component for the distro version into the pool mkdir -p "$APTDIR/pool/$component/d/docker-engine/" cp "${DEBFILE[@]}" "$APTDIR/pool/$component/d/docker-engine/" # update the filelist for this codename/component mkdir -p "$APTDIR/dists/$codename/$component" find "$APTDIR/pool/$component" \ -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" done # clean the databases apt-ftparchive clean "$APTDIR/conf/apt-ftparchive.conf" # run the apt-ftparchive commands so we can have pinning apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" for dir in contrib/builder/deb/*/; do version="$(basename "$dir")" codename="${version//debootstrap-}" apt-ftparchive \ -o "APT::FTPArchive::Release::Codename=$codename" \ -o "APT::FTPArchive::Release::Suite=$codename" \ -c "$APTDIR/conf/docker-engine-release.conf" \ release \ "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" for arch in "${arches[@]}"; do mkdir -p "$APTDIR/dists/$codename/$component/binary-$arch" apt-ftparchive \ -o "APT::FTPArchive::Release::Codename=$codename" \ -o "APT::FTPArchive::Release::Suite=$codename" \ -o "APT::FTPArchive::Release::Component=$component" \ -o "APT::FTPArchive::Release::Architecture=$arch" \ -c "$APTDIR/conf/docker-engine-release.conf" \ release \ "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" done done docker-1.10.3/hack/make/release-rpm000077500000000000000000000044061267010174400170360ustar00rootroot00000000000000#!/bin/bash set -e # This script creates the yum repos for the .rpm files generated by hack/make/build-rpm # # The following can then be used as a yum repo: # http://yum.dockerproject.org/repo/$release/$distro/$distro-version # # For example: # http://yum.dockerproject.org/repo/main/fedora/23 # 
http://yum.dockerproject.org/repo/testing/centos/7 # http://yum.dockerproject.org/repo/experimental/fedora/23 # http://yum.dockerproject.org/repo/main/centos/7 # # ... and so on and so forth for the builds created by hack/make/build-rpm : ${DOCKER_RELEASE_DIR:=$DEST} YUMDIR=$DOCKER_RELEASE_DIR/yum/repo : ${GPG_KEYID:=releasedocker} # manage the repos for each distribution separately distros=( fedora centos opensuse oraclelinux ) # get the release release="main" if [[ "$VERSION" == *-rc* ]]; then release="testing" fi if [ $DOCKER_EXPERIMENTAL ] || [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then release="experimental" fi for distro in "${distros[@]}"; do # Setup the yum repo REPO=$YUMDIR/$release/$distro for dir in contrib/builder/rpm/$distro-*/; do version="$(basename "$dir")" suite="${version##*-}" # if the directory does not exist, initialize the yum repo if [[ ! -d $REPO/$suite/Packages ]]; then mkdir -p "$REPO/$suite/Packages" createrepo --pretty "$REPO/$suite" fi # path to rpms RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) # if we have a $GPG_PASSPHRASE we may as well # sign the rpms before adding to repo if [ ! 
-z $GPG_PASSPHRASE ]; then # export our key to rpm import gpg --armor --export "$GPG_KEYID" > /tmp/gpg rpm --import /tmp/gpg # sign the rpms echo "yes" | setsid rpm \ --define "_gpg_name $GPG_KEYID" \ --define "_signature gpg" \ --define "__gpg_check_password_cmd /bin/true" \ --define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \ --resign "${RPMFILE[@]}" fi # copy the rpms to the packages folder cp "${RPMFILE[@]}" "$REPO/$suite/Packages" # update the repo createrepo --pretty --update "$REPO/$suite" done done docker-1.10.3/hack/make/sign-repos000077500000000000000000000025261267010174400167110ustar00rootroot00000000000000#!/bin/bash # This script signs the deliverables from release-deb and release-rpm # with a designated GPG key. : ${DOCKER_RELEASE_DIR:=$DEST} : ${GPG_KEYID:=releasedocker} APTDIR=$DOCKER_RELEASE_DIR/apt/repo YUMDIR=$DOCKER_RELEASE_DIR/yum/repo if [ -z "$GPG_PASSPHRASE" ]; then echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' exit 1 fi if [ ! -d $APTDIR ] && [ ! 
-d $YUMDIR ]; then echo >&2 'release-rpm or release-deb must be run before sign-repos' exit 1 fi sign_packages(){ # sign apt repo metadata if [ -d $APTDIR ]; then # create file with public key gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg" # sign the repo metadata for F in $(find $APTDIR -name Release); do if test "$F" -nt "$F.gpg" ; then gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ --armor --sign --detach-sign \ --batch --yes \ --output "$F.gpg" "$F" fi done fi # sign yum repo metadata if [ -d $YUMDIR ]; then # create file with public key gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg" # sign the repo metadata for F in $(find $YUMDIR -name repomd.xml); do if test "$F" -nt "$F.asc" ; then gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ --armor --sign --detach-sign \ --batch --yes \ --output "$F.asc" "$F" fi done fi } sign_packages docker-1.10.3/hack/make/test-deb-install000077500000000000000000000023511267010174400177720ustar00rootroot00000000000000#!/bin/bash # This script is used for testing install.sh and that it works for # each of component of our apt and yum repos set -e : ${DEB_DIR:="$(pwd)/bundles/$(cat VERSION)/build-deb"} if [[ ! -d "${DEB_DIR}" ]]; then echo "you must first run `make deb` or hack/make/build-deb" exit 1 fi test_deb_install(){ # test for each Dockerfile in contrib/builder for dir in contrib/builder/deb/*/; do local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" local dir=$(basename "$dir") if [[ ! 
-d "${DEB_DIR}/${dir}" ]]; then echo "No deb found for ${dir}" exit 1 fi local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) cat <<-EOF > "${script}" #!/bin/bash set -e set -x apt-get update && apt-get install -y apparmor dpkg -i /root/debs/*.deb || true apt-get install -yf /etc/init.d/apparmor start # this will do everything _except_ load the profile into the kernel ( cd /etc/apparmor.d /sbin/apparmor_parser --skip-kernel-load docker-engine ) EOF chmod +x "${script}" echo "testing deb install for ${from}" docker run --rm -i --privileged \ -v ${DEB_DIR}/${dir}:/root/debs \ -v ${script}:/install.sh \ ${from} /install.sh rm -f ${script} done } test_deb_install docker-1.10.3/hack/make/test-docker-py000066400000000000000000000007341267010174400174710ustar00rootroot00000000000000#!/bin/bash set -e # subshell so that we can export PATH without breaking other things ( bundle .integration-daemon-start dockerPy='/docker-py' [ -d "$dockerPy" ] || { dockerPy="$DEST/docker-py" git clone https://github.com/docker/docker-py.git "$dockerPy" } # exporting PYTHONPATH to import "docker" from our local docker-py test_env PYTHONPATH="$dockerPy" py.test "$dockerPy/tests/integration" bundle .integration-daemon-stop ) 2>&1 | tee -a "$DEST/test.log" docker-1.10.3/hack/make/test-install-script000077500000000000000000000017231267010174400205460ustar00rootroot00000000000000#!/bin/bash # This script is used for testing install.sh and that it works for # each of component of our apt and yum repos set -e test_install_script(){ # these are equivalent to main, testing, experimental components # in the repos, but its the url that will do the conversion components=( experimental test get ) for component in "${components[@]}"; do # change url to specific component for testing local test_url=https://${component}.docker.com local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) sed "s,url='https://get.docker.com/',url='${test_url}/'," hack/install.sh > "${script}" chmod +x "${script}" # test for each 
Dockerfile in contrib/builder for dir in contrib/builder/*/*/; do local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" echo "running install.sh for ${component} with ${from}" docker run --rm -i -v ${script}:/install.sh ${from} /install.sh done rm -f ${script} done } test_install_script docker-1.10.3/hack/make/test-integration-cli000066400000000000000000000005431267010174400206620ustar00rootroot00000000000000#!/bin/bash set -e bundle_test_integration_cli() { TESTFLAGS="$TESTFLAGS -check.v" go_test_dir ./integration-cli } # subshell so that we can export PATH without breaking other things ( bundle .integration-daemon-start bundle .integration-daemon-setup bundle_test_integration_cli bundle .integration-daemon-stop ) 2>&1 | tee -a "$DEST/test.log" docker-1.10.3/hack/make/test-old-apt-repo000077500000000000000000000014271267010174400201020ustar00rootroot00000000000000#!/bin/bash set -e versions=( 1.3.3 1.4.1 1.5.0 1.6.2 ) install() { local version=$1 local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX) local dockerfile="${tmpdir}/Dockerfile" cat <<-EOF > "$dockerfile" FROM debian:jessie ENV VERSION ${version} RUN apt-get update && apt-get install -y \ apt-transport-https \ ca-certificates \ --no-install-recommends RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 RUN apt-get update && apt-get install -y \ lxc-docker-\${VERSION} EOF docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir } for v in "${versions[@]}"; do install "$v" done docker-1.10.3/hack/make/test-unit000066400000000000000000000016651267010174400165570ustar00rootroot00000000000000#!/bin/bash set -e # Run Docker's test suite, including sub-packages, and store their output as a bundle # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. 
# You can use this to select certain tests to run, eg. # # TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit # bundle_test_unit() { date if [ -z "$TESTDIRS" ]; then TEST_PATH=./... else TEST_PATH=./${TESTDIRS} fi pkg_list=$(go list -e \ -f '{{if ne .Name "github.com/docker/docker"}} {{.ImportPath}} {{end}}' \ "${BUILDFLAGS[@]}" $TEST_PATH \ | grep github.com/docker/docker \ | grep -v github.com/docker/docker/vendor \ | grep -v github.com/docker/docker/integration-cli) go test $COVER $GCCGOFLAGS -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list } if [[ "$(go version)" == *"gccgo"* ]]; then GCCGOFLAGS=-gccgoflags="-lpthread" else COVER=-cover fi bundle_test_unit 2>&1 | tee -a "$DEST/test.log" docker-1.10.3/hack/make/tgz000066400000000000000000000013451267010174400154220ustar00rootroot00000000000000#!/bin/bash CROSS="$DEST/../cross" set -e if [ ! -d "$CROSS/linux/amd64" ]; then echo >&2 'error: binary and cross must be run before tgz' false fi for d in "$CROSS/"*/*; do GOARCH="$(basename "$d")" GOOS="$(basename "$(dirname "$d")")" BINARY_NAME="docker-$VERSION" BINARY_EXTENSION="$(export GOOS && binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" mkdir -p "$DEST/$GOOS/$GOARCH" TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz" mkdir -p "$DEST/build" mkdir -p "$DEST/build/usr/local/bin" cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION" tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr hash_files "$TGZ" rm -rf "$DEST/build" echo "Created tgz: $TGZ" done docker-1.10.3/hack/make/ubuntu000066400000000000000000000141061267010174400161370ustar00rootroot00000000000000#!/bin/bash PKGVERSION="${VERSION//-/'~'}" # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then GIT_UNIX="$(git log -1 --pretty='%at')" GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" GIT_COMMIT="$(git 
log -1 --pretty='%h')" GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' PKGVERSION="$PKGVERSION~$GIT_VERSION" fi # $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false # true # $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false # true # $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false # true # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" PACKAGE_URL="https://www.docker.com/" PACKAGE_MAINTAINER="support@docker.com" PACKAGE_DESCRIPTION="Linux container runtime Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers. Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc." PACKAGE_LICENSE="Apache-2.0" # Build docker as an ubuntu package using FPM and REPREPRO (sue me). # bundle_binary must be called first. 
bundle_ubuntu() { DIR="$ABS_DEST/build" # Include our udev rules mkdir -p "$DIR/etc/udev/rules.d" cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" # Include our init scripts mkdir -p "$DIR/etc/init" cp contrib/init/upstart/docker.conf "$DIR/etc/init/" mkdir -p "$DIR/etc/init.d" cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" mkdir -p "$DIR/etc/default" cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" mkdir -p "$DIR/lib/systemd/system" cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" # Include contributed completions mkdir -p "$DIR/etc/bash_completion.d" cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" mkdir -p "$DIR/usr/share/zsh/vendor-completions" cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" mkdir -p "$DIR/etc/fish/completions" cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" # Include contributed man pages man/md2man-all.sh -q manRoot="$DIR/usr/share/man" mkdir -p "$manRoot" for manDir in man/man?; do manBase="$(basename "$manDir")" # "man1" for manFile in "$manDir"/*; do manName="$(basename "$manFile")" # "docker-build.1" mkdir -p "$manRoot/$manBase" gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" done done # Copy the binary # This will fail if the binary bundle hasn't been built mkdir -p "$DIR/usr/bin" cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" # Generate postinst/prerm/postrm scripts cat > "$DEST/postinst" <<'EOF' #!/bin/sh set -e set -u if [ "$1" = 'configure' ] && [ -z "$2" ]; then if ! getent group docker > /dev/null; then groupadd --system docker fi fi if ! 
{ [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then # we only need to do this if upstart isn't in charge update-rc.d docker defaults > /dev/null || true fi if [ -n "$2" ]; then _dh_action=restart else _dh_action=start fi service docker $_dh_action 2>/dev/null || true #DEBHELPER# EOF cat > "$DEST/prerm" <<'EOF' #!/bin/sh set -e set -u service docker stop 2>/dev/null || true #DEBHELPER# EOF cat > "$DEST/postrm" <<'EOF' #!/bin/sh set -e set -u if [ "$1" = "purge" ] ; then update-rc.d docker remove > /dev/null || true fi # In case this system is running systemd, we make systemd reload the unit files # to pick up changes. if [ -d /run/systemd/system ] ; then systemctl --system daemon-reload > /dev/null || true fi #DEBHELPER# EOF # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" ( # switch directories so we create *.deb in the right folder cd "$DEST" # create lxc-docker-VERSION package fpm -s dir -C "$DIR" \ --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ --after-install "$ABS_DEST/postinst" \ --before-remove "$ABS_DEST/prerm" \ --after-remove "$ABS_DEST/postrm" \ --architecture "$PACKAGE_ARCHITECTURE" \ --prefix / \ --depends iptables \ --deb-recommends aufs-tools \ --deb-recommends ca-certificates \ --deb-recommends git \ --deb-recommends xz-utils \ --deb-recommends 'cgroupfs-mount | cgroup-lite' \ --deb-suggests apparmor \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --conflicts docker \ --conflicts docker.io \ --conflicts lxc-docker-virtual-package \ --provides lxc-docker \ --provides lxc-docker-virtual-package \ --replaces lxc-docker \ --replaces lxc-docker-virtual-package \ --url "$PACKAGE_URL" \ --license "$PACKAGE_LICENSE" \ --config-files /etc/udev/rules.d/80-docker.rules \ --config-files /etc/init/docker.conf 
\ --config-files /etc/init.d/docker \ --config-files /etc/default/docker \ --deb-compression gz \ -t deb . # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available # create empty lxc-docker wrapper package fpm -s empty \ --name lxc-docker --version "$PKGVERSION" \ --architecture "$PACKAGE_ARCHITECTURE" \ --depends lxc-docker-$VERSION \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --url "$PACKAGE_URL" \ --license "$PACKAGE_LICENSE" \ --deb-compression gz \ -t deb ) # clean up after ourselves so we have a clean output directory rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" rm -r "$DIR" } bundle_ubuntu docker-1.10.3/hack/make/validate-dco000066400000000000000000000031751267010174400171550ustar00rootroot00000000000000#!/bin/bash source "${MAKEDIR}/.validate" adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') #notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" : ${adds:=0} : ${dels:=0} # "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' # https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work dcoPrefix='Signed-off-by:' dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" check_dco() { grep -qE "$dcoRegex" } if [ $adds -eq 0 -a $dels -eq 0 ]; then echo '0 adds, 0 deletions; nothing to validate! :)' else commits=( $(validate_log --format='format:%H%n') ) badCommits=() for commit in "${commits[@]}"; do if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then # no content (ie, Merge commit, etc) continue fi if ! git log -1 --format='format:%B' "$commit" | check_dco; then badCommits+=( "$commit" ) fi done if [ ${#badCommits[@]} -eq 0 ]; then echo "Congratulations! 
All commits are properly signed with the DCO!" else { echo "These commits do not have a proper '$dcoPrefix' marker:" for commit in "${badCommits[@]}"; do echo " - $commit" done echo echo 'Please amend each commit to include a properly formatted DCO marker.' echo echo 'Visit the following URL for information about the Docker DCO:' echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' echo } >&2 false fi fi docker-1.10.3/hack/make/validate-gofmt000066400000000000000000000012711267010174400175170ustar00rootroot00000000000000#!/bin/bash source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) unset IFS badFiles=() for f in "${files[@]}"; do # we use "git show" here to validate that what's committed is formatted if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then badFiles+=( "$f" ) fi done if [ ${#badFiles[@]} -eq 0 ]; then echo 'Congratulations! All Go source files are properly formatted.' else { echo "These files are not properly gofmt'd:" for f in "${badFiles[@]}"; do echo " - $f" done echo echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' echo } >&2 false fi docker-1.10.3/hack/make/validate-lint000066400000000000000000000011261267010174400173500ustar00rootroot00000000000000#!/bin/bash source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) unset IFS errors=() for f in "${files[@]}"; do failedLint=$(golint "$f") if [ "$failedLint" ]; then errors+=( "$failedLint" ) fi done if [ ${#errors[@]} -eq 0 ]; then echo 'Congratulations! All Go source files have been linted.' else { echo "Errors from golint:" for err in "${errors[@]}"; do echo "$err" done echo echo 'Please fix the above errors. You can test via "golint" and commit the result.' 
echo } >&2 false fi docker-1.10.3/hack/make/validate-pkg000066400000000000000000000013401267010174400171610ustar00rootroot00000000000000#!/bin/bash set -e source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) ) unset IFS badFiles=() for f in "${files[@]}"; do IFS=$'\n' badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -E '^github.com/docker/docker' || true) ) unset IFS for import in "${badImports[@]}"; do badFiles+=( "$f imports $import" ) done done if [ ${#badFiles[@]} -eq 0 ]; then echo 'Congratulations! "./pkg/..." is safely isolated from internal code.' else { echo 'These files import internal code: (either directly or indirectly)' for f in "${badFiles[@]}"; do echo " - $f" done echo } >&2 false fi docker-1.10.3/hack/make/validate-test000066400000000000000000000014651267010174400173670ustar00rootroot00000000000000#!/bin/bash # Make sure we're not using gos' Testing package any more in integration-cli source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) ) unset IFS badFiles=() for f in "${files[@]}"; do # skip check_test.go since it *does* use the testing package if [ "$f" = "integration-cli/check_test.go" ]; then continue fi # we use "git show" here to validate that what's committed doesn't contain golang built-in testing if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then badFiles+=( "$f" ) fi done if [ ${#badFiles[@]} -eq 0 ]; then echo 'Congratulations! No testing.T found.' 
else { echo "These files use the wrong testing infrastructure:" for f in "${badFiles[@]}"; do echo " - $f" done echo } >&2 false fi docker-1.10.3/hack/make/validate-toml000066400000000000000000000012371267010174400173600ustar00rootroot00000000000000#!/bin/bash source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) unset IFS badFiles=() for f in "${files[@]}"; do # we use "git show" here to validate that what's committed has valid toml syntax if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then badFiles+=( "$f" ) fi done if [ ${#badFiles[@]} -eq 0 ]; then echo 'Congratulations! All toml source files changed here have valid syntax.' else { echo "These files are not valid toml:" for f in "${badFiles[@]}"; do echo " - $f" done echo echo 'Please reformat the above files as valid toml' echo } >&2 false fi docker-1.10.3/hack/make/validate-vendor000066400000000000000000000012611267010174400176770ustar00rootroot00000000000000#!/bin/bash source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- 'hack/vendor.sh' 'hack/.vendor-helpers.sh' 'vendor/' || true) ) unset IFS if [ ${#files[@]} -gt 0 ]; then # We run vendor.sh to and see if we have a diff afterwards ./hack/vendor.sh >/dev/null # Let see if the working directory is clean diffs="$(git status --porcelain -- vendor 2>/dev/null)" if [ "$diffs" ]; then { echo 'The result of ./hack/vendor.sh differs' echo echo "$diffs" echo echo 'Please vendor your package with ./hack/vendor.sh.' echo } >&2 false else echo 'Congratulations! All vendoring changes are done the right way.' 
fi fi docker-1.10.3/hack/make/validate-vet000066400000000000000000000011271267010174400172010ustar00rootroot00000000000000#!/bin/bash source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) unset IFS errors=() for f in "${files[@]}"; do failedVet=$(go vet "$f") if [ "$failedVet" ]; then errors+=( "$failedVet" ) fi done if [ ${#errors[@]} -eq 0 ]; then echo 'Congratulations! All Go source files have been vetted.' else { echo "Errors from go vet:" for err in "${errors[@]}"; do echo " - $err" done echo echo 'Please fix the above errors. You can test via "go vet" and commit the result.' echo } >&2 false fi docker-1.10.3/hack/release.sh000077500000000000000000000215511267010174400157360ustar00rootroot00000000000000#!/usr/bin/env bash set -e # This script looks for bundles built by make.sh, and releases them on a # public S3 bucket. # # Bundles should be available for the VERSION string passed as argument. # # The correct way to call this script is inside a container built by the # official Dockerfile at the root of the Docker source code. The Dockerfile, # make.sh and release.sh should all be from the same source code revision. set -o pipefail # Print a usage message and exit. usage() { cat >&2 <<'EOF' To run, I need: - to be in a container generated by the Dockerfile at the top of the Docker repository; - to be provided with the location of an S3 bucket and path, in environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: ''); - to be provided with AWS credentials for this S3 bucket, in environment variables AWS_ACCESS_KEY and AWS_SECRET_KEY; - a generous amount of good will and nice manners. The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" docker run -e AWS_S3_BUCKET=test.docker.com \ -e AWS_ACCESS_KEY=... \ -e AWS_SECRET_KEY=... 
\ -i -t --privileged \ docker ./hack/release.sh EOF exit 1 } [ "$AWS_S3_BUCKET" ] || usage [ "$AWS_ACCESS_KEY" ] || usage [ "$AWS_SECRET_KEY" ] || usage [ -d /go/src/github.com/docker/docker ] || usage cd /go/src/github.com/docker/docker [ -x hack/make.sh ] || usage RELEASE_BUNDLES=( binary cross tgz ) if [ "$1" != '--release-regardless-of-test-failure' ]; then RELEASE_BUNDLES=( test-unit "${RELEASE_BUNDLES[@]}" test-integration-cli ) fi VERSION=$(< VERSION) BUCKET=$AWS_S3_BUCKET BUCKET_PATH=$BUCKET [[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH if command -v git &> /dev/null && git rev-parse &> /dev/null; then if [ -n "$(git status --porcelain --untracked-files=no)" ]; then echo "You cannot run the release script on a repo with uncommitted changes" usage fi fi # These are the 2 keys we've used to sign the deb's # release (get.docker.com) # GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" # test (test.docker.com) # GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" setup_s3() { echo "Setting up S3" # Try creating the bucket. Ignore errors (it might already exist). s3cmd mb "s3://$BUCKET" 2>/dev/null || true # Check access to the bucket. # s3cmd has no useful exit status, so we cannot check that. # Instead, we check if it outputs anything on standard output. # (When there are problems, it uses standard error instead.) s3cmd info "s3://$BUCKET" | grep -q . # Make the bucket accessible through website endpoints. s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET" } # write_to_s3 uploads the contents of standard input to the specified S3 url. 
write_to_s3() { DEST=$1 F=`mktemp` cat > "$F" s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST" rm -f "$F" } s3_url() { case "$BUCKET" in get.docker.com|test.docker.com|experimental.docker.com) echo "https://$BUCKET_PATH" ;; *) BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' ) if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then echo "$BASE_URL/$AWS_S3_BUCKET_PATH" else echo "$BASE_URL" fi ;; esac } build_all() { echo "Building release" if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then echo >&2 echo >&2 'The build or tests appear to have failed.' echo >&2 echo >&2 'You, as the release maintainer, now have a couple options:' echo >&2 '- delay release and fix issues' echo >&2 '- delay release and fix issues' echo >&2 '- did we mention how important this is? issues need fixing :)' echo >&2 echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' echo >&2 ' really knows all the hairy problems at hand with the current release' echo >&2 ' issues) may bypass this checking by running this script again with the' echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' echo >&2 ' running the test suite, and will only build the binaries and packages. Please' echo >&2 ' avoid using this if at all possible.' echo >&2 echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' echo >&2 ' should be used. If there are release issues, we should always err on the' echo >&2 ' side of caution.' 
echo >&2 exit 1 fi } upload_release_build() { src="$1" dst="$2" latest="$3" echo echo "Uploading $src" echo " to $dst" echo s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst" if [ "$latest" ]; then echo echo "Copying to $latest" echo s3cmd --acl-public cp "$dst" "$latest" fi # get hash files too (see hash_files() in hack/make.sh) for hashAlgo in md5 sha256; do if [ -e "$src.$hashAlgo" ]; then echo echo "Uploading $src.$hashAlgo" echo " to $dst.$hashAlgo" echo s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo" if [ "$latest" ]; then echo echo "Copying to $latest.$hashAlgo" echo s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo" fi fi done } release_build() { echo "Releasing binaries" GOOS=$1 GOARCH=$2 binDir=bundles/$VERSION/cross/$GOOS/$GOARCH tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH binary=docker-$VERSION tgz=docker-$VERSION.tgz latestBase= if [ -z "$NOLATEST" ]; then latestBase=docker-latest fi # we need to map our GOOS and GOARCH to uname values # see https://en.wikipedia.org/wiki/Uname # ie, GOOS=linux -> "uname -s"=Linux s3Os=$GOOS case "$s3Os" in darwin) s3Os=Darwin ;; freebsd) s3Os=FreeBSD ;; linux) s3Os=Linux ;; windows) s3Os=Windows binary+='.exe' if [ "$latestBase" ]; then latestBase+='.exe' fi ;; *) echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" exit 1 ;; esac s3Arch=$GOARCH case "$s3Arch" in amd64) s3Arch=x86_64 ;; 386) s3Arch=i386 ;; arm) s3Arch=armel # someday, we might potentially support multiple GOARM values, in which case we might get armhf here too ;; *) echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" exit 1 ;; esac s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch" latest= latestTgz= if [ "$latestBase" ]; then latest="$s3Dir/$latestBase" latestTgz="$s3Dir/$latestBase.tgz" fi if [ ! -x "$binDir/$binary" ]; then echo >&2 "error: can't find $binDir/$binary - was it compiled properly?" exit 1 fi if [ ! 
-f "$tgzDir/$tgz" ]; then echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" exit 1 fi upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" } # Upload binaries and tgz files to S3 release_binaries() { [ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || { echo >&2 './hack/make.sh must be run before release_binaries' exit 1 } for d in bundles/$VERSION/cross/*/*; do GOARCH="$(basename "$d")" GOOS="$(basename "$(dirname "$d")")" release_build "$GOOS" "$GOARCH" done # TODO create redirect from builds/*/i686 to builds/*/i386 cat <
Layer
Images are composed of layers. Image layer is a general term which may be used to refer to one or both of the following:
  1. The metadata for the layer, described in the JSON format.
  2. The filesystem changes described by a layer.
To refer to the former you may use the term Layer JSON or Layer Metadata. To refer to the latter you may use the term Image Filesystem Changeset or Image Diff.
Image JSON
Each layer has an associated JSON structure which describes some basic information about the image such as date created, author, and the ID of its parent image as well as execution/runtime configuration like its entry point, default arguments, CPU/memory shares, networking, and volumes.
Image Filesystem Changeset
Each layer has an archive of the files which have been added, changed, or deleted relative to its parent layer. Using a layer-based or union filesystem such as AUFS, or by computing the diff from filesystem snapshots, the filesystem changeset can be used to present a series of image layers as if they were one cohesive filesystem.
Image ID
Each layer is given an ID upon its creation. It is represented as a hexadecimal encoding of 256 bits, e.g., a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. Image IDs should be sufficiently random so as to be globally unique. 32 bytes read from /dev/urandom is sufficient for all practical purposes. Alternatively, an image ID may be derived as a cryptographic hash of image contents as the result is considered indistinguishable from random. The choice is left up to implementors.
Image Parent
Most layer metadata structs contain a parent field which refers to the Image from which another directly descends. An image contains a separate JSON metadata file and set of changes relative to the filesystem of its parent image. Image Ancestor and Image Descendant are also common terms.
Image Checksum
Layer metadata structs contain a cryptographic hash of the contents of the layer's filesystem changeset. Though the set of changes exists as a simple Tar archive, two archives with identical filenames and content will have different SHA digests if the last-access or last-modified times of any entries differ. For this reason, image checksums are generated using the TarSum algorithm which produces a cryptographic hash of file contents and selected headers only. Details of this algorithm are described in the separate TarSum specification.
Tag
A tag serves to map a descriptive, user-given name to any single image ID. An image name suffix (the name component after :) is often referred to as a tag as well, though it strictly refers to the full name of an image. Acceptable values for a tag suffix are implementation specific, but they SHOULD be limited to the set of alphanumeric characters [a-zA-Z0-9], punctuation characters [._-], and MUST NOT contain a : character.
Repository
A collection of tags grouped under a common prefix (the name component before :). For example, in an image tagged with the name my-app:3.1.4, my-app is the Repository component of the name. Acceptable values for repository name are implementation specific, but they SHOULD be limited to the set of alphanumeric characters [a-zA-Z0-9], and punctuation characters [._-], however it MAY contain additional / and : characters for organizational purposes, with the last : character being interpreted as dividing the repository component of the name from the tag suffix component.
## Image JSON Description Here is an example image JSON file: ``` { "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9", "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024", "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b", "created": "2014-10-13T21:19:18.674353812Z", "author": "Alyssa P. Hacker <alyspdev@example.com>", "architecture": "amd64", "os": "linux", "Size": 271828, "config": { "User": "alice", "Memory": 2048, "MemorySwap": 4096, "CpuShares": 8, "ExposedPorts": { "8080/tcp": {} }, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "FOO=docker_is_a_really", "BAR=great_tool_you_know" ], "Entrypoint": [ "/bin/my-app-binary" ], "Cmd": [ "--foreground", "--config", "/etc/my-app.d/default.cfg" ], "Volumes": { "/var/job-result-data": {}, "/var/log/my-app-logs": {} }, "WorkingDir": "/home/alice" } } ``` ### Image JSON Field Descriptions
id string
Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies the image.
parent string
ID of the parent image. If there is no parent image then this field should be omitted. A collection of images may share many of the same ancestor layers. This organizational structure is strictly a tree with any one layer having either no parent or a single parent and zero or more descendant layers. Cycles are not allowed and implementations should be careful to avoid creating them or iterating through a cycle indefinitely.
created string
ISO-8601 formatted combined date and time at which the image was created.
author string
Gives the name and/or email address of the person or entity which created and is responsible for maintaining the image.
architecture string
The CPU architecture which the binaries in this image are built to run on. Possible values include:
  • 386
  • amd64
  • arm
More values may be supported in the future and any of these may or may not be supported by a given container runtime implementation.
os string
The name of the operating system which the image is built to run on. Possible values include:
  • darwin
  • freebsd
  • linux
More values may be supported in the future and any of these may or may not be supported by a given container runtime implementation.
checksum string
Image Checksum of the filesystem changeset associated with the image layer.
Size integer
The size in bytes of the filesystem changeset associated with the image layer.
config struct
The execution parameters which should be used as a base when running a container using the image. This field can be null, in which case any execution parameters should be specified at creation of the container.

Container RunConfig Field Descriptions

User string

The username or UID which the process in the container should run as. This acts as a default value to use when the value is not specified when creating a container.

All of the following are valid:

  • user
  • uid
  • user:group
  • uid:gid
  • uid:group
  • user:gid

If group/gid is not specified, the default group and supplementary groups of the given user/uid in /etc/passwd from the container are applied.

Memory integer
Memory limit (in bytes). This acts as a default value to use when the value is not specified when creating a container.
MemorySwap integer
Total memory usage (memory + swap); set to -1 to disable swap. This acts as a default value to use when the value is not specified when creating a container.
CpuShares integer
CPU shares (relative weight vs. other containers). This acts as a default value to use when the value is not specified when creating a container.
ExposedPorts struct
A set of ports to expose from a container running this image. This JSON structure value is unusual because it is a direct JSON serialization of the Go type map[string]struct{} and is represented in JSON as an object mapping its keys to an empty object. Here is an example:
{
    "8080": {},
    "53/udp": {},
    "2356/tcp": {}
}
Its keys can be in the format of:
  • "port/tcp"
  • "port/udp"
  • "port"
with the default protocol being "tcp" if not specified. These values act as defaults and are merged with any specified when creating a container.
Env array of strings
Entries are in the format of VARNAME="var value". These values act as defaults and are merged with any specified when creating a container.
Entrypoint array of strings
A list of arguments to use as the command to execute when the container starts. This value acts as a default and is replaced by an entrypoint specified when creating a container.
Cmd array of strings
Default arguments to the entry point of the container. These values act as defaults and are replaced with any specified when creating a container. If an Entrypoint value is not specified, then the first entry of the Cmd array should be interpreted as the executable to run.
Volumes struct
A set of directories which should be created as data volumes in a container running this image. This JSON structure value is unusual because it is a direct JSON serialization of the Go type map[string]struct{} and is represented in JSON as an object mapping its keys to an empty object. Here is an example:
{
    "/var/my-app-data/": {},
    "/etc/some-config.d/": {}
}
WorkingDir string
Sets the current working directory of the entry point process in the container. This value acts as a default and is replaced by a working directory specified when creating a container.
Any extra fields in the Image JSON struct are considered implementation specific and should be ignored by any implementations which are unable to interpret them. ## Creating an Image Filesystem Changeset An example of creating an Image Filesystem Changeset follows. An image root filesystem is first created as an empty directory named with the ID of the image being created. Here is the initial empty directory structure for the changeset for an image with ID `c3167915dc9d` ([real IDs are much longer](#id_desc), but this example use a truncated one here for brevity. Implementations need not name the rootfs directory in this way but it may be convenient for keeping record of a large number of image layers.): ``` c3167915dc9d/ ``` Files and directories are then created: ``` c3167915dc9d/ etc/ my-app-config bin/ my-app-binary my-app-tools ``` The `c3167915dc9d` directory is then committed as a plain Tar archive with entries for the following files: ``` etc/my-app-config bin/my-app-binary bin/my-app-tools ``` The TarSum checksum for the archive file is then computed and placed in the JSON metadata along with the execution parameters. To make changes to the filesystem of this container image, create a new directory named with a new ID, such as `f60c56784b83`, and initialize it with a snapshot of the parent image's root filesystem, so that the directory is identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very efficient: ``` f60c56784b83/ etc/ my-app-config bin/ my-app-binary my-app-tools ``` This example change is going add a configuration directory at `/etc/my-app.d` which contains a default config file. There's also a change to the `my-app-tools` binary to handle the config layout change. 
The `f60c56784b83` directory then looks like this: ``` f60c56784b83/ etc/ my-app.d/ default.cfg bin/ my-app-binary my-app-tools ``` This reflects the removal of `/etc/my-app-config` and creation of a file and directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been replaced with an updated version. Before committing this directory to a changeset, because it has a parent image, it is first compared with the directory tree of the parent snapshot, `c3167915dc9d`, looking for files and directories that have been added, modified, or removed. The following changeset is found: ``` Added: /etc/my-app.d/default.cfg Modified: /bin/my-app-tools Deleted: /etc/my-app-config ``` A Tar Archive is then created which contains *only* this changeset: The added and modified files and directories in their entirety, and for each deleted item an entry for an empty file at the same location but with the basename of the deleted file or directory prefixed with `.wh.`. The filenames prefixed with `.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible to create an image root filesystem which contains a file or directory with a name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has the following entries: ``` /etc/my-app.d/default.cfg /bin/my-app-tools /etc/.wh.my-app-config ``` Any given image is likely to be composed of several of these Image Filesystem Changeset tar archives. ## Combined Image JSON + Filesystem Changeset Format There is also a format for a single archive which contains complete information about an image, including: - repository names/tags - all image layer JSON files - all tar archives of each layer filesystem changesets For example, here's what the full archive of `library/busybox` is (displayed in `tree` format): ``` . 
├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e │   ├── VERSION │   ├── json │   └── layer.tar ├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a │   ├── VERSION │   ├── json │   └── layer.tar ├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb │   ├── VERSION │   ├── json │   └── layer.tar ├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c │   ├── VERSION │   ├── json │   └── layer.tar └── repositories ``` There are one or more directories named with the ID for each layer in a full image. Each of these directories contains 3 files: * `VERSION` - The schema version of the `json` file * `json` - The JSON metadata for an image layer * `layer.tar` - The Tar archive of the filesystem changeset for an image layer. The content of the `VERSION` files is simply the semantic version of the JSON metadata schema: ``` 1.0 ``` And the `repositories` file is another JSON file which describes names/tags: ``` { "busybox":{ "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e" } } ``` Every key in this object is the name of a repository, and maps to a collection of tag suffixes. Each tag maps to the ID of the image represented by that tag. ## Loading an Image Filesystem Changeset Unpacking a bundle of image layer JSON files and their corresponding filesystem changesets can be done using a series of steps: 1. Follow the parent IDs of image layers to find the root ancestor (an image with no parent ID specified). 2. For every image layer, in order from root ancestor and descending down, extract the contents of that layer's filesystem changeset archive into a directory which will be used as the root of a container filesystem. - Extract all contents of each archive. - Walk the directory tree once more, removing any files with the prefix `.wh.` and the corresponding file or directory named without this prefix. 
## Implementations This specification is an admittedly imperfect description of an imperfectly-understood problem. The Docker project is, in turn, an attempt to implement this specification. Our goal and our execution toward it will evolve over time, but our primary concern in this specification and in our implementation is compatibility and interoperability. docker-1.10.3/image/store.go000066400000000000000000000135501267010174400156160ustar00rootroot00000000000000package image import ( "encoding/json" "errors" "fmt" "sync" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/layer" ) // Store is an interface for creating and accessing images type Store interface { Create(config []byte) (ID, error) Get(id ID) (*Image, error) Delete(id ID) ([]layer.Metadata, error) Search(partialID string) (ID, error) SetParent(id ID, parent ID) error GetParent(id ID) (ID, error) Children(id ID) []ID Map() map[ID]*Image Heads() map[ID]*Image } // LayerGetReleaser is a minimal interface for getting and releasing images. 
type LayerGetReleaser interface { Get(layer.ChainID) (layer.Layer, error) Release(layer.Layer) ([]layer.Metadata, error) } type imageMeta struct { layer layer.Layer children map[ID]struct{} } type store struct { sync.Mutex ls LayerGetReleaser images map[ID]*imageMeta fs StoreBackend digestSet *digest.Set } // NewImageStore returns new store object for given layer store func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { is := &store{ ls: ls, images: make(map[ID]*imageMeta), fs: fs, digestSet: digest.NewSet(), } // load all current images and retain layers if err := is.restore(); err != nil { return nil, err } return is, nil } func (is *store) restore() error { err := is.fs.Walk(func(id ID) error { img, err := is.Get(id) if err != nil { logrus.Errorf("invalid image %v, %v", id, err) return nil } var l layer.Layer if chainID := img.RootFS.ChainID(); chainID != "" { l, err = is.ls.Get(chainID) if err != nil { return err } } if err := is.digestSet.Add(digest.Digest(id)); err != nil { return err } imageMeta := &imageMeta{ layer: l, children: make(map[ID]struct{}), } is.images[ID(id)] = imageMeta return nil }) if err != nil { return err } // Second pass to fill in children maps for id := range is.images { if parent, err := is.GetParent(id); err == nil { if parentMeta := is.images[parent]; parentMeta != nil { parentMeta.children[id] = struct{}{} } } } return nil } func (is *store) Create(config []byte) (ID, error) { var img Image err := json.Unmarshal(config, &img) if err != nil { return "", err } // Must reject any config that references diffIDs from the history // which aren't among the rootfs layers. 
rootFSLayers := make(map[layer.DiffID]struct{}) for _, diffID := range img.RootFS.DiffIDs { rootFSLayers[diffID] = struct{}{} } layerCounter := 0 for _, h := range img.History { if !h.EmptyLayer { layerCounter++ } } if layerCounter > len(img.RootFS.DiffIDs) { return "", errors.New("too many non-empty layers in History section") } dgst, err := is.fs.Set(config) if err != nil { return "", err } imageID := ID(dgst) is.Lock() defer is.Unlock() if _, exists := is.images[imageID]; exists { return imageID, nil } layerID := img.RootFS.ChainID() var l layer.Layer if layerID != "" { l, err = is.ls.Get(layerID) if err != nil { return "", err } } imageMeta := &imageMeta{ layer: l, children: make(map[ID]struct{}), } is.images[imageID] = imageMeta if err := is.digestSet.Add(digest.Digest(imageID)); err != nil { delete(is.images, imageID) return "", err } return imageID, nil } func (is *store) Search(term string) (ID, error) { is.Lock() defer is.Unlock() dgst, err := is.digestSet.Lookup(term) if err != nil { return "", err } return ID(dgst), nil } func (is *store) Get(id ID) (*Image, error) { // todo: Check if image is in images // todo: Detect manual insertions and start using them config, err := is.fs.Get(id) if err != nil { return nil, err } img, err := NewFromJSON(config) if err != nil { return nil, err } img.computedID = id img.Parent, err = is.GetParent(id) if err != nil { img.Parent = "" } return img, nil } func (is *store) Delete(id ID) ([]layer.Metadata, error) { is.Lock() defer is.Unlock() imageMeta := is.images[id] if imageMeta == nil { return nil, fmt.Errorf("unrecognized image ID %s", id.String()) } for id := range imageMeta.children { is.fs.DeleteMetadata(id, "parent") } if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { delete(is.images[parent].children, id) } if err := is.digestSet.Remove(digest.Digest(id)); err != nil { logrus.Errorf("error removing %s from digest set: %q", id, err) } delete(is.images, id) is.fs.Delete(id) if 
imageMeta.layer != nil { return is.ls.Release(imageMeta.layer) } return nil, nil } func (is *store) SetParent(id, parent ID) error { is.Lock() defer is.Unlock() parentMeta := is.images[parent] if parentMeta == nil { return fmt.Errorf("unknown parent image ID %s", parent.String()) } if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { delete(is.images[parent].children, id) } parentMeta.children[id] = struct{}{} return is.fs.SetMetadata(id, "parent", []byte(parent)) } func (is *store) GetParent(id ID) (ID, error) { d, err := is.fs.GetMetadata(id, "parent") if err != nil { return "", err } return ID(d), nil // todo: validate? } func (is *store) Children(id ID) []ID { is.Lock() defer is.Unlock() return is.children(id) } func (is *store) children(id ID) []ID { var ids []ID if is.images[id] != nil { for id := range is.images[id].children { ids = append(ids, id) } } return ids } func (is *store) Heads() map[ID]*Image { return is.imagesMap(false) } func (is *store) Map() map[ID]*Image { return is.imagesMap(true) } func (is *store) imagesMap(all bool) map[ID]*Image { is.Lock() defer is.Unlock() images := make(map[ID]*Image) for id := range is.images { if !all && len(is.children(id)) > 0 { continue } img, err := is.Get(id) if err != nil { logrus.Errorf("invalid image access: %q, error: %q", id, err) continue } images[id] = img } return images } docker-1.10.3/image/store_test.go000066400000000000000000000161531267010174400166570ustar00rootroot00000000000000package image import ( "io/ioutil" "os" "testing" "github.com/docker/distribution/digest" "github.com/docker/docker/layer" ) func TestRestore(t *testing.T) { tmpdir, err := ioutil.TempDir("", "images-fs-store") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) fs, err := NewFSStoreBackend(tmpdir) if err != nil { t.Fatal(err) } id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) if err != nil { t.Fatal(err) } _, err = fs.Set([]byte(`invalid`)) if err != nil { 
t.Fatal(err) } id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) if err != nil { t.Fatal(err) } err = fs.SetMetadata(id2, "parent", []byte(id1)) if err != nil { t.Fatal(err) } is, err := NewImageStore(fs, &mockLayerGetReleaser{}) if err != nil { t.Fatal(err) } imgs := is.Map() if actual, expected := len(imgs), 2; actual != expected { t.Fatalf("invalid images length, expected 2, got %q", len(imgs)) } img1, err := is.Get(ID(id1)) if err != nil { t.Fatal(err) } if actual, expected := img1.computedID, ID(id1); actual != expected { t.Fatalf("invalid image ID: expected %q, got %q", expected, actual) } if actual, expected := img1.computedID.String(), string(id1); actual != expected { t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual) } img2, err := is.Get(ID(id2)) if err != nil { t.Fatal(err) } if actual, expected := img1.Comment, "abc"; actual != expected { t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual) } if actual, expected := img2.Comment, "def"; actual != expected { t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual) } p, err := is.GetParent(ID(id1)) if err == nil { t.Fatal("expected error for getting parent") } p, err = is.GetParent(ID(id2)) if err != nil { t.Fatal(err) } if actual, expected := p, ID(id1); actual != expected { t.Fatalf("invalid parent: expected %q, got %q", expected, actual) } children := is.Children(ID(id1)) if len(children) != 1 { t.Fatalf("invalid children length: %q", len(children)) } if actual, expected := children[0], ID(id2); actual != expected { t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual) } heads := is.Heads() if actual, expected := len(heads), 1; actual != expected { t.Fatalf("invalid images length: expected %q, got %q", expected, actual) } sid1, err := is.Search(string(id1)[:10]) if err != nil { t.Fatal(err) } if actual, 
expected := sid1, ID(id1); actual != expected { t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) } sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) if err != nil { t.Fatal(err) } if actual, expected := sid1, ID(id1); actual != expected { t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) } invalidPattern := digest.Digest(id1).Hex()[1:6] _, err = is.Search(invalidPattern) if err == nil { t.Fatalf("expected search for %q to fail", invalidPattern) } } func TestAddDelete(t *testing.T) { tmpdir, err := ioutil.TempDir("", "images-fs-store") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) fs, err := NewFSStoreBackend(tmpdir) if err != nil { t.Fatal(err) } is, err := NewImageStore(fs, &mockLayerGetReleaser{}) if err != nil { t.Fatal(err) } id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) if err != nil { t.Fatal(err) } if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected { t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual) } img, err := is.Get(id1) if err != nil { t.Fatal(err) } if actual, expected := img.Comment, "abc"; actual != expected { t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual) } id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) if err != nil { t.Fatal(err) } err = is.SetParent(id2, id1) if err != nil { t.Fatal(err) } pid1, err := is.GetParent(id2) if err != nil { t.Fatal(err) } if actual, expected := pid1, id1; actual != expected { t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual) } _, err = is.Delete(id1) if err != nil { t.Fatal(err) } _, err = is.Get(id1) if err == nil { t.Fatalf("expected get for deleted image %q to fail", id1) } _, 
err = is.Get(id2) if err != nil { t.Fatal(err) } pid1, err = is.GetParent(id2) if err == nil { t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1) } } func TestSearchAfterDelete(t *testing.T) { tmpdir, err := ioutil.TempDir("", "images-fs-store") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) fs, err := NewFSStoreBackend(tmpdir) if err != nil { t.Fatal(err) } is, err := NewImageStore(fs, &mockLayerGetReleaser{}) if err != nil { t.Fatal(err) } id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) if err != nil { t.Fatal(err) } id1, err := is.Search(string(id)[:15]) if err != nil { t.Fatal(err) } if actual, expected := id1, id; expected != actual { t.Fatalf("wrong id returned from search: expected %q, got %q", expected, actual) } if _, err := is.Delete(id); err != nil { t.Fatal(err) } if _, err := is.Search(string(id)[:15]); err == nil { t.Fatal("expected search after deletion to fail") } } func TestParentReset(t *testing.T) { tmpdir, err := ioutil.TempDir("", "images-fs-store") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) fs, err := NewFSStoreBackend(tmpdir) if err != nil { t.Fatal(err) } is, err := NewImageStore(fs, &mockLayerGetReleaser{}) if err != nil { t.Fatal(err) } id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) if err != nil { t.Fatal(err) } id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) if err != nil { t.Fatal(err) } id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) if err != nil { t.Fatal(err) } if err := is.SetParent(id, id2); err != nil { t.Fatal(err) } ids := is.Children(id2) if actual, expected := len(ids), 1; expected != actual { t.Fatalf("wrong number of children: %d, got %d", expected, actual) } if err := is.SetParent(id, id3); err != nil { t.Fatal(err) } ids = is.Children(id2) if actual, expected := len(ids), 0; expected != actual { t.Fatalf("wrong number of children after 
parent reset: %d, got %d", expected, actual) } ids = is.Children(id3) if actual, expected := len(ids), 1; expected != actual { t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) } } type mockLayerGetReleaser struct{} func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { return nil, nil } func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) { return nil, nil } docker-1.10.3/image/tarexport/000077500000000000000000000000001267010174400161575ustar00rootroot00000000000000docker-1.10.3/image/tarexport/load.go000066400000000000000000000151021267010174400174240ustar00rootroot00000000000000package tarexport import ( "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "github.com/Sirupsen/logrus" "github.com/docker/docker/image" "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/reference" ) func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer) error { tmpDir, err := ioutil.TempDir("", "docker-import-") if err != nil { return err } defer os.RemoveAll(tmpDir) if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { return err } // read manifest, if no file then load in legacy mode manifestPath, err := safePath(tmpDir, manifestFileName) if err != nil { return err } manifestFile, err := os.Open(manifestPath) if err != nil { if os.IsNotExist(err) { return l.legacyLoad(tmpDir, outStream) } return manifestFile.Close() } defer manifestFile.Close() var manifest []manifestItem if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { return err } for _, m := range manifest { configPath, err := safePath(tmpDir, m.Config) if err != nil { return err } config, err := ioutil.ReadFile(configPath) if err != nil { return err } img, err := image.NewFromJSON(config) if err != nil { return err } var rootFS 
image.RootFS rootFS = *img.RootFS rootFS.DiffIDs = nil if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) } for i, diffID := range img.RootFS.DiffIDs { layerPath, err := safePath(tmpDir, m.Layers[i]) if err != nil { return err } r := rootFS r.Append(diffID) newLayer, err := l.ls.Get(r.ChainID()) if err != nil { newLayer, err = l.loadLayer(layerPath, rootFS) if err != nil { return err } } defer layer.ReleaseAndLog(l.ls, newLayer) if expected, actual := diffID, newLayer.DiffID(); expected != actual { return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) } rootFS.Append(diffID) } imgID, err := l.is.Create(config) if err != nil { return err } for _, repoTag := range m.RepoTags { named, err := reference.ParseNamed(repoTag) if err != nil { return err } ref, ok := named.(reference.NamedTagged) if !ok { return fmt.Errorf("invalid tag %q", repoTag) } l.setLoadedTag(ref, imgID, outStream) } } return nil } func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS) (layer.Layer, error) { rawTar, err := os.Open(filename) if err != nil { logrus.Debugf("Error reading embedded tar: %v", err) return nil, err } defer rawTar.Close() inflatedLayerData, err := archive.DecompressStream(rawTar) if err != nil { return nil, err } defer inflatedLayerData.Close() return l.ls.Register(inflatedLayerData, rootFS.ChainID()) } func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags } if err := l.rs.AddTag(ref, imgID, true); err != nil { return err } return nil } func (l *tarexporter) legacyLoad(tmpDir string, 
outStream io.Writer) error { legacyLoadedMap := make(map[string]image.ID) dirs, err := ioutil.ReadDir(tmpDir) if err != nil { return err } // every dir represents an image for _, d := range dirs { if d.IsDir() { if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap); err != nil { return err } } } // load tags from repositories file repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) if err != nil { return err } repositoriesFile, err := os.Open(repositoriesPath) if err != nil { if !os.IsNotExist(err) { return err } return repositoriesFile.Close() } defer repositoriesFile.Close() repositories := make(map[string]map[string]string) if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { return err } for name, tagMap := range repositories { for tag, oldID := range tagMap { imgID, ok := legacyLoadedMap[oldID] if !ok { return fmt.Errorf("invalid target ID: %v", oldID) } named, err := reference.WithName(name) if err != nil { return err } ref, err := reference.WithTag(named, tag) if err != nil { return err } l.setLoadedTag(ref, imgID, outStream) } } return nil } func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID) error { if _, loaded := loadedMap[oldID]; loaded { return nil } configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) if err != nil { return err } imageJSON, err := ioutil.ReadFile(configPath) if err != nil { logrus.Debugf("Error reading json: %v", err) return err } var img struct{ Parent string } if err := json.Unmarshal(imageJSON, &img); err != nil { return err } var parentID image.ID if img.Parent != "" { for { var loaded bool if parentID, loaded = loadedMap[img.Parent]; !loaded { if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap); err != nil { return err } } else { break } } } // todo: try to connect with migrate code rootFS := image.NewRootFS() var history []image.History if parentID != "" { parentImg, err := l.is.Get(parentID) if 
err != nil { return err } rootFS = parentImg.RootFS history = parentImg.History } layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) if err != nil { return err } newLayer, err := l.loadLayer(layerPath, *rootFS) if err != nil { return err } rootFS.Append(newLayer.DiffID()) h, err := v1.HistoryFromConfig(imageJSON, false) if err != nil { return err } history = append(history, h) config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) if err != nil { return err } imgID, err := l.is.Create(config) if err != nil { return err } metadata, err := l.ls.Release(newLayer) layer.LogReleaseMetadata(metadata) if err != nil { return err } if parentID != "" { if err := l.is.SetParent(imgID, parentID); err != nil { return err } } loadedMap[oldID] = imgID return nil } func safePath(base, path string) (string, error) { return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) } docker-1.10.3/image/tarexport/save.go000066400000000000000000000151711267010174400174510ustar00rootroot00000000000000package tarexport import ( "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "time" "github.com/docker/distribution/digest" "github.com/docker/docker/image" "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/reference" ) type imageDescriptor struct { refs []reference.NamedTagged layers []string } type saveSession struct { *tarexporter outDir string images map[image.ID]*imageDescriptor savedLayers map[string]struct{} } func (l *tarexporter) Save(names []string, outStream io.Writer) error { images, err := l.parseNames(names) if err != nil { return err } return (&saveSession{tarexporter: l, images: images}).save(outStream) } func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { imgDescr := make(map[image.ID]*imageDescriptor) addAssoc := func(id image.ID, ref reference.Named) { if _, ok := imgDescr[id]; !ok { 
imgDescr[id] = &imageDescriptor{} } if ref != nil { var tagged reference.NamedTagged if _, ok := ref.(reference.Canonical); ok { return } var ok bool if tagged, ok = ref.(reference.NamedTagged); !ok { var err error if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { return } } for _, t := range imgDescr[id].refs { if tagged.String() == t.String() { return } } imgDescr[id].refs = append(imgDescr[id].refs, tagged) } } for _, name := range names { ref, err := reference.ParseNamed(name) if err != nil { return nil, err } if ref.Name() == string(digest.Canonical) { imgID, err := l.is.Search(name) if err != nil { return nil, err } addAssoc(imgID, nil) continue } if reference.IsNameOnly(ref) { assocs := l.rs.ReferencesByName(ref) for _, assoc := range assocs { addAssoc(assoc.ImageID, assoc.Ref) } if len(assocs) == 0 { imgID, err := l.is.Search(name) if err != nil { return nil, err } addAssoc(imgID, nil) } continue } var imgID image.ID if imgID, err = l.rs.Get(ref); err != nil { return nil, err } addAssoc(imgID, ref) } return imgDescr, nil } func (s *saveSession) save(outStream io.Writer) error { s.savedLayers = make(map[string]struct{}) // get image json tempDir, err := ioutil.TempDir("", "docker-export-") if err != nil { return err } defer os.RemoveAll(tempDir) s.outDir = tempDir reposLegacy := make(map[string]map[string]string) var manifest []manifestItem for id, imageDescr := range s.images { if err = s.saveImage(id); err != nil { return err } var repoTags []string var layers []string for _, ref := range imageDescr.refs { if _, ok := reposLegacy[ref.Name()]; !ok { reposLegacy[ref.Name()] = make(map[string]string) } reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] repoTags = append(repoTags, ref.String()) } for _, l := range imageDescr.layers { layers = append(layers, filepath.Join(l, legacyLayerFileName)) } manifest = append(manifest, manifestItem{ Config: digest.Digest(id).Hex() + ".json", RepoTags: repoTags, 
Layers: layers, }) } if len(reposLegacy) > 0 { reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { f.Close() return err } if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { return err } if err := f.Close(); err != nil { return err } if err := os.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { return err } } manifestFileName := filepath.Join(tempDir, manifestFileName) f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { f.Close() return err } if err := json.NewEncoder(f).Encode(manifest); err != nil { return err } if err := f.Close(); err != nil { return err } if err := os.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { return err } fs, err := archive.Tar(tempDir, archive.Uncompressed) if err != nil { return err } defer fs.Close() if _, err := io.Copy(outStream, fs); err != nil { return err } return nil } func (s *saveSession) saveImage(id image.ID) error { img, err := s.is.Get(id) if err != nil { return err } if len(img.RootFS.DiffIDs) == 0 { return fmt.Errorf("empty export - not implemented") } var parent digest.Digest var layers []string for i := range img.RootFS.DiffIDs { v1Img := image.V1Image{} if i == len(img.RootFS.DiffIDs)-1 { v1Img = img.V1Image } rootFS := *img.RootFS rootFS.DiffIDs = rootFS.DiffIDs[:i+1] v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) if err != nil { return err } v1Img.ID = v1ID.Hex() if parent != "" { v1Img.Parent = parent.Hex() } if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil { return err } layers = append(layers, v1Img.ID) parent = v1ID } configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { return err } if err := os.Chtimes(configFile, img.Created, img.Created); err != nil { return err } s.images[id].layers = 
layers return nil } func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error { if _, exists := s.savedLayers[legacyImg.ID]; exists { return nil } outDir := filepath.Join(s.outDir, legacyImg.ID) if err := os.Mkdir(outDir, 0755); err != nil { return err } // todo: why is this version file here? if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { return err } imageConfig, err := json.Marshal(legacyImg) if err != nil { return err } if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { return err } // serialize filesystem tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName)) if err != nil { return err } defer tarFile.Close() l, err := s.ls.Get(id) if err != nil { return err } defer layer.ReleaseAndLog(s.ls, l) arch, err := l.TarStream() if err != nil { return err } defer arch.Close() if _, err := io.Copy(tarFile, arch); err != nil { return err } for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { // todo: maybe save layer created timestamp? 
if err := os.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { return err } } s.savedLayers[legacyImg.ID] = struct{}{} return nil } docker-1.10.3/image/tarexport/tarexport.go000066400000000000000000000013331267010174400205360ustar00rootroot00000000000000package tarexport import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/reference" ) const ( manifestFileName = "manifest.json" legacyLayerFileName = "layer.tar" legacyConfigFileName = "json" legacyVersionFileName = "VERSION" legacyRepositoriesFileName = "repositories" ) type manifestItem struct { Config string RepoTags []string Layers []string } type tarexporter struct { is image.Store ls layer.Store rs reference.Store } // NewTarExporter returns new ImageExporter for tar packages func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store) image.Exporter { return &tarexporter{ is: is, ls: ls, rs: rs, } } docker-1.10.3/image/v1/000077500000000000000000000000001267010174400144555ustar00rootroot00000000000000docker-1.10.3/image/v1/imagev1.go000066400000000000000000000075501267010174400163440ustar00rootroot00000000000000package v1 import ( "encoding/json" "fmt" "regexp" "strings" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/version" ) var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) // noFallbackMinVersion is the minimum version for which v1compatibility // information will not be marshaled through the Image struct to remove // blank fields. 
var noFallbackMinVersion = version.Version("1.8.3") // HistoryFromConfig creates a History struct from v1 configuration JSON func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { h := image.History{} var v1Image image.V1Image if err := json.Unmarshal(imageJSON, &v1Image); err != nil { return h, err } return image.History{ Author: v1Image.Author, Created: v1Image.Created, CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd.Slice(), " "), Comment: v1Image.Comment, EmptyLayer: emptyLayer, }, nil } // CreateID creates an ID from v1 image, layerID and parent ID. // Used for backwards compatibility with old clients. func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { v1Image.ID = "" v1JSON, err := json.Marshal(v1Image) if err != nil { return "", err } var config map[string]*json.RawMessage if err := json.Unmarshal(v1JSON, &config); err != nil { return "", err } // FIXME: note that this is slightly incompatible with RootFS logic config["layer_id"] = rawJSON(layerID) if parent != "" { config["parent"] = rawJSON(parent) } configJSON, err := json.Marshal(config) if err != nil { return "", err } logrus.Debugf("CreateV1ID %s", configJSON) return digest.FromBytes(configJSON), nil } // MakeConfigFromV1Config creates an image config from the legacy V1 config format. 
func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { var dver struct { DockerVersion string `json:"docker_version"` } if err := json.Unmarshal(imageJSON, &dver); err != nil { return nil, err } useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion) if useFallback { var v1Image image.V1Image err := json.Unmarshal(imageJSON, &v1Image) if err != nil { return nil, err } imageJSON, err = json.Marshal(v1Image) if err != nil { return nil, err } } var c map[string]*json.RawMessage if err := json.Unmarshal(imageJSON, &c); err != nil { return nil, err } delete(c, "id") delete(c, "parent") delete(c, "Size") // Size is calculated from data on disk and is inconsitent delete(c, "parent_id") delete(c, "layer_id") delete(c, "throwaway") c["rootfs"] = rawJSON(rootfs) c["history"] = rawJSON(history) return json.Marshal(c) } // MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { // Top-level v1compatibility string should be a modified version of the // image config. var configAsMap map[string]*json.RawMessage if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { return nil, err } // Delete fields that didn't exist in old manifest delete(configAsMap, "rootfs") delete(configAsMap, "history") configAsMap["id"] = rawJSON(v1ID) if parentV1ID != "" { configAsMap["parent"] = rawJSON(parentV1ID) } if throwaway { configAsMap["throwaway"] = rawJSON(true) } return json.Marshal(configAsMap) } func rawJSON(value interface{}) *json.RawMessage { jsonval, err := json.Marshal(value) if err != nil { return nil } return (*json.RawMessage)(&jsonval) } // ValidateID checks whether an ID string is a valid image ID. 
func ValidateID(id string) error { if ok := validHex.MatchString(id); !ok { return fmt.Errorf("image ID '%s' is invalid ", id) } return nil } docker-1.10.3/integration-cli/000077500000000000000000000000001267010174400161355ustar00rootroot00000000000000docker-1.10.3/integration-cli/check_test.go000066400000000000000000000045131267010174400206030ustar00rootroot00000000000000package main import ( "fmt" "testing" "github.com/docker/docker/pkg/reexec" "github.com/go-check/check" ) func Test(t *testing.T) { reexec.Init() // This is required for external graphdriver tests if !isLocalDaemon { fmt.Println("INFO: Testing against a remote daemon") } else { fmt.Println("INFO: Testing against a local daemon") } check.TestingT(t) } func init() { check.Suite(&DockerSuite{}) } type DockerSuite struct { } func (s *DockerSuite) TearDownTest(c *check.C) { unpauseAllContainers() deleteAllContainers() deleteAllImages() deleteAllVolumes() deleteAllNetworks() } func init() { check.Suite(&DockerRegistrySuite{ ds: &DockerSuite{}, }) } type DockerRegistrySuite struct { ds *DockerSuite reg *testRegistryV2 d *Daemon } func (s *DockerRegistrySuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux) s.reg = setupRegistry(c, false) s.d = NewDaemon(c) } func (s *DockerRegistrySuite) TearDownTest(c *check.C) { if s.reg != nil { s.reg.Close() } if s.d != nil { s.d.Stop() } s.ds.TearDownTest(c) } func init() { check.Suite(&DockerSchema1RegistrySuite{ ds: &DockerSuite{}, }) } type DockerSchema1RegistrySuite struct { ds *DockerSuite reg *testRegistryV2 d *Daemon } func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux) s.reg = setupRegistry(c, true) s.d = NewDaemon(c) } func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { if s.reg != nil { s.reg.Close() } if s.d != nil { s.d.Stop() } s.ds.TearDownTest(c) } func init() { check.Suite(&DockerDaemonSuite{ ds: &DockerSuite{}, }) } type DockerDaemonSuite struct { ds *DockerSuite d *Daemon } func (s 
*DockerDaemonSuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux) s.d = NewDaemon(c) } func (s *DockerDaemonSuite) TearDownTest(c *check.C) { testRequires(c, DaemonIsLinux) if s.d != nil { s.d.Stop() } s.ds.TearDownTest(c) } func init() { check.Suite(&DockerTrustSuite{ ds: &DockerSuite{}, }) } type DockerTrustSuite struct { ds *DockerSuite reg *testRegistryV2 not *testNotary } func (s *DockerTrustSuite) SetUpTest(c *check.C) { s.reg = setupRegistry(c, false) s.not = setupNotary(c) } func (s *DockerTrustSuite) TearDownTest(c *check.C) { if s.reg != nil { s.reg.Close() } if s.not != nil { s.not.Close() } s.ds.TearDownTest(c) } docker-1.10.3/integration-cli/docker_api_attach_test.go000066400000000000000000000125451267010174400231560ustar00rootroot00000000000000package main import ( "bufio" "io" "net" "net/http" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" "golang.org/x/net/websocket" ) func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") rwc, err := sockConn(time.Duration(10 * time.Second)) c.Assert(err, checker.IsNil) cleanedContainerID := strings.TrimSpace(out) config, err := websocket.NewConfig( "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", "http://localhost", ) c.Assert(err, checker.IsNil) ws, err := websocket.NewClient(config, rwc) c.Assert(err, checker.IsNil) defer ws.Close() expected := []byte("hello") actual := make([]byte, len(expected)) outChan := make(chan error) go func() { _, err := ws.Read(actual) outChan <- err close(outChan) }() inChan := make(chan error) go func() { _, err := ws.Write(expected) inChan <- err close(inChan) }() select { case err := <-inChan: c.Assert(err, checker.IsNil) case <-time.After(5 * time.Second): c.Fatal("Timeout writing to ws") } select { case err := <-outChan: c.Assert(err, checker.IsNil) case <-time.After(5 * time.Second): 
c.Fatal("Timeout reading from ws") } c.Assert(actual, checker.DeepEquals, expected, check.Commentf("Websocket didn't return the expected data")) } // regression gh14320 func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { status, body, err := sockRequest("POST", "/containers/doesnotexist/attach", nil) c.Assert(status, checker.Equals, http.StatusNotFound) c.Assert(err, checker.IsNil) expected := "No such container: doesnotexist\n" c.Assert(string(body), checker.Contains, expected) } func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { status, body, err := sockRequest("GET", "/containers/doesnotexist/attach/ws", nil) c.Assert(status, checker.Equals, http.StatusNotFound) c.Assert(err, checker.IsNil) expected := "No such container: doesnotexist\n" c.Assert(string(body), checker.Contains, expected) } func (s *DockerSuite) TestPostContainersAttach(c *check.C) { testRequires(c, DaemonIsLinux) expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) { defer conn.Close() expected := []byte("success") _, err := conn.Write(expected) c.Assert(err, checker.IsNil) conn.SetReadDeadline(time.Now().Add(time.Second)) lenHeader := 0 if !tty { lenHeader = 8 } actual := make([]byte, len(expected)+lenHeader) _, err = io.ReadFull(br, actual) c.Assert(err, checker.IsNil) if !tty { fdMap := map[string]byte{ "stdin": 0, "stdout": 1, "stderr": 2, } c.Assert(actual[0], checker.Equals, fdMap[stream]) } c.Assert(actual[lenHeader:], checker.DeepEquals, expected, check.Commentf("Attach didn't return the expected data from %s", stream)) } expectTimeout := func(conn net.Conn, br *bufio.Reader, stream string) { defer conn.Close() _, err := conn.Write([]byte{'t'}) c.Assert(err, checker.IsNil) conn.SetReadDeadline(time.Now().Add(time.Second)) actual := make([]byte, 1) _, err = io.ReadFull(br, actual) opErr, ok := err.(*net.OpError) c.Assert(ok, checker.Equals, true, check.Commentf("Error is expected to be *net.OpError, got %v", 
err)) c.Assert(opErr.Timeout(), checker.Equals, true, check.Commentf("Read from %s is expected to timeout", stream)) } // Create a container that only emits stdout. cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat") cid = strings.TrimSpace(cid) // Attach to the container's stdout stream. conn, br, err := sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") c.Assert(err, checker.IsNil) // Check if the data from stdout can be received. expectSuccess(conn, br, "stdout", false) // Attach to the container's stderr stream. conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") c.Assert(err, checker.IsNil) // Since the container only emits stdout, attaching to stderr should return nothing. expectTimeout(conn, br, "stdout") // Test the similar functions of the stderr stream. cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2") cid = strings.TrimSpace(cid) conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") c.Assert(err, checker.IsNil) expectSuccess(conn, br, "stderr", false) conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") c.Assert(err, checker.IsNil) expectTimeout(conn, br, "stderr") // Test with tty. cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") cid = strings.TrimSpace(cid) // Attach to stdout only. conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") c.Assert(err, checker.IsNil) expectSuccess(conn, br, "stdout", true) // Attach without stdout stream. 
conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") c.Assert(err, checker.IsNil) // Nothing should be received because both the stdout and stderr of the container will be // sent to the client as stdout when tty is enabled. expectTimeout(conn, br, "stdout") } docker-1.10.3/integration-cli/docker_api_build_test.go000066400000000000000000000165661267010174400230200ustar00rootroot00000000000000package main import ( "archive/tar" "bytes" "net/http" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestBuildApiDockerfilePath(c *check.C) { // Test to make sure we stop people from trying to leave the // build context when specifying the path to the dockerfile buffer := new(bytes.Buffer) tw := tar.NewWriter(buffer) defer tw.Close() dockerfile := []byte("FROM busybox") err := tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Size: int64(len(dockerfile)), }) //failed to write tar file header c.Assert(err, checker.IsNil) _, err = tw.Write(dockerfile) // failed to write tar file content c.Assert(err, checker.IsNil) // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) res, body, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) out, err := readBody(body) c.Assert(err, checker.IsNil) // Didn't complain about leaving build context c.Assert(string(out), checker.Contains, "Forbidden path outside the build context") } func (s *DockerSuite) TestBuildApiDockerFileRemote(c *check.C) { testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) server, err := fakeStorage(map[string]string{ "testD": `FROM busybox COPY * /tmp/ RUN find / -name ba* RUN find /tmp/`, }) c.Assert(err, checker.IsNil) defer server.Close() res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", 
nil, "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) buf, err := readBody(body) c.Assert(err, checker.IsNil) // Make sure Dockerfile exists. // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL out := string(buf) c.Assert(out, checker.Contains, "/tmp/Dockerfile") c.Assert(out, checker.Not(checker.Contains), "baz") } func (s *DockerSuite) TestBuildApiRemoteTarballContext(c *check.C) { testRequires(c, DaemonIsLinux) buffer := new(bytes.Buffer) tw := tar.NewWriter(buffer) defer tw.Close() dockerfile := []byte("FROM busybox") err := tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Size: int64(len(dockerfile)), }) // failed to write tar file header c.Assert(err, checker.IsNil) _, err = tw.Write(dockerfile) // failed to write tar file content c.Assert(err, checker.IsNil) // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ "testT.tar": buffer, }) c.Assert(err, checker.IsNil) defer server.Close() res, b, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) b.Close() } func (s *DockerSuite) TestBuildApiRemoteTarballContextWithCustomDockerfile(c *check.C) { testRequires(c, DaemonIsLinux) buffer := new(bytes.Buffer) tw := tar.NewWriter(buffer) defer tw.Close() dockerfile := []byte(`FROM busybox RUN echo 'wrong'`) err := tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Size: int64(len(dockerfile)), }) // failed to write tar file header c.Assert(err, checker.IsNil) _, err = tw.Write(dockerfile) // failed to write tar file content c.Assert(err, checker.IsNil) custom := []byte(`FROM busybox RUN echo 'right' `) err = tw.WriteHeader(&tar.Header{ Name: "custom", Size: int64(len(custom)), }) // failed to write tar file header c.Assert(err, checker.IsNil) _, err = tw.Write(custom) // failed to write tar 
file content c.Assert(err, checker.IsNil) // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ "testT.tar": buffer, }) c.Assert(err, checker.IsNil) defer server.Close() url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" res, body, err := sockRequestRaw("POST", url, nil, "application/tar") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) defer body.Close() content, err := readBody(body) c.Assert(err, checker.IsNil) // Build used the wrong dockerfile. c.Assert(string(content), checker.Not(checker.Contains), "wrong") } func (s *DockerSuite) TestBuildApiLowerDockerfile(c *check.C) { testRequires(c, DaemonIsLinux) git, err := newFakeGit("repo", map[string]string{ "dockerfile": `FROM busybox RUN echo from dockerfile`, }, false) c.Assert(err, checker.IsNil) defer git.Close() res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) buf, err := readBody(body) c.Assert(err, checker.IsNil) out := string(buf) c.Assert(out, checker.Contains, "from dockerfile") } func (s *DockerSuite) TestBuildApiBuildGitWithF(c *check.C) { testRequires(c, DaemonIsLinux) git, err := newFakeGit("repo", map[string]string{ "baz": `FROM busybox RUN echo from baz`, "Dockerfile": `FROM busybox RUN echo from Dockerfile`, }, false) c.Assert(err, checker.IsNil) defer git.Close() // Make sure it tries to 'dockerfile' query param value res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) buf, err := readBody(body) c.Assert(err, checker.IsNil) out := string(buf) c.Assert(out, checker.Contains, "from baz") } func (s *DockerSuite) TestBuildApiDoubleDockerfile(c *check.C) { testRequires(c, UnixCli) // dockerfile 
overwrites Dockerfile on Windows git, err := newFakeGit("repo", map[string]string{ "Dockerfile": `FROM busybox RUN echo from Dockerfile`, "dockerfile": `FROM busybox RUN echo from dockerfile`, }, false) c.Assert(err, checker.IsNil) defer git.Close() // Make sure it tries to 'dockerfile' query param value res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) buf, err := readBody(body) c.Assert(err, checker.IsNil) out := string(buf) c.Assert(out, checker.Contains, "from Dockerfile") } func (s *DockerSuite) TestBuildApiDockerfileSymlink(c *check.C) { // Test to make sure we stop people from trying to leave the // build context when specifying a symlink as the path to the dockerfile buffer := new(bytes.Buffer) tw := tar.NewWriter(buffer) defer tw.Close() err := tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Typeflag: tar.TypeSymlink, Linkname: "/etc/passwd", }) // failed to write tar file header c.Assert(err, checker.IsNil) // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) out, err := readBody(body) c.Assert(err, checker.IsNil) // The reason the error is "Cannot locate specified Dockerfile" is because // in the builder, the symlink is resolved within the context, therefore // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is // a nonexistent file. 
c.Assert(string(out), checker.Contains, "Cannot locate specified Dockerfile: Dockerfile", check.Commentf("Didn't complain about leaving build context")) } docker-1.10.3/integration-cli/docker_api_containers_test.go000066400000000000000000001512351267010174400240570ustar00rootroot00000000000000package main import ( "archive/tar" "bytes" "encoding/json" "fmt" "io" "net/http" "net/http/httputil" "net/url" "os" "regexp" "strconv" "strings" "time" "github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringid" "github.com/docker/engine-api/types" containertypes "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" "github.com/go-check/check" ) func (s *DockerSuite) TestContainerApiGetAll(c *check.C) { testRequires(c, DaemonIsLinux) startCount, err := getContainerCount() c.Assert(err, checker.IsNil, check.Commentf("Cannot query container count")) name := "getall" dockerCmd(c, "run", "--name", name, "busybox", "true") status, body, err := sockRequest("GET", "/containers/json?all=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var inspectJSON []struct { Names []string } err = json.Unmarshal(body, &inspectJSON) c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal response body")) c.Assert(inspectJSON, checker.HasLen, startCount+1) actual := inspectJSON[0].Names[0] c.Assert(actual, checker.Equals, "/"+name) } // regression test for empty json field being omitted #13691 func (s *DockerSuite) TestContainerApiGetJSONNoFieldsOmitted(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "busybox", "true") status, body, err := sockRequest("GET", "/containers/json?all=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) // empty Labels field triggered this bug, make sense to check for everything // cause even Ports for instance can trigger this bug // better safe than 
sorry.. fields := []string{ "Id", "Names", "Image", "Command", "Created", "Ports", "Labels", "Status", "NetworkSettings", } // decoding into types.Container do not work since it eventually unmarshal // and empty field to an empty go map, so we just check for a string for _, f := range fields { if !strings.Contains(string(body), f) { c.Fatalf("Field %s is missing and it shouldn't", f) } } } type containerPs struct { Names []string Ports []map[string]interface{} } // regression test for non-empty fields from #13901 func (s *DockerSuite) TestContainerPsOmitFields(c *check.C) { testRequires(c, DaemonIsLinux) name := "pstest" port := 80 dockerCmd(c, "run", "-d", "--name", name, "--expose", strconv.Itoa(port), "busybox", "top") status, body, err := sockRequest("GET", "/containers/json?all=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var resp []containerPs err = json.Unmarshal(body, &resp) c.Assert(err, checker.IsNil) var foundContainer *containerPs for _, container := range resp { for _, testName := range container.Names { if "/"+name == testName { foundContainer = &container break } } } c.Assert(foundContainer.Ports, checker.HasLen, 1) c.Assert(foundContainer.Ports[0]["PrivatePort"], checker.Equals, float64(port)) _, ok := foundContainer.Ports[0]["PublicPort"] c.Assert(ok, checker.Not(checker.Equals), true) _, ok = foundContainer.Ports[0]["IP"] c.Assert(ok, checker.Not(checker.Equals), true) } func (s *DockerSuite) TestContainerApiGetExport(c *check.C) { testRequires(c, DaemonIsLinux) name := "exportcontainer" dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test") status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) found := false for tarReader := tar.NewReader(bytes.NewReader(body)); ; { h, err := tarReader.Next() if err != nil && err == io.EOF { break } if h.Name == "test" { found = true break } } c.Assert(found, 
checker.True, check.Commentf("The created test file has not been found in the exported image")) } func (s *DockerSuite) TestContainerApiGetChanges(c *check.C) { testRequires(c, DaemonIsLinux) name := "changescontainer" dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd") status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) changes := []struct { Kind int Path string }{} c.Assert(json.Unmarshal(body, &changes), checker.IsNil, check.Commentf("unable to unmarshal response body")) // Check the changelog for removal of /etc/passwd success := false for _, elem := range changes { if elem.Path == "/etc/passwd" && elem.Kind == 2 { success = true } } c.Assert(success, checker.True, check.Commentf("/etc/passwd has been removed but is not present in the diff")) } func (s *DockerSuite) TestContainerApiStartVolumeBinds(c *check.C) { testRequires(c, DaemonIsLinux) name := "testing" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{"/tmp": {}}, } status, _, err := sockRequest("POST", "/containers/create?name="+name, config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) bindPath := randomTmpDirPath("test", daemonPlatform) config = map[string]interface{}{ "Binds": []string{bindPath + ":/tmp"}, } status, _, err = sockRequest("POST", "/containers/"+name+"/start", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) pth, err := inspectMountSourceField(name, "/tmp") c.Assert(err, checker.IsNil) c.Assert(pth, checker.Equals, bindPath, check.Commentf("expected volume host path to be %s, got %s", bindPath, pth)) } // Test for GH#10618 func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) { testRequires(c, DaemonIsLinux) name := "testdups" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{"/tmp": {}}, } status, _, err 
:= sockRequest("POST", "/containers/create?name="+name, config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) bindPath1 := randomTmpDirPath("test1", daemonPlatform) bindPath2 := randomTmpDirPath("test2", daemonPlatform) config = map[string]interface{}{ "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, } status, body, err := sockRequest("POST", "/containers/"+name+"/start", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) c.Assert(string(body), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)) } func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) { testRequires(c, DaemonIsLinux) volName := "voltst" volPath := "/tmp" dockerCmd(c, "run", "-d", "--name", volName, "-v", volPath, "busybox") name := "TestContainerApiStartVolumesFrom" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{volPath: {}}, } status, _, err := sockRequest("POST", "/containers/create?name="+name, config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) config = map[string]interface{}{ "VolumesFrom": []string{volName}, } status, _, err = sockRequest("POST", "/containers/"+name+"/start", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) pth, err := inspectMountSourceField(name, volPath) c.Assert(err, checker.IsNil) pth2, err := inspectMountSourceField(volName, volPath) c.Assert(err, checker.IsNil) c.Assert(pth, checker.Equals, pth2, check.Commentf("expected volume host path to be %s, got %s", pth, pth2)) } func (s *DockerSuite) TestGetContainerStats(c *check.C) { testRequires(c, DaemonIsLinux) var ( name = "statscontainer" ) dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") type b struct { status int body []byte err error } bc := make(chan b, 1) go 
func() { status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) bc <- b{status, body, err} }() // allow some time to stream the stats from the container time.Sleep(4 * time.Second) dockerCmd(c, "rm", "-f", name) // collect the results from the stats stream or timeout and fail // if the stream was not disconnected. select { case <-time.After(2 * time.Second): c.Fatal("stream was not closed after container was removed") case sr := <-bc: c.Assert(sr.err, checker.IsNil) c.Assert(sr.status, checker.Equals, http.StatusOK) dec := json.NewDecoder(bytes.NewBuffer(sr.body)) var s *types.Stats // decode only one object from the stream c.Assert(dec.Decode(&s), checker.IsNil) } } func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) buf := &integration.ChannelBuffer{make(chan []byte, 1)} defer buf.Close() chErr := make(chan error) go func() { _, body, err := sockRequestRaw("GET", "/containers/"+id+"/stats?stream=1", nil, "application/json") if err != nil { chErr <- err } defer body.Close() _, err = io.Copy(buf, body) chErr <- err }() defer func() { c.Assert(<-chErr, checker.IsNil) }() b := make([]byte, 32) // make sure we've got some stats _, err := buf.ReadTimeout(b, 2*time.Second) c.Assert(err, checker.IsNil) // Now remove without `-f` and make sure we are still pulling stats _, _, err = dockerCmdWithError("rm", id) c.Assert(err, checker.Not(checker.IsNil), check.Commentf("rm should have failed but didn't")) _, err = buf.ReadTimeout(b, 2*time.Second) c.Assert(err, checker.IsNil) dockerCmd(c, "rm", "-f", id) _, err = buf.ReadTimeout(b, 2*time.Second) c.Assert(err, checker.Not(checker.IsNil)) } // regression test for gh13421 // previous test was just checking one stat entry so it didn't fail (stats with // stream false always return one stat) func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { testRequires(c, 
DaemonIsLinux) name := "statscontainer" dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") type b struct { status int body []byte err error } bc := make(chan b, 1) go func() { status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) bc <- b{status, body, err} }() // allow some time to stream the stats from the container time.Sleep(4 * time.Second) dockerCmd(c, "rm", "-f", name) // collect the results from the stats stream or timeout and fail // if the stream was not disconnected. select { case <-time.After(2 * time.Second): c.Fatal("stream was not closed after container was removed") case sr := <-bc: c.Assert(sr.err, checker.IsNil) c.Assert(sr.status, checker.Equals, http.StatusOK) s := string(sr.body) // count occurrences of "read" of types.Stats if l := strings.Count(s, "read"); l < 2 { c.Fatalf("Expected more than one stat streamed, got %d", l) } } } func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { testRequires(c, DaemonIsLinux) name := "statscontainer" dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") type b struct { status int body []byte err error } bc := make(chan b, 1) go func() { status, body, err := sockRequest("GET", "/containers/"+name+"/stats?stream=0", nil) bc <- b{status, body, err} }() // allow some time to stream the stats from the container time.Sleep(4 * time.Second) dockerCmd(c, "rm", "-f", name) // collect the results from the stats stream or timeout and fail // if the stream was not disconnected. 
select { case <-time.After(2 * time.Second): c.Fatal("stream was not closed after container was removed") case sr := <-bc: c.Assert(sr.err, checker.IsNil) c.Assert(sr.status, checker.Equals, http.StatusOK) s := string(sr.body) // count occurrences of "read" of types.Stats c.Assert(strings.Count(s, "read"), checker.Equals, 1, check.Commentf("Expected only one stat streamed, got %d", strings.Count(s, "read"))) } } func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { testRequires(c, DaemonIsLinux) // TODO: this test does nothing because we are c.Assert'ing in goroutine var ( name = "statscontainer" ) dockerCmd(c, "create", "--name", name, "busybox", "top") go func() { // We'll never get return for GET stats from sockRequest as of now, // just send request and see if panic or error would happen on daemon side. status, _, err := sockRequest("GET", "/containers/"+name+"/stats", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) }() // allow some time to send request and let daemon deal with it time.Sleep(1 * time.Second) } // #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume func (s *DockerSuite) TestPostContainerBindNormalVolume(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox") fooDir, err := inspectMountSourceField("one", "/foo") c.Assert(err, checker.IsNil) dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} status, _, err := sockRequest("POST", "/containers/two/start", bindSpec) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) fooDir2, err := inspectMountSourceField("two", "/foo") c.Assert(err, checker.IsNil) c.Assert(fooDir2, checker.Equals, fooDir, check.Commentf("expected volume path to be %s, got: %s", fooDir, fooDir2)) } func (s *DockerSuite) 
TestContainerApiPause(c *check.C) { testRequires(c, DaemonIsLinux) defer unpauseAllContainers() out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30") ContainerID := strings.TrimSpace(out) status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) pausedContainers, err := getSliceOfPausedContainers() c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) } status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) pausedContainers, err = getSliceOfPausedContainers() c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) c.Assert(pausedContainers, checker.IsNil, check.Commentf("There should be no paused container.")) } func (s *DockerSuite) TestContainerApiTop(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top") id := strings.TrimSpace(string(out)) c.Assert(waitRun(id), checker.IsNil) type topResp struct { Titles []string Processes [][]string } var top topResp status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(json.Unmarshal(b, &top), checker.IsNil) c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)) if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) } c.Assert(top.Processes, checker.HasLen, 2, check.Commentf("expected 2 processes, found %d: %v", 
len(top.Processes), top.Processes)) c.Assert(top.Processes[0][10], checker.Equals, "/bin/sh -c top") c.Assert(top.Processes[1][10], checker.Equals, "top") } func (s *DockerSuite) TestContainerApiCommit(c *check.C) { testRequires(c, DaemonIsLinux) cName := "testapicommit" dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") name := "testcontainerapicommit" status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) type resp struct { ID string } var img resp c.Assert(json.Unmarshal(b, &img), checker.IsNil) cmd, err := inspectField(img.ID, "Config.Cmd") c.Assert(err, checker.IsNil) c.Assert(cmd, checker.Equals, "{[/bin/sh -c touch /test]}", check.Commentf("got wrong Cmd from commit: %q", cmd)) // sanity check, make sure the image is what we think it is dockerCmd(c, "run", img.ID, "ls", "/test") } func (s *DockerSuite) TestContainerApiCommitWithLabelInConfig(c *check.C) { testRequires(c, DaemonIsLinux) cName := "testapicommitwithconfig" dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") config := map[string]interface{}{ "Labels": map[string]string{"key1": "value1", "key2": "value2"}, } name := "testcontainerapicommitwithconfig" status, b, err := sockRequest("POST", "/commit?repo="+name+"&container="+cName, config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) type resp struct { ID string } var img resp c.Assert(json.Unmarshal(b, &img), checker.IsNil) label1, err := inspectFieldMap(img.ID, "Config.Labels", "key1") c.Assert(err, checker.IsNil) c.Assert(label1, checker.Equals, "value1") label2, err := inspectFieldMap(img.ID, "Config.Labels", "key2") c.Assert(err, checker.IsNil) c.Assert(label2, checker.Equals, "value2") cmd, err := inspectField(img.ID, "Config.Cmd") c.Assert(err, checker.IsNil) c.Assert(cmd, checker.Equals, "{[/bin/sh -c touch /test]}", 
check.Commentf("got wrong Cmd from commit: %q", cmd)) // sanity check, make sure the image is what we think it is dockerCmd(c, "run", img.ID, "ls", "/test") } func (s *DockerSuite) TestContainerApiBadPort(c *check.C) { testRequires(c, DaemonIsLinux) config := map[string]interface{}{ "Image": "busybox", "Cmd": []string{"/bin/sh", "-c", "echo test"}, "PortBindings": map[string]interface{}{ "8080/tcp": []map[string]interface{}{ { "HostIP": "", "HostPort": "aa80", }, }, }, } jsonData := bytes.NewBuffer(nil) json.NewEncoder(jsonData).Encode(config) status, b, err := sockRequest("POST", "/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) c.Assert(strings.TrimSpace(string(b)), checker.Equals, `Invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", string(b))) } func (s *DockerSuite) TestContainerApiCreate(c *check.C) { testRequires(c, DaemonIsLinux) config := map[string]interface{}{ "Image": "busybox", "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, } status, b, err := sockRequest("POST", "/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) type createResp struct { ID string } var container createResp c.Assert(json.Unmarshal(b, &container), checker.IsNil) out, _ := dockerCmd(c, "start", "-a", container.ID) c.Assert(strings.TrimSpace(out), checker.Equals, "/test") } func (s *DockerSuite) TestContainerApiCreateEmptyConfig(c *check.C) { config := map[string]interface{}{} status, b, err := sockRequest("POST", "/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) expected := "Config cannot be empty in order to create a container\n" c.Assert(string(b), checker.Equals, expected) } func (s *DockerSuite) TestContainerApiCreateMultipleNetworksConfig(c *check.C) { // Container creation must fail if client specified configurations for more than one 
network config := map[string]interface{}{ "Image": "busybox", "NetworkingConfig": networktypes.NetworkingConfig{ EndpointsConfig: map[string]*networktypes.EndpointSettings{ "net1": {}, "net2": {}, "net3": {}, }, }, } status, b, err := sockRequest("POST", "/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusBadRequest) // network name order in error message is not deterministic c.Assert(string(b), checker.Contains, "Container cannot be connected to [") c.Assert(string(b), checker.Contains, "net1") c.Assert(string(b), checker.Contains, "net2") c.Assert(string(b), checker.Contains, "net3") } func (s *DockerSuite) TestContainerApiCreateWithHostName(c *check.C) { testRequires(c, DaemonIsLinux) hostName := "test-host" config := map[string]interface{}{ "Image": "busybox", "Hostname": hostName, } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) var container types.ContainerCreateResponse c.Assert(json.Unmarshal(body, &container), checker.IsNil) status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) } func (s *DockerSuite) TestContainerApiCreateWithDomainName(c *check.C) { testRequires(c, DaemonIsLinux) domainName := "test-domain" config := map[string]interface{}{ "Image": "busybox", "Domainname": domainName, } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) var container types.ContainerCreateResponse c.Assert(json.Unmarshal(body, &container), checker.IsNil) status, body, err = sockRequest("GET", 
"/containers/"+container.ID+"/json", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname")) } func (s *DockerSuite) TestContainerApiCreateBridgeNetworkMode(c *check.C) { testRequires(c, DaemonIsLinux) UtilCreateNetworkMode(c, "bridge") } func (s *DockerSuite) TestContainerApiCreateOtherNetworkModes(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) UtilCreateNetworkMode(c, "host") UtilCreateNetworkMode(c, "container:web1") } func UtilCreateNetworkMode(c *check.C, networkMode string) { config := map[string]interface{}{ "Image": "busybox", "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) var container types.ContainerCreateResponse c.Assert(json.Unmarshal(body, &container), checker.IsNil) status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode")) } func (s *DockerSuite) TestContainerApiCreateWithCpuSharesCpuset(c *check.C) { testRequires(c, DaemonIsLinux) config := map[string]interface{}{ "Image": "busybox", "CpuShares": 512, "CpusetCpus": "0", } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) var container types.ContainerCreateResponse c.Assert(json.Unmarshal(body, &container), checker.IsNil) status, body, err = 
sockRequest("GET", "/containers/"+container.ID+"/json", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) out, err := inspectField(containerJSON.ID, "HostConfig.CpuShares") c.Assert(err, checker.IsNil) c.Assert(out, checker.Equals, "512") outCpuset, errCpuset := inspectField(containerJSON.ID, "HostConfig.CpusetCpus") c.Assert(errCpuset, checker.IsNil, check.Commentf("Output: %s", outCpuset)) c.Assert(outCpuset, checker.Equals, "0") } func (s *DockerSuite) TestContainerApiVerifyHeader(c *check.C) { testRequires(c, DaemonIsLinux) config := map[string]interface{}{ "Image": "busybox", } create := func(ct string) (*http.Response, io.ReadCloser, error) { jsonData := bytes.NewBuffer(nil) c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil) return sockRequestRaw("POST", "/containers/create", jsonData, ct) } // Try with no content-type res, body, err := create("") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) body.Close() // Try with wrong content-type res, body, err = create("application/xml") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) body.Close() // now application/json res, body, err = create("application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) body.Close() } //Issue 14230. 
daemon should return 500 for invalid port syntax func (s *DockerSuite) TestContainerApiInvalidPortSyntax(c *check.C) { testRequires(c, DaemonIsLinux) config := `{ "Image": "busybox", "HostConfig": { "PortBindings": { "19039;1230": [ {} ] } } }` res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) b, err := readBody(body) c.Assert(err, checker.IsNil) c.Assert(string(b[:]), checker.Contains, "Invalid port") } // Issue 7941 - test to make sure a "null" in JSON is just ignored. // W/o this fix a null in JSON would be parsed into a string var as "null" func (s *DockerSuite) TestContainerApiPostCreateNull(c *check.C) { testRequires(c, DaemonIsLinux) config := `{ "Hostname":"", "Domainname":"", "Memory":0, "MemorySwap":0, "CpuShares":0, "Cpuset":null, "AttachStdin":true, "AttachStdout":true, "AttachStderr":true, "ExposedPorts":{}, "Tty":true, "OpenStdin":true, "StdinOnce":true, "Env":[], "Cmd":"ls", "Image":"busybox", "Volumes":{}, "WorkingDir":"", "Entrypoint":null, "NetworkDisabled":false, "OnBuild":null}` res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) b, err := readBody(body) c.Assert(err, checker.IsNil) type createResp struct { ID string } var container createResp c.Assert(json.Unmarshal(b, &container), checker.IsNil) out, err := inspectField(container.ID, "HostConfig.CpusetCpus") c.Assert(err, checker.IsNil) c.Assert(out, checker.Equals, "") outMemory, errMemory := inspectField(container.ID, "HostConfig.Memory") c.Assert(outMemory, checker.Equals, "0") c.Assert(errMemory, checker.IsNil) outMemorySwap, errMemorySwap := inspectField(container.ID, "HostConfig.MemorySwap") c.Assert(outMemorySwap, checker.Equals, "0") c.Assert(errMemorySwap, checker.IsNil) } func (s 
*DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { testRequires(c, DaemonIsLinux) config := `{ "Image": "busybox", "Cmd": "ls", "OpenStdin": true, "CpuShares": 100, "Memory": 524287 }` res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) b, err2 := readBody(body) c.Assert(err2, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") } func (s *DockerSuite) TestStartWithTooLowMemoryLimit(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "busybox") containerID := strings.TrimSpace(out) config := `{ "CpuShares": 100, "Memory": 524287 }` res, body, err := sockRequestRaw("POST", "/containers/"+containerID+"/start", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) b, err2 := readBody(body) c.Assert(err2, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") } func (s *DockerSuite) TestContainerApiRename(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--name", "TestContainerApiRename", "-d", "busybox", "sh") containerID := strings.TrimSpace(out) newName := "TestContainerApiRenameNew" statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil) c.Assert(err, checker.IsNil) // 204 No Content is expected, not 200 c.Assert(statusCode, checker.Equals, http.StatusNoContent) name, err := inspectField(containerID, "Name") c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) } func (s *DockerSuite) TestContainerApiKill(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-api-kill" dockerCmd(c, "run", "-di", "--name", name, "busybox", "top") status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil) 
c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) state, err := inspectField(name, "State.Running") c.Assert(err, checker.IsNil) c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state)) } func (s *DockerSuite) TestContainerApiRestart(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-api-restart" dockerCmd(c, "run", "-di", "--name", name, "busybox", "top") status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 5*time.Second), checker.IsNil) } func (s *DockerSuite) TestContainerApiRestartNotimeoutParam(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-api-restart-no-timeout-param" out, _ := dockerCmd(c, "run", "-di", "--name", name, "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 5*time.Second), checker.IsNil) } func (s *DockerSuite) TestContainerApiStart(c *check.C) { testRequires(c, DaemonIsLinux) name := "testing-start" config := map[string]interface{}{ "Image": "busybox", "Cmd": []string{"/bin/sh", "-c", "/bin/top"}, "OpenStdin": true, } status, _, err := sockRequest("POST", "/containers/create?name="+name, config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) conf := make(map[string]interface{}) status, _, err = sockRequest("POST", "/containers/"+name+"/start", conf) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) // second call to start should give 304 status, _, err = sockRequest("POST", 
"/containers/"+name+"/start", conf) c.Assert(err, checker.IsNil) // TODO(tibor): figure out why this doesn't work on windows if isLocalDaemon { c.Assert(status, checker.Equals, http.StatusNotModified) } } func (s *DockerSuite) TestContainerApiStop(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-api-stop" dockerCmd(c, "run", "-di", "--name", name, "busybox", "top") status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 5*time.Second), checker.IsNil) // second call to start should give 304 status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNotModified) } func (s *DockerSuite) TestContainerApiWait(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-api-wait" dockerCmd(c, "run", "--name", name, "busybox", "sleep", "5") status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 5*time.Second), checker.IsNil) var waitres types.ContainerWaitResponse c.Assert(json.Unmarshal(body, &waitres), checker.IsNil) c.Assert(waitres.StatusCode, checker.Equals, 0) } func (s *DockerSuite) TestContainerApiCopy(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-container-api-copy" dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") postData := types.CopyConfig{ Resource: "/test.txt", } status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) found := false for tarReader := tar.NewReader(bytes.NewReader(body)); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { break } c.Fatal(err) } if h.Name == "test.txt" { found = true 
break } } c.Assert(found, checker.True) } func (s *DockerSuite) TestContainerApiCopyResourcePathEmpty(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-container-api-copy-resource-empty" dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") postData := types.CopyConfig{ Resource: "", } status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) c.Assert(string(body), checker.Matches, "Path cannot be empty\n") } func (s *DockerSuite) TestContainerApiCopyResourcePathNotFound(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-container-api-copy-resource-not-found" dockerCmd(c, "run", "--name", name, "busybox") postData := types.CopyConfig{ Resource: "/notexist", } status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) c.Assert(string(body), checker.Matches, "Could not find the file /notexist in container "+name+"\n") } func (s *DockerSuite) TestContainerApiCopyContainerNotFound(c *check.C) { postData := types.CopyConfig{ Resource: "/something", } status, _, err := sockRequest("POST", "/containers/notexists/copy", postData) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNotFound) } func (s *DockerSuite) TestContainerApiDelete(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) dockerCmd(c, "stop", id) status, _, err := sockRequest("DELETE", "/containers/"+id, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) } func (s *DockerSuite) TestContainerApiDeleteNotExist(c *check.C) { status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNotFound) 
c.Assert(string(body), checker.Matches, "No such container: doesnotexist\n") } func (s *DockerSuite) TestContainerApiDeleteForce(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) } func (s *DockerSuite) TestContainerApiDeleteRemoveLinks(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) out, _ = dockerCmd(c, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top") id2 := strings.TrimSpace(out) c.Assert(waitRun(id2), checker.IsNil) links, err := inspectFieldJSON(id2, "HostConfig.Links") c.Assert(err, checker.IsNil) c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers")) status, b, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b))) linksPostRm, err := inspectFieldJSON(id2, "HostConfig.Links") c.Assert(err, checker.IsNil) c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links")) } func (s *DockerSuite) TestContainerApiDeleteConflict(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) status, _, err := sockRequest("DELETE", "/containers/"+id, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusConflict) } func (s *DockerSuite) TestContainerApiDeleteRemoveVolume(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) out, 
_ := dockerCmd(c, "run", "-d", "-v", "/testvolume", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) source, err := inspectMountSourceField(id, "/testvolume") _, err = os.Stat(source) c.Assert(err, checker.IsNil) status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent) _, err = os.Stat(source) c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err)) } // Regression test for https://github.com/docker/docker/issues/6231 func (s *DockerSuite) TestContainersApiChunkedEncoding(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "-v", "/foo", "busybox", "true") id := strings.TrimSpace(out) conn, err := sockConn(time.Duration(10 * time.Second)) c.Assert(err, checker.IsNil) client := httputil.NewClientConn(conn, nil) defer client.Close() bindCfg := strings.NewReader(`{"Binds": ["/tmp:/foo"]}`) req, err := http.NewRequest("POST", "/containers/"+id+"/start", bindCfg) c.Assert(err, checker.IsNil) req.Header.Set("Content-Type", "application/json") // This is a cheat to make the http request do chunked encoding // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 req.ContentLength = -1 resp, err := client.Do(req) c.Assert(err, checker.IsNil, check.Commentf("error starting container with chunked encoding")) resp.Body.Close() c.Assert(resp.StatusCode, checker.Equals, 204) out, err = inspectFieldJSON(id, "HostConfig.Binds") c.Assert(err, checker.IsNil) var binds []string c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&binds), checker.IsNil) c.Assert(binds, checker.HasLen, 1, check.Commentf("Got unexpected binds: %v", binds)) expected := "/tmp:/foo" c.Assert(binds[0], checker.Equals, expected, check.Commentf("got incorrect bind spec")) } func (s *DockerSuite) 
TestPostContainerStop(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") containerID := strings.TrimSpace(out) c.Assert(waitRun(containerID), checker.IsNil) statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil) c.Assert(err, checker.IsNil) // 204 No Content is expected, not 200 c.Assert(statusCode, checker.Equals, http.StatusNoContent) c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 5*time.Second), checker.IsNil) } // #14170 func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceEntrypoint(c *check.C) { testRequires(c, DaemonIsLinux) config := struct { Image string Entrypoint string Cmd []string }{"busybox", "echo", []string{"hello", "world"}} _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "start", "-a", "echotest") c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") config2 := struct { Image string Entrypoint []string Cmd []string }{"busybox", []string{"echo"}, []string{"hello", "world"}} _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "start", "-a", "echotest2") c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") } // #14170 func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { testRequires(c, DaemonIsLinux) config := struct { Image string Entrypoint string Cmd string }{"busybox", "echo", "hello world"} _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "start", "-a", "echotest") c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") config2 := struct { Image string Cmd []string }{"busybox", []string{"echo", "hello", "world"}} _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "start", 
"-a", "echotest2") c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") } // regression #14318 func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) { testRequires(c, DaemonIsLinux) config := struct { Image string CapAdd string CapDrop string }{"busybox", "NET_ADMIN", "SYS_ADMIN"} status, _, err := sockRequest("POST", "/containers/create?name=capaddtest0", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) config2 := struct { Image string CapAdd []string CapDrop []string }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, []string{"SETGID"}} status, _, err = sockRequest("POST", "/containers/create?name=capaddtest1", config2) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) } // #14640 func (s *DockerSuite) TestPostContainersStartWithoutLinksInHostConfig(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-host-config-links" dockerCmd(c, "create", "--name", name, "busybox", "top") hc, err := inspectFieldJSON(name, "HostConfig") c.Assert(err, checker.IsNil) config := `{"HostConfig":` + hc + `}` res, b, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) b.Close() } // #14640 func (s *DockerSuite) TestPostContainersStartWithLinksInHostConfig(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-host-config-links" dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top") dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top") hc, err := inspectFieldJSON(name, "HostConfig") c.Assert(err, checker.IsNil) config := `{"HostConfig":` + hc + `}` res, b, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) b.Close() } // #14640 func (s 
*DockerSuite) TestPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-host-config-links" out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") id := strings.TrimSpace(out) dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") hc, err := inspectFieldJSON(name, "HostConfig") c.Assert(err, checker.IsNil) config := `{"HostConfig":` + hc + `}` res, b, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) b.Close() } // #14915 func (s *DockerSuite) TestContainersApiCreateNoHostConfig118(c *check.C) { testRequires(c, DaemonIsLinux) config := struct { Image string }{"busybox"} status, _, err := sockRequest("POST", "/v1.18/containers/create", config) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusCreated) } // Ensure an error occurs when you have a container read-only rootfs but you // extract an archive to a symlink in a writable volume which points to a // directory outside of the volume. func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) { // Requires local volume mount bind. // --read-only + userns has remount issues testRequires(c, SameHostDaemon, NotUserNamespace, DaemonIsLinux) testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-") defer os.RemoveAll(testVol) makeTestContentInDir(c, testVol) cID := makeTestContainer(c, testContainerOptions{ readOnly: true, volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 }) defer deleteContainer(cID) // Attempt to extract to a symlink in the volume which points to a // directory outside the volume. This should cause an error because the // rootfs is read-only. 
query := make(url.Values, 1) query.Set("path", "/vol2/symlinkToAbsDir") urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode()) statusCode, body, err := sockRequest("PUT", urlPath, nil) c.Assert(err, checker.IsNil) if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) { c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body)) } } func (s *DockerSuite) TestContainersApiGetContainersJSONEmpty(c *check.C) { testRequires(c, DaemonIsLinux) status, body, err := sockRequest("GET", "/containers/json?all=1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(string(body), checker.Equals, "[]\n") } func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) { testRequires(c, DaemonIsLinux) c1 := struct { Image string CpusetCpus string }{"busybox", "1-42,,"} name := "wrong-cpuset-cpus" status, body, err := sockRequest("POST", "/containers/create?name="+name, c1) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) expected := "Invalid value 1-42,, for cpuset cpus.\n" c.Assert(string(body), checker.Equals, expected) c2 := struct { Image string CpusetMems string }{"busybox", "42-3,1--"} name = "wrong-cpuset-mems" status, body, err = sockRequest("POST", "/containers/create?name="+name, c2) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) expected = "Invalid value 42-3,1-- for cpuset mems.\n" c.Assert(string(body), checker.Equals, expected) } func (s *DockerSuite) TestStartWithNilDNS(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "busybox") containerID := strings.TrimSpace(out) config := `{"HostConfig": {"Dns": null}}` res, b, err := sockRequestRaw("POST", "/containers/"+containerID+"/start", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) b.Close() dns, 
err := inspectFieldJSON(containerID, "HostConfig.Dns") c.Assert(err, checker.IsNil) c.Assert(dns, checker.Equals, "[]") } func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) { testRequires(c, DaemonIsLinux) config := map[string]interface{}{ "Image": "busybox", "HostConfig": map[string]interface{}{"ShmSize": -1}, } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusInternalServerError) c.Assert(string(body), checker.Contains, "SHM size must be greater then 0") } func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) { testRequires(c, DaemonIsLinux) var defaultSHMSize int64 = 67108864 config := map[string]interface{}{ "Image": "busybox", "Cmd": "mount", } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusCreated) var container types.ContainerCreateResponse c.Assert(json.Unmarshal(body, &container), check.IsNil) status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusOK) var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize) out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) if !shmRegexp.MatchString(out) { c.Fatalf("Expected shm of 64MB in mount command, got %v", out) } } func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) { testRequires(c, DaemonIsLinux) config := map[string]interface{}{ "Image": "busybox", "HostConfig": map[string]interface{}{}, "Cmd": "mount", } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusCreated) var container 
types.ContainerCreateResponse c.Assert(json.Unmarshal(body, &container), check.IsNil) status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusOK) var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864)) out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) if !shmRegexp.MatchString(out) { c.Fatalf("Expected shm of 64MB in mount command, got %v", out) } } func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) { testRequires(c, DaemonIsLinux) config := map[string]interface{}{ "Image": "busybox", "Cmd": "mount", "HostConfig": map[string]interface{}{"ShmSize": 1073741824}, } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusCreated) var container types.ContainerCreateResponse c.Assert(json.Unmarshal(body, &container), check.IsNil) status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusOK) var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824)) out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) if !shmRegex.MatchString(out) { c.Fatalf("Expected shm of 1GB in mount command, got %v", out) } } func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) { testRequires(c, DaemonIsLinux) config := map[string]interface{}{ "Image": "busybox", } status, body, err := sockRequest("POST", "/containers/create", config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, 
http.StatusCreated) var container types.ContainerCreateResponse c.Assert(json.Unmarshal(body, &container), check.IsNil) status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusOK) var containerJSON types.ContainerJSON c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) } // check validation is done daemon side and not only in cli func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *check.C) { testRequires(c, DaemonIsLinux) config := struct { Image string OomScoreAdj int }{"busybox", 1001} name := "oomscoreadj-over" status, b, err := sockRequest("POST", "/containers/create?name="+name, config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusInternalServerError) expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]." if !strings.Contains(string(b), expected) { c.Fatalf("Expected output to contain %q, got %q", expected, string(b)) } config = struct { Image string OomScoreAdj int }{"busybox", -1001} name = "oomscoreadj-low" status, b, err = sockRequest("POST", "/containers/create?name="+name, config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusInternalServerError) expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." 
if !strings.Contains(string(b), expected) { c.Fatalf("Expected output to contain %q, got %q", expected, string(b)) } } docker-1.10.3/integration-cli/docker_api_create_test.go000066400000000000000000000026561267010174400231570ustar00rootroot00000000000000package main import ( "net/http" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestApiCreateWithNotExistImage(c *check.C) { name := "test" config := map[string]interface{}{ "Image": "test456:v1", "Volumes": map[string]struct{}{"/tmp": {}}, } status, resp, err := sockRequest("POST", "/containers/create?name="+name, config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusNotFound) expected := "No such image: test456:v1" c.Assert(strings.TrimSpace(string(resp)), checker.Contains, expected) config2 := map[string]interface{}{ "Image": "test456", "Volumes": map[string]struct{}{"/tmp": {}}, } status, resp, err = sockRequest("POST", "/containers/create?name="+name, config2) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusNotFound) expected = "No such image: test456:latest" c.Assert(strings.TrimSpace(string(resp)), checker.Equals, expected) config3 := map[string]interface{}{ "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", } status, resp, err = sockRequest("POST", "/containers/create?name="+name, config3) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusNotFound) expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" c.Assert(strings.TrimSpace(string(resp)), checker.Equals, expected) } docker-1.10.3/integration-cli/docker_api_events_test.go000066400000000000000000000034251267010174400232130ustar00rootroot00000000000000package main import ( "encoding/json" "io" "net/http" "net/url" "strconv" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/jsonmessage" 
"github.com/go-check/check" ) func (s *DockerSuite) TestEventsApiEmptyOutput(c *check.C) { type apiResp struct { resp *http.Response err error } chResp := make(chan *apiResp) go func() { resp, body, err := sockRequestRaw("GET", "/events", nil, "") body.Close() chResp <- &apiResp{resp, err} }() select { case r := <-chResp: c.Assert(r.err, checker.IsNil) c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) case <-time.After(3 * time.Second): c.Fatal("timeout waiting for events api to respond, should have responded immediately") } } func (s *DockerSuite) TestEventsApiBackwardsCompatible(c *check.C) { since := daemonTime(c).Unix() ts := strconv.FormatInt(since, 10) out, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") containerID := strings.TrimSpace(out) c.Assert(waitRun(containerID), checker.IsNil) q := url.Values{} q.Set("since", ts) _, body, err := sockRequestRaw("GET", "/events?"+q.Encode(), nil, "") c.Assert(err, checker.IsNil) defer body.Close() dec := json.NewDecoder(body) var containerCreateEvent *jsonmessage.JSONMessage for { var event jsonmessage.JSONMessage if err := dec.Decode(&event); err != nil { if err == io.EOF { break } c.Fatal(err) } if event.Status == "create" && event.ID == containerID { containerCreateEvent = &event break } } c.Assert(containerCreateEvent, checker.Not(checker.IsNil)) c.Assert(containerCreateEvent.Status, checker.Equals, "create") c.Assert(containerCreateEvent.ID, checker.Equals, containerID) c.Assert(containerCreateEvent.From, checker.Equals, "busybox") } docker-1.10.3/integration-cli/docker_api_exec_resize_test.go000066400000000000000000000053271267010174400242170ustar00rootroot00000000000000package main import ( "bytes" "encoding/json" "fmt" "io" "net/http" "strings" "sync" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestExecResizeApiHeightWidthNoInt(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") 
cleanedContainerID := strings.TrimSpace(out) endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar" status, _, err := sockRequest("POST", endpoint, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) } // Part of #14845 func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) { testRequires(c, DaemonIsLinux) name := "exec_resize_test" dockerCmd(c, "run", "-d", "-i", "-t", "--name", name, "--restart", "always", "busybox", "/bin/sh") testExecResize := func() error { data := map[string]interface{}{ "AttachStdin": true, "Cmd": []string{"/bin/sh"}, } uri := fmt.Sprintf("/containers/%s/exec", name) status, body, err := sockRequest("POST", uri, data) if err != nil { return err } if status != http.StatusCreated { return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, status) } out := map[string]string{} err = json.Unmarshal(body, &out) if err != nil { return fmt.Errorf("ExecCreate returned invalid json. Error: %q", err.Error()) } execID := out["Id"] if len(execID) < 1 { return fmt.Errorf("ExecCreate got invalid execID") } payload := bytes.NewBufferString(`{"Tty":true}`) conn, _, err := sockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json") if err != nil { return fmt.Errorf("Failed to start the exec: %q", err.Error()) } defer conn.Close() _, rc, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), nil, "text/plain") // It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned. if err == io.ErrUnexpectedEOF { return fmt.Errorf("The daemon might have crashed.") } if err == nil { rc.Close() } // We only interested in the io.ErrUnexpectedEOF error, so we return nil otherwise. return nil } // The panic happens when daemon.ContainerExecStart is called but the // container.Exec is not called. 
// Because the panic is not 100% reproducible, we send the requests concurrently // to increase the probability that the problem is triggered. var ( n = 10 ch = make(chan error, n) wg sync.WaitGroup ) for i := 0; i < n; i++ { wg.Add(1) go func() { defer wg.Done() if err := testExecResize(); err != nil { ch <- err } }() } wg.Wait() select { case err := <-ch: c.Fatal(err.Error()) default: } } docker-1.10.3/integration-cli/docker_api_exec_test.go000066400000000000000000000137701267010174400226370ustar00rootroot00000000000000// +build !test_no_exec package main import ( "bytes" "encoding/json" "fmt" "net/http" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // Regression test for #9414 func (s *DockerSuite) TestExecApiCreateNoCmd(c *check.C) { testRequires(c, DaemonIsLinux) name := "exec_test" dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError) comment := check.Commentf("Expected message when creating exec command with no Cmd specified") c.Assert(string(body), checker.Contains, "No exec command specified", comment) } func (s *DockerSuite) TestExecApiCreateNoValidContentType(c *check.C) { testRequires(c, DaemonIsLinux) name := "exec_test" dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") jsonData := bytes.NewBuffer(nil) if err := json.NewEncoder(jsonData).Encode(map[string]interface{}{"Cmd": nil}); err != nil { c.Fatalf("Can not encode data to json %s", err) } res, body, err := sockRequestRaw("POST", fmt.Sprintf("/containers/%s/exec", name), jsonData, "text/plain") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) b, err := readBody(body) c.Assert(err, checker.IsNil) comment := check.Commentf("Expected message when creating 
exec command with invalid Content-Type specified") c.Assert(string(b), checker.Contains, "Content-Type specified", comment) } func (s *DockerSuite) TestExecApiCreateContainerPaused(c *check.C) { testRequires(c, DaemonIsLinux) name := "exec_create_test" dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") dockerCmd(c, "pause", name) status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusConflict) comment := check.Commentf("Expected message when creating exec command with Container s% is paused", name) c.Assert(string(body), checker.Contains, "Container "+name+" is paused, unpause the container before exec", comment) } func (s *DockerSuite) TestExecAPIStart(c *check.C) { testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvagable to Windows to Windows CI dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") id := createExec(c, "test") startExec(c, id, http.StatusOK) id = createExec(c, "test") dockerCmd(c, "stop", "test") startExec(c, id, http.StatusNotFound) dockerCmd(c, "start", "test") startExec(c, id, http.StatusNotFound) // make sure exec is created before pausing id = createExec(c, "test") dockerCmd(c, "pause", "test") startExec(c, id, http.StatusConflict) dockerCmd(c, "unpause", "test") startExec(c, id, http.StatusOK) } func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") id := createExec(c, "test") resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain") c.Assert(err, checker.IsNil) b, err := readBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) } // #19362 func (s *DockerSuite) 
TestExecAPIStartMultipleTimesError(c *check.C) { dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") execID := createExec(c, "test") startExec(c, execID, http.StatusOK) timeout := time.After(10 * time.Second) var execJSON struct{ Running bool } for { select { case <-timeout: c.Fatal("timeout waiting for exec to start") default: } inspectExec(c, execID, &execJSON) if !execJSON.Running { break } } startExec(c, execID, http.StatusConflict) } // #20638 func (s *DockerSuite) TestExecApiStartWithDetach(c *check.C) { name := "foo" dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "top") data := map[string]interface{}{ "cmd": []string{"true"}, "AttachStdin": true, } _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data) c.Assert(err, checker.IsNil, check.Commentf(string(b))) createResp := struct { ID string `json:"Id"` }{} c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json") c.Assert(err, checker.IsNil) b, err = readBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) resp, _, err := sockRequestRaw("GET", "/_ping", nil, "") c.Assert(err, checker.IsNil) if resp.StatusCode != http.StatusOK { c.Fatal("daemon is down, it should alive") } } func createExec(c *check.C, name string) string { _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) c.Assert(err, checker.IsNil, check.Commentf(string(b))) createResp := struct { ID string `json:"Id"` }{} c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) return createResp.ID } func startExec(c *check.C, id string, code int) { resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") c.Assert(err, 
checker.IsNil) b, err := readBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) c.Assert(resp.StatusCode, checker.Equals, code, comment) } func inspectExec(c *check.C, id string, out interface{}) { resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/exec/%s/json", id), nil, "") c.Assert(err, checker.IsNil) defer body.Close() c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) err = json.NewDecoder(body).Decode(out) c.Assert(err, checker.IsNil) } docker-1.10.3/integration-cli/docker_api_images_test.go000066400000000000000000000075741267010174400231650ustar00rootroot00000000000000package main import ( "encoding/json" "net/http" "net/url" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/engine-api/types" "github.com/go-check/check" ) func (s *DockerSuite) TestApiImagesFilter(c *check.C) { testRequires(c, DaemonIsLinux) name := "utest:tag1" name2 := "utest/docker:tag2" name3 := "utest:5000/docker:tag3" for _, n := range []string{name, name2, name3} { dockerCmd(c, "tag", "busybox", n) } type image types.Image getImages := func(filter string) []image { v := url.Values{} v.Set("filter", filter) status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var images []image err = json.Unmarshal(b, &images) c.Assert(err, checker.IsNil) return images } //incorrect number of matches returned images := getImages("utest*/*") c.Assert(images[0].RepoTags, checker.HasLen, 2) images = getImages("utest") c.Assert(images[0].RepoTags, checker.HasLen, 1) images = getImages("utest*") c.Assert(images[0].RepoTags, checker.HasLen, 1) images = getImages("*5000*/*") c.Assert(images[0].RepoTags, checker.HasLen, 1) } func (s *DockerSuite) TestApiImagesSaveAndLoad(c *check.C) { testRequires(c, Network) testRequires(c, DaemonIsLinux) out, err := buildImage("saveandload", "FROM hello-world\nENV FOO bar", false) c.Assert(err, 
checker.IsNil) id := strings.TrimSpace(out) res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "") c.Assert(err, checker.IsNil) defer body.Close() c.Assert(res.StatusCode, checker.Equals, http.StatusOK) dockerCmd(c, "rmi", id) res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar") c.Assert(err, checker.IsNil) defer loadBody.Close() c.Assert(res.StatusCode, checker.Equals, http.StatusOK) inspectOut, _ := dockerCmd(c, "inspect", "--format='{{ .Id }}'", id) c.Assert(strings.TrimSpace(string(inspectOut)), checker.Equals, id, check.Commentf("load did not work properly")) } func (s *DockerSuite) TestApiImagesDelete(c *check.C) { testRequires(c, Network) testRequires(c, DaemonIsLinux) name := "test-api-images-delete" out, err := buildImage(name, "FROM hello-world\nENV FOO bar", false) c.Assert(err, checker.IsNil) id := strings.TrimSpace(out) dockerCmd(c, "tag", name, "test:tag1") status, _, err := sockRequest("DELETE", "/images/"+id, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusConflict) status, _, err = sockRequest("DELETE", "/images/test:noexist", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNotFound) //Status Codes:404 – no such image status, _, err = sockRequest("DELETE", "/images/test:tag1", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) } func (s *DockerSuite) TestApiImagesHistory(c *check.C) { testRequires(c, Network) testRequires(c, DaemonIsLinux) name := "test-api-images-history" out, err := buildImage(name, "FROM hello-world\nENV FOO bar", false) c.Assert(err, checker.IsNil) id := strings.TrimSpace(out) status, body, err := sockRequest("GET", "/images/"+id+"/history", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var historydata []types.ImageHistory err = json.Unmarshal(body, &historydata) c.Assert(err, checker.IsNil, check.Commentf("Error on unmarshal")) c.Assert(historydata, 
checker.Not(checker.HasLen), 0) c.Assert(historydata[0].Tags[0], checker.Equals, "test-api-images-history:latest") } // #14846 func (s *DockerSuite) TestApiImagesSearchJSONContentType(c *check.C) { testRequires(c, Network) res, b, err := sockRequestRaw("GET", "/images/search?term=test", nil, "application/json") c.Assert(err, check.IsNil) b.Close() c.Assert(res.StatusCode, checker.Equals, http.StatusOK) c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") } docker-1.10.3/integration-cli/docker_api_info_test.go000066400000000000000000000013761267010174400226450ustar00rootroot00000000000000package main import ( "net/http" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestInfoApi(c *check.C) { endpoint := "/info" status, body, err := sockRequest("GET", endpoint, nil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) // always shown fields stringsToCheck := []string{ "ID", "Containers", "ContainersRunning", "ContainersPaused", "ContainersStopped", "Images", "ExecutionDriver", "LoggingDriver", "OperatingSystem", "NCPU", "OSType", "Architecture", "MemTotal", "KernelVersion", "Driver", "ServerVersion"} out := string(body) for _, linePrefix := range stringsToCheck { c.Assert(out, checker.Contains, linePrefix) } } docker-1.10.3/integration-cli/docker_api_inspect_test.go000066400000000000000000000137611267010174400233600ustar00rootroot00000000000000package main import ( "encoding/json" "net/http" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringutils" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/versions/v1p20" "github.com/go-check/check" ) func (s *DockerSuite) TestInspectApiContainerResponse(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) keysBase := []string{"Id", "State", "Created", "Path", 
"Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "MountLabel", "ProcessLabel", "GraphDriver"} cases := []struct { version string keys []string }{ {"v1.20", append(keysBase, "Mounts")}, {"v1.19", append(keysBase, "Volumes", "VolumesRW")}, } for _, cs := range cases { body := getInspectBody(c, cs.version, cleanedContainerID) var inspectJSON map[string]interface{} err := json.Unmarshal(body, &inspectJSON) c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", cs.version)) for _, key := range cs.keys { _, ok := inspectJSON[key] c.Check(ok, checker.True, check.Commentf("%s does not exist in response for version %s", key, cs.version)) } //Issue #6830: type not properly converted to JSON/back _, ok := inspectJSON["Path"].(bool) c.Assert(ok, checker.False, check.Commentf("Path of `true` should not be converted to boolean `true` via JSON marshalling")) } } func (s *DockerSuite) TestInspectApiContainerVolumeDriverLegacy(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) cases := []string{"v1.19", "v1.20"} for _, version := range cases { body := getInspectBody(c, version, cleanedContainerID) var inspectJSON map[string]interface{} err := json.Unmarshal(body, &inspectJSON) c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) config, ok := inspectJSON["Config"] c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) cfg := config.(map[string]interface{}) _, ok = cfg["VolumeDriver"] c.Assert(ok, checker.True, check.Commentf("Api version %s expected to include VolumeDriver in 'Config'", version)) } } func (s *DockerSuite) TestInspectApiContainerVolumeDriver(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) body := getInspectBody(c, "v1.21", cleanedContainerID) var 
inspectJSON map[string]interface{} err := json.Unmarshal(body, &inspectJSON) c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version 1.21")) config, ok := inspectJSON["Config"] c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) cfg := config.(map[string]interface{}) _, ok = cfg["VolumeDriver"] c.Assert(ok, checker.False, check.Commentf("Api version 1.21 expected to not include VolumeDriver in 'Config'")) config, ok = inspectJSON["HostConfig"] c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) cfg = config.(map[string]interface{}) _, ok = cfg["VolumeDriver"] c.Assert(ok, checker.True, check.Commentf("Api version 1.21 expected to include VolumeDriver in 'HostConfig'")) } func (s *DockerSuite) TestInspectApiImageResponse(c *check.C) { dockerCmd(c, "tag", "busybox:latest", "busybox:mytag") endpoint := "/images/busybox/json" status, body, err := sockRequest("GET", endpoint, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var imageJSON types.ImageInspect err = json.Unmarshal(body, &imageJSON) c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for latest version")) c.Assert(imageJSON.RepoTags, checker.HasLen, 2) c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:latest"), checker.Equals, true) c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:mytag"), checker.Equals, true) } // #17131, #17139, #17173 func (s *DockerSuite) TestInspectApiEmptyFieldsInConfigPre121(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) cases := []string{"v1.19", "v1.20"} for _, version := range cases { body := getInspectBody(c, version, cleanedContainerID) var inspectJSON map[string]interface{} err := json.Unmarshal(body, &inspectJSON) c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) config, ok := inspectJSON["Config"] 
c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) cfg := config.(map[string]interface{}) for _, f := range []string{"MacAddress", "NetworkDisabled", "ExposedPorts"} { _, ok := cfg[f] c.Check(ok, checker.True, check.Commentf("Api version %s expected to include %s in 'Config'", version, f)) } } } func (s *DockerSuite) TestInspectApiBridgeNetworkSettings120(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") containerID := strings.TrimSpace(out) waitRun(containerID) body := getInspectBody(c, "v1.20", containerID) var inspectJSON v1p20.ContainerJSON err := json.Unmarshal(body, &inspectJSON) c.Assert(err, checker.IsNil) settings := inspectJSON.NetworkSettings c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) } func (s *DockerSuite) TestInspectApiBridgeNetworkSettings121(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") containerID := strings.TrimSpace(out) waitRun(containerID) body := getInspectBody(c, "v1.21", containerID) var inspectJSON types.ContainerJSON err := json.Unmarshal(body, &inspectJSON) c.Assert(err, checker.IsNil) settings := inspectJSON.NetworkSettings c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) c.Assert(settings.Networks["bridge"], checker.Not(checker.IsNil)) c.Assert(settings.IPAddress, checker.Equals, settings.Networks["bridge"].IPAddress) } docker-1.10.3/integration-cli/docker_api_inspect_unix_test.go000066400000000000000000000020211267010174400244060ustar00rootroot00000000000000// +build !windows package main import ( "encoding/json" "fmt" "net/http" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // #16665 func (s *DockerSuite) TestInspectApiCpusetInConfigPre120(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, cgroupCpuset) name := "cpusetinconfig-pre120" dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true") status, body, err := 
sockRequest("GET", fmt.Sprintf("/v1.19/containers/%s/json", name), nil) c.Assert(status, check.Equals, http.StatusOK) c.Assert(err, check.IsNil) var inspectJSON map[string]interface{} err = json.Unmarshal(body, &inspectJSON) c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal body for version 1.19")) config, ok := inspectJSON["Config"] c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) cfg := config.(map[string]interface{}) _, ok = cfg["Cpuset"] c.Assert(ok, checker.True, check.Commentf("Api version 1.19 expected to include Cpuset in 'Config'")) } docker-1.10.3/integration-cli/docker_api_logs_test.go000066400000000000000000000052451267010174400226550ustar00rootroot00000000000000package main import ( "bufio" "bytes" "fmt" "net/http" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestLogsApiWithStdout(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) type logOut struct { out string res *http.Response err error } chLog := make(chan logOut) go func() { res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id), nil, "") if err != nil { chLog <- logOut{"", nil, err} return } defer body.Close() out, err := bufio.NewReader(body).ReadString('\n') if err != nil { chLog <- logOut{"", nil, err} return } chLog <- logOut{strings.TrimSpace(out), res, err} }() select { case l := <-chLog: c.Assert(l.err, checker.IsNil) c.Assert(l.res.StatusCode, checker.Equals, http.StatusOK) if !strings.HasSuffix(l.out, "hello") { c.Fatalf("expected log output to container 'hello', but it does not") } case <-time.After(2 * time.Second): c.Fatal("timeout waiting for logs to exit") } } func (s *DockerSuite) TestLogsApiNoStdoutNorStderr(c *check.C) { testRequires(c, 
DaemonIsLinux) name := "logs_test" dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil) c.Assert(status, checker.Equals, http.StatusBadRequest) c.Assert(err, checker.IsNil) expected := "Bad parameters: you must choose at least one stream" if !bytes.Contains(body, []byte(expected)) { c.Fatalf("Expected %s, got %s", expected, string(body[:])) } } // Regression test for #12704 func (s *DockerSuite) TestLogsApiFollowEmptyOutput(c *check.C) { testRequires(c, DaemonIsLinux) name := "logs_test" t0 := time.Now() dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10") _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") t1 := time.Now() c.Assert(err, checker.IsNil) body.Close() elapsed := t1.Sub(t0).Seconds() if elapsed > 5.0 { c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed) } } func (s *DockerSuite) TestLogsAPIContainerNotFound(c *check.C) { name := "nonExistentContainer" resp, _, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") c.Assert(err, checker.IsNil) c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) } docker-1.10.3/integration-cli/docker_api_network_test.go000066400000000000000000000240341267010174400233770ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "net" "net/http" "net/url" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" "github.com/docker/engine-api/types/network" "github.com/go-check/check" ) func (s *DockerSuite) TestApiNetworkGetDefaults(c *check.C) { testRequires(c, DaemonIsLinux) // By default docker daemon creates 3 networks. 
check if they are present defaults := []string{"bridge", "host", "none"} for _, nn := range defaults { c.Assert(isNetworkAvailable(c, nn), checker.Equals, true) } } func (s *DockerSuite) TestApiNetworkCreateDelete(c *check.C) { testRequires(c, DaemonIsLinux) // Create a network name := "testnetwork" config := types.NetworkCreate{ Name: name, CheckDuplicate: true, } id := createNetwork(c, config, true) c.Assert(isNetworkAvailable(c, name), checker.Equals, true) // delete the network and make sure it is deleted deleteNetwork(c, id, true) c.Assert(isNetworkAvailable(c, name), checker.Equals, false) } func (s *DockerSuite) TestApiNetworkCreateCheckDuplicate(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcheckduplicate" configOnCheck := types.NetworkCreate{ Name: name, CheckDuplicate: true, } configNotCheck := types.NetworkCreate{ Name: name, CheckDuplicate: false, } // Creating a new network first createNetwork(c, configOnCheck, true) c.Assert(isNetworkAvailable(c, name), checker.Equals, true) // Creating another network with same name and CheckDuplicate must fail createNetwork(c, configOnCheck, false) // Creating another network with same name and not CheckDuplicate must succeed createNetwork(c, configNotCheck, true) } func (s *DockerSuite) TestApiNetworkFilter(c *check.C) { testRequires(c, DaemonIsLinux) nr := getNetworkResource(c, getNetworkIDByName(c, "bridge")) c.Assert(nr.Name, checker.Equals, "bridge") } func (s *DockerSuite) TestApiNetworkInspect(c *check.C) { testRequires(c, DaemonIsLinux) // Inspect default bridge network nr := getNetworkResource(c, "bridge") c.Assert(nr.Name, checker.Equals, "bridge") // run a container and attach it to the default bridge network out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") containerID := strings.TrimSpace(out) containerIP := findContainerIP(c, "test", "bridge") // inspect default bridge network again and make sure the container is connected nr = getNetworkResource(c, nr.ID) 
c.Assert(nr.Driver, checker.Equals, "bridge") c.Assert(nr.Scope, checker.Equals, "local") c.Assert(nr.IPAM.Driver, checker.Equals, "default") c.Assert(len(nr.Containers), checker.Equals, 1) c.Assert(nr.Containers[containerID], checker.NotNil) ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) c.Assert(err, checker.IsNil) c.Assert(ip.String(), checker.Equals, containerIP) // IPAM configuration inspect ipam := network.IPAM{ Driver: "default", Config: []network.IPAMConfig{{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"}}, } config := types.NetworkCreate{ Name: "br0", Driver: "bridge", IPAM: ipam, Options: map[string]string{"foo": "bar", "opts": "dopts"}, } id0 := createNetwork(c, config, true) c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) nr = getNetworkResource(c, id0) c.Assert(len(nr.IPAM.Config), checker.Equals, 1) c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") c.Assert(nr.Options["foo"], checker.Equals, "bar") c.Assert(nr.Options["opts"], checker.Equals, "dopts") // delete the network and make sure it is deleted deleteNetwork(c, id0, true) c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, false) } func (s *DockerSuite) TestApiNetworkConnectDisconnect(c *check.C) { testRequires(c, DaemonIsLinux) // Create test network name := "testnetwork" config := types.NetworkCreate{ Name: name, } id := createNetwork(c, config, true) nr := getNetworkResource(c, id) c.Assert(nr.Name, checker.Equals, name) c.Assert(nr.ID, checker.Equals, id) c.Assert(len(nr.Containers), checker.Equals, 0) // run a container out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") containerID := strings.TrimSpace(out) // connect the container to the test network connectNetwork(c, nr.ID, containerID) // inspect the network to make sure container is connected nr = 
getNetworkResource(c, nr.ID) c.Assert(len(nr.Containers), checker.Equals, 1) c.Assert(nr.Containers[containerID], checker.NotNil) // check if container IP matches network inspect ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) c.Assert(err, checker.IsNil) containerIP := findContainerIP(c, "test", "testnetwork") c.Assert(ip.String(), checker.Equals, containerIP) // disconnect container from the network disconnectNetwork(c, nr.ID, containerID) nr = getNetworkResource(c, nr.ID) c.Assert(nr.Name, checker.Equals, name) c.Assert(len(nr.Containers), checker.Equals, 0) // delete the network deleteNetwork(c, nr.ID, true) } func (s *DockerSuite) TestApiNetworkIpamMultipleBridgeNetworks(c *check.C) { testRequires(c, DaemonIsLinux) // test0 bridge network ipam0 := network.IPAM{ Driver: "default", Config: []network.IPAMConfig{{Subnet: "192.178.0.0/16", IPRange: "192.178.128.0/17", Gateway: "192.178.138.100"}}, } config0 := types.NetworkCreate{ Name: "test0", Driver: "bridge", IPAM: ipam0, } id0 := createNetwork(c, config0, true) c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true) ipam1 := network.IPAM{ Driver: "default", Config: []network.IPAMConfig{{Subnet: "192.178.128.0/17", Gateway: "192.178.128.1"}}, } // test1 bridge network overlaps with test0 config1 := types.NetworkCreate{ Name: "test1", Driver: "bridge", IPAM: ipam1, } createNetwork(c, config1, false) c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false) ipam2 := network.IPAM{ Driver: "default", Config: []network.IPAMConfig{{Subnet: "192.169.0.0/16", Gateway: "192.169.100.100"}}, } // test2 bridge network does not overlap config2 := types.NetworkCreate{ Name: "test2", Driver: "bridge", IPAM: ipam2, } createNetwork(c, config2, true) c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true) // remove test0 and retry to create test1 deleteNetwork(c, id0, true) createNetwork(c, config1, true) c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true) // for networks w/o 
ipam specified, docker will choose proper non-overlapping subnets createNetwork(c, types.NetworkCreate{Name: "test3"}, true) c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true) createNetwork(c, types.NetworkCreate{Name: "test4"}, true) c.Assert(isNetworkAvailable(c, "test4"), checker.Equals, true) createNetwork(c, types.NetworkCreate{Name: "test5"}, true) c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true) for i := 1; i < 6; i++ { deleteNetwork(c, fmt.Sprintf("test%d", i), true) } } func (s *DockerSuite) TestApiCreateDeletePredefinedNetworks(c *check.C) { testRequires(c, DaemonIsLinux) createDeletePredefinedNetwork(c, "bridge") createDeletePredefinedNetwork(c, "none") createDeletePredefinedNetwork(c, "host") } func createDeletePredefinedNetwork(c *check.C, name string) { // Create pre-defined network config := types.NetworkCreate{ Name: name, CheckDuplicate: true, } shouldSucceed := false createNetwork(c, config, shouldSucceed) deleteNetwork(c, name, shouldSucceed) } func isNetworkAvailable(c *check.C, name string) bool { status, body, err := sockRequest("GET", "/networks", nil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) nJSON := []types.NetworkResource{} err = json.Unmarshal(body, &nJSON) c.Assert(err, checker.IsNil) for _, n := range nJSON { if n.Name == name { return true } } return false } func getNetworkIDByName(c *check.C, name string) string { var ( v = url.Values{} filterArgs = filters.NewArgs() ) filterArgs.Add("name", name) filterJSON, err := filters.ToParam(filterArgs) c.Assert(err, checker.IsNil) v.Set("filters", filterJSON) status, body, err := sockRequest("GET", "/networks?"+v.Encode(), nil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) nJSON := []types.NetworkResource{} err = json.Unmarshal(body, &nJSON) c.Assert(err, checker.IsNil) c.Assert(len(nJSON), checker.Equals, 1) return nJSON[0].ID } func getNetworkResource(c *check.C, id string) *types.NetworkResource { 
_, obj, err := sockRequest("GET", "/networks/"+id, nil) c.Assert(err, checker.IsNil) nr := types.NetworkResource{} err = json.Unmarshal(obj, &nr) c.Assert(err, checker.IsNil) return &nr } func createNetwork(c *check.C, config types.NetworkCreate, shouldSucceed bool) string { status, resp, err := sockRequest("POST", "/networks/create", config) if !shouldSucceed { c.Assert(status, checker.Not(checker.Equals), http.StatusCreated) return "" } c.Assert(status, checker.Equals, http.StatusCreated) c.Assert(err, checker.IsNil) var nr types.NetworkCreateResponse err = json.Unmarshal(resp, &nr) c.Assert(err, checker.IsNil) return nr.ID } func connectNetwork(c *check.C, nid, cid string) { config := types.NetworkConnect{ Container: cid, } status, _, err := sockRequest("POST", "/networks/"+nid+"/connect", config) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) } func disconnectNetwork(c *check.C, nid, cid string) { config := types.NetworkConnect{ Container: cid, } status, _, err := sockRequest("POST", "/networks/"+nid+"/disconnect", config) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) } func deleteNetwork(c *check.C, id string, shouldSucceed bool) { status, _, err := sockRequest("DELETE", "/networks/"+id, nil) if !shouldSucceed { c.Assert(status, checker.Not(checker.Equals), http.StatusOK) return } c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) } docker-1.10.3/integration-cli/docker_api_resize_test.go000066400000000000000000000031221267010174400232020ustar00rootroot00000000000000package main import ( "net/http" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestResizeApiResponse(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" status, _, err := 
sockRequest("POST", endpoint, nil) c.Assert(status, check.Equals, http.StatusOK) c.Assert(err, check.IsNil) } func (s *DockerSuite) TestResizeApiHeightWidthNoInt(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" status, _, err := sockRequest("POST", endpoint, nil) c.Assert(status, check.Equals, http.StatusInternalServerError) c.Assert(err, check.IsNil) } func (s *DockerSuite) TestResizeApiResponseWhenContainerNotStarted(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) // make sure the exited container is not running dockerCmd(c, "wait", cleanedContainerID) endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" status, body, err := sockRequest("POST", endpoint, nil) c.Assert(status, check.Equals, http.StatusInternalServerError) c.Assert(err, check.IsNil) c.Assert(string(body), checker.Contains, "is not running", check.Commentf("resize should fail with message 'Container is not running'")) } docker-1.10.3/integration-cli/docker_api_stats_test.go000066400000000000000000000170311267010174400230430ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "net/http" "os/exec" "runtime" "strconv" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/version" "github.com/docker/engine-api/types" "github.com/go-check/check" ) var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ") func (s *DockerSuite) TestApiStatsNoStreamGetCpu(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello'; usleep 100000; done") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) resp, body, 
err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") c.Assert(err, checker.IsNil) c.Assert(resp.ContentLength, checker.GreaterThan, int64(0), check.Commentf("should not use chunked encoding")) c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") var v *types.Stats err = json.NewDecoder(body).Decode(&v) c.Assert(err, checker.IsNil) body.Close() var cpuPercent = 0.0 cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage) systemDelta := float64(v.CPUStats.SystemUsage - v.PreCPUStats.SystemUsage) cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 c.Assert(cpuPercent, check.Not(checker.Equals), 0.0, check.Commentf("docker stats with no-stream get cpu usage failed: was %v", cpuPercent)) } func (s *DockerSuite) TestApiStatsStoppedContainerInGoroutines(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1") id := strings.TrimSpace(out) getGoRoutines := func() int { _, body, err := sockRequestRaw("GET", fmt.Sprintf("/info"), nil, "") c.Assert(err, checker.IsNil) info := types.Info{} err = json.NewDecoder(body).Decode(&info) c.Assert(err, checker.IsNil) body.Close() return info.NGoroutines } // When the HTTP connection is closed, the number of goroutines should not increase. 
routines := getGoRoutines() _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats", id), nil, "") c.Assert(err, checker.IsNil) body.Close() t := time.After(30 * time.Second) for { select { case <-t: c.Assert(getGoRoutines(), checker.LessOrEqualThan, routines) return default: if n := getGoRoutines(); n <= routines { return } time.Sleep(200 * time.Millisecond) } } } func (s *DockerSuite) TestApiStatsNetworkStats(c *check.C) { testRequires(c, SameHostDaemon) testRequires(c, DaemonIsLinux) // Run container for 30 secs out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) // Retrieve the container address contIP := findContainerIP(c, id, "bridge") numPings := 10 var preRxPackets uint64 var preTxPackets uint64 var postRxPackets uint64 var postTxPackets uint64 // Get the container networking stats before and after pinging the container nwStatsPre := getNetworkStats(c, id) for _, v := range nwStatsPre { preRxPackets += v.RxPackets preTxPackets += v.TxPackets } countParam := "-c" if runtime.GOOS == "windows" { countParam = "-n" // Ping count parameter is -n on Windows } pingout, err := exec.Command("ping", contIP, countParam, strconv.Itoa(numPings)).Output() pingouts := string(pingout[:]) c.Assert(err, checker.IsNil) nwStatsPost := getNetworkStats(c, id) for _, v := range nwStatsPost { postRxPackets += v.RxPackets postTxPackets += v.TxPackets } // Verify the stats contain at least the expected number of packets (account for ARP) expRxPkts := 1 + preRxPackets + uint64(numPings) expTxPkts := 1 + preTxPackets + uint64(numPings) c.Assert(postTxPackets, checker.GreaterOrEqualThan, expTxPkts, check.Commentf("Reported less TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, postTxPackets, pingouts)) c.Assert(postRxPackets, checker.GreaterOrEqualThan, expRxPkts, check.Commentf("Reported less Txbytes than expected. Expected >= %d. Found %d. 
%s", expRxPkts, postRxPackets, pingouts)) } func (s *DockerSuite) TestApiStatsNetworkStatsVersioning(c *check.C) { testRequires(c, SameHostDaemon) testRequires(c, DaemonIsLinux) // Run container for 30 secs out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) for i := 17; i <= 21; i++ { apiVersion := fmt.Sprintf("v1.%d", i) for _, statsJSONBlob := range getVersionedStats(c, id, 3, apiVersion) { if version.Version(apiVersion).LessThan("v1.21") { c.Assert(jsonBlobHasLTv121NetworkStats(statsJSONBlob), checker.Equals, true, check.Commentf("Stats JSON blob from API %s %#v does not look like a =v1.21 API stats structure", apiVersion, statsJSONBlob)) } } } } func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats { var st *types.StatsJSON _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") c.Assert(err, checker.IsNil) err = json.NewDecoder(body).Decode(&st) c.Assert(err, checker.IsNil) body.Close() return st.Networks } // getVersionedNetworkStats returns a slice of numStats stats results for the // container with id id using an API call with version apiVersion. Since the // stats result type differs between API versions, we simply return // []map[string]interface{}. 
func getVersionedStats(c *check.C, id string, numStats int, apiVersion string) []map[string]interface{} { stats := make([]map[string]interface{}, numStats) requestPath := fmt.Sprintf("/%s/containers/%s/stats?stream=true", apiVersion, id) _, body, err := sockRequestRaw("GET", requestPath, nil, "") c.Assert(err, checker.IsNil) defer body.Close() statsDecoder := json.NewDecoder(body) for i := range stats { err = statsDecoder.Decode(&stats[i]) c.Assert(err, checker.IsNil, check.Commentf("failed to decode %dth stat: %s", i, err)) } return stats } func jsonBlobHasLTv121NetworkStats(blob map[string]interface{}) bool { networkStatsIntfc, ok := blob["network"] if !ok { return false } networkStats, ok := networkStatsIntfc.(map[string]interface{}) if !ok { return false } for _, expectedKey := range expectedNetworkInterfaceStats { if _, ok := networkStats[expectedKey]; !ok { return false } } return true } func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool { networksStatsIntfc, ok := blob["networks"] if !ok { return false } networksStats, ok := networksStatsIntfc.(map[string]interface{}) if !ok { return false } for _, networkInterfaceStatsIntfc := range networksStats { networkInterfaceStats, ok := networkInterfaceStatsIntfc.(map[string]interface{}) if !ok { return false } for _, expectedKey := range expectedNetworkInterfaceStats { if _, ok := networkInterfaceStats[expectedKey]; !ok { return false } } } return true } func (s *DockerSuite) TestApiStatsContainerNotFound(c *check.C) { testRequires(c, DaemonIsLinux) status, _, err := sockRequest("GET", "/containers/nonexistent/stats", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNotFound) status, _, err = sockRequest("GET", "/containers/nonexistent/stats?stream=0", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNotFound) } 
docker-1.10.3/integration-cli/docker_api_test.go000066400000000000000000000061101267010174400216210ustar00rootroot00000000000000package main import ( "net/http" "net/http/httptest" "net/http/httputil" "os/exec" "strconv" "strings" "time" "github.com/docker/docker/api" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestApiOptionsRoute(c *check.C) { status, _, err := sockRequest("OPTIONS", "/", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) } func (s *DockerSuite) TestApiGetEnabledCors(c *check.C) { res, body, err := sockRequestRaw("GET", "/version", nil, "") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) body.Close() // TODO: @runcom incomplete tests, why old integration tests had this headers // and here none of the headers below are in the response? //c.Log(res.Header) //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*") //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") } func (s *DockerSuite) TestApiVersionStatusCode(c *check.C) { conn, err := sockConn(time.Duration(10 * time.Second)) c.Assert(err, checker.IsNil) client := httputil.NewClientConn(conn, nil) defer client.Close() req, err := http.NewRequest("GET", "/v999.0/version", nil) c.Assert(err, checker.IsNil) req.Header.Set("User-Agent", "Docker-Client/999.0 (os)") res, err := client.Do(req) c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) } func (s *DockerSuite) TestApiClientVersionNewerThanServer(c *check.C) { v := strings.Split(api.DefaultVersion.String(), ".") vMinInt, err := strconv.Atoi(v[1]) c.Assert(err, checker.IsNil) vMinInt++ v[1] = strconv.Itoa(vMinInt) version := strings.Join(v, ".") status, body, err := sockRequest("GET", "/v"+version+"/version", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusBadRequest) 
c.Assert(len(string(body)), check.Not(checker.Equals), 0) // Expected not empty body } func (s *DockerSuite) TestApiClientVersionOldNotSupported(c *check.C) { v := strings.Split(api.MinVersion.String(), ".") vMinInt, err := strconv.Atoi(v[1]) c.Assert(err, checker.IsNil) vMinInt-- v[1] = strconv.Itoa(vMinInt) version := strings.Join(v, ".") status, body, err := sockRequest("GET", "/v"+version+"/version", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusBadRequest) c.Assert(len(string(body)), checker.Not(check.Equals), 0) // Expected not empty body } func (s *DockerSuite) TestApiDockerApiVersion(c *check.C) { var svrVersion string server := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { url := r.URL.Path svrVersion = url })) defer server.Close() // Test using the env var first cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "version") cmd.Env = appendBaseEnv(false, "DOCKER_API_VERSION=xxx") out, _, _ := runCommandWithOutput(cmd) c.Assert(svrVersion, check.Equals, "/vxxx/version") if !strings.Contains(out, "API version: xxx") { c.Fatalf("Out didn't have 'xxx' for the API version, had:\n%s", out) } } docker-1.10.3/integration-cli/docker_api_update_unix_test.go000066400000000000000000000025441267010174400242350ustar00rootroot00000000000000// +build !windows package main import ( "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestApiUpdateContainer(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) testRequires(c, swapMemorySupport) name := "apiUpdateContainer" hostConfig := map[string]interface{}{ "Memory": 314572800, "MemorySwap": 524288000, } dockerCmd(c, "run", "-d", "--name", name, "-m", "200M", "busybox", "top") _, _, err := sockRequest("POST", "/containers/"+name+"/update", hostConfig) c.Assert(err, check.IsNil) memory, err := inspectField(name, "HostConfig.Memory") c.Assert(err, check.IsNil) if 
memory != "314572800" { c.Fatalf("Got the wrong memory value, we got %d, expected 314572800(300M).", memory) } file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" out, _ := dockerCmd(c, "exec", name, "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") memorySwap, err := inspectField(name, "HostConfig.MemorySwap") c.Assert(err, check.IsNil) if memorySwap != "524288000" { c.Fatalf("Got the wrong memorySwap value, we got %d, expected 524288000(500M).", memorySwap) } file = "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" out, _ = dockerCmd(c, "exec", name, "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") } docker-1.10.3/integration-cli/docker_api_version_test.go000066400000000000000000000011021267010174400233620ustar00rootroot00000000000000package main import ( "encoding/json" "net/http" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/engine-api/types" "github.com/go-check/check" ) func (s *DockerSuite) TestGetVersion(c *check.C) { status, body, err := sockRequest("GET", "/version", nil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) var v types.Version c.Assert(json.Unmarshal(body, &v), checker.IsNil) c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch")) } docker-1.10.3/integration-cli/docker_api_volumes_test.go000066400000000000000000000060101267010174400233720ustar00rootroot00000000000000package main import ( "encoding/json" "net/http" "path/filepath" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/engine-api/types" "github.com/go-check/check" ) func (s *DockerSuite) TestVolumesApiList(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = "c:" } dockerCmd(c, "run", "-d", "-v", prefix+"/foo", "busybox") status, b, err := sockRequest("GET", "/volumes", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var volumes 
types.VolumesListResponse c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) } func (s *DockerSuite) TestVolumesApiCreate(c *check.C) { config := types.VolumeCreateRequest{ Name: "test", } status, b, err := sockRequest("POST", "/volumes/create", config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) var vol types.Volume err = json.Unmarshal(b, &vol) c.Assert(err, checker.IsNil) c.Assert(filepath.Base(filepath.Dir(vol.Mountpoint)), checker.Equals, config.Name) } func (s *DockerSuite) TestVolumesApiRemove(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = "c:" } dockerCmd(c, "run", "-d", "-v", prefix+"/foo", "--name=test", "busybox") status, b, err := sockRequest("GET", "/volumes", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK) var volumes types.VolumesListResponse c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) v := volumes.Volumes[0] status, _, err = sockRequest("DELETE", "/volumes/"+v.Name, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("Should not be able to remove a volume that is in use")) dockerCmd(c, "rm", "-f", "test") status, data, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf(string(data))) } func (s *DockerSuite) TestVolumesApiInspect(c *check.C) { config := types.VolumeCreateRequest{ Name: "test", } status, b, err := sockRequest("POST", "/volumes/create", config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) status, b, err = sockRequest("GET", "/volumes", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK, 
check.Commentf(string(b))) var volumes types.VolumesListResponse c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) var vol types.Volume status, b, err = sockRequest("GET", "/volumes/"+config.Name, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) c.Assert(json.Unmarshal(b, &vol), checker.IsNil) c.Assert(vol.Name, checker.Equals, config.Name) } docker-1.10.3/integration-cli/docker_cli_attach_test.go000066400000000000000000000072701267010174400231530ustar00rootroot00000000000000package main import ( "bufio" "fmt" "io" "os/exec" "strings" "sync" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) const attachWait = 5 * time.Second func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { testRequires(c, DaemonIsLinux) endGroup := &sync.WaitGroup{} startGroup := &sync.WaitGroup{} endGroup.Add(3) startGroup.Add(3) err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") c.Assert(err, check.IsNil) startDone := make(chan struct{}) endDone := make(chan struct{}) go func() { endGroup.Wait() close(endDone) }() go func() { startGroup.Wait() close(startDone) }() for i := 0; i < 3; i++ { go func() { cmd := exec.Command(dockerBinary, "attach", "attacher") defer func() { cmd.Wait() endGroup.Done() }() out, err := cmd.StdoutPipe() if err != nil { c.Fatal(err) } if err := cmd.Start(); err != nil { c.Fatal(err) } buf := make([]byte, 1024) if _, err := out.Read(buf); err != nil && err != io.EOF { c.Fatal(err) } startGroup.Done() if !strings.Contains(string(buf), "hello") { c.Fatalf("unexpected output %s expected hello\n", string(buf)) } }() } select { case <-startDone: case <-time.After(attachWait): c.Fatalf("Attaches did not initialize properly") } dockerCmd(c, "kill", "attacher") select { case <-endDone: case 
<-time.After(attachWait): c.Fatalf("Attaches did not finish properly") } } func (s *DockerSuite) TestAttachTtyWithoutStdin(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") id := strings.TrimSpace(out) c.Assert(waitRun(id), check.IsNil) done := make(chan error) go func() { defer close(done) cmd := exec.Command(dockerBinary, "attach", id) if _, err := cmd.StdinPipe(); err != nil { done <- err return } expected := "cannot enable tty mode" if out, _, err := runCommandWithOutput(cmd); err == nil { done <- fmt.Errorf("attach should have failed") return } else if !strings.Contains(out, expected) { done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) return } }() select { case err := <-done: c.Assert(err, check.IsNil) case <-time.After(attachWait): c.Fatal("attach is running but should have failed") } } func (s *DockerSuite) TestAttachDisconnect(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") id := strings.TrimSpace(out) cmd := exec.Command(dockerBinary, "attach", id) stdin, err := cmd.StdinPipe() if err != nil { c.Fatal(err) } defer stdin.Close() stdout, err := cmd.StdoutPipe() c.Assert(err, check.IsNil) defer stdout.Close() c.Assert(cmd.Start(), check.IsNil) defer cmd.Process.Kill() _, err = stdin.Write([]byte("hello\n")) c.Assert(err, check.IsNil) out, err = bufio.NewReader(stdout).ReadString('\n') c.Assert(err, check.IsNil) c.Assert(strings.TrimSpace(out), check.Equals, "hello") c.Assert(stdin.Close(), check.IsNil) // Expect container to still be running after stdin is closed running, err := inspectField(id, "State.Running") c.Assert(err, check.IsNil) c.Assert(running, check.Equals, "true") } func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { testRequires(c, DaemonIsLinux) // Containers cannot be paused on Windows defer unpauseAllContainers() dockerCmd(c, "run", "-d", "--name=test", "busybox", "top") dockerCmd(c, "pause", 
"test") out, _, err := dockerCmdWithError("attach", "test") c.Assert(err, checker.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "You cannot attach to a paused container, unpause it first") } docker-1.10.3/integration-cli/docker_cli_attach_unix_test.go000066400000000000000000000130131267010174400242060ustar00rootroot00000000000000// +build !windows package main import ( "bufio" "os/exec" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" "github.com/kr/pty" ) // #9860 Make sure attach ends when container ends (with no errors) func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { out, _ := dockerCmd(c, "run", "-dti", "busybox", "/bin/sh", "-c", `trap 'exit 0' SIGTERM; while true; do sleep 1; done`) id := strings.TrimSpace(out) c.Assert(waitRun(id), check.IsNil) _, tty, err := pty.Open() c.Assert(err, check.IsNil) attachCmd := exec.Command(dockerBinary, "attach", id) attachCmd.Stdin = tty attachCmd.Stdout = tty attachCmd.Stderr = tty err = attachCmd.Start() c.Assert(err, check.IsNil) errChan := make(chan error) go func() { defer close(errChan) // Container is waiting for us to signal it to stop dockerCmd(c, "stop", id) // And wait for the attach command to end errChan <- attachCmd.Wait() }() // Wait for the docker to end (should be done by the // stop command in the go routine) dockerCmd(c, "wait", id) select { case err := <-errChan: c.Assert(err, check.IsNil) case <-time.After(attachWait): c.Fatal("timed out without attach returning") } } func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { name := "detachtest" cpty, tty, err := pty.Open() c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty errChan := make(chan error) go func() { errChan <- cmd.Run() close(errChan) }() c.Assert(waitRun(name), 
check.IsNil) cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) cpty.Write([]byte{17}) select { case err := <-errChan: c.Assert(err, check.IsNil) case <-time.After(5 * time.Second): c.Fatal("timeout while detaching") } cpty, tty, err = pty.Open() c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) cmd = exec.Command(dockerBinary, "attach", name) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty err = cmd.Start() c.Assert(err, checker.IsNil) bytes := make([]byte, 10) var nBytes int readErr := make(chan error, 1) go func() { time.Sleep(500 * time.Millisecond) cpty.Write([]byte("\n")) time.Sleep(500 * time.Millisecond) nBytes, err = cpty.Read(bytes) cpty.Close() readErr <- err }() select { case err := <-readErr: c.Assert(err, check.IsNil) case <-time.After(2 * time.Second): c.Fatal("timeout waiting for attach read") } err = cmd.Wait() c.Assert(err, checker.IsNil) c.Assert(string(bytes[:nBytes]), checker.Contains, "/ #") } // TestAttachDetach checks that attach in tty mode can be detached using the long container ID func (s *DockerSuite) TestAttachDetach(c *check.C) { out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") id := strings.TrimSpace(out) c.Assert(waitRun(id), check.IsNil) cpty, tty, err := pty.Open() c.Assert(err, check.IsNil) defer cpty.Close() cmd := exec.Command(dockerBinary, "attach", id) cmd.Stdin = tty stdout, err := cmd.StdoutPipe() c.Assert(err, check.IsNil) defer stdout.Close() err = cmd.Start() c.Assert(err, check.IsNil) c.Assert(waitRun(id), check.IsNil) _, err = cpty.Write([]byte("hello\n")) c.Assert(err, check.IsNil) out, err = bufio.NewReader(stdout).ReadString('\n') c.Assert(err, check.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) // escape sequence _, err = cpty.Write([]byte{16}) c.Assert(err, checker.IsNil) time.Sleep(100 * time.Millisecond) _, err = cpty.Write([]byte{17}) c.Assert(err, checker.IsNil) ch := make(chan struct{}) go func() { 
cmd.Wait() ch <- struct{}{} }() running, err := inspectField(id, "State.Running") c.Assert(err, checker.IsNil) c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) go func() { dockerCmd(c, "kill", id) }() select { case <-ch: case <-time.After(10 * time.Millisecond): c.Fatal("timed out waiting for container to exit") } } // TestAttachDetachTruncatedID checks that attach in tty mode can be detached func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") id := stringid.TruncateID(strings.TrimSpace(out)) c.Assert(waitRun(id), check.IsNil) cpty, tty, err := pty.Open() c.Assert(err, checker.IsNil) defer cpty.Close() cmd := exec.Command(dockerBinary, "attach", id) cmd.Stdin = tty stdout, err := cmd.StdoutPipe() c.Assert(err, checker.IsNil) defer stdout.Close() err = cmd.Start() c.Assert(err, checker.IsNil) _, err = cpty.Write([]byte("hello\n")) c.Assert(err, checker.IsNil) out, err = bufio.NewReader(stdout).ReadString('\n') c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) // escape sequence _, err = cpty.Write([]byte{16}) c.Assert(err, checker.IsNil) time.Sleep(100 * time.Millisecond) _, err = cpty.Write([]byte{17}) c.Assert(err, checker.IsNil) ch := make(chan struct{}) go func() { cmd.Wait() ch <- struct{}{} }() running, err := inspectField(id, "State.Running") c.Assert(err, checker.IsNil) c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) go func() { dockerCmd(c, "kill", id) }() select { case <-ch: case <-time.After(10 * time.Millisecond): c.Fatal("timed out waiting for container to exit") } } docker-1.10.3/integration-cli/docker_cli_authz_unix_test.go000066400000000000000000000253511267010174400241050ustar00rootroot00000000000000// +build !windows package main import ( "encoding/json" "fmt" "io/ioutil" "net/http" 
"net/http/httptest" "os" "strings" "bufio" "bytes" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/plugins" "github.com/go-check/check" "os/exec" "strconv" "time" ) const ( testAuthZPlugin = "authzplugin" unauthorizedMessage = "User unauthorized authz plugin" errorMessage = "something went wrong..." containerListAPI = "/containers/json" ) func init() { check.Suite(&DockerAuthzSuite{ ds: &DockerSuite{}, }) } type DockerAuthzSuite struct { server *httptest.Server ds *DockerSuite d *Daemon ctrl *authorizationController } type authorizationController struct { reqRes authorization.Response // reqRes holds the plugin response to the initial client request resRes authorization.Response // resRes holds the plugin response to the daemon response psRequestCnt int // psRequestCnt counts the number of calls to list container request api psResponseCnt int // psResponseCnt counts the number of calls to list containers response API requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller } func (s *DockerAuthzSuite) SetUpTest(c *check.C) { s.d = NewDaemon(c) s.ctrl = &authorizationController{} } func (s *DockerAuthzSuite) TearDownTest(c *check.C) { s.d.Stop() s.ds.TearDownTest(c) s.ctrl = nil } func (s *DockerAuthzSuite) SetUpSuite(c *check.C) { mux := http.NewServeMux() s.server = httptest.NewServer(mux) c.Assert(s.server, check.NotNil, check.Commentf("Failed to start a HTTP Server")) mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) c.Assert(err, check.IsNil) w.Write(b) }) mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { if s.ctrl.reqRes.Err != "" { w.WriteHeader(http.StatusInternalServerError) } b, err := json.Marshal(s.ctrl.reqRes) c.Assert(err, check.IsNil) w.Write(b) defer 
r.Body.Close() body, err := ioutil.ReadAll(r.Body) c.Assert(err, check.IsNil) authReq := authorization.Request{} err = json.Unmarshal(body, &authReq) c.Assert(err, check.IsNil) assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) assertAuthHeaders(c, authReq.RequestHeaders) // Count only container list api if strings.HasSuffix(authReq.RequestURI, containerListAPI) { s.ctrl.psRequestCnt++ } s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI) }) mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { if s.ctrl.resRes.Err != "" { w.WriteHeader(http.StatusInternalServerError) } b, err := json.Marshal(s.ctrl.resRes) c.Assert(err, check.IsNil) w.Write(b) defer r.Body.Close() body, err := ioutil.ReadAll(r.Body) c.Assert(err, check.IsNil) authReq := authorization.Request{} err = json.Unmarshal(body, &authReq) c.Assert(err, check.IsNil) assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) assertAuthHeaders(c, authReq.ResponseHeaders) // Count only container list api if strings.HasSuffix(authReq.RequestURI, containerListAPI) { s.ctrl.psResponseCnt++ } }) err := os.MkdirAll("/etc/docker/plugins", 0755) c.Assert(err, checker.IsNil) fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) c.Assert(err, checker.IsNil) } // assertAuthHeaders validates authentication headers are removed func assertAuthHeaders(c *check.C, headers map[string]string) error { for k := range headers { if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { c.Errorf("Found authentication headers in request '%v'", headers) } } return nil } // assertBody asserts that body is removed for non text/json requests func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) { if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { //return 
fmt.Errorf("Body included for authentication endpoint %s", string(body)) c.Errorf("Body included for authentication endpoint %s", string(body)) } for k, v := range headers { if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { return } } if len(body) > 0 { c.Errorf("Body included while it should not (Headers: '%v')", headers) } } func (s *DockerAuthzSuite) TearDownSuite(c *check.C) { if s.server == nil { return } s.server.Close() err := os.RemoveAll("/etc/docker/plugins") c.Assert(err, checker.IsNil) } func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { // start the daemon and load busybox, --net=none build fails otherwise // cause it needs to pull busybox c.Assert(s.d.StartWithBusybox(), check.IsNil) // restart the daemon and enable the plugin, otherwise busybox loading // is blocked by the plugin itself c.Assert(s.d.Restart("--authorization-plugin="+testAuthZPlugin), check.IsNil) s.ctrl.reqRes.Allow = true s.ctrl.resRes.Allow = true // Ensure command successful out, err := s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil) id := strings.TrimSpace(out) assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) out, err = s.d.Cmd("ps") c.Assert(err, check.IsNil) c.Assert(assertContainerList(out, []string{id}), check.Equals, true) c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) } func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) { err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) c.Assert(err, check.IsNil) s.ctrl.reqRes.Allow = false s.ctrl.reqRes.Msg = unauthorizedMessage // Ensure command is blocked res, err := s.d.Cmd("ps") c.Assert(err, check.NotNil) c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) c.Assert(s.ctrl.psResponseCnt, check.Equals, 0) // Ensure unauthorized message appears in response c.Assert(res, check.Equals, 
fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage))
}

// TestAuthZPluginDenyResponse verifies that when the plugin allows the request
// but denies the *response*, the CLI command fails and the daemon surfaces the
// plugin's denial message verbatim.
func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) {
	err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
	c.Assert(err, check.IsNil)
	// Allow on the request path, deny on the response path.
	s.ctrl.reqRes.Allow = true
	s.ctrl.resRes.Allow = false
	s.ctrl.resRes.Msg = unauthorizedMessage

	// Ensure command is blocked
	res, err := s.d.Cmd("ps")
	c.Assert(err, check.NotNil)
	// Unlike a request-side denial, both plugin hooks fire exactly once here.
	c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
	c.Assert(s.ctrl.psResponseCnt, check.Equals, 1)

	// Ensure unauthorized message appears in response
	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage))
}

// TestAuthZPluginAllowEventStream verifies the event stream propagates
// correctly after the request passes through the authorization plugin.
func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) {
	testRequires(c, DaemonIsLinux)

	// Start the authorization plugin
	err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
	c.Assert(err, check.IsNil)
	s.ctrl.reqRes.Allow = true
	s.ctrl.resRes.Allow = true

	startTime := strconv.FormatInt(daemonTime(c).Unix(), 10)
	// Add another command to enable event pipelining
	eventsCmd := exec.Command(s.d.cmd.Path, "--host", s.d.sock(), "events", "--since", startTime)
	stdout, err := eventsCmd.StdoutPipe()
	// NOTE(review): the Assert inside the if is redundant — Assert on a
	// non-nil err fails regardless of the surrounding condition.
	if err != nil {
		c.Assert(err, check.IsNil)
	}

	// observer tails the `docker events` output line by line.
	observer := eventObserver{
		buffer:    new(bytes.Buffer),
		command:   eventsCmd,
		scanner:   bufio.NewScanner(stdout),
		startTime: startTime,
	}

	err = observer.Start()
	c.Assert(err, checker.IsNil)
	defer observer.Stop()

	// Create a container and wait for the creation events
	_, err = s.d.Cmd("pull", "busybox")
	c.Assert(err, check.IsNil)

	out, err := s.d.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, check.IsNil)
	containerID := strings.TrimSpace(out)

	// One channel per expected lifecycle event; signalled when matched.
	events := map[string]chan bool{
		"create": make(chan bool),
		"start":  make(chan bool),
	}

	matcher := matchEventLine(containerID, "container", events)
	processor := processEventMatch(events)
	go observer.Match(matcher, processor)

	// Ensure all events are received, with a 5s deadline per event.
	for event, eventChannel := range events {
		select {
		case <-time.After(5 * time.Second):
			// Fail the test
			observer.CheckEventError(c, containerID, event, matcher)
			c.FailNow()
		case <-eventChannel:
			// Ignore, event received
		}
	}

	// Ensure both events and container endpoints are passed to the authorization plugin
	assertURIRecorded(c, s.ctrl.requestsURIs, "/events")
	assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create")
	assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID))
}

// TestAuthZPluginErrorResponse verifies that a plugin *error* (as opposed to a
// denial) on the response path is reported to the client as a plugin failure.
func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) {
	err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
	c.Assert(err, check.IsNil)
	s.ctrl.reqRes.Allow = true
	s.ctrl.resRes.Err = errorMessage

	// Ensure command is blocked
	res, err := s.d.Cmd("ps")
	c.Assert(err, check.NotNil)
	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage))
}

// TestAuthZPluginErrorRequest is the request-path counterpart of
// TestAuthZPluginErrorResponse.
func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) {
	err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
	c.Assert(err, check.IsNil)
	s.ctrl.reqRes.Err = errorMessage

	// Ensure command is blocked
	res, err := s.d.Cmd("ps")
	c.Assert(err, check.NotNil)
	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage))
}

// TestAuthZPluginEnsureNoDuplicatePluginRegistration checks that passing the
// same plugin name twice on the daemon command line registers it only once.
func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) {
	c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil)

	s.ctrl.reqRes.Allow = true
	s.ctrl.resRes.Allow = true

	out, err := s.d.Cmd("ps")
	c.Assert(err, check.IsNil, check.Commentf(out))

	// assert plugin is only called once..
c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) } // assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin func assertURIRecorded(c *check.C, uris []string, uri string) { var found bool for _, u := range uris { if strings.Contains(u, uri) { found = true break } } if !found { c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) } } docker-1.10.3/integration-cli/docker_cli_build_test.go000066400000000000000000005324071267010174400230130ustar00rootroot00000000000000package main import ( "archive/tar" "bytes" "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "reflect" "regexp" "runtime" "strconv" "strings" "text/template" "time" "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringutils" "github.com/go-check/check" ) func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildjsonemptyrun" _, err := buildImage( name, ` FROM busybox RUN [] `, true) if err != nil { c.Fatal("error when dealing with a RUN statement with empty JSON array") } } func (s *DockerSuite) TestBuildEmptyWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildemptywhitespace" _, err := buildImage( name, ` FROM busybox COPY quux \ bar `, true) if err == nil { c.Fatal("no error when dealing with a COPY statement with no content on the same line") } } func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildshcmdjsonentrypoint" _, err := buildImage( name, ` FROM busybox ENTRYPOINT ["/bin/echo"] CMD echo test `, true) if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "run", "--rm", name) if strings.TrimSpace(out) != "/bin/sh -c echo test" { c.Fatalf("CMD did not contain /bin/sh -c : %s", out) } } func (s *DockerSuite) 
TestBuildEnvironmentReplacementUser(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" _, err := buildImage(name, ` FROM scratch ENV user foo USER ${user} `, true) if err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.User") if err != nil { c.Fatal(err) } if res != `"foo"` { c.Fatal("User foo from environment not in Config.User on image") } } func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" _, err := buildImage(name, ` FROM scratch ENV volume /quux VOLUME ${volume} `, true) if err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Volumes") if err != nil { c.Fatal(err) } var volumes map[string]interface{} if err := json.Unmarshal([]byte(res), &volumes); err != nil { c.Fatal(err) } if _, ok := volumes["/quux"]; !ok { c.Fatal("Volume /quux from environment not in Config.Volumes on image") } } func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" _, err := buildImage(name, ` FROM scratch ENV port 80 EXPOSE ${port} ENV ports " 99 100 " EXPOSE ${ports} `, true) if err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.ExposedPorts") if err != nil { c.Fatal(err) } var exposedPorts map[string]interface{} if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { c.Fatal(err) } exp := []int{80, 99, 100} for _, p := range exp { tmp := fmt.Sprintf("%d/tcp", p) if _, ok := exposedPorts[tmp]; !ok { c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p) } } } func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" _, err := buildImage(name, ` FROM busybox ENV MYWORKDIR /work RUN mkdir ${MYWORKDIR} WORKDIR ${MYWORKDIR} `, true) if err != nil { c.Fatal(err) } } func (s 
*DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildenvironmentreplacement"

	// ADD/COPY sources and destinations must all support ENV substitution,
	// including the ${var:-default} fallback form.
	ctx, err := fakeContext(`
  FROM scratch
  ENV baz foo
  ENV quux bar
  ENV dot .
  ENV fee fff
  ENV gee ggg

  ADD ${baz} ${dot}
  COPY ${quux} ${dot}
  ADD ${zzz:-${fee}} ${dot}
  COPY ${zzz:-${gee}} ${dot}
  `,
		map[string]string{
			"foo": "test1",
			"bar": "test2",
			"fff": "test3",
			"ggg": "test4",
		})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()

	if _, err := buildImageFromContext(name, ctx, true); err != nil {
		c.Fatal(err)
	}
}

// TestBuildEnvironmentReplacementEnv checks ENV-in-ENV substitution and the
// various quoting/escaping forms: $foo must expand in unescaped contexts and
// stay literal when single-quoted or backslash-escaped.
func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) {
	testRequires(c, DaemonIsLinux)

	name := "testbuildenvironmentreplacement"

	_, err := buildImage(name, `
  FROM busybox
  ENV foo zzz
  ENV bar ${foo}
  ENV abc1='$foo'
  ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}"
  RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo)
  ENV abc2="\$foo"
  RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo)
  ENV abc3 '$foo'
  RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo)
  ENV abc4 "\$foo"
  RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo)
  `, true)

	if err != nil {
		c.Fatal(err)
	}

	res, err := inspectFieldJSON(name, "Config.Env")
	if err != nil {
		c.Fatal(err)
	}

	envResult := []string{}

	if err = unmarshalJSON([]byte(res), &envResult); err != nil {
		c.Fatal(err)
	}

	found := false
	envCount := 0

	for _, env := range envResult {
		parts := strings.SplitN(env, "=", 2)
		if parts[0] == "bar" {
			found = true
			if parts[1] != "zzz" {
				c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
			}
		} else if strings.HasPrefix(parts[0], "env") {
			// env1..env4 all expand $foo, so each must equal "zzz".
			// FIX: the original had this exact `else if` condition duplicated,
			// making the second branch unreachable dead code, and its failure
			// message claimed the expected value was 'foo' while the check was
			// against "zzz". Collapsed to one branch with a truthful message.
			envCount++
			if parts[1] != "zzz" {
				c.Fatalf("%s should be 'zzz' but instead its %q", parts[0], parts[1])
			}
		}
	}

	if !found {
		c.Fatal("Never found the `bar` env variable")
	}

	if envCount != 4 {
		c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
	}
}

// TestBuildHandleEscapes checks backslash escaping of ${...} in ENV/VOLUME:
// an unescaped reference expands, an escaped one stays literal.
func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildhandleescapes"

	_, err := buildImage(name,
		`
  FROM scratch
  ENV FOO bar
  VOLUME ${FOO}
  `, true)

	if err != nil {
		c.Fatal(err)
	}

	var result map[string]map[string]struct{}

	res, err := inspectFieldJSON(name, "Config.Volumes")
	if err != nil {
		c.Fatal(err)
	}

	if err = unmarshalJSON([]byte(res), &result); err != nil {
		c.Fatal(err)
	}

	if _, ok := result["bar"]; !ok {
		c.Fatal("Could not find volume bar set from env foo in volumes table")
	}

	deleteImages(name)

	_, err = buildImage(name,
		`
  FROM scratch
  ENV FOO bar
  VOLUME \${FOO}
  `, true)

	if err != nil {
		c.Fatal(err)
	}

	res, err = inspectFieldJSON(name, "Config.Volumes")
	if err != nil {
		c.Fatal(err)
	}

	if err = unmarshalJSON([]byte(res), &result); err != nil {
		c.Fatal(err)
	}

	if _, ok := result["${FOO}"]; !ok {
		c.Fatal("Could not find volume ${FOO} set from env foo in volumes table")
	}

	deleteImages(name)

	// this test in particular provides *7* backslashes and expects 6 to come back.
	// Like above, the first escape is swallowed and the rest are treated as
	// literals, this one is just less obvious because of all the character noise.
_, err = buildImage(name, ` FROM scratch ENV FOO bar VOLUME \\\\\\\${FOO} `, true) if err != nil { c.Fatal(err) } res, err = inspectFieldJSON(name, "Config.Volumes") if err != nil { c.Fatal(err) } if err = unmarshalJSON([]byte(res), &result); err != nil { c.Fatal(err) } if _, ok := result[`\\\${FOO}`]; !ok { c.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result) } } func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildonbuildlowercase" name2 := "testbuildonbuildlowercase2" _, err := buildImage(name, ` FROM busybox onbuild run echo quux `, true) if err != nil { c.Fatal(err) } _, out, err := buildImageWithOut(name2, fmt.Sprintf(` FROM %s `, name), true) if err != nil { c.Fatal(err) } if !strings.Contains(out, "quux") { c.Fatalf("Did not receive the expected echo text, got %s", out) } if strings.Contains(out, "ONBUILD ONBUILD") { c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) } } func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvescapes" _, err := buildImage(name, ` FROM busybox ENV TEST foo CMD echo \$ `, true) if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "run", "-t", name) if strings.TrimSpace(out) != "$" { c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) } } func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvoverwrite" _, err := buildImage(name, ` FROM busybox ENV TEST foo CMD echo ${TEST} `, true) if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) if strings.TrimSpace(out) != "bar" { c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) } } func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainerInSourceImage(c *check.C) { testRequires(c, DaemonIsLinux) name := 
"testbuildonbuildforbiddenmaintainerinsourceimage"

	out, _ := dockerCmd(c, "create", "busybox", "true")

	cleanedContainerID := strings.TrimSpace(out)

	// `commit --run` injects a raw OnBuild trigger that `docker build`
	// itself would refuse, so the rejection must happen at FROM time.
	dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild")

	_, err := buildImage(name,
		`FROM onbuild`,
		true)
	if err != nil {
		if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") {
			c.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err)
		}
	} else {
		c.Fatal("Error must not be nil")
	}
}

// TestBuildOnBuildForbiddenFromInSourceImage: FROM is likewise forbidden as
// an ONBUILD trigger smuggled in via `commit --run`.
func (s *DockerSuite) TestBuildOnBuildForbiddenFromInSourceImage(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildonbuildforbiddenfrominsourceimage"

	out, _ := dockerCmd(c, "create", "busybox", "true")

	cleanedContainerID := strings.TrimSpace(out)

	dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild")

	_, err := buildImage(name,
		`FROM onbuild`,
		true)
	if err != nil {
		if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") {
			c.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err)
		}
	} else {
		c.Fatal("Error must not be nil")
	}
}

// TestBuildOnBuildForbiddenChainedInSourceImage: ONBUILD may not trigger
// another ONBUILD (no chaining).
func (s *DockerSuite) TestBuildOnBuildForbiddenChainedInSourceImage(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildonbuildforbiddenchainedinsourceimage"

	out, _ := dockerCmd(c, "create", "busybox", "true")

	cleanedContainerID := strings.TrimSpace(out)

	dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild")

	_, err := buildImage(name,
		`FROM onbuild`,
		true)
	if err != nil {
		if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") {
			c.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err)
		}
	} else {
		c.Fatal("Error must not be nil")
	}
}

// TestBuildOnBuildCmdEntrypointJSON: ONBUILD CMD/ENTRYPOINT in JSON form must
// carry over into a child image and compose into `echo "hello world"`.
func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name1 := "onbuildcmd"
	name2 := "onbuildgenerated"

	_, err := buildImage(name1, `
FROM busybox
ONBUILD CMD ["hello world"]
ONBUILD ENTRYPOINT ["echo"]
ONBUILD RUN ["true"]`,
		false)

	if err != nil {
		c.Fatal(err)
	}

	_, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false)

	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "-t", name2)

	if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
		c.Fatal("did not get echo output from onbuild", out)
	}
}

// TestBuildOnBuildEntrypointJSON: an inherited ONBUILD ENTRYPOINT must apply
// to a CMD supplied by the child Dockerfile.
func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name1 := "onbuildcmd"
	name2 := "onbuildgenerated"

	_, err := buildImage(name1, `
FROM busybox
ONBUILD ENTRYPOINT ["echo"]`,
		false)

	if err != nil {
		c.Fatal(err)
	}

	_, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false)

	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "-t", name2)

	if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
		c.Fatal("got malformed output from onbuild", out)
	}
}

// TestBuildCacheAdd: an ADD of a *different* remote file must not hit the
// build cache.
func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildtwoimageswithadd"
	server, err := fakeStorage(map[string]string{
		"robots.txt": "hello",
		"index.html": "world",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	if _, err := buildImage(name,
		fmt.Sprintf(`FROM scratch
		ADD %s/robots.txt /`, server.URL()),
		true); err != nil {
		c.Fatal(err)
	}
	// NOTE(review): this second check is redundant dead code — `err` was last
	// assigned by fakeStorage above and already checked there; the buildImage
	// error is scoped to the preceding `if`.
	if err != nil {
		c.Fatal(err)
	}
	deleteImages(name)
	_, out, err := buildImageWithOut(name,
		fmt.Sprintf(`FROM scratch
		ADD %s/index.html /`, server.URL()),
		true)
	if err != nil {
		c.Fatal(err)
	}
	if strings.Contains(out, "Using cache") {
		c.Fatal("2nd build used cache on ADD, it shouldn't")
	}
}

// TestBuildLastModified: rebuilding with an unchanged remote ADD source must
// keep the file's mtime; changing the source must change it.
func (s *DockerSuite) TestBuildLastModified(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildlastmodified"

	server, err := fakeStorage(map[string]string{
		"file": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	var out, out2 string

	dFmt := `FROM busybox
ADD %s/file /
RUN ls -le /file`

	dockerfile := fmt.Sprintf(dFmt, server.URL())

	if _,
out, err = buildImageWithOut(name, dockerfile, false); err != nil { c.Fatal(err) } originMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out) // Make sure our regexp is correct if strings.Index(originMTime, "/file") < 0 { c.Fatalf("Missing ls info on 'file':\n%s", out) } // Build it again and make sure the mtime of the file didn't change. // Wait a few seconds to make sure the time changed enough to notice time.Sleep(2 * time.Second) if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { c.Fatal(err) } newMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out2) if newMTime != originMTime { c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime) } // Now 'touch' the file and make sure the timestamp DID change this time // Create a new fakeStorage instead of just using Add() to help windows server, err = fakeStorage(map[string]string{ "file": "hello", }) if err != nil { c.Fatal(err) } defer server.Close() dockerfile = fmt.Sprintf(dFmt, server.URL()) if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { c.Fatal(err) } newMTime = regexp.MustCompile(`root.*/file.*\n`).FindString(out2) if newMTime == originMTime { c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime) } } func (s *DockerSuite) TestBuildSixtySteps(c *check.C) { testRequires(c, DaemonIsLinux) name := "foobuildsixtysteps" ctx, err := fakeContext("FROM scratch\n"+strings.Repeat("ADD foo /\n", 60), map[string]string{ "foo": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddimg" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists ADD test_file / RUN [ $(ls -l 
/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Issue #3960: "ADD src ." hangs func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddsinglefiletoworkdir" ctx, err := fakeContext(`FROM busybox ADD test_file .`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() errChan := make(chan error) go func() { _, err := buildImageFromContext(name, ctx, true) errChan <- err close(errChan) }() select { case <-time.After(15 * time.Second): c.Fatal("Build with adding to workdir timed out") case err := <-errChan: c.Assert(err, check.IsNil) } } func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddsinglefiletoexistdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists ADD test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { testRequires(c, DaemonIsLinux) server, err := fakeStorage(map[string]string{ "robots.txt": "hello", }) if err != nil { c.Fatal(err) } defer server.Close() name 
:= "testcopymultiplefilestofile" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_file1 test_file2 /exists/ ADD test_file3 test_file4 %s/robots.txt /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] `, server.URL()), map[string]string{ "test_file1": "test1", "test_file2": "test2", "test_file3": "test3", "test_file4": "test4", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddMultipleFilesToFile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddmultiplefilestofile" ctx, err := fakeContext(`FROM scratch ADD file1.txt file2.txt test `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testjsonaddmultiplefilestofile" ctx, err := fakeContext(`FROM scratch ADD ["file1.txt", "file2.txt", "test"] `, 
map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildAddMultipleFilesToFileWild(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddmultiplefilestofilewild" ctx, err := fakeContext(`FROM scratch ADD file*.txt test `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFileWild(c *check.C) { testRequires(c, DaemonIsLinux) name := "testjsonaddmultiplefilestofilewild" ctx, err := fakeContext(`FROM scratch ADD ["file*.txt", "test"] `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildCopyMultipleFilesToFile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopymultiplefilestofile" ctx, err := fakeContext(`FROM scratch COPY file1.txt file2.txt test `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := 
"When using COPY with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildJSONCopyMultipleFilesToFile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testjsoncopymultiplefilestofile" ctx, err := fakeContext(`FROM scratch COPY ["file1.txt", "file2.txt", "test"] `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddfilewithwhitespace" ctx, err := fakeContext(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" ADD [ "test file1", "/test_file1" ] ADD [ "test_file2", "/test file2" ] ADD [ "test file3", "/test file3" ] ADD [ "test dir/test_file4", "/test_dir/test_file4" ] ADD [ "test_dir/test_file5", "/test dir/test_file5" ] ADD [ "test dir/test_file6", "/test dir/test_file6" ] RUN [ $(cat "/test_file1") = 'test1' ] RUN [ $(cat "/test file2") = 'test2' ] RUN [ $(cat "/test file3") = 'test3' ] RUN [ $(cat "/test_dir/test_file4") = 'test4' ] RUN [ $(cat "/test dir/test_file5") = 'test5' ] RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, map[string]string{ "test file1": "test1", "test_file2": "test2", "test file3": "test3", "test dir/test_file4": "test4", "test_dir/test_file5": "test5", "test dir/test_file6": "test6", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func 
(s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopyfilewithwhitespace" ctx, err := fakeContext(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" COPY [ "test file1", "/test_file1" ] COPY [ "test_file2", "/test file2" ] COPY [ "test file3", "/test file3" ] COPY [ "test dir/test_file4", "/test_dir/test_file4" ] COPY [ "test_dir/test_file5", "/test dir/test_file5" ] COPY [ "test dir/test_file6", "/test dir/test_file6" ] RUN [ $(cat "/test_file1") = 'test1' ] RUN [ $(cat "/test file2") = 'test2' ] RUN [ $(cat "/test file3") = 'test3' ] RUN [ $(cat "/test_dir/test_file4") = 'test4' ] RUN [ $(cat "/test dir/test_file5") = 'test5' ] RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, map[string]string{ "test file1": "test1", "test_file2": "test2", "test file3": "test3", "test dir/test_file4": "test4", "test_dir/test_file5": "test5", "test dir/test_file6": "test6", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddMultipleFilesToFileWithWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddmultiplefilestofilewithwhitespace" ctx, err := fakeContext(`FROM busybox ADD [ "test file1", "test file2", "test" ] `, map[string]string{ "test file1": "test1", "test file2": "test2", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildCopyMultipleFilesToFileWithWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopymultiplefilestofilewithwhitespace" ctx, err := fakeContext(`FROM busybox COPY [ "test file1", "test file2", "test" ] `, 
map[string]string{ "test file1": "test1", "test file2": "test2", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopywildcard" server, err := fakeStorage(map[string]string{ "robots.txt": "hello", "index.html": "world", }) if err != nil { c.Fatal(err) } defer server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM busybox COPY file*.txt /tmp/ RUN ls /tmp/file1.txt /tmp/file2.txt RUN mkdir /tmp1 COPY dir* /tmp1/ RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file RUN mkdir /tmp2 ADD dir/*dir %s/robots.txt /tmp2/ RUN ls /tmp2/nest_nest_file /tmp2/robots.txt `, server.URL()), map[string]string{ "file1.txt": "test1", "file2.txt": "test2", "dir/nested_file": "nested file", "dir/nested_dir/nest_nest_file": "2 times nested", "dirt": "dirty", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } // Now make sure we use a cache the 2nd time id2, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("didn't use the cache") } } func (s *DockerSuite) TestBuildCopyWildcardNoFind(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopywildcardnofind" ctx, err := fakeContext(`FROM busybox COPY file*.txt /tmp/ `, nil) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err == nil { c.Fatal("should have failed to find a file") } if !strings.Contains(err.Error(), "No source files were specified") { c.Fatalf("Wrong error %v, must be about no source files", err) } } func (s *DockerSuite) 
TestBuildCopyWildcardInName(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopywildcardinname" ctx, err := fakeContext(`FROM busybox COPY *.txt /tmp/ RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] `, map[string]string{"*.txt": "hi there"}) if err != nil { // Normally we would do c.Fatal(err) here but given that // the odds of this failing are so rare, it must be because // the OS we're running the client on doesn't support * in // filenames (like windows). So, instead of failing the test // just let it pass. Then we don't need to explicitly // say which OSs this works on or not. return } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatalf("should have built: %q", err) } } func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopywildcardcache" ctx, err := fakeContext(`FROM busybox COPY file1.txt /tmp/`, map[string]string{ "file1.txt": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } // Now make sure we use a cache the 2nd time even with wild cards. 
// Use the same context so the file is the same and the checksum will match ctx.Add("Dockerfile", `FROM busybox COPY file*.txt /tmp/`) id2, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("didn't use the cache") } } func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddsinglefiletononexistingdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists ADD test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { testRequires(c, DaemonIsLinux) name := "testadddircontenttoroot" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists ADD test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testadddircontenttoexistingdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN 
mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists ADD test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddwholedirtoroot" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists ADD test_dir /test_dir RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Testing #5941 func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddetctoroot" ctx, err := fakeContext(`FROM scratch ADD . 
/`, map[string]string{ "etc/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Testing #9401 func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { testRequires(c, DaemonIsLinux) name := "testaddpreservesfilesspecialbits" ctx, err := fakeContext(`FROM busybox ADD suidbin /usr/bin/suidbin RUN chmod 4755 /usr/bin/suidbin RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] ADD ./data/ / RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, map[string]string{ "suidbin": "suidbin", "/data/usr/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopysinglefiletoroot" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists COPY test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Issue #3960: "ADD src ." 
hangs - adapted for COPY func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopysinglefiletoworkdir" ctx, err := fakeContext(`FROM busybox COPY test_file .`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() errChan := make(chan error) go func() { _, err := buildImageFromContext(name, ctx, true) errChan <- err close(errChan) }() select { case <-time.After(15 * time.Second): c.Fatal("Build with adding to workdir timed out") case err := <-errChan: c.Assert(err, check.IsNil) } } func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopysinglefiletoexistdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopysinglefiletononexistdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists COPY test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, 
map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopydircontenttoroot" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists COPY test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopydircontenttoexistdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopywholedirtoroot" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists 
COPY test_dir /test_dir RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopyetctoroot" ctx, err := fakeContext(`FROM scratch COPY . /`, map[string]string{ "etc/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyDisallowRemote(c *check.C) { testRequires(c, DaemonIsLinux) name := "testcopydisallowremote" _, out, err := buildImageWithOut(name, `FROM scratch COPY https://index.docker.io/robots.txt /`, true) if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") { c.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out) } } func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { testRequires(c, DaemonIsLinux) const ( dockerfile = ` FROM scratch ADD links.tar / ADD foo.txt /symlink/ ` targetFile = "foo.txt" ) var ( name = "test-link-absolute" ) ctx, err := fakeContext(dockerfile, nil) if err != nil { c.Fatal(err) } defer ctx.Close() tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") if err != nil { c.Fatalf("failed to create temporary directory: %s", tempDir) } defer os.RemoveAll(tempDir) var symlinkTarget string if runtime.GOOS == "windows" { var driveLetter string if abs, err := filepath.Abs(tempDir); err != nil { c.Fatal(err) } else { driveLetter = abs[:1] } 
tempDirWithoutDrive := tempDir[2:] symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) } else { symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) } tarPath := filepath.Join(ctx.Dir, "links.tar") nonExistingFile := filepath.Join(tempDir, targetFile) fooPath := filepath.Join(ctx.Dir, targetFile) tarOut, err := os.Create(tarPath) if err != nil { c.Fatal(err) } tarWriter := tar.NewWriter(tarOut) header := &tar.Header{ Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: symlinkTarget, Mode: 0755, Uid: 0, Gid: 0, } err = tarWriter.WriteHeader(header) if err != nil { c.Fatal(err) } tarWriter.Close() tarOut.Close() foo, err := os.Create(fooPath) if err != nil { c.Fatal(err) } defer foo.Close() if _, err := foo.WriteString("test"); err != nil { c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } } func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { testRequires(c, DaemonIsLinux) const ( dockerfileTemplate = ` FROM busybox RUN ln -s /../../../../../../../../%s /x VOLUME /x ADD foo.txt /x/` targetFile = "foo.txt" ) var ( name = "test-link-absolute-volume" dockerfile = "" ) tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") if err != nil { c.Fatalf("failed to create temporary directory: %s", tempDir) } defer os.RemoveAll(tempDir) dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) nonExistingFile := filepath.Join(tempDir, targetFile) ctx, err := fakeContext(dockerfile, nil) if err != nil { c.Fatal(err) } defer ctx.Close() fooPath := filepath.Join(ctx.Dir, targetFile) foo, err := os.Create(fooPath) if err != nil { c.Fatal(err) } defer foo.Close() if _, err := foo.WriteString("test"); err != nil { c.Fatal(err) } if _, err := 
buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } } // Issue #5270 - ensure we throw a better error than "unexpected EOF" // when we can't access files in the context. func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, UnixCli) // test uses chown/chmod: not available on windows { name := "testbuildinaccessiblefiles" ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) if err != nil { c.Fatal(err) } defer ctx.Close() // This is used to ensure we detect inaccessible files early during build in the cli client pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown file to root: %s", err) } if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err == nil { c.Fatalf("build should have failed: %s %s", err, out) } // check if we've detected the failure before we started building if !strings.Contains(out, "no permission to read from ") { c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) } if !strings.Contains(out, "Error checking context") { c.Fatalf("output should've contained the string: Error checking context") } } { name := "testbuildinaccessibledirectory" ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) if err != nil { c.Fatal(err) } defer ctx.Close() // This is used to ensure we detect inaccessible directories early during build in the cli client pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown directory to root: %s", err) } if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { c.Fatalf("failed to chmod directory to 444: %s", err) } if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err == nil { c.Fatalf("build should have failed: %s %s", err, out) } // check if we've detected the failure before we started building if !strings.Contains(out, "can't stat") { c.Fatalf("output should've contained the string: can't access %s", out) } if !strings.Contains(out, "Error checking context") { c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out) } } { name := "testlinksok" ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", nil) if err != nil { c.Fatal(err) } defer ctx.Close() target := "../../../../../../../../../../../../../../../../../../../azA" if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { c.Fatal(err) } defer os.Remove(target) // This is used to ensure we don't follow links when checking if everything in the context is accessible // This test doesn't require that we run commands as an unprivileged user if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } { name := "testbuildignoredinaccessible" ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{ "directoryWeCantStat/bar": "foo", ".dockerignore": "directoryWeCantStat", }) if err != nil { c.Fatal(err) } defer ctx.Close() // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown directory to root: %s", err) } if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { c.Fatalf("failed to chmod directory to 755: %s", err) } if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 444: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir if out, _, err := runCommandWithOutput(buildCmd); err != nil { c.Fatalf("build should have worked: %s %s", err, out) } } } func (s *DockerSuite) TestBuildForceRm(c *check.C) { testRequires(c, DaemonIsLinux) containerCountBefore, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } name := "testbuildforcerm" ctx, err := fakeContext("FROM scratch\nRUN true\nRUN thiswillfail", nil) if err != 
nil { c.Fatal(err) } defer ctx.Close() dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".") containerCountAfter, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { c.Fatalf("--force-rm shouldn't have left containers behind") } } func (s *DockerSuite) TestBuildRm(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildrm" ctx, err := fakeContext("FROM scratch\nADD foo /\nADD foo /", map[string]string{"foo": "bar"}) if err != nil { c.Fatal(err) } defer ctx.Close() { containerCountBefore, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") if err != nil { c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { c.Fatalf("-rm shouldn't have left containers behind") } deleteImages(name) } { containerCountBefore, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") if err != nil { c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { c.Fatalf("--rm shouldn't have left containers behind") } deleteImages(name) } { containerCountBefore, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") if err != nil { c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } if 
containerCountBefore == containerCountAfter { c.Fatalf("--rm=false should have left containers behind") } deleteImages(name) } } func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { testRequires(c, DaemonIsLinux) var ( result map[string]map[string]struct{} name = "testbuildvolumes" emptyMap = make(map[string]struct{}) expected = map[string]map[string]struct{}{ "/test1": emptyMap, "/test2": emptyMap, "/test3": emptyMap, "/test4": emptyMap, "/test5": emptyMap, "/test6": emptyMap, "[/test7": emptyMap, "/test8]": emptyMap, } ) _, err := buildImage(name, `FROM scratch VOLUME /test1 VOLUME /test2 VOLUME /test3 /test4 VOLUME ["/test5", "/test6"] VOLUME [/test7 /test8] `, true) if err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Volumes") if err != nil { c.Fatal(err) } err = unmarshalJSON([]byte(res), &result) if err != nil { c.Fatal(err) } equal := reflect.DeepEqual(&result, &expected) if !equal { c.Fatalf("Volumes %s, expected %s", result, expected) } } func (s *DockerSuite) TestBuildMaintainer(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildmaintainer" expected := "dockerio" _, err := buildImage(name, `FROM scratch MAINTAINER dockerio`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Maintainer %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildUser(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilduser" expected := "dockerio" _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd USER dockerio RUN [ $(whoami) = 'dockerio' ]`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.User") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("User %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildrelativeworkdir" expected := "/test2/test3" 
_, err := buildImage(name, `FROM busybox RUN [ "$PWD" = '/' ] WORKDIR test1 RUN [ "$PWD" = '/test1' ] WORKDIR /test2 RUN [ "$PWD" = '/test2' ] WORKDIR test3 RUN [ "$PWD" = '/test2/test3' ]`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.WorkingDir") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Workdir %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildworkdirwithenvvariables" expected := "/test1/test2" _, err := buildImage(name, `FROM busybox ENV DIRPATH /test1 ENV SUBDIRNAME test2 WORKDIR $DIRPATH WORKDIR $SUBDIRNAME/$MISSING_VAR`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.WorkingDir") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Workdir %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { // cat /test1/test2/foo gets permission denied for the user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildrelativecopy" dockerfile := ` FROM busybox WORKDIR /test1 WORKDIR test2 RUN [ "$PWD" = '/test1/test2' ] COPY foo ./ RUN [ "$(cat /test1/test2/foo)" = 'hello' ] ADD foo ./bar/baz RUN [ "$(cat /test1/test2/bar/baz)" = 'hello' ] COPY foo ./bar/baz2 RUN [ "$(cat /test1/test2/bar/baz2)" = 'hello' ] WORKDIR .. COPY foo ./ RUN [ "$(cat /test1/foo)" = 'hello' ] COPY foo /test3/ RUN [ "$(cat /test3/foo)" = 'hello' ] WORKDIR /test4 COPY . . 
RUN [ "$(cat /test4/foo)" = 'hello' ] WORKDIR /test5/test6 COPY foo ../ RUN [ "$(cat /test5/foo)" = 'hello' ] ` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, false) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEnv(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenv" expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" _, err := buildImage(name, `FROM busybox ENV PATH /test:$PATH ENV PORT 2375 RUN [ $(env | grep PORT) = 'PORT=2375' ]`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.Env") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Env %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildPATH(c *check.C) { testRequires(c, DaemonIsLinux) defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" fn := func(dockerfile string, exp string) { _, err := buildImage("testbldpath", dockerfile, true) c.Assert(err, check.IsNil) res, err := inspectField("testbldpath", "Config.Env") c.Assert(err, check.IsNil) if res != exp { c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile) } } tests := []struct{ dockerfile, exp string }{ {"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"}, {"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"}, {"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, {"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, {"FROM scratch\nENV PATH=/test", "[PATH=/test]"}, {"FROM busybox\nENV PATH=/test", "[PATH=/test]"}, {"FROM scratch\nENV PATH=''", "[PATH=]"}, {"FROM busybox\nENV PATH=''", "[PATH=]"}, } for _, test := range tests { fn(test.dockerfile, test.exp) } } func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) name := "testbuildcontextcleanup" entries, err := 
ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } _, err = buildImage(name, `FROM scratch ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } if err = compareDirectoryEntries(entries, entriesFinal); err != nil { c.Fatalf("context should have been deleted, but wasn't") } } func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) name := "testbuildcontextcleanup" entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } _, err = buildImage(name, `FROM scratch RUN /non/existing/command`, true) if err == nil { c.Fatalf("expected build to fail, but it didn't") } entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } if err = compareDirectoryEntries(entries, entriesFinal); err != nil { c.Fatalf("context should have been deleted, but wasn't") } } func (s *DockerSuite) TestBuildCmd(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcmd" expected := "{[/bin/echo Hello World]}" _, err := buildImage(name, `FROM scratch CMD ["/bin/echo", "Hello World"]`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Cmd %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildExpose(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildexpose" expected := "map[2375/tcp:{}]" _, err := buildImage(name, `FROM scratch EXPOSE 2375`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.ExposedPorts") if err != nil { c.Fatal(err) } if res != expected { 
c.Fatalf("Exposed ports %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { testRequires(c, DaemonIsLinux) // start building docker file with a large number of ports portList := make([]string, 50) line := make([]string, 100) expectedPorts := make([]int, len(portList)*len(line)) for i := 0; i < len(portList); i++ { for j := 0; j < len(line); j++ { p := i*len(line) + j + 1 line[j] = strconv.Itoa(p) expectedPorts[p-1] = p } if i == len(portList)-1 { portList[i] = strings.Join(line, " ") } else { portList[i] = strings.Join(line, " ") + ` \` } } dockerfile := `FROM scratch EXPOSE {{range .}} {{.}} {{end}}` tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) buf := bytes.NewBuffer(nil) tmpl.Execute(buf, portList) name := "testbuildexpose" _, err := buildImage(name, buf.String(), true) if err != nil { c.Fatal(err) } // check if all the ports are saved inside Config.ExposedPorts res, err := inspectFieldJSON(name, "Config.ExposedPorts") if err != nil { c.Fatal(err) } var exposedPorts map[string]interface{} if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { c.Fatal(err) } for _, p := range expectedPorts { ep := fmt.Sprintf("%d/tcp", p) if _, ok := exposedPorts[ep]; !ok { c.Errorf("Port(%s) is not exposed", ep) } else { delete(exposedPorts, ep) } } if len(exposedPorts) != 0 { c.Errorf("Unexpected extra exposed ports %v", exposedPorts) } } func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { testRequires(c, DaemonIsLinux) buildID := func(name, exposed string) string { _, err := buildImage(name, fmt.Sprintf(`FROM scratch EXPOSE %s`, exposed), true) if err != nil { c.Fatal(err) } id, err := inspectField(name, "Id") if err != nil { c.Fatal(err) } return id } id1 := buildID("testbuildexpose1", "80 2375") id2 := buildID("testbuildexpose2", "2375 80") if id1 != id2 { c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") } } func (s *DockerSuite) TestBuildExposeUpperCaseProto(c 
*check.C) { testRequires(c, DaemonIsLinux) name := "testbuildexposeuppercaseproto" expected := "map[5678/udp:{}]" _, err := buildImage(name, `FROM scratch EXPOSE 5678/UDP`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.ExposedPorts") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Exposed ports %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildentrypointinheritance" name2 := "testbuildentrypointinheritance2" _, err := buildImage(name, `FROM busybox ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { c.Fatal(err) } expected := "{[/bin/echo]}" if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } _, err = buildImage(name2, fmt.Sprintf(`FROM %s ENTRYPOINT []`, name), true) if err != nil { c.Fatal(err) } res, err = inspectField(name2, "Config.Entrypoint") if err != nil { c.Fatal(err) } expected = "{[]}" if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildentrypoint" expected := "{[]}" _, err := buildImage(name, `FROM busybox ENTRYPOINT []`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildentrypoint" expected := "{[/bin/echo]}" _, err := buildImage(name, `FROM scratch ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } } // #6445 ensure ONBUILD triggers aren't 
committed to grandchildren func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { testRequires(c, DaemonIsLinux) var ( out2, out3 string ) { name1 := "testonbuildtrigger1" dockerfile1 := ` FROM busybox RUN echo "GRANDPARENT" ONBUILD RUN echo "ONBUILD PARENT" ` ctx, err := fakeContext(dockerfile1, nil) if err != nil { c.Fatal(err) } defer ctx.Close() out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") if err != nil { c.Fatalf("build failed to complete: %s, %v", out1, err) } } { name2 := "testonbuildtrigger2" dockerfile2 := ` FROM testonbuildtrigger1 ` ctx, err := fakeContext(dockerfile2, nil) if err != nil { c.Fatal(err) } defer ctx.Close() out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") if err != nil { c.Fatalf("build failed to complete: %s, %v", out2, err) } } { name3 := "testonbuildtrigger3" dockerfile3 := ` FROM testonbuildtrigger2 ` ctx, err := fakeContext(dockerfile3, nil) if err != nil { c.Fatal(err) } defer ctx.Close() out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") if err != nil { c.Fatalf("build failed to complete: %s, %v", out3, err) } } // ONBUILD should be run in second build. if !strings.Contains(out2, "ONBUILD PARENT") { c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") } // ONBUILD should *not* be run in third build. 
if strings.Contains(out3, "ONBUILD PARENT") { c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") } } func (s *DockerSuite) TestBuildWithCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildwithcache" id1, err := buildImage(name, `FROM scratch MAINTAINER dockerio EXPOSE 5432 ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } id2, err := buildImage(name, `FROM scratch MAINTAINER dockerio EXPOSE 5432 ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("The cache should have been used but hasn't.") } } func (s *DockerSuite) TestBuildWithoutCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildwithoutcache" name2 := "testbuildwithoutcache2" id1, err := buildImage(name, `FROM scratch MAINTAINER dockerio EXPOSE 5432 ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } id2, err := buildImage(name2, `FROM scratch MAINTAINER dockerio EXPOSE 5432 ENTRYPOINT ["/bin/echo"]`, false) if err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") } } func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildconditionalcache" dockerfile := ` FROM busybox ADD foo /tmp/` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatalf("Error building #1: %s", err) } if err := ctx.Add("foo", "bye"); err != nil { c.Fatalf("Error modifying foo: %s", err) } id2, err := buildImageFromContext(name, ctx, false) if err != nil { c.Fatalf("Error building #2: %s", err) } if id2 == id1 { c.Fatal("Should not have used the cache") } id3, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatalf("Error building #3: %s", err) } if id3 != id2 { c.Fatal("Should have used the cache") } } func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) { // 
local files are not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildaddlocalfilewithcache" name2 := "testbuildaddlocalfilewithcache2" dockerfile := ` FROM busybox MAINTAINER dockerio ADD foo /usr/lib/bla/bar RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("The cache should have been used but hasn't.") } } func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddmultiplelocalfilewithcache" name2 := "testbuildaddmultiplelocalfilewithcache2" dockerfile := ` FROM busybox MAINTAINER dockerio ADD foo Dockerfile /usr/lib/bla/ RUN [ "$(cat /usr/lib/bla/foo)" = "hello" ]` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("The cache should have been used but hasn't.") } } func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) { // local files are not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildaddlocalfilewithoutcache" name2 := "testbuildaddlocalfilewithoutcache2" dockerfile := ` FROM busybox MAINTAINER dockerio ADD foo /usr/lib/bla/bar RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } id2, err := 
buildImageFromContext(name2, ctx, false) if err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") } } func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcopydirbutnotfile" name2 := "testbuildcopydirbutnotfile2" dockerfile := ` FROM scratch COPY dir /tmp/` ctx, err := fakeContext(dockerfile, map[string]string{ "dir/foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } // Check that adding file with similar name doesn't mess with cache if err := ctx.Add("dir_file", "hello2"); err != nil { c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("The cache should have been used but wasn't") } } func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddcurrentdirwithcache" name2 := name + "2" name3 := name + "3" name4 := name + "4" dockerfile := ` FROM scratch MAINTAINER dockerio ADD . /usr/lib/bla` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } // Check that adding file invalidate cache of "ADD ." if err := ctx.Add("bar", "hello2"); err != nil { c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") } // Check that changing file invalidate cache of "ADD ." 
if err := ctx.Add("foo", "hello1"); err != nil { c.Fatal(err) } id3, err := buildImageFromContext(name3, ctx, true) if err != nil { c.Fatal(err) } if id2 == id3 { c.Fatal("The cache should have been invalided but hasn't.") } // Check that changing file to same content with different mtime does not // invalidate cache of "ADD ." time.Sleep(1 * time.Second) // wait second because of mtime precision if err := ctx.Add("foo", "hello1"); err != nil { c.Fatal(err) } id4, err := buildImageFromContext(name4, ctx, true) if err != nil { c.Fatal(err) } if id3 != id4 { c.Fatal("The cache should have been used but hasn't.") } } func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddcurrentdirwithoutcache" name2 := "testbuildaddcurrentdirwithoutcache2" dockerfile := ` FROM scratch MAINTAINER dockerio ADD . /usr/lib/bla` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, false) if err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") } } func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddremotefilewithcache" server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { c.Fatal(err) } defer server.Close() id1, err := buildImage(name, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != nil { c.Fatal(err) } id2, err := buildImage(name, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("The cache should have been used but hasn't.") } } func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) { testRequires(c, 
DaemonIsLinux) name := "testbuildaddremotefilewithoutcache" name2 := "testbuildaddremotefilewithoutcache2" server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { c.Fatal(err) } defer server.Close() id1, err := buildImage(name, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != nil { c.Fatal(err) } id2, err := buildImage(name2, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), false) if err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") } } func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddremotefilemtime" name2 := name + "2" name3 := name + "3" files := map[string]string{"baz": "hello"} server, err := fakeStorage(files) if err != nil { c.Fatal(err) } defer server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("The cache should have been used but wasn't - #1") } // Now create a different server with same contents (causes different mtime) // The cache should still be used // allow some time for clock to pass as mtime precision is only 1s time.Sleep(2 * time.Second) server2, err := fakeStorage(files) if err != nil { c.Fatal(err) } defer server2.Close() ctx2, err := fakeContext(fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) if err != nil { c.Fatal(err) } defer ctx2.Close() id3, err := buildImageFromContext(name3, ctx2, true) if err != nil { c.Fatal(err) } if id1 != id3 { c.Fatal("The cache should have been used but wasn't") } } func (s *DockerSuite) 
TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddlocalandremotefilewithcache" server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { c.Fatal(err) } defer server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD foo /usr/lib/bla/bar ADD %s/baz /usr/lib/baz/quux`, server.URL()), map[string]string{ "foo": "hello world", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } id2, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("The cache should have been used but hasn't.") } } func testContextTar(c *check.C, compression archive.Compression) { testRequires(c, DaemonIsLinux) ctx, err := fakeContext( `FROM busybox ADD foo /foo CMD ["cat", "/foo"]`, map[string]string{ "foo": "bar", }, ) if err != nil { c.Fatal(err) } defer ctx.Close() context, err := archive.Tar(ctx.Dir, compression) if err != nil { c.Fatalf("failed to build context tar: %v", err) } name := "contexttar" buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") buildCmd.Stdin = context if out, _, err := runCommandWithOutput(buildCmd); err != nil { c.Fatalf("build failed to complete: %v %v", out, err) } } func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { testContextTar(c, archive.Gzip) } func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { testContextTar(c, archive.Uncompressed) } func (s *DockerSuite) TestBuildNoContext(c *check.C) { testRequires(c, DaemonIsLinux) buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n") if out, _, err := runCommandWithOutput(buildCmd); err != nil { c.Fatalf("build failed to complete: %v %v", out, err) } if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { c.Fatalf("run produced invalid output: %q, 
expected %q", out, "ok") } } // TODO: TestCaching func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddlocalandremotefilewithoutcache" name2 := "testbuildaddlocalandremotefilewithoutcache2" server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { c.Fatal(err) } defer server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD foo /usr/lib/bla/bar ADD %s/baz /usr/lib/baz/quux`, server.URL()), map[string]string{ "foo": "hello world", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, false) if err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") } } func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildimg" _, err := buildImage(name, `FROM busybox:latest RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test VOLUME /test`, true) if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") if expected := "drw-------"; !strings.Contains(out, expected) { c.Fatalf("expected %s received %s", expected, out) } if expected := "daemon daemon"; !strings.Contains(out, expected) { c.Fatalf("expected %s received %s", expected, out) } } // testing #1405 - config.Cmd does not get cleaned up if // utilizing cache func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcmdcleanup" if _, err := buildImage(name, `FROM busybox RUN echo "hello"`, true); err != nil { c.Fatal(err) } ctx, err := fakeContext(`FROM busybox RUN echo "hello" ADD foo /foo ENTRYPOINT ["/bin/echo"]`, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, 
true); err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { c.Fatal(err) } // Cmd must be cleaned up if res != "" { c.Fatalf("Cmd %s, expected nil", res) } } func (s *DockerSuite) TestBuildForbiddenContextPath(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildforbidpath" ctx, err := fakeContext(`FROM scratch ADD ../../ test/ `, map[string]string{ "test.txt": "test1", "other.txt": "other", }) if err != nil { c.Fatal(err) } defer ctx.Close() expected := "Forbidden path outside the build context: ../../ " if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) } } func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddnotfound" ctx, err := fakeContext(`FROM scratch ADD foo /usr/local/bar`, map[string]string{"bar": "hello"}) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { if !strings.Contains(err.Error(), "foo: no such file or directory") { c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) } } else { c.Fatal("Error must not be nil") } } func (s *DockerSuite) TestBuildInheritance(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildinheritance" _, err := buildImage(name, `FROM scratch EXPOSE 2375`, true) if err != nil { c.Fatal(err) } ports1, err := inspectField(name, "Config.ExposedPorts") if err != nil { c.Fatal(err) } _, err = buildImage(name, fmt.Sprintf(`FROM %s ENTRYPOINT ["/bin/echo"]`, name), true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { c.Fatal(err) } if expected := "{[/bin/echo]}"; res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } ports2, err := inspectField(name, "Config.ExposedPorts") if err != nil { c.Fatal(err) } if ports1 != ports2 { 
c.Fatalf("Ports must be same: %s != %s", ports1, ports2) } } func (s *DockerSuite) TestBuildFails(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildfails" _, err := buildImage(name, `FROM busybox RUN sh -c "exit 23"`, true) if err != nil { if !strings.Contains(err.Error(), "returned a non-zero code: 23") { c.Fatalf("Wrong error %v, must be about non-zero code 23", err) } } else { c.Fatal("Error must not be nil") } } func (s *DockerSuite) TestBuildFailsDockerfileEmpty(c *check.C) { name := "testbuildfails" _, err := buildImage(name, ``, true) if err != nil { if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") { c.Fatalf("Wrong error %v, must be about empty Dockerfile", err) } } else { c.Fatal("Error must not be nil") } } func (s *DockerSuite) TestBuildOnBuild(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildonbuild" _, err := buildImage(name, `FROM busybox ONBUILD RUN touch foobar`, true) if err != nil { c.Fatal(err) } _, err = buildImage(name, fmt.Sprintf(`FROM %s RUN [ -f foobar ]`, name), true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildOnBuildForbiddenChained(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildonbuildforbiddenchained" _, err := buildImage(name, `FROM busybox ONBUILD ONBUILD RUN touch foobar`, true) if err != nil { if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { c.Fatalf("Wrong error %v, must be about chaining ONBUILD", err) } } else { c.Fatal("Error must not be nil") } } func (s *DockerSuite) TestBuildOnBuildForbiddenFrom(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildonbuildforbiddenfrom" _, err := buildImage(name, `FROM busybox ONBUILD FROM scratch`, true) if err != nil { if !strings.Contains(err.Error(), "FROM isn't allowed as an ONBUILD trigger") { c.Fatalf("Wrong error %v, must be about FROM forbidden", err) } } else { c.Fatal("Error must not be nil") } } func (s *DockerSuite) 
TestBuildOnBuildForbiddenMaintainer(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildonbuildforbiddenmaintainer" _, err := buildImage(name, `FROM busybox ONBUILD MAINTAINER docker.io`, true) if err != nil { if !strings.Contains(err.Error(), "MAINTAINER isn't allowed as an ONBUILD trigger") { c.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err) } } else { c.Fatal("Error must not be nil") } } // gh #2446 func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddtosymlinkdest" ctx, err := fakeContext(`FROM busybox RUN mkdir /foo RUN ln -s /foo /bar ADD foo /bar/ RUN [ -f /bar/foo ] RUN [ -f /foo/foo ]`, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildescaping" _, err := buildImage(name, ` FROM busybox MAINTAINER "Docker \ IO " `, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { c.Fatal(err) } if res != "\"Docker IO \"" { c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) } } func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { testRequires(c, DaemonIsLinux) // Verify that strings that look like ints are still passed as strings name := "testbuildstringing" _, err := buildImage(name, ` FROM busybox MAINTAINER 123 `, true) if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "inspect", name) if !strings.Contains(out, "\"123\"") { c.Fatalf("Output does not contain the int as a string:\n%s", out) } } func (s *DockerSuite) TestBuildDockerignore(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilddockerignore" dockerfile := ` FROM busybox ADD . /bla RUN [[ -f /bla/src/x.go ]] RUN [[ -f /bla/Makefile ]] RUN [[ ! -e /bla/src/_vendor ]] RUN [[ ! -e /bla/.gitignore ]] RUN [[ ! 
-e /bla/README.md ]] RUN [[ ! -e /bla/dir/foo ]] RUN [[ ! -e /bla/foo ]] RUN [[ ! -e /bla/.git ]] RUN [[ ! -e v.cc ]] RUN [[ ! -e src/v.cc ]] RUN [[ ! -e src/_vendor/v.cc ]]` ctx, err := fakeContext(dockerfile, map[string]string{ "Makefile": "all:", ".git/HEAD": "ref: foo", "src/x.go": "package main", "src/_vendor/v.go": "package main", "src/_vendor/v.cc": "package main", "src/v.cc": "package main", "v.cc": "package main", "dir/foo": "", ".gitignore": "", "README.md": "readme", ".dockerignore": ` .git pkg .gitignore src/_vendor *.md **/*.cc dir`, }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilddockerignorecleanpaths" dockerfile := ` FROM busybox ADD . /tmp/ RUN (! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "foo", "foo2": "foo2", "dir1/foo": "foo in dir1", ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilddockerignoreexceptions" dockerfile := ` FROM busybox ADD . /bla RUN [[ -f /bla/src/x.go ]] RUN [[ -f /bla/Makefile ]] RUN [[ ! -e /bla/src/_vendor ]] RUN [[ ! -e /bla/.gitignore ]] RUN [[ ! -e /bla/README.md ]] RUN [[ -e /bla/dir/dir/foo ]] RUN [[ ! -e /bla/dir/foo1 ]] RUN [[ -f /bla/dir/e ]] RUN [[ -f /bla/dir/e-dir/foo ]] RUN [[ ! -e /bla/foo ]] RUN [[ ! 
-e /bla/.git ]] RUN [[ -e /bla/dir/a.cc ]]` ctx, err := fakeContext(dockerfile, map[string]string{ "Makefile": "all:", ".git/HEAD": "ref: foo", "src/x.go": "package main", "src/_vendor/v.go": "package main", "dir/foo": "", "dir/foo1": "", "dir/dir/f1": "", "dir/dir/foo": "", "dir/e": "", "dir/e-dir/foo": "", ".gitignore": "", "README.md": "readme", "dir/a.cc": "hello", ".dockerignore": ` .git pkg .gitignore src/_vendor *.md dir !dir/e* !dir/dir/foo **/*.cc !**/*.cc`, }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilddockerignoredockerfile" dockerfile := ` FROM busybox ADD . /tmp/ RUN ! ls /tmp/Dockerfile RUN ls /tmp/.dockerignore` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": "Dockerfile\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) } // now try it with ./Dockerfile ctx.Add(".dockerignore", "./Dockerfile\n") if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) } } func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilddockerignoredockerfile" dockerfile := ` FROM busybox ADD . /tmp/ RUN ls /tmp/Dockerfile RUN ! 
ls /tmp/MyDockerfile RUN ls /tmp/.dockerignore` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "Should not use me", "MyDockerfile": dockerfile, ".dockerignore": "MyDockerfile\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) } // now try it with ./MyDockerfile ctx.Add(".dockerignore", "./MyDockerfile\n") if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) } } func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilddockerignoredockerignore" dockerfile := ` FROM busybox ADD . /tmp/ RUN ! ls /tmp/.dockerignore RUN ls /tmp/Dockerfile` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": ".dockerignore\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) } } func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { testRequires(c, DaemonIsLinux) var id1 string var id2 string name := "testbuilddockerignoretouchdockerfile" dockerfile := ` FROM busybox ADD . 
/tmp/` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": "Dockerfile\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if id1, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't build it correctly:%s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { c.Fatalf("Didn't use the cache - 1") } // Now make sure touching Dockerfile doesn't invalidate the cache if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { c.Fatalf("Didn't add Dockerfile: %s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { c.Fatalf("Didn't use the cache - 2") } // One more time but just 'touch' it instead of changing the content if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { c.Fatalf("Didn't add Dockerfile: %s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { c.Fatalf("Didn't use the cache - 3") } } func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilddockerignorewholedir" dockerfile := ` FROM busybox COPY . / RUN [[ ! 
-e /.gitignore ]] RUN [[ -f /Makefile ]]` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "Makefile": "all:", ".gitignore": "", ".dockerignore": ".*\n", }) c.Assert(err, check.IsNil) defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil) if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } c.Assert(ctx.Add(".dockerfile", "."), check.IsNil) if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil) if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilddockerignorebadexclusion" dockerfile := ` FROM busybox COPY . / RUN [[ ! -e /.gitignore ]] RUN [[ -f /Makefile ]]` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "Makefile": "all:", ".gitignore": "", ".dockerignore": "!\n", }) c.Assert(err, check.IsNil) defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err == nil { c.Fatalf("Build was supposed to fail but didn't") } if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" { c.Fatalf("Incorrect output, got:%q", err.Error()) } } func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { testRequires(c, DaemonIsLinux) dockerfile := ` FROM busybox COPY . / RUN [[ ! -e /.dockerignore ]] RUN [[ ! -e /Dockerfile ]] RUN [[ ! -e /file1 ]] RUN [[ ! 
-e /dir ]]` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "file1": "", "dir/dfile1": "", }) c.Assert(err, check.IsNil) defer ctx.Close() // All of these should result in ignoring all files for _, variant := range []string{"**", "**/", "**/**", "*"} { ctx.Add(".dockerignore", variant) _, err = buildImageFromContext("noname", ctx, true) c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant)) } } func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { testRequires(c, DaemonIsLinux) dockerfile := ` FROM busybox COPY . / RUN [[ -e /.dockerignore ]] RUN [[ -e /Dockerfile ]] RUN [[ ! -e /file0 ]] RUN [[ ! -e /dir1/file0 ]] RUN [[ ! -e /dir2/file0 ]] RUN [[ ! -e /file1 ]] RUN [[ ! -e /dir1/file1 ]] RUN [[ ! -e /dir1/dir2/file1 ]] RUN [[ ! -e /dir1/file2 ]] RUN [[ -e /dir1/dir2/file2 ]] RUN [[ ! -e /dir1/dir2/file4 ]] RUN [[ ! -e /dir1/dir2/file5 ]] RUN [[ ! -e /dir1/dir2/file6 ]] RUN [[ ! -e /dir1/dir3/file7 ]] RUN [[ ! -e /dir1/dir3/file8 ]] RUN [[ -e /dir1/dir3 ]] RUN [[ -e /dir1/dir4 ]] RUN [[ ! -e 'dir1/dir5/fileAA' ]] RUN [[ -e 'dir1/dir5/fileAB' ]] RUN [[ -e 'dir1/dir5/fileB' ]] # "." in pattern means nothing RUN echo all done!` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "file0": "", "dir1/file0": "", "dir1/dir2/file0": "", "file1": "", "dir1/file1": "", "dir1/dir2/file1": "", "dir1/file2": "", "dir1/dir2/file2": "", // remains "dir1/dir2/file4": "", "dir1/dir2/file5": "", "dir1/dir2/file6": "", "dir1/dir3/file7": "", "dir1/dir3/file8": "", "dir1/dir4/file9": "", "dir1/dir5/fileAA": "", "dir1/dir5/fileAB": "", "dir1/dir5/fileB": "", ".dockerignore": ` **/file0 **/*file1 **/dir1/file2 dir1/**/file4 **/dir2/file5 **/dir1/dir2/file6 dir1/dir3/** **/dir4/** **/file?A **/file\?B **/dir5/file. 
`, }) c.Assert(err, check.IsNil) defer ctx.Close() _, err = buildImageFromContext("noname", ctx, true) c.Assert(err, check.IsNil) } func (s *DockerSuite) TestBuildLineBreak(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildlinebreak" _, err := buildImage(name, `FROM busybox RUN sh -c 'echo root:testpass \ > /tmp/passwd' RUN mkdir -p /var/run/sshd RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildeolinline" _, err := buildImage(name, `FROM busybox RUN sh -c 'echo root:testpass > /tmp/passwd' RUN echo "foo \n bar"; echo "baz" RUN mkdir -p /var/run/sshd RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcomments" _, err := buildImage(name, `FROM busybox # This is an ordinary comment. RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh RUN [ ! 
-x /hello.sh ] # comment with line break \ RUN chmod +x /hello.sh RUN [ -x /hello.sh ] RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] RUN [ "$(/hello.sh)" = "hello world" ]`, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildusers" _, err := buildImage(name, `FROM busybox # Make sure our defaults work RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] # TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) USER root RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] # Setup dockerio user and group RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group # Make sure we can switch to our user and all the information is exactly as we expect it to be USER dockerio RUN id -G RUN id -Gn RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] # Switch back to root and double check that worked exactly as we might expect it to USER root RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] # Add a "supplementary" group for our dockerio user RUN echo 'supplementary:x:1002:dockerio' >> /etc/group # ... 
and then go verify that we get it like we expect USER dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] USER 1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] # super test the new "user:group" syntax USER dockerio:dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER 1001:dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER dockerio:1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER 1001:1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER dockerio:supplementary RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER dockerio:1002 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER 1001:supplementary RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER 1001:1002 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] # make sure unknown uid/gid still works properly USER 1042:1043 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { // /docker/world/hello is not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildenvusage" dockerfile := `FROM busybox ENV HOME /root ENV PATH $HOME/bin:$PATH ENV PATH /tmp:$PATH RUN [ "$PATH" = 
"/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] ENV FOO /foo/baz ENV BAR /bar ENV BAZ $BAR ENV FOOPATH $PATH:$FOO RUN [ "$BAR" = "$BAZ" ] RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] ENV FROM hello/docker/world ENV TO /docker/world/hello ADD $FROM $TO RUN [ "$(cat $TO)" = "hello" ] ENV abc=def ENV ghi=$abc RUN [ "$ghi" = "def" ] ` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { // /docker/world/hello is not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildenvusage2" dockerfile := `FROM busybox ENV abc=def RUN [ "$abc" = "def" ] ENV def="hello world" RUN [ "$def" = "hello world" ] ENV def=hello\ world RUN [ "$def" = "hello world" ] ENV v1=abc v2="hi there" RUN [ "$v1" = "abc" ] RUN [ "$v2" = "hi there" ] ENV v3='boogie nights' v4="with'quotes too" RUN [ "$v3" = "boogie nights" ] RUN [ "$v4" = "with'quotes too" ] ENV abc=zzz FROM=hello/docker/world ENV abc=zzz TO=/docker/world/hello ADD $FROM $TO RUN [ "$(cat $TO)" = "hello" ] ENV abc "zzz" RUN [ $abc = "zzz" ] ENV abc 'yyy' RUN [ $abc = 'yyy' ] ENV abc= RUN [ "$abc" = "" ] # use grep to make sure if the builder substitutes \$foo by mistake # we don't get a false positive ENV abc=\$foo RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) ENV abc \$foo RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) ENV abc=\'foo\' RUN [ "$abc" = "'foo'" ] ENV abc=\"foo\" RUN [ "$abc" = "\"foo\"" ] ENV abc "foo" RUN [ "$abc" = "foo" ] ENV abc 'foo' RUN [ "$abc" = 'foo' ] ENV abc \'foo\' RUN [ "$abc" = "'foo'" ] ENV abc \"foo\" RUN [ "$abc" = '"foo"' ] ENV abc=ABC RUN [ "$abc" = "ABC" ] ENV def=${abc:-DEF} RUN [ "$def" = "ABC" ] ENV def=${ccc:-DEF} RUN [ "$def" = "DEF" ] ENV def=${ccc:-${def}xx} RUN [ "$def" = "DEFxx" ] ENV 
def=${def:+ALT} RUN [ "$def" = "ALT" ] ENV def=${def:+${abc}:} RUN [ "$def" = "ABC:" ] ENV def=${ccc:-\$abc:} RUN [ "$def" = '$abc:' ] ENV def=${ccc:-\${abc}:} RUN [ "$def" = '${abc:}' ] ENV mypath=${mypath:+$mypath:}/home RUN [ "$mypath" = '/home' ] ENV mypath=${mypath:+$mypath:}/away RUN [ "$mypath" = '/home:/away' ] ENV e1=bar ENV e2=$e1 ENV e3=$e11 ENV e4=\$e1 ENV e5=\$e11 RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] ENV ee1 bar ENV ee2 $ee1 ENV ee3 $ee11 ENV ee4 \$ee1 ENV ee5 \$ee11 RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] ENV eee1="foo" ENV eee2='foo' ENV eee3 "foo" ENV eee4 'foo' RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] ` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddScript(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddscript" dockerfile := ` FROM busybox ADD test /test RUN ["chmod","+x","/test"] RUN ["/test"] RUN [ "$(cat /testfile)" = 'test!' ]` ctx, err := fakeContext(dockerfile, map[string]string{ "test": "#!/bin/sh\necho 'test!' 
> /testfile", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddTar(c *check.C) { // /test/foo is not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildaddtar" ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar / RUN cat /test/foo | grep Hi ADD test.tar /test.tar RUN cat /test.tar/test/foo | grep Hi ADD test.tar /unlikely-to-exist RUN cat /unlikely-to-exist/test/foo | grep Hi ADD test.tar /unlikely-to-exist-trailing-slash/ RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi RUN mkdir /existing-directory ADD test.tar /existing-directory RUN cat /existing-directory/test/foo | grep Hi ADD test.tar /existing-directory-trailing-slash/ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) } } func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddbrokentar" ctx := func() 
*FakeContext { dockerfile := ` FROM busybox ADD test.tar /` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } // Corrupt the tar by removing one byte off the end stat, err := testTar.Stat() if err != nil { c.Fatalf("failed to stat tar archive: %v", err) } if err := testTar.Truncate(stat.Size() - 1); err != nil { c.Fatalf("failed to truncate tar archive: %v", err) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err == nil { c.Fatalf("build should have failed for TestBuildAddBrokenTar") } } func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddnontar" // Should not try to extract test.tar ctx, err := fakeContext(` FROM busybox ADD test.tar / RUN test -f /test.tar`, map[string]string{"test.tar": "not_a_tar_file"}) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed for TestBuildAddNonTar") } } func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { // /test/foo is not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildaddtarxz" ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar.xz / RUN cat /test/foo | 
grep Hi` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } xzCompressCmd := exec.Command("xz", "-k", "test.tar") xzCompressCmd.Dir = tmpDir out, _, err := runCommandWithOutput(xzCompressCmd) if err != nil { c.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) } } func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddtarxzgz" ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar.xz.gz / RUN ls /test.tar.xz.gz` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } xzCompressCmd := exec.Command("xz", "-k", "test.tar") 
xzCompressCmd.Dir = tmpDir out, _, err := runCommandWithOutput(xzCompressCmd) if err != nil { c.Fatal(err, out) } gzipCompressCmd := exec.Command("gzip", "test.tar.xz") gzipCompressCmd.Dir = tmpDir out, _, err = runCommandWithOutput(gzipCompressCmd) if err != nil { c.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) } } func (s *DockerSuite) TestBuildFromGIT(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildfromgit" git, err := newFakeGit("repo", map[string]string{ "Dockerfile": `FROM busybox ADD first /first RUN [ -f /first ] MAINTAINER docker`, "first": "test git data", }, true) if err != nil { c.Fatal(err) } defer git.Close() _, err = buildImageFromPath(name, git.RepoURL, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { c.Fatal(err) } if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) } } func (s *DockerSuite) TestBuildFromGITWithContext(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildfromgit" git, err := newFakeGit("repo", map[string]string{ "docker/Dockerfile": `FROM busybox ADD first /first RUN [ -f /first ] MAINTAINER docker`, "docker/first": "test git data", }, true) if err != nil { c.Fatal(err) } defer git.Close() u := fmt.Sprintf("%s#master:docker", git.RepoURL) _, err = buildImageFromPath(name, u, true) if err != nil { c.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { c.Fatal(err) } if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) } } func (s *DockerSuite) TestBuildFromGITwithF(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildfromgitwithf" git, err := newFakeGit("repo", 
map[string]string{ "myApp/myDockerfile": `FROM busybox RUN echo hi from Dockerfile`, }, true) if err != nil { c.Fatal(err) } defer git.Close() out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL) if err != nil { c.Fatalf("Error on build. Out: %s\nErr: %v", out, err) } if !strings.Contains(out, "hi from Dockerfile") { c.Fatalf("Missing expected output, got:\n%s", out) } } func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildfromremotetarball" buffer := new(bytes.Buffer) tw := tar.NewWriter(buffer) defer tw.Close() dockerfile := []byte(`FROM busybox MAINTAINER docker`) if err := tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Size: int64(len(dockerfile)), }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write(dockerfile); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ "testT.tar": buffer, }) c.Assert(err, check.IsNil) defer server.Close() _, err = buildImageFromPath(name, server.URL()+"/testT.tar", true) c.Assert(err, check.IsNil) res, err := inspectField(name, "Author") c.Assert(err, check.IsNil) if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) } } func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcmdcleanuponentrypoint" if _, err := buildImage(name, `FROM scratch CMD ["test"] ENTRYPOINT ["echo"]`, true); err != nil { c.Fatal(err) } if _, err := buildImage(name, fmt.Sprintf(`FROM %s ENTRYPOINT ["cat"]`, name), true); err != nil { c.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { c.Fatal(err) } if res != "" { c.Fatalf("Cmd %s, expected nil", res) } res, err = inspectField(name, "Config.Entrypoint") if err != nil { c.Fatal(err) } if 
expected := "{[cat]}"; res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildClearCmd(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildclearcmd" _, err := buildImage(name, `From scratch ENTRYPOINT ["/bin/bash"] CMD []`, true) if err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { c.Fatal(err) } if res != "[]" { c.Fatalf("Cmd %s, expected %s", res, "[]") } } func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildemptycmd" if _, err := buildImage(name, "FROM scratch\nMAINTAINER quux\n", true); err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { c.Fatal(err) } if res != "null" { c.Fatalf("Cmd %s, expected %s", res, "null") } } func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildonbuildparent" if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { c.Fatal(err) } _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) if err != nil { c.Fatal(err) } if !strings.Contains(out, "# Executing 1 build trigger") { c.Fatal("failed to find the build trigger output", out) } } func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { testRequires(c, DaemonIsLinux) name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true) // if the error doesn't check for illegal tag name, or the image is built // then this should fail if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") { c.Fatalf("failed to stop before building. 
Error: %s, Output: %s", err, out) } } func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcmdshc" if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { c.Fatal(err, res) } expected := `["/bin/sh","-c","echo cmd"]` if res != expected { c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } } func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { testRequires(c, DaemonIsLinux) // Test to make sure that when we strcat arrays we take into account // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't // look the same name := "testbuildcmdspaces" var id1 string var id2 string var err error if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { c.Fatal(err) } if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("Should not have resulted in the same CMD") } // Now do the same with ENTRYPOINT if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { c.Fatal(err) } if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("Should not have resulted in the same ENTRYPOINT") } } func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcmdjson" if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { c.Fatal(err, res) } expected := `["echo","cmd"]` if res != expected { c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } } func (s *DockerSuite) TestBuildErrorInvalidInstruction(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildignoreinvalidinstruction" out, _, err := 
buildImageWithOut(name, "FROM busybox\nfoo bar", true) if err == nil { c.Fatalf("Should have failed: %s", out) } } func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { testRequires(c, DaemonIsLinux) if _, err := buildImage("parent", ` FROM busybox ENTRYPOINT exit 130 `, true); err != nil { c.Fatal(err) } if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 { c.Fatalf("expected exit code 130 but received %d", status) } if _, err := buildImage("child", ` FROM parent ENTRYPOINT exit 5 `, true); err != nil { c.Fatal(err) } if _, status, _ := dockerCmdWithError("run", "child"); status != 5 { c.Fatalf("expected exit code 5 but received %d", status) } } func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) { testRequires(c, DaemonIsLinux) var ( name = "testbuildepinherit" name2 = "testbuildepinherit2" expected = `["/bin/sh","-c","echo quux"]` ) if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { c.Fatal(err) } if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name2, "Config.Entrypoint") if err != nil { c.Fatal(err, res) } if res != expected { c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) } out, _ := dockerCmd(c, "run", "-t", name2) expected = "quux" if strings.TrimSpace(out) != expected { c.Fatalf("Expected output is %s, got %s", expected, out) } } func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildentrypoint" _, err := buildImage(name, `FROM busybox ENTRYPOINT /bin/echo`, true) if err != nil { c.Fatal(err) } dockerCmd(c, "run", "--rm", name) } func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildexoticshellinterpolation" _, err := buildImage(name, ` FROM busybox ENV SOME_VAR a.b.c RUN [ "$SOME_VAR" = 'a.b.c' ] RUN [ "${SOME_VAR}" = 'a.b.c' ] 
RUN [ "${SOME_VAR%.*}" = 'a.b' ] RUN [ "${SOME_VAR%%.*}" = 'a' ] RUN [ "${SOME_VAR#*.}" = 'b.c' ] RUN [ "${SOME_VAR##*.}" = 'c' ] RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] RUN [ "${#SOME_VAR}" = '5' ] RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] `, false) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { testRequires(c, DaemonIsLinux) // This testcase is supposed to generate an error because the // JSON array we're passing in on the CMD uses single quotes instead // of double quotes (per the JSON spec). This means we interpret it // as a "string" instead of "JSON array" and pass it on to "sh -c" and // it should barf on it. name := "testbuildsinglequotefails" if _, err := buildImage(name, `FROM busybox CMD [ '/bin/sh', '-c', 'echo hi' ]`, true); err != nil { c.Fatal(err) } if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil { c.Fatal("The image was not supposed to be able to run") } } func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildverboseout" _, out, err := buildImageWithOut(name, `FROM busybox RUN echo 123`, false) if err != nil { c.Fatal(err) } if !strings.Contains(out, "\n123\n") { c.Fatalf("Output should contain %q: %q", "123", out) } } func (s *DockerSuite) TestBuildWithTabs(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildwithtabs" _, err := buildImage(name, "FROM busybox\nRUN echo\tone\t\ttwo", true) if err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "ContainerConfig.Cmd") if err != nil { c.Fatal(err) } expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates if res != expected1 && res != expected2 { c.Fatalf("Missing tabs.\nGot: 
%s\nExp: %s or %s", res, expected1, expected2) } } func (s *DockerSuite) TestBuildLabels(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildlabel" expected := `{"License":"GPL","Vendor":"Acme"}` _, err := buildImage(name, `FROM busybox LABEL Vendor=Acme LABEL License GPL`, true) if err != nil { c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Labels") if err != nil { c.Fatal(err) } if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildlabelcache" id1, err := buildImage(name, `FROM busybox LABEL Vendor=Acme`, false) if err != nil { c.Fatalf("Build 1 should have worked: %v", err) } id2, err := buildImage(name, `FROM busybox LABEL Vendor=Acme`, true) if err != nil || id1 != id2 { c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor=Acme1`, true) if err != nil || id1 == id2 { c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor Acme`, true) // Note: " " and "=" should be same if err != nil || id1 != id2 { c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) } // Now make sure the cache isn't used by mistake id1, err = buildImage(name, `FROM busybox LABEL f1=b1 f2=b2`, false) if err != nil { c.Fatalf("Build 5 should have worked: %q", err) } id2, err = buildImage(name, `FROM busybox LABEL f1="b1 f2=b2"`, true) if err != nil || id1 == id2 { c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) } } func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { testRequires(c, DaemonIsLinux) // This test makes sure that -q works correctly when build is successful: // stdout has only the image ID (long image ID) and stderr is empty. 
var stdout, stderr string var err error outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$") tt := []struct { Name string BuildFunc func(string) }{ { Name: "quiet_build_stdin_success", BuildFunc: func(name string) { _, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm") }, }, { Name: "quiet_build_ctx_success", BuildFunc: func(name string) { ctx, err := fakeContext("FROM busybox", map[string]string{ "quiet_build_success_fctx": "test", }) if err != nil { c.Fatalf("Failed to create context: %s", err.Error()) } defer ctx.Close() _, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm") }, }, { Name: "quiet_build_git_success", BuildFunc: func(name string) { git, err := newFakeGit("repo", map[string]string{ "Dockerfile": "FROM busybox", }, true) if err != nil { c.Fatalf("Failed to create the git repo: %s", err.Error()) } defer git.Close() _, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm") }, }, } for _, te := range tt { te.BuildFunc(te.Name) if err != nil { c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error()) } if outRegexp.Find([]byte(stdout)) == nil { c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, stdout) } if runtime.GOOS == "windows" { // stderr contains a security warning on Windows if the daemon isn't Windows lines := strings.Split(stderr, "\n") warningCount := 0 for _, v := range lines { warningText := "SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host." 
if strings.Contains(v, warningText) { warningCount++ } if v != "" && !strings.Contains(v, warningText) { c.Fatalf("Stderr contains unexpected output line: %q", v) } } if warningCount != 1 && daemonPlatform != "windows" { c.Fatalf("Test %s didn't get security warning running from Windows to non-Windows", te.Name) } } else { if stderr != "" { c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr) } } } } func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { testRequires(c, DaemonIsLinux) // This test makes sure that -q works correctly when build fails by // comparing between the stderr output in quiet mode and in stdout // and stderr output in verbose mode tt := []struct { TestName string BuildCmds string }{ {"quiet_build_no_from_at_the_beginning", "RUN whoami"}, {"quiet_build_unknown_instr", "FROMD busybox"}, {"quiet_build_not_exists_image", "FROM busybox11"}, } for _, te := range tt { _, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm") _, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm") if verr == nil || qerr == nil { c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName)) } if qstderr != vstdout+vstderr { c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr)) } } } func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { testRequires(c, DaemonIsLinux) // This test ensures that when given a wrong URL, stderr in quiet mode and // stdout and stderr in verbose mode are identical. 
URL := "http://bla.bla.com" Name := "quiet_build_wrong_remote" _, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL) _, vstdout, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL) if qerr == nil || verr == nil { c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name)) } if qstderr != vstdout+vstderr { c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstdout)) } } func (s *DockerSuite) TestBuildStderr(c *check.C) { testRequires(c, DaemonIsLinux) // This test just makes sure that no non-error output goes // to stderr name := "testbuildstderr" _, _, stderr, err := buildImageWithStdoutStderr(name, "FROM busybox\nRUN echo one", true) if err != nil { c.Fatal(err) } if runtime.GOOS == "windows" { // stderr might contain a security warning on windows lines := strings.Split(stderr, "\n") for _, v := range lines { if v != "" && !strings.Contains(v, "SECURITY WARNING:") { c.Fatalf("Stderr contains unexpected output line: %q", v) } } } else { if stderr != "" { c.Fatalf("Stderr should have been empty, instead its: %q", stderr) } } } func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { testRequires(c, UnixCli) // test uses chown: not available on windows testRequires(c, DaemonIsLinux) name := "testbuildchownsinglefile" ctx, err := fakeContext(` FROM busybox COPY test / RUN ls -l /test RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] `, map[string]string{ "test": "test", }) if err != nil { c.Fatal(err) } defer ctx.Close() if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildsymlinkbreakout" tmpdir, err := ioutil.TempDir("", name) c.Assert(err, check.IsNil) defer 
os.RemoveAll(tmpdir) ctx := filepath.Join(tmpdir, "context") if err := os.MkdirAll(ctx, 0755); err != nil { c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` from busybox add symlink.tar / add inject /symlink/ `), 0644); err != nil { c.Fatal(err) } inject := filepath.Join(ctx, "inject") if err := ioutil.WriteFile(inject, nil, 0644); err != nil { c.Fatal(err) } f, err := os.Create(filepath.Join(ctx, "symlink.tar")) if err != nil { c.Fatal(err) } w := tar.NewWriter(f) w.WriteHeader(&tar.Header{ Name: "symlink2", Typeflag: tar.TypeSymlink, Linkname: "/../../../../../../../../../../../../../../", Uid: os.Getuid(), Gid: os.Getgid(), }) w.WriteHeader(&tar.Header{ Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: filepath.Join("symlink2", tmpdir), Uid: os.Getuid(), Gid: os.Getgid(), }) w.Close() f.Close() if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { c.Fatal(err) } if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { c.Fatal("symlink breakout - inject") } else if !os.IsNotExist(err) { c.Fatalf("unexpected error: %v", err) } } func (s *DockerSuite) TestBuildXZHost(c *check.C) { // /usr/local/sbin/xz gets permission denied for the user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildxzhost" ctx, err := fakeContext(` FROM busybox ADD xz /usr/local/sbin/ RUN chmod 755 /usr/local/sbin/xz ADD test.xz / RUN [ ! 
-e /injected ]`, map[string]string{ "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", "xz": "#!/bin/sh\ntouch /injected", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { // /foo/file gets permission denied for the user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) var ( name = "testbuildvolumescontent" expected = "some text" ) ctx, err := fakeContext(` FROM busybox COPY content /foo/file VOLUME /foo CMD cat /foo/file`, map[string]string{ "content": expected, }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, false); err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "run", "--rm", name) if out != expected { c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) } } func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { testRequires(c, DaemonIsLinux) ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{ "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", "files/dFile": "FROM busybox\nRUN echo from files/dFile", "dFile": "FROM busybox\nRUN echo from dFile", "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", }) if err != nil { c.Fatal(err) } defer ctx.Close() out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from Dockerfile") { c.Fatalf("test1 should have used Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") if err != nil { c.Fatal(err) } if 
!strings.Contains(out, "from files/Dockerfile") { c.Fatalf("test2 should have used files/Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") if err != nil { c.Fatal(err) } if !strings.Contains(out, "from files/dFile") { c.Fatalf("test3 should have used files/dFile, output:%s", out) } out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") if err != nil { c.Fatal(err) } if !strings.Contains(out, "from dFile") { c.Fatalf("test4 should have used dFile, output:%s", out) } dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") c.Assert(err, check.IsNil) nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") if _, err = os.Create(nonDockerfileFile); err != nil { c.Fatal(err) } out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") if err == nil { c.Fatalf("test5 was supposed to fail to find passwd") } if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) { c.Fatalf("wrong error messsage:%v\nexpected to contain=%v", out, expected) } out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") if err != nil { c.Fatalf("test6 failed: %s", err) } if !strings.Contains(out, "from Dockerfile") { c.Fatalf("test6 should have used root Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") if err != nil { c.Fatalf("test7 failed: %s", err) } if !strings.Contains(out, "from files/Dockerfile") { c.Fatalf("test7 should have used files Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), 
"-t", "test8", ".") if err == nil || !strings.Contains(out, "must be within the build context") { c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) } tmpDir := os.TempDir() out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir) if err != nil { c.Fatalf("test9 - failed: %s", err) } if !strings.Contains(out, "from Dockerfile") { c.Fatalf("test9 should have used root Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") if err != nil { c.Fatalf("test10 should have worked: %s", err) } if !strings.Contains(out, "from files/dFile2") { c.Fatalf("test10 should have used files/dFile2, output:%s", out) } } func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows testRequires(c, DaemonIsLinux) ctx, err := fakeContext(`FROM busybox RUN echo from dockerfile`, map[string]string{ "dockerfile": "FROM busybox\nRUN echo from dockerfile", }) if err != nil { c.Fatal(err) } defer ctx.Close() out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from dockerfile") { c.Fatalf("Missing proper output: %s", out) } } func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows testRequires(c, DaemonIsLinux) ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{ "dockerfile": "FROM busybox\nRUN echo from dockerfile", }) if err != nil { c.Fatal(err) } defer ctx.Close() out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from Dockerfile") { c.Fatalf("Missing proper output: %s", out) } } func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { testRequires(c, 
DaemonIsLinux) server, err := fakeStorage(map[string]string{"baz": `FROM busybox RUN echo from baz COPY * /tmp/ RUN find /tmp/`}) if err != nil { c.Fatal(err) } defer server.Close() ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{}) if err != nil { c.Fatal(err) } defer ctx.Close() // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") if err != nil { c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from baz") || strings.Contains(out, "/tmp/baz") || !strings.Contains(out, "/tmp/Dockerfile") { c.Fatalf("Missing proper output: %s", out) } } func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { testRequires(c, DaemonIsLinux) ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{}) if err != nil { c.Fatal(err) } defer ctx.Close() // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") dockerCommand.Dir = ctx.Dir dockerCommand.Stdin = strings.NewReader(`FROM busybox RUN echo from baz COPY * /tmp/ RUN find /tmp/`) out, status, err := runCommandWithOutput(dockerCommand) if err != nil || status != 0 { c.Fatalf("Error building: %s", err) } if !strings.Contains(out, "from baz") || strings.Contains(out, "/tmp/baz") || !strings.Contains(out, "/tmp/Dockerfile") { c.Fatalf("Missing proper output: %s", out) } } func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildfromofficial" fromNames := []string{ "busybox", "docker.io/busybox", "index.docker.io/busybox", "library/busybox", "docker.io/library/busybox", "index.docker.io/library/busybox", } for idx, fromName := range fromNames { imgName := fmt.Sprintf("%s%d", name, idx) _, err := buildImage(imgName, "FROM 
"+fromName, true) if err != nil { c.Errorf("Build failed using FROM %s: %s", fromName, err) } deleteImages(imgName) } } func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) testRequires(c, DaemonIsLinux) name := "testbuilddockerfileoutsidecontext" tmpdir, err := ioutil.TempDir("", name) c.Assert(err, check.IsNil) defer os.RemoveAll(tmpdir) ctx := filepath.Join(tmpdir, "context") if err := os.MkdirAll(ctx, 0755); err != nil { c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { c.Fatal(err) } wd, err := os.Getwd() if err != nil { c.Fatal(err) } defer os.Chdir(wd) if err := os.Chdir(ctx); err != nil { c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { c.Fatal(err) } if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { c.Fatal(err) } if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { c.Fatal(err) } for _, dockerfilePath := range []string{ filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1"), filepath.Join(ctx, "dockerfile2"), } { out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") if err == nil { c.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out) } if !strings.Contains(out, "must be within the build context") && !strings.Contains(out, "Cannot locate Dockerfile") { c.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out) } deleteImages(name) } os.Chdir(tmpdir) // Path to Dockerfile should be resolved relative to working directory, not relative to context. 
// There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) if err == nil { c.Fatalf("Expected error. Out: %s", out) } } func (s *DockerSuite) TestBuildSpaces(c *check.C) { testRequires(c, DaemonIsLinux) // Test to make sure that leading/trailing spaces on a command // doesn't change the error msg we get var ( err1 error err2 error ) name := "testspaces" ctx, err := fakeContext("FROM busybox\nCOPY\n", map[string]string{ "Dockerfile": "FROM busybox\nCOPY\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { c.Fatal("Build 1 was supposed to fail, but didn't") } ctx.Add("Dockerfile", "FROM busybox\nCOPY ") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { c.Fatal("Build 2 was supposed to fail, but didn't") } removeLogTimestamps := func(s string) string { return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) } // Skip over the times e1 := removeLogTimestamps(err1.Error()) e2 := removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) } ctx.Add("Dockerfile", "FROM busybox\n COPY") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { c.Fatal("Build 3 was supposed to fail, but didn't") } // Skip over the times e1 = removeLogTimestamps(err1.Error()) e2 = removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) } ctx.Add("Dockerfile", "FROM busybox\n COPY ") if _, err2 = 
// (tail of TestBuildSpaces) Build 4: a trailing space after COPY must
// yield the same error text (modulo whitespace/timestamps) as build 1.
buildImageFromContext(name, ctx, false); err2 == nil {
	c.Fatal("Build 4 was supposed to fail, but didn't")
}

// Skip over the times
e1 = removeLogTimestamps(err1.Error())
e2 = removeLogTimestamps(err2.Error())

// Ignore whitespace since that's what were verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
	c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2)
}
}

// TestBuildSpacesWithQuotes verifies that whitespace inside a quoted RUN
// argument survives Dockerfile line-continuation parsing.
func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) {
	testRequires(c, DaemonIsLinux)
	// Test to make sure that spaces in quotes aren't lost
	name := "testspacesquotes"

	// NOTE(review): the newlines inside this raw string were lost in this
	// rendering and are reconstructed; confirm exact spacing against the
	// upstream file.
	dockerfile := `FROM busybox
RUN echo " \
 foo "`

	_, out, err := buildImageWithOut(name, dockerfile, false)
	if err != nil {
		c.Fatal("Build failed:", err)
	}

	expecting := "\n foo \n"
	if !strings.Contains(out, expecting) {
		c.Fatalf("Bad output: %q expecting to contain %q", out, expecting)
	}
}

// #4393
// TestBuildVolumeFileExistsinContainer: declaring VOLUME on a path that is
// already an ordinary file inside the image must fail the build.
func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) {
	testRequires(c, DaemonIsLinux)
	buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-")
	buildCmd.Stdin = strings.NewReader(`
	FROM busybox
	RUN touch /foo
	VOLUME /foo
	`)

	out, _, err := runCommandWithOutput(buildCmd)
	if err == nil || !strings.Contains(out, "file exists") {
		c.Fatalf("expected build to fail when file exists in container at requested volume path")
	}
}

// TestBuildMissingArgs checks that every registered Dockerfile command
// (except those in skipCmds) errors out when given no arguments.
func (s *DockerSuite) TestBuildMissingArgs(c *check.C) {
	testRequires(c, DaemonIsLinux)

	// Test to make sure that all Dockerfile commands (except the ones listed
	// in skipCmds) will generate an error if no args are provided.
	// Note: INSERT is deprecated so we exclude it because of that.
	skipCmds := map[string]struct{}{
		"CMD":        {},
		"RUN":        {},
		"ENTRYPOINT": {},
		"INSERT":     {},
	}

	for cmd := range command.Commands {
		cmd = strings.ToUpper(cmd)
		if _, ok := skipCmds[cmd]; ok {
			continue
		}

		var dockerfile string
		if cmd == "FROM" {
			dockerfile = cmd
		} else {
			// Add FROM to make sure we don't complain about it missing
			dockerfile = "FROM busybox\n" + cmd
		}

		ctx, err := fakeContext(dockerfile, map[string]string{})
		if err != nil {
			c.Fatal(err)
		}
		// NOTE(review): defer inside a loop — every context stays open
		// until the whole test function returns.
		defer ctx.Close()

		var out string
		if out, err = buildImageFromContext("args", ctx, true); err == nil {
			c.Fatalf("%s was supposed to fail. Out:%s", cmd, out)
		}
		if !strings.Contains(err.Error(), cmd+" requires") {
			c.Fatalf("%s returned the wrong type of error:%s", cmd, err)
		}
	}
}

// TestBuildEmptyScratch: a bare "FROM scratch" produces no layers, which
// the builder reports as an error.
func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
	testRequires(c, DaemonIsLinux)
	_, out, err := buildImageWithOut("sc", "FROM scratch", true)
	if err == nil {
		c.Fatalf("Build was supposed to fail")
	}
	if !strings.Contains(out, "No image was generated") {
		c.Fatalf("Wrong error message: %v", out)
	}
}

// TestBuildDotDotFile: a context entry named "..gitme" must not confuse
// the tar/context walker.
func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
	testRequires(c, DaemonIsLinux)
	ctx, err := fakeContext("FROM busybox\n", map[string]string{
		"..gitme": "",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()

	if _, err = buildImageFromContext("sc", ctx, false); err != nil {
		c.Fatalf("Build was supposed to work: %s", err)
	}
}

// TestBuildRUNoneJSON: JSON-form RUN with a single element must execute
// the binary directly (no shell).
func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildrunonejson"

	ctx, err := fakeContext(`FROM hello-world:frozen
RUN [ "/hello" ]`, map[string]string{})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()

	out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".")
	if err != nil {
		c.Fatalf("failed to build the image: %s, %v", out, err)
	}

	if !strings.Contains(out, "Hello from Docker") {
		c.Fatalf("bad output: %s", out)
	}
}

// (head of TestBuildEmptyStringVolume — body continues in the next chunk)
func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildemptystringvolume"

	_, err
:= buildImage(name, ` FROM busybox ENV foo="" VOLUME $foo `, false) if err == nil { c.Fatal("Should have failed to build") } } func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) { testRequires(c, SameHostDaemon) testRequires(c, DaemonIsLinux) cgroupParent := "test" data, err := ioutil.ReadFile("/proc/self/cgroup") if err != nil { c.Fatalf("failed to read '/proc/self/cgroup - %v", err) } selfCgroupPaths := parseCgroupPaths(string(data)) _, found := selfCgroupPaths["memory"] if !found { c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths) } cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-") cmd.Stdin = strings.NewReader(` FROM busybox RUN cat /proc/self/cgroup `) out, _, err := runCommandWithOutput(cmd) if err != nil { c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out) c.Assert(err, check.IsNil) if !m { c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out) } } func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) { testRequires(c, DaemonIsLinux) // Check to make sure our build output prints the Dockerfile cmd // property - there was a bug that caused it to be duplicated on the // Step X line name := "testbuildnodupoutput" _, out, err := buildImageWithOut(name, ` FROM busybox RUN env`, false) if err != nil { c.Fatalf("Build should have worked: %q", err) } exp := "\nStep 2 : RUN env\n" if !strings.Contains(out, exp) { c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) } } // GH15826 func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) { testRequires(c, DaemonIsLinux) // Explicit check to ensure that build starts from step 1 rather than 0 name := "testbuildstartsfromone" _, out, err := buildImageWithOut(name, ` FROM busybox`, false) if err != nil { c.Fatalf("Build should have worked: %q", err) } exp 
// (tail of TestBuildStartsFromOne, GH15826) the first emitted build step
// must be numbered 1, not 0.
:= "\nStep 1 : FROM busybox\n"
if !strings.Contains(out, exp) {
	c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
}
}

// TestBuildBadCmdFlag: an unknown flag on a Dockerfile instruction must be
// rejected with an "Unknown flag" error.
func (s *DockerSuite) TestBuildBadCmdFlag(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildbadcmdflag"
	_, out, err := buildImageWithOut(name, `
  FROM busybox
  MAINTAINER --boo joe@example.com`, false)
	if err == nil {
		c.Fatal("Build should have failed")
	}
	exp := "\nUnknown flag: boo\n"
	if !strings.Contains(out, exp) {
		c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
	}
}

// TestBuildRUNErrMsg verifies a failing RUN is echoed back as a plain
// shell command line, not as a Go []string dump.
func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
	testRequires(c, DaemonIsLinux)
	// Test to make sure the bad command is quoted with just "s and
	// not as a Go []string
	name := "testbuildbadrunerrmsg"
	_, out, err := buildImageWithOut(name, `
  FROM busybox
  RUN badEXE a1 \& a2	a3`, false) // tab between a2 and a3
	if err == nil {
		c.Fatal("Should have failed to build")
	}

	exp := `The command '/bin/sh -c badEXE a1 \& a2	a3' returned a non-zero code: 127`
	if !strings.Contains(out, exp) {
		c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp)
	}
}

// TestTrustedBuild: a FROM on a trusted tag is resolved to a pinned
// digest, and the resulting tag reference can be inspected and removed.
func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
	repoName := s.setupTrustedImage(c, "trusted-build")
	dockerFile := fmt.Sprintf(`
  FROM %s
  RUN []
    `, repoName)

	name := "testtrustedbuild"

	buildCmd := buildImageCmd(name, dockerFile, true)
	s.trustedCmd(buildCmd)
	out, _, err := runCommandWithOutput(buildCmd)
	if err != nil {
		c.Fatalf("Error running trusted build: %s\n%s", err, out)
	}

	// repoName ends in ":latest" (7 chars), which is stripped before the
	// digest check — presumably setupTrustedImage always tags ":latest";
	// confirm if that helper changes.
	if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) {
		c.Fatalf("Unexpected output on trusted build:\n%s", out)
	}

	// We should also have a tag reference for the image.
	if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 {
		c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out)
	}

	// We should now be able to remove the tag reference.
	if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 {
		// NOTE(review): copy-pasted message — this branch runs "rmi",
		// not "inspect"; the text should say "removing image".
		c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out)
	}
}

// TestTrustedBuildUntrustedTag: building FROM a tag with no trust data
// must fail with a "does not have trust data for" error.
func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
	dockerFile := fmt.Sprintf(`
  FROM %s
  RUN []
    `, repoName)

	name := "testtrustedbuilduntrustedtag"

	buildCmd := buildImageCmd(name, dockerFile, true)
	s.trustedCmd(buildCmd)
	out, _, err := runCommandWithOutput(buildCmd)
	if err == nil {
		// NOTE(review): err is always nil on this path, so the "%s" for
		// err prints "<nil>"; only out is informative here.
		c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out)
	}

	if !strings.Contains(out, "does not have trust data for") {
		c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out)
	}
}

// TestBuildContextDirIsSymlink: passing a symlink as the build-context
// path must behave like passing the directory it points to.
func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
	testRequires(c, DaemonIsLinux)
	tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(tempDir)

	// Make a real context directory in this temp directory with a simple
	// Dockerfile.
	realContextDirname := filepath.Join(tempDir, "context")
	if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil {
		c.Fatal(err)
	}

	if err = ioutil.WriteFile(
		filepath.Join(realContextDirname, "Dockerfile"),
		[]byte(`
  FROM busybox
  RUN echo hello world
  `),
		os.FileMode(0644),
	); err != nil {
		c.Fatal(err)
	}

	// Make a symlink to the real context directory.
	contextSymlinkName := filepath.Join(tempDir, "context_link")
	if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil {
		c.Fatal(err)
	}

	// Executing the build with the symlink as the specified context should
	// *not* fail.
if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 { c.Fatalf("build failed with exit status %d: %s", exitStatus, out) } } // Issue #15634: COPY fails when path starts with "null" func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildnullstringinaddcopyvolume" ctx, err := fakeContext(` FROM busybox ADD null / COPY nullfile / VOLUME nullvolume `, map[string]string{ "null": "test1", "nullfile": "test2", }, ) c.Assert(err, check.IsNil) defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) c.Assert(err, check.IsNil) } func (s *DockerSuite) TestBuildStopSignal(c *check.C) { testRequires(c, DaemonIsLinux) name := "test_build_stop_signal" _, err := buildImage(name, `FROM busybox STOPSIGNAL SIGKILL`, true) c.Assert(err, check.IsNil) res, err := inspectFieldJSON(name, "Config.StopSignal") c.Assert(err, check.IsNil) if res != `"SIGKILL"` { c.Fatalf("Signal %s, expected SIGKILL", res) } } func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN echo $%s CMD echo $%s`, envKey, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { c.Fatalf("run produced invalid output: %q, expected empty string", out) } } func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" envDef := "bar1" args := []string{ "--build-arg", 
fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s=%s`, envKey, envDef) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) } out, _ := dockerCmd(c, "history", "--no-trunc", imgName) outputTabs := strings.Split(out, "\n")[1] if !strings.Contains(outputTabs, envDef) { c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef) } } func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN echo $%s`, envKey, envKey) origImgID := "" var err error if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { c.Fatal(err) } imgNameCache := "bldargtestcachehit" if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID { if err != nil { c.Fatal(err) } c.Fatalf("build didn't use cache! 
expected image id: %q built image id: %q", origImgID, newImgID) } } func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" extraEnvKey := "foo1" extraEnvVal := "bar1" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s ARG %s RUN echo $%s`, envKey, extraEnvKey, envKey) origImgID := "" var err error if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { c.Fatal(err) } imgNameCache := "bldargtestcachemiss" args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal)) if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { if err != nil { c.Fatal(err) } c.Fatalf("build used cache, expected a miss!") } } func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" newEnvVal := "bar1" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN echo $%s`, envKey, envKey) origImgID := "" var err error if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { c.Fatal(err) } imgNameCache := "bldargtestcachemiss" args = []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal), } if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { if err != nil { c.Fatal(err) } c.Fatalf("build used cache, expected a miss!") } } func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" envValOveride := "barOverride" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s ENV %s %s RUN echo 
$%s CMD echo $%s `, envKey, envKey, envValOveride, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) } } func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" envValOveride := "barOverride" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ENV %s %s ARG %s RUN echo $%s CMD echo $%s `, envKey, envValOveride, envKey, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) } } func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldvarstest" wdVar := "WDIR" wdVal := "/tmp/" addVar := "AFILE" addVal := "addFile" copyVar := "CFILE" copyVal := "copyFile" envVar := "foo" envVal := "bar" exposeVar := "EPORT" exposeVal := "9999" userVar := "USER" userVal := "testUser" volVar := "VOL" volVal := "/testVol/" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), "--build-arg", 
fmt.Sprintf("%s=%s", addVar, addVal), "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), } ctx, err := fakeContext(fmt.Sprintf(`FROM busybox ARG %s WORKDIR ${%s} ARG %s ADD ${%s} testDir/ ARG %s COPY $%s testDir/ ARG %s ENV %s=${%s} ARG %s EXPOSE $%s ARG %s USER $%s ARG %s VOLUME ${%s}`, wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar), map[string]string{ addVal: "some stuff", copyVal: "some stuff", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil { c.Fatal(err) } var resMap map[string]interface{} var resArr []string res := "" res, err = inspectField(imgName, "Config.WorkingDir") if err != nil { c.Fatal(err) } if res != filepath.ToSlash(filepath.Clean(wdVal)) { c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res) } err = inspectFieldAndMarshall(imgName, "Config.Env", &resArr) if err != nil { c.Fatal(err) } found := false for _, v := range resArr { if fmt.Sprintf("%s=%s", envVar, envVal) == v { found = true break } } if !found { c.Fatalf("Config.Env value mismatch. Expected to exist: %s=%s, got: %v", envVar, envVal, resArr) } err = inspectFieldAndMarshall(imgName, "Config.ExposedPorts", &resMap) if err != nil { c.Fatal(err) } if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) } res, err = inspectField(imgName, "Config.User") if err != nil { c.Fatal(err) } if res != userVal { c.Fatalf("Config.User value mismatch. 
Expected: %s, got: %s", userVal, res) } err = inspectFieldAndMarshall(imgName, "Config.Volumes", &resMap) if err != nil { c.Fatal(err) } if _, ok := resMap[volVal]; !ok { c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap) } } func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldvarstest" envKey := "foo" envVal := "bar" envKey1 := "foo1" envValOveride := "barOverride" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s ENV %s %s ENV %s ${%s} RUN echo $%s CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) } } func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox RUN echo $%s ARG %s CMD echo $%s`, envKey, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("able to access environment variable in output: %q expected to be missing", out) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { c.Fatalf("run produced invalid 
output: %q, expected empty string", out) } } func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "HTTP_PROXY" envVal := "bar" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox RUN echo $%s CMD echo $%s`, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { c.Fatalf("run produced invalid output: %q, expected empty string", out) } } func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envVal := "bar" envValOveride := "barOverride" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s=%s ENV %s $%s RUN echo $%s CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) } } func (s *DockerSuite) TestBuildBuildTimeArgMultiArgsSameLine(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" envKey1 := "foo1" args := []string{} dockerfile := fmt.Sprintf(`FROM busybox ARG %s %s`, 
// (tail of TestBuildBuildTimeArgMultiArgsSameLine) two names on a single
// ARG line must be rejected with the "exactly one argument" error.
envKey, envKey1)
errStr := "ARG requires exactly one argument definition"
if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err == nil {
	c.Fatalf("build succeeded, expected to fail. Output: %v", out)
} else if !strings.Contains(out, errStr) {
	c.Fatalf("Unexpected error. output: %q, expected error: %q", out, errStr)
}
}

// TestBuildBuildTimeArgUnconsumedArg: passing --build-arg for a variable
// the Dockerfile never declares with ARG must fail the build.
func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) {
	testRequires(c, DaemonIsLinux)
	imgName := "bldargtest"
	envKey := "foo"
	envVal := "bar"
	args := []string{
		"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
	}
	dockerfile := fmt.Sprintf(`FROM busybox
		RUN echo $%s
		CMD echo $%s`, envKey, envKey)

	errStr := "One or more build-args"
	if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err == nil {
		c.Fatalf("build succeeded, expected to fail. Output: %v", out)
	} else if !strings.Contains(out, errStr) {
		c.Fatalf("Unexpected error. output: %q, expected error: %q", out, errStr)
	}
}

// TestBuildBuildTimeArgQuotedValVariants: the four quoting variants of an
// ARG default must all be distinguishable from one another at RUN time
// (every pairwise shell comparison is "!=", so each RUN exits 0 only when
// the values differ).
func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) {
	testRequires(c, DaemonIsLinux)
	imgName := "bldargtest"
	envKey := "foo"
	envKey1 := "foo1"
	envKey2 := "foo2"
	envKey3 := "foo3"
	args := []string{}
	dockerfile := fmt.Sprintf(`FROM busybox
		ARG %s=""
		ARG %s=''
		ARG %s="''"
		ARG %s='""'
		RUN [ "$%s" != "$%s" ]
		RUN [ "$%s" != "$%s" ]
		RUN [ "$%s" != "$%s" ]
		RUN [ "$%s" != "$%s" ]
		RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3,
		envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3,
		envKey2, envKey3)

	if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil {
		c.Fatalf("build failed to complete: %q %q", out, err)
	}
}

// (head of TestBuildBuildTimeArgEmptyValVariants — the Sprintf argument
// list and the rest of the body continue in the next chunk)
func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) {
	testRequires(c, DaemonIsLinux)
	imgName := "bldargtest"
	envKey := "foo"
	envKey1 := "foo1"
	envKey2 := "foo2"
	args := []string{}
	dockerfile := fmt.Sprintf(`FROM busybox
		ARG %s=
		ARG %s=""
		ARG %s=''
		RUN [ "$%s" == "$%s" ]
		RUN [ "$%s" == "$%s" ]
		RUN [ "$%s" == "$%s" ]`, envKey,
envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } } func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { testRequires(c, DaemonIsLinux) imgName := "bldargtest" envKey := "foo" args := []string{} dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN env`, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out) } } func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-v", "testname:/foo", "busybox", "sh", "-c", "touch /foo/oops") dockerFile := `FROM busybox VOLUME testname:/foo RUN ls /foo/oops ` _, err := buildImage("test", dockerFile, false) c.Assert(err, check.NotNil, check.Commentf("image build should have failed")) } func (s *DockerSuite) TestBuildTagEvent(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() dockerFile := `FROM busybox RUN echo events ` _, err := buildImage("test", dockerFile, false) c.Assert(err, check.IsNil) out, _ := dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "type=image") events := strings.Split(strings.TrimSpace(out), "\n") actions := eventActionsByIDAndType(c, events, "test:latest", "image") var foundTag bool for _, a := range actions { if a == "tag" { foundTag = true break } } c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out)) } // #15780 func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { dockerfile := ` FROM busybox MAINTAINER test-15780 ` cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2", "-t", "tag1:latest", 
"-t", "tag1", "--no-cache", "-") cmd.Stdin = strings.NewReader(dockerfile) _, err := runCommand(cmd) c.Assert(err, check.IsNil) id1, err := getIDByName("tag1") c.Assert(err, check.IsNil) id2, err := getIDByName("tag2:v2") c.Assert(err, check.IsNil) c.Assert(id1, check.Equals, id2) } // #17290 func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildbrokensymlink" ctx, err := fakeContext(` FROM busybox COPY . ./`, map[string]string{ "foo": "bar", }) c.Assert(err, checker.IsNil) defer ctx.Close() err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) // warm up cache _, err = buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) // add new file to context, should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) c.Assert(err, checker.IsNil) _, out, err := buildImageFromContextWithOut(name, ctx, true) c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), "Using cache") } func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildbrokensymlink" ctx, err := fakeContext(` FROM busybox COPY asymlink target`, map[string]string{ "foo": "bar", }) c.Assert(err, checker.IsNil) defer ctx.Close() err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) id, err := buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target") c.Assert(out, checker.Matches, "bar") // change target file should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) c.Assert(err, checker.IsNil) id, out, err = buildImageFromContextWithOut(name, ctx, true) c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), "Using cache") out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target") c.Assert(out, 
checker.Matches, "baz") } func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildbrokensymlink" ctx, err := fakeContext(` FROM busybox COPY asymlink /`, map[string]string{ "foo/abc": "bar", "foo/def": "baz", }) c.Assert(err, checker.IsNil) defer ctx.Close() err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) id, err := buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") c.Assert(out, checker.Matches, "barbaz") // change target file should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) c.Assert(err, checker.IsNil) id, out, err = buildImageFromContextWithOut(name, ctx, true) c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), "Using cache") out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") c.Assert(out, checker.Matches, "barbax") } // TestBuildSymlinkBasename tests that target file gets basename from symlink, // not from the target file. 
func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildbrokensymlink" ctx, err := fakeContext(` FROM busybox COPY asymlink /`, map[string]string{ "foo": "bar", }) c.Assert(err, checker.IsNil) defer ctx.Close() err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) id, err := buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink") c.Assert(out, checker.Matches, "bar") } // #17827 func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildrootsource" ctx, err := fakeContext(` FROM busybox COPY / /data`, map[string]string{ "foo": "bar", }) c.Assert(err, checker.IsNil) defer ctx.Close() // warm up cache _, err = buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) // change file, should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) c.Assert(err, checker.IsNil) _, out, err := buildImageFromContextWithOut(name, ctx, true) c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), "Using cache") } // #19375 func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git") cmd.Env = append(cmd.Env, "PATH=") out, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git") cmd.Env = append(cmd.Env, "PATH=") out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") } docker-1.10.3/integration-cli/docker_cli_build_unix_test.go000066400000000000000000000155071267010174400240530ustar00rootroot00000000000000// +build !windows package main 
import ( "bufio" "bytes" "encoding/json" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/go-units" "github.com/go-check/check" ) func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { testRequires(c, cpuCfsQuota) name := "testbuildresourceconstraints" ctx, err := fakeContext(` FROM hello-world:frozen RUN ["/hello"] `, map[string]string{}) c.Assert(err, checker.IsNil) _, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, ".") if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "ps", "-lq") cID := strings.TrimSpace(out) type hostConfig struct { Memory int64 MemorySwap int64 CpusetCpus string CpusetMems string CPUShares int64 CPUQuota int64 Ulimits []*units.Ulimit } cfg, err := inspectFieldJSON(cID, "HostConfig") c.Assert(err, checker.IsNil) var c1 hostConfig err = json.Unmarshal([]byte(cfg), &c1) c.Assert(err, checker.IsNil, check.Commentf(cfg)) c.Assert(c1.Memory, checker.Equals, int64(64*1024*1024), check.Commentf("resource constraints not set properly for Memory")) c.Assert(c1.MemorySwap, checker.Equals, int64(-1), check.Commentf("resource constraints not set properly for MemorySwap")) c.Assert(c1.CpusetCpus, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetCpus")) c.Assert(c1.CpusetMems, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetMems")) c.Assert(c1.CPUShares, checker.Equals, int64(100), check.Commentf("resource constraints not set properly for CPUShares")) c.Assert(c1.CPUQuota, checker.Equals, int64(8000), check.Commentf("resource constraints not set properly for CPUQuota")) c.Assert(c1.Ulimits[0].Name, checker.Equals, "nofile", check.Commentf("resource constraints not set properly for Ulimits")) 
c.Assert(c1.Ulimits[0].Hard, checker.Equals, int64(42), check.Commentf("resource constraints not set properly for Ulimits")) // Make sure constraints aren't saved to image dockerCmd(c, "run", "--name=test", name) cfg, err = inspectFieldJSON("test", "HostConfig") c.Assert(err, checker.IsNil) var c2 hostConfig err = json.Unmarshal([]byte(cfg), &c2) c.Assert(err, checker.IsNil, check.Commentf(cfg)) c.Assert(c2.Memory, check.Not(checker.Equals), int64(64*1024*1024), check.Commentf("resource leaked from build for Memory")) c.Assert(c2.MemorySwap, check.Not(checker.Equals), int64(-1), check.Commentf("resource leaked from build for MemorySwap")) c.Assert(c2.CpusetCpus, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetCpus")) c.Assert(c2.CpusetMems, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetMems")) c.Assert(c2.CPUShares, check.Not(checker.Equals), int64(100), check.Commentf("resource leaked from build for CPUShares")) c.Assert(c2.CPUQuota, check.Not(checker.Equals), int64(8000), check.Commentf("resource leaked from build for CPUQuota")) c.Assert(c2.Ulimits, checker.IsNil, check.Commentf("resource leaked from build for Ulimits")) } func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddown" ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD foo /bar/ RUN [ $(stat -c %U:%G "/bar") = 'root:root' ] RUN [ $(stat -c %U:%G "/bar/foo") = 'root:root' ] ` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testFile, err := os.Create(filepath.Join(tmpDir, "foo")) if err != nil { c.Fatalf("failed to create foo file: %v", err) } defer testFile.Close() chownCmd := exec.Command("chown", "daemon:daemon", "foo") chownCmd.Dir = tmpDir out, _, err := runCommandWithOutput(chownCmd) if err != nil { c.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { 
c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed to complete for TestBuildAddChangeOwnership: %v", err) } } // Test that an infinite sleep during a build is killed if the client disconnects. // This test is fairly hairy because there are lots of ways to race. // Strategy: // * Monitor the output of docker events starting from before // * Run a 1-year-long sleep from a docker build. // * When docker events sees container start, close the "docker build" command // * Wait for docker events to emit a dying event. func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcancellation" observer, err := newEventObserver(c) c.Assert(err, checker.IsNil) err = observer.Start() c.Assert(err, checker.IsNil) defer observer.Stop() // (Note: one year, will never finish) ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) if err != nil { c.Fatal(err) } defer ctx.Close() buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") buildCmd.Dir = ctx.Dir stdoutBuild, err := buildCmd.StdoutPipe() if err := buildCmd.Start(); err != nil { c.Fatalf("failed to run build: %s", err) } matchCID := regexp.MustCompile("Running in (.+)") scanner := bufio.NewScanner(stdoutBuild) outputBuffer := new(bytes.Buffer) var buildID string for scanner.Scan() { line := scanner.Text() outputBuffer.WriteString(line) outputBuffer.WriteString("\n") if matches := matchCID.FindStringSubmatch(line); len(matches) > 0 { buildID = matches[1] break } } if buildID == "" { c.Fatalf("Unable to find build container id in build output:\n%s", outputBuffer.String()) } testActions := map[string]chan bool{ "start": make(chan bool), "die": make(chan bool), } matcher := matchEventLine(buildID, "container", testActions) processor := processEventMatch(testActions) go observer.Match(matcher, 
processor) select { case <-time.After(10 * time.Second): observer.CheckEventError(c, buildID, "start", matcher) case <-testActions["start"]: // ignore, done } // Send a kill to the `docker build` command. // Causes the underlying build to be cancelled due to socket close. if err := buildCmd.Process.Kill(); err != nil { c.Fatalf("error killing build command: %s", err) } // Get the exit status of `docker build`, check it exited because killed. if err := buildCmd.Wait(); err != nil && !isKilled(err) { c.Fatalf("wait failed during build run: %T %s", err, err) } select { case <-time.After(10 * time.Second): observer.CheckEventError(c, buildID, "die", matcher) case <-testActions["die"]: // ignore, done } } docker-1.10.3/integration-cli/docker_cli_by_digest_test.go000066400000000000000000000557541267010174400236720ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os" "path/filepath" "regexp" "strings" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringutils" "github.com/docker/engine-api/types" "github.com/go-check/check" ) var ( remoteRepoName = "dockercli/busybox-by-dgst" repoName = fmt.Sprintf("%v/%s", privateRegistryURL, remoteRepoName) pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+") digestRegex = regexp.MustCompile("Digest: ([\\S]+)") ) func setupImage(c *check.C) (digest.Digest, error) { return setupImageWithTag(c, "latest") } func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { containerName := "busyboxbydigest" dockerCmd(c, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox") // tag the image to upload it to the private registry repoAndTag := repoName + ":" + tag out, _, err := dockerCmdWithError("commit", containerName, repoAndTag) c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", 
out)) // delete the container as we don't need it any more err = deleteContainer(containerName) c.Assert(err, checker.IsNil) // push the image out, _, err = dockerCmdWithError("push", repoAndTag) c.Assert(err, checker.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) // delete our local repo that we previously tagged rmiout, _, err := dockerCmdWithError("rmi", repoAndTag) c.Assert(err, checker.IsNil, check.Commentf("error deleting images prior to real test: %s", rmiout)) matches := pushDigestRegex.FindStringSubmatch(out) c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) pushDigest := matches[1] return digest.Digest(pushDigest), nil } func testPullByTagDisplaysDigest(c *check.C) { testRequires(c, DaemonIsLinux) pushDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) // pull from the registry using the tag out, _ := dockerCmd(c, "pull", repoName) // the pull output includes "Digest: ", so find that matches := digestRegex.FindStringSubmatch(out) c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) pullDigest := matches[1] // make sure the pushed and pull digests match c.Assert(pushDigest.String(), checker.Equals, pullDigest) } func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { testPullByTagDisplaysDigest(c) } func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { testPullByTagDisplaysDigest(c) } func testPullByDigest(c *check.C) { testRequires(c, DaemonIsLinux) pushDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) // pull from the registry using the @ reference imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) out, _ := dockerCmd(c, "pull", imageReference) // the pull output includes "Digest: ", so find that matches := digestRegex.FindStringSubmatch(out) c.Assert(matches, 
checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) pullDigest := matches[1] // make sure the pushed and pull digests match c.Assert(pushDigest.String(), checker.Equals, pullDigest) } func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { testPullByDigest(c) } func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) { testPullByDigest(c) } func testPullByDigestNoFallback(c *check.C) { testRequires(c, DaemonIsLinux) // pull from the registry using the @ reference imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName) out, _, err := dockerCmdWithError("pull", imageReference) c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) c.Assert(out, checker.Contains, "manifest unknown", check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) } func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) { testPullByDigestNoFallback(c) } func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) { testPullByDigestNoFallback(c) } func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) { pushDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) containerName := "createByDigest" out, _ := dockerCmd(c, "create", "--name", containerName, imageReference) res, err := inspectField(containerName, "Config.Image") c.Assert(err, checker.IsNil, check.Commentf("failed to get Config.Image: %s", out)) c.Assert(res, checker.Equals, imageReference) } func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) { pushDigest, err := setupImage(c) c.Assert(err, checker.IsNil) imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) containerName := "runByDigest" out, _ := dockerCmd(c, "run", "--name", containerName, 
imageReference, "sh", "-c", "echo found=$digest") foundRegex := regexp.MustCompile("found=([^\n]+)") matches := foundRegex.FindStringSubmatch(out) c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) c.Assert(matches[1], checker.Equals, "1", check.Commentf("Expected %q, got %q", "1", matches[1])) res, err := inspectField(containerName, "Config.Image") c.Assert(err, checker.IsNil, check.Commentf("failed to get Config.Image: %s", out)) c.Assert(res, checker.Equals, imageReference) } func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) { digest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference dockerCmd(c, "pull", imageReference) // make sure inspect runs ok _, err = inspectField(imageReference, "Id") c.Assert(err, checker.IsNil, check.Commentf("failed to inspect image")) // do the delete err = deleteImages(imageReference) c.Assert(err, checker.IsNil, check.Commentf("unexpected error deleting image")) // try to inspect again - it should error this time _, err = inspectField(imageReference, "Id") //unexpected nil err trying to inspect what should be a non-existent image c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "No such image") } func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { digest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference dockerCmd(c, "pull", imageReference) // get the image id imageID, err := inspectField(imageReference, "Id") c.Assert(err, checker.IsNil, check.Commentf("error getting image id")) // do the build name := "buildbydigest" _, err = buildImage(name, fmt.Sprintf( `FROM %s CMD ["/bin/echo", "Hello World"]`, imageReference), true) c.Assert(err, 
checker.IsNil) // get the build's image id res, err := inspectField(name, "Config.Image") c.Assert(err, checker.IsNil) // make sure they match c.Assert(res, checker.Equals, imageID) } func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) { digest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference dockerCmd(c, "pull", imageReference) // tag it tag := "tagbydigest" dockerCmd(c, "tag", imageReference, tag) expectedID, err := inspectField(imageReference, "Id") c.Assert(err, checker.IsNil, check.Commentf("error getting original image id")) tagID, err := inspectField(tag, "Id") c.Assert(err, checker.IsNil, check.Commentf("error getting tagged image id")) c.Assert(tagID, checker.Equals, expectedID) } func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) { digest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference dockerCmd(c, "pull", imageReference) out, _ := dockerCmd(c, "images") c.Assert(out, checker.Not(checker.Contains), "DIGEST", check.Commentf("list output should not have contained DIGEST header")) } func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) { // setup image1 digest1, err := setupImageWithTag(c, "tag1") c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) c.Logf("imageReference1 = %s", imageReference1) // pull image1 by digest dockerCmd(c, "pull", imageReference1) // list images out, _ := dockerCmd(c, "images", "--digests") // make sure repo shown, tag=, digest = $digest1 re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) // 
setup image2 digest2, err := setupImageWithTag(c, "tag2") //error setting up image c.Assert(err, checker.IsNil) imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) c.Logf("imageReference2 = %s", imageReference2) // pull image1 by digest dockerCmd(c, "pull", imageReference1) // pull image2 by digest dockerCmd(c, "pull", imageReference2) // list images out, _ = dockerCmd(c, "images", "--digests") // make sure repo shown, tag=, digest = $digest1 c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) // make sure repo shown, tag=, digest = $digest2 re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) // pull tag1 dockerCmd(c, "pull", repoName+":tag1") // list images out, _ = dockerCmd(c, "images", "--digests") // make sure image 1 has repo, tag, AND repo, , digest reWithTag1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*\s`) reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) c.Assert(reWithTag1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag1.String(), out)) // make sure image 2 has repo, , digest c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) // pull tag 2 dockerCmd(c, "pull", repoName+":tag2") // list images out, _ = dockerCmd(c, "images", "--digests") // make sure image 1 has repo, tag, digest c.Assert(reWithTag1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag1.String(), out)) // make sure image 2 has repo, tag, digest reWithTag2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*\s`) reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) c.Assert(reWithTag2.MatchString(out), 
checker.True, check.Commentf("expected %q: %s", reWithTag2.String(), out)) c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) // list images out, _ = dockerCmd(c, "images", "--digests") // make sure image 1 has repo, tag, digest c.Assert(reWithTag1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag1.String(), out)) // make sure image 2 has repo, tag, digest c.Assert(reWithTag2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag2.String(), out)) // make sure busybox has tag, but not digest busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*\s`) c.Assert(busyboxRe.MatchString(out), checker.True, check.Commentf("expected %q: %s", busyboxRe.String(), out)) } func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { digest, err := setupImage(c) c.Assert(err, check.IsNil, check.Commentf("error setting up image")) imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference dockerCmd(c, "pull", imageReference) out, _ := dockerCmd(c, "inspect", imageReference) var imageJSON []types.ImageInspect err = json.Unmarshal([]byte(out), &imageJSON) c.Assert(err, checker.IsNil) c.Assert(imageJSON, checker.HasLen, 1) c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) c.Assert(stringutils.InSlice(imageJSON[0].RepoDigests, imageReference), checker.Equals, true) } func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { digest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference dockerCmd(c, "pull", imageReference) // build a image from it imageName1 := "images_ps_filter_test" _, err = buildImage(imageName1, fmt.Sprintf( `FROM %s LABEL match me 1`, imageReference), true) c.Assert(err, checker.IsNil) // run a container based on that 
out, _ := dockerCmd(c, "run", "-d", imageReference, "echo", "hello") expectedID := strings.TrimSpace(out) // run a container based on the a descendant of that too out, _ = dockerCmd(c, "run", "-d", imageName1, "echo", "hello") expectedID1 := strings.TrimSpace(out) expectedIDs := []string{expectedID, expectedID1} // Invalid imageReference out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", fmt.Sprintf("--filter=ancestor=busybox@%s", digest)) // Filter container for ancestor filter should be empty c.Assert(strings.TrimSpace(out), checker.Equals, "") // Valid imageReference out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) } func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { pushDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) // pull from the registry using the @ reference imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) dockerCmd(c, "pull", imageReference) // just in case... 
imageID, err := inspectField(imageReference, "Id") c.Assert(err, checker.IsNil, check.Commentf("error inspecting image id")) dockerCmd(c, "rmi", imageID) } func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndTag(c *check.C) { pushDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) // pull from the registry using the @ reference imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) dockerCmd(c, "pull", imageReference) imageID, err := inspectField(imageReference, "Id") c.Assert(err, checker.IsNil, check.Commentf("error inspecting image id")) repoTag := repoName + ":sometag" repoTag2 := repoName + ":othertag" dockerCmd(c, "tag", imageReference, repoTag) dockerCmd(c, "tag", imageReference, repoTag2) dockerCmd(c, "rmi", repoTag2) // rmi should have deleted only repoTag2, because there's another tag _, err = inspectField(repoTag, "Id") c.Assert(err, checker.IsNil, check.Commentf("repoTag should not have been removed")) dockerCmd(c, "rmi", repoTag) // rmi should have deleted the tag, the digest reference, and the image itself _, err = inspectField(imageID, "Id") c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) } // TestPullFailsWithAlteredManifest tests that a `docker pull` fails when // we have modified a manifest blob and its digest cannot be verified. // This is the schema2 version of the test. func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { testRequires(c, DaemonIsLinux) manifestDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) // Load the target manifest blob. manifestBlob := s.reg.readBlobContents(c, manifestDigest) var imgManifest schema2.Manifest err = json.Unmarshal(manifestBlob, &imgManifest) c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) // Change a layer in the manifest. 
imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") // Move the existing data file aside, so that we can replace it with a // malicious blob of data. NOTE: we defer the returned undo func. undo := s.reg.tempMoveBlobData(c, manifestDigest) defer undo() alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) // Now try pulling that image by digest. We should get an error about // digest verification for the manifest digest. // Pull from the registry using the @ reference. imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) out, exitStatus, _ := dockerCmdWithError("pull", imageReference) c.Assert(exitStatus, checker.Not(check.Equals), 0) expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest) c.Assert(out, checker.Contains, expectedErrorMsg) } // TestPullFailsWithAlteredManifest tests that a `docker pull` fails when // we have modified a manifest blob and its digest cannot be verified. // This is the schema1 version of the test. func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { testRequires(c, DaemonIsLinux) manifestDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) // Load the target manifest blob. manifestBlob := s.reg.readBlobContents(c, manifestDigest) var imgManifest schema1.Manifest err = json.Unmarshal(manifestBlob, &imgManifest) c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) // Change a layer in the manifest. imgManifest.FSLayers[0] = schema1.FSLayer{ BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), } // Move the existing data file aside, so that we can replace it with a // malicious blob of data. 
NOTE: we defer the returned undo func. undo := s.reg.tempMoveBlobData(c, manifestDigest) defer undo() alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) // Now try pulling that image by digest. We should get an error about // digest verification for the manifest digest. // Pull from the registry using the @ reference. imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) out, exitStatus, _ := dockerCmdWithError("pull", imageReference) c.Assert(exitStatus, checker.Not(check.Equals), 0) expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) c.Assert(out, checker.Contains, expectedErrorMsg) } // TestPullFailsWithAlteredLayer tests that a `docker pull` fails when // we have modified a layer blob and its digest cannot be verified. // This is the schema2 version of the test. func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { testRequires(c, DaemonIsLinux) manifestDigest, err := setupImage(c) c.Assert(err, checker.IsNil) // Load the target manifest blob. manifestBlob := s.reg.readBlobContents(c, manifestDigest) var imgManifest schema2.Manifest err = json.Unmarshal(manifestBlob, &imgManifest) c.Assert(err, checker.IsNil) // Next, get the digest of one of the layers from the manifest. targetLayerDigest := imgManifest.Layers[0].Digest // Move the existing data file aside, so that we can replace it with a // malicious blob of data. NOTE: we defer the returned undo func. undo := s.reg.tempMoveBlobData(c, targetLayerDigest) defer undo() // Now make a fake data blob in this directory. s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) // Now try pulling that image by digest. We should get an error about // digest verification for the target layer digest. 
// Remove distribution cache to force a re-pull of the blobs if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { c.Fatalf("error clearing distribution cache: %v", err) } // Pull from the registry using the @ reference. imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) out, exitStatus, _ := dockerCmdWithError("pull", imageReference) c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a zero exit status")) expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) } // TestPullFailsWithAlteredLayer tests that a `docker pull` fails when // we have modified a layer blob and its digest cannot be verified. // This is the schema1 version of the test. func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { testRequires(c, DaemonIsLinux) manifestDigest, err := setupImage(c) c.Assert(err, checker.IsNil) // Load the target manifest blob. manifestBlob := s.reg.readBlobContents(c, manifestDigest) var imgManifest schema1.Manifest err = json.Unmarshal(manifestBlob, &imgManifest) c.Assert(err, checker.IsNil) // Next, get the digest of one of the layers from the manifest. targetLayerDigest := imgManifest.FSLayers[0].BlobSum // Move the existing data file aside, so that we can replace it with a // malicious blob of data. NOTE: we defer the returned undo func. undo := s.reg.tempMoveBlobData(c, targetLayerDigest) defer undo() // Now make a fake data blob in this directory. s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) // Now try pulling that image by digest. We should get an error about // digest verification for the target layer digest. 
// Remove distribution cache to force a re-pull of the blobs if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { c.Fatalf("error clearing distribution cache: %v", err) } // Pull from the registry using the @ reference. imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) out, exitStatus, _ := dockerCmdWithError("pull", imageReference) c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a zero exit status")) expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) } docker-1.10.3/integration-cli/docker_cli_commit_test.go000066400000000000000000000141421267010174400231730ustar00rootroot00000000000000package main import ( "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "wait", cleanedContainerID) out, _ = dockerCmd(c, "commit", cleanedContainerID) cleanedImageID := strings.TrimSpace(out) dockerCmd(c, "inspect", cleanedImageID) } func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "wait", cleanedContainerID) out, _ = dockerCmd(c, "commit", "-p=false", cleanedContainerID) cleanedImageID := strings.TrimSpace(out) dockerCmd(c, "inspect", cleanedImageID) } //test commit a paused container should not unpause it after commit func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { testRequires(c, DaemonIsLinux) defer unpauseAllContainers() out, _ := dockerCmd(c, "run", "-i", 
"-d", "busybox") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "pause", cleanedContainerID) out, _ = dockerCmd(c, "commit", cleanedContainerID) out, err := inspectField(cleanedContainerID, "State.Paused") c.Assert(err, checker.IsNil, check.Commentf("%s", out)) // commit should not unpause a paused container c.Assert(out, checker.Contains, "true") } func (s *DockerSuite) TestCommitNewFile(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") imageID, _ := dockerCmd(c, "commit", "commiter") imageID = strings.TrimSpace(imageID) out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") actual := strings.TrimSpace(out) c.Assert(actual, checker.Equals, "koye") } func (s *DockerSuite) TestCommitHardlink(c *check.C) { testRequires(c, DaemonIsLinux) firstOutput, _ := dockerCmd(c, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") chunks := strings.Split(strings.TrimSpace(firstOutput), " ") inode := chunks[0] chunks = strings.SplitAfterN(strings.TrimSpace(firstOutput), " ", 2) c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") imageID = strings.TrimSpace(imageID) secondOutput, _ := dockerCmd(c, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") chunks = strings.Split(strings.TrimSpace(secondOutput), " ") inode = chunks[0] chunks = strings.SplitAfterN(strings.TrimSpace(secondOutput), " ", 2) c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:])) } func (s *DockerSuite) TestCommitTTY(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-t", "--name", "tty", "busybox", "/bin/ls") imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") imageID = strings.TrimSpace(imageID) dockerCmd(c, "run", "ttytest", "/bin/ls") } func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") imageID = strings.TrimSpace(imageID) dockerCmd(c, "run", "bindtest", "true") } func (s *DockerSuite) TestCommitChange(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "test", "busybox", "true") imageID, _ := dockerCmd(c, "commit", "--change", "EXPOSE 8080", "--change", "ENV DEBUG true", "--change", "ENV test 1", "--change", "ENV PATH /foo", "--change", "LABEL foo bar", "--change", "CMD [\"/bin/sh\"]", "--change", "WORKDIR /opt", "--change", "ENTRYPOINT [\"/bin/sh\"]", "--change", "USER testuser", "--change", "VOLUME /var/lib/docker", "--change", "ONBUILD /usr/local/bin/python-build --dir /app/src", "test", "test-commit") imageID = strings.TrimSpace(imageID) expected := map[string]string{ "Config.ExposedPorts": "map[8080/tcp:{}]", "Config.Env": "[DEBUG=true test=1 PATH=/foo]", "Config.Labels": "map[foo:bar]", "Config.Cmd": "{[/bin/sh]}", "Config.WorkingDir": "/opt", "Config.Entrypoint": "{[/bin/sh]}", "Config.User": "testuser", "Config.Volumes": "map[/var/lib/docker:{}]", "Config.OnBuild": "[/usr/local/bin/python-build --dir /app/src]", } for conf, value := range expected { res, err := inspectField(imageID, conf) c.Assert(err, checker.IsNil, check.Commentf("%s('%s')", conf, res)) if res != value { c.Errorf("%s('%s'), expected %s", conf, res, value) } } } // TODO: commit --run is deprecated, remove this once --run is removed func (s *DockerSuite) 
TestCommitMergeConfigRun(c *check.C) { testRequires(c, DaemonIsLinux) name := "commit-test" out, _ := dockerCmd(c, "run", "-d", "-e=FOO=bar", "busybox", "/bin/sh", "-c", "echo testing > /tmp/foo") id := strings.TrimSpace(out) dockerCmd(c, "commit", `--run={"Cmd": ["cat", "/tmp/foo"]}`, id, "commit-test") out, _ = dockerCmd(c, "run", "--name", name, "commit-test") //run config in committed container was not merged c.Assert(strings.TrimSpace(out), checker.Equals, "testing") type cfg struct { Env []string Cmd []string } config1 := cfg{} err := inspectFieldAndMarshall(id, "Config", &config1) c.Assert(err, checker.IsNil) config2 := cfg{} err = inspectFieldAndMarshall(name, "Config", &config2) c.Assert(err, checker.IsNil) // Env has at least PATH loaded as well here, so let's just grab the FOO one var env1, env2 string for _, e := range config1.Env { if strings.HasPrefix(e, "FOO") { env1 = e break } } for _, e := range config2.Env { if strings.HasPrefix(e, "FOO") { env2 = e break } } if len(config1.Env) != len(config2.Env) || env1 != env2 && env2 != "" { c.Fatalf("expected envs to match: %v - %v", config1.Env, config2.Env) } } docker-1.10.3/integration-cli/docker_cli_config_test.go000066400000000000000000000112761267010174400231550ustar00rootroot00000000000000package main import ( "io/ioutil" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "runtime" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestConfigHttpHeader(c *check.C) { testRequires(c, UnixCli) // Can't set/unset HOME on windows right now // We either need a level of Go that supports Unsetenv (for cases // when HOME/USERPROFILE isn't set), or we need to be able to use // os/user but user.Current() only works if we aren't statically compiling var headers map[string][]string server := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) 
{ headers = r.Header })) defer server.Close() homeKey := homedir.Key() homeVal := homedir.Get() tmpDir, err := ioutil.TempDir("", "fake-home") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir) dotDocker := filepath.Join(tmpDir, ".docker") os.Mkdir(dotDocker, 0600) tmpCfg := filepath.Join(dotDocker, "config.json") defer func() { os.Setenv(homeKey, homeVal) }() os.Setenv(homeKey, tmpDir) data := `{ "HttpHeaders": { "MyHeader": "MyValue" } }` err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) c.Assert(err, checker.IsNil) cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") out, _, _ := runCommandWithOutput(cmd) c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", out)) c.Assert(headers["Myheader"], checker.NotNil) c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", out)) } func (s *DockerSuite) TestConfigDir(c *check.C) { cDir, err := ioutil.TempDir("", "fake-home") c.Assert(err, checker.IsNil) defer os.RemoveAll(cDir) // First make sure pointing to empty dir doesn't generate an error dockerCmd(c, "--config", cDir, "ps") // Test with env var too cmd := exec.Command(dockerBinary, "ps") cmd.Env = appendBaseEnv(true, "DOCKER_CONFIG="+cDir) out, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.IsNil, check.Commentf("ps2 didn't work,out:%v", out)) // Start a server so we can check to see if the config file was // loaded properly var headers map[string][]string server := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { headers = r.Header })) defer server.Close() // Create a dummy config file in our new config dir data := `{ "HttpHeaders": { "MyHeader": "MyValue" } }` tmpCfg := filepath.Join(cDir, "config.json") err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) 
c.Assert(err, checker.IsNil, check.Commentf("Err creating file")) env := appendBaseEnv(false) cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") cmd.Env = env out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) c.Assert(headers["Myheader"], checker.NotNil) c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps3 - Missing header,out:%v", out)) // Reset headers and try again using env var this time headers = map[string][]string{} cmd = exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") cmd.Env = append(env, "DOCKER_CONFIG="+cDir) out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.NotNil, check.Commentf("%v", out)) c.Assert(headers["Myheader"], checker.NotNil) c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps4 - Missing header,out:%v", out)) // Reset headers and make sure flag overrides the env var headers = map[string][]string{} cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") cmd.Env = append(env, "DOCKER_CONFIG=MissingDir") out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) c.Assert(headers["Myheader"], checker.NotNil) c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps5 - Missing header,out:%v", out)) // Reset headers and make sure flag overrides the env var. // Almost same as previous but make sure the "MissingDir" isn't // ignore - we don't want to default back to the env var. 
headers = map[string][]string{} cmd = exec.Command(dockerBinary, "--config", "MissingDir", "-H="+server.URL[7:], "ps") cmd.Env = append(env, "DOCKER_CONFIG="+cDir) out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value,out:%v", out)) } docker-1.10.3/integration-cli/docker_cli_cp_from_container_test.go000066400000000000000000000426631267010174400254030ustar00rootroot00000000000000package main import ( "os" "path/filepath" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // docker cp CONTAINER:PATH LOCALPATH // Try all of the test cases from the archive package which implements the // internals of `docker cp` and ensure that the behavior matches when actually // copying to and from containers. // Basic assumptions about SRC and DST: // 1. SRC must exist. // 2. If SRC ends with a trailing separator, it must be a directory. // 3. DST parent directory must exist. // 4. If DST exists as a file, it must not end with a trailing separator. // First get these easy error cases out of the way. // Test for error when SRC does not exist. func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{}) tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") defer os.RemoveAll(tmpDir) err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) } // Test for error when SRC ends in a trailing // path separator but it exists as a file. 
func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") defer os.RemoveAll(tmpDir) err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir) c.Assert(err, checker.NotNil) c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) } // Test for error when SRC is a valid file or directory, // bu the DST parent directory does not exist. func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) // Try with a file source. srcPath := containerCpPath(containerID, "/file1") dstPath := cpPath(tmpDir, "notExists", "file1") err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) // Try with a directory source. srcPath = containerCpPath(containerID, "/dir1") err = runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) } // Test for error when DST ends in a trailing // path separator but exists as a file. func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) // Try with a file source. 
srcPath := containerCpPath(containerID, "/file1") dstPath := cpPathTrailingSep(tmpDir, "file1") err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) // Try with a directory source. srcPath = containerCpPath(containerID, "/dir1") err = runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) } // Check that copying from a container to a local symlink copies to the symlink // target and does not overwrite the local symlink itself. func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) // First, copy a file from the container to a symlink to a file. This // should overwrite the symlink target contents with the source contents. srcPath := containerCpPath(containerID, "/file2") dstPath := cpPath(tmpDir, "symlinkToFile1") c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) // The file should have the contents of "file2" now. c.Assert(fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"), checker.IsNil) // Next, copy a file from the container to a symlink to a directory. This // should copy the file into the symlink target directory. dstPath = cpPath(tmpDir, "symlinkToDir1") c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) // The file should have the contents of "file2" now. 
c.Assert(fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"), checker.IsNil) // Next, copy a file from the container to a symlink to a file that does // not exist (a broken symlink). This should create the target file with // the contents of the source file. dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) // The file should have the contents of "file2" now. c.Assert(fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"), checker.IsNil) // Next, copy a directory from the container to a symlink to a local // directory. This should copy the directory into the symlink target // directory and not modify the symlink. srcPath = containerCpPath(containerID, "/dir2") dstPath = cpPath(tmpDir, "symlinkToDir1") c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) // The directory should now contain a copy of "dir2". c.Assert(fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) // Next, copy a directory from the container to a symlink to a local // directory that does not exist (a broken symlink). This should create // the target as a directory with the contents of the source directory. It // should not modify the symlink. dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) // The "dirX" directory should now be a copy of "dir2". 
c.Assert(fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"), checker.IsNil) } // Possibilities are reduced to the remaining 10 cases: // // case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action // =================================================================================================== // A | no | - | no | - | no | create file // B | no | - | no | - | yes | error // C | no | - | yes | no | - | overwrite file // D | no | - | yes | yes | - | create file in dst dir // E | yes | no | no | - | - | create dir, copy contents // F | yes | no | yes | no | - | error // G | yes | no | yes | yes | - | copy dir and contents // H | yes | yes | no | - | - | create dir, copy contents // I | yes | yes | yes | no | - | error // J | yes | yes | yes | yes | - | copy dir contents // // A. SRC specifies a file and DST (no trailing path separator) doesn't // exist. This should create a file with the name DST and copy the // contents of the source file into it. func (s *DockerSuite) TestCpFromCaseA(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", }) tmpDir := getTestDir(c, "test-cp-from-case-a") defer os.RemoveAll(tmpDir) srcPath := containerCpPath(containerID, "/root/file1") dstPath := cpPath(tmpDir, "itWorks.txt") c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } // B. SRC specifies a file and DST (with trailing path separator) doesn't // exist. This should cause an error because the copy operation cannot // create a directory when copying a single file. 
func (s *DockerSuite) TestCpFromCaseB(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-from-case-b") defer os.RemoveAll(tmpDir) srcPath := containerCpPath(containerID, "/file1") dstDir := cpPathTrailingSep(tmpDir, "testDir") err := runDockerCp(c, srcPath, dstDir) c.Assert(err, checker.NotNil) c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) } // C. SRC specifies a file and DST exists as a file. This should overwrite // the file at DST with the contents of the source file. func (s *DockerSuite) TestCpFromCaseC(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", }) tmpDir := getTestDir(c, "test-cp-from-case-c") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcPath := containerCpPath(containerID, "/root/file1") dstPath := cpPath(tmpDir, "file2") // Ensure the local file starts with different content. c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } // D. SRC specifies a file and DST exists as a directory. This should place // a copy of the source file inside it using the basename from SRC. Ensure // this works whether DST has a trailing path separator or not. func (s *DockerSuite) TestCpFromCaseD(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-from-case-d") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcPath := containerCpPath(containerID, "/file1") dstDir := cpPath(tmpDir, "dir1") dstPath := filepath.Join(dstDir, "file1") // Ensure that dstPath doesn't exist. 
_, err := os.Stat(dstPath) c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // unable to remove dstDir c.Assert(os.RemoveAll(dstDir), checker.IsNil) // unable to make dstDir c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) dstDir = cpPathTrailingSep(tmpDir, "dir1") c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } // E. SRC specifies a directory and DST does not exist. This should create a // directory at DST and copy the contents of the SRC directory into the DST // directory. Ensure this works whether DST has a trailing path separator or // not. func (s *DockerSuite) TestCpFromCaseE(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-from-case-e") defer os.RemoveAll(tmpDir) srcDir := containerCpPath(containerID, "dir1") dstDir := cpPath(tmpDir, "testDir") dstPath := filepath.Join(dstDir, "file1-1") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // unable to remove dstDir c.Assert(os.RemoveAll(dstDir), checker.IsNil) dstDir = cpPathTrailingSep(tmpDir, "testDir") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } // F. SRC specifies a directory and DST exists as a file. This should cause an // error as it is not possible to overwrite a file with a directory. 
func (s *DockerSuite) TestCpFromCaseF(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", }) tmpDir := getTestDir(c, "test-cp-from-case-f") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := containerCpPath(containerID, "/root/dir1") dstFile := cpPath(tmpDir, "file1") err := runDockerCp(c, srcDir, dstFile) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) } // G. SRC specifies a directory and DST exists as a directory. This should copy // the SRC directory and all its contents to the DST directory. Ensure this // works whether DST has a trailing path separator or not. func (s *DockerSuite) TestCpFromCaseG(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", }) tmpDir := getTestDir(c, "test-cp-from-case-g") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := containerCpPath(containerID, "/root/dir1") dstDir := cpPath(tmpDir, "dir2") resultDir := filepath.Join(dstDir, "dir1") dstPath := filepath.Join(resultDir, "file1-1") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // unable to remove dstDir c.Assert(os.RemoveAll(dstDir), checker.IsNil) // unable to make dstDir c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) dstDir = cpPathTrailingSep(tmpDir, "dir2") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } // H. SRC specifies a directory's contents only and DST does not exist. This // should create a directory at DST and copy the contents of the SRC // directory (but not the directory itself) into the DST directory. 
Ensure // this works whether DST has a trailing path separator or not. func (s *DockerSuite) TestCpFromCaseH(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-from-case-h") defer os.RemoveAll(tmpDir) srcDir := containerCpPathTrailingSep(containerID, "dir1") + "." dstDir := cpPath(tmpDir, "testDir") dstPath := filepath.Join(dstDir, "file1-1") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // unable to remove resultDir c.Assert(os.RemoveAll(dstDir), checker.IsNil) dstDir = cpPathTrailingSep(tmpDir, "testDir") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } // I. SRC specifies a directory's contents only and DST exists as a file. This // should cause an error as it is not possible to overwrite a file with a // directory. func (s *DockerSuite) TestCpFromCaseI(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", }) tmpDir := getTestDir(c, "test-cp-from-case-i") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." dstFile := cpPath(tmpDir, "file1") err := runDockerCp(c, srcDir, dstFile) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) } // J. SRC specifies a directory's contents only and DST exists as a directory. // This should copy the contents of the SRC directory (but not the directory // itself) into the DST directory. Ensure this works whether DST has a // trailing path separator or not. 
func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", }) tmpDir := getTestDir(c, "test-cp-from-case-j") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." dstDir := cpPath(tmpDir, "dir2") dstPath := filepath.Join(dstDir, "file1-1") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // unable to remove dstDir c.Assert(os.RemoveAll(dstDir), checker.IsNil) // unable to make dstDir c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) dstDir = cpPathTrailingSep(tmpDir, "dir2") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } docker-1.10.3/integration-cli/docker_cli_cp_test.go000066400000000000000000000543441267010174400223150ustar00rootroot00000000000000package main import ( "bytes" "fmt" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) const ( cpTestPathParent = "/some" cpTestPath = "/some/path" cpTestName = "test" cpFullPath = "/some/path/test" cpContainerContents = "holla, i am the container" cpHostContents = "hello, i am the host" ) // Ensure that an all-local path case returns an error. 
func (s *DockerSuite) TestCpLocalOnly(c *check.C) { err := runDockerCp(c, "foo", "bar") c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "must specify at least one container source") } // Test for #5656 // Check that garbage paths don't escape the container's rootfs func (s *DockerSuite) TestCpGarbagePath(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) hostFile, err := os.Create(cpFullPath) c.Assert(err, checker.IsNil) defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") c.Assert(err, checker.IsNil) tmpname := filepath.Join(tmpdir, cpTestName) defer os.RemoveAll(tmpdir) path := path.Join("../../../../../../../../../../../../", cpFullPath) dockerCmd(c, "cp", containerID+":"+path, tmpdir) file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) c.Assert(err, checker.IsNil) // output matched host file -- garbage path can escape container rootfs c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) // output doesn't match the input for garbage path c.Assert(string(test), checker.Equals, cpContainerContents) } // Check that relative paths are relative to the container's rootfs func (s *DockerSuite) TestCpRelativePath(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), 
checker.Equals, "0") c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) hostFile, err := os.Create(cpFullPath) c.Assert(err, checker.IsNil) defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") c.Assert(err, checker.IsNil) tmpname := filepath.Join(tmpdir, cpTestName) defer os.RemoveAll(tmpdir) var relPath string if path.IsAbs(cpFullPath) { // normally this is `filepath.Rel("/", cpFullPath)` but we cannot // get this unix-path manipulation on windows with filepath. relPath = cpFullPath[1:] } c.Assert(path.IsAbs(cpFullPath), checker.True, check.Commentf("path %s was assumed to be an absolute path", cpFullPath)) dockerCmd(c, "cp", containerID+":"+relPath, tmpdir) file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) c.Assert(err, checker.IsNil) // output matched host file -- relative path can escape container rootfs c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) // output doesn't match the input for relative path c.Assert(string(test), checker.Equals, cpContainerContents) } // Check that absolute paths are relative to the container's rootfs func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) hostFile, err := os.Create(cpFullPath) c.Assert(err, checker.IsNil) defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") c.Assert(err, checker.IsNil) tmpname := filepath.Join(tmpdir, cpTestName) defer 
os.RemoveAll(tmpdir) path := cpFullPath dockerCmd(c, "cp", containerID+":"+path, tmpdir) file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) c.Assert(err, checker.IsNil) // output matched host file -- absolute path can escape container rootfs c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) // output doesn't match the input for absolute path c.Assert(string(test), checker.Equals, cpContainerContents) } // Test for #5619 // Check that absolute symlinks are still relative to the container's rootfs func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) hostFile, err := os.Create(cpFullPath) c.Assert(err, checker.IsNil) defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") c.Assert(err, checker.IsNil) tmpname := filepath.Join(tmpdir, "container_path") defer os.RemoveAll(tmpdir) path := path.Join("/", "container_path") dockerCmd(c, "cp", containerID+":"+path, tmpdir) // We should have copied a symlink *NOT* the file itself! linkTarget, err := os.Readlink(tmpname) c.Assert(err, checker.IsNil) c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpFullPath)) } // Check that symlinks to a directory behave as expected when copying one from // a container. 
func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link") containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-") c.Assert(err, checker.IsNil) defer os.RemoveAll(testDir) // This copy command should copy the symlink, not the target, into the // temporary directory. dockerCmd(c, "cp", containerID+":"+"/dir_link", testDir) expectedPath := filepath.Join(testDir, "dir_link") linkTarget, err := os.Readlink(expectedPath) c.Assert(err, checker.IsNil) c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpTestPathParent)) os.Remove(expectedPath) // This copy command should resolve the symlink (note the trailing // separator), copying the target into the temporary directory. dockerCmd(c, "cp", containerID+":"+"/dir_link/", testDir) // It *should not* have copied the directory using the target's name, but // used the given name instead. unexpectedPath := filepath.Join(testDir, cpTestPathParent) stat, err := os.Lstat(unexpectedPath) if err == nil { out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) } c.Assert(err, checker.NotNil, check.Commentf(out)) // It *should* have copied the directory using the asked name "dir_link". stat, err = os.Lstat(expectedPath) c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) } // Check that symlinks to a directory behave as expected when copying one to a // container. 
func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) // Requires local volume mount bind. testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") c.Assert(err, checker.IsNil) defer os.RemoveAll(testVol) // Create a test container with a local volume. We will test by copying // to the volume path in the container which we can then verify locally. out, _ := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox") containerID := strings.TrimSpace(out) // Create a temp directory to hold a test file nested in a direcotry. testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") c.Assert(err, checker.IsNil) defer os.RemoveAll(testDir) // This file will be at "/testDir/some/path/test" and will be copied into // the test volume later. hostTestFilename := filepath.Join(testDir, cpFullPath) c.Assert(os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)), checker.IsNil) c.Assert(ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)), checker.IsNil) // Now create another temp directory to hold a symlink to the // "/testDir/some" directory. linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") c.Assert(err, checker.IsNil) defer os.RemoveAll(linkDir) // Then symlink "/linkDir/dir_link" to "/testdir/some". linkTarget := filepath.Join(testDir, cpTestPathParent) localLink := filepath.Join(linkDir, "dir_link") c.Assert(os.Symlink(linkTarget, localLink), checker.IsNil) // Now copy that symlink into the test volume in the container. dockerCmd(c, "cp", localLink, containerID+":/testVol") // This copy command should have copied the symlink *not* the target. 
expectedPath := filepath.Join(testVol, "dir_link") actualLinkTarget, err := os.Readlink(expectedPath) c.Assert(err, checker.IsNil, check.Commentf("unable to read symlink at %q", expectedPath)) c.Assert(actualLinkTarget, checker.Equals, linkTarget) // Good, now remove that copied link for the next test. os.Remove(expectedPath) // This copy command should resolve the symlink (note the trailing // separator), copying the target into the test volume directory in the // container. dockerCmd(c, "cp", localLink+"/", containerID+":/testVol") // It *should not* have copied the directory using the target's name, but // used the given name instead. unexpectedPath := filepath.Join(testVol, cpTestPathParent) stat, err := os.Lstat(unexpectedPath) if err == nil { out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) } c.Assert(err, checker.NotNil, check.Commentf(out)) // It *should* have copied the directory using the asked name "dir_link". stat, err = os.Lstat(expectedPath) c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) // And this directory should contain the file copied from the host at the // expected location: "/testVol/dir_link/path/test" expectedFilepath := filepath.Join(testVol, "dir_link/path/test") fileContents, err := ioutil.ReadFile(expectedFilepath) c.Assert(err, checker.IsNil) c.Assert(string(fileContents), checker.Equals, cpHostContents) } // Test for #5619 // Check that symlinks which are part of the resource path are still relative to the container's rootfs func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") containerID := strings.TrimSpace(out) out, _ = 
dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) hostFile, err := os.Create(cpFullPath) c.Assert(err, checker.IsNil) defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") c.Assert(err, checker.IsNil) tmpname := filepath.Join(tmpdir, cpTestName) defer os.RemoveAll(tmpdir) path := path.Join("/", "container_path", cpTestName) dockerCmd(c, "cp", containerID+":"+path, tmpdir) file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) c.Assert(err, checker.IsNil) // output matched host file -- symlink path component can escape container rootfs c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) // output doesn't match the input for symlink path component c.Assert(string(test), checker.Equals, cpContainerContents) } // Check that cp with unprivileged user doesn't return any error func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, UnixCli) // uses chmod/su: not available on windows out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") tmpdir, err := ioutil.TempDir("", "docker-integration") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpdir) c.Assert(os.Chmod(tmpdir, 0777), checker.IsNil) path := cpTestName _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+containerID+":"+path+" "+tmpdir)) c.Assert(err, checker.IsNil, check.Commentf("couldn't copy with unprivileged user: %s:%s", containerID, path)) } func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, 
SameHostDaemon) outDir, err := ioutil.TempDir("", "cp-test-special-files") c.Assert(err, checker.IsNil) defer os.RemoveAll(outDir) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo") containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") // Copy actual /etc/resolv.conf dockerCmd(c, "cp", containerID+":/etc/resolv.conf", outDir) expected, err := readContainerFile(containerID, "resolv.conf") actual, err := ioutil.ReadFile(outDir + "/resolv.conf") // Expected copied file to be duplicate of the container resolvconf c.Assert(bytes.Equal(actual, expected), checker.True) // Copy actual /etc/hosts dockerCmd(c, "cp", containerID+":/etc/hosts", outDir) expected, err = readContainerFile(containerID, "hosts") actual, err = ioutil.ReadFile(outDir + "/hosts") // Expected copied file to be duplicate of the container hosts c.Assert(bytes.Equal(actual, expected), checker.True) // Copy actual /etc/resolv.conf dockerCmd(c, "cp", containerID+":/etc/hostname", outDir) expected, err = readContainerFile(containerID, "hostname") actual, err = ioutil.ReadFile(outDir + "/hostname") // Expected copied file to be duplicate of the container resolvconf c.Assert(bytes.Equal(actual, expected), checker.True) } func (s *DockerSuite) TestCpVolumePath(c *check.C) { // stat /tmp/cp-test-volumepath851508420/test gets permission denied for the user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir) outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") c.Assert(err, checker.IsNil) defer os.RemoveAll(outDir) _, err = os.Create(tmpDir + "/test") c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch 
/foo/bar") containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") // Copy actual volume path dockerCmd(c, "cp", containerID+":/foo", outDir) stat, err := os.Stat(outDir + "/foo") c.Assert(err, checker.IsNil) // expected copied content to be dir c.Assert(stat.IsDir(), checker.True) stat, err = os.Stat(outDir + "/foo/bar") c.Assert(err, checker.IsNil) // Expected file `bar` to be a file c.Assert(stat.IsDir(), checker.False) // Copy file nested in volume dockerCmd(c, "cp", containerID+":/foo/bar", outDir) stat, err = os.Stat(outDir + "/bar") c.Assert(err, checker.IsNil) // Expected file `bar` to be a file c.Assert(stat.IsDir(), checker.False) // Copy Bind-mounted dir dockerCmd(c, "cp", containerID+":/baz", outDir) stat, err = os.Stat(outDir + "/baz") c.Assert(err, checker.IsNil) // Expected `baz` to be a dir c.Assert(stat.IsDir(), checker.True) // Copy file nested in bind-mounted dir dockerCmd(c, "cp", containerID+":/baz/test", outDir) fb, err := ioutil.ReadFile(outDir + "/baz/test") c.Assert(err, checker.IsNil) fb2, err := ioutil.ReadFile(tmpDir + "/test") c.Assert(err, checker.IsNil) // Expected copied file to be duplicate of bind-mounted file c.Assert(bytes.Equal(fb, fb2), checker.True) // Copy bind-mounted file dockerCmd(c, "cp", containerID+":/test", outDir) fb, err = ioutil.ReadFile(outDir + "/test") c.Assert(err, checker.IsNil) fb2, err = ioutil.ReadFile(tmpDir + "/test") c.Assert(err, checker.IsNil) // Expected copied file to be duplicate of bind-mounted file c.Assert(bytes.Equal(fb, fb2), checker.True) } func (s *DockerSuite) TestCpToDot(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") tmpdir, err := 
ioutil.TempDir("", "docker-integration") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpdir) cwd, err := os.Getwd() c.Assert(err, checker.IsNil) defer os.Chdir(cwd) c.Assert(os.Chdir(tmpdir), checker.IsNil) dockerCmd(c, "cp", containerID+":/test", ".") content, err := ioutil.ReadFile("./test") c.Assert(string(content), checker.Equals, "lololol\n") } func (s *DockerSuite) TestCpToStdout(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "cp", containerID+":/test", "-"), exec.Command("tar", "-vtf", "-")) c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, "test") c.Assert(out, checker.Contains, "-rw") } func (s *DockerSuite) TestCpNameHasColon(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t") containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") tmpdir, err := ioutil.TempDir("", "docker-integration") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpdir) dockerCmd(c, "cp", containerID+":/te:s:t", tmpdir) content, err := ioutil.ReadFile(tmpdir + "/te:s:t") c.Assert(string(content), checker.Equals, "lololol\n") } func (s *DockerSuite) TestCopyAndRestart(c *check.C) { testRequires(c, DaemonIsLinux) expectedMsg := "hello" out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", expectedMsg) containerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-") c.Assert(err, 
checker.IsNil) defer os.RemoveAll(tmpDir) dockerCmd(c, "cp", fmt.Sprintf("%s:/etc/group", containerID), tmpDir) out, _ = dockerCmd(c, "start", "-a", containerID) c.Assert(strings.TrimSpace(out), checker.Equals, expectedMsg) } func (s *DockerSuite) TestCopyCreatedContainer(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "create", "--name", "test_cp", "-v", "/test", "busybox") tmpDir, err := ioutil.TempDir("", "test") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir) dockerCmd(c, "cp", "test_cp:/bin/sh", tmpDir) } // test copy with option `-L`: following symbol link // Check that symlinks to a file behave as expected when copying one from // a container to host following symbol link func (s *DockerSuite) TestCpSymlinkFromConToHostFollowSymlink(c *check.C) { testRequires(c, DaemonIsLinux) out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" /dir_link") if exitCode != 0 { c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", cleanedContainerID) if strings.TrimSpace(out) != "0" { c.Fatal("failed to set up container", out) } testDir, err := ioutil.TempDir("", "test-cp-symlink-container-to-host-follow-symlink") if err != nil { c.Fatal(err) } defer os.RemoveAll(testDir) // This copy command should copy the symlink, not the target, into the // temporary directory. 
dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", testDir) expectedPath := filepath.Join(testDir, "dir_link") expected := []byte(cpContainerContents) actual, err := ioutil.ReadFile(expectedPath) if !bytes.Equal(actual, expected) { c.Fatalf("Expected copied file to be duplicate of the container symbol link target") } os.Remove(expectedPath) // now test copy symbol link to an non-existing file in host expectedPath = filepath.Join(testDir, "somefile_host") // expectedPath shouldn't exist, if exists, remove it if _, err := os.Lstat(expectedPath); err == nil { os.Remove(expectedPath) } dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", expectedPath) actual, err = ioutil.ReadFile(expectedPath) if !bytes.Equal(actual, expected) { c.Fatalf("Expected copied file to be duplicate of the container symbol link target") } defer os.Remove(expectedPath) } docker-1.10.3/integration-cli/docker_cli_cp_to_container_test.go000066400000000000000000000536621267010174400250630ustar00rootroot00000000000000package main import ( "os" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // docker cp LOCALPATH CONTAINER:PATH // Try all of the test cases from the archive package which implements the // internals of `docker cp` and ensure that the behavior matches when actually // copying to and from containers. // Basic assumptions about SRC and DST: // 1. SRC must exist. // 2. If SRC ends with a trailing separator, it must be a directory. // 3. DST parent directory must exist. // 4. If DST exists as a file, it must not end with a trailing separator. // First get these easy error cases out of the way. // Test for error when SRC does not exist. 
func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{}) tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists") defer os.RemoveAll(tmpDir) srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "file1") err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) } // Test for error when SRC ends in a trailing // path separator but it exists as a file. func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{}) tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcPath := cpPathTrailingSep(tmpDir, "file1") dstPath := containerCpPath(containerID, "testDir") err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) } // Test for error when SRC is a valid file or directory, // bu the DST parent directory does not exist. func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) // Try with a file source. srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/notExists", "file1") err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) // Try with a directory source. 
srcPath = cpPath(tmpDir, "dir1") c.Assert(err, checker.NotNil) c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) } // Test for error when DST ends in a trailing path separator but exists as a // file. Also test that we cannot overwrite an existing directory with a // non-directory and cannot overwrite an existing func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) // Try with a file source. srcPath := cpPath(tmpDir, "dir1/file1-1") dstPath := containerCpPathTrailingSep(containerID, "file1") // The client should encounter an error trying to stat the destination // and then be unable to copy since the destination is asserted to be a // directory but does not exist. err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) // Try with a directory source. srcPath = cpPath(tmpDir, "dir1") // The client should encounter an error trying to stat the destination and // then decide to extract to the parent directory instead with a rebased // name in the source archive, but this directory would overwrite the // existing file with the same name. err = runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) } // Check that copying from a local path to a symlink in a container copies to // the symlink target and does not overwrite the container symlink itself. 
func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) {
	// stat /tmp/test-cp-to-symlink-destination-262430901/vol3 gets permission denied for the user
	testRequires(c, NotUserNamespace)
	testRequires(c, DaemonIsLinux)
	testRequires(c, SameHostDaemon) // Requires local volume mount bind.

	testVol := getTestDir(c, "test-cp-to-symlink-destination-")
	defer os.RemoveAll(testVol)

	makeTestContentInDir(c, testVol)

	containerID := makeTestContainer(c, testContainerOptions{
		volumes: defaultVolumes(testVol), // Our bind mount is at /vol2
	})

	// First, copy a local file to a symlink to a file in the container. This
	// should overwrite the symlink target contents with the source contents.
	srcPath := cpPath(testVol, "file2")
	dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1")

	c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil)

	// The symlink should not have been modified.
	c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil)

	// The file should have the contents of "file2" now.
	c.Assert(fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"), checker.IsNil)

	// Next, copy a local file to a symlink to a directory in the container.
	// This should copy the file into the symlink target directory.
	dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1")

	c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil)

	// The symlink should not have been modified.
	c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil)

	// The file should have the contents of "file2" now.
	c.Assert(fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"), checker.IsNil)

	// Next, copy a file to a symlink to a file that does not exist (a broken
	// symlink) in the container. This should create the target file with the
	// contents of the source file.
	dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX")

	c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil)

	// The symlink should not have been modified.
	c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil)

	// The file should have the contents of "file2" now.
	c.Assert(fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"), checker.IsNil)

	// Next, copy a local directory to a symlink to a directory in the
	// container. This should copy the directory into the symlink target
	// directory and not modify the symlink.
	srcPath = cpPath(testVol, "/dir2")
	dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1")

	c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil)

	// The symlink should not have been modified.
	c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil)

	// The directory should now contain a copy of "dir2".
	c.Assert(fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil)

	// Next, copy a local directory to a symlink to a local directory that does
	// not exist (a broken symlink) in the container. This should create the
	// target as a directory with the contents of the source directory. It
	// should not modify the symlink.
	dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX")

	c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil)

	// The symlink should not have been modified.
	c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil)

	// The "dirX" directory should now be a copy of "dir2".
	c.Assert(fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"), checker.IsNil)
}

// Possibilities are reduced to the remaining 10 cases:
//
//	case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
//	===================================================================================================
//	 A   |  no      |  -              |  no       |  -       |  no      |  create file
//	 B   |  no      |  -              |  no       |  -       |  yes     |  error
//	 C   |  no      |  -              |  yes      |  no      |  -       |  overwrite file
//	 D   |  no      |  -              |  yes      |  yes     |  -       |  create file in dst dir
//	 E   |  yes     |  no             |  no       |  -       |  -       |  create dir, copy contents
//	 F   |  yes     |  no             |  yes      |  no      |  -       |  error
//	 G   |  yes     |  no             |  yes      |  yes     |  -       |  copy dir and contents
//	 H   |  yes     |  yes            |  no       |  -       |  -       |  create dir, copy contents
//	 I   |  yes     |  yes            |  yes      |  no      |  -       |  error
//	 J   |  yes     |  yes            |  yes      |  yes     |  -       |  copy dir contents
//

// A. SRC specifies a file and DST (no trailing path separator) doesn't
//    exist. This should create a file with the name DST and copy the
//    contents of the source file into it.
func (s *DockerSuite) TestCpToCaseA(c *check.C) {
	testRequires(c, DaemonIsLinux)
	containerID := makeTestContainer(c, testContainerOptions{
		workDir: "/root", command: makeCatFileCommand("itWorks.txt"),
	})

	tmpDir := getTestDir(c, "test-cp-to-case-a")
	defer os.RemoveAll(tmpDir)

	makeTestContentInDir(c, tmpDir)

	srcPath := cpPath(tmpDir, "file1")
	dstPath := containerCpPath(containerID, "/root/itWorks.txt")

	c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil)

	c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil)
}

// B. SRC specifies a file and DST (with trailing path separator) doesn't
//    exist. This should cause an error because the copy operation cannot
//    create a directory when copying a single file.
func (s *DockerSuite) TestCpToCaseB(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ command: makeCatFileCommand("testDir/file1"), }) tmpDir := getTestDir(c, "test-cp-to-case-b") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcPath := cpPath(tmpDir, "file1") dstDir := containerCpPathTrailingSep(containerID, "testDir") err := runDockerCp(c, srcPath, dstDir) c.Assert(err, checker.NotNil) c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) } // C. SRC specifies a file and DST exists as a file. This should overwrite // the file at DST with the contents of the source file. func (s *DockerSuite) TestCpToCaseC(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", command: makeCatFileCommand("file2"), }) tmpDir := getTestDir(c, "test-cp-to-case-c") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/root/file2") // Ensure the container's file starts with the original content. c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) } // D. SRC specifies a file and DST exists as a directory. This should place // a copy of the source file inside it using the basename from SRC. Ensure // this works whether DST has a trailing path separator or not. 
func (s *DockerSuite) TestCpToCaseD(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, command: makeCatFileCommand("/dir1/file1"), }) tmpDir := getTestDir(c, "test-cp-to-case-d") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcPath := cpPath(tmpDir, "file1") dstDir := containerCpPath(containerID, "dir1") // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // Make new destination container. containerID = makeTestContainer(c, testContainerOptions{ addContent: true, command: makeCatFileCommand("/dir1/file1"), }) dstDir = containerCpPathTrailingSep(containerID, "dir1") // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) } // E. SRC specifies a directory and DST does not exist. This should create a // directory at DST and copy the contents of the SRC directory into the DST // directory. Ensure this works whether DST has a trailing path separator or // not. func (s *DockerSuite) TestCpToCaseE(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ command: makeCatFileCommand("/testDir/file1-1"), }) tmpDir := getTestDir(c, "test-cp-to-case-e") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := cpPath(tmpDir, "dir1") dstDir := containerCpPath(containerID, "testDir") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) // Should now contain file1-1's contents. 
c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // Make new destination container. containerID = makeTestContainer(c, testContainerOptions{ command: makeCatFileCommand("/testDir/file1-1"), }) dstDir = containerCpPathTrailingSep(containerID, "testDir") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) } // F. SRC specifies a directory and DST exists as a file. This should cause an // error as it is not possible to overwrite a file with a directory. func (s *DockerSuite) TestCpToCaseF(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", }) tmpDir := getTestDir(c, "test-cp-to-case-f") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := cpPath(tmpDir, "dir1") dstFile := containerCpPath(containerID, "/root/file1") err := runDockerCp(c, srcDir, dstFile) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) } // G. SRC specifies a directory and DST exists as a directory. This should copy // the SRC directory and all its contents to the DST directory. Ensure this // works whether DST has a trailing path separator or not. func (s *DockerSuite) TestCpToCaseG(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", command: makeCatFileCommand("dir2/dir1/file1-1"), }) tmpDir := getTestDir(c, "test-cp-to-case-g") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := cpPath(tmpDir, "dir1") dstDir := containerCpPath(containerID, "/root/dir2") // Ensure that dstPath doesn't exist. 
c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // Make new destination container. containerID = makeTestContainer(c, testContainerOptions{ addContent: true, command: makeCatFileCommand("/dir2/dir1/file1-1"), }) dstDir = containerCpPathTrailingSep(containerID, "/dir2") // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) } // H. SRC specifies a directory's contents only and DST does not exist. This // should create a directory at DST and copy the contents of the SRC // directory (but not the directory itself) into the DST directory. Ensure // this works whether DST has a trailing path separator or not. func (s *DockerSuite) TestCpToCaseH(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ command: makeCatFileCommand("/testDir/file1-1"), }) tmpDir := getTestDir(c, "test-cp-to-case-h") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." dstDir := containerCpPath(containerID, "testDir") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // Make new destination container. 
containerID = makeTestContainer(c, testContainerOptions{ command: makeCatFileCommand("/testDir/file1-1"), }) dstDir = containerCpPathTrailingSep(containerID, "testDir") c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) } // I. SRC specifies a directory's contents only and DST exists as a file. This // should cause an error as it is not possible to overwrite a file with a // directory. func (s *DockerSuite) TestCpToCaseI(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", }) tmpDir := getTestDir(c, "test-cp-to-case-i") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." dstFile := containerCpPath(containerID, "/root/file1") err := runDockerCp(c, srcDir, dstFile) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) } // J. SRC specifies a directory's contents only and DST exists as a directory. // This should copy the contents of the SRC directory (but not the directory // itself) into the DST directory. Ensure this works whether DST has a // trailing path separator or not. func (s *DockerSuite) TestCpToCaseJ(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{ addContent: true, workDir: "/root", command: makeCatFileCommand("/dir2/file1-1"), }) tmpDir := getTestDir(c, "test-cp-to-case-j") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." dstDir := containerCpPath(containerID, "/dir2") // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) // Should now contain file1-1's contents. 
c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) // Now try again but using a trailing path separator for dstDir. // Make new destination container. containerID = makeTestContainer(c, testContainerOptions{ command: makeCatFileCommand("/dir2/file1-1"), }) dstDir = containerCpPathTrailingSep(containerID, "/dir2") // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) } // The `docker cp` command should also ensure that you cannot // write to a container rootfs that is marked as read-only. func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { // --read-only + userns has remount issues testRequires(c, DaemonIsLinux, NotUserNamespace) tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) containerID := makeTestContainer(c, testContainerOptions{ readOnly: true, workDir: "/root", command: makeCatFileCommand("shouldNotExist"), }) srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/root/shouldNotExist") err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) } // The `docker cp` command should also ensure that you // cannot write to a volume that is mounted as read-only. 
func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { // --read-only + userns has remount issues testRequires(c, DaemonIsLinux, NotUserNamespace) tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) containerID := makeTestContainer(c, testContainerOptions{ volumes: defaultVolumes(tmpDir), workDir: "/root", command: makeCatFileCommand("/vol_ro/shouldNotExist"), }) srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) } docker-1.10.3/integration-cli/docker_cli_cp_to_container_unix_test.go000066400000000000000000000023001267010174400261050ustar00rootroot00000000000000// +build !windows package main import ( "fmt" "os" "path/filepath" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/system" "github.com/go-check/check" ) // Check ownership is root, both in non-userns and userns enabled modes func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon) tmpVolDir := getTestDir(c, "test-cp-tmpvol") containerID := makeTestContainer(c, testContainerOptions{volumes: []string{fmt.Sprintf("%s:/tmpvol", tmpVolDir)}}) tmpDir := getTestDir(c, "test-cp-to-check-ownership") defer os.RemoveAll(tmpDir) makeTestContentInDir(c, tmpDir) srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/tmpvol", "file1") err := runDockerCp(c, srcPath, dstPath) c.Assert(err, checker.IsNil) stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) c.Assert(err, checker.IsNil) uid, gid, err := getRootUIDGID() c.Assert(err, checker.IsNil) c.Assert(stat.UID(), checker.Equals, 
uint32(uid), check.Commentf("Copied file not owned by container root UID")) c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Copied file not owned by container root GID")) } docker-1.10.3/integration-cli/docker_cli_cp_utils.go000066400000000000000000000172461267010174400224760ustar00rootroot00000000000000package main import ( "bytes" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) type fileType uint32 const ( ftRegular fileType = iota ftDir ftSymlink ) type fileData struct { filetype fileType path string contents string } func (fd fileData) creationCommand() string { var command string switch fd.filetype { case ftRegular: // Don't overwrite the file if it already exists! command = fmt.Sprintf("if [ ! -f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path) case ftDir: command = fmt.Sprintf("mkdir -p %s", fd.path) case ftSymlink: command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path) } return command } func mkFilesCommand(fds []fileData) string { commands := make([]string, len(fds)) for i, fd := range fds { commands[i] = fd.creationCommand() } return strings.Join(commands, " && ") } var defaultFileData = []fileData{ {ftRegular, "file1", "file1"}, {ftRegular, "file2", "file2"}, {ftRegular, "file3", "file3"}, {ftRegular, "file4", "file4"}, {ftRegular, "file5", "file5"}, {ftRegular, "file6", "file6"}, {ftRegular, "file7", "file7"}, {ftDir, "dir1", ""}, {ftRegular, "dir1/file1-1", "file1-1"}, {ftRegular, "dir1/file1-2", "file1-2"}, {ftDir, "dir2", ""}, {ftRegular, "dir2/file2-1", "file2-1"}, {ftRegular, "dir2/file2-2", "file2-2"}, {ftDir, "dir3", ""}, {ftRegular, "dir3/file3-1", "file3-1"}, {ftRegular, "dir3/file3-2", "file3-2"}, {ftDir, "dir4", ""}, {ftRegular, "dir4/file3-1", "file4-1"}, {ftRegular, "dir4/file3-2", "file4-2"}, {ftDir, "dir5", ""}, {ftSymlink, "symlinkToFile1", "file1"}, {ftSymlink, 
"symlinkToDir1", "dir1"}, {ftSymlink, "brokenSymlinkToFileX", "fileX"}, {ftSymlink, "brokenSymlinkToDirX", "dirX"}, {ftSymlink, "symlinkToAbsDir", "/root"}, } func defaultMkContentCommand() string { return mkFilesCommand(defaultFileData) } func makeTestContentInDir(c *check.C, dir string) { for _, fd := range defaultFileData { path := filepath.Join(dir, filepath.FromSlash(fd.path)) switch fd.filetype { case ftRegular: c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)), checker.IsNil) case ftDir: c.Assert(os.Mkdir(path, os.FileMode(0777)), checker.IsNil) case ftSymlink: c.Assert(os.Symlink(fd.contents, path), checker.IsNil) } } } type testContainerOptions struct { addContent bool readOnly bool volumes []string workDir string command string } func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) { if options.addContent { mkContentCmd := defaultMkContentCommand() if options.command == "" { options.command = mkContentCmd } else { options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command) } } if options.command == "" { options.command = "#(nop)" } args := []string{"run", "-d"} for _, volume := range options.volumes { args = append(args, "-v", volume) } if options.workDir != "" { args = append(args, "-w", options.workDir) } if options.readOnly { args = append(args, "--read-only") } args = append(args, "busybox", "/bin/sh", "-c", options.command) out, _ := dockerCmd(c, args...) 
containerID = strings.TrimSpace(out) out, _ = dockerCmd(c, "wait", containerID) exitCode := strings.TrimSpace(out) if exitCode != "0" { out, _ = dockerCmd(c, "logs", containerID) } c.Assert(exitCode, checker.Equals, "0", check.Commentf("failed to make test container: %s", out)) return } func makeCatFileCommand(path string) string { return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path) } func cpPath(pathElements ...string) string { localizedPathElements := make([]string, len(pathElements)) for i, path := range pathElements { localizedPathElements[i] = filepath.FromSlash(path) } return strings.Join(localizedPathElements, string(filepath.Separator)) } func cpPathTrailingSep(pathElements ...string) string { return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator) } func containerCpPath(containerID string, pathElements ...string) string { joined := strings.Join(pathElements, "/") return fmt.Sprintf("%s:%s", containerID, joined) } func containerCpPathTrailingSep(containerID string, pathElements ...string) string { return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...)) } func runDockerCp(c *check.C, src, dst string) (err error) { c.Logf("running `docker cp %s %s`", src, dst) args := []string{"cp", src, dst} out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) if err != nil { err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out) } return } func startContainerGetOutput(c *check.C, containerID string) (out string, err error) { c.Logf("running `docker start -a %s`", containerID) args := []string{"start", "-a", containerID} out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...)) if err != nil { err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out) } return } func getTestDir(c *check.C, label string) (tmpDir string) { var err error tmpDir, err = ioutil.TempDir("", label) // unable to make temporary directory c.Assert(err, checker.IsNil) return } func 
isCpNotExist(err error) bool { return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified") } func isCpDirNotExist(err error) bool { return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) } func isCpNotDir(err error) bool { return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") } func isCpCannotCopyDir(err error) bool { return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) } func isCpCannotCopyReadOnly(err error) bool { return strings.Contains(err.Error(), "marked read-only") } func isCannotOverwriteNonDirWithDir(err error) bool { return strings.Contains(err.Error(), "cannot overwrite non-directory") } func fileContentEquals(c *check.C, filename, contents string) (err error) { c.Logf("checking that file %q contains %q\n", filename, contents) fileBytes, err := ioutil.ReadFile(filename) if err != nil { return } expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents)) if err != nil { return } if !bytes.Equal(fileBytes, expectedBytes) { err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes)) } return } func symlinkTargetEquals(c *check.C, symlink, expectedTarget string) (err error) { c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget) actualTarget, err := os.Readlink(symlink) if err != nil { return } if actualTarget != expectedTarget { err = fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget) } return } func containerStartOutputEquals(c *check.C, containerID, contents string) (err error) { c.Logf("checking that container %q start output contains %q\n", containerID, contents) out, err := startContainerGetOutput(c, containerID) if err != nil { return } if out != contents { err = fmt.Errorf("output contents not equal - expected %q, got %q", 
contents, out) } return } func defaultVolumes(tmpDir string) []string { if SameHostDaemon.Condition() { return []string{ "/vol1", fmt.Sprintf("%s:/vol2", tmpDir), fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")), fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")), } } // Can't bind-mount volumes with separate host daemon. return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"} } docker-1.10.3/integration-cli/docker_cli_create_test.go000066400000000000000000000343571267010174400231600ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os" "reflect" "strings" "time" "os/exec" "io/ioutil" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-connections/nat" "github.com/go-check/check" ) // Make sure we can create a simple container with some args func (s *DockerSuite) TestCreateArgs(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "busybox", "command", "arg1", "arg2", "arg with space") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "inspect", cleanedContainerID) containers := []struct { ID string Created time.Time Path string Args []string Image string }{} err := json.Unmarshal([]byte(out), &containers) c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) c.Assert(containers, checker.HasLen, 1) cont := containers[0] c.Assert(string(cont.Path), checker.Equals, "command", check.Commentf("Unexpected container path. Expected command, received: %s", cont.Path)) b := false expected := []string{"arg1", "arg2", "arg with space"} for i, arg := range expected { if arg != cont.Args[i] { b = true break } } if len(cont.Args) != len(expected) || b { c.Fatalf("Unexpected args. 
Expected %v, received: %v", expected, cont.Args) } } // Make sure we can set hostconfig options too func (s *DockerSuite) TestCreateHostConfig(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "-P", "busybox", "echo") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "inspect", cleanedContainerID) containers := []struct { HostConfig *struct { PublishAllPorts bool } }{} err := json.Unmarshal([]byte(out), &containers) c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) c.Assert(containers, checker.HasLen, 1) cont := containers[0] c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) c.Assert(cont.HostConfig.PublishAllPorts, check.NotNil, check.Commentf("Expected PublishAllPorts, got false")) } func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "inspect", cleanedContainerID) containers := []struct { HostConfig *struct { PortBindings map[nat.Port][]nat.PortBinding } }{} err := json.Unmarshal([]byte(out), &containers) c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) c.Assert(containers, checker.HasLen, 1) cont := containers[0] c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 4, check.Commentf("Expected 4 ports bindings, got %d", len(cont.HostConfig.PortBindings))) for k, v := range cont.HostConfig.PortBindings { c.Assert(v, checker.HasLen, 1, check.Commentf("Expected 1 ports binding, for the port %s but found %s", k, v)) c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) } } func (s *DockerSuite) TestCreateWithiLargePortRange(c *check.C) { 
testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "inspect", cleanedContainerID) containers := []struct { HostConfig *struct { PortBindings map[nat.Port][]nat.PortBinding } }{} err := json.Unmarshal([]byte(out), &containers) c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) c.Assert(containers, checker.HasLen, 1) cont := containers[0] c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 65535) for k, v := range cont.HostConfig.PortBindings { c.Assert(v, checker.HasLen, 1) c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) } } // "test123" should be printed by docker create + start func (s *DockerSuite) TestCreateEchoStdout(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "busybox", "echo", "test123") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "start", "-ai", cleanedContainerID) c.Assert(out, checker.Equals, "test123\n", check.Commentf("container should've printed 'test123', got %q", out)) } func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) name := "test_create_volume" dockerCmd(c, "create", "--name", name, "-v", "/foo", "busybox") dir, err := inspectMountSourceField(name, "/foo") c.Assert(err, check.IsNil, check.Commentf("Error getting volume host path: %q", err)) if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { c.Fatalf("Volume was not created") } if err != nil { c.Fatalf("Error statting volume host path: %q", err) } } func (s *DockerSuite) TestCreateLabels(c *check.C) { testRequires(c, DaemonIsLinux) name := "test_create_labels" expected := map[string]string{"k1": "v1", "k2": "v2"} 
dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox") actual := make(map[string]string) err := inspectFieldAndMarshall(name, "Config.Labels", &actual) c.Assert(err, check.IsNil) if !reflect.DeepEqual(expected, actual) { c.Fatalf("Expected %s got %s", expected, actual) } } func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) { testRequires(c, DaemonIsLinux) imageName := "testcreatebuildlabel" _, err := buildImage(imageName, `FROM busybox LABEL k1=v1 k2=v2`, true) c.Assert(err, check.IsNil) name := "test_create_labels_from_image" expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName) actual := make(map[string]string) err = inspectFieldAndMarshall(name, "Config.Labels", &actual) c.Assert(err, check.IsNil) if !reflect.DeepEqual(expected, actual) { c.Fatalf("Expected %s got %s", expected, actual) } } func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-h", "web.0", "busybox", "hostname") c.Assert(strings.TrimSpace(out), checker.Equals, "web.0", check.Commentf("hostname not set, expected `web.0`, got: %s", out)) } func (s *DockerSuite) TestCreateRM(c *check.C) { testRequires(c, DaemonIsLinux) // Test to make sure we can 'rm' a new container that is in // "Created" state, and has ever been run. Test "rm -f" too. 
// create a container out, _ := dockerCmd(c, "create", "busybox") cID := strings.TrimSpace(out) dockerCmd(c, "rm", cID) // Now do it again so we can "rm -f" this time out, _ = dockerCmd(c, "create", "busybox") cID = strings.TrimSpace(out) dockerCmd(c, "rm", "-f", cID) } func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon, NotUserNamespace) out, _ := dockerCmd(c, "create", "busybox") id := strings.TrimSpace(out) dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") } func (s *DockerSuite) TestCreateByImageID(c *check.C) { imageName := "testcreatebyimageid" imageID, err := buildImage(imageName, `FROM busybox MAINTAINER dockerio`, true) if err != nil { c.Fatal(err) } truncatedImageID := stringid.TruncateID(imageID) dockerCmd(c, "create", imageID) dockerCmd(c, "create", truncatedImageID) dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) // Ensure this fails out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID)) if exit == 0 { c.Fatalf("expected non-zero exit code; received %d", exit) } if expected := "Error parsing reference"; !strings.Contains(out, expected) { c.Fatalf(`Expected %q in output; got: %s`, expected, out) } out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) if exit == 0 { c.Fatalf("expected non-zero exit code; received %d", exit) } if expected := "Unable to find image"; !strings.Contains(out, expected) { c.Fatalf(`Expected %q in output; got: %s`, expected, out) } } func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-create") // Try create createCmd := exec.Command(dockerBinary, "create", repoName) s.trustedCmd(createCmd) out, _, err := runCommandWithOutput(createCmd) c.Assert(err, check.IsNil) c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) 
dockerCmd(c, "rmi", repoName) // Try untrusted create to ensure we pushed the tag to the registry createCmd = exec.Command(dockerBinary, "create", "--disable-content-trust=true", repoName) s.trustedCmd(createCmd) out, _, err = runCommandWithOutput(createCmd) c.Assert(err, check.IsNil) c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create with --disable-content-trust:\n%s", out)) } func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) withTagName := fmt.Sprintf("%s:latest", repoName) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", withTagName) dockerCmd(c, "push", withTagName) dockerCmd(c, "rmi", withTagName) // Try trusted create on untrusted tag createCmd := exec.Command(dockerBinary, "create", withTagName) s.trustedCmd(createCmd) out, _, err := runCommandWithOutput(createCmd) c.Assert(err, check.Not(check.IsNil)) c.Assert(string(out), checker.Contains, fmt.Sprintf("does not have trust data for %s", repoName), check.Commentf("Missing expected output on trusted create:\n%s", out)) } func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-isolated-create") // Try create createCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated-create", "create", repoName) s.trustedCmd(createCmd) out, _, err := runCommandWithOutput(createCmd) c.Assert(err, check.IsNil) c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) dockerCmd(c, "rmi", repoName) } func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) { c.Skip("Currently changes system time, causing instability") repoName := s.setupTrustedImage(c, "trusted-create-expired") // Certificates have 10 years of expiration elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) 
runAtDifferentDate(elevenYearsFromNow, func() { // Try create createCmd := exec.Command(dockerBinary, "create", repoName) s.trustedCmd(createCmd) out, _, err := runCommandWithOutput(createCmd) c.Assert(err, check.Not(check.IsNil)) c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) }) runAtDifferentDate(elevenYearsFromNow, func() { // Try create createCmd := exec.Command(dockerBinary, "create", "--disable-content-trust", repoName) s.trustedCmd(createCmd) out, _, err := runCommandWithOutput(createCmd) c.Assert(err, check.Not(check.IsNil)) c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) }) } func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") c.Assert(err, check.IsNil) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil) c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) dockerCmd(c, "rmi", repoName) // Try create createCmd := exec.Command(dockerBinary, "create", repoName) s.trustedCmd(createCmd) out, _, err = runCommandWithOutput(createCmd) c.Assert(err, check.IsNil) c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) dockerCmd(c, "rmi", repoName) // Kill the notary server, start a new "evil" one. 
s.not.Close() s.not, err = newTestNotary(c) c.Assert(err, check.IsNil) // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. // tag an image and upload it to the private registry dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) // Push up to the new server pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) s.trustedCmd(pushCmd) out, _, err = runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil) c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) // Now, try creating with the original client from this new trust server. This should fail. createCmd = exec.Command(dockerBinary, "create", repoName) s.trustedCmd(createCmd) out, _, err = runCommandWithOutput(createCmd) c.Assert(err, check.Not(check.IsNil)) c.Assert(string(out), checker.Contains, "valid signatures did not meet threshold", check.Commentf("Missing expected output on trusted push:\n%s", out)) } func (s *DockerSuite) TestCreateStopSignal(c *check.C) { name := "test_create_stop_signal" dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") res, err := inspectFieldJSON(name, "Config.StopSignal") c.Assert(err, check.IsNil) c.Assert(res, checker.Contains, "9") } func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) { testRequires(c, DaemonIsLinux) name := "foo" dir := "/home/foo/bar" dockerCmd(c, "create", "--name", name, "-w", dir, "busybox") dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), "/tmp") } docker-1.10.3/integration-cli/docker_cli_daemon_test.go000066400000000000000000002064411267010174400231530ustar00rootroot00000000000000// +build daemon,!windows package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "net" "os" "os/exec" "path" "path/filepath" "regexp" "strconv" "strings" "sync" "time" "github.com/docker/docker/pkg/integration/checker" 
"github.com/docker/libnetwork/iptables" "github.com/docker/libtrust" "github.com/go-check/check" ) func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top1: err=%v\n%s", err, out) } // --restart=no by default if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top2: err=%v\n%s", err, out) } testRun := func(m map[string]bool, prefix string) { var format string for cont, shouldRun := range m { out, err := s.d.Cmd("ps") if err != nil { c.Fatalf("Could not run ps: err=%v\n%q", err, out) } if shouldRun { format = "%scontainer %q is not running" } else { format = "%scontainer %q is running" } if shouldRun != strings.Contains(out, cont) { c.Fatalf(format, prefix, cont) } } } testRun(map[string]bool{"top1": true, "top2": true}, "") if err := s.d.Restart(); err != nil { c.Fatalf("Could not restart daemon: %v", err) } testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") } func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatal(err) } if out, err := s.d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { c.Fatal(err, out) } if err := s.d.Restart(); err != nil { c.Fatal(err) } if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { c.Fatal(err) } if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { c.Fatal(err, out) } out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1") c.Assert(err, check.IsNil) if _, err := inspectMountPointJSON(out, "/foo"); err != nil { 
c.Fatalf("Expected volume to exist: /foo, error: %v\n", err) } } // #11008 func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, check.IsNil) out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top") c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) out, err = s.d.Cmd("run", "-d", "--name", "top2", "--restart", "unless-stopped", "busybox:latest", "top") c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out)) testRun := func(m map[string]bool, prefix string) { var format string for name, shouldRun := range m { out, err := s.d.Cmd("ps") c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out)) if shouldRun { format = "%scontainer %q is not running" } else { format = "%scontainer %q is running" } c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name)) } } // both running testRun(map[string]bool{"top1": true, "top2": true}, "") out, err = s.d.Cmd("stop", "top1") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("stop", "top2") c.Assert(err, check.IsNil, check.Commentf(out)) // both stopped testRun(map[string]bool{"top1": false, "top2": false}, "") err = s.d.Restart() c.Assert(err, check.IsNil) // restart=always running testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") out, err = s.d.Cmd("start", "top2") c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out)) err = s.d.Restart() c.Assert(err, check.IsNil) // both running testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ") } func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { if err := s.d.Start("--iptables=false"); err != nil { c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) } } // Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and // no longer has an IP 
associated, we should gracefully handle that case and associate // an IP with it rather than fail daemon start func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { // rather than depending on brctl commands to verify docker0 is created and up // let's start the daemon and stop it, and then make a modification to run the // actual test if err := s.d.Start(); err != nil { c.Fatalf("Could not start daemon: %v", err) } if err := s.d.Stop(); err != nil { c.Fatalf("Could not stop daemon: %v", err) } // now we will remove the ip from docker0 and then try starting the daemon ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) if err != nil { c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) } if err := s.d.Start(); err != nil { warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) } } func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top: %s, %v", out, err) } // get output from iptables with container running ipTablesSearchString := "tcp dpt:80" ipTablesCmd := exec.Command("iptables", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) } if err := s.d.Stop(); err != nil { c.Fatalf("Could not stop daemon: %v", err) } // get output from iptables after restart ipTablesCmd = exec.Command("iptables", "-nvL") out, _, err = 
runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) } } func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { if err := s.d.StartWithBusybox(); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top: %s, %v", out, err) } // get output from iptables with container running ipTablesSearchString := "tcp dpt:80" ipTablesCmd := exec.Command("iptables", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) } if err := s.d.Restart(); err != nil { c.Fatalf("Could not restart daemon: %v", err) } // make sure the container is not running runningOut, err := s.d.Cmd("inspect", "--format='{{.State.Running}}'", "top") if err != nil { c.Fatalf("Could not inspect on container: %s, %v", out, err) } if strings.TrimSpace(runningOut) != "true" { c.Fatalf("Container should have been restarted after daemon restart. 
Status running should have been true but was: %q", strings.TrimSpace(runningOut)) } // get output from iptables after restart ipTablesCmd = exec.Command("iptables", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) } } // TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge // has the fe80::1 address and that a container is assigned a link-local address func (s *DockerSuite) TestDaemonIPv6Enabled(c *check.C) { testRequires(c, IPv6) if err := setupV6(); err != nil { c.Fatal("Could not set up host for IPv6 tests") } d := NewDaemon(c) if err := d.StartWithBusybox("--ipv6"); err != nil { c.Fatal(err) } defer d.Stop() iface, err := net.InterfaceByName("docker0") if err != nil { c.Fatalf("Error getting docker0 interface: %v", err) } addrs, err := iface.Addrs() if err != nil { c.Fatalf("Error getting addresses for docker0 interface: %v", err) } var found bool expected := "fe80::1/64" for i := range addrs { if addrs[i].String() == expected { found = true } } if !found { c.Fatalf("Bridge does not have an IPv6 Address") } if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { c.Fatalf("Could not run container: %s, %v", out, err) } out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.LinkLocalIPv6Address}}'", "ipv6test") out = strings.Trim(out, " \r\n'") if err != nil { c.Fatalf("Error inspecting container: %s, %v", out, err) } if ip := net.ParseIP(out); ip == nil { c.Fatalf("Container should have a link-local IPv6 address") } out, err = d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") out = strings.Trim(out, " \r\n'") if err != nil { c.Fatalf("Error inspecting container: %s, %v", out, 
err) } if ip := net.ParseIP(out); ip != nil { c.Fatalf("Container should not have a global IPv6 address: %v", out) } if err := teardownV6(); err != nil { c.Fatal("Could not perform teardown for IPv6 tests") } } // TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR // that running containers are given a link-local and global IPv6 address func (s *DockerSuite) TestDaemonIPv6FixedCIDR(c *check.C) { if err := setupV6(); err != nil { c.Fatal("Could not set up host for IPv6 tests") } d := NewDaemon(c) if err := d.StartWithBusybox("--ipv6", "--fixed-cidr-v6='2001:db8:2::/64'", "--default-gateway-v6='2001:db8:2::100'"); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } defer d.Stop() if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { c.Fatalf("Could not run container: %s, %v", out, err) } out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") out = strings.Trim(out, " \r\n'") if err != nil { c.Fatalf("Error inspecting container: %s, %v", out, err) } if ip := net.ParseIP(out); ip == nil { c.Fatalf("Container should have a global IPv6 address") } // TODO: Check IPv6 def gateway in inspect o/p (once docker/docker 19001 is merged if err := teardownV6(); err != nil { c.Fatal("Could not perform teardown for IPv6 tests") } } // TestDaemonIPv6FixedCIDRAndMac checks that when the daemon is started with ipv6 fixed CIDR // the running containers are given a an IPv6 address derived from the MAC address and the ipv6 fixed CIDR func (s *DockerSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { err := setupV6() c.Assert(err, checker.IsNil) d := NewDaemon(c) err = d.StartWithBusybox("--ipv6", "--fixed-cidr-v6='2001:db8:1::/64'") c.Assert(err, checker.IsNil) defer d.Stop() out, err := d.Cmd("run", "-itd", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox") c.Assert(err, checker.IsNil) out, err = d.Cmd("inspect", 
"--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") c.Assert(err, checker.IsNil) c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:1::aabb:ccdd:eeff") err = teardownV6() c.Assert(err, checker.IsNil) } func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) } func (s *DockerSuite) TestDaemonStartWithDaemonCommand(c *check.C) { type kind int const ( common kind = iota daemon ) var flags = []map[kind][]string{ {common: {"-l", "info"}, daemon: {"--selinux-enabled"}}, {common: {"-D"}, daemon: {"--selinux-enabled", "-r"}}, {common: {"-D"}, daemon: {"--restart"}}, {common: {"--debug"}, daemon: {"--log-driver=json-file", "--log-opt=max-size=1k"}}, } var invalidGlobalFlags = [][]string{ //Invalid because you cannot pass daemon flags as global flags. {"--selinux-enabled", "-l", "info"}, {"-D", "-r"}, {"--config", "/tmp"}, } // `docker daemon -l info --selinux-enabled` // should NOT error out for _, f := range flags { d := NewDaemon(c) args := append(f[common], f[daemon]...) if err := d.Start(args...); err != nil { c.Fatalf("Daemon should have started successfully with %v: %v", args, err) } d.Stop() } // `docker -l info daemon --selinux-enabled` // should error out for _, f := range flags { d := NewDaemon(c) d.GlobalFlags = f[common] if err := d.Start(f[daemon]...); err == nil { d.Stop() c.Fatalf("Daemon should have failed to start with docker %v daemon %v", d.GlobalFlags, f[daemon]) } } for _, f := range invalidGlobalFlags { cmd := exec.Command(dockerBinary, append(f, "daemon")...) 
errch := make(chan error) var err error go func() { errch <- cmd.Run() }() select { case <-time.After(time.Second): cmd.Process.Kill() case err = <-errch: } if err == nil { c.Fatalf("Daemon should have failed to start with docker %v daemon", f) } } } func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { if err := s.d.Start("--log-level=debug"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { // we creating new daemons to create new logFile if err := s.d.Start("--log-level=fatal"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { if err := s.d.Start("-D"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { if err := s.d.Start("--debug"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { c.Fatal(err) } content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c 
*check.C) { listeningPorts := [][]string{ {"0.0.0.0", "0.0.0.0", "5678"}, {"127.0.0.1", "127.0.0.1", "1234"}, {"localhost", "127.0.0.1", "1235"}, } cmdArgs := make([]string, 0, len(listeningPorts)*2) for _, hostDirective := range listeningPorts { cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) } if err := s.d.StartWithBusybox(cmdArgs...); err != nil { c.Fatalf("Could not start daemon with busybox: %v", err) } for _, hostDirective := range listeningPorts { output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") if err == nil { c.Fatalf("Container should not start, expected port already allocated error: %q", output) } else if !strings.Contains(output, "port is already allocated") { c.Fatalf("Expected port is already allocated error: %q", output) } } } func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") if err := s.d.Start(); err != nil { c.Fatalf("Could not start daemon: %v", err) } s.d.Stop() k, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { c.Fatalf("Error opening key file") } kid := k.KeyID() // Test Key ID is a valid fingerprint (e.g. 
QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) if len(kid) != 59 { c.Fatalf("Bad key ID: %s", kid) } } func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") k1, err := libtrust.GenerateECP256PrivateKey() if err != nil { c.Fatalf("Error generating private key: %s", err) } if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { c.Fatalf("Error creating .docker directory: %s", err) } if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { c.Fatalf("Error saving private key: %s", err) } if err := s.d.Start(); err != nil { c.Fatalf("Could not start daemon: %v", err) } s.d.Stop() k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { c.Fatalf("Error opening key file") } if k1.KeyID() != k2.KeyID() { c.Fatalf("Key not migrated") } } // GH#11320 - verify that the daemon exits on failure properly // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { //attempt to start daemon with incorrect flags (we know -b and --bip conflict) if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { //verify we got the right error if !strings.Contains(err.Error(), "Daemon exited and never started") { c.Fatalf("Expected daemon not to start, got %v", err) } // look in the log and make sure we got the message that daemon is shutting down runCmd := exec.Command("grep", "Error starting daemon", s.d.LogfileName()) if out, _, err := runCommandWithOutput(runCmd); err != nil { c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err) } } else { //if we didn't get an error and the daemon is running, this is a failure c.Fatal("Conflicting 
options should cause the daemon to error out with a failure") } } func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { d := s.d err := d.Start("--bridge", "nosuchbridge") c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) defer d.Restart() bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) out, err := createInterface(c, "bridge", bridgeName, bridgeIP) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, bridgeName) err = d.StartWithBusybox("--bridge", bridgeName) c.Assert(err, check.IsNil) ipTablesSearchString := bridgeIPNet.String() ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) c.Assert(err, check.IsNil) c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, check.Commentf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)) _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") c.Assert(err, check.IsNil) containerIP := d.findContainerIP("ExtContainer") ip := net.ParseIP(containerIP) c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, check.Commentf("Container IP-Address must be in the same subnet range : %s", containerIP)) } func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) { args := []string{"link", "add", "name", ifName, "type", ifType} ipLinkCmd := exec.Command("ip", args...) 
out, _, err := runCommandWithOutput(ipLinkCmd) if err != nil { return out, err } ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up") out, _, err = runCommandWithOutput(ifCfgCmd) return out, err } func deleteInterface(c *check.C, ifName string) { ifCmd := exec.Command("ip", "link", "delete", ifName) out, _, err := runCommandWithOutput(ifCmd) c.Assert(err, check.IsNil, check.Commentf(out)) flushCmd := exec.Command("iptables", "-t", "nat", "--flush") out, _, err = runCommandWithOutput(flushCmd) c.Assert(err, check.IsNil, check.Commentf(out)) flushCmd = exec.Command("iptables", "--flush") out, _, err = runCommandWithOutput(flushCmd) c.Assert(err, check.IsNil, check.Commentf(out)) } func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { // TestDaemonBridgeIP Steps // 1. Delete the existing docker0 Bridge // 2. Set --bip daemon configuration and start the new Docker Daemon // 3. Check if the bip config has taken effect using ifconfig and iptables commands // 4. Launch a Container and make sure the IP-Address is in the expected subnet // 5. Delete the docker0 Bridge // 6. 
Restart the Docker Daemon (via deferred action) // This Restart takes care of bringing docker0 interface back to auto-assigned IP defaultNetworkBridge := "docker0" deleteInterface(c, defaultNetworkBridge) d := s.d bridgeIP := "192.169.1.1/24" ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) err := d.StartWithBusybox("--bip", bridgeIP) c.Assert(err, check.IsNil) defer d.Restart() ifconfigSearchString := ip.String() ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge) out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd) c.Assert(err, check.IsNil) c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true, check.Commentf("ifconfig output should have contained %q, but was %q", ifconfigSearchString, out)) ipTablesSearchString := bridgeIPNet.String() ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) c.Assert(err, check.IsNil) c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, check.Commentf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)) out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") c.Assert(err, check.IsNil) containerIP := d.findContainerIP("test") ip = net.ParseIP(containerIP) c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, check.Commentf("Container IP-Address must be in the same subnet range : %s", containerIP)) deleteInterface(c, defaultNetworkBridge) } func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { if err := s.d.Start(); err != nil { c.Fatalf("Could not start daemon: %v", err) } defer s.d.Restart() if err := s.d.Stop(); err != nil { c.Fatalf("Could not stop daemon: %v", err) } // now we will change the docker0's IP and then try starting the daemon bridgeIP := "192.169.100.1/24" _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) ipCmd := exec.Command("ifconfig", "docker0", bridgeIP) stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) if err != nil { c.Fatalf("failed 
to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) } if err := s.d.Start("--bip", bridgeIP); err != nil { c.Fatalf("Could not start daemon: %v", err) } //check if the iptables contains new bridgeIP MASQUERADE rule ipTablesSearchString := bridgeIPNet.String() ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out) } } func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { d := s.d bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" out, err := createInterface(c, "bridge", bridgeName, bridgeIP) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, bridgeName) args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} err = d.StartWithBusybox(args...) 
c.Assert(err, check.IsNil)
defer d.Restart()

// Try to exhaust the fixed CIDR pool; when no IPv4 addresses remain the
// run must fail with an explicit "no available IPv4 addresses" error.
for i := 0; i < 4; i++ {
	cName := "Container" + strconv.Itoa(i)
	out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top")
	if err != nil {
		c.Assert(strings.Contains(out, "no available IPv4 addresses"), check.Equals, true,
			check.Commentf("Could not run a Container : %s %s", err.Error(), out))
	}
}
}

// TestDaemonBridgeFixedCidr2 checks that with --bip set on a user bridge and
// --fixed-cidr naming a sub-range, container addresses come out of that range.
func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *check.C) {
	d := s.d

	bridgeName := "external-bridge"
	bridgeIP := "10.2.2.1/16"

	out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
	c.Assert(err, check.IsNil, check.Commentf(out))
	defer deleteInterface(c, bridgeName)

	err = d.StartWithBusybox("--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24")
	c.Assert(err, check.IsNil)
	defer s.d.Restart()

	out, err = d.Cmd("run", "-d", "--name", "bb", "busybox", "top")
	c.Assert(err, checker.IsNil, check.Commentf(out))
	defer d.Cmd("stop", "bb")

	// NOTE(review): err from this exec is not checked before asserting on out — confirm intent.
	out, err = d.Cmd("exec", "bb", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'")
	c.Assert(out, checker.Equals, "10.2.2.0\n")

	out, err = d.Cmd("run", "--rm", "busybox", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'")
	c.Assert(err, checker.IsNil, check.Commentf(out))
	c.Assert(out, checker.Equals, "10.2.2.2\n")
}

// TestDaemonBridgeFixedCIDREqualBridgeNetwork ensures the daemon accepts a
// --fixed-cidr identical to the bridge's own network and can run a container.
func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *check.C) {
	d := s.d

	bridgeName := "external-bridge"
	bridgeIP := "172.27.42.1/16"

	out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
	c.Assert(err, check.IsNil, check.Commentf(out))
	defer deleteInterface(c, bridgeName)

	err = d.StartWithBusybox("--bridge", bridgeName, "--fixed-cidr", bridgeIP)
	c.Assert(err, check.IsNil)
	defer s.d.Restart()

	out, err = d.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, check.IsNil, check.Commentf(out))

	cid1 := strings.TrimSpace(out)
	defer d.Cmd("stop", cid1)
}

// TestDaemonDefaultGatewayIPv4Implicit verifies that, without an explicit
// --default-gateway, the container's default route points at the bridge IP.
func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) {
	defaultNetworkBridge := "docker0"
	deleteInterface(c, defaultNetworkBridge)

	d := s.d

	bridgeIP := "192.169.1.1"
	bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP)

	err := d.StartWithBusybox("--bip", bridgeIPNet)
	c.Assert(err, check.IsNil)
	defer d.Restart()

	expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP)
	out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0")
	c.Assert(strings.Contains(out, expectedMessage), check.Equals, true,
		check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", bridgeIP, strings.TrimSpace(out)))

	deleteInterface(c, defaultNetworkBridge)
}

// TestDaemonDefaultGatewayIPv4Explicit verifies that a --default-gateway
// inside the bridge subnet becomes the container's default route.
func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) {
	defaultNetworkBridge := "docker0"
	deleteInterface(c, defaultNetworkBridge)

	d := s.d

	bridgeIP := "192.169.1.1"
	bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP)
	gatewayIP := "192.169.1.254"

	err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP)
	c.Assert(err, check.IsNil)
	defer d.Restart()

	expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP)
	out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0")
	c.Assert(strings.Contains(out, expectedMessage), check.Equals, true,
		check.Commentf("Explicit default gateway should be %s, but default route was '%s'", gatewayIP, strings.TrimSpace(out)))

	deleteInterface(c, defaultNetworkBridge)
}

// TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet checks the daemon
// still starts when the gateway lies outside the --fixed-cidr container range.
func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) {
	defaultNetworkBridge := "docker0"
	deleteInterface(c, defaultNetworkBridge)

	// Program a custom default gateway outside of the container subnet, daemon should accept it and start
	err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254")
	c.Assert(err, check.IsNil)

	deleteInterface(c, defaultNetworkBridge)
	s.d.Restart()
}

// TestDaemonDefaultNetworkInvalidClusterConfig checks daemon start and restart
// with a cluster store configured that points at an unreachable backend.
func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *check.C) {
	testRequires(c, DaemonIsLinux, SameHostDaemon)

	// Start daemon without docker0 bridge
	defaultNetworkBridge := "docker0"
deleteInterface(c, defaultNetworkBridge)

	d := NewDaemon(c)
	discoveryBackend := "consul://consuladdr:consulport/some/path"
	err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend))
	c.Assert(err, checker.IsNil)

	// Start daemon with docker0 bridge
	ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge)
	_, err = runCommand(ifconfigCmd)
	c.Assert(err, check.IsNil)

	err = d.Restart(fmt.Sprintf("--cluster-store=%s", discoveryBackend))
	c.Assert(err, checker.IsNil)

	d.Stop()
}

// TestDaemonIP checks that publishing on an invalid --ip fails at the
// userland proxy, and that a valid host address produces a DNAT rule.
func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) {
	d := s.d

	ipStr := "192.170.1.1/24"
	ip, _, _ := net.ParseCIDR(ipStr)
	args := []string{"--ip", ip.String()}
	err := d.StartWithBusybox(args...)
	c.Assert(err, check.IsNil)
	defer d.Restart()

	// The address is not yet assigned to any host interface, so publishing must fail.
	out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
	c.Assert(err, check.NotNil, check.Commentf("Running a container must fail with an invalid --ip option"))
	c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true)

	// Create a dummy interface carrying the address; now publishing should work.
	ifName := "dummy"
	out, err = createInterface(c, "dummy", ifName, ipStr)
	c.Assert(err, check.IsNil, check.Commentf(out))
	defer deleteInterface(c, ifName)

	_, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
	c.Assert(err, check.IsNil)

	ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
	out, _, err = runCommandWithOutput(ipTablesCmd)
	c.Assert(err, check.IsNil)

	regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String())
	matched, _ := regexp.MatchString(regex, out)
	c.Assert(matched, check.Equals, true,
		check.Commentf("iptables output should have contained %q, but was %q", regex, out))
}

// TestDaemonICCPing verifies that with --icc=false containers cannot ping
// each other, while pinging a host interface still succeeds.
func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) {
	d := s.d

	bridgeName := "external-bridge"
	bridgeIP := "192.169.1.1/24"

	out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
	c.Assert(err, check.IsNil, check.Commentf(out))
	defer deleteInterface(c, bridgeName)

	args := []string{"--bridge", bridgeName, "--icc=false"}
	err = d.StartWithBusybox(args...)
	c.Assert(err, check.IsNil)
	defer d.Restart()

	ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD")
	out, _, err = runCommandWithOutput(ipTablesCmd)
	c.Assert(err, check.IsNil)

	regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName)
	matched, _ := regexp.MatchString(regex, out)
	c.Assert(matched, check.Equals, true,
		check.Commentf("iptables output should have contained %q, but was %q", regex, out))

	// Pinging another container must fail with --icc=false
	pingContainers(c, d, true)

	ipStr := "192.171.1.1/24"
	ip, _, _ := net.ParseCIDR(ipStr)
	ifName := "icc-dummy"

	// NOTE(review): return values ignored and the dummy interface is never
	// deleted — possible leftover interface between tests; confirm upstream.
	createInterface(c, "dummy", ifName, ipStr)

	// But, Pinging external or a Host interface must succeed
	pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String())
	runArgs := []string{"--rm", "busybox", "sh", "-c", pingCmd}
	_, err = d.Cmd("run", runArgs...)
	c.Assert(err, check.IsNil)
}

// TestDaemonICCLinkExpose checks that with --icc=false a --link still
// grants access to the linked container's exposed port.
func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) {
	d := s.d

	bridgeName := "external-bridge"
	bridgeIP := "192.169.1.1/24"

	out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
	c.Assert(err, check.IsNil, check.Commentf(out))
	defer deleteInterface(c, bridgeName)

	args := []string{"--bridge", bridgeName, "--icc=false"}
	err = d.StartWithBusybox(args...)
c.Assert(err, check.IsNil)
	defer d.Restart()

	ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD")
	out, _, err = runCommandWithOutput(ipTablesCmd)
	c.Assert(err, check.IsNil)

	regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName)
	matched, _ := regexp.MatchString(regex, out)
	c.Assert(matched, check.Equals, true,
		check.Commentf("iptables output should have contained %q, but was %q", regex, out))

	out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567")
	c.Assert(err, check.IsNil, check.Commentf(out))

	out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567")
	c.Assert(err, check.IsNil, check.Commentf(out))
}

// TestDaemonLinksIpTablesRulesWhenLinkAndUnlink verifies that link-specific
// iptables ACCEPT rules are installed on link and removed on unlink.
func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) {
	bridgeName := "external-bridge"
	bridgeIP := "192.169.1.1/24"

	out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
	c.Assert(err, check.IsNil, check.Commentf(out))
	defer deleteInterface(c, bridgeName)

	err = s.d.StartWithBusybox("--bridge", bridgeName, "--icc=false")
	c.Assert(err, check.IsNil)
	defer s.d.Restart()

	_, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top")
	c.Assert(err, check.IsNil)
	_, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top")
	c.Assert(err, check.IsNil)

	childIP := s.d.findContainerIP("child")
	parentIP := s.d.findContainerIP("parent")

	sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"}
	destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"}
	if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) {
		c.Fatal("Iptables rules not found")
	}

	s.d.Cmd("rm", "--link", "parent/http")
	if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) {
		c.Fatal("Iptables rules should be removed when unlink")
	}

	s.d.Cmd("kill", "child")
	s.d.Cmd("kill", "parent")
}

// TestDaemonUlimitDefaults checks that --default-ulimit values apply to
// containers, can be overridden per container, and survive a daemon restart
// with new defaults.
func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) {
	testRequires(c, DaemonIsLinux)

	if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil {
		c.Fatal(err)
	}

	out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)")
	if err != nil {
		c.Fatal(out, err)
	}

	outArr := strings.Split(out, "\n")
	if len(outArr) < 2 {
		c.Fatalf("got unexpected output: %s", out)
	}
	nofile := strings.TrimSpace(outArr[0])
	nproc := strings.TrimSpace(outArr[1])

	if nofile != "42" {
		c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile)
	}
	if nproc != "2048" {
		// NOTE(review): "exepcted" typo preserved — it is a runtime message.
		c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc)
	}

	// Now restart daemon with a new default
	if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil {
		c.Fatal(err)
	}

	out, err = s.d.Cmd("start", "-a", "test")
	if err != nil {
		c.Fatal(err)
	}

	outArr = strings.Split(out, "\n")
	if len(outArr) < 2 {
		c.Fatalf("got unexpected output: %s", out)
	}
	nofile = strings.TrimSpace(outArr[0])
	nproc = strings.TrimSpace(outArr[1])

	// The restarted daemon's new nofile default applies; the container's own
	// nproc override persists.
	if nofile != "43" {
		c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile)
	}
	if nproc != "2048" {
		c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc)
	}
}

// #11315
func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil {
		c.Fatal(err, out)
	}

	if out, err := s.d.Cmd("rename", "test", "test2"); err != nil {
		c.Fatal(err, out)
	}

	if err := s.d.Restart(); err != nil {
		c.Fatal(err)
	}

	// The renamed container must still be startable after the restart.
	if out, err := s.d.Cmd("start", "test2"); err != nil {
		c.Fatal(err, out)
	}
}

// TestDaemonLoggingDriverDefault checks the default json-file driver writes a
// well-formed JSON log entry under the daemon root.
func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
	if err != nil {
		c.Fatal(out, err)
	}
	id := strings.TrimSpace(out)

	if out, err := s.d.Cmd("wait", id); err != nil {
		c.Fatal(out, err)
	}

	logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log")

	if _, err := os.Stat(logPath); err != nil {
		c.Fatal(err)
	}
	// NOTE(review): f is never closed — file handle leaks for the test's lifetime.
	f, err := os.Open(logPath)
	if err != nil {
		c.Fatal(err)
	}
	var res struct {
		Log    string    `json:"log"`
		Stream string    `json:"stream"`
		Time   time.Time `json:"time"`
	}
	if err := json.NewDecoder(f).Decode(&res); err != nil {
		c.Fatal(err)
	}
	if res.Log != "testline\n" {
		c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
	}
	if res.Stream != "stdout" {
		c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
	}
	if !time.Now().After(res.Time) {
		c.Fatalf("Log time %v in future", res.Time)
	}
}

// TestDaemonLoggingDriverDefaultOverride checks --log-driver=none on a single
// container suppresses the json log file even when json-file is the default.
func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	out, err := s.d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline")
	if err != nil {
		c.Fatal(out, err)
	}
	id := strings.TrimSpace(out)

	if out, err := s.d.Cmd("wait", id); err != nil {
		c.Fatal(out, err)
	}
	logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log")

	if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
		c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
	}
}

// TestDaemonLoggingDriverNone checks the daemon-wide --log-driver=none
// produces no json log file.
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) {
	if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
		c.Fatal(err)
	}

	out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
	if err != nil {
		c.Fatal(out, err)
	}
	id := strings.TrimSpace(out)
	if out, err := s.d.Cmd("wait", id); err != nil {
		c.Fatal(out, err)
	}

	// NOTE(review): this test builds the path from s.d.folder/"graph" while
	// the sibling tests use s.d.root — confirm which layout is intended.
	logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
	if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
		c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
	}
}

// TestDaemonLoggingDriverNoneOverride checks a per-container json-file
// override still writes a valid log entry when the daemon default is none.
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) {
	if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
		c.Fatal(err)
	}

	out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline")
	if err != nil {
		c.Fatal(out, err)
	}
	id := strings.TrimSpace(out)

	if out, err := s.d.Cmd("wait", id); err != nil {
		c.Fatal(out, err)
	}
	logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log")

	if _, err := os.Stat(logPath); err != nil {
		c.Fatal(err)
	}
	// NOTE(review): f is never closed — file handle leaks for the test's lifetime.
	f, err := os.Open(logPath)
	if err != nil {
		c.Fatal(err)
	}
	var res struct {
		Log    string    `json:"log"`
		Stream string    `json:"stream"`
		Time   time.Time `json:"time"`
	}
	if err := json.NewDecoder(f).Decode(&res); err != nil {
		c.Fatal(err)
	}
	if res.Log != "testline\n" {
		c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
	}
	if res.Stream != "stdout" {
		c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
	}
	if !time.Now().After(res.Time) {
		c.Fatalf("Log time %v in future", res.Time)
	}
}

// TestDaemonLoggingDriverNoneLogsError checks `docker logs` reports a clear
// error for a container using the none driver.
func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) {
	if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
		c.Fatal(err)
	}

	out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
	if err != nil {
		c.Fatal(out, err)
	}
	id := strings.TrimSpace(out)
	out, err = s.d.Cmd("logs", id)
	if err == nil {
		c.Fatalf("Logs should fail with 'none' driver")
	}
	if !strings.Contains(out, `"logs" command is supported only for "json-file" and "journald" logging drivers (got: none)`) {
		c.Fatalf("There should be an error about none not being a recognized log driver, got: %s", out)
	}
}

// TestDaemonDots verifies pull-progress dots appear only at info log level.
func (s *DockerDaemonSuite) TestDaemonDots(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	// Now create 4 containers
	if _, err := s.d.Cmd("create", "busybox"); err != nil {
		c.Fatalf("Error creating container: %q", err)
	}
	if _, err := s.d.Cmd("create", "busybox"); err != nil {
		c.Fatalf("Error creating container: %q", err)
	}
	if _, err := s.d.Cmd("create", "busybox"); err != nil {
		c.Fatalf("Error creating container: %q", err)
	}
	if _, err := s.d.Cmd("create", "busybox"); err != nil {
		c.Fatalf("Error creating container: %q", err)
	}

	s.d.Stop()

	s.d.Start("--log-level=debug")
	s.d.Stop()
	content, _ := ioutil.ReadFile(s.d.logFile.Name())
	if strings.Contains(string(content), "....") {
		c.Fatalf("Debug level should not have ....\n%s", string(content))
	}

	s.d.Start("--log-level=error")
	s.d.Stop()
	content, _ = ioutil.ReadFile(s.d.logFile.Name())
	if strings.Contains(string(content), "....") {
		c.Fatalf("Error level should not have ....\n%s", string(content))
	}

	s.d.Start("--log-level=info")
	s.d.Stop()
	content, _ = ioutil.ReadFile(s.d.logFile.Name())
	if !strings.Contains(string(content), "....") {
		c.Fatalf("Info level should have ....\n%s", string(content))
	}
}

// TestDaemonUnixSockCleanedUp checks the unix socket is removed on shutdown.
func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) {
	dir, err := ioutil.TempDir("", "socket-cleanup-test")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(dir)

	sockPath := filepath.Join(dir, "docker.sock")
	if err := s.d.Start("--host", "unix://"+sockPath); err != nil {
		c.Fatal(err)
	}

	if _, err := os.Stat(sockPath); err != nil {
		c.Fatal("socket does not exist")
	}

	if err := s.d.Stop(); err != nil {
		c.Fatal(err)
	}

	if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) {
		c.Fatal("unix socket is not cleaned up")
	}
}

// TestDaemonWithWrongkey corrupts the trust key's key ID on disk and
// verifies the daemon refuses to start with a "Public Key ID" mismatch.
func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
	type Config struct {
		Crv string `json:"crv"`
		D   string `json:"d"`
		Kid string `json:"kid"`
		Kty string `json:"kty"`
		X   string `json:"x"`
		Y   string `json:"y"`
	}

	os.Remove("/etc/docker/key.json")
	if err := s.d.Start(); err != nil {
		c.Fatalf("Failed to start daemon: %v", err)
	}

	if err := s.d.Stop(); err != nil {
		c.Fatalf("Could not stop daemon: %v", err)
	}

	config := &Config{}
	bytes, err := ioutil.ReadFile("/etc/docker/key.json")
	if err != nil {
		c.Fatalf("Error reading key.json file: %s", err)
	}

	// byte[] to Data-Struct
	if err :=
json.Unmarshal(bytes, &config); err != nil {
		c.Fatalf("Error Unmarshal: %s", err)
	}

	//replace config.Kid with the fake value
	config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"

	// NEW Data-Struct to byte[]
	newBytes, err := json.Marshal(&config)
	if err != nil {
		c.Fatalf("Error Marshal: %s", err)
	}

	// write back
	if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil {
		c.Fatalf("Error ioutil.WriteFile: %s", err)
	}
	defer os.Remove("/etc/docker/key.json")

	if err := s.d.Start(); err == nil {
		c.Fatalf("It should not be successful to start daemon with wrong key: %v", err)
	}

	content, _ := ioutil.ReadFile(s.d.logFile.Name())

	if !strings.Contains(string(content), "Public Key ID does not match") {
		c.Fatal("Missing KeyID message from daemon logs")
	}
}

// TestDaemonRestartKillWait checks that waiting on a container killed before
// a daemon restart returns promptly after the restart.
func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatalf("Could not start daemon with busybox: %v", err)
	}
	out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat")
	if err != nil {
		c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out)
	}
	containerID := strings.TrimSpace(out)

	if out, err := s.d.Cmd("kill", containerID); err != nil {
		c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out)
	}

	if err := s.d.Restart(); err != nil {
		c.Fatalf("Could not restart daemon: %v", err)
	}

	errchan := make(chan error)
	go func() {
		if out, err := s.d.Cmd("wait", containerID); err != nil {
			errchan <- fmt.Errorf("%v:\n%s", err, out)
		}
		close(errchan)
	}()

	select {
	case <-time.After(5 * time.Second):
		c.Fatal("Waiting on a stopped (killed) container timed out")
	case err := <-errchan:
		if err != nil {
			c.Fatal(err)
		}
	}
}

// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint
func (s *DockerDaemonSuite) TestHttpsInfo(c *check.C) {
	const (
		testDaemonHTTPSAddr = "tcp://localhost:4271"
	)

	if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
		"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil {
		c.Fatalf("Could not start daemon with busybox: %v", err)
	}

	daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"}
	out, err := s.d.CmdWithArgs(daemonArgs, "info")
	if err != nil {
		c.Fatalf("Error Occurred: %s and output: %s", err, out)
	}
}

// TestHttpsRun connects via two-way authenticated HTTPS to the create, attach, start, and wait endpoints.
// https://github.com/docker/docker/issues/19280
func (s *DockerDaemonSuite) TestHttpsRun(c *check.C) {
	const (
		testDaemonHTTPSAddr = "tcp://localhost:4271"
	)

	if err := s.d.StartWithBusybox("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
		"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil {
		c.Fatalf("Could not start daemon with busybox: %v", err)
	}

	daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"}
	out, err := s.d.CmdWithArgs(daemonArgs, "run", "busybox", "echo", "TLS response")
	if err != nil {
		c.Fatalf("Error Occurred: %s and output: %s", err, out)
	}

	if !strings.Contains(out, "TLS response") {
		c.Fatalf("expected output to include `TLS response`, got %v", out)
	}
}

// TestTlsVerify verifies that --tlsverify=false turns on tls
func (s *DockerDaemonSuite) TestTlsVerify(c *check.C) {
	out, err := exec.Command(dockerBinary, "daemon", "--tlsverify=false").CombinedOutput()
	if err == nil || !strings.Contains(string(out), "Could not load X509 key pair") {
		c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out))
	}
}

// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint
// by using a rogue client certificate and checks that it fails with the expected error.
func (s *DockerDaemonSuite) TestHttpsInfoRogueCert(c *check.C) {
	const (
		errBadCertificate   = "remote error: bad certificate"
		testDaemonHTTPSAddr = "tcp://localhost:4271"
	)

	if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
		"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil {
		c.Fatalf("Could not start daemon with busybox: %v", err)
	}

	daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"}
	out, err := s.d.CmdWithArgs(daemonArgs, "info")
	if err == nil || !strings.Contains(out, errBadCertificate) {
		c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out)
	}
}

// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint
// which provides a rogue server certificate and checks that it fails with the expected error
func (s *DockerDaemonSuite) TestHttpsInfoRogueServerCert(c *check.C) {
	const (
		errCaUnknown             = "x509: certificate signed by unknown authority"
		testDaemonRogueHTTPSAddr = "tcp://localhost:4272"
	)
	if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem",
		"--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil {
		c.Fatalf("Could not start daemon with busybox: %v", err)
	}

	daemonArgs := []string{"--host", testDaemonRogueHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"}
	out, err := s.d.CmdWithArgs(daemonArgs, "info")
	if err == nil || !strings.Contains(out, errCaUnknown) {
		c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out)
	}
}

// pingContainers runs a container and pings it over a --link alias;
// expectFailure selects whether the ping is expected to fail (e.g. --icc=false).
func pingContainers(c *check.C, d *Daemon, expectFailure bool) {
	var dargs []string
	if d != nil {
		dargs = []string{"--host", d.sock()}
	}

	args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top")
	dockerCmd(c, args...)

	args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c")
	pingCmd := "ping -c 1 %s -W 1"
	args = append(args, fmt.Sprintf(pingCmd, "alias1"))
	_, _, err := dockerCmdWithError(args...)

	if expectFailure {
		c.Assert(err, check.NotNil)
	} else {
		c.Assert(err, check.IsNil)
	}

	args = append(dargs, "rm", "-f", "container1")
	dockerCmd(c, args...)
}

// TestDaemonRestartWithSocketAsVolume checks the daemon restarts cleanly when
// a restart=always container bind-mounts the daemon's own socket.
func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) {
	c.Assert(s.d.StartWithBusybox(), check.IsNil)

	socket := filepath.Join(s.d.folder, "docker.sock")

	out, err := s.d.Cmd("run", "-d", "--restart=always", "-v", socket+":/sock", "busybox")
	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
	c.Assert(s.d.Restart(), check.IsNil)
}

// TestCleanupMountsAfterCrash kills the daemon process and verifies the
// container mount is gone after the daemon comes back.
func (s *DockerDaemonSuite) TestCleanupMountsAfterCrash(c *check.C) {
	c.Assert(s.d.StartWithBusybox(), check.IsNil)

	out, err := s.d.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
	id := strings.TrimSpace(out)
	c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
	c.Assert(s.d.Start(), check.IsNil)
	mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))

	comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
	c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
}

// TestRunContainerWithBridgeNone checks that with -b none containers get no
// eth0 and --net=host exposes exactly the host's interfaces.
func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil)

	out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l")
	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
	c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out))

	out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l")
	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
	c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
		check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out))

	// the extra grep and awk clean up the output of `ip` to only list the number and name of
	// interfaces, allowing for different versions of ip (e.g. inside and outside the container) to
	// be used while still verifying that the interface list is the exact same
	cmd := exec.Command("sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '")
	stdout := bytes.NewBuffer(nil)
	cmd.Stdout = stdout
	if err := cmd.Run(); err != nil {
		c.Fatal("Failed to get host network interface")
	}
	out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '")
	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
	c.Assert(out, check.Equals, fmt.Sprintf("%s", stdout),
		check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out))
}

// TestDaemonRestartWithContainerRunning ensures a container that ran before a
// daemon restart can still be removed afterwards.
func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		t.Fatal(err)
	}
	if out, err := s.d.Cmd("run", "-ti", "-d", "--name", "test", "busybox"); err != nil {
		t.Fatal(out, err)
	}

	if err := s.d.Restart(); err != nil {
		t.Fatal(err)
	}
	// Container 'test' should be removed without error
	if out, err := s.d.Cmd("rm", "test"); err != nil {
		t.Fatal(out, err)
	}
}

// TestDaemonRestartCleanupNetns checks the sandbox netns file persists for a
// stopped container but is removed once the container is deleted and the
// daemon restarts.
func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}
	out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top")
	if err != nil {
		c.Fatal(out, err)
	}

	// Get sandbox key via inspect
	out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns")
	if err != nil {
		c.Fatalf("Error inspecting container: %s, %v", out, err)
	}
	fileName := strings.Trim(out, " \r\n'")

	if out, err := s.d.Cmd("stop", "netns"); err != nil {
		c.Fatal(out, err)
	}

	// Test if the file still exists
	out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName))
	out = strings.TrimSpace(out)
	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
	c.Assert(out, check.Equals, fileName, check.Commentf("Output: %s", out))

	// Remove the container and restart the daemon
	if out, err := s.d.Cmd("rm", "netns"); err != nil {
		c.Fatal(out, err)
	}

	if err := s.d.Restart(); err != nil {
		c.Fatal(err)
	}

	// Test again and see now the netns file does not exist
	out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName))
	out = strings.TrimSpace(out)
	c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out))
}

// tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored
func (s *DockerDaemonSuite) TestDaemonNoTlsCliTlsVerifyWithEnv(c *check.C) {
	host := "tcp://localhost:4271"
	c.Assert(s.d.Start("-H", host), check.IsNil)
	cmd := exec.Command(dockerBinary, "-H", host, "info")
	cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"}
	out, _, err := runCommandWithOutput(cmd)
	c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out))
	c.Assert(strings.Contains(out, "error occurred trying to connect"), check.Equals, true)
}

// setupV6 adds a link-local IPv6 address to docker0 for the IPv6 tests.
func setupV6() error {
	// Hack to get the right IPv6 address on docker0, which has already been created
	err := exec.Command("ip", "addr", "add", "fe80::1/64", "dev", "docker0").Run()
	if err != nil {
		return err
	}
	return nil
}

// teardownV6 removes the address added by setupV6.
func teardownV6() error {
	err := exec.Command("ip", "addr", "del", "fe80::1/64", "dev", "docker0").Run()
	if err != nil {
		return err
	}
	return nil
}

// TestDaemonRestartWithContainerWithRestartPolicyAlways verifies a stopped
// restart=always container is brought back up by a daemon restart.
func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) {
	c.Assert(s.d.StartWithBusybox(), check.IsNil)

	out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top")
	c.Assert(err, check.IsNil)
	id := strings.TrimSpace(out)

	_, err = s.d.Cmd("stop", id)
	c.Assert(err, check.IsNil)
	_, err = s.d.Cmd("wait", id)
	c.Assert(err, check.IsNil)

	out, err = s.d.Cmd("ps", "-q")
	c.Assert(err, check.IsNil)
	c.Assert(out, check.Equals, "")

	c.Assert(s.d.Restart(), check.IsNil)

	out, err = s.d.Cmd("ps", "-q")
	c.Assert(err, check.IsNil)
	c.Assert(strings.TrimSpace(out), check.Equals, id[:12])
}

// TestDaemonWideLogConfig checks daemon-wide --log-opt values show up in the
// container's HostConfig.
func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) {
	if err := s.d.StartWithBusybox("--log-driver=json-file", "--log-opt=max-size=1k"); err != nil {
		c.Fatal(err)
	}
	out, err := s.d.Cmd("run", "-d", "--name=logtest", "busybox", "top")
	c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err))

	out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", "logtest")
	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
	cfg := strings.TrimSpace(out)
	if cfg != "map[max-size:1k]" {
		c.Fatalf("Unexpected log-opt: %s, expected map[max-size:1k]", cfg)
	}
}

// TestDaemonRestartWithPausedContainer verifies a container paused before a
// daemon restart can be started afterwards.
func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}
	if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil {
		c.Fatal(err, out)
	}
	if out, err := s.d.Cmd("pause", "test"); err != nil {
		c.Fatal(err, out)
	}
	if err := s.d.Restart(); err != nil {
		c.Fatal(err)
	}

	errchan := make(chan error)
	go func() {
		out, err := s.d.Cmd("start", "test")
		if err != nil {
			errchan <- fmt.Errorf("%v:\n%s", err, out)
		}
		name := strings.TrimSpace(out)
		if name != "test" {
			errchan <- fmt.Errorf("Paused container start error on docker daemon restart, expected 'test' but got '%s'", name)
		}
		close(errchan)
	}()

	select {
	case <-time.After(5 * time.Second):
		c.Fatal("Waiting on start a container timed out")
	case err := <-errchan:
		if err != nil {
			c.Fatal(err)
		}
	}
}

// TestDaemonRestartRmVolumeInUse checks an in-use named volume still cannot
// be removed after a daemon restart.
func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) {
	c.Assert(s.d.StartWithBusybox(), check.IsNil)

	out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox")
	c.Assert(err, check.IsNil, check.Commentf(out))

	c.Assert(s.d.Restart(), check.IsNil)

	out, err = s.d.Cmd("volume", "rm", "test")
	c.Assert(err, check.NotNil, check.Commentf("should not be able to remove in use volume after daemon restart"))
	c.Assert(out, checker.Contains, "in use")
}

// TestDaemonRestartLocalVolumes checks a named volume survives a restart.
func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) {
	c.Assert(s.d.Start(), check.IsNil)

	_, err := s.d.Cmd("volume", "create", "--name", "test")
	c.Assert(err, check.IsNil)
	c.Assert(s.d.Restart(), check.IsNil)

	_, err = s.d.Cmd("volume", "inspect", "test")
	c.Assert(err, check.IsNil)
}

// TestDaemonCorruptedLogDriverAddress checks the daemon refuses to start with
// a malformed address for address-taking log drivers and logs why.
func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) {
	for _, driver := range []string{
		"syslog",
		"gelf",
	} {
		args := []string{"--log-driver=" + driver, "--log-opt", driver + "-address=corrupted:42"}
		c.Assert(s.d.Start(args...), check.NotNil, check.Commentf(fmt.Sprintf("Expected daemon not to start with invalid %s-address provided", driver)))
		expected := fmt.Sprintf("Failed to set log opts: %s-address should be in form proto://address", driver)
		runCmd := exec.Command("grep", expected, s.d.LogfileName())
		if out, _, err := runCommandWithOutput(runCmd); err != nil {
			c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err)
		}
	}
}

// TestDaemonCorruptedFluentdAddress is the fluentd variant of the test above.
func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) {
	c.Assert(s.d.Start("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil)
	expected := "Failed to set log opts: invalid fluentd-address corrupted:c: "
	runCmd := exec.Command("grep", expected, s.d.LogfileName())
	if out, _, err := runCommandWithOutput(runCmd); err != nil {
		c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err)
	}
}

// TestDaemonStartWithoutHost checks the daemon starts with its default host.
func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) {
s.d.useDefaultHost = true
	defer func() {
		s.d.useDefaultHost = false
	}()
	c.Assert(s.d.Start(), check.IsNil)
}

// TestDaemonStartWithDefalutTlsHost checks that with --tlsverify and no -H
// the daemon and client both use the default TLS host.
// NOTE(review): "Defalut" typo kept — renaming would change the test identifier.
func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTlsHost(c *check.C) {
	s.d.useDefaultTLSHost = true
	defer func() {
		s.d.useDefaultTLSHost = false
	}()
	if err := s.d.Start(
		"--tlsverify",
		"--tlscacert", "fixtures/https/ca.pem",
		"--tlscert", "fixtures/https/server-cert.pem",
		"--tlskey", "fixtures/https/server-key.pem"); err != nil {
		c.Fatalf("Could not start daemon: %v", err)
	}

	// The client with --tlsverify should also use default host localhost:2376
	tmpHost := os.Getenv("DOCKER_HOST")
	defer func() {
		os.Setenv("DOCKER_HOST", tmpHost)
	}()

	os.Setenv("DOCKER_HOST", "")

	out, _ := dockerCmd(
		c,
		"--tlsverify",
		"--tlscacert", "fixtures/https/ca.pem",
		"--tlscert", "fixtures/https/client-cert.pem",
		"--tlskey", "fixtures/https/client-key.pem",
		"version",
	)
	if !strings.Contains(out, "Server") {
		c.Fatalf("docker version should return information of server side")
	}
}

// TestBridgeIPIsExcludedFromAllocatorPool allocates containers until the tiny
// /30 pool is exhausted, checking none of them ever receives the bridge IP.
func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) {
	defaultNetworkBridge := "docker0"
	deleteInterface(c, defaultNetworkBridge)

	bridgeIP := "192.169.1.1"
	bridgeRange := bridgeIP + "/30"

	err := s.d.StartWithBusybox("--bip", bridgeRange)
	c.Assert(err, check.IsNil)
	defer s.d.Restart()

	var cont int
	for {
		contName := fmt.Sprintf("container%d", cont)
		_, err = s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2")
		if err != nil {
			// pool exhausted
			break
		}
		ip, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.IPAddress}}'", contName)
		c.Assert(err, check.IsNil)

		c.Assert(ip, check.Not(check.Equals), bridgeIP)
		cont++
	}
}

// Test daemon for no space left on device error
func (s *DockerDaemonSuite) TestDaemonNoSpaceleftOnDeviceError(c *check.C) {
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	// create a 2MiB image and mount it as graph root
	cmd := exec.Command("dd", "of=/tmp/testfs.img", "bs=1M", "seek=2", "count=0")
	if err := cmd.Run(); err != nil {
		c.Fatalf("dd failed: %v", err)
	}
	cmd = exec.Command("mkfs.ext4", "-F", "/tmp/testfs.img")
	if err := cmd.Run(); err != nil {
		c.Fatalf("mkfs.ext4 failed: %v", err)
	}
	cmd = exec.Command("mkdir", "-p", "/tmp/testfs-mount")
	if err := cmd.Run(); err != nil {
		c.Fatalf("mkdir failed: %v", err)
	}
	cmd = exec.Command("mount", "-t", "ext4", "-no", "loop,rw", "/tmp/testfs.img", "/tmp/testfs-mount")
	if err := cmd.Run(); err != nil {
		c.Fatalf("mount failed: %v", err)
	}
	err := s.d.Start("--graph", "/tmp/testfs-mount")
	c.Assert(err, check.IsNil)

	// pull a repository large enough to fill the mount point
	out, err := s.d.Cmd("pull", "registry:2")
	// NOTE(review): comparing the string out against the int 1 can never be
	// Equals, so this assertion always passes — verify the intended check.
	c.Assert(out, check.Not(check.Equals), 1, check.Commentf("no space left on device"))
}

// Test daemon restart with container links + auto restart
func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) {
	d := NewDaemon(c)
	err := d.StartWithBusybox()
	c.Assert(err, checker.IsNil)

	parent1Args := []string{}
	parent2Args := []string{}
	wg := sync.WaitGroup{}
	maxChildren := 10
	chErr := make(chan error, maxChildren)

	for i := 0; i < maxChildren; i++ {
		wg.Add(1)
		name := fmt.Sprintf("test%d", i)

		if i < maxChildren/2 {
			parent1Args = append(parent1Args, []string{"--link", name}...)
		} else {
			parent2Args = append(parent2Args, []string{"--link", name}...)
		}

		// NOTE(review): all goroutines assign the shared outer err — possible
		// data race; each result is also sent on chErr, which is what's checked.
		go func() {
			_, err = d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top")
			chErr <- err
			wg.Done()
		}()
	}
	wg.Wait()

	close(chErr)
	for err := range chErr {
		c.Assert(err, check.IsNil)
	}

	parent1Args = append([]string{"run", "-d"}, parent1Args...)
	parent1Args = append(parent1Args, []string{"--name=parent1", "--restart=always", "busybox", "top"}...)
	parent2Args = append([]string{"run", "-d"}, parent2Args...)
	parent2Args = append(parent2Args, []string{"--name=parent2", "--restart=always", "busybox", "top"}...)

	_, err = d.Cmd(parent1Args[0], parent1Args[1:]...)
	c.Assert(err, check.IsNil)
	_, err = d.Cmd(parent2Args[0], parent2Args[1:]...)
c.Assert(err, check.IsNil) err = d.Stop() c.Assert(err, check.IsNil) // clear the log file -- we don't need any of it but may for the next part // can ignore the error here, this is just a cleanup os.Truncate(d.LogfileName(), 0) err = d.Start() c.Assert(err, check.IsNil) for _, num := range []string{"1", "2"} { out, err := d.Cmd("inspect", "-f", "{{ .State.Running }}", "parent"+num) c.Assert(err, check.IsNil) if strings.TrimSpace(out) != "true" { log, _ := ioutil.ReadFile(d.LogfileName()) c.Fatalf("parent container is not running\n%s", string(log)) } } } func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) { testRequires(c, DaemonIsLinux) cgroupParent := "test" name := "cgroup-test" err := s.d.StartWithBusybox("--cgroup-parent", cgroupParent) c.Assert(err, check.IsNil) defer s.d.Restart() out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup") c.Assert(err, checker.IsNil) cgroupPaths := parseCgroupPaths(string(out)) c.Assert(len(cgroupPaths), checker.Not(checker.Equals), 0, check.Commentf("unexpected output - %q", string(out))) out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name) c.Assert(err, checker.IsNil) id := strings.TrimSpace(string(out)) expectedCgroup := path.Join(cgroupParent, id) found := false for _, path := range cgroupPaths { if strings.HasSuffix(path, expectedCgroup) { found = true break } } c.Assert(found, checker.True, check.Commentf("Cgroup path for container (%s) doesn't found in cgroups file: %s", expectedCgroup, cgroupPaths)) } func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support links err := s.d.StartWithBusybox() c.Assert(err, check.IsNil) out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc") c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(s.d.Restart(), 
check.IsNil) // should fail since test is not running yet out, err = s.d.Cmd("start", "test2") c.Assert(err, check.NotNil, check.Commentf(out)) out, err = s.d.Cmd("start", "test") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("start", "-a", "test2") c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(strings.Contains(out, "1 packets transmitted, 1 packets received"), check.Equals, true, check.Commentf(out)) } func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support links err := s.d.StartWithBusybox() c.Assert(err, check.IsNil) out, err := s.d.Cmd("create", "--name=test", "busybox") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("run", "-d", "--name=test2", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) test2ID := strings.TrimSpace(out) out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top") test3ID := strings.TrimSpace(out) c.Assert(s.d.Restart(), check.IsNil) out, err = s.d.Cmd("create", "--name=test", "busybox") c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name")) // this one is no longer needed, removing simplifies the remainder of the test out, err = s.d.Cmd("rm", "-f", "test") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("ps", "-a", "--no-trunc") c.Assert(err, check.IsNil, check.Commentf(out)) lines := strings.Split(strings.TrimSpace(out), "\n")[1:] test2validated := false test3validated := false for _, line := range lines { fields := strings.Fields(line) names := fields[len(fields)-1] switch fields[0] { case test2ID: c.Assert(names, check.Equals, "test2,test3/abc") test2validated = true case test3ID: c.Assert(names, check.Equals, "test3") test3validated = true } } c.Assert(test2validated, check.Equals, true) c.Assert(test3validated, check.Equals, true) } // TestRunLinksChanged checks that creating a new container 
with the same name does not update links // this ensures that the old, pre gh#16032 functionality continues on func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support links err := s.d.StartWithBusybox() c.Assert(err, check.IsNil) out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("run", "--name=test2", "--link=test:abc", "busybox", "sh", "-c", "ping -c 1 abc") c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(out, checker.Contains, "1 packets transmitted, 1 packets received") out, err = s.d.Cmd("rm", "-f", "test") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("run", "-d", "--name=test", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("start", "-a", "test2") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") err = s.d.Restart() c.Assert(err, check.IsNil) out, err = s.d.Cmd("start", "-a", "test2") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") } docker-1.10.3/integration-cli/docker_cli_diff_test.go000066400000000000000000000050251267010174400226130ustar00rootroot00000000000000package main import ( "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // ensure that an added file shows up in docker diff func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { testRequires(c, DaemonIsLinux) containerCmd := `echo foo > /root/bar` out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) cleanCID := strings.TrimSpace(out) out, _ = dockerCmd(c, "diff", cleanCID) found := false for _, line := range strings.Split(out, "\n") { if strings.Contains("A /root/bar", line) { found = true break } } c.Assert(found, checker.True) } // test to 
ensure GH #3840 doesn't occur any more func (s *DockerSuite) TestDiffEnsureDockerinitFilesAreIgnored(c *check.C) { testRequires(c, DaemonIsLinux) // this is a list of files which shouldn't show up in `docker diff` dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"} containerCount := 5 // we might not run into this problem from the first run, so start a few containers for i := 0; i < containerCount; i++ { containerCmd := `echo foo > /root/bar` out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) cleanCID := strings.TrimSpace(out) out, _ = dockerCmd(c, "diff", cleanCID) for _, filename := range dockerinitFiles { c.Assert(out, checker.Not(checker.Contains), filename) } } } func (s *DockerSuite) TestDiffEnsureOnlyKmsgAndPtmx(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0") cleanCID := strings.TrimSpace(out) out, _ = dockerCmd(c, "diff", cleanCID) expected := map[string]bool{ "C /dev": true, "A /dev/full": true, // busybox "C /dev/ptmx": true, // libcontainer "A /dev/mqueue": true, "A /dev/kmsg": true, "A /dev/fd": true, "A /dev/fuse": true, "A /dev/ptmx": true, "A /dev/null": true, "A /dev/random": true, "A /dev/stdout": true, "A /dev/stderr": true, "A /dev/tty1": true, "A /dev/stdin": true, "A /dev/tty": true, "A /dev/urandom": true, "A /dev/zero": true, } for _, line := range strings.Split(out, "\n") { c.Assert(line == "" || expected[line], checker.True) } } // https://github.com/docker/docker/pull/14381#discussion_r33859347 func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) { out, _, err := dockerCmdWithError("diff", "") c.Assert(err, checker.NotNil) c.Assert(strings.TrimSpace(out), checker.Equals, "Container name cannot be empty") } docker-1.10.3/integration-cli/docker_cli_events_test.go000066400000000000000000000517611267010174400232170ustar00rootroot00000000000000package main import ( "bufio" "fmt" "io/ioutil" 
"net/http" "os" "os/exec" "strconv" "strings" "sync" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { testRequires(c, DaemonIsLinux) image := "busybox" // Start stopwatch, generate an event time.Sleep(1 * time.Second) // so that we don't grab events from previous test occured in the same second start := daemonTime(c) dockerCmd(c, "tag", image, "timestamptest:1") dockerCmd(c, "rmi", "timestamptest:1") time.Sleep(1 * time.Second) // so that until > since end := daemonTime(c) // List of available time formats to --since unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) } rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) } duration := func(t time.Time) string { return time.Now().Sub(t).String() } // --since=$start must contain only the 'untag' event for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} { since, until := f(start), f(end) out, _ := dockerCmd(c, "events", "--since="+since, "--until="+until) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(events, checker.HasLen, 2, check.Commentf("unexpected events, was expecting only 2 events tag/untag (since=%s, until=%s) out=%s", since, until, out)) c.Assert(out, checker.Contains, "untag", check.Commentf("expected 'untag' event not found (since=%s, until=%s)", since, until)) } } func (s *DockerSuite) TestEventsUntag(c *check.C) { testRequires(c, DaemonIsLinux) image := "busybox" dockerCmd(c, "tag", image, "utest:tag1") dockerCmd(c, "tag", image, "utest:tag2") dockerCmd(c, "rmi", "utest:tag1") dockerCmd(c, "rmi", "utest:tag2") eventsCmd := exec.Command(dockerBinary, "events", "--since=1") out, exitCode, _, err := runCommandWithOutputForDuration(eventsCmd, time.Duration(time.Millisecond*2500)) c.Assert(err, checker.IsNil) c.Assert(exitCode, checker.Equals, 0, check.Commentf("Failed to get events")) events := strings.Split(out, "\n") nEvents := 
len(events) // The last element after the split above will be an empty string, so we // get the two elements before the last, which are the untags we're // looking for. for _, v := range events[nEvents-3 : nEvents-1] { c.Assert(v, checker.Contains, "untag", check.Commentf("event should be untag")) } } func (s *DockerSuite) TestEventsContainerFailStartDie(c *check.C) { out, _ := dockerCmd(c, "images", "-q") image := strings.Split(out, "\n")[0] _, _, err := dockerCmdWithError("run", "--name", "testeventdie", image, "blerg") c.Assert(err, checker.NotNil, check.Commentf("Container run with command blerg should have failed, but it did not, out=%s", out)) out, _ = dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") nEvents := len(events) c.Assert(nEvents, checker.GreaterOrEqualThan, 1) //Missing expected event actions := eventActionsByIDAndType(c, events, "testeventdie", "container") var startEvent bool var dieEvent bool for _, a := range actions { switch a { case "start": startEvent = true case "die": dieEvent = true } } c.Assert(startEvent, checker.True, check.Commentf("Start event not found: %v\n%v", actions, events)) c.Assert(dieEvent, checker.True, check.Commentf("Die event not found: %v\n%v", actions, events)) } func (s *DockerSuite) TestEventsLimit(c *check.C) { testRequires(c, DaemonIsLinux) var waitGroup sync.WaitGroup errChan := make(chan error, 17) args := []string{"run", "--rm", "busybox", "true"} for i := 0; i < 17; i++ { waitGroup.Add(1) go func() { defer waitGroup.Done() errChan <- exec.Command(dockerBinary, args...).Run() }() } waitGroup.Wait() close(errChan) for err := range errChan { c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " "))) } out, _ := dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(out, "\n") nEvents := len(events) - 1 c.Assert(nEvents, checker.Equals, 
64, check.Commentf("events should be limited to 64, but received %d", nEvents)) } func (s *DockerSuite) TestEventsContainerEvents(c *check.C) { testRequires(c, DaemonIsLinux) containerID, _ := dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") containerID = strings.TrimSpace(containerID) out, _ := dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(out, "\n") events = events[:len(events)-1] nEvents := len(events) c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event containerEvents := eventActionsByIDAndType(c, events, "container-events-test", "container") c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) } func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--rm", "--name", "since-epoch-test", "busybox", "true") timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano) timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1) out, _ := dockerCmd(c, "events", fmt.Sprintf("--since='%s'", timeBeginning), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(out, "\n") events = events[:len(events)-1] nEvents := len(events) c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event containerEvents := eventActionsByIDAndType(c, events, "since-epoch-test", "container") c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) 
c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) } func (s *DockerSuite) TestEventsImageTag(c *check.C) { testRequires(c, DaemonIsLinux) time.Sleep(1 * time.Second) // because API has seconds granularity since := daemonTime(c).Unix() image := "testimageevents:tag" dockerCmd(c, "tag", "busybox", image) out, _ := dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(events, checker.HasLen, 1, check.Commentf("was expecting 1 event. out=%s", out)) event := strings.TrimSpace(events[0]) matches := parseEventText(event) c.Assert(matchEventID(matches, image), checker.True, check.Commentf("matches: %v\nout:\n%s", matches, out)) c.Assert(matches["action"], checker.Equals, "tag") } func (s *DockerSuite) TestEventsImagePull(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() testRequires(c, Network) dockerCmd(c, "pull", "hello-world") out, _ := dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") event := strings.TrimSpace(events[len(events)-1]) matches := parseEventText(event) c.Assert(matches["id"], checker.Equals, "hello-world:latest") c.Assert(matches["action"], checker.Equals, "pull") } func (s *DockerSuite) TestEventsImageImport(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) since := daemonTime(c).Unix() out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "export", cleanedContainerID), exec.Command(dockerBinary, "import", "-"), ) c.Assert(err, checker.IsNil, 
check.Commentf("import failed with output: %q", out)) imageRef := strings.TrimSpace(out) out, _ = dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=import") events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(events, checker.HasLen, 1) matches := parseEventText(events[0]) c.Assert(matches["id"], checker.Equals, imageRef, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) c.Assert(matches["action"], checker.Equals, "import", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) } func (s *DockerSuite) TestEventsFilters(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() dockerCmd(c, "run", "--rm", "busybox", "true") dockerCmd(c, "run", "--rm", "busybox", "true") out, _ := dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=die") parseEvents(c, out, "die") out, _ = dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=die", "--filter", "event=start") parseEvents(c, out, "die|start") // make sure we at least got 2 start events count := strings.Count(out, "start") c.Assert(strings.Count(out, "start"), checker.GreaterOrEqualThan, 2, check.Commentf("should have had 2 start events but had %d, out: %s", count, out)) } func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() out, _ := dockerCmd(c, "run", "--name", "container_1", "-d", "busybox:latest", "true") container1 := strings.TrimSpace(out) out, _ = dockerCmd(c, "run", "--name", "container_2", "-d", "busybox", "true") container2 := strings.TrimSpace(out) name := "busybox" out, _ = dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", fmt.Sprintf("image=%s", name)) events := strings.Split(out, "\n") events = events[:len(events)-1] 
c.Assert(events, checker.Not(checker.HasLen), 0) //Expected events but found none for the image busybox:latest count1 := 0 count2 := 0 for _, e := range events { if strings.Contains(e, container1) { count1++ } else if strings.Contains(e, container2) { count2++ } } c.Assert(count1, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count1, container1)) c.Assert(count2, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count2, container2)) } func (s *DockerSuite) TestEventsFilterLabels(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() label := "io.docker.testing=foo" out, _ := dockerCmd(c, "run", "-d", "-l", label, "busybox:latest", "true") container1 := strings.TrimSpace(out) out, _ = dockerCmd(c, "run", "-d", "busybox", "true") container2 := strings.TrimSpace(out) out, _ = dockerCmd( c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", fmt.Sprintf("label=%s", label)) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(events), checker.Equals, 3) for _, e := range events { c.Assert(e, checker.Contains, container1) c.Assert(e, checker.Not(checker.Contains), container2) } } func (s *DockerSuite) TestEventsFilterImageLabels(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() name := "labelfiltertest" label := "io.docker.testing=image" // Build a test image. 
_, err := buildImage(name, fmt.Sprintf(` FROM busybox:latest LABEL %s`, label), true) c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) dockerCmd(c, "tag", name, "labelfiltertest:tag1") dockerCmd(c, "tag", name, "labelfiltertest:tag2") dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") out, _ := dockerCmd( c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", fmt.Sprintf("label=%s", label), "--filter", "type=image") events := strings.Split(strings.TrimSpace(out), "\n") // 2 events from the "docker tag" command, another one is from "docker build" c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) for _, e := range events { c.Assert(e, checker.Contains, "labelfiltertest") } } func (s *DockerSuite) TestEventsFilterContainer(c *check.C) { testRequires(c, DaemonIsLinux) since := fmt.Sprintf("%d", daemonTime(c).Unix()) nameID := make(map[string]string) for _, name := range []string{"container_1", "container_2"} { dockerCmd(c, "run", "--name", name, "busybox", "true") id, err := inspectField(name, "Id") c.Assert(err, checker.IsNil) nameID[name] = id } until := fmt.Sprintf("%d", daemonTime(c).Unix()) checkEvents := func(id string, events []string) error { if len(events) != 4 { // create, attach, start, die return fmt.Errorf("expected 4 events, got %v", events) } for _, event := range events { matches := parseEventText(event) if !matchEventID(matches, id) { return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, matches["id"]) } } return nil } for name, ID := range nameID { // filter by names out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+name) events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") c.Assert(checkEvents(ID, events), checker.IsNil) // filter by ID's out, _ = dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+ID) events = 
strings.Split(strings.TrimSuffix(out, "\n"), "\n") c.Assert(checkEvents(ID, events), checker.IsNil) } } func (s *DockerSuite) TestEventsCommit(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cID := strings.TrimSpace(out) c.Assert(waitRun(cID), checker.IsNil) dockerCmd(c, "commit", "-m", "test", cID) dockerCmd(c, "stop", cID) out, _ = dockerCmd(c, "events", "--since=0", "-f", "container="+cID, "--until="+strconv.Itoa(int(since))) c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event")) } func (s *DockerSuite) TestEventsCopy(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() // Build a test image. id, err := buildImage("cpimg", ` FROM busybox RUN echo HI > /tmp/file`, true) c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) // Create an empty test file. tempFile, err := ioutil.TempFile("", "test-events-copy-") c.Assert(err, checker.IsNil) defer os.Remove(tempFile.Name()) c.Assert(tempFile.Close(), checker.IsNil) dockerCmd(c, "create", "--name=cptest", id) dockerCmd(c, "cp", "cptest:/tmp/file", tempFile.Name()) out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+strconv.Itoa(int(since))) c.Assert(out, checker.Contains, "archive-path", check.Commentf("Missing 'archive-path' log event\n")) dockerCmd(c, "cp", tempFile.Name(), "cptest:/tmp/filecopy") out, _ = dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+strconv.Itoa(int(since))) c.Assert(out, checker.Contains, "extract-to-dir", check.Commentf("Missing 'extract-to-dir' log event")) } func (s *DockerSuite) TestEventsResize(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cID := strings.TrimSpace(out) c.Assert(waitRun(cID), checker.IsNil) endpoint := "/containers/" + cID + "/resize?h=80&w=24" status, _, err := sockRequest("POST", 
endpoint, nil) c.Assert(status, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) dockerCmd(c, "stop", cID) out, _ = dockerCmd(c, "events", "--since=0", "-f", "container="+cID, "--until="+strconv.Itoa(int(since))) c.Assert(out, checker.Contains, "resize", check.Commentf("Missing 'resize' log event")) } func (s *DockerSuite) TestEventsAttach(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") cID := strings.TrimSpace(out) cmd := exec.Command(dockerBinary, "attach", cID) stdin, err := cmd.StdinPipe() c.Assert(err, checker.IsNil) defer stdin.Close() stdout, err := cmd.StdoutPipe() c.Assert(err, checker.IsNil) defer stdout.Close() c.Assert(cmd.Start(), checker.IsNil) defer cmd.Process.Kill() // Make sure we're done attaching by writing/reading some stuff _, err = stdin.Write([]byte("hello\n")) c.Assert(err, checker.IsNil) out, err = bufio.NewReader(stdout).ReadString('\n') c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello'")) c.Assert(stdin.Close(), checker.IsNil) dockerCmd(c, "stop", cID) out, _ = dockerCmd(c, "events", "--since=0", "-f", "container="+cID, "--until="+strconv.Itoa(int(since))) c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event")) } func (s *DockerSuite) TestEventsRename(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() dockerCmd(c, "run", "--name", "oldName", "busybox", "true") dockerCmd(c, "rename", "oldName", "newName") out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=newName", "--until="+strconv.Itoa(int(since))) c.Assert(out, checker.Contains, "rename", check.Commentf("Missing 'rename' log event\n")) } func (s *DockerSuite) TestEventsTop(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cID := strings.TrimSpace(out) c.Assert(waitRun(cID), 
checker.IsNil) dockerCmd(c, "top", cID) dockerCmd(c, "stop", cID) out, _ = dockerCmd(c, "events", "--since=0", "-f", "container="+cID, "--until="+strconv.Itoa(int(since))) c.Assert(out, checker.Contains, " top", check.Commentf("Missing 'top' log event")) } // #13753 func (s *DockerSuite) TestEventsDefaultEmpty(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "busybox") out, _ := dockerCmd(c, "events", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) c.Assert(strings.TrimSpace(out), checker.Equals, "") } // #14316 func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, Network) since := daemonTime(c).Unix() repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cID := strings.TrimSpace(out) c.Assert(waitRun(cID), checker.IsNil) dockerCmd(c, "commit", cID, repoName) dockerCmd(c, "stop", cID) dockerCmd(c, "push", repoName) out, _ = dockerCmd(c, "events", "--since=0", "-f", "image="+repoName, "-f", "event=push", "--until="+strconv.Itoa(int(since))) c.Assert(out, checker.Contains, repoName, check.Commentf("Missing 'push' log event for %s", repoName)) } func (s *DockerSuite) TestEventsFilterType(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() name := "labelfiltertest" label := "io.docker.testing=image" // Build a test image. 
_, err := buildImage(name, fmt.Sprintf(` FROM busybox:latest LABEL %s`, label), true) c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) dockerCmd(c, "tag", name, "labelfiltertest:tag1") dockerCmd(c, "tag", name, "labelfiltertest:tag2") dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") out, _ := dockerCmd( c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", fmt.Sprintf("label=%s", label), "--filter", "type=image") events := strings.Split(strings.TrimSpace(out), "\n") // 2 events from the "docker tag" command, another one is from "docker build" c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) for _, e := range events { c.Assert(e, checker.Contains, "labelfiltertest") } out, _ = dockerCmd( c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", fmt.Sprintf("label=%s", label), "--filter", "type=container") events = strings.Split(strings.TrimSpace(out), "\n") // Events generated by the container that builds the image c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) out, _ = dockerCmd( c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "type=network") events = strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(events), checker.GreaterOrEqualThan, 1, check.Commentf("Events == %s", events)) } func (s *DockerSuite) TestEventsFilterImageInContainerAction(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") waitRun("test-container") out, _ := dockerCmd(c, "events", "--filter", "image=busybox", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(events), checker.GreaterThan, 1, check.Commentf(out)) } 
docker-1.10.3/integration-cli/docker_cli_events_unix_test.go000066400000000000000000000276161267010174400242640ustar00rootroot00000000000000// +build !windows package main import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "strings" "time" "unicode" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" "github.com/kr/pty" ) // #5979 func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { since := daemonTime(c).Unix() dockerCmd(c, "run", "busybox", "true") file, err := ioutil.TempFile("", "") c.Assert(err, checker.IsNil, check.Commentf("could not create temp file")) defer os.Remove(file.Name()) command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, daemonTime(c).Unix(), file.Name()) _, tty, err := pty.Open() c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) cmd := exec.Command("sh", "-c", command) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty c.Assert(cmd.Run(), checker.IsNil, check.Commentf("run err for command %q", command)) scanner := bufio.NewScanner(file) for scanner.Scan() { for _, ch := range scanner.Text() { c.Assert(unicode.IsControl(ch), checker.False, check.Commentf("found control character %v", []byte(string(ch)))) } } c.Assert(scanner.Err(), checker.IsNil, check.Commentf("Scan err for command %q", command)) } func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotGCCGO) errChan := make(chan error) go func() { defer close(errChan) out, exitCode, _ := dockerCmdWithError("run", "--name", "oomFalse", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") if expected := 137; exitCode != expected { errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) } }() select { case err := <-errChan: c.Assert(err, checker.IsNil) case <-time.After(30 * time.Second): c.Fatal("Timeout waiting for container to die on OOM") } out, _ := 
dockerCmd(c, "events", "--since=0", "-f", "container=oomFalse", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") nEvents := len(events) c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event c.Assert(parseEventAction(c, events[nEvents-5]), checker.Equals, "create") c.Assert(parseEventAction(c, events[nEvents-4]), checker.Equals, "attach") c.Assert(parseEventAction(c, events[nEvents-3]), checker.Equals, "start") c.Assert(parseEventAction(c, events[nEvents-2]), checker.Equals, "oom") c.Assert(parseEventAction(c, events[nEvents-1]), checker.Equals, "die") } func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotGCCGO) errChan := make(chan error) go func() { defer close(errChan) out, exitCode, _ := dockerCmdWithError("run", "--oom-kill-disable=true", "--name", "oomTrue", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") if expected := 137; exitCode != expected { errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) } }() select { case err := <-errChan: c.Assert(err, checker.IsNil) case <-time.After(20 * time.Second): defer dockerCmd(c, "kill", "oomTrue") out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=oomTrue", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") nEvents := len(events) c.Assert(nEvents, checker.GreaterOrEqualThan, 4) //Missing expected event c.Assert(parseEventAction(c, events[nEvents-4]), checker.Equals, "create") c.Assert(parseEventAction(c, events[nEvents-3]), checker.Equals, "attach") c.Assert(parseEventAction(c, events[nEvents-2]), checker.Equals, "start") c.Assert(parseEventAction(c, events[nEvents-1]), checker.Equals, "oom") out, _ = dockerCmd(c, "inspect", "-f", "{{.State.Status}}", "oomTrue") c.Assert(strings.TrimSpace(out), 
checker.Equals, "running", check.Commentf("container should be still running")) } } // #18453 func (s *DockerSuite) TestEventsContainerFilterByName(c *check.C) { testRequires(c, DaemonIsLinux) cOut, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") c1 := strings.TrimSpace(cOut) waitRun("foo") cOut, _ = dockerCmd(c, "run", "--name=bar", "-d", "busybox", "top") c2 := strings.TrimSpace(cOut) waitRun("bar") out, _ := dockerCmd(c, "events", "-f", "container=foo", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) c.Assert(out, checker.Contains, c1, check.Commentf(out)) c.Assert(out, checker.Not(checker.Contains), c2, check.Commentf(out)) } // #18453 func (s *DockerSuite) TestEventsContainerFilterBeforeCreate(c *check.C) { testRequires(c, DaemonIsLinux) var ( out string ch chan struct{} ) ch = make(chan struct{}) // calculate the time it takes to create and start a container and sleep 2 seconds // this is to make sure the docker event will recevie the event of container since := daemonTime(c).Unix() id, _ := dockerCmd(c, "run", "-d", "busybox", "top") cID := strings.TrimSpace(id) waitRun(cID) time.Sleep(2 * time.Second) duration := daemonTime(c).Unix() - since go func() { out, _ = dockerCmd(c, "events", "-f", "container=foo", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()+2*duration)) close(ch) }() // Sleep 2 second to wait docker event to start time.Sleep(2 * time.Second) id, _ = dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") cID = strings.TrimSpace(id) waitRun(cID) <-ch c.Assert(out, checker.Contains, cID, check.Commentf("Missing event of container (foo)")) } func (s *DockerSuite) TestVolumeEvents(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() // Observe create/mount volume actions dockerCmd(c, "volume", "create", "--name", "test-event-volume-local") dockerCmd(c, "run", "--name", "test-volume-container", "--volume", "test-event-volume-local:/foo", "-d", "busybox", "true") 
waitRun("test-volume-container") // Observe unmount/destroy volume actions dockerCmd(c, "rm", "-f", "test-volume-container") dockerCmd(c, "volume", "rm", "test-event-volume-local") out, _ := dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(events), checker.GreaterThan, 4) volumeEvents := eventActionsByIDAndType(c, events, "test-event-volume-local", "volume") c.Assert(volumeEvents, checker.HasLen, 4) c.Assert(volumeEvents[0], checker.Equals, "create") c.Assert(volumeEvents[1], checker.Equals, "mount") c.Assert(volumeEvents[2], checker.Equals, "unmount") c.Assert(volumeEvents[3], checker.Equals, "destroy") } func (s *DockerSuite) TestNetworkEvents(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() // Observe create/connect network actions dockerCmd(c, "network", "create", "test-event-network-local") dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local", "-d", "busybox", "true") waitRun("test-network-container") // Observe disconnect/destroy network actions dockerCmd(c, "rm", "-f", "test-network-container") dockerCmd(c, "network", "rm", "test-event-network-local") out, _ := dockerCmd(c, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(events), checker.GreaterThan, 4) netEvents := eventActionsByIDAndType(c, events, "test-event-network-local", "network") c.Assert(netEvents, checker.HasLen, 4) c.Assert(netEvents[0], checker.Equals, "create") c.Assert(netEvents[1], checker.Equals, "connect") c.Assert(netEvents[2], checker.Equals, "disconnect") c.Assert(netEvents[3], checker.Equals, "destroy") } func (s *DockerSuite) TestEventsStreaming(c *check.C) { testRequires(c, DaemonIsLinux) observer, err := newEventObserver(c) c.Assert(err, checker.IsNil) err = observer.Start() c.Assert(err, 
checker.IsNil) defer observer.Stop() out, _ := dockerCmd(c, "run", "-d", "busybox:latest", "true") containerID := strings.TrimSpace(out) testActions := map[string]chan bool{ "create": make(chan bool), "start": make(chan bool), "die": make(chan bool), "destroy": make(chan bool), } matcher := matchEventLine(containerID, "container", testActions) processor := processEventMatch(testActions) go observer.Match(matcher, processor) select { case <-time.After(5 * time.Second): observer.CheckEventError(c, containerID, "create", matcher) case <-testActions["create"]: // ignore, done } select { case <-time.After(5 * time.Second): observer.CheckEventError(c, containerID, "start", matcher) case <-testActions["start"]: // ignore, done } select { case <-time.After(5 * time.Second): observer.CheckEventError(c, containerID, "die", matcher) case <-testActions["die"]: // ignore, done } dockerCmd(c, "rm", containerID) select { case <-time.After(5 * time.Second): observer.CheckEventError(c, containerID, "destroy", matcher) case <-testActions["destroy"]: // ignore, done } } func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) { testRequires(c, DaemonIsLinux) observer, err := newEventObserver(c) c.Assert(err, checker.IsNil) err = observer.Start() c.Assert(err, checker.IsNil) defer observer.Stop() name := "testimageevents" imageID, err := buildImage(name, `FROM scratch MAINTAINER "docker"`, true) c.Assert(err, checker.IsNil) c.Assert(deleteImages(name), checker.IsNil) testActions := map[string]chan bool{ "untag": make(chan bool), "delete": make(chan bool), } matcher := matchEventLine(imageID, "image", testActions) processor := processEventMatch(testActions) go observer.Match(matcher, processor) select { case <-time.After(10 * time.Second): observer.CheckEventError(c, imageID, "untag", matcher) case <-testActions["untag"]: // ignore, done } select { case <-time.After(10 * time.Second): observer.CheckEventError(c, imageID, "delete", matcher) case <-testActions["delete"]: // ignore, 
done } } func (s *DockerSuite) TestEventsFilterVolumeAndNetworkType(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() dockerCmd(c, "network", "create", "test-event-network-type") dockerCmd(c, "volume", "create", "--name", "test-event-volume-type") out, _ := dockerCmd(c, "events", "--filter", "type=volume", "--filter", "type=network", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(events), checker.GreaterOrEqualThan, 2, check.Commentf(out)) networkActions := eventActionsByIDAndType(c, events, "test-event-network-type", "network") volumeActions := eventActionsByIDAndType(c, events, "test-event-volume-type", "volume") c.Assert(volumeActions[0], checker.Equals, "create") c.Assert(networkActions[0], checker.Equals, "create") } func (s *DockerSuite) TestEventsFilterVolumeID(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() dockerCmd(c, "volume", "create", "--name", "test-event-volume-id") out, _ := dockerCmd(c, "events", "--filter", "volume=test-event-volume-id", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(events, checker.HasLen, 1) c.Assert(events[0], checker.Contains, "test-event-volume-id") c.Assert(events[0], checker.Contains, "driver=local") } func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) { testRequires(c, DaemonIsLinux) since := daemonTime(c).Unix() dockerCmd(c, "network", "create", "test-event-network-local") out, _ := dockerCmd(c, "events", "--filter", "network=test-event-network-local", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") c.Assert(events, checker.HasLen, 1) c.Assert(events[0], checker.Contains, "test-event-network-local") c.Assert(events[0], checker.Contains, "type=bridge") } 
docker-1.10.3/integration-cli/docker_cli_exec_test.go000066400000000000000000000427241267010174400226360ustar00rootroot00000000000000// +build !test_no_exec package main import ( "bufio" "fmt" "net/http" "os" "os/exec" "path/filepath" "reflect" "sort" "strings" "sync" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestExec(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") out, _ := dockerCmd(c, "exec", "testing", "cat", "/tmp/file") out = strings.Trim(out, "\r\n") c.Assert(out, checker.Equals, "test") } func (s *DockerSuite) TestExecInteractive(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") stdin, err := execCmd.StdinPipe() c.Assert(err, checker.IsNil) stdout, err := execCmd.StdoutPipe() c.Assert(err, checker.IsNil) err = execCmd.Start() c.Assert(err, checker.IsNil) _, err = stdin.Write([]byte("cat /tmp/file\n")) c.Assert(err, checker.IsNil) r := bufio.NewReader(stdout) line, err := r.ReadString('\n') c.Assert(err, checker.IsNil) line = strings.TrimSpace(line) c.Assert(line, checker.Equals, "test") err = stdin.Close() c.Assert(err, checker.IsNil) errChan := make(chan error) go func() { errChan <- execCmd.Wait() close(errChan) }() select { case err := <-errChan: c.Assert(err, checker.IsNil) case <-time.After(1 * time.Second): c.Fatal("docker exec failed to exit on stdin close") } } func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) c.Assert(waitRun(cleanedContainerID), check.IsNil) dockerCmd(c, "restart", cleanedContainerID) c.Assert(waitRun(cleanedContainerID), check.IsNil) out, _ = dockerCmd(c, 
"exec", cleanedContainerID, "echo", "hello") outStr := strings.TrimSpace(out) c.Assert(outStr, checker.Equals, "hello") } func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) err := s.d.StartWithBusybox() c.Assert(err, checker.IsNil) out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top") c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out)) err = s.d.Restart() c.Assert(err, checker.IsNil, check.Commentf("Could not restart daemon")) out, err = s.d.Cmd("start", "top") c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out)) out, err = s.d.Cmd("exec", "top", "echo", "hello") c.Assert(err, checker.IsNil, check.Commentf("Could not exec on container top: %s", out)) outStr := strings.TrimSpace(string(out)) c.Assert(outStr, checker.Equals, "hello") } // Regression test for #9155, #9044 func (s *DockerSuite) TestExecEnv(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing", "busybox", "top") c.Assert(waitRun("testing"), check.IsNil) out, _ := dockerCmd(c, "exec", "testing", "env") c.Assert(out, checker.Not(checker.Contains), "LALA=value1") c.Assert(out, checker.Contains, "LALA=value2") c.Assert(out, checker.Contains, "HOME=/root") } func (s *DockerSuite) TestExecExitStatus(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") // Test normal (non-detached) case first cmd := exec.Command(dockerBinary, "exec", "top", "sh", "-c", "exit 23") ec, _ := runCommand(cmd) c.Assert(ec, checker.Equals, 23) } func (s *DockerSuite) TestExecPausedContainer(c *check.C) { testRequires(c, DaemonIsLinux) defer unpauseAllContainers() out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") ContainerID := strings.TrimSpace(out) dockerCmd(c, "pause", "testing") out, _, err := 
dockerCmdWithError("exec", "-i", "-t", ContainerID, "echo", "hello") c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new conmmand if it is paused")) expected := ContainerID + " is paused, unpause the container before exec" c.Assert(out, checker.Contains, expected, check.Commentf("container should not exec new command if it is paused")) } // regression test for #9476 func (s *DockerSuite) TestExecTtyCloseStdin(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") stdinRw, err := cmd.StdinPipe() c.Assert(err, checker.IsNil) stdinRw.Write([]byte("test")) stdinRw.Close() out, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.IsNil, check.Commentf(out)) out, _ = dockerCmd(c, "top", "exec_tty_stdin") outArr := strings.Split(out, "\n") c.Assert(len(outArr), checker.LessOrEqualThan, 3, check.Commentf("exec process left running")) c.Assert(out, checker.Not(checker.Contains), "nsenter-exec") } func (s *DockerSuite) TestExecTtyWithoutStdin(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) errChan := make(chan error) go func() { defer close(errChan) cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") if _, err := cmd.StdinPipe(); err != nil { errChan <- err return } expected := "cannot enable tty mode" if out, _, err := runCommandWithOutput(cmd); err == nil { errChan <- fmt.Errorf("exec should have failed") return } else if !strings.Contains(out, expected) { errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) return } }() select { case err := <-errChan: c.Assert(err, check.IsNil) case <-time.After(3 * time.Second): c.Fatal("exec is running but should have failed") } } func (s *DockerSuite) TestExecParseError(c *check.C) { testRequires(c, DaemonIsLinux) 
dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") // Test normal (non-detached) case first cmd := exec.Command(dockerBinary, "exec", "top") _, stderr, _, err := runCommandWithStdoutStderr(cmd) c.Assert(err, checker.NotNil) c.Assert(stderr, checker.Contains, "See '"+dockerBinary+" exec --help'") } func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") err := exec.Command(dockerBinary, "exec", "testing", "top").Start() c.Assert(err, checker.IsNil) type dstop struct { out []byte err error } ch := make(chan dstop) go func() { out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() ch <- dstop{out, err} close(ch) }() select { case <-time.After(3 * time.Second): c.Fatal("Container stop timed out") case s := <-ch: c.Assert(s.err, check.IsNil) } } func (s *DockerSuite) TestExecCgroup(c *check.C) { testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup") containerCgroups := sort.StringSlice(strings.Split(out, "\n")) var wg sync.WaitGroup var mu sync.Mutex execCgroups := []sort.StringSlice{} errChan := make(chan error) // exec a few times concurrently to get consistent failure for i := 0; i < 5; i++ { wg.Add(1) go func() { out, _, err := dockerCmdWithError("exec", "testing", "cat", "/proc/self/cgroup") if err != nil { errChan <- err return } cg := sort.StringSlice(strings.Split(out, "\n")) mu.Lock() execCgroups = append(execCgroups, cg) mu.Unlock() wg.Done() }() } wg.Wait() close(errChan) for err := range errChan { c.Assert(err, checker.IsNil) } for _, cg := range execCgroups { if !reflect.DeepEqual(cg, containerCgroups) { fmt.Println("exec cgroups:") for _, name := range cg { fmt.Printf(" %s\n", name) } fmt.Println("container cgroups:") for _, name := range containerCgroups { fmt.Printf(" %s\n", name) } 
c.Fatal("cgroups mismatched") } } } func (s *DockerSuite) TestInspectExecID(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSuffix(out, "\n") out, err := inspectField(id, "ExecIDs") c.Assert(err, checker.IsNil, check.Commentf("failed to inspect container: %s", out)) c.Assert(out, checker.Equals, "[]", check.Commentf("ExecIDs should be empty, got: %s", out)) // Start an exec, have it block waiting so we can do some checking cmd := exec.Command(dockerBinary, "exec", id, "sh", "-c", "while ! test -e /tmp/execid1; do sleep 1; done") err = cmd.Start() c.Assert(err, checker.IsNil, check.Commentf("failed to start the exec cmd")) // Give the exec 10 chances/seconds to start then give up and stop the test tries := 10 for i := 0; i < tries; i++ { // Since its still running we should see exec as part of the container out, err = inspectField(id, "ExecIDs") c.Assert(err, checker.IsNil, check.Commentf("failed to inspect container: %s", out)) out = strings.TrimSuffix(out, "\n") if out != "[]" && out != "" { break } c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs should be empty, got: %s", out)) time.Sleep(1 * time.Second) } // Save execID for later execID, err := inspectFilter(id, "index .ExecIDs 0") c.Assert(err, checker.IsNil, check.Commentf("failed to get the exec id")) // End the exec by creating the missing file err = exec.Command(dockerBinary, "exec", id, "sh", "-c", "touch /tmp/execid1").Run() c.Assert(err, checker.IsNil, check.Commentf("failed to run the 2nd exec cmd")) // Wait for 1st exec to complete cmd.Wait() // All execs for the container should be gone now out, err = inspectField(id, "ExecIDs") c.Assert(err, checker.IsNil, check.Commentf("failed to inspect container: %s", out)) out = strings.TrimSuffix(out, "\n") c.Assert(out == "[]" || out == "", checker.True) // But we should still be able to query the execID sc, body, err := sockRequest("GET", "/exec/"+execID+"/json", 
nil) c.Assert(sc, checker.Equals, http.StatusOK, check.Commentf("received status != 200 OK: %d\n%s", sc, body)) // Now delete the container and then an 'inspect' on the exec should // result in a 404 (not 'container not running') out, ec := dockerCmd(c, "rm", "-f", id) c.Assert(ec, checker.Equals, 0, check.Commentf("error removing container: %s", out)) sc, body, err = sockRequest("GET", "/exec/"+execID+"/json", nil) c.Assert(sc, checker.Equals, http.StatusNotFound, check.Commentf("received status != 404: %d\n%s", sc, body)) } func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) { testRequires(c, DaemonIsLinux) var out string out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") idA := strings.TrimSpace(out) c.Assert(idA, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top") idB := strings.TrimSpace(out) c.Assert(idB, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") dockerCmd(c, "rename", "container1", "container_new") dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") } func (s *DockerSuite) TestRunExecDir(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) execDir := filepath.Join(execDriverPath, id) stateFile := filepath.Join(execDir, "state.json") { fi, err := os.Stat(execDir) c.Assert(err, checker.IsNil) if !fi.IsDir() { c.Fatalf("%q must be a directory", execDir) } fi, err = os.Stat(stateFile) c.Assert(err, checker.IsNil) } dockerCmd(c, "stop", id) { _, err := os.Stat(execDir) c.Assert(err, checker.NotNil) c.Assert(err, checker.NotNil, check.Commentf("Exec directory %q exists for removed container!", execDir)) if !os.IsNotExist(err) { c.Fatalf("Error should be about 
non-existing, got %s", err) } } dockerCmd(c, "start", id) { fi, err := os.Stat(execDir) c.Assert(err, checker.IsNil) if !fi.IsDir() { c.Fatalf("%q must be a directory", execDir) } fi, err = os.Stat(stateFile) c.Assert(err, checker.IsNil) } dockerCmd(c, "rm", "-f", id) { _, err := os.Stat(execDir) c.Assert(err, checker.NotNil, check.Commentf("Exec directory %q exists for removed container!", execDir)) if !os.IsNotExist(err) { c.Fatalf("Error should be about non-existing, got %s", err) } } } func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) for _, fn := range []string{"resolv.conf", "hosts"} { deleteAllContainers() content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(string(content)), checker.Equals, "success", check.Commentf("Content was not what was modified in the container", string(content))) out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") contID := strings.TrimSpace(out) netFilePath := containerStorageFile(contID, fn) f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) c.Assert(err, checker.IsNil) if _, err := f.Seek(0, 0); err != nil { f.Close() c.Fatal(err) } if err := f.Truncate(0); err != nil { f.Close() c.Fatal(err) } if _, err := f.Write([]byte("success2\n")); err != nil { f.Close() c.Fatal(err) } f.Close() res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn) c.Assert(res, checker.Equals, "success2\n") } } func (s *DockerSuite) TestExecWithUser(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id") c.Assert(out, checker.Contains, "uid=1(daemon) gid=1(daemon)") out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id") c.Assert(out, checker.Contains, 
"uid=0(root) gid=0(root)", check.Commentf("exec with user by id expected daemon user got %s", out)) } func (s *DockerSuite) TestExecWithPrivileged(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) // Start main loop which attempts mknod repeatedly dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`) // Check exec mknod doesn't work cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16") out, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.NotNil, check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) // Check exec mknod does work with --privileged cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`) out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.IsNil) actual := strings.TrimSpace(out) c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", out)) // Check subsequent unprivileged exec cannot mknod cmd = exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32") out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.NotNil, check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) // Confirm at no 
point was mknod allowed logCmd := exec.Command(dockerBinary, "logs", "parent") out, _, err = runCommandWithOutput(logCmd) c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), "Success") } func (s *DockerSuite) TestExecWithImageUser(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilduser" _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd USER dockerio`, true) c.Assert(err, checker.IsNil) dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top") out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami") c.Assert(out, checker.Contains, "dockerio", check.Commentf("exec with user by id expected dockerio user got %s", out)) } func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { // --read-only + userns has remount issues testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top") dockerCmd(c, "exec", "parent", "true") } // #15750 func (s *DockerSuite) TestExecStartFails(c *check.C) { testRequires(c, DaemonIsLinux) name := "exec-15750" dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") c.Assert(waitRun(name), checker.IsNil) out, _, err := dockerCmdWithError("exec", name, "no-such-cmd") c.Assert(err, checker.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "executable file not found") } docker-1.10.3/integration-cli/docker_cli_exec_unix_test.go000066400000000000000000000032641267010174400236750ustar00rootroot00000000000000// +build !windows,!test_no_exec package main import ( "bytes" "io" "os/exec" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" "github.com/kr/pty" ) // regression test for #12546 func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") contID := strings.TrimSpace(out) cmd := exec.Command(dockerBinary, "exec", "-i", 
contID, "echo", "-n", "hello") p, err := pty.Start(cmd) c.Assert(err, checker.IsNil) b := bytes.NewBuffer(nil) go io.Copy(b, p) ch := make(chan error) go func() { ch <- cmd.Wait() }() select { case err := <-ch: c.Assert(err, checker.IsNil) output := b.String() c.Assert(strings.TrimSpace(output), checker.Equals, "hello") case <-time.After(5 * time.Second): c.Fatal("timed out running docker exec") } } func (s *DockerSuite) TestExecTTY(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name=test", "busybox", "sh", "-c", "echo hello > /foo && top") cmd := exec.Command(dockerBinary, "exec", "-it", "test", "sh") p, err := pty.Start(cmd) c.Assert(err, checker.IsNil) defer p.Close() _, err = p.Write([]byte("cat /foo && sleep 2 && exit\n")) c.Assert(err, checker.IsNil) chErr := make(chan error) go func() { chErr <- cmd.Wait() }() select { case err := <-chErr: c.Assert(err, checker.IsNil) case <-time.After(3 * time.Second): c.Fatal("timeout waiting for exec to exit") } buf := make([]byte, 256) read, err := p.Read(buf) c.Assert(err, checker.IsNil) c.Assert(bytes.Contains(buf, []byte("hello")), checker.Equals, true, check.Commentf(string(buf[:read]))) } docker-1.10.3/integration-cli/docker_cli_experimental_test.go000066400000000000000000000062151267010174400244020ustar00rootroot00000000000000// +build experimental package main import ( "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strconv" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/system" "github.com/go-check/check" ) func (s *DockerSuite) TestExperimentalVersion(c *check.C) { out, _ := dockerCmd(c, "version") for _, line := range strings.Split(out, "\n") { if strings.HasPrefix(line, "Experimental (client):") || strings.HasPrefix(line, "Experimental (server):") { c.Assert(line, checker.Matches, "*true") } } out, _ = dockerCmd(c, "-v") c.Assert(out, checker.Contains, ", experimental", check.Commentf("docker version did not contain experimental")) } // 
user namespaces test: run daemon with remapped root setting // 1. validate uid/gid maps are set properly // 2. verify that files created are owned by remapped root func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon) c.Assert(s.d.StartWithBusybox("--userns-remap", "default"), checker.IsNil) tmpDir, err := ioutil.TempDir("", "userns") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir) // we need to find the uid and gid of the remapped root from the daemon's root dir info uidgid := strings.Split(filepath.Base(s.d.root), ".") c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root))) uid, err := strconv.Atoi(uidgid[0]) c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) gid, err := strconv.Atoi(uidgid[1]) c.Assert(err, checker.IsNil, check.Commentf("Can't parse gid")) //writeable by the remapped root UID/GID pair c.Assert(os.Chown(tmpDir, uid, gid), checker.IsNil) out, err := s.d.Cmd("run", "-d", "--name", "userns", "-v", tmpDir+":/goofy", "busybox", "sh", "-c", "touch /goofy/testfile; top") c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) pid, err := s.d.Cmd("inspect", "--format='{{.State.Pid}}'", "userns") c.Assert(err, checker.IsNil, check.Commentf("Could not inspect running container: out: %q", pid)) // check the uid and gid maps for the PID to ensure root is remapped // (cmd = cat /proc//uid_map | grep -E '0\s+9999\s+1') out, rc1, err := runCommandPipelineWithOutput( exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/uid_map"), exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", uid))) c.Assert(rc1, checker.Equals, 0, check.Commentf("Didn't match uid_map: output: %s", out)) out, rc2, err := runCommandPipelineWithOutput( exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/gid_map"), exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", gid))) 
c.Assert(rc2, checker.Equals, 0, check.Commentf("Didn't match gid_map: output: %s", out)) // check that the touched file is owned by remapped uid:gid stat, err := system.Stat(filepath.Join(tmpDir, "testfile")) c.Assert(err, checker.IsNil) c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Touched file not owned by remapped root UID")) c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Touched file not owned by remapped root GID")) } docker-1.10.3/integration-cli/docker_cli_export_import_test.go000066400000000000000000000034171267010174400246210ustar00rootroot00000000000000package main import ( "os" "os/exec" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // export an image and try to import it into a new one func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { testRequires(c, DaemonIsLinux) containerID := "testexportcontainerandimportimage" dockerCmd(c, "run", "--name", containerID, "busybox", "true") out, _ := dockerCmd(c, "export", containerID) importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") importCmd.Stdin = strings.NewReader(out) out, _, err := runCommandWithOutput(importCmd) c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) cleanedImageID := strings.TrimSpace(out) c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) } // Used to test output flag in the export command func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { testRequires(c, DaemonIsLinux) containerID := "testexportcontainerwithoutputandimportimage" dockerCmd(c, "run", "--name", containerID, "busybox", "true") dockerCmd(c, "export", "--output=testexp.tar", containerID) defer os.Remove("testexp.tar") out, _, err := runCommandWithOutput(exec.Command("cat", "testexp.tar")) c.Assert(err, checker.IsNil, check.Commentf(out)) importCmd := 
exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") importCmd.Stdin = strings.NewReader(out) out, _, err = runCommandWithOutput(importCmd) c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) cleanedImageID := strings.TrimSpace(out) c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) } docker-1.10.3/integration-cli/docker_cli_external_graphdriver_unix_test.go000066400000000000000000000220501267010174400271620ustar00rootroot00000000000000// +build experimental // +build !windows package main import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "strings" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/vfs" "github.com/docker/docker/pkg/archive" "github.com/go-check/check" ) func init() { check.Suite(&DockerExternalGraphdriverSuite{ ds: &DockerSuite{}, }) } type DockerExternalGraphdriverSuite struct { server *httptest.Server ds *DockerSuite d *Daemon ec *graphEventsCounter } type graphEventsCounter struct { activations int creations int removals int gets int puts int stats int cleanups int exists int init int metadata int diff int applydiff int changes int diffsize int } func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) { s.d = NewDaemon(c) s.ec = &graphEventsCounter{} } func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) { s.d.Stop() s.ds.TearDownTest(c) } func (s *DockerExternalGraphdriverSuite) SetUpSuite(c *check.C) { mux := http.NewServeMux() s.server = httptest.NewServer(mux) type graphDriverRequest struct { ID string `json:",omitempty"` Parent string `json:",omitempty"` MountLabel string `json:",omitempty"` } type graphDriverResponse struct { Err error `json:",omitempty"` Dir string `json:",omitempty"` Exists bool `json:",omitempty"` Status [][2]string `json:",omitempty"` Metadata map[string]string `json:",omitempty"` Changes []archive.Change 
`json:",omitempty"` Size int64 `json:",omitempty"` } respond := func(w http.ResponseWriter, data interface{}) { w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json") switch t := data.(type) { case error: fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error())) case string: fmt.Fprintln(w, t) default: json.NewEncoder(w).Encode(&data) } } decReq := func(b io.ReadCloser, out interface{}, w http.ResponseWriter) error { defer b.Close() if err := json.NewDecoder(b).Decode(&out); err != nil { http.Error(w, fmt.Sprintf("error decoding json: %s", err.Error()), 500) } return nil } base, err := ioutil.TempDir("", "external-graph-test") c.Assert(err, check.IsNil) vfsProto, err := vfs.Init(base, []string{}, nil, nil) c.Assert(err, check.IsNil, check.Commentf("error initializing graph driver")) driver := graphdriver.NewNaiveDiffDriver(vfsProto, nil, nil) mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { s.ec.activations++ respond(w, `{"Implements": ["GraphDriver"]}`) }) mux.HandleFunc("/GraphDriver.Init", func(w http.ResponseWriter, r *http.Request) { s.ec.init++ respond(w, "{}") }) mux.HandleFunc("/GraphDriver.Create", func(w http.ResponseWriter, r *http.Request) { s.ec.creations++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } if err := driver.Create(req.ID, req.Parent, ""); err != nil { respond(w, err) return } respond(w, "{}") }) mux.HandleFunc("/GraphDriver.Remove", func(w http.ResponseWriter, r *http.Request) { s.ec.removals++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } if err := driver.Remove(req.ID); err != nil { respond(w, err) return } respond(w, "{}") }) mux.HandleFunc("/GraphDriver.Get", func(w http.ResponseWriter, r *http.Request) { s.ec.gets++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } dir, err := driver.Get(req.ID, req.MountLabel) if err != nil { respond(w, err) return } respond(w, 
&graphDriverResponse{Dir: dir}) }) mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { s.ec.puts++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } if err := driver.Put(req.ID); err != nil { respond(w, err) return } respond(w, "{}") }) mux.HandleFunc("/GraphDriver.Exists", func(w http.ResponseWriter, r *http.Request) { s.ec.exists++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } respond(w, &graphDriverResponse{Exists: driver.Exists(req.ID)}) }) mux.HandleFunc("/GraphDriver.Status", func(w http.ResponseWriter, r *http.Request) { s.ec.stats++ respond(w, &graphDriverResponse{Status: driver.Status()}) }) mux.HandleFunc("/GraphDriver.Cleanup", func(w http.ResponseWriter, r *http.Request) { s.ec.cleanups++ err := driver.Cleanup() if err != nil { respond(w, err) return } respond(w, `{}`) }) mux.HandleFunc("/GraphDriver.GetMetadata", func(w http.ResponseWriter, r *http.Request) { s.ec.metadata++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } data, err := driver.GetMetadata(req.ID) if err != nil { respond(w, err) return } respond(w, &graphDriverResponse{Metadata: data}) }) mux.HandleFunc("/GraphDriver.Diff", func(w http.ResponseWriter, r *http.Request) { s.ec.diff++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } diff, err := driver.Diff(req.ID, req.Parent) if err != nil { respond(w, err) return } io.Copy(w, diff) }) mux.HandleFunc("/GraphDriver.Changes", func(w http.ResponseWriter, r *http.Request) { s.ec.changes++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } changes, err := driver.Changes(req.ID, req.Parent) if err != nil { respond(w, err) return } respond(w, &graphDriverResponse{Changes: changes}) }) mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) { s.ec.applydiff++ var diff archive.Reader = r.Body defer r.Body.Close() id 
:= r.URL.Query().Get("id") parent := r.URL.Query().Get("parent") if id == "" { http.Error(w, fmt.Sprintf("missing id"), 409) } size, err := driver.ApplyDiff(id, parent, diff) if err != nil { respond(w, err) return } respond(w, &graphDriverResponse{Size: size}) }) mux.HandleFunc("/GraphDriver.DiffSize", func(w http.ResponseWriter, r *http.Request) { s.ec.diffsize++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } size, err := driver.DiffSize(req.ID, req.Parent) if err != nil { respond(w, err) return } respond(w, &graphDriverResponse{Size: size}) }) err = os.MkdirAll("/etc/docker/plugins", 0755) c.Assert(err, check.IsNil, check.Commentf("error creating /etc/docker/plugins")) err = ioutil.WriteFile("/etc/docker/plugins/test-external-graph-driver.spec", []byte(s.server.URL), 0644) c.Assert(err, check.IsNil, check.Commentf("error writing to /etc/docker/plugins/test-external-graph-driver.spec")) } func (s *DockerExternalGraphdriverSuite) TearDownSuite(c *check.C) { s.server.Close() err := os.RemoveAll("/etc/docker/plugins") c.Assert(err, check.IsNil, check.Commentf("error removing /etc/docker/plugins")) } func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) { if err := s.d.StartWithBusybox("-s", "test-external-graph-driver"); err != nil { b, _ := ioutil.ReadFile(s.d.LogfileName()) c.Assert(err, check.IsNil, check.Commentf("\n%s", string(b))) } out, err := s.d.Cmd("run", "-d", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello") c.Assert(err, check.IsNil, check.Commentf(out)) err = s.d.Restart("-s", "test-external-graph-driver") out, err = s.d.Cmd("inspect", "--format='{{.GraphDriver.Name}}'", "graphtest") c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(strings.TrimSpace(out), check.Equals, "test-external-graph-driver") out, err = s.d.Cmd("diff", "graphtest") c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(strings.Contains(out, "A /hello"), check.Equals, true) out, err = 
s.d.Cmd("rm", "-f", "graphtest") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("info") c.Assert(err, check.IsNil, check.Commentf(out)) err = s.d.Stop() c.Assert(err, check.IsNil) // Don't check s.ec.exists, because the daemon no longer calls the // Exists function. c.Assert(s.ec.activations, check.Equals, 2) c.Assert(s.ec.init, check.Equals, 2) c.Assert(s.ec.creations >= 1, check.Equals, true) c.Assert(s.ec.removals >= 1, check.Equals, true) c.Assert(s.ec.gets >= 1, check.Equals, true) c.Assert(s.ec.puts >= 1, check.Equals, true) c.Assert(s.ec.stats, check.Equals, 3) c.Assert(s.ec.cleanups, check.Equals, 2) c.Assert(s.ec.applydiff >= 1, check.Equals, true) c.Assert(s.ec.changes, check.Equals, 1) c.Assert(s.ec.diffsize, check.Equals, 0) c.Assert(s.ec.diff, check.Equals, 0) c.Assert(s.ec.metadata, check.Equals, 1) } func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) { testRequires(c, Network) c.Assert(s.d.Start(), check.IsNil) out, err := s.d.Cmd("pull", "busybox:latest") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) } docker-1.10.3/integration-cli/docker_cli_help_test.go000066400000000000000000000264671267010174400226500ustar00rootroot00000000000000package main import ( "os/exec" "runtime" "strings" "unicode" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestHelpTextVerify(c *check.C) { testRequires(c, DaemonIsLinux) // Make sure main help text fits within 80 chars and that // on non-windows system we use ~ when possible (to shorten things). 
// Test for HOME set to its default value and set to "/" on linux // Yes on windows setting up an array and looping (right now) isn't // necessary because we just have one value, but we'll need the // array/loop on linux so we might as well set it up so that we can // test any number of home dirs later on and all we need to do is // modify the array - the rest of the testing infrastructure should work homes := []string{homedir.Get()} // Non-Windows machines need to test for this special case of $HOME if runtime.GOOS != "windows" { homes = append(homes, "/") } homeKey := homedir.Key() baseEnvs := appendBaseEnv(true) // Remove HOME env var from list so we can add a new value later. for i, env := range baseEnvs { if strings.HasPrefix(env, homeKey+"=") { baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) break } } for _, home := range homes { // Dup baseEnvs and add our new HOME value newEnvs := make([]string, len(baseEnvs)+1) copy(newEnvs, baseEnvs) newEnvs[len(newEnvs)-1] = homeKey + "=" + home scanForHome := runtime.GOOS != "windows" && home != "/" // Check main help text to make sure its not over 80 chars helpCmd := exec.Command(dockerBinary, "help") helpCmd.Env = newEnvs out, _, err := runCommandWithOutput(helpCmd) c.Assert(err, checker.IsNil, check.Commentf(out)) lines := strings.Split(out, "\n") foundTooLongLine := false for _, line := range lines { if !foundTooLongLine && len(line) > 80 { c.Logf("Line is too long:\n%s", line) foundTooLongLine = true } // All lines should not end with a space c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space")) if scanForHome && strings.Contains(line, `=`+home) { c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) } if runtime.GOOS != "windows" { i := strings.Index(line, homedir.GetShortcutString()) if i >= 0 && i != len(line)-1 && line[i+1] != '/' { c.Fatalf("Main help should not have used home shortcut:\n%s", line) } } } // Make sure 
each cmd's help text fits within 90 chars and that // on non-windows system we use ~ when possible (to shorten things). // Pull the list of commands from the "Commands:" section of docker help helpCmd = exec.Command(dockerBinary, "help") helpCmd.Env = newEnvs out, _, err = runCommandWithOutput(helpCmd) c.Assert(err, checker.IsNil, check.Commentf(out)) i := strings.Index(out, "Commands:") c.Assert(i, checker.GreaterOrEqualThan, 0, check.Commentf("Missing 'Commands:' in:\n%s", out)) cmds := []string{} // Grab all chars starting at "Commands:" helpOut := strings.Split(out[i:], "\n") // First line is just "Commands:" if isLocalDaemon { // Replace first line with "daemon" command since it's not part of the list of commands. helpOut[0] = " daemon" } else { // Skip first line helpOut = helpOut[1:] } // Create the list of commands we want to test cmdsToTest := []string{} for _, cmd := range helpOut { // Stop on blank line or non-idented line if cmd == "" || !unicode.IsSpace(rune(cmd[0])) { break } // Grab just the first word of each line cmd = strings.Split(strings.TrimSpace(cmd), " ")[0] cmds = append(cmds, cmd) // Saving count for later cmdsToTest = append(cmdsToTest, cmd) } // Add some 'two word' commands - would be nice to automatically // calculate this list - somehow cmdsToTest = append(cmdsToTest, "volume create") cmdsToTest = append(cmdsToTest, "volume inspect") cmdsToTest = append(cmdsToTest, "volume ls") cmdsToTest = append(cmdsToTest, "volume rm") for _, cmd := range cmdsToTest { var stderr string args := strings.Split(cmd+" --help", " ") // Check the full usage text helpCmd := exec.Command(dockerBinary, args...) helpCmd.Env = newEnvs out, stderr, _, err = runCommandWithStdoutStderr(helpCmd) c.Assert(len(stderr), checker.Equals, 0, check.Commentf("Error on %q help. 
non-empty stderr:%q", cmd, stderr)) c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have blank line on %q\n", cmd)) c.Assert(out, checker.Contains, "--help", check.Commentf("All commands should mention '--help'. Command '%v' did not.\n", cmd)) c.Assert(err, checker.IsNil, check.Commentf(out)) // Check each line for lots of stuff lines := strings.Split(out, "\n") for _, line := range lines { c.Assert(len(line), checker.LessOrEqualThan, 107, check.Commentf("Help for %q is too long:\n%s", cmd, line)) if scanForHome && strings.Contains(line, `"`+home) { c.Fatalf("Help for %q should use ~ instead of %q on:\n%s", cmd, home, line) } i := strings.Index(line, "~") if i >= 0 && i != len(line)-1 && line[i+1] != '/' { c.Fatalf("Help for %q should not have used ~:\n%s", cmd, line) } // If a line starts with 4 spaces then assume someone // added a multi-line description for an option and we need // to flag it c.Assert(line, checker.Not(checker.HasPrefix), " ", check.Commentf("Help for %q should not have a multi-line option", cmd)) // Options should NOT end with a period if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") { c.Fatalf("Help for %q should not end with a period: %s", cmd, line) } // Options should NOT end with a space c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Help for %q should not end with a space", cmd)) } // For each command make sure we generate an error // if we give a bad arg args = strings.Split(cmd+" --badArg", " ") out, _, err = dockerCmdWithError(args...) 
c.Assert(err, checker.NotNil, check.Commentf(out)) // Be really picky c.Assert(stderr, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n")) // Now make sure that each command will print a short-usage // (not a full usage - meaning no opts section) if we // are missing a required arg or pass in a bad arg // These commands will never print a short-usage so don't test noShortUsage := map[string]string{ "images": "", "login": "", "logout": "", "network": "", "stats": "", } if _, ok := noShortUsage[cmd]; !ok { // For each command run it w/o any args. It will either return // valid output or print a short-usage var dCmd *exec.Cmd var stdout, stderr string var args []string // skipNoArgs are ones that we don't want to try w/o // any args. Either because it'll hang the test or // lead to incorrect test result (like false negative). // Whatever the reason, skip trying to run w/o args and // jump to trying with a bogus arg. skipNoArgs := map[string]struct{}{ "daemon": {}, "events": {}, "load": {}, } ec := 0 if _, ok := skipNoArgs[cmd]; !ok { args = strings.Split(cmd, " ") dCmd = exec.Command(dockerBinary, args...) stdout, stderr, ec, err = runCommandWithStdoutStderr(dCmd) } // If its ok w/o any args then try again with an arg if ec == 0 { args = strings.Split(cmd+" badArg", " ") dCmd = exec.Command(dockerBinary, args...) 
stdout, stderr, ec, err = runCommandWithStdoutStderr(dCmd) } if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil { c.Fatalf("Bad output from %q\nstdout:%q\nstderr:%q\nec:%d\nerr:%q", args, stdout, stderr, ec, err) } // Should have just short usage c.Assert(stderr, checker.Contains, "\nUsage:\t", check.Commentf("Missing short usage on %q\n", args)) // But shouldn't have full usage c.Assert(stderr, checker.Not(checker.Contains), "--help=false", check.Commentf("Should not have full usage on %q\n", args)) c.Assert(stderr, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line on %q\n", args)) } } // Number of commands for standard release and experimental release standard := 41 experimental := 1 expected := standard + experimental if isLocalDaemon { expected++ // for the daemon command } c.Assert(len(cmds), checker.LessOrEqualThan, expected, check.Commentf("Wrong # of cmds, it should be: %d\nThe list:\n%q", expected, cmds)) } } func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) { testRequires(c, DaemonIsLinux) // Test to make sure the exit code and output (stdout vs stderr) of // various good and bad cases are what we expect // docker : stdout=all, stderr=empty, rc=0 out, _, err := dockerCmdWithError() c.Assert(err, checker.IsNil, check.Commentf(out)) // Be really pick c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker'\n")) // docker help: stdout=all, stderr=empty, rc=0 out, _, err = dockerCmdWithError("help") c.Assert(err, checker.IsNil, check.Commentf(out)) // Be really pick c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker help'\n")) // docker --help: stdout=all, stderr=empty, rc=0 out, _, err = dockerCmdWithError("--help") c.Assert(err, checker.IsNil, check.Commentf(out)) // Be really pick c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not 
have a blank line at the end of 'docker --help'\n")) // docker inspect busybox: stdout=all, stderr=empty, rc=0 // Just making sure stderr is empty on valid cmd out, _, err = dockerCmdWithError("inspect", "busybox") c.Assert(err, checker.IsNil, check.Commentf(out)) // Be really pick c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker inspect busyBox'\n")) // docker rm: stdout=empty, stderr=all, rc!=0 // testing the min arg error msg cmd := exec.Command(dockerBinary, "rm") stdout, stderr, _, err := runCommandWithStdoutStderr(cmd) c.Assert(err, checker.NotNil) c.Assert(stdout, checker.Equals, "") // Should not contain full help text but should contain info about // # of args and Usage line c.Assert(stderr, checker.Contains, "requires a minimum", check.Commentf("Missing # of args text from 'docker rm'\n")) // docker rm NoSuchContainer: stdout=empty, stderr=all, rc=0 // testing to make sure no blank line on error cmd = exec.Command(dockerBinary, "rm", "NoSuchContainer") stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) c.Assert(err, checker.NotNil) c.Assert(len(stderr), checker.Not(checker.Equals), 0) c.Assert(stdout, checker.Equals, "") // Be really picky c.Assert(stderr, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n")) // docker BadCmd: stdout=empty, stderr=all, rc=0 cmd = exec.Command(dockerBinary, "BadCmd") stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) c.Assert(err, checker.NotNil) c.Assert(stdout, checker.Equals, "") c.Assert(stderr, checker.Equals, "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'.\n", check.Commentf("Unexcepted output for 'docker badCmd'\n")) } docker-1.10.3/integration-cli/docker_cli_history_test.go000066400000000000000000000070741267010174400234120ustar00rootroot00000000000000package main import ( "fmt" "regexp" "strconv" "strings" 
"github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // This is a heisen-test. Because the created timestamp of images and the behavior of // sort is not predictable it doesn't always fail. func (s *DockerSuite) TestBuildHistory(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildhistory" _, err := buildImage(name, `FROM busybox RUN echo "A" RUN echo "B" RUN echo "C" RUN echo "D" RUN echo "E" RUN echo "F" RUN echo "G" RUN echo "H" RUN echo "I" RUN echo "J" RUN echo "K" RUN echo "L" RUN echo "M" RUN echo "N" RUN echo "O" RUN echo "P" RUN echo "Q" RUN echo "R" RUN echo "S" RUN echo "T" RUN echo "U" RUN echo "V" RUN echo "W" RUN echo "X" RUN echo "Y" RUN echo "Z"`, true) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "history", "testbuildhistory") actualValues := strings.Split(out, "\n")[1:27] expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} for i := 0; i < 26; i++ { echoValue := fmt.Sprintf("echo \"%s\"", expectedValues[i]) actualValue := actualValues[i] c.Assert(actualValue, checker.Contains, echoValue) } } func (s *DockerSuite) TestHistoryExistentImage(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "history", "busybox") } func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) { _, _, err := dockerCmdWithError("history", "testHistoryNonExistentImage") c.Assert(err, checker.NotNil, check.Commentf("history on a non-existent image should fail.")) } func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) { testRequires(c, DaemonIsLinux) name := "testhistoryimagewithcomment" // make a image through docker commit [ -m messages ] dockerCmd(c, "run", "--name", name, "busybox", "true") dockerCmd(c, "wait", name) comment := "This_is_a_comment" dockerCmd(c, "commit", "-m="+comment, name, name) // test docker history to check comment messages out, _ := dockerCmd(c, "history", name) outputTabs := 
strings.Fields(strings.Split(out, "\n")[1]) actualValue := outputTabs[len(outputTabs)-1] c.Assert(actualValue, checker.Contains, comment) } func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "history", "--human=false", "busybox") lines := strings.Split(out, "\n") sizeColumnRegex, _ := regexp.Compile("SIZE +") indices := sizeColumnRegex.FindStringIndex(lines[0]) startIndex := indices[0] endIndex := indices[1] for i := 1; i < len(lines)-1; i++ { if endIndex > len(lines[i]) { endIndex = len(lines[i]) } sizeString := lines[i][startIndex:endIndex] _, err := strconv.Atoi(strings.TrimSpace(sizeString)) c.Assert(err, checker.IsNil, check.Commentf("The size '%s' was not an Integer", sizeString)) } } func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "history", "--human=true", "busybox") lines := strings.Split(out, "\n") sizeColumnRegex, _ := regexp.Compile("SIZE +") humanSizeRegexRaw := "\\d+.*B" // Matches human sizes like 10 MB, 3.2 KB, etc indices := sizeColumnRegex.FindStringIndex(lines[0]) startIndex := indices[0] endIndex := indices[1] for i := 1; i < len(lines)-1; i++ { if endIndex > len(lines[i]) { endIndex = len(lines[i]) } sizeString := lines[i][startIndex:endIndex] c.Assert(strings.TrimSpace(sizeString), checker.Matches, humanSizeRegexRaw, check.Commentf("The size '%s' was not in human format", sizeString)) } } docker-1.10.3/integration-cli/docker_cli_images_test.go000066400000000000000000000222541267010174400231530ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "os" "path/filepath" "reflect" "sort" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" ) func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) { testRequires(c, DaemonIsLinux) imagesOut, _ := dockerCmd(c, "images") c.Assert(imagesOut, checker.Contains, 
"busybox") } func (s *DockerSuite) TestImagesEnsureImageWithTagIsListed(c *check.C) { testRequires(c, DaemonIsLinux) name := "imagewithtag" dockerCmd(c, "tag", "busybox", name+":v1") dockerCmd(c, "tag", "busybox", name+":v1v1") dockerCmd(c, "tag", "busybox", name+":v2") imagesOut, _ := dockerCmd(c, "images", name+":v1") c.Assert(imagesOut, checker.Contains, name) c.Assert(imagesOut, checker.Contains, "v1") c.Assert(imagesOut, checker.Not(checker.Contains), "v2") c.Assert(imagesOut, checker.Not(checker.Contains), "v1v1") imagesOut, _ = dockerCmd(c, "images", name) c.Assert(imagesOut, checker.Contains, name) c.Assert(imagesOut, checker.Contains, "v1") c.Assert(imagesOut, checker.Contains, "v1v1") c.Assert(imagesOut, checker.Contains, "v2") } func (s *DockerSuite) TestImagesEnsureImageWithBadTagIsNotListed(c *check.C) { imagesOut, _ := dockerCmd(c, "images", "busybox:nonexistent") c.Assert(imagesOut, checker.Not(checker.Contains), "busybox") } func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) { testRequires(c, DaemonIsLinux) id1, err := buildImage("order:test_a", `FROM scratch MAINTAINER dockerio1`, true) c.Assert(err, checker.IsNil) time.Sleep(1 * time.Second) id2, err := buildImage("order:test_c", `FROM scratch MAINTAINER dockerio2`, true) c.Assert(err, checker.IsNil) time.Sleep(1 * time.Second) id3, err := buildImage("order:test_b", `FROM scratch MAINTAINER dockerio3`, true) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "images", "-q", "--no-trunc") imgs := strings.Split(out, "\n") c.Assert(imgs[0], checker.Equals, id3, check.Commentf("First image must be %s, got %s", id3, imgs[0])) c.Assert(imgs[1], checker.Equals, id2, check.Commentf("First image must be %s, got %s", id2, imgs[1])) c.Assert(imgs[2], checker.Equals, id1, check.Commentf("First image must be %s, got %s", id1, imgs[2])) } func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) { out, _, err := dockerCmdWithError("images", "-f", "FOO=123") c.Assert(err, 
checker.NotNil) c.Assert(out, checker.Contains, "Invalid filter") } func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) { testRequires(c, DaemonIsLinux) imageName1 := "images_filter_test1" imageName2 := "images_filter_test2" imageName3 := "images_filter_test3" image1ID, err := buildImage(imageName1, `FROM scratch LABEL match me`, true) c.Assert(err, check.IsNil) image2ID, err := buildImage(imageName2, `FROM scratch LABEL match="me too"`, true) c.Assert(err, check.IsNil) image3ID, err := buildImage(imageName3, `FROM scratch LABEL nomatch me`, true) c.Assert(err, check.IsNil) out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match") out = strings.TrimSpace(out) c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image1ID)) c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image2ID)) c.Assert(out, check.Not(check.Matches), fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image3ID)) out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too") out = strings.TrimSpace(out) c.Assert(out, check.Equals, image2ID) } // Regression : #15659 func (s *DockerSuite) TestImagesFilterLabelWithCommit(c *check.C) { // Create a container dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh") // Commit with labels "using changes" out, _ := dockerCmd(c, "commit", "-c", "LABEL foo.version=1.0.0-1", "-c", "LABEL foo.name=bar", "-c", "LABEL foo.author=starlord", "bar", "bar:1.0.0-1") imageID := strings.TrimSpace(out) out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=foo.version=1.0.0-1") out = strings.TrimSpace(out) c.Assert(out, check.Equals, imageID) } func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) { testRequires(c, DaemonIsLinux) imageName := "images_filter_test" buildImage(imageName, `FROM scratch RUN touch /test/foo RUN touch /test/bar RUN touch /test/baz`, true) filters := []string{ "dangling=true", "Dangling=true", " dangling=true", "dangling=true ", "dangling = true", } imageListings 
:= make([][]string, 5, 5) for idx, filter := range filters { out, _ := dockerCmd(c, "images", "-q", "-f", filter) listing := strings.Split(out, "\n") sort.Strings(listing) imageListings[idx] = listing } for idx, listing := range imageListings { if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { for idx, errListing := range imageListings { fmt.Printf("out %d", idx) for _, image := range errListing { fmt.Print(image) } fmt.Print("") } c.Fatalf("All output must be the same") } } } func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) { testRequires(c, DaemonIsLinux) // create container 1 out, _ := dockerCmd(c, "run", "-d", "busybox", "true") containerID1 := strings.TrimSpace(out) // tag as foobox out, _ = dockerCmd(c, "commit", containerID1, "foobox") imageID := stringid.TruncateID(strings.TrimSpace(out)) // overwrite the tag, making the previous image dangling dockerCmd(c, "tag", "-f", "busybox", "foobox") out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true") // Expect one dangling image c.Assert(strings.Count(out, imageID), checker.Equals, 1) out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false") //dangling=false would not include dangling images c.Assert(out, checker.Not(checker.Contains), imageID) out, _ = dockerCmd(c, "images") //docker images still include dangling images c.Assert(out, checker.Contains, imageID) } func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) { out, _, err := dockerCmdWithError("images", "-f", "dangling=invalid") c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, "Invalid filter") } func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) { testRequires(c, DaemonIsLinux) dockerfile := ` FROM scratch MAINTAINER docker ENV foo bar` head, out, err := buildImageWithOut("scratch-image", dockerfile, false) c.Assert(err, check.IsNil) // this is just the output of docker build // we're interested in getting the image id of the MAINTAINER instruction // and 
that's located at output, line 5, from 7 to end split := strings.Split(out, "\n") intermediate := strings.TrimSpace(split[5][7:]) out, _ = dockerCmd(c, "images") // images shouldn't show non-heads images c.Assert(out, checker.Not(checker.Contains), intermediate) // images should contain final built images c.Assert(out, checker.Contains, stringid.TruncateID(head)) } func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { testRequires(c, DaemonIsLinux) dockerfile := ` FROM scratch MAINTAINER docker` id, _, err := buildImageWithOut("scratch-image", dockerfile, false) c.Assert(err, check.IsNil) out, _ := dockerCmd(c, "images") // images should contain images built from scratch c.Assert(out, checker.Contains, stringid.TruncateID(id)) } // #18181 func (s *DockerSuite) TestImagesFilterNameWithPort(c *check.C) { tag := "a.b.c.d:5000/hello" dockerCmd(c, "tag", "busybox", tag) out, _ := dockerCmd(c, "images", tag) c.Assert(out, checker.Contains, tag) out, _ = dockerCmd(c, "images", tag+":latest") c.Assert(out, checker.Contains, tag) out, _ = dockerCmd(c, "images", tag+":no-such-tag") c.Assert(out, checker.Not(checker.Contains), tag) } func (s *DockerSuite) TestImagesFormat(c *check.C) { // testRequires(c, DaemonIsLinux) tag := "myimage" dockerCmd(c, "tag", "busybox", tag+":v1") dockerCmd(c, "tag", "busybox", tag+":v2") out, _ := dockerCmd(c, "images", "--format", "{{.Repository}}", tag) lines := strings.Split(strings.TrimSpace(string(out)), "\n") expected := []string{"myimage", "myimage"} var names []string for _, l := range lines { names = append(names, l) } c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) } // ImagesDefaultFormatAndQuiet func (s *DockerSuite) TestImagesFormatDefaultFormat(c *check.C) { testRequires(c, DaemonIsLinux) // create container 1 out, _ := dockerCmd(c, "run", "-d", "busybox", "true") containerID1 := strings.TrimSpace(out) // tag as foobox out, _ = 
dockerCmd(c, "commit", containerID1, "myimage") imageID := stringid.TruncateID(strings.TrimSpace(out)) config := `{ "imagesFormat": "{{ .ID }} default" }` d, err := ioutil.TempDir("", "integration-cli-") c.Assert(err, checker.IsNil) defer os.RemoveAll(d) err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "--config", d, "images", "-q", "myimage") c.Assert(out, checker.Equals, imageID+"\n", check.Commentf("Expected to print only the image id, got %v\n", out)) } docker-1.10.3/integration-cli/docker_cli_import_test.go000066400000000000000000000110051267010174400232100ustar00rootroot00000000000000package main import ( "bufio" "compress/gzip" "io/ioutil" "os" "os/exec" "regexp" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestImportDisplay(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "export", cleanedContainerID), exec.Command(dockerBinary, "import", "-"), ) c.Assert(err, checker.IsNil) c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) image := strings.TrimSpace(out) out, _ = dockerCmd(c, "run", "--rm", image, "true") c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) } func (s *DockerSuite) TestImportBadURL(c *check.C) { testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("import", "http://nourl/bad") c.Assert(err, checker.NotNil, check.Commentf("import was supposed to fail but didn't")) c.Assert(out, checker.Contains, "dial tcp", check.Commentf("expected an error msg but didn't get one")) } func (s *DockerSuite) TestImportFile(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "test-import", "busybox", "true") temporaryFile, err := 
ioutil.TempFile("", "exportImportTest") c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) defer os.Remove(temporaryFile.Name()) runCmd := exec.Command(dockerBinary, "export", "test-import") runCmd.Stdout = bufio.NewWriter(temporaryFile) _, err = runCommand(runCmd) c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) out, _ := dockerCmd(c, "import", temporaryFile.Name()) c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) image := strings.TrimSpace(out) out, _ = dockerCmd(c, "run", "--rm", image, "true") c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) } func (s *DockerSuite) TestImportGzipped(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "test-import", "busybox", "true") temporaryFile, err := ioutil.TempFile("", "exportImportTest") c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) defer os.Remove(temporaryFile.Name()) runCmd := exec.Command(dockerBinary, "export", "test-import") w := gzip.NewWriter(temporaryFile) runCmd.Stdout = w _, err = runCommand(runCmd) c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) err = w.Close() c.Assert(err, checker.IsNil, check.Commentf("failed to close gzip writer")) temporaryFile.Close() out, _ := dockerCmd(c, "import", temporaryFile.Name()) c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) image := strings.TrimSpace(out) out, _ = dockerCmd(c, "run", "--rm", image, "true") c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) } func (s *DockerSuite) TestImportFileWithMessage(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "test-import", "busybox", "true") temporaryFile, err := ioutil.TempFile("", "exportImportTest") c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) defer 
os.Remove(temporaryFile.Name()) runCmd := exec.Command(dockerBinary, "export", "test-import") runCmd.Stdout = bufio.NewWriter(temporaryFile) _, err = runCommand(runCmd) c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) message := "Testing commit message" out, _ := dockerCmd(c, "import", "-m", message, temporaryFile.Name()) c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) image := strings.TrimSpace(out) out, _ = dockerCmd(c, "history", image) split := strings.Split(out, "\n") c.Assert(split, checker.HasLen, 3, check.Commentf("expected 3 lines from image history")) r := regexp.MustCompile("[\\s]{2,}") split = r.Split(split[1], -1) c.Assert(message, checker.Equals, split[3], check.Commentf("didn't get expected value in commit message")) out, _ = dockerCmd(c, "run", "--rm", image, "true") c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing")) } func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) { _, _, err := dockerCmdWithError("import", "example.com/myImage.tar") c.Assert(err, checker.NotNil, check.Commentf("import non-existing file must failed")) } docker-1.10.3/integration-cli/docker_cli_info_test.go000066400000000000000000000117011267010174400226340ustar00rootroot00000000000000package main import ( "fmt" "net" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/utils" "github.com/go-check/check" ) // ensure docker info succeeds func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) { out, _ := dockerCmd(c, "info") // always shown fields stringsToCheck := []string{ "ID:", "Containers:", " Running:", " Paused:", " Stopped:", "Images:", "Execution Driver:", "OSType:", "Architecture:", "Logging Driver:", "Operating System:", "CPUs:", "Total Memory:", "Kernel Version:", "Storage Driver:", "Volume:", "Network:", } if utils.ExperimentalBuild() { stringsToCheck = append(stringsToCheck, "Experimental: true") } for _, 
linePrefix := range stringsToCheck { c.Assert(out, checker.Contains, linePrefix, check.Commentf("couldn't find string %v in output", linePrefix)) } } // TestInfoDiscoveryBackend verifies that a daemon run with `--cluster-advertise` and // `--cluster-store` properly show the backend's endpoint in info output. func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) d := NewDaemon(c) discoveryBackend := "consul://consuladdr:consulport/some/path" discoveryAdvertise := "1.1.1.1:2375" err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise)) c.Assert(err, checker.IsNil) defer d.Stop() out, err := d.Cmd("info") c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, fmt.Sprintf("Cluster store: %s\n", discoveryBackend)) c.Assert(out, checker.Contains, fmt.Sprintf("Cluster advertise: %s\n", discoveryAdvertise)) } // TestInfoDiscoveryInvalidAdvertise verifies that a daemon run with // an invalid `--cluster-advertise` configuration func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) d := NewDaemon(c) discoveryBackend := "consul://consuladdr:consulport/some/path" // --cluster-advertise with an invalid string is an error err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid") c.Assert(err, checker.Not(checker.IsNil)) // --cluster-advertise without --cluster-store is also an error err = d.Start("--cluster-advertise=1.1.1.1:2375") c.Assert(err, checker.Not(checker.IsNil)) } // TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise` // configured with interface name properly show the advertise ip-address in info output. 
func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { testRequires(c, SameHostDaemon, Network, DaemonIsLinux) d := NewDaemon(c) discoveryBackend := "consul://consuladdr:consulport/some/path" discoveryAdvertise := "eth0" err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise)) c.Assert(err, checker.IsNil) defer d.Stop() iface, err := net.InterfaceByName(discoveryAdvertise) c.Assert(err, checker.IsNil) addrs, err := iface.Addrs() c.Assert(err, checker.IsNil) c.Assert(len(addrs), checker.GreaterThan, 0) ip, _, err := net.ParseCIDR(addrs[0].String()) c.Assert(err, checker.IsNil) out, err := d.Cmd("info") c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, fmt.Sprintf("Cluster store: %s\n", discoveryBackend)) c.Assert(out, checker.Contains, fmt.Sprintf("Cluster advertise: %s:2375\n", ip.String())) } func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "busybox", "top") out, _ := dockerCmd(c, "info") c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1)) c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) } func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "pause", cleanedContainerID) out, _ = dockerCmd(c, "info") c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1)) c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) } func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { testRequires(c, DaemonIsLinux) out, 
_ := dockerCmd(c, "run", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "stop", cleanedContainerID) out, _ = dockerCmd(c, "info") c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1)) } docker-1.10.3/integration-cli/docker_cli_inspect_experimental_test.go000066400000000000000000000017761267010174400261360ustar00rootroot00000000000000// +build experimental package main import ( "github.com/docker/docker/pkg/integration/checker" "github.com/docker/engine-api/types" "github.com/go-check/check" ) func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:/data", "busybox", "cat") vol, err := inspectFieldJSON("test", "Mounts") c.Assert(err, checker.IsNil) var mp []types.MountPoint err = unmarshalJSON([]byte(vol), &mp) c.Assert(err, checker.IsNil) c.Assert(mp, checker.HasLen, 1, check.Commentf("Expected 1 mount point")) m := mp[0] c.Assert(m.Name, checker.Equals, "data", check.Commentf("Expected name data")) c.Assert(m.Driver, checker.Equals, "local", check.Commentf("Expected driver local")) c.Assert(m.Source, checker.Not(checker.Equals), "", check.Commentf("Expected source to not be empty")) c.Assert(m.RW, checker.Equals, true) c.Assert(m.Destination, checker.Equals, "/data", check.Commentf("Expected destination /data")) } docker-1.10.3/integration-cli/docker_cli_inspect_test.go000066400000000000000000000362261267010174400233570ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os/exec" "strconv" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "github.com/go-check/check" ) func checkValidGraphDriver(c *check.C, 
name string) { if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { c.Fatalf("%v is not a valid graph driver name", name) } } func (s *DockerSuite) TestInspectImage(c *check.C) { testRequires(c, DaemonIsLinux) imageTest := "emptyfs" // It is important that this ID remain stable. If a code change causes // it to be different, this is equivalent to a cache bust when pulling // a legacy-format manifest. If the check at the end of this function // fails, fix the difference in the image serialization instead of // updating this hash. imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" id, err := inspectField(imageTest, "Id") c.Assert(err, checker.IsNil) c.Assert(id, checker.Equals, imageTestID) } func (s *DockerSuite) TestInspectInt64(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "-m=300M", "--name", "inspectTest", "busybox", "true") inspectOut, err := inspectField("inspectTest", "HostConfig.Memory") c.Assert(err, check.IsNil) c.Assert(inspectOut, checker.Equals, "314572800") } func (s *DockerSuite) TestInspectDefault(c *check.C) { testRequires(c, DaemonIsLinux) //Both the container and image are named busybox. docker inspect will fetch the container JSON. //If the container JSON is not available, it will go for the image JSON. 
out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") containerID := strings.TrimSpace(out) inspectOut, err := inspectField("busybox", "Id") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(inspectOut), checker.Equals, containerID) } func (s *DockerSuite) TestInspectStatus(c *check.C) { defer unpauseAllContainers() testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") out = strings.TrimSpace(out) inspectOut, err := inspectField(out, "State.Status") c.Assert(err, checker.IsNil) c.Assert(inspectOut, checker.Equals, "running") dockerCmd(c, "pause", out) inspectOut, err = inspectField(out, "State.Status") c.Assert(err, checker.IsNil) c.Assert(inspectOut, checker.Equals, "paused") dockerCmd(c, "unpause", out) inspectOut, err = inspectField(out, "State.Status") c.Assert(err, checker.IsNil) c.Assert(inspectOut, checker.Equals, "running") dockerCmd(c, "stop", out) inspectOut, err = inspectField(out, "State.Status") c.Assert(err, checker.IsNil) c.Assert(inspectOut, checker.Equals, "exited") } func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) { testRequires(c, DaemonIsLinux) //Both the container and image are named busybox. docker inspect will fetch container //JSON State.Running field. If the field is true, it's a container. dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top") formatStr := "--format='{{.State.Running}}'" out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") c.Assert(out, checker.Equals, "true\n") // not a container JSON } func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) { testRequires(c, DaemonIsLinux) //Run this test on an image named busybox. docker inspect will try to fetch container //JSON. Since there is no container named busybox and --type=container, docker inspect will //not try to get the image JSON. It will throw an error. 
dockerCmd(c, "run", "-d", "busybox", "true") _, _, err := dockerCmdWithError("inspect", "--type=container", "busybox") // docker inspect should fail, as there is no container named busybox c.Assert(err, checker.NotNil) } func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) { testRequires(c, DaemonIsLinux) //Both the container and image are named busybox. docker inspect will fetch image //JSON as --type=image. if there is no image with name busybox, docker inspect //will throw an error. dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") out, _ := dockerCmd(c, "inspect", "--type=image", "busybox") c.Assert(out, checker.Not(checker.Contains), "State") // not an image JSON } func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) { testRequires(c, DaemonIsLinux) //Both the container and image are named busybox. docker inspect will fail //as --type=foobar is not a valid value for the flag. dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox") c.Assert(err, checker.NotNil, check.Commentf("%s", exitCode)) c.Assert(exitCode, checker.Equals, 1, check.Commentf("%s", err)) c.Assert(out, checker.Contains, "not a valid value for --type") } func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { testRequires(c, DaemonIsLinux) imageTest := "emptyfs" out, err := inspectField(imageTest, "Size") c.Assert(err, checker.IsNil) size, err := strconv.Atoi(out) c.Assert(err, checker.IsNil, check.Commentf("failed to inspect size of the image: %s, %v", out, err)) //now see if the size turns out to be the same formatStr := fmt.Sprintf("--format='{{eq .Size %d}}'", size) out, _ = dockerCmd(c, "inspect", formatStr, imageTest) result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) c.Assert(err, checker.IsNil) c.Assert(result, checker.Equals, true) } func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { testRequires(c, DaemonIsLinux) runCmd := 
exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") runCmd.Stdin = strings.NewReader("blahblah") out, _, _, err := runCommandWithStdoutStderr(runCmd) c.Assert(err, checker.IsNil, check.Commentf("failed to run container: %v, output: %q", err, out)) id := strings.TrimSpace(out) out, err = inspectField(id, "State.ExitCode") c.Assert(err, checker.IsNil) exitCode, err := strconv.Atoi(out) c.Assert(err, checker.IsNil, check.Commentf("failed to inspect exitcode of the container: %s, %v", out, err)) //now get the exit code to verify formatStr := fmt.Sprintf("--format='{{eq .State.ExitCode %d}}'", exitCode) out, _ = dockerCmd(c, "inspect", formatStr, id) result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) c.Assert(err, checker.IsNil) c.Assert(result, checker.Equals, true) } func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) { testRequires(c, DaemonIsLinux) imageTest := "emptyfs" name, err := inspectField(imageTest, "GraphDriver.Name") c.Assert(err, checker.IsNil) checkValidGraphDriver(c, name) if name != "devicemapper" { c.Skip("requires devicemapper graphdriver") } deviceID, err := inspectField(imageTest, "GraphDriver.Data.DeviceId") c.Assert(err, checker.IsNil) _, err = strconv.Atoi(deviceID) c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) deviceSize, err := inspectField(imageTest, "GraphDriver.Data.DeviceSize") c.Assert(err, checker.IsNil) _, err = strconv.ParseUint(deviceSize, 10, 64) c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) } func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "true") out = strings.TrimSpace(out) name, err := inspectField(out, "GraphDriver.Name") c.Assert(err, checker.IsNil) checkValidGraphDriver(c, name) if name != "devicemapper" { return } imageDeviceID, err := 
inspectField("busybox", "GraphDriver.Data.DeviceId") c.Assert(err, checker.IsNil) deviceID, err := inspectField(out, "GraphDriver.Data.DeviceId") c.Assert(err, checker.IsNil) c.Assert(imageDeviceID, checker.Not(checker.Equals), deviceID) _, err = strconv.Atoi(deviceID) c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) deviceSize, err := inspectField(out, "GraphDriver.Data.DeviceSize") c.Assert(err, checker.IsNil) _, err = strconv.ParseUint(deviceSize, 10, 64) c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) } func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "test", "-v", "/data:/data:ro,z", "busybox", "cat") vol, err := inspectFieldJSON("test", "Mounts") c.Assert(err, checker.IsNil) var mp []types.MountPoint err = unmarshalJSON([]byte(vol), &mp) c.Assert(err, checker.IsNil) // check that there is only one mountpoint c.Assert(mp, check.HasLen, 1) m := mp[0] c.Assert(m.Name, checker.Equals, "") c.Assert(m.Driver, checker.Equals, "") c.Assert(m.Source, checker.Equals, "/data") c.Assert(m.Destination, checker.Equals, "/data") c.Assert(m.Mode, checker.Equals, "ro,z") c.Assert(m.RW, checker.Equals, false) } // #14947 func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "true") id := strings.TrimSpace(out) startedAt, err := inspectField(id, "State.StartedAt") c.Assert(err, checker.IsNil) finishedAt, err := inspectField(id, "State.FinishedAt") c.Assert(err, checker.IsNil) created, err := inspectField(id, "Created") c.Assert(err, checker.IsNil) _, err = time.Parse(time.RFC3339Nano, startedAt) c.Assert(err, checker.IsNil) _, err = time.Parse(time.RFC3339Nano, finishedAt) c.Assert(err, checker.IsNil) _, err = time.Parse(time.RFC3339Nano, created) c.Assert(err, checker.IsNil) created, 
err = inspectField("busybox", "Created") c.Assert(err, checker.IsNil) _, err = time.Parse(time.RFC3339Nano, created) c.Assert(err, checker.IsNil) } // #15633 func (s *DockerSuite) TestInspectLogConfigNoType(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "create", "--name=test", "--log-opt", "max-file=42", "busybox") var logConfig container.LogConfig out, err := inspectFieldJSON("test", "HostConfig.LogConfig") c.Assert(err, checker.IsNil, check.Commentf("%v", out)) err = json.NewDecoder(strings.NewReader(out)).Decode(&logConfig) c.Assert(err, checker.IsNil, check.Commentf("%v", out)) c.Assert(logConfig.Type, checker.Equals, "json-file") c.Assert(logConfig.Config["max-file"], checker.Equals, "42", check.Commentf("%v", logConfig)) } func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) { //Both the container and image are named busybox. docker inspect will fetch container //JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields. dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top") formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'" out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Exepcted not to display size info: %s", out)) } func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) { dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top") formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'" out, _ := dockerCmd(c, "inspect", "-s", "--type=container", formatStr, "busybox") sz := strings.Split(out, ",") c.Assert(strings.TrimSpace(sz[0]), check.Not(check.Equals), "") c.Assert(strings.TrimSpace(sz[1]), check.Not(check.Equals), "") } func (s *DockerSuite) TestInspectSizeFlagImage(c *check.C) { dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top") formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'" out, _, err := dockerCmdWithError("inspect", "-s", "--type=image", formatStr, "busybox") // 
Template error rather than // This is a more correct behavior because images don't have sizes associated. c.Assert(err, check.Not(check.IsNil)) c.Assert(out, checker.Contains, "Template parsing error") } func (s *DockerSuite) TestInspectTempateError(c *check.C) { // Template parsing error for both the container and image. dockerCmd(c, "run", "--name=container1", "-d", "busybox", "top") out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='Format container: {{.ThisDoesNotExist}}'", "container1") c.Assert(err, check.Not(check.IsNil)) c.Assert(out, checker.Contains, "Template parsing error") out, _, err = dockerCmdWithError("inspect", "--type=image", "--format='Format container: {{.ThisDoesNotExist}}'", "busybox") c.Assert(err, check.Not(check.IsNil)) c.Assert(out, checker.Contains, "Template parsing error") } func (s *DockerSuite) TestInspectJSONFields(c *check.C) { dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top") out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.HostConfig.Dns}}'", "busybox") c.Assert(err, check.IsNil) c.Assert(out, checker.Equals, "[]\n") } func (s *DockerSuite) TestInspectByPrefix(c *check.C) { id, err := inspectField("busybox", "Id") c.Assert(err, checker.IsNil) c.Assert(id, checker.HasPrefix, "sha256:") id2, err := inspectField(id[:12], "Id") c.Assert(err, checker.IsNil) c.Assert(id, checker.Equals, id2) id3, err := inspectField(strings.TrimPrefix(id, "sha256:")[:12], "Id") c.Assert(err, checker.IsNil) c.Assert(id, checker.Equals, id3) } func (s *DockerSuite) TestInspectStopWhenNotFound(c *check.C) { dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top") dockerCmd(c, "run", "--name=not-shown", "-d", "busybox", "top") out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.Name}}'", "busybox", "missing", "not-shown") c.Assert(err, checker.Not(check.IsNil)) c.Assert(out, checker.Contains, "busybox") c.Assert(out, checker.Not(checker.Contains), 
"not-shown") c.Assert(out, checker.Contains, "Error: No such container: missing") } func (s *DockerSuite) TestInspectHistory(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name=testcont", "-d", "busybox", "top") dockerCmd(c, "commit", "-m", "test comment", "testcont", "testimg") out, _, err := dockerCmdWithError("inspect", "--format='{{.Comment}}'", "testimg") c.Assert(err, check.IsNil) c.Assert(out, checker.Contains, "test comment") } func (s *DockerSuite) TestInspectContainerNetworkDefault(c *check.C) { testRequires(c, DaemonIsLinux) contName := "test1" dockerCmd(c, "run", "--name", contName, "-d", "busybox", "top") netOut, _ := dockerCmd(c, "network", "inspect", "--format='{{.ID}}'", "bridge") out, err := inspectField(contName, "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, "bridge") out, err = inspectField(contName, "NetworkSettings.Networks.bridge.NetworkID") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) } func (s *DockerSuite) TestInspectContainerNetworkCustom(c *check.C) { testRequires(c, DaemonIsLinux) netOut, _ := dockerCmd(c, "network", "create", "net1") dockerCmd(c, "run", "--name=container1", "--net=net1", "-d", "busybox", "top") out, err := inspectField("container1", "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, "net1") out, err = inspectField("container1", "NetworkSettings.Networks.net1.NetworkID") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) } docker-1.10.3/integration-cli/docker_cli_kill_test.go000066400000000000000000000066551267010174400226500ustar00rootroot00000000000000package main import ( "fmt" "net/http" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestKillContainer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", 
"busybox", "top") cleanedContainerID := strings.TrimSpace(out) c.Assert(waitRun(cleanedContainerID), check.IsNil) dockerCmd(c, "kill", cleanedContainerID) out, _ = dockerCmd(c, "ps", "-q") c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) } func (s *DockerSuite) TestKillofStoppedContainer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "stop", cleanedContainerID) _, _, err := dockerCmdWithError("kill", "-s", "30", cleanedContainerID) c.Assert(err, check.Not(check.IsNil), check.Commentf("Container %s is not running", cleanedContainerID)) } func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) c.Assert(waitRun(cleanedContainerID), check.IsNil) dockerCmd(c, "kill", cleanedContainerID) out, _ = dockerCmd(c, "ps", "-q") c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) } // regression test about correct signal parsing see #13665 func (s *DockerSuite) TestKillWithSignal(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cid := strings.TrimSpace(out) c.Assert(waitRun(cid), check.IsNil) dockerCmd(c, "kill", "-s", "SIGWINCH", cid) running, _ := inspectField(cid, "State.Running") c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after SIGWINCH")) } func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cid := strings.TrimSpace(out) c.Assert(waitRun(cid), check.IsNil) out, _, err := dockerCmdWithError("kill", "-s", "0", cid) c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, "Invalid 
signal: 0", check.Commentf("Kill with an invalid signal didn't error out correctly")) running, _ := inspectField(cid, "State.Running") c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) out, _ = dockerCmd(c, "run", "-d", "busybox", "top") cid = strings.TrimSpace(out) c.Assert(waitRun(cid), check.IsNil) out, _, err = dockerCmdWithError("kill", "-s", "SIG42", cid) c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, "Invalid signal: SIG42", check.Commentf("Kill with an invalid signal error out correctly")) running, _ = inspectField(cid, "State.Running") c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) } func (s *DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "docker-kill-test-api", "-d", "busybox", "top") dockerCmd(c, "stop", "docker-kill-test-api") status, _, err := sockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusNoContent) } docker-1.10.3/integration-cli/docker_cli_links_test.go000066400000000000000000000212711267010174400230240ustar00rootroot00000000000000package main import ( "fmt" "regexp" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/runconfig" "github.com/go-check/check" ) func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) { testRequires(c, DaemonIsLinux) _, exitCode, err := dockerCmdWithError("run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") // run ping failed with error c.Assert(exitCode, checker.Equals, 1, check.Commentf("error: %v", err)) } // Test for appropriate error when calling --link with an invalid target container func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) { testRequires(c, DaemonIsLinux) out, _, err 
:= dockerCmdWithError("run", "--link", "bogus:alias", "busybox", "true") // an invalid container target should produce an error c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // an invalid container target should produce an error c.Assert(out, checker.Contains, "Could not get container") } func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "container1", "--hostname", "fred", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "container2", "--hostname", "wilma", "busybox", "top") runArgs := []string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c"} pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" // test ping by alias, ping by name, and ping by hostname // 1. Ping by alias dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) // 2. Ping by container name dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) // 3. Ping by hostname dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) 
} func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") idA := strings.TrimSpace(out) out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") idB := strings.TrimSpace(out) dockerCmd(c, "rename", "container1", "container_new") dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") dockerCmd(c, "kill", idA) dockerCmd(c, "kill", idB) } func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { testRequires(c, DaemonIsLinux) var ( expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} result []string ) dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") c.Assert(err, checker.IsNil) err = unmarshalJSON([]byte(links), &result) c.Assert(err, checker.IsNil) output := convertSliceOfStringsToMap(result) c.Assert(output, checker.DeepEquals, expected) } func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { testRequires(c, DaemonIsLinux) var ( expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} result []string ) dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") c.Assert(err, checker.IsNil) err = 
unmarshalJSON([]byte(links), &result) c.Assert(err, checker.IsNil) output := convertSliceOfStringsToMap(result) c.Assert(output, checker.DeepEquals, expected) } func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "create", "--name=first", "busybox", "top") dockerCmd(c, "create", "--name=second", "--link=first:first", "busybox", "top") dockerCmd(c, "start", "first") } func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon, ExecSupport) out, _ := dockerCmd(c, "run", "-itd", "--name", "one", "busybox", "top") idOne := strings.TrimSpace(out) out, _ = dockerCmd(c, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top") idTwo := strings.TrimSpace(out) c.Assert(waitRun(idTwo), checker.IsNil) contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") c.Assert(err, checker.IsNil, check.Commentf("contentOne: %s", string(contentOne))) contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts") c.Assert(err, checker.IsNil, check.Commentf("contentTwo: %s", string(contentTwo))) // Host is not present in updated hosts file c.Assert(string(contentTwo), checker.Contains, "onetwo") } func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon, ExecSupport) dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top") out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top") id := strings.TrimSpace(string(out)) realIP, err := inspectField("one", "NetworkSettings.Networks.bridge.IPAddress") if err != nil { c.Fatal(err) } c.Assert(err, checker.IsNil) content, err := readContainerFileWithExec(id, "/etc/hosts") c.Assert(err, checker.IsNil) getIP := func(hosts []byte, hostname string) string { re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) matches := re.FindSubmatch(hosts) 
c.Assert(matches, checker.NotNil, check.Commentf("Hostname %s have no matches in hosts", hostname)) return string(matches[1]) } ip := getIP(content, "one") c.Assert(ip, checker.Equals, realIP) ip = getIP(content, "onetwo") c.Assert(ip, checker.Equals, realIP) dockerCmd(c, "restart", "one") realIP, err = inspectField("one", "NetworkSettings.Networks.bridge.IPAddress") c.Assert(err, checker.IsNil) content, err = readContainerFileWithExec(id, "/etc/hosts") c.Assert(err, checker.IsNil, check.Commentf("content: %s", string(content))) ip = getIP(content, "one") c.Assert(ip, checker.Equals, realIP) ip = getIP(content, "onetwo") c.Assert(ip, checker.Equals, realIP) } func (s *DockerSuite) TestLinksEnvs(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top") out, _ := dockerCmd(c, "run", "--name=second", "--link=first:first", "busybox", "env") c.Assert(out, checker.Contains, "FIRST_ENV_e1=\n") c.Assert(out, checker.Contains, "FIRST_ENV_e2=v2") c.Assert(out, checker.Contains, "FIRST_ENV_e3=v3=v3") } func (s *DockerSuite) TestLinkShortDefinition(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--name", "shortlinkdef", "busybox", "top") cid := strings.TrimSpace(out) c.Assert(waitRun(cid), checker.IsNil) out, _ = dockerCmd(c, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top") cid2 := strings.TrimSpace(out) c.Assert(waitRun(cid2), checker.IsNil) links, err := inspectFieldJSON(cid2, "HostConfig.Links") c.Assert(err, checker.IsNil) c.Assert(links, checker.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]") } func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top") out, _, err := dockerCmdWithError("run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true") // 
Running container linking to a container with --net host should have failed c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // Running container linking to a container with --net host should have failed c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) } func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") // /etc/hosts should be a regular file c.Assert(out, checker.Matches, "^-.+\n") } func (s *DockerSuite) TestLinksMultipleWithSameName(c *check.C) { dockerCmd(c, "run", "-d", "--name=upstream-a", "busybox", "top") dockerCmd(c, "run", "-d", "--name=upstream-b", "busybox", "top") dockerCmd(c, "run", "--link", "upstream-a:upstream", "--link", "upstream-b:upstream", "busybox", "sh", "-c", "ping -c 1 upstream") } docker-1.10.3/integration-cli/docker_cli_links_unix_test.go000066400000000000000000000012411267010174400240620ustar00rootroot00000000000000// +build !windows package main import ( "io/ioutil" "os" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) { // In a _unix file as using Unix specific files, and must be on the // same host as the daemon. 
testRequires(c, SameHostDaemon, NotUserNamespace) out, _ := dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts") hosts, err := ioutil.ReadFile("/etc/hosts") if os.IsNotExist(err) { c.Skip("/etc/hosts does not exist, skip this test") } c.Assert(out, checker.Equals, string(hosts), check.Commentf("container: %s\n\nhost:%s", out, hosts)) } docker-1.10.3/integration-cli/docker_cli_login_test.go000066400000000000000000000007711267010174400230160ustar00rootroot00000000000000package main import ( "bytes" "os/exec" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { cmd := exec.Command(dockerBinary, "login") // Send to stdin so the process does not get the TTY cmd.Stdin = bytes.NewBufferString("buffer test string \n") // run the command and block until it's done err := cmd.Run() c.Assert(err, checker.NotNil) //"Expected non nil err when loginning in & TTY not available" } docker-1.10.3/integration-cli/docker_cli_logs_test.go000066400000000000000000000244651267010174400226600ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "io" "os/exec" "regexp" "strconv" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/jsonlog" "github.com/go-check/check" ) // This used to work, it test a log of PageSize-1 (gh#4851) func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) { testRequires(c, DaemonIsLinux) testLen := 32767 out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) id := strings.TrimSpace(out) dockerCmd(c, "wait", id) out, _ = dockerCmd(c, "logs", id) c.Assert(out, checker.HasLen, testLen+1) } // Regression test: When going over the PageSize, it used to panic (gh#4851) func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) { testRequires(c, DaemonIsLinux) testLen := 32768 out, _ := dockerCmd(c, "run", "-d", 
"busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) id := strings.TrimSpace(out) dockerCmd(c, "wait", id) out, _ = dockerCmd(c, "logs", id) c.Assert(out, checker.HasLen, testLen+1) } // Regression test: When going much over the PageSize, it used to block (gh#4851) func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) { testRequires(c, DaemonIsLinux) testLen := 33000 out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) id := strings.TrimSpace(out) dockerCmd(c, "wait", id) out, _ = dockerCmd(c, "logs", id) c.Assert(out, checker.HasLen, testLen+1) } func (s *DockerSuite) TestLogsTimestamps(c *check.C) { testRequires(c, DaemonIsLinux) testLen := 100 out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) id := strings.TrimSpace(out) dockerCmd(c, "wait", id) out, _ = dockerCmd(c, "logs", "-t", id) lines := strings.Split(out, "\n") c.Assert(lines, checker.HasLen, testLen+1) ts := regexp.MustCompile(`^.* `) for _, l := range lines { if l != "" { _, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l)) c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l)) // ensure we have padded 0's c.Assert(l[29], checker.Equals, uint8('Z')) } } } func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { testRequires(c, DaemonIsLinux) msg := "stderr_log" out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) id := strings.TrimSpace(out) dockerCmd(c, "wait", id) stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) c.Assert(stdout, checker.Equals, "") stderr = strings.TrimSpace(stderr) c.Assert(stderr, checker.Equals, msg) } func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { testRequires(c, DaemonIsLinux) msg := "stderr_log" out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", 
fmt.Sprintf("echo %s 1>&2", msg)) id := strings.TrimSpace(out) dockerCmd(c, "wait", id) stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) c.Assert(stderr, checker.Equals, "") stdout = strings.TrimSpace(stdout) c.Assert(stdout, checker.Equals, msg) } func (s *DockerSuite) TestLogsTail(c *check.C) { testRequires(c, DaemonIsLinux) testLen := 100 out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) id := strings.TrimSpace(out) dockerCmd(c, "wait", id) out, _ = dockerCmd(c, "logs", "--tail", "5", id) lines := strings.Split(out, "\n") c.Assert(lines, checker.HasLen, 6) out, _ = dockerCmd(c, "logs", "--tail", "all", id) lines = strings.Split(out, "\n") c.Assert(lines, checker.HasLen, testLen+1) out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", id) lines = strings.Split(out, "\n") c.Assert(lines, checker.HasLen, testLen+1) } func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", "hello") id := strings.TrimSpace(out) dockerCmd(c, "wait", id) logsCmd := exec.Command(dockerBinary, "logs", "-f", id) c.Assert(logsCmd.Start(), checker.IsNil) errChan := make(chan error) go func() { errChan <- logsCmd.Wait() close(errChan) }() select { case err := <-errChan: c.Assert(err, checker.IsNil) case <-time.After(1 * time.Second): c.Fatal("Following logs is hanged") } } func (s *DockerSuite) TestLogsSince(c *check.C) { testRequires(c, DaemonIsLinux) name := "testlogssince" dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo log$i; done") out, _ := dockerCmd(c, "logs", "-t", name) log2Line := strings.Split(strings.Split(out, "\n")[1], " ") t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // the timestamp log2 is written c.Assert(err, checker.IsNil) since := t.Unix() + 1 // add 1s so log1 & log2 doesn't show up out, _ = dockerCmd(c, "logs", "-t", 
fmt.Sprintf("--since=%v", since), name) // Skip 2 seconds unexpected := []string{"log1", "log2"} for _, v := range unexpected { c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", since)) } // Test to make sure a bad since format is caught by the client out, _, _ = dockerCmdWithError("logs", "-t", "--since=2006-01-02T15:04:0Z", name) c.Assert(out, checker.Contains, "cannot parse \"0Z\" as \"05\"", check.Commentf("bad since format passed to server")) // Test with default value specified and parameter omitted expected := []string{"log1", "log2", "log3"} for _, cmd := range []*exec.Cmd{ exec.Command(dockerBinary, "logs", "-t", name), exec.Command(dockerBinary, "logs", "-t", "--since=0", name), } { out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.IsNil, check.Commentf("failed to log container: %s", out)) for _, v := range expected { c.Assert(out, checker.Contains, v) } } } func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do date +%s; sleep 1; done`) id := strings.TrimSpace(out) now := daemonTime(c).Unix() since := now + 2 out, _ = dockerCmd(c, "logs", "-f", fmt.Sprintf("--since=%v", since), id) lines := strings.Split(strings.TrimSpace(out), "\n") c.Assert(lines, checker.Not(checker.HasLen), 0) for _, v := range lines { ts, err := strconv.ParseInt(v, 10, 64) c.Assert(err, checker.IsNil, check.Commentf("cannot parse timestamp output from log: '%v'\nout=%s", v, out)) c.Assert(ts >= since, checker.Equals, true, check.Commentf("earlier log found. 
since=%v logdate=%v", since, ts)) } } // Regression test for #8832 func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) id := strings.TrimSpace(out) stopSlowRead := make(chan bool) go func() { exec.Command(dockerBinary, "wait", id).Run() stopSlowRead <- true }() logCmd := exec.Command(dockerBinary, "logs", "-f", id) stdout, err := logCmd.StdoutPipe() c.Assert(err, checker.IsNil) c.Assert(logCmd.Start(), checker.IsNil) // First read slowly bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) c.Assert(err, checker.IsNil) // After the container has finished we can continue reading fast bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) c.Assert(err, checker.IsNil) actual := bytes1 + bytes2 expected := 200000 c.Assert(actual, checker.Equals, expected) } func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) type info struct { NGoroutines int } getNGoroutines := func() int { var i info status, b, err := sockRequest("GET", "/info", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, 200) c.Assert(json.Unmarshal(b, &i), checker.IsNil) return i.NGoroutines } nroutines := getNGoroutines() cmd := exec.Command(dockerBinary, "logs", "-f", id) r, w := io.Pipe() cmd.Stdout = w c.Assert(cmd.Start(), checker.IsNil) // Make sure pipe is written to chErr := make(chan error) go func() { b := make([]byte, 1) _, err := r.Read(b) chErr <- err }() c.Assert(<-chErr, checker.IsNil) c.Assert(cmd.Process.Kill(), checker.IsNil) // NGoroutines is not updated right away, so we need to wait before failing t := time.After(30 * time.Second) for { select { case <-t: n := getNGoroutines() 
c.Assert(n <= nroutines, checker.Equals, true, check.Commentf("leaked goroutines: expected less than or equal to %d, got: %d", nroutines, n)) default: if n := getNGoroutines(); n <= nroutines { return } time.Sleep(200 * time.Millisecond) } } } func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) type info struct { NGoroutines int } getNGoroutines := func() int { var i info status, b, err := sockRequest("GET", "/info", nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, 200) c.Assert(json.Unmarshal(b, &i), checker.IsNil) return i.NGoroutines } nroutines := getNGoroutines() cmd := exec.Command(dockerBinary, "logs", "-f", id) c.Assert(cmd.Start(), checker.IsNil) time.Sleep(200 * time.Millisecond) c.Assert(cmd.Process.Kill(), checker.IsNil) // NGoroutines is not updated right away, so we need to wait before failing t := time.After(30 * time.Second) for { select { case <-t: n := getNGoroutines() c.Assert(n <= nroutines, checker.Equals, true, check.Commentf("leaked goroutines: expected less than or equal to %d, got: %d", nroutines, n)) default: if n := getNGoroutines(); n <= nroutines { return } time.Sleep(200 * time.Millisecond) } } } func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) { name := "testlogsnocontainer" out, _, _ := dockerCmdWithError("logs", name) message := fmt.Sprintf("Error: No such container: %s\n", name) c.Assert(out, checker.Equals, message) } docker-1.10.3/integration-cli/docker_cli_nat_test.go000066400000000000000000000047261267010174400224740ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "net" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func startServerContainer(c *check.C, msg string, port int) string { name := "server" cmd := []string{ "-d", 
"-p", fmt.Sprintf("%d:%d", port, port), "busybox", "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), } c.Assert(waitForContainer(name, cmd...), check.IsNil) return name } func getExternalAddress(c *check.C) net.IP { iface, err := net.InterfaceByName("eth0") if err != nil { c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err)) } ifaceAddrs, err := iface.Addrs() c.Assert(err, check.IsNil) c.Assert(ifaceAddrs, checker.Not(checker.HasLen), 0) ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) c.Assert(err, check.IsNil) return ifaceIP } func getContainerLogs(c *check.C, containerID string) string { out, _ := dockerCmd(c, "logs", containerID) return strings.Trim(out, "\r\n") } func getContainerStatus(c *check.C, containerID string) string { out, err := inspectField(containerID, "State.Running") c.Assert(err, check.IsNil) return out } func (s *DockerSuite) TestNetworkNat(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon) msg := "it works" startServerContainer(c, msg, 8080) endpoint := getExternalAddress(c) conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) c.Assert(err, check.IsNil) data, err := ioutil.ReadAll(conn) conn.Close() c.Assert(err, check.IsNil) final := strings.TrimRight(string(data), "\n") c.Assert(final, checker.Equals, msg) } func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon) var ( msg = "hi yall" ) startServerContainer(c, msg, 8081) conn, err := net.Dial("tcp", "localhost:8081") c.Assert(err, check.IsNil) data, err := ioutil.ReadAll(conn) conn.Close() c.Assert(err, check.IsNil) final := strings.TrimRight(string(data), "\n") c.Assert(final, checker.Equals, msg) } func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) msg := "it works" startServerContainer(c, msg, 8080) endpoint := getExternalAddress(c) out, _ := dockerCmd(c, "run", "-t", 
"--net=container:server", "busybox", "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())) final := strings.TrimRight(string(out), "\n") c.Assert(final, checker.Equals, msg) } docker-1.10.3/integration-cli/docker_cli_netmode_test.go000066400000000000000000000101651267010174400233370ustar00rootroot00000000000000package main import ( "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/runconfig" "github.com/go-check/check" ) // GH14530. Validates combinations of --net= with other options // stringCheckPS is how the output of PS starts in order to validate that // the command executed in a container did really run PS correctly. const stringCheckPS = "PID USER" // DockerCmdWithFail executes a docker command that is supposed to fail and returns // the output, the exit code. If the command returns an Nil error, it will fail and // stop the tests. func dockerCmdWithFail(c *check.C, args ...string) (string, int) { out, status, err := dockerCmdWithError(args...) 
c.Assert(err, check.NotNil, check.Commentf("%v", out)) return out, status } func (s *DockerSuite) TestNetHostname(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "-h=name", "busybox", "ps") c.Assert(out, checker.Contains, stringCheckPS) out, _ = dockerCmd(c, "run", "--net=host", "busybox", "ps") c.Assert(out, checker.Contains, stringCheckPS) out, _ = dockerCmd(c, "run", "-h=name", "--net=bridge", "busybox", "ps") c.Assert(out, checker.Contains, stringCheckPS) out, _ = dockerCmd(c, "run", "-h=name", "--net=none", "busybox", "ps") c.Assert(out, checker.Contains, stringCheckPS) out, _ = dockerCmdWithFail(c, "run", "-h=name", "--net=host", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) out, _ = dockerCmdWithFail(c, "run", "-h=name", "--net=container:other", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") c.Assert(out, checker.Contains, "--net: invalid net mode: invalid container format container:") out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") c.Assert(out, checker.Contains, "network weird not found") } func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--link=zip:zap", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndLinks.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=host", "--link=zip:zap", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) } func (s *DockerSuite) TestConflictNetworkModeAndOptions(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmdWithFail(c, "run", "--net=host", "--dns=8.8.8.8", "busybox", "ps") c.Assert(out, checker.Contains, 
runconfig.ErrConflictNetworkAndDNS.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--dns=8.8.8.8", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkAndDNS.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=host", "--add-host=name:8.8.8.8", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHosts.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--add-host=name:8.8.8.8", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHosts.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=host", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-P", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-p", "8080", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--expose", "8000-9000", "busybox", "ps") c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkExposePorts.Error()) } docker-1.10.3/integration-cli/docker_cli_network_unix_test.go000066400000000000000000001551541267010174400244500ustar00rootroot00000000000000// +build !windows package main import ( "encoding/json" "fmt" "io/ioutil" "net" "net/http" "net/http/httptest" "os" "sort" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/runconfig" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/versions/v1p20" "github.com/docker/libnetwork/driverapi" remoteapi 
"github.com/docker/libnetwork/drivers/remote/api" "github.com/docker/libnetwork/ipamapi" remoteipam "github.com/docker/libnetwork/ipams/remote/api" "github.com/docker/libnetwork/netlabel" "github.com/go-check/check" "github.com/vishvananda/netlink" ) const dummyNetworkDriver = "dummy-network-driver" const dummyIpamDriver = "dummy-ipam-driver" var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest func init() { check.Suite(&DockerNetworkSuite{ ds: &DockerSuite{}, }) } type DockerNetworkSuite struct { server *httptest.Server ds *DockerSuite d *Daemon } func (s *DockerNetworkSuite) SetUpTest(c *check.C) { s.d = NewDaemon(c) } func (s *DockerNetworkSuite) TearDownTest(c *check.C) { s.d.Stop() s.ds.TearDownTest(c) } func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { mux := http.NewServeMux() s.server = httptest.NewServer(mux) c.Assert(s.server, check.NotNil, check.Commentf("Failed to start a HTTP Server")) setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIpamDriver) } func setupRemoteNetworkDrivers(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) }) // Network driver implementation mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, `{"Scope":"local"}`) }) mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) if err != nil { http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) return } w.Header().Set("Content-Type", 
"application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, "null") }) mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, "null") }) mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) }) mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") veth := &netlink.Veth{ LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} if err := netlink.LinkAdd(veth); err != nil { fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) } else { fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) } }) mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, "null") }) mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") if link, err := netlink.LinkByName("cnt0"); err == nil { netlink.LinkDel(link) } fmt.Fprintf(w, "null") }) // Ipam Driver implementation var ( poolRequest remoteipam.RequestPoolRequest poolReleaseReq remoteipam.ReleasePoolRequest addressRequest remoteipam.RequestAddressRequest addressReleaseReq remoteipam.ReleaseAddressRequest lAS = "localAS" gAS = "globalAS" pool = "172.28.0.0/16" poolID = lAS + "/" + pool gw = "172.28.255.254/16" ) 
mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) }) mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { err := json.NewDecoder(r.Body).Decode(&poolRequest) if err != nil { http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) return } w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) } else if poolRequest.Pool != "" && poolRequest.Pool != pool { fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) } else { fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) } }) mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { err := json.NewDecoder(r.Body).Decode(&addressRequest) if err != nil { http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) return } w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") // make sure libnetwork is now querying on the expected pool id if addressRequest.PoolID != poolID { fmt.Fprintf(w, `{"Error":"unknown pool id"}`) } else if addressRequest.Address != "" { fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) } else { fmt.Fprintf(w, `{"Address":"`+gw+`"}`) } }) mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) if err != nil { http.Error(w, "Unable to decode JSON payload: "+err.Error(), 
http.StatusBadRequest) return } w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") // make sure libnetwork is now asking to release the expected address from the expected poolid if addressRequest.PoolID != poolID { fmt.Fprintf(w, `{"Error":"unknown pool id"}`) } else if addressReleaseReq.Address != gw { fmt.Fprintf(w, `{"Error":"unknown address"}`) } else { fmt.Fprintf(w, "null") } }) mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) if err != nil { http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) return } w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") // make sure libnetwork is now asking to release the expected poolid if addressRequest.PoolID != poolID { fmt.Fprintf(w, `{"Error":"unknown pool id"}`) } else { fmt.Fprintf(w, "null") } }) err := os.MkdirAll("/etc/docker/plugins", 0755) c.Assert(err, checker.IsNil) fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) err = ioutil.WriteFile(fileName, []byte(url), 0644) c.Assert(err, checker.IsNil) ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) c.Assert(err, checker.IsNil) } func (s *DockerNetworkSuite) TearDownSuite(c *check.C) { if s.server == nil { return } s.server.Close() err := os.RemoveAll("/etc/docker/plugins") c.Assert(err, checker.IsNil) } func assertNwIsAvailable(c *check.C, name string) { if !isNwPresent(c, name) { c.Fatalf("Network %s not found in network ls o/p", name) } } func assertNwNotAvailable(c *check.C, name string) { if isNwPresent(c, name) { c.Fatalf("Found network %s in network ls o/p", name) } } func isNwPresent(c *check.C, name string) bool { out, _ := dockerCmd(c, "network", "ls") lines := strings.Split(out, "\n") for i := 1; i < len(lines)-1; i++ { netFields := strings.Fields(lines[i]) if 
netFields[1] == name { return true } } return false } // assertNwList checks network list retrived with ls command // equals to expected network list // note: out should be `network ls [option]` result func assertNwList(c *check.C, out string, expectNws []string) { lines := strings.Split(out, "\n") var nwList []string for _, line := range lines[1 : len(lines)-1] { netFields := strings.Fields(line) // wrap all network name in nwList nwList = append(nwList, netFields[1]) } // first need to sort out and expected sort.StringSlice(nwList).Sort() sort.StringSlice(expectNws).Sort() // network ls should contains all expected networks c.Assert(nwList, checker.DeepEquals, expectNws) } func getNwResource(c *check.C, name string) *types.NetworkResource { out, _ := dockerCmd(c, "network", "inspect", name) nr := []types.NetworkResource{} err := json.Unmarshal([]byte(out), &nr) c.Assert(err, check.IsNil) return &nr[0] } func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { defaults := []string{"bridge", "host", "none"} for _, nn := range defaults { assertNwIsAvailable(c, nn) } } func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { predefined := []string{"bridge", "host", "none", "default"} for _, net := range predefined { // predefined networks can't be created again out, _, err := dockerCmdWithError("network", "create", net) c.Assert(err, checker.NotNil, check.Commentf("%v", out)) } } func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { predefined := []string{"bridge", "host", "none", "default"} for _, net := range predefined { // predefined networks can't be removed out, _, err := dockerCmdWithError("network", "rm", net) c.Assert(err, checker.NotNil, check.Commentf("%v", out)) } } func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { out, _ := dockerCmd(c, "network", "create", "dev") defer func() { dockerCmd(c, "network", "rm", "dev") }() networkID := strings.TrimSpace(out) // filter with partial ID and 
partial name // only show 'bridge' and 'dev' network out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5], "-f", "name=dge") assertNwList(c, out, []string{"dev", "bridge"}) // only show built-in network (bridge, none, host) out, _ = dockerCmd(c, "network", "ls", "-f", "type=builtin") assertNwList(c, out, []string{"bridge", "none", "host"}) // only show custom networks (dev) out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom") assertNwList(c, out, []string{"dev"}) // show all networks with filter // it should be equivalent of ls without option out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom", "-f", "type=builtin") assertNwList(c, out, []string{"dev", "bridge", "host", "none"}) } func (s *DockerNetworkSuite) TestDockerNetworkCreateDelete(c *check.C) { dockerCmd(c, "network", "create", "test") assertNwIsAvailable(c, "test") dockerCmd(c, "network", "rm", "test") assertNwNotAvailable(c, "test") } func (s *DockerSuite) TestDockerNetworkDeleteNotExists(c *check.C) { out, _, err := dockerCmdWithError("network", "rm", "test") c.Assert(err, checker.NotNil, check.Commentf("%v", out)) } func (s *DockerSuite) TestDockerNetworkDeleteMultiple(c *check.C) { dockerCmd(c, "network", "create", "testDelMulti0") assertNwIsAvailable(c, "testDelMulti0") dockerCmd(c, "network", "create", "testDelMulti1") assertNwIsAvailable(c, "testDelMulti1") dockerCmd(c, "network", "create", "testDelMulti2") assertNwIsAvailable(c, "testDelMulti2") out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top") containerID := strings.TrimSpace(out) waitRun(containerID) // delete three networks at the same time, since testDelMulti2 // contains active container, its deletion should fail. out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2") // err should not be nil due to deleting testDelMulti2 failed. 
c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // testDelMulti2 should fail due to network has active endpoints c.Assert(out, checker.Contains, "has active endpoints") assertNwNotAvailable(c, "testDelMulti0") assertNwNotAvailable(c, "testDelMulti1") // testDelMulti2 can't be deleted, so it should exist assertNwIsAvailable(c, "testDelMulti2") } func (s *DockerSuite) TestDockerNetworkInspect(c *check.C) { out, _ := dockerCmd(c, "network", "inspect", "host") networkResources := []types.NetworkResource{} err := json.Unmarshal([]byte(out), &networkResources) c.Assert(err, check.IsNil) c.Assert(networkResources, checker.HasLen, 1) out, _ = dockerCmd(c, "network", "inspect", "--format='{{ .Name }}'", "host") c.Assert(strings.TrimSpace(out), check.Equals, "host") } func (s *DockerSuite) TestDockerInspectMultipleNetwork(c *check.C) { out, _ := dockerCmd(c, "network", "inspect", "host", "none") networkResources := []types.NetworkResource{} err := json.Unmarshal([]byte(out), &networkResources) c.Assert(err, check.IsNil) c.Assert(networkResources, checker.HasLen, 2) // Should print an error, return an exitCode 1 *but* should print the host network out, exitCode, err := dockerCmdWithError("network", "inspect", "host", "nonexistent") c.Assert(err, checker.NotNil) c.Assert(exitCode, checker.Equals, 1) c.Assert(out, checker.Contains, "Error: No such network: nonexistent") networkResources = []types.NetworkResource{} inspectOut := strings.SplitN(out, "\nError: No such network: nonexistent\n", 2)[0] err = json.Unmarshal([]byte(inspectOut), &networkResources) c.Assert(networkResources, checker.HasLen, 1) // Should print an error and return an exitCode, nothing else out, exitCode, err = dockerCmdWithError("network", "inspect", "nonexistent") c.Assert(err, checker.NotNil) c.Assert(exitCode, checker.Equals, 1) c.Assert(out, checker.Contains, "Error: No such network: nonexistent") } func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { dockerCmd(c, 
"network", "create", "brNetForInspect") assertNwIsAvailable(c, "brNetForInspect") defer func() { dockerCmd(c, "network", "rm", "brNetForInspect") assertNwNotAvailable(c, "brNetForInspect") }() out, _ := dockerCmd(c, "run", "-d", "--name", "testNetInspect1", "--net", "brNetForInspect", "busybox", "top") c.Assert(waitRun("testNetInspect1"), check.IsNil) containerID := strings.TrimSpace(out) defer func() { // we don't stop container by name, because we'll rename it later dockerCmd(c, "stop", containerID) }() out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") networkResources := []types.NetworkResource{} err := json.Unmarshal([]byte(out), &networkResources) c.Assert(err, check.IsNil) c.Assert(networkResources, checker.HasLen, 1) container, ok := networkResources[0].Containers[containerID] c.Assert(ok, checker.True) c.Assert(container.Name, checker.Equals, "testNetInspect1") // rename container and check docker inspect output update newName := "HappyNewName" dockerCmd(c, "rename", "testNetInspect1", newName) // check whether network inspect works properly out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") newNetRes := []types.NetworkResource{} err = json.Unmarshal([]byte(out), &newNetRes) c.Assert(err, check.IsNil) c.Assert(newNetRes, checker.HasLen, 1) container1, ok := newNetRes[0].Containers[containerID] c.Assert(ok, checker.True) c.Assert(container1.Name, checker.Equals, newName) } func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { dockerCmd(c, "network", "create", "test") assertNwIsAvailable(c, "test") nr := getNwResource(c, "test") c.Assert(nr.Name, checker.Equals, "test") c.Assert(len(nr.Containers), checker.Equals, 0) // run a container out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") c.Assert(waitRun("test"), check.IsNil) containerID := strings.TrimSpace(out) // connect the container to the test network dockerCmd(c, "network", "connect", "test", containerID) // inspect the network to make 
sure container is connected nr = getNetworkResource(c, nr.ID) c.Assert(len(nr.Containers), checker.Equals, 1) c.Assert(nr.Containers[containerID], check.NotNil) // check if container IP matches network inspect ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) c.Assert(err, check.IsNil) containerIP := findContainerIP(c, "test", "test") c.Assert(ip.String(), checker.Equals, containerIP) // disconnect container from the network dockerCmd(c, "network", "disconnect", "test", containerID) nr = getNwResource(c, "test") c.Assert(nr.Name, checker.Equals, "test") c.Assert(len(nr.Containers), checker.Equals, 0) // run another container out, _ = dockerCmd(c, "run", "-d", "--net", "test", "--name", "test2", "busybox", "top") c.Assert(waitRun("test2"), check.IsNil) containerID = strings.TrimSpace(out) nr = getNwResource(c, "test") c.Assert(nr.Name, checker.Equals, "test") c.Assert(len(nr.Containers), checker.Equals, 1) // force disconnect the container to the test network dockerCmd(c, "network", "disconnect", "-f", "test", containerID) nr = getNwResource(c, "test") c.Assert(nr.Name, checker.Equals, "test") c.Assert(len(nr.Containers), checker.Equals, 0) dockerCmd(c, "network", "rm", "test") assertNwNotAvailable(c, "test") } func (s *DockerNetworkSuite) TestDockerNetworkIpamMultipleNetworks(c *check.C) { // test0 bridge network dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") assertNwIsAvailable(c, "test1") // test2 bridge network does not overlap dockerCmd(c, "network", "create", "--subnet=192.169.0.0/16", "test2") assertNwIsAvailable(c, "test2") // for networks w/o ipam specified, docker will choose proper non-overlapping subnets dockerCmd(c, "network", "create", "test3") assertNwIsAvailable(c, "test3") dockerCmd(c, "network", "create", "test4") assertNwIsAvailable(c, "test4") dockerCmd(c, "network", "create", "test5") assertNwIsAvailable(c, "test5") // test network with multiple subnets // bridge network doesn't support multiple subnets. 
hence, use a dummy driver that supports dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6") assertNwIsAvailable(c, "test6") // test network with multiple subnets with valid ipam combinations // also check same subnet across networks when the driver supports it. dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "--gateway=192.168.0.100", "--gateway=192.170.0.100", "--ip-range=192.168.1.0/24", "--aux-address", "a=192.168.1.5", "--aux-address", "b=192.168.1.6", "--aux-address", "a=192.170.1.5", "--aux-address", "b=192.170.1.6", "test7") assertNwIsAvailable(c, "test7") // cleanup for i := 1; i < 8; i++ { dockerCmd(c, "network", "rm", fmt.Sprintf("test%d", i)) } } func (s *DockerNetworkSuite) TestDockerNetworkCustomIpam(c *check.C) { // Create a bridge network using custom ipam driver dockerCmd(c, "network", "create", "--ipam-driver", dummyIpamDriver, "br0") assertNwIsAvailable(c, "br0") // Verify expected network ipam fields are there nr := getNetworkResource(c, "br0") c.Assert(nr.Driver, checker.Equals, "bridge") c.Assert(nr.IPAM.Driver, checker.Equals, dummyIpamDriver) // remove network and exercise remote ipam driver dockerCmd(c, "network", "rm", "br0") assertNwNotAvailable(c, "br0") } func (s *DockerNetworkSuite) TestDockerNetworkIpamOptions(c *check.C) { // Create a bridge network using custom ipam driver and options dockerCmd(c, "network", "create", "--ipam-driver", dummyIpamDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0") assertNwIsAvailable(c, "br0") // Verify expected network ipam options nr := getNetworkResource(c, "br0") opts := nr.IPAM.Options c.Assert(opts["opt1"], checker.Equals, "drv1") c.Assert(opts["opt2"], checker.Equals, "drv2") } func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) { nr := getNetworkResource(c, "none") c.Assert(nr.Driver, checker.Equals, "null") 
c.Assert(nr.Scope, checker.Equals, "local") c.Assert(nr.IPAM.Driver, checker.Equals, "default") c.Assert(len(nr.IPAM.Config), checker.Equals, 0) nr = getNetworkResource(c, "host") c.Assert(nr.Driver, checker.Equals, "host") c.Assert(nr.Scope, checker.Equals, "local") c.Assert(nr.IPAM.Driver, checker.Equals, "default") c.Assert(len(nr.IPAM.Config), checker.Equals, 0) nr = getNetworkResource(c, "bridge") c.Assert(nr.Driver, checker.Equals, "bridge") c.Assert(nr.Scope, checker.Equals, "local") c.Assert(nr.IPAM.Driver, checker.Equals, "default") c.Assert(len(nr.IPAM.Config), checker.Equals, 1) c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) } func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomUnspecified(c *check.C) { // if unspecified, network subnet will be selected from inside preferred pool dockerCmd(c, "network", "create", "test01") assertNwIsAvailable(c, "test01") nr := getNetworkResource(c, "test01") c.Assert(nr.Driver, checker.Equals, "bridge") c.Assert(nr.Scope, checker.Equals, "local") c.Assert(nr.IPAM.Driver, checker.Equals, "default") c.Assert(len(nr.IPAM.Config), checker.Equals, 1) c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) dockerCmd(c, "network", "rm", "test01") assertNwNotAvailable(c, "test01") } func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) { dockerCmd(c, "network", "create", "--driver=bridge", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0") assertNwIsAvailable(c, "br0") nr := getNetworkResource(c, "br0") c.Assert(nr.Driver, checker.Equals, "bridge") c.Assert(nr.Scope, checker.Equals, "local") c.Assert(nr.IPAM.Driver, checker.Equals, "default") c.Assert(len(nr.IPAM.Config), checker.Equals, 1) c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") 
c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") dockerCmd(c, "network", "rm", "br0") assertNwNotAvailable(c, "test01") } func (s *DockerNetworkSuite) TestDockerNetworkIpamInvalidCombinations(c *check.C) { // network with ip-range out of subnet range _, _, err := dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--ip-range=192.170.0.0/16", "test") c.Assert(err, check.NotNil) // network with multiple gateways for a single subnet _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test") c.Assert(err, check.NotNil) // Multiple overlapping subnets in the same network must fail _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test") c.Assert(err, check.NotNil) // overlapping subnets across networks must fail // create a valid test0 network dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test0") assertNwIsAvailable(c, "test0") // create an overlapping test1 network _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1") c.Assert(err, check.NotNil) dockerCmd(c, "network", "rm", "test0") assertNwNotAvailable(c, "test0") } func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) { dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt") assertNwIsAvailable(c, "testopt") gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData] c.Assert(gopts, checker.NotNil) opts, ok := gopts.(map[string]interface{}) c.Assert(ok, checker.Equals, true) c.Assert(opts["opt1"], checker.Equals, "drv1") c.Assert(opts["opt2"], checker.Equals, "drv2") dockerCmd(c, "network", "rm", "testopt") assertNwNotAvailable(c, "testopt") } func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *check.C) { testRequires(c, ExecSupport) // On default bridge network built-in service discovery should 
not happen hostsFile := "/etc/hosts" bridgeName := "external-bridge" bridgeIP := "192.169.255.254/24" out, err := createInterface(c, "bridge", bridgeName, bridgeIP) c.Assert(err, check.IsNil, check.Commentf(out)) defer deleteInterface(c, bridgeName) err = s.d.StartWithBusybox("--bridge", bridgeName) c.Assert(err, check.IsNil) defer s.d.Restart() // run two containers and store first container's etc/hosts content out, err = s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil) cid1 := strings.TrimSpace(out) defer s.d.Cmd("stop", cid1) hosts, err := s.d.Cmd("exec", cid1, "cat", hostsFile) c.Assert(err, checker.IsNil) out, err = s.d.Cmd("run", "-d", "--name", "container2", "busybox", "top") c.Assert(err, check.IsNil) cid2 := strings.TrimSpace(out) // verify first container's etc/hosts file has not changed after spawning the second named container hostsPost, err := s.d.Cmd("exec", cid1, "cat", hostsFile) c.Assert(err, checker.IsNil) c.Assert(string(hosts), checker.Equals, string(hostsPost), check.Commentf("Unexpected %s change on second container creation", hostsFile)) // stop container 2 and verify first container's etc/hosts has not changed _, err = s.d.Cmd("stop", cid2) c.Assert(err, check.IsNil) hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) c.Assert(err, checker.IsNil) c.Assert(string(hosts), checker.Equals, string(hostsPost), check.Commentf("Unexpected %s change on second container creation", hostsFile)) // but discovery is on when connecting to non default bridge network network := "anotherbridge" out, err = s.d.Cmd("network", "create", network) c.Assert(err, check.IsNil, check.Commentf(out)) defer s.d.Cmd("network", "rm", network) out, err = s.d.Cmd("network", "connect", network, cid1) c.Assert(err, check.IsNil, check.Commentf(out)) hosts, err = s.d.Cmd("exec", cid1, "cat", hostsFile) c.Assert(err, checker.IsNil) hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) c.Assert(err, checker.IsNil) c.Assert(string(hosts), checker.Equals, 
string(hostsPost), check.Commentf("Unexpected %s change on second network connection", hostsFile)) } func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { testRequires(c, ExecSupport) hostsFile := "/etc/hosts" cstmBridgeNw := "custom-bridge-nw" cstmBridgeNw1 := "custom-bridge-nw1" dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw) assertNwIsAvailable(c, cstmBridgeNw) // run two anonymous containers and store their etc/hosts content out, _ := dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") cid1 := strings.TrimSpace(out) hosts1, err := readContainerFileWithExec(cid1, hostsFile) c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") cid2 := strings.TrimSpace(out) hosts2, err := readContainerFileWithExec(cid2, hostsFile) c.Assert(err, checker.IsNil) // verify first container etc/hosts file has not changed hosts1post, err := readContainerFileWithExec(cid1, hostsFile) c.Assert(err, checker.IsNil) c.Assert(string(hosts1), checker.Equals, string(hosts1post), check.Commentf("Unexpected %s change on anonymous container creation", hostsFile)) // Connect the 2nd container to a new network and verify the // first container /etc/hosts file still hasn't changed. 
dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw1) assertNwIsAvailable(c, cstmBridgeNw1) dockerCmd(c, "network", "connect", cstmBridgeNw1, cid2) hosts2, err = readContainerFileWithExec(cid2, hostsFile) c.Assert(err, checker.IsNil) hosts1post, err = readContainerFileWithExec(cid1, hostsFile) c.Assert(err, checker.IsNil) c.Assert(string(hosts1), checker.Equals, string(hosts1post), check.Commentf("Unexpected %s change on container connect", hostsFile)) // start a named container cName := "AnyName" out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "--name", cName, "busybox", "top") cid3 := strings.TrimSpace(out) // verify that container 1 and 2 can ping the named container dockerCmd(c, "exec", cid1, "ping", "-c", "1", cName) dockerCmd(c, "exec", cid2, "ping", "-c", "1", cName) // Stop named container and verify first two containers' etc/hosts file hasn't changed dockerCmd(c, "stop", cid3) hosts1post, err = readContainerFileWithExec(cid1, hostsFile) c.Assert(err, checker.IsNil) c.Assert(string(hosts1), checker.Equals, string(hosts1post), check.Commentf("Unexpected %s change on name container creation", hostsFile)) hosts2post, err := readContainerFileWithExec(cid2, hostsFile) c.Assert(err, checker.IsNil) c.Assert(string(hosts2), checker.Equals, string(hosts2post), check.Commentf("Unexpected %s change on name container creation", hostsFile)) // verify that container 1 and 2 can't ping the named container now _, _, err = dockerCmdWithError("exec", cid1, "ping", "-c", "1", cName) c.Assert(err, check.NotNil) _, _, err = dockerCmdWithError("exec", cid2, "ping", "-c", "1", cName) c.Assert(err, check.NotNil) } func (s *DockerNetworkSuite) TestDockerNetworkLinkOndefaultNetworkOnly(c *check.C) { // Link feature must work only on default network, and not across networks cnt1 := "container1" cnt2 := "container2" network := "anotherbridge" // Run first container on default network dockerCmd(c, "run", "-d", "--name", cnt1, "busybox", "top") // Create another 
network and run the second container on it dockerCmd(c, "network", "create", network) assertNwIsAvailable(c, network) dockerCmd(c, "run", "-d", "--net", network, "--name", cnt2, "busybox", "top") // Try launching a container on default network, linking to the first container. Must succeed dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt1, cnt1), "busybox", "top") // Try launching a container on default network, linking to the second container. Must fail _, _, err := dockerCmdWithError("run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") c.Assert(err, checker.NotNil) // Connect second container to default network. Now a container on default network can link to it dockerCmd(c, "network", "connect", "bridge", cnt2) dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") } func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { // Verify exposed ports are present in ps output when running a container on // a network managed by a driver which does not provide the default gateway // for the container nwn := "ov" ctn := "bb" port1 := 80 port2 := 443 expose1 := fmt.Sprintf("--expose=%d", port1) expose2 := fmt.Sprintf("--expose=%d", port2) dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) assertNwIsAvailable(c, nwn) dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, expose1, expose2, "busybox", "top") // Check docker ps o/p for last created container reports the unpublished ports unpPort1 := fmt.Sprintf("%d/tcp", port1) unpPort2 := fmt.Sprintf("%d/tcp", port2) out, _ := dockerCmd(c, "ps", "-n=1") // Missing unpublished ports in docker ps output c.Assert(out, checker.Contains, unpPort1) // Missing unpublished ports in docker ps output c.Assert(out, checker.Contains, unpPort2) } func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dnd := "dnd" did := "did" mux := http.NewServeMux() server := 
httptest.NewServer(mux) setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) s.d.StartWithBusybox() _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") c.Assert(err, checker.IsNil) _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "foo", "--ip", "1.1.1.10", "busybox", "sh") c.Assert(err, checker.IsNil) // Kill daemon and restart if err = s.d.cmd.Process.Kill(); err != nil { c.Fatal(err) } server.Close() startTime := time.Now().Unix() if err = s.d.Restart(); err != nil { c.Fatal(err) } lapse := time.Now().Unix() - startTime if lapse > 60 { // In normal scenarios, daemon restart takes ~1 second. // Plugin retry mechanism can delay the daemon start. systemd may not like it. // Avoid accessing plugins during daemon bootup c.Logf("daemon restart took too long : %d seconds", lapse) } // Restart the custom dummy plugin mux = http.NewServeMux() server = httptest.NewServer(mux) setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) // trying to reuse the same ip must succeed _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "bar", "--ip", "1.1.1.10", "busybox", "sh") c.Assert(err, checker.IsNil) } func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { // Verify endpoint MAC address is correctly populated in container's network settings nwn := "ov" ctn := "bb" dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) assertNwIsAvailable(c, nwn) dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, "busybox", "top") mac, err := inspectField(ctn, "NetworkSettings.Networks."+nwn+".MacAddress") c.Assert(err, checker.IsNil) c.Assert(mac, checker.Equals, "a0:b1:c2:d3:e4:f5") } func (s *DockerSuite) TestInspectApiMultipleNetworks(c *check.C) { dockerCmd(c, "network", "create", "mybridge1") dockerCmd(c, "network", "create", "mybridge2") out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), check.IsNil) dockerCmd(c, "network", "connect", "mybridge1", id) 
dockerCmd(c, "network", "connect", "mybridge2", id) body := getInspectBody(c, "v1.20", id) var inspect120 v1p20.ContainerJSON err := json.Unmarshal(body, &inspect120) c.Assert(err, checker.IsNil) versionedIP := inspect120.NetworkSettings.IPAddress body = getInspectBody(c, "v1.21", id) var inspect121 types.ContainerJSON err = json.Unmarshal(body, &inspect121) c.Assert(err, checker.IsNil) c.Assert(inspect121.NetworkSettings.Networks, checker.HasLen, 3) bridge := inspect121.NetworkSettings.Networks["bridge"] c.Assert(bridge.IPAddress, checker.Equals, versionedIP) c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) } func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) { // Run a container on the default network out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // Attach the container to other networks for _, nw := range nws { out, err = d.Cmd("network", "create", nw) c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("network", "connect", nw, cName) c.Assert(err, checker.IsNil, check.Commentf(out)) } } func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) { // Verify container is connected to all the networks for _, nw := range nws { out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(out, checker.Not(checker.Equals), "\n") } } func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { cName := "bb" nwList := []string{"nw1", "nw2", "nw3"} s.d.StartWithBusybox() connectContainerToNetworks(c, s.d, cName, nwList) verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) // Reload daemon s.d.Restart() _, err := s.d.Cmd("start", cName) c.Assert(err, checker.IsNil) verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) } func (s *DockerNetworkSuite) 
TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { cName := "cc" nwList := []string{"nw1", "nw2", "nw3"} s.d.StartWithBusybox() connectContainerToNetworks(c, s.d, cName, nwList) verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) // Kill daemon and restart if err := s.d.cmd.Process.Kill(); err != nil { c.Fatal(err) } s.d.Restart() // Restart container _, err := s.d.Cmd("start", cName) c.Assert(err, checker.IsNil) verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) } func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { out, _ := dockerCmd(c, "network", "create", "one") containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(containerOut)) } func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) s.d.StartWithBusybox() // Run a few containers on host network for i := 0; i < 10; i++ { cName := fmt.Sprintf("hostc-%d", i) out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) } // Kill daemon ungracefully and restart if err := s.d.cmd.Process.Kill(); err != nil { c.Fatal(err) } s.d.Restart() // make sure all the containers are up and running for i := 0; i < 10; i++ { cName := fmt.Sprintf("hostc-%d", i) runningOut, err := s.d.Cmd("inspect", "--format='{{.State.Running}}'", cName) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(runningOut), checker.Equals, "true") } } func (s *DockerNetworkSuite) TestDockerNetworkConnectToHostFromOtherNetwork(c *check.C) { dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") c.Assert(waitRun("container1"), check.IsNil) dockerCmd(c, "network", "disconnect", "bridge", "container1") out, _, err := dockerCmdWithError("network", "connect", "host", "container1") c.Assert(err, checker.NotNil, 
check.Commentf(out)) c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) } func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromHost(c *check.C) { dockerCmd(c, "run", "-d", "--name", "container1", "--net=host", "busybox", "top") c.Assert(waitRun("container1"), check.IsNil) out, _, err := dockerCmdWithError("network", "disconnect", "host", "container1") c.Assert(err, checker.NotNil, check.Commentf("Should err out disconnect from host")) c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) } func (s *DockerNetworkSuite) TestDockerNetworkConnectWithPortMapping(c *check.C) { dockerCmd(c, "network", "create", "test1") dockerCmd(c, "run", "-d", "--name", "c1", "-p", "5000:5000", "busybox", "top") c.Assert(waitRun("c1"), check.IsNil) dockerCmd(c, "network", "connect", "test1", "c1") } func (s *DockerNetworkSuite) TestDockerNetworkConnectWithMac(c *check.C) { macAddress := "02:42:ac:11:00:02" dockerCmd(c, "network", "create", "mynetwork") dockerCmd(c, "run", "--name=test", "-d", "--mac-address", macAddress, "busybox", "top") c.Assert(waitRun("test"), check.IsNil) mac1, err := inspectField("test", "NetworkSettings.Networks.bridge.MacAddress") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(mac1), checker.Equals, macAddress) dockerCmd(c, "network", "connect", "mynetwork", "test") mac2, err := inspectField("test", "NetworkSettings.Networks.mynetwork.MacAddress") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(mac2), checker.Not(checker.Equals), strings.TrimSpace(mac1)) } func (s *DockerNetworkSuite) TestDockerNetworkInspectCreatedContainer(c *check.C) { dockerCmd(c, "create", "--name", "test", "busybox") networks, err := inspectField("test", "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should return 'bridge' network")) } func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *check.C) { dockerCmd(c, "network", 
"create", "test") dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") c.Assert(waitRun("foo"), checker.IsNil) dockerCmd(c, "network", "connect", "test", "foo") dockerCmd(c, "restart", "foo") networks, err := inspectField("foo", "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should contain 'bridge' network")) c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) } func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { dockerCmd(c, "network", "create", "test") dockerCmd(c, "create", "--name=foo", "busybox", "top") dockerCmd(c, "network", "connect", "test", "foo") networks, err := inspectField("foo", "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) // Restart docker daemon to test the config has persisted to disk s.d.Restart() networks, err = inspectField("foo", "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) // start the container and test if we can ping it from another container in the same network dockerCmd(c, "start", "foo") c.Assert(waitRun("foo"), checker.IsNil) ip, err := inspectField("foo", "NetworkSettings.Networks.test.IPAddress") ip = strings.TrimSpace(ip) dockerCmd(c, "run", "--net=test", "busybox", "sh", "-c", fmt.Sprintf("ping -c 1 %s", ip)) dockerCmd(c, "stop", "foo") // Test disconnect dockerCmd(c, "network", "disconnect", "test", "foo") networks, err = inspectField("foo", "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) // Restart docker daemon to test the config has persisted to disk s.d.Restart() networks, err = inspectField("foo", "NetworkSettings.Networks") c.Assert(err, 
checker.IsNil)
	c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network"))
}

// TestDockerNetworkConnectPreferredIP verifies that user-supplied IPv4/IPv6
// addresses (--ip/--ip6) are honored on run and on network connect, survive a
// container stop/start cycle, and are rejected on the default bridge network.
func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) {
	// create two networks
	dockerCmd(c, "network", "create", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "n0")
	assertNwIsAvailable(c, "n0")

	dockerCmd(c, "network", "create", "--subnet=172.30.0.0/16", "--ip-range=172.30.5.0/24", "--subnet=2001:db8:abcd::/64", "--ip-range=2001:db8:abcd::/80", "n1")
	assertNwIsAvailable(c, "n1")

	// run a container on first network specifying the ip addresses
	dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top")
	c.Assert(waitRun("c0"), check.IsNil)
	verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988")
	verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988")

	// connect the container to the second network specifying the preferred ip addresses
	dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n1", "c0")
	verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544")
	verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544")

	// Stop and restart the container
	dockerCmd(c, "stop", "c0")
	dockerCmd(c, "start", "c0")

	// verify preferred addresses are applied
	verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988")
	verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988")
	verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544")
	verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544")

	// Still it should fail to connect to the default network with a specified IP (whatever ip)
	out, _, err := dockerCmdWithError("network", "connect", "--ip", "172.21.55.44", "bridge", "c0")
	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
	c.Assert(out, checker.Contains,
runconfig.ErrUnsupportedNetworkAndIP.Error())
}

// TestDockerNetworkConnectPreferredIPStoppedContainer verifies that preferred
// IP addresses can be configured while the container is stopped and are
// applied (and kept) across start/stop.
func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIPStoppedContainer(c *check.C) {
	// create a container
	dockerCmd(c, "create", "--name", "c0", "busybox", "top")

	// create a network
	dockerCmd(c, "network", "create", "--subnet=172.30.0.0/16", "--subnet=2001:db8:abcd::/64", "n0")
	assertNwIsAvailable(c, "n0")

	// connect the container to the network specifying an ip addresses
	dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n0", "c0")
	verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544")

	// start the container, verify config has not changed and ip addresses are assigned
	dockerCmd(c, "start", "c0")
	c.Assert(waitRun("c0"), check.IsNil)
	verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544")
	verifyIPAddresses(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544")

	// stop the container and check ip config has not changed
	dockerCmd(c, "stop", "c0")
	verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544")
}

// TestDockerNetworkUnsupportedPreferredIP verifies that --ip/--ip6 are
// rejected on predefined networks and on networks without a user-defined
// subnet.
func (s *DockerNetworkSuite) TestDockerNetworkUnsupportedPreferredIP(c *check.C) {
	// preferred IP is not supported on predefined networks
	for _, mode := range []string{"none", "host", "bridge", "default"} {
		checkUnsupportedNetworkAndIP(c, mode)
	}

	// preferred IP is not supported on networks with no user defined subnets
	dockerCmd(c, "network", "create", "n0")
	assertNwIsAvailable(c, "n0")

	out, _, err := dockerCmdWithError("run", "-d", "--ip", "172.28.99.88", "--net", "n0", "busybox", "top")
	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
	c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error())

	out, _, err = dockerCmdWithError("run", "-d", "--ip6", "2001:db8:1234::9988", "--net", "n0", "busybox", "top")
	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
	c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error())

	dockerCmd(c, "network",
"rm", "n0") assertNwNotAvailable(c, "n0") } func checkUnsupportedNetworkAndIP(c *check.C, nwMode string) { out, _, err := dockerCmdWithError("run", "-d", "--net", nwMode, "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) } func verifyIPAddressConfig(c *check.C, cName, nwname, ipv4, ipv6 string) { if ipv4 != "" { out, err := inspectField(cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv4Address", nwname)) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), check.Equals, ipv4) } if ipv6 != "" { out, err := inspectField(cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv6Address", nwname)) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), check.Equals, ipv6) } } func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) { out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", nwname), cName) c.Assert(strings.TrimSpace(out), check.Equals, ipv4) out, _ = dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.GlobalIPv6Address }}'", nwname), cName) c.Assert(strings.TrimSpace(out), check.Equals, ipv6) } func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectLink(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "foo1") dockerCmd(c, "network", "create", "-d", "bridge", "foo2") dockerCmd(c, "run", "-d", "--net=foo1", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) // run a container in user-defined network udlinkNet with a link for an existing container // and a link for a container that doesnt exist dockerCmd(c, "run", "-d", "--net=foo1", "--name=second", "--link=first:FirstInFoo1", "--link=third:bar", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // ping to first and its alias 
FirstInFoo1 must succeed _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") c.Assert(err, check.IsNil) // connect first container to foo2 network dockerCmd(c, "network", "connect", "foo2", "first") // connect second container to foo2 network with a different alias for first container dockerCmd(c, "network", "connect", "--link=first:FirstInFoo2", "foo2", "second") // ping the new alias in network foo2 _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") c.Assert(err, check.IsNil) // disconnect first container from foo1 network dockerCmd(c, "network", "disconnect", "foo1", "first") // link in foo1 network must fail _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") c.Assert(err, check.NotNil) // link in foo2 network must succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") c.Assert(err, check.IsNil) } // #19100 This is a deprecated feature test, it should be remove in Docker 1.12 func (s *DockerNetworkSuite) TestDockerNetworkStartAPIWithHostconfig(c *check.C) { netName := "test" conName := "foo" dockerCmd(c, "network", "create", netName) dockerCmd(c, "create", "--name", conName, "busybox", "top") config := map[string]interface{}{ "HostConfig": map[string]interface{}{ "NetworkMode": netName, }, } _, _, err := sockRequest("POST", "/containers/"+conName+"/start", config) c.Assert(err, checker.IsNil) c.Assert(waitRun(conName), checker.IsNil) networks, err := inspectField(conName, "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(networks, checker.Contains, netName, check.Commentf(fmt.Sprintf("Should contain '%s' network", netName))) c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) } func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *check.C) 
{ netWorkName1 := "test1" netWorkName2 := "test2" containerName := "foo" dockerCmd(c, "network", "create", netWorkName1) dockerCmd(c, "network", "create", netWorkName2) dockerCmd(c, "create", "--name", containerName, "busybox", "top") dockerCmd(c, "network", "connect", netWorkName1, containerName) dockerCmd(c, "network", "connect", netWorkName2, containerName) dockerCmd(c, "network", "disconnect", "bridge", containerName) dockerCmd(c, "start", containerName) c.Assert(waitRun(containerName), checker.IsNil) networks, err := inspectField(containerName, "NetworkSettings.Networks") c.Assert(err, checker.IsNil) c.Assert(networks, checker.Contains, netWorkName1, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName1))) c.Assert(networks, checker.Contains, netWorkName2, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName2))) c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) } func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "net1") dockerCmd(c, "network", "create", "-d", "bridge", "net2") dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // ping first container and its alias _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) // connect first container to net2 network dockerCmd(c, "network", "connect", "--alias=bar", "net2", "first") // connect second container to foo2 network with a different alias for first container dockerCmd(c, "network", "connect", "net2", "second") // ping the new alias in 
network foo2 _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") c.Assert(err, check.IsNil) // disconnect first container from net1 network dockerCmd(c, "network", "disconnect", "net1", "first") // ping to net1 scoped alias "foo" must fail _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.NotNil) // ping to net2 scoped alias "bar" must still succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") c.Assert(err, check.IsNil) // verify the alias option is rejected when running on predefined network out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) // verify the alias option is rejected when connecting to predefined network out, _, err = dockerCmdWithError("network", "connect", "--alias=any", "bridge", "first") c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) } func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") c.Assert(waitRun("c1.net1"), check.IsNil) dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") c.Assert(waitRun("c2.net1"), check.IsNil) // ping first container by its unqualified name _, _, err := dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1") c.Assert(err, check.IsNil) // ping first container by its qualified name _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1") c.Assert(err, check.IsNil) // ping with first qualified name masked by an additional domain. 
should fail _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1.google.com") c.Assert(err, check.NotNil) } func (s *DockerSuite) TestEmbeddedDNSInvalidInput(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "nw1") // Sending garbge to embedded DNS shouldn't crash the daemon dockerCmd(c, "run", "-i", "--net=nw1", "--name=c1", "debian:jessie", "bash", "-c", "echo InvalidQuery > /dev/udp/127.0.0.11/53") } func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) { dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top") c.Assert(waitRun("bb"), check.IsNil) ns0, _ := dockerCmd(c, "inspect", "--format='{{ .NetworkSettings.Networks.bridge }}'", "bb") // A failing redundant network connect should not alter current container's endpoint settings _, _, err := dockerCmdWithError("network", "connect", "bridge", "bb") c.Assert(err, check.NotNil) ns1, _ := dockerCmd(c, "inspect", "--format='{{ .NetworkSettings.Networks.bridge }}'", "bb") c.Assert(ns1, check.Equals, ns0) } docker-1.10.3/integration-cli/docker_cli_oom_killed_test.go000066400000000000000000000017561267010174400240300ustar00rootroot00000000000000// +build !windows package main import ( "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestInspectOomKilledTrue(c *check.C) { testRequires(c, DaemonIsLinux, memoryLimitSupport) name := "testoomkilled" _, exitCode, _ := dockerCmdWithError("run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") c.Assert(exitCode, checker.Equals, 137, check.Commentf("OOM exit should be 137")) oomKilled, err := inspectField(name, "State.OOMKilled") c.Assert(oomKilled, checker.Equals, "true") c.Assert(err, checker.IsNil) } func (s *DockerSuite) TestInspectOomKilledFalse(c *check.C) { testRequires(c, DaemonIsLinux, memoryLimitSupport) name := "testoomkilled" dockerCmd(c, 
"run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "echo hello world") oomKilled, err := inspectField(name, "State.OOMKilled") c.Assert(oomKilled, checker.Equals, "false") c.Assert(err, checker.IsNil) } docker-1.10.3/integration-cli/docker_cli_pause_test.go000066400000000000000000000041211267010174400230140ustar00rootroot00000000000000package main import ( "fmt" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestPause(c *check.C) { testRequires(c, DaemonIsLinux) defer unpauseAllContainers() name := "testeventpause" dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") dockerCmd(c, "pause", name) pausedContainers, err := getSliceOfPausedContainers() c.Assert(err, checker.IsNil) c.Assert(len(pausedContainers), checker.Equals, 1) dockerCmd(c, "unpause", name) out, _ := dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") actions := eventActionsByIDAndType(c, events, name, "container") c.Assert(actions[len(actions)-2], checker.Equals, "pause") c.Assert(actions[len(actions)-1], checker.Equals, "unpause") } func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { testRequires(c, DaemonIsLinux) defer unpauseAllContainers() containers := []string{ "testpausewithmorecontainers1", "testpausewithmorecontainers2", } for _, name := range containers { dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") } dockerCmd(c, append([]string{"pause"}, containers...)...) pausedContainers, err := getSliceOfPausedContainers() c.Assert(err, checker.IsNil) c.Assert(len(pausedContainers), checker.Equals, len(containers)) dockerCmd(c, append([]string{"unpause"}, containers...)...) 
out, _ := dockerCmd(c, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) events := strings.Split(strings.TrimSpace(out), "\n") for _, name := range containers { actions := eventActionsByIDAndType(c, events, name, "container") c.Assert(actions[len(actions)-2], checker.Equals, "pause") c.Assert(actions[len(actions)-1], checker.Equals, "unpause") } } func (s *DockerSuite) TestPauseFailsOnWindows(c *check.C) { testRequires(c, DaemonIsWindows) dockerCmd(c, "run", "-d", "--name=test", "busybox", "sleep 3") out, _, _ := dockerCmdWithError("pause", "test") c.Assert(out, checker.Contains, "Windows: Containers cannot be paused") } docker-1.10.3/integration-cli/docker_cli_port_test.go000066400000000000000000000245321267010174400226730ustar00rootroot00000000000000package main import ( "fmt" "net" "regexp" "sort" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestPortList(c *check.C) { testRequires(c, DaemonIsLinux) // one port out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", firstID, "80") err := assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "port", firstID) err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) // Port list is not correct c.Assert(err, checker.IsNil) dockerCmd(c, "rm", "-f", firstID) // three port out, _ = dockerCmd(c, "run", "-d", "-p", "9876:80", "-p", "9877:81", "-p", "9878:82", "busybox", "top") ID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID, "80") err = assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9876", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878"}) // Port list is not correct c.Assert(err, checker.IsNil) dockerCmd(c, 
"rm", "-f", ID) // more and one port mapped to the same container port out, _ = dockerCmd(c, "run", "-d", "-p", "9876:80", "-p", "9999:80", "-p", "9877:81", "-p", "9878:82", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID, "80") err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) // Port list is not correct c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9876", "80/tcp -> 0.0.0.0:9999", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878"}) // Port list is not correct c.Assert(err, checker.IsNil) dockerCmd(c, "rm", "-f", ID) testRange := func() { // host port ranges used IDs := make([]string, 3) for i := 0; i < 3; i++ { out, _ = dockerCmd(c, "run", "-d", "-p", "9090-9092:80", "busybox", "top") IDs[i] = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", IDs[i]) err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) // Port list is not correct c.Assert(err, checker.IsNil) } // test port range exhaustion out, _, err = dockerCmdWithError("run", "-d", "-p", "9090-9092:80", "busybox", "top") // Exhausted port range did not return an error c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) for i := 0; i < 3; i++ { dockerCmd(c, "rm", "-f", IDs[i]) } } testRange() // Verify we ran re-use port ranges after they are no longer in use. testRange() // test invalid port ranges for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { out, _, err = dockerCmdWithError("run", "-d", "-p", invalidRange, "busybox", "top") // Port range should have returned an error c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) } // test host range:container range spec. 
out, _ = dockerCmd(c, "run", "-d", "-p", "9800-9803:80-83", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9800", "81/tcp -> 0.0.0.0:9801", "82/tcp -> 0.0.0.0:9802", "83/tcp -> 0.0.0.0:9803"}) // Port list is not correct c.Assert(err, checker.IsNil) dockerCmd(c, "rm", "-f", ID) // test mixing protocols in same port range out, _ = dockerCmd(c, "run", "-d", "-p", "8000-8080:80", "-p", "8000-8080:80/udp", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:8000", "80/udp -> 0.0.0.0:8000"}) // Port list is not correct c.Assert(err, checker.IsNil) dockerCmd(c, "rm", "-f", ID) } func assertPortList(c *check.C, out string, expected []string) error { lines := strings.Split(strings.Trim(out, "\n "), "\n") if len(lines) != len(expected) { return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) } sort.Strings(lines) sort.Strings(expected) for i := 0; i < len(expected); i++ { if lines[i] != expected[i] { return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") } } return nil } func stopRemoveContainer(id string, c *check.C) { dockerCmd(c, "rm", "-f", id) } func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) { testRequires(c, DaemonIsLinux) // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports port1 := 80 port2 := 443 expose1 := fmt.Sprintf("--expose=%d", port1) expose2 := fmt.Sprintf("--expose=%d", port2) dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") // Check docker ps o/p for last created container reports the unpublished ports unpPort1 := fmt.Sprintf("%d/tcp", port1) unpPort2 := fmt.Sprintf("%d/tcp", port2) out, _ := dockerCmd(c, "ps", "-n=1") // Missing unpublished ports in docker ps output c.Assert(out, checker.Contains, unpPort1) // Missing unpublished ports in docker ps 
output c.Assert(out, checker.Contains, unpPort2) // Run the container forcing to publish the exposed ports dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") // Check docker ps o/p for last created container reports the exposed ports in the port bindings expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output c.Assert(expBndRegx1.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort1: %s", out, unpPort1)) // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output c.Assert(expBndRegx2.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort2: %s", out, unpPort2)) // Run the container specifying explicit port bindings for the exposed ports offset := 10000 pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5") id := strings.TrimSpace(out) // Check docker ps o/p for last created container reports the specified port mappings expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) expBnd2 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding (expBnd1) in docker ps output c.Assert(out, checker.Contains, expBnd1) // Cannot find expected port binding (expBnd2) in docker ps output c.Assert(out, checker.Contains, expBnd2) // Remove container now otherwise it will interfere with next test stopRemoveContainer(id, c) // Run the container with explicit port bindings and no exposed ports out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") id = strings.TrimSpace(out) // Check docker ps o/p for last created container 
reports the specified port mappings out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding (expBnd1) in docker ps output c.Assert(out, checker.Contains, expBnd1) // Cannot find expected port binding (expBnd2) in docker ps output c.Assert(out, checker.Contains, expBnd2) // Remove container now otherwise it will interfere with next test stopRemoveContainer(id, c) // Run the container with one unpublished exposed port and one explicit port binding dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") // Check docker ps o/p for last created container reports the specified unpublished port and port mapping out, _ = dockerCmd(c, "ps", "-n=1") // Missing unpublished exposed ports (unpPort1) in docker ps output c.Assert(out, checker.Contains, unpPort1) // Missing port binding (expBnd2) in docker ps output c.Assert(out, checker.Contains, expBnd2) } func (s *DockerSuite) TestPortHostBinding(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "nc", "-l", "-p", "80") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", firstID, "80") err := assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct c.Assert(err, checker.IsNil) dockerCmd(c, "run", "--net=host", "busybox", "nc", "localhost", "9876") dockerCmd(c, "rm", "-f", firstID) out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") // Port is still bound after the Container is removed c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) } func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", "nc", "-l", "-p", "80") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", firstID, "80") _, exposedPort, err := net.SplitHostPort(out) c.Assert(err, checker.IsNil, check.Commentf("out: %s", out)) dockerCmd(c, 
"run", "--net=host", "busybox", "nc", "localhost", strings.TrimSpace(exposedPort)) dockerCmd(c, "rm", "-f", firstID) out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", strings.TrimSpace(exposedPort)) // Port is still bound after the Container is removed c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) } func (s *DockerSuite) TestPortBindingOnSandbox(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") c.Assert(waitRun("c1"), check.IsNil) _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") c.Assert(err, check.NotNil, check.Commentf("Port mapping on internal network is expected to fail")) // Connect container to another normal bridge network dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") dockerCmd(c, "network", "connect", "foo-net", "c1") _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") c.Assert(err, check.IsNil, check.Commentf("Port mapping on the new network is expected to succeed")) } docker-1.10.3/integration-cli/docker_cli_proxy_test.go000066400000000000000000000031731267010174400230660ustar00rootroot00000000000000package main import ( "net" "os/exec" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestCliProxyDisableProxyUnixSock(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. 
cmd := exec.Command(dockerBinary, "info") cmd.Env = appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999") out, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.IsNil, check.Commentf("%v", out)) } // Can't use localhost here since go has a special case to not use proxy if connecting to localhost // See https://golang.org/pkg/net/http/#ProxyFromEnvironment func (s *DockerDaemonSuite) TestCliProxyProxyTCPSock(c *check.C) { testRequires(c, SameHostDaemon) // get the IP to use to connect since we can't use localhost addrs, err := net.InterfaceAddrs() c.Assert(err, checker.IsNil) var ip string for _, addr := range addrs { sAddr := addr.String() if !strings.Contains(sAddr, "127.0.0.1") { addrArr := strings.Split(sAddr, "/") ip = addrArr[0] break } } c.Assert(ip, checker.Not(checker.Equals), "") err = s.d.Start("-H", "tcp://"+ip+":2375") c.Assert(err, checker.IsNil) cmd := exec.Command(dockerBinary, "info") cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"} out, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.NotNil, check.Commentf("%v", out)) // Test with no_proxy cmd.Env = append(cmd.Env, "NO_PROXY="+ip) out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "info")) c.Assert(err, checker.IsNil, check.Commentf("%v", out)) } docker-1.10.3/integration-cli/docker_cli_ps_test.go000066400000000000000000000730261267010174400223330ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "sort" "strconv" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" ) func (s *DockerSuite) TestPsListContainersBase(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "run", "-d", "busybox", "top") secondID := strings.TrimSpace(out) // not long running out, _ = dockerCmd(c, "run", "-d", "busybox", "true") 
thirdID := strings.TrimSpace(out)

	out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
	fourthID := strings.TrimSpace(out)

	// make sure the second is running
	c.Assert(waitRun(secondID), checker.IsNil)

	// make sure third one is not running
	dockerCmd(c, "wait", thirdID)

	// make sure the fourth is running
	c.Assert(waitRun(fourthID), checker.IsNil)

	// all
	out, _ = dockerCmd(c, "ps", "-a")
	c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out))

	// running
	out, _ = dockerCmd(c, "ps")
	c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out))

	// limit
	out, _ = dockerCmd(c, "ps", "-n=2", "-a")
	expected := []string{fourthID, thirdID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out))

	out, _ = dockerCmd(c, "ps", "-n=2")
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out))

	// filter since
	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a")
	expected = []string{fourthID, thirdID, secondID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out))

	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID)
	expected = []string{fourthID, secondID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))

	// filter before
	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a")
	expected = []string{thirdID, secondID, firstID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out))

	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID)
	expected = []string{secondID, firstID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out))

	// filter since & before
	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a")
	expected = []string{thirdID, secondID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out))

	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID)
	expected = []string{secondID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out))

	// filter since & limit
	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a")
	expected = []string{fourthID, thirdID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))

	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2")
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out))

	// filter before & limit
	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a")
	expected = []string{thirdID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))

	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1")
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out))

	// filter since & filter before & limit
	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a")
	expected = []string{thirdID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))

	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1")
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out))
}

// FIXME remove this for 1.12 as --since and --before are deprecated
func (s *DockerSuite) TestPsListContainersDeprecatedSinceAndBefore(c *check.C) {
	out, _ := runSleepingContainer(c, "-d")
	firstID := strings.TrimSpace(out)

	out, _ = runSleepingContainer(c, "-d")
	secondID := strings.TrimSpace(out)

	// not long running
	out, _ = dockerCmd(c, "run", "-d", "busybox", "true")
	thirdID := strings.TrimSpace(out)

	out, _ = runSleepingContainer(c, "-d")
	fourthID := strings.TrimSpace(out)

	// make sure the second is running
	c.Assert(waitRun(secondID), checker.IsNil)

	// make sure third one is not running
	dockerCmd(c, "wait", thirdID)

	// make sure the fourth is running
	c.Assert(waitRun(fourthID), checker.IsNil)

	// since
	out, _ = dockerCmd(c, "ps", "--since="+firstID, "-a")
	expected := []string{fourthID, thirdID, secondID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE & ALL: Container list is not in the correct order: %v \n%s", expected, out))

	out, _ = dockerCmd(c, "ps", "--since="+firstID)
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE: Container list is not in the correct order: %v \n%s", expected, out))

	// before
	out, _ = dockerCmd(c, "ps", "--before="+thirdID, "-a")
	expected = []string{secondID, firstID}
	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE & ALL: Container list is not in the correct order: %v \n%s", expected, out))

	out, _ = dockerCmd(c,
"ps", "--before="+thirdID) c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE: Container list is not in the correct order: %v \n%s", expected, out)) // since & before out, _ = dockerCmd(c, "ps", "--since="+firstID, "--before="+fourthID, "-a") expected = []string{thirdID, secondID} c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE, BEFORE & ALL: Container list is not in the correct order: %v \n%s", expected, out)) out, _ = dockerCmd(c, "ps", "--since="+firstID, "--before="+fourthID) c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE, BEFORE: Container list is not in the correct order: %v \n%s", expected, out)) // since & limit out, _ = dockerCmd(c, "ps", "--since="+firstID, "-n=2", "-a") expected = []string{fourthID, thirdID} c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE, LIMIT & ALL: Container list is not in the correct order: %v \n%s", expected, out)) out, _ = dockerCmd(c, "ps", "--since="+firstID, "-n=2") c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE, LIMIT: Container list is not in the correct order: %v \n%s", expected, out)) // before & limit out, _ = dockerCmd(c, "ps", "--before="+fourthID, "-n=1", "-a") expected = []string{thirdID} c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE, LIMIT & ALL: Container list is not in the correct order: %v \n%s", expected, out)) out, _ = dockerCmd(c, "ps", "--before="+fourthID, "-n=1") c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE, LIMIT: Container list is not in the correct order: %v \n%s", expected, out)) // since & before & limit out, _ = dockerCmd(c, "ps", "--since="+firstID, "--before="+fourthID, "-n=1", "-a") expected = []string{thirdID} c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE, BEFORE, 
LIMIT & ALL: Container list is not in the correct order: %v \n%s", expected, out)) } func assertContainerList(out string, expected []string) bool { lines := strings.Split(strings.Trim(out, "\n "), "\n") // FIXME remove this for 1.12 as --since and --before are deprecated // This is here to remove potential Warning: lines (printed out with deprecated flags) for i := 0; i < 2; i++ { if strings.Contains(lines[0], "Warning:") { lines = lines[1:] } } if len(lines)-1 != len(expected) { return false } containerIDIndex := strings.Index(lines[0], "CONTAINER ID") for i := 0; i < len(expected); i++ { foundID := lines[i+1][containerIDIndex : containerIDIndex+12] if foundID != expected[i][:12] { return false } } return true } func (s *DockerSuite) TestPsListContainersSize(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "busybox", "echo", "hello") baseOut, _ := dockerCmd(c, "ps", "-s", "-n=1") baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") baseSizeIndex := strings.Index(baseLines[0], "SIZE") baseFoundsize := baseLines[1][baseSizeIndex:] baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0]) c.Assert(err, checker.IsNil) name := "test_size" out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") id, err := getIDByName(name) c.Assert(err, checker.IsNil) runCmd := exec.Command(dockerBinary, "ps", "-s", "-n=1") wait := make(chan struct{}) go func() { out, _, err = runCommandWithOutput(runCmd) close(wait) }() select { case <-wait: case <-time.After(3 * time.Second): c.Fatalf("Calling \"docker ps -s\" timed out!") } c.Assert(err, checker.IsNil) lines := strings.Split(strings.Trim(out, "\n "), "\n") c.Assert(lines, checker.HasLen, 2, check.Commentf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))) sizeIndex := strings.Index(lines[0], "SIZE") idIndex := strings.Index(lines[0], "CONTAINER ID") foundID := lines[1][idIndex : idIndex+12] c.Assert(foundID, checker.Equals, id[:12], 
check.Commentf("Expected id %s, got %s", id[:12], foundID)) expectedSize := fmt.Sprintf("%d B", (2 + baseBytes)) foundSize := lines[1][sizeIndex:] c.Assert(foundSize, checker.Contains, expectedSize, check.Commentf("Expected size %q, got %q", expectedSize, foundSize)) } func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { testRequires(c, DaemonIsLinux) // start exited container out, _ := dockerCmd(c, "run", "-d", "busybox") firstID := strings.TrimSpace(out) // make sure the exited container is not running dockerCmd(c, "wait", firstID) // start running container out, _ = dockerCmd(c, "run", "-itd", "busybox") secondID := strings.TrimSpace(out) // filter containers by exited out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited") containerOut := strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, firstID) out, _ = dockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running") containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, secondID) out, _, _ = dockerCmdWithTimeout(time.Second*60, "ps", "-a", "-q", "--filter=status=rubbish") c.Assert(out, checker.Contains, "Unrecognised filter value for status", check.Commentf("Expected error response due to invalid status filter output: %q", out)) // pause running container out, _ = dockerCmd(c, "run", "-itd", "busybox") pausedID := strings.TrimSpace(out) dockerCmd(c, "pause", pausedID) // make sure the container is unpaused to let the daemon stop it properly defer func() { dockerCmd(c, "unpause", pausedID) }() out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused") containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, pausedID) } func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) { testRequires(c, DaemonIsLinux) // start container out, _ := dockerCmd(c, "run", "-d", "busybox") firstID := strings.TrimSpace(out) // start another container dockerCmd(c, "run", "-d", "busybox", "top") // filter containers 
by id out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=id="+firstID) containerOut := strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, firstID[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)) } func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { testRequires(c, DaemonIsLinux) // start container out, _ := dockerCmd(c, "run", "-d", "--name=a_name_to_match", "busybox") firstID := strings.TrimSpace(out) // start another container dockerCmd(c, "run", "-d", "--name=b_name_to_match", "busybox", "top") // filter containers by name out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=name=a_name_to_match") containerOut := strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, firstID[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)) } // Test for the ancestor filter for ps. // There is also the same test but with image:tag@digest in docker_cli_by_digest_test.go // // What the test setups : // - Create 2 image based on busybox using the same repository but different tags // - Create an image based on the previous image (images_ps_filter_test2) // - Run containers for each of those image (busybox, images_ps_filter_test1, images_ps_filter_test2) // - Filter them out :P func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { testRequires(c, DaemonIsLinux) // Build images imageName1 := "images_ps_filter_test1" imageID1, err := buildImage(imageName1, `FROM busybox LABEL match me 1`, true) c.Assert(err, checker.IsNil) imageName1Tagged := "images_ps_filter_test1:tag" imageID1Tagged, err := buildImage(imageName1Tagged, `FROM busybox LABEL match me 1 tagged`, true) c.Assert(err, checker.IsNil) imageName2 := "images_ps_filter_test2" imageID2, err := buildImage(imageName2, fmt.Sprintf(`FROM %s LABEL match me 2`, imageName1), true) c.Assert(err, checker.IsNil) // start containers out, _ := dockerCmd(c, "run", "-d", 
"busybox", "echo", "hello") firstID := strings.TrimSpace(out) // start another container out, _ = dockerCmd(c, "run", "-d", "busybox", "echo", "hello") secondID := strings.TrimSpace(out) // start third container out, _ = dockerCmd(c, "run", "-d", imageName1, "echo", "hello") thirdID := strings.TrimSpace(out) // start fourth container out, _ = dockerCmd(c, "run", "-d", imageName1Tagged, "echo", "hello") fourthID := strings.TrimSpace(out) // start fifth container out, _ = dockerCmd(c, "run", "-d", imageName2, "echo", "hello") fifthID := strings.TrimSpace(out) var filterTestSuite = []struct { filterName string expectedIDs []string }{ // non existent stuff {"nonexistent", []string{}}, {"nonexistent:tag", []string{}}, // image {"busybox", []string{firstID, secondID, thirdID, fourthID, fifthID}}, {imageName1, []string{thirdID, fifthID}}, {imageName2, []string{fifthID}}, // image:tag {fmt.Sprintf("%s:latest", imageName1), []string{thirdID, fifthID}}, {imageName1Tagged, []string{fourthID}}, // short-id {stringid.TruncateID(imageID1), []string{thirdID, fifthID}}, {stringid.TruncateID(imageID2), []string{fifthID}}, // full-id {imageID1, []string{thirdID, fifthID}}, {imageID1Tagged, []string{fourthID}}, {imageID2, []string{fifthID}}, } for _, filter := range filterTestSuite { out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs) } // Multiple ancestor filter out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) } func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { actualIDs := []string{} if out != "" { actualIDs = strings.Split(out[:len(out)-1], "\n") } sort.Strings(actualIDs) sort.Strings(expectedIDs) c.Assert(actualIDs, checker.HasLen, 
len(expectedIDs), check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v:%v, got %v:%v", filterName, len(expectedIDs), expectedIDs, len(actualIDs), actualIDs)) if len(expectedIDs) > 0 { same := true for i := range expectedIDs { if actualIDs[i] != expectedIDs[i] { c.Logf("%s, %s", actualIDs[i], expectedIDs[i]) same = false break } } c.Assert(same, checker.Equals, true, check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v, got %v", filterName, expectedIDs, actualIDs)) } } func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { testRequires(c, DaemonIsLinux) // start container out, _ := dockerCmd(c, "run", "-d", "-l", "match=me", "-l", "second=tag", "busybox") firstID := strings.TrimSpace(out) // start another container out, _ = dockerCmd(c, "run", "-d", "-l", "match=me too", "busybox") secondID := strings.TrimSpace(out) // start third container out, _ = dockerCmd(c, "run", "-d", "-l", "nomatch=me", "busybox") thirdID := strings.TrimSpace(out) // filter containers by exact match out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") containerOut := strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)) // filter containers by two labels out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)) // filter containers by two labels, but expect not found because of AND behavior out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, "", check.Commentf("Expected nothing, got %s for exited filter, output: %q", 
containerOut, out)) // filter containers by exact key out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Contains, firstID) c.Assert(containerOut, checker.Contains, secondID) c.Assert(containerOut, checker.Not(checker.Contains), thirdID) } func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") dockerCmd(c, "run", "--name", "zero1", "busybox", "true") firstZero, err := getIDByName("zero1") c.Assert(err, checker.IsNil) dockerCmd(c, "run", "--name", "zero2", "busybox", "true") secondZero, err := getIDByName("zero2") c.Assert(err, checker.IsNil) out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false") c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) firstNonZero, err := getIDByName("nonzero1") c.Assert(err, checker.IsNil) out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false") c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) secondNonZero, err := getIDByName("nonzero2") c.Assert(err, checker.IsNil) // filter containers by exited=0 out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") ids := strings.Split(strings.TrimSpace(out), "\n") c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d: %s", len(ids), out)) c.Assert(ids[0], checker.Equals, secondZero, check.Commentf("First in list should be %q, got %q", secondZero, ids[0])) c.Assert(ids[1], checker.Equals, firstZero, check.Commentf("Second in list should be %q, got %q", firstZero, ids[1])) out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") ids = strings.Split(strings.TrimSpace(out), "\n") c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d", len(ids))) c.Assert(ids[0], checker.Equals, secondNonZero, 
check.Commentf("First in list should be %q, got %q", secondNonZero, ids[0])) c.Assert(ids[1], checker.Equals, firstNonZero, check.Commentf("Second in list should be %q, got %q", firstNonZero, ids[1])) } func (s *DockerSuite) TestPsRightTagName(c *check.C) { testRequires(c, DaemonIsLinux) tag := "asybox:shmatest" dockerCmd(c, "tag", "busybox", tag) var id1 string out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id1 = strings.TrimSpace(string(out)) var id2 string out, _ = dockerCmd(c, "run", "-d", tag, "top") id2 = strings.TrimSpace(string(out)) var imageID string out, _ = dockerCmd(c, "inspect", "-f", "{{.Id}}", "busybox") imageID = strings.TrimSpace(string(out)) var id3 string out, _ = dockerCmd(c, "run", "-d", imageID, "top") id3 = strings.TrimSpace(string(out)) out, _ = dockerCmd(c, "ps", "--no-trunc") lines := strings.Split(strings.TrimSpace(string(out)), "\n") // skip header lines = lines[1:] c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running container, got %d", len(lines))) for _, line := range lines { f := strings.Fields(line) switch f[0] { case id1: c.Assert(f[1], checker.Equals, "busybox", check.Commentf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])) case id2: c.Assert(f[1], checker.Equals, tag, check.Commentf("Expected %s tag for id %s, got %s", tag, id2, f[1])) case id3: c.Assert(f[1], checker.Equals, imageID, check.Commentf("Expected %s imageID for id %s, got %s", tag, id3, f[1])) default: c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) } } } func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name=first", "-d", "busybox", "top") dockerCmd(c, "run", "--name=second", "--link=first:first", "-d", "busybox", "top") out, _ := dockerCmd(c, "ps", "--no-trunc") lines := strings.Split(strings.TrimSpace(string(out)), "\n") // strip header lines = lines[1:] expected := []string{"second", "first,second/first"} var names []string for 
_, l := range lines { fields := strings.Fields(l) names = append(names, fields[len(fields)-1]) } c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array: %v, got: %v", expected, names)) } func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { testRequires(c, DaemonIsLinux) portRange := "3800-3900" dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top") out, _ := dockerCmd(c, "ps") c.Assert(string(out), checker.Contains, portRange, check.Commentf("docker ps output should have had the port range %q: %s", portRange, string(out))) } func (s *DockerSuite) TestPsWithSize(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "sizetest", "busybox", "top") out, _ := dockerCmd(c, "ps", "--size") c.Assert(out, checker.Contains, "virtual", check.Commentf("docker ps with --size should show virtual size of container")) } func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { testRequires(c, DaemonIsLinux) // create a container out, _ := dockerCmd(c, "create", "busybox") cID := strings.TrimSpace(out) shortCID := cID[:12] // Make sure it DOESN'T show up w/o a '-a' for normal 'ps' out, _ = dockerCmd(c, "ps", "-q") c.Assert(out, checker.Not(checker.Contains), shortCID, check.Commentf("Should have not seen '%s' in ps output:\n%s", shortCID, out)) // Make sure it DOES show up as 'Created' for 'ps -a' out, _ = dockerCmd(c, "ps", "-a") hits := 0 for _, line := range strings.Split(out, "\n") { if !strings.Contains(line, shortCID) { continue } hits++ c.Assert(line, checker.Contains, "Created", check.Commentf("Missing 'Created' on '%s'", line)) } c.Assert(hits, checker.Equals, 1, check.Commentf("Should have seen '%s' in ps -a output once:%d\n%s", shortCID, hits, out)) // filter containers by 'create' - note, no -a needed out, _ = dockerCmd(c, "ps", "-q", "-f", "status=created") containerOut := strings.TrimSpace(out) c.Assert(cID, checker.HasPrefix, containerOut) } func (s *DockerSuite) 
TestPsFormatMultiNames(c *check.C) { testRequires(c, DaemonIsLinux) //create 2 containers and link them dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") //use the new format capabilities to only list the names and --no-trunc to get all names out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") lines := strings.Split(strings.TrimSpace(string(out)), "\n") expected := []string{"parent", "child,parent/linkedone"} var names []string for _, l := range lines { names = append(names, l) } c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) //now list without turning off truncation and make sure we only get the non-link names out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") lines = strings.Split(strings.TrimSpace(string(out)), "\n") expected = []string{"parent", "child"} var truncNames []string for _, l := range lines { truncNames = append(truncNames, l) } c.Assert(expected, checker.DeepEquals, truncNames, check.Commentf("Expected array with truncated names: %v, got: %v", expected, truncNames)) } func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { testRequires(c, DaemonIsLinux) // make sure no-container "docker ps" still prints the header row out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) // verify that "docker ps" with a container still prints the header row also dockerCmd(c, "run", "--name=test", "-d", "busybox", "top") out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) } func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { testRequires(c, DaemonIsLinux) config := `{ "psFormat": "default {{ .ID }}" }` d, err := ioutil.TempDir("", 
"integration-cli-") c.Assert(err, checker.IsNil) defer os.RemoveAll(d) err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "run", "--name=test", "-d", "busybox", "top") id := strings.TrimSpace(out) out, _ = dockerCmd(c, "--config", d, "ps", "-q") c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) } // Test for GitHub issue #12595 func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { testRequires(c, DaemonIsLinux) originalImageName := "busybox:TestPsImageIDAfterUpdate-original" updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" runCmd := exec.Command(dockerBinary, "tag", "busybox:latest", originalImageName) out, _, err := runCommandWithOutput(runCmd) c.Assert(err, checker.IsNil) originalImageID, err := getIDByName(originalImageName) c.Assert(err, checker.IsNil) runCmd = exec.Command(dockerBinary, "run", "-d", originalImageName, "top") out, _, err = runCommandWithOutput(runCmd) c.Assert(err, checker.IsNil) containerID := strings.TrimSpace(out) linesOut, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() c.Assert(err, checker.IsNil) lines := strings.Split(strings.TrimSpace(string(linesOut)), "\n") // skip header lines = lines[1:] c.Assert(len(lines), checker.Equals, 1) for _, line := range lines { f := strings.Fields(line) c.Assert(f[1], checker.Equals, originalImageName) } runCmd = exec.Command(dockerBinary, "commit", containerID, updatedImageName) out, _, err = runCommandWithOutput(runCmd) c.Assert(err, checker.IsNil) runCmd = exec.Command(dockerBinary, "tag", "-f", updatedImageName, originalImageName) out, _, err = runCommandWithOutput(runCmd) c.Assert(err, checker.IsNil) linesOut, err = exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() c.Assert(err, checker.IsNil) lines = strings.Split(strings.TrimSpace(string(linesOut)), "\n") // skip header lines = 
lines[1:] c.Assert(len(lines), checker.Equals, 1) for _, line := range lines { f := strings.Fields(line) c.Assert(f[1], checker.Equals, originalImageID) } } func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top") c.Assert(waitRun("foo"), checker.IsNil) out, _ := dockerCmd(c, "ps") lines := strings.Split(strings.TrimSpace(string(out)), "\n") expected := "0.0.0.0:5000->5000/tcp" fields := strings.Fields(lines[1]) c.Assert(fields[len(fields)-2], checker.Equals, expected, check.Commentf("Expected: %v, got: %v", expected, fields[len(fields)-2])) dockerCmd(c, "kill", "foo") dockerCmd(c, "wait", "foo") out, _ = dockerCmd(c, "ps", "-l") lines = strings.Split(strings.TrimSpace(string(out)), "\n") fields = strings.Fields(lines[1]) c.Assert(fields[len(fields)-2], checker.Not(checker.Equals), expected, check.Commentf("Should not got %v", expected)) } docker-1.10.3/integration-cli/docker_cli_pull_local_test.go000066400000000000000000000252661267010174400240420ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strings" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema2" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other // tags for the same image) are not also pulled down. // // Ref: docker/docker#8141 func testPullImageWithAliases(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repos := []string{} for _, tag := range []string{"recent", "fresh"} { repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) } // Tag and push the same image multiple times. 
for _, repo := range repos { dockerCmd(c, "tag", "busybox", repo) dockerCmd(c, "push", repo) } // Clear local images store. args := append([]string{"rmi"}, repos...) dockerCmd(c, args...) // Pull a single tag and verify it doesn't bring down all aliases. dockerCmd(c, "pull", repos[0]) dockerCmd(c, "inspect", repos[0]) for _, repo := range repos[1:] { _, _, err := dockerCmdWithError("inspect", repo) c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo)) } } func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { testPullImageWithAliases(c) } func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { testPullImageWithAliases(c) } // testConcurrentPullWholeRepo pulls the same repo concurrently. func testConcurrentPullWholeRepo(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repos := []string{} for _, tag := range []string{"recent", "fresh", "todays"} { repo := fmt.Sprintf("%v:%v", repoName, tag) _, err := buildImage(repo, fmt.Sprintf(` FROM busybox ENTRYPOINT ["/bin/echo"] ENV FOO foo ENV BAR bar CMD echo %s `, repo), true) c.Assert(err, checker.IsNil) dockerCmd(c, "push", repo) repos = append(repos, repo) } // Clear local images store. args := append([]string{"rmi"}, repos...) dockerCmd(c, args...) // Run multiple re-pulls concurrently results := make(chan error) numPulls := 3 for i := 0; i != numPulls; i++ { go func() { _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", "-a", repoName)) results <- err }() } // These checks are separate from the loop above because the check // package is not goroutine-safe. 
for i := 0; i != numPulls; i++ { err := <-results c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) } // Ensure all tags were pulled successfully for _, repo := range repos { dockerCmd(c, "inspect", repo) out, _ := dockerCmd(c, "run", "--rm", repo) c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) } } func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) { testConcurrentPullWholeRepo(c) } func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) { testConcurrentPullWholeRepo(c) } // testConcurrentFailingPull tries a concurrent pull that doesn't succeed. func testConcurrentFailingPull(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // Run multiple pulls concurrently results := make(chan error) numPulls := 3 for i := 0; i != numPulls; i++ { go func() { _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repoName+":asdfasdf")) results <- err }() } // These checks are separate from the loop above because the check // package is not goroutine-safe. for i := 0; i != numPulls; i++ { err := <-results c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail")) } } func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) { testConcurrentFailingPull(c) } func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) { testConcurrentFailingPull(c) } // testConcurrentPullMultipleTags pulls multiple tags from the same repo // concurrently. 
func testConcurrentPullMultipleTags(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repos := []string{} for _, tag := range []string{"recent", "fresh", "todays"} { repo := fmt.Sprintf("%v:%v", repoName, tag) _, err := buildImage(repo, fmt.Sprintf(` FROM busybox ENTRYPOINT ["/bin/echo"] ENV FOO foo ENV BAR bar CMD echo %s `, repo), true) c.Assert(err, checker.IsNil) dockerCmd(c, "push", repo) repos = append(repos, repo) } // Clear local images store. args := append([]string{"rmi"}, repos...) dockerCmd(c, args...) // Re-pull individual tags, in parallel results := make(chan error) for _, repo := range repos { go func(repo string) { _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repo)) results <- err }(repo) } // These checks are separate from the loop above because the check // package is not goroutine-safe. for range repos { err := <-results c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) } // Ensure all tags were pulled successfully for _, repo := range repos { dockerCmd(c, "inspect", repo) out, _ := dockerCmd(c, "run", "--rm", repo) c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) } } func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { testConcurrentPullMultipleTags(c) } func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { testConcurrentPullMultipleTags(c) } // testPullIDStability verifies that pushing an image and pulling it back // preserves the image ID. 
func testPullIDStability(c *check.C) { derivedImage := privateRegistryURL + "/dockercli/id-stability" baseImage := "busybox" _, err := buildImage(derivedImage, fmt.Sprintf(` FROM %s ENV derived true ENV asdf true RUN dd if=/dev/zero of=/file bs=1024 count=1024 CMD echo %s `, baseImage, derivedImage), true) if err != nil { c.Fatal(err) } originalID, err := getIDByName(derivedImage) if err != nil { c.Fatalf("error inspecting: %v", err) } dockerCmd(c, "push", derivedImage) // Pull out, _ := dockerCmd(c, "pull", derivedImage) if strings.Contains(out, "Pull complete") { c.Fatalf("repull redownloaded a layer: %s", out) } derivedIDAfterPull, err := getIDByName(derivedImage) if err != nil { c.Fatalf("error inspecting: %v", err) } if derivedIDAfterPull != originalID { c.Fatal("image's ID unexpectedly changed after a repush/repull") } // Make sure the image runs correctly out, _ = dockerCmd(c, "run", "--rm", derivedImage) if strings.TrimSpace(out) != derivedImage { c.Fatalf("expected %s; got %s", derivedImage, out) } // Confirm that repushing and repulling does not change the computed ID dockerCmd(c, "push", derivedImage) dockerCmd(c, "rmi", derivedImage) dockerCmd(c, "pull", derivedImage) derivedIDAfterPull, err = getIDByName(derivedImage) if err != nil { c.Fatalf("error inspecting: %v", err) } if derivedIDAfterPull != originalID { c.Fatal("image's ID unexpectedly changed after a repush/repull") } if err != nil { c.Fatalf("error inspecting: %v", err) } // Make sure the image still runs out, _ = dockerCmd(c, "run", "--rm", derivedImage) if strings.TrimSpace(out) != derivedImage { c.Fatalf("expected %s; got %s", derivedImage, out) } } func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { testPullIDStability(c) } func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { testPullIDStability(c) } func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { pushDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up 
image")) // Inject a manifest list into the registry manifestList := &manifestlist.ManifestList{ Versioned: manifest.Versioned{ SchemaVersion: 2, MediaType: manifestlist.MediaTypeManifestList, }, Manifests: []manifestlist.ManifestDescriptor{ { Descriptor: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 3253, MediaType: schema2.MediaTypeManifest, }, Platform: manifestlist.PlatformSpec{ Architecture: "bogus_arch", OS: "bogus_os", }, }, { Descriptor: distribution.Descriptor{ Digest: pushDigest, Size: 3253, MediaType: schema2.MediaTypeManifest, }, Platform: manifestlist.PlatformSpec{ Architecture: runtime.GOARCH, OS: runtime.GOOS, }, }, }, } manifestListJSON, err := json.MarshalIndent(manifestList, "", " ") c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list")) manifestListDigest := digest.FromBytes(manifestListJSON) hexDigest := manifestListDigest.Hex() registryV2Path := filepath.Join(s.reg.dir, "docker", "registry", "v2") // Write manifest list to blob store blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) err = os.MkdirAll(blobDir, 0755) c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir")) blobPath := filepath.Join(blobDir, "data") err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644) c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list")) // Add to revision store revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest) err = os.Mkdir(revisionDir, 0755) c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir")) revisionPath := filepath.Join(revisionDir, "link") err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644) c.Assert(err, checker.IsNil, check.Commentf("error writing revision link")) // Update tag tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, 
"_manifests", "tags", "latest", "current", "link") err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644) c.Assert(err, checker.IsNil, check.Commentf("error writing tag link")) // Verify that the image can be pulled through the manifest list. out, _ := dockerCmd(c, "pull", repoName) // The pull output includes "Digest: ", so find that matches := digestRegex.FindStringSubmatch(out) c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) pullDigest := matches[1] // Make sure the pushed and pull digests match c.Assert(manifestListDigest.String(), checker.Equals, pullDigest) // Was the image actually created? dockerCmd(c, "inspect", repoName) dockerCmd(c, "rmi", repoName) } docker-1.10.3/integration-cli/docker_cli_pull_test.go000066400000000000000000000165471267010174400226720ustar00rootroot00000000000000package main import ( "fmt" "regexp" "strings" "time" "github.com/docker/distribution/digest" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // TestPullFromCentralRegistry pulls an image from the central registry and verifies that the client // prints all expected output. 
func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { testRequires(c, DaemonIsLinux) out := s.Cmd(c, "pull", "hello-world") defer deleteImages("hello-world") c.Assert(out, checker.Contains, "Using default tag: latest", check.Commentf("expected the 'latest' tag to be automatically assumed")) c.Assert(out, checker.Contains, "Pulling from library/hello-world", check.Commentf("expected the 'library/' prefix to be automatically assumed")) c.Assert(out, checker.Contains, "Downloaded newer image for hello-world:latest") matches := regexp.MustCompile(`Digest: (.+)\n`).FindAllStringSubmatch(out, -1) c.Assert(len(matches), checker.Equals, 1, check.Commentf("expected exactly one image digest in the output")) c.Assert(len(matches[0]), checker.Equals, 2, check.Commentf("unexpected number of submatches for the digest")) _, err := digest.ParseDigest(matches[0][1]) c.Check(err, checker.IsNil, check.Commentf("invalid digest %q in output", matches[0][1])) // We should have a single entry in images. img := strings.TrimSpace(s.Cmd(c, "images")) splitImg := strings.Split(img, "\n") c.Assert(splitImg, checker.HasLen, 2) c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) } // TestPullNonExistingImage pulls non-existing images from the central registry, with different // combinations of implicit tag and library prefix. 
func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { testRequires(c, DaemonIsLinux) for _, e := range []struct { Repo string Alias string }{ {"library/asdfasdf", "asdfasdf:foobar"}, {"library/asdfasdf", "library/asdfasdf:foobar"}, {"library/asdfasdf", "asdfasdf"}, {"library/asdfasdf", "asdfasdf:latest"}, {"library/asdfasdf", "library/asdfasdf"}, {"library/asdfasdf", "library/asdfasdf:latest"}, } { out, err := s.CmdWithError("pull", e.Alias) c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", out)) // Hub returns 401 rather than 404 for nonexistent repos over // the v2 protocol - but we should end up falling back to v1, // which does return a 404. c.Assert(out, checker.Contains, fmt.Sprintf("Error: image %s not found", e.Repo), check.Commentf("expected image not found error messages")) // pull -a on a nonexistent registry should fall back as well if !strings.ContainsRune(e.Alias, ':') { out, err := s.CmdWithError("pull", "-a", e.Alias) c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", out)) c.Assert(out, checker.Contains, fmt.Sprintf("Error: image %s not found", e.Repo), check.Commentf("expected image not found error messages")) c.Assert(out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) } } } // TestPullFromCentralRegistryImplicitRefParts pulls an image from the central registry and verifies // that pulling the same image with different combinations of implicit elements of the the image // reference (tag, repository, central registry url, ...) doesn't trigger a new pull nor leads to // multiple images. 
func (s *DockerHubPullSuite) TestPullFromCentralRegistryImplicitRefParts(c *check.C) { testRequires(c, DaemonIsLinux) s.Cmd(c, "pull", "hello-world") defer deleteImages("hello-world") for _, i := range []string{ "hello-world", "hello-world:latest", "library/hello-world", "library/hello-world:latest", "docker.io/library/hello-world", "index.docker.io/library/hello-world", } { out := s.Cmd(c, "pull", i) c.Assert(out, checker.Contains, "Image is up to date for hello-world:latest") } // We should have a single entry in images. img := strings.TrimSpace(s.Cmd(c, "images")) splitImg := strings.Split(img, "\n") c.Assert(splitImg, checker.HasLen, 2) c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) } // TestPullScratchNotAllowed verifies that pulling 'scratch' is rejected. func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { testRequires(c, DaemonIsLinux) out, err := s.CmdWithError("pull", "scratch") c.Assert(err, checker.NotNil, check.Commentf("expected pull of scratch to fail")) c.Assert(out, checker.Contains, "'scratch' is a reserved name") c.Assert(out, checker.Not(checker.Contains), "Pulling repository scratch") } // TestPullAllTagsFromCentralRegistry pulls using `all-tags` for a given image and verifies that it // results in more images than a naked pull. 
func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { testRequires(c, DaemonIsLinux) s.Cmd(c, "pull", "busybox") outImageCmd := s.Cmd(c, "images", "busybox") splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") c.Assert(splitOutImageCmd, checker.HasLen, 2) s.Cmd(c, "pull", "--all-tags=true", "busybox") outImageAllTagCmd := s.Cmd(c, "images", "busybox") linesCount := strings.Count(outImageAllTagCmd, "\n") c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) // Verify that the line for 'busybox:latest' is left unchanged. var latestLine string for _, line := range strings.Split(outImageAllTagCmd, "\n") { if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { latestLine = line break } } c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) splitLatest := strings.Fields(latestLine) splitCurrent := strings.Fields(splitOutImageCmd[1]) // Clear relative creation times, since these can easily change between // two invocations of "docker images". Without this, the test can fail // like this: // ... obtained []string = []string{"busybox", "latest", "d9551b4026f0", "27", "minutes", "ago", "1.113", "MB"} // ... expected []string = []string{"busybox", "latest", "d9551b4026f0", "26", "minutes", "ago", "1.113", "MB"} splitLatest[3] = "" splitLatest[4] = "" splitLatest[5] = "" splitCurrent[3] = "" splitCurrent[4] = "" splitCurrent[5] = "" c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) } // TestPullClientDisconnect kills the client during a pull operation and verifies that the operation // gets cancelled. 
// // Ref: docker/docker#15589 func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { testRequires(c, DaemonIsLinux) repoName := "hello-world:latest" pullCmd := s.MakeCmd("pull", repoName) stdout, err := pullCmd.StdoutPipe() c.Assert(err, checker.IsNil) err = pullCmd.Start() c.Assert(err, checker.IsNil) // Cancel as soon as we get some output. buf := make([]byte, 10) _, err = stdout.Read(buf) c.Assert(err, checker.IsNil) err = pullCmd.Process.Kill() c.Assert(err, checker.IsNil) time.Sleep(2 * time.Second) _, err = s.CmdWithError("inspect", repoName) c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected")) } docker-1.10.3/integration-cli/docker_cli_pull_trusted_test.go000066400000000000000000000223161267010174400244330ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "os/exec" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-pull") // Try pull pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err := runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) dockerCmd(c, "rmi", repoName) // Try untrusted pull to ensure we pushed the tag to the registry pullCmd = exec.Command(dockerBinary, "pull", "--disable-content-trust=true", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) } func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-isolated-pull") // Try pull (run from isolated directory without trust information) pullCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated", "pull", repoName) 
s.trustedCmd(pullCmd) out, _, err := runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(string(out))) dockerCmd(c, "rmi", repoName) } func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) dockerCmd(c, "push", repoName) dockerCmd(c, "rmi", repoName) // Try trusted pull on untrusted tag pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err := runCommandWithOutput(pullCmd) c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) } func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) { c.Skip("Currently changes system time, causing instability") repoName := s.setupTrustedImage(c, "trusted-cert-expired") // Certificates have 10 years of expiration elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) runAtDifferentDate(elevenYearsFromNow, func() { // Try pull pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err := runCommandWithOutput(pullCmd) c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf(out)) }) runAtDifferentDate(elevenYearsFromNow, func() { // Try pull pullCmd := exec.Command(dockerBinary, "pull", "--disable-content-trust", repoName) s.trustedCmd(pullCmd) out, _, err := runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) }) } func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) 
evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") if err != nil { c.Fatalf("Failed to create local temp dir") } // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) dockerCmd(c, "rmi", repoName) // Try pull pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) dockerCmd(c, "rmi", repoName) // Kill the notary server, start a new "evil" one. s.not.Close() s.not, err = newTestNotary(c) c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. // tag an image and upload it to the private registry dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) // Push up to the new server pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) s.trustedCmd(pushCmd) out, _, err = runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) // Now, try pulling with the original client from this new trust server. This should fail. 
pullCmd = exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "valid signatures did not meet threshold", check.Commentf(out)) } func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) { c.Skip("Currently changes system time, causing instability") repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppull/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) // Push with default passphrases pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) dockerCmd(c, "rmi", repoName) // Snapshots last for three years. This should be expired fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) runAtDifferentDate(fourYearsLater, func() { // Try pull pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.NotNil, check.Commentf("Missing expected error running trusted pull with expired snapshots")) c.Assert(string(out), checker.Contains, "repository out-of-date", check.Commentf(out)) }) } func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-offline-pull") pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") out, _, err := runCommandWithOutput(pullCmd) c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "error contacting notary server", check.Commentf(out)) // Do valid trusted pull to warm cache pullCmd = exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = 
runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) dockerCmd(c, "rmi", repoName) // Try pull again with invalid notary server, should use cache pullCmd = exec.Command(dockerBinary, "pull", repoName) s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) } func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") // tag the image and upload it to the private registry _, err := buildImage(repoName, ` FROM busybox CMD echo trustedpulldelete `, true) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) if err != nil { c.Fatalf("Error running trusted push: %s\n%s", err, out) } if !strings.Contains(string(out), "Signing and pushing trust metadata") { c.Fatalf("Missing expected output on trusted push:\n%s", out) } if out, status := dockerCmd(c, "rmi", repoName); status != 0 { c.Fatalf("Error removing image %q\n%s", repoName, out) } // Try pull pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) matches := digestRegex.FindStringSubmatch(out) c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) pullDigest := matches[1] imageID, err := inspectField(repoName, "Id") c.Assert(err, checker.IsNil, check.Commentf("error inspecting image id")) imageByDigest := repoName + "@" + pullDigest byDigestID, err := inspectField(imageByDigest, "Id") c.Assert(err, checker.IsNil, check.Commentf("error inspecting image id")) c.Assert(byDigestID, checker.Equals, imageID) // rmi of tag should also 
remove the digest reference dockerCmd(c, "rmi", repoName) _, err = inspectField(imageByDigest, "Id") c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) _, err = inspectField(imageID, "Id") c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) } docker-1.10.3/integration-cli/docker_cli_push_test.go000066400000000000000000000547011267010174400226670ustar00rootroot00000000000000package main import ( "archive/tar" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "time" "github.com/docker/distribution/digest" "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // Pushing an image to a private registry. func testPushBusyboxImage(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) // push the image to the registry dockerCmd(c, "push", repoName) } func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { testPushBusyboxImage(c) } func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) { testPushBusyboxImage(c) } // pushing an image without a prefix should throw an error func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { out, _, err := dockerCmdWithError("push", "busybox") c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)) } func testPushUntagged(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) expected := "Repository does not exist" out, _, err := dockerCmdWithError("push", repoName) c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) } func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { testPushUntagged(c) } func 
(s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) { testPushUntagged(c) } func testPushBadTag(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) expected := "does not exist" out, _, err := dockerCmdWithError("push", repoName) c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) } func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { testPushBadTag(c) } func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) { testPushBadTag(c) } func testPushMultipleTags(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoTag1) dockerCmd(c, "tag", "busybox", repoTag2) dockerCmd(c, "push", repoName) // Ensure layer list is equivalent for repoTag1 and repoTag2 out1, _ := dockerCmd(c, "pull", repoTag1) imageAlreadyExists := ": Image already exists" var out1Lines []string for _, outputLine := range strings.Split(out1, "\n") { if strings.Contains(outputLine, imageAlreadyExists) { out1Lines = append(out1Lines, outputLine) } } out2, _ := dockerCmd(c, "pull", repoTag2) var out2Lines []string for _, outputLine := range strings.Split(out2, "\n") { if strings.Contains(outputLine, imageAlreadyExists) { out1Lines = append(out1Lines, outputLine) } } c.Assert(out2Lines, checker.HasLen, len(out1Lines)) for i := range out1Lines { c.Assert(out1Lines[i], checker.Equals, out2Lines[i]) } } func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { testPushMultipleTags(c) } func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) { testPushMultipleTags(c) } func testPushEmptyLayer(c *check.C) { repoName := 
fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) emptyTarball, err := ioutil.TempFile("", "empty_tarball") c.Assert(err, check.IsNil, check.Commentf("Unable to create test file")) tw := tar.NewWriter(emptyTarball) err = tw.Close() c.Assert(err, check.IsNil, check.Commentf("Error creating empty tarball")) freader, err := os.Open(emptyTarball.Name()) c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball")) importCmd := exec.Command(dockerBinary, "import", "-", repoName) importCmd.Stdin = freader out, _, err := runCommandWithOutput(importCmd) c.Assert(err, check.IsNil, check.Commentf("import failed: %q", out)) // Now verify we can push it out, _, err = dockerCmdWithError("push", repoName) c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) } func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { testPushEmptyLayer(c) } func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { testPushEmptyLayer(c) } // testConcurrentPush pushes multiple tags to the same repo // concurrently. func testConcurrentPush(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repos := []string{} for _, tag := range []string{"push1", "push2", "push3"} { repo := fmt.Sprintf("%v:%v", repoName, tag) _, err := buildImage(repo, fmt.Sprintf(` FROM busybox ENTRYPOINT ["/bin/echo"] ENV FOO foo ENV BAR bar CMD echo %s `, repo), true) c.Assert(err, checker.IsNil) repos = append(repos, repo) } // Push tags, in parallel results := make(chan error) for _, repo := range repos { go func(repo string) { _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repo)) results <- err }(repo) } for range repos { err := <-results c.Assert(err, checker.IsNil, check.Commentf("concurrent push failed with error: %v", err)) } // Clear local images store. args := append([]string{"rmi"}, repos...) dockerCmd(c, args...) 
// Re-pull and run individual tags, to make sure pushes succeeded for _, repo := range repos { dockerCmd(c, "pull", repo) dockerCmd(c, "inspect", repo) out, _ := dockerCmd(c, "run", "--rm", repo) c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) } } func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) { testConcurrentPush(c) } func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) { testConcurrentPush(c) } func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry dockerCmd(c, "tag", "busybox", sourceRepoName) // push the image to the registry out1, _, err := dockerCmdWithError("push", sourceRepoName) c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) // ensure that none of the layers were mounted from another repository during push c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) digest1 := digest.DigestRegexp.FindString(out1) c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) // retag the image to upload the same layers to another repo in the same registry dockerCmd(c, "tag", "busybox", destRepoName) // push the image to the registry out2, _, err := dockerCmdWithError("push", destRepoName) c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) // ensure that layers were mounted from the first repo during push c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true) digest2 := digest.DigestRegexp.FindString(out2) c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) c.Assert(digest1, check.Equals, digest2) // ensure that we can pull and run the 
cross-repo-pushed repository dockerCmd(c, "rmi", destRepoName) dockerCmd(c, "pull", destRepoName) out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") c.Assert(out3, check.Equals, "hello world") } func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) { sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry dockerCmd(c, "tag", "busybox", sourceRepoName) // push the image to the registry out1, _, err := dockerCmdWithError("push", sourceRepoName) c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) // ensure that none of the layers were mounted from another repository during push c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) digest1 := digest.DigestRegexp.FindString(out1) c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) // retag the image to upload the same layers to another repo in the same registry dockerCmd(c, "tag", "busybox", destRepoName) // push the image to the registry out2, _, err := dockerCmdWithError("push", destRepoName) c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, false) digest2 := digest.DigestRegexp.FindString(out2) c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) c.Assert(digest1, check.Equals, digest2) // ensure that we can pull and run the second pushed repository dockerCmd(c, "rmi", destRepoName) dockerCmd(c, "pull", destRepoName) out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") c.Assert(out3, 
check.Equals, "hello world") } func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) // Try pull after push pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) } func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmdWithPassphrases(pushCmd, "12345678", "12345678") out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) // Try pull after push pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) } // This test ensures backwards compatibility with old ENV variables. 
Should be // deprecated by 1.10 func (s *DockerTrustSuite) TestTrustedPushWithDeprecatedEnvPasswords(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/trusteddeprecated:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmdWithDeprecatedEnvPassphrases(pushCmd, "12345678", "12345678") out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) } func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmdWithServer(pushCmd, "https://example.com:81/") out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.NotNil, check.Commentf("Missing error while running trusted push w/ no server")) c.Assert(out, checker.Contains, "error contacting notary server", check.Commentf("Missing expected output on trusted push")) } func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) pushCmd := exec.Command(dockerBinary, "push", "--disable-content-trust", repoName) s.trustedCmdWithServer(pushCmd, "https://example.com/") out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push with no server and --disable-content-trust failed: %s\n%s", err, out)) c.Assert(out, check.Not(checker.Contains), "Error establishing connection to notary 
repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) } func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) dockerCmd(c, "push", repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) // Try pull after push pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) } func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) // Do a trusted push pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) // Do another trusted push pushCmd = exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err = runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted 
push with existing tag")) dockerCmd(c, "rmi", repoName) // Try pull to ensure the double push did not break our ability to pull pullCmd := exec.Command(dockerBinary, "pull", repoName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf("Error running trusted pull: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted pull with --disable-content-trust")) } func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) // Push with default passphrases pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) // Push with wrong passphrases pushCmd = exec.Command(dockerBinary, "push", repoName) s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321") out, _, err = runCommandWithOutput(pushCmd) c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with short targets passphrase: \n%s", out)) c.Assert(out, checker.Contains, "could not find necessary signing keys", check.Commentf("Missing expected output on trusted push with short targets/snapsnot passphrase")) } // This test ensures backwards compatibility with old ENV variables. 
Should be // deprecated by 1.10 func (s *DockerTrustSuite) TestTrustedPushWithIncorrectDeprecatedPassphraseForNonRoot(c *check.C) { repoName := fmt.Sprintf("%v/dockercliincorretdeprecatedpwd/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) // Push with default passphrases pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) // Push with wrong passphrases pushCmd = exec.Command(dockerBinary, "push", repoName) s.trustedCmdWithDeprecatedEnvPassphrases(pushCmd, "12345678", "87654321") out, _, err = runCommandWithOutput(pushCmd) c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with short targets passphrase: \n%s", out)) c.Assert(out, checker.Contains, "could not find necessary signing keys", check.Commentf("Missing expected output on trusted push with short targets/snapsnot passphrase")) } func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) { c.Skip("Currently changes system time, causing instability") repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) // Push with default passphrases pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) // Snapshots last for three years. 
This should be expired fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) runAtDifferentDate(fourYearsLater, func() { // Push with wrong passphrases pushCmd = exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err = runCommandWithOutput(pushCmd) c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with expired snapshot: \n%s", out)) c.Assert(out, checker.Contains, "repository out-of-date", check.Commentf("Missing expected output on trusted push with expired snapshot")) }) } func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) { c.Skip("Currently changes system time, causing instability") repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) // Push with default passphrases pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) // The timestamps expire in two weeks. 
Lets check three threeWeeksLater := time.Now().Add(time.Hour * 24 * 21) // Should succeed because the server transparently re-signs one runAtDifferentDate(threeWeeksLater, func() { pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with expired timestamp")) }) } func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegation(c *check.C) { repoName := fmt.Sprintf("%v/dockerclireleasedelegation/trusted", privateRegistryURL) targetName := fmt.Sprintf("%s:latest", repoName) pwd := "12345678" s.setupDelegations(c, repoName, pwd) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", targetName) pushCmd := exec.Command(dockerBinary, "-D", "push", targetName) s.trustedCmdWithPassphrases(pushCmd, pwd, pwd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) // Try pull after push pullCmd := exec.Command(dockerBinary, "pull", targetName) s.trustedCmd(pullCmd) out, _, err = runCommandWithOutput(pullCmd) c.Assert(err, check.IsNil, check.Commentf(out)) c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) // check to make sure that the target has been added to targets/releases and not targets contents, err := ioutil.ReadFile(filepath.Join(cliconfig.ConfigDir(), "trust/tuf", repoName, "metadata/targets.json")) c.Assert(err, check.IsNil, check.Commentf("Unable to read targets metadata")) c.Assert(strings.Contains(string(contents), `"latest"`), checker.False, check.Commentf(string(contents))) contents, err = 
ioutil.ReadFile(filepath.Join(cliconfig.ConfigDir(), "trust/tuf", repoName, "metadata/targets/releases.json")) c.Assert(err, check.IsNil, check.Commentf("Unable to read targets/releases metadata")) c.Assert(string(contents), checker.Contains, `"latest"`, check.Commentf(string(contents))) } docker-1.10.3/integration-cli/docker_cli_rename_test.go000066400000000000000000000100201267010174400231410ustar00rootroot00000000000000package main import ( "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" ) func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "wait", cleanedContainerID) name, err := inspectField(cleanedContainerID, "Name") newName := "new_name" + stringid.GenerateNonCryptoID() dockerCmd(c, "rename", "first_name", newName) name, err = inspectField(cleanedContainerID, "Name") c.Assert(err, checker.IsNil, check.Commentf("Failed to rename container %s", name)) c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) } func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") newName := "new_name" + stringid.GenerateNonCryptoID() cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "rename", "first_name", newName) name, err := inspectField(cleanedContainerID, "Name") c.Assert(err, checker.IsNil, check.Commentf("Failed to rename container %s", name)) c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) } func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "top") c.Assert(waitRun("first_name"), 
check.IsNil) newName := "new_name" ContainerID := strings.TrimSpace(out) dockerCmd(c, "rename", "first_name", newName) name, err := inspectField(ContainerID, "Name") c.Assert(err, checker.IsNil, check.Commentf("Failed to rename container %s", name)) c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) out, _ = dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "top") c.Assert(waitRun("first_name"), check.IsNil) newContainerID := strings.TrimSpace(out) name, err = inspectField(newContainerID, "Name") c.Assert(err, checker.IsNil, check.Commentf("Failed to reuse container name")) c.Assert(name, checker.Equals, "/first_name", check.Commentf("Failed to reuse container name")) } func (s *DockerSuite) TestRenameCheckNames(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") newName := "new_name" + stringid.GenerateNonCryptoID() dockerCmd(c, "rename", "first_name", newName) name, err := inspectField(newName, "Name") c.Assert(err, checker.IsNil, check.Commentf("Failed to rename container %s", name)) c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) name, err = inspectField("first_name", "Name") c.Assert(err, checker.NotNil, check.Commentf(name)) c.Assert(err.Error(), checker.Contains, "No such image or container: first_name") } func (s *DockerSuite) TestRenameInvalidName(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "myname", "-d", "busybox", "top") out, _, err := dockerCmdWithError("rename", "myname", "new:invalid") c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) c.Assert(out, checker.Contains, "Invalid container name", check.Commentf("%v", err)) out, _, err = dockerCmdWithError("rename", "myname", "") c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) c.Assert(out, 
checker.Contains, "may be empty", check.Commentf("%v", err)) out, _, err = dockerCmdWithError("rename", "", "newname") c.Assert(err, checker.NotNil, check.Commentf("Renaming container with empty name should have failed: %s", out)) c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) out, _ = dockerCmd(c, "ps", "-a") c.Assert(out, checker.Contains, "myname", check.Commentf("Output of docker ps should have included 'myname': %s", out)) } docker-1.10.3/integration-cli/docker_cli_restart_test.go000066400000000000000000000145311267010174400233710ustar00rootroot00000000000000package main import ( "os" "strconv" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", "foobar") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "wait", cleanedContainerID) out, _ = dockerCmd(c, "logs", cleanedContainerID) c.Assert(out, checker.Equals, "foobar\n") dockerCmd(c, "restart", cleanedContainerID) out, _ = dockerCmd(c, "logs", cleanedContainerID) c.Assert(out, checker.Equals, "foobar\nfoobar\n") } func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") cleanedContainerID := strings.TrimSpace(out) c.Assert(waitRun(cleanedContainerID), checker.IsNil) out, _ = dockerCmd(c, "logs", cleanedContainerID) c.Assert(out, checker.Equals, "foobar\n") dockerCmd(c, "restart", "-t", "1", cleanedContainerID) out, _ = dockerCmd(c, "logs", cleanedContainerID) c.Assert(waitRun(cleanedContainerID), checker.IsNil) c.Assert(out, checker.Equals, "foobar\nfoobar\n") } // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "-v", "/test", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "inspect", "--format", "{{ len .Mounts }}", cleanedContainerID) out = strings.Trim(out, " \n\r") c.Assert(out, checker.Equals, "1") source, err := inspectMountSourceField(cleanedContainerID, "/test") c.Assert(err, checker.IsNil) dockerCmd(c, "restart", cleanedContainerID) out, _ = dockerCmd(c, "inspect", "--format", "{{ len .Mounts }}", cleanedContainerID) out = strings.Trim(out, " \n\r") c.Assert(out, checker.Equals, "1") sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, "/test") c.Assert(err, checker.IsNil) c.Assert(source, checker.Equals, sourceAfterRestart) } func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--restart=no", "busybox", "false") id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") c.Assert(err, checker.IsNil) c.Assert(name, checker.Equals, "no") } func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false") id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") c.Assert(err, checker.IsNil) c.Assert(name, checker.Equals, "always") MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") c.Assert(err, checker.IsNil) // MaximumRetryCount=0 if the restart policy is always c.Assert(MaximumRetryCount, checker.Equals, "0") } func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:1", "busybox", "false") id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") c.Assert(err, 
checker.IsNil) c.Assert(name, checker.Equals, "on-failure") } // a good container with --restart=on-failure:3 // MaximumRetryCount!=0; RestartCount=0 func (s *DockerSuite) TestContainerRestartwithGoodContainer(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true") id := strings.TrimSpace(string(out)) err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 5*time.Second) c.Assert(err, checker.IsNil) count, err := inspectField(id, "RestartCount") c.Assert(err, checker.IsNil) c.Assert(count, checker.Equals, "0") MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") c.Assert(err, checker.IsNil) c.Assert(MaximumRetryCount, checker.Equals, "3") } func (s *DockerSuite) TestContainerRestartSuccess(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon) out, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), check.IsNil) pidStr, err := inspectField(id, "State.Pid") c.Assert(err, check.IsNil) pid, err := strconv.Atoi(pidStr) c.Assert(err, check.IsNil) p, err := os.FindProcess(pid) c.Assert(err, check.IsNil) c.Assert(p, check.NotNil) err = p.Kill() c.Assert(err, check.IsNil) err = waitInspect(id, "{{.RestartCount}}", "1", 5*time.Second) c.Assert(err, check.IsNil) err = waitInspect(id, "{{.State.Status}}", "running", 5*time.Second) c.Assert(err, check.IsNil) } func (s *DockerSuite) TestUserDefinedNetworkWithRestartPolicy(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "udNet") dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second", "--link=first:foo", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // ping to first and its alias foo must succeed 
_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) // Now kill the second container and let the restart policy kick in pidStr, err := inspectField("second", "State.Pid") c.Assert(err, check.IsNil) pid, err := strconv.Atoi(pidStr) c.Assert(err, check.IsNil) p, err := os.FindProcess(pid) c.Assert(err, check.IsNil) c.Assert(p, check.NotNil) err = p.Kill() c.Assert(err, check.IsNil) err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second) c.Assert(err, check.IsNil) err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second) // ping to first and its alias foo must still succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) } docker-1.10.3/integration-cli/docker_cli_rm_test.go000066400000000000000000000052221267010174400223200ustar00rootroot00000000000000package main import ( "os" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) dockerCmd(c, "run", "--name", "losemyvolumes", "-v", "/tmp/testing:/test", "busybox", "true") err := os.Remove("/tmp/testing") c.Assert(err, check.IsNil) dockerCmd(c, "rm", "-v", "losemyvolumes") } func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "foo", "-v", "/srv", "busybox", "true") dockerCmd(c, "rm", "-v", "foo") } func (s *DockerSuite) TestRmRunningContainer(c *check.C) { testRequires(c, DaemonIsLinux) createRunningContainer(c, "foo") _, _, err := dockerCmdWithError("rm", "foo") c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running 
container")) } func (s *DockerSuite) TestRmForceRemoveRunningContainer(c *check.C) { testRequires(c, DaemonIsLinux) createRunningContainer(c, "foo") // Stop then remove with -s dockerCmd(c, "rm", "-f", "foo") } func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { testRequires(c, DaemonIsLinux) dockerfile1 := `FROM busybox:latest ENTRYPOINT ["/bin/true"]` img := "test-container-orphaning" dockerfile2 := `FROM busybox:latest ENTRYPOINT ["/bin/true"] MAINTAINER Integration Tests` // build first dockerfile img1, err := buildImage(img, dockerfile1, true) c.Assert(err, check.IsNil, check.Commentf("Could not build image %s", img)) // run container on first image dockerCmd(c, "run", img) // rebuild dockerfile with a small addition at the end _, err = buildImage(img, dockerfile2, true) c.Assert(err, check.IsNil, check.Commentf("Could not rebuild image %s", img)) // try to remove the image, should not error out. out, _, err := dockerCmdWithError("rmi", img) c.Assert(err, check.IsNil, check.Commentf("Expected to removing the image, but failed: %s", out)) // check if we deleted the first image out, _ = dockerCmd(c, "images", "-q", "--no-trunc") c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) } func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { if out, _, err := dockerCmdWithError("rm", "unknown"); err == nil { c.Fatal("Expected error on rm unknown container, got none") } else if !strings.Contains(out, "Failed to remove container") { c.Fatalf("Expected output to contain 'Failed to remove container', got %q", out) } } func createRunningContainer(c *check.C, name string) { dockerCmd(c, "run", "-dt", "--name", name, "busybox", "top") } docker-1.10.3/integration-cli/docker_cli_rmi_test.go000066400000000000000000000305331267010174400224740ustar00rootroot00000000000000package main import ( "fmt" "os/exec" "strings" "github.com/docker/docker/pkg/integration/checker" 
"github.com/docker/docker/pkg/stringid" "github.com/go-check/check" ) func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { testRequires(c, DaemonIsLinux) errSubstr := "is using it" // create a container out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) // try to delete the image out, _, err := dockerCmdWithError("rmi", "busybox") // Container is using image, should not be able to rmi c.Assert(err, checker.NotNil) // Container is using image, error message should contain errSubstr c.Assert(out, checker.Contains, errSubstr, check.Commentf("Container: %q", cleanedContainerID)) // make sure it didn't delete the busybox name images, _ := dockerCmd(c, "images") // The name 'busybox' should not have been removed from images c.Assert(images, checker.Contains, "busybox") } func (s *DockerSuite) TestRmiTag(c *check.C) { testRequires(c, DaemonIsLinux) imagesBefore, _ := dockerCmd(c, "images", "-a") dockerCmd(c, "tag", "busybox", "utest:tag1") dockerCmd(c, "tag", "busybox", "utest/docker:tag2") dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") { imagesAfter, _ := dockerCmd(c, "images", "-a") c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+3, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) } dockerCmd(c, "rmi", "utest/docker:tag2") { imagesAfter, _ := dockerCmd(c, "images", "-a") c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) } dockerCmd(c, "rmi", "utest:5000/docker:tag3") { imagesAfter, _ := dockerCmd(c, "images", "-a") c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+1, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) } dockerCmd(c, "rmi", "utest:tag1") { imagesAfter, _ := dockerCmd(c, "images", "-a") c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, 
strings.Count(imagesBefore, "\n"), check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) } } func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'") containerID := strings.TrimSpace(out) dockerCmd(c, "commit", containerID, "busybox-one") imagesBefore, _ := dockerCmd(c, "images", "-a") dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") dockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") imagesAfter, _ := dockerCmd(c, "images", "-a") // tag busybox to create 2 more images with same imageID c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) imgID, err := inspectField("busybox-one:tag1", "Id") c.Assert(err, checker.IsNil) // run a container with the image out, _ = dockerCmd(c, "run", "-d", "busybox-one", "top") containerID = strings.TrimSpace(out) // first checkout without force it fails out, _, err = dockerCmdWithError("rmi", imgID) expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)) // rmi tagged in multiple repos should have failed without force c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, expected) dockerCmd(c, "stop", containerID) dockerCmd(c, "rmi", "-f", imgID) imagesAfter, _ = dockerCmd(c, "images", "-a") // rmi -f failed, image still exists c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) } func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") containerID := strings.TrimSpace(out) dockerCmd(c, "commit", containerID, "busybox-test") imagesBefore, _ := dockerCmd(c, 
"images", "-a") dockerCmd(c, "tag", "busybox-test", "utest:tag1") dockerCmd(c, "tag", "busybox-test", "utest:tag2") dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") { imagesAfter, _ := dockerCmd(c, "images", "-a") c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) } imgID, err := inspectField("busybox-test", "Id") c.Assert(err, checker.IsNil) // first checkout without force it fails out, _, err = dockerCmdWithError("rmi", imgID) // rmi tagged in multiple repos should have failed without force c.Assert(err, checker.NotNil) // rmi tagged in multiple repos should have failed without force c.Assert(out, checker.Contains, "(must be forced) - image is referenced in one or more repositories", check.Commentf("out: %s; err: %v;", out, err)) dockerCmd(c, "rmi", "-f", imgID) { imagesAfter, _ := dockerCmd(c, "images", "-a") // rmi failed, image still exists c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) } } // See https://github.com/docker/docker/issues/14116 func (s *DockerSuite) TestRmiImageIDForceWithRunningContainersAndMultipleTags(c *check.C) { testRequires(c, DaemonIsLinux) dockerfile := "FROM busybox\nRUN echo test 14116\n" imgID, err := buildImage("test-14116", dockerfile, false) c.Assert(err, checker.IsNil) newTag := "newtag" dockerCmd(c, "tag", imgID, newTag) dockerCmd(c, "run", "-d", imgID, "top") out, _, err := dockerCmdWithError("rmi", "-f", imgID) // rmi -f should not delete image with running containers c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "(cannot be forced) - image is being used by running container") } func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { testRequires(c, DaemonIsLinux) container := "test-delete-tag" newtag := "busybox:newtag" bb := "busybox:latest" dockerCmd(c, "tag", bb, newtag) dockerCmd(c, "run", 
"--name", container, bb, "/bin/true") out, _ := dockerCmd(c, "rmi", newtag) c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1) } func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { testRequires(c, DaemonIsLinux) image := "busybox-clone" cmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "-") cmd.Stdin = strings.NewReader(`FROM busybox MAINTAINER foo`) out, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.IsNil, check.Commentf("Could not build %s: %s", image, out)) dockerCmd(c, "run", "--name", "test-force-rmi", image, "/bin/true") dockerCmd(c, "rmi", "-f", image) } func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { testRequires(c, DaemonIsLinux) newRepo := "127.0.0.1:5000/busybox" oldRepo := "busybox" newTag := "busybox:test" dockerCmd(c, "tag", oldRepo, newRepo) dockerCmd(c, "run", "--name", "test", oldRepo, "touch", "/home/abcd") dockerCmd(c, "commit", "test", newTag) out, _ := dockerCmd(c, "rmi", newTag) c.Assert(out, checker.Contains, "Untagged: "+newTag) } func (s *DockerSuite) TestRmiForceWithMultipleRepositories(c *check.C) { testRequires(c, DaemonIsLinux) imageName := "rmiimage" tag1 := imageName + ":tag1" tag2 := imageName + ":tag2" _, err := buildImage(tag1, `FROM scratch MAINTAINER "docker"`, true) if err != nil { c.Fatal(err) } dockerCmd(c, "tag", tag1, tag2) out, _ := dockerCmd(c, "rmi", "-f", tag2) c.Assert(out, checker.Contains, "Untagged: "+tag2) c.Assert(out, checker.Not(checker.Contains), "Untagged: "+tag1) // Check built image still exists images, _ := dockerCmd(c, "images", "-a") c.Assert(images, checker.Contains, imageName, check.Commentf("Built image missing %q; Images: %q", imageName, images)) } func (s *DockerSuite) TestRmiBlank(c *check.C) { testRequires(c, DaemonIsLinux) // try to delete a blank image name out, _, err := dockerCmdWithError("rmi", "") // Should have failed to delete '' image c.Assert(err, checker.NotNil) // Wrong error message generated c.Assert(out, 
checker.Not(checker.Contains), "no such id", check.Commentf("out: %s", out)) // Expected error message not generated c.Assert(out, checker.Contains, "image name cannot be blank", check.Commentf("out: %s", out)) out, _, err = dockerCmdWithError("rmi", " ") // Should have failed to delete ' ' image c.Assert(err, checker.NotNil) // Expected error message not generated c.Assert(out, checker.Contains, "image name cannot be blank", check.Commentf("out: %s", out)) } func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { testRequires(c, DaemonIsLinux) // Build 2 images for testing. imageNames := []string{"test1", "test2"} imageIds := make([]string, 2) for i, name := range imageNames { dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) id, err := buildImage(name, dockerfile, false) c.Assert(err, checker.IsNil) imageIds[i] = id } // Create a long-running container. dockerCmd(c, "run", "-d", imageNames[0], "top") // Create a stopped container, and then force remove its image. dockerCmd(c, "run", imageNames[1], "true") dockerCmd(c, "rmi", "-f", imageIds[1]) // Try to remove the image of the running container and see if it fails as expected. out, _, err := dockerCmdWithError("rmi", "-f", imageIds[0]) // The image of the running container should not be removed. c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "image is being used by running container", check.Commentf("out: %s", out)) } // #13422 func (s *DockerSuite) TestRmiUntagHistoryLayer(c *check.C) { testRequires(c, DaemonIsLinux) image := "tmp1" // Build a image for testing. dockerfile := `FROM busybox MAINTAINER foo RUN echo 0 #layer0 RUN echo 1 #layer1 RUN echo 2 #layer2 ` _, err := buildImage(image, dockerfile, false) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "history", "-q", image) ids := strings.Split(out, "\n") idToTag := ids[2] // Tag layer0 to "tmp2". newTag := "tmp2" dockerCmd(c, "tag", idToTag, newTag) // Create a container based on "tmp1". 
dockerCmd(c, "run", "-d", image, "true") // See if the "tmp2" can be untagged. out, _ = dockerCmd(c, "rmi", newTag) // Expected 1 untagged entry c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1, check.Commentf("out: %s", out)) // Now let's add the tag again and create a container based on it. dockerCmd(c, "tag", idToTag, newTag) out, _ = dockerCmd(c, "run", "-d", newTag, "true") cid := strings.TrimSpace(out) // At this point we have 2 containers, one based on layer2 and another based on layer0. // Try to untag "tmp2" without the -f flag. out, _, err = dockerCmdWithError("rmi", newTag) // should not be untagged without the -f flag c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, cid[:12]) c.Assert(out, checker.Contains, "(must force)") // Add the -f flag and test again. out, _ = dockerCmd(c, "rmi", "-f", newTag) // should be allowed to untag with the -f flag c.Assert(out, checker.Contains, fmt.Sprintf("Untagged: %s:latest", newTag)) } func (*DockerSuite) TestRmiParentImageFail(c *check.C) { testRequires(c, DaemonIsLinux) parent, err := inspectField("busybox", "Parent") c.Assert(err, check.IsNil) out, _, err := dockerCmdWithError("rmi", parent) c.Assert(err, check.NotNil) if !strings.Contains(out, "image has dependent child images") { c.Fatalf("rmi should have failed because it's a parent image, got %s", out) } } func (s *DockerSuite) TestRmiWithParentInUse(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "create", "busybox") cID := strings.TrimSpace(out) out, _ = dockerCmd(c, "commit", cID) imageID := strings.TrimSpace(out) out, _ = dockerCmd(c, "create", imageID) cID = strings.TrimSpace(out) out, _ = dockerCmd(c, "commit", cID) imageID = strings.TrimSpace(out) dockerCmd(c, "rmi", imageID) } // #18873 func (s *DockerSuite) TestRmiByIDHardConflict(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "create", "busybox") imgID, err := inspectField("busybox:latest", "Id") c.Assert(err, checker.IsNil) _, _, err = 
dockerCmdWithError("rmi", imgID[:12]) c.Assert(err, checker.NotNil) // check that tag was not removed imgID2, err := inspectField("busybox:latest", "Id") c.Assert(err, checker.IsNil) c.Assert(imgID, checker.Equals, imgID2) } docker-1.10.3/integration-cli/docker_cli_run_test.go000066400000000000000000004524121267010174400225150ustar00rootroot00000000000000package main import ( "bufio" "bytes" "fmt" "io/ioutil" "net" "os" "os/exec" "path" "path/filepath" "reflect" "regexp" "sort" "strconv" "strings" "sync" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork/netutils" "github.com/docker/libnetwork/resolvconf" "github.com/go-check/check" ) // "test123" should be printed by docker run func (s *DockerSuite) TestRunEchoStdout(c *check.C) { out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") if out != "test123\n" { c.Fatalf("container should've printed 'test123', got '%s'", out) } } // "test" should be printed func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") if out != "test\n" { c.Errorf("container should've printed 'test'") } } // docker run should not leak file descriptors. This test relies on Unix // specific functionality and cannot run on Windows. 
func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory if out != "0 1 2 3\n" { c.Errorf("container should've printed '0 1 2 3', not: %s", out) } } // it should be possible to lookup Google DNS // this will fail when Internet access is unavailable func (s *DockerSuite) TestRunLookupGoogleDns(c *check.C) { testRequires(c, Network) image := DefaultImage if daemonPlatform == "windows" { // nslookup isn't present in Windows busybox. Is built-in. image = WindowsBaseImage } dockerCmd(c, "run", image, "nslookup", "google.com") } // the exit code should be 0 func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { dockerCmd(c, "run", "busybox", "true") } // the exit code should be 1 func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { _, exitCode, err := dockerCmdWithError("run", "busybox", "false") if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { c.Fatal(err) } if exitCode != 1 { c.Errorf("container should've exited with exit code 1. Got %d", exitCode) } } // it should be possible to pipe in data via stdin to a process running in a container func (s *DockerSuite) TestRunStdinPipe(c *check.C) { // TODO Windows: This needs some work to make compatible. 
testRequires(c, DaemonIsLinux) runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") runCmd.Stdin = strings.NewReader("blahblah") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { c.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.TrimSpace(out) dockerCmd(c, "wait", out) logsOut, _ := dockerCmd(c, "logs", out) containerLogs := strings.TrimSpace(logsOut) if containerLogs != "blahblah" { c.Errorf("logs didn't print the container's logs %s", containerLogs) } dockerCmd(c, "rm", out) } // the container's ID should be printed when starting a container in detached mode func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "busybox", "true") out = strings.TrimSpace(out) dockerCmd(c, "wait", out) rmOut, _ := dockerCmd(c, "rm", out) rmOut = strings.TrimSpace(rmOut) if rmOut != out { c.Errorf("rm didn't print the container ID %s %s", out, rmOut) } } // the working directory should be set correctly func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { // TODO Windows: There's a Windows bug stopping this from working. 
testRequires(c, DaemonIsLinux) dir := "/root" image := "busybox" if daemonPlatform == "windows" { dir = `/windows` image = WindowsBaseImage } // First with -w out, _ := dockerCmd(c, "run", "-w", dir, image, "pwd") out = strings.TrimSpace(out) if out != dir { c.Errorf("-w failed to set working directory") } // Then with --workdir out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd") out = strings.TrimSpace(out) if out != dir { c.Errorf("--workdir failed to set working directory") } } // pinging Google's DNS resolver should fail when we disable the networking func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { count := "-c" image := "busybox" if daemonPlatform == "windows" { count = "-n" image = WindowsBaseImage } // First using the long form --net out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8") if err != nil && exitCode != 1 { c.Fatal(out, err) } if exitCode != 1 { c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") } } //test --link use container name to link target func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as the networking // settings are not populated back yet on inspect. testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") ip, err := inspectField("parent", "NetworkSettings.Networks.bridge.IPAddress") c.Assert(err, check.IsNil) out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") if !strings.Contains(out, ip+" test") { c.Fatalf("use a container name to link target failed") } } //test --link use container id to link target func (s *DockerSuite) TestRunLinksContainerWithContainerId(c *check.C) { // TODO Windows: This test cannot run on a Windows daemon as the networking // settings are not populated back yet on inspect. 
testRequires(c, DaemonIsLinux) cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") cID = strings.TrimSpace(cID) ip, err := inspectField(cID, "NetworkSettings.Networks.bridge.IPAddress") c.Assert(err, check.IsNil) out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") if !strings.Contains(out, ip+" test") { c.Fatalf("use a container id to link target failed") } } func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) // run a container in user-defined network udlinkNet with a link for an existing container // and a link for a container that doesnt exist dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", "--link=third:bar", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // ping to first and its alias foo must succeed _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) // ping to third and its alias must fail _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") c.Assert(err, check.NotNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") c.Assert(err, check.NotNil) // start third container now dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top") c.Assert(waitRun("third"), check.IsNil) // ping to third and its alias must succeed now _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") c.Assert(err, check.IsNil) } func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) { 
testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // ping to first and its alias foo must succeed _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) // Restart first container dockerCmd(c, "restart", "first") c.Assert(waitRun("first"), check.IsNil) // ping to first and its alias foo must still succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) // Restart second container dockerCmd(c, "restart", "second") c.Assert(waitRun("second"), check.IsNil) // ping to first and its alias foo must still succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") c.Assert(err, check.IsNil) } func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "net1") dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // ping to first and its network-scoped aliases _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", 
"ping", "-c", "1", "foo1") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") c.Assert(err, check.IsNil) // Restart first container dockerCmd(c, "restart", "first") c.Assert(waitRun("first"), check.IsNil) // ping to first and its network-scoped aliases must succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") c.Assert(err, check.IsNil) _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") c.Assert(err, check.IsNil) } // Issue 9677. func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true") if err != nil { if !strings.Contains(out, "flag provided but not defined: --exec-opt") { // no daemon (client-only) c.Fatal(err, out) } } } // Regression test for #4979 func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { var ( out string exitCode int ) // Create a file in a volume if daemonPlatform == "windows" { out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, `cmd /c echo hello > c:\some\dir\file`) } else { out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") } if exitCode != 0 { c.Fatal("1", out, exitCode) } // Read the file from another container using --volumes-from to access the volume in the second container if daemonPlatform == "windows" { out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, `cmd /c type c:\some\dir\file`) } else { out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") } if exitCode != 0 { c.Fatal("2", out, exitCode) } } // Volume path is a symlink which also exists on the host, and the host side is a file not a dir // But the volume call is just a 
normal volume, not a bind mount func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { var ( dockerFile string containerPath string cmd string ) testRequires(c, SameHostDaemon) name := "test-volume-symlink" dir, err := ioutil.TempDir("", name) if err != nil { c.Fatal(err) } defer os.RemoveAll(dir) f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) if err != nil { c.Fatal(err) } f.Close() if daemonPlatform == "windows" { dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir) containerPath = `c:\test\test` cmd = "tasklist" } else { dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) containerPath = "/test/test" cmd = "true" } if _, err := buildImage(name, dockerFile, false); err != nil { c.Fatal(err) } dockerCmd(c, "run", "-v", containerPath, name, cmd) } func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { // TODO Windows (Post TP4): This test cannot run on a Windows daemon as // Windows does not support read-only bind mounts. testRequires(c, DaemonIsLinux) if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { c.Fatalf("run should fail because volume is ro: exit code %d", code) } } func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { // TODO Windows (Post TP4): This test cannot run on a Windows daemon as // Windows does not support read-only bind mounts. Modified for when ro is supported. 
testRequires(c, DaemonIsLinux) var ( volumeDir string fileInVol string ) if daemonPlatform == "windows" { volumeDir = `c:/test` // Forward-slash as using busybox fileInVol = `c:/test/file` } else { testRequires(c, DaemonIsLinux) volumeDir = "/test" fileInVol = `/test/file` } dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 { c.Fatalf("run should fail because volume is ro: exit code %d", code) } } // Regression test for #1201 func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { var ( volumeDir string fileInVol string ) if daemonPlatform == "windows" { volumeDir = `c:/test` // Forward-slash as using busybox fileInVol = `c:/test/file` } else { testRequires(c, DaemonIsLinux) volumeDir = "/test" fileInVol = "/test/file" } dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol) if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: "bar"`) { c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out) } dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol) } func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { // TODO Windows: This test cannot yet run on a Windows daemon as Windows does // not support read-only bind mounts as at TP4 testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "--name", "parent", "-v", "/test:/test:ro", "busybox", "true") // Expect this "rw" mode to be be ignored since the inherited volume is "ro" if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file"); err == nil { c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") } 
dockerCmd(c, "run", "--name", "parent2", "-v", "/test:/test:ro", "busybox", "true") // Expect this to be read-only since both are "ro" if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", "/test/file"); err == nil { c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") } } // Test for GH#10618 func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { path1 := randomTmpDirPath("test1", daemonPlatform) path2 := randomTmpDirPath("test2", daemonPlatform) someplace := ":/someplace" if daemonPlatform == "windows" { // Windows requires that the source directory exists before calling HCS testRequires(c, SameHostDaemon) someplace = `:c:\someplace` if err := os.MkdirAll(path1, 0755); err != nil { c.Fatalf("Failed to create %s: %q", path1, err) } defer os.RemoveAll(path1) if err := os.MkdirAll(path2, 0755); err != nil { c.Fatalf("Failed to create %s: %q", path1, err) } defer os.RemoveAll(path2) } mountstr1 := path1 + someplace mountstr2 := path2 + someplace if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { c.Fatal("Expected error about duplicate mount definitions") } else { if !strings.Contains(out, "Duplicate mount point") { c.Fatalf("Expected 'duplicate mount point' error, got %v", out) } } } // Test for #1351 func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = `c:` } dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo") } func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = `c:` } dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", 
"busybox", "touch", prefix+"/other/bar") dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") } // this tests verifies the ID format for the container func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true") if err != nil { c.Fatal(err) } if exit != 0 { c.Fatalf("expected exit code 0 received %d", exit) } match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) if err != nil { c.Fatal(err) } if !match { c.Fatalf("Invalid container ID: %s", out) } } // Test that creating a container with a volume doesn't crash. Regression test for #995. func (s *DockerSuite) TestRunCreateVolume(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = `c:` } dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") } // Test that creating a volume with a symlink in its path works correctly. Test for #5152. // Note that this bug happens only with symlinks with a target that starts with '/'. func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) 
testRequires(c, DaemonIsLinux) image := "docker-test-createvolumewithsymlink" buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox RUN ln -s home /bar`) buildCmd.Dir = workingDirectory err := buildCmd.Run() if err != nil { c.Fatalf("could not build '%s': %v", image, err) } _, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") if err != nil || exitCode != 0 { c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") if err != nil { c.Fatalf("[inspect] err: %v", err) } _, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink") if err != nil || exitCode != 0 { c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) } _, err = os.Stat(volPath) if !os.IsNotExist(err) { c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) } } // Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. 
func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) {
	name := "docker-test-volumesfromsymlinkpath"
	prefix := ""
	dfContents := `FROM busybox
		RUN ln -s home /foo
		VOLUME ["/foo/bar"]`

	if daemonPlatform == "windows" {
		prefix = `c:`
		dfContents = `FROM ` + WindowsBaseImage + `
		RUN mkdir c:\home
		RUN mklink /D c:\foo c:\home
		VOLUME ["c:/foo/bar"]
		ENTRYPOINT c:\windows\system32\cmd.exe`
	}

	buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
	buildCmd.Stdin = strings.NewReader(dfContents)
	buildCmd.Dir = workingDirectory
	err := buildCmd.Run()
	if err != nil {
		c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err)
	}

	out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name)
	if err != nil || exitCode != 0 {
		c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out)
	}

	// The consuming container must see the volume through the symlinked path.
	_, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar")
	if err != nil || exitCode != 0 {
		c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
	}
}

func (s *DockerSuite) TestRunExitCode(c *check.C) {
	var (
		exit int
		err  error
	)

	_, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72")

	// dockerCmdWithError surfaces a non-zero container exit status as a
	// non-nil error, so err must be set here.
	if err == nil {
		c.Fatal("should not have a non nil error")
	}
	if exit != 72 {
		c.Fatalf("expected exit code 72 received %d", exit)
	}
}

func (s *DockerSuite) TestRunUserDefaults(c *check.C) {
	expected := "uid=0(root) gid=0(root)"
	if daemonPlatform == "windows" {
		expected = "uid=1000(SYSTEM) gid=1000(SYSTEM)"
	}
	out, _ := dockerCmd(c, "run", "busybox", "id")
	if !strings.Contains(out, expected) {
		c.Fatalf("expected '%s' got %s", expected, out)
	}
}

func (s *DockerSuite) TestRunUserByName(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id")
	if !strings.Contains(out, "uid=0(root) gid=0(root)") {
		c.Fatalf("expected root user got %s", out)
	}
}

func (s *DockerSuite) TestRunUserByID(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id")
	if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") {
		c.Fatalf("expected daemon user got %s", out)
	}
}

func (s *DockerSuite) TestRunUserByIDBig(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	// 2147483648 is one past the maximum 32-bit signed integer.
	out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id")
	if err == nil {
		c.Fatal("No error, but must be.", out)
	}
	if !strings.Contains(out, "Uids and gids must be in range") {
		c.Fatalf("expected error about uids range, got %s", out)
	}
}

func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id")
	if err == nil {
		c.Fatal("No error, but must be.", out)
	}
	if !strings.Contains(out, "Uids and gids must be in range") {
		c.Fatalf("expected error about uids range, got %s", out)
	}
}

func (s *DockerSuite) TestRunUserByIDZero(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id")
	if err != nil {
		c.Fatal(err, out)
	}
	if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") {
		c.Fatalf("expected daemon user got %s", out)
	}
}

func (s *DockerSuite) TestRunUserNotFound(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	_, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id")
	if err == nil {
		c.Fatal("unknown user should cause container to fail")
	}
}

func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) {
	// TODO Windows. There are two bugs in TP4 which means this test cannot
	// be reliably enabled. The first is a race condition where sometimes
	// HCS CreateComputeSystem() will fail "Invalid class string". #4985252 and
	// #4493430.
	//
	// The second, which is seen more readily by increasing the number of concurrent
	// containers to 5 or more, is that CSRSS hangs. This may be fixed in the TP4 ZDP.
	// #4898773.
	testRequires(c, DaemonIsLinux)
	sleepTime := "2"
	if daemonPlatform == "windows" {
		sleepTime = "5" // Make more reliable on Windows
	}
	group := sync.WaitGroup{}
	group.Add(2)

	errChan := make(chan error, 2)
	for i := 0; i < 2; i++ {
		go func() {
			defer group.Done()
			_, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime)
			errChan <- err
		}()
	}

	group.Wait()
	close(errChan)

	for err := range errChan {
		c.Assert(err, check.IsNil)
	}
}

func (s *DockerSuite) TestRunEnvironment(c *check.C) {
	// TODO Windows: Environment handling is different between Linux and
	// Windows and this test relies currently on unix functionality.
	testRequires(c, DaemonIsLinux)
	cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env")
	cmd.Env = append(os.Environ(),
		"TRUE=false",
		"TRICKY=tri\ncky\n",
	)

	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		c.Fatal(err, out)
	}

	actualEnv := strings.Split(strings.TrimSpace(out), "\n")
	sort.Strings(actualEnv)

	// TRICKY contains embedded newlines, so after splitting `env` output on
	// "\n" it shows up as the three entries "TRICKY=tri", "cky" and "".
	goodEnv := []string{
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"HOSTNAME=testing",
		"FALSE=true",
		"TRUE=false",
		"TRICKY=tri",
		"cky",
		"",
		"HOME=/root",
	}
	sort.Strings(goodEnv)
	if len(goodEnv) != len(actualEnv) {
		c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
	}
	for i := range goodEnv {
		if actualEnv[i] != goodEnv[i] {
			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
		}
	}
}

func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) {
	// TODO Windows: Environment handling is different between Linux and
	// Windows and this test relies currently on unix functionality.
	testRequires(c, DaemonIsLinux)

	// Test to make sure that when we use -e on env vars that are
	// not set in our local env that they're removed (if present) in
	// the container

	cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env")
	cmd.Env = appendBaseEnv(true)

	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		c.Fatal(err, out)
	}

	actualEnv := strings.Split(strings.TrimSpace(out), "\n")
	sort.Strings(actualEnv)

	goodEnv := []string{
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"HOME=/root",
	}
	sort.Strings(goodEnv)
	if len(goodEnv) != len(actualEnv) {
		c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
	}
	for i := range goodEnv {
		if actualEnv[i] != goodEnv[i] {
			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
		}
	}
}

func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) {
	// TODO Windows: Environment handling is different between Linux and
	// Windows and this test relies currently on unix functionality.
	testRequires(c, DaemonIsLinux)

	// Test to make sure that when we use -e on env vars that are
	// already in the env that we're overriding them

	cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env")
	cmd.Env = appendBaseEnv(true, "HOSTNAME=bar")

	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		c.Fatal(err, out)
	}

	actualEnv := strings.Split(strings.TrimSpace(out), "\n")
	sort.Strings(actualEnv)

	goodEnv := []string{
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"HOME=/root2",
		"HOSTNAME=bar",
	}
	sort.Strings(goodEnv)
	if len(goodEnv) != len(actualEnv) {
		c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
	}
	for i := range goodEnv {
		if actualEnv[i] != goodEnv[i] {
			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
		}
	}
}

func (s *DockerSuite) TestRunContainerNetwork(c *check.C) {
	if daemonPlatform == "windows" {
		// Windows busybox does not have ping. Use built in ping instead.
		dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1")
	} else {
		dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1")
	}
}

func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) {
	// TODO Windows: This is Linux specific as --link is not supported and
	// this will be deprecated in favor of container networking model.
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	dockerCmd(c, "run", "--name", "linked", "busybox", "true")

	_, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true")
	if err == nil {
		c.Fatal("Expected error")
	}
}

// #7851 hostname outside container shows FQDN, inside only shortname
// For testing purposes it is not required to set host's hostname directly
// and use "--net=host" (as the original issue submitter did), as the same
// codepath is executed with "docker run -h ".
Both were manually
// tested, but this testcase takes the simpler path of using "run -h .."
func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) {
	// TODO Windows: -h is not yet functional.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname")
	if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" {
		c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual)
	}
}

func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) {
	// Not applicable for Windows as Windows daemon does not support
	// the concept of --privileged, and mknod is a Unix concept.
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	// "b 8 0" creates a block device node with major 8, minor 0.
	out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
		c.Fatalf("expected output ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) {
	// Not applicable for Windows as Windows daemon does not support
	// the concept of --privileged, and mknod is a Unix concept.
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
		c.Fatalf("expected output ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-drop
	testRequires(c, DaemonIsLinux)
	// CHPASS is not a valid capability name, so the run must fail.
	out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls")
	if err == nil {
		c.Fatal(err, out)
	}
}

func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-drop or mknod
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if err == nil {
		c.Fatal(err, out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-drop or mknod
	testRequires(c, DaemonIsLinux)
	// Capability names are accepted case-insensitively.
	out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if err == nil {
		c.Fatal(err, out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-drop or mknod
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if err == nil {
		c.Fatal(err, out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-drop or mknod
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
		c.Fatalf("expected output ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-add
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls")
	if err == nil {
		c.Fatal(err, out)
	}
}

func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-add
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
		c.Fatalf("expected output ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-add
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
		c.Fatalf("expected output ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) {
	// Not applicable for Windows as there is no concept of --cap-add
	testRequires(c, DaemonIsLinux)
	// --cap-drop takes precedence over --cap-add=ALL, so NET_ADMIN is gone.
	out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
	if err == nil {
		c.Fatal(err, out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunGroupAdd(c *check.C) {
	// Not applicable for Windows as there is no concept of --group-add
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id")

	groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777"
	if actual := strings.Trim(out, "\r\n"); actual != groupsList {
		c.Fatalf("expected output %s received %s", groupsList, actual)
	}
}

func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) {
	// Not applicable for Windows as there is no concept of --privileged
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
		c.Fatalf("expected output ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) {
	// Not applicable for Windows as there is no concept of unprivileged
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
	if err == nil {
		c.Fatal(err, out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}

func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) {
	// Not applicable for Windows as there is no concept of unprivileged
	testRequires(c, DaemonIsLinux)
	if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 {
		c.Fatal("sys should not be writable in a non privileged container")
	}
}

func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) {
	// Not applicable for Windows as there is no concept of unprivileged
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 {
		c.Fatalf("sys should be writable in privileged container")
	}
}

func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) {
	// Not applicable for Windows as there is no concept of unprivileged
	testRequires(c, DaemonIsLinux)
	if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 {
		c.Fatal("proc should not be writable in a non privileged container")
	}
}

func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) {
	// Not applicable for Windows as there is no concept of --privileged
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	if _, code := dockerCmd(c, "run", "--privileged", "busybox", "touch", "/proc/sysrq-trigger"); code != 0 {
		c.Fatalf("proc should be writable in privileged container")
	}
}

func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) {
	// Not applicable on Windows as /dev/ is a Unix specific concept
	// TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null")
	deviceLineFields := strings.Fields(out)
	// Blank out the date/time fields (6-8), which vary between runs.
	deviceLineFields[6] = ""
	deviceLineFields[7] = ""
	deviceLineFields[8] = ""
	expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"}

	if !(reflect.DeepEqual(deviceLineFields, expected)) {
		c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out)
	}
}

func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) {
	// Not applicable on Windows as /dev/ is a Unix specific concept
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero")
	if actual := strings.Trim(out, "\r\n"); actual[0] == '0' {
		c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual)
	}
}

func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) {
	// Not applicable on Windows as it does not support chroot
	testRequires(c, DaemonIsLinux)
	dockerCmd(c,
"run", "busybox", "chroot", "/", "true") } func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { // Not applicable on Windows as Windows does not support --device testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { c.Fatalf("expected output /dev/nulo, received %s", actual) } } func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) { // Not applicable on Windows as Windows does not support --device testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero") if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" { c.Fatalf("expected output /dev/zero, received %s", actual) } } func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) { // Not applicable on Windows as Windows does not support --device testRequires(c, DaemonIsLinux, NotUserNamespace) _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero") if err == nil { c.Fatalf("run container with device mode ro should fail") } } func (s *DockerSuite) TestRunModeHostname(c *check.C) { // Not applicable on Windows as Windows does not support -h testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { c.Fatalf("expected 'testhostname', but says: %q", actual) } out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") hostname, err := os.Hostname() if err != nil { c.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != hostname { c.Fatalf("expected %q, but says: %q", hostname, actual) } } func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { out, _ := dockerCmd(c, "run", "--workdir", "/", 
"busybox", "pwd") expected := "/\n" if daemonPlatform == "windows" { expected = "C:" + expected } if out != expected { c.Fatalf("pwd returned %q (expected %s)", s, expected) } } func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { if daemonPlatform == "windows" { // Windows busybox will fail with Permission Denied on items such as pagefile.sys dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`) } else { dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") } } func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { mount := "/:/" targetDir := "/host" if daemonPlatform == "windows" { mount = `c:\:c\` targetDir = "c:/host" // Forward slash as using busybox } out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir) if err == nil { c.Fatal(out, err) } } // Verify that a container gets default DNS when only localhost resolvers exist func (s *DockerSuite) TestRunDnsDefaultOptions(c *check.C) { // Not applicable on Windows as this is testing Unix specific functionality testRequires(c, SameHostDaemon, DaemonIsLinux) // preserve original resolv.conf for restoring after test origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { c.Fatalf("/etc/resolv.conf does not exist") } // defer restored original conf defer func() { if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { c.Fatal(err) } }() // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by // GetNameservers(), leading to a replacement of nameservers with the default set tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { c.Fatal(err) } actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") // check that the 
actual defaults are appended to the commented out // localhost resolver (which should be preserved) // NOTE: if we ever change the defaults from google dns, this will break expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" if actual != expected { c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) } } func (s *DockerSuite) TestRunDnsOptions(c *check.C) { // Not applicable on Windows as Windows does not support --dns*, or // the Unix-specific functionality of resolv.conf. testRequires(c, DaemonIsLinux) out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") // The client will get a warning on stderr when setting DNS to a localhost address; verify this: if !strings.Contains(stderr, "Localhost DNS setting") { c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) } actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" { c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual) } out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf") actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) if actual != "nameserver 127.0.0.1 options ndots:3" { c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual) } } func (s *DockerSuite) TestRunDnsRepeatOptions(c *check.C) { testRequires(c, DaemonIsLinux) out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf") actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) if actual != "search mydomain mydomain2 nameserver 1.1.1.1 
nameserver 2.2.2.2 options ndots:9 timeout:3" { c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual) } } func (s *DockerSuite) TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) { // Not applicable on Windows as testing Unix specific functionality testRequires(c, SameHostDaemon, DaemonIsLinux) origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { c.Fatalf("/etc/resolv.conf does not exist") } hostNamservers := resolvconf.GetNameservers(origResolvConf, netutils.IP) hostSearch := resolvconf.GetSearchDomains(origResolvConf) var out string out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") if actualNameservers := resolvconf.GetNameservers([]byte(out), netutils.IP); string(actualNameservers[0]) != "127.0.0.1" { c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) } actualSearch := resolvconf.GetSearchDomains([]byte(out)) if len(actualSearch) != len(hostSearch) { c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") actualNameservers := resolvconf.GetNameservers([]byte(out), netutils.IP) if len(actualNameservers) != len(hostNamservers) { c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNamservers), len(actualNameservers)) } for i := range actualNameservers { if actualNameservers[i] != hostNamservers[i] { c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNamservers[i]) } } if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) } // test with file tmpResolvConf := []byte("search 
example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { c.Fatal(err) } // put the old resolvconf back defer func() { if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { c.Fatal(err) } }() resolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { c.Fatalf("/etc/resolv.conf does not exist") } hostNamservers = resolvconf.GetNameservers(resolvConf, netutils.IP) hostSearch = resolvconf.GetSearchDomains(resolvConf) out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") if actualNameservers = resolvconf.GetNameservers([]byte(out), netutils.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) } actualSearch = resolvconf.GetSearchDomains([]byte(out)) if len(actualSearch) != len(hostSearch) { c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } } // Test to see if a non-root user can resolve a DNS name. Also // check if the container resolv.conf file has at least 0644 perm. 
func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) {
	// Not applicable on Windows as Windows does not support --user
	testRequires(c, SameHostDaemon, Network, DaemonIsLinux)

	dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org")

	cID, err := getIDByName("testperm")
	if err != nil {
		c.Fatal(err)
	}

	fmode := (os.FileMode)(0644)
	finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf"))
	if err != nil {
		c.Fatal(err)
	}

	// At least the 0644 bits must be set so a non-root user can read it.
	if (finfo.Mode() & fmode) != fmode {
		c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String())
	}
}

// Test if container resolv.conf gets updated the next time it restarts
// if host /etc/resolv.conf has changed. This only applies if the container
// uses the host's /etc/resolv.conf and does not have any dns options provided.
func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) {
	// Not applicable on Windows as testing unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
	tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")

	//take a copy of resolv.conf for restoring after test completes
	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
	if err != nil {
		c.Fatal(err)
	}

	// This test case is meant to test monitoring resolv.conf when it is
	// a regular file not a bind mount. So we unmount resolv.conf and replace
	// it with a file containing the original settings.
	cmd := exec.Command("umount", "/etc/resolv.conf")
	if _, err = runCommand(cmd); err != nil {
		c.Fatal(err)
	}

	//cleanup
	defer func() {
		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
			c.Fatal(err)
		}
	}()

	//1. test that a restarting container gets an updated resolv.conf
	dockerCmd(c, "run", "--name='first'", "busybox", "true")
	containerID1, err := getIDByName("first")
	if err != nil {
		c.Fatal(err)
	}

	// replace resolv.conf with our temporary copy
	bytesResolvConf := []byte(tmpResolvConf)
	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
		c.Fatal(err)
	}

	// start the container again to pickup changes
	dockerCmd(c, "start", "first")

	// check for update in container
	containerResolv, err := readContainerFile(containerID1, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	if !bytes.Equal(containerResolv, bytesResolvConf) {
		c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
	}

	/* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
		c.Fatal(err)
	} */

	//2. test that a restarting container does not receive resolv.conf updates
	// if it modified the container copy of the starting point resolv.conf
	dockerCmd(c, "run", "--name='second'", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
	containerID2, err := getIDByName("second")
	if err != nil {
		c.Fatal(err)
	}

	//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
		c.Fatal(err)
	}

	// start the container again
	dockerCmd(c, "start", "second")

	// check for update in container
	containerResolv, err = readContainerFile(containerID2, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}

	if bytes.Equal(containerResolv, resolvConfSystem) {
		c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv))
	}

	//3. test that a running container's resolv.conf is not modified while running
	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	runningContainerID := strings.TrimSpace(out)

	// replace resolv.conf
	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
		c.Fatal(err)
	}

	// check for update in container
	containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	if bytes.Equal(containerResolv, bytesResolvConf) {
		c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
	}

	//4. test that a running container's resolv.conf is updated upon restart
	// (the above container is still running..)
	dockerCmd(c, "restart", runningContainerID)

	// check for update in container
	containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	if !bytes.Equal(containerResolv, bytesResolvConf) {
		c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv))
	}

	//5. test that additions of a localhost resolver are cleaned from
	// host resolv.conf before updating container's resolv.conf copies

	// replace resolv.conf with a localhost-only nameserver copy
	bytesResolvConf = []byte(tmpLocalhostResolvConf)
	if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
		c.Fatal(err)
	}

	// start the container again to pickup changes
	dockerCmd(c, "start", "first")

	// our first exited container ID should have been updated, but with default DNS
	// after the cleanup of resolv.conf found only a localhost nameserver:
	containerResolv, err = readContainerFile(containerID1, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}

	expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n"
	if !bytes.Equal(containerResolv, []byte(expected)) {
		c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
	}

	//6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
	// of containers' resolv.conf.

	// Restore the original resolv.conf
	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
		c.Fatal(err)
	}

	// Run the container so it picks up the old settings
	dockerCmd(c, "run", "--name='third'", "busybox", "true")
	containerID3, err := getIDByName("third")
	if err != nil {
		c.Fatal(err)
	}

	// Create a modified resolv.conf.aside and override resolv.conf with it
	bytesResolvConf = []byte(tmpResolvConf)
	if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil {
		c.Fatal(err)
	}

	// os.Rename replaces the inode, exercising the "replaced, not modified" path.
	err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf")
	if err != nil {
		c.Fatal(err)
	}

	// start the container again to pickup changes
	dockerCmd(c, "start", "third")

	// check for update in container
	containerResolv, err = readContainerFile(containerID3, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	if !bytes.Equal(containerResolv, bytesResolvConf) {
		c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
	}

	//cleanup, restore original resolv.conf happens in defer func()
}

func (s *DockerSuite) TestRunAddHost(c *check.C) {
	// Not applicable on Windows as it does not support --add-host
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts")

	actual := strings.Trim(out, "\r\n")
	if actual != "86.75.30.9\textra" {
		c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual)
	}
}

// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) {
	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true")
	if exitCode != 0 {
		c.Fatalf("Container should have exited with error code 0")
	}
}

// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) {
	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true")
	if exitCode != 0 {
		c.Fatalf("Container should have exited with error code 0")
	}
}

// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) {
	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
	if exitCode != 0 {
		c.Fatalf("Container should have exited with error code 0")
	}
}

// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
// but using --attach instead of -a to make sure we read the flag correctly
func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) {
	cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true")
	_, stderr, _, err := runCommandWithStdoutStderr(cmd)
	if err == nil {
		c.Fatal("Container should have exited with error code different than 0")
	} else if !strings.Contains(stderr, "Conflicting options: -a and -d") {
		c.Fatal("Should have been returned an error with conflicting options -a and -d")
	}
}

func (s *DockerSuite) TestRunState(c *check.C) {
	// TODO Windows: This needs some rework as Windows busybox does not support top
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")

	id := strings.TrimSpace(out)
	state, err := inspectField(id, "State.Running")
	c.Assert(err, check.IsNil)
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid1, err := inspectField(id, "State.Pid")
	c.Assert(err, check.IsNil)
	if pid1 == "0" {
		c.Fatal("Container state Pid 0")
	}

	dockerCmd(c, "stop", id)
	state, err = inspectField(id, "State.Running")
	c.Assert(err, check.IsNil)
	if state != "false" {
		c.Fatal("Container state is 'running'")
	}
	pid2, err := inspectField(id, "State.Pid")
	c.Assert(err, check.IsNil)
	if pid2 == pid1 {
		c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
	}

	dockerCmd(c, "start", id)
	state, err = inspectField(id, "State.Running")
	c.Assert(err, check.IsNil)
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid3, err := inspectField(id, "State.Pid")
	c.Assert(err, check.IsNil)
	if pid3 == pid1 {
		c.Fatalf("Container state Pid %s, but expected %s",
pid2, pid1) } } // Test for #1737 func (s *DockerSuite) TestRunCopyVolumeUidGid(c *check.C) { // Not applicable on Windows as it does not support uid or gid in this way testRequires(c, DaemonIsLinux) name := "testrunvolumesuidgid" _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, true) if err != nil { c.Fatal(err) } // Test that the uid and gid is copied from the image to the volume out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") out = strings.TrimSpace(out) if out != "dockerio:dockerio" { c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) } } // Test for #1582 func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { // TODO Windows, post TP4. Windows does not yet support volume functionality // that copies from the image to the volume. 
	testRequires(c, DaemonIsLinux)
	name := "testruncopyvolumecontent"
	_, err := buildImage(name,
		`FROM busybox
RUN mkdir -p /hello/local && echo hello > /hello/local/world`,
		true)
	if err != nil {
		c.Fatal(err)
	}

	// Test that the content is copied from the image to the volume
	out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello")
	if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) {
		c.Fatal("Container failed to transfer content to volume")
	}
}

// TestRunCleanupCmdOnEntrypoint checks that --entrypoint on the command line
// overrides the image's ENTRYPOINT and drops the image's CMD arguments.
func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) {
	name := "testrunmdcleanuponentrypoint"
	if _, err := buildImage(name,
		`FROM busybox
ENTRYPOINT ["echo"]
CMD ["testingpoint"]`,
		true); err != nil {
		c.Fatal(err)
	}

	out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name)
	if exit != 0 {
		c.Fatalf("expected exit code 0 received %d, out: %q", exit, out)
	}
	out = strings.TrimSpace(out)
	expected := "root"
	if daemonPlatform == "windows" {
		expected = `nt authority\system`
	}
	if out != expected {
		c.Fatalf("Expected output %s, got %q", expected, out)
	}
}

// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected
func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) {
	existingFile := "/bin/cat"
	expected := "Cannot mkdir: /bin/cat is not a directory"
	if daemonPlatform == "windows" {
		existingFile = `\windows\system32\ntdll.dll`
		expected = "The directory name is invalid"
	}

	out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox")
	if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) {
		c.Fatalf("Docker must complains about making dir with exitCode 125 but we got out: %s, exitCode: %d", out, exitCode)
	}
}

// TestRunExitOnStdinClose verifies that an interactive (-i) container running
// cat exits shortly after its stdin pipe is closed.
func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) {
	name := "testrunexitonstdinclose"

	meow := "/bin/cat"
	delay := 1
	if daemonPlatform == "windows" {
		meow = "cat"
		delay = 5
	}
	runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow)

	stdin, err :=
	runCmd.StdinPipe()
	if err != nil {
		c.Fatal(err)
	}
	stdout, err := runCmd.StdoutPipe()
	if err != nil {
		c.Fatal(err)
	}

	if err := runCmd.Start(); err != nil {
		c.Fatal(err)
	}
	if _, err := stdin.Write([]byte("hello\n")); err != nil {
		c.Fatal(err)
	}

	// echo the line back through cat to prove the container is up before closing stdin
	r := bufio.NewReader(stdout)
	line, err := r.ReadString('\n')
	if err != nil {
		c.Fatal(err)
	}
	line = strings.TrimSpace(line)
	if line != "hello" {
		c.Fatalf("Output should be 'hello', got '%q'", line)
	}
	if err := stdin.Close(); err != nil {
		c.Fatal(err)
	}
	finish := make(chan error)
	go func() {
		finish <- runCmd.Wait()
		close(finish)
	}()
	// the container must exit within `delay` seconds of stdin closing
	select {
	case err := <-finish:
		c.Assert(err, check.IsNil)
	case <-time.After(time.Duration(delay) * time.Second):
		c.Fatal("docker run failed to exit on stdin close")
	}
	state, err := inspectField(name, "State.Running")
	c.Assert(err, check.IsNil)

	if state != "false" {
		c.Fatal("Container must be stopped after stdin closing")
	}
}

// Test for #2267
func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) {
	// Cannot run on Windows as Windows does not support diff.
	testRequires(c, DaemonIsLinux)
	name := "writehosts"
	out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts")
	if !strings.Contains(out, "test2267") {
		c.Fatal("/etc/hosts should contain 'test2267'")
	}

	// a write to /etc/hosts must not show up as a filesystem change
	out, _ = dockerCmd(c, "diff", name)
	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
		c.Fatal("diff should be empty")
	}
}

// eqToBaseDiff reports whether the given `docker diff` output equals the
// baseline diff produced by running a trivial busybox container (i.e. the
// container made no changes beyond what any container makes).
func eqToBaseDiff(out string, c *check.C) bool {
	out1, _ := dockerCmd(c, "run", "-d", "busybox", "echo", "hello")
	cID := strings.TrimSpace(out1)
	baseDiff, _ := dockerCmd(c, "diff", cID)
	baseArr := strings.Split(baseDiff, "\n")
	sort.Strings(baseArr)
	outArr := strings.Split(out, "\n")
	sort.Strings(outArr)

	return sliceEq(baseArr, outArr)
}

// sliceEq reports whether a and b hold the same strings in the same order.
func sliceEq(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}

	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}

	return true
}

// Test for #2267
func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) {
	// Cannot run on Windows as Windows does not support diff.
	testRequires(c, DaemonIsLinux)
	name := "writehostname"
	out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname")
	if !strings.Contains(out, "test2267") {
		c.Fatal("/etc/hostname should contain 'test2267'")
	}

	out, _ = dockerCmd(c, "diff", name)
	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
		c.Fatal("diff should be empty")
	}
}

// Test for #2267
func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) {
	// Cannot run on Windows as Windows does not support diff.
	testRequires(c, DaemonIsLinux)
	name := "writeresolv"
	out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf")
	if !strings.Contains(out, "test2267") {
		c.Fatal("/etc/resolv.conf should contain 'test2267'")
	}

	out, _ = dockerCmd(c, "diff", name)
	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
		c.Fatal("diff should be empty")
	}
}

// TestRunWithBadDevice ensures --device with a path that is not a device node
// fails with a clear error message.
func (s *DockerSuite) TestRunWithBadDevice(c *check.C) {
	// Cannot run on Windows as Windows does not support --device
	testRequires(c, DaemonIsLinux)
	name := "baddevice"
	out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true")

	if err == nil {
		c.Fatal("Run should fail with bad device")
	}
	expected := `"/etc": not a device node`
	if !strings.Contains(out, expected) {
		c.Fatalf("Output should contain %q, actual out: %q", expected, out)
	}
}

// TestRunEntrypoint verifies that --entrypoint replaces the image entrypoint
// and that the remaining arguments are passed to it.
func (s *DockerSuite) TestRunEntrypoint(c *check.C) {
	name := "entrypoint"

	// Note Windows does not have an echo.exe built in.
	var out, expected string
	if daemonPlatform == "windows" {
		out, _ = dockerCmd(c, "run", "--name", name, "--entrypoint", "cmd /s /c echo", "busybox", "foobar")
		expected = "foobar\r\n"
	} else {
		out, _ = dockerCmd(c, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar")
		expected = "foobar"
	}
	if out != expected {
		c.Fatalf("Output should be %q, actual out: %q", expected, out)
	}
}

// TestRunBindMounts exercises host bind mounts: read-only reads, read-write
// writes, an illegal destination, and single-file mounts.
func (s *DockerSuite) TestRunBindMounts(c *check.C) {
	testRequires(c, SameHostDaemon)
	if daemonPlatform == "linux" {
		testRequires(c, DaemonIsLinux, NotUserNamespace)
	}

	tmpDir, err := ioutil.TempDir("", "docker-test-container")
	if err != nil {
		c.Fatal(err)
	}

	defer os.RemoveAll(tmpDir)
	writeFile(path.Join(tmpDir, "touch-me"), "", c)

	// TODO Windows Post TP4.
	// Windows does not yet support :ro binds
	if daemonPlatform != "windows" {
		// Test reading from a read-only bind mount
		out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox", "ls", "/tmp")
		if !strings.Contains(out, "touch-me") {
			c.Fatal("Container failed to read from bind mount")
		}
	}

	// test writing to bind mount
	if daemonPlatform == "windows" {
		dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla")
	} else {
		dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla")
	}

	readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist

	// test mounting to an illegal destination directory
	_, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".")
	if err == nil {
		c.Fatal("Container bind mounted illegal directory")
	}

	// Windows does not (and likely never will) support mounting a single file
	if daemonPlatform != "windows" {
		// test mount a file
		dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla")
		content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist
		expected := "yotta"
		if content != expected {
			c.Fatalf("Output should be %q, actual out: %q", expected, content)
		}
	}
}

// Ensure that CIDFile gets deleted if it's empty
// Perform this test by making `docker run` fail
func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) {
	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	tmpCidFile := path.Join(tmpDir, "cid")

	image := "emptyfs"
	if daemonPlatform == "windows" {
		// Windows can't support an emptyfs image. Just use the regular Windows image
		image = WindowsBaseImage
	}
	// an image with no command must fail to run, leaving an empty cidfile behind
	out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image)
	if err == nil {
		c.Fatalf("Run without command must fail. out=%s", out)
	} else if !strings.Contains(out, "No command specified") {
		c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err)
	}

	if _, err := os.Stat(tmpCidFile); err == nil {
		c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile)
	}
}

// #2098 - Docker cidFiles only contain short version of the containerId
//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test"
// TestRunCidFile tests that run --cidfile returns the longid
func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) {
	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
	if err != nil {
		c.Fatal(err)
	}
	tmpCidFile := path.Join(tmpDir, "cid")
	defer os.RemoveAll(tmpDir)

	out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true")

	id := strings.TrimSpace(out)
	buffer, err := ioutil.ReadFile(tmpCidFile)
	if err != nil {
		c.Fatal(err)
	}
	// a full container ID is 64 hex characters
	cid := string(buffer)
	if len(cid) != 64 {
		c.Fatalf("--cidfile should be a long id, not %q", id)
	}
	if cid != id {
		c.Fatalf("cid must be equal to %s, got %s", id, cid)
	}
}

// TestRunSetMacAddress verifies the MAC address set via --mac-address is the
// one visible inside the container.
func (s *DockerSuite) TestRunSetMacAddress(c *check.C) {
	mac := "12:34:56:78:9a:bc"
	var out string
	if daemonPlatform == "windows" {
		out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'")
		mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs
	} else {
		out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'")
	}

	actualMac := strings.TrimSpace(out)
	if actualMac != mac {
		c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac)
	}
}

// TestRunInspectMacAddress checks the configured MAC address is reported back
// by docker inspect.
func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) {
	// TODO Windows. Network settings are not propagated back to inspect.
	testRequires(c, DaemonIsLinux)
	mac := "12:34:56:78:9a:bc"
	out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top")
	id := strings.TrimSpace(out)

	inspectedMac, err := inspectField(id, "NetworkSettings.Networks.bridge.MacAddress")
	c.Assert(err, check.IsNil)
	if inspectedMac != mac {
		c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
	}
}

// test docker run use a invalid mac address
func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) {
	out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox")
	//use a invalid mac address should with a error out
	if err == nil || !strings.Contains(out, "is not a valid mac address") {
		c.Fatalf("run with an invalid --mac-address should with error out")
	}
}

// TestRunDeallocatePortOnMissingIptablesRule checks that a host port can be
// re-published after its iptables rule was removed out-of-band and the
// container deleted.
func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) {
	// TODO Windows. Network settings are not propagated back to inspect.
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top")

	id := strings.TrimSpace(out)
	ip, err := inspectField(id, "NetworkSettings.Networks.bridge.IPAddress")
	c.Assert(err, check.IsNil)
	// delete the container's iptables ACCEPT rule behind the daemon's back
	iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip),
		"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT")
	out, _, err = runCommandWithOutput(iptCmd)
	if err != nil {
		c.Fatal(err, out)
	}
	if err := deleteContainer(id); err != nil {
		c.Fatal(err)
	}

	// the port must be free for re-use despite the missing rule
	dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top")
}

// TestRunPortInUse ensures binding the same host port twice fails with
// "port is already allocated".
func (s *DockerSuite) TestRunPortInUse(c *check.C) {
	// TODO Windows. The duplicate NAT message returned by Windows will be
	// changing as is currently completely undecipherable. Does need modifying
	// to run sh rather than top though as top isn't in Windows busybox.
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	port := "1234"
	dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top")

	out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top")
	if err == nil {
		c.Fatalf("Binding on used port must fail")
	}
	if !strings.Contains(out, "port is already allocated") {
		c.Fatalf("Out must be about \"port is already allocated\", got %s", out)
	}
}

// https://github.com/docker/docker/issues/12148
func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) {
	// TODO Windows. -P is not yet supported
	testRequires(c, DaemonIsLinux)
	// allocate a dynamic port to get the most recent
	out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top")

	id := strings.TrimSpace(out)
	out, _ = dockerCmd(c, "port", id, "80")

	strPort := strings.Split(strings.TrimSpace(out), ":")[1]
	port, err := strconv.ParseInt(strPort, 10, 64)
	if err != nil {
		c.Fatalf("invalid port, got: %s, error: %s", strPort, err)
	}

	// allocate a static port and a dynamic port together, with static port
	// takes the next recent port in dynamic port range.
	dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top")
}

// Regression test for #7792
func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
	// TODO Windows: Post TP4. Updated, but Windows does not support nested mounts currently.
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	prefix := ""
	if daemonPlatform == "windows" {
		prefix = "c:"
	}

	tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir2)

	// Create a temporary tmpfs mount.
	fooDir := filepath.Join(tmpDir, "foo")
	if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil {
		c.Fatalf("failed to mkdir at %s - %s", fooDir, err)
	}

	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil {
		c.Fatal(err)
	}

	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil {
		c.Fatal(err)
	}

	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil {
		c.Fatal(err)
	}

	// nested mounts (/tmp, /tmp/foo, /tmp/tmp2, /tmp/tmp2/foo) must all be
	// visible regardless of the order they were specified in
	dockerCmd(c, "run",
		"-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir),
		"-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir),
		"-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2),
		"-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir),
		"busybox:latest", "sh", "-c",
		"ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me")
}

// Regression test for https://github.com/docker/docker/issues/8259
func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) {
	// Not applicable on Windows as Windows does not support volumes
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	prefix := ""
	if daemonPlatform == "windows" {
		prefix = "c:"
	}

	tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	linkPath := os.TempDir() + "/testlink2"
	if err := os.Symlink(tmpDir, linkPath); err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(linkPath)

	// Create first container
	dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")

	// Create second container with same symlinked path
	// This will fail if the referenced issue is hit with a "Volume exists" error
	dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")
}

//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) {
	// While Windows supports volumes, it does not support --add-host hence
	// this test is not applicable on Windows.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf")
	if !strings.Contains(out, "nameserver 127.0.0.1") {
		c.Fatal("/etc volume mount hides /etc/resolv.conf")
	}

	out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname")
	if !strings.Contains(out, "test123") {
		c.Fatal("/etc volume mount hides /etc/hostname")
	}

	out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts")
	out = strings.Replace(out, "\n", " ", -1)
	if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") {
		c.Fatal("/etc volume mount hides /etc/hosts")
	}
}

// TestVolumesNoCopyData checks that pre-existing image data is NOT copied into
// volumes obtained via --volumes-from or bind mounts.
func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
	// TODO Windows (Post TP4). Windows does not support volumes which
	// are pre-populated such as is built in the dockerfile used in this test.
	testRequires(c, DaemonIsLinux)
	if _, err := buildImage("dataimage",
		`FROM busybox
RUN mkdir -p /foo
RUN touch /foo/bar`,
		true); err != nil {
		c.Fatal(err)
	}

	dockerCmd(c, "run", "--name", "test", "-v", "/foo", "busybox")

	if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") {
		c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out)
	}

	tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform)
	if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") {
		c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out)
	}
}

// TestRunNoOutputFromPullInStdout ensures that pull progress/errors for an
// unknown image are not written to the container's stdout stream.
func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
	// just run with unknown image
	cmd := exec.Command(dockerBinary, "run", "asdfsg")
	stdout := bytes.NewBuffer(nil)
	cmd.Stdout = stdout

	if err := cmd.Run(); err == nil {
		c.Fatal("Run with unknown image should fail")
	}

	if stdout.Len() != 0 {
		c.Fatalf("Stdout contains output from pull: %s", stdout)
	}
}

// TestRunVolumesCleanPaths checks that volume paths are normalized: the
// trailing-slash form must not appear as a distinct volume entry.
func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
	testRequires(c, SameHostDaemon)
	prefix := ""
	slash := `/`
	if daemonPlatform == "windows" {
		prefix = "c:"
		slash = `\`
	}

	if _, err := buildImage("run_volumes_clean_paths",
		`FROM busybox
VOLUME `+prefix+`/foo/`,
		true); err != nil {
		c.Fatal(err)
	}

	dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths")

	out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash)
	if err != errMountNotFound {
		c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out)
	}

	out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`)
	c.Assert(err, check.IsNil)
	if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) {
		c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out)
	}

	out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash)
	if err != errMountNotFound {
		c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out)
	}

	out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar")
	c.Assert(err, check.IsNil)
	if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) {
		c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out)
	}
}

// Regression test for #3631
func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) {
	// TODO Windows: This should be able to run on Windows if can find an
	// alternate to /dev/zero and /dev/stdout.
	testRequires(c, DaemonIsLinux)
	cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv")

	stdout, err := cont.StdoutPipe()
	if err != nil {
		c.Fatal(err)
	}

	if err := cont.Start(); err != nil {
		c.Fatal(err)
	}
	// read deliberately slowly; the full payload must still arrive intact
	n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil)
	if err != nil {
		c.Fatal(err)
	}

	expected := 2 * 1024 * 2000
	if n != expected {
		c.Fatalf("Expected %d, got %d", expected, n)
	}
}

// TestRunAllowPortRangeThroughExpose verifies -P publishes every port in an
// --expose range, and nothing outside it.
func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) {
	// TODO Windows: -P is not currently supported. Also network
	// settings are not propagated back.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top")

	id := strings.TrimSpace(out)
	portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports")
	c.Assert(err, check.IsNil)
	var ports nat.PortMap
	if err = unmarshalJSON([]byte(portstr), &ports); err != nil {
		c.Fatal(err)
	}
	for port, binding := range ports {
		portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
		if portnum < 3000 || portnum > 3003 {
			c.Fatalf("Port %d is out of range ", portnum)
		}
		if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
			c.Fatalf("Port is not mapped for the port %s", port)
		}
	}
}

// test docker run expose a invalid port
func (s *DockerSuite) TestRunExposePort(c *check.C) {
	out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox")
	//expose a invalid port should with a error out
	if err == nil || !strings.Contains(out, "Invalid range format for --expose") {
		c.Fatalf("run --expose a invalid port should with error out")
	}
}

// TestRunUnknownCommand checks the reported exit code when the container
// command does not exist; Windows and Linux differ on where the failure
// surfaces, but the exit code must be non-zero on both.
func (s *DockerSuite) TestRunUnknownCommand(c *check.C) {
	out, _, _ := dockerCmdWithStdoutStderr(c, "create", "busybox", "/bin/nada")

	cID := strings.TrimSpace(out)
	_, _, err := dockerCmdWithError("start", cID)

	// Windows and Linux are different here by architectural design. Linux will
	// fail to start the container, so an error is expected. Windows will
	// successfully start the container, and once started attempt to execute
	// the command which will fail.
	if daemonPlatform == "windows" {
		// Wait for it to exit.
		waitExited(cID, 30*time.Second)
		c.Assert(err, check.IsNil)
	} else {
		c.Assert(err, check.NotNil)
	}

	rc, err := inspectField(cID, "State.ExitCode")
	c.Assert(err, check.IsNil)
	if rc == "0" {
		c.Fatalf("ExitCode(%v) cannot be 0", rc)
	}
}

// TestRunModeIpcHost verifies --ipc=host shares the host IPC namespace, while
// a default run gets its own.
func (s *DockerSuite) TestRunModeIpcHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostIpc, err := os.Readlink("/proc/1/ns/ipc")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc")
	out = strings.Trim(out, "\n")
	if hostIpc != out {
		c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc")
	out = strings.Trim(out, "\n")
	if hostIpc == out {
		c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out)
	}
}

// TestRunModeIpcContainer verifies --ipc=container:<id> joins that container's
// IPC namespace (same ns link, shared /dev/shm contents, mqueue mount).
func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")

	id := strings.TrimSpace(out)
	state, err := inspectField(id, "State.Running")
	c.Assert(err, check.IsNil)
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid1, err := inspectField(id, "State.Pid")
	c.Assert(err, check.IsNil)

	parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1))
	if err != nil {
		c.Fatal(err)
	}

	out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc")
	out = strings.Trim(out, "\n")
	if parentContainerIpc != out {
		c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out)
	}

	catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test")
	if catOutput != "test" {
		c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput)
	}

	// check that /dev/mqueue is actually of mqueue type
	grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts")
	if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") {
		c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput)
	}

	lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue")
	lsOutput = strings.Trim(lsOutput, "\n")
	if lsOutput != "toto" {
		c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput)
	}
}

// TestRunModeIpcContainerNotExists ensures --ipc=container:<id> fails with a
// useful error when the referenced container does not exist.
func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top")
	if !strings.Contains(out, "abcd1234") || err == nil {
		c.Fatalf("run IPC from a non exists container should with correct error out")
	}
}

// TestRunModeIpcContainerNotRunning ensures --ipc=container:<id> fails when the
// referenced container exists but is not running.
func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "create", "busybox")

	id := strings.TrimSpace(out)
	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox")
	if err == nil {
		c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err)
	}
}

// TestRunMountShmMqueueFromHost checks that /dev/shm and /dev/mqueue can be
// bind-mounted from the host and their contents observed from the host side.
func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")
	defer os.Remove("/dev/mqueue/toto")
	defer os.Remove("/dev/shm/test")
	volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm")
	c.Assert(err, check.IsNil)
	if volPath != "/dev/shm" {
		c.Fatalf("volumePath should have been /dev/shm, was %s", volPath)
	}

	out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test")
	if out != "test" {
		c.Fatalf("Output of /dev/shm/test expected test but found: %s", out)
	}

	// Check that the mq was created
	if _, err := os.Stat("/dev/mqueue/toto"); err != nil {
		c.Fatalf("Failed to confirm '/dev/mqueue/toto' presence on host: %s", err.Error())
	}
}

// TestContainerNetworkMode verifies --net=container:<id> joins that
// container's network namespace.
func (s *DockerSuite) TestContainerNetworkMode(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id := strings.TrimSpace(out)
	c.Assert(waitRun(id), check.IsNil)
	pid1, err := inspectField(id, "State.Pid")
	c.Assert(err, check.IsNil)

	parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
	if err != nil {
		c.Fatal(err)
	}

	out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net")
	out = strings.Trim(out, "\n")
	if parentContainerNet != out {
		c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out)
	}
}

// TestRunModePidHost verifies --pid=host shares the host PID namespace, while
// a default run gets its own.
func (s *DockerSuite) TestRunModePidHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostPid, err := os.Readlink("/proc/1/ns/pid")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid")
	out = strings.Trim(out, "\n")
	if hostPid != out {
		c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid")
	out = strings.Trim(out, "\n")
	if hostPid == out {
		c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out)
	}
}

// TestRunModeUTSHost verifies --uts=host shares the host UTS namespace, while
// a default run gets its own.
func (s *DockerSuite) TestRunModeUTSHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	hostUTS, err := os.Readlink("/proc/1/ns/uts")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts")
	out = strings.Trim(out, "\n")
	if hostUTS != out {
		c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts")
	out = strings.Trim(out, "\n")
	if hostUTS == out {
		c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out)
	}
}

// TestRunTLSverify checks that specifying --tlsverify (either value) forces a
// TLS connection attempt against a non-TLS daemon and fails.
func (s *DockerSuite) TestRunTLSverify(c *check.C) {
	// Remote daemons use TLS and this test is not applicable when TLS is required.
	testRequires(c, SameHostDaemon)
	if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 {
		c.Fatalf("Should have worked: %v:\n%v", err, out)
	}

	// Regardless of whether we specify true or false we need to
	// test to make sure tls is turned on if --tlsverify is specified at all
	out, code, err := dockerCmdWithError("--tlsverify=false", "ps")
	if err == nil || code == 0 || !strings.Contains(out, "trying to connect") {
		c.Fatalf("Should have failed: \net:%v\nout:%v\nerr:%v", code, out, err)
	}

	out, code, err = dockerCmdWithError("--tlsverify=true", "ps")
	if err == nil || code == 0 || !strings.Contains(out, "cert") {
		c.Fatalf("Should have failed: \net:%v\nout:%v\nerr:%v", code, out, err)
	}
}

// TestRunPortFromDockerRangeInUse checks the dynamic port allocator skips a
// port that is already bound by another process on the host.
func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) {
	// TODO Windows. Once moved to libnetwork/CNM, this may be able to be
	// re-instated.
	testRequires(c, DaemonIsLinux)
	// first find allocator current position
	out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top")

	id := strings.TrimSpace(out)
	out, _ = dockerCmd(c, "port", id)

	out = strings.TrimSpace(out)
	if out == "" {
		c.Fatal("docker port command output is empty")
	}
	out = strings.Split(out, ":")[1]
	lastPort, err := strconv.Atoi(out)
	if err != nil {
		c.Fatal(err)
	}
	// occupy the next port ourselves so the allocator has to skip it
	port := lastPort + 1
	l, err := net.Listen("tcp", ":"+strconv.Itoa(port))
	if err != nil {
		c.Fatal(err)
	}
	defer l.Close()

	out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top")
	id = strings.TrimSpace(out)
	dockerCmd(c, "port", id)
}

// TestRunTtyWithPipe verifies -t fails fast when stdin is a pipe rather than
// a terminal.
func (s *DockerSuite) TestRunTtyWithPipe(c *check.C) {
	errChan := make(chan error)
	go func() {
		defer close(errChan)

		cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true")
		if _, err := cmd.StdinPipe(); err != nil {
			errChan <- err
			return
		}

		expected := "cannot enable tty mode"
		if out, _, err := runCommandWithOutput(cmd); err == nil {
			errChan <- fmt.Errorf("run should have failed")
			return
		} else if !strings.Contains(out, expected) {
			errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected)
			return
		}
	}()

	select {
	case err := <-errChan:
		c.Assert(err, check.IsNil)
	case <-time.After(6 * time.Second):
		c.Fatal("container is running but should have failed")
	}
}

// TestRunNonLocalMacAddress verifies a non-local MAC address given via
// --mac-address appears in the container's interface configuration.
func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) {
	addr := "00:16:3E:08:00:50"
	cmd := "ifconfig"
	image := "busybox"
	expected := addr

	if daemonPlatform == "windows" {
		cmd = "ipconfig /all"
		image = WindowsBaseImage
		expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1)
	}

	if out, _ := dockerCmd(c, "run", "--mac-address", addr, image, cmd); !strings.Contains(out, expected) {
		c.Fatalf("Output should have contained %q: %s", expected, out)
	}
}

func (s *DockerSuite) TestRunNetHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostNet, err :=
os.Readlink("/proc/1/ns/net") if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") out = strings.Trim(out, "\n") if hostNet != out { c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out) } out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net") out = strings.Trim(out, "\n") if hostNet == out { c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out) } } func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) { // TODO Windows. As Windows networking evolves and converges towards // CNM, this test may be possible to enable on Windows. testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") } func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { // Not applicable on Windows as uses Unix-specific capabilities testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) hostNet, err := os.Readlink("/proc/1/ns/net") if err != nil { c.Fatal(err) } dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top") out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") out = strings.Trim(out, "\n") if hostNet != out { c.Fatalf("Container should have host network namespace") } } func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { // TODO Windows. This may be possible to enable in the future. However, // Windows does not currently support --expose, or populate the network // settings seen through inspect. 
testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") id := strings.TrimSpace(out) portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") c.Assert(err, check.IsNil) var ports nat.PortMap err = unmarshalJSON([]byte(portstr), &ports) for port, binding := range ports { portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) if portnum < 3000 || portnum > 3003 { c.Fatalf("Port %d is out of range ", portnum) } if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { c.Fatal("Port is not mapped for the port "+port, out) } } } func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { dockerCmd(c, "run", "-d", "--name", "test", "busybox", "sleep", "30") out, err := inspectField("test", "HostConfig.RestartPolicy.Name") c.Assert(err, check.IsNil) if out != "no" { c.Fatalf("Set default restart policy failed") } } func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") timeout := 10 * time.Second if daemonPlatform == "windows" { timeout = 45 * time.Second } id := strings.TrimSpace(string(out)) if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil { c.Fatal(err) } count, err := inspectField(id, "RestartCount") c.Assert(err, check.IsNil) if count != "3" { c.Fatalf("Container was restarted %s times, expected %d", count, 3) } MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") c.Assert(err, check.IsNil) if MaximumRetryCount != "3" { c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") } } func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { dockerCmd(c, "run", "--rm", "busybox", "touch", "/file") } func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { // Not applicable on Windows which does not support --read-only 
testRequires(c, DaemonIsLinux) for _, f := range []string{"/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me"} { testReadOnlyFile(f, c) } } func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) { // Not applicable on Windows due to use of Unix specific functionality, plus // the use of --read-only which is not supported. // --read-only + userns has remount issues testRequires(c, DaemonIsLinux, NotUserNamespace) // Ensure we have not broken writing /dev/pts out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount") if status != 0 { c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.") } expected := "type devpts (rw," if !strings.Contains(string(out), expected) { c.Fatalf("expected output to contain %s but contains %s", expected, out) } } func testReadOnlyFile(filename string, c *check.C) { // Not applicable on Windows which does not support --read-only testRequires(c, DaemonIsLinux, NotUserNamespace) out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "touch", filename) if err == nil { c.Fatal("expected container to error on run with read only error") } expected := "Read-only file system" if !strings.Contains(string(out), expected) { c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) } out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "touch", filename) if err == nil { c.Fatal("expected container to error on run with read only error") } expected = "Read-only file system" if !strings.Contains(string(out), expected) { c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) } } func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) { // Not applicable on Windows which does not support --link // --read-only + userns has remount issues testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "-d", "--name", 
"test-etc-hosts-ro-linked", "busybox", "top") out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts") if !strings.Contains(string(out), "testlinked") { c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled") } } func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDnsFlag(c *check.C) { // Not applicable on Windows which does not support either --read-only or --dns. // --read-only + userns has remount issues testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf") if !strings.Contains(string(out), "1.1.1.1") { c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used") } } func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) { // Not applicable on Windows which does not support --read-only // --read-only + userns has remount issues testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts") if !strings.Contains(string(out), "testreadonly") { c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used") } } func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = "c:" } dockerCmd(c, "run", "-d", "--name", "voltest", "-v", prefix+"/foo", "busybox", "sleep", "60") dockerCmd(c, "run", "-d", "--name", "restarter", "--volumes-from", "voltest", "busybox", "sleep", "60") // Remove the main volume container and restart the consuming container dockerCmd(c, "rm", "-f", "voltest") // This should not fail since the volumes-from were already applied dockerCmd(c, "restart", "restarter") } // run container with --rm should remove container if exit code != 0 func (s *DockerSuite) 
TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { name := "flowers" out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists") if err == nil { c.Fatal("Expected docker run to fail", out, err) } out, err = getAllContainers() if err != nil { c.Fatal(out, err) } if out != "" { c.Fatal("Expected not to have containers", out) } } func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { name := "sparkles" out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound") if err == nil { c.Fatal("Expected docker run to fail", out, err) } out, err = getAllContainers() if err != nil { c.Fatal(out, err) } if out != "" { c.Fatal("Expected not to have containers", out) } } func (s *DockerSuite) TestRunPidHostWithChildIsKillable(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux, NotUserNamespace) name := "ibuildthecloud" dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi") c.Assert(waitRun(name), check.IsNil) errchan := make(chan error) go func() { if out, _, err := dockerCmdWithError("kill", name); err != nil { errchan <- fmt.Errorf("%v:\n%s", err, out) } close(errchan) }() select { case err := <-errchan: c.Assert(err, check.IsNil) case <-time.After(5 * time.Second): c.Fatal("Kill container timed out") } } func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) { // TODO Windows. 
This may be possible to enable once Windows supports // memory limits on containers testRequires(c, DaemonIsLinux) // this memory limit is 1 byte less than the min, which is 4MB // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox") if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { c.Fatalf("expected run to fail when using too low a memory limit: %q", out) } } func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) _, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version") if err == nil || code == 0 { c.Fatal("standard container should not be able to write to /proc/asound") } } func (s *DockerSuite) TestRunReadProcTimer(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats") if code != 0 { return } if err != nil { c.Fatal(err) } if strings.Trim(out, "\n ") != "" { c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) } } func (s *DockerSuite) TestRunReadProcLatency(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) // some kernels don't have this configured so skip the test if this file is not found // on the host running the tests. 
if _, err := os.Stat("/proc/latency_stats"); err != nil { c.Skip("kernel doesnt have latency_stats configured") return } out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats") if code != 0 { return } if err != nil { c.Fatal(err) } if strings.Trim(out, "\n ") != "" { c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) } } func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) testReadPaths := []string{ "/proc/latency_stats", "/proc/timer_stats", "/proc/kcore", } for i, filePath := range testReadPaths { name := fmt.Sprintf("procsieve-%d", i) shellCmd := fmt.Sprintf("exec 3<%s", filePath) out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) if exitCode != 0 { return } if err != nil { c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) } } } func (s *DockerSuite) TestMountIntoProc(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) _, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true") if err == nil || code == 0 { c.Fatal("container should not be able to mount into /proc") } } func (s *DockerSuite) TestMountIntoSys(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) testRequires(c, NotUserNamespace) dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") } func (s *DockerSuite) TestRunUnshareProc(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) name := "acidburn" out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp:unconfined", "debian:jessie", "unshare", "-p", "-m", 
"-f", "-r", "--mount-proc=/proc", "mount") if err == nil || !(strings.Contains(strings.ToLower(out), "permission denied") || strings.Contains(strings.ToLower(out), "operation not permitted")) { c.Fatalf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err) } name = "cereal" out, _, err = dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp:unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") if err == nil || !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || strings.Contains(strings.ToLower(out), "permission denied")) { c.Fatalf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) } /* Ensure still fails if running privileged with the default policy */ name = "crashoverride" out, _, err = dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp:unconfined", "--security-opt", "apparmor:docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") if err == nil || !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || strings.Contains(strings.ToLower(out), "permission denied")) { c.Fatalf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) } } func (s *DockerSuite) TestRunPublishPort(c *check.C) { // TODO Windows: This may be possible once Windows moves to libnetwork and CNM testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") out, _ := dockerCmd(c, "port", "test") out = strings.Trim(out, "\r\n") if out != "" { c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out) } } // Issue #10184. 
// TestDevicePermissions verifies that a device mapped into the container with
// explicit "mrw" permissions is created inside it as a crw-rw-rw- device node.
func (s *DockerSuite) TestDevicePermissions(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	const permissions = "crw-rw-rw-"
	out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse")
	if status != 0 {
		c.Fatalf("expected status 0, got %d", status)
	}
	// `ls -l` prints the permission string first, so a prefix check suffices.
	if !strings.HasPrefix(out, permissions) {
		c.Fatalf("output should begin with %q, got %q", permissions, out)
	}
}

// TestRunCapAddCHOWN checks that CAP_CHOWN alone (all other caps dropped) is
// enough for chown to succeed inside the container.
func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok")

	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
		c.Fatalf("expected output ok received %s", actual)
	}
}

// TestVolumeFromMixedRWOptions checks that --volumes-from can apply :ro / :rw
// modifiers independently of the source container's own mount mode.
// https://github.com/docker/docker/pull/14498
func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) {
	// TODO Windows post TP4. Enable the read-only bits once they are
	// supported on the platform.
	prefix := ""
	slash := `/`
	if daemonPlatform == "windows" {
		prefix = "c:"
		slash = `\`
	}
	dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true")
	if daemonPlatform != "windows" {
		dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true")
	}
	dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true")

	if daemonPlatform != "windows" {
		mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test")
		c.Assert(err, check.IsNil)
		if mRO.RW {
			c.Fatalf("Expected RO volume was RW")
		}
	}

	mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test")
	c.Assert(err, check.IsNil)
	if !mRW.RW {
		c.Fatalf("Expected RW volume was RO")
	}
}

// TestRunWriteFilteredProc verifies that sensitive /proc entries cannot be
// opened for writing even in a privileged container confined by the
// docker-default AppArmor profile.
func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace)

	testWritePaths := []string{
		/* modprobe and core_pattern should both be denied by generic
		 * policy of denials for /proc/sys/kernel. These files have been
		 * picked to be checked as they are particularly sensitive to writes */
		"/proc/sys/kernel/modprobe",
		"/proc/sys/kernel/core_pattern",
		"/proc/sysrq-trigger",
		"/proc/kcore",
	}
	for i, filePath := range testWritePaths {
		name := fmt.Sprintf("writeprocsieve-%d", i)

		// Open fd 3 for writing on the target path; expected to be denied.
		shellCmd := fmt.Sprintf("exec 3>%s", filePath)
		out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
		if code != 0 {
			return
		}
		if err != nil {
			c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err)
		}
	}
}

// TestRunNetworkFilesBindMount checks that the network config files can each be
// shadowed by a user bind mount and the container sees the mounted content.
func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	expected := "test123"

	filename := createTmpFile(c, expected)
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	for i := range nwfiles {
		actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i])
		if actual != expected {
			c.Fatalf("expected %s be: %q, but was: %q", nwfiles[i], expected, actual)
		}
	}
}

// TestRunNetworkFilesBindMountRO checks that network config files bind-mounted
// with :ro reject writes from inside the container.
func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	filename := createTmpFile(c, "test123")
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	for i := range nwfiles {
		_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i])
		if err == nil || exitCode == 0 {
			c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode)
		}
	}
}

// TestRunNetworkFilesBindMountROFilesystem checks the interaction of bind-mounted
// network files with --read-only: rw mounts stay writable on a read-only rootfs,
// :ro mounts stay read-only.
func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	// --read-only + userns has remount issues
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	filename := createTmpFile(c, "test123")
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	// Writable bind mounts must remain writable despite the read-only rootfs.
	for i := range nwfiles {
		_, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i])
		if exitCode != 0 {
			c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
		}
	}

	// :ro bind mounts must remain read-only.
	for i := range nwfiles {
		_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i])
		if err == nil || exitCode == 0 {
			c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
		}
	}
}

// TestTrustedRun pulls and runs a signed image with content trust enabled, then
// confirms the tag also works with trust disabled (i.e. it was really pushed).
func (s *DockerTrustSuite) TestTrustedRun(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := s.setupTrustedImage(c, "trusted-run")

	// Try run
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err := runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s\n", err, out)
	}

	if !strings.Contains(string(out), "Tagging") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Try untrusted run to ensure we pushed the tag to the registry
	runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "Status: Downloaded") {
		c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out)
	}
}

// TestUntrustedRun verifies that a trusted run of a tag that was pushed without
// trust data fails with the expected error.
func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL)
	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)
	dockerCmd(c, "push", repoName)
	dockerCmd(c, "rmi", repoName)

	// Try trusted run on untrusted tag
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err := runCommandWithOutput(runCmd)
	if err == nil {
		c.Fatalf("Error expected when running trusted run with:\n%s", out)
	}

	if !strings.Contains(string(out), "does not have trust data for") {
		c.Fatalf("Missing expected output on trusted run:\n%s", out)
	}
}

// TestRunWhenCertExpired checks trust behavior once the signing certificates
// have expired: trusted run must fail, --disable-content-trust must still work.
func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	c.Skip("Currently changes system time, causing instability")
	repoName := s.setupTrustedImage(c, "trusted-run-expired")

	// Certificates have 10 years of expiration
	elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11)

	runAtDifferentDate(elevenYearsFromNow, func() {
		// Try run
		runCmd := exec.Command(dockerBinary, "run", repoName)
		s.trustedCmd(runCmd)
		out, _, err := runCommandWithOutput(runCmd)
		if err == nil {
			c.Fatalf("Error running trusted run in the distant future: %s\n%s", err, out)
		}

		if !strings.Contains(string(out), "could not validate the path to a trusted root") {
			c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out)
		}
	})

	runAtDifferentDate(elevenYearsFromNow, func() {
		// Try run
		runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName)
		s.trustedCmd(runCmd)
		out, _, err := runCommandWithOutput(runCmd)
		if err != nil {
			c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out)
		}

		if !strings.Contains(string(out), "Status: Downloaded") {
			c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out)
		}
	})
}

// TestTrustedRunFromBadTrustServer verifies that a client with existing trust
// data rejects metadata served by a replaced ("evil") notary server.
func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL)
	evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir")
	if err != nil {
		c.Fatalf("Failed to create local temp dir")
	}

	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)

	pushCmd := exec.Command(dockerBinary, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err := runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("Error running trusted push: %s\n%s", err, out)
	}
	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Try run
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "Tagging") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Kill the notary server, start a new "evil" one.
	s.not.Close()
	s.not, err = newTestNotary(c)
	if err != nil {
		c.Fatalf("Restarting notary server failed.")
	}

	// In order to make an evil server, lets re-init a client (with a different trust dir) and push new data.
	// tag an image and upload it to the private registry
	dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName)

	// Push up to the new server
	pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err = runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("Error running trusted push: %s\n%s", err, out)
	}
	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	// Now, try running with the original client from this new trust server. This should fail.
	runCmd = exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)
	if err == nil {
		c.Fatalf("Expected to fail on this run due to different remote data: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "valid signatures did not meet threshold") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}
}

// TestPtraceContainerProcsFromHost checks that the host can read a container
// process's /proc/<pid>/ns/net link (i.e. container procs are visible to host).
func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, SameHostDaemon)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id := strings.TrimSpace(out)
	c.Assert(waitRun(id), check.IsNil)
	pid1, err := inspectField(id, "State.Pid")
	c.Assert(err, check.IsNil)

	_, err = os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
	if err != nil {
		c.Fatal(err)
	}
}

// TestAppArmorDeniesPtrace verifies that AppArmor blocks a non-pid-1 process
// from inspecting pid 1's namespaces.
func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotGCCGO)

	// Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace
	// itself, but pid>1 should not be able to trace pid1.
	_, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net")
	if exitCode == 0 {
		c.Fatal("ptrace was not successfully restricted by AppArmor")
	}
}

// TestAppArmorTraceSelf verifies that pid 1 can still inspect its own
// namespace links under AppArmor.
func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor)

	_, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net")
	if exitCode != 0 {
		c.Fatal("ptrace of self failed.")
	}
}

// TestAppArmorDeniesChmodProc verifies that chmod of /proc files is blocked;
// if the first chmod unexpectedly succeeds, it attempts to restore the host.
func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace)
	_, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo")
	if exitCode == 0 {
		// If our test failed, attempt to repair the host system...
		_, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo")
		if exitCode == 0 {
			c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.")
		}
	}
}

// TestRunCapAddSYSTIME checks that dropping all caps and re-adding SYS_TIME
// yields exactly the SYS_TIME effective capability bit.
func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$")
}

// run create container failed should clean up the container
func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) {
	// TODO Windows. This may be possible to enable once link is supported
	testRequires(c, DaemonIsLinux)
	name := "unique_name"
	_, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox")
	c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!"))

	containerID, err := inspectField(name, "Id")
	c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID))
}

// TestRunNamedVolume checks that a named volume persists data across
// containers, both via --volumes-from and by re-mounting the same name.
func (s *DockerSuite) TestRunNamedVolume(c *check.C) {
	prefix := ""
	slash := `/`
	if daemonPlatform == "windows" {
		prefix = "c:"
		slash = `\`
	}
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+slash+"foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar")

	out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
	c.Assert(strings.TrimSpace(out), check.Equals, "hello")

	out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+slash+"foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
	c.Assert(strings.TrimSpace(out), check.Equals, "hello")
}

// TestRunWithUlimits checks that --ulimit nofile=42 is applied in-container.
func (s *DockerSuite) TestRunWithUlimits(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n")
	ul := strings.TrimSpace(out)
	if ul != "42" {
		c.Fatalf("expected `ulimit -n` to be 42, got %s", ul)
	}
}

// TestRunContainerWithCgroupParent checks that a relative --cgroup-parent shows
// up as a suffix of the container's cgroup paths.
func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	cgroupParent := "test"
	name := "cgroup-test"

	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}
	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", string(out))
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	expectedCgroup := path.Join(cgroupParent, id)
	found := false
	for _, path := range cgroupPaths {
		if strings.HasSuffix(path, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}

// TestRunContainerWithCgroupParentAbsPath is the absolute-path variant of
// TestRunContainerWithCgroupParent.
func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	cgroupParent := "/cgroup-parent/test"
	name := "cgroup-test"
	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}
	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", string(out))
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	expectedCgroup := path.Join(cgroupParent, id)
	found := false
	for _, path := range cgroupPaths {
		if strings.HasSuffix(path, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}

// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /.
func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST" cleanCgroupParent := "SHOULD_NOT_EXIST" name := "cgroup-invalid-test" out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") if err != nil { // XXX: This may include a daemon crash. c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") } cgroupPaths := parseCgroupPaths(string(out)) if len(cgroupPaths) == 0 { c.Fatalf("unexpected output - %q", string(out)) } id, err := getIDByName(name) c.Assert(err, check.IsNil) expectedCgroup := path.Join(cleanCgroupParent, id) found := false for _, path := range cgroupPaths { if strings.HasSuffix(path, expectedCgroup) { found = true break } } if !found { c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) } } // TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST" cleanCgroupParent := "/SHOULD_NOT_EXIST" name := "cgroup-absolute-invalid-test" out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") if err != nil { // XXX: This may include a daemon crash. 
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!") } cgroupPaths := parseCgroupPaths(string(out)) if len(cgroupPaths) == 0 { c.Fatalf("unexpected output - %q", string(out)) } id, err := getIDByName(name) c.Assert(err, check.IsNil) expectedCgroup := path.Join(cleanCgroupParent, id) found := false for _, path := range cgroupPaths { if strings.HasSuffix(path, expectedCgroup) { found = true break } } if !found { c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) } } func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { // Not applicable on Windows as uses Unix specific functionality // --read-only + userns has remount issues testRequires(c, DaemonIsLinux, NotUserNamespace) filename := "/sys/fs/cgroup/devices/test123" out, _, err := dockerCmdWithError("run", "busybox", "touch", filename) if err == nil { c.Fatal("expected cgroup mount point to be read-only, touch file should fail") } expected := "Read-only file system" if !strings.Contains(out, expected) { c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) } } func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) { // Not applicable on Windows which does not support --net=container testRequires(c, DaemonIsLinux, NotUserNamespace) out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true") if err == nil || !strings.Contains(out, "cannot join own network") { c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out) } } func (s *DockerSuite) 
TestRunContainerNetModeWithDnsMacHosts(c *check.C) { // Not applicable on Windows which does not support --net=container testRequires(c, DaemonIsLinux, NotUserNamespace) out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top") if err != nil { c.Fatalf("failed to run container: %v, output: %q", err, out) } out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox") if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) { c.Fatalf("run --net=container with --dns should error out") } out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox") if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) { c.Fatalf("run --net=container with --mac-address should error out") } out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox") if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) { c.Fatalf("run --net=container with --add-host should error out") } } func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) { // Not applicable on Windows which does not support --net=container testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox") if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { c.Fatalf("run --net=container with -p should error out") } out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox") if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { c.Fatalf("run --net=container with -P should error out") } out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox") if err == nil || 
!strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) { c.Fatalf("run --net=container with --expose should error out") } } func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { // Not applicable on Windows which does not support --net=container or --link testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top") dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top") dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top") } func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { // TODO Windows: This may be possible to convert. testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") var ( count = 0 parts = strings.Split(out, "\n") ) for _, l := range parts { if l != "" { count++ } } if count != 1 { c.Fatalf("Wrong interface count in container %d", count) } if !strings.HasPrefix(out, "1: lo") { c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) } } // Issue #4681 func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { if daemonPlatform == "windows" { dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") } else { dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") } } func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { // Windows does not support --net=container testRequires(c, DaemonIsLinux, ExecSupport, NotUserNamespace) dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top") out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname") out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") if out1 != out { c.Fatal("containers with 
shared net namespace should have same hostname") } } func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { // TODO Windows: Network settings are not currently propagated. This may // be resolved in the future with the move to libnetwork and CNM. testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") id := strings.TrimSpace(out) res, err := inspectField(id, "NetworkSettings.Networks.none.IPAddress") c.Assert(err, check.IsNil) if res != "" { c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) } } func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) { // Not applicable as Windows does not support --net=host testRequires(c, DaemonIsLinux, NotUserNamespace, NotUserNamespace) dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top") dockerCmd(c, "stop", "first") dockerCmd(c, "stop", "second") } func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork") dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first") } func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) // Create 2 networks using bridge driver dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") // Run and connect containers to testnetwork1 dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // 
Check connectivity between containers in testnetwork2 dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") // Connect containers to testnetwork2 dockerCmd(c, "network", "connect", "testnetwork2", "first") dockerCmd(c, "network", "connect", "testnetwork2", "second") // Check connectivity between containers dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") } func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) // Create 2 networks using bridge driver dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") // Run 1 container in testnetwork1 and another in testnetwork2 dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // Check Isolation between containers : ping must fail _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") c.Assert(err, check.NotNil) // Connect first container to testnetwork2 dockerCmd(c, "network", "connect", "testnetwork2", "first") // ping must succeed now _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") c.Assert(err, check.IsNil) // Disconnect first container from testnetwork2 dockerCmd(c, "network", "disconnect", "testnetwork2", "first") // ping must fail again _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") c.Assert(err, check.NotNil) } func (s *DockerSuite) TestNetworkRmWithActiveContainers(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) // Create 2 networks using bridge driver dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") // Run and connect containers to testnetwork1 dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") 
c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // Network delete with active containers must fail _, _, err := dockerCmdWithError("network", "rm", "testnetwork1") c.Assert(err, check.NotNil) dockerCmd(c, "stop", "first") _, _, err = dockerCmdWithError("network", "rm", "testnetwork1") c.Assert(err, check.NotNil) } func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) // Create 2 networks using bridge driver dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") // Run and connect containers to testnetwork1 dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // Check connectivity between containers in testnetwork2 dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") // Connect containers to testnetwork2 dockerCmd(c, "network", "connect", "testnetwork2", "first") dockerCmd(c, "network", "connect", "testnetwork2", "second") // Check connectivity between containers dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") // Stop second container and test ping failures on both networks dockerCmd(c, "stop", "second") _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1") c.Assert(err, check.NotNil) _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2") c.Assert(err, check.NotNil) // Start second container and connectivity must be restored on both networks dockerCmd(c, "start", "second") dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") dockerCmd(c, "exec", "second", "ping", "-c", "1", 
"first.testnetwork2") } func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) // Run a container with --net=host dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) // Create a network using bridge driver dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") // Connecting to the user defined network must fail _, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") c.Assert(err, check.NotNil) } func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "-d", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) // Run second container in first container's network namespace dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // Create a network using bridge driver dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") // Connecting to the user defined network must fail out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second") c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error()) } func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top") c.Assert(waitRun("first"), check.IsNil) // Create a network using bridge driver dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") // Connecting to the user defined network must fail out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error()) // create a container connected to testnetwork1 dockerCmd(c, "run", "-d", 
"--net=testnetwork1", "--name=second", "busybox", "top") c.Assert(waitRun("second"), check.IsNil) // Connect second container to none network. it must fail as well _, _, err = dockerCmdWithError("network", "connect", "none", "second") c.Assert(err, check.NotNil) } // #11957 - stdin with no tty does not exit if stdin is not closed even though container exited func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true") in, err := cmd.StdinPipe() c.Assert(err, check.IsNil) defer in.Close() c.Assert(cmd.Start(), check.IsNil) waitChan := make(chan error) go func() { waitChan <- cmd.Wait() }() select { case err := <-waitChan: c.Assert(err, check.IsNil) case <-time.After(30 * time.Second): c.Fatal("timeout waiting for command to exit") } } func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) { // TODO Windows: This needs validation (error out) in the daemon. testRequires(c, DaemonIsLinux) out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true") c.Assert(err, check.NotNil) expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n" if !(strings.Contains(out, expected) || exitCode == 125) { c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) } } func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) { // TODO Windows: This needs validation (error out) in the daemon. 
testRequires(c, DaemonIsLinux) out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true") c.Assert(err, check.NotNil) expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n" if !(strings.Contains(out, expected) || exitCode == 125) { c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) } } // TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127' func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) { name := "testNonExecutableCmd" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo") _, exit, _ := runCommandWithOutput(runCmd) stateExitCode := findContainerExitCode(c, name) if !(exit == 127 && strings.Contains(stateExitCode, "127")) { c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) } } // TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127. func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) { name := "testNonExistingCmd" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo") _, exit, _ := runCommandWithOutput(runCmd) stateExitCode := findContainerExitCode(c, name) if !(exit == 127 && strings.Contains(stateExitCode, "127")) { c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) } } // TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or // 127 on Windows. The difference is that in Windows, the container must be started // as that's when the check is made (and yes, by it's design...) 
func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) { expected := 126 if daemonPlatform == "windows" { expected = 127 } name := "testCmdCannotBeInvoked" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc") _, exit, _ := runCommandWithOutput(runCmd) stateExitCode := findContainerExitCode(c, name) if !(exit == expected && strings.Contains(stateExitCode, strconv.Itoa(expected))) { c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exit, stateExitCode) } } // TestRunNonExistingImage checks that 'docker run foo' exits with error msg 125 and contains 'Unable to find image' func (s *DockerSuite) TestRunNonExistingImage(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "foo") out, exit, err := runCommandWithOutput(runCmd) if !(err != nil && exit == 125 && strings.Contains(out, "Unable to find image")) { c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exit, err) } } // TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed func (s *DockerSuite) TestDockerFails(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-foo", "busybox") out, exit, err := runCommandWithOutput(runCmd) if !(err != nil && exit == 125) { c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err) } } // TestRunInvalidReference invokes docker run with a bad reference. 
func (s *DockerSuite) TestRunInvalidReference(c *check.C) { out, exit, _ := dockerCmdWithError("run", "busybox@foo") if exit == 0 { c.Fatalf("expected non-zero exist code; received %d", exit) } if !strings.Contains(out, "Error parsing reference") { c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out) } } // Test fix for issue #17854 func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) { // Not applicable on Windows as it does not support Linux uid/gid ownership testRequires(c, DaemonIsLinux) name := "testetcfileownership" _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN chown dockerio:dockerio /etc`, true) if err != nil { c.Fatal(err) } // Test that dockerio ownership of /etc is retained at runtime out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc") out = strings.TrimSpace(out) if out != "dockerio:dockerio" { c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out) } } func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) { testRequires(c, DaemonIsLinux) expected := "642" out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj") oomScoreAdj := strings.TrimSpace(out) if oomScoreAdj != "642" { c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj) } } func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) { testRequires(c, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true") c.Assert(err, check.NotNil) expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]." 
if !strings.Contains(out, expected) { c.Fatalf("Expected output to contain %q, got %q instead", expected, out) } out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true") c.Assert(err, check.NotNil) expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." if !strings.Contains(out, expected) { c.Fatalf("Expected output to contain %q, got %q instead", expected, out) } } func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { // Volume propagation is linux only. Also it creates directories for // bind mounting, so needs to be same host. testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) // Prepare a source directory to bind mount tmpDir, err := ioutil.TempDir("", "volume-source") if err != nil { c.Fatal(err) } defer os.RemoveAll(tmpDir) if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { c.Fatal(err) } // Convert this directory into a shared mount point so that we do // not rely on propagation properties of parent mount. cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") // Make sure a bind mount under a shared volume propagated to host. if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted { c.Fatalf("Bind mount under shared volume did not propagate to host") } mount.Unmount(path.Join(tmpDir, "mnt1")) } func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { // Volume propagation is linux only. Also it creates directories for // bind mounting, so needs to be same host. 
testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) // Prepare a source directory to bind mount tmpDir, err := ioutil.TempDir("", "volume-source") if err != nil { c.Fatal(err) } defer os.RemoveAll(tmpDir) if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { c.Fatal(err) } // Prepare a source directory with file in it. We will bind mount this // direcotry and see if file shows up. tmpDir2, err := ioutil.TempDir("", "volume-source2") if err != nil { c.Fatal(err) } defer os.RemoveAll(tmpDir2) if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil { c.Fatal(err) } // Convert this directory into a shared mount point so that we do // not rely on propagation properties of parent mount. cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") // Bind mount tmpDir2/ onto tmpDir/mnt1. 
If mount propagates inside // container then contents of tmpDir2/slave-testfile should become // visible at "/volume-dest/mnt1/slave-testfile" cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")) if _, err = runCommand(cmd); err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") mount.Unmount(path.Join(tmpDir, "mnt1")) if out != "Test" { c.Fatalf("Bind mount under slave volume did not propagate to container") } } func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, exitcode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile") if exitcode == 0 { c.Fatalf("expected non-zero exit code; received %d", exitcode) } if expected := "Invalid volume specification"; !strings.Contains(out, expected) { c.Fatalf(`Expected %q in output; got: %s`, expected, out) } } func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { testRequires(c, DaemonIsLinux) testImg := "testvolumecopy" _, err := buildImage(testImg, ` FROM busybox RUN mkdir -p /foo && echo hello > /foo/hello `, true) c.Assert(err, check.IsNil) dockerCmd(c, "run", "-v", "foo:/foo", testImg) out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") c.Assert(strings.TrimSpace(out), check.Equals, "hello") } func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = "c:" } dockerCmd(c, "volume", "create", "--name", "test") dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "volume", "inspect", "test") out, _ := dockerCmd(c, "volume", "ls", "-q") c.Assert(strings.TrimSpace(out), checker.Equals, "test") dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "rm", "-fv", "test") dockerCmd(c, "volume", "inspect", "test") out, _ = 
dockerCmd(c, "volume", "ls", "-q") c.Assert(strings.TrimSpace(out), checker.Equals, "test") } func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = "c:" } dockerCmd(c, "volume", "create", "--name", "test") dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") // Remove the parent so there are not other references to the volumes dockerCmd(c, "rm", "-f", "parent") // now remove the child and ensure the named volume (and only the named volume) still exists dockerCmd(c, "rm", "-fv", "child") dockerCmd(c, "volume", "inspect", "test") out, _ := dockerCmd(c, "volume", "ls", "-q") c.Assert(strings.TrimSpace(out), checker.Equals, "test") } docker-1.10.3/integration-cli/docker_cli_run_unix_test.go000066400000000000000000000762441267010174400235650ustar00rootroot00000000000000// +build !windows package main import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "strconv" "strings" "time" "github.com/docker/docker/pkg/homedir" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/sysinfo" "github.com/go-check/check" "github.com/kr/pty" ) // #6509 func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { checkRedirect := func(command string) { _, tty, err := pty.Open() c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) cmd := exec.Command("sh", "-c", command) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty c.Assert(cmd.Start(), checker.IsNil) ch := make(chan error) go func() { ch <- cmd.Wait() close(ch) }() select { case <-time.After(10 * time.Second): c.Fatal("command timeout") case err := <-ch: c.Assert(err, checker.IsNil, check.Commentf("wait err")) } } checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") 
checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") } // Test recursive bind mount works by default func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { // /tmp gets permission denied testRequires(c, NotUserNamespace) tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir) // Create a temporary tmpfs mount. tmpfsDir := filepath.Join(tmpDir, "tmpfs") c.Assert(os.MkdirAll(tmpfsDir, 0777), checker.IsNil, check.Commentf("failed to mkdir at %s", tmpfsDir)) c.Assert(mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""), checker.IsNil, check.Commentf("failed to create a tmpfs mount at %s", tmpfsDir)) f, err := ioutil.TempFile(tmpfsDir, "touch-me") c.Assert(err, checker.IsNil) defer f.Close() runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") out, _, _, err := runCommandWithStdoutStderr(runCmd) c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, filepath.Base(f.Name()), check.Commentf("Recursive bind mount test failed. Expected file not found")) } func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) if _, err := os.Stat("/dev/snd"); err != nil { c.Skip("Host does not have /dev/snd") } out, _ := dockerCmd(c, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "timer", check.Commentf("expected output /dev/snd/timer")) out, _ = dockerCmd(c, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "seq", check.Commentf("expected output /dev/othersnd/seq")) } // TestRunDetach checks attaching and detaching with the default escape sequence. 
func (s *DockerSuite) TestRunAttachDetach(c *check.C) { name := "attach-detach" dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") cmd := exec.Command(dockerBinary, "attach", name) stdout, err := cmd.StdoutPipe() c.Assert(err, checker.IsNil) cpty, tty, err := pty.Open() c.Assert(err, checker.IsNil) defer cpty.Close() cmd.Stdin = tty c.Assert(cmd.Start(), checker.IsNil) c.Assert(waitRun(name), check.IsNil) _, err = cpty.Write([]byte("hello\n")) c.Assert(err, checker.IsNil) out, err := bufio.NewReader(stdout).ReadString('\n') c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, "hello") // escape sequence _, err = cpty.Write([]byte{16}) c.Assert(err, checker.IsNil) time.Sleep(100 * time.Millisecond) _, err = cpty.Write([]byte{17}) c.Assert(err, checker.IsNil) ch := make(chan struct{}) go func() { cmd.Wait() ch <- struct{}{} }() select { case <-ch: case <-time.After(10 * time.Second): c.Fatal("timed out waiting for container to exit") } running, err := inspectField(name, "State.Running") c.Assert(err, checker.IsNil) c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) } // TestRunDetach checks attaching and detaching with the escape sequence specified via flags. 
func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) {
	name := "attach-detach"
	keyCtrlA := []byte{1}  // byte value of ctrl-a
	keyA := []byte{97}     // ASCII 'a'
	// Start a detached container with a TTY so it can be attached to.
	dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat")

	// NOTE(review): exec.Command does no shell parsing, so the single quotes in
	// "--detach-keys='ctrl-a,a'" are passed literally to the flag — verify the
	// CLI strips/tolerates them as intended.
	cmd := exec.Command(dockerBinary, "attach", "--detach-keys='ctrl-a,a'", name)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		c.Fatal(err)
	}
	// A pseudo-terminal is required for the escape sequence to be interpreted.
	cpty, tty, err := pty.Open()
	if err != nil {
		c.Fatal(err)
	}
	defer cpty.Close()
	cmd.Stdin = tty
	if err := cmd.Start(); err != nil {
		c.Fatal(err)
	}
	c.Assert(waitRun(name), check.IsNil)

	// Round-trip one line through the attached `cat` to prove the attach works.
	if _, err := cpty.Write([]byte("hello\n")); err != nil {
		c.Fatal(err)
	}

	out, err := bufio.NewReader(stdout).ReadString('\n')
	if err != nil {
		c.Fatal(err)
	}
	if strings.TrimSpace(out) != "hello" {
		c.Fatalf("expected 'hello', got %q", out)
	}

	// escape sequence configured via the flag: ctrl-a followed by 'a'
	if _, err := cpty.Write(keyCtrlA); err != nil {
		c.Fatal(err)
	}
	time.Sleep(100 * time.Millisecond)
	if _, err := cpty.Write(keyA); err != nil {
		c.Fatal(err)
	}

	// The attach process must exit once the detach sequence is seen.
	ch := make(chan struct{})
	go func() {
		cmd.Wait()
		ch <- struct{}{}
	}()

	select {
	case <-ch:
	case <-time.After(10 * time.Second):
		c.Fatal("timed out waiting for container to exit")
	}

	// Detaching must leave the container running.
	running, err := inspectField(name, "State.Running")
	c.Assert(err, checker.IsNil)
	c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running"))
}

// TestRunAttachDetachFromConfig checks attaching and detaching with the escape sequence specified via config file.
func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { keyCtrlA := []byte{1} keyA := []byte{97} // Setup config homeKey := homedir.Key() homeVal := homedir.Get() tmpDir, err := ioutil.TempDir("", "fake-home") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir) dotDocker := filepath.Join(tmpDir, ".docker") os.Mkdir(dotDocker, 0600) tmpCfg := filepath.Join(dotDocker, "config.json") defer func() { os.Setenv(homeKey, homeVal) }() os.Setenv(homeKey, tmpDir) data := `{ "detachKeys": "ctrl-a,a" }` err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) c.Assert(err, checker.IsNil) // Then do the work name := "attach-detach" dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") cmd := exec.Command(dockerBinary, "attach", name) stdout, err := cmd.StdoutPipe() if err != nil { c.Fatal(err) } cpty, tty, err := pty.Open() if err != nil { c.Fatal(err) } defer cpty.Close() cmd.Stdin = tty if err := cmd.Start(); err != nil { c.Fatal(err) } c.Assert(waitRun(name), check.IsNil) if _, err := cpty.Write([]byte("hello\n")); err != nil { c.Fatal(err) } out, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { c.Fatal(err) } if strings.TrimSpace(out) != "hello" { c.Fatalf("expected 'hello', got %q", out) } // escape sequence if _, err := cpty.Write(keyCtrlA); err != nil { c.Fatal(err) } time.Sleep(100 * time.Millisecond) if _, err := cpty.Write(keyA); err != nil { c.Fatal(err) } ch := make(chan struct{}) go func() { cmd.Wait() ch <- struct{}{} }() select { case <-ch: case <-time.After(10 * time.Second): c.Fatal("timed out waiting for container to exit") } running, err := inspectField(name, "State.Running") c.Assert(err, checker.IsNil) c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) } // TestRunDetach checks attaching and detaching with the detach flags, making sure it overrides config file func (s *DockerSuite) TestRunAttachDetachKeysOverrideConfig(c *check.C) { keyCtrlA := []byte{1} keyA := []byte{97} // 
Setup config homeKey := homedir.Key() homeVal := homedir.Get() tmpDir, err := ioutil.TempDir("", "fake-home") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir) dotDocker := filepath.Join(tmpDir, ".docker") os.Mkdir(dotDocker, 0600) tmpCfg := filepath.Join(dotDocker, "config.json") defer func() { os.Setenv(homeKey, homeVal) }() os.Setenv(homeKey, tmpDir) data := `{ "detachKeys": "ctrl-e,e" }` err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) c.Assert(err, checker.IsNil) // Then do the work name := "attach-detach" dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") cmd := exec.Command(dockerBinary, "attach", "--detach-keys='ctrl-a,a'", name) stdout, err := cmd.StdoutPipe() if err != nil { c.Fatal(err) } cpty, tty, err := pty.Open() if err != nil { c.Fatal(err) } defer cpty.Close() cmd.Stdin = tty if err := cmd.Start(); err != nil { c.Fatal(err) } c.Assert(waitRun(name), check.IsNil) if _, err := cpty.Write([]byte("hello\n")); err != nil { c.Fatal(err) } out, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { c.Fatal(err) } if strings.TrimSpace(out) != "hello" { c.Fatalf("expected 'hello', got %q", out) } // escape sequence if _, err := cpty.Write(keyCtrlA); err != nil { c.Fatal(err) } time.Sleep(100 * time.Millisecond) if _, err := cpty.Write(keyA); err != nil { c.Fatal(err) } ch := make(chan struct{}) go func() { cmd.Wait() ch <- struct{}{} }() select { case <-ch: case <-time.After(10 * time.Second): c.Fatal("timed out waiting for container to exit") } running, err := inspectField(name, "State.Running") c.Assert(err, checker.IsNil) c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) } // "test" should be printed func (s *DockerSuite) TestRunWithCPUQuota(c *check.C) { testRequires(c, cpuCfsQuota) file := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" out, _ := dockerCmd(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "8000") 
out, err := inspectField("test", "HostConfig.CpuQuota") c.Assert(err, check.IsNil) c.Assert(out, checker.Equals, "8000", check.Commentf("setting the CPU CFS quota failed")) } func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) { testRequires(c, cpuCfsPeriod) file := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" out, _ := dockerCmd(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "50000") out, err := inspectField("test", "HostConfig.CpuPeriod") c.Assert(err, check.IsNil) c.Assert(out, checker.Equals, "50000", check.Commentf("setting the CPU CFS period failed")) } func (s *DockerSuite) TestRunWithKernelMemory(c *check.C) { testRequires(c, kernelMemorySupport) file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file) c.Assert(strings.TrimSpace(stdout), checker.Equals, "52428800") out, err := inspectField("test1", "HostConfig.KernelMemory") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "52428800") } func (s *DockerSuite) TestRunWithInvalidKernelMemory(c *check.C) { testRequires(c, kernelMemorySupport) out, _, err := dockerCmdWithError("run", "--kernel-memory", "2M", "busybox", "true") c.Assert(err, check.NotNil) expected := "Minimum kernel memory limit allowed is 4MB" c.Assert(out, checker.Contains, expected) out, _, err = dockerCmdWithError("run", "--kernel-memory", "-16m", "--name", "test2", "busybox", "echo", "test") c.Assert(err, check.NotNil) expected = "invalid size" c.Assert(out, checker.Contains, expected) } func (s *DockerSuite) TestRunWithCPUShares(c *check.C) { testRequires(c, cpuShare) file := "/sys/fs/cgroup/cpu/cpu.shares" out, _ := dockerCmd(c, "run", "--cpu-shares", "1000", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "1000") out, err := inspectField("test", "HostConfig.CPUShares") c.Assert(err, check.IsNil) 
c.Assert(out, check.Equals, "1000") } // "test" should be printed func (s *DockerSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *check.C) { testRequires(c, cpuShare) testRequires(c, memoryLimitSupport) out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test") c.Assert(out, checker.Equals, "test\n", check.Commentf("container should've printed 'test'")) } func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { testRequires(c, cgroupCpuset) file := "/sys/fs/cgroup/cpuset/cpuset.cpus" out, _ := dockerCmd(c, "run", "--cpuset-cpus", "0", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "0") out, err := inspectField("test", "HostConfig.CpusetCpus") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "0") } func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) { testRequires(c, cgroupCpuset) file := "/sys/fs/cgroup/cpuset/cpuset.mems" out, _ := dockerCmd(c, "run", "--cpuset-mems", "0", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "0") out, err := inspectField("test", "HostConfig.CpusetMems") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "0") } func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) { testRequires(c, blkioWeight) file := "/sys/fs/cgroup/blkio/blkio.weight" out, _ := dockerCmd(c, "run", "--blkio-weight", "300", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "300") out, err := inspectField("test", "HostConfig.BlkioWeight") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "300") } func (s *DockerSuite) TestRunWithInvalidBlkioWeight(c *check.C) { testRequires(c, blkioWeight) out, _, err := dockerCmdWithError("run", "--blkio-weight", "5", "busybox", "true") c.Assert(err, check.NotNil, check.Commentf(out)) expected := "Range of blkio weight is from 10 to 1000" c.Assert(out, checker.Contains, expected) } func (s *DockerSuite) 
TestRunWithInvalidPathforBlkioWeightDevice(c *check.C) { testRequires(c, blkioWeight) out, _, err := dockerCmdWithError("run", "--blkio-weight-device", "/dev/sdX:100", "busybox", "true") c.Assert(err, check.NotNil, check.Commentf(out)) } func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadBps(c *check.C) { testRequires(c, blkioWeight) out, _, err := dockerCmdWithError("run", "--device-read-bps", "/dev/sdX:500", "busybox", "true") c.Assert(err, check.NotNil, check.Commentf(out)) } func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteBps(c *check.C) { testRequires(c, blkioWeight) out, _, err := dockerCmdWithError("run", "--device-write-bps", "/dev/sdX:500", "busybox", "true") c.Assert(err, check.NotNil, check.Commentf(out)) } func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadIOps(c *check.C) { testRequires(c, blkioWeight) out, _, err := dockerCmdWithError("run", "--device-read-iops", "/dev/sdX:500", "busybox", "true") c.Assert(err, check.NotNil, check.Commentf(out)) } func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) { testRequires(c, blkioWeight) out, _, err := dockerCmdWithError("run", "--device-write-iops", "/dev/sdX:500", "busybox", "true") c.Assert(err, check.NotNil, check.Commentf(out)) } func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { testRequires(c, oomControl) errChan := make(chan error) go func() { defer close(errChan) //changing memory to 40MB from 4MB due to an issue with GCCGO that test fails to start the container. 
out, exitCode, _ := dockerCmdWithError("run", "-m", "40MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") if expected := 137; exitCode != expected { errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) } }() select { case err := <-errChan: c.Assert(err, check.IsNil) case <-time.After(600 * time.Second): c.Fatal("Timeout waiting for container to die on OOM") } } func (s *DockerSuite) TestRunWithMemoryLimit(c *check.C) { testRequires(c, memoryLimitSupport) file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(stdout), checker.Equals, "33554432") out, err := inspectField("test", "HostConfig.Memory") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "33554432") } // TestRunWithoutMemoryswapLimit sets memory limit and disables swap // memory limit, this means the processes in the container can use // 16M memory and as much swap memory as they need (if the host // supports swap memory). 
func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) testRequires(c, swapMemorySupport) dockerCmd(c, "run", "-m", "32m", "--memory-swap", "-1", "busybox", "true") } func (s *DockerSuite) TestRunWithSwappiness(c *check.C) { testRequires(c, memorySwappinessSupport) file := "/sys/fs/cgroup/memory/memory.swappiness" out, _ := dockerCmd(c, "run", "--memory-swappiness", "0", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "0") out, err := inspectField("test", "HostConfig.MemorySwappiness") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "0") } func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) { testRequires(c, memorySwappinessSupport) out, _, err := dockerCmdWithError("run", "--memory-swappiness", "101", "busybox", "true") c.Assert(err, check.NotNil) expected := "Valid memory swappiness range is 0-100" c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", out, expected)) out, _, err = dockerCmdWithError("run", "--memory-swappiness", "-10", "busybox", "true") c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", out, expected)) } func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { testRequires(c, memoryReservationSupport) file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes" out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "209715200") out, err := inspectField("test", "HostConfig.MemoryReservation") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "209715200") } func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) { testRequires(c, memoryLimitSupport) testRequires(c, memoryReservationSupport) out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", 
"800M", "busybox", "true") c.Assert(err, check.NotNil) expected := "Minimum memory limit should be larger than memory reservation limit" c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) } func (s *DockerSuite) TestStopContainerSignal(c *check.C) { out, _ := dockerCmd(c, "run", "--stop-signal", "SIGUSR1", "-d", "busybox", "/bin/sh", "-c", `trap 'echo "exit trapped"; exit 0' USR1; while true; do sleep 1; done`) containerID := strings.TrimSpace(out) c.Assert(waitRun(containerID), checker.IsNil) dockerCmd(c, "stop", containerID) out, _ = dockerCmd(c, "logs", containerID) c.Assert(out, checker.Contains, "exit trapped", check.Commentf("Expected `exit trapped` in the log")) } func (s *DockerSuite) TestRunSwapLessThanMemoryLimit(c *check.C) { testRequires(c, memoryLimitSupport) testRequires(c, swapMemorySupport) out, _, err := dockerCmdWithError("run", "-m", "16m", "--memory-swap", "15m", "busybox", "echo", "test") expected := "Minimum memoryswap limit should be larger than memory limit" c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, expected) } func (s *DockerSuite) TestRunInvalidCpusetCpusFlagValue(c *check.C) { testRequires(c, cgroupCpuset) sysInfo := sysinfo.New(true) cpus, err := parsers.ParseUintList(sysInfo.Cpus) c.Assert(err, check.IsNil) var invalid int for i := 0; i <= len(cpus)+1; i++ { if !cpus[i] { invalid = i break } } out, _, err := dockerCmdWithError("run", "--cpuset-cpus", strconv.Itoa(invalid), "busybox", "true") c.Assert(err, check.NotNil) expected := fmt.Sprintf("Error response from daemon: Requested CPUs are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Cpus) c.Assert(out, checker.Contains, expected) } func (s *DockerSuite) TestRunInvalidCpusetMemsFlagValue(c *check.C) { testRequires(c, cgroupCpuset) sysInfo := sysinfo.New(true) mems, err := parsers.ParseUintList(sysInfo.Mems) c.Assert(err, check.IsNil) var invalid int 
for i := 0; i <= len(mems)+1; i++ { if !mems[i] { invalid = i break } } out, _, err := dockerCmdWithError("run", "--cpuset-mems", strconv.Itoa(invalid), "busybox", "true") c.Assert(err, check.NotNil) expected := fmt.Sprintf("Error response from daemon: Requested memory nodes are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Mems) c.Assert(out, checker.Contains, expected) } func (s *DockerSuite) TestRunInvalidCPUShares(c *check.C) { testRequires(c, cpuShare, DaemonIsLinux) out, _, err := dockerCmdWithError("run", "--cpu-shares", "1", "busybox", "echo", "test") c.Assert(err, check.NotNil, check.Commentf(out)) expected := "The minimum allowed cpu-shares is 2" c.Assert(out, checker.Contains, expected) out, _, err = dockerCmdWithError("run", "--cpu-shares", "-1", "busybox", "echo", "test") c.Assert(err, check.NotNil, check.Commentf(out)) expected = "shares: invalid argument" c.Assert(out, checker.Contains, expected) out, _, err = dockerCmdWithError("run", "--cpu-shares", "99999999", "busybox", "echo", "test") c.Assert(err, check.NotNil, check.Commentf(out)) expected = "The maximum allowed cpu-shares is" c.Assert(out, checker.Contains, expected) } func (s *DockerSuite) TestRunWithDefaultShmSize(c *check.C) { testRequires(c, DaemonIsLinux) name := "shm-default" out, _ := dockerCmd(c, "run", "--name", name, "busybox", "mount") shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) if !shmRegex.MatchString(out) { c.Fatalf("Expected shm of 64MB in mount command, got %v", out) } shmSize, err := inspectField(name, "HostConfig.ShmSize") c.Assert(err, check.IsNil) c.Assert(shmSize, check.Equals, "67108864") } func (s *DockerSuite) TestRunWithShmSize(c *check.C) { testRequires(c, DaemonIsLinux) name := "shm" out, _ := dockerCmd(c, "run", "--name", name, "--shm-size=1G", "busybox", "mount") shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) if !shmRegex.MatchString(out) { c.Fatalf("Expected shm of 1GB 
in mount command, got %v", out) } shmSize, err := inspectField(name, "HostConfig.ShmSize") c.Assert(err, check.IsNil) c.Assert(shmSize, check.Equals, "1073741824") } func (s *DockerSuite) TestRunTmpfsMounts(c *check.C) { // TODO Windows (Post TP4): This test cannot run on a Windows daemon as // Windows does not support tmpfs mounts. testRequires(c, DaemonIsLinux) if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "busybox", "touch", "/run/somefile"); err != nil { c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) } if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec", "busybox", "touch", "/run/somefile"); err != nil { c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) } if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec,nosuid,rw,size=5k,mode=700", "busybox", "touch", "/run/somefile"); err != nil { c.Fatalf("/run failed to mount on tmpfs with valid options %q %s", err, out) } if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run:foobar", "busybox", "touch", "/run/somefile"); err == nil { c.Fatalf("/run mounted on tmpfs when it should have vailed within invalid mount option") } if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "-v", "/run:/run", "busybox", "touch", "/run/somefile"); err == nil { c.Fatalf("Should have generated an error saying Duplicate mount points") } } // TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp:/tmp/profile.json debian:jessie unshare' exits with operation not permitted. 
func (s *DockerSuite) TestRunSeccompProfileDenyUnshare(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) jsonData := `{ "defaultAction": "SCMP_ACT_ALLOW", "syscalls": [ { "name": "unshare", "action": "SCMP_ACT_ERRNO" } ] }` tmpFile, err := ioutil.TempFile("", "profile.json") defer tmpFile.Close() if err != nil { c.Fatal(err) } if _, err := tmpFile.Write([]byte(jsonData)); err != nil { c.Fatal(err) } runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor:unconfined", "--security-opt", "seccomp:"+tmpFile.Name(), "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") out, _, _ := runCommandWithOutput(runCmd) if !strings.Contains(out, "Operation not permitted") { c.Fatalf("expected unshare with seccomp profile denied to fail, got %s", out) } } // TestRunSeccompProfileDenyChmod checks that 'docker run --security-opt seccomp:/tmp/profile.json busybox chmod 400 /etc/hostname' exits with operation not permitted. func (s *DockerSuite) TestRunSeccompProfileDenyChmod(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) jsonData := `{ "defaultAction": "SCMP_ACT_ALLOW", "syscalls": [ { "name": "chmod", "action": "SCMP_ACT_ERRNO" } ] }` tmpFile, err := ioutil.TempFile("", "profile.json") defer tmpFile.Close() if err != nil { c.Fatal(err) } if _, err := tmpFile.Write([]byte(jsonData)); err != nil { c.Fatal(err) } runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp:"+tmpFile.Name(), "busybox", "chmod", "400", "/etc/hostname") out, _, _ := runCommandWithOutput(runCmd) if !strings.Contains(out, "Operation not permitted") { c.Fatalf("expected chmod with seccomp profile denied to fail, got %s", out) } } // TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:jessie unshare --map-root-user --user sh -c whoami' with a specific profile to // deny unhare of a userns exits with operation not permitted. 
func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) // from sched.h jsonData := fmt.Sprintf(`{ "defaultAction": "SCMP_ACT_ALLOW", "syscalls": [ { "name": "unshare", "action": "SCMP_ACT_ERRNO", "args": [ { "index": 0, "value": %d, "op": "SCMP_CMP_EQ" } ] } ] }`, uint64(0x10000000)) tmpFile, err := ioutil.TempFile("", "profile.json") defer tmpFile.Close() if err != nil { c.Fatal(err) } if _, err := tmpFile.Write([]byte(jsonData)); err != nil { c.Fatal(err) } runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor:unconfined", "--security-opt", "seccomp:"+tmpFile.Name(), "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") out, _, _ := runCommandWithOutput(runCmd) if !strings.Contains(out, "Operation not permitted") { c.Fatalf("expected unshare userns with seccomp profile denied to fail, got %s", out) } } // TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test' // with a the default seccomp profile exits with operation not permitted. func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) runCmd := exec.Command(dockerBinary, "run", "syscall-test", "userns-test", "id") out, _, err := runCommandWithOutput(runCmd) if err == nil || !strings.Contains(out, "clone failed: Operation not permitted") { c.Fatalf("expected clone userns with default seccomp profile denied to fail, got %s: %v", out, err) } } // TestRunSeccompUnconfinedCloneUserns checks that // 'docker run --security-opt seccomp:unconfined syscall-test' allows creating a userns. 
func (s *DockerSuite) TestRunSeccompUnconfinedCloneUserns(c *check.C) {
	testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace)

	// make sure running with seccomp:unconfined is ok
	runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp:unconfined", "syscall-test", "userns-test", "id")
	if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") {
		c.Fatalf("expected clone userns with --security-opt seccomp:unconfined to succeed, got %s: %v", out, err)
	}
}

// TestRunSeccompAllowPrivCloneUserns checks that 'docker run --privileged syscall-test'
// allows creating a userns.
func (s *DockerSuite) TestRunSeccompAllowPrivCloneUserns(c *check.C) {
	testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace)

	// make sure running w privileged is ok
	runCmd := exec.Command(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id")
	if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") {
		c.Fatalf("expected clone userns with --privileged to succeed, got %s: %v", out, err)
	}
}

// TestRunSeccompAllowAptKey checks that 'docker run debian:jessie apt-key' succeeds.
func (s *DockerSuite) TestRunSeccompAllowAptKey(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled, Network) // apt-key uses setrlimit & getrlimit, so we want to make sure we don't break it runCmd := exec.Command(dockerBinary, "run", "debian:jessie", "apt-key", "adv", "--keyserver", "hkp://p80.pool.sks-keyservers.net:80", "--recv-keys", "E871F18B51E0147C77796AC81196BA81F6B0FC61") if out, _, err := runCommandWithOutput(runCmd); err != nil { c.Fatalf("expected apt-key with seccomp to succeed, got %s: %v", out, err) } } func (s *DockerSuite) TestRunSeccompDefaultProfile(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test") if err == nil || !strings.Contains(out, "Operation not permitted") { c.Fatalf("expected Operation not permitted, got: %s", out) } out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello") if err == nil || !strings.Contains(out, "Operation not permitted") { c.Fatalf("expected Operation not permitted, got: %s", out) } out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp:unconfined", "syscall-test", "acct-test") if err == nil || !strings.Contains(out, "No such file or directory") { c.Fatalf("expected No such file or directory, got: %s", out) } out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp:unconfined", "syscall-test", "ns-test", "echo", "hello") if err != nil || !strings.Contains(out, "hello") { c.Fatalf("expected hello, got: %s, %v", out, err) } } func (s *DockerSuite) TestRunApparmorProcDirectory(c *check.C) { testRequires(c, SameHostDaemon, Apparmor) // running w seccomp unconfined tests the apparmor profile runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp:unconfined", "debian:jessie", "chmod", "777", "/proc/1/cgroup") if out, _, err := runCommandWithOutput(runCmd); err == nil || 
!(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", out, err) } runCmd = exec.Command(dockerBinary, "run", "--security-opt", "seccomp:unconfined", "debian:jessie", "chmod", "777", "/proc/1/attr/current") if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", out, err) } } docker-1.10.3/integration-cli/docker_cli_save_load_test.go000066400000000000000000000254561267010174400236520ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "reflect" "regexp" "sort" "strings" "time" "github.com/docker/distribution/digest" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // save a repo using gz compression and try to load it using stdout func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-save-xz-and-load-repo-stdout" dockerCmd(c, "run", "--name", name, "busybox", "true") repoName := "foobar-save-load-test-xz-gz" out, _ := dockerCmd(c, "commit", name, repoName) dockerCmd(c, "inspect", repoName) repoTarball, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("xz", "-c"), exec.Command("gzip", "-c")) c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) deleteImages(repoName) loadCmd := exec.Command(dockerBinary, "load") loadCmd.Stdin = strings.NewReader(repoTarball) out, _, err = runCommandWithOutput(loadCmd) c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) after, _, err := dockerCmdWithError("inspect", repoName) c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) } 
// save a repo using xz+gz compression and try to load it using stdout func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-save-xz-gz-and-load-repo-stdout" dockerCmd(c, "run", "--name", name, "busybox", "true") repoName := "foobar-save-load-test-xz-gz" dockerCmd(c, "commit", name, repoName) dockerCmd(c, "inspect", repoName) out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("xz", "-c"), exec.Command("gzip", "-c")) c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) deleteImages(repoName) loadCmd := exec.Command(dockerBinary, "load") loadCmd.Stdin = strings.NewReader(out) out, _, err = runCommandWithOutput(loadCmd) c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) after, _, err := dockerCmdWithError("inspect", repoName) c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) } func (s *DockerSuite) TestSaveSingleTag(c *check.C) { testRequires(c, DaemonIsLinux) repoName := "foobar-save-single-tag-test" dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) cleanedImageID := strings.TrimSpace(out) out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)), exec.Command("tar", "t"), exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) } func (s *DockerSuite) TestSaveCheckTimes(c *check.C) { testRequires(c, DaemonIsLinux) repoName := "busybox:latest" out, _ := dockerCmd(c, "inspect", repoName) data := []struct { ID string Created time.Time }{} err := json.Unmarshal([]byte(out), &data) c.Assert(err, checker.IsNil, check.Commentf("failed to marshal from 
%q: err %v", repoName, err)) c.Assert(len(data), checker.Not(checker.Equals), 0, check.Commentf("failed to marshal the data from %q", repoName)) tarTvTimeFormat := "2006-01-02 15:04" out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("tar", "tv"), exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex()))) c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) } func (s *DockerSuite) TestSaveImageId(c *check.C) { testRequires(c, DaemonIsLinux) repoName := "foobar-save-image-id-test" dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) cleanedLongImageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:") out, _ = dockerCmd(c, "images", "-q", repoName) cleanedShortImageID := strings.TrimSpace(out) // Make sure IDs are not empty c.Assert(cleanedLongImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) c.Assert(cleanedShortImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID) tarCmd := exec.Command("tar", "t") var err error tarCmd.Stdin, err = saveCmd.StdoutPipe() c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for tar: %v", err)) grepCmd := exec.Command("grep", cleanedLongImageID) grepCmd.Stdin, err = tarCmd.StdoutPipe() c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for grep: %v", err)) c.Assert(tarCmd.Start(), checker.IsNil, check.Commentf("tar failed with error: %v", err)) c.Assert(saveCmd.Start(), checker.IsNil, check.Commentf("docker save failed with error: %v", err)) defer func() { saveCmd.Wait() tarCmd.Wait() dockerCmd(c, "rmi", repoName) }() out, _, err = runCommandWithOutput(grepCmd) c.Assert(err, checker.IsNil, 
check.Commentf("failed to save repo with image ID: %s, %v", out, err)) } // save a repo and try to load it using flags func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { testRequires(c, DaemonIsLinux) name := "test-save-and-load-repo-flags" dockerCmd(c, "run", "--name", name, "busybox", "true") repoName := "foobar-save-load-test" deleteImages(repoName) dockerCmd(c, "commit", name, repoName) before, _ := dockerCmd(c, "inspect", repoName) out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command(dockerBinary, "load")) c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) after, _ := dockerCmd(c, "inspect", repoName) c.Assert(before, checker.Equals, after, check.Commentf("inspect is not the same after a save / load")) } func (s *DockerSuite) TestSaveMultipleNames(c *check.C) { testRequires(c, DaemonIsLinux) repoName := "foobar-save-multi-name-test" // Make one image dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) // Make two images dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), exec.Command("tar", "xO", "repositories"), exec.Command("grep", "-q", "-E", "(-one|-two)"), ) c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple repos: %s, %v", out, err)) } func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { testRequires(c, DaemonIsLinux) makeImage := func(from string, tag string) string { var ( out string ) out, _ = dockerCmd(c, "run", "-d", from, "true") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "commit", cleanedContainerID, tag) imageID := strings.TrimSpace(out) return imageID } repoName := "foobar-save-multi-images-test" tagFoo := repoName + ":foo" tagBar := repoName + ":bar" idFoo := 
makeImage("busybox:latest", tagFoo) idBar := makeImage("busybox:latest", tagBar) deleteImages(repoName) // create the archive out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName, "busybox:latest"), exec.Command("tar", "t")) c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) lines := strings.Split(strings.TrimSpace(out), "\n") var actual []string for _, l := range lines { if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) { actual = append(actual, strings.TrimSuffix(l, ".json")) } } // make the list of expected layers out, _ = dockerCmd(c, "inspect", "-f", "{{.Id}}", "busybox:latest") expected := []string{strings.TrimSpace(out), idFoo, idBar} // prefixes are not in tar for i := range expected { expected[i] = digest.Digest(expected[i]).Hex() } sort.Strings(actual) sort.Strings(expected) c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)) } // Issue #6722 #5892 ensure directories are included in changes func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { testRequires(c, DaemonIsLinux) layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} name := "save-directory-permissions" tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary directory: %s", err)) extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") os.Mkdir(extractionDirectory, 0777) defer os.RemoveAll(tmpDir) _, err = buildImage(name, `FROM busybox RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, true) c.Assert(err, checker.IsNil, check.Commentf("%v", err)) out, _, err := 
runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", name), exec.Command("tar", "-xf", "-", "-C", extractionDirectory), ) c.Assert(err, checker.IsNil, check.Commentf("failed to save and extract image: %s", out)) dirs, err := ioutil.ReadDir(extractionDirectory) c.Assert(err, checker.IsNil, check.Commentf("failed to get a listing of the layer directories: %s", err)) found := false for _, entry := range dirs { var entriesSansDev []string if entry.IsDir() { layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") f, err := os.Open(layerPath) c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err)) entries, err := listTar(f) for _, e := range entries { if !strings.Contains(e, "dev/") { entriesSansDev = append(entriesSansDev, e) } } c.Assert(err, checker.IsNil, check.Commentf("encountered error while listing tar entries: %s", err)) if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { found = true break } } } c.Assert(found, checker.Equals, true, check.Commentf("failed to find the layer with the right content listing")) } // Test loading a weird image where one of the layers is of zero size. // The layer.tar file is actually zero bytes, no padding or anything else. 
// See issue: 18170 func (s *DockerSuite) TestLoadZeroSizeLayer(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "load", "-i", "fixtures/load/emptyLayer.tar") } docker-1.10.3/integration-cli/docker_cli_save_load_unix_test.go000066400000000000000000000032331267010174400247020ustar00rootroot00000000000000// +build !windows package main import ( "io/ioutil" "os" "os/exec" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" "github.com/kr/pty" ) // save a repo and try to load it using stdout func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { name := "test-save-and-load-repo-stdout" dockerCmd(c, "run", "--name", name, "busybox", "true") repoName := "foobar-save-load-test" before, _ := dockerCmd(c, "commit", name, repoName) tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") c.Assert(err, check.IsNil) defer os.Remove(tmpFile.Name()) saveCmd := exec.Command(dockerBinary, "save", repoName) saveCmd.Stdout = tmpFile _, err = runCommand(saveCmd) c.Assert(err, check.IsNil) tmpFile, err = os.Open(tmpFile.Name()) c.Assert(err, check.IsNil) deleteImages(repoName) loadCmd := exec.Command(dockerBinary, "load") loadCmd.Stdin = tmpFile out, _, err := runCommandWithOutput(loadCmd) c.Assert(err, check.IsNil, check.Commentf(out)) after, _ := dockerCmd(c, "inspect", "-f", "{{.Id}}", repoName) c.Assert(before, check.Equals, after) //inspect is not the same after a save / load deleteImages(repoName) pty, tty, err := pty.Open() c.Assert(err, check.IsNil) cmd := exec.Command(dockerBinary, "save", repoName) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty c.Assert(cmd.Start(), check.IsNil) c.Assert(cmd.Wait(), check.NotNil) //did not break writing to a TTY buf := make([]byte, 1024) n, err := pty.Read(buf) c.Assert(err, check.IsNil) //could not read tty output c.Assert(string(buf[:n]), checker.Contains, "Cowardly refusing", check.Commentf("help output is not being yielded", out)) } 
docker-1.10.3/integration-cli/docker_cli_search_test.go000066400000000000000000000047311267010174400231530ustar00rootroot00000000000000package main import ( "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // search for repos named "registry" on the central registry func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) { testRequires(c, Network, DaemonIsLinux) out, _ := dockerCmd(c, "search", "busybox") c.Assert(out, checker.Contains, "Busybox base image.", check.Commentf("couldn't find any repository named (or containing) 'Busybox base image.'")) } func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) { out, _, err := dockerCmdWithError("search", "--stars=a", "busybox") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "invalid value", check.Commentf("couldn't find the invalid value warning")) out, _, err = dockerCmdWithError("search", "-s=-1", "busybox") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "invalid value", check.Commentf("couldn't find the invalid value warning")) } func (s *DockerSuite) TestSearchCmdOptions(c *check.C) { testRequires(c, Network) out, _ := dockerCmd(c, "search", "--help") c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM") outSearchCmd, _ := dockerCmd(c, "search", "busybox") outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox") c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("The no-trunc option can't take effect.")) outSearchCmdautomated, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. 
outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") for i := range outSearchCmdautomatedSlice { c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", out)) } outSearchCmdStars, _ := dockerCmd(c, "search", "-s=2", "busybox") c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)) dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") } // search for repos which start with "ubuntu-" on the central registry func (s *DockerSuite) TestSearchOnCentralRegistryWithDash(c *check.C) { testRequires(c, Network, DaemonIsLinux) dockerCmd(c, "search", "ubuntu-") } docker-1.10.3/integration-cli/docker_cli_start_test.go000066400000000000000000000160071267010174400230420ustar00rootroot00000000000000package main import ( "fmt" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // Regression test for https://github.com/docker/docker/issues/7843 func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "test", "busybox") dockerCmd(c, "wait", "test") // Expect this to fail because the above container is stopped, this is what we want out, _, err := dockerCmdWithError("run", "-d", "--name", "test2", "--link", "test:test", "busybox") // err shouldn't be nil because container test2 try to link to stopped container c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) ch := make(chan error) go func() { // Attempt to start attached to the container that won't start // This should return an error immediately since the container can't be started if _, _, err := dockerCmdWithError("start", "-a", "test2"); err == nil { ch <- fmt.Errorf("Expected error but 
got none") } close(ch) }() select { case err := <-ch: c.Assert(err, check.IsNil) case <-time.After(5 * time.Second): c.Fatalf("Attach did not exit properly") } } // gh#8555: Exit code should be passed through when using start -a func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { testRequires(c, DaemonIsLinux) out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") out = strings.TrimSpace(out) // make sure the container has exited before trying the "start -a" dockerCmd(c, "wait", out) startOut, exitCode, err := dockerCmdWithError("start", "-a", out) // start command should fail c.Assert(err, checker.NotNil, check.Commentf("startOut: %s", startOut)) // start -a did not respond with proper exit code c.Assert(exitCode, checker.Equals, 1, check.Commentf("startOut: %s", startOut)) } func (s *DockerSuite) TestStartAttachSilent(c *check.C) { testRequires(c, DaemonIsLinux) name := "teststartattachcorrectexitcode" dockerCmd(c, "run", "--name", name, "busybox", "echo", "test") // make sure the container has exited before trying the "start -a" dockerCmd(c, "wait", name) startOut, _ := dockerCmd(c, "start", "-a", name) // start -a produced unexpected output c.Assert(startOut, checker.Equals, "test\n") } func (s *DockerSuite) TestStartRecordError(c *check.C) { testRequires(c, DaemonIsLinux) // when container runs successfully, we should not have state.Error dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") stateErr, err := inspectField("test", "State.Error") c.Assert(err, checker.IsNil, check.Commentf("stateErr: %s", stateErr)) // Expected to not have state error c.Assert(stateErr, checker.Equals, "") // Expect this to fail and records error because of ports conflict out, _, err := dockerCmdWithError("run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top") // err shouldn't be nil because docker run will fail c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) stateErr, 
err = inspectField("test2", "State.Error") c.Assert(err, check.IsNil, check.Commentf("stateErr: %s", stateErr)) c.Assert(stateErr, checker.Contains, "port is already allocated") // Expect the conflict to be resolved when we stop the initial container dockerCmd(c, "stop", "test") dockerCmd(c, "start", "test2") stateErr, err = inspectField("test2", "State.Error") c.Assert(err, check.IsNil, check.Commentf("stateErr: %s", stateErr)) // Expected to not have state error but got one c.Assert(stateErr, checker.Equals, "") } func (s *DockerSuite) TestStartPausedContainer(c *check.C) { testRequires(c, DaemonIsLinux) defer unpauseAllContainers() dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") dockerCmd(c, "pause", "testing") out, _, err := dockerCmdWithError("start", "testing") // an error should have been shown that you cannot start paused container c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // an error should have been shown that you cannot start paused container c.Assert(out, checker.Contains, "Cannot start a paused container, try unpause instead.") } func (s *DockerSuite) TestStartMultipleContainers(c *check.C) { testRequires(c, DaemonIsLinux) // run a container named 'parent' and create two container link to `parent` dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") for _, container := range []string{"child_first", "child_second"} { dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top") } // stop 'parent' container dockerCmd(c, "stop", "parent") out, err := inspectField("parent", "State.Running") c.Assert(err, check.IsNil, check.Commentf("out: %s", out)) // Container should be stopped c.Assert(out, checker.Equals, "false") // start all the three containers, container `child_first` start first which should be failed // container 'parent' start second and then start container 'child_second' expOut := "Cannot link to a non running container" expErr := "failed to start containers: 
[child_first]" out, _, err = dockerCmdWithError("start", "child_first", "parent", "child_second") // err shouldn't be nil because start will fail c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // output does not correspond to what was expected if !(strings.Contains(out, expOut) || strings.Contains(err.Error(), expErr)) { c.Fatalf("Expected out: %v with err: %v but got out: %v with err: %v", expOut, expErr, out, err) } for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} { out, err := inspectField(container, "State.Running") // Container running state wrong c.Assert(err, check.IsNil, check.Commentf("out: %s", out)) // Container running state wrong c.Assert(out, checker.Equals, expected) } } func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { testRequires(c, DaemonIsLinux) // run multiple containers to test for _, container := range []string{"test1", "test2", "test3"} { dockerCmd(c, "run", "-d", "--name", container, "busybox", "top") } // stop all the containers for _, container := range []string{"test1", "test2", "test3"} { dockerCmd(c, "stop", container) } // test start and attach multiple containers at once, expected error for _, option := range []string{"-a", "-i", "-ai"} { out, _, err := dockerCmdWithError("start", option, "test1", "test2", "test3") // err shouldn't be nil because start will fail c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // output does not correspond to what was expected c.Assert(out, checker.Contains, "You cannot start and attach multiple containers at once.") } // confirm the state of all the containers be stopped for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { out, err := inspectField(container, "State.Running") // Container running state wrong c.Assert(err, check.IsNil, check.Commentf("out: %s", out)) // Container running state wrong c.Assert(out, checker.Equals, 
expected) } } docker-1.10.3/integration-cli/docker_cli_start_volume_driver_unix_test.go000066400000000000000000000306021267010174400270440ustar00rootroot00000000000000// +build !windows package main import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func init() { check.Suite(&DockerExternalVolumeSuite{ ds: &DockerSuite{}, }) } type eventCounter struct { activations int creations int removals int mounts int unmounts int paths int lists int gets int } type DockerExternalVolumeSuite struct { server *httptest.Server ds *DockerSuite d *Daemon ec *eventCounter } func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { s.d = NewDaemon(c) s.ec = &eventCounter{} } func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { s.d.Stop() s.ds.TearDownTest(c) } func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { mux := http.NewServeMux() s.server = httptest.NewServer(mux) type pluginRequest struct { Name string Opts map[string]string } type pluginResp struct { Mountpoint string `json:",omitempty"` Err string `json:",omitempty"` } type vol struct { Name string Mountpoint string Ninja bool // hack used to trigger an null volume return on `Get` } var volList []vol read := func(b io.ReadCloser) (pluginRequest, error) { defer b.Close() var pr pluginRequest if err := json.NewDecoder(b).Decode(&pr); err != nil { return pr, err } return pr, nil } send := func(w http.ResponseWriter, data interface{}) { switch t := data.(type) { case error: http.Error(w, t.Error(), 500) case string: w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintln(w, t) default: w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") json.NewEncoder(w).Encode(&data) } } mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { s.ec.activations++ send(w, `{"Implements": 
["VolumeDriver"]}`) }) mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { s.ec.creations++ pr, err := read(r.Body) if err != nil { send(w, err) return } _, isNinja := pr.Opts["ninja"] volList = append(volList, vol{Name: pr.Name, Ninja: isNinja}) send(w, nil) }) mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { s.ec.lists++ send(w, map[string][]vol{"Volumes": volList}) }) mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { s.ec.gets++ pr, err := read(r.Body) if err != nil { send(w, err) return } for _, v := range volList { if v.Name == pr.Name { if v.Ninja { send(w, map[string]vol{}) return } v.Mountpoint = hostVolumePath(pr.Name) send(w, map[string]vol{"Volume": v}) return } } send(w, `{"Err": "no such volume"}`) }) mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { s.ec.removals++ pr, err := read(r.Body) if err != nil { send(w, err) return } if err := os.RemoveAll(hostVolumePath(pr.Name)); err != nil { send(w, &pluginResp{Err: err.Error()}) return } for i, v := range volList { if v.Name == pr.Name { if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil { send(w, fmt.Sprintf(`{"Err": "%v"}`, err)) return } volList = append(volList[:i], volList[i+1:]...) 
break } } send(w, nil) }) mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { s.ec.paths++ pr, err := read(r.Body) if err != nil { send(w, err) return } p := hostVolumePath(pr.Name) send(w, &pluginResp{Mountpoint: p}) }) mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { s.ec.mounts++ pr, err := read(r.Body) if err != nil { send(w, err) return } p := hostVolumePath(pr.Name) if err := os.MkdirAll(p, 0755); err != nil { send(w, &pluginResp{Err: err.Error()}) return } if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil { send(w, err) return } send(w, &pluginResp{Mountpoint: p}) }) mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { s.ec.unmounts++ _, err := read(r.Body) if err != nil { send(w, err) return } send(w, nil) }) err := os.MkdirAll("/etc/docker/plugins", 0755) c.Assert(err, checker.IsNil) err = ioutil.WriteFile("/etc/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644) c.Assert(err, checker.IsNil) } func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) { s.server.Close() err := os.RemoveAll("/etc/docker/plugins") c.Assert(err, checker.IsNil) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, checker.IsNil) out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(out, checker.Contains, s.server.URL) _, err = s.d.Cmd("volume", "rm", "external-volume-test") c.Assert(err, checker.IsNil) p := hostVolumePath("external-volume-test") _, err = os.Lstat(p) c.Assert(err, checker.NotNil) c.Assert(os.IsNotExist(err), checker.True, check.Commentf("Expected volume path in host to not exist: %s, %v\n", p, 
err)) c.Assert(s.ec.activations, checker.Equals, 1) c.Assert(s.ec.creations, checker.Equals, 1) c.Assert(s.ec.removals, checker.Equals, 1) c.Assert(s.ec.mounts, checker.Equals, 1) c.Assert(s.ec.unmounts, checker.Equals, 1) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, checker.IsNil) out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(out, checker.Contains, s.server.URL) c.Assert(s.ec.activations, checker.Equals, 1) c.Assert(s.ec.creations, checker.Equals, 1) c.Assert(s.ec.removals, checker.Equals, 1) c.Assert(s.ec.mounts, checker.Equals, 1) c.Assert(s.ec.unmounts, checker.Equals, 1) } func (s DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, checker.IsNil) out, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("rm", "-fv", "vol-test1") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(s.ec.activations, checker.Equals, 1) c.Assert(s.ec.creations, checker.Equals, 1) c.Assert(s.ec.removals, checker.Equals, 1) c.Assert(s.ec.mounts, checker.Equals, 2) c.Assert(s.ec.unmounts, checker.Equals, 2) } func (s DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, checker.IsNil) out, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest") c.Assert(err, 
checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("rm", "-fv", "vol-test1") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(s.ec.activations, checker.Equals, 1) c.Assert(s.ec.creations, checker.Equals, 1) c.Assert(s.ec.removals, checker.Equals, 1) c.Assert(s.ec.mounts, checker.Equals, 1) c.Assert(s.ec.unmounts, checker.Equals, 1) } func hostVolumePath(name string) string { return fmt.Sprintf("/var/lib/docker/volumes/%s", name) } // Make sure a request to use a down driver doesn't block other requests func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *check.C) { specPath := "/etc/docker/plugins/down-driver.spec" err := ioutil.WriteFile(specPath, []byte("tcp://127.0.0.7:9999"), 0644) c.Assert(err, check.IsNil) defer os.RemoveAll(specPath) chCmd1 := make(chan struct{}) chCmd2 := make(chan error) cmd1 := exec.Command(dockerBinary, "volume", "create", "-d", "down-driver") cmd2 := exec.Command(dockerBinary, "volume", "create") c.Assert(cmd1.Start(), checker.IsNil) defer cmd1.Process.Kill() time.Sleep(100 * time.Millisecond) // ensure API has been called c.Assert(cmd2.Start(), checker.IsNil) go func() { cmd1.Wait() close(chCmd1) }() go func() { chCmd2 <- cmd2.Wait() }() select { case <-chCmd1: cmd2.Process.Kill() c.Fatalf("volume create with down driver finished unexpectedly") case err := <-chCmd2: c.Assert(err, checker.IsNil) case <-time.After(5 * time.Second): cmd2.Process.Kill() c.Fatal("volume creates are blocked by previous create requests when previous driver is down") } } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, checker.IsNil) specPath := "/etc/docker/plugins/test-external-volume-driver-retry.spec" os.RemoveAll(specPath) defer os.RemoveAll(specPath) errchan := make(chan error) go func() { if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", 
"--volume-driver", "test-external-volume-driver-retry", "busybox:latest"); err != nil { errchan <- fmt.Errorf("%v:\n%s", err, out) } close(errchan) }() go func() { // wait for a retry to occur, then create spec to allow plugin to register time.Sleep(2000 * time.Millisecond) // no need to check for an error here since it will get picked up by the timeout later ioutil.WriteFile(specPath, []byte(s.server.URL), 0644) }() select { case err := <-errchan: c.Assert(err, checker.IsNil) case <-time.After(8 * time.Second): c.Fatal("volume creates fail when plugin not immediately available") } _, err = s.d.Cmd("volume", "rm", "external-volume-test") c.Assert(err, checker.IsNil) c.Assert(s.ec.activations, checker.Equals, 1) c.Assert(s.ec.creations, checker.Equals, 1) c.Assert(s.ec.removals, checker.Equals, 1) c.Assert(s.ec.mounts, checker.Equals, 1) c.Assert(s.ec.unmounts, checker.Equals, 1) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *check.C) { dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "foo") dockerCmd(c, "run", "-d", "--name", "testing", "-v", "foo:/bar", "busybox", "top") var mounts []struct { Name string Driver string } out, err := inspectFieldJSON("testing", "Mounts") c.Assert(err, checker.IsNil) c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) c.Assert(mounts[0].Name, checker.Equals, "foo") c.Assert(mounts[0].Driver, checker.Equals, "test-external-volume-driver") } func (s *DockerExternalVolumeSuite) TesttExternalVolumeDriverList(c *check.C) { dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "abc") out, _ := dockerCmd(c, "volume", "ls") ls := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(ls), check.Equals, 2, check.Commentf("\n%s", out)) vol := strings.Fields(ls[len(ls)-1]) c.Assert(len(vol), check.Equals, 2, check.Commentf("%v", vol)) c.Assert(vol[0], 
check.Equals, "test-external-volume-driver") c.Assert(vol[1], check.Equals, "abc") c.Assert(s.ec.lists, check.Equals, 1) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) { out, _, err := dockerCmdWithError("volume", "inspect", "dummy") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(s.ec.gets, check.Equals, 1) c.Assert(out, checker.Contains, "No such volume") } // Ensures that the daemon handles when the plugin responds to a `Get` request with a null volume and a null error. // Prior the daemon would panic in this scenario. func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) { dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "abc", "--opt", "ninja=1") out, _, err := dockerCmdWithError("volume", "inspect", "abc") c.Assert(err, checker.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "No such volume") } docker-1.10.3/integration-cli/docker_cli_stats_test.go000066400000000000000000000074411267010174400230450ustar00rootroot00000000000000package main import ( "bufio" "os/exec" "regexp" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestStatsNoStream(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "top") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id) type output struct { out []byte err error } ch := make(chan output) go func() { out, err := statsCmd.Output() ch <- output{out, err} }() select { case outerr := <-ch: c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err)) c.Assert(string(outerr.out), checker.Contains, id) //running container wasn't present in output case <-time.After(3 * time.Second): statsCmd.Process.Kill() c.Fatalf("stats did not return immediately when not streaming") } } func (s *DockerSuite) 
TestStatsContainerNotFound(c *check.C) {
	// `docker stats` on a nonexistent container must fail with a clear
	// "No such container" error, both in streaming and --no-stream mode.
	testRequires(c, DaemonIsLinux)

	out, _, err := dockerCmdWithError("stats", "notfound")
	c.Assert(err, checker.NotNil)
	c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats, got %q instead", out))

	out, _, err = dockerCmdWithError("stats", "--no-stream", "notfound")
	c.Assert(err, checker.NotNil)
	c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats with --no-stream, got %q instead", out))
}

// Start three containers, stop one: `docker stats --no-stream` (without
// --all) must list only the two containers that are still running.
func (s *DockerSuite) TestStatsAllRunningNoStream(c *check.C) {
	testRequires(c, DaemonIsLinux)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id1 := strings.TrimSpace(out)[:12]
	c.Assert(waitRun(id1), check.IsNil)
	out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
	id2 := strings.TrimSpace(out)[:12]
	c.Assert(waitRun(id2), check.IsNil)
	out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
	id3 := strings.TrimSpace(out)[:12]
	c.Assert(waitRun(id3), check.IsNil)
	dockerCmd(c, "stop", id3)

	out, _ = dockerCmd(c, "stats", "--no-stream")
	if !strings.Contains(out, id1) || !strings.Contains(out, id2) {
		c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out)
	}
	// the stopped container must not appear without --all
	if strings.Contains(out, id3) {
		c.Fatalf("Did not expect %s in stats, got %s", id3, out)
	}
}

// With --all, `docker stats --no-stream` must include stopped containers too.
func (s *DockerSuite) TestStatsAllNoStream(c *check.C) {
	testRequires(c, DaemonIsLinux)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id1 := strings.TrimSpace(out)[:12]
	c.Assert(waitRun(id1), check.IsNil)
	dockerCmd(c, "stop", id1)
	out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
	id2 := strings.TrimSpace(out)[:12]
	c.Assert(waitRun(id2), check.IsNil)

	out, _ = dockerCmd(c, "stats", "--all", "--no-stream")
	if !strings.Contains(out, id1) || !strings.Contains(out, id2) {
		c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out)
	}
}

// A container started while `docker stats` is already streaming must show up
// in the streamed output.
func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) {
testRequires(c, DaemonIsLinux) id := make(chan string) addedChan := make(chan struct{}) dockerCmd(c, "run", "-d", "busybox", "top") statsCmd := exec.Command(dockerBinary, "stats") stdout, err := statsCmd.StdoutPipe() c.Assert(err, check.IsNil) c.Assert(statsCmd.Start(), check.IsNil) defer statsCmd.Process.Kill() go func() { containerID := <-id matchID := regexp.MustCompile(containerID) scanner := bufio.NewScanner(stdout) for scanner.Scan() { switch { case matchID.MatchString(scanner.Text()): close(addedChan) } } }() out, _ := dockerCmd(c, "run", "-d", "busybox", "top") c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) id <- strings.TrimSpace(out)[:12] select { case <-time.After(10 * time.Second): c.Fatal("failed to observe new container created added to stats") case <-addedChan: // ignore, done } } docker-1.10.3/integration-cli/docker_cli_tag_test.go000066400000000000000000000174461267010174400224700ustar00rootroot00000000000000package main import ( "fmt" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/stringutils" "github.com/go-check/check" ) // tagging a named image in a new unprefixed repo should work func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { testRequires(c, DaemonIsLinux) if err := pullImageIfNotExist("busybox:latest"); err != nil { c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") } // tagging an image by ID in a new unprefixed repo should work func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { testRequires(c, DaemonIsLinux) imageID, err := inspectField("busybox", "Id") c.Assert(err, check.IsNil) dockerCmd(c, "tag", imageID, "testfoobarbaz") } // ensure we don't allow the use of invalid repository names; these tag operations should fail func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", 
"Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} for _, repo := range invalidRepos { out, _, err := dockerCmdWithError("tag", "busybox", repo) c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repo, out)) } } // ensure we don't allow the use of invalid tags; these tag operations should fail func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { longTag := stringutils.GenerateRandomAlphaOnlyString(121) invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} for _, repotag := range invalidTags { out, _, err := dockerCmdWithError("tag", "busybox", repotag) c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repotag, out)) } } // ensure we allow the use of valid tags func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { testRequires(c, DaemonIsLinux) if err := pullImageIfNotExist("busybox:latest"); err != nil { c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} for _, repo := range validRepos { _, _, err := dockerCmdWithError("tag", "busybox:latest", repo) if err != nil { c.Errorf("tag busybox %v should have worked: %s", repo, err) continue } deleteImages(repo) } } // tag an image with an existed tag name without -f option should work func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { testRequires(c, DaemonIsLinux) if err := pullImageIfNotExist("busybox:latest"); err != nil { c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } dockerCmd(c, "tag", "busybox:latest", "busybox:test") } // tag an image with an existed tag name with -f option should work func (s *DockerSuite) TestTagExistedNameWithForce(c *check.C) { testRequires(c, DaemonIsLinux) if err := 
pullImageIfNotExist("busybox:latest"); err != nil { c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } dockerCmd(c, "tag", "busybox:latest", "busybox:test") dockerCmd(c, "tag", "-f", "busybox:latest", "busybox:test") } func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { testRequires(c, DaemonIsLinux) if err := pullImageIfNotExist("busybox:latest"); err != nil { c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } // test repository name begin with '-' out, _, err := dockerCmdWithError("tag", "busybox:latest", "-busybox:test") c.Assert(err, checker.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) // test namespace name begin with '-' out, _, err = dockerCmdWithError("tag", "busybox:latest", "-test/busybox:test") c.Assert(err, checker.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) // test index name begin with '-' out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test") c.Assert(err, checker.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) } // ensure tagging using official names works // ensure all tags result in the same name func (s *DockerSuite) TestTagOfficialNames(c *check.C) { testRequires(c, DaemonIsLinux) names := []string{ "docker.io/busybox", "index.docker.io/busybox", "library/busybox", "docker.io/library/busybox", "index.docker.io/library/busybox", } for _, name := range names { out, exitCode, err := dockerCmdWithError("tag", "busybox:latest", name+":latest") if err != nil || exitCode != 0 { c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) continue } // ensure we don't have multiple tag names. 
out, _, err = dockerCmdWithError("images") if err != nil { c.Errorf("listing images failed with errors: %v, %s", err, out) } else if strings.Contains(out, name) { c.Errorf("images should not have listed '%s'", name) deleteImages(name + ":latest") } } for _, name := range names { _, exitCode, err := dockerCmdWithError("tag", name+":latest", "fooo/bar:latest") if err != nil || exitCode != 0 { c.Errorf("tag %v fooo/bar should have worked: %s", name, err) continue } deleteImages("fooo/bar:latest") } } // ensure tags can not match digests func (s *DockerSuite) TestTagMatchesDigest(c *check.C) { testRequires(c, DaemonIsLinux) if err := pullImageIfNotExist("busybox:latest"); err != nil { c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507" // test setting tag fails _, _, err := dockerCmdWithError("tag", "busybox:latest", digest) if err == nil { c.Fatal("digest tag a name should have failed") } // check that no new image matches the digest _, _, err = dockerCmdWithError("inspect", digest) if err == nil { c.Fatal("inspecting by digest should have failed") } } func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) { testRequires(c, DaemonIsLinux) if err := pullImageIfNotExist("busybox:latest"); err != nil { c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } // test setting tag fails _, _, err := dockerCmdWithError("tag", "busybox:latest", "sha256:sometag") if err == nil { c.Fatal("tagging with image named \"sha256\" should have failed") } } // ensure tags cannot create ambiguity with image ids func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { testRequires(c, DaemonIsLinux) if err := pullImageIfNotExist("busybox:latest"); err != nil { c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } imageID, err := buildImage("notbusybox:latest", `FROM busybox MAINTAINER dockerio`, true) if err 
!= nil { c.Fatal(err) } truncatedImageID := stringid.TruncateID(imageID) truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) id, err := inspectField(truncatedTag, "Id") if err != nil { c.Fatalf("Error inspecting by image id: %s", err) } // Ensure inspect by image id returns image for image id c.Assert(id, checker.Equals, imageID) c.Logf("Built image: %s", imageID) // test setting tag fails _, _, err = dockerCmdWithError("tag", "busybox:latest", truncatedTag) if err != nil { c.Fatalf("Error tagging with an image id: %s", err) } id, err = inspectField(truncatedTag, "Id") if err != nil { c.Fatalf("Error inspecting by image id: %s", err) } // Ensure id is imageID and not busybox:latest c.Assert(id, checker.Not(checker.Equals), imageID) } docker-1.10.3/integration-cli/docker_cli_top_test.go000066400000000000000000000033201267010174400225010ustar00rootroot00000000000000package main import ( "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-i", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "top", cleanedContainerID, "-o", "pid") c.Assert(out, checker.Contains, "PID", check.Commentf("did not see PID after top -o pid: %s", out)) } func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-i", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) out1, _ := dockerCmd(c, "top", cleanedContainerID) out2, _ := dockerCmd(c, "top", cleanedContainerID) dockerCmd(c, "kill", cleanedContainerID) c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first time")) c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) } func (s *DockerSuite) 
TestTopPrivileged(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "--privileged", "-i", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) out1, _ := dockerCmd(c, "top", cleanedContainerID) out2, _ := dockerCmd(c, "top", cleanedContainerID) dockerCmd(c, "kill", cleanedContainerID) c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first time")) c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) } docker-1.10.3/integration-cli/docker_cli_update_unix_test.go000066400000000000000000000144321267010174400242320ustar00rootroot00000000000000// +build !windows package main import ( "encoding/json" "fmt" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/engine-api/types" "github.com/go-check/check" ) func (s *DockerSuite) TestUpdateRunningContainer(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") dockerCmd(c, "update", "-m", "500M", name) memory, err := inspectField(name, "HostConfig.Memory") c.Assert(err, check.IsNil) if memory != "524288000" { c.Fatalf("Got the wrong memory value, we got %d, expected 524288000(500M).", memory) } file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" out, _ := dockerCmd(c, "exec", name, "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") } func (s *DockerSuite) TestUpdateRunningContainerWithRestart(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") dockerCmd(c, "update", "-m", "500M", name) dockerCmd(c, "restart", name) memory, err := inspectField(name, "HostConfig.Memory") c.Assert(err, check.IsNil) if memory 
!= "524288000" { c.Fatalf("Got the wrong memory value, we got %d, expected 524288000(500M).", memory) } file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" out, _ := dockerCmd(c, "exec", name, "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") } func (s *DockerSuite) TestUpdateStoppedContainer(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) name := "test-update-container" file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" dockerCmd(c, "run", "--name", name, "-m", "300M", "busybox", "cat", file) dockerCmd(c, "update", "-m", "500M", name) memory, err := inspectField(name, "HostConfig.Memory") c.Assert(err, check.IsNil) if memory != "524288000" { c.Fatalf("Got the wrong memory value, we got %d, expected 524288000(500M).", memory) } out, _ := dockerCmd(c, "start", "-a", name) c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") } func (s *DockerSuite) TestUpdatePausedContainer(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, cpuShare) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "--cpu-shares", "1000", "busybox", "top") dockerCmd(c, "pause", name) dockerCmd(c, "update", "--cpu-shares", "500", name) out, err := inspectField(name, "HostConfig.CPUShares") c.Assert(err, check.IsNil) if out != "500" { c.Fatalf("Got the wrong cpu shares value, we got %d, expected 500.", out) } dockerCmd(c, "unpause", name) file := "/sys/fs/cgroup/cpu/cpu.shares" out, _ = dockerCmd(c, "exec", name, "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "500") } func (s *DockerSuite) TestUpdateWithUntouchedFields(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) testRequires(c, cpuShare) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "--cpu-shares", "800", "busybox", "top") dockerCmd(c, "update", "-m", "500M", name) // Update memory and not touch cpus, `cpuset.cpus` should still have the old value out, err 
:= inspectField(name, "HostConfig.CPUShares") c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "800") file := "/sys/fs/cgroup/cpu/cpu.shares" out, _ = dockerCmd(c, "exec", name, "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "800") } func (s *DockerSuite) TestUpdateContainerInvalidValue(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") out, _, err := dockerCmdWithError("update", "-m", "2M", name) c.Assert(err, check.NotNil) expected := "Minimum memory limit allowed is 4MB" c.Assert(out, checker.Contains, expected) } func (s *DockerSuite) TestUpdateContainerWithoutFlags(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") _, _, err := dockerCmdWithError("update", name) c.Assert(err, check.NotNil) } func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, kernelMemorySupport) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "--kernel-memory", "50M", "busybox", "top") _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) // Update kernel memory to a running container is not allowed. 
c.Assert(err, check.NotNil) out, err := inspectField(name, "HostConfig.KernelMemory") c.Assert(err, check.IsNil) // Update kernel memory to a running container with failure should not change HostConfig if out != "52428800" { c.Fatalf("Got the wrong memory value, we got %d, expected 52428800(50M).", out) } dockerCmd(c, "stop", name) dockerCmd(c, "update", "--kernel-memory", "100M", name) dockerCmd(c, "start", name) out, err = inspectField(name, "HostConfig.KernelMemory") c.Assert(err, check.IsNil) if out != "104857600" { c.Fatalf("Got the wrong memory value, we got %d, expected 104857600(100M).", out) } file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" out, _ = dockerCmd(c, "exec", name, "cat", file) c.Assert(strings.TrimSpace(out), checker.Equals, "104857600") } func (s *DockerSuite) TestUpdateStats(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) testRequires(c, cpuCfsQuota) name := "foo" dockerCmd(c, "run", "-d", "-ti", "--name", name, "-m", "500m", "busybox") c.Assert(waitRun(name), checker.IsNil) getMemLimit := func(id string) uint64 { resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") c.Assert(err, checker.IsNil) c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") var v *types.Stats err = json.NewDecoder(body).Decode(&v) c.Assert(err, checker.IsNil) body.Close() return v.MemoryStats.Limit } preMemLimit := getMemLimit(name) dockerCmd(c, "update", "--cpu-quota", "2000", name) curMemLimit := getMemLimit(name) c.Assert(preMemLimit, checker.Equals, curMemLimit) } docker-1.10.3/integration-cli/docker_cli_v2_only_test.go000066400000000000000000000070071267010174400232750ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "net/http" "os" "github.com/go-check/check" ) func makefile(contents string) (string, func(), error) { cleanup := func() { } f, err := ioutil.TempFile(".", "tmp") if err != nil { return "", cleanup, err } err = 
ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) if err != nil { return "", cleanup, err } cleanup = func() { err := os.Remove(f.Name()) if err != nil { fmt.Println("Error removing tmpfile") } } return f.Name(), cleanup, nil } // TestV2Only ensures that a daemon in v2-only mode does not // attempt to contact any v1 registry endpoints. func (s *DockerRegistrySuite) TestV2Only(c *check.C) { reg, err := newTestRegistry(c) c.Assert(err, check.IsNil) reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) }) reg.registerHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { c.Fatal("V1 registry contacted") }) repoName := fmt.Sprintf("%s/busybox", reg.hostport) err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true") c.Assert(err, check.IsNil) dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) defer cleanup() s.d.Cmd("build", "--file", dockerfileName, ".") s.d.Cmd("run", repoName) s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) s.d.Cmd("tag", "busybox", repoName) s.d.Cmd("push", repoName) s.d.Cmd("pull", repoName) } // TestV1 starts a daemon in 'normal' mode // and ensure v1 endpoints are hit for the following operations: // login, push, pull, build & run func (s *DockerRegistrySuite) TestV1(c *check.C) { reg, err := newTestRegistry(c) c.Assert(err, check.IsNil) v2Pings := 0 reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { v2Pings++ // V2 ping 404 causes fallback to v1 w.WriteHeader(404) }) v1Pings := 0 reg.registerHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) { v1Pings++ }) v1Logins := 0 reg.registerHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) { v1Logins++ }) v1Repo := 0 reg.registerHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r 
*http.Request) { v1Repo++ }) reg.registerHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) { v1Repo++ }) err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=false") c.Assert(err, check.IsNil) dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) defer cleanup() s.d.Cmd("build", "--file", dockerfileName, ".") c.Assert(v1Repo, check.Not(check.Equals), 0, check.Commentf("Expected v1 repository access after build")) repoName := fmt.Sprintf("%s/busybox", reg.hostport) s.d.Cmd("run", repoName) c.Assert(v1Repo, check.Not(check.Equals), 1, check.Commentf("Expected v1 repository access after run")) s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) c.Assert(v1Logins, check.Not(check.Equals), 0, check.Commentf("Expected v1 login attempt")) s.d.Cmd("tag", "busybox", repoName) s.d.Cmd("push", repoName) c.Assert(v1Repo, check.Equals, 2) c.Assert(v1Pings, check.Equals, 1) s.d.Cmd("pull", repoName) c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull")) } docker-1.10.3/integration-cli/docker_cli_version_test.go000066400000000000000000000031561267010174400233730ustar00rootroot00000000000000package main import ( "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // ensure docker version works func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { out, _ := dockerCmd(c, "version") stringsToCheck := map[string]int{ "Client:": 1, "Server:": 1, " Version:": 2, " API version:": 2, " Go version:": 2, " Git commit:": 2, " OS/Arch:": 2, " Built:": 2, } for k, v := range stringsToCheck { c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match excepted", k, out)) } } // ensure the Windows daemon return the correct platform string func (s 
*DockerSuite) TestVersionPlatform_w(c *check.C) { testRequires(c, DaemonIsWindows) testVersionPlatform(c, "windows/amd64") } // ensure the Linux daemon return the correct platform string func (s *DockerSuite) TestVersionPlatform_l(c *check.C) { testRequires(c, DaemonIsLinux) testVersionPlatform(c, "linux") } func testVersionPlatform(c *check.C, platform string) { out, _ := dockerCmd(c, "version") expected := "OS/Arch: " + platform split := strings.Split(out, "\n") c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split))) // Verify the second 'OS/Arch' matches the platform. Experimental has // more lines of output than 'regular' bFound := false for i := 14; i < len(split); i++ { if strings.Contains(split[i], expected) { bFound = true break } } c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out)) } docker-1.10.3/integration-cli/docker_cli_volume_driver_compat_unix_test.go000066400000000000000000000133051267010174400271730ustar00rootroot00000000000000// +build !windows package main import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "path/filepath" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func init() { check.Suite(&DockerExternalVolumeSuiteCompatV1_1{ ds: &DockerSuite{}, }) } type vol struct { Name string Mountpoint string Opts map[string]string } type DockerExternalVolumeSuiteCompatV1_1 struct { server *httptest.Server ds *DockerSuite d *Daemon ec *eventCounter volList []vol } func (s *DockerExternalVolumeSuiteCompatV1_1) SetUpTest(c *check.C) { s.d = NewDaemon(c) s.ec = &eventCounter{} } func (s *DockerExternalVolumeSuiteCompatV1_1) TearDownTest(c *check.C) { s.d.Stop() s.ds.TearDownTest(c) } func (s *DockerExternalVolumeSuiteCompatV1_1) SetUpSuite(c *check.C) { mux := http.NewServeMux() s.server = httptest.NewServer(mux) type pluginRequest struct { Name string Opts map[string]string 
} type pluginResp struct { Mountpoint string `json:",omitempty"` Err string `json:",omitempty"` } read := func(b io.ReadCloser) (pluginRequest, error) { defer b.Close() var pr pluginRequest if err := json.NewDecoder(b).Decode(&pr); err != nil { return pr, err } return pr, nil } send := func(w http.ResponseWriter, data interface{}) { switch t := data.(type) { case error: http.Error(w, t.Error(), 500) case string: w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") fmt.Fprintln(w, t) default: w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") json.NewEncoder(w).Encode(&data) } } mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { s.ec.activations++ send(w, `{"Implements": ["VolumeDriver"]}`) }) mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { s.ec.creations++ pr, err := read(r.Body) if err != nil { send(w, err) return } s.volList = append(s.volList, vol{Name: pr.Name, Opts: pr.Opts}) send(w, nil) }) mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { s.ec.removals++ pr, err := read(r.Body) if err != nil { send(w, err) return } if err := os.RemoveAll(hostVolumePath(pr.Name)); err != nil { send(w, &pluginResp{Err: err.Error()}) return } for i, v := range s.volList { if v.Name == pr.Name { if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil { send(w, fmt.Sprintf(`{"Err": "%v"}`, err)) return } s.volList = append(s.volList[:i], s.volList[i+1:]...) 
break } } send(w, nil) }) mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { s.ec.paths++ pr, err := read(r.Body) if err != nil { send(w, err) return } p := hostVolumePath(pr.Name) send(w, &pluginResp{Mountpoint: p}) }) mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { s.ec.mounts++ pr, err := read(r.Body) if err != nil { send(w, err) return } p := hostVolumePath(pr.Name) if err := os.MkdirAll(p, 0755); err != nil { send(w, &pluginResp{Err: err.Error()}) return } if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil { send(w, err) return } send(w, &pluginResp{Mountpoint: p}) }) mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { s.ec.unmounts++ _, err := read(r.Body) if err != nil { send(w, err) return } send(w, nil) }) err := os.MkdirAll("/etc/docker/plugins", 0755) c.Assert(err, checker.IsNil) err = ioutil.WriteFile("/etc/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644) c.Assert(err, checker.IsNil) } func (s *DockerExternalVolumeSuiteCompatV1_1) TearDownSuite(c *check.C) { s.server.Close() err := os.RemoveAll("/etc/docker/plugins") c.Assert(err, checker.IsNil) } func (s *DockerExternalVolumeSuiteCompatV1_1) TestExternalVolumeDriverCompatV1_1(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, checker.IsNil) out, err := s.d.Cmd("run", "--name=test", "-v", "foo:/bar", "--volume-driver", "test-external-volume-driver", "busybox", "sh", "-c", "echo hello > /bar/hello") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("rm", "test") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("run", "--name=test2", "-v", "foo:/bar", "busybox", "cat", "/bar/hello") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(strings.TrimSpace(out), checker.Equals, "hello") err = s.d.Restart() c.Assert(err, checker.IsNil) out, err = s.d.Cmd("start", "-a", "test2") 
c.Assert(strings.TrimSpace(out), checker.Equals, "hello") out, err = s.d.Cmd("rm", "test2") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("volume", "inspect", "foo") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("volume", "rm", "foo") c.Assert(err, checker.IsNil, check.Commentf(out)) } func (s *DockerExternalVolumeSuiteCompatV1_1) TestExternalVolumeDriverCompatOptionsV1_1(c *check.C) { err := s.d.StartWithBusybox() c.Assert(err, checker.IsNil) out, err := s.d.Cmd("volume", "create", "--name", "optvol", "--driver", "test-external-volume-driver", "--opt", "opt1=opt1val", "--opt", "opt2=opt2val") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("volume", "inspect", "optvol") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(s.volList[0].Opts["opt1"], checker.Equals, "opt1val") c.Assert(s.volList[0].Opts["opt2"], checker.Equals, "opt2val") out, err = s.d.Cmd("volume", "rm", "optvol") c.Assert(err, checker.IsNil, check.Commentf(out)) } docker-1.10.3/integration-cli/docker_cli_volume_test.go000066400000000000000000000225441267010174400232170ustar00rootroot00000000000000package main import ( "os/exec" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestVolumeCliCreate(c *check.C) { dockerCmd(c, "volume", "create") _, err := runCommand(exec.Command(dockerBinary, "volume", "create", "-d", "nosuchdriver")) c.Assert(err, check.Not(check.IsNil)) out, _ := dockerCmd(c, "volume", "create", "--name=test") name := strings.TrimSpace(out) c.Assert(name, check.Equals, "test") } func (s *DockerSuite) TestVolumeCliCreateOptionConflict(c *check.C) { dockerCmd(c, "volume", "create", "--name=test") out, _, err := dockerCmdWithError("volume", "create", "--name", "test", "--driver", "nosuchdriver") c.Assert(err, check.NotNil, check.Commentf("volume create exception name already in use with another driver")) c.Assert(out, checker.Contains, "A volume 
named test already exists") out, _ = dockerCmd(c, "volume", "inspect", "--format='{{ .Driver }}'", "test") _, _, err = dockerCmdWithError("volume", "create", "--name", "test", "--driver", strings.TrimSpace(out)) c.Assert(err, check.IsNil) } func (s *DockerSuite) TestVolumeCliInspect(c *check.C) { c.Assert( exec.Command(dockerBinary, "volume", "inspect", "doesntexist").Run(), check.Not(check.IsNil), check.Commentf("volume inspect should error on non-existent volume"), ) out, _ := dockerCmd(c, "volume", "create") name := strings.TrimSpace(out) out, _ = dockerCmd(c, "volume", "inspect", "--format='{{ .Name }}'", name) c.Assert(strings.TrimSpace(out), check.Equals, name) dockerCmd(c, "volume", "create", "--name", "test") out, _ = dockerCmd(c, "volume", "inspect", "--format='{{ .Name }}'", "test") c.Assert(strings.TrimSpace(out), check.Equals, "test") } func (s *DockerSuite) TestVolumeCliInspectMulti(c *check.C) { dockerCmd(c, "volume", "create", "--name", "test1") dockerCmd(c, "volume", "create", "--name", "test2") dockerCmd(c, "volume", "create", "--name", "not-shown") out, _, err := dockerCmdWithError("volume", "inspect", "--format='{{ .Name }}'", "test1", "test2", "doesntexist", "not-shown") c.Assert(err, checker.NotNil) outArr := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(outArr), check.Equals, 3, check.Commentf("\n%s", out)) c.Assert(out, checker.Contains, "test1") c.Assert(out, checker.Contains, "test2") c.Assert(out, checker.Contains, "Error: No such volume: doesntexist") c.Assert(out, checker.Not(checker.Contains), "not-shown") } func (s *DockerSuite) TestVolumeCliLs(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = "c:" } out, _ := dockerCmd(c, "volume", "create") id := strings.TrimSpace(out) dockerCmd(c, "volume", "create", "--name", "test") dockerCmd(c, "run", "-v", prefix+"/foo", "busybox", "ls", "/") out, _ = dockerCmd(c, "volume", "ls") outArr := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(outArr), 
check.Equals, 4, check.Commentf("\n%s", out)) // Since there is no guarantee of ordering of volumes, we just make sure the names are in the output c.Assert(strings.Contains(out, id+"\n"), check.Equals, true) c.Assert(strings.Contains(out, "test\n"), check.Equals, true) } func (s *DockerSuite) TestVolumeCliLsFilterDangling(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = "c:" } dockerCmd(c, "volume", "create", "--name", "testnotinuse1") dockerCmd(c, "volume", "create", "--name", "testisinuse1") dockerCmd(c, "volume", "create", "--name", "testisinuse2") // Make sure both "created" (but not started), and started // containers are included in reference counting dockerCmd(c, "run", "--name", "volume-test1", "-v", "testisinuse1:"+prefix+"/foo", "busybox", "true") dockerCmd(c, "create", "--name", "volume-test2", "-v", "testisinuse2:"+prefix+"/foo", "busybox", "true") out, _ := dockerCmd(c, "volume", "ls") // No filter, all volumes should show c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false") // Explicitly disabling dangling c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true") // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' 
in output")) c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=1") // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output, dangling also accept 1 c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=0") // dangling=0 is same as dangling=false case c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) } func (s *DockerSuite) TestVolumeCliLsErrorWithInvalidFilterName(c *check.C) { out, _, err := dockerCmdWithError("volume", "ls", "-f", "FOO=123") c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "Invalid filter") } func (s *DockerSuite) TestVolumeCliLsWithIncorrectFilterValue(c *check.C) { out, _, err := dockerCmdWithError("volume", "ls", "-f", "dangling=invalid") c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, "Invalid filter") } func (s *DockerSuite) TestVolumeCliRm(c *check.C) { prefix := "" if daemonPlatform == "windows" { prefix = "c:" } out, _ := dockerCmd(c, "volume", "create") id := strings.TrimSpace(out) dockerCmd(c, "volume", "create", "--name", "test") dockerCmd(c, "volume", "rm", 
id) dockerCmd(c, "volume", "rm", "test") out, _ = dockerCmd(c, "volume", "ls") outArr := strings.Split(strings.TrimSpace(out), "\n") c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) volumeID := "testing" dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar") out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "volume", "rm", "testing")) c.Assert( err, check.Not(check.IsNil), check.Commentf("Should not be able to remove volume that is in use by a container\n%s", out)) out, _ = dockerCmd(c, "run", "--volumes-from=test", "--name=test2", "busybox", "sh", "-c", "cat /foo/bar") c.Assert(strings.TrimSpace(out), check.Equals, "hello") dockerCmd(c, "rm", "-fv", "test2") dockerCmd(c, "volume", "inspect", volumeID) dockerCmd(c, "rm", "-f", "test") out, _ = dockerCmd(c, "run", "--name=test2", "-v", volumeID+":"+prefix+"/foo", "busybox", "sh", "-c", "cat /foo/bar") c.Assert(strings.TrimSpace(out), check.Equals, "hello", check.Commentf("volume data was removed")) dockerCmd(c, "rm", "test2") dockerCmd(c, "volume", "rm", volumeID) c.Assert( exec.Command("volume", "rm", "doesntexist").Run(), check.Not(check.IsNil), check.Commentf("volume rm should fail with non-existent volume"), ) } func (s *DockerSuite) TestVolumeCliNoArgs(c *check.C) { out, _ := dockerCmd(c, "volume") // no args should produce the cmd usage output usage := "Usage: docker volume [OPTIONS] [COMMAND]" c.Assert(out, checker.Contains, usage) // invalid arg should error and show the command usage on stderr _, stderr, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "somearg")) c.Assert(err, check.NotNil, check.Commentf(stderr)) c.Assert(stderr, checker.Contains, usage) // invalid flag should error and show the flag error and cmd usage _, stderr, _, err = runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "--no-such-flag")) c.Assert(err, check.NotNil, check.Commentf(stderr)) c.Assert(stderr, 
checker.Contains, usage) c.Assert(stderr, checker.Contains, "flag provided but not defined: --no-such-flag") } func (s *DockerSuite) TestVolumeCliInspectTmplError(c *check.C) { out, _ := dockerCmd(c, "volume", "create") name := strings.TrimSpace(out) out, exitCode, err := dockerCmdWithError("volume", "inspect", "--format='{{ .FooBar }}'", name) c.Assert(err, checker.NotNil, check.Commentf("Output: %s", out)) c.Assert(exitCode, checker.Equals, 1, check.Commentf("Output: %s", out)) c.Assert(out, checker.Contains, "Template parsing error") } docker-1.10.3/integration-cli/docker_cli_wait_test.go000066400000000000000000000061771267010174400226600ustar00rootroot00000000000000package main import ( "bytes" "os/exec" "strings" "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) // non-blocking wait with 0 exit code func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "true") containerID := strings.TrimSpace(out) err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) c.Assert(err, checker.IsNil) //Container should have stopped by now out, _ = dockerCmd(c, "wait", containerID) c.Assert(strings.TrimSpace(out), checker.Equals, "0", check.Commentf("failed to set up container, %v", out)) } // blocking wait with 0 exit code func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { // Windows busybox does not support trap in this way, not sleep with sub-second // granularity. It will always exit 0x40010004. 
testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do usleep 10; done") containerID := strings.TrimSpace(out) c.Assert(waitRun(containerID), checker.IsNil) chWait := make(chan string) go func() { out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) chWait <- out }() time.Sleep(100 * time.Millisecond) dockerCmd(c, "stop", containerID) select { case status := <-chWait: c.Assert(strings.TrimSpace(status), checker.Equals, "0", check.Commentf("expected exit 0, got %s", status)) case <-time.After(2 * time.Second): c.Fatal("timeout waiting for `docker wait` to exit") } } // non-blocking wait with random exit code func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "exit 99") containerID := strings.TrimSpace(out) err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) c.Assert(err, checker.IsNil) //Container should have stopped by now out, _ = dockerCmd(c, "wait", containerID) c.Assert(strings.TrimSpace(out), checker.Equals, "99", check.Commentf("failed to set up container, %v", out)) } // blocking wait with random exit code func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) { // Cannot run on Windows as trap in Windows busybox does not support trap in this way. 
testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do usleep 10; done") containerID := strings.TrimSpace(out) c.Assert(waitRun(containerID), checker.IsNil) chWait := make(chan error) waitCmd := exec.Command(dockerBinary, "wait", containerID) waitCmdOut := bytes.NewBuffer(nil) waitCmd.Stdout = waitCmdOut c.Assert(waitCmd.Start(), checker.IsNil) go func() { chWait <- waitCmd.Wait() }() dockerCmd(c, "stop", containerID) select { case err := <-chWait: c.Assert(err, checker.IsNil) status, err := waitCmdOut.ReadString('\n') c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(status), checker.Equals, "99", check.Commentf("expected exit 99, got %s", status)) case <-time.After(2 * time.Second): waitCmd.Process.Kill() c.Fatal("timeout waiting for `docker wait` to exit") } } docker-1.10.3/integration-cli/docker_hub_pull_suite_test.go000066400000000000000000000054341267010174400241030ustar00rootroot00000000000000package main import ( "os/exec" "runtime" "strings" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func init() { // FIXME. Temporarily turning this off for Windows as GH16039 was breaking // Windows to Linux CI @icecrime if runtime.GOOS != "windows" { check.Suite(newDockerHubPullSuite()) } } // DockerHubPullSuite provides a isolated daemon that doesn't have all the // images that are baked into our 'global' test environment daemon (e.g., // busybox, httpserver, ...). // // We use it for push/pull tests where we want to start fresh, and measure the // relative impact of each individual operation. As part of this suite, all // images are removed after each test. type DockerHubPullSuite struct { d *Daemon ds *DockerSuite } // newDockerHubPullSuite returns a new instance of a DockerHubPullSuite. func newDockerHubPullSuite() *DockerHubPullSuite { return &DockerHubPullSuite{ ds: &DockerSuite{}, } } // SetUpSuite starts the suite daemon. 
func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { testRequires(c, DaemonIsLinux) s.d = NewDaemon(c) err := s.d.Start() c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err)) } // TearDownSuite stops the suite daemon. func (s *DockerHubPullSuite) TearDownSuite(c *check.C) { if s.d != nil { err := s.d.Stop() c.Assert(err, checker.IsNil, check.Commentf("stopping push/pull test daemon: %v", err)) } } // SetUpTest declares that all tests of this suite require network. func (s *DockerHubPullSuite) SetUpTest(c *check.C) { testRequires(c, Network) } // TearDownTest removes all images from the suite daemon. func (s *DockerHubPullSuite) TearDownTest(c *check.C) { out := s.Cmd(c, "images", "-aq") images := strings.Split(out, "\n") images = append([]string{"-f"}, images...) s.d.Cmd("rmi", images...) s.ds.TearDownTest(c) } // Cmd executes a command against the suite daemon and returns the combined // output. The function fails the test when the command returns an error. func (s *DockerHubPullSuite) Cmd(c *check.C, name string, arg ...string) string { out, err := s.CmdWithError(name, arg...) c.Assert(err, checker.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(arg, " "), out, err)) return out } // CmdWithError executes a command against the suite daemon and returns the // combined output as well as any error. func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, error) { c := s.MakeCmd(name, arg...) b, err := c.CombinedOutput() return string(b), err } // MakeCmd returns a exec.Cmd command to run against the suite daemon. func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { args := []string{"--host", s.d.sock(), name} args = append(args, arg...) return exec.Command(dockerBinary, args...) 
} docker-1.10.3/integration-cli/docker_test_vars.go000066400000000000000000000071021267010174400220250ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os" "os/exec" "github.com/docker/docker/pkg/reexec" ) var ( // the docker binary to use dockerBinary = "docker" // the private registry image to use for tests involving the registry registryImageName = "registry" // the private registry to use for tests privateRegistryURL = "127.0.0.1:5000" runtimePath = "/var/run/docker" execDriverPath = runtimePath + "/execdriver/native" workingDirectory string // isLocalDaemon is true if the daemon under test is on the same // host as the CLI. isLocalDaemon bool // daemonPlatform is held globally so that tests can make intelligent // decisions on how to configure themselves according to the platform // of the daemon. This is initialized in docker_utils by sending // a version call to the daemon and examining the response header. daemonPlatform string // windowsDaemonKV is used on Windows to distinguish between different // versions. This is necessary to enable certain tests based on whether // the platform supports it. For example, Windows Server 2016 TP3 does // not support volumes, but TP4 does. windowsDaemonKV int // daemonDefaultImage is the name of the default image to use when running // tests. This is platform dependent. 
daemonDefaultImage string // For a local daemon on Linux, these values will be used for testing // user namespace support as the standard graph path(s) will be // appended with the root remapped uid.gid prefix dockerBasePath string volumesConfigPath string containerStoragePath string ) const ( // WindowsBaseImage is the name of the base image for Windows testing WindowsBaseImage = "windowsservercore" // DefaultImage is the name of the base image for the majority of tests that // are run across suites DefaultImage = "busybox" ) func init() { reexec.Init() if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { dockerBinary = dockerBin } var err error dockerBinary, err = exec.LookPath(dockerBinary) if err != nil { fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)", err) os.Exit(1) } if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { registryImageName = registryImage } if registry := os.Getenv("REGISTRY_URL"); registry != "" { privateRegistryURL = registry } workingDirectory, _ = os.Getwd() // Deterministically working out the environment in which CI is running // to evaluate whether the daemon is local or remote is not possible through // a build tag. // // For example Windows CI under Jenkins test the 64-bit // Windows binary build with the daemon build tag, but calls a remote // Linux daemon. // // We can't just say if Windows then assume the daemon is local as at // some point, we will be testing the Windows CLI against a Windows daemon. // // Similarly, it will be perfectly valid to also run CLI tests from // a Linux CLI (built with the daemon tag) against a Windows daemon. 
if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 { isLocalDaemon = false } else { isLocalDaemon = true } // This is only used for a tests with local daemon true (Linux-only today) // default is "/var/lib/docker", but we'll try and ask the // /info endpoint for the specific root dir dockerBasePath = "/var/lib/docker" type Info struct { DockerRootDir string } var i Info status, b, err := sockRequest("GET", "/info", nil) if err == nil && status == 200 { if err = json.Unmarshal(b, &i); err == nil { dockerBasePath = i.DockerRootDir } } volumesConfigPath = dockerBasePath + "/volumes" containerStoragePath = dockerBasePath + "/containers" } docker-1.10.3/integration-cli/docker_utils.go000066400000000000000000001375201267010174400211630ustar00rootroot00000000000000package main import ( "bufio" "bytes" "crypto/tls" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/httptest" "net/http/httputil" "net/url" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "time" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringutils" "github.com/docker/engine-api/types" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/go-check/check" ) func init() { cmd := exec.Command(dockerBinary, "images") cmd.Env = appendBaseEnv(true) out, err := cmd.CombinedOutput() if err != nil { panic(fmt.Errorf("err=%v\nout=%s\n", err, out)) } lines := strings.Split(string(out), "\n")[1:] for _, l := range lines { if l == "" { continue } fields := strings.Fields(l) imgTag := fields[0] + ":" + fields[1] // just for case if we have dangling images in tested daemon if imgTag != ":" { protectedImages[imgTag] = struct{}{} } } // Obtain the daemon platform so that it can be used by tests to make // intelligent decisions about how to configure themselves, and validate // that the target platform is valid. 
res, _, err := sockRequestRaw("GET", "/version", nil, "application/json") if err != nil || res == nil || (res != nil && res.StatusCode != http.StatusOK) { panic(fmt.Errorf("Init failed to get version: %v. Res=%v", err.Error(), res)) } svrHeader, _ := httputils.ParseServerHeader(res.Header.Get("Server")) daemonPlatform = svrHeader.OS if daemonPlatform != "linux" && daemonPlatform != "windows" { panic("Cannot run tests against platform: " + daemonPlatform) } // On Windows, extract out the version as we need to make selective // decisions during integration testing as and when features are implemented. if daemonPlatform == "windows" { if body, err := ioutil.ReadAll(res.Body); err == nil { var server types.Version if err := json.Unmarshal(body, &server); err == nil { // eg in "10.0 10550 (10550.1000.amd64fre.branch.date-time)" we want 10550 windowsDaemonKV, _ = strconv.Atoi(strings.Split(server.KernelVersion, " ")[1]) } } } // Now we know the daemon platform, can set paths used by tests. _, body, err := sockRequest("GET", "/info", nil) if err != nil { panic(err) } var info types.Info err = json.Unmarshal(body, &info) dockerBasePath = info.DockerRootDir volumesConfigPath = filepath.Join(dockerBasePath, "volumes") containerStoragePath = filepath.Join(dockerBasePath, "containers") // Make sure in context of daemon, not the local platform. Note we can't // use filepath.FromSlash or ToSlash here as they are a no-op on Unix. if daemonPlatform == "windows" { volumesConfigPath = strings.Replace(volumesConfigPath, `/`, `\`, -1) containerStoragePath = strings.Replace(containerStoragePath, `/`, `\`, -1) } else { volumesConfigPath = strings.Replace(volumesConfigPath, `\`, `/`, -1) containerStoragePath = strings.Replace(containerStoragePath, `\`, `/`, -1) } } // Daemon represents a Docker daemon for the testing framework. 
type Daemon struct { // Defaults to "daemon" // Useful to set to --daemon or -d for checking backwards compatibility Command string GlobalFlags []string id string c *check.C logFile *os.File folder string root string stdin io.WriteCloser stdout, stderr io.ReadCloser cmd *exec.Cmd storageDriver string wait chan error userlandProxy bool useDefaultHost bool useDefaultTLSHost bool } type clientConfig struct { transport *http.Transport scheme string addr string } // NewDaemon returns a Daemon instance to be used for testing. // This will create a directory such as d123456789 in the folder specified by $DEST. // The daemon will not automatically start. func NewDaemon(c *check.C) *Daemon { dest := os.Getenv("DEST") c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable")) id := fmt.Sprintf("d%d", time.Now().UnixNano()%100000000) dir := filepath.Join(dest, id) daemonFolder, err := filepath.Abs(dir) c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir)) daemonRoot := filepath.Join(daemonFolder, "root") c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir)) userlandProxy := true if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { if val, err := strconv.ParseBool(env); err != nil { userlandProxy = val } } return &Daemon{ Command: "daemon", id: id, c: c, folder: daemonFolder, root: daemonRoot, storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), userlandProxy: userlandProxy, } } func (d *Daemon) getClientConfig() (*clientConfig, error) { var ( transport *http.Transport scheme string addr string proto string ) if d.useDefaultTLSHost { option := &tlsconfig.Options{ CAFile: "fixtures/https/ca.pem", CertFile: "fixtures/https/client-cert.pem", KeyFile: "fixtures/https/client-key.pem", } tlsConfig, err := tlsconfig.Client(*option) if err != nil { return nil, err } transport = &http.Transport{ TLSClientConfig: tlsConfig, } addr = fmt.Sprintf("%s:%d", 
opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) scheme = "https" proto = "tcp" } else if d.useDefaultHost { addr = opts.DefaultUnixSocket proto = "unix" scheme = "http" transport = &http.Transport{} } else { addr = filepath.Join(d.folder, "docker.sock") proto = "unix" scheme = "http" transport = &http.Transport{} } sockets.ConfigureTCPTransport(transport, proto, addr) return &clientConfig{ transport: transport, scheme: scheme, addr: addr, }, nil } // Start will start the daemon and return once it is ready to receive requests. // You can specify additional daemon flags. func (d *Daemon) Start(arg ...string) error { dockerBinary, err := exec.LookPath(dockerBinary) d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id)) args := append(d.GlobalFlags, d.Command, "--graph", d.root, "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), ) if !(d.useDefaultHost || d.useDefaultTLSHost) { args = append(args, []string{"--host", d.sock()}...) } if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { args = append(args, []string{"--userns-remap", root}...) } // If we don't explicitly set the log-level or debug flag(-D) then // turn on debug mode foundIt := false for _, a := range arg { if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { foundIt = true } } if !foundIt { args = append(args, "--debug") } if d.storageDriver != "" { args = append(args, "--storage-driver", d.storageDriver) } args = append(args, arg...) d.cmd = exec.Command(dockerBinary, args...) 
d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder)) d.cmd.Stdout = d.logFile d.cmd.Stderr = d.logFile if err := d.cmd.Start(); err != nil { return fmt.Errorf("[%s] could not start daemon container: %v", d.id, err) } wait := make(chan error) go func() { wait <- d.cmd.Wait() d.c.Logf("[%s] exiting daemon", d.id) close(wait) }() d.wait = wait tick := time.Tick(500 * time.Millisecond) // make sure daemon is ready to receive requests startTime := time.Now().Unix() for { d.c.Logf("[%s] waiting for daemon to start", d.id) if time.Now().Unix()-startTime > 5 { // After 5 seconds, give up return fmt.Errorf("[%s] Daemon exited and never started", d.id) } select { case <-time.After(2 * time.Second): return fmt.Errorf("[%s] timeout: daemon does not respond", d.id) case <-tick: clientConfig, err := d.getClientConfig() if err != nil { return err } client := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest("GET", "/_ping", nil) d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not create new request", d.id)) req.URL.Host = clientConfig.addr req.URL.Scheme = clientConfig.scheme resp, err := client.Do(req) if err != nil { continue } if resp.StatusCode != http.StatusOK { d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) } d.c.Logf("[%s] daemon started", d.id) d.root, err = d.queryRootDir() if err != nil { return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) } return nil } } } // StartWithBusybox will first start the daemon with Daemon.Start() // then save the busybox image from the main daemon and load it into this Daemon instance. 
func (d *Daemon) StartWithBusybox(arg ...string) error { if err := d.Start(arg...); err != nil { return err } bb := filepath.Join(d.folder, "busybox.tar") if _, err := os.Stat(bb); err != nil { if !os.IsNotExist(err) { return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) } // saving busybox image from main daemon if err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").Run(); err != nil { return fmt.Errorf("could not save busybox image: %v", err) } } // loading busybox image to this daemon if out, err := d.Cmd("load", "--input", bb); err != nil { return fmt.Errorf("could not load busybox image: %s", out) } if err := os.Remove(bb); err != nil { d.c.Logf("could not remove %s: %v", bb, err) } return nil } // Stop will send a SIGINT every second and wait for the daemon to stop. // If it timeouts, a SIGKILL is sent. // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. func (d *Daemon) Stop() error { if d.cmd == nil || d.wait == nil { return errors.New("daemon not started") } defer func() { d.logFile.Close() d.cmd = nil }() i := 1 tick := time.Tick(time.Second) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return fmt.Errorf("could not send signal: %v", err) } out1: for { select { case err := <-d.wait: return err case <-time.After(15 * time.Second): // time for stopping jobs and run onShutdown hooks d.c.Log("timeout") break out1 } } out2: for { select { case err := <-d.wait: return err case <-tick: i++ if i > 4 { d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i) break out2 } d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return fmt.Errorf("could not send signal: %v", err) } } } if err := d.cmd.Process.Kill(); err != nil { d.c.Logf("Could not kill daemon: %v", err) return err } return nil } // Restart will restart the daemon by first stopping it 
and then starting it. func (d *Daemon) Restart(arg ...string) error { d.Stop() // in the case of tests running a user namespace-enabled daemon, we have resolved // d.root to be the actual final path of the graph dir after the "uid.gid" of // remapped root is added--we need to subtract it from the path before calling // start or else we will continue making subdirectories rather than truly restarting // with the same location/root: if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { d.root = filepath.Dir(d.root) } return d.Start(arg...) } func (d *Daemon) queryRootDir() (string, error) { // update daemon root by asking /info endpoint (to support user // namespaced daemon with root remapped uid.gid directory) clientConfig, err := d.getClientConfig() if err != nil { return "", err } client := &http.Client{ Transport: clientConfig.transport, } req, err := http.NewRequest("GET", "/info", nil) if err != nil { return "", err } req.Header.Set("Content-Type", "application/json") req.URL.Host = clientConfig.addr req.URL.Scheme = clientConfig.scheme resp, err := client.Do(req) if err != nil { return "", err } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { return resp.Body.Close() }) type Info struct { DockerRootDir string } var b []byte var i Info b, err = readBody(body) if err == nil && resp.StatusCode == 200 { // read the docker root dir if err = json.Unmarshal(b, &i); err == nil { return i.DockerRootDir, nil } } return "", err } func (d *Daemon) sock() string { return fmt.Sprintf("unix://%s/docker.sock", d.folder) } // Cmd will execute a docker CLI command against this Daemon. // Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version func (d *Daemon) Cmd(name string, arg ...string) (string, error) { args := []string{"--host", d.sock(), name} args = append(args, arg...) c := exec.Command(dockerBinary, args...) 
b, err := c.CombinedOutput()
	return string(b), err
}

// CmdWithArgs will execute a docker CLI command against a daemon with the
// given additional arguments
func (d *Daemon) CmdWithArgs(daemonArgs []string, name string, arg ...string) (string, error) {
	args := append(daemonArgs, name)
	args = append(args, arg...)
	c := exec.Command(dockerBinary, args...)
	b, err := c.CombinedOutput()
	return string(b), err
}

// LogfileName returns the path of the daemon's log file
func (d *Daemon) LogfileName() string {
	return d.logFile.Name()
}

// daemonHost returns the daemon URL to test against: DOCKER_HOST from the
// environment if set, otherwise the default unix socket.
func daemonHost() string {
	daemonURLStr := "unix://" + opts.DefaultUnixSocket
	if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" {
		daemonURLStr = daemonHostVar
	}
	return daemonURLStr
}

// getTLSConfig builds a client TLS config from the ca.pem/cert.pem/key.pem
// files under DOCKER_CERT_PATH. It errors if DOCKER_CERT_PATH is unset.
func getTLSConfig() (*tls.Config, error) {
	dockerCertPath := os.Getenv("DOCKER_CERT_PATH")

	if dockerCertPath == "" {
		return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable")
	}

	option := &tlsconfig.Options{
		CAFile:   filepath.Join(dockerCertPath, "ca.pem"),
		CertFile: filepath.Join(dockerCertPath, "cert.pem"),
		KeyFile:  filepath.Join(dockerCertPath, "key.pem"),
	}
	tlsConfig, err := tlsconfig.Client(*option)
	if err != nil {
		return nil, err
	}

	return tlsConfig, nil
}

// sockConn dials the daemon host (unix socket or tcp, optionally TLS when
// DOCKER_TLS_VERIFY is set) with the given timeout.
func sockConn(timeout time.Duration) (net.Conn, error) {
	daemon := daemonHost()
	daemonURL, err := url.Parse(daemon)
	if err != nil {
		return nil, fmt.Errorf("could not parse url %q: %v", daemon, err)
	}

	var c net.Conn
	switch daemonURL.Scheme {
	case "unix":
		return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout)
	case "tcp":
		if os.Getenv("DOCKER_TLS_VERIFY") != "" {
			// Setup the socket TLS configuration.
tlsConfig, err := getTLSConfig() if err != nil { return nil, err } dialer := &net.Dialer{Timeout: timeout} return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) } return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) default: return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) } } func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { jsonData := bytes.NewBuffer(nil) if err := json.NewEncoder(jsonData).Encode(data); err != nil { return -1, nil, err } res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json") if err != nil { return -1, nil, err } b, err := readBody(body) return res.StatusCode, b, err } func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { req, client, err := newRequestClient(method, endpoint, data, ct) if err != nil { return nil, nil, err } resp, err := client.Do(req) if err != nil { client.Close() return nil, nil, err } body := ioutils.NewReadCloserWrapper(resp.Body, func() error { defer resp.Body.Close() return client.Close() }) return resp, body, nil } func sockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) { req, client, err := newRequestClient(method, endpoint, data, ct) if err != nil { return nil, nil, err } client.Do(req) conn, br := client.Hijack() return conn, br, nil } func newRequestClient(method, endpoint string, data io.Reader, ct string) (*http.Request, *httputil.ClientConn, error) { c, err := sockConn(time.Duration(10 * time.Second)) if err != nil { return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) } client := httputil.NewClientConn(c, nil) req, err := http.NewRequest(method, endpoint, data) if err != nil { client.Close() return nil, nil, fmt.Errorf("could not create new request: %v", err) } if ct != "" { req.Header.Set("Content-Type", ct) } return req, client, nil } func readBody(b io.ReadCloser) 
([]byte, error) { defer b.Close() return ioutil.ReadAll(b) } func deleteContainer(container string) error { container = strings.TrimSpace(strings.Replace(container, "\n", " ", -1)) rmArgs := strings.Split(fmt.Sprintf("rm -fv %v", container), " ") exitCode, err := runCommand(exec.Command(dockerBinary, rmArgs...)) // set error manually if not set if exitCode != 0 && err == nil { err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero") } return err } func getAllContainers() (string, error) { getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") out, exitCode, err := runCommandWithOutput(getContainersCmd) if exitCode != 0 && err == nil { err = fmt.Errorf("failed to get a list of containers: %v\n", out) } return out, err } func deleteAllContainers() error { containers, err := getAllContainers() if err != nil { fmt.Println(containers) return err } if containers != "" { if err = deleteContainer(containers); err != nil { return err } } return nil } func deleteAllNetworks() error { networks, err := getAllNetworks() if err != nil { return err } var errors []string for _, n := range networks { if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { continue } status, b, err := sockRequest("DELETE", "/networks/"+n.Name, nil) if err != nil { errors = append(errors, err.Error()) continue } if status != http.StatusNoContent { errors = append(errors, fmt.Sprintf("error deleting network %s: %s", n.Name, string(b))) } } if len(errors) > 0 { return fmt.Errorf(strings.Join(errors, "\n")) } return nil } func getAllNetworks() ([]types.NetworkResource, error) { var networks []types.NetworkResource _, b, err := sockRequest("GET", "/networks", nil) if err != nil { return nil, err } if err := json.Unmarshal(b, &networks); err != nil { return nil, err } return networks, nil } func deleteAllVolumes() error { volumes, err := getAllVolumes() if err != nil { return err } var errors []string for _, v := range volumes { status, b, err := sockRequest("DELETE", 
"/volumes/"+v.Name, nil) if err != nil { errors = append(errors, err.Error()) continue } if status != http.StatusNoContent { errors = append(errors, fmt.Sprintf("error deleting volume %s: %s", v.Name, string(b))) } } if len(errors) > 0 { return fmt.Errorf(strings.Join(errors, "\n")) } return nil } func getAllVolumes() ([]*types.Volume, error) { var volumes types.VolumesListResponse _, b, err := sockRequest("GET", "/volumes", nil) if err != nil { return nil, err } if err := json.Unmarshal(b, &volumes); err != nil { return nil, err } return volumes.Volumes, nil } var protectedImages = map[string]struct{}{} func deleteAllImages() error { cmd := exec.Command(dockerBinary, "images") cmd.Env = appendBaseEnv(true) out, err := cmd.CombinedOutput() if err != nil { return err } lines := strings.Split(string(out), "\n")[1:] var imgs []string for _, l := range lines { if l == "" { continue } fields := strings.Fields(l) imgTag := fields[0] + ":" + fields[1] if _, ok := protectedImages[imgTag]; !ok { if fields[0] == "" { imgs = append(imgs, fields[2]) continue } imgs = append(imgs, imgTag) } } if len(imgs) == 0 { return nil } args := append([]string{"rmi", "-f"}, imgs...) 
if err := exec.Command(dockerBinary, args...).Run(); err != nil {
		return err
	}
	return nil
}

// getPausedContainers returns the IDs of all paused containers, one per line.
func getPausedContainers() (string, error) {
	getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a")
	out, exitCode, err := runCommandWithOutput(getPausedContainersCmd)
	if exitCode != 0 && err == nil {
		err = fmt.Errorf("failed to get a list of paused containers: %v\n", out)
	}

	return out, err
}

// getSliceOfPausedContainers returns the paused-container IDs as a slice.
func getSliceOfPausedContainers() ([]string, error) {
	out, err := getPausedContainers()
	if err == nil {
		if len(out) == 0 {
			return nil, err
		}
		slice := strings.Split(strings.TrimSpace(out), "\n")
		return slice, err
	}
	return []string{out}, err
}

// unpauseContainer unpauses the given container, reporting failure to the
// caller.
func unpauseContainer(container string) error {
	unpauseCmd := exec.Command(dockerBinary, "unpause", container)
	exitCode, err := runCommand(unpauseCmd)
	if exitCode != 0 && err == nil {
		err = fmt.Errorf("failed to unpause container")
	}

	// FIX: the original returned nil unconditionally, silently dropping the
	// error and making unpauseAllContainers' error check a no-op.
	return err
}

// unpauseAllContainers unpauses every currently paused container.
func unpauseAllContainers() error {
	containers, err := getPausedContainers()
	if err != nil {
		fmt.Println(containers)
		return err
	}

	containers = strings.Replace(containers, "\n", " ", -1)
	containers = strings.Trim(containers, " ")
	containerList := strings.Split(containers, " ")

	for _, value := range containerList {
		if err = unpauseContainer(value); err != nil {
			return err
		}
	}

	return nil
}

func deleteImages(images ...string) error {
	args := []string{"rmi", "-f"}
	args = append(args, images...)
	rmiCmd := exec.Command(dockerBinary, args...)
exitCode, err := runCommand(rmiCmd) // set error manually if not set if exitCode != 0 && err == nil { err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero") } return err } func imageExists(image string) error { inspectCmd := exec.Command(dockerBinary, "inspect", image) exitCode, err := runCommand(inspectCmd) if exitCode != 0 && err == nil { err = fmt.Errorf("couldn't find image %q", image) } return err } func pullImageIfNotExist(image string) error { if err := imageExists(image); err != nil { pullCmd := exec.Command(dockerBinary, "pull", image) _, exitCode, err := runCommandWithOutput(pullCmd) if err != nil || exitCode != 0 { return fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) } } return nil } func dockerCmdWithError(args ...string) (string, int, error) { return integration.DockerCmdWithError(dockerBinary, args...) } func dockerCmdWithStdoutStderr(c *check.C, args ...string) (string, string, int) { return integration.DockerCmdWithStdoutStderr(dockerBinary, c, args...) } func dockerCmd(c *check.C, args ...string) (string, int) { return integration.DockerCmd(dockerBinary, c, args...) } // execute a docker command with a timeout func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, error) { return integration.DockerCmdWithTimeout(dockerBinary, timeout, args...) } // execute a docker command in a directory func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) { return integration.DockerCmdInDir(dockerBinary, path, args...) } // execute a docker command in a directory with a timeout func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) (string, int, error) { return integration.DockerCmdInDirWithTimeout(dockerBinary, timeout, path, args...) 
} // find the State.ExitCode in container metadata func findContainerExitCode(c *check.C, name string, vargs ...string) string { args := append(vargs, "inspect", "--format='{{ .State.ExitCode }} {{ .State.Error }}'", name) cmd := exec.Command(dockerBinary, args...) out, _, err := runCommandWithOutput(cmd) if err != nil { c.Fatal(err, out) } return out } func findContainerIP(c *check.C, id string, network string) string { out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) return strings.Trim(out, " \r\n'") } func (d *Daemon) findContainerIP(id string) string { out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id) if err != nil { d.c.Log(err) } return strings.Trim(out, " \r\n'") } func getContainerCount() (int, error) { const containers = "Containers:" cmd := exec.Command(dockerBinary, "info") out, _, err := runCommandWithOutput(cmd) if err != nil { return 0, err } lines := strings.Split(out, "\n") for _, line := range lines { if strings.Contains(line, containers) { output := strings.TrimSpace(line) output = strings.TrimLeft(output, containers) output = strings.Trim(output, " ") containerCount, err := strconv.Atoi(output) if err != nil { return 0, err } return containerCount, nil } } return 0, fmt.Errorf("couldn't find the Container count in the output") } // FakeContext creates directories that can be used as a build context type FakeContext struct { Dir string } // Add a file at a path, creating directories where necessary func (f *FakeContext) Add(file, content string) error { return f.addFile(file, []byte(content)) } func (f *FakeContext) addFile(file string, content []byte) error { filepath := path.Join(f.Dir, file) dirpath := path.Dir(filepath) if dirpath != "." 
{ if err := os.MkdirAll(dirpath, 0755); err != nil { return err } } return ioutil.WriteFile(filepath, content, 0644) } // Delete a file at a path func (f *FakeContext) Delete(file string) error { filepath := path.Join(f.Dir, file) return os.RemoveAll(filepath) } // Close deletes the context func (f *FakeContext) Close() error { return os.RemoveAll(f.Dir) } func fakeContextFromNewTempDir() (*FakeContext, error) { tmp, err := ioutil.TempDir("", "fake-context") if err != nil { return nil, err } if err := os.Chmod(tmp, 0755); err != nil { return nil, err } return fakeContextFromDir(tmp), nil } func fakeContextFromDir(dir string) *FakeContext { return &FakeContext{dir} } func fakeContextWithFiles(files map[string]string) (*FakeContext, error) { ctx, err := fakeContextFromNewTempDir() if err != nil { return nil, err } for file, content := range files { if err := ctx.Add(file, content); err != nil { ctx.Close() return nil, err } } return ctx, nil } func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error { if err := ctx.Add("Dockerfile", dockerfile); err != nil { ctx.Close() return err } return nil } func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { ctx, err := fakeContextWithFiles(files) if err != nil { return nil, err } if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil { return nil, err } return ctx, nil } // FakeStorage is a static file server. It might be running locally or remotely // on test host. 
type FakeStorage interface { Close() error URL() string CtxDir() string } func fakeBinaryStorage(archives map[string]*bytes.Buffer) (FakeStorage, error) { ctx, err := fakeContextFromNewTempDir() if err != nil { return nil, err } for name, content := range archives { if err := ctx.addFile(name, content.Bytes()); err != nil { return nil, err } } return fakeStorageWithContext(ctx) } // fakeStorage returns either a local or remote (at daemon machine) file server func fakeStorage(files map[string]string) (FakeStorage, error) { ctx, err := fakeContextWithFiles(files) if err != nil { return nil, err } return fakeStorageWithContext(ctx) } // fakeStorageWithContext returns either a local or remote (at daemon machine) file server func fakeStorageWithContext(ctx *FakeContext) (FakeStorage, error) { if isLocalDaemon { return newLocalFakeStorage(ctx) } return newRemoteFileServer(ctx) } // localFileStorage is a file storage on the running machine type localFileStorage struct { *FakeContext *httptest.Server } func (s *localFileStorage) URL() string { return s.Server.URL } func (s *localFileStorage) CtxDir() string { return s.FakeContext.Dir } func (s *localFileStorage) Close() error { defer s.Server.Close() return s.FakeContext.Close() } func newLocalFakeStorage(ctx *FakeContext) (*localFileStorage, error) { handler := http.FileServer(http.Dir(ctx.Dir)) server := httptest.NewServer(handler) return &localFileStorage{ FakeContext: ctx, Server: server, }, nil } // remoteFileServer is a containerized static file server started on the remote // testing machine to be used in URL-accepting docker build functionality. type remoteFileServer struct { host string // hostname/port web server is listening to on docker host e.g. 
0.0.0.0:43712 container string image string ctx *FakeContext } func (f *remoteFileServer) URL() string { u := url.URL{ Scheme: "http", Host: f.host} return u.String() } func (f *remoteFileServer) CtxDir() string { return f.ctx.Dir } func (f *remoteFileServer) Close() error { defer func() { if f.ctx != nil { f.ctx.Close() } if f.image != "" { deleteImages(f.image) } }() if f.container == "" { return nil } return deleteContainer(f.container) } func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { var ( image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) ) // Build the image if err := fakeContextAddDockerfile(ctx, `FROM httpserver COPY . /static`); err != nil { return nil, fmt.Errorf("Cannot add Dockerfile to context: %v", err) } if _, err := buildImageFromContext(image, ctx, false); err != nil { return nil, fmt.Errorf("failed building file storage container image: %v", err) } // Start the container runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--name", container, image) if out, ec, err := runCommandWithOutput(runCmd); err != nil { return nil, fmt.Errorf("failed to start file storage container. 
ec=%v\nout=%s\nerr=%v", ec, out, err) } // Find out the system assigned port out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "port", container, "80/tcp")) if err != nil { return nil, fmt.Errorf("failed to find container port: err=%v\nout=%s", err, out) } fileserverHostPort := strings.Trim(out, "\n") _, port, err := net.SplitHostPort(fileserverHostPort) if err != nil { return nil, fmt.Errorf("unable to parse file server host:port: %v", err) } dockerHostURL, err := url.Parse(daemonHost()) if err != nil { return nil, fmt.Errorf("unable to parse daemon host URL: %v", err) } host, _, err := net.SplitHostPort(dockerHostURL.Host) if err != nil { return nil, fmt.Errorf("unable to parse docker daemon host:port: %v", err) } return &remoteFileServer{ container: container, image: image, host: fmt.Sprintf("%s:%s", host, port), ctx: ctx}, nil } func inspectFieldAndMarshall(name, field string, output interface{}) error { str, err := inspectFieldJSON(name, field) if err != nil { return err } return json.Unmarshal([]byte(str), output) } func inspectFilter(name, filter string) (string, error) { format := fmt.Sprintf("{{%s}}", filter) inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) out, exitCode, err := runCommandWithOutput(inspectCmd) if err != nil || exitCode != 0 { return "", fmt.Errorf("failed to inspect container %s: %s", name, out) } return strings.TrimSpace(out), nil } func inspectField(name, field string) (string, error) { return inspectFilter(name, fmt.Sprintf(".%s", field)) } func inspectFieldJSON(name, field string) (string, error) { return inspectFilter(name, fmt.Sprintf("json .%s", field)) } func inspectFieldMap(name, path, field string) (string, error) { return inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) } func inspectMountSourceField(name, destination string) (string, error) { m, err := inspectMountPoint(name, destination) if err != nil { return "", err } return m.Source, nil } func inspectMountPoint(name, 
destination string) (types.MountPoint, error) { out, err := inspectFieldJSON(name, "Mounts") if err != nil { return types.MountPoint{}, err } return inspectMountPointJSON(out, destination) } var errMountNotFound = errors.New("mount point not found") func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { var mp []types.MountPoint if err := unmarshalJSON([]byte(j), &mp); err != nil { return types.MountPoint{}, err } var m *types.MountPoint for _, c := range mp { if c.Destination == destination { m = &c break } } if m == nil { return types.MountPoint{}, errMountNotFound } return *m, nil } func getIDByName(name string) (string, error) { return inspectField(name, "Id") } // getContainerState returns the exit code of the container // and true if it's running // the exit code should be ignored if it's running func getContainerState(c *check.C, id string) (int, bool, error) { var ( exitStatus int running bool ) out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) if exitCode != 0 { return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out) } out = strings.Trim(out, "\n") splitOutput := strings.Split(out, " ") if len(splitOutput) != 2 { return 0, false, fmt.Errorf("failed to get container state: output is broken") } if splitOutput[0] == "true" { running = true } if n, err := strconv.Atoi(splitOutput[1]); err == nil { exitStatus = n } else { return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") } return exitStatus, running, nil } func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, buildFlags...) args = append(args, "-") buildCmd := exec.Command(dockerBinary, args...) 
buildCmd.Stdin = strings.NewReader(dockerfile) return buildCmd } func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) { buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) out, exitCode, err := runCommandWithOutput(buildCmd) if err != nil || exitCode != 0 { return "", out, fmt.Errorf("failed to build the image: %s", out) } id, err := getIDByName(name) if err != nil { return "", out, err } return id, out, nil } func buildImageWithStdoutStderr(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, string, error) { buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) if err != nil || exitCode != 0 { return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) } id, err := getIDByName(name) if err != nil { return "", stdout, stderr, err } return id, stdout, stderr, nil } func buildImage(name, dockerfile string, useCache bool, buildFlags ...string) (string, error) { id, _, err := buildImageWithOut(name, dockerfile, useCache, buildFlags...) return id, err } func buildImageFromContext(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, error) { id, _, err := buildImageFromContextWithOut(name, ctx, useCache, buildFlags...) if err != nil { return "", err } return id, nil } func buildImageFromContextWithOut(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, error) { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, buildFlags...) args = append(args, ".") buildCmd := exec.Command(dockerBinary, args...) 
buildCmd.Dir = ctx.Dir out, exitCode, err := runCommandWithOutput(buildCmd) if err != nil || exitCode != 0 { return "", "", fmt.Errorf("failed to build the image: %s", out) } id, err := getIDByName(name) if err != nil { return "", "", err } return id, out, nil } func buildImageFromContextWithStdoutStderr(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, string, error) { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, buildFlags...) args = append(args, ".") buildCmd := exec.Command(dockerBinary, args...) buildCmd.Dir = ctx.Dir stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) if err != nil || exitCode != 0 { return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) } id, err := getIDByName(name) if err != nil { return "", stdout, stderr, err } return id, stdout, stderr, nil } func buildImageFromGitWithStdoutStderr(name string, ctx *fakeGit, useCache bool, buildFlags ...string) (string, string, string, error) { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, buildFlags...) args = append(args, ctx.RepoURL) buildCmd := exec.Command(dockerBinary, args...) stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) if err != nil || exitCode != 0 { return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) } id, err := getIDByName(name) if err != nil { return "", stdout, stderr, err } return id, stdout, stderr, nil } func buildImageFromPath(name, path string, useCache bool, buildFlags ...string) (string, error) { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, buildFlags...) args = append(args, path) buildCmd := exec.Command(dockerBinary, args...) 
out, exitCode, err := runCommandWithOutput(buildCmd) if err != nil || exitCode != 0 { return "", fmt.Errorf("failed to build the image: %s", out) } return getIDByName(name) } type gitServer interface { URL() string Close() error } type localGitServer struct { *httptest.Server } func (r *localGitServer) Close() error { r.Server.Close() return nil } func (r *localGitServer) URL() string { return r.Server.URL } type fakeGit struct { root string server gitServer RepoURL string } func (g *fakeGit) Close() { g.server.Close() os.RemoveAll(g.root) } func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (*fakeGit, error) { ctx, err := fakeContextWithFiles(files) if err != nil { return nil, err } defer ctx.Close() curdir, err := os.Getwd() if err != nil { return nil, err } defer os.Chdir(curdir) if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output) } err = os.Chdir(ctx.Dir) if err != nil { return nil, err } if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to set 'user.name': %s (%s)", err, output) } if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to set 'user.email': %s (%s)", err, output) } if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output) } if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output) } root, err := ioutil.TempDir("", "docker-test-git-repo") if err != nil { return nil, err } repoPath := filepath.Join(root, name+".git") if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, 
repoPath).CombinedOutput(); err != nil { os.RemoveAll(root) return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output) } err = os.Chdir(repoPath) if err != nil { os.RemoveAll(root) return nil, err } if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { os.RemoveAll(root) return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output) } err = os.Chdir(curdir) if err != nil { os.RemoveAll(root) return nil, err } var server gitServer if !enforceLocalServer { // use fakeStorage server, which might be local or remote (at test daemon) server, err = fakeStorageWithContext(fakeContextFromDir(root)) if err != nil { return nil, fmt.Errorf("cannot start fake storage: %v", err) } } else { // always start a local http server on CLI test machine httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) server = &localGitServer{httpServer} } return &fakeGit{ root: root, server: server, RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), }, nil } // Write `content` to the file at path `dst`, creating it if necessary, // as well as any missing directories. // The file is truncated if it already exists. // Fail the test when error occurs. func writeFile(dst, content string, c *check.C) { // Create subdirectories if necessary c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) c.Assert(err, check.IsNil) defer f.Close() // Write content (truncate if it exists) _, err = io.Copy(f, strings.NewReader(content)) c.Assert(err, check.IsNil) } // Return the contents of file at path `src`. // Fail the test when error occurs. 
func readFile(src string, c *check.C) (content string) { data, err := ioutil.ReadFile(src) c.Assert(err, check.IsNil) return string(data) } func containerStorageFile(containerID, basename string) string { return filepath.Join(containerStoragePath, containerID, basename) } // docker commands that use this function must be run with the '-d' switch. func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { out, _, err := runCommandWithOutput(cmd) if err != nil { return nil, fmt.Errorf("%v: %q", err, out) } contID := strings.TrimSpace(out) if err := waitRun(contID); err != nil { return nil, fmt.Errorf("%v: %q", contID, err) } return readContainerFile(contID, filename) } func readContainerFile(containerID, filename string) ([]byte, error) { f, err := os.Open(containerStorageFile(containerID, filename)) if err != nil { return nil, err } defer f.Close() content, err := ioutil.ReadAll(f) if err != nil { return nil, err } return content, nil } func readContainerFileWithExec(containerID, filename string) ([]byte, error) { out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "exec", containerID, "cat", filename)) return []byte(out), err } // daemonTime provides the current time on the daemon host func daemonTime(c *check.C) time.Time { if isLocalDaemon { return time.Now() } status, body, err := sockRequest("GET", "/info", nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusOK) type infoJSON struct { SystemTime string } var info infoJSON err = json.Unmarshal(body, &info) c.Assert(err, check.IsNil, check.Commentf("unable to unmarshal GET /info response")) dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response")) return dt } func setupRegistry(c *check.C, schema1 bool) *testRegistryV2 { testRequires(c, RegistryHosting) reg, err := newTestRegistryV2(c, schema1) c.Assert(err, check.IsNil) // Wait for registry to be ready to serve 
requests. for i := 0; i != 50; i++ { if err = reg.Ping(); err == nil { break } time.Sleep(100 * time.Millisecond) } c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for test registry to become available")) return reg } func setupNotary(c *check.C) *testNotary { testRequires(c, NotaryHosting) ts, err := newTestNotary(c) c.Assert(err, check.IsNil) return ts } // appendBaseEnv appends the minimum set of environment variables to exec the // docker cli binary for testing with correct configuration to the given env // list. func appendBaseEnv(isTLS bool, env ...string) []string { preserveList := []string{ // preserve remote test host "DOCKER_HOST", // windows: requires preserving SystemRoot, otherwise dial tcp fails // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." "SystemRoot", } if isTLS { preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH") } for _, key := range preserveList { if val := os.Getenv(key); val != "" { env = append(env, fmt.Sprintf("%s=%s", key, val)) } } return env } func createTmpFile(c *check.C, content string) string { f, err := ioutil.TempFile("", "testfile") c.Assert(err, check.IsNil) filename := f.Name() err = ioutil.WriteFile(filename, []byte(content), 0644) c.Assert(err, check.IsNil) return filename } func buildImageWithOutInDamon(socket string, name, dockerfile string, useCache bool) (string, error) { args := []string{"--host", socket} buildCmd := buildImageCmdArgs(args, name, dockerfile, useCache) out, exitCode, err := runCommandWithOutput(buildCmd) if err != nil || exitCode != 0 { return out, fmt.Errorf("failed to build the image: %s, error: %v", out, err) } return out, nil } func buildImageCmdArgs(args []string, name, dockerfile string, useCache bool) *exec.Cmd { args = append(args, []string{"-D", "build", "-t", name}...) if !useCache { args = append(args, "--no-cache") } args = append(args, "-") buildCmd := exec.Command(dockerBinary, args...) 
buildCmd.Stdin = strings.NewReader(dockerfile) return buildCmd } func waitForContainer(contID string, args ...string) error { args = append([]string{"run", "--name", contID}, args...) cmd := exec.Command(dockerBinary, args...) if _, err := runCommand(cmd); err != nil { return err } if err := waitRun(contID); err != nil { return err } return nil } // waitRun will wait for the specified container to be running, maximum 5 seconds. func waitRun(contID string) error { return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) } // waitExited will wait for the specified container to state exit, subject // to a maximum time limit in seconds supplied by the caller func waitExited(contID string, duration time.Duration) error { return waitInspect(contID, "{{.State.Status}}", "exited", duration) } // waitInspect will wait for the specified container to have the specified string // in the inspect output. It will wait until the specified timeout (in seconds) // is reached. func waitInspect(name, expr, expected string, timeout time.Duration) error { after := time.After(timeout) for { cmd := exec.Command(dockerBinary, "inspect", "-f", expr, name) out, _, err := runCommandWithOutput(cmd) if err != nil { if !strings.Contains(out, "No such") { return fmt.Errorf("error executing docker inspect: %v\n%s", err, out) } select { case <-after: return err default: time.Sleep(10 * time.Millisecond) continue } } out = strings.TrimSpace(out) if out == expected { break } select { case <-after: return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) default: } time.Sleep(100 * time.Millisecond) } return nil } func getInspectBody(c *check.C, version, id string) []byte { endpoint := fmt.Sprintf("/%s/containers/%s/json", version, id) status, body, err := sockRequest("GET", endpoint, nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusOK) return body } // Run a long running idle task in a background container using the // system-specific default 
image and command. func runSleepingContainer(c *check.C, extraArgs ...string) (string, int) { return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) } // Run a long running idle task in a background container using the specified // image and the system-specific command. func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) (string, int) { args := []string{"run", "-d"} args = append(args, extraArgs...) args = append(args, image) args = append(args, defaultSleepCommand...) return dockerCmd(c, args...) } func getRootUIDGID() (int, int, error) { uidgid := strings.Split(filepath.Base(dockerBasePath), ".") if len(uidgid) == 1 { //user namespace remapping is not turned on; return 0 return 0, 0, nil } uid, err := strconv.Atoi(uidgid[0]) if err != nil { return 0, 0, err } gid, err := strconv.Atoi(uidgid[1]) if err != nil { return 0, 0, err } return uid, gid, nil } docker-1.10.3/integration-cli/events_utils.go000066400000000000000000000161651267010174400212210ustar00rootroot00000000000000package main import ( "bufio" "bytes" "fmt" "io" "os/exec" "regexp" "strconv" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) var ( reTimestamp = `\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z)` reEventType = `(?P\w+)` reAction = `(?P\w+)` reID = `(?P[^\s]+)` reAttributes = `(\s\((?P[^\)]+)\))?` reString = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes) // eventCliRegexp is a regular expression that matches all possible event outputs in the cli eventCliRegexp = regexp.MustCompile(reString) ) // eventMatcher is a function that tries to match an event input. // It returns true if the event matches and a map with // a set of key/value to identify the match. type eventMatcher func(text string) (map[string]string, bool) // eventMatchProcessor is a function to handle an event match. 
// It receives a map of key/value with the information extracted in a match. type eventMatchProcessor func(matches map[string]string) // eventObserver runs an events commands and observes its output. type eventObserver struct { buffer *bytes.Buffer command *exec.Cmd scanner *bufio.Scanner startTime string disconnectionError error } // newEventObserver creates the observer and initializes the command // without running it. Users must call `eventObserver.Start` to start the command. func newEventObserver(c *check.C, args ...string) (*eventObserver, error) { since := daemonTime(c).Unix() return newEventObserverWithBacklog(c, since, args...) } // newEventObserverWithBacklog creates a new observer changing the start time of the backlog to return. func newEventObserverWithBacklog(c *check.C, since int64, args ...string) (*eventObserver, error) { startTime := strconv.FormatInt(since, 10) cmdArgs := []string{"events", "--since", startTime} if len(args) > 0 { cmdArgs = append(cmdArgs, args...) } eventsCmd := exec.Command(dockerBinary, cmdArgs...) stdout, err := eventsCmd.StdoutPipe() if err != nil { return nil, err } return &eventObserver{ buffer: new(bytes.Buffer), command: eventsCmd, scanner: bufio.NewScanner(stdout), startTime: startTime, }, nil } // Start starts the events command. func (e *eventObserver) Start() error { return e.command.Start() } // Stop stops the events command. func (e *eventObserver) Stop() { e.command.Process.Kill() e.command.Process.Release() } // Match tries to match the events output with a given matcher. 
func (e *eventObserver) Match(match eventMatcher, process eventMatchProcessor) { for e.scanner.Scan() { text := e.scanner.Text() e.buffer.WriteString(text) e.buffer.WriteString("\n") if matches, ok := match(text); ok { process(matches) } } err := e.scanner.Err() if err == nil { err = io.EOF } logrus.Debug("EventObserver scanner loop finished: %v", err) e.disconnectionError = err } func (e *eventObserver) CheckEventError(c *check.C, id, event string, match eventMatcher) { var foundEvent bool scannerOut := e.buffer.String() if e.disconnectionError != nil { until := strconv.FormatInt(daemonTime(c).Unix(), 10) out, _ := dockerCmd(c, "events", "--since", e.startTime, "--until", until) events := strings.Split(strings.TrimSpace(out), "\n") for _, e := range events { if _, ok := match(e); ok { foundEvent = true break } } scannerOut = out } if !foundEvent { c.Fatalf("failed to observe event `%s` for %s. Disconnection error: %v\nout:\n%v", event, id, e.disconnectionError, scannerOut) } } // matchEventLine matches a text with the event regular expression. // It returns the matches and true if the regular expression matches with the given id and event type. // It returns an empty map and false if there is no match. func matchEventLine(id, eventType string, actions map[string]chan bool) eventMatcher { return func(text string) (map[string]string, bool) { matches := parseEventText(text) if len(matches) == 0 { return matches, false } if matchIDAndEventType(matches, id, eventType) { if _, ok := actions[matches["action"]]; ok { return matches, true } } return matches, false } } // processEventMatch closes an action channel when an event line matches the expected action. func processEventMatch(actions map[string]chan bool) eventMatchProcessor { return func(matches map[string]string) { if ch, ok := actions[matches["action"]]; ok { close(ch) } } } // parseEventText parses a line of events coming from the cli and returns // the matchers in a map. 
func parseEventText(text string) map[string]string { matches := eventCliRegexp.FindAllStringSubmatch(text, -1) md := map[string]string{} if len(matches) == 0 { return md } names := eventCliRegexp.SubexpNames() for i, n := range matches[0] { md[names[i]] = n } return md } // parseEventAction parses an event text and returns the action. // It fails if the text is not in the event format. func parseEventAction(c *check.C, text string) string { matches := parseEventText(text) return matches["action"] } // eventActionsByIDAndType returns the actions for a given id and type. // It fails if the text is not in the event format. func eventActionsByIDAndType(c *check.C, events []string, id, eventType string) []string { var filtered []string for _, event := range events { matches := parseEventText(event) c.Assert(matches, checker.Not(checker.IsNil)) if matchIDAndEventType(matches, id, eventType) { filtered = append(filtered, matches["action"]) } } return filtered } // matchIDAndEventType returns true if an event matches a given id and type. // It also resolves names in the event attributes if the id doesn't match. 
func matchIDAndEventType(matches map[string]string, id, eventType string) bool { return matchEventID(matches, id) && matches["eventType"] == eventType } func matchEventID(matches map[string]string, id string) bool { matchID := matches["id"] == id || strings.HasPrefix(matches["id"], id) if !matchID && matches["attributes"] != "" { // try matching a name in the attributes attributes := map[string]string{} for _, a := range strings.Split(matches["attributes"], ", ") { kv := strings.Split(a, "=") attributes[kv[0]] = kv[1] } matchID = attributes["name"] == id } return matchID } func parseEvents(c *check.C, out, match string) { events := strings.Split(strings.TrimSpace(out), "\n") for _, event := range events { matches := parseEventText(event) matched, err := regexp.MatchString(match, matches["action"]) c.Assert(err, checker.IsNil) c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"])) } } func parseEventsWithID(c *check.C, out, match, id string) { events := strings.Split(strings.TrimSpace(out), "\n") for _, event := range events { matches := parseEventText(event) c.Assert(matchEventID(matches, id), checker.True) matched, err := regexp.MatchString(match, matches["action"]) c.Assert(err, checker.IsNil) c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"])) } } docker-1.10.3/integration-cli/fixtures/000077500000000000000000000000001267010174400200065ustar00rootroot00000000000000docker-1.10.3/integration-cli/fixtures/https/000077500000000000000000000000001267010174400211505ustar00rootroot00000000000000docker-1.10.3/integration-cli/fixtures/https/ca.pem000066400000000000000000000025471267010174400222460ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI 
Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn 0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt Zxtf5lL6KSO9Y+EFwM+rju6hm5hW -----END CERTIFICATE----- docker-1.10.3/integration-cli/fixtures/https/client-cert.pem000066400000000000000000000076361267010174400241000ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 3 (0x3) Signature Algorithm: sha1WithRSAEncryption Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain Validity Not Before: Dec 4 14:17:54 2013 GMT Not After : Dec 2 14:17:54 2023 GMT Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (1024 bit) Modulus: 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: 
b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: 7e:4e:78:7d:0a:9e:8f:42:43 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Comment: Easy-RSA Generated Certificate X509v3 Subject Key Identifier: DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 X509v3 Authority Key Identifier: keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain serial:FD:AB:EC:6A:84:27:04:A7 X509v3 Extended Key Usage: TLS Web Client Authentication X509v3 Key Usage: Digital Signature Signature Algorithm: sha1WithRSAEncryption 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: 4a:c4 -----BEGIN CERTIFICATE----- MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 
cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= -----END CERTIFICATE----- docker-1.10.3/integration-cli/fixtures/https/client-key.pem000066400000000000000000000016201267010174400237160ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU 9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ 93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC 4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 ksDFuNxAzbhl -----END PRIVATE KEY----- docker-1.10.3/integration-cli/fixtures/https/client-rogue-cert.pem000066400000000000000000000076121267010174400252110ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 2 (0x2) Signature 
Algorithm: sha1WithRSAEncryption Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain Validity Not Before: Feb 24 17:54:59 2014 GMT Not After : Feb 22 17:54:59 2024 GMT Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (1024 bit) Modulus: 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: 1d:7b:6c:7b:be:89:6b:88:8b Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Comment: Easy-RSA Generated Certificate X509v3 Subject Key Identifier: 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 X509v3 Authority Key Identifier: keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain serial:E7:21:1E:18:41:1B:96:83 X509v3 Extended Key Usage: TLS Web Client Authentication X509v3 Key Usage: Digital Signature Signature Algorithm: sha1WithRSAEncryption 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: b3:f9 -----BEGIN CERTIFICATE----- MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 
aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 -----END CERTIFICATE----- docker-1.10.3/integration-cli/fixtures/https/client-rogue-key.pem000066400000000000000000000016241267010174400250410ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW +IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 
6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa 8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 nrOdMf15T6QF7Q== -----END PRIVATE KEY----- docker-1.10.3/integration-cli/fixtures/https/server-cert.pem000066400000000000000000000100131267010174400241070ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 4 (0x4) Signature Algorithm: sha1WithRSAEncryption Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain Validity Not Before: Dec 4 15:01:20 2013 GMT Not After : Dec 2 15:01:20 2023 GMT Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (1024 bit) Modulus: 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: a8:05:32:1e:f9:95:09:14:75 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Cert Type: SSL Server Netscape Comment: Easy-RSA Generated Server Certificate X509v3 Subject Key Identifier: 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 X509v3 Authority Key Identifier: keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain serial:FD:AB:EC:6A:84:27:04:A7 X509v3 Extended Key Usage: TLS Web Server Authentication X509v3 Key Usage: 
Digital Signature, Key Encipherment Signature Algorithm: sha1WithRSAEncryption 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: 15:42 -----BEGIN CERTIFICATE----- MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG +EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX dDBV9m4gmmweCbQMFUI= -----END CERTIFICATE----- 
docker-1.10.3/integration-cli/fixtures/https/server-key.pem000066400000000000000000000016241267010174400237520ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx 0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y 4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq +0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj +tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW 5nCwDu5ZTP+khltg -----END PRIVATE KEY----- docker-1.10.3/integration-cli/fixtures/https/server-rogue-cert.pem000066400000000000000000000100131267010174400252260ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 3 (0x3) Signature Algorithm: sha1WithRSAEncryption Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain Validity Not Before: Feb 28 18:49:31 2014 GMT Not After : Feb 26 18:49:31 2024 GMT Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (1024 bit) Modulus: 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: 
d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: 9e:02:5c:be:65:98:a4:b4:b5 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Cert Type: SSL Server Netscape Comment: Easy-RSA Generated Server Certificate X509v3 Subject Key Identifier: 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC X509v3 Authority Key Identifier: keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain serial:E7:21:1E:18:41:1B:96:83 X509v3 Extended Key Usage: TLS Web Server Authentication X509v3 Key Usage: Digital Signature, Key Encipherment Signature Algorithm: sha1WithRSAEncryption 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: c7:9f -----BEGIN CERTIFICATE----- MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg 
hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 CQxdfIYk3ZLVsxQGx58= -----END CERTIFICATE----- docker-1.10.3/integration-cli/fixtures/https/server-rogue-key.pem000066400000000000000000000016241267010174400250710ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR 8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl 1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD 37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl 3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ Z1hrIq8xYl2LOQ== -----END PRIVATE KEY----- 
docker-1.10.3/integration-cli/fixtures/load/000077500000000000000000000000001267010174400207255ustar00rootroot00000000000000docker-1.10.3/integration-cli/fixtures/load/emptyLayer.tar000066400000000000000000000740001267010174400235710ustar00rootroot0000000000000017d1436ef796af2fc2210cc37c4672e5aa1b62cb08ac4b95dd15372321105a66/0000755000000000000000000000000012156431666020254 5ustar rootroot17d1436ef796af2fc2210cc37c4672e5aa1b62cb08ac4b95dd15372321105a66/VERSION0000644000000000000000000000000312156431666021315 0ustar rootroot1.017d1436ef796af2fc2210cc37c4672e5aa1b62cb08ac4b95dd15372321105a66/json0000644000000000000000000000106212156431666021147 0ustar rootroot{"id":"17d1436ef796af2fc2210cc37c4672e5aa1b62cb08ac4b95dd15372321105a66","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"docker_version":"0.4.0","architecture":"x86_64","layer_id":"sha256:74db925ddbf25d3b8815535598afc9b43b5cd220f194703dd8bde424f93aac8c"}17d1436ef796af2fc2210cc37c4672e5aa1b62cb08ac4b95dd15372321105a66/layer.tar0000644000000000000000000000100012156431666022067 0ustar rootroot./0040755000000000000000000000000012156431635007413 5ustar000000000000000025445a0fc5025c3917a0cd6e307d92322540e0da691614312ddea22511b71513/0000755000000000000000000000000012625327776017660 5ustar rootroot25445a0fc5025c3917a0cd6e307d92322540e0da691614312ddea22511b71513/VERSION0000644000000000000000000000000312475675560020721 0ustar rootroot1.025445a0fc5025c3917a0cd6e307d92322540e0da691614312ddea22511b71513/json0000644000000000000000000000277112475675560020563 0ustar 
rootroot{"id":"25445a0fc5025c3917a0cd6e307d92322540e0da691614312ddea22511b71513","parent":"9c7cb910d84346a3fbf3cc2be046f44bf0af7f11eb8db2ef1f45e93c1202faac","created":"2015-03-04T21:38:56.769279623Z","container":"cd2c5e92b2a0ef081f7acfe660aeaafbd8498c4428fd1ec130def86e7c9a48f6","container_config":{"Hostname":"cd2c5e92b2a0","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ADD file:2b2b26209d285cd1a96e8168dfb25338aa09155d0a9a6ed54a94be0bd9914461 in /"],"Image":"5b12ef8fd57065237a6833039acc0e7f68e363c15d8abb5cacce7143a1f7de8a","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":[],"Labels":null},"docker_version":"1.4.1","author":"The CentOS Project \u003ccloud-ops@centos.org\u003e - ami_creator","config":{"Hostname":"cd2c5e92b2a0","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"5b12ef8fd57065237a6833039acc0e7f68e363c15d8abb5cacce7143a1f7de8a","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":[],"Labels":null},"architecture":"amd64","os":"linux","Size":215725104,"parent_id":"sha256:9c7cb910d84346a3fbf3cc2be046f44bf0af7f11eb8db2ef1f45e93c1202faac","layer_id":"sha256:7bd4f13333ba078de4fd536af062446ff873b28e886d9250f518f54f7e33d236"}25445a0fc5025c3917a0cd6e307d92322540e0da691614312ddea22511b71513/layer.tar0000644000000000000000000002400012625327767021500 0ustar rootroottmp/0001777000000000000000000000000012625327675010400 5ustar rootroottmp/foo0000644000000000000000000000000412625327675011073 0ustar rootrootfoo 9c7cb910d84346a3fbf3cc2be046f44bf0af7f11eb8db2ef1f45e93c1202faac/0000755000000000000000000000000012413064025021127 5ustar 
rootroot9c7cb910d84346a3fbf3cc2be046f44bf0af7f11eb8db2ef1f45e93c1202faac/VERSION0000644000000000000000000000000312413064025022170 0ustar rootroot1.09c7cb910d84346a3fbf3cc2be046f44bf0af7f11eb8db2ef1f45e93c1202faac/json0000644000000000000000000000274512413064025022033 0ustar rootroot{"id":"9c7cb910d84346a3fbf3cc2be046f44bf0af7f11eb8db2ef1f45e93c1202faac","parent":"17d1436ef796af2fc2210cc37c4672e5aa1b62cb08ac4b95dd15372321105a66","created":"2014-10-01T20:46:13.51090041Z","container":"fed991d241a9e03e1b38c985a9b9300d144b375c7ff41d5f3c49b27d6250e16d","container_config":{"Hostname":"fed991d241a9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) MAINTAINER The CentOS Project \u003ccloud-ops@centos.org\u003e - ami_creator"],"Image":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":[],"Labels":null},"docker_version":"1.2.0","author":"The CentOS Project \u003ccloud-ops@centos.org\u003e - ami_creator","config":{"Hostname":"fed991d241a9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":[],"Labels":null},"architecture":"amd64","os":"linux","parent_id":"sha256:17d1436ef796af2fc2210cc37c4672e5aa1b62cb08ac4b95dd15372321105a66","layer_id":"sha256:1775fca35fb6a4d31c541746eaea63c5cb3c00280c8b5a351d4e944cdca7489d"}9c7cb910d84346a3fbf3cc2be046f44bf0af7f11eb8db2ef1f45e93c1202faac/layer.tar0000644000000000000000000000000012413064025022741 0ustar rootrootrepositories0000644000000000000000000000016200000000000012166 0ustar 
rootroot{"matthughes/library-centos":{"6-2015.03.20":"25445a0fc5025c3917a0cd6e307d92322540e0da691614312ddea22511b71513"}} docker-1.10.3/integration-cli/fixtures/notary/000077500000000000000000000000001267010174400213225ustar00rootroot00000000000000docker-1.10.3/integration-cli/fixtures/notary/localhost.cert000066400000000000000000000021331267010174400241700ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDCTCCAfOgAwIBAgIQTOoFF+ypXwgdXnXHuCTvYDALBgkqhkiG9w0BAQswJjER MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDcxNzE5 NDg1M1oXDTE4MDcwMTE5NDg1M1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV BAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMDO qvTBAi0ApXLfe90ApJkdkRGwF838Qzt1UFSxomu5fHRV6l3FjX5XCVHiFQ4w3ROh dMOu9NahfGLJv9VvWU2MV3YoY9Y7lIXpKwnK1v064wuls4nPh13BUWKQKofcY/e2 qaSPd6/qmSRc/kJUvOI9jZMSX6ZRPu9K4PCqm2CivlbLq9UYuo1AbRGfuqHRvTxg mQG7WQCzGSvSjuSg5qX3TEh0HckTczJG9ODULNRWNE7ld0W4sfv4VF8R7Uc/G7LO 8QwLCZ9TIl3gYMPCrhUL3Q6z9Jnn1SQS4mhDnPi6ugRYO1X8k3jjdxV9C2sXwUvN OZI1rLEWl9TJNA7ZXtMCAwEAAaM2MDQwDgYDVR0PAQH/BAQDAgCgMAwGA1UdEwEB /wQCMAAwFAYDVR0RBA0wC4IJbG9jYWxob3N0MAsGCSqGSIb3DQEBCwOCAQEAH6iq kM2+UMukGDLEQKHHiauioWJlHDlLXv76bJiNfjSz94B/2XOQMb9PT04//tnGUyPK K8Dx7RoxSodU6T5VRiz/A36mLOvt2t3bcL/1nHf9sAOHcexGtnCbQbW91V7RKfIL sjiLNFDkQ9VfVNY+ynQptZoyH1sy07+dplfkIiPzRs5WuVAnEGsX3r6BrhgUITzi g1B4kpmGZIohP4m6ZEBY5xuo/NQ0+GhjAENQMU38GpuoMyFS0i0dGcbx8weqnI/B Er/qa0+GE/rBnWY8TiRow8dzpneSFQnUZpJ4EwD9IoOIDHo7k2Nbz2P50HMiCXZf 4RqzctVssRlrRVnO5w== -----END CERTIFICATE----- docker-1.10.3/integration-cli/fixtures/notary/localhost.key000066400000000000000000000032131267010174400240230ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAwM6q9MECLQClct973QCkmR2REbAXzfxDO3VQVLGia7l8dFXq XcWNflcJUeIVDjDdE6F0w6701qF8Ysm/1W9ZTYxXdihj1juUhekrCcrW/TrjC6Wz ic+HXcFRYpAqh9xj97appI93r+qZJFz+QlS84j2NkxJfplE+70rg8KqbYKK+Vsur 1Ri6jUBtEZ+6odG9PGCZAbtZALMZK9KO5KDmpfdMSHQdyRNzMkb04NQs1FY0TuV3 Rbix+/hUXxHtRz8bss7xDAsJn1MiXeBgw8KuFQvdDrP0mefVJBLiaEOc+Lq6BFg7 
VfyTeON3FX0LaxfBS805kjWssRaX1Mk0Dtle0wIDAQABAoIBAHbuhNHZROhRn70O Ui9vOBki/dt1ThnH5AkHQngb4t6kWjrAzILvW2p1cdBKr0ZDqftz+rzCbVD/5+Rg Iq8bsnB9g23lWEBMHD/GJsAxmRA3hNooamk11IBmwTcVSsbnkdq5mEdkICYphjHC Ey0DbEf6RBxWlx3WvAWLoNmTw6iFaOCH8IyLavPpe7kLbZc219oNUw2qjCnCXCZE /NuViADHJBPN8r7g1gmyclJmTumdUK6oHgXEMMPe43vhReGcgcReK9QZjnTcIXPM 4oJOraw+BtoZXVvvIPnC+5ntoLFOzjIzM0kaveReZbdgffqF4zy2vRfCHhWssanc 7a0xR4ECgYEA3Xuvcqy5Xw+v/jVCO0VZj++Z7apA78dY4tWsPx5/0DUTTziTlXkC ADduEbwX6HgZ/iLvA9j4C3Z4mO8qByby/6UoBU8NEe+PQt6fT7S+dKSP4uy5ZxVM i5opkEyrJsMbve9Jrlj4bk5CICsydrZ+SBFHnpNGjbduGQick5LORWECgYEA3trt gepteDGiUYmnnBgjbYtcD11RvpKC8Z/QwGnzN5vk4eBu8r7DkMcLN+SiHjAovlJo r5j3EbF8sla1zBf/yySdQZFqUGcwtw7MaAKCLdhQl5WsViNMIx6p2OJapu0dzbv2 KTXrnoRCafcH92k0dUX1ahE9eyc8KX6VhbWwXLMCgYATGCCuEDoC+gVAMzM8jOQF xrBMjwr+IP+GvskUv/pg5tJ9V/FRR5dmkWDJ4p9lCUWkZTqZ6FCqHFKVTLkg2LjG VWS34HLOAwskxrCRXJG22KEW/TWWr31j46yFpjZzJwrzOvftMfpo+BI3V8IH/f+x EtxLzYKdoRy6x8VH67YgwQKBgHor2vjV45142FuK83AHa6SqOZXSuvWWrGJ6Ep7p doSN2jRaLXi2S9AaznOdy6JxFGUCGJHrcccpXgsGrjNtFLXxJKTFa1sYtwQkALsk ZOltJQF09D1krGC0driHntrUMvqOiKye+sS0DRS6cIuaCUAhUiELwoC5SaoV0zKy IDUxAoGAOK8Xq+3/sqe79vTpw25RXl+nkAmOAeKjqf3Kh6jbnBhr81rmefyKXB9a uj0b980tzUnliwA5cCOsyxfN2vASvMnJxFE721QZI04arlcPFHcFqCtmNnUYTcLp 0hgn/yLZptcoxpy+eTBu3eNsxz1Bu/Tx/198+2Wr3MbtGpLNIcA= -----END RSA PRIVATE KEY----- docker-1.10.3/integration-cli/registry.go000066400000000000000000000061351267010174400203410ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "github.com/docker/distribution/digest" "github.com/go-check/check" ) const ( v2binary = "registry-v2" v2binarySchema1 = "registry-v2-schema1" ) type testRegistryV2 struct { cmd *exec.Cmd dir string } func newTestRegistryV2(c *check.C, schema1 bool) (*testRegistryV2, error) { template := `version: 0.1 loglevel: debug storage: filesystem: rootdirectory: %s http: addr: %s` tmp, err := ioutil.TempDir("", "registry-test-") if err != nil { return nil, err } confPath := 
filepath.Join(tmp, "config.yaml") config, err := os.Create(confPath) if err != nil { return nil, err } if _, err := fmt.Fprintf(config, template, tmp, privateRegistryURL); err != nil { os.RemoveAll(tmp) return nil, err } binary := v2binary if schema1 { binary = v2binarySchema1 } cmd := exec.Command(binary, confPath) if err := cmd.Start(); err != nil { os.RemoveAll(tmp) if os.IsNotExist(err) { c.Skip(err.Error()) } return nil, err } return &testRegistryV2{ cmd: cmd, dir: tmp, }, nil } func (t *testRegistryV2) Ping() error { // We always ping through HTTP for our test registry. resp, err := http.Get(fmt.Sprintf("http://%s/v2/", privateRegistryURL)) if err != nil { return err } if resp.StatusCode != 200 { return fmt.Errorf("registry ping replied with an unexpected status code %d", resp.StatusCode) } return nil } func (t *testRegistryV2) Close() { t.cmd.Process.Kill() os.RemoveAll(t.dir) } func (t *testRegistryV2) getBlobFilename(blobDigest digest.Digest) string { // Split the digest into it's algorithm and hex components. dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex() // The path to the target blob data looks something like: // baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data" return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", t.dir, dgstAlg, dgstHex[:2], dgstHex) } func (t *testRegistryV2) readBlobContents(c *check.C, blobDigest digest.Digest) []byte { // Load the target manifest blob. 
manifestBlob, err := ioutil.ReadFile(t.getBlobFilename(blobDigest)) if err != nil { c.Fatalf("unable to read blob: %s", err) } return manifestBlob } func (t *testRegistryV2) writeBlobContents(c *check.C, blobDigest digest.Digest, data []byte) { if err := ioutil.WriteFile(t.getBlobFilename(blobDigest), data, os.FileMode(0644)); err != nil { c.Fatalf("unable to write malicious data blob: %s", err) } } func (t *testRegistryV2) tempMoveBlobData(c *check.C, blobDigest digest.Digest) (undo func()) { tempFile, err := ioutil.TempFile("", "registry-temp-blob-") if err != nil { c.Fatalf("unable to get temporary blob file: %s", err) } tempFile.Close() blobFilename := t.getBlobFilename(blobDigest) // Move the existing data file aside, so that we can replace it with a // another blob of data. if err := os.Rename(blobFilename, tempFile.Name()); err != nil { os.Remove(tempFile.Name()) c.Fatalf("unable to move data blob: %s", err) } return func() { os.Rename(tempFile.Name(), blobFilename) os.Remove(tempFile.Name()) } } docker-1.10.3/integration-cli/registry_mock.go000066400000000000000000000021431267010174400213450ustar00rootroot00000000000000package main import ( "net/http" "net/http/httptest" "regexp" "strings" "sync" "github.com/go-check/check" ) type handlerFunc func(w http.ResponseWriter, r *http.Request) type testRegistry struct { server *httptest.Server hostport string handlers map[string]handlerFunc mu sync.Mutex } func (tr *testRegistry) registerHandler(path string, h handlerFunc) { tr.mu.Lock() defer tr.mu.Unlock() tr.handlers[path] = h } func newTestRegistry(c *check.C) (*testRegistry, error) { testReg := &testRegistry{handlers: make(map[string]handlerFunc)} ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { url := r.URL.String() var matched bool var err error for re, function := range testReg.handlers { matched, err = regexp.MatchString(re, url) if err != nil { c.Fatal("Error with handler regexp") return } if matched { function(w, 
r) break } } if !matched { c.Fatalf("Unable to match %s with regexp", url) } })) testReg.server = ts testReg.hostport = strings.Replace(ts.URL, "http://", "", 1) return testReg, nil } docker-1.10.3/integration-cli/requirements.go000066400000000000000000000072401267010174400212120ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "net/http" "os" "os/exec" "strings" "time" "github.com/go-check/check" ) type testCondition func() bool type testRequirement struct { Condition testCondition SkipMessage string } // List test requirements var ( DaemonIsWindows = testRequirement{ func() bool { return daemonPlatform == "windows" }, "Test requires a Windows daemon", } DaemonIsLinux = testRequirement{ func() bool { return daemonPlatform == "linux" }, "Test requires a Linux daemon", } SameHostDaemon = testRequirement{ func() bool { return isLocalDaemon }, "Test requires docker daemon to run on the same machine as CLI", } UnixCli = testRequirement{ func() bool { return isUnixCli }, "Test requires posix utilities or functionality to run.", } ExecSupport = testRequirement{ func() bool { return supportsExec }, "Test requires 'docker exec' capabilities on the tested daemon.", } Network = testRequirement{ func() bool { // Set a timeout on the GET at 15s var timeout = time.Duration(15 * time.Second) var url = "https://hub.docker.com" client := http.Client{ Timeout: timeout, } resp, err := client.Get(url) if err != nil && strings.Contains(err.Error(), "use of closed network connection") { panic(fmt.Sprintf("Timeout for GET request on %s", url)) } if resp != nil { resp.Body.Close() } return err == nil }, "Test requires network availability, environment variable set to none to run in a non-network enabled mode.", } Apparmor = testRequirement{ func() bool { buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") return err == nil && len(buf) > 1 && buf[0] == 'Y' }, "Test requires apparmor is enabled.", } RegistryHosting = testRequirement{ func() bool { // 
for now registry binary is built only if we're running inside // container through `make test`. Figure that out by testing if // registry binary is in PATH. _, err := exec.LookPath(v2binary) return err == nil }, fmt.Sprintf("Test requires an environment that can host %s in the same host", v2binary), } NotaryHosting = testRequirement{ func() bool { // for now notary binary is built only if we're running inside // container through `make test`. Figure that out by testing if // notary-server binary is in PATH. _, err := exec.LookPath(notaryBinary) return err == nil }, fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryBinary), } NotOverlay = testRequirement{ func() bool { cmd := exec.Command("grep", "^overlay / overlay", "/proc/mounts") if err := cmd.Run(); err != nil { return true } return false }, "Test requires underlying root filesystem not be backed by overlay.", } IPv6 = testRequirement{ func() bool { cmd := exec.Command("test", "-f", "/proc/net/if_inet6") if err := cmd.Run(); err != nil { return true } return false }, "Test requires support for IPv6", } NotGCCGO = testRequirement{ func() bool { out, err := exec.Command("go", "version").Output() if err == nil && strings.Contains(string(out), "gccgo") { return false } return true }, "Test requires native Golang compiler instead of GCCGO", } NotUserNamespace = testRequirement{ func() bool { root := os.Getenv("DOCKER_REMAP_ROOT") if root != "" { return false } return true }, "Test cannot be run when remapping root", } ) // testRequires checks if the environment satisfies the requirements // for the test to run or skips the tests. 
func testRequires(c *check.C, requirements ...testRequirement) { for _, r := range requirements { if !r.Condition() { c.Skip(r.SkipMessage) } } } docker-1.10.3/integration-cli/requirements_unix.go000066400000000000000000000041301267010174400222500ustar00rootroot00000000000000// +build !windows package main import ( "github.com/docker/docker/pkg/sysinfo" ) var ( // SysInfo stores information about which features a kernel supports. SysInfo *sysinfo.SysInfo cpuCfsPeriod = testRequirement{ func() bool { return SysInfo.CPUCfsPeriod }, "Test requires an environment that supports cgroup cfs period.", } cpuCfsQuota = testRequirement{ func() bool { return SysInfo.CPUCfsQuota }, "Test requires an environment that supports cgroup cfs quota.", } cpuShare = testRequirement{ func() bool { return SysInfo.CPUShares }, "Test requires an environment that supports cgroup cpu shares.", } oomControl = testRequirement{ func() bool { return SysInfo.OomKillDisable }, "Test requires Oom control enabled.", } kernelMemorySupport = testRequirement{ func() bool { return SysInfo.KernelMemory }, "Test requires an environment that supports cgroup kernel memory.", } memoryLimitSupport = testRequirement{ func() bool { return SysInfo.MemoryLimit }, "Test requires an environment that supports cgroup memory limit.", } memoryReservationSupport = testRequirement{ func() bool { return SysInfo.MemoryReservation }, "Test requires an environment that supports cgroup memory reservation.", } swapMemorySupport = testRequirement{ func() bool { return SysInfo.SwapLimit }, "Test requires an environment that supports cgroup swap memory limit.", } memorySwappinessSupport = testRequirement{ func() bool { return SysInfo.MemorySwappiness }, "Test requires an environment that supports cgroup memory swappiness.", } blkioWeight = testRequirement{ func() bool { return SysInfo.BlkioWeight }, "Test requires an environment that supports blkio weight.", } cgroupCpuset = testRequirement{ func() bool { return SysInfo.Cpuset }, 
"Test requires an environment that supports cgroup cpuset.", } seccompEnabled = testRequirement{ func() bool { return supportsSeccomp && SysInfo.Seccomp }, "Test requires that seccomp support be enabled in the daemon.", } ) func init() { SysInfo = sysinfo.New(true) } docker-1.10.3/integration-cli/test_vars_exec.go000066400000000000000000000002001267010174400214720ustar00rootroot00000000000000// +build !test_no_exec package main const ( // indicates docker daemon tested supports 'docker exec' supportsExec = true ) docker-1.10.3/integration-cli/test_vars_noexec.go000066400000000000000000000002001267010174400220270ustar00rootroot00000000000000// +build test_no_exec package main const ( // indicates docker daemon tested supports 'docker exec' supportsExec = false ) docker-1.10.3/integration-cli/test_vars_noseccomp.go000066400000000000000000000001741267010174400225460ustar00rootroot00000000000000// +build !seccomp package main const ( // indicates docker daemon built with seccomp support supportsSeccomp = false ) docker-1.10.3/integration-cli/test_vars_seccomp.go000066400000000000000000000001721267010174400222070ustar00rootroot00000000000000// +build seccomp package main const ( // indicates docker daemon built with seccomp support supportsSeccomp = true ) docker-1.10.3/integration-cli/test_vars_unix.go000066400000000000000000000005651267010174400215470ustar00rootroot00000000000000// +build !windows package main const ( // identifies if test suite is running on a unix platform isUnixCli = true expectedFileChmod = "-rw-r--r--" // On Unix variants, the busybox image comes with the `top` command which // runs indefinitely while still being interruptible by a signal. 
defaultSleepImage = "busybox" ) var defaultSleepCommand = []string{"top"} docker-1.10.3/integration-cli/test_vars_windows.go000066400000000000000000000006431267010174400222530ustar00rootroot00000000000000// +build windows package main const ( // identifies if test suite is running on a unix platform isUnixCli = false // this is the expected file permission set on windows: gh#11395 expectedFileChmod = "-rwxr-xr-x" // On Windows, the busybox image doesn't have the `top` command, so we rely // on `sleep` with a high duration. defaultSleepImage = "busybox" ) var defaultSleepCommand = []string{"sleep", "60"} docker-1.10.3/integration-cli/trust_server.go000066400000000000000000000155741267010174400212470ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "net" "net/http" "os" "os/exec" "path/filepath" "strings" "time" "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/tlsconfig" "github.com/docker/notary/client" "github.com/docker/notary/passphrase" "github.com/docker/notary/tuf/data" "github.com/go-check/check" ) var notaryBinary = "notary-server" var notaryClientBinary = "notary" type testNotary struct { cmd *exec.Cmd dir string } const notaryHost = "localhost:4443" const notaryURL = "https://" + notaryHost func newTestNotary(c *check.C) (*testNotary, error) { // generate server config template := `{ "server": { "http_addr": "%s", "tls_key_file": "%s", "tls_cert_file": "%s" }, "trust_service": { "type": "local", "hostname": "", "port": "", "key_algorithm": "ed25519" }, "logging": { "level": "debug" }, "storage": { "backend": "memory" } }` tmp, err := ioutil.TempDir("", "notary-test-") if err != nil { return nil, err } confPath := filepath.Join(tmp, "config.json") config, err := os.Create(confPath) defer config.Close() if err != nil { return nil, err } workingDir, err := os.Getwd() if err != nil { return nil, err } if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), 
filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { os.RemoveAll(tmp) return nil, err } // generate client config clientConfPath := filepath.Join(tmp, "client-config.json") clientConfig, err := os.Create(clientConfPath) defer clientConfig.Close() if err != nil { return nil, err } template = `{ "trust_dir" : "%s", "remote_server": { "url": "%s", "skipTLSVerify": true } }` if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.ConfigDir(), "trust"), notaryURL); err != nil { os.RemoveAll(tmp) return nil, err } // run notary-server cmd := exec.Command(notaryBinary, "-config", confPath) if err := cmd.Start(); err != nil { os.RemoveAll(tmp) if os.IsNotExist(err) { c.Skip(err.Error()) } return nil, err } testNotary := &testNotary{ cmd: cmd, dir: tmp, } // Wait for notary to be ready to serve requests. for i := 1; i <= 5; i++ { if err = testNotary.Ping(); err == nil { break } time.Sleep(10 * time.Millisecond * time.Duration(i*i)) } if err != nil { c.Fatalf("Timeout waiting for test notary to become available: %s", err) } return testNotary, nil } func (t *testNotary) Ping() error { tlsConfig := tlsconfig.ClientDefault tlsConfig.InsecureSkipVerify = true client := http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: &tlsConfig, }, } resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) if err != nil { return err } if resp.StatusCode != 200 { return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) } return nil } func (t *testNotary) Close() { t.cmd.Process.Kill() os.RemoveAll(t.dir) } func (s *DockerTrustSuite) trustedCmd(cmd *exec.Cmd) { pwd := "12345678" trustCmdEnv(cmd, notaryURL, pwd, pwd) } func (s *DockerTrustSuite) trustedCmdWithServer(cmd *exec.Cmd, server string) { pwd := "12345678" trustCmdEnv(cmd, server, pwd, pwd) } func (s 
*DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, rootPwd, repositoryPwd string) { trustCmdEnv(cmd, notaryURL, rootPwd, repositoryPwd) } func (s *DockerTrustSuite) trustedCmdWithDeprecatedEnvPassphrases(cmd *exec.Cmd, offlinePwd, taggingPwd string) { trustCmdDeprecatedEnv(cmd, notaryURL, offlinePwd, taggingPwd) } func trustCmdEnv(cmd *exec.Cmd, server, rootPwd, repositoryPwd string) { env := []string{ "DOCKER_CONTENT_TRUST=1", fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), } cmd.Env = append(os.Environ(), env...) } // Helper method to test the old env variables OFFLINE and TAGGING that will // be deprecated by 1.10 func trustCmdDeprecatedEnv(cmd *exec.Cmd, server, offlinePwd, taggingPwd string) { env := []string{ "DOCKER_CONTENT_TRUST=1", fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), fmt.Sprintf("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE=%s", offlinePwd), fmt.Sprintf("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE=%s", taggingPwd), } cmd.Env = append(os.Environ(), env...) 
} func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) if err != nil { c.Fatalf("Error running trusted push: %s\n%s", err, out) } if !strings.Contains(string(out), "Signing and pushing trust metadata") { c.Fatalf("Missing expected output on trusted push:\n%s", out) } if out, status := dockerCmd(c, "rmi", repoName); status != 0 { c.Fatalf("Error removing image %q\n%s", repoName, out) } return repoName } func notaryClientEnv(cmd *exec.Cmd, rootPwd, repositoryPwd string) { env := []string{ fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", rootPwd), fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", repositoryPwd), fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", repositoryPwd), } cmd.Env = append(os.Environ(), env...) 
} func (s *DockerTrustSuite) setupDelegations(c *check.C, repoName, pwd string) { initCmd := exec.Command(notaryClientBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "init", repoName) notaryClientEnv(initCmd, pwd, pwd) out, _, err := runCommandWithOutput(initCmd) if err != nil { c.Fatalf("Error initializing notary repository: %s\n", out) } // no command line for this, so build by hand nRepo, err := client.NewNotaryRepository(filepath.Join(cliconfig.ConfigDir(), "trust"), repoName, notaryURL, nil, passphrase.ConstantRetriever(pwd)) if err != nil { c.Fatalf("Error creating notary repository: %s\n", err) } delgKey, err := nRepo.CryptoService.Create("targets/releases", data.ECDSAKey) if err != nil { c.Fatalf("Error creating delegation key: %s\n", err) } err = nRepo.AddDelegation("targets/releases", 1, []data.PublicKey{delgKey}, []string{""}) if err != nil { c.Fatalf("Error creating delegation: %s\n", err) } // publishing first simulates the client pushing to a repo that they have been given delegated access to pubCmd := exec.Command(notaryClientBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "publish", repoName) notaryClientEnv(pubCmd, pwd, pwd) out, _, err = runCommandWithOutput(pubCmd) if err != nil { c.Fatalf("Error publishing notary repository: %s\n", out) } } docker-1.10.3/integration-cli/utils.go000066400000000000000000000043201267010174400176230ustar00rootroot00000000000000package main import ( "io" "os" "os/exec" "time" "github.com/docker/docker/pkg/integration" ) func getExitCode(err error) (int, error) { return integration.GetExitCode(err) } func processExitCode(err error) (exitCode int) { return integration.ProcessExitCode(err) } func isKilled(err error) bool { return integration.IsKilled(err) } func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { return integration.RunCommandWithOutput(cmd) } func runCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { 
return integration.RunCommandWithStdoutStderr(cmd) } func runCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) { return integration.RunCommandWithOutputForDuration(cmd, duration) } func runCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) { return integration.RunCommandWithOutputAndTimeout(cmd, timeout) } func runCommand(cmd *exec.Cmd) (exitCode int, err error) { return integration.RunCommand(cmd) } func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { return integration.RunCommandPipelineWithOutput(cmds...) } func unmarshalJSON(data []byte, result interface{}) error { return integration.UnmarshalJSON(data, result) } func convertSliceOfStringsToMap(input []string) map[string]struct{} { return integration.ConvertSliceOfStringsToMap(input) } func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { return integration.CompareDirectoryEntries(e1, e2) } func listTar(f io.Reader) ([]string, error) { return integration.ListTar(f) } func randomTmpDirPath(s string, platform string) string { return integration.RandomTmpDirPath(s, platform) } func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { return integration.ConsumeWithSpeed(reader, chunkSize, interval, stop) } func parseCgroupPaths(procCgroupData string) map[string]string { return integration.ParseCgroupPaths(procCgroupData) } func runAtDifferentDate(date time.Time, block func()) { integration.RunAtDifferentDate(date, block) } docker-1.10.3/layer/000077500000000000000000000000001267010174400141615ustar00rootroot00000000000000docker-1.10.3/layer/empty.go000066400000000000000000000020031267010174400156410ustar00rootroot00000000000000package layer import ( "archive/tar" "bytes" "io" "io/ioutil" ) // DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - // 
// (1024 NULL bytes)
const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef")

// emptyLayer implements Layer for a tar stream with no entries; all of its
// identity methods are derived from DigestSHA256EmptyTar.
type emptyLayer struct{}

// EmptyLayer is a layer that corresponds to empty tar.
var EmptyLayer = &emptyLayer{}

// TarStream returns a fresh reader over an empty tar archive: only the
// end-of-archive footer that tar.Writer.Close emits.
func (el *emptyLayer) TarStream() (io.ReadCloser, error) {
	buf := new(bytes.Buffer)
	tarWriter := tar.NewWriter(buf)
	tarWriter.Close()
	return ioutil.NopCloser(buf), nil
}

// ChainID of the empty layer is the digest of the empty tar itself, since it
// has no parent.
func (el *emptyLayer) ChainID() ChainID {
	return ChainID(DigestSHA256EmptyTar)
}

// DiffID is the fixed digest of the empty tar stream.
func (el *emptyLayer) DiffID() DiffID {
	return DigestSHA256EmptyTar
}

// Parent is always nil; the empty layer is a base layer.
func (el *emptyLayer) Parent() Layer {
	return nil
}

// Size of the empty layer chain is zero.
func (el *emptyLayer) Size() (size int64, err error) {
	return 0, nil
}

// DiffSize of the empty layer is zero.
func (el *emptyLayer) DiffSize() (size int64, err error) {
	return 0, nil
}

// Metadata returns an empty map; the empty layer has no storage metadata.
func (el *emptyLayer) Metadata() (map[string]string, error) {
	return make(map[string]string), nil
}

package layer

import (
	"io"
	"testing"

	"github.com/docker/distribution/digest"
)

// TestEmptyLayer verifies every accessor of EmptyLayer and checks that its
// tar stream actually hashes to DigestSHA256EmptyTar.
func TestEmptyLayer(t *testing.T) {
	if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) {
		t.Fatal("wrong ID for empty layer")
	}

	if EmptyLayer.DiffID() != DigestSHA256EmptyTar {
		t.Fatal("wrong DiffID for empty layer")
	}

	if EmptyLayer.Parent() != nil {
		t.Fatal("expected no parent for empty layer")
	}

	if size, err := EmptyLayer.Size(); err != nil || size != 0 {
		t.Fatal("expected zero size for empty layer")
	}

	if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 {
		t.Fatal("expected zero diffsize for empty layer")
	}

	tarStream, err := EmptyLayer.TarStream()
	if err != nil {
		t.Fatalf("error streaming tar for empty layer: %v", err)
	}

	// Hash the stream and compare with the canonical constant.
	digester := digest.Canonical.New()
	_, err = io.Copy(digester.Hash(), tarStream)
	if err != nil {
		t.Fatalf("error hashing empty tar layer: %v", err)
	}

	if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) {
		t.Fatal("empty layer tar stream hashes to wrong value")
	}
}
package layer

import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/pkg/ioutils"
)

var (
	// stringIDRegexp validates cache/mount/init IDs read back from disk:
	// 64 lowercase hex characters, optionally suffixed with "-init".
	stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)

	// supportedAlgorithms lists the digest algorithms whose layer
	// directories List scans under the metadata root.
	supportedAlgorithms = []digest.Algorithm{
		digest.SHA256,
		// digest.SHA384, // Currently not used
		// digest.SHA512, // Currently not used
	}
)

// fileMetadataStore persists layer and mount metadata as small files under a
// single root directory.
type fileMetadataStore struct {
	root string
}

// fileMetadataTransaction stages metadata writes in a temp directory which is
// atomically renamed into place on Commit.
type fileMetadataTransaction struct {
	store *fileMetadataStore
	root  string
}

// NewFSMetadataStore returns an instance of a metadata store
// which is backed by files on disk using the provided root
// as the root of metadata files.
func NewFSMetadataStore(root string) (MetadataStore, error) {
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	return &fileMetadataStore{
		root: root,
	}, nil
}

// getLayerDirectory maps a ChainID to <root>/<algorithm>/<hex>.
func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
	dgst := digest.Digest(layer)
	return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
}

// getLayerFilename returns the path of a named metadata file for a layer.
func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
	return filepath.Join(fms.getLayerDirectory(layer), filename)
}

// getMountDirectory maps a mount name to <root>/mounts/<name>.
func (fms *fileMetadataStore) getMountDirectory(mount string) string {
	return filepath.Join(fms.root, "mounts", mount)
}

// getMountFilename returns the path of a named metadata file for a mount.
func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
	return filepath.Join(fms.getMountDirectory(mount), filename)
}

// StartTransaction creates a fresh temp directory under <root>/tmp to stage
// the metadata for a layer whose ChainID is not yet known.
func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) {
	tmpDir := filepath.Join(fms.root, "tmp")
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		return nil, err
	}

	td, err := ioutil.TempDir(tmpDir, "layer-")
	if err != nil {
		return nil, err
	}
	// Create a new tempdir
	return &fileMetadataTransaction{
		store: fms,
		root:  td,
	}, nil
}

// SetSize records the layer's chain size in the staged "size" file.
func (fm *fileMetadataTransaction) SetSize(size int64) error {
	content := fmt.Sprintf("%d", size)
	return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644)
}

// SetParent records the parent ChainID in the staged "parent" file.
func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
	return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644)
}

// SetDiffID records the layer's DiffID in the staged "diff" file.
func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
	return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644)
}

// SetCacheID records the graphdriver cache ID in the staged "cache-id" file.
func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
	return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644)
}

// TarSplitWriter opens the staged "tar-split.json.gz" file for writing,
// optionally gzip-compressing the input. The returned closer closes the gzip
// layer (if any) and then the underlying file.
func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
	f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	var wc io.WriteCloser
	if compressInput {
		wc = gzip.NewWriter(f)
	} else {
		wc = f
	}

	return ioutils.NewWriteCloserWrapper(wc, func() error {
		wc.Close()
		return f.Close()
	}), nil
}

// Commit atomically renames the staged directory to the layer's final
// metadata directory for the given ChainID.
func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
	finalDir := fm.store.getLayerDirectory(layer)
	if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil {
		return err
	}
	return os.Rename(fm.root, finalDir)
}

// Cancel discards the staged directory and everything in it.
func (fm *fileMetadataTransaction) Cancel() error {
	return os.RemoveAll(fm.root)
}

// String returns the staging directory path, used in log messages.
func (fm *fileMetadataTransaction) String() string {
	return fm.root
}

// GetSize reads and parses the layer's "size" file.
func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) {
	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size"))
	if err != nil {
		return 0, err
	}

	size, err := strconv.ParseInt(string(content), 10, 64)
	if err != nil {
		return 0, err
	}

	return size, nil
}

// GetParent reads the layer's "parent" file; a missing file means the layer
// has no parent and returns "" with no error.
func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}

	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}

	return ChainID(dgst), nil
}

// GetDiffID reads and parses the layer's "diff" file.
func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff"))
	if err != nil {
		return "", err
	}

	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}

	return DiffID(dgst), nil
}

// GetCacheID reads the layer's "cache-id" file and validates its format.
func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
	contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
	if err != nil {
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))

	if !stringIDRegexp.MatchString(content) {
		return "", errors.New("invalid cache id value")
	}

	return content, nil
}

// TarSplitReader opens the layer's gzip-compressed tar-split metadata for
// reading; the returned closer closes both the gzip reader and the file.
func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
	fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
	if err != nil {
		return nil, err
	}
	f, err := gzip.NewReader(fz)
	if err != nil {
		return nil, err
	}

	return ioutils.NewReadCloserWrapper(f, func() error {
		f.Close()
		return fz.Close()
	}), nil
}

// SetMountID writes the mount's graphdriver ID, creating the mount directory
// if needed.
func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
		return err
	}
	return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644)
}

// SetInitID writes the mount's init-layer ID, creating the mount directory
// if needed.
func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
		return err
	}
	return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644)
}

// SetMountParent writes the mount's parent ChainID, creating the mount
// directory if needed.
func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
		return err
	}
	return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644)
}

// GetMountID reads and validates the mount's graphdriver ID.
func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
	contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
	if err != nil {
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))

	if !stringIDRegexp.MatchString(content) {
		return "", errors.New("invalid mount id value")
	}

	return content, nil
}

// GetInitID reads and validates the mount's init-layer ID; a missing file
// means there is no init layer and returns "" with no error.
func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
	contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))

	if !stringIDRegexp.MatchString(content) {
		return "", errors.New("invalid init id value")
	}

	return content, nil
}

// GetMountParent reads the mount's parent ChainID; a missing file means the
// mount has no parent and returns "" with no error.
func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
	content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}

	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}

	return ChainID(dgst), nil
}

// List scans the metadata root and returns all referenced read-only layer
// ChainIDs (per supported digest algorithm) and all mount names. Entries with
// invalid digests are skipped with a debug log rather than failing the scan.
func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
	var ids []ChainID
	for _, algorithm := range supportedAlgorithms {
		fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return nil, nil, err
		}

		for _, fi := range fileInfos {
			// "mounts" lives alongside the algorithm directories; skip it.
			if fi.IsDir() && fi.Name() != "mounts" {
				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
				if err := dgst.Validate(); err != nil {
					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
				} else {
					ids = append(ids, ChainID(dgst))
				}
			}
		}
	}

	fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
	if err != nil {
		if os.IsNotExist(err) {
			return ids, []string{}, nil
		}
		return nil, nil, err
	}

	var mounts []string
	for _, fi := range fileInfos {
		if fi.IsDir() {
			mounts = append(mounts, fi.Name())
		}
	}

	return ids, mounts, nil
}

// Remove deletes all metadata files for the given layer.
func (fms *fileMetadataStore) Remove(layer ChainID) error {
	return os.RemoveAll(fms.getLayerDirectory(layer))
}

// RemoveMount deletes all metadata files for the given mount.
func (fms *fileMetadataStore) RemoveMount(mount string) error {
	return
	os.RemoveAll(fms.getMountDirectory(mount))
}

package layer

import (
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"strings"
	"syscall"
	"testing"

	"github.com/docker/distribution/digest"
)

// randomLayerID returns a deterministic pseudo-random ChainID for a seed, so
// tests get stable but distinct layer IDs.
func randomLayerID(seed int64) ChainID {
	r := rand.New(rand.NewSource(seed))

	return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63()))))
}

// newFileMetadataStore creates a store rooted in a fresh temp dir and returns
// the store, its root path, and a cleanup func that removes the root.
func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) {
	td, err := ioutil.TempDir("", "layers-")
	if err != nil {
		t.Fatal(err)
	}
	fms, err := NewFSMetadataStore(td)
	if err != nil {
		t.Fatal(err)
	}

	return fms.(*fileMetadataStore), td, func() {
		if err := os.RemoveAll(td); err != nil {
			t.Logf("Failed to cleanup %q: %s", td, err)
		}
	}
}

// assertNotDirectoryError fails the test unless err is a *os.PathError
// wrapping ENOTDIR.
func assertNotDirectoryError(t *testing.T, err error) {
	perr, ok := err.(*os.PathError)
	if !ok {
		t.Fatalf("Unexpected error %#v, expected path error", err)
	}

	if perr.Err != syscall.ENOTDIR {
		t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR)
	}
}

// assertPermissionError fails the test unless err is a *os.PathError
// wrapping EACCES.
func assertPermissionError(t *testing.T, err error) {
	perr, ok := err.(*os.PathError)
	if !ok {
		t.Fatalf("Unexpected error %#v, expected path error", err)
	}

	if perr.Err != syscall.EACCES {
		t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.EACCES)
	}
}

// TestCommitFailure verifies that Commit fails with ENOTDIR when a plain file
// occupies the algorithm directory ("sha256") the layer would be renamed into.
func TestCommitFailure(t *testing.T) {
	fms, td, cleanup := newFileMetadataStore(t)
	defer cleanup()

	if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil {
		t.Fatal(err)
	}

	tx, err := fms.StartTransaction()
	if err != nil {
		t.Fatal(err)
	}

	if err := tx.SetSize(0); err != nil {
		t.Fatal(err)
	}

	err = tx.Commit(randomLayerID(5))
	if err == nil {
		t.Fatalf("Expected error committing with invalid layer parent directory")
	}
	assertNotDirectoryError(t, err)
}

// TestStartTransactionFailure verifies that StartTransaction fails with
// ENOTDIR when a plain file occupies the "tmp" staging directory, and that it
// succeeds once the file is removed.
func TestStartTransactionFailure(t *testing.T) {
	fms, td, cleanup := newFileMetadataStore(t)
	defer cleanup()

	if err := ioutil.WriteFile(filepath.Join(td, "tmp"),
[]byte("was here first!"), 0644); err != nil { t.Fatal(err) } _, err := fms.StartTransaction() if err == nil { t.Fatalf("Expected error starting transaction with invalid layer parent directory") } assertNotDirectoryError(t, err) if err := os.Remove(filepath.Join(td, "tmp")); err != nil { t.Fatal(err) } tx, err := fms.StartTransaction() if err != nil { t.Fatal(err) } if expected := filepath.Join(td, "tmp"); strings.HasPrefix(expected, tx.String()) { t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected) } if err := tx.Cancel(); err != nil { t.Fatal(err) } } docker-1.10.3/layer/layer.go000066400000000000000000000165671267010174400156430ustar00rootroot00000000000000// Package layer is package for managing read only // and read-write mounts on the union file system // driver. Read-only mounts are referenced using a // content hash and are protected from mutation in // the exposed interface. The tar format is used // to create read only layers and export both // read only and writable layers. The exported // tar data for a read only layer should match // the tar used to create the layer. package layer import ( "errors" "io" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/pkg/archive" ) var ( // ErrLayerDoesNotExist is used when an operation is // attempted on a layer which does not exist. ErrLayerDoesNotExist = errors.New("layer does not exist") // ErrLayerNotRetained is used when a release is // attempted on a layer which is not retained. ErrLayerNotRetained = errors.New("layer not retained") // ErrMountDoesNotExist is used when an operation is // attempted on a mount layer which does not exist. ErrMountDoesNotExist = errors.New("mount does not exist") // ErrMountNameConflict is used when a mount is attempted // to be created but there is already a mount with the name // used for creation. 
	ErrMountNameConflict = errors.New("mount already exists with name")

	// ErrActiveMount is used when an operation on a
	// mount is attempted but the layer is still
	// mounted and the operation cannot be performed.
	ErrActiveMount = errors.New("mount still active")

	// ErrNotMounted is used when requesting an active
	// mount but the layer is not mounted.
	ErrNotMounted = errors.New("not mounted")

	// ErrMaxDepthExceeded is used when a layer is attempted
	// to be created which would result in a layer depth
	// greater than the 125 max.
	ErrMaxDepthExceeded = errors.New("max depth exceeded")
)

// ChainID is the content-addressable ID of a layer.
type ChainID digest.Digest

// String returns a string rendition of a layer ID
func (id ChainID) String() string {
	return string(id)
}

// DiffID is the hash of an individual layer tar.
type DiffID digest.Digest

// String returns a string rendition of a layer DiffID
func (diffID DiffID) String() string {
	return string(diffID)
}

// TarStreamer represents an object which may
// have its contents exported as a tar stream.
type TarStreamer interface {
	// TarStream returns a tar archive stream
	// for the contents of a layer.
	TarStream() (io.ReadCloser, error)
}

// Layer represents a read only layer
type Layer interface {
	TarStreamer

	// ChainID returns the content hash of the entire layer chain. The hash
	// chain is made up of DiffID of top layer and all of its parents.
	ChainID() ChainID

	// DiffID returns the content hash of the layer
	// tar stream used to create this layer.
	DiffID() DiffID

	// Parent returns the next layer in the layer chain.
	Parent() Layer

	// Size returns the size of the entire layer chain. The size
	// is calculated from the total size of all files in the layers.
	Size() (int64, error)

	// DiffSize returns the size difference of the top layer
	// from parent layer.
	DiffSize() (int64, error)

	// Metadata returns the low level storage metadata associated
	// with layer.
	Metadata() (map[string]string, error)
}

// RWLayer represents a layer which is
// read and writable
type RWLayer interface {
	TarStreamer

	// Name of mounted layer
	Name() string

	// Parent returns the layer which the writable
	// layer was created from.
	Parent() Layer

	// Mount mounts the RWLayer and returns the filesystem path
	// the to the writable layer.
	Mount(mountLabel string) (string, error)

	// Unmount unmounts the RWLayer. This should be called
	// for every mount. If there are multiple mount calls
	// this operation will only decrement the internal mount counter.
	Unmount() error

	// Size represents the size of the writable layer
	// as calculated by the total size of the files
	// changed in the mutable layer.
	Size() (int64, error)

	// Changes returns the set of changes for the mutable layer
	// from the base layer.
	Changes() ([]archive.Change, error)

	// Metadata returns the low level metadata for the mutable layer
	Metadata() (map[string]string, error)
}

// Metadata holds information about a
// read only layer
type Metadata struct {
	// ChainID is the content hash of the layer
	ChainID ChainID

	// DiffID is the hash of the tar data used to
	// create the layer
	DiffID DiffID

	// Size is the size of the layer and all parents
	Size int64

	// DiffSize is the size of the top layer
	DiffSize int64
}

// MountInit is a function to initialize a
// writable mount. Changes made here will
// not be included in the Tar stream of the
// RWLayer.
type MountInit func(root string) error

// Store represents a backend for managing both
// read-only and read-write layers.
type Store interface {
	Register(io.Reader, ChainID) (Layer, error)
	Get(ChainID) (Layer, error)
	Release(Layer) ([]Metadata, error)

	CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error)
	GetRWLayer(id string) (RWLayer, error)
	ReleaseRWLayer(RWLayer) ([]Metadata, error)

	Cleanup() error
	DriverStatus() [][2]string
	DriverName() string
}

// MetadataTransaction represents functions for setting layer metadata
// with a single transaction.
type MetadataTransaction interface {
	SetSize(int64) error
	SetParent(parent ChainID) error
	SetDiffID(DiffID) error
	SetCacheID(string) error
	TarSplitWriter(compressInput bool) (io.WriteCloser, error)

	Commit(ChainID) error
	Cancel() error
	String() string
}

// MetadataStore represents a backend for persisting
// metadata about layers and providing the metadata
// for restoring a Store.
type MetadataStore interface {
	// StartTransaction starts an update for new metadata
	// which will be used to represent an ID on commit.
	StartTransaction() (MetadataTransaction, error)

	GetSize(ChainID) (int64, error)
	GetParent(ChainID) (ChainID, error)
	GetDiffID(ChainID) (DiffID, error)
	GetCacheID(ChainID) (string, error)
	TarSplitReader(ChainID) (io.ReadCloser, error)

	SetMountID(string, string) error
	SetInitID(string, string) error
	SetMountParent(string, ChainID) error

	GetMountID(string) (string, error)
	GetInitID(string) (string, error)
	GetMountParent(string) (ChainID, error)

	// List returns the full list of referenced
	// read-only and read-write layers
	List() ([]ChainID, []string, error)

	Remove(ChainID) error
	RemoveMount(string) error
}

// CreateChainID returns ID for a layerDigest slice
func CreateChainID(dgsts []DiffID) ChainID {
	return createChainIDFromParent("", dgsts...)
}

// createChainIDFromParent recursively folds each DiffID into the parent's
// chain hash; with an empty parent the first DiffID seeds the chain.
func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
	if len(dgsts) == 0 {
		return parent
	}
	if parent == "" {
		return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
	}
	// H = "H(n-1) SHA256(n)"
	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
	return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
}

// ReleaseAndLog releases the provided layer from the given layer
// store, logging any error and release metadata
func ReleaseAndLog(ls Store, l Layer) {
	metadata, err := ls.Release(l)
	if err != nil {
		logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err)
	}
	// Log whatever metadata was returned even if Release errored.
	LogReleaseMetadata(metadata)
}

// LogReleaseMetadata logs a metadata array, use this to
// ensure consistent logging for release metadata
func LogReleaseMetadata(metadatas []Metadata) {
	for _, metadata := range metadatas {
		logrus.Infof("Layer %s cleaned up", metadata.ChainID)
	}
}

package layer

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/stringid"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// maxLayerDepth represents the maximum number of
// layers which can be chained together. 125 was
// chosen to account for the 127 max in some
// graphdrivers plus the 2 additional layers
// used to create a rwlayer.
const maxLayerDepth = 125

// layerStore implements Store on top of a graphdriver for layer data and a
// MetadataStore for layer/mount bookkeeping.
type layerStore struct {
	store  MetadataStore
	driver graphdriver.Driver

	// layerMap holds all known read-only layers, keyed by ChainID;
	// guarded by layerL.
	layerMap map[ChainID]*roLayer
	layerL   sync.Mutex

	// mounts holds all known read-write layers, keyed by name;
	// guarded by mountL.
	mounts map[string]*mountedLayer
	mountL sync.Mutex
}

// StoreOptions are the options used to create a new Store instance
type StoreOptions struct {
	StorePath                 string
	MetadataStorePathTemplate string
	GraphDriver               string
	GraphDriverOptions        []string
	UIDMaps                   []idtools.IDMap
	GIDMaps                   []idtools.IDMap
}

// NewStoreFromOptions creates a new Store instance
func NewStoreFromOptions(options StoreOptions) (Store, error) {
	driver, err := graphdriver.New(
		options.StorePath,
		options.GraphDriver,
		options.GraphDriverOptions,
		options.UIDMaps,
		options.GIDMaps)
	if err != nil {
		return nil, fmt.Errorf("error initializing graphdriver: %v", err)
	}
	logrus.Debugf("Using graph driver %s", driver)

	// The metadata root is derived from the driver name via the template
	// (e.g. ".../image/%s/layerdb").
	fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver))
	if err != nil {
		return nil, err
	}

	return NewStoreFromGraphDriver(fms, driver)
}

// NewStoreFromGraphDriver creates a new Store instance using the provided
// metadata store and graph driver. The metadata store will be used to restore
// the Store.
func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) {
	ls := &layerStore{
		store:    store,
		driver:   driver,
		layerMap: map[ChainID]*roLayer{},
		mounts:   map[string]*mountedLayer{},
	}

	ids, mounts, err := store.List()
	if err != nil {
		return nil, err
	}

	for _, id := range ids {
		l, err := ls.loadLayer(id)
		if err != nil {
			// A layer that fails to load is skipped, not fatal;
			// remaining layers are still restored.
			logrus.Debugf("Failed to load layer %s: %s", id, err)
			continue
		}
		if l.parent != nil {
			l.parent.referenceCount++
		}
	}

	for _, mount := range mounts {
		if err := ls.loadMount(mount); err != nil {
			logrus.Debugf("Failed to load mount %s: %s", mount, err)
		}
	}

	return ls, nil
}

// loadLayer restores the metadata for a layer (and, recursively, its parent
// chain) from the metadata store into layerMap, returning the cached entry if
// it was already loaded. Called during store restore; note it accesses
// layerMap without taking layerL — assumes single-threaded initialization.
func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
	cl, ok := ls.layerMap[layer]
	if ok {
		return cl, nil
	}

	diff, err := ls.store.GetDiffID(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err)
	}

	size, err := ls.store.GetSize(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get size for %s: %s", layer, err)
	}

	cacheID, err := ls.store.GetCacheID(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err)
	}

	parent, err := ls.store.GetParent(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
	}

	cl = &roLayer{
		chainID:    layer,
		diffID:     diff,
		size:       size,
		cacheID:    cacheID,
		layerStore: ls,
		references: map[Layer]struct{}{},
	}

	if parent != "" {
		p, err := ls.loadLayer(parent)
		if err != nil {
			return nil, err
		}
		cl.parent = p
	}

	ls.layerMap[cl.chainID] = cl

	return cl, nil
}

// loadMount restores a read-write mount's metadata from the store into
// ls.mounts, bumping its parent layer's reference count.
func (ls *layerStore) loadMount(mount string) error {
	if _, ok := ls.mounts[mount]; ok {
		return nil
	}

	mountID, err := ls.store.GetMountID(mount)
	if err != nil {
		return err
	}

	initID, err := ls.store.GetInitID(mount)
	if err != nil {
		return err
	}

	parent, err := ls.store.GetMountParent(mount)
	if err != nil {
		return err
	}

	ml := &mountedLayer{
		name:       mount,
		mountID:    mountID,
		initID:     initID,
		layerStore: ls,
		references: map[RWLayer]*referencedRWLayer{},
	}

	if parent != "" {
		p,
		err := ls.loadLayer(parent)
		if err != nil {
			return err
		}
		ml.parent = p

		p.referenceCount++
	}

	ls.mounts[ml.name] = ml

	return nil
}

// applyTar extracts the layer tar stream ts onto the graphdriver layer,
// recording the tar-split metadata into the transaction so the exact stream
// can be reassembled later, and fills in layer.size and layer.diffID from
// what was actually applied.
func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
	// Tee the stream through a digester so diffID reflects the bytes read.
	digester := digest.Canonical.New()
	tr := io.TeeReader(ts, digester.Hash())

	tsw, err := tx.TarSplitWriter(true)
	if err != nil {
		return err
	}
	metaPacker := storage.NewJSONPacker(tsw)
	defer tsw.Close()

	// we're passing nil here for the file putter, because the ApplyDiff will
	// handle the extraction of the archive
	rdr, err := asm.NewInputTarStream(tr, metaPacker, nil)
	if err != nil {
		return err
	}

	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr))
	if err != nil {
		return err
	}

	// Discard trailing data but ensure metadata is picked up to reconstruct stream
	io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed

	layer.size = applySize
	layer.diffID = DiffID(digester.Digest())

	logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)

	return nil
}

func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
	// err is used to hold the error which will always trigger
	// cleanup of creates sources but may not be an error returned
	// to the caller (already exists).
var err error var pid string var p *roLayer if string(parent) != "" { p = ls.get(parent) if p == nil { return nil, ErrLayerDoesNotExist } pid = p.cacheID // Release parent chain if error defer func() { if err != nil { ls.layerL.Lock() ls.releaseLayer(p) ls.layerL.Unlock() } }() if p.depth() >= maxLayerDepth { err = ErrMaxDepthExceeded return nil, err } } // Create new roLayer layer := &roLayer{ parent: p, cacheID: stringid.GenerateRandomID(), referenceCount: 1, layerStore: ls, references: map[Layer]struct{}{}, } if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil { return nil, err } tx, err := ls.store.StartTransaction() if err != nil { return nil, err } defer func() { if err != nil { logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) if err := ls.driver.Remove(layer.cacheID); err != nil { logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) } if err := tx.Cancel(); err != nil { logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) } } }() if err = ls.applyTar(tx, ts, pid, layer); err != nil { return nil, err } if layer.parent == nil { layer.chainID = ChainID(layer.diffID) } else { layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) } if err = storeLayer(tx, layer); err != nil { return nil, err } ls.layerL.Lock() defer ls.layerL.Unlock() if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { // Set error for cleanup, but do not return the error err = errors.New("layer already exists") return existingLayer.getReference(), nil } if err = tx.Commit(layer.chainID); err != nil { return nil, err } ls.layerMap[layer.chainID] = layer return layer.getReference(), nil } func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { l, ok := ls.layerMap[layer] if !ok { return nil } l.referenceCount++ return l } func (ls *layerStore) get(l ChainID) *roLayer { ls.layerL.Lock() defer ls.layerL.Unlock() return ls.getWithoutLock(l) } func (ls *layerStore) Get(l ChainID) 
(Layer, error) { layer := ls.get(l) if layer == nil { return nil, ErrLayerDoesNotExist } return layer.getReference(), nil } func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { err := ls.driver.Remove(layer.cacheID) if err != nil { return err } err = ls.store.Remove(layer.chainID) if err != nil { return err } metadata.DiffID = layer.diffID metadata.ChainID = layer.chainID metadata.Size, err = layer.Size() if err != nil { return err } metadata.DiffSize = layer.size return nil } func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { depth := 0 removed := []Metadata{} for { if l.referenceCount == 0 { panic("layer not retained") } l.referenceCount-- if l.referenceCount != 0 { return removed, nil } if len(removed) == 0 && depth > 0 { panic("cannot remove layer with child") } if l.hasReferences() { panic("cannot delete referenced layer") } var metadata Metadata if err := ls.deleteLayer(l, &metadata); err != nil { return nil, err } delete(ls.layerMap, l.chainID) removed = append(removed, metadata) if l.parent == nil { return removed, nil } depth++ l = l.parent } } func (ls *layerStore) Release(l Layer) ([]Metadata, error) { ls.layerL.Lock() defer ls.layerL.Unlock() layer, ok := ls.layerMap[l.ChainID()] if !ok { return []Metadata{}, nil } if !layer.hasReference(l) { return nil, ErrLayerNotRetained } layer.deleteReference(l) return ls.releaseLayer(layer) } func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) { ls.mountL.Lock() defer ls.mountL.Unlock() m, ok := ls.mounts[name] if ok { return nil, ErrMountNameConflict } var err error var pid string var p *roLayer if string(parent) != "" { p = ls.get(parent) if p == nil { return nil, ErrLayerDoesNotExist } pid = p.cacheID // Release parent chain if error defer func() { if err != nil { ls.layerL.Lock() ls.releaseLayer(p) ls.layerL.Unlock() } }() } m = &mountedLayer{ name: name, parent: p, mountID: ls.mountID(name), 
layerStore: ls, references: map[RWLayer]*referencedRWLayer{}, } if initFunc != nil { pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc) if err != nil { return nil, err } m.initID = pid } if err = ls.driver.Create(m.mountID, pid, ""); err != nil { return nil, err } if err = ls.saveMount(m); err != nil { return nil, err } return m.getReference(), nil } func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { ls.mountL.Lock() defer ls.mountL.Unlock() mount, ok := ls.mounts[id] if !ok { return nil, ErrMountDoesNotExist } return mount.getReference(), nil } func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { ls.mountL.Lock() defer ls.mountL.Unlock() m, ok := ls.mounts[l.Name()] if !ok { return []Metadata{}, nil } if err := m.deleteReference(l); err != nil { return nil, err } if m.hasReferences() { return []Metadata{}, nil } if err := ls.driver.Remove(m.mountID); err != nil { logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) m.retakeReference(l) return nil, err } if m.initID != "" { if err := ls.driver.Remove(m.initID); err != nil { logrus.Errorf("Error removing init layer %s: %s", m.name, err) m.retakeReference(l) return nil, err } } if err := ls.store.RemoveMount(m.name); err != nil { logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) m.retakeReference(l) return nil, err } delete(ls.mounts, m.Name()) ls.layerL.Lock() defer ls.layerL.Unlock() if m.parent != nil { return ls.releaseLayer(m.parent) } return []Metadata{}, nil } func (ls *layerStore) saveMount(mount *mountedLayer) error { if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { return err } if mount.initID != "" { if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { return err } } if mount.parent != nil { if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { return err } } ls.mounts[mount.name] = mount return nil } func (ls *layerStore) initMount(graphID, parent, mountLabel string, 
initFunc MountInit) (string, error) { // Use "-init" to maintain compatibility with graph drivers // which are expecting this layer with this special name. If all // graph drivers can be updated to not rely on knowing about this layer // then the initID should be randomly generated. initID := fmt.Sprintf("%s-init", graphID) if err := ls.driver.Create(initID, parent, mountLabel); err != nil { return "", err } p, err := ls.driver.Get(initID, "") if err != nil { return "", err } if err := initFunc(p); err != nil { ls.driver.Put(initID) return "", err } if err := ls.driver.Put(initID); err != nil { return "", err } return initID, nil } func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { type diffPathDriver interface { DiffPath(string) (string, func() error, error) } diffDriver, ok := ls.driver.(diffPathDriver) if !ok { diffDriver = &naiveDiffPathDriver{ls.driver} } defer metadata.Close() // get our relative path to the container fsPath, releasePath, err := diffDriver.DiffPath(graphID) if err != nil { return err } defer releasePath() metaUnpacker := storage.NewJSONUnpacker(metadata) upackerCounter := &unpackSizeCounter{metaUnpacker, size} fileGetter := storage.NewPathFileGetter(fsPath) logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath) return asm.WriteOutputTarStream(fileGetter, upackerCounter, w) } func (ls *layerStore) Cleanup() error { return ls.driver.Cleanup() } func (ls *layerStore) DriverStatus() [][2]string { return ls.driver.Status() } func (ls *layerStore) DriverName() string { return ls.driver.String() } type naiveDiffPathDriver struct { graphdriver.Driver } func (n *naiveDiffPathDriver) DiffPath(id string) (string, func() error, error) { p, err := n.Driver.Get(id, "") if err != nil { return "", nil, err } return p, func() error { return n.Driver.Put(id) }, nil } docker-1.10.3/layer/layer_test.go000066400000000000000000000455151267010174400166750ustar00rootroot00000000000000package 
layer import ( "bytes" "io" "io/ioutil" "os" "path/filepath" "strings" "testing" "github.com/docker/distribution/digest" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/vfs" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" ) func init() { graphdriver.ApplyUncompressedLayer = archive.UnpackLayer vfs.CopyWithTar = archive.CopyWithTar } func newVFSGraphDriver(td string) (graphdriver.Driver, error) { uidMap := []idtools.IDMap{ { ContainerID: 0, HostID: os.Getuid(), Size: 1, }, } gidMap := []idtools.IDMap{ { ContainerID: 0, HostID: os.Getgid(), Size: 1, }, } return graphdriver.GetDriver("vfs", td, nil, uidMap, gidMap) } func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { td, err := ioutil.TempDir("", "graph-") if err != nil { t.Fatal(err) } driver, err := newVFSGraphDriver(td) if err != nil { t.Fatal(err) } return driver, func() { os.RemoveAll(td) } } func newTestStore(t *testing.T) (Store, string, func()) { td, err := ioutil.TempDir("", "layerstore-") if err != nil { t.Fatal(err) } graph, graphcleanup := newTestGraphDriver(t) fms, err := NewFSMetadataStore(td) if err != nil { t.Fatal(err) } ls, err := NewStoreFromGraphDriver(fms, graph) if err != nil { t.Fatal(err) } return ls, td, func() { graphcleanup() os.RemoveAll(td) } } type layerInit func(root string) error func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { containerID := stringid.GenerateRandomID() mount, err := ls.CreateRWLayer(containerID, parent, "", nil) if err != nil { return nil, err } path, err := mount.Mount("") if err != nil { return nil, err } if err := layerFunc(path); err != nil { return nil, err } ts, err := mount.TarStream() if err != nil { return nil, err } defer ts.Close() layer, err := ls.Register(ts, parent) if err != nil { return nil, err } if err := mount.Unmount(); err != nil { return nil, err } if _, err := ls.ReleaseRWLayer(mount); 
err != nil { return nil, err } return layer, nil } type FileApplier interface { ApplyFile(root string) error } type testFile struct { name string content []byte permission os.FileMode } func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { return &testFile{ name: name, content: content, permission: perm, } } func (tf *testFile) ApplyFile(root string) error { fullPath := filepath.Join(root, tf.name) if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { return err } // Check if already exists if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { if err := os.Chmod(fullPath, tf.permission); err != nil { return err } } if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { return err } return nil } func initWithFiles(files ...FileApplier) layerInit { return func(root string) error { for _, f := range files { if err := f.ApplyFile(root); err != nil { return err } } return nil } } func getCachedLayer(l Layer) *roLayer { if rl, ok := l.(*referencedCacheLayer); ok { return rl.roLayer } return l.(*roLayer) } func getMountLayer(l RWLayer) *mountedLayer { if rl, ok := l.(*referencedRWLayer); ok { return rl.mountedLayer } return l.(*mountedLayer) } func createMetadata(layers ...Layer) []Metadata { metadata := make([]Metadata, len(layers)) for i := range layers { size, err := layers[i].Size() if err != nil { panic(err) } metadata[i].ChainID = layers[i].ChainID() metadata[i].DiffID = layers[i].DiffID() metadata[i].Size = size metadata[i].DiffSize = getCachedLayer(layers[i]).size } return metadata } func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) { if len(metadata) != len(expectedMetadata) { t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) } for i := range metadata { if metadata[i] != expectedMetadata[i] { t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) } } if t.Failed() 
{ t.FailNow() } } func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { layerCount := len(ls.(*layerStore).layerMap) expectedMetadata := createMetadata(removed...) metadata, err := ls.Release(layer) if err != nil { t.Fatal(err) } assertMetadata(t, metadata, expectedMetadata) if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) } } func cacheID(l Layer) string { return getCachedLayer(l).cacheID } func assertLayerEqual(t *testing.T, l1, l2 Layer) { if l1.ChainID() != l2.ChainID() { t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID()) } if l1.DiffID() != l2.DiffID() { t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) } size1, err := l1.Size() if err != nil { t.Fatal(err) } size2, err := l2.Size() if err != nil { t.Fatal(err) } if size1 != size2 { t.Fatalf("Mismatched size: %d vs %d", size1, size2) } if cacheID(l1) != cacheID(l2) { t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) } p1 := l1.Parent() p2 := l2.Parent() if p1 != nil && p2 != nil { assertLayerEqual(t, p1, p2) } else if p1 != nil || p2 != nil { t.Fatalf("Mismatched parents: %v vs %v", p1, p2) } } func TestMountAndRegister(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) layer, err := createLayer(ls, "", li) if err != nil { t.Fatal(err) } size, _ := layer.Size() t.Logf("Layer size: %d", size) mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), "", nil) if err != nil { t.Fatal(err) } path2, err := mount2.Mount("") if err != nil { t.Fatal(err) } b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) if err != nil { t.Fatal(err) } if expected := "some test data"; string(b) != expected { t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) } if err := mount2.Unmount(); 
err != nil { t.Fatal(err) } if _, err := ls.ReleaseRWLayer(mount2); err != nil { t.Fatal(err) } } func TestLayerRelease(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) if err != nil { t.Fatal(err) } layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) if err != nil { t.Fatal(err) } if _, err := ls.Release(layer1); err != nil { t.Fatal(err) } layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) if err != nil { t.Fatal(err) } layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) if err != nil { t.Fatal(err) } if _, err := ls.Release(layer2); err != nil { t.Fatal(err) } t.Logf("Layer1: %s", layer1.ChainID()) t.Logf("Layer2: %s", layer2.ChainID()) t.Logf("Layer3a: %s", layer3a.ChainID()) t.Logf("Layer3b: %s", layer3b.ChainID()) if expected := 4; len(ls.(*layerStore).layerMap) != expected { t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) } releaseAndCheckDeleted(t, ls, layer3b, layer3b) releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) } func TestStoreRestore(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) if err != nil { t.Fatal(err) } layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) if err != nil { t.Fatal(err) } if _, err := ls.Release(layer1); err != nil { t.Fatal(err) } layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) if err != nil { t.Fatal(err) } if _, err := ls.Release(layer2); err != nil { t.Fatal(err) } m, 
err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), "", nil) if err != nil { t.Fatal(err) } path, err := m.Mount("") if err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { t.Fatal(err) } assertActivityCount(t, m, 1) if err := m.Unmount(); err != nil { t.Fatal(err) } assertActivityCount(t, m, 0) ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) if err != nil { t.Fatal(err) } layer3b, err := ls2.Get(layer3.ChainID()) if err != nil { t.Fatal(err) } assertLayerEqual(t, layer3b, layer3) // Create again with same name, should return error if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), "", nil); err == nil { t.Fatal("Expected error creating mount with same name") } else if err != ErrMountNameConflict { t.Fatal(err) } m2, err := ls2.GetRWLayer("some-mount_name") if err != nil { t.Fatal(err) } if mountPath, err := m2.Mount(""); err != nil { t.Fatal(err) } else if path != mountPath { t.Fatalf("Unexpected path %s, expected %s", mountPath, path) } assertActivityCount(t, m2, 1) if mountPath, err := m2.Mount(""); err != nil { t.Fatal(err) } else if path != mountPath { t.Fatalf("Unexpected path %s, expected %s", mountPath, path) } assertActivityCount(t, m2, 2) if err := m2.Unmount(); err != nil { t.Fatal(err) } assertActivityCount(t, m2, 1) b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) if err != nil { t.Fatal(err) } if expected := "nothing here"; string(b) != expected { t.Fatalf("Unexpected content %q, expected %q", string(b), expected) } if err := m2.Unmount(); err != nil { t.Fatal(err) } assertActivityCount(t, m2, 0) if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { t.Fatal(err) } else if len(metadata) != 0 { t.Fatalf("Unexpectedly deleted layers: %#v", metadata) } if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { t.Fatal(err) } else if len(metadata) != 0 { t.Fatalf("Unexpectedly deleted layers: 
%#v", metadata) } releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) } func TestTarStreamStability(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() files1 := []FileApplier{ newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), } addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) files2 := []FileApplier{ newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), } tar1, err := tarFromFiles(files1...) if err != nil { t.Fatal(err) } tar2, err := tarFromFiles(files2...) if err != nil { t.Fatal(err) } layer1, err := ls.Register(bytes.NewReader(tar1), "") if err != nil { t.Fatal(err) } // hack layer to add file p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") if err != nil { t.Fatal(err) } if err := addedFile.ApplyFile(p); err != nil { t.Fatal(err) } if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { t.Fatal(err) } layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID()) if err != nil { t.Fatal(err) } id1 := layer1.ChainID() t.Logf("Layer 1: %s", layer1.ChainID()) t.Logf("Layer 2: %s", layer2.ChainID()) if _, err := ls.Release(layer1); err != nil { t.Fatal(err) } assertLayerDiff(t, tar2, layer2) layer1b, err := ls.Get(id1) if err != nil { t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap) t.Fatal(err) } if _, err := ls.Release(layer2); err != nil { t.Fatal(err) } assertLayerDiff(t, tar1, layer1b) if _, err := ls.Release(layer1b); err != nil { t.Fatal(err) } } func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { expectedDigest := digest.FromBytes(expected) if digest.Digest(layer.DiffID()) != expectedDigest { t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) } ts, err 
:= layer.TarStream() if err != nil { t.Fatal(err) } defer ts.Close() actual, err := ioutil.ReadAll(ts) if err != nil { t.Fatal(err) } if len(actual) != len(expected) { logByteDiff(t, actual, expected) t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) } actualDigest := digest.FromBytes(actual) if actualDigest != expectedDigest { logByteDiff(t, actual, expected) t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) } } const maxByteLog = 4 * 1024 func logByteDiff(t *testing.T, actual, expected []byte) { d1, d2 := byteDiff(actual, expected) if len(d1) == 0 && len(d2) == 0 { return } prefix := len(actual) - len(d1) if len(d1) > maxByteLog || len(d2) > maxByteLog { t.Logf("Byte diff after %d matching bytes", prefix) } else { t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2) } } // byteDiff returns the differing bytes after the matching prefix func byteDiff(b1, b2 []byte) ([]byte, []byte) { i := 0 for i < len(b1) && i < len(b2) { if b1[i] != b2[i] { break } i++ } return b1[i:], b2[i:] } func tarFromFiles(files ...FileApplier) ([]byte, error) { td, err := ioutil.TempDir("", "tar-") if err != nil { return nil, err } defer os.RemoveAll(td) for _, f := range files { if err := f.ApplyFile(td); err != nil { return nil, err } } r, err := archive.Tar(td, archive.Uncompressed) if err != nil { return nil, err } buf := bytes.NewBuffer(nil) if _, err := io.Copy(buf, r); err != nil { return nil, err } return buf.Bytes(), nil } // assertReferences asserts that all the references are to the same // image and represent the full set of references to that image. 
func assertReferences(t *testing.T, references ...Layer) { if len(references) == 0 { return } base := references[0].(*referencedCacheLayer).roLayer seenReferences := map[Layer]struct{}{ references[0]: {}, } for i := 1; i < len(references); i++ { other := references[i].(*referencedCacheLayer).roLayer if base != other { t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) } if _, ok := base.references[references[i]]; !ok { t.Fatalf("Reference not part of reference list: %v", references[i]) } if _, ok := seenReferences[references[i]]; ok { t.Fatalf("Duplicated reference %v", references[i]) } } if rc := len(base.references); rc != len(references) { t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) } } func assertActivityCount(t *testing.T, l RWLayer, expected int) { rl := l.(*referencedRWLayer) if rl.activityCount != expected { t.Fatalf("Unexpected activity count %d, expected %d", rl.activityCount, expected) } } func TestRegisterExistingLayer(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() baseFiles := []FileApplier{ newTestFile("/etc/profile", []byte("# Base configuration"), 0644), } layerFiles := []FileApplier{ newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), } li := initWithFiles(baseFiles...) layer1, err := createLayer(ls, "", li) if err != nil { t.Fatal(err) } tar1, err := tarFromFiles(layerFiles...) 
if err != nil { t.Fatal(err) } layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) if err != nil { t.Fatal(err) } layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) if err != nil { t.Fatal(err) } assertReferences(t, layer2a, layer2b) } func graphDiffSize(ls Store, l Layer) (int64, error) { cl := getCachedLayer(l) var parent string if cl.parent != nil { parent = cl.parent.cacheID } return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) } func TestLayerSize(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() content1 := []byte("Base contents") content2 := []byte("Added contents") layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) if err != nil { t.Fatal(err) } layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) if err != nil { t.Fatal(err) } layer1DiffSize, err := graphDiffSize(ls, layer1) if err != nil { t.Fatal(err) } if int(layer1DiffSize) != len(content1) { t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1)) } layer1Size, err := layer1.Size() if err != nil { t.Fatal(err) } if expected := len(content1); int(layer1Size) != expected { t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected) } layer2DiffSize, err := graphDiffSize(ls, layer2) if err != nil { t.Fatal(err) } if int(layer2DiffSize) != len(content2) { t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2)) } layer2Size, err := layer2.Size() if err != nil { t.Fatal(err) } if expected := len(content1) + len(content2); int(layer2Size) != expected { t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected) } } func TestTarStreamVerification(t *testing.T) { ls, tmpdir, cleanup := newTestStore(t) defer cleanup() files1 := []FileApplier{ newTestFile("/foo", []byte("abc"), 0644), newTestFile("/bar", []byte("def"), 0644), } files2 := []FileApplier{ newTestFile("/foo", []byte("abc"), 0644), 
newTestFile("/bar", []byte("def"), 0600), // different perm } tar1, err := tarFromFiles(files1...) if err != nil { t.Fatal(err) } tar2, err := tarFromFiles(files2...) if err != nil { t.Fatal(err) } layer1, err := ls.Register(bytes.NewReader(tar1), "") if err != nil { t.Fatal(err) } layer2, err := ls.Register(bytes.NewReader(tar2), "") if err != nil { t.Fatal(err) } id1 := digest.Digest(layer1.ChainID()) id2 := digest.Digest(layer2.ChainID()) // Replace tar data files src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz")) if err != nil { t.Fatal(err) } dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz")) if err != nil { t.Fatal(err) } if _, err := io.Copy(dst, src); err != nil { t.Fatal(err) } src.Close() dst.Close() ts, err := layer2.TarStream() if err != nil { t.Fatal(err) } _, err = io.Copy(ioutil.Discard, ts) if err == nil { t.Fatal("expected data verification to fail") } if !strings.Contains(err.Error(), "could not verify layer data") { t.Fatalf("wrong error returned from tarstream: %q", err) } } docker-1.10.3/layer/layer_unix.go000066400000000000000000000002711267010174400166670ustar00rootroot00000000000000// +build linux freebsd darwin package layer import "github.com/docker/docker/pkg/stringid" func (ls *layerStore) mountID(name string) string { return stringid.GenerateRandomID() } docker-1.10.3/layer/layer_windows.go000066400000000000000000000041671267010174400174060ustar00rootroot00000000000000package layer import ( "errors" "fmt" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/daemon/graphdriver" ) // GetLayerPath returns the path to a layer func GetLayerPath(s Store, layer ChainID) (string, error) { ls, ok := s.(*layerStore) if !ok { return "", errors.New("unsupported layer store") } ls.layerL.Lock() defer ls.layerL.Unlock() rl, ok := ls.layerMap[layer] if !ok { return "", ErrLayerDoesNotExist } path, err := 
ls.driver.Get(rl.cacheID, "") if err != nil { return "", err } if err := ls.driver.Put(rl.cacheID); err != nil { return "", err } return path, nil } func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { var err error // this is used for cleanup in existingLayer case diffID := digest.FromBytes([]byte(graphID)) // Create new roLayer layer := &roLayer{ cacheID: graphID, diffID: DiffID(diffID), referenceCount: 1, layerStore: ls, references: map[Layer]struct{}{}, size: size, } tx, err := ls.store.StartTransaction() if err != nil { return nil, err } defer func() { if err != nil { if err := tx.Cancel(); err != nil { logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) } } }() layer.chainID = createChainIDFromParent("", layer.diffID) if !ls.driver.Exists(layer.cacheID) { return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) } if err = storeLayer(tx, layer); err != nil { return nil, err } ls.layerL.Lock() defer ls.layerL.Unlock() if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { // Set error for cleanup, but do not return err = errors.New("layer already exists") return existingLayer.getReference(), nil } if err = tx.Commit(layer.chainID); err != nil { return nil, err } ls.layerMap[layer.chainID] = layer return layer.getReference(), nil } func (ls *layerStore) mountID(name string) string { // windows has issues if container ID doesn't match mount ID return name } func (ls *layerStore) GraphDriver() graphdriver.Driver { return ls.driver } docker-1.10.3/layer/migration.go000066400000000000000000000126311267010174400165040ustar00rootroot00000000000000package layer import ( "compress/gzip" "errors" "fmt" "io" "os" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) // CreateRWLayerByGraphID creates a RWLayer in the layer store using // the provided name with the given graphID. 
To get the RWLayer // after migration the layer may be retrieved by the given name. func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { ls.mountL.Lock() defer ls.mountL.Unlock() m, ok := ls.mounts[name] if ok { if m.parent.chainID != parent { return errors.New("name conflict, mismatched parent") } if m.mountID != graphID { return errors.New("mount already exists") } return nil } if !ls.driver.Exists(graphID) { return fmt.Errorf("graph ID does not exist: %q", graphID) } var p *roLayer if string(parent) != "" { p = ls.get(parent) if p == nil { return ErrLayerDoesNotExist } // Release parent chain if error defer func() { if err != nil { ls.layerL.Lock() ls.releaseLayer(p) ls.layerL.Unlock() } }() } // TODO: Ensure graphID has correct parent m = &mountedLayer{ name: name, parent: p, mountID: graphID, layerStore: ls, references: map[RWLayer]*referencedRWLayer{}, } // Check for existing init layer initID := fmt.Sprintf("%s-init", graphID) if ls.driver.Exists(initID) { m.initID = initID } if err = ls.saveMount(m); err != nil { return err } return nil } func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { defer func() { if err != nil { logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) } }() if oldTarDataPath == "" { err = errors.New("no tar-split file") return } tarDataFile, err := os.Open(oldTarDataPath) if err != nil { return } defer tarDataFile.Close() uncompressed, err := gzip.NewReader(tarDataFile) if err != nil { return } dgst := digest.Canonical.New() err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) if err != nil { return } diffID = DiffID(dgst.Digest()) err = os.RemoveAll(newTarDataPath) if err != nil { return } err = os.Link(oldTarDataPath, newTarDataPath) return } func (ls *layerStore) checksumForGraphIDNoTarsplit(id, 
parent, newTarDataPath string) (diffID DiffID, size int64, err error) { rawarchive, err := ls.driver.Diff(id, parent) if err != nil { return } defer rawarchive.Close() f, err := os.Create(newTarDataPath) if err != nil { return } defer f.Close() mfz := gzip.NewWriter(f) defer mfz.Close() metaPacker := storage.NewJSONPacker(mfz) packerCounter := &packSizeCounter{metaPacker, &size} archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) if err != nil { return } dgst, err := digest.FromReader(archive) if err != nil { return } diffID = DiffID(dgst) return } func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { // err is used to hold the error which will always trigger // cleanup of creates sources but may not be an error returned // to the caller (already exists). var err error var p *roLayer if string(parent) != "" { p = ls.get(parent) if p == nil { return nil, ErrLayerDoesNotExist } // Release parent chain if error defer func() { if err != nil { ls.layerL.Lock() ls.releaseLayer(p) ls.layerL.Unlock() } }() } // Create new roLayer layer := &roLayer{ parent: p, cacheID: graphID, referenceCount: 1, layerStore: ls, references: map[Layer]struct{}{}, diffID: diffID, size: size, chainID: createChainIDFromParent(parent, diffID), } ls.layerL.Lock() defer ls.layerL.Unlock() if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { // Set error for cleanup, but do not return err = errors.New("layer already exists") return existingLayer.getReference(), nil } tx, err := ls.store.StartTransaction() if err != nil { return nil, err } defer func() { if err != nil { logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) if err := tx.Cancel(); err != nil { logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) } } }() tsw, err := tx.TarSplitWriter(false) if err != nil { return nil, err } defer tsw.Close() tdf, err := 
os.Open(tarDataFile) if err != nil { return nil, err } defer tdf.Close() _, err = io.Copy(tsw, tdf) if err != nil { return nil, err } if err = storeLayer(tx, layer); err != nil { return nil, err } if err = tx.Commit(layer.chainID); err != nil { return nil, err } ls.layerMap[layer.chainID] = layer return layer.getReference(), nil } type unpackSizeCounter struct { unpacker storage.Unpacker size *int64 } func (u *unpackSizeCounter) Next() (*storage.Entry, error) { e, err := u.unpacker.Next() if err == nil && u.size != nil { *u.size += e.Size } return e, err } type packSizeCounter struct { packer storage.Packer size *int64 } func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { n, err := p.packer.AddEntry(e) if err == nil && p.size != nil { *p.size += e.Size } return n, err } docker-1.10.3/layer/migration_test.go000066400000000000000000000244561267010174400175530ustar00rootroot00000000000000package layer import ( "bytes" "compress/gzip" "fmt" "io" "io/ioutil" "os" "path/filepath" "testing" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/stringid" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) func writeTarSplitFile(name string, tarContent []byte) error { f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return err } defer f.Close() fz := gzip.NewWriter(f) metaPacker := storage.NewJSONPacker(fz) defer fz.Close() rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) if err != nil { return err } if _, err := io.Copy(ioutil.Discard, rdr); err != nil { return err } return nil } func TestLayerMigration(t *testing.T) { td, err := ioutil.TempDir("", "migration-test-") if err != nil { t.Fatal(err) } defer os.RemoveAll(td) layer1Files := []FileApplier{ newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), newTestFile("/etc/profile", []byte("# Base configuration"), 0644), } layer2Files := 
[]FileApplier{ newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), } tar1, err := tarFromFiles(layer1Files...) if err != nil { t.Fatal(err) } tar2, err := tarFromFiles(layer2Files...) if err != nil { t.Fatal(err) } graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) if err != nil { t.Fatal(err) } graphID1 := stringid.GenerateRandomID() if err := graph.Create(graphID1, "", ""); err != nil { t.Fatal(err) } if _, err := graph.ApplyDiff(graphID1, "", archive.Reader(bytes.NewReader(tar1))); err != nil { t.Fatal(err) } tf1 := filepath.Join(td, "tar1.json.gz") if err := writeTarSplitFile(tf1, tar1); err != nil { t.Fatal(err) } fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) if err != nil { t.Fatal(err) } ls, err := NewStoreFromGraphDriver(fms, graph) if err != nil { t.Fatal(err) } newTarDataPath := filepath.Join(td, ".migration-tardata") diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath) if err != nil { t.Fatal(err) } layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) if err != nil { t.Fatal(err) } layer1b, err := ls.Register(bytes.NewReader(tar1), "") if err != nil { t.Fatal(err) } assertReferences(t, layer1a, layer1b) // Attempt register, should be same layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) if err != nil { t.Fatal(err) } graphID2 := stringid.GenerateRandomID() if err := graph.Create(graphID2, graphID1, ""); err != nil { t.Fatal(err) } if _, err := graph.ApplyDiff(graphID2, graphID1, archive.Reader(bytes.NewReader(tar2))); err != nil { t.Fatal(err) } tf2 := filepath.Join(td, "tar2.json.gz") if err := writeTarSplitFile(tf2, tar2); err != nil { t.Fatal(err) } diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath) if err != nil { t.Fatal(err) } layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size) if err != nil { t.Fatal(err) } 
assertReferences(t, layer2a, layer2b) if metadata, err := ls.Release(layer2a); err != nil { t.Fatal(err) } else if len(metadata) > 0 { t.Fatalf("Unexpected layer removal after first release: %#v", metadata) } metadata, err := ls.Release(layer2b) if err != nil { t.Fatal(err) } assertMetadata(t, metadata, createMetadata(layer2a)) } func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) { t, err := tarFromFiles(files...) if err != nil { return nil, err } if err := graph.Create(graphID, parentID, ""); err != nil { return nil, err } if _, err := graph.ApplyDiff(graphID, parentID, archive.Reader(bytes.NewReader(t))); err != nil { return nil, err } ar, err := graph.Diff(graphID, parentID) if err != nil { return nil, err } defer ar.Close() return ioutil.ReadAll(ar) } func TestLayerMigrationNoTarsplit(t *testing.T) { td, err := ioutil.TempDir("", "migration-test-") if err != nil { t.Fatal(err) } defer os.RemoveAll(td) layer1Files := []FileApplier{ newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), newTestFile("/etc/profile", []byte("# Base configuration"), 0644), } layer2Files := []FileApplier{ newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), } graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) if err != nil { t.Fatal(err) } graphID1 := stringid.GenerateRandomID() graphID2 := stringid.GenerateRandomID() tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) if err != nil { t.Fatal(err) } tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) 
if err != nil { t.Fatal(err) } fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) if err != nil { t.Fatal(err) } ls, err := NewStoreFromGraphDriver(fms, graph) if err != nil { t.Fatal(err) } newTarDataPath := filepath.Join(td, ".migration-tardata") diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath) if err != nil { t.Fatal(err) } layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) if err != nil { t.Fatal(err) } layer1b, err := ls.Register(bytes.NewReader(tar1), "") if err != nil { t.Fatal(err) } assertReferences(t, layer1a, layer1b) // Attempt register, should be same layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) if err != nil { t.Fatal(err) } diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath) if err != nil { t.Fatal(err) } layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size) if err != nil { t.Fatal(err) } assertReferences(t, layer2a, layer2b) if metadata, err := ls.Release(layer2a); err != nil { t.Fatal(err) } else if len(metadata) > 0 { t.Fatalf("Unexpected layer removal after first release: %#v", metadata) } metadata, err := ls.Release(layer2b) if err != nil { t.Fatal(err) } assertMetadata(t, metadata, createMetadata(layer2a)) } func TestMountMigration(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() baseFiles := []FileApplier{ newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), newTestFile("/etc/profile", []byte("# Base configuration"), 0644), } initFiles := []FileApplier{ newTestFile("/etc/hosts", []byte{}, 0644), newTestFile("/etc/resolv.conf", []byte{}, 0644), } mountFiles := []FileApplier{ newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), } initTar, err := 
tarFromFiles(initFiles...) if err != nil { t.Fatal(err) } mountTar, err := tarFromFiles(mountFiles...) if err != nil { t.Fatal(err) } graph := ls.(*layerStore).driver layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) if err != nil { t.Fatal(err) } graphID1 := layer1.(*referencedCacheLayer).cacheID containerID := stringid.GenerateRandomID() containerInit := fmt.Sprintf("%s-init", containerID) if err := graph.Create(containerInit, graphID1, ""); err != nil { t.Fatal(err) } if _, err := graph.ApplyDiff(containerInit, graphID1, archive.Reader(bytes.NewReader(initTar))); err != nil { t.Fatal(err) } if err := graph.Create(containerID, containerInit, ""); err != nil { t.Fatal(err) } if _, err := graph.ApplyDiff(containerID, containerInit, archive.Reader(bytes.NewReader(mountTar))); err != nil { t.Fatal(err) } if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { t.Fatal(err) } rwLayer1, err := ls.GetRWLayer("migration-mount") if err != nil { t.Fatal(err) } if _, err := rwLayer1.Mount(""); err != nil { t.Fatal(err) } changes, err := rwLayer1.Changes() if err != nil { t.Fatal(err) } if expected := 5; len(changes) != expected { t.Logf("Changes %#v", changes) t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) } sortChanges(changes) assertChange(t, changes[0], archive.Change{ Path: "/etc", Kind: archive.ChangeModify, }) assertChange(t, changes[1], archive.Change{ Path: "/etc/hosts", Kind: archive.ChangeModify, }) assertChange(t, changes[2], archive.Change{ Path: "/root", Kind: archive.ChangeModify, }) assertChange(t, changes[3], archive.Change{ Path: "/root/.bashrc", Kind: archive.ChangeModify, }) assertChange(t, changes[4], archive.Change{ Path: "/root/testfile1.txt", Kind: archive.ChangeAdd, }) assertActivityCount(t, rwLayer1, 1) if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil); err == nil { t.Fatal("Expected error creating mount with same name") } 
else if err != ErrMountNameConflict { t.Fatal(err) } rwLayer2, err := ls.GetRWLayer("migration-mount") if err != nil { t.Fatal(err) } if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) { t.Fatal("Expected same layer from get with same name as from migrate") } if _, err := rwLayer2.Mount(""); err != nil { t.Fatal(err) } assertActivityCount(t, rwLayer2, 1) assertActivityCount(t, rwLayer1, 1) if _, err := rwLayer2.Mount(""); err != nil { t.Fatal(err) } assertActivityCount(t, rwLayer2, 2) assertActivityCount(t, rwLayer1, 1) if metadata, err := ls.Release(layer1); err != nil { t.Fatal(err) } else if len(metadata) > 0 { t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) } if err := rwLayer1.Unmount(); err != nil { t.Fatal(err) } assertActivityCount(t, rwLayer2, 2) assertActivityCount(t, rwLayer1, 0) if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil { t.Fatal(err) } if err := rwLayer2.Unmount(); err != nil { t.Fatal(err) } if _, err := ls.ReleaseRWLayer(rwLayer2); err == nil { t.Fatal("Expected error deleting active mount") } if err := rwLayer2.Unmount(); err != nil { t.Fatal(err) } metadata, err := ls.ReleaseRWLayer(rwLayer2) if err != nil { t.Fatal(err) } if len(metadata) == 0 { t.Fatal("Expected base layer to be deleted when deleting mount") } assertMetadata(t, metadata, createMetadata(layer1)) } docker-1.10.3/layer/mount_test.go000066400000000000000000000113001267010174400167040ustar00rootroot00000000000000package layer import ( "io/ioutil" "os" "path/filepath" "sort" "testing" "github.com/docker/docker/pkg/archive" ) func TestMountInit(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) li := initWithFiles(basefile) layer, err := createLayer(ls, "", li) if err != nil { t.Fatal(err) } mountInit := func(root string) error { return initfile.ApplyFile(root) } m, err := ls.CreateRWLayer("fun-mount", 
layer.ChainID(), "", mountInit) if err != nil { t.Fatal(err) } path, err := m.Mount("") if err != nil { t.Fatal(err) } f, err := os.Open(filepath.Join(path, "testfile.txt")) if err != nil { t.Fatal(err) } defer f.Close() fi, err := f.Stat() if err != nil { t.Fatal(err) } b, err := ioutil.ReadAll(f) if err != nil { t.Fatal(err) } if expected := "init data!"; string(b) != expected { t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) } if fi.Mode().Perm() != 0777 { t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) } } func TestMountSize(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() content1 := []byte("Base contents") content2 := []byte("Mutable contents") contentInit := []byte("why am I excluded from the size ☹") li := initWithFiles(newTestFile("file1", content1, 0644)) layer, err := createLayer(ls, "", li) if err != nil { t.Fatal(err) } mountInit := func(root string) error { return newTestFile("file-init", contentInit, 0777).ApplyFile(root) } m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), "", mountInit) if err != nil { t.Fatal(err) } path, err := m.Mount("") if err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { t.Fatal(err) } mountSize, err := m.Size() if err != nil { t.Fatal(err) } if expected := len(content2); int(mountSize) != expected { t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) } } func TestMountChanges(t *testing.T) { ls, _, cleanup := newTestStore(t) defer cleanup() basefiles := []FileApplier{ newTestFile("testfile1.txt", []byte("base data!"), 0644), newTestFile("testfile2.txt", []byte("base data!"), 0644), newTestFile("testfile3.txt", []byte("base data!"), 0644), } initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777) li := initWithFiles(basefiles...) 
layer, err := createLayer(ls, "", li) if err != nil { t.Fatal(err) } mountInit := func(root string) error { return initfile.ApplyFile(root) } m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), "", mountInit) if err != nil { t.Fatal(err) } path, err := m.Mount("") if err != nil { t.Fatal(err) } if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { t.Fatal(err) } if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { t.Fatal(err) } if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { t.Fatal(err) } changes, err := m.Changes() if err != nil { t.Fatal(err) } if expected := 4; len(changes) != expected { t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) } sortChanges(changes) assertChange(t, changes[0], archive.Change{ Path: "/testfile1.txt", Kind: archive.ChangeModify, }) assertChange(t, changes[1], archive.Change{ Path: "/testfile2.txt", Kind: archive.ChangeDelete, }) assertChange(t, changes[2], archive.Change{ Path: "/testfile3.txt", Kind: archive.ChangeModify, }) assertChange(t, changes[3], archive.Change{ Path: "/testfile4.txt", Kind: archive.ChangeAdd, }) } func assertChange(t *testing.T, actual, expected archive.Change) { if actual.Path != expected.Path { t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) } if actual.Kind != expected.Kind { t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) } } func sortChanges(changes []archive.Change) { cs := &changeSorter{ changes: changes, } sort.Sort(cs) } type changeSorter struct { changes []archive.Change } func (cs *changeSorter) Len() int { return len(cs.changes) } func (cs *changeSorter) Swap(i, j int) { cs.changes[i], 
cs.changes[j] = cs.changes[j], cs.changes[i] } func (cs *changeSorter) Less(i, j int) bool { return cs.changes[i].Path < cs.changes[j].Path } docker-1.10.3/layer/mounted_layer.go000066400000000000000000000054571267010174400173720ustar00rootroot00000000000000package layer import ( "io" "sync" "github.com/docker/docker/pkg/archive" ) type mountedLayer struct { name string mountID string initID string parent *roLayer layerStore *layerStore references map[RWLayer]*referencedRWLayer } func (ml *mountedLayer) cacheParent() string { if ml.initID != "" { return ml.initID } if ml.parent != nil { return ml.parent.cacheID } return "" } func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) if err != nil { return nil, err } return archiver, nil } func (ml *mountedLayer) Name() string { return ml.name } func (ml *mountedLayer) Parent() Layer { if ml.parent != nil { return ml.parent } // Return a nil interface instead of an interface wrapping a nil // pointer. 
return nil } func (ml *mountedLayer) Mount(mountLabel string) (string, error) { return ml.layerStore.driver.Get(ml.mountID, mountLabel) } func (ml *mountedLayer) Unmount() error { return ml.layerStore.driver.Put(ml.mountID) } func (ml *mountedLayer) Size() (int64, error) { return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) } func (ml *mountedLayer) Changes() ([]archive.Change, error) { return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) } func (ml *mountedLayer) Metadata() (map[string]string, error) { return ml.layerStore.driver.GetMetadata(ml.mountID) } func (ml *mountedLayer) getReference() RWLayer { ref := &referencedRWLayer{ mountedLayer: ml, } ml.references[ref] = ref return ref } func (ml *mountedLayer) hasReferences() bool { return len(ml.references) > 0 } func (ml *mountedLayer) deleteReference(ref RWLayer) error { rl, ok := ml.references[ref] if !ok { return ErrLayerNotRetained } if err := rl.release(); err != nil { return err } delete(ml.references, ref) return nil } func (ml *mountedLayer) retakeReference(r RWLayer) { if ref, ok := r.(*referencedRWLayer); ok { ref.activityCount = 0 ml.references[ref] = ref } } type referencedRWLayer struct { *mountedLayer activityL sync.Mutex activityCount int } func (rl *referencedRWLayer) release() error { rl.activityL.Lock() defer rl.activityL.Unlock() if rl.activityCount > 0 { return ErrActiveMount } rl.activityCount = -1 return nil } func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { rl.activityL.Lock() defer rl.activityL.Unlock() if rl.activityCount == -1 { return "", ErrLayerNotRetained } rl.activityCount++ return rl.mountedLayer.Mount(mountLabel) } func (rl *referencedRWLayer) Unmount() error { rl.activityL.Lock() defer rl.activityL.Unlock() if rl.activityCount == 0 { return ErrNotMounted } if rl.activityCount == -1 { return ErrLayerNotRetained } rl.activityCount-- return rl.mountedLayer.Unmount() } 
docker-1.10.3/layer/ro_layer.go000066400000000000000000000061131267010174400163250ustar00rootroot00000000000000package layer import ( "fmt" "io" "github.com/docker/distribution/digest" ) type roLayer struct { chainID ChainID diffID DiffID parent *roLayer cacheID string size int64 layerStore *layerStore referenceCount int references map[Layer]struct{} } func (rl *roLayer) TarStream() (io.ReadCloser, error) { r, err := rl.layerStore.store.TarSplitReader(rl.chainID) if err != nil { return nil, err } pr, pw := io.Pipe() go func() { err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) if err != nil { pw.CloseWithError(err) } else { pw.Close() } }() rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) if err != nil { return nil, err } return rc, nil } func (rl *roLayer) ChainID() ChainID { return rl.chainID } func (rl *roLayer) DiffID() DiffID { return rl.diffID } func (rl *roLayer) Parent() Layer { if rl.parent == nil { return nil } return rl.parent } func (rl *roLayer) Size() (size int64, err error) { if rl.parent != nil { size, err = rl.parent.Size() if err != nil { return } } return size + rl.size, nil } func (rl *roLayer) DiffSize() (size int64, err error) { return rl.size, nil } func (rl *roLayer) Metadata() (map[string]string, error) { return rl.layerStore.driver.GetMetadata(rl.cacheID) } type referencedCacheLayer struct { *roLayer } func (rl *roLayer) getReference() Layer { ref := &referencedCacheLayer{ roLayer: rl, } rl.references[ref] = struct{}{} return ref } func (rl *roLayer) hasReference(ref Layer) bool { _, ok := rl.references[ref] return ok } func (rl *roLayer) hasReferences() bool { return len(rl.references) > 0 } func (rl *roLayer) deleteReference(ref Layer) { delete(rl.references, ref) } func (rl *roLayer) depth() int { if rl.parent == nil { return 1 } return rl.parent.depth() + 1 } func storeLayer(tx MetadataTransaction, layer *roLayer) error { if err := tx.SetDiffID(layer.diffID); err != nil { return err } if err := 
tx.SetSize(layer.size); err != nil { return err } if err := tx.SetCacheID(layer.cacheID); err != nil { return err } if layer.parent != nil { if err := tx.SetParent(layer.parent.chainID); err != nil { return err } } return nil } func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { verifier, err := digest.NewDigestVerifier(dgst) if err != nil { return nil, err } return &verifiedReadCloser{ rc: rc, dgst: dgst, verifier: verifier, }, nil } type verifiedReadCloser struct { rc io.ReadCloser dgst digest.Digest verifier digest.Verifier } func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { n, err = vrc.rc.Read(p) if n > 0 { if n, err := vrc.verifier.Write(p[:n]); err != nil { return n, err } } if err == io.EOF { if !vrc.verifier.Verified() { err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) } } return } func (vrc *verifiedReadCloser) Close() error { return vrc.rc.Close() } docker-1.10.3/man/000077500000000000000000000000001267010174400136205ustar00rootroot00000000000000docker-1.10.3/man/Dockerfile000066400000000000000000000004761267010174400156210ustar00rootroot00000000000000FROM golang:1.4 RUN mkdir -p /go/src/github.com/cpuguy83 RUN mkdir -p /go/src/github.com/cpuguy83 \ && git clone -b v1.0.3 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ && cd /go/src/github.com/cpuguy83/go-md2man \ && go get -v ./... CMD ["/go/bin/go-md2man", "--help"] docker-1.10.3/man/Dockerfile.5.md000066400000000000000000000417161267010174400163650ustar00rootroot00000000000000% DOCKERFILE(5) Docker User Manuals % Zac Dover % May 2014 # NAME Dockerfile - automate the steps of creating a Docker image # INTRODUCTION The **Dockerfile** is a configuration file that automates the steps of creating a Docker image. It is similar to a Makefile. 
Docker reads instructions from the **Dockerfile** to automate the steps otherwise performed manually to create an image. To build an image, create a file called **Dockerfile**. The **Dockerfile** describes the steps taken to assemble the image. When the **Dockerfile** has been created, call the `docker build` command, using the path of directory that contains **Dockerfile** as the argument. # SYNOPSIS INSTRUCTION arguments For example: FROM image # DESCRIPTION A Dockerfile is a file that automates the steps of creating a Docker image. A Dockerfile is similar to a Makefile. # USAGE docker build . -- Runs the steps and commits them, building a final image. The path to the source repository defines where to find the context of the build. The build is run by the Docker daemon, not the CLI. The whole context must be transferred to the daemon. The Docker CLI reports `"Sending build context to Docker daemon"` when the context is sent to the daemon. ``` docker build -t repository/tag . ``` -- specifies a repository and tag at which to save the new image if the build succeeds. The Docker daemon runs the steps one-by-one, committing the result to a new image if necessary, before finally outputting the ID of the new image. The Docker daemon automatically cleans up the context it is given. Docker re-uses intermediate images whenever possible. This significantly accelerates the *docker build* process. # FORMAT `FROM image` `FROM image:tag` `FROM image@digest` -- The **FROM** instruction sets the base image for subsequent instructions. A valid Dockerfile must have **FROM** as its first instruction. The image can be any valid image. It is easy to start by pulling an image from the public repositories. -- **FROM** must be the first non-comment instruction in Dockerfile. -- **FROM** may appear multiple times within a single Dockerfile in order to create multiple images. Make a note of the last image ID output by the commit before each new **FROM** command. 
-- If no tag is given to the **FROM** instruction, Docker applies the `latest` tag. If the used tag does not exist, an error is returned. -- If no digest is given to the **FROM** instruction, Docker applies the `latest` tag. If the used tag does not exist, an error is returned. **MAINTAINER** -- **MAINTAINER** sets the Author field for the generated images. Useful for providing users with an email or url for support. **RUN** -- **RUN** has two forms: ``` # the command is run in a shell - /bin/sh -c RUN # Executable form RUN ["executable", "param1", "param2"] ``` -- The **RUN** instruction executes any commands in a new layer on top of the current image and commits the results. The committed image is used for the next step in Dockerfile. -- Layering **RUN** instructions and generating commits conforms to the core concepts of Docker where commits are cheap and containers can be created from any point in the history of an image. This is similar to source control. The exec form makes it possible to avoid shell string munging. The exec form makes it possible to **RUN** commands using a base image that does not contain `/bin/sh`. Note that the exec form is parsed as a JSON array, which means that you must use double-quotes (") around words not single-quotes ('). **CMD** -- **CMD** has three forms: ``` # Executable form CMD ["executable", "param1", "param2"]` # Provide default arguments to ENTRYPOINT CMD ["param1", "param2"]` # the command is run in a shell - /bin/sh -c CMD command param1 param2 ``` -- There should be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only the last **CMD** takes effect. The main purpose of a **CMD** is to provide defaults for an executing container. These defaults may include an executable, or they can omit the executable. If they omit the executable, an **ENTRYPOINT** must be specified. When used in the shell or exec formats, the **CMD** instruction sets the command to be executed when running the image. 
If you use the shell form of the **CMD**, the `` executes in `/bin/sh -c`: Note that the exec form is parsed as a JSON array, which means that you must use double-quotes (") around words not single-quotes ('). ``` FROM ubuntu CMD echo "This is a test." | wc - ``` -- If you run **command** without a shell, then you must express the command as a JSON array and give the full path to the executable. This array form is the preferred form of **CMD**. All additional parameters must be individually expressed as strings in the array: ``` FROM ubuntu CMD ["/usr/bin/wc","--help"] ``` -- To make the container run the same executable every time, use **ENTRYPOINT** in combination with **CMD**. If the user specifies arguments to `docker run`, the specified commands override the default in **CMD**. Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result. **CMD** executes nothing at build time, but specifies the intended command for the image. **LABEL** -- `LABEL = [= ...]`or ``` LABEL [ ] LABEL [ ] ... ``` The **LABEL** instruction adds metadata to an image. A **LABEL** is a key-value pair. To specify a **LABEL** without a value, simply use an empty string. To include spaces within a **LABEL** value, use quotes and backslashes as you would in command-line parsing. ``` LABEL com.example.vendor="ACME Incorporated" LABEL com.example.vendor "ACME Incorporated" LABEL com.example.vendor.is-beta "" LABEL com.example.vendor.is-beta= LABEL com.example.vendor.is-beta="" ``` An image can have more than one label. To specify multiple labels, separate each key-value pair by a space. Labels are additive including `LABEL`s in `FROM` images. As the system encounters and then applies a new label, new `key`s override any previous labels with identical keys. To display an image's labels, use the `docker inspect` command. **EXPOSE** -- `EXPOSE [...]` The **EXPOSE** instruction informs Docker that the container listens on the specified network ports at runtime. 
Docker uses this information to interconnect containers using links and to set up port redirection on the host system. **ENV** -- `ENV ` The **ENV** instruction sets the environment variable to the value ``. This value is passed to all future **RUN**, **ENTRYPOINT**, and **CMD** instructions. This is functionally equivalent to prefixing the command with `=`. The environment variables that are set with **ENV** persist when a container is run from the resulting image. Use `docker inspect` to inspect these values, and change them using `docker run --env =`. Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause unintended consequences, because it will persist when the container is run interactively, as with the following command: `docker run -t -i image bash` **ADD** -- **ADD** has two forms: ``` ADD # Required for paths with whitespace ADD ["",... ""] ``` The **ADD** instruction copies new files, directories or remote file URLs to the filesystem of the container at path ``. Multiple `` resources may be specified but if they are files or directories then they must be relative to the source directory that is being built (the context of the build). The `` is the absolute path, or path relative to **WORKDIR**, into which the source is copied inside the target container. If the `` argument is a local file in a recognized compression format (tar, gzip, bzip2, etc) then it is unpacked at the specified `` in the container's filesystem. Note that only local compressed files will be unpacked, i.e., the URL download and archive unpacking features cannot be used together. All new directories are created with mode 0755 and with the uid and gid of **0**. **COPY** -- **COPY** has two forms: ``` COPY # Required for paths with whitespace COPY ["",... ""] ``` The **COPY** instruction copies new files from `` and adds them to the filesystem of the container at path . 
The `` must be the path to a file or directory relative to the source directory that is being built (the context of the build) or a remote file URL. The `` is an absolute path, or a path relative to **WORKDIR**, into which the source will be copied inside the target container. If you **COPY** an archive file it will land in the container exactly as it appears in the build context without any attempt to unpack it. All new files and directories are created with mode **0755** and with the uid and gid of **0**. **ENTRYPOINT** -- **ENTRYPOINT** has two forms: ``` # executable form ENTRYPOINT ["executable", "param1", "param2"]` # run command in a shell - /bin/sh -c ENTRYPOINT command param1 param2 ``` -- An **ENTRYPOINT** helps you configure a container that can be run as an executable. When you specify an **ENTRYPOINT**, the whole container runs as if it was only that executable. The **ENTRYPOINT** instruction adds an entry command that is not overwritten when arguments are passed to docker run. This is different from the behavior of **CMD**. This allows arguments to be passed to the entrypoint, for instance `docker run -d` passes the -d argument to the **ENTRYPOINT**. Specify parameters either in the **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD** statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run arguments. Parameters specified via **CMD** are overwritten by docker run arguments. Specify a plain string for the **ENTRYPOINT**, and it will execute in `/bin/sh -c`, like a **CMD** instruction: ``` FROM ubuntu ENTRYPOINT wc -l - ``` This means that the Dockerfile's image always takes stdin as input (that's what "-" means), and prints the number of lines (that's what "-l" means). 
To make this optional but default, use a **CMD**: ``` FROM ubuntu CMD ["-l", "-"] ENTRYPOINT ["/usr/bin/wc"] ``` **VOLUME** -- `VOLUME ["/data"]` The **VOLUME** instruction creates a mount point with the specified name and marks it as holding externally-mounted volumes from the native host or from other containers. **USER** -- `USER daemon` Sets the username or UID used for running subsequent commands. The **USER** instruction can optionally be used to set the group or GID. The followings examples are all valid: USER [user | user:group | uid | uid:gid | user:gid | uid:group ] Until the **USER** instruction is set, instructions will be run as root. The USER instruction can be used any number of times in a Dockerfile, and will only affect subsequent commands. **WORKDIR** -- `WORKDIR /path/to/workdir` The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**, **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can be used multiple times in a single Dockerfile. Relative paths are defined relative to the path of the previous **WORKDIR** instruction. For example: ``` WORKDIR /a WORKDIR b WORKDIR c RUN pwd ``` In the above example, the output of the **pwd** command is **a/b/c**. **ARG** -- ARG [=] The `ARG` instruction defines a variable that users can pass at build-time to the builder with the `docker build` command using the `--build-arg =` flag. If a user specifies a build argument that was not defined in the Dockerfile, the build outputs an error. ``` One or more build-args were not consumed, failing build. ``` The Dockerfile author can define a single variable by specifying `ARG` once or many variables by specifying `ARG` more than once. For example, a valid Dockerfile: ``` FROM busybox ARG user1 ARG buildno ... ``` A Dockerfile author may optionally specify a default value for an `ARG` instruction: ``` FROM busybox ARG user1=someuser ARG buildno=1 ... 
``` If an `ARG` value has a default and if there is no value passed at build-time, the builder uses the default. An `ARG` variable definition comes into effect from the line on which it is defined in the `Dockerfile` not from the argument's use on the command-line or elsewhere. For example, consider this Dockerfile: ``` 1 FROM busybox 2 USER ${user:-some_user} 3 ARG user 4 USER $user ... ``` A user builds this file by calling: ``` $ docker build --build-arg user=what_user Dockerfile ``` The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is defined and the `what_user` value was passed on the command line. Prior to its definition by an `ARG` instruction, any use of a variable results in an empty string. > **Note:** It is not recommended to use build-time variables for > passing secrets like github keys, user credentials etc. You can use an `ARG` or an `ENV` instruction to specify variables that are available to the `RUN` instruction. Environment variables defined using the `ENV` instruction always override an `ARG` instruction of the same name. Consider this Dockerfile with an `ENV` and `ARG` instruction. ``` 1 FROM ubuntu 2 ARG CONT_IMG_VER 3 ENV CONT_IMG_VER v1.0.0 4 RUN echo $CONT_IMG_VER ``` Then, assume this image is built with this command: ``` $ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile ``` In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting passed by the user:`v2.0.1` This behavior is similar to a shell script where a locally scoped variable overrides the variables passed as arguments or inherited from environment, from its point of definition. 
Using the example above but a different `ENV` specification you can create more useful interactions between `ARG` and `ENV` instructions: ``` 1 FROM ubuntu 2 ARG CONT_IMG_VER 3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} 4 RUN echo $CONT_IMG_VER ``` Unlike an `ARG` instruction, `ENV` values are always persisted in the built image. Consider a docker build without the --build-arg flag: ``` $ docker build Dockerfile ``` Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. The variable expansion technique in this example allows you to pass arguments from the command line and persist them in the final image by leveraging the `ENV` instruction. Variable expansion is only supported for [a limited set of Dockerfile instructions.](#environment-replacement) Docker has a set of predefined `ARG` variables that you can use without a corresponding `ARG` instruction in the Dockerfile. * `HTTP_PROXY` * `http_proxy` * `HTTPS_PROXY` * `https_proxy` * `FTP_PROXY` * `ftp_proxy` * `NO_PROXY` * `no_proxy` To use these, simply pass them on the command line using the `--build-arg =` flag. **ONBUILD** -- `ONBUILD [INSTRUCTION]` The **ONBUILD** instruction adds a trigger instruction to an image. The trigger is executed at a later time, when the image is used as the base for another build. Docker executes the trigger in the context of the downstream build, as if the trigger existed immediately after the **FROM** instruction in the downstream Dockerfile. You can register any build instruction as a trigger. A trigger is useful if you are defining an image to use as a base for building other images. For example, if you are defining an application build environment or a daemon that is customized with a user-specific configuration. Consider an image intended as a reusable python application builder. 
It must add application source code to a particular directory, and might need a build script called after that. You can't just call **ADD** and **RUN** now, because you don't yet have access to the application source code, and it is different for each application build. -- Providing application developers with a boilerplate Dockerfile to copy-paste into their application is inefficient, error-prone, and difficult to update because it mixes with application-specific code. The solution is to use **ONBUILD** to register instructions in advance, to run later, during the next build stage. # HISTORY *May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. *Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability *Sept 2015, updated by Sally O'Malley (somalley@redhat.com) docker-1.10.3/man/README.md000066400000000000000000000023011267010174400150730ustar00rootroot00000000000000Docker Documentation ==================== This directory contains the Docker user manual in the Markdown format. Do *not* edit the man pages in the man1 directory. Instead, amend the Markdown (*.md) files. # Generating man pages from the Markdown files The recommended approach for generating the man pages is via a Docker container using the supplied `Dockerfile` to create an image with the correct environment. This uses `go-md2man`, a pure Go Markdown to man page generator. ## Building the md2man image There is a `Dockerfile` provided in the `/man` directory of your 'docker/docker' fork. Using this `Dockerfile`, create a Docker image tagged `docker/md2man`: docker build -t docker/md2man . ## Utilizing the image From within the `/man` directory run the following command: docker run -v $(pwd):/man -w /man -i docker/md2man ./md2man-all.sh The `md2man` Docker container will process the Markdown files and generate the man pages inside the `/man/man1` directory of your fork using Docker volumes. 
For more information on Docker volumes see the man page for `docker run` and also look at the article [Sharing Directories via Volumes](https://docs.docker.com/use/working_with_volumes/). docker-1.10.3/man/config-json.5.md000066400000000000000000000060611267010174400165240ustar00rootroot00000000000000% CONFIG.JSON(5) Docker User Manuals % Docker Community % JANUARY 2016 # NAME HOME/.docker/config.json - Default Docker configuration file # INTRODUCTION By default, the Docker command line stores its configuration files in a directory called `.docker` within your `HOME` directory. Docker manages most of the files in the configuration directory and you should not modify them. However, you *can modify* the `config.json` file to control certain aspects of how the `docker` command behaves. Currently, you can modify the `docker` command behavior using environment variables or command-line options. You can also use options within `config.json` to modify some of the same behavior. When using these mechanisms, you must keep in mind the order of precedence among them. Command line options override environment variables and environment variables override properties you specify in a `config.json` file. The `config.json` file stores a JSON encoding of several properties: * The `HttpHeaders` property specifies a set of headers to include in all messages sent from the Docker client to the daemon. Docker does not try to interpret or understand these headers; it simply puts them into the messages. Docker does not allow these headers to change any headers it sets for itself. * The `psFormat` property specifies the default format for `docker ps` output. When the `--format` flag is not provided with the `docker ps` command, Docker's client uses this property. If this property is not set, the client falls back to the default table format. For a list of supported formatting directives, see **docker-ps(1)**. 
* The `detachKeys` property specifies the default key sequence which detaches the container. When the `--detach-keys` flag is not provided with the `docker attach`, `docker exec`, `docker run` or `docker start`, Docker's client uses this property. If this property is not set, the client falls back to the default sequence `ctrl-p,ctrl-q`. * The `imagesFormat` property specifies the default format for `docker images` output. When the `--format` flag is not provided with the `docker images` command, Docker's client uses this property. If this property is not set, the client falls back to the default table format. For a list of supported formatting directives, see **docker-images(1)**. You can specify a different location for the configuration files via the `DOCKER_CONFIG` environment variable or the `--config` command line option. If both are specified, then the `--config` option overrides the `DOCKER_CONFIG` environment variable: docker --config ~/testconfigs/ ps This command instructs Docker to use the configuration files in the `~/testconfigs/` directory when running the `ps` command. ## Examples Following is a sample `config.json` file: { "HttpHeaders": { "MyHeader": "MyValue" }, "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", "detachKeys": "ctrl-e,e" } # HISTORY January 2016, created by Moxiegirl docker-1.10.3/man/docker-attach.1.md000066400000000000000000000077761267010174400170230ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-attach - Attach to a running container # SYNOPSIS **docker attach** [**--detach-keys**[=*[]*]] [**--help**] [**--no-stdin**] [**--sig-proxy**[=*true*]] CONTAINER # DESCRIPTION The **docker attach** command allows you to attach to a running container using the container's ID or name, either to view its ongoing output or to control it interactively. 
You can attach to the same contained process multiple times simultaneously, screen sharing style, or quickly view the progress of your detached process. To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the container. You can detach from the container (and leave it running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`. You configure the key sequence using the **--detach-keys** option or a configuration file. See **config-json(5)** for documentation on using a configuration file. It is forbidden to redirect the standard input of a `docker attach` command while attaching to a tty-enabled container (i.e.: launched with `-t`). # OPTIONS **--detach-keys**="" Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. **--help** Print usage statement **--no-stdin**=*true*|*false* Do not attach STDIN. The default is *false*. **--sig-proxy**=*true*|*false* Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. # Override the detach sequence If you want, you can configure an override of the Docker key sequence for detach. This is useful if the Docker default sequence conflicts with a key sequence you use for other applications. There are two ways to define your own detach key sequence, as a per-container override or as a configuration property on your entire configuration. To override the sequence for an individual container, use the `--detach-keys="<sequence>"` flag with the `docker attach` command. The format of the `<sequence>` is either a letter [a-Z], or the `ctrl-` combined with any of the following: * `a-z` (a single lowercase alpha character) * `@` (at sign) * `[` (left bracket) * `\\` (two backward slashes) * `_` (underscore) * `^` (caret) These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key sequences. 
To configure a different configuration default key sequence for all containers, see **docker(1)**. # EXAMPLES ## Attaching to a container In this example the top command is run inside a container, from an image called fedora, in detached mode. The ID from the container is passed into the **docker attach** command: # ID=$(sudo docker run -d fedora /usr/bin/top -b) # sudo docker attach $ID top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st Mem: 373572k total, 355560k used, 18012k free, 27872k buffers Swap: 786428k total, 0k used, 786428k free, 221740k cached PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st Mem: 373572k total, 355244k used, 18328k free, 27872k buffers Swap: 786428k total, 0k used, 786428k free, 221776k cached PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-build.1.md000066400000000000000000000304731267010174400166540ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-build - Build a new image from the source code at PATH # SYNOPSIS **docker build** [**--build-arg**[=*[]*]] [**--cpu-shares**[=*0*]] [**--cgroup-parent**[=*CGROUP-PARENT*]] [**--help**] [**-f**|**--file**[=*PATH/Dockerfile*]] [**--force-rm**] [**--isolation**[=*default*]] [**--no-cache**] [**--pull**] [**-q**|**--quiet**] [**--rm**[=*true*]] [**-t**|**--tag**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] [**--memory-swap**[=*LIMIT*]] [**--shm-size**[=*SHM-SIZE*]] [**--cpu-period**[=*0*]] [**--cpu-quota**[=*0*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] [**--cpuset-mems**[=*CPUSET-MEMS*]] [**--ulimit**[=*[]*]] PATH | URL | - # DESCRIPTION This will read the Dockerfile from the directory specified in **PATH**. It also sends any other files and directories found in the current directory to the Docker daemon. The contents of this directory would be used by **ADD** commands found within the Dockerfile. Warning, this will send a lot of data to the Docker daemon depending on the contents of the current directory. The build is run by the Docker daemon, not by the CLI, so the whole context must be transferred to the daemon. The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to the daemon. When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from the client to the Docker daemon. In this case, the Dockerfile at the root of the archive and the rest of the archive will get used as the context of the build. When a Git repository is set as the **URL**, the repository is cloned locally and then sent as the context. # OPTIONS **-f**, **--file**=*PATH/Dockerfile* Path to the Dockerfile to use. 
If the path is a relative path and you are building from a local directory, then the path must be relative to that directory. If you are building from a remote URL pointing to either a tarball or a Git repository, then the path must be relative to the root of the remote context. In all cases, the file must be within the build context. The default is *Dockerfile*. **--build-arg**=*variable* name and value of a **buildarg**. For example, if you want to pass a value for `http_proxy`, use `--build-arg=http_proxy="http://some.proxy.url"` Users pass these values at build-time. Docker uses the `buildargs` as the environment context for command(s) run via the Dockerfile's `RUN` instruction or for variable expansion in other Dockerfile instructions. This is not meant for passing secret values. [Read more about the buildargs instruction](/reference/builder/#arg) **--force-rm**=*true*|*false* Always remove intermediate containers, even after unsuccessful builds. The default is *false*. **--isolation**="*default*" Isolation specifies the type of isolation technology used by containers. **--no-cache**=*true*|*false* Do not use cache when building the image. The default is *false*. **--help** Print usage statement **--pull**=*true*|*false* Always attempt to pull a newer version of the image. The default is *false*. **-q**, **--quiet**=*true*|*false* Suppress the build output and print image ID on success. The default is *false*. **--rm**=*true*|*false* Remove intermediate containers after a successful build. The default is *true*. **-t**, **--tag**="" Repository names (and optionally with tags) to be applied to the resulting image in case of success. **-m**, **--memory**=*MEMORY* Memory limit **--memory-swap**=*LIMIT* A limit value equal to memory plus swap. Must be used with the **-m** (**--memory**) flag. The swap `LIMIT` should always be larger than **-m** (**--memory**) value. The format of `LIMIT` is `[]`. 
Unit can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. **--shm-size**=*SHM-SIZE* Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. **--cpu-shares**=*0* CPU shares (relative weight). By default, all containers get the same proportion of CPU cycles. CPU shares is a 'relative weight', relative to the default setting of 1024. This default value is defined here: ``` cat /sys/fs/cgroup/cpu/cpu.shares 1024 ``` You can change this proportion by adjusting the container's CPU share weighting relative to the weighting of all other running containers. To modify the proportion from the default of 1024, use the **--cpu-shares** flag to set the weighting to 2 or higher. Container CPU share Flag {C0} 60% of CPU --cpu-shares=614 (614 is 60% of 1024) {C1} 40% of CPU --cpu-shares=410 (410 is 40% of 1024) The proportion is only applied when CPU-intensive processes are running. When tasks in one container are idle, the other containers can use the left-over CPU time. The actual amount of CPU time used varies depending on the number of containers running on the system. For example, consider three containers, where one has **--cpu-shares=1024** and two others have **--cpu-shares=512**. When processes in all three containers attempt to use 100% of CPU, the first container would receive 50% of the total CPU time. If you add a fourth container with **--cpu-shares=1024**, the first container only gets 33% of the CPU. The remaining containers receive 16.5%, 16.5% and 33% of the CPU. 
Container CPU share Flag CPU time {C0} 100% --cpu-shares=1024 33% {C1} 50% --cpu-shares=512 16.5% {C2} 50% --cpu-shares=512 16.5% {C4} 100% --cpu-shares=1024 33% On a multi-core system, the shares of CPU time are distributed across the CPU cores. Even if a container is limited to less than 100% of CPU time, it can use 100% of each individual CPU core. For example, consider a system with more than three cores. If you start one container **{C0}** with **--cpu-shares=512** running one process, and another container **{C1}** with **--cpu-shares=1024** running two processes, this can result in the following division of CPU shares: PID container CPU CPU share 100 {C0} 0 100% of CPU0 101 {C1} 1 100% of CPU1 102 {C1} 2 100% of CPU2 **--cpu-period**=*0* Limit the CPU CFS (Completely Fair Scheduler) period. Limit the container's CPU usage. This flag causes the kernel to restrict the container's CPU usage to the period you specify. **--cpu-quota**=*0* Limit the CPU CFS (Completely Fair Scheduler) quota. By default, containers run with the full CPU resource. This flag causes the kernel to restrict the container's CPU usage to the quota you specify. **--cpuset-cpus**=*CPUSET-CPUS* CPUs in which to allow execution (0-3, 0,1). **--cpuset-mems**=*CPUSET-MEMS* Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` to ensure the processes in your Docker container only use memory from the first two memory nodes. **--cgroup-parent**=*CGROUP-PARENT* Path to `cgroups` under which the container's `cgroup` are created. If the path is not absolute, the path is considered relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. 
**--ulimit**=[] Ulimit options For more information about `ulimit` see [Setting ulimits in a container](https://docs.docker.com/reference/commandline/run/#setting-ulimits-in-a-container) # EXAMPLES ## Building an image using a Dockerfile located inside the current directory Docker images can be built using the build command and a Dockerfile: docker build . During the build process Docker creates intermediate images. In order to keep them, you must explicitly set `--rm=false`. docker build --rm=false . A good practice is to make a sub-directory with a related name and create the Dockerfile in that directory. For example, a directory called mongo may contain a Dockerfile to create a Docker MongoDB image. Likewise, another directory called httpd may be used to store Dockerfiles for Apache web server images. It is also a good practice to add the files required for the image to the sub-directory. These files will then be specified with the `COPY` or `ADD` instructions in the `Dockerfile`. Note: If you include a tar file (a good practice), then Docker will automatically extract the contents of the tar file specified within the `ADD` instruction into the specified target. ## Building an image and naming that image A good practice is to give a name to the image you are building. Note that only a-z0-9-_. should be used for consistency. There are no hard rules here but it is best to give the names consideration. The **-t**/**--tag** flag is used to rename an image. Here are some examples: Though it is not a good practice, image names can be arbitrary: docker build -t myimage . A better approach is to provide a fully qualified and meaningful repository, name, and tag (where the tag in this context means the qualifier after the ":"). In this example we build a JBoss image for the Fedora repository and give it the version 1.0: docker build -t fedora/jboss:1.0 . 
The next example is for the "whenry" user repository and uses Fedora and JBoss and gives it the version 2.1 : docker build -t whenry/fedora-jboss:v2.1 . If you do not provide a version tag then Docker will assign `latest`: docker build -t whenry/fedora-jboss . When you list the images, the image above will have the tag `latest`. You can apply multiple tags to an image. For example, you can apply the `latest` tag to a newly built image and add another tag that references a specific version. For example, to tag an image both as `whenry/fedora-jboss:latest` and `whenry/fedora-jboss:v2.1`, use the following: docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . So renaming an image is arbitrary but consideration should be given to a useful convention that makes sense for consumers and should also take into account Docker community conventions. ## Building an image using a URL This will clone the specified GitHub repository from the URL and use it as context. The Dockerfile at the root of the repository is used as Dockerfile. This only works if the GitHub repository is a dedicated repository. docker build github.com/scollier/purpletest Note: You can set an arbitrary Git repository via the `git://` schema. ## Building an image using a URL to a tarball'ed context This will send the URL itself to the Docker daemon. The daemon will fetch the tarball archive, decompress it and use its contents as the build context. The Dockerfile at the root of the archive and the rest of the archive will get used as the context of the build. If you pass an **-f PATH/Dockerfile** option as well, the system will look for that file inside the contents of the tarball. docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression). 
## Specify isolation technology for container (--isolation) This option is useful in situations where you are running Docker containers on Windows. The `--isolation=` option sets a container's isolation technology. On Linux, the only supported is the `default` option which uses Linux namespaces. On Microsoft Windows, you can specify these values: * `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. * `process`: Namespace isolation only. * `hyperv`: Hyper-V hypervisor partition-based isolation. Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. # HISTORY March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit June 2015, updated by Sally O'Malley docker-1.10.3/man/docker-commit.1.md000066400000000000000000000046551267010174400170500ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-commit - Create a new image from a container's changes # SYNOPSIS **docker commit** [**-a**|**--author**[=*AUTHOR*]] [**-c**|**--change**[=\[*DOCKERFILE INSTRUCTIONS*\]]] [**--help**] [**-m**|**--message**[=*MESSAGE*]] [**-p**|**--pause**[=*true*]] CONTAINER [REPOSITORY[:TAG]] # DESCRIPTION Create a new image from an existing container specified by name or container ID. The new image will contain the contents of the container filesystem, *excluding* any data volumes. While the `docker commit` command is a convenient way of extending an existing image, you should prefer the use of a Dockerfile and `docker build` for generating images that you intend to share with other people. 
# OPTIONS **-a**, **--author**="" Author (e.g., "John Hannibal Smith ") **-c** , **--change**=[] Apply specified Dockerfile instructions while committing the image Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` **--help** Print usage statement **-m**, **--message**="" Commit message **-p**, **--pause**=*true*|*false* Pause container during commit. The default is *true*. # EXAMPLES ## Creating a new image from an existing container An existing Fedora based container has had Apache installed while running in interactive mode with the bash shell. Apache is also running. To create a new image run `docker ps` to find the container's ID and then run: # docker commit -m="Added Apache to Fedora base image" \ -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 Note that only a-z0-9-_. are allowed when naming images from an existing container. ## Apply specified Dockerfile instructions while committing the image If an existing container was created without the DEBUG environment variable set to "true", you can create a new image based on that container by first getting the container's ID with `docker ps` and then running: # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and in June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit Oct 2014, updated by Daniel, Dao Quang Minh June 2015, updated by Sally O'Malley docker-1.10.3/man/docker-cp.1.md000066400000000000000000000163231267010174400161550ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-cp - Copy files/folders between a container and the local filesystem. 
# SYNOPSIS **docker cp** [**--help**] CONTAINER:SRC_PATH DEST_PATH|- **docker cp** [**--help**] SRC_PATH|- CONTAINER:DEST_PATH # DESCRIPTION The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. You can copy from the container's file system to the local machine or the reverse, from the local filesystem to the container. If `-` is specified for either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from `STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. The `SRC_PATH` or `DEST_PATH` can be a file or directory. The `docker cp` command assumes container paths are relative to the container's `/` (root) directory. This means supplying the initial forward slash is optional; the command sees `compassionate_darwin:/tmp/foo/myfile.txt` and `compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can be an absolute or relative value. The command interprets a local machine's relative paths as relative to the current working directory where `docker cp` is run. The `cp` command behaves like the Unix `cp -a` command in that directories are copied recursively with permissions preserved if possible. Ownership is set to the user and primary group at the destination. For example, files copied to a container are created with `UID:GID` of the root user. Files copied to the local machine are created with the `UID:GID` of the user which invoked the `docker cp` command. If you specify the `-L` option, `docker cp` follows any symbolic link in the `SRC_PATH`. Assuming a path separator of `/`, a first argument of `SRC_PATH` and second argument of `DEST_PATH`, the behavior is as follows: - `SRC_PATH` specifies a file - `DEST_PATH` does not exist - the file is saved to a file created at `DEST_PATH` - `DEST_PATH` does not exist and ends with `/` - Error condition: the destination directory must exist. 
- `DEST_PATH` exists and is a file - the destination is overwritten with the source file's contents - `DEST_PATH` exists and is a directory - the file is copied into this directory using the basename from `SRC_PATH` - `SRC_PATH` specifies a directory - `DEST_PATH` does not exist - `DEST_PATH` is created as a directory and the *contents* of the source directory are copied into this directory - `DEST_PATH` exists and is a file - Error condition: cannot copy a directory to a file - `DEST_PATH` exists and is a directory - `SRC_PATH` does not end with `/.` - the source directory is copied into this directory - `SRC_PATH` does end with `/.` - the *content* of the source directory is copied into this directory The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not the target, is copied by default. To copy the link target and not the link, specify the `-L` option. A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local machine, for example `file:name.txt`. If you use a `:` in a local machine path, you must be explicit with a relative or absolute path, for example: `/path/to/file:name.txt` or `./file:name.txt` It is not possible to copy certain system files such as resources under `/proc`, `/sys`, `/dev`, and mounts created by the user in the container. Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. The command extracts the content of the tar to the `DEST_PATH` in container's filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. 
# OPTIONS **-L**, **--follow-link**=*true*|*false* Follow symbolic links in SRC_PATH **--help** Print usage statement # EXAMPLES Suppose a container has finished producing some output as a file it saves somewhere in its filesystem. This could be the output of a build job or some other computation. You can copy these outputs from the container to a location on your local host. If you want to copy the `/tmp/foo` directory from a container to the existing `/tmp` directory on your host. If you run `docker cp` in your `~` (home) directory on the local host: $ docker cp compassionate_darwin:tmp/foo /tmp Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit the leading slash in the command. If you execute this command from your home directory: $ docker cp compassionate_darwin:tmp/foo tmp If `~/tmp` does not exist, Docker will create it and copy the contents of `/tmp/foo` from the container into this new directory. If `~/tmp` already exists as a directory, then Docker will copy the contents of `/tmp/foo` from the container into a directory at `~/tmp/foo`. When copying a single file to an existing `LOCALPATH`, the `docker cp` command will either overwrite the contents of `LOCALPATH` if it is a file or place it into `LOCALPATH` if it is a directory, overwriting an existing file of the same name if one exists. For example, this command: $ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /test If `/test` does not exist on the local machine, it will be created as a file with the contents of `/tmp/foo/myfile.txt` from the container. If `/test` exists as a file, it will be overwritten. Lastly, if `/test` exists as a directory, the file will be copied to `/test/myfile.txt`. Next, suppose you want to copy a file or folder into a container. For example, this could be a configuration file or some other input to a long running computation that you would like to place into a created container before it starts. 
This is useful because it does not require the configuration file or other input to exist in the container image. If you have a file, `config.yml`, in the current directory on your local host and wish to copy it to an existing directory at `/etc/my-app.d` in a container, this command can be used: $ docker cp config.yml myappcontainer:/etc/my-app.d If you have several files in a local directory `/config` which you need to copy to a directory `/etc/my-app.d` in a container: $ docker cp /config/. myappcontainer:/etc/my-app.d The above command will copy the contents of the local `/config` directory into the directory `/etc/my-app.d` in the container. Finally, if you want to copy a symbolic link into a container, you typically want to copy the linked target and not the link itself. To copy the target, use the `-L` option, for example: $ ln -s /tmp/somefile /tmp/somefile.ln $ docker cp -L /tmp/somefile.ln myappcontainer:/tmp/ This command copies content of the local `/tmp/somefile` into the file `/tmp/somefile.ln` in the container. Without `-L` option, the `/tmp/somefile.ln` preserves its symbolic link but not its content. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit May 2015, updated by Josh Hawn docker-1.10.3/man/docker-create.1.md000066400000000000000000000433541267010174400170220ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-create - Create a new container # SYNOPSIS **docker create** [**-a**|**--attach**[=*[]*]] [**--add-host**[=*[]*]] [**--blkio-weight**[=*[BLKIO-WEIGHT]*]] [**--blkio-weight-device**[=*[]*]] [**--cpu-shares**[=*0*]] [**--cap-add**[=*[]*]] [**--cap-drop**[=*[]*]] [**--cgroup-parent**[=*CGROUP-PATH*]] [**--cidfile**[=*CIDFILE*]] [**--cpu-period**[=*0*]] [**--cpu-quota**[=*0*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] [**--cpuset-mems**[=*CPUSET-MEMS*]] [**--device**[=*[]*]] [**--device-read-bps**[=*[]*]] [**--device-read-iops**[=*[]*]] [**--device-write-bps**[=*[]*]] [**--device-write-iops**[=*[]*]] [**--dns**[=*[]*]] [**--dns-search**[=*[]*]] [**--dns-opt**[=*[]*]] [**-e**|**--env**[=*[]*]] [**--entrypoint**[=*ENTRYPOINT*]] [**--env-file**[=*[]*]] [**--expose**[=*[]*]] [**--group-add**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**--help**] [**-i**|**--interactive**] [**--ip**[=*IPv4-ADDRESS*]] [**--ip6**[=*IPv6-ADDRESS*]] [**--ipc**[=*IPC*]] [**--isolation**[=*default*]] [**--kernel-memory**[=*KERNEL-MEMORY*]] [**-l**|**--label**[=*[]*]] [**--label-file**[=*[]*]] [**--link**[=*[]*]] [**--log-driver**[=*[]*]] [**--log-opt**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] [**--mac-address**[=*MAC-ADDRESS*]] [**--memory-reservation**[=*MEMORY-RESERVATION*]] [**--memory-swap**[=*LIMIT*]] [**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] [**--net-alias**[=*[]*]] [**--oom-kill-disable**] [**--oom-score-adj**[=*0*]] [**-P**|**--publish-all**] [**-p**|**--publish**[=*[]*]] [**--pid**[=*[]*]] [**--privileged**] [**--read-only**] [**--restart**[=*RESTART*]] [**--security-opt**[=*[]*]] [**--stop-signal**[=*SIGNAL*]] [**--shm-size**[=*[]*]] [**-t**|**--tty**] 
[**--tmpfs**[=*[CONTAINER-DIR[:]*]] [**-u**|**--user**[=*USER*]] [**--ulimit**[=*[]*]] [**--uts**[=*[]*]] [**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] [**--volume-driver**[=*DRIVER*]] [**--volumes-from**[=*[]*]] [**-w**|**--workdir**[=*WORKDIR*]] IMAGE [COMMAND] [ARG...] # DESCRIPTION Creates a writeable container layer over the specified image and prepares it for running the specified command. The container ID is then printed to STDOUT. This is similar to **docker run -d** except the container is never started. You can then use the **docker start ** command to start the container at any point. The initial status of the container created with **docker create** is 'created'. # OPTIONS **-a**, **--attach**=[] Attach to STDIN, STDOUT or STDERR. **--add-host**=[] Add a custom host-to-IP mapping (host:ip) **--blkio-weight**=*0* Block IO weight (relative weight) accepts a weight value between 10 and 1000. **--blkio-weight-device**=[] Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). **--cpu-shares**=*0* CPU shares (relative weight) **--cap-add**=[] Add Linux capabilities **--cap-drop**=[] Drop Linux capabilities **--cgroup-parent**="" Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. **--cidfile**="" Write the container ID to the file **--cpu-period**=*0* Limit the CPU CFS (Completely Fair Scheduler) period **--cpuset-cpus**="" CPUs in which to allow execution (0-3, 0,1) **--cpuset-mems**="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` then processes in your Docker container will only use memory from the first two memory nodes. 
**--cpu-quota**=*0* Limit the CPU CFS (Completely Fair Scheduler) quota **--device**=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--device-read-bps**=[] Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb) **--device-read-iops**=[] Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000) **--device-write-bps**=[] Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb) **--device-write-iops**=[] Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000) **--dns**=[] Set custom DNS servers **--dns-opt**=[] Set custom DNS options **--dns-search**=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) **-e**, **--env**=[] Set environment variables **--entrypoint**="" Overwrite the default ENTRYPOINT of the image **--env-file**=[] Read in a line-delimited file of environment variables **--expose**=[] Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host **--group-add**=[] Add additional groups to run as **-h**, **--hostname**="" Container host name **--help** Print usage statement **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. **--ip**="" Sets the container's interface IPv4 address (e.g. 172.23.0.9) It can only be used in conjunction with **--net** for user-defined networks **--ip6**="" Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) It can only be used in conjunction with **--net** for user-defined networks **--ipc**="" Default is to create a private IPC namespace (POSIX SysV IPC) for the container 'container:': reuses another container shared memory, semaphores and message queues 'host': use the host shared memory,semaphores and message queues inside the container. 
Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. **--isolation**="*default*" Isolation specifies the type of isolation technology used by containers. **--kernel-memory**="" Kernel memory limit (format: `[]`, where unit = b, k, m or g) Constrains the kernel memory available to a container. If a limit of 0 is specified (not using `--kernel-memory`), the container's kernel memory is not limited. If you specify a limit, it may be rounded up to a multiple of the operating system's page size and the value can be very large, millions of trillions. **-l**, **--label**=[] Adds metadata to a container (e.g., --label=com.example.key=value) **--label-file**=[] Read labels from a file. Delimit each label with an EOL. **--link**=[] Add link to another container in the form of :alias or just in which case the alias will match the name. **--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*none*" Logging driver for container. Default is defined by daemon `--log-driver` flag. **Warning**: the `docker logs` command works only for the `json-file` and `journald` logging drivers. **--log-opt**=[] Logging driver specific options. **-m**, **--memory**="" Memory limit (format: [], where unit = b, k, m or g) Allows you to constrain the memory available to a container. If the host supports swap memory, then the **-m** memory setting can be larger than physical RAM. If a limit of 0 is specified (not using **-m**), the container's memory is not limited. The actual limit may be rounded up to a multiple of the operating system's page size (the value would be very large, that's millions of trillions). **--mac-address**="" Container MAC address (e.g. 
92:d0:c6:0a:29:33) **--memory-reservation**="" Memory soft limit (format: [], where unit = b, k, m or g) After setting memory reservation, when the system detects memory contention or low memory, containers are forced to restrict their consumption to their reservation. So you should always set the value below **--memory**, otherwise the hard limit will take precedence. By default, memory reservation will be the same as memory limit. **--memory-swap**="LIMIT" A limit value equal to memory plus swap. Must be used with the **-m** (**--memory**) flag. The swap `LIMIT` should always be larger than **-m** (**--memory**) value. The format of `LIMIT` is `[]`. Unit can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. **--memory-swappiness**="" Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. **--name**="" Assign a name to the container **--net**="*bridge*" Set the Network mode for the container 'bridge': create a network stack on the default Docker bridge 'none': no networking 'container:': reuse another container's network stack 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. '|': connect to a user-defined network **--net-alias**=[] Add network-scoped alias for the container **--oom-kill-disable**=*true*|*false* Whether to disable OOM Killer for the container or not. **--oom-score-adj**="" Tune the host's OOM preferences for containers (accepts -1000 to 1000) **-P**, **--publish-all**=*true*|*false* Publish all exposed ports to random ports on the host interfaces. The default is *false*. 
**-p**, **--publish**=[] Publish a container's port, or a range of ports, to the host format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort Both hostPort and containerPort can be specified as a range of ports. When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) (use 'docker port' to see the actual mapping) **--pid**=*host* Set the PID mode for the container **host**: use the host's PID namespace inside the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. **--privileged**=*true*|*false* Give extended privileges to this container. The default is *false*. **--read-only**=*true*|*false* Mount the container's root filesystem as read only. **--restart**="*no*" Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). **--shm-size**="" Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. **--security-opt**=[] Security Options **--stop-signal**=*SIGTERM* Signal to stop a container. Default is SIGTERM. **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. **--tmpfs**=[] Create a tmpfs mount Mount a temporary filesystem (`tmpfs`) mount into a container, for example: $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image This command mounts a `tmpfs` at `/tmp` within the container. The supported mount options are the same as the Linux default `mount` flags. If you do not specify any options, the system uses the following options: `rw,noexec,nosuid,nodev,size=65536k`. 
**-u**, **--user**="" Username or UID **--ulimit**=[] Ulimit options **--uts**=*host* Set the UTS mode for the container **host**: use the host's UTS namespace inside the container. Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. **-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker container. If 'HOST-DIR' is omitted, Docker automatically creates the new volume on the host. The `OPTIONS` are a comma delimited list and can be: * [rw|ro] * [z|Z] * [`[r]shared`|`[r]slave`|`[r]private`] The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` can be an absolute path or a `name` value. A `name` value must start with an alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). An absolute path starts with a `/` (forward slash). If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the path you specify. If you supply a `name`, Docker creates a named volume by that `name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` value. If you supply the `/foo` value, Docker creates a bind-mount. If you supply the `foo` specification, Docker creates a named volume. You can specify multiple **-v** options to mount one or more mounts to a container. To use these same mounts in other containers, specify the **--volumes-from** option also. You can add `:ro` or `:rw` suffix to a volume to mount it read-only or read-write mode, respectively. By default, the volumes are mounted read-write. See examples. Labeling systems like SELinux require that proper labels are placed on volume content mounted into a container. Without a label, the security system might prevent the processes running inside the container from using the content. 
By default, Docker does not change the labels set by the OS. To change a label in the container context, you can add either of two suffixes `:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file objects on the shared volumes. The `z` option tells Docker that two containers share the volume content. As a result, Docker labels the content with a shared content label. Shared volume labels allow all containers to read/write content. The `Z` option tells Docker to label the content with a private unshared label. Only the current container can use a private volume. By default bind mounted volumes are `private`. That means any mounts done inside container will not be visible on host and vice versa. One can change this behavior by specifying a volume mount propagation property. Making a volume `shared`, mounts done under that volume inside container will be visible on host and vice versa. Making a volume `slave` enables only one-way mount propagation and that is mounts done on host under that volume will be visible inside container but not the other way around. To control mount propagation property of volume one can use `:[r]shared`, `:[r]slave` or `:[r]private` propagation flag. Propagation property can be specified only for bind mounted volumes and not for internal volumes or named volumes. For mount propagation to work source mount point (mount point where source dir is mounted on) has to have right propagation properties. For shared volumes, source mount point has to be shared. And for slave volumes, source mount has to be either shared or slave. Use `df ` to figure out the source mount and then use `findmnt -o TARGET,PROPAGATION ` to figure out propagation properties of source mount. If `findmnt` utility is not available, then one can look at mount entry for source mount point in `/proc/self/mountinfo`. Look at `optional fields` and see if any propagation properties are specified. 
`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if nothing is there that means mount is `private`. To change propagation properties of a mount point use `mount` command. For example, if one wants to bind mount source directory `/foo` one can do `mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This will convert /foo into a `shared` mount point. Alternatively one can directly change propagation properties of source mount. Say `/` is source mount for `/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. > **Note**: > When using systemd to manage the Docker daemon's start and stop, in the systemd > unit file there is an option to control mount propagation for the Docker daemon > itself, called `MountFlags`. The value of this setting may cause Docker to not > see mount propagation changes made on the mount point. For example, if this value > is `slave`, you may not be able to use the `shared` or `rshared` propagation on > a volume. **--volume-driver**="" Container's volume driver. This driver creates volumes specified either from a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. See **docker-volume-create(1)** for full details. **--volumes-from**=[] Mount volumes from the specified container(s) **-w**, **--workdir**="" Working directory inside the container # EXAMPLES ## Specify isolation technology for container (--isolation) This option is useful in situations where you are running Docker containers on Windows. The `--isolation=` option sets a container's isolation technology. On Linux, the only supported option is `default`, which uses Linux namespaces. On Microsoft Windows, you can specify these values: * `default`: Use the value specified by the Docker daemon's `--exec-opt`. If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. * `process`: Namespace isolation only. 
* `hyperv`: Hyper-V hypervisor partition-based isolation. Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. # HISTORY August 2014, updated by Sven Dowideit September 2014, updated by Sven Dowideit November 2014, updated by Sven Dowideit docker-1.10.3/man/docker-daemon.8.md000066400000000000000000000466171267010174400170360ustar00rootroot00000000000000% DOCKER(8) Docker User Manuals % Shishir Mahajan % SEPTEMBER 2015 # NAME docker-daemon - Enable daemon mode # SYNOPSIS **docker daemon** [**--api-cors-header**=[=*API-CORS-HEADER*]] [**--authorization-plugin**[=*[]*]] [**-b**|**--bridge**[=*BRIDGE*]] [**--bip**[=*BIP*]] [**--cgroup-parent**[=*[]*]] [**--cluster-store**[=*[]*]] [**--cluster-advertise**[=*[]*]] [**--cluster-store-opt**[=*map[]*]] [**--config-file**[=*/etc/docker/daemon.json*]] [**-D**|**--debug**] [**--default-gateway**[=*DEFAULT-GATEWAY*]] [**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]] [**--default-ulimit**[=*[]*]] [**--disable-legacy-registry**] [**--dns**[=*[]*]] [**--dns-opt**[=*[]*]] [**--dns-search**[=*[]*]] [**--exec-opt**[=*[]*]] [**--exec-root**[=*/var/run/docker*]] [**--fixed-cidr**[=*FIXED-CIDR*]] [**--fixed-cidr-v6**[=*FIXED-CIDR-V6*]] [**-G**|**--group**[=*docker*]] [**-g**|**--graph**[=*/var/lib/docker*]] [**-H**|**--host**[=*[]*]] [**--help**] [**--icc**[=*true*]] [**--insecure-registry**[=*[]*]] [**--ip**[=*0.0.0.0*]] [**--ip-forward**[=*true*]] [**--ip-masq**[=*true*]] [**--iptables**[=*true*]] [**--ipv6**] [**-l**|**--log-level**[=*info*]] [**--label**[=*[]*]] [**--log-driver**[=*json-file*]] [**--log-opt**[=*map[]*]] [**--mtu**[=*0*]] [**-p**|**--pidfile**[=*/var/run/docker.pid*]] [**--registry-mirror**[=*[]*]] [**-s**|**--storage-driver**[=*STORAGE-DRIVER*]] [**--selinux-enabled**] [**--storage-opt**[=*[]*]] [**--tls**] [**--tlscacert**[=*~/.docker/ca.pem*]] [**--tlscert**[=*~/.docker/cert.pem*]] [**--tlskey**[=*~/.docker/key.pem*]] [**--tlsverify**] [**--userland-proxy**[=*true*]] 
[**--userns-remap**[=*default*]] # DESCRIPTION **docker** has two distinct functions. It is used for starting the Docker daemon and to run the CLI (i.e., to command the daemon to manage images, containers etc.) So **docker** is both a server, as a daemon, and a client to the daemon, through the CLI. To run the Docker daemon you can specify **docker daemon**. You can check the daemon options using **docker daemon --help**. Daemon options should be specified after the **daemon** keyword in the following format. **docker daemon [OPTIONS]** # OPTIONS **--api-cors-header**="" Set CORS headers in the remote API. Default is cors disabled. Give urls like "http://foo, http://bar, ...". Give "*" to allow all. **--authorization-plugin**="" Set authorization plugins to load **-b**, **--bridge**="" Attach containers to a pre\-existing network bridge; use 'none' to disable container networking **--bip**="" Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b **--cgroup-parent**="" Set parent cgroup for all containers. Default is "/docker" for fs cgroup driver and "system.slice" for systemd cgroup driver. **--cluster-store**="" URL of the distributed storage backend **--cluster-advertise**="" Specifies the 'host:port' or `interface:port` combination that this particular daemon instance should use when advertising itself to the cluster. The daemon is reached through this value. **--cluster-store-opt**="" Specifies options for the Key/Value store. **--config-file**="/etc/docker/daemon.json" Specifies the JSON file path to load the configuration from. **-D**, **--debug**=*true*|*false* Enable debug mode. Default is false. **--default-gateway**="" IPv4 address of the container default gateway; this address must be part of the bridge subnet (which is defined by \-b or \--bip) **--default-gateway-v6**="" IPv6 address of the container default gateway **--default-ulimit**=[] Set default ulimits for containers. 
**--disable-legacy-registry**=*true*|*false* Do not contact legacy registries **--dns**="" Force Docker to use specific DNS servers **--dns-opt**="" DNS options to use. **--dns-search**=[] DNS search domains to use. **--exec-opt**=[] Set exec driver options. See EXEC DRIVER OPTIONS. **--exec-root**="" Path to use as the root of the Docker exec driver. Default is `/var/run/docker`. **--fixed-cidr**="" IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip) **--fixed-cidr-v6**="" IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64) **-G**, **--group**="" Group to assign the unix socket specified by -H when running in daemon mode. use '' (the empty string) to disable setting of a group. Default is `docker`. **-g**, **--graph**="" Path to use as the root of the Docker runtime. Default is `/var/lib/docker`. **-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host:port] to bind or unix://[/path/to/socket] to use. The socket(s) to bind to in daemon mode specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. **--help** Print usage statement **--icc**=*true*|*false* Allow unrestricted inter\-container and Docker daemon host communication. If disabled, containers can still be linked together using the **--link** option (see **docker-run(1)**). Default is true. **--insecure-registry**=[] Enable insecure registry communication, i.e., enable un-encrypted and/or untrusted communication. List of insecure registries can contain an element with CIDR notation to specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. Enabling `--insecure-registry` is useful when running a local registry. However, because its use creates security vulnerabilities it should ONLY be enabled for testing purposes. 
For increased security, users should add their CA to their system's list of trusted CAs instead of using `--insecure-registry`. **--ip**="" Default IP address to use when binding container ports. Default is `0.0.0.0`. **--ip-forward**=*true*|*false* Enables IP forwarding on the Docker host. The default is `true`. This flag interacts with the IP forwarding setting on your host system's kernel. If your system has IP forwarding disabled, this setting enables it. If your system has IP forwarding enabled, setting this flag to `--ip-forward=false` has no effect. This setting will also enable IPv6 forwarding if you have both `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject Router Advertisements and interfere with the host's existing IPv6 configuration. For more information, please consult the documentation about "Advanced Networking - IPv6". **--ip-masq**=*true*|*false* Enable IP masquerading for bridge's IP range. Default is true. **--iptables**=*true*|*false* Enable Docker's addition of iptables rules. Default is true. **--ipv6**=*true*|*false* Enable IPv6 support. Default is false. Docker will create an IPv6-enabled bridge with address fe80::1 which will allow you to create IPv6-enabled containers. Use together with `--fixed-cidr-v6` to provide globally routable IPv6 addresses. IPv6 forwarding will be enabled if not used with `--ip-forward=false`. This may collide with your host's current IPv6 settings. For more information please consult the documentation about "Advanced Networking - IPv6". **-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" Set the logging level. Default is `info`. **--label**="[]" Set key=value labels to the daemon (displayed in `docker info`) **--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*none*" Default driver for container logs. Default is `json-file`. **Warning**: `docker logs` command works only for `json-file` logging driver. **--log-opt**=[] Logging driver specific options. 
**--mtu**=*0* Set the containers network mtu. Default is `0`. **-p**, **--pidfile**="" Path to use for daemon PID file. Default is `/var/run/docker.pid` **--registry-mirror**=*://* Prepend a registry mirror to be used for image pulls. May be specified multiple times. **-s**, **--storage-driver**="" Force the Docker runtime to use a specific storage driver. **--selinux-enabled**=*true*|*false* Enable selinux support. Default is false. SELinux does not presently support the overlay storage driver. **--storage-opt**=[] Set storage driver options. See STORAGE DRIVER OPTIONS. **--tls**=*true*|*false* Use TLS; implied by --tlsverify. Default is false. **--tlscacert**=*~/.docker/ca.pem* Trust certs signed only by this CA. **--tlscert**=*~/.docker/cert.pem* Path to TLS certificate file. **--tlskey**=*~/.docker/key.pem* Path to TLS key file. **--tlsverify**=*true*|*false* Use TLS and verify the remote (daemon: verify client, client: verify daemon). Default is false. **--userland-proxy**=*true*|*false* Rely on a userland proxy implementation for inter-container and outside-to-container loopback communications. Default is true. **--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid* Enable user namespaces for containers on the daemon. Specifying "default" will cause a new user and group to be created to handle UID and GID range remapping for the user namespace mappings used for contained processes. Specifying a user (or uid) and optionally a group (or gid) will cause the daemon to lookup the user and group's subordinate ID ranges for use as the user namespace mappings for contained processes. # STORAGE DRIVER OPTIONS Docker uses storage backends (known as "graphdrivers" in the Docker internals) to create writable containers from images. Many of these backends use operating system level technologies and can be configured. Specify options to the storage backend with **--storage-opt** flags. The only backend that currently takes options is *devicemapper*. 
Therefore use these flags with **-s=**devicemapper. Specifically for devicemapper, the default is a "loopback" model which requires no pre-configuration, but is extremely inefficient. Do not use it in production. To make the best use of Docker with the devicemapper backend, you must have a recent version of LVM. Use `lvm` to create a thin pool; for more information see `man lvmthin`. Then, use `--storage-opt dm.thinpooldev` to tell the Docker engine to use that pool for allocating images and container snapshots. Here is the list of *devicemapper* options: #### dm.thinpooldev Specifies a custom block storage device to use for the thin pool. If using a block device for device mapper storage, it is best to use `lvm` to create and manage the thin-pool volume. This volume is then handed to Docker to create snapshot volumes needed for images and containers. Managing the thin-pool outside of Docker makes for the most feature-rich method of having Docker utilize device mapper thin provisioning as the backing storage for Docker's containers. The highlights of the LVM-based thin-pool management feature include: automatic or interactive thin-pool resize support, dynamically changing thin-pool features, automatic thinp metadata checking when lvm activates the thin-pool, etc. Example use: `docker daemon --storage-opt dm.thinpooldev=/dev/mapper/thin-pool` #### dm.basesize Specifies the size to use when creating the base device, which limits the size of images and containers. The default value is 10G. Note, thin devices are inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB of space on the pool. However, the filesystem will use more space for base images the larger the device is. The base device size can be increased at daemon restart which will allow all future images and containers (based on those new images) to be of the new base device size. Example use: `docker daemon --storage-opt dm.basesize=50G` This will increase the base device size to 50G. 
The Docker daemon will throw an error if existing base device size is larger than 50G. A user can use this option to expand the base device size however shrinking is not permitted. This value affects the system-wide "base" empty filesystem that may already be initialized and inherited by pulled images. Typically, a change to this value requires additional steps to take effect: $ sudo service docker stop $ sudo rm -rf /var/lib/docker $ sudo service docker start Example use: `docker daemon --storage-opt dm.basesize=20G` #### dm.fs Specifies the filesystem type to use for the base device. The supported options are `ext4` and `xfs`. The default is `ext4`. Example use: `docker daemon --storage-opt dm.fs=xfs` #### dm.mkfsarg Specifies extra mkfs arguments to be used when creating the base device. Example use: `docker daemon --storage-opt "dm.mkfsarg=-O ^has_journal"` #### dm.mountopt Specifies extra mount options used when mounting the thin devices. Example use: `docker daemon --storage-opt dm.mountopt=nodiscard` #### dm.use_deferred_removal Enables use of deferred device removal if `libdm` and the kernel driver support the mechanism. Deferred device removal means that if device is busy when devices are being removed/deactivated, then a deferred removal is scheduled on device. And devices automatically go away when last user of the device exits. For example, when a container exits, its associated thin device is removed. If that device has leaked into some other mount namespace and can't be removed, the container exit still succeeds and this option causes the system to schedule the device for deferred removal. It does not wait in a loop trying to remove a busy device. Example use: `docker daemon --storage-opt dm.use_deferred_removal=true` #### dm.use_deferred_deletion Enables use of deferred device deletion for thin pool devices. By default, thin pool device deletion is synchronous. Before a container is deleted, the Docker daemon removes any associated devices. 
If the storage driver can not remove a device, the container deletion fails and daemon returns. `Error deleting container: Error response from daemon: Cannot destroy container` To avoid this failure, enable both deferred device deletion and deferred device removal on the daemon. `docker daemon --storage-opt dm.use_deferred_deletion=true --storage-opt dm.use_deferred_removal=true` With these two options enabled, if a device is busy when the driver is deleting a container, the driver marks the device as deleted. Later, when the device isn't in use, the driver deletes it. In general it should be safe to enable this option by default. It will help when unintentional leaking of mount point happens across multiple mount namespaces. #### dm.loopdatasize **Note**: This option configures devicemapper loopback, which should not be used in production. Specifies the size to use when creating the loopback file for the "data" device which is used for the thin pool. The default size is 100G. The file is sparse, so it will not initially take up this much space. Example use: `docker daemon --storage-opt dm.loopdatasize=200G` #### dm.loopmetadatasize **Note**: This option configures devicemapper loopback, which should not be used in production. Specifies the size to use when creating the loopback file for the "metadata" device which is used for the thin pool. The default size is 2G. The file is sparse, so it will not initially take up this much space. Example use: `docker daemon --storage-opt dm.loopmetadatasize=4G` #### dm.datadev (Deprecated, use `dm.thinpooldev`) Specifies a custom blockdevice to use for data for a Docker-managed thin pool. It is better to use `dm.thinpooldev` - see the documentation for it above for discussion of the advantages. #### dm.metadatadev (Deprecated, use `dm.thinpooldev`) Specifies a custom blockdevice to use for metadata for a Docker-managed thin pool. See `dm.datadev` for why this is deprecated. 
#### dm.blocksize Specifies a custom blocksize to use for the thin pool. The default blocksize is 64K. Example use: `docker daemon --storage-opt dm.blocksize=512K` #### dm.blkdiscard Enables or disables the use of `blkdiscard` when removing devicemapper devices. This is disabled by default due to the additional latency, but as a special case with loopback devices it will be enabled, in order to re-sparsify the loopback file on image/container removal. Disabling this on loopback can lead to *much* faster container removal times, but it also prevents the space used in `/var/lib/docker` directory from being returned to the system for other use when containers are removed. Example use: `docker daemon --storage-opt dm.blkdiscard=false` #### dm.override_udev_sync_check By default, the devicemapper backend attempts to synchronize with the `udev` device manager for the Linux kernel. This option allows disabling that synchronization, to continue even though the configuration may be buggy. To view the `udev` sync support of a Docker daemon that is using the `devicemapper` driver, run: $ docker info [...] Udev Sync Supported: true [...] When `udev` sync support is `true`, then `devicemapper` and `udev` can coordinate the activation and deactivation of devices for containers. When `udev` sync support is `false`, a race condition occurs between the `devicemapper` and `udev` during create and cleanup. The race condition results in errors and failures. (For information on these failures, see [docker#4036](https://github.com/docker/docker/issues/4036)) To allow the `docker` daemon to start, regardless of whether `udev` sync is `false`, set `dm.override_udev_sync_check` to true: $ docker daemon --storage-opt dm.override_udev_sync_check=true When this value is `true`, the driver continues and simply warns you the errors are happening. **Note**: The ideal is to pursue a `docker` daemon and environment that does support synchronizing with `udev`. 
For further discussion on this topic, see [docker#4036](https://github.com/docker/docker/issues/4036). Otherwise, set this flag for migrating existing Docker daemons to a daemon with a supported environment. # CLUSTER STORE OPTIONS The daemon uses libkv to advertise the node within the cluster. Some Key/Value backends support mutual TLS, and the client TLS settings used by the daemon can be configured using the **--cluster-store-opt** flag, specifying the paths to PEM encoded files. #### kv.cacertfile Specifies the path to a local file with PEM encoded CA certificates to trust #### kv.certfile Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store. #### kv.keyfile Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store. # Access authorization Docker's access authorization can be extended by authorization plugins that your organization can purchase or build themselves. You can install one or more authorization plugins when you start the Docker `daemon` using the `--authorization-plugin=PLUGIN_ID` option. ```bash docker daemon --authorization-plugin=plugin1 --authorization-plugin=plugin2,... ``` The `PLUGIN_ID` value is either the plugin's name or a path to its specification file. The plugin's implementation determines whether you can specify a name or path. Consult with your Docker administrator to get information about the plugins available to you. Once a plugin is installed, requests made to the `daemon` through the command line or Docker's remote API are allowed or denied by the plugin. If you have multiple plugins installed, at least one must allow the request for it to complete. 
For information about how to create an authorization plugin, see [authorization plugin](https://docs.docker.com/engine/extend/authorization.md) section in the Docker extend section of this documentation. # HISTORY Sept 2015, Originally compiled by Shishir Mahajan based on docker.com source material and internal work. docker-1.10.3/man/docker-diff.1.md000066400000000000000000000021411267010174400164540ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-diff - Inspect changes on a container's filesystem # SYNOPSIS **docker diff** [**--help**] CONTAINER # DESCRIPTION Inspect changes on a container's filesystem. You can use the full or shortened container ID or the container name set using **docker run --name** option. # OPTIONS **--help** Print usage statement # EXAMPLES Inspect the changes to on a nginx container: # docker diff 1fdfd1f54c1b C /dev C /dev/console C /dev/core C /dev/stdout C /dev/fd C /dev/ptmx C /dev/stderr C /dev/stdin C /run A /run/nginx.pid C /var/lib/nginx/tmp A /var/lib/nginx/tmp/client_body A /var/lib/nginx/tmp/fastcgi A /var/lib/nginx/tmp/proxy A /var/lib/nginx/tmp/scgi A /var/lib/nginx/tmp/uwsgi C /var/log/nginx A /var/log/nginx/access.log A /var/log/nginx/error.log # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-events.1.md000066400000000000000000000100631267010174400170520ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-events - Get real time events from the server # SYNOPSIS **docker events** [**--help**] [**-f**|**--filter**[=*[]*]] [**--since**[=*SINCE*]] [**--until**[=*UNTIL*]] # DESCRIPTION Get event information from the Docker daemon. Information can include historical information and real-time information. 
Docker containers will report the following events: attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause and Docker images will report: delete, import, pull, push, tag, untag # OPTIONS **--help** Print usage statement **-f**, **--filter**=[] Provide filter values (i.e., 'event=stop') **--since**="" Show all events created since timestamp **--until**="" Stream events until this timestamp The `--since` and `--until` parameters can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine’s time. If you do not provide the --since option, the command returns only new and/or live events. Supported formats for date formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be used if you do not provide either a `Z` or a `+-00:00` timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long. 
# EXAMPLES ## Listening for Docker events After running docker events a container 786d698004576 is started and stopped (The container name has been shortened in the output below): # docker events 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die 2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop ## Listening for events since a given date Again the output container IDs have been shortened for the purposes of this document: # docker events --since '2015-01-28' 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start 2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die 2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die 2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop The following example outputs all events that were generated in the last 3 minutes, relative to the current time on the client machine: # docker events --since '3m' 2015-05-12T11:51:30.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die 2015-05-12T15:52:12.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop 2015-05-12T15:53:45.999999999Z07:00 7805c1d35632: (from redis:2.8) die 2015-05-12T15:54:03.999999999Z07:00 7805c1d35632: (from redis:2.8) stop If you do not provide the --since option, the command returns only new and/or live events. 
# HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit June 2015, updated by Brian Goff October 2015, updated by Mike Brown docker-1.10.3/man/docker-exec.1.md000066400000000000000000000040521267010174400164730ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-exec - Run a command in a running container # SYNOPSIS **docker exec** [**-d**|**--detach**] [**--detach-keys**[=*[]*]] [**--help**] [**-i**|**--interactive**] [**--privileged**] [**-t**|**--tty**] [**-u**|**--user**[=*USER*]] CONTAINER COMMAND [ARG...] # DESCRIPTION Run a process in a running container. The command started using `docker exec` will only run while the container's primary process (`PID 1`) is running, and will not be restarted if the container is restarted. If the container is paused, then the `docker exec` command will wait until the container is unpaused, and then run # OPTIONS **-d**, **--detach**=*true*|*false* Detached mode: run command in the background. The default is *false*. **--detach-keys**="" Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. **--help** Print usage statement **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. **--privileged**=*true*|*false* Give the process extended [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) when running in a container. The default is *false*. Without this flag, the process run by `docker exec` in a running container has the same capabilities as the container, which may be limited. Set `--privileged` to give all capabilities to the process. **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. 
**-u**, **--user**="" Sets the username or UID used and optionally the groupname or GID for the specified command. The followings examples are all valid: --user [user | user:group | uid | uid:gid | user:gid | uid:group ] Without this argument the command will be run as root in the container. The **-t** option is incompatible with a redirection of the docker client standard input. # HISTORY November 2014, updated by Sven Dowideit docker-1.10.3/man/docker-export.1.md000066400000000000000000000024701267010174400170720ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-export - Export the contents of a container's filesystem as a tar archive # SYNOPSIS **docker export** [**--help**] [**-o**|**--output**[=*""*]] CONTAINER # DESCRIPTION Export the contents of a container's filesystem using the full or shortened container ID or container name. The output is exported to STDOUT and can be redirected to a tar file. Stream to a file instead of STDOUT by using **-o**. # OPTIONS **--help** Print usage statement **-o**, **--output**="" Write to a file, instead of STDOUT # EXAMPLES Export the contents of the container called angry_bell to a tar file called angry_bell.tar: # docker export angry_bell > angry_bell.tar # docker export --output=angry_bell-latest.tar angry_bell # ls -sh angry_bell.tar 321M angry_bell.tar # ls -sh angry_bell-latest.tar 321M angry_bell-latest.tar # See also **docker-import(1)** to create an empty filesystem image and import the contents of the tarball into it, then optionally tag it. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit January 2015, updated by Joseph Kern (josephakern at gmail dot com) docker-1.10.3/man/docker-history.1.md000066400000000000000000000041121267010174400172450ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-history - Show the history of an image # SYNOPSIS **docker history** [**--help**] [**-H**|**--human**[=*true*]] [**--no-trunc**] [**-q**|**--quiet**] IMAGE # DESCRIPTION Show the history of when and how an image was created. # OPTIONS **--help** Print usage statement **-H**, **--human**=*true*|*false* Print sizes and dates in human readable format. The default is *true*. **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. **-q**, **--quiet**=*true*|*false* Only show numeric IDs. The default is *false*. # EXAMPLES $ docker history fedora IMAGE CREATED CREATED BY SIZE COMMENT 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B 511136ea3c5a 10 months ago 0 B Imported from - ## Display comments in the image history The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history. $ sudo docker history docker:scm IMAGE CREATED CREATED BY SIZE COMMENT 2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image 88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B 511136ea3c5a 19 months ago 0 B Imported from - # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-images.1.md000066400000000000000000000074221267010174400170200ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-images - List images # SYNOPSIS **docker images** [**--help**] [**-a**|**--all**] [**--digests**] [**-f**|**--filter**[=*[]*]] [**--no-trunc**] [**-q**|**--quiet**] [REPOSITORY[:TAG]] # DESCRIPTION This command lists the images stored in the local Docker repository. By default, intermediate images, used during builds, are not listed. Some of the output, e.g., image ID, is truncated, for space reasons. However the truncated image ID, and often the first few characters, are enough to be used in other Docker commands that use the image ID. The output includes repository, tag, image ID, date created and the virtual size. The title REPOSITORY for the first title may seem confusing. It is essentially the image name. However, because you can tag a specific image, and multiple tags (image instances) can be associated with a single name, the name is really a repository for all tagged images of the same name. For example consider an image called fedora. It may be tagged with 18, 19, or 20, etc. to manage different versions. # OPTIONS **-a**, **--all**=*true*|*false* Show all images (by default filter out the intermediate image layers). The default is *false*. **--digests**=*true*|*false* Show image digests. The default is *false*. **-f**, **--filter**=[] Filters the output. The dangling=true filter finds unused images. While label=com.foo=amd64 filters for images with a com.foo value of amd64. The label=com.foo filter finds images with the label com.foo of any value. **--format**="*TEMPLATE*" Pretty-print containers using a Go template. Valid placeholders: .ID - Image ID .Repository - Image repository .Tag - Image tag .Digest - Image digest .CreatedSince - Elapsed time since the image was created. .CreatedAt - Time when the image was created.. 
.Size - Image disk size. **--help** Print usage statement **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. **-q**, **--quiet**=*true*|*false* Only show numeric IDs. The default is *false*. # EXAMPLES ## Listing the images To list the images in a local repository (not the registry) run: docker images The list will contain the image repository name, a tag for the image, and an image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, IMAGE ID, CREATED, and SIZE. The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument that restricts the list to images that match the argument. If you specify `REPOSITORY`but no `TAG`, the `docker images` command lists all images in the given repository. docker images java The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, `docker images jav` does not match the image `java`. If both `REPOSITORY` and `TAG` are provided, only images matching that repository and tag are listed. To find all local images in the "java" repository with tag "8" you can use: docker images java:8 To get a verbose list of images which contains all the intermediate images used in builds use **-a**: docker images -a Previously, the docker images command supported the --tree and --dot arguments, which displayed different visualizations of the image data. Docker core removed this functionality in the 1.7 version. If you liked this functionality, you can still find it in the third-party dockviz tool: https://github.com/justone/dockviz. ## Listing only the shortened image IDs Listing just the shortened image IDs. This can be useful for some automated tools. docker images -q # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-import.1.md000066400000000000000000000040611267010174400170610ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. # SYNOPSIS **docker import** [**-c**|**--change**[=*[]*]] [**-m**|**--message**[=*MESSAGE*]] [**--help**] file|URL|**-**[REPOSITORY[:TAG]] # OPTIONS **-c**, **--change**=[] Apply specified Dockerfile instructions while importing the image Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` **--help** Print usage statement **-m**, **--message**="" Set commit message for imported image # DESCRIPTION Create a new filesystem image from the contents of a tarball (`.tar`, `.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. # EXAMPLES ## Import from a remote location # docker import http://example.com/exampleimage.tgz example/imagerepo ## Import from a local file Import to docker via pipe and stdin: # cat exampleimage.tgz | docker import - example/imagelocal Import with a commit message # cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new Import to a Docker image from a local file. # docker import /path/to/exampleimage.tgz ## Import from a local file and tag Import to docker via pipe and stdin: # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 ## Import from a local directory # tar -c . | docker import - exampleimagedir ## Apply specified Dockerfile instructions while importing the image This example sets the docker image ENV variable DEBUG to true by default. # tar -c . | docker import -c="ENV DEBUG true" - exampleimagedir # See also **docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT. 
# HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-info.1.md000066400000000000000000000030641267010174400165040ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-info - Display system-wide information # SYNOPSIS **docker info** [**--help**] # DESCRIPTION This command displays system wide information regarding the Docker installation. Information displayed includes the number of containers and images, pool name, data file, metadata file, data space used, total data space, metadata space used , total metadata space, execution driver, and the kernel version. The data file is where the images are stored and the metadata file is where the meta data regarding those images are stored. When run for the first time Docker allocates a certain amount of data space and meta data space from the space available on the volume where `/var/lib/docker` is mounted. # OPTIONS **--help** Print usage statement # EXAMPLES ## Display Docker system information Here is a sample output: # docker info Containers: 14 Running: 3 Paused: 1 Stopped: 10 Images: 52 Server Version: 1.9.0 Storage Driver: aufs Root Dir: /var/lib/docker/aufs Dirs: 80 Execution Driver: native-0.2 Logging Driver: json-file Plugins: Volume: local Network: bridge null host Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS OSType: linux Architecture: x86_64 CPUs: 1 Total Memory: 2 GiB # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-inspect.1.md000066400000000000000000000231211267010174400172120ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-inspect - Return low-level information on a container or image # SYNOPSIS **docker inspect** [**--help**] [**-f**|**--format**[=*FORMAT*]] [**-s**|**--size**] [**--type**=*container*|*image*] CONTAINER|IMAGE [CONTAINER|IMAGE...] # DESCRIPTION This displays all the information available in Docker for a given container or image. By default, this will render all results in a JSON array. If the container and image have the same name, this will return container JSON for unspecified type. If a format is specified, the given template will be executed for each result. # OPTIONS **--help** Print usage statement **-f**, **--format**="" Format the output using the given Go template. **-s**, **--size** Display total file sizes if the type is container. **--type**="*container*|*image*" Return JSON for specified type, permissible values are "image" or "container" # EXAMPLES Get information about an image when image name conflicts with the container name, e.g. both image and container are named rhel7: $ docker inspect --type=image rhel7 [ { "Id": "fe01a428b9d9de35d29531e9994157978e8c48fa693e1bf1d221dffbbb67b170", "Parent": "10acc31def5d6f249b548e01e8ffbaccfd61af0240c17315a7ad393d022c5ca2", .... 
} ] ## Getting information on a container To get information on a container use its ID or instance name: $ docker inspect d2cc496561d6 [{ "Id": "d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", "Created": "2015-06-08T16:18:02.505155285Z", "Path": "bash", "Args": [], "State": { "Running": false, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 0, "ExitCode": 0, "Error": "", "StartedAt": "2015-06-08T16:18:03.643865954Z", "FinishedAt": "2015-06-08T16:57:06.448552862Z" }, "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", "NetworkSettings": { "Bridge": "", "SandboxID": "6b4851d1903e16dd6a567bd526553a86664361f31036eaaa2f8454d6f4611f6f", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": {}, "SandboxKey": "/var/run/docker/netns/6b4851d1903e", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:12:00:02", "Networks": { "bridge": { "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:12:00:02" } } }, "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname", "HostsPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hosts", "LogPath": 
"/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47-json.log", "Name": "/adoring_wozniak", "RestartCount": 0, "Driver": "devicemapper", "ExecDriver": "native-0.2", "MountLabel": "", "ProcessLabel": "", "Mounts": [ { "Source": "/data", "Destination": "/data", "Mode": "ro,Z", "RW": false "Propagation": "" } ], "AppArmorProfile": "", "ExecIDs": null, "HostConfig": { "Binds": null, "ContainerIDFile": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 0, "CpuPeriod": 0, "CpusetCpus": "", "CpusetMems": "", "CpuQuota": 0, "BlkioWeight": 0, "OomKillDisable": false, "Privileged": false, "PortBindings": {}, "Links": null, "PublishAllPorts": false, "Dns": null, "DnsSearch": null, "DnsOptions": null, "ExtraHosts": null, "VolumesFrom": null, "Devices": [], "NetworkMode": "bridge", "IpcMode": "", "PidMode": "", "UTSMode": "", "CapAdd": null, "CapDrop": null, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "SecurityOpt": null, "ReadonlyRootfs": false, "Ulimits": null, "LogConfig": { "Type": "json-file", "Config": {} }, "CgroupParent": "" }, "GraphDriver": { "Name": "devicemapper", "Data": { "DeviceId": "5", "DeviceName": "docker-253:1-2763198-d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", "DeviceSize": "171798691840" } }, "Config": { "Hostname": "d2cc496561d6", "Domainname": "", "User": "", "AttachStdin": true, "AttachStdout": true, "AttachStderr": true, "ExposedPorts": null, "Tty": true, "OpenStdin": true, "StdinOnce": true, "Env": null, "Cmd": [ "bash" ], "Image": "fedora", "Volumes": null, "VolumeDriver": "", "WorkingDir": "", "Entrypoint": null, "NetworkDisabled": false, "MacAddress": "", "OnBuild": null, "Labels": {}, "Memory": 0, "MemorySwap": 0, "CpuShares": 0, "Cpuset": "", "StopSignal": "SIGTERM" } } ] ## Getting the IP address of a container instance To get the IP address of a container use: $ docker inspect '{{range 
.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' d2cc496561d6 172.17.0.2 ## Listing all port bindings One can loop over arrays and maps in the results to produce simple text output: $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' d2cc496561d6 80/tcp -> 80 You can get more information about how to write a Go template from: https://golang.org/pkg/text/template/. ## Getting size information on an container $ docker inspect -s d2cc496561d6 [ { .... "SizeRw": 0, "SizeRootFs": 972, .... } ] ## Getting information on an image Use an image's ID or name (e.g., repository/name[:tag]) to get information about the image: $ docker inspect ded7cd95e059 [{ "Id": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", "Parent": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", "Comment": "", "Created": "2015-05-27T16:58:22.937503085Z", "Container": "76cf7f67d83a7a047454b33007d03e32a8f474ad332c3a03c94537edd22b312b", "ContainerConfig": { "Hostname": "76cf7f67d83a", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "/bin/sh", "-c", "#(nop) ADD file:4be46382bcf2b095fcb9fe8334206b584eff60bb3fad8178cbd97697fcb2ea83 in /" ], "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", "Volumes": null, "VolumeDriver": "", "WorkingDir": "", "Entrypoint": null, "NetworkDisabled": false, "MacAddress": "", "OnBuild": null, "Labels": {} }, "DockerVersion": "1.6.0", "Author": "Lokesh Mandvekar \u003clsm5@fedoraproject.org\u003e", "Config": { "Hostname": "76cf7f67d83a", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": null, "Image": 
"48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", "Volumes": null, "VolumeDriver": "", "WorkingDir": "", "Entrypoint": null, "NetworkDisabled": false, "MacAddress": "", "OnBuild": null, "Labels": {} }, "Architecture": "amd64", "Os": "linux", "Size": 186507296, "VirtualSize": 186507296, "GraphDriver": { "Name": "devicemapper", "Data": { "DeviceId": "3", "DeviceName": "docker-253:1-2763198-ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", "DeviceSize": "171798691840" } } } ] # HISTORY April 2014, originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Qiang Huang October 2015, updated by Sally O'Malley docker-1.10.3/man/docker-kill.1.md000066400000000000000000000012701267010174400165010ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-kill - Kill a running container using SIGKILL or a specified signal # SYNOPSIS **docker kill** [**--help**] [**-s**|**--signal**[=*"KILL"*]] CONTAINER [CONTAINER...] # DESCRIPTION The main process inside each container specified will be sent SIGKILL, or any signal specified with option --signal. # OPTIONS **--help** Print usage statement **-s**, **--signal**="*KILL*" Signal to send to the container # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-load.1.md000066400000000000000000000032061267010174400164660ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-load - Load an image from a tar archive or STDIN # SYNOPSIS **docker load** [**--help**] [**-i**|**--input**[=*INPUT*]] # DESCRIPTION Loads a tarred repository from a file or the standard input stream. Restores both images and tags. 
# OPTIONS **--help** Print usage statement **-i**, **--input**="" Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz. # EXAMPLES $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB $ docker load --input fedora.tar $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB fedora rawhide 0d20aec6529d 7 weeks ago 387 MB fedora 20 58394af37342 7 weeks ago 385.5 MB fedora heisenbug 58394af37342 7 weeks ago 385.5 MB fedora latest 58394af37342 7 weeks ago 385.5 MB # See also **docker-save(1)** to save an image(s) to a tar archive (streamed to STDOUT by default). # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit July 2015 update by Mary Anthony docker-1.10.3/man/docker-login.1.md000066400000000000000000000040031267010174400166530ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-login - Register or log in to a Docker registry. # SYNOPSIS **docker login** [**-e**|**--email**[=*EMAIL*]] [**--help**] [**-p**|**--password**[=*PASSWORD*]] [**-u**|**--username**[=*USERNAME*]] [SERVER] # DESCRIPTION Register or log in to a Docker Registry located on the specified `SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you do not specify a `SERVER`, the command uses Docker's public registry located at `https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. `docker login` requires user to use `sudo` or be `root`, except when: 1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. 2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. 
See [Docker Daemon Attack Surface](https://docs.docker.com/articles/security/#docker-daemon-attack-surface) for details. You can log into any public or private repository for which you have credentials. When you log in, the command stores encoded credentials in `$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. > **Note**: When running `sudo docker login` credentials are saved in `/root/.docker/config.json`. > # OPTIONS **-e**, **--email**="" Email **--help** Print usage statement **-p**, **--password**="" Password **-u**, **--username**="" Username # EXAMPLES ## Login to a registry on your localhost # docker login localhost:8080 # See also **docker-logout(1)** to log out from a Docker registry. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 November 2015, updated by Sally O'Malley docker-1.10.3/man/docker-logout.1.md000066400000000000000000000015741267010174400170660ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-logout - Log out from a Docker registry. # SYNOPSIS **docker logout** [SERVER] # DESCRIPTION Log out of a Docker Registry located on the specified `SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you do not specify a `SERVER`, the command attempts to log you out of Docker's public registry located at `https://registry-1.docker.io/` by default. # OPTIONS There are no available options. # EXAMPLES ## Log out from a registry on your localhost # docker logout localhost:8080 # See also **docker-login(1)** to register or log in to a Docker registry server. 
# HISTORY June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) July 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.10.3/man/docker-logs.1.md000066400000000000000000000047071267010174400165220ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-logs - Fetch the logs of a container # SYNOPSIS **docker logs** [**-f**|**--follow**] [**--help**] [**--since**[=*SINCE*]] [**-t**|**--timestamps**] [**--tail**[=*"all"*]] CONTAINER # DESCRIPTION The **docker logs** command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution order when combined with a docker run (i.e., your run may not have generated any logs at the time you execute docker logs). The **docker logs --follow** command combines commands **docker logs** and **docker attach**. It will first return all logs from the beginning and then continue streaming new output from the container’s stdout and stderr. **Warning**: This command works only for the **json-file** or **journald** logging drivers. # OPTIONS **--help** Print usage statement **-f**, **--follow**=*true*|*false* Follow log output. The default is *false*. **--since**="" Show logs since timestamp **-t**, **--timestamps**=*true*|*false* Show timestamps. The default is *false*. **--tail**="*all*" Output the specified number of lines at the end of logs (defaults to all logs) The `--since` option can be Unix timestamps, date formated timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine’s time. Supported formats for date formated time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be used if you do not provide either a `Z` or a `+-00:00` timezone offset at the end of the timestamp. 
When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long. You can combine the `--since` option with either or both of the `--follow` or `--tail` options. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit April 2015, updated by Ahmet Alp Balkan October 2015, updated by Mike Brown docker-1.10.3/man/docker-network-connect.1.md000066400000000000000000000045221267010174400206710ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % OCT 2015 # NAME docker-network-connect - connect a container to a network # SYNOPSIS **docker network connect** [**--help**] NETWORK CONTAINER # DESCRIPTION Connects a container to a network. You can connect a container by name or by ID. Once connected, the container can communicate with other containers in the same network. ```bash $ docker network connect multi-host-network container1 ``` You can also use the `docker run --net=` option to start a container and immediately connect it to a network. ```bash $ docker run -itd --net=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox ``` You can pause, restart, and stop containers that are connected to a network. Paused containers remain connected and can be revealed by a `network inspect`. When the container is stopped, it does not appear on the network until you restart it. If specified, the container's IP address(es) is reapplied when a stopped container is restarted. If the IP address is no longer available, the container fails to start. 
One way to guarantee that the IP address is available is to specify an `--ip-range` when creating the network, and choose the static IP address(es) from outside that range. This ensures that the IP address is not given to another container while this container is not on the network. ```bash $ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network ``` ```bash $ docker network connect --ip 172.20.128.2 multi-host-network container2 ``` To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. Once connected in network, containers can communicate using only another container's IP address or name. For `overlay` networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way. You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. # OPTIONS **NETWORK** Specify network name **CONTAINER** Specify container name **--help** Print usage statement # HISTORY OCT 2015, created by Mary Anthony docker-1.10.3/man/docker-network-create.1.md000066400000000000000000000132141267010174400205010ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % OCT 2015 # NAME docker-network-create - create a new network # SYNOPSIS **docker network create** [**--aux-address**=*map[]*] [**-d**|**--driver**=*DRIVER*] [**--gateway**=*[]*] [**--help**] [**--internal**] [**--ip-range**=*[]*] [**--ipam-driver**=*default*] [**--ipam-opt**=*map[]*] [**-o**|**--opt**=*map[]*] [**--subnet**=*[]*] NETWORK-NAME # DESCRIPTION Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the built-in network drivers. 
If you have installed a third party or your own custom network driver you can specify that `DRIVER` here also. If you don't specify the `--driver` option, the command automatically creates a `bridge` network for you. When you install Docker Engine it creates a `bridge` network automatically. This network corresponds to the `docker0` bridge that Engine has traditionally relied on. When launch a new container with `docker run` it automatically connects to this bridge network. You cannot remove this default bridge network but you can create new ones using the `network create` command. ```bash $ docker network create -d bridge my-bridge-network ``` Bridge networks are isolated networks on a single Engine installation. If you want to create a network that spans multiple Docker hosts each running an Engine, you must create an `overlay` network. Unlike `bridge` networks overlay networks require some pre-existing conditions before you can create one. These conditions are: * Access to a key-value store. Engine supports Consul, Etcd, and Zookeeper (Distributed store) key-value stores. * A cluster of hosts with connectivity to the key-value store. * A properly configured Engine `daemon` on each host in the cluster. The `docker daemon` options that support the `overlay` network are: * `--cluster-store` * `--cluster-store-opt` * `--cluster-advertise` To read more about these options and how to configure them, see ["*Get started with multi-host network*"](https://www.docker.com/engine/userguide/networking/get-started-overlay.md). It is also a good idea, though not required, that you install Docker Swarm on to manage the cluster that makes up your network. Swarm provides sophisticated discovery and server management that can assist your implementation. 
Once you have prepared the `overlay` network prerequisites you simply choose a Docker host in the cluster and issue the following to create the network: ```bash $ docker network create -d overlay my-multihost-network ``` Network names must be unique. The Docker daemon attempts to identify naming conflicts but this is not guaranteed. It is the user's responsibility to avoid name conflicts. ## Connect containers When you start a container use the `--net` flag to connect it to a network. This adds the `busybox` container to the `mynet` network. ```bash $ docker run -itd --net=mynet busybox ``` If you want to add a container to a network after the container is already running use the `docker network connect` subcommand. You can connect multiple containers to the same network. Once connected, the containers can communicate using only another container's IP address or name. For `overlay` networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way. You can disconnect a container from a network using the `docker network disconnect` command. ## Specifying advanced options When you create a network, Engine creates a non-overlapping subnetwork for the network by default. This subnetwork is not a subdivision of an existing network. It is purely for ip-addressing purposes. You can override this default and specify subnetwork values directly using the the `--subnet` option. On a `bridge` network you can only create a single subnet: ```bash docker network create -d bridge --subnet=192.168.0.0/16 br0 ``` Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` options. ```bash network create --driver=bridge --subnet=172.28.0.0/16 --ip-range=172.28.5.0/24 --gateway=172.28.5.254 br0 ``` If you omit the `--gateway` flag the Engine selects one for you from inside a preferred pool. 
For `overlay` networks and for network driver plugins that support it you can create multiple subnetworks. ```bash docker network create -d overlay --subnet=192.168.0.0/16 --subnet=192.170.0.0/16 --gateway=192.168.0.100 --gateway=192.170.0.100 --ip-range=192.168.1.0/24 --aux-address a=192.168.1.5 --aux-address b=192.168.1.6 --aux-address a=192.170.1.5 --aux-address b=192.170.1.6 my-multihost-network ``` Be sure that your subnetworks do not overlap. If they do, the network create fails and Engine returns an error. ### Network internal mode By default, when you connect a container to an `overlay` network, Docker also connects a bridge network to it to provide external connectivity. If you want to create an externally isolated `overlay` network, you can specify the `--internal` option. # OPTIONS **--aux-address**=map[] Auxiliary ipv4 or ipv6 addresses used by network driver **-d**, **--driver**=*DRIVER* Driver to manage the Network bridge or overlay. The default is bridge. **--gateway**=[] ipv4 or ipv6 Gateway for the master subnet **--help** Print usage **--internal** Restricts external access to the network **--ip-range**=[] Allocate container ip from a sub-range **--ipam-driver**=*default* IP Address Management Driver **--ipam-opt**=map[] Set custom IPAM driver options **-o**, **--opt**=map[] Set custom driver options **--subnet**=[] Subnet in CIDR format that represents a network segment # HISTORY OCT 2015, created by Mary Anthony docker-1.10.3/man/docker-network-disconnect.1.md000066400000000000000000000011331267010174400213640ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % OCT 2015 # NAME docker-network-disconnect - disconnect a container from a network # SYNOPSIS **docker network disconnect** [**--help**] [**--force**] NETWORK CONTAINER # DESCRIPTION Disconnects a container from a network. 
```bash $ docker network disconnect multi-host-network container1 ``` # OPTIONS **NETWORK** Specify network name **CONTAINER** Specify container name **--force** Force the container to disconnect from a network **--help** Print usage statement # HISTORY OCT 2015, created by Mary Anthony docker-1.10.3/man/docker-network-inspect.1.md000066400000000000000000000065301267010174400207060ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % OCT 2015 # NAME docker-network-inspect - inspect a network # SYNOPSIS **docker network inspect** [**-f**|**--format**[=*FORMAT*]] [**--help**] NETWORK [NETWORK...] # DESCRIPTION Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: ```bash $ sudo docker run -itd --name=container1 busybox f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 $ sudo docker run -itd --name=container2 busybox bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 ``` The `network inspect` command shows the containers, by id, in its results. You can specify an alternate format to execute a given template for each result. Go's [text/template](http://golang.org/pkg/text/template/) package describes all the details of the format. 
```bash $ sudo docker network inspect bridge [ { "Name": "bridge", "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.17.42.1/16", "Gateway": "172.17.42.1" } ] }, "Containers": { "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { "Name": "container2", "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", "MacAddress": "02:42:ac:11:00:02", "IPv4Address": "172.17.0.2/16", "IPv6Address": "" }, "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { "Name": "container1", "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", "MacAddress": "02:42:ac:11:00:01", "IPv4Address": "172.17.0.1/16", "IPv6Address": "" } }, "Options": { "com.docker.network.bridge.default_bridge": "true", "com.docker.network.bridge.enable_icc": "true", "com.docker.network.bridge.enable_ip_masquerade": "true", "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", "com.docker.network.bridge.name": "docker0", "com.docker.network.driver.mtu": "1500" } } ] ``` Returns the information about the user-defined network: ```bash $ docker network create simple-network 69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a $ docker network inspect simple-network [ { "Name": "simple-network", "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", "Scope": "local", "Driver": "bridge", "IPAM": { "Driver": "default", "Config": [ { "Subnet": "172.22.0.0/16", "Gateway": "172.22.0.1/16" } ] }, "Containers": {}, "Options": {} } ] ``` # OPTIONS **-f**, **--format**="" Format the output using the given go template. 
**--help** Print usage statement # HISTORY OCT 2015, created by Mary Anthony docker-1.10.3/man/docker-network-ls.1.md000066400000000000000000000100211267010174400176450ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % OCT 2015 # NAME docker-network-ls - list networks # SYNOPSIS **docker network ls** [**-f**|**--filter**[=*[]*]] [**--no-trunc**[=*true*|*false*]] [**-q**|**--quiet**[=*true*|*false*]] [**--help**] # DESCRIPTION Lists all the networks the Engine `daemon` knows about. This includes the networks that span across multiple hosts in a cluster, for example: ```bash $ docker network ls NETWORK ID NAME DRIVER 7fca4eb8c647 bridge bridge 9f904ee27bf5 none null cf03ee007fb4 host host 78b03ee04fc4 multi-host overlay ``` Use the `--no-trunc` option to display the full network id: ```bash $ docker network ls --no-trunc NETWORK ID NAME DRIVER 18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host 7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge 95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge 63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge ``` ## Filtering The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). Multiple filter flags are combined as an `OR` filter. For example, `-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. The currently supported filters are: * id (network's id) * name (network's name) * type (custom|builtin) #### Type The `type` filter supports two values; `builtin` displays predefined networks (`bridge`, `none`, `host`), whereas `custom` displays user defined networks. 
The following filter matches all user defined networks: ```bash $ docker network ls --filter type=custom NETWORK ID NAME DRIVER 95e74588f40d foo bridge 63d1ff1f77b0 dev bridge ``` By having this flag it allows for batch cleanup. For example, use this filter to delete all user defined networks: ```bash $ docker network rm `docker network ls --filter type=custom -q` ``` A warning will be issued when trying to remove a network that has containers attached. #### Name The `name` filter matches on all or part of a network's name. The following filter matches all networks with a name containing the `foobar` string. ```bash $ docker network ls --filter name=foobar NETWORK ID NAME DRIVER 06e7eef0a170 foobar bridge ``` You can also filter for a substring in a name as this shows: ```bash $ docker ps --filter name=foo NETWORK ID NAME DRIVER 95e74588f40d foo bridge 06e7eef0a170 foobar bridge ``` #### ID The `id` filter matches on all or part of a network's ID. The following filter matches all networks with a name containing the `06e7eef01700` string. ```bash $ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 NETWORK ID NAME DRIVER 63d1ff1f77b0 dev bridge ``` You can also filter for a substring in a ID as this shows: ```bash $ docker ps --filter id=95e74588f40d NETWORK ID NAME DRIVER 95e74588f40d foo bridge $ docker ps --filter id=95e NETWORK ID NAME DRIVER 95e74588f40d foo bridge ``` # OPTIONS **-f**, **--filter**=*[]* filter output based on conditions provided. 
**--no-trunc**=*true*|*false* Do not truncate the output **-q**, **--quiet**=*true*|*false* Only display numeric IDs **--help** Print usage statement # HISTORY OCT 2015, created by Mary Anthony docker-1.10.3/man/docker-network-rm.1.md000066400000000000000000000021001267010174400176440ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % OCT 2015 # NAME docker-network-rm - remove one or more networks # SYNOPSIS **docker network rm** [**--help**] NETWORK [NETWORK...] # DESCRIPTION Removes one or more networks by name or identifier. To remove a network, you must first disconnect any containers connected to it. To remove the network named 'my-network': ```bash $ docker network rm my-network ``` To delete multiple networks in a single `docker network rm` command, provide multiple network names or id's. The following example deletes a network with id `3695c422697f` and a network named `my-network`: ```bash $ docker network rm 3695c422697f my-network ``` When you specify multiple networks, the command attempts to delete each in turn. If the deletion of one network fails, the command continues to the next on the list and tries to delete that. The command reports success or failure for each deletion. # OPTIONS **NETWORK** Specify network name or id **--help** Print usage statement # HISTORY OCT 2015, created by Mary Anthony docker-1.10.3/man/docker-pause.1.md000066400000000000000000000015431267010174400166660ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-pause - Pause all processes within a container # SYNOPSIS **docker pause** CONTAINER [CONTAINER...] # DESCRIPTION The `docker pause` command uses the cgroups freezer to suspend all processes in a container. Traditionally when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. 
With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. See the [cgroups freezer documentation] (https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. # OPTIONS There are no available options. # See also **docker-unpause(1)** to unpause all processes within a container. # HISTORY June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-port.1.md000066400000000000000000000025711267010174400165370ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-port - List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT # SYNOPSIS **docker port** [**--help**] CONTAINER [PRIVATE_PORT[/PROTO]] # DESCRIPTION List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT # OPTIONS **--help** Print usage statement # EXAMPLES # docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test ## Find out all the ports mapped # docker port test 7890/tcp -> 0.0.0.0:4321 9876/tcp -> 0.0.0.0:1234 ## Find out a specific mapping # docker port test 7890/tcp 0.0.0.0:4321 # docker port test 7890 0.0.0.0:4321 ## An example showing error for non-existent mapping # docker port test 7890/udp 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) June 2014, updated by Sven Dowideit November 2014, updated by Sven Dowideit docker-1.10.3/man/docker-ps.1.md000066400000000000000000000105361267010174400161750ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % FEBRUARY 2015 # NAME docker-ps - List containers # SYNOPSIS **docker ps** [**-a**|**--all**] [**-f**|**--filter**[=*[]*]] [**--format**=*"TEMPLATE"*] 
[**--help**] [**-l**|**--latest**] [**-n**[=*-1*]] [**--no-trunc**] [**-q**|**--quiet**] [**-s**|**--size**] # DESCRIPTION List the containers in the local repository. By default this shows only the running containers. # OPTIONS **-a**, **--all**=*true*|*false* Show all containers. Only running containers are shown by default. The default is *false*. **-f**, **--filter**=[] Filter output based on these conditions: - exited= an exit code of - label= or label== - status=(created|restarting|running|paused|exited|dead) - name= a container's name - id= a container's ID - before=(|) - since=(|) - ancestor=([:tag]||) - containers created from an image or a descendant. **--format**="*TEMPLATE*" Pretty-print containers using a Go template. Valid placeholders: .ID - Container ID .Image - Image ID .Command - Quoted command .CreatedAt - Time when the container was created. .RunningFor - Elapsed time since the container was started. .Ports - Exposed ports. .Status - Container status. .Size - Container disk size. .Labels - All labels assigned to the container. .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}` **--help** Print usage statement **-l**, **--latest**=*true*|*false* Show only the latest created container (includes all states). The default is *false*. **-n**=*-1* Show n last created containers (includes all states). **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. **-q**, **--quiet**=*true*|*false* Only display numeric IDs. The default is *false*. **-s**, **--size**=*true*|*false* Display total file sizes. The default is *false*. 
# EXAMPLES # Display all containers, including non-running # docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike # Display only IDs of all containers, including non-running # docker ps -a -q a87ecb4f327c 01946d9d34d8 c1d3b0166030 41d50ecd2f57 # Display only IDs of all containers that have the name `determined_torvalds` # docker ps -a -q --filter=name=determined_torvalds c1d3b0166030 # Display containers with their commands # docker ps --format "{{.ID}}: {{.Command}}" a87ecb4f327c: /bin/sh -c #(nop) MA 01946d9d34d8: /bin/sh -c #(nop) MA c1d3b0166030: /bin/sh -c yum -y up 41d50ecd2f57: /bin/sh -c #(nop) MA # Display containers with their labels in a table # docker ps --format "table {{.ID}}\t{{.Labels}}" CONTAINER ID LABELS a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd 01946d9d34d8 c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd # Display containers with their node label in a table # docker ps --format 'table {{.ID}}\t{{(.Label "com.docker.swarm.node")}}' CONTAINER ID NODE a87ecb4f327c ubuntu 01946d9d34d8 c1d3b0166030 debian 41d50ecd2f57 fedora # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit August 2014, updated by Sven Dowideit November 2014, updated by Sven Dowideit February 2015, updated by André Martins docker-1.10.3/man/docker-pull.1.md000066400000000000000000000062051267010174400165250ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-pull - Pull an image or a repository from a registry # SYNOPSIS **docker pull** [**-a**|**--all-tags**] [**--help**] NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] # DESCRIPTION This command pulls down an image or a repository from a registry. If there is more than one image for a repository (e.g., fedora) then all images for that repository name can be pulled down including any tags (see the option **-a** or **--all-tags**). If you do not specify a `REGISTRY_HOST`, the command uses Docker's public registry located at `registry-1.docker.io` by default. # OPTIONS **-a**, **--all-tags**=*true*|*false* Download all tagged images in the repository. The default is *false*. **--help** Print usage statement # EXAMPLE ## Pull a repository with multiple images with the -a|--all-tags option set to true. Note that if the image is previously downloaded then the status would be `Status: Image is up to date for fedora`. $ docker pull --all-tags fedora Pulling repository fedora ad57ef8d78d7: Download complete 105182bb5e8b: Download complete 511136ea3c5a: Download complete 73bd853d2ea5: Download complete Status: Downloaded newer image for fedora $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB fedora 20 105182bb5e8b 5 days ago 372.7 MB fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB fedora latest 105182bb5e8b 5 days ago 372.7 MB ## Pull a repository with the -a|--all-tags option set to false (this is the default). 
$ docker pull debian Using default tag: latest latest: Pulling from library/debian 2c49f83e0b13: Pull complete 4a5e6db8c069: Pull complete Status: Downloaded newer image for debian:latest $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE debian latest 4a5e6db8c069 5 days ago 125.1 MB ## Pull an image, manually specifying path to Docker's public registry and tag Note that if the image is previously downloaded then the status would be `Status: Image is up to date for registry.hub.docker.com/fedora:20` $ docker pull registry.hub.docker.com/fedora:20 Pulling repository fedora 3f2fed40e4b0: Download complete 511136ea3c5a: Download complete fd241224e9cf: Download complete Status: Downloaded newer image for registry.hub.docker.com/fedora:20 $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE fedora 20 3f2fed40e4b0 4 days ago 372.7 MB # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit August 2014, updated by Sven Dowideit April 2015, updated by John Willis April 2015, updated by Mary Anthony for v2 September 2015, updated by Sally O'Malley docker-1.10.3/man/docker-push.1.md000066400000000000000000000030531267010174400165260ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-push - Push an image or a repository to a registry # SYNOPSIS **docker push** [**--help**] NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] # DESCRIPTION This command pushes an image or a repository to a registry. If you do not specify a `REGISTRY_HOST`, the command uses Docker's public registry located at `registry-1.docker.io` by default. # OPTIONS **--help** Print usage statement # EXAMPLES # Pushing a new image to a registry First save the new image by finding the container ID (using **docker ps**) and then committing it to a new image name. Note that only a-z0-9-_. 
are allowed when naming images: # docker commit c16378f943fe rhel-httpd Now, push the image to the registry using the image ID. In this example the registry is on host named `registry-host` and listening on port `5000`. To do this, tag the image with the host name or IP address, and the port of the registry: # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd # docker push registry-host:5000/myadmin/rhel-httpd Check that this worked by running: # docker images You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` listed. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 June 2015, updated by Sally O'Malley docker-1.10.3/man/docker-rename.1.md000066400000000000000000000004301267010174400170120ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % OCTOBER 2014 # NAME docker-rename - Rename a container # SYNOPSIS **docker rename** OLD_NAME NEW_NAME # OPTIONS There are no available options. # DESCRIPTION Rename a container. Container may be running, paused or stopped. docker-1.10.3/man/docker-restart.1.md000066400000000000000000000012211267010174400172260ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-restart - Restart a container # SYNOPSIS **docker restart** [**--help**] [**-t**|**--time**[=*10*]] CONTAINER [CONTAINER...] # DESCRIPTION Restart each container listed. # OPTIONS **--help** Print usage statement **-t**, **--time**=*10* Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-rm.1.md000066400000000000000000000042711267010174400161700ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-rm - Remove one or more containers # SYNOPSIS **docker rm** [**-f**|**--force**] [**-l**|**--link**] [**-v**|**--volumes**] CONTAINER [CONTAINER...] # DESCRIPTION **docker rm** will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the **-f** option. To see all containers on a host use the **docker ps -a** command. # OPTIONS **--help** Print usage statement **-f**, **--force**=*true*|*false* Force the removal of a running container (uses SIGKILL). The default is *false*. **-l**, **--link**=*true*|*false* Remove the specified link and not the underlying container. The default is *false*. **-v**, **--volumes**=*true*|*false* Remove the volumes associated with the container. The default is *false*. # EXAMPLES ## Removing a container using its ID To remove a container using its ID, find either from a **docker ps -a** command, or use the ID returned from the **docker run** command, or retrieve it from a file used to store it using the **docker run --cidfile**: docker rm abebf7571666 ## Removing a container using the container name The name of the container can be found using the **docker ps -a** command. Then use that name as follows: docker rm hopeful_morse ## Removing a container and all associated volumes $ docker rm -v redis redis This command will remove the container and any volumes associated with it. Note that if a volume was specified with a name, it will not be removed. $ docker create -v awesome:/foo -v /bar --name hello redis hello $ docker rm -v hello In this example, the volume for `/foo` will remain intact, but the volume for `/bar` will be removed. The same behavior holds for volumes inherited with `--volumes-from`. 
# HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit August 2014, updated by Sven Dowideit docker-1.10.3/man/docker-rmi.1.md000066400000000000000000000020411267010174400163320ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-rmi - Remove one or more images # SYNOPSIS **docker rmi** [**-f**|**--force**] [**--help**] [**--no-prune**] IMAGE [IMAGE...] # DESCRIPTION Removes one or more images from the host node. This does not remove images from a registry. You cannot remove an image of a running container unless you use the **-f** option. To see all images on a host use the **docker images** command. # OPTIONS **-f**, **--force**=*true*|*false* Force removal of the image. The default is *false*. **--help** Print usage statement **--no-prune**=*true*|*false* Do not delete untagged parents. The default is *false*. # EXAMPLES ## Removing an image Here is an example of removing an image: docker rmi fedora/httpd # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.10.3/man/docker-run.1.md000066400000000000000000001131641267010174400163600ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-run - Run a command in a new container # SYNOPSIS **docker run** [**-a**|**--attach**[=*[]*]] [**--add-host**[=*[]*]] [**--blkio-weight**[=*[BLKIO-WEIGHT]*]] [**--blkio-weight-device**[=*[]*]] [**--cpu-shares**[=*0*]] [**--cap-add**[=*[]*]] [**--cap-drop**[=*[]*]] [**--cgroup-parent**[=*CGROUP-PATH*]] [**--cidfile**[=*CIDFILE*]] [**--cpu-period**[=*0*]] [**--cpu-quota**[=*0*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] [**--cpuset-mems**[=*CPUSET-MEMS*]] [**-d**|**--detach**] [**--detach-keys**[=*[]*]] [**--device**[=*[]*]] [**--device-read-bps**[=*[]*]] [**--device-read-iops**[=*[]*]] [**--device-write-bps**[=*[]*]] [**--device-write-iops**[=*[]*]] [**--dns**[=*[]*]] [**--dns-opt**[=*[]*]] [**--dns-search**[=*[]*]] [**-e**|**--env**[=*[]*]] [**--entrypoint**[=*ENTRYPOINT*]] [**--env-file**[=*[]*]] [**--expose**[=*[]*]] [**--group-add**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**--help**] [**-i**|**--interactive**] [**--ip**[=*IPv4-ADDRESS*]] [**--ip6**[=*IPv6-ADDRESS*]] [**--ipc**[=*IPC*]] [**--isolation**[=*default*]] [**--kernel-memory**[=*KERNEL-MEMORY*]] [**-l**|**--label**[=*[]*]] [**--label-file**[=*[]*]] [**--link**[=*[]*]] [**--log-driver**[=*[]*]] [**--log-opt**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] [**--mac-address**[=*MAC-ADDRESS*]] [**--memory-reservation**[=*MEMORY-RESERVATION*]] [**--memory-swap**[=*LIMIT*]] [**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] [**--net-alias**[=*[]*]] [**--oom-kill-disable**] [**--oom-score-adj**[=*0*]] [**-P**|**--publish-all**] [**-p**|**--publish**[=*[]*]] [**--pid**[=*[]*]] [**--privileged**] [**--read-only**] [**--restart**[=*RESTART*]] [**--rm**] [**--security-opt**[=*[]*]] [**--stop-signal**[=*SIGNAL*]] 
[**--shm-size**[=*[]*]] [**--sig-proxy**[=*true*]] [**-t**|**--tty**] [**--tmpfs**[=*[CONTAINER-DIR[:]*]] [**-u**|**--user**[=*USER*]] [**--ulimit**[=*[]*]] [**--uts**[=*[]*]] [**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] [**--volume-driver**[=*DRIVER*]] [**--volumes-from**[=*[]*]] [**-w**|**--workdir**[=*WORKDIR*]] IMAGE [COMMAND] [ARG...] # DESCRIPTION Run a process in a new container. **docker run** starts a process with its own file system, its own networking, and its own isolated process tree. The IMAGE which starts the process may define defaults related to the process that will be run in the container, the networking to expose, and more, but **docker run** gives final control to the operator or administrator who starts the container from the image. For that reason **docker run** has more options than any other Docker command. If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and all image dependencies, from the repository in the same way running **docker pull** IMAGE, before it starts the container from that image. # OPTIONS **-a**, **--attach**=[] Attach to STDIN, STDOUT or STDERR. In foreground mode (the default when **-d** is not specified), **docker run** can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The **-a** option can be set for each of stdin, stdout, and stderr. **--add-host**=[] Add a custom host-to-IP mapping (host:ip) Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times. **--blkio-weight**=*0* Block IO weight (relative weight) accepts a weight value between 10 and 1000. **--blkio-weight-device**=[] Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). 
**--cpu-shares**=*0* CPU shares (relative weight) By default, all containers get the same proportion of CPU cycles. This proportion can be modified by changing the container's CPU share weighting relative to the weighting of all other running containers. To modify the proportion from the default of 1024, use the **--cpu-shares** flag to set the weighting to 2 or higher. The proportion will only apply when CPU-intensive processes are running. When tasks in one container are idle, other containers can use the left-over CPU time. The actual amount of CPU time will vary depending on the number of containers running on the system. For example, consider three containers, one has a cpu-share of 1024 and two others have a cpu-share setting of 512. When processes in all three containers attempt to use 100% of CPU, the first container would receive 50% of the total CPU time. If you add a fourth container with a cpu-share of 1024, the first container only gets 33% of the CPU. The remaining containers receive 16.5%, 16.5% and 33% of the CPU. On a multi-core system, the shares of CPU time are distributed over all CPU cores. Even if a container is limited to less than 100% of CPU time, it can use 100% of each individual CPU core. For example, consider a system with more than three cores. If you start one container **{C0}** with **-c=512** running one process, and another container **{C1}** with **-c=1024** running two processes, this can result in the following division of CPU shares: PID container CPU CPU share 100 {C0} 0 100% of CPU0 101 {C1} 1 100% of CPU1 102 {C1} 2 100% of CPU2 **--cap-add**=[] Add Linux capabilities **--cap-drop**=[] Drop Linux capabilities **--cgroup-parent**="" Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. 
**--cidfile**="" Write the container ID to the file **--cpu-period**=*0* Limit the CPU CFS (Completely Fair Scheduler) period Limit the container's CPU usage. This flag tells the kernel to restrict the container's CPU usage to the period you specify. **--cpuset-cpus**="" CPUs in which to allow execution (0-3, 0,1) **--cpuset-mems**="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` then processes in your Docker container will only use memory from the first two memory nodes. **--cpu-quota**=*0* Limit the CPU CFS (Completely Fair Scheduler) quota Limit the container's CPU usage. By default, containers run with the full CPU resource. This flag tells the kernel to restrict the container's CPU usage to the quota you specify. **-d**, **--detach**=*true*|*false* Detached mode: run the container in the background and print the new container ID. The default is *false*. At any time you can run **docker ps** in the other shell to view a list of the running containers. You can reattach to a detached container with **docker attach**. If you choose to run a container in the detached mode, then you cannot use the **-rm** option. When attached in the tty mode, you can detach from the container (and leave it running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`. You configure the key sequence using the **--detach-keys** option or a configuration file. See **config-json(5)** for documentation on using a configuration file. **--detach-keys**="" Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. **--device**=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--device-read-bps**=[] Limit read rate from a device (e.g. --device-read-bps=/dev/sda:1mb) **--device-read-iops**=[] Limit read rate from a device (e.g. 
--device-read-iops=/dev/sda:1000) **--device-write-bps**=[] Limit write rate to a device (e.g. --device-write-bps=/dev/sda:1mb) **--device-write-iops**=[] Limit write rate to a device (e.g. --device-write-iops=/dev/sda:1000) **--dns-search**=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) **--dns-opt**=[] Set custom DNS options **--dns**=[] Set custom DNS servers This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case the **--dns** flag is necessary for every run. **-e**, **--env**=[] Set environment variables This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. **--entrypoint**="" Overwrite the default ENTRYPOINT of the image This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a **--entrypoint** and a string to specify the new ENTRYPOINT. **--env-file**=[] Read in a line delimited file of environment variables **--expose**=[] Expose a port, or a range of ports (e.g. --expose=3300-3310) informs Docker that the container listens on the specified network ports at runtime. 
Docker uses this information to interconnect containers using links and to set up port redirection on the host system. **--group-add**=[] Add additional groups to run as **-h**, **--hostname**="" Container host name Sets the container host name that is available inside the container. **--help** Print usage statement **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. When set to true, keep stdin open even if not attached. The default is false. **--ip**="" Sets the container's interface IPv4 address (e.g. 172.23.0.9) It can only be used in conjunction with **--net** for user-defined networks **--ip6**="" Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) It can only be used in conjunction with **--net** for user-defined networks **--ipc**="" Default is to create a private IPC namespace (POSIX SysV IPC) for the container 'container:': reuses another container shared memory, semaphores and message queues 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. **--isolation**="*default*" Isolation specifies the type of isolation technology used by containers. **-l**, **--label**=[] Set metadata on the container (e.g., --label com.example.key=value) **--kernel-memory**="" Kernel memory limit (format: `[]`, where unit = b, k, m or g) Constrains the kernel memory available to a container. If a limit of 0 is specified (not using `--kernel-memory`), the container's kernel memory is not limited. If you specify a limit, it may be rounded up to a multiple of the operating system's page size and the value can be very large, millions of trillions. 
**--label-file**=[] Read in a line delimited file of labels **--link**=[] Add link to another container in the form of :alias or just in which case the alias will match the name If the operator uses **--link** when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use. **--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*none*" Logging driver for container. Default is defined by daemon `--log-driver` flag. **Warning**: the `docker logs` command works only for the `json-file` and `journald` logging drivers. **--log-opt**=[] Logging driver specific options. **-m**, **--memory**="" Memory limit (format: [], where unit = b, k, m or g) Allows you to constrain the memory available to a container. If the host supports swap memory, then the **-m** memory setting can be larger than physical RAM. If a limit of 0 is specified (not using **-m**), the container's memory is not limited. The actual limit may be rounded up to a multiple of the operating system's page size (the value would be very large, that's millions of trillions). **--memory-reservation**="" Memory soft limit (format: [], where unit = b, k, m or g) After setting memory reservation, when the system detects memory contention or low memory, containers are forced to restrict their consumption to their reservation. So you should always set the value below **--memory**, otherwise the hard limit will take precedence. By default, memory reservation will be the same as memory limit. **--memory-swap**="LIMIT" A limit value equal to memory plus swap. Must be used with the **-m** (**--memory**) flag. The swap `LIMIT` should always be larger than **-m** (**--memory**) value. The format of `LIMIT` is `[]`. Unit can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). 
If you don't specify a unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. **--mac-address**="" Container MAC address (e.g. 92:d0:c6:0a:29:33) Remember that the MAC address in an Ethernet network must be unique. The IPv6 link-local address will be based on the device's MAC address according to RFC4862. **--name**="" Assign a name to the container The operator can identify a container in three ways: UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) UUID short identifier (“f78375b1c487”) Name (“jonah”) The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with **--name** then the daemon will also generate a random string name. The name is useful when defining links (see **--link**) (or any other place you need to identify a container). This works for both background and foreground Docker containers. **--net**="*bridge*" Set the Network mode for the container 'bridge': create a network stack on the default Docker bridge 'none': no networking 'container:': reuse another container's network stack 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. '|': connect to a user-defined network **--net-alias**=[] Add network-scoped alias for the container **--oom-kill-disable**=*true*|*false* Whether to disable OOM Killer for the container or not. **--oom-score-adj**="" Tune the host's OOM preferences for containers (accepts -1000 to 1000) **-P**, **--publish-all**=*true*|*false* Publish all exposed ports to random ports on the host interfaces. The default is *false*. When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. 
When using -P, Docker will bind any exposed port to a random port on the host within an *ephemeral port range* defined by `/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host ports and the exposed ports, use `docker port`. **-p**, **--publish**=[] Publish a container's port, or range of ports, to the host. Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort` Both hostPort and containerPort can be specified as a range of ports. When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `docker run -p 1234-1236:1222-1224 --name thisWorks -t busybox` but not `docker run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`) With ip: `docker run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage` Use `docker port` to see the actual mapping: `docker port CONTAINER $CONTAINERPORT` **--pid**=*host* Set the PID mode for the container **host**: use the host's PID namespace inside the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. **--uts**=*host* Set the UTS mode for the container **host**: use the host's UTS namespace inside the container. Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. **--privileged**=*true*|*false* Give extended privileges to this container. The default is *false*. By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices. 
When the operator executes **docker run --privileged**, Docker will enable access to all devices on the host as well as set some configuration in AppArmor to allow the container nearly all the same access to the host as processes running outside of a container on the host. **--read-only**=*true*|*false* Mount the container's root filesystem as read only. By default a container will have its root filesystem writable allowing processes to write files anywhere. By specifying the `--read-only` flag the container will have its root filesystem mounted as read only prohibiting any writes. **--restart**="*no*" Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). **--rm**=*true*|*false* Automatically remove the container when it exits (incompatible with -d). The default is *false*. **--security-opt**=[] Security Options "label:user:USER" : Set the label user for the container "label:role:ROLE" : Set the label role for the container "label:type:TYPE" : Set the label type for the container "label:level:LEVEL" : Set the label level for the container "label:disable" : Turn off label confinement for the container **--stop-signal**=*SIGTERM* Signal to stop a container. Default is SIGTERM. **--shm-size**="" Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. **--sig-proxy**=*true*|*false* Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. **--memory-swappiness**="" Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. When set to true Docker can allocate a pseudo-tty and attach to the standard input of any container. 
This can be used, for example, to run a throwaway interactive shell. The default is false. The **-t** option is incompatible with a redirection of the docker client standard input. **--tmpfs**=[] Create a tmpfs mount Mount a temporary filesystem (`tmpfs`) mount into a container, for example: $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image This command mounts a `tmpfs` at `/tmp` within the container. The supported mount options are the same as the Linux default `mount` flags. If you do not specify any options, the systems uses the following options: `rw,noexec,nosuid,nodev,size=65536k`. **-u**, **--user**="" Sets the username or UID used and optionally the groupname or GID for the specified command. The followings examples are all valid: --user [user | user:group | uid | uid:gid | user:gid | uid:group ] Without this argument the command will be run as root in the container. **--ulimit**=[] Ulimit options **-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker container. If 'HOST-DIR' is omitted, Docker automatically creates the new volume on the host. The `OPTIONS` are a comma delimited list and can be: * [rw|ro] * [z|Z] * [`[r]shared`|`[r]slave`|`[r]private`] The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` can be an absolute path or a `name` value. A `name` value must start with an alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). An absolute path starts with a `/` (forward slash). If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the path you specify. If you supply a `name`, Docker creates a named volume by that `name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` value. If you supply the `/foo` value, Docker creates a bind-mount. 
If you supply the `foo` specification, Docker creates a named volume. You can specify multiple **-v** options to mount one or more mounts to a container. To use these same mounts in other containers, specify the **--volumes-from** option also. You can add `:ro` or `:rw` suffix to a volume to mount it read-only or read-write mode, respectively. By default, the volumes are mounted read-write. See examples. Labeling systems like SELinux require that proper labels are placed on volume content mounted into a container. Without a label, the security system might prevent the processes running inside the container from using the content. By default, Docker does not change the labels set by the OS. To change a label in the container context, you can add either of two suffixes `:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file objects on the shared volumes. The `z` option tells Docker that two containers share the volume content. As a result, Docker labels the content with a shared content label. Shared volume labels allow all containers to read/write content. The `Z` option tells Docker to label the content with a private unshared label. Only the current container can use a private volume. By default bind mounted volumes are `private`. That means any mounts done inside container will not be visible on host and vice-a-versa. One can change this behavior by specifying a volume mount propagation property. Making a volume `shared` mounts done under that volume inside container will be visible on host and vice-a-versa. Making a volume `slave` enables only one way mount propagation and that is mounts done on host under that volume will be visible inside container but not the other way around. To control mount propagation property of volume one can use `:[r]shared`, `:[r]slave` or `:[r]private` propagation flag. Propagation property can be specified only for bind mounted volumes and not for internal volumes or named volumes. 
For mount propagation to work source mount point (mount point where source dir is mounted on) has to have right propagation properties. For shared volumes, source mount point has to be shared. And for slave volumes, source mount has to be either shared or slave. Use `df <source-dir>` to figure out the source mount and then use `findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out propagation properties of source mount. If `findmnt` utility is not available, then one can look at mount entry for source mount point in `/proc/self/mountinfo`. Look at `optional fields` and see if any propagation properties are specified. `shared:X` means mount is `shared`, `master:X` means mount is `slave` and if nothing is there that means mount is `private`. To change propagation properties of a mount point use `mount` command. For example, if one wants to bind mount source directory `/foo` one can do `mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This will convert /foo into a `shared` mount point. Alternatively one can directly change propagation properties of source mount. Say `/` is source mount for `/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. > **Note**: > When using systemd to manage the Docker daemon's start and stop, in the systemd > unit file there is an option to control mount propagation for the Docker daemon > itself, called `MountFlags`. The value of this setting may cause Docker to not > see mount propagation changes made on the mount point. For example, if this value > is `slave`, you may not be able to use the `shared` or `rshared` propagation on > a volume. **--volume-driver**="" Container's volume driver. This driver creates volumes specified either from a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. See **docker-volume-create(1)** for full details. **--volumes-from**=[] Mount volumes from the specified container(s) Mounts already mounted volumes from a source container onto another container. 
You must supply the source's container-id. To share a volume, use the **--volumes-from** option when running the target container. You can share volumes even if the source container is not running. By default, Docker mounts the volumes in the same mode (read-write or read-only) as it is mounted in the source container. Optionally, you can change this by suffixing the container-id with either the `:ro` or `:rw ` keyword. If the location of the volume from the source container overlaps with data residing on a target container, then the volume hides that data on the target. **-w**, **--workdir**="" Working directory inside the container The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the **-w** option. # Exit Status The exit code from `docker run` gives information about why the container failed to run or why it exited. When `docker run` exits with a non-zero code, the exit codes follow the `chroot` standard, see below: **_125_** if the error is with Docker daemon **_itself_** $ docker run --foo busybox; echo $? # flag provided but not defined: --foo See 'docker run --help'. 125 **_126_** if the **_contained command_** cannot be invoked $ docker run busybox /etc; echo $? # exec: "/etc": permission denied docker: Error response from daemon: Contained command could not be invoked 126 **_127_** if the **_contained command_** cannot be found $ docker run busybox foo; echo $? # exec: "foo": executable file not found in $PATH docker: Error response from daemon: Contained command not found or does not exist 127 **_Exit code_** of **_contained command_** otherwise $ docker run busybox /bin/sh -c 'exit 3' # 3 # EXAMPLES ## Running container in read-only mode During container image development, containers often need to write to the image content. Installing packages into /usr, for example. 
In production, applications seldom need to write to the image. Container applications write to volumes if they need to write to file systems at all. Applications can be made more secure by running them in read-only mode using the --read-only switch. This protects the container's image from modification. Read only containers may still need to write temporary data. The best way to handle this is to mount tmpfs directories on /run and /tmp. # docker run --read-only --tmpfs /run --tmpfs /tmp -i -t fedora /bin/bash ## Exposing log messages from the container to the host's log If you want messages that are logged in your container to show up in the host's syslog/journal then you should bind mount the /dev/log directory as follows. # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash From inside the container you can test this by sending a message to the log. (bash)# logger "Hello from my container" Then exit and check the journal. # exit # journalctl -b | grep Hello This should list the message sent to logger. ## Attaching to one or more of STDIN, STDOUT, STDERR If you do not specify -a then Docker will attach everything (stdin, stdout, stderr). 
You can specify to which of the three standard streams (stdin, stdout, stderr) you’d like to connect instead, as in: # docker run -a stdin -a stdout -i -t fedora /bin/bash ## Sharing IPC between containers Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html Testing `--ipc=host` mode: Host shows a shared memory segment with 7 pids attached, happens to be from httpd: ``` $ sudo ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status 0x01128e25 0 root 600 1000 7 ``` Now run a regular container, and it correctly does NOT see the shared memory segment from the host: ``` $ docker run -it shm ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status ``` Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: ``` $ docker run -it --ipc=host shm ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status 0x01128e25 0 root 600 1000 7 ``` Testing `--ipc=container:CONTAINERID` mode: Start a container with a program to create a shared memory segment: ``` $ docker run -it shm bash $ sudo shm/shm_server & $ sudo ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status 0x0000162e 0 root 666 27 1 ``` Create a 2nd container correctly shows no shared memory segment from 1st container: ``` $ docker run shm ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status ``` Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first: ``` $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m $ sudo ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status 0x0000162e 0 root 666 27 1 ``` ## Linking Containers > **Note**: This section describes linking between containers on the > default (bridge) network, also known as "legacy links". 
Using `--link` > on user-defined networks uses the DNS-based discovery, which does not add > entries to `/etc/hosts`, and does not set environment variables for > discovery. The link feature allows multiple containers to communicate with each other. For example, a container whose Dockerfile has exposed port 80 can be run and named as follows: # docker run --name=link-test -d -i -t fedora/httpd A second container, in this case called linker, can communicate with the httpd container, named link-test, by running with the **--link=:** # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash Now the container linker is linked to container link-test with the alias lt. Running the **env** command in the linker container shows environment variables with the LT (alias) context (**LT_**) # env HOSTNAME=668231cb0978 TERM=xterm LT_PORT_80_TCP=tcp://172.17.0.3:80 LT_PORT_80_TCP_PORT=80 LT_PORT_80_TCP_PROTO=tcp LT_PORT=tcp://172.17.0.3:80 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PWD=/ LT_NAME=/linker/lt SHLVL=1 HOME=/ LT_PORT_80_TCP_ADDR=172.17.0.3 _=/usr/bin/env When linking two containers Docker will use the exposed ports of the container to create a secure tunnel for the parent to access. If a container is connected to the default bridge network and `linked` with other containers, then the container's `/etc/hosts` file is updated with the linked container's name. > **Note** Since Docker may live update the container’s `/etc/hosts` file, there may be situations when processes inside the container can end up reading an empty or incomplete `/etc/hosts` file. In most cases, retrying the read again should fix the problem. ## Mapping Ports for External Usage The exposed port of an application can be mapped to a host port using the **-p** flag. 
For example, a httpd port 80 can be mapped to the host port 8080 using the following: # docker run -p 8080:80 -d -i -t fedora/httpd ## Creating and Mounting a Data Volume Container Many applications require the sharing of persistent data across several containers. Docker allows you to create a Data Volume Container that other containers can mount from. For example, create a named container that contains directories /var/volume1 and /tmp/volume2. The image will need to contain these directories so a couple of RUN mkdir instructions might be required for you fedora-data image: # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing to abstract the actual data source from users of that data: # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash ## Mounting External Volumes To mount a host directory as a container volume, specify the absolute path to the directory and the absolute path for the container directory separated by a colon: # docker run -v /var/db:/data1 -i -t fedora bash When using SELinux, be aware that the host has no knowledge of container SELinux policy. Therefore, in the above example, if SELinux policy is enforced, the `/var/db` directory is not writable to the container. A "Permission Denied" message will occur and an avc: message in the host's syslog. 
To work around this, at time of writing this man page, the following command needs to be run in order for the proper SELinux policy type label to be attached to the host directory: # chcon -Rt svirt_sandbox_file_t /var/db Now, writing to the /data1 volume in the container will be allowed and the changes will also be reflected on the host in /var/db. ## Using alternative security labeling You can override the default labeling scheme for each container by specifying the `--security-opt` flag. For example, you can specify the MCS/MLS level, a requirement for MLS systems. Specifying the level in the following command allows you to share the same content between containers. # docker run --security-opt label:level:s0:c100,c200 -i -t fedora bash An MLS example might be: # docker run --security-opt label:level:TopSecret -i -t rhel7 bash To disable the security labeling for this container versus running with the `--privileged` flag, use the following command: # docker run --security-opt label:disable -i -t fedora bash If you want a tighter security policy on the processes within a container, you can specify an alternate type for the container. You could run a container that is only allowed to listen on Apache ports by executing the following command: # docker run --security-opt label:type:svirt_apache_t -i -t centos bash Note: You would have to write policy defining a `svirt_apache_t` type. ## Setting device weight If you want to set `/dev/sda` device weight to `200`, you can specify the device weight by `--blkio-weight-device` flag. Use the following command: # docker run -it --blkio-weight-device "/dev/sda:200" ubuntu ## Specify isolation technology for container (--isolation) This option is useful in situations where you are running Docker containers on Microsoft Windows. The `--isolation ` option sets a container's isolation technology. On Linux, the only supported option is `default`, which uses Linux namespaces. 
These two commands are equivalent on Linux: ``` $ docker run -d busybox top $ docker run -d --isolation default busybox top ``` On Microsoft Windows, can take any of these values: * `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. * `process`: Namespace isolation only. * `hyperv`: Hyper-V hypervisor partition-based isolation. In practice, when running on Microsoft Windows without a `daemon` option set, these two commands are equivalent: ``` $ docker run -d --isolation default busybox top $ docker run -d --isolation process busybox top ``` If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation: ``` $ docker run -d --isolation default busybox top $ docker run -d --isolation hyperv busybox top ``` # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit November 2015, updated by Sally O'Malley docker-1.10.3/man/docker-save.1.md000066400000000000000000000023271267010174400165100ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-save - Save an image(s) to a tar archive (streamed to STDOUT by default) # SYNOPSIS **docker save** [**--help**] [**-o**|**--output**[=*OUTPUT*]] IMAGE [IMAGE...] # DESCRIPTION Produces a tarred repository to the standard output stream. Contains all parent layers, and all tags + versions, or specified repo:tag. Stream to a file instead of STDOUT by using **-o**. 
# OPTIONS **--help** Print usage statement **-o**, **--output**="" Write to a file, instead of STDOUT # EXAMPLES Save all fedora repository images to a fedora-all.tar and save the latest fedora image to a fedora-latest.tar: $ docker save fedora > fedora-all.tar $ docker save --output=fedora-latest.tar fedora:latest $ ls -sh fedora-all.tar 721M fedora-all.tar $ ls -sh fedora-latest.tar 367M fedora-latest.tar # See also **docker-load(1)** to load an image from a tar archive on STDIN. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit November 2014, updated by Sven Dowideit docker-1.10.3/man/docker-search.1.md000066400000000000000000000041621267010174400170160ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-search - Search the Docker Hub for images # SYNOPSIS **docker search** [**--automated**] [**--help**] [**--no-trunc**] [**-s**|**--stars**[=*0*]] TERM # DESCRIPTION Search Docker Hub for images that match the specified `TERM`. The table of images returned displays the name, description (truncated by default), number of stars awarded, whether the image is official, and whether it is automated. *Note* - Search queries will only return up to 25 results # OPTIONS **--automated**=*true*|*false* Only show automated builds. The default is *false*. **--help** Print usage statement **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. **-s**, **--stars**=*X* Only displays with at least X stars. The default is zero. # EXAMPLES ## Search Docker Hub for ranked images Search a registry for the term 'fedora' and only display those images ranked 3 or higher: $ docker search -s 3 fedora NAME DESCRIPTION STARS OFFICIAL AUTOMATED mattdm/fedora A basic Fedora image corresponding roughly... 50 fedora (Semi) Official Fedora base image. 
38 mattdm/fedora-small A small Fedora image on which to build. Co... 8 goldmann/wildfly A WildFly application server running on a ... 3 [OK] ## Search Docker Hub for automated images Search Docker Hub for the term 'fedora' and only display automated images ranked 1 or higher: $ docker search --automated -s 1 fedora NAME DESCRIPTION STARS OFFICIAL AUTOMATED goldmann/wildfly A WildFly application server running on a ... 3 [OK] tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.10.3/man/docker-start.1.md000066400000000000000000000020171267010174400167030ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-start - Start one or more containers # SYNOPSIS **docker start** [**-a**|**--attach**] [**--detach-keys**[=*[]*]] [**--help**] [**-i**|**--interactive**] CONTAINER [CONTAINER...] # DESCRIPTION Start one or more containers. # OPTIONS **-a**, **--attach**=*true*|*false* Attach container's STDOUT and STDERR and forward all signals to the process. The default is *false*. **--detach-keys**="" Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. **--help** Print usage statement **-i**, **--interactive**=*true*|*false* Attach container's STDIN. The default is *false*. # See also **docker-stop(1)** to stop a container. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-stats.1.md000066400000000000000000000030731267010174400167070ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-stats - Display a live stream of one or more containers' resource usage statistics # SYNOPSIS **docker stats** [**-a**|**--all**] [**--help**] [**--no-stream**] [CONTAINER...] # DESCRIPTION Display a live stream of one or more containers' resource usage statistics # OPTIONS **-a**, **--all**=*true*|*false* Show all containers. Only running containers are shown by default. The default is *false*. **--help** Print usage statement **--no-stream**=*true*|*false* Disable streaming stats and only pull the first result, default setting is false. # EXAMPLES Running `docker stats` on all running containers $ docker stats CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O 1285939c1fd3 0.07% 796 KB / 64 MB 1.21% 788 B / 648 B 3.568 MB / 512 KB 9c76f7834ae2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B d1ea048f04e4 0.03% 4.583 MB / 64 MB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B Running `docker stats` on multiple containers by name and id. $ docker stats fervent_panini 5acfcb1b4fd1 CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O 5acfcb1b4fd1 0.00% 115.2 MB/1.045 GB 11.03% 1.422 kB/648 B fervent_panini 0.02% 11.08 MB/1.045 GB 1.06% 648 B/648 B docker-1.10.3/man/docker-stop.1.md000066400000000000000000000014031267010174400165310ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-stop - Stop a container by sending SIGTERM and then SIGKILL after a grace period # SYNOPSIS **docker stop** [**--help**] [**-t**|**--time**[=*10*]] CONTAINER [CONTAINER...] # DESCRIPTION Stop a container (Send SIGTERM, and then SIGKILL after grace period) # OPTIONS **--help** Print usage statement **-t**, **--time**=*10* Number of seconds to wait for the container to stop before killing it. Default is 10 seconds. 
#See also **docker-start(1)** to restart a stopped container. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-tag.1.md000066400000000000000000000035731267010174400163310ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-tag - Tag an image into a repository # SYNOPSIS **docker tag** [**--help**] IMAGE[:TAG] [REGISTRY_HOST/][USERNAME/]NAME[:TAG] # DESCRIPTION Assigns a new alias to an image in a registry. An alias refers to the entire image name including the optional `TAG` after the ':'. If you do not specify a `REGISTRY_HOST`, the command uses Docker's public registry located at `registry-1.docker.io` by default. # "OPTIONS" **--help** Print usage statement. **REGISTRY_HOST** The hostname of the registry if required. This may also include the port separated by a ':' **USERNAME** The username or other qualifying identifier for the image. **NAME** The image name. **TAG** The tag you are assigning to the image. Though this is arbitrary it is recommended to be used for a version to distinguish images with the same name. Also, for consistency tags should only include a-z0-9-_. . Note that here TAG is a part of the overall name or "tag". # EXAMPLES ## Giving an image a new alias Here is an example of aliasing an image (e.g., 0e5574283393) as "httpd" and tagging it into the "fedora" repository with "version1.0": docker tag 0e5574283393 fedora/httpd:version1.0 ## Tagging an image for a private repository To push an image to a private registry and not the central Docker registry you must tag it with the registry hostname and port (if needed). docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 June 2015, updated by Sally O'Malley docker-1.10.3/man/docker-top.1.md000066400000000000000000000017071267010174400163550ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-top - Display the running processes of a container # SYNOPSIS **docker top** [**--help**] CONTAINER [ps OPTIONS] # DESCRIPTION Display the running process of the container. ps-OPTION can be any of the options you would pass to a Linux ps command. All displayed information is from host's point of view. # OPTIONS **--help** Print usage statement # EXAMPLES Run **docker top** with the ps option of -x: $ docker top 8601afda2b -x PID TTY STAT TIME COMMAND 16623 ? Ss 0:00 sleep 99999 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit June 2015, updated by Ma Shimiao December 2015, updated by Pavel Pospisil docker-1.10.3/man/docker-unpause.1.md000066400000000000000000000011611267010174400172250ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-unpause - Unpause all processes within a container # SYNOPSIS **docker unpause** CONTAINER [CONTAINER...] # DESCRIPTION The `docker unpause` command uses the cgroups freezer to un-suspend all processes in a container. See the [cgroups freezer documentation] (https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. # OPTIONS There are no available options. # See also **docker-pause(1)** to pause all processes within a container. 
# HISTORY June 2014, updated by Sven Dowideit docker-1.10.3/man/docker-update.1.md000066400000000000000000000054451267010174400170400ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-update - Update resource configs of one or more containers # SYNOPSIS **docker update** [**--blkio-weight**[=*[BLKIO-WEIGHT]*]] [**--cpu-shares**[=*0*]] [**--cpu-period**[=*0*]] [**--cpu-quota**[=*0*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] [**--cpuset-mems**[=*CPUSET-MEMS*]] [**--help**] [**--kernel-memory**[=*KERNEL-MEMORY*]] [**-m**|**--memory**[=*MEMORY*]] [**--memory-reservation**[=*MEMORY-RESERVATION*]] [**--memory-swap**[=*MEMORY-SWAP*]] CONTAINER [CONTAINER...] # DESCRIPTION The `docker update` command dynamically updates container resources. Use this command to prevent containers from consuming too many resources from their Docker host. With a single command, you can place limits on a single container or on many. To specify more than one container, provide space-separated list of container names or IDs. With the exception of the `--kernel-memory` value, you can specify these options on a running or a stopped container. You can only update `--kernel-memory` on a stopped container. When you run `docker update` on stopped container, the next time you restart it, the container uses those values. # OPTIONS **--blkio-weight**=0 Block IO weight (relative weight) accepts a weight value between 10 and 1000. **--cpu-shares**=0 CPU shares (relative weight) **--cpu-period**=0 Limit the CPU CFS (Completely Fair Scheduler) period **--cpu-quota**=0 Limit the CPU CFS (Completely Fair Scheduler) quota **--cpuset-cpus**="" CPUs in which to allow execution (0-3, 0,1) **--cpuset-mems**="" Memory nodes(MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. 
**--help** Print usage statement **--kernel-memory**="" Kernel memory limit (format: `[]`, where unit = b, k, m or g) Note that you can not update kernel memory to a running container, it can only be updated to a stopped container, and affect after it's started. **-m**, **--memory**="" Memory limit (format: , where unit = b, k, m or g) **--memory-reservation**="" Memory soft limit (format: [], where unit = b, k, m or g) **--memory-swap**="" Total memory limit (memory + swap) # EXAMPLES The following sections illustrate ways to use this command. ### Update a container with cpu-shares=512 To limit a container's cpu-shares to 512, first identify the container name or ID. You can use **docker ps** to find these values. You can also use the ID returned from the **docker run** command. Then, do the following: ```bash $ docker update --cpu-shares 512 abebf7571666 ``` ### Update a container with cpu-shares and memory To update multiple resource configurations for multiple containers: ```bash $ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse ``` docker-1.10.3/man/docker-version.1.md000066400000000000000000000031641267010174400172370ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2015 # NAME docker-version - Show the Docker version information. # SYNOPSIS **docker version** [**--help**] [**-f**|**--format**[=*FORMAT*]] # DESCRIPTION This command displays version information for both the Docker client and daemon. # OPTIONS **--help** Print usage statement **-f**, **--format**="" Format the output using the given go template. 
# EXAMPLES ## Display Docker version information The default output: $ docker version Client: Version: 1.8.0 API version: 1.20 Go version: go1.4.2 Git commit: f5bae0a Built: Tue Jun 23 17:56:00 UTC 2015 OS/Arch: linux/amd64 Server: Version: 1.8.0 API version: 1.20 Go version: go1.4.2 Git commit: f5bae0a Built: Tue Jun 23 17:56:00 UTC 2015 OS/Arch: linux/amd64 Get server version: $ docker version --format '{{.Server.Version}}' 1.8.0 Dump raw data: To view all available fields, you can use the format `{{json .}}`. $ docker version --format '{{json .}}' {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} # HISTORY June 2014, updated by Sven Dowideit June 2015, updated by John Howard June 2015, updated by Patrick Hemmer docker-1.10.3/man/docker-volume-inspect.1.md000066400000000000000000000013341267010174400205210ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JULY 2015 # NAME docker-volume-inspect - Get low-level information about a volume # SYNOPSIS **docker volume inspect** [**-f**|**--format**[=*FORMAT*]] [**--help**] VOLUME [VOLUME...] # DESCRIPTION Returns information about one or more volumes. By default, this command renders all results in a JSON array. You can specify an alternate format to execute a given template is executed for each result. Go's http://golang.org/pkg/text/template/ package describes all the details of the format. # OPTIONS **-f**, **--format**="" Format the output using the given go template. 
**--help** Print usage statement # HISTORY July 2015, created by Brian Goff docker-1.10.3/man/docker-volume-ls.1.md000066400000000000000000000014751267010174400175000ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JULY 2015 # NAME docker-volume-ls - List all volumes # SYNOPSIS **docker volume ls** [**-f**|**--filter**[=*FILTER*]] [**--help**] [**-q**|**--quiet**[=*true*|*false*]] # DESCRIPTION Lists all the volumes Docker knows about. You can filter using the `-f` or `--filter` flag. The filtering format is a `key=value` pair. To specify more than one filter, pass multiple flags (for example, `--filter "foo=bar" --filter "bif=baz"`) There is a single supported filter `dangling=value` which takes a boolean of `true` or `false`. # OPTIONS **-f**, **--filter**="" Provide filter values (i.e. 'dangling=true') **--help** Print usage statement **-q**, **--quiet**=*true*|*false* Only display volume names # HISTORY July 2015, created by Brian Goff docker-1.10.3/man/docker-volume-rm.1.md000066400000000000000000000006601267010174400174730ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JULY 2015 # NAME docker-volume-rm - Remove a volume # SYNOPSIS **docker volume rm** [**--help**] VOLUME [VOLUME...] # DESCRIPTION Removes one or more volumes. You cannot remove a volume that is in use by a container. ``` $ docker volume rm hello hello ``` # OPTIONS **--help** Print usage statement # HISTORY July 2015, created by Brian Goff docker-1.10.3/man/docker-wait.1.md000066400000000000000000000012601267010174400165110ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-wait - Block until a container stops, then print its exit code. # SYNOPSIS **docker wait** [**--help**] CONTAINER [CONTAINER...] # DESCRIPTION Block until a container stops, then print its exit code. 
# OPTIONS **--help** Print usage statement # EXAMPLES $ docker run -d fedora sleep 99 079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622 $ docker wait 079b83f558a2bc 0 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.10.3/man/docker.1.md000066400000000000000000000170441267010174400155560ustar00rootroot00000000000000% DOCKER(1) Docker User Manuals % William Henry % APRIL 2014 # NAME docker \- Docker image and container command line interface # SYNOPSIS **docker** [OPTIONS] COMMAND [arg...] **docker** daemon [--help|...] **docker** [--help|-v|--version] # DESCRIPTION **docker** has two distinct functions. It is used for starting the Docker daemon and to run the CLI (i.e., to command the daemon to manage images, containers etc.) So **docker** is both a server, as a daemon, and a client to the daemon, through the CLI. To run the Docker daemon you can specify **docker daemon**. You can view the daemon options using **docker daemon --help**. To see the man page for the daemon, run **man docker daemon**. The Docker CLI has over 30 commands. The commands are listed below and each has its own man page which explain usage and arguments. To see the man page for a command run **man docker **. # OPTIONS **--help** Print usage statement **--config**="" Specifies the location of the Docker client configuration files. The default is '~/.docker'. **-D**, **--debug**=*true*|*false* Enable debug mode. Default is false. **-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host]:[port][path] to bind or unix://[/path/to/socket] to use. The socket(s) to bind to in daemon mode specified using one or more tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd. If the tcp port is not specified, then it will default to either `2375` when `--tls` is off, or `2376` when `--tls` is on, or `--tlsverify` is specified. 
**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" Set the logging level. Default is `info`. **--tls**=*true*|*false* Use TLS; implied by --tlsverify. Default is false. **--tlscacert**=*~/.docker/ca.pem* Trust certs signed only by this CA. **--tlscert**=*~/.docker/cert.pem* Path to TLS certificate file. **--tlskey**=*~/.docker/key.pem* Path to TLS key file. **--tlsverify**=*true*|*false* Use TLS and verify the remote (daemon: verify client, client: verify daemon). Default is false. **-v**, **--version**=*true*|*false* Print version information and quit. Default is false. # COMMANDS **attach** Attach to a running container See **docker-attach(1)** for full documentation on the **attach** command. **build** Build an image from a Dockerfile See **docker-build(1)** for full documentation on the **build** command. **commit** Create a new image from a container's changes See **docker-commit(1)** for full documentation on the **commit** command. **cp** Copy files/folders between a container and the local filesystem See **docker-cp(1)** for full documentation on the **cp** command. **create** Create a new container See **docker-create(1)** for full documentation on the **create** command. **diff** Inspect changes on a container's filesystem See **docker-diff(1)** for full documentation on the **diff** command. **events** Get real time events from the server See **docker-events(1)** for full documentation on the **events** command. **exec** Run a command in a running container See **docker-exec(1)** for full documentation on the **exec** command. **export** Stream the contents of a container as a tar archive See **docker-export(1)** for full documentation on the **export** command. **history** Show the history of an image See **docker-history(1)** for full documentation on the **history** command. **images** List images See **docker-images(1)** for full documentation on the **images** command. 
**import** Create a new filesystem image from the contents of a tarball See **docker-import(1)** for full documentation on the **import** command. **info** Display system-wide information See **docker-info(1)** for full documentation on the **info** command. **inspect** Return low-level information on a container or image See **docker-inspect(1)** for full documentation on the **inspect** command. **kill** Kill a running container (which includes the wrapper process and everything inside it) See **docker-kill(1)** for full documentation on the **kill** command. **load** Load an image from a tar archive See **docker-load(1)** for full documentation on the **load** command. **login** Register or login to a Docker Registry See **docker-login(1)** for full documentation on the **login** command. **logout** Log the user out of a Docker Registry See **docker-logout(1)** for full documentation on the **logout** command. **logs** Fetch the logs of a container See **docker-logs(1)** for full documentation on the **logs** command. **pause** Pause all processes within a container See **docker-pause(1)** for full documentation on the **pause** command. **port** Lookup the public-facing port which is NAT-ed to PRIVATE_PORT See **docker-port(1)** for full documentation on the **port** command. **ps** List containers See **docker-ps(1)** for full documentation on the **ps** command. **pull** Pull an image or a repository from a Docker Registry See **docker-pull(1)** for full documentation on the **pull** command. **push** Push an image or a repository to a Docker Registry See **docker-push(1)** for full documentation on the **push** command. **rename** Rename a container. See **docker-rename(1)** for full documentation on the **rename** command. **restart** Restart a container See **docker-restart(1)** for full documentation on the **restart** command. **rm** Remove one or more containers See **docker-rm(1)** for full documentation on the **rm** command. 
**rmi** Remove one or more images See **docker-rmi(1)** for full documentation on the **rmi** command. **run** Run a command in a new container See **docker-run(1)** for full documentation on the **run** command. **save** Save an image to a tar archive See **docker-save(1)** for full documentation on the **save** command. **search** Search for an image in the Docker index See **docker-search(1)** for full documentation on the **search** command. **start** Start a container See **docker-start(1)** for full documentation on the **start** command. **stats** Display a live stream of one or more containers' resource usage statistics See **docker-stats(1)** for full documentation on the **stats** command. **stop** Stop a container See **docker-stop(1)** for full documentation on the **stop** command. **tag** Tag an image into a repository See **docker-tag(1)** for full documentation on the **tag** command. **top** Lookup the running processes of a container See **docker-top(1)** for full documentation on the **top** command. **unpause** Unpause all processes within a container See **docker-unpause(1)** for full documentation on the **unpause** command. **version** Show the Docker version information See **docker-version(1)** for full documentation on the **version** command. **wait** Block until a container stops, then print its exit code See **docker-wait(1)** for full documentation on the **wait** command. # EXEC DRIVER OPTIONS Use the **--exec-opt** flags to specify options to the execution driver. The following options are available: #### native.cgroupdriver Specifies the management of the container's `cgroups`. You can specify `cgroupfs` or `systemd`. If you specify `systemd` and it is not available, the system uses `cgroupfs`. #### Client For specific client examples please see the man page for the specific Docker command. 
For example: man docker-run # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. docker-1.10.3/man/md2man-all.sh000077500000000000000000000006631267010174400161100ustar00rootroot00000000000000#!/bin/bash set -e # get into this script's directory cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" [ "$1" = '-q' ] || { set -x pwd } for FILE in *.md; do base="$(basename "$FILE")" name="${base%.md}" num="${name##*.}" if [ -z "$num" -o "$name" = "$num" ]; then # skip files that aren't of the format xxxx.N.md (like README.md) continue fi mkdir -p "./man${num}" go-md2man -in "$FILE" -out "./man${num}/${name}" done docker-1.10.3/migrate/000077500000000000000000000000001267010174400144755ustar00rootroot00000000000000docker-1.10.3/migrate/v1/000077500000000000000000000000001267010174400150235ustar00rootroot00000000000000docker-1.10.3/migrate/v1/migratev1.go000066400000000000000000000314671267010174400172640ustar00rootroot00000000000000package v1 import ( "errors" "fmt" "io/ioutil" "os" "path/filepath" "runtime" "strconv" "sync" "time" "encoding/json" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/image" imagev1 "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/reference" ) type graphIDRegistrar interface { RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error) Release(layer.Layer) ([]layer.Metadata, error) } type graphIDMounter interface { CreateRWLayerByGraphID(string, string, layer.ChainID) error } type checksumCalculator interface { ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) } const ( graphDirName = "graph" tarDataFileName = "tar-data.json.gz" migrationFileName = ".migration-v1-images.json" migrationTagsFileName = ".migration-v1-tags" 
migrationDiffIDFileName = ".migration-diffid" migrationSizeFileName = ".migration-size" migrationTarDataFileName = ".migration-tardata" containersDirName = "containers" configFileNameLegacy = "config.json" configFileName = "config.v2.json" repositoriesFilePrefixLegacy = "repositories-" ) var ( errUnsupported = errors.New("migration is not supported") ) // Migrate takes an old graph directory and transforms the metadata into the // new format. func Migrate(root, driverName string, ls layer.Store, is image.Store, rs reference.Store, ms metadata.Store) error { graphDir := filepath.Join(root, graphDirName) if _, err := os.Lstat(graphDir); os.IsNotExist(err) { return nil } mappings, err := restoreMappings(root) if err != nil { return err } if cc, ok := ls.(checksumCalculator); ok { CalculateLayerChecksums(root, cc, mappings) } if registrar, ok := ls.(graphIDRegistrar); !ok { return errUnsupported } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { return err } err = saveMappings(root, mappings) if err != nil { return err } if mounter, ok := ls.(graphIDMounter); !ok { return errUnsupported } else if err := migrateContainers(root, mounter, is, mappings); err != nil { return err } if err := migrateRefs(root, driverName, rs, mappings); err != nil { return err } return nil } // CalculateLayerChecksums walks an old graph directory and calculates checksums // for each layer. These checksums are later used for migration. 
func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { graphDir := filepath.Join(root, graphDirName) // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io workers := runtime.NumCPU() * 3 workQueue := make(chan string, workers) wg := sync.WaitGroup{} for i := 0; i < workers; i++ { wg.Add(1) go func() { for id := range workQueue { start := time.Now() if err := calculateLayerChecksum(graphDir, id, ls); err != nil { logrus.Errorf("could not calculate checksum for %q, %q", id, err) } elapsed := time.Since(start) logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) } wg.Done() }() } dir, err := ioutil.ReadDir(graphDir) if err != nil { logrus.Errorf("could not read directory %q", graphDir) return } for _, v := range dir { v1ID := v.Name() if err := imagev1.ValidateID(v1ID); err != nil { continue } if _, ok := mappings[v1ID]; ok { // support old migrations without helper files continue } workQueue <- v1ID } close(workQueue) wg.Wait() } func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) if _, err := os.Lstat(diffIDFile); err == nil { return nil } else if !os.IsNotExist(err) { return err } parent, err := getParent(filepath.Join(graphDir, id)) if err != nil { return err } diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) if err != nil { return err } if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { return err } tmpFile := filepath.Join(graphDir, id, migrationDiffIDFileName+".tmp") if err := ioutil.WriteFile(tmpFile, []byte(diffID), 0600); err != nil { return err } if err := os.Rename(tmpFile, filepath.Join(graphDir, id, migrationDiffIDFileName)); err != nil { return err } 
logrus.Infof("calculated checksum for layer %s: %s", id, diffID) return nil } func restoreMappings(root string) (map[string]image.ID, error) { mappings := make(map[string]image.ID) mfile := filepath.Join(root, migrationFileName) f, err := os.Open(mfile) if err != nil && !os.IsNotExist(err) { return nil, err } else if err == nil { err := json.NewDecoder(f).Decode(&mappings) if err != nil { f.Close() return nil, err } f.Close() } return mappings, nil } func saveMappings(root string, mappings map[string]image.ID) error { mfile := filepath.Join(root, migrationFileName) f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return err } defer f.Close() if err := json.NewEncoder(f).Encode(mappings); err != nil { return err } return nil } func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { graphDir := filepath.Join(root, graphDirName) dir, err := ioutil.ReadDir(graphDir) if err != nil { return err } for _, v := range dir { v1ID := v.Name() if err := imagev1.ValidateID(v1ID); err != nil { continue } if _, exists := mappings[v1ID]; exists { continue } if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { continue } } return nil } func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { containersDir := filepath.Join(root, containersDirName) dir, err := ioutil.ReadDir(containersDir) if err != nil { return err } for _, v := range dir { id := v.Name() if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { continue } containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) if err != nil { logrus.Errorf("migrate container error: %v", err) continue } var c map[string]*json.RawMessage if err := json.Unmarshal(containerJSON, &c); err != nil { logrus.Errorf("migrate container error: %v", err) continue } imageStrJSON, ok := c["Image"] if !ok 
{ return fmt.Errorf("invalid container configuration for %v", id) } var image string if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { logrus.Errorf("migrate container error: %v", err) continue } imageID, ok := imageMappings[image] if !ok { logrus.Errorf("image not migrated %v", imageID) // non-fatal error continue } c["Image"] = rawJSON(imageID) containerJSON, err = json.Marshal(c) if err != nil { return err } if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { return err } img, err := is.Get(imageID) if err != nil { return err } if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil { logrus.Errorf("migrate container error: %v", err) continue } logrus.Infof("migrated container %s to point to %s", id, imageID) } return nil } type refAdder interface { AddTag(ref reference.Named, id image.ID, force bool) error AddDigest(ref reference.Canonical, id image.ID, force bool) error } func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error { migrationFile := filepath.Join(root, migrationTagsFileName) if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { return err } type repositories struct { Repositories map[string]map[string]string } var repos repositories f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) if err != nil { if os.IsNotExist(err) { return nil } return err } defer f.Close() if err := json.NewDecoder(f).Decode(&repos); err != nil { return err } for name, repo := range repos.Repositories { for tag, id := range repo { if strongID, exists := mappings[id]; exists { ref, err := reference.WithName(name) if err != nil { logrus.Errorf("migrate tags: invalid name %q, %q", name, err) continue } if dgst, err := digest.ParseDigest(tag); err == nil { canonical, err := reference.WithDigest(ref, dgst) if err != nil { logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) continue } if err := 
rs.AddDigest(canonical, strongID, false); err != nil { logrus.Errorf("can't migrate digest %q for %q, err: %q", ref.String(), strongID, err) } } else { tagRef, err := reference.WithTag(ref, tag) if err != nil { logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) continue } if err := rs.AddTag(tagRef, strongID, false); err != nil { logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err) } } logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) } } } mf, err := os.Create(migrationFile) if err != nil { return err } mf.Close() return nil } func getParent(confDir string) (string, error) { jsonFile := filepath.Join(confDir, "json") imageJSON, err := ioutil.ReadFile(jsonFile) if err != nil { return "", err } var parent struct { Parent string ParentID digest.Digest `json:"parent_id"` } if err := json.Unmarshal(imageJSON, &parent); err != nil { return "", err } if parent.Parent == "" && parent.ParentID != "" { // v1.9 parent.Parent = parent.ParentID.Hex() } // compatibilityID for parent parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) if err == nil && len(parentCompatibilityID) > 0 { parent.Parent = string(parentCompatibilityID) } return parent.Parent, nil } func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { defer func() { if err != nil { logrus.Errorf("migration failed for %v, err: %v", id, err) } }() parent, err := getParent(filepath.Join(root, graphDirName, id)) if err != nil { return err } var parentID image.ID if parent != "" { var exists bool if parentID, exists = mappings[parent]; !exists { if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { // todo: fail or allow broken chains? 
return err } parentID = mappings[parent] } } rootFS := image.NewRootFS() var history []image.History if parentID != "" { parentImg, err := is.Get(parentID) if err != nil { return err } rootFS = parentImg.RootFS history = parentImg.History } diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) if err != nil { return err } diffID, err := digest.ParseDigest(string(diffIDData)) if err != nil { return err } sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) if err != nil { return err } size, err := strconv.ParseInt(string(sizeStr), 10, 64) if err != nil { return err } layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) if err != nil { return err } logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) jsonFile := filepath.Join(root, graphDirName, id, "json") imageJSON, err := ioutil.ReadFile(jsonFile) if err != nil { return err } h, err := imagev1.HistoryFromConfig(imageJSON, false) if err != nil { return err } history = append(history, h) rootFS.Append(layer.DiffID()) config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) if err != nil { return err } strongID, err := is.Create(config) if err != nil { return err } logrus.Infof("migrated image %s to %s", id, strongID) if parentID != "" { if err := is.SetParent(strongID, parentID); err != nil { return err } } checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) if err == nil { // best effort dgst, err := digest.ParseDigest(string(checksum)) if err == nil { V2MetadataService := metadata.NewV2MetadataService(ms) V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) } } _, err = ls.Release(layer) if err != nil { return err } mappings[id] = strongID return } func rawJSON(value interface{}) *json.RawMessage { jsonval, err := json.Marshal(value) if err != nil { return nil 
} return (*json.RawMessage)(&jsonval) } docker-1.10.3/migrate/v1/migratev1_test.go000066400000000000000000000516771267010174400203300ustar00rootroot00000000000000package v1 import ( "crypto/rand" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "reflect" "testing" "github.com/docker/distribution/digest" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/reference" ) func TestMigrateRefs(t *testing.T) { tmpdir, err := ioutil.TempDir("", "migrate-tags") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) ta := &mockTagAdder{} err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ "5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), }) if err != nil { t.Fatal(err) } expected := map[string]string{ "busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", "busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", "registry:2": 
"sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", } if !reflect.DeepEqual(expected, ta.refs) { t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) } // second migration is no-op ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), }) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(expected, ta.refs) { t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) } } func TestMigrateContainers(t *testing.T) { tmpdir, err := ioutil.TempDir("", "migrate-containers") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddr
esses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) if err != nil { t.Fatal(err) } // container with invalid image err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":
"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) if err != nil { t.Fatal(err) } ls := &mockMounter{} ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) if err != nil { t.Fatal(err) } is, err := image.NewImageStore(ifs, ls) if err != nil { t.Fatal(err) } imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh 
-c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) if err != nil { t.Fatal(err) } err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, }) if err != nil { t.Fatal(err) } expected := []mountInfo{{ "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", }} if !reflect.DeepEqual(expected, ls.mounts) { t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) } if actual, expected := ls.count, 0; actual != expected { t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) } config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) if err != nil { t.Fatal(err) } var config struct{ Image string } err = json.Unmarshal(config2, &config) if err != nil { t.Fatal(err) } if actual, expected := config.Image, string(imgID); actual != expected { t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) } } func TestMigrateImages(t *testing.T) { tmpdir, err := ioutil.TempDir("", "migrate-images") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) // busybox from 1.9 id1, err := addImage(tmpdir, 
`{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") if err != nil { t.Fatal(err) } id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD 
[\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") if err != nil { t.Fatal(err) } ls := &mockRegistrar{} ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) if err != nil { t.Fatal(err) } is, err := image.NewImageStore(ifs, ls) if err != nil { t.Fatal(err) } ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) if err != nil { t.Fatal(err) } mappings := make(map[string]image.ID) err = migrateImages(tmpdir, ls, is, ms, mappings) if err != nil { t.Fatal(err) } expected := map[string]image.ID{ id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), } if !reflect.DeepEqual(mappings, expected) { t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) } if actual, expected := ls.count, 2; actual != expected { t.Fatalf("invalid register count: expected %q, got %q", expected, actual) } ls.count = 0 // next images are busybox from 1.8.2 _, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD 
[\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") if err != nil { t.Fatal(err) } _, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in 
/"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") if err != nil { t.Fatal(err) } err = migrateImages(tmpdir, ls, is, ms, mappings) if err != nil { t.Fatal(err) } expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") if actual, expected := ls.count, 2; actual != expected { t.Fatalf("invalid register count: expected %q, got %q", expected, actual) } v2MetadataService := metadata.NewV2MetadataService(ms) receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID()) if err != nil { t.Fatal(err) } expectedMetadata := []metadata.V2Metadata{ {Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")}, {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, } if !reflect.DeepEqual(expectedMetadata, receivedMetadata) { t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata) } } func TestMigrateUnsupported(t *testing.T) { tmpdir, err := ioutil.TempDir("", "migrate-empty") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) err = 
os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700) if err != nil { t.Fatal(err) } err = Migrate(tmpdir, "generic", nil, nil, nil, nil) if err != errUnsupported { t.Fatalf("expected unsupported error, got %q", err) } } func TestMigrateEmptyDir(t *testing.T) { tmpdir, err := ioutil.TempDir("", "migrate-empty") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) err = Migrate(tmpdir, "generic", nil, nil, nil, nil) if err != nil { t.Fatal(err) } } func addImage(dest, jsonConfig, parent, checksum string) (string, error) { var config struct{ ID string } if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { return "", err } if config.ID == "" { b := make([]byte, 32) rand.Read(b) config.ID = hex.EncodeToString(b) } contDir := filepath.Join(dest, "graph", config.ID) if err := os.MkdirAll(contDir, 0700); err != nil { return "", err } if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { return "", err } if checksum != "" { if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { return "", err } } if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil { return "", err } if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil { return "", err } if parent != "" { if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { return "", err } } if checksum != "" { if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { return "", err } } return config.ID, nil } func addContainer(dest, jsonConfig string) error { var config struct{ ID string } if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { return err } contDir := filepath.Join(dest, "containers", config.ID) if err := os.MkdirAll(contDir, 0700); err != nil { return err } if err := 
ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { return err } return nil } type mockTagAdder struct { refs map[string]string } func (t *mockTagAdder) AddTag(ref reference.Named, id image.ID, force bool) error { if t.refs == nil { t.refs = make(map[string]string) } t.refs[ref.String()] = id.String() return nil } func (t *mockTagAdder) AddDigest(ref reference.Canonical, id image.ID, force bool) error { return t.AddTag(ref, id, force) } type mockRegistrar struct { layers map[layer.ChainID]*mockLayer count int } func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) { r.count++ l := &mockLayer{} if parent != "" { p, exists := r.layers[parent] if !exists { return nil, fmt.Errorf("invalid parent %q", parent) } l.parent = p l.diffIDs = append(l.diffIDs, p.diffIDs...) } l.diffIDs = append(l.diffIDs, diffID) if r.layers == nil { r.layers = make(map[layer.ChainID]*mockLayer) } r.layers[l.ChainID()] = l return l, nil } func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { return nil, nil } func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { return nil, nil } type mountInfo struct { name, graphID, parent string } type mockMounter struct { mounts []mountInfo count int } func (r *mockMounter) CreateRWLayerByGraphID(name string, graphID string, parent layer.ChainID) error { r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) return nil } func (r *mockMounter) Unmount(string) error { r.count-- return nil } func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { return nil, nil } func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { return nil, nil } type mockLayer struct { diffIDs []layer.DiffID parent *mockLayer } func (l *mockLayer) TarStream() (io.ReadCloser, error) { return nil, nil } func (l *mockLayer) ChainID() layer.ChainID { return 
layer.CreateChainID(l.diffIDs) } func (l *mockLayer) DiffID() layer.DiffID { return l.diffIDs[len(l.diffIDs)-1] } func (l *mockLayer) Parent() layer.Layer { if l.parent == nil { return nil } return l.parent } func (l *mockLayer) Size() (int64, error) { return 0, nil } func (l *mockLayer) DiffSize() (int64, error) { return 0, nil } func (l *mockLayer) Metadata() (map[string]string, error) { return nil, nil } docker-1.10.3/opts/000077500000000000000000000000001267010174400140325ustar00rootroot00000000000000docker-1.10.3/opts/hosts.go000066400000000000000000000120731267010174400155240ustar00rootroot00000000000000package opts import ( "fmt" "net" "net/url" "runtime" "strconv" "strings" ) var ( // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter // is not supplied. A better longer term solution would be to use a named // pipe as the default on the Windows daemon. // These are the IANA registered port numbers for use with Docker // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker DefaultHTTPPort = 2375 // Default HTTP Port // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port // DefaultUnixSocket Path for the unix socket. // Docker daemon by default always listens on the default unix socket DefaultUnixSocket = "/var/run/docker.sock" // DefaultTCPHost constant defines the default host string used by docker on Windows DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) // DefaultTLSHost constant defines the default host string used by docker for TLS sockets DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) ) // ValidateHost validates that the specified string is a valid host and returns it. 
func ValidateHost(val string) (string, error) { _, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) if err != nil { return val, err } // Note: unlike most flag validators, we don't return the mutated value here // we need to know what the user entered later (using ParseHost) to adjust for tls return val, nil } // ParseHost and set defaults for a Daemon host string func ParseHost(defaultHost, val string) (string, error) { host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) if err != nil { return val, err } return host, nil } // parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. // Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr // defaultUnixAddr must be a absolute file path (no `unix://` prefix) // defaultTCPAddr must be the full `tcp://host:port` form func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) { addr = strings.TrimSpace(addr) if addr == "" { if defaultAddr == defaultTLSHost { return defaultTLSHost, nil } if runtime.GOOS != "windows" { return fmt.Sprintf("unix://%s", defaultUnixAddr), nil } return defaultTCPAddr, nil } addrParts := strings.Split(addr, "://") if len(addrParts) == 1 { addrParts = []string{"tcp", addrParts[0]} } switch addrParts[0] { case "tcp": return parseTCPAddr(addrParts[1], defaultTCPAddr) case "unix": return parseUnixAddr(addrParts[1], defaultUnixAddr) case "fd": return addr, nil default: return "", fmt.Errorf("Invalid bind address format: %s", addr) } } // parseUnixAddr parses and validates that the specified address is a valid UNIX // socket address. It returns a formatted UNIX socket address, either using the // address parsed from addr, or the contents of defaultAddr if addr is a blank // string. 
func parseUnixAddr(addr string, defaultAddr string) (string, error) { addr = strings.TrimPrefix(addr, "unix://") if strings.Contains(addr, "://") { return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) } if addr == "" { addr = defaultAddr } return fmt.Sprintf("unix://%s", addr), nil } // parseTCPAddr parses and validates that the specified address is a valid TCP // address. It returns a formatted TCP address, either using the address parsed // from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. // tryAddr is expected to have already been Trim()'d // defaultAddr must be in the full `tcp://host:port` form func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) { if tryAddr == "" || tryAddr == "tcp://" { return defaultAddr, nil } addr := strings.TrimPrefix(tryAddr, "tcp://") if strings.Contains(addr, "://") || addr == "" { return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) } defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) if err != nil { return "", err } // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but // not 1.4. See https://github.com/golang/go/issues/12200 and // https://github.com/golang/go/issues/6530. 
if strings.HasSuffix(addr, "]:") { addr += defaultPort } u, err := url.Parse("tcp://" + addr) if err != nil { return "", err } host, port, err := net.SplitHostPort(u.Host) if err != nil { return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) } if host == "" { host = defaultHost } if port == "" { port = defaultPort } p, err := strconv.Atoi(port) if err != nil && p == 0 { return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) } return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil } docker-1.10.3/opts/hosts_test.go000066400000000000000000000167221267010174400165700ustar00rootroot00000000000000package opts import ( "runtime" "testing" ) func TestParseHost(t *testing.T) { invalid := map[string]string{ "anything": "Invalid bind address format: anything", "something with spaces": "Invalid bind address format: something with spaces", "://": "Invalid bind address format: ://", "unknown://": "Invalid bind address format: unknown://", "tcp://:port": "Invalid bind address format: :port", "tcp://invalid": "Invalid bind address format: invalid", "tcp://invalid:port": "Invalid bind address format: invalid:port", } const defaultHTTPHost = "tcp://127.0.0.1:2375" var defaultHOST = "unix:///var/run/docker.sock" if runtime.GOOS == "windows" { defaultHOST = defaultHTTPHost } valid := map[string]string{ "": defaultHOST, "fd://": "fd://", "fd://something": "fd://something", "tcp://host:": "tcp://host:2375", "tcp://": "tcp://localhost:2375", "tcp://:2375": "tcp://localhost:2375", // default ip address "tcp://:2376": "tcp://localhost:2376", // default ip address "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", "tcp://192.168:8080": "tcp://192.168:8080", "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P "tcp://docker.com:2375": "tcp://docker.com:2375", "unix://": "unix:///var/run/docker.sock", // default unix:// value "unix://path/to/socket": "unix://path/to/socket", } 
for value, errorMessage := range invalid { if _, err := ParseHost(defaultHTTPHost, value); err == nil || err.Error() != errorMessage { t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) } } for value, expected := range valid { if actual, err := ParseHost(defaultHTTPHost, value); err != nil || actual != expected { t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) } } } func TestParseDockerDaemonHost(t *testing.T) { var ( defaultHTTPHost = "tcp://localhost:2375" defaultHTTPSHost = "tcp://localhost:2376" defaultUnix = "/var/run/docker.sock" defaultHOST = "unix:///var/run/docker.sock" ) if runtime.GOOS == "windows" { defaultHOST = defaultHTTPHost } invalids := map[string]string{ "0.0.0.0": "Invalid bind address format: 0.0.0.0", "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", "tcp://unix:///run/docker.sock": "Invalid bind address format: unix", "tcp": "Invalid bind address format: tcp", "unix": "Invalid bind address format: unix", "fd": "Invalid bind address format: fd", } valids := map[string]string{ "0.0.0.1:": "tcp://0.0.0.1:2375", "0.0.0.1:5555": "tcp://0.0.0.1:5555", "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", "[::1]:": "tcp://[::1]:2375", "[::1]:5555/path": "tcp://[::1]:5555/path", "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", ":6666": "tcp://localhost:6666", ":6666/path": "tcp://localhost:6666/path", "": defaultHOST, " ": defaultHOST, " ": defaultHOST, "tcp://": defaultHTTPHost, "tcp://:7777": "tcp://localhost:7777", "tcp://:7777/path": "tcp://localhost:7777/path", " tcp://:7777/path ": "tcp://localhost:7777/path", "unix:///run/docker.sock": "unix:///run/docker.sock", "unix://": 
"unix:///var/run/docker.sock", "fd://": "fd://", "fd://something": "fd://something", "localhost:": "tcp://localhost:2375", "localhost:5555": "tcp://localhost:5555", "localhost:5555/path": "tcp://localhost:5555/path", } for invalidAddr, expectedError := range invalids { if addr, err := parseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", invalidAddr); err == nil || err.Error() != expectedError { t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) } } for validAddr, expectedAddr := range valids { if addr, err := parseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", validAddr); err != nil || addr != expectedAddr { t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) } } } func TestParseTCP(t *testing.T) { var ( defaultHTTPHost = "tcp://127.0.0.1:2376" ) invalids := map[string]string{ "0.0.0.0": "Invalid bind address format: 0.0.0.0", "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", } valids := map[string]string{ "": defaultHTTPHost, "tcp://": defaultHTTPHost, "0.0.0.1:": "tcp://0.0.0.1:2376", "0.0.0.1:5555": "tcp://0.0.0.1:5555", "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", ":6666": "tcp://127.0.0.1:6666", ":6666/path": "tcp://127.0.0.1:6666/path", "tcp://:7777": "tcp://127.0.0.1:7777", "tcp://:7777/path": "tcp://127.0.0.1:7777/path", "[::1]:": "tcp://[::1]:2376", "[::1]:5555": "tcp://[::1]:5555", "[::1]:5555/path": "tcp://[::1]:5555/path", "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", "localhost:": "tcp://localhost:2376", "localhost:5555": "tcp://localhost:5555", 
"localhost:5555/path": "tcp://localhost:5555/path", } for invalidAddr, expectedError := range invalids { if addr, err := parseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) } } for validAddr, expectedAddr := range valids { if addr, err := parseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) } } } func TestParseInvalidUnixAddrInvalid(t *testing.T) { if _, err := parseUnixAddr("tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { t.Fatalf("Expected an error, got %v", err) } if _, err := parseUnixAddr("unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { t.Fatalf("Expected an error, got %v", err) } if v, err := parseUnixAddr("", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") } } docker-1.10.3/opts/hosts_unix.go000066400000000000000000000003211267010174400165600ustar00rootroot00000000000000// +build !windows package opts import "fmt" // DefaultHost constant defines the default host string used by docker on other hosts than Windows var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) docker-1.10.3/opts/hosts_windows.go000066400000000000000000000002241267010174400172710ustar00rootroot00000000000000// +build windows package opts // DefaultHost constant defines the default host string used by docker on Windows var DefaultHost = DefaultTCPHost docker-1.10.3/opts/ip.go000066400000000000000000000016461267010174400150000ustar00rootroot00000000000000package opts import ( "fmt" "net" ) // IPOpt holds an IP. It is used to store values from CLI flags. 
type IPOpt struct { *net.IP } // NewIPOpt creates a new IPOpt from a reference net.IP and a // string representation of an IP. If the string is not a valid // IP it will fallback to the specified reference. func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { o := &IPOpt{ IP: ref, } o.Set(defaultVal) return o } // Set sets an IPv4 or IPv6 address from a given string. If the given // string is not parseable as an IP address it returns an error. func (o *IPOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { return fmt.Errorf("%s is not an ip address", val) } *o.IP = ip return nil } // String returns the IP address stored in the IPOpt. If stored IP is a // nil pointer, it returns an empty string. func (o *IPOpt) String() string { if *o.IP == nil { return "" } return o.IP.String() } docker-1.10.3/opts/ip_test.go000066400000000000000000000023231267010174400160300ustar00rootroot00000000000000package opts import ( "net" "testing" ) func TestIpOptString(t *testing.T) { addresses := []string{"", "0.0.0.0"} var ip net.IP for _, address := range addresses { stringAddress := NewIPOpt(&ip, address).String() if stringAddress != address { t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) } } } func TestNewIpOptInvalidDefaultVal(t *testing.T) { ip := net.IPv4(127, 0, 0, 1) defaultVal := "Not an ip" ipOpt := NewIPOpt(&ip, defaultVal) expected := "127.0.0.1" if ipOpt.String() != expected { t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) } } func TestNewIpOptValidDefaultVal(t *testing.T) { ip := net.IPv4(127, 0, 0, 1) defaultVal := "192.168.1.1" ipOpt := NewIPOpt(&ip, defaultVal) expected := "192.168.1.1" if ipOpt.String() != expected { t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) } } func TestIpOptSetInvalidVal(t *testing.T) { ip := net.IPv4(127, 0, 0, 1) ipOpt := &IPOpt{IP: &ip} invalidIP := "invalid ip" expectedError := "invalid ip is not an ip address" err := ipOpt.Set(invalidIP) if err == nil || err.Error() 
!= expectedError { t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) } } docker-1.10.3/opts/opts.go000066400000000000000000000142741267010174400153560ustar00rootroot00000000000000package opts import ( "fmt" "net" "regexp" "strings" ) var ( alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) ) // ListOpts holds a list of values and a validation function. type ListOpts struct { values *[]string validator ValidatorFctType } // NewListOpts creates a new ListOpts with the specified validator. func NewListOpts(validator ValidatorFctType) ListOpts { var values []string return *NewListOptsRef(&values, validator) } // NewListOptsRef creates a new ListOpts with the specified values and validator. func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { return &ListOpts{ values: values, validator: validator, } } func (opts *ListOpts) String() string { return fmt.Sprintf("%v", []string((*opts.values))) } // Set validates if needed the input value and add it to the // internal slice. func (opts *ListOpts) Set(value string) error { if opts.validator != nil { v, err := opts.validator(value) if err != nil { return err } value = v } (*opts.values) = append((*opts.values), value) return nil } // Delete removes the specified element from the slice. func (opts *ListOpts) Delete(key string) { for i, k := range *opts.values { if k == key { (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) return } } } // GetMap returns the content of values in a map in order to avoid // duplicates. func (opts *ListOpts) GetMap() map[string]struct{} { ret := make(map[string]struct{}) for _, k := range *opts.values { ret[k] = struct{}{} } return ret } // GetAll returns the values of slice. 
func (opts *ListOpts) GetAll() []string { return (*opts.values) } // GetAllOrEmpty returns the values of the slice // or an empty slice when there are no values. func (opts *ListOpts) GetAllOrEmpty() []string { v := *opts.values if v == nil { return make([]string, 0) } return v } // Get checks the existence of the specified key. func (opts *ListOpts) Get(key string) bool { for _, k := range *opts.values { if k == key { return true } } return false } // Len returns the amount of element in the slice. func (opts *ListOpts) Len() int { return len((*opts.values)) } // NamedOption is an interface that list and map options // with names implement. type NamedOption interface { Name() string } // NamedListOpts is a ListOpts with a configuration name. // This struct is useful to keep reference to the assigned // field name in the internal configuration struct. type NamedListOpts struct { name string ListOpts } var _ NamedOption = &NamedListOpts{} // NewNamedListOptsRef creates a reference to a new NamedListOpts struct. func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { return &NamedListOpts{ name: name, ListOpts: *NewListOptsRef(values, validator), } } // Name returns the name of the NamedListOpts in the configuration. func (o *NamedListOpts) Name() string { return o.name } //MapOpts holds a map of values and a validation function. type MapOpts struct { values map[string]string validator ValidatorFctType } // Set validates if needed the input value and add it to the // internal map, by splitting on '='. func (opts *MapOpts) Set(value string) error { if opts.validator != nil { v, err := opts.validator(value) if err != nil { return err } value = v } vals := strings.SplitN(value, "=", 2) if len(vals) == 1 { (opts.values)[vals[0]] = "" } else { (opts.values)[vals[0]] = vals[1] } return nil } // GetAll returns the values of MapOpts as a map. 
func (opts *MapOpts) GetAll() map[string]string { return opts.values } func (opts *MapOpts) String() string { return fmt.Sprintf("%v", map[string]string((opts.values))) } // NewMapOpts creates a new MapOpts with the specified map of values and a validator. func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { if values == nil { values = make(map[string]string) } return &MapOpts{ values: values, validator: validator, } } // NamedMapOpts is a MapOpts struct with a configuration name. // This struct is useful to keep reference to the assigned // field name in the internal configuration struct. type NamedMapOpts struct { name string MapOpts } var _ NamedOption = &NamedMapOpts{} // NewNamedMapOpts creates a reference to a new NamedMapOpts struct. func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { return &NamedMapOpts{ name: name, MapOpts: *NewMapOpts(values, validator), } } // Name returns the name of the NamedMapOpts in the configuration. func (o *NamedMapOpts) Name() string { return o.name } // ValidatorFctType defines a validator function that returns a validated string and/or an error. type ValidatorFctType func(val string) (string, error) // ValidatorFctListType defines a validator function that returns a validated list of string and/or an error type ValidatorFctListType func(val string) ([]string, error) // ValidateIPAddress validates an Ip address. func ValidateIPAddress(val string) (string, error) { var ip = net.ParseIP(strings.TrimSpace(val)) if ip != nil { return ip.String(), nil } return "", fmt.Errorf("%s is not an ip address", val) } // ValidateDNSSearch validates domain for resolvconf search configuration. // A zero length domain is represented by a dot (.). func ValidateDNSSearch(val string) (string, error) { if val = strings.Trim(val, " "); val == "." 
{ return val, nil } return validateDomain(val) } func validateDomain(val string) (string, error) { if alphaRegexp.FindString(val) == "" { return "", fmt.Errorf("%s is not a valid domain", val) } ns := domainRegexp.FindSubmatch([]byte(val)) if len(ns) > 0 && len(ns[1]) < 255 { return string(ns[1]), nil } return "", fmt.Errorf("%s is not a valid domain", val) } // ValidateLabel validates that the specified string is a valid label, and returns it. // Labels are in the form on key=value. func ValidateLabel(val string) (string, error) { if strings.Count(val, "=") < 1 { return "", fmt.Errorf("bad attribute format: %s", val) } return val, nil } docker-1.10.3/opts/opts_test.go000066400000000000000000000131761267010174400164150ustar00rootroot00000000000000package opts import ( "fmt" "strings" "testing" ) func TestValidateIPAddress(t *testing.T) { if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) } if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) } if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) } if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) } if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) } } func TestMapOpts(t *testing.T) { tmpMap := make(map[string]string) o := NewMapOpts(tmpMap, logOptsValidator) o.Set("max-size=1") if o.String() != "map[max-size:1]" { t.Errorf("%s != [map[max-size:1]", o.String()) } o.Set("max-file=2") if len(tmpMap) != 2 { t.Errorf("map length %d != 2", len(tmpMap)) } if tmpMap["max-file"] != "2" { t.Errorf("max-file = %s != 2", tmpMap["max-file"]) } if tmpMap["max-size"] != "1" { t.Errorf("max-size = %s != 1", 
tmpMap["max-size"]) } if o.Set("dummy-val=3") == nil { t.Errorf("validator is not being called") } } func TestListOptsWithoutValidator(t *testing.T) { o := NewListOpts(nil) o.Set("foo") if o.String() != "[foo]" { t.Errorf("%s != [foo]", o.String()) } o.Set("bar") if o.Len() != 2 { t.Errorf("%d != 2", o.Len()) } o.Set("bar") if o.Len() != 3 { t.Errorf("%d != 3", o.Len()) } if !o.Get("bar") { t.Error("o.Get(\"bar\") == false") } if o.Get("baz") { t.Error("o.Get(\"baz\") == true") } o.Delete("foo") if o.String() != "[bar bar]" { t.Errorf("%s != [bar bar]", o.String()) } listOpts := o.GetAll() if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { t.Errorf("Expected [[bar bar]], got [%v]", listOpts) } mapListOpts := o.GetMap() if len(mapListOpts) != 1 { t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) } } func TestListOptsWithValidator(t *testing.T) { // Re-using logOptsvalidator (used by MapOpts) o := NewListOpts(logOptsValidator) o.Set("foo") if o.String() != "[]" { t.Errorf("%s != []", o.String()) } o.Set("foo=bar") if o.String() != "[]" { t.Errorf("%s != []", o.String()) } o.Set("max-file=2") if o.Len() != 1 { t.Errorf("%d != 1", o.Len()) } if !o.Get("max-file=2") { t.Error("o.Get(\"max-file=2\") == false") } if o.Get("baz") { t.Error("o.Get(\"baz\") == true") } o.Delete("max-file=2") if o.String() != "[]" { t.Errorf("%s != []", o.String()) } } func TestValidateDNSSearch(t *testing.T) { valid := []string{ `.`, `a`, `a.`, `1.foo`, `17.foo`, `foo.bar`, `foo.bar.baz`, `foo.bar.`, `foo.bar.baz`, `foo1.bar2`, `foo1.bar2.baz`, `1foo.2bar.`, `1foo.2bar.baz`, `foo-1.bar-2`, `foo-1.bar-2.baz`, `foo-1.bar-2.`, `foo-1.bar-2.baz`, `1-foo.2-bar`, `1-foo.2-bar.baz`, `1-foo.2-bar.`, `1-foo.2-bar.baz`, } invalid := []string{ ``, ` `, ` `, `17`, `17.`, `.17`, `17-.`, `17-.foo`, `.foo`, `foo-.bar`, `-foo.bar`, `foo.bar-`, `foo.bar-.baz`, `foo.-bar`, `foo.-bar.baz`, 
`foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, } for _, domain := range valid { if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) } } for _, domain := range invalid { if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) } } } func TestValidateLabel(t *testing.T) { if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { t.Fatalf("Expected an error [bad attribute format: label], go %v", err) } if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) } // Validate it's working with more than one = if actual, err := ValidateLabel("key1=value1=value2"); err != nil { t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) } // Validate it's working with one more if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) } } func logOptsValidator(val string) (string, error) { allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} vals := strings.Split(val, "=") if allowedKeys[vals[0]] != "" { return val, nil } return "", fmt.Errorf("invalid key %s", vals[0]) } func TestNamedListOpts(t *testing.T) { var v []string o := NewNamedListOptsRef("foo-name", &v, nil) o.Set("foo") if o.String() != "[foo]" { t.Errorf("%s != [foo]", o.String()) } if o.Name() != "foo-name" { t.Errorf("%s != foo-name", o.Name()) } if len(v) != 1 { t.Errorf("expected foo to be in the values, got %v", v) } } func TestNamedMapOpts(t *testing.T) { tmpMap := make(map[string]string) o 
:= NewNamedMapOpts("max-name", tmpMap, nil) o.Set("max-size=1") if o.String() != "map[max-size:1]" { t.Errorf("%s != [map[max-size:1]", o.String()) } if o.Name() != "max-name" { t.Errorf("%s != max-name", o.Name()) } if _, exist := tmpMap["max-size"]; !exist { t.Errorf("expected map-size to be in the values, got %v", tmpMap) } } docker-1.10.3/opts/opts_unix.go000066400000000000000000000002661267010174400164150ustar00rootroot00000000000000// +build !windows package opts // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 const DefaultHTTPHost = "localhost" docker-1.10.3/opts/opts_windows.go000066400000000000000000000065101267010174400171220ustar00rootroot00000000000000package opts // TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4. // @jhowardmsft, @swernli. // // On Windows, this mitigates a problem with the default options of running // a docker client against a local docker daemon on TP4. // // What was found that if the default host is "localhost", even if the client // (and daemon as this is local) is not physically on a network, and the DNS // cache is flushed (ipconfig /flushdns), then the client will pause for // exactly one second when connecting to the daemon for calls. For example // using docker run windowsservercore cmd, the CLI will send a create followed // by an attach. You see the delay between the attach finishing and the attach // being seen by the daemon. // // Here's some daemon debug logs with additional debug spew put in. The // AfterWriteJSON log is the very last thing the daemon does as part of the // create call. The POST /attach is the second CLI call. Notice the second // time gap. // // time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" // time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" // time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." 
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... // time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." // time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." // time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" // time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" // time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" // time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" // time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" // ... 1 second gap here.... // time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" // time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" // // We suspect this is either a bug introduced in GOLang 1.5.1, or that a change // in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, // the Windows networking stack is supposed to resolve "localhost" internally, // without hitting DNS, or even reading the hosts file (which is why localhost // is commented out in the hosts file on Windows). // // We have validated that working around this using the actual IPv4 localhost // address does not cause the delay. // // This does not occur with the docker client built with 1.4.3 on the same // Windows TP4 build, regardless of whether the daemon is built using 1.5.1 // or 1.4.3. It does not occur on Linux. We also verified we see the same thing // on a cross-compiled Windows binary (from Linux). // // Final note: This is a mitigation, not a 'real' fix. It is still susceptible // to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' 
// explicitly. // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 const DefaultHTTPHost = "127.0.0.1" docker-1.10.3/pkg/000077500000000000000000000000001267010174400136265ustar00rootroot00000000000000docker-1.10.3/pkg/README.md000066400000000000000000000015031267010174400151040ustar00rootroot00000000000000pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the Docker organization, to facilitate re-use by other projects. However that is not the priority. The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! Because utility packages are small and neatly separated from the rest of the codebase, they are a good place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! docker-1.10.3/pkg/aaparser/000077500000000000000000000000001267010174400154245ustar00rootroot00000000000000docker-1.10.3/pkg/aaparser/aaparser.go000066400000000000000000000021171267010174400175520ustar00rootroot00000000000000package aaparser import ( "fmt" "log" "os/exec" "strconv" "strings" ) // GetVersion returns the major and minor version of apparmor_parser func GetVersion() (int, int, error) { // get the apparmor_version version cmd := exec.Command("apparmor_parser", "--version") output, err := cmd.CombinedOutput() if err != nil { log.Fatalf("getting apparmor_parser version failed: %s (%s)", err, output) } // parse the version from the output // output is in the form of the following: // AppArmor parser version 2.9.1 // Copyright (C) 1999-2008 Novell Inc. 
// Copyright 2009-2012 Canonical Ltd. lines := strings.SplitN(string(output), "\n", 2) words := strings.Split(lines[0], " ") version := words[len(words)-1] // split by major minor version v := strings.Split(version, ".") if len(v) < 2 { return -1, -1, fmt.Errorf("parsing major minor version failed for %q", version) } majorVersion, err := strconv.Atoi(v[0]) if err != nil { return -1, -1, err } minorVersion, err := strconv.Atoi(v[1]) if err != nil { return -1, -1, err } return majorVersion, minorVersion, nil } docker-1.10.3/pkg/archive/000077500000000000000000000000001267010174400152475ustar00rootroot00000000000000docker-1.10.3/pkg/archive/README.md000066400000000000000000000001041267010174400165210ustar00rootroot00000000000000This code provides helper functions for dealing with archive files. docker-1.10.3/pkg/archive/archive.go000066400000000000000000000734711267010174400172330ustar00rootroot00000000000000package archive import ( "archive/tar" "bufio" "bytes" "compress/bzip2" "compress/gzip" "errors" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strings" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" ) type ( // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. Archive io.ReadCloser // Reader is a type of io.Reader. Reader io.Reader // Compression is the state represents if compressed or not. Compression int // TarChownOptions wraps the chown options UID and GID. TarChownOptions struct { UID, GID int } // TarOptions wraps the tar options. 
TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap ChownOpts *TarChownOptions IncludeSourceDir bool // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string } // Archiver allows the reuse of most utility functions of this package // with a pluggable Untar function. Also, to facilitate the passing of // specific id mappings for untar, an archiver can be created with maps // which will then be passed to Untar operations Archiver struct { Untar func(io.Reader, string, *TarOptions) error UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. breakoutError error ) var ( // ErrNotImplemented is the error message of function not implemented. ErrNotImplemented = errors.New("Function not implemented") defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} ) const ( // HeaderSize is the size in bytes of a tar header HeaderSize = 512 ) const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota // Bzip2 is bzip2 compression algorithm. Bzip2 // Gzip is gzip compression algorithm. Gzip // Xz is xz compression algorithm. Xz ) // IsArchive checks for the magic bytes of a tar or any supported compression // algorithm. func IsArchive(header []byte) bool { compression := DetectCompression(header) if compression != Uncompressed { return true } r := tar.NewReader(bytes.NewBuffer(header)) _, err := r.Next() return err == nil } // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := DecompressStream(file) if err != nil { return false } r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, } { if len(source) < len(m) { logrus.Debugf("Len too short") continue } if bytes.Compare(m, source[:len(m)]) == 0 { return compression } } return Uncompressed } func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.Command(args[0], args[1:]...), archive) } // DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: gzReader, err := gzip.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return readBufWrapper, nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: xzReader, chdone, err := xzDecompress(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { <-chdone return readBufWrapper.Close() }), nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } // Extension returns the extension of a file that uses the specified compression algorithm. 
func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" } return "" } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap } // canonicalTarName provides a platform-independent and consistent posix-style //path for files and directories to be archived regardless of the platform. func canonicalTarName(name string, isDir bool) (string, error) { name, err := CanonicalTarNameForPath(name) if err != nil { return "", err } // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name, nil } func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } link := "" if fi.Mode()&os.ModeSymlink != 0 { if link, err = os.Readlink(path); err != nil { return err } } hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return err } hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { return fmt.Errorf("tar: cannot canonicalize path: %v", err) } hdr.Name = name inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) if err != nil { return err } // if it's not a directory and has more than 1 link, // it's hardlinked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! 
} else { ta.SeenFiles[inode] = name } } capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability) } //handle re-mapping container ID mappings back to host ID mappings before //writing tar headers/files. We skip whiteout files because they were written //by the kernel and already have proper ownership relative to the host if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { uid, gid, err := getFileUIDGID(fi.Sys()) if err != nil { return err } xUID, err := idtools.ToContainer(uid, ta.UIDMaps) if err != nil { return err } xGID, err := idtools.ToContainer(gid, ta.GIDMaps) if err != nil { return err } hdr.Uid = xUID hdr.Gid = xGID } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg { file, err := os.Open(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. 
// In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: logrus.Debugf("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) } // Lchown is not supported on Windows. 
if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { return err } } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := hdr.AccessTime if aTime.Before(hdr.ModTime) { // Last access time should never be before last modified time. aTime = hdr.ModTime } // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. 
srcPath = fixVolumePathPrefix(srcPath) patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } go func() { ta := &tarAppender{ TarWriter: tar.NewWriter(compressWriter), Buffer: pools.BufioWriter32KPool.Get(nil), SeenFiles: make(map[uint64]string), UIDMaps: options.UIDMaps, GIDMaps: options.GIDMaps, } defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Debugf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { logrus.Debugf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { logrus.Debugf("Can't close pipe writer: %s", err) } }() // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. 
if len(options.IncludeFiles) > 0 { logrus.Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(srcPath) srcPath = dir options.IncludeFiles = []string{base} } if len(options.IncludeFiles) == 0 { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) if err != nil { logrus.Debugf("Error matching %s: %v", relFilePath, err) return err } } if skip { if !exceptions && f.IsDir() { return filepath.SkipDir } return nil } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. 
replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Debugf("Can't add file %s to tar: %s", filePath, err) } return nil }) } }() return pipeReader, nil } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) if err != nil { return err } // Iterate through the files in the archive. loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in // the filepath format for the OS on which the daemon is running. Hence // the check for a slash-suffix MUST be done in an OS-agnostic way. 
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = system.MkdirAll(parentPath, 0777) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) // if the options contain a uid & gid maps, convert header uid/gid // entries using the maps such that lchown sets the proper mapped // uid/gid after writing the file. 
We only perform this mapping if // the file isn't already owned by the remapped root UID or GID, as // that specific uid/gid has no mapping from container -> host, and // those files already have the proper ownership for inside the // container. if hdr.Uid != remappedRootUID { xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) if err != nil { return err } hdr.Uid = xUID } if hdr.Gid != remappedRootGID { xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) if err != nil { return err } hdr.Gid = xGID } if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. 
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { logrus.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() var options *TarOptions if archiver.UIDMaps != nil || archiver.GIDMaps != nil { options = &TarOptions{ UIDMaps: archiver.UIDMaps, GIDMaps: archiver.GIDMaps, } } return archiver.Untar(archive, dst, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func TarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } // UntarPath untar a file from path to a destination, src is the source tar file path. 
func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() var options *TarOptions if archiver.UIDMaps != nil || archiver.GIDMaps != nil { options = &TarOptions{ UIDMaps: archiver.UIDMaps, GIDMaps: archiver.GIDMaps, } } return archiver.Untar(archive, dst, options) } // UntarPath is a convenience function which looks for an archive // at filesystem path `src`, and unpacks it at `dst`. func UntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) if err := system.MkdirAll(dst, 0755); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func CopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } r, w := io.Pipe() errC := promise.Go(func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) if err != nil { return err } // only perform mapping if the file being copied isn't already owned by the // uid or gid of the remapped root in the container if remappedRootUID != hdr.Uid { xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) if err != nil { return err } hdr.Uid = xUID } if remappedRootGID != hdr.Gid { xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) if err != nil { return err } hdr.Gid = xGID } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }) defer func() { if er := <-errC; err != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. // // Destination handling is in an operating specific manner depending // where the daemon is running. If `dst` ends with a trailing slash // the final destination path will be `dst/base(src)` (Linux) or // `dst\base(src)` (Windows). func CopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } // cmdStream executes a command, and returns its stdout as a stream. 
// If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { chdone := make(chan struct{}) cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, nil, err } // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } close(chdone) }() return pipeR, chdone, nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src Archive, dir string) (*TempArchive, error) { f, err := ioutil.TempFile(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. 
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}

	archive.closed = true

	return archive.File.Close()
}

// Read reads from the underlying file and, once the archive has been fully
// consumed (or a read error occurs), closes it and deletes the backing file,
// making the archive single-use.
func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
docker-1.10.3/pkg/archive/archive_test.go000066400000000000000000001004561267010174400202640ustar00rootroot00000000000000package archive

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"syscall"
	"testing"
	"time"

	"github.com/docker/docker/pkg/system"
)

// IsArchive must reject a nil header slice.
func TestIsArchiveNilHeader(t *testing.T) {
	out := IsArchive(nil)
	if out {
		t.Fatalf("isArchive should return false as nil is not a valid archive header")
	}
}

// IsArchive must reject bytes that match no known magic number.
func TestIsArchiveInvalidHeader(t *testing.T) {
	header := []byte{0x00, 0x01, 0x02}
	out := IsArchive(header)
	if out {
		t.Fatalf("isArchive should return false as %s is not a valid archive header", header)
	}
}

// The bzip2 magic number ("BZh") must be recognised.
func TestIsArchiveBzip2(t *testing.T) {
	header := []byte{0x42, 0x5A, 0x68}
	out := IsArchive(header)
	if !out {
		t.Fatalf("isArchive should return true as %s is a bz2 header", header)
	}
}

// The zip/7z magic number is deliberately unsupported.
func TestIsArchive7zip(t *testing.T) {
	header := []byte{0x50, 0x4b, 0x03, 0x04}
	out := IsArchive(header)
	if out {
		t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header)
	}
}

// A directory path is not an archive path.
func TestIsArchivePathDir(t *testing.T) {
	cmd := exec.Command("/bin/sh", "-c", "mkdir -p /tmp/archivedir")
	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("Fail to create an archive file for test : %s.", output)
	}
	if IsArchivePath("/tmp/archivedir") {
		t.Fatalf("Incorrectly recognised directory as an archive")
	}
}

// A file that is not a tar (even if gzip-compressed) is not an archive path.
func TestIsArchivePathInvalidFile(t *testing.T) {
	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1K count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz")
	output, err := cmd.CombinedOutput()
	if err != nil {
t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath("/tmp/archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath("/tmp/archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archivedata && tar -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath("/tmp/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath("/tmp/archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func TestDecompressStreamGzip(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } archive, err := os.Open("/tmp/archive.gz") _, err = DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress a gzip file.") } } func TestDecompressStreamBzip2(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } archive, err := os.Open("/tmp/archive.bz2") _, err = DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress a bzip2 file.") } } func TestDecompressStreamXz(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } archive, err := os.Open("/tmp/archive.xz") _, err = DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress a xz file.") } } func 
TestCompressStreamXzUnsuported(t *testing.T) { dest, err := os.Create("/tmp/dest") if err != nil { t.Fatalf("Fail to create the destination file") } _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create("/tmp/dest") if err != nil { t.Fatalf("Fail to create the destination file") } _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create("/tmp/dest") if err != nil { t.Fatalf("Fail to create the destination file") } _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of a uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, _, err := cmdStream(cmd, nil) if err != nil { 
t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error) go func() { _, err := io.Copy(ioutil.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") } } func TestCmdStreamBad(t *testing.T) { badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, _, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := ioutil.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") out, _, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := ioutil.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) invalidDestFolder := path.Join(tempFolder, "invalidDest") // Create a src file srcFile := path.Join(tempFolder, "src") tarFile := path.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } err = UntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with 
invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = UntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := path.Join(tmpFolder, "src") tarFile := path.Join(tmpFolder, "src.tar") os.Create(path.Join(tmpFolder, "src")) cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := path.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } err = UntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := path.Join(destFolder, srcFile) _, err = os.Stat(expectedFile) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := path.Join(tmpFolder, "src") tarFile := path.Join(tmpFolder, "src.tar") os.Create(path.Join(tmpFolder, "src")) cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := path.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = UntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test 
as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := path.Join(tmpFolder, "src") tarFile := path.Join(tmpFolder, "src.tar") os.Create(srcFile) cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := path.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := path.Join(destFolder, srcFile) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = UntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(nil) } destFolder := path.Join(tempFolder, "dest") invalidSrc := path.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = CopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(nil) } srcFolder := path.Join(tempFolder, "src") inexistentDestFolder := path.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = CopyWithTar(srcFolder, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder 
shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := path.Join(folder, "dest") srcFolder := path.Join(folder, "src") src := path.Join(folder, path.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) err = CopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := path.Join(folder, "dest") src := path.Join(folder, path.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777) err = CopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := path.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := path.Join(tempFolder, "doesnotexists") err = 
CopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := path.Join(tempFolder, "src") inexistentDestFolder := path.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = CopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := path.Join(folder, "dest") src := path.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = CopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := path.Join(folder, "dest") srcFolder := path.Join(folder, "src") src := path.Join(folder, path.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) err = CopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func 
TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := ioutil.TempDir("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := ioutil.TempDir("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = TarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := ioutil.TempDir("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != "/3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarUntarWithXattr(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { 
t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != "/3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability") if capability == nil && capability[0] != 0x00 { t.Fatalf("Untar should have kept the 'security.capability' xattr.") } } } func TestTarWithOptions(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := ioutil.TempDir(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. 
// Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } found := false tr := tar.NewReader(f) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func TestTarWithBlockCharFifo(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { t.Fatal(err) } if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { t.Fatal(err) } if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { t.Fatal(err) } dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") if err != nil { 
t.Fatal(err) } defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, Uncompressed) if err != nil { t.Fatal(err) } // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) if err != nil { t.Fatal(err) } bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) if err != nil { t.Fatal(err) } changes, err := ChangesDirs(origin, dest) if err != nil { t.Fatal(err) } if len(changes) > 0 { t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) } } func TestTarWithHardLink(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { t.Fatal(err) } var i1, i2 uint64 if i1, err = getNlink(path.Join(origin, "1")); err != nil { t.Fatal(err) } // sanity check that we can hardlink if i1 != 2 { t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) } dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") if err != nil { t.Fatal(err) } defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, Uncompressed) if err != nil { t.Fatal(err) } // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) if err != nil { t.Fatal(err) } bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) if err != nil { t.Fatal(err) } if i1, err = getInode(path.Join(dest, "1")); err != nil { t.Fatal(err) } if i2, err = getInode(path.Join(dest, "2")); err != nil { t.Fatal(err) } if i1 != i2 { t.Errorf("expected matching inodes, but got %d and %d", i1, i2) } } func getNlink(path string) (uint64, error) 
{ stat, err := os.Stat(path) if err != nil { return 0, err } statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) } // We need this conversion on ARM64 return uint64(statT.Nlink), nil } func getInode(path string) (uint64, error) { stat, err := os.Stat(path) if err != nil { return 0, err } statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) } return statT.Ino, nil } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := path.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := TarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := path.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer 
os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := TarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, 
Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", 
"docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := ioutil.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") buf := make([]byte, 10) n, err := tempArchive.Read(buf) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) } } } docker-1.10.3/pkg/archive/archive_unix.go000066400000000000000000000060051267010174400202630ustar00rootroot00000000000000// +build !windows package archive import ( "archive/tar" "errors" "os" "path/filepath" "syscall" "github.com/docker/docker/pkg/system" ) // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { return srcPath } // getWalkRoot calculates the root path when performing a TarWithOptions. // We use a separate function as this is platform specific. On Linux, we // can't use filepath.Join(srcPath,include) because this will clean away // a trailing "." or "/" which may be important. func getWalkRoot(srcPath string, include string) string { return srcPath + string(filepath.Separator) + include } // CanonicalTarNameForPath returns platform-specific filepath // to canonical posix-style path for tar archival. p is relative // path. func CanonicalTarNameForPath(p string) (string, error) { return p, nil // already unix-style } // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. 
func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { s, ok := stat.(*syscall.Stat_t) if !ok { err = errors.New("cannot convert stat value to syscall.Stat_t") return } inode = uint64(s.Ino) // Currently go does not fill in the major/minors if s.Mode&syscall.S_IFBLK != 0 || s.Mode&syscall.S_IFCHR != 0 { hdr.Devmajor = int64(major(uint64(s.Rdev))) hdr.Devminor = int64(minor(uint64(s.Rdev))) } return } func getFileUIDGID(stat interface{}) (int, int, error) { s, ok := stat.(*syscall.Stat_t) if !ok { return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") } return int(s.Uid), int(s.Gid), nil } func major(device uint64) uint64 { return (device >> 8) & 0xfff } func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) } // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: mode |= syscall.S_IFBLK case tar.TypeChar: mode |= syscall.S_IFCHR case tar.TypeFifo: mode |= syscall.S_IFIFO } if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { return err } return nil } func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := os.Chmod(path, hdrInfo.Mode()); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := os.Chmod(path, hdrInfo.Mode()); err != nil { return err } } return nil } 
docker-1.10.3/pkg/archive/archive_unix_test.go000066400000000000000000000025101267010174400213170ustar00rootroot00000000000000// +build !windows package archive import ( "os" "testing" ) func TestCanonicalTarNameForPath(t *testing.T) { cases := []struct{ in, expected string }{ {"foo", "foo"}, {"foo/bar", "foo/bar"}, {"foo/dir/", "foo/dir/"}, } for _, v := range cases { if out, err := CanonicalTarNameForPath(v.in); err != nil { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if out != v.expected { t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) } } } func TestCanonicalTarName(t *testing.T) { cases := []struct { in string isDir bool expected string }{ {"foo", false, "foo"}, {"foo", true, "foo/"}, {"foo/bar", false, "foo/bar"}, {"foo/bar", true, "foo/bar/"}, } for _, v := range cases { if out, err := canonicalTarName(v.in, v.isDir); err != nil { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if out != v.expected { t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) } } } func TestChmodTarEntry(t *testing.T) { cases := []struct { in, expected os.FileMode }{ {0000, 0000}, {0777, 0777}, {0644, 0644}, {0755, 0755}, {0444, 0444}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) } } } docker-1.10.3/pkg/archive/archive_windows.go000066400000000000000000000041331267010174400207720ustar00rootroot00000000000000// +build windows package archive import ( "archive/tar" "fmt" "os" "path/filepath" "strings" "github.com/docker/docker/pkg/longpath" ) // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { return longpath.AddPrefix(srcPath) } // getWalkRoot calculates the root path when performing a TarWithOptions. 
// We use a separate function as this is platform specific. func getWalkRoot(srcPath string, include string) string { return filepath.Join(srcPath, include) } // CanonicalTarNameForPath returns platform-specific filepath // to canonical posix-style path for tar archival. p is relative // path. func CanonicalTarNameForPath(p string) (string, error) { // windows: convert windows style relative path with backslashes // into forward slashes. Since windows does not allow '/' or '\' // in file names, it is mostly safe to replace however we must // check just in case if strings.Contains(p, "/") { return "", fmt.Errorf("Windows path contains forward slash: %s", p) } return strings.Replace(p, string(os.PathSeparator), "/", -1), nil } // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { perm &= 0755 // Add the x bit: make everything +x from windows perm |= 0111 return perm } func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows return } // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { return nil } func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { return nil } func getFileUIDGID(stat interface{}) (int, int, error) { // no notion of file ownership mapping yet on Windows return 0, 0, nil } docker-1.10.3/pkg/archive/archive_windows_test.go000066400000000000000000000041371267010174400220350ustar00rootroot00000000000000// +build windows package archive import ( "io/ioutil" "os" "path/filepath" "testing" ) func TestCopyFileWithInvalidDest(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := "c:dest" srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, "src", "src") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) err = CopyWithTar(src, dest) if err == nil { t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") } } func TestCanonicalTarNameForPath(t *testing.T) { cases := []struct { in, expected string shouldFail bool }{ {"foo", "foo", false}, {"foo/bar", "___", true}, // unix-styled windows path must fail {`foo\bar`, "foo/bar", false}, } for _, v := range cases { if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if v.shouldFail && err == nil { t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) } else if !v.shouldFail && out != v.expected { t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) } } } func TestCanonicalTarName(t *testing.T) { cases := []struct { in string isDir bool expected string }{ {"foo", false, "foo"}, {"foo", true, "foo/"}, {`foo\bar`, false, "foo/bar"}, {`foo\bar`, true, "foo/bar/"}, } for _, v := range cases { if out, err := canonicalTarName(v.in, v.isDir); err != nil { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if out != v.expected { t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) } } } func TestChmodTarEntry(t *testing.T) { cases := []struct { in, expected os.FileMode }{ {0000, 0111}, {0777, 0755}, {0644, 0755}, {0755, 0755}, {0444, 0555}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) } } } docker-1.10.3/pkg/archive/changes.go000066400000000000000000000265611267010174400172200ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "fmt" "io" "io/ioutil" "os" "path/filepath" "sort" "strings" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) // ChangeType represents the change type. type ChangeType int const ( // ChangeModify represents the modify operation. ChangeModify = iota // ChangeAdd represents the add operation. ChangeAdd // ChangeDelete represents the delete operation. ChangeDelete ) func (c ChangeType) String() string { switch c { case ChangeModify: return "C" case ChangeAdd: return "A" case ChangeDelete: return "D" } return "" } // Change represents a change, it wraps the change type and path. // It describes changes of the files in the path respect to the // parent layers. The change could be modify, add, delete. // This is used for layer diff. 
type Change struct { Path string Kind ChangeType } func (change *Change) String() string { return fmt.Sprintf("%s %s", change.Kind, change.Path) } // for sort.Sort type changesByPath []Change func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } func (c changesByPath) Len() int { return len(c) } func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } // Gnu tar and the go tar writer don't have sub-second mtime // precision, which is problematic when we apply changes via tar // files, we handle this by comparing for exact times, *or* same // second count and either a or b having exactly 0 nanoseconds func sameFsTime(a, b time.Time) bool { return a == b || (a.Unix() == b.Unix() && (a.Nanosecond() == 0 || b.Nanosecond() == 0)) } func sameFsTimeSpec(a, b syscall.Timespec) bool { return a.Sec == b.Sec && (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) } // Changes walks the path rw and determines changes for the files in the path, // with respect to the parent layers func Changes(layers []string, rw string) ([]Change, error) { var ( changes []Change changedDirs = make(map[string]struct{}) ) err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path path, err = filepath.Rel(rw, path) if err != nil { return err } // As this runs on the daemon side, file paths are OS specific. 
path = filepath.Join(string(os.PathSeparator), path) // Skip root if path == string(os.PathSeparator) { return nil } // Skip AUFS metadata if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched { return err } change := Change{ Path: path, } // Find out what kind of modification happened file := filepath.Base(path) // If there is a whiteout, then the file was removed if strings.HasPrefix(file, WhiteoutPrefix) { originalFile := file[len(WhiteoutPrefix):] change.Path = filepath.Join(filepath.Dir(path), originalFile) change.Kind = ChangeDelete } else { // Otherwise, the file was added change.Kind = ChangeAdd // ...Unless it already existed in a top layer, in which case, it's a modification for _, layer := range layers { stat, err := os.Stat(filepath.Join(layer, path)) if err != nil && !os.IsNotExist(err) { return err } if err == nil { // The file existed in the top layer, so that's a modification // However, if it's a directory, maybe it wasn't actually modified. // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar if stat.IsDir() && f.IsDir() { if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { // Both directories are the same, don't record the change return nil } } change.Kind = ChangeModify break } } } // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. // This block is here to ensure the change is recorded even if the // modify time, mode and size of the parent directory in the rw and ro layers are all equal. // Check https://github.com/docker/docker/pull/13590 for details. 
if f.IsDir() { changedDirs[path] = struct{}{} } if change.Kind == ChangeAdd || change.Kind == ChangeDelete { parent := filepath.Dir(path) if _, ok := changedDirs[parent]; !ok && parent != "/" { changes = append(changes, Change{Path: parent, Kind: ChangeModify}) changedDirs[parent] = struct{}{} } } // Record change changes = append(changes, change) return nil }) if err != nil && !os.IsNotExist(err) { return nil, err } return changes, nil } // FileInfo describes the information of a file. type FileInfo struct { parent *FileInfo name string stat *system.StatT children map[string]*FileInfo capability []byte added bool } // LookUp looks up the file information of a file. func (info *FileInfo) LookUp(path string) *FileInfo { // As this runs on the daemon side, file paths are OS specific. parent := info if path == string(os.PathSeparator) { return info } pathElements := strings.Split(path, string(os.PathSeparator)) for _, elem := range pathElements { if elem != "" { child := parent.children[elem] if child == nil { return nil } parent = child } } return parent } func (info *FileInfo) path() string { if info.parent == nil { // As this runs on the daemon side, file paths are OS specific. return string(os.PathSeparator) } return filepath.Join(info.parent.path(), info.name) } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { sizeAtEntry := len(*changes) if oldInfo == nil { // add change := Change{ Path: info.path(), Kind: ChangeAdd, } *changes = append(*changes, change) info.added = true } // We make a copy so we can modify it to detect additions // also, we only recurse on the old dir if the new info is a directory // otherwise any previous delete/change is considered recursive oldChildren := make(map[string]*FileInfo) if oldInfo != nil && info.isDir() { for k, v := range oldInfo.children { oldChildren[k] = v } } for name, newChild := range info.children { oldChild, _ := oldChildren[name] if oldChild != nil { // change? 
oldStat := oldChild.stat newStat := newChild.stat // Note: We can't compare inode or ctime or blocksize here, because these change // when copying a file into a container. However, that is not generally a problem // because any content change will change mtime, and any status change should // be visible when actually comparing the stat fields. The only time this // breaks down is if some code intentionally hides a change by setting // back mtime if statDifferent(oldStat, newStat) || bytes.Compare(oldChild.capability, newChild.capability) != 0 { change := Change{ Path: newChild.path(), Kind: ChangeModify, } *changes = append(*changes, change) newChild.added = true } // Remove from copy so we can detect deletions delete(oldChildren, name) } newChild.addChanges(oldChild, changes) } for _, oldChild := range oldChildren { // delete change := Change{ Path: oldChild.path(), Kind: ChangeDelete, } *changes = append(*changes, change) } // If there were changes inside this directory, we need to add it, even if the directory // itself wasn't changed. This is needed to properly save and restore filesystem permissions. // As this runs on the daemon side, file paths are OS specific. if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { change := Change{ Path: info.path(), Kind: ChangeModify, } // Let's insert the directory entry before the recently added entries located inside this dir *changes = append(*changes, change) // just to resize the slice, will be overwritten copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) (*changes)[sizeAtEntry] = change } } // Changes add changes to file information. func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { var changes []Change info.addChanges(oldInfo, &changes) return changes } func newRootFileInfo() *FileInfo { // As this runs on the daemon side, file paths are OS specific. 
root := &FileInfo{ name: string(os.PathSeparator), children: make(map[string]*FileInfo), } return root } // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. func ChangesDirs(newDir, oldDir string) ([]Change, error) { var ( oldRoot, newRoot *FileInfo ) if oldDir == "" { emptyDir, err := ioutil.TempDir("", "empty") if err != nil { return nil, err } defer os.Remove(emptyDir) oldDir = emptyDir } oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) if err != nil { return nil, err } return newRoot.Changes(oldRoot), nil } // ChangesSize calculates the size in bytes of the provided changes, based on newDir. func ChangesSize(newDir string, changes []Change) int64 { var ( size int64 sf = make(map[uint64]struct{}) ) for _, change := range changes { if change.Kind == ChangeModify || change.Kind == ChangeAdd { file := filepath.Join(newDir, change.Path) fileInfo, err := os.Lstat(file) if err != nil { logrus.Errorf("Can not stat %q: %s", file, err) continue } if fileInfo != nil && !fileInfo.IsDir() { if hasHardlinks(fileInfo) { inode := getIno(fileInfo) if _, ok := sf[inode]; !ok { size += fileInfo.Size() sf[inode] = struct{}{} } } else { size += fileInfo.Size() } } } } return size } // ExportChanges produces an Archive from the provided changes, relative to dir. func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { reader, writer := io.Pipe() go func() { ta := &tarAppender{ TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), SeenFiles: make(map[uint64]string), UIDMaps: uidMaps, GIDMaps: gidMaps, } // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) sort.Sort(changesByPath(changes)) // In general we log errors here but ignore them because // during e.g. 
a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this for _, change := range changes { if change.Kind == ChangeDelete { whiteOutDir := filepath.Dir(change.Path) whiteOutBase := filepath.Base(change.Path) whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) timestamp := time.Now() hdr := &tar.Header{ Name: whiteOut[1:], Size: 0, ModTime: timestamp, AccessTime: timestamp, ChangeTime: timestamp, } if err := ta.TarWriter.WriteHeader(hdr); err != nil { logrus.Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) if err := ta.addTarFile(path, change.Path[1:]); err != nil { logrus.Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { logrus.Debugf("Can't close layer: %s", err) } if err := writer.Close(); err != nil { logrus.Debugf("failed close Changes writer: %s", err) } }() return reader, nil } docker-1.10.3/pkg/archive/changes_linux.go000066400000000000000000000165141267010174400204340ustar00rootroot00000000000000package archive import ( "bytes" "fmt" "os" "path/filepath" "sort" "syscall" "unsafe" "github.com/docker/docker/pkg/system" ) // walker is used to implement collectFileInfoForChanges on linux. Where this // method in general returns the entire contents of two directory trees, we // optimize some FS calls out on linux. In particular, we take advantage of the // fact that getdents(2) returns the inode of each file in the directory being // walked, which, when walking two trees in parallel to generate a list of // changes, can be used to prune subtrees without ever having to lstat(2) them // directly. Eliminating stat calls in this way can save up to seconds on large // images. 
type walker struct { dir1 string dir2 string root1 *FileInfo root2 *FileInfo } // collectFileInfoForChanges returns a complete representation of the trees // rooted at dir1 and dir2, with one important exception: any subtree or // leaf where the inode and device numbers are an exact match between dir1 // and dir2 will be pruned from the results. This method is *only* to be used // to generating a list of changes between the two directories, as it does not // reflect the full contents. func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { w := &walker{ dir1: dir1, dir2: dir2, root1: newRootFileInfo(), root2: newRootFileInfo(), } i1, err := os.Lstat(w.dir1) if err != nil { return nil, nil, err } i2, err := os.Lstat(w.dir2) if err != nil { return nil, nil, err } if err := w.walk("/", i1, i2); err != nil { return nil, nil, err } return w.root1, w.root2, nil } // Given a FileInfo, its path info, and a reference to the root of the tree // being constructed, register this file with the tree. func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { if fi == nil { return nil } parent := root.LookUp(filepath.Dir(path)) if parent == nil { return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) } info := &FileInfo{ name: filepath.Base(path), children: make(map[string]*FileInfo), parent: parent, } cpath := filepath.Join(dir, path) stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) if err != nil { return err } info.stat = stat info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access parent.children[info.name] = info return nil } // Walk a subtree rooted at the same path in both trees being iterated. 
For // example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { // Register these nodes with the return trees, unless we're still at the // (already-created) roots: if path != "/" { if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { return err } if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { return err } } is1Dir := i1 != nil && i1.IsDir() is2Dir := i2 != nil && i2.IsDir() sameDevice := false if i1 != nil && i2 != nil { si1 := i1.Sys().(*syscall.Stat_t) si2 := i2.Sys().(*syscall.Stat_t) if si1.Dev == si2.Dev { sameDevice = true } } // If these files are both non-existent, or leaves (non-dirs), we are done. if !is1Dir && !is2Dir { return nil } // Fetch the names of all the files contained in both directories being walked: var names1, names2 []nameIno if is1Dir { names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access if err != nil { return err } } if is2Dir { names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access if err != nil { return err } } // We have lists of the files contained in both parallel directories, sorted // in the same order. Walk them in parallel, generating a unique merged list // of all items present in either or both directories. 
var names []string ix1 := 0 ix2 := 0 for { if ix1 >= len(names1) { break } if ix2 >= len(names2) { break } ni1 := names1[ix1] ni2 := names2[ix2] switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { case -1: // ni1 < ni2 -- advance ni1 // we will not encounter ni1 in names2 names = append(names, ni1.name) ix1++ case 0: // ni1 == ni2 if ni1.ino != ni2.ino || !sameDevice { names = append(names, ni1.name) } ix1++ ix2++ case 1: // ni1 > ni2 -- advance ni2 // we will not encounter ni2 in names1 names = append(names, ni2.name) ix2++ } } for ix1 < len(names1) { names = append(names, names1[ix1].name) ix1++ } for ix2 < len(names2) { names = append(names, names2[ix2].name) ix2++ } // For each of the names present in either or both of the directories being // iterated, stat the name under each root, and recurse the pair of them: for _, name := range names { fname := filepath.Join(path, name) var cInfo1, cInfo2 os.FileInfo if is1Dir { cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access if err != nil && !os.IsNotExist(err) { return err } } if is2Dir { cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access if err != nil && !os.IsNotExist(err) { return err } } if err = w.walk(fname, cInfo1, cInfo2); err != nil { return err } } return nil } // {name,inode} pairs used to support the early-pruning logic of the walker type type nameIno struct { name string ino uint64 } type nameInoSlice []nameIno func (s nameInoSlice) Len() int { return len(s) } func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } // readdirnames is a hacked-apart version of the Go stdlib code, exposing inode // numbers further up the stack when reading directory contents. Unlike // os.Readdirnames, which returns a list of filenames, this function returns a // list of {filename,inode} pairs. 
func readdirnames(dirname string) (names []nameIno, err error) { var ( size = 100 buf = make([]byte, 4096) nbuf int bufp int nb int ) f, err := os.Open(dirname) if err != nil { return nil, err } defer f.Close() names = make([]nameIno, 0, size) // Empty with room to grow. for { // Refill the buffer if necessary if bufp >= nbuf { bufp = 0 nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux if nbuf < 0 { nbuf = 0 } if err != nil { return nil, os.NewSyscallError("readdirent", err) } if nbuf <= 0 { break // EOF } } // Drain the buffer nb, names = parseDirent(buf[bufp:nbuf], names) bufp += nb } sl := nameInoSlice(names) sort.Sort(sl) return sl, nil } // parseDirent is a minor modification of syscall.ParseDirent (linux version) // which returns {name,inode} pairs instead of just names. func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { origlen := len(buf) for len(buf) > 0 { dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) buf = buf[dirent.Reclen:] if dirent.Ino == 0 { // File absent in directory. continue } bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) var name = string(bytes[0:clen(bytes[:])]) if name == "." || name == ".." 
{ // Useless names continue } names = append(names, nameIno{name, dirent.Ino}) } return origlen - len(buf), names } func clen(n []byte) int { for i := 0; i < len(n); i++ { if n[i] == 0 { return i } } return len(n) } docker-1.10.3/pkg/archive/changes_other.go000066400000000000000000000037471267010174400204220ustar00rootroot00000000000000// +build !linux package archive import ( "fmt" "os" "path/filepath" "runtime" "strings" "github.com/docker/docker/pkg/system" ) func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { var ( oldRoot, newRoot *FileInfo err1, err2 error errs = make(chan error, 2) ) go func() { oldRoot, err1 = collectFileInfo(oldDir) errs <- err1 }() go func() { newRoot, err2 = collectFileInfo(newDir) errs <- err2 }() // block until both routines have returned for i := 0; i < 2; i++ { if err := <-errs; err != nil { return nil, nil, err } } return oldRoot, newRoot, nil } func collectFileInfo(sourceDir string) (*FileInfo, error) { root := newRootFileInfo() err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(sourceDir, path) if err != nil { return err } // As this runs on the daemon side, file paths are OS specific. relPath = filepath.Join(string(os.PathSeparator), relPath) // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. // Temporary workaround. If the returned path starts with two backslashes, // trim it down to a single backslash. Only relevant on Windows. 
if runtime.GOOS == "windows" { if strings.HasPrefix(relPath, `\\`) { relPath = relPath[1:] } } if relPath == string(os.PathSeparator) { return nil } parent := root.LookUp(filepath.Dir(relPath)) if parent == nil { return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) } info := &FileInfo{ name: filepath.Base(relPath), children: make(map[string]*FileInfo), parent: parent, } s, err := system.Lstat(path) if err != nil { return err } info.stat = s info.capability, _ = system.Lgetxattr(path, "security.capability") parent.children[info.name] = info return nil }) if err != nil { return nil, err } return root, nil } docker-1.10.3/pkg/archive/changes_posix_test.go000066400000000000000000000055171267010174400214770ustar00rootroot00000000000000package archive import ( "archive/tar" "fmt" "io" "io/ioutil" "os" "path" "sort" "testing" ) func TestHardLinkOrder(t *testing.T) { names := []string{"file1.txt", "file2.txt", "file3.txt"} msg := []byte("Hey y'all") // Create dir src, err := ioutil.TempDir("", "docker-hardlink-test-src-") if err != nil { t.Fatal(err) } //defer os.RemoveAll(src) for _, name := range names { func() { fh, err := os.Create(path.Join(src, name)) if err != nil { t.Fatal(err) } defer fh.Close() if _, err = fh.Write(msg); err != nil { t.Fatal(err) } }() } // Create dest, with changes that includes hardlinks dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") if err != nil { t.Fatal(err) } os.RemoveAll(dest) // we just want the name, at first if err := copyDir(src, dest); err != nil { t.Fatal(err) } defer os.RemoveAll(dest) for _, name := range names { for i := 0; i < 5; i++ { if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { t.Fatal(err) } } } // get changes changes, err := ChangesDirs(dest, src) if err != nil { t.Fatal(err) } // sort sort.Sort(changesByPath(changes)) // ExportChanges ar, err := ExportChanges(dest, changes, nil, nil) if err != nil { t.Fatal(err) } hdrs, err 
:= walkHeaders(ar) if err != nil { t.Fatal(err) } // reverse sort sort.Sort(sort.Reverse(changesByPath(changes))) // ExportChanges arRev, err := ExportChanges(dest, changes, nil, nil) if err != nil { t.Fatal(err) } hdrsRev, err := walkHeaders(arRev) if err != nil { t.Fatal(err) } // line up the two sets sort.Sort(tarHeaders(hdrs)) sort.Sort(tarHeaders(hdrsRev)) // compare Size and LinkName for i := range hdrs { if hdrs[i].Name != hdrsRev[i].Name { t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) } if hdrs[i].Size != hdrsRev[i].Size { t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) } if hdrs[i].Typeflag != hdrsRev[i].Typeflag { t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) } if hdrs[i].Linkname != hdrsRev[i].Linkname { t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) } } } type tarHeaders []tar.Header func (th tarHeaders) Len() int { return len(th) } func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } func walkHeaders(r io.Reader) ([]tar.Header, error) { t := tar.NewReader(r) headers := []tar.Header{} for { hdr, err := t.Next() if err != nil { if err == io.EOF { break } return headers, err } headers = append(headers, *hdr) } return headers, nil } docker-1.10.3/pkg/archive/changes_test.go000066400000000000000000000327001267010174400202470ustar00rootroot00000000000000package archive import ( "io/ioutil" "os" "os/exec" "path" "sort" "testing" "time" ) func max(x, y int) int { if x >= y { return x } return y } func copyDir(src, dst string) error { cmd := exec.Command("cp", "-a", src, dst) if err := cmd.Run(); err != nil { return err } return nil } type FileType uint32 const ( Regular FileType = iota Dir Symlink ) type FileData struct { filetype FileType path string contents 
string permissions os.FileMode } func createSampleDir(t *testing.T, root string) { files := []FileData{ {Regular, "file1", "file1\n", 0600}, {Regular, "file2", "file2\n", 0666}, {Regular, "file3", "file3\n", 0404}, {Regular, "file4", "file4\n", 0600}, {Regular, "file5", "file5\n", 0600}, {Regular, "file6", "file6\n", 0600}, {Regular, "file7", "file7\n", 0600}, {Dir, "dir1", "", 0740}, {Regular, "dir1/file1-1", "file1-1\n", 01444}, {Regular, "dir1/file1-2", "file1-2\n", 0666}, {Dir, "dir2", "", 0700}, {Regular, "dir2/file2-1", "file2-1\n", 0666}, {Regular, "dir2/file2-2", "file2-2\n", 0666}, {Dir, "dir3", "", 0700}, {Regular, "dir3/file3-1", "file3-1\n", 0666}, {Regular, "dir3/file3-2", "file3-2\n", 0666}, {Dir, "dir4", "", 0700}, {Regular, "dir4/file3-1", "file4-1\n", 0666}, {Regular, "dir4/file3-2", "file4-2\n", 0666}, {Symlink, "symlink1", "target1", 0666}, {Symlink, "symlink2", "target2", 0666}, {Symlink, "symlink3", root + "/file1", 0666}, {Symlink, "symlink4", root + "/symlink3", 0666}, {Symlink, "dirSymlink", root + "/dir1", 0740}, } now := time.Now() for _, info := range files { p := path.Join(root, info.path) if info.filetype == Dir { if err := os.MkdirAll(p, info.permissions); err != nil { t.Fatal(err) } } else if info.filetype == Regular { if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { t.Fatal(err) } } else if info.filetype == Symlink { if err := os.Symlink(info.contents, p); err != nil { t.Fatal(err) } } if info.filetype != Symlink { // Set a consistent ctime, atime for all files and dirs if err := os.Chtimes(p, now, now); err != nil { t.Fatal(err) } } } } func TestChangeString(t *testing.T) { modifiyChange := Change{"change", ChangeModify} toString := modifiyChange.String() if toString != "C change" { t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) } addChange := Change{"change", ChangeAdd} toString = addChange.String() if toString != "A change" { 
t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) } deleteChange := Change{"change", ChangeDelete} toString = deleteChange.String() if toString != "D change" { t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) } } func TestChangesWithNoChanges(t *testing.T) { rwLayer, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(rwLayer) layer, err := ioutil.TempDir("", "docker-changes-test-layer") if err != nil { t.Fatal(err) } defer os.RemoveAll(layer) createSampleDir(t, layer) changes, err := Changes([]string{layer}, rwLayer) if err != nil { t.Fatal(err) } if len(changes) != 0 { t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) } } func TestChangesWithChanges(t *testing.T) { // Mock the readonly layer layer, err := ioutil.TempDir("", "docker-changes-test-layer") if err != nil { t.Fatal(err) } defer os.RemoveAll(layer) createSampleDir(t, layer) os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) // Mock the RW layer rwLayer, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(rwLayer) // Create a folder in RW layer dir1 := path.Join(rwLayer, "dir1") os.MkdirAll(dir1, 0740) deletedFile := path.Join(dir1, ".wh.file1-2") ioutil.WriteFile(deletedFile, []byte{}, 0600) modifiedFile := path.Join(dir1, "file1-1") ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) // Let's add a subfolder for a newFile subfolder := path.Join(dir1, "subfolder") os.MkdirAll(subfolder, 0740) newFile := path.Join(subfolder, "newFile") ioutil.WriteFile(newFile, []byte{}, 0740) changes, err := Changes([]string{layer}, rwLayer) if err != nil { t.Fatal(err) } expectedChanges := []Change{ {"/dir1", ChangeModify}, {"/dir1/file1-1", ChangeModify}, {"/dir1/file1-2", ChangeDelete}, {"/dir1/subfolder", ChangeModify}, {"/dir1/subfolder/newFile", 
ChangeAdd}, } checkChanges(expectedChanges, changes, t) } // See https://github.com/docker/docker/pull/13590 func TestChangesWithChangesGH13590(t *testing.T) { baseLayer, err := ioutil.TempDir("", "docker-changes-test.") defer os.RemoveAll(baseLayer) dir3 := path.Join(baseLayer, "dir1/dir2/dir3") os.MkdirAll(dir3, 07400) file := path.Join(dir3, "file.txt") ioutil.WriteFile(file, []byte("hello"), 0666) layer, err := ioutil.TempDir("", "docker-changes-test2.") defer os.RemoveAll(layer) // Test creating a new file if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { t.Fatalf("Cmd failed: %q", err) } os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) file = path.Join(layer, "dir1/dir2/dir3/file1.txt") ioutil.WriteFile(file, []byte("bye"), 0666) changes, err := Changes([]string{baseLayer}, layer) if err != nil { t.Fatal(err) } expectedChanges := []Change{ {"/dir1/dir2/dir3", ChangeModify}, {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, } checkChanges(expectedChanges, changes, t) // Now test changing a file layer, err = ioutil.TempDir("", "docker-changes-test3.") defer os.RemoveAll(layer) if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { t.Fatalf("Cmd failed: %q", err) } file = path.Join(layer, "dir1/dir2/dir3/file.txt") ioutil.WriteFile(file, []byte("bye"), 0666) changes, err = Changes([]string{baseLayer}, layer) if err != nil { t.Fatal(err) } expectedChanges = []Change{ {"/dir1/dir2/dir3/file.txt", ChangeModify}, } checkChanges(expectedChanges, changes, t) } // Create an directory, copy it, make sure we report no changes between the two func TestChangesDirsEmpty(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(src) createSampleDir(t, src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) } if len(changes) != 0 { t.Fatalf("Reported changes for identical dirs: %v", 
changes) } os.RemoveAll(src) os.RemoveAll(dst) } func mutateSampleDir(t *testing.T, root string) { // Remove a regular file if err := os.RemoveAll(path.Join(root, "file1")); err != nil { t.Fatal(err) } // Remove a directory if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { t.Fatal(err) } // Remove a symlink if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { t.Fatal(err) } // Rewrite a file if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { t.Fatal(err) } // Replace a file if err := os.RemoveAll(path.Join(root, "file3")); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { t.Fatal(err) } // Touch file if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { t.Fatal(err) } // Replace file with dir if err := os.RemoveAll(path.Join(root, "file5")); err != nil { t.Fatal(err) } if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { t.Fatal(err) } // Create new file if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { t.Fatal(err) } // Create new dir if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { t.Fatal(err) } // Create a new symlink if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { t.Fatal(err) } // Change a symlink if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { t.Fatal(err) } if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { t.Fatal(err) } // Replace dir with file if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { t.Fatal(err) } // Touch dir if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { t.Fatal(err) } } func TestChangesDirsMutated(t 
*testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } createSampleDir(t, src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } defer os.RemoveAll(src) defer os.RemoveAll(dst) mutateSampleDir(t, dst) changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) } sort.Sort(changesByPath(changes)) expectedChanges := []Change{ {"/dir1", ChangeDelete}, {"/dir2", ChangeModify}, {"/dirnew", ChangeAdd}, {"/file1", ChangeDelete}, {"/file2", ChangeModify}, {"/file3", ChangeModify}, {"/file4", ChangeModify}, {"/file5", ChangeModify}, {"/filenew", ChangeAdd}, {"/symlink1", ChangeDelete}, {"/symlink2", ChangeModify}, {"/symlinknew", ChangeAdd}, } for i := 0; i < max(len(changes), len(expectedChanges)); i++ { if i >= len(expectedChanges) { t.Fatalf("unexpected change %s\n", changes[i].String()) } if i >= len(changes) { t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) } if changes[i].Path == expectedChanges[i].Path { if changes[i] != expectedChanges[i] { t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) } } else if changes[i].Path < expectedChanges[i].Path { t.Fatalf("unexpected change %s\n", changes[i].String()) } else { t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) } } } func TestApplyLayer(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } createSampleDir(t, src) defer os.RemoveAll(src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } mutateSampleDir(t, dst) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) } layer, err := ExportChanges(dst, changes, nil, nil) if err != nil { t.Fatal(err) } layerCopy, err := NewTempArchive(layer, "") if err != nil { t.Fatal(err) } if _, err := ApplyLayer(src, layerCopy); err != nil { 
t.Fatal(err) } changes2, err := ChangesDirs(src, dst) if err != nil { t.Fatal(err) } if len(changes2) != 0 { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) } } func TestChangesSizeWithHardlinks(t *testing.T) { srcDir, err := ioutil.TempDir("", "docker-test-srcDir") if err != nil { t.Fatal(err) } defer os.RemoveAll(srcDir) destDir, err := ioutil.TempDir("", "docker-test-destDir") if err != nil { t.Fatal(err) } defer os.RemoveAll(destDir) creationSize, err := prepareUntarSourceDirectory(100, destDir, true) if err != nil { t.Fatal(err) } changes, err := ChangesDirs(destDir, srcDir) if err != nil { t.Fatal(err) } got := ChangesSize(destDir, changes) if got != int64(creationSize) { t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) } } func TestChangesSizeWithNoChanges(t *testing.T) { size := ChangesSize("/tmp", nil) if size != 0 { t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) } } func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { changes := []Change{ {Path: "deletedPath", Kind: ChangeDelete}, } size := ChangesSize("/tmp", changes) if size != 0 { t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) } } func TestChangesSize(t *testing.T) { parentPath, err := ioutil.TempDir("", "docker-changes-test") defer os.RemoveAll(parentPath) addition := path.Join(parentPath, "addition") if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { t.Fatal(err) } modification := path.Join(parentPath, "modification") if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { t.Fatal(err) } changes := []Change{ {Path: "addition", Kind: ChangeAdd}, {Path: "modification", Kind: ChangeModify}, } size := ChangesSize(parentPath, changes) if size != 6 { t.Fatalf("Expected 6 bytes of changes, got %d", size) } } func checkChanges(expectedChanges, changes []Change, t *testing.T) { sort.Sort(changesByPath(expectedChanges)) 
sort.Sort(changesByPath(changes)) for i := 0; i < max(len(changes), len(expectedChanges)); i++ { if i >= len(expectedChanges) { t.Fatalf("unexpected change %s\n", changes[i].String()) } if i >= len(changes) { t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) } if changes[i].Path == expectedChanges[i].Path { if changes[i] != expectedChanges[i] { t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) } } else if changes[i].Path < expectedChanges[i].Path { t.Fatalf("unexpected change %s\n", changes[i].String()) } else { t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) } } } docker-1.10.3/pkg/archive/changes_unix.go000066400000000000000000000016401267010174400202520ustar00rootroot00000000000000// +build !windows package archive import ( "os" "syscall" "github.com/docker/docker/pkg/system" ) func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { // Don't look at size for dirs, its not a good measure of change if oldStat.Mode() != newStat.Mode() || oldStat.UID() != newStat.UID() || oldStat.GID() != newStat.GID() || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { return true } return false } func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 } func getIno(fi os.FileInfo) uint64 { return uint64(fi.Sys().(*syscall.Stat_t).Ino) } func hasHardlinks(fi os.FileInfo) bool { return fi.Sys().(*syscall.Stat_t).Nlink > 1 } docker-1.10.3/pkg/archive/changes_windows.go000066400000000000000000000011161267010174400207570ustar00rootroot00000000000000package archive import ( "os" "github.com/docker/docker/pkg/system" ) func statDifferent(oldStat *system.StatT, newStat 
*system.StatT) bool { // Don't look at size for dirs, its not a good measure of change if oldStat.ModTime() != newStat.ModTime() || oldStat.Mode() != newStat.Mode() || oldStat.Size() != newStat.Size() && !oldStat.IsDir() { return true } return false } func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.IsDir() } func getIno(fi os.FileInfo) (inode uint64) { return } func hasHardlinks(fi os.FileInfo) bool { return false } docker-1.10.3/pkg/archive/copy.go000066400000000000000000000364731267010174400165650ustar00rootroot00000000000000package archive import ( "archive/tar" "errors" "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/system" ) // Errors used or returned by this file. var ( ErrNotDirectory = errors.New("not a directory") ErrDirNotExists = errors.New("no such directory") ErrCannotCopyDir = errors.New("cannot copy directory") ErrInvalidCopySource = errors.New("invalid copy source content") ) // PreserveTrailingDotOrSeparator returns the given cleaned path (after // processing using any utility functions from the path or filepath stdlib // packages) and appends a trailing `/.` or `/` if its corresponding original // path (from before being processed by utility functions from the path or // filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned // path already ends in a `.` path segment, then another is not added. If the // clean path already ends in a path separator, then another is not added. func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { // Ensure paths are in platform semantics cleanedPath = normalizePath(cleanedPath) originalPath = normalizePath(originalPath) if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { if !hasTrailingPathSeparator(cleanedPath) { // Add a separator if it doesn't already end with one (a cleaned // path would only end in a separator if it is the root). 
cleanedPath += string(filepath.Separator) } cleanedPath += "." } if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { cleanedPath += string(filepath.Separator) } return cleanedPath } // assertsDirectory returns whether the given path is // asserted to be a directory, i.e., the path ends with // a trailing '/' or `/.`, assuming a path separator of `/`. func assertsDirectory(path string) bool { return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) } // hasTrailingPathSeparator returns whether the given // path ends with the system's path separator character. func hasTrailingPathSeparator(path string) bool { return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) } // specifiesCurrentDir returns whether the given path specifies // a "current directory", i.e., the last path segment is `.`. func specifiesCurrentDir(path string) bool { return filepath.Base(path) == "." } // SplitPathDirEntry splits the given path between its directory name and its // basename by first cleaning the path but preserves a trailing "." if the // original path specified the current directory. func SplitPathDirEntry(path string) (dir, base string) { cleanedPath := filepath.Clean(normalizePath(path)) if specifiesCurrentDir(path) { cleanedPath += string(filepath.Separator) + "." } return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) } // TarResource archives the resource described by the given CopyInfo to a Tar // archive. A non-nil error is returned if sourcePath does not exist or is // asserted to be a directory but exists as another type of file. // // This function acts as a convenient wrapper around TarWithOptions, which // requires a directory as the source path. TarResource accepts either a // directory or a file path and correctly sets the Tar options. 
func TarResource(sourceInfo CopyInfo) (content Archive, err error) { return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) } // TarResourceRebase is like TarResource but renames the first path element of // items in the resulting tar archive to match the given rebaseName if not "". func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { sourcePath = normalizePath(sourcePath) if _, err = os.Lstat(sourcePath); err != nil { // Catches the case where the source does not exist or is not a // directory if asserted to be a directory, as this also causes an // error. return } // Separate the source path between it's directory and // the entry in that directory which we are archiving. sourceDir, sourceBase := SplitPathDirEntry(sourcePath) filter := []string{sourceBase} logrus.Debugf("copying %q from %q", sourceBase, sourceDir) return TarWithOptions(sourceDir, &TarOptions{ Compression: Uncompressed, IncludeFiles: filter, IncludeSourceDir: true, RebaseNames: map[string]string{ sourceBase: rebaseName, }, }) } // CopyInfo holds basic info about the source // or destination path of a copy operation. type CopyInfo struct { Path string Exists bool IsDir bool RebaseName string } // CopyInfoSourcePath stats the given path to create a CopyInfo // struct representing that resource for the source of an archive copy // operation. The given path should be an absolute local path. A source path // has all symlinks evaluated that appear before the last path separator ("/" // on Unix). As it is to be a copy source, the path must exist. 
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { // normalize the file path and then evaluate the symbol link // we will use the target file instead of the symbol link if // followLink is set path = normalizePath(path) resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) if err != nil { return CopyInfo{}, err } stat, err := os.Lstat(resolvedPath) if err != nil { return CopyInfo{}, err } return CopyInfo{ Path: resolvedPath, Exists: true, IsDir: stat.IsDir(), RebaseName: rebaseName, }, nil } // CopyInfoDestinationPath stats the given path to create a CopyInfo // struct representing that resource for the destination of an archive copy // operation. The given path should be an absolute local path. func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. path = normalizePath(path) originalPath := path stat, err := os.Lstat(path) if err == nil && stat.Mode()&os.ModeSymlink == 0 { // The path exists and is not a symlink. return CopyInfo{ Path: path, Exists: true, IsDir: stat.IsDir(), }, nil } // While the path is a symlink. for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { if n > maxSymlinkIter { // Don't follow symlinks more than this arbitrary number of times. return CopyInfo{}, errors.New("too many symlinks in " + originalPath) } // The path is a symbolic link. We need to evaluate it so that the // destination of the copy operation is the link target and not the // link itself. This is notably different than CopyInfoSourcePath which // only evaluates symlinks before the last appearing path separator. // Also note that it is okay if the last path element is a broken // symlink as the copy operation should create the target. var linkTarget string linkTarget, err = os.Readlink(path) if err != nil { return CopyInfo{}, err } if !system.IsAbs(linkTarget) { // Join with the parent directory. 
dstParent, _ := SplitPathDirEntry(path) linkTarget = filepath.Join(dstParent, linkTarget) } path = linkTarget stat, err = os.Lstat(path) } if err != nil { // It's okay if the destination path doesn't exist. We can still // continue the copy operation if the parent directory exists. if !os.IsNotExist(err) { return CopyInfo{}, err } // Ensure destination parent dir exists. dstParent, _ := SplitPathDirEntry(path) parentDirStat, err := os.Lstat(dstParent) if err != nil { return CopyInfo{}, err } if !parentDirStat.IsDir() { return CopyInfo{}, ErrNotDirectory } return CopyInfo{Path: path}, nil } // The path exists after resolving symlinks. return CopyInfo{ Path: path, Exists: true, IsDir: stat.IsDir(), }, nil } // PrepareArchiveCopy prepares the given srcContent archive, which should // contain the archived resource described by srcInfo, to the destination // described by dstInfo. Returns the possibly modified content archive along // with the path to the destination directory which it should be extracted to. func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { // Ensure in platform semantics srcInfo.Path = normalizePath(srcInfo.Path) dstInfo.Path = normalizePath(dstInfo.Path) // Separate the destination path between its directory and base // components in case the source archive contents need to be rebased. dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) _, srcBase := SplitPathDirEntry(srcInfo.Path) switch { case dstInfo.Exists && dstInfo.IsDir: // The destination exists as a directory. No alteration // to srcContent is needed as its contents can be // simply extracted to the destination directory. return dstInfo.Path, ioutil.NopCloser(srcContent), nil case dstInfo.Exists && srcInfo.IsDir: // The destination exists as some type of file and the source // content is a directory. This is an error condition since // you cannot copy a directory to an existing file location. 
return "", nil, ErrCannotCopyDir case dstInfo.Exists: // The destination exists as some type of file and the source content // is also a file. The source content entry will have to be renamed to // have a basename which matches the destination path's basename. if len(srcInfo.RebaseName) != 0 { srcBase = srcInfo.RebaseName } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil case srcInfo.IsDir: // The destination does not exist and the source content is an archive // of a directory. The archive should be extracted to the parent of // the destination path instead, and when it is, the directory that is // created as a result should take the name of the destination path. // The source content entries will have to be renamed to have a // basename which matches the destination path's basename. if len(srcInfo.RebaseName) != 0 { srcBase = srcInfo.RebaseName } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil case assertsDirectory(dstInfo.Path): // The destination does not exist and is asserted to be created as a // directory, but the source content is not a directory. This is an // error condition since you cannot create a directory from a file // source. return "", nil, ErrDirNotExists default: // The last remaining case is when the destination does not exist, is // not asserted to be a directory, and the source content is not an // archive of a directory. It this case, the destination file will need // to be created when the archive is extracted and the source content // entry will have to be renamed to have a basename which matches the // destination path's basename. if len(srcInfo.RebaseName) != 0 { srcBase = srcInfo.RebaseName } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil } } // RebaseArchiveEntries rewrites the given srcContent archive replacing // an occurrence of oldBase with newBase at the beginning of entry names. 
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { if oldBase == string(os.PathSeparator) { // If oldBase specifies the root directory, use an empty string as // oldBase instead so that newBase doesn't replace the path separator // that all paths will start with. oldBase = "" } rebased, w := io.Pipe() go func() { srcTar := tar.NewReader(srcContent) rebasedTar := tar.NewWriter(w) for { hdr, err := srcTar.Next() if err == io.EOF { // Signals end of archive. rebasedTar.Close() w.Close() return } if err != nil { w.CloseWithError(err) return } hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) if err = rebasedTar.WriteHeader(hdr); err != nil { w.CloseWithError(err) return } if _, err = io.Copy(rebasedTar, srcTar); err != nil { w.CloseWithError(err) return } } }() return rebased } // CopyResource performs an archive copy from the given source path to the // given destination path. The source path MUST exist and the destination // path's parent directory must exist. func CopyResource(srcPath, dstPath string, followLink bool) error { var ( srcInfo CopyInfo err error ) // Ensure in platform semantics srcPath = normalizePath(srcPath) dstPath = normalizePath(dstPath) // Clean the source and destination paths. srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { return err } content, err := TarResource(srcInfo) if err != nil { return err } defer content.Close() return CopyTo(content, srcInfo, dstPath) } // CopyTo handles extracting the given content whose // entries should be sourced from srcInfo to dstPath. func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { // The destination path need not exist, but CopyInfoDestinationPath will // ensure that at least the parent directory exists. 
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) if err != nil { return err } dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) if err != nil { return err } defer copyArchive.Close() options := &TarOptions{ NoLchown: true, NoOverwriteDirNonDir: true, } return Untar(copyArchive, dstDir, options) } // ResolveHostSourcePath decides real path need to be copied with parameters such as // whether to follow symbol link or not, if followLink is true, resolvedPath will return // link target of any symbol link file, else it will only resolve symlink of directory // but return symbol link file itself without resolving. func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { if followLink { resolvedPath, err = filepath.EvalSymlinks(path) if err != nil { return } resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) } else { dirPath, basePath := filepath.Split(path) // if not follow symbol link, then resolve symbol link of parent dir var resolvedDirPath string resolvedDirPath, err = filepath.EvalSymlinks(dirPath) if err != nil { return } // resolvedDirPath will have been cleaned (no trailing path separators) so // we can manually join it with the base path element. resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { rebaseName = filepath.Base(path) } } return resolvedPath, rebaseName, nil } // GetRebaseName normalizes and compares path and resolvedPath, // return completed resolved path and rebased file name func GetRebaseName(path, resolvedPath string) (string, string) { // linkTarget will have been cleaned (no trailing path separators and dot) so // we can manually join it with them var rebaseName string if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { resolvedPath += string(filepath.Separator) + "." 
} if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { resolvedPath += string(filepath.Separator) } if filepath.Base(path) != filepath.Base(resolvedPath) { // In the case where the path had a trailing separator and a symlink // evaluation has changed the last path component, we will need to // rebase the name in the archive that is being copied to match the // originally requested name. rebaseName = filepath.Base(path) } return resolvedPath, rebaseName } docker-1.10.3/pkg/archive/copy_test.go000066400000000000000000000672721267010174400176250ustar00rootroot00000000000000package archive import ( "bytes" "crypto/sha256" "encoding/hex" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "testing" ) func removeAllPaths(paths ...string) { for _, path := range paths { os.RemoveAll(path) } } func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { var err error if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { t.Fatal(err) } if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { t.Fatal(err) } return } func isNotDir(err error) bool { return strings.Contains(err.Error(), "not a directory") } func joinTrailingSep(pathElements ...string) string { joined := filepath.Join(pathElements...) 
return fmt.Sprintf("%s%c", joined, filepath.Separator) } func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) fileA, err := os.Open(filenameA) if err != nil { return } defer fileA.Close() fileB, err := os.Open(filenameB) if err != nil { return } defer fileB.Close() hasher := sha256.New() if _, err = io.Copy(hasher, fileA); err != nil { return } hashA := hasher.Sum(nil) hasher.Reset() if _, err = io.Copy(hasher, fileB); err != nil { return } hashB := hasher.Sum(nil) if !bytes.Equal(hashA, hashB) { err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) } return } func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) var changes []Change if changes, err = ChangesDirs(newDir, oldDir); err != nil { return } if len(changes) != 0 { err = fmt.Errorf("expected no changes between directories, but got: %v", changes) } return } func logDirContents(t *testing.T, dirPath string) { logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { if err != nil { t.Errorf("stat error for path %q: %s", path, err) return nil } if info.IsDir() { path = joinTrailingSep(path) } t.Logf("\t%s", path) return nil }) t.Logf("logging directory contents: %q", dirPath) if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { t.Fatal(err) } } func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath) return CopyResource(srcPath, dstPath, false) } func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) { t.Logf("copying from %q to %q (follow symbol link)", srcPath, dstPath) return CopyResource(srcPath, dstPath, true) } // Basic assumptions about SRC and DST: // 1. SRC must exist. // 2. 
If SRC ends with a trailing separator, it must be a directory. // 3. DST parent directory must exist. // 4. If DST exists as a file, it must not end with a trailing separator. // First get these easy error cases out of the way. // Test for error when SRC does not exist. func TestCopyErrSrcNotExists(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) { t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) } } // Test for error when SRC ends in a trailing // path separator but it exists as a file. func TestCopyErrSrcNotDir(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A with some sample files and directories. createSampleDir(t, tmpDirA) if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) { t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) } } // Test for error when SRC is a valid file or directory, // but the DST parent directory does not exist. func TestCopyErrDstParentNotExists(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A with some sample files and directories. createSampleDir(t, tmpDirA) srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} // Try with a file source. content, err := TarResource(srcInfo) if err != nil { t.Fatalf("unexpected error %T: %s", err, err) } defer content.Close() // Copy to a file whose parent does not exist. if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { t.Fatal("expected IsNotExist error, but got nil instead") } if !os.IsNotExist(err) { t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) } // Try with a directory source. 
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} content, err = TarResource(srcInfo) if err != nil { t.Fatalf("unexpected error %T: %s", err, err) } defer content.Close() // Copy to a directory whose parent does not exist. if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { t.Fatal("expected IsNotExist error, but got nil instead") } if !os.IsNotExist(err) { t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) } } // Test for error when DST ends in a trailing // path separator but exists as a file. func TestCopyErrDstNotDir(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) // Try with a file source. srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} content, err := TarResource(srcInfo) if err != nil { t.Fatalf("unexpected error %T: %s", err, err) } defer content.Close() if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { t.Fatal("expected IsNotDir error, but got nil instead") } if !isNotDir(err) { t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) } // Try with a directory source. 
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} content, err = TarResource(srcInfo) if err != nil { t.Fatalf("unexpected error %T: %s", err, err) } defer content.Close() if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { t.Fatal("expected IsNotDir error, but got nil instead") } if !isNotDir(err) { t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) } } // Possibilities are reduced to the remaining 10 cases: // // case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action // =================================================================================================== // A | no | - | no | - | no | create file // B | no | - | no | - | yes | error // C | no | - | yes | no | - | overwrite file // D | no | - | yes | yes | - | create file in dst dir // E | yes | no | no | - | - | create dir, copy contents // F | yes | no | yes | no | - | error // G | yes | no | yes | yes | - | copy dir and contents // H | yes | yes | no | - | - | create dir, copy contents // I | yes | yes | yes | no | - | error // J | yes | yes | yes | yes | - | copy dir contents // // A. SRC specifies a file and DST (no trailing path separator) doesn't // exist. This should create a file with the name DST and copy the // contents of the source file into it. func TestCopyCaseA(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A with some sample files and directories. 
createSampleDir(t, tmpDirA) srcPath := filepath.Join(tmpDirA, "file1") dstPath := filepath.Join(tmpDirB, "itWorks.txt") var err error if err = testCopyHelper(t, srcPath, dstPath); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, srcPath, dstPath); err != nil { t.Fatal(err) } os.Remove(dstPath) symlinkPath := filepath.Join(tmpDirA, "symlink3") symlinkPath1 := filepath.Join(tmpDirA, "symlink4") linkTarget := filepath.Join(tmpDirA, "file1") if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { t.Fatal(err) } os.Remove(dstPath) if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { t.Fatal(err) } } // B. SRC specifies a file and DST (with trailing path separator) doesn't // exist. This should cause an error because the copy operation cannot // create a directory when copying a single file. func TestCopyCaseB(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A with some sample files and directories. createSampleDir(t, tmpDirA) srcPath := filepath.Join(tmpDirA, "file1") dstDir := joinTrailingSep(tmpDirB, "testDir") var err error if err = testCopyHelper(t, srcPath, dstDir); err == nil { t.Fatal("expected ErrDirNotExists error, but got nil instead") } if err != ErrDirNotExists { t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) } symlinkPath := filepath.Join(tmpDirA, "symlink3") if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { t.Fatal("expected ErrDirNotExists error, but got nil instead") } if err != ErrDirNotExists { t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) } } // C. SRC specifies a file and DST exists as a file. 
This should overwrite // the file at DST with the contents of the source file. func TestCopyCaseC(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcPath := filepath.Join(tmpDirA, "file1") dstPath := filepath.Join(tmpDirB, "file2") var err error // Ensure they start out different. if err = fileContentsEqual(t, srcPath, dstPath); err == nil { t.Fatal("expected different file contents") } if err = testCopyHelper(t, srcPath, dstPath); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, srcPath, dstPath); err != nil { t.Fatal(err) } } // C. Symbol link following version: // SRC specifies a file and DST exists as a file. This should overwrite // the file at DST with the contents of the source file. func TestCopyCaseCFSym(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) symlinkPathBad := filepath.Join(tmpDirA, "symlink1") symlinkPath := filepath.Join(tmpDirA, "symlink3") linkTarget := filepath.Join(tmpDirA, "file1") dstPath := filepath.Join(tmpDirB, "file2") var err error // first to test broken link if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil { t.Fatalf("unexpected error %T: %s", err, err) } // test symbol link -> symbol link -> target // Ensure they start out different. if err = fileContentsEqual(t, linkTarget, dstPath); err == nil { t.Fatal("expected different file contents") } if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { t.Fatal(err) } } // D. SRC specifies a file and DST exists as a directory. 
This should place // a copy of the source file inside it using the basename from SRC. Ensure // this works whether DST has a trailing path separator or not. func TestCopyCaseD(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcPath := filepath.Join(tmpDirA, "file1") dstDir := filepath.Join(tmpDirB, "dir1") dstPath := filepath.Join(dstDir, "file1") var err error // Ensure that dstPath doesn't exist. if _, err = os.Stat(dstPath); !os.IsNotExist(err) { t.Fatalf("did not expect dstPath %q to exist", dstPath) } if err = testCopyHelper(t, srcPath, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, srcPath, dstPath); err != nil { t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "dir1") if err = testCopyHelper(t, srcPath, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, srcPath, dstPath); err != nil { t.Fatal(err) } } // D. Symbol link following version: // SRC specifies a file and DST exists as a directory. This should place // a copy of the source file inside it using the basename from SRC. Ensure // this works whether DST has a trailing path separator or not. func TestCopyCaseDFSym(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. 
createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcPath := filepath.Join(tmpDirA, "symlink4") linkTarget := filepath.Join(tmpDirA, "file1") dstDir := filepath.Join(tmpDirB, "dir1") dstPath := filepath.Join(dstDir, "symlink4") var err error // Ensure that dstPath doesn't exist. if _, err = os.Stat(dstPath); !os.IsNotExist(err) { t.Fatalf("did not expect dstPath %q to exist", dstPath) } if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "dir1") if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { t.Fatal(err) } } // E. SRC specifies a directory and DST does not exist. This should create a // directory at DST and copy the contents of the SRC directory into the DST // directory. Ensure this works whether DST has a trailing path separator or // not. func TestCopyCaseE(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A with some sample files and directories. createSampleDir(t, tmpDirA) srcDir := filepath.Join(tmpDirA, "dir1") dstDir := filepath.Join(tmpDirB, "testDir") var err error if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, srcDir); err != nil { t.Log("dir contents not equal") logDirContents(t, tmpDirA) logDirContents(t, tmpDirB) t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. 
if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "testDir") if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, srcDir); err != nil { t.Fatal(err) } } // E. Symbol link following version: // SRC specifies a directory and DST does not exist. This should create a // directory at DST and copy the contents of the SRC directory into the DST // directory. Ensure this works whether DST has a trailing path separator or // not. func TestCopyCaseEFSym(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A with some sample files and directories. createSampleDir(t, tmpDirA) srcDir := filepath.Join(tmpDirA, "dirSymlink") linkTarget := filepath.Join(tmpDirA, "dir1") dstDir := filepath.Join(tmpDirB, "testDir") var err error if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { t.Log("dir contents not equal") logDirContents(t, tmpDirA) logDirContents(t, tmpDirB) t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "testDir") if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { t.Fatal(err) } } // F. SRC specifies a directory and DST exists as a file. This should cause an // error as it is not possible to overwrite a file with a directory. func TestCopyCaseF(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. 
createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := filepath.Join(tmpDirA, "dir1") symSrcDir := filepath.Join(tmpDirA, "dirSymlink") dstFile := filepath.Join(tmpDirB, "file1") var err error if err = testCopyHelper(t, srcDir, dstFile); err == nil { t.Fatal("expected ErrCannotCopyDir error, but got nil instead") } if err != ErrCannotCopyDir { t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) } // now test with symbol link if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { t.Fatal("expected ErrCannotCopyDir error, but got nil instead") } if err != ErrCannotCopyDir { t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) } } // G. SRC specifies a directory and DST exists as a directory. This should copy // the SRC directory and all its contents to the DST directory. Ensure this // works whether DST has a trailing path separator or not. func TestCopyCaseG(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := filepath.Join(tmpDirA, "dir1") dstDir := filepath.Join(tmpDirB, "dir2") resultDir := filepath.Join(dstDir, "dir1") var err error if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, resultDir, srcDir); err != nil { t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "dir2") if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, resultDir, srcDir); err != nil { t.Fatal(err) } } // G. 
Symbol link version: // SRC specifies a directory and DST exists as a directory. This should copy // the SRC directory and all its contents to the DST directory. Ensure this // works whether DST has a trailing path separator or not. func TestCopyCaseGFSym(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := filepath.Join(tmpDirA, "dirSymlink") linkTarget := filepath.Join(tmpDirA, "dir1") dstDir := filepath.Join(tmpDirB, "dir2") resultDir := filepath.Join(dstDir, "dirSymlink") var err error if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "dir2") if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { t.Fatal(err) } } // H. SRC specifies a directory's contents only and DST does not exist. This // should create a directory at DST and copy the contents of the SRC // directory (but not the directory itself) into the DST directory. Ensure // this works whether DST has a trailing path separator or not. func TestCopyCaseH(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A with some sample files and directories. createSampleDir(t, tmpDirA) srcDir := joinTrailingSep(tmpDirA, "dir1") + "." 
dstDir := filepath.Join(tmpDirB, "testDir") var err error if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, srcDir); err != nil { t.Log("dir contents not equal") logDirContents(t, tmpDirA) logDirContents(t, tmpDirB) t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "testDir") if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, srcDir); err != nil { t.Log("dir contents not equal") logDirContents(t, tmpDirA) logDirContents(t, tmpDirB) t.Fatal(err) } } // H. Symbol link following version: // SRC specifies a directory's contents only and DST does not exist. This // should create a directory at DST and copy the contents of the SRC // directory (but not the directory itself) into the DST directory. Ensure // this works whether DST has a trailing path separator or not. func TestCopyCaseHFSym(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A with some sample files and directories. createSampleDir(t, tmpDirA) srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." linkTarget := filepath.Join(tmpDirA, "dir1") dstDir := filepath.Join(tmpDirB, "testDir") var err error if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { t.Log("dir contents not equal") logDirContents(t, tmpDirA) logDirContents(t, tmpDirB) t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. 
if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "testDir") if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { t.Log("dir contents not equal") logDirContents(t, tmpDirA) logDirContents(t, tmpDirB) t.Fatal(err) } } // I. SRC specifies a directory's contents only and DST exists as a file. This // should cause an error as it is not possible to overwrite a file with a // directory. func TestCopyCaseI(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := joinTrailingSep(tmpDirA, "dir1") + "." symSrcDir := filepath.Join(tmpDirB, "dirSymlink") dstFile := filepath.Join(tmpDirB, "file1") var err error if err = testCopyHelper(t, srcDir, dstFile); err == nil { t.Fatal("expected ErrCannotCopyDir error, but got nil instead") } if err != ErrCannotCopyDir { t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) } // now try with symbol link of dir if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { t.Fatal("expected ErrCannotCopyDir error, but got nil instead") } if err != ErrCannotCopyDir { t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) } } // J. SRC specifies a directory's contents only and DST exists as a directory. // This should copy the contents of the SRC directory (but not the directory // itself) into the DST directory. Ensure this works whether DST has a // trailing path separator or not. func TestCopyCaseJ(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := joinTrailingSep(tmpDirA, "dir1") + "." 
dstDir := filepath.Join(tmpDirB, "dir5") var err error // first to create an empty dir if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, srcDir); err != nil { t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "dir5") if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, srcDir); err != nil { t.Fatal(err) } } // J. Symbol link following version: // SRC specifies a directory's contents only and DST exists as a directory. // This should copy the contents of the SRC directory (but not the directory // itself) into the DST directory. Ensure this works whether DST has a // trailing path separator or not. func TestCopyCaseJFSym(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." linkTarget := filepath.Join(tmpDirA, "dir1") dstDir := filepath.Join(tmpDirB, "dir5") var err error // first to create an empty dir if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { t.Fatal(err) } // Now try again but using a trailing path separator for dstDir. 
if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "dir5") if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { t.Fatal(err) } } docker-1.10.3/pkg/archive/copy_unix.go000066400000000000000000000002141267010174400176100ustar00rootroot00000000000000// +build !windows package archive import ( "path/filepath" ) func normalizePath(path string) string { return filepath.ToSlash(path) } docker-1.10.3/pkg/archive/copy_windows.go000066400000000000000000000001721267010174400203220ustar00rootroot00000000000000package archive import ( "path/filepath" ) func normalizePath(path string) string { return filepath.FromSlash(path) } docker-1.10.3/pkg/archive/diff.go000066400000000000000000000210021267010174400165010ustar00rootroot00000000000000package archive import ( "archive/tar" "fmt" "io" "io/ioutil" "os" "path/filepath" "runtime" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. 
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header unpackedPaths := make(map[string]struct{}) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) if err != nil { return 0, err } aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) if options == nil { options = &TarOptions{} } // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return 0, err } size += hdr.Size // Normalize name, for safety and for a simple is-root check hdr.Name = filepath.Clean(hdr.Name) // Windows does not support filenames with colons in them. Ignore // these files. This is not a problem though (although it might // appear that it is). Let's suppose a client is running docker pull. // The daemon it points to is Windows. Would it make sense for the // client to be doing a docker pull Ubuntu for example (which has files // with colons in the name under /usr/share/man/man3)? No, absolutely // not as it would really only make sense that they were pulling a // Windows image. However, for development, it is necessary to be able // to pull Linux images which are in the repository. // // TODO Windows. Once the registry is aware of what images are Windows- // specific or Linux-specific, this warning should be changed to an error // to cater for the situation where someone does manage to upload a Linux // image but have it tagged as Windows inadvertently. if runtime.GOOS == "windows" { if strings.Contains(hdr.Name, ":") { logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) continue } } // Note as these operations are platform specific, so must the slash be. 
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { // Not the root directory, ensure that the parent directory exists. // This happened in some tests where an image had a tarfile without any // parent directories. parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = system.MkdirAll(parentPath, 0600) if err != nil { return 0, err } } } // Skip AUFS metadata dirs if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { // Regular files inside /.wh..wh.plnk can be used as hardlink targets // We don't want this directory, but we need the files in them so that // such hardlinks can be resolved. if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { basename := filepath.Base(hdr.Name) aufsHardlinks[basename] = hdr if aufsTempdir == "" { if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { return 0, err } defer os.RemoveAll(aufsTempdir) } if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { return 0, err } } if hdr.Name != WhiteoutOpaqueDir { continue } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return 0, err } // Note as these operations are platform specific, so must the slash be. 
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } base := filepath.Base(path) if strings.HasPrefix(base, WhiteoutPrefix) { dir := filepath.Dir(path) if base == WhiteoutOpaqueDir { _, err := os.Lstat(dir) if err != nil { return 0, err } err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if err != nil { if os.IsNotExist(err) { err = nil // parent was deleted } return err } if path == dir { return nil } if _, exists := unpackedPaths[path]; !exists { err := os.RemoveAll(path) return err } return nil }) if err != nil { return 0, err } } else { originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) if err := os.RemoveAll(originalPath); err != nil { return 0, err } } } else { // If path exits we almost always just want to remove and replace it. // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). 
if fi, err := os.Lstat(path); err == nil { if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return 0, err } } } trBuf.Reset(tr) srcData := io.Reader(trBuf) srcHdr := hdr // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so // we manually retarget these into the temporary files we extracted them into if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { linkBasename := filepath.Base(hdr.Linkname) srcHdr = aufsHardlinks[linkBasename] if srcHdr == nil { return 0, fmt.Errorf("Invalid aufs hardlink") } tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) if err != nil { return 0, err } defer tmpFile.Close() srcData = tmpFile } // if the options contain a uid & gid maps, convert header uid/gid // entries using the maps such that lchown sets the proper mapped // uid/gid after writing the file. We only perform this mapping if // the file isn't already owned by the remapped root UID or GID, as // that specific uid/gid has no mapping from container -> host, and // those files already have the proper ownership for inside the // container. 
if srcHdr.Uid != remappedRootUID { xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) if err != nil { return 0, err } srcHdr.Uid = xUID } if srcHdr.Gid != remappedRootGID { xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) if err != nil { return 0, err } srcHdr.Gid = xGID } if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { return 0, err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } unpackedPaths[path] = struct{}{} } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return 0, err } } return size, nil } // ApplyLayer parses a diff in the standard layer format from `layer`, // and applies it to the directory `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. func ApplyLayer(dest string, layer Reader) (int64, error) { return applyLayerHandler(dest, layer, &TarOptions{}, true) } // ApplyUncompressedLayer parses a diff in the standard layer format from // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. 
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { return applyLayerHandler(dest, layer, options, false) } // do the bulk load of ApplyLayer, but allow for not calling DecompressStream func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { dest = filepath.Clean(dest) // We need to be able to set any perms oldmask, err := system.Umask(0) if err != nil { return 0, err } defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform if decompress { layer, err = DecompressStream(layer) if err != nil { return 0, err } } return UnpackLayer(dest, layer, options) } docker-1.10.3/pkg/archive/diff_test.go000066400000000000000000000152541267010174400175540ustar00rootroot00000000000000package archive import ( "archive/tar" "io" "io/ioutil" "os" "path/filepath" "reflect" "testing" "github.com/docker/docker/pkg/ioutils" ) func TestApplyLayerInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestApplyLayerInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestApplyLayerInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestApplyLayerWhiteouts(t *testing.T) { wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") if err != nil { return } defer os.RemoveAll(wd) base := []string{ ".baz", "bar/", "bar/bax", "bar/bay/", "baz", "foo/", "foo/.abc", "foo/.bcd/", "foo/.bcd/a", "foo/cde/", "foo/cde/def", "foo/cde/efg", "foo/fgh", "foobar", } type tcase struct { change, expected []string } tcases := []tcase{ { base, base, }, { []string{ ".bay", ".wh.baz", "foo/", "foo/.bce", "foo/.wh..wh..opq", "foo/cde/", "foo/cde/efg", }, []string{ ".bay", ".baz", "bar/", "bar/bax", "bar/bay/", "foo/", "foo/.bce", "foo/cde/", "foo/cde/efg", "foobar", }, }, { []string{ ".bay", ".wh..baz", ".wh.foobar", "foo/", "foo/.abc", "foo/.wh.cde", "bar/", }, []string{ ".bay", "bar/", "bar/bax", "bar/bay/", "foo/", "foo/.abc", "foo/.bce", }, }, { []string{ ".abc", ".wh..wh..opq", "foobar", }, []string{ ".abc", "foobar", }, }, } for i, tc := range tcases { l, err := makeTestLayer(tc.change) if err != nil { t.Fatal(err) } _, err = UnpackLayer(wd, l, nil) if err != nil { t.Fatal(err) } err = l.Close() if err != nil { t.Fatal(err) } paths, err := readDirContents(wd) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(tc.expected, paths) { t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) } } } func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") if err != nil { return } defer func() { if err != nil { os.RemoveAll(tmpDir) } }() for _, p := range paths { if p[len(p)-1] == filepath.Separator { if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { return } } else { if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { return } } } archive, err := Tar(tmpDir, Uncompressed) if err != nil { return } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() os.RemoveAll(tmpDir) return err }), nil } func readDirContents(root string) 
([]string, error) { var files []string err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if path == root { return nil } rel, err := filepath.Rel(root, path) if err != nil { return err } if info.IsDir() { rel = rel + "/" } files = append(files, rel) return nil }) if err != nil { return nil, err } return files, nil } docker-1.10.3/pkg/archive/example_changes.go000066400000000000000000000043441267010174400207260ustar00rootroot00000000000000// +build ignore // Simple tool to create an archive stream from an old and new directory // // By default it will stream the comparison of two temporary directories with junk files package main import ( "flag" "fmt" "io" "io/ioutil" "os" "path" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" ) var ( flDebug = flag.Bool("D", false, "debugging output") flNewDir = flag.String("newdir", "", "") flOldDir = flag.String("olddir", "", "") log = logrus.New() ) func main() { flag.Usage = func() { fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") fmt.Printf("%s [OPTIONS]\n", os.Args[0]) flag.PrintDefaults() } flag.Parse() log.Out = os.Stderr if (len(os.Getenv("DEBUG")) > 0) || *flDebug { logrus.SetLevel(logrus.DebugLevel) } var newDir, oldDir string if len(*flNewDir) == 0 { var err error newDir, err = ioutil.TempDir("", "docker-test-newDir") if err != nil { log.Fatal(err) } defer os.RemoveAll(newDir) if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { log.Fatal(err) } } else { newDir = *flNewDir } if len(*flOldDir) == 0 { oldDir, err := ioutil.TempDir("", "docker-test-oldDir") if err != nil { log.Fatal(err) } defer os.RemoveAll(oldDir) } else { oldDir = *flOldDir } changes, err := archive.ChangesDirs(newDir, oldDir) if err != nil { log.Fatal(err) } a, err := archive.ExportChanges(newDir, changes) if err != nil { log.Fatal(err) } defer a.Close() i, err := io.Copy(os.Stdout, a) if err != nil && err != io.EOF { log.Fatal(err) } fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } docker-1.10.3/pkg/archive/testdata/000077500000000000000000000000001267010174400170605ustar00rootroot00000000000000docker-1.10.3/pkg/archive/testdata/broken.tar000066400000000000000000000330001267010174400210440ustar00rootroot00000000000000root/0040700000000000000000000000000012332704605010223 5ustar0000000000000000root/.cpanm/0040755000000000000000000000000012332704605011411 
5ustar0000000000000000root/.cpanm/work/0040755000000000000000000000000012332704605012373 5ustar0000000000000000root/.cpanm/work/1395823785.24209/0040755000000000000000000000000012332704605014154 5ustar0000000000000000root/.cpanm/work/1395823785.24209/File-Find-Rule-0.33/0040755000000000000000000000000012332704605017177 5ustar0000000000000000root/.cpanm/work/1395823785.24209/File-Find-Rule-0.33/META.yml01006441Ӏ-0000000112211635626623021652 0ustar 00000000000000--- #YAML:1.0 name: File-Find-Rule version: 0.33 abstract: ~ author: [] license: unknown distribution_type: module configure_requires: ExtUtils::MakeMaker: 0 build_requires: ExtUtils::MakeMaker: 0 requires: File::Find: 0 File::Spec: 0 Number::Compare: 0 Test::More: 0 Text::Glob: 0.07 no_index: directory: - t - inc generated_by: ExtUtils::MakeMaker version 6.57_05 meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: 1.4 root/.cpanm/work/1395823785.24209/Plack-1.0030/0040755000000000000000000000000012332704605015665 5ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/0040755000000000000000000000000012332704605016575 5ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/0040755000000000000000000000000012332704605017433 5ustar 00000000000000Plack::Middleware::LighttpdScriptNameFix.3pm0100644000000000000000000001400512314512430027460 0ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3.\" Automatically generated by Pod::Man 2.27 (Pod::Simple 3.28) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. 
\*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{ . if \nF \{ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . 
ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "Plack::Middleware::LighttpdScriptNameFix 3pm" .TH Plack::Middleware::LighttpdScriptNameFix 3pm "2013-11-23" "perl v5.18.2" "User Contributed Perl Documentation" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. 
.if n .ad l .nh .SH "NAME" Plack::Middleware::LighttpdScriptNameFix \- fixes wrong SCRIPT_NAME and PATH_INFO that lighttpd sets .SH "SYNOPSIS" .IX Header "SYNOPSIS" .Vb 2 \& # in your app.psgi \& use Plack::Builder; \& \& builder { \& enable "LighttpdScriptNameFix"; \& $app; \& }; \& \& # Or from the command line \& plackup \-s FCGI \-e \*(Aqenable "LighttpdScriptNameFix"\*(Aq /path/to/app.psgi .Ve .SH "DESCRIPTION" .IX Header "DESCRIPTION" This middleware fixes wrong \f(CW\*(C`SCRIPT_NAME\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR set by lighttpd when you mount your app under the root path (\*(L"/\*(R"). If you use lighttpd 1.4.23 or later you can instead enable \f(CW\*(C`fix\-root\-scriptname\*(C'\fR flag inside \f(CW\*(C`fastcgi.server\*(C'\fR instead of using this middleware. .SH "CONFIGURATION" .IX Header "CONFIGURATION" .IP "script_name" 4 .IX Item "script_name" Even with \f(CW\*(C`fix\-root\-scriptname\*(C'\fR, lighttpd \fIstill\fR sets weird \&\f(CW\*(C`SCRIPT_NAME\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR if you mount your application at \f(CW""\fR or something that ends with \f(CW\*(C`/\*(C'\fR. Setting \f(CW\*(C`script_name\*(C'\fR option tells the middleware how to reconstruct the new correct \f(CW\*(C`SCRIPT_NAME\*(C'\fR and \&\f(CW\*(C`PATH_INFO\*(C'\fR. .Sp If you mount the app under \f(CW\*(C`/something/\*(C'\fR, you should set: .Sp .Vb 1 \& enable "LighttpdScriptNameFix", script_name => "/something"; .Ve .Sp and when a request for \f(CW\*(C`/something/a/b?param=1\*(C'\fR comes, \f(CW\*(C`SCRIPT_NAME\*(C'\fR becomes \f(CW\*(C`/something\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR becomes \f(CW\*(C`/a/b\*(C'\fR. .Sp \&\f(CW\*(C`script_name\*(C'\fR option is set to empty by default, which means all the request path is set to \f(CW\*(C`PATH_INFO\*(C'\fR and it behaves like your fastcgi application is mounted in the root path. 
.SH "AUTHORS" .IX Header "AUTHORS" Yury Zavarin .PP Tatsuhiko Miyagawa .SH "SEE ALSO" .IX Header "SEE ALSO" Plack::Handler::FCGI docker-1.10.3/pkg/archive/time_linux.go000066400000000000000000000004141267010174400177520ustar00rootroot00000000000000package archive import ( "syscall" "time" ) func timeToTimespec(time time.Time) (ts syscall.Timespec) { if time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) return } return syscall.NsecToTimespec(time.UnixNano()) } docker-1.10.3/pkg/archive/time_unsupported.go000066400000000000000000000003501267010174400212020ustar00rootroot00000000000000// +build !linux package archive import ( "syscall" "time" ) func timeToTimespec(time time.Time) (ts syscall.Timespec) { nsec := int64(0) if !time.IsZero() { nsec = time.UnixNano() } return syscall.NsecToTimespec(nsec) } docker-1.10.3/pkg/archive/utils_test.go000066400000000000000000000112661267010174400200030ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "fmt" "io" "io/ioutil" "os" "path/filepath" "time" ) var testUntarFns = map[string]func(string, io.Reader) error{ "untar": func(dest string, r io.Reader) error { return Untar(r, dest, nil) }, "applylayer": func(dest string, r io.Reader) error { _, err := ApplyLayer(dest, Reader(r)) return err }, } // testBreakout is a helper function that, within the provided `tmpdir` directory, // creates a `victim` folder with a generated `hello` file in it. // `untar` extracts to a directory named `dest`, the tar file created from `headers`. // // Here are the tested scenarios: // - removed `victim` folder (write) // - removed files from `victim` folder (write) // - new files in `victim` folder (write) // - modified files in `victim` folder (write) // - file in `dest` with same content as `victim/hello` (read) // // When using testBreakout make sure you cover one of the scenarios listed above. 
func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { tmpdir, err := ioutil.TempDir("", tmpdir) if err != nil { return err } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") if err := os.Mkdir(dest, 0755); err != nil { return err } victim := filepath.Join(tmpdir, "victim") if err := os.Mkdir(victim, 0755); err != nil { return err } hello := filepath.Join(victim, "hello") helloData, err := time.Now().MarshalText() if err != nil { return err } if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { return err } helloStat, err := os.Stat(hello) if err != nil { return err } reader, writer := io.Pipe() go func() { t := tar.NewWriter(writer) for _, hdr := range headers { t.WriteHeader(hdr) } t.Close() }() untar := testUntarFns[untarFn] if untar == nil { return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) } if err := untar(dest, reader); err != nil { if _, ok := err.(breakoutError); !ok { // If untar returns an error unrelated to an archive breakout, // then consider this an unexpected error and abort. return err } // Here, untar detected the breakout. // Let's move on verifying that indeed there was no breakout. fmt.Printf("breakoutError: %v\n", err) } // Check victim folder f, err := os.Open(victim) if err != nil { // codepath taken if victim folder was removed return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) } defer f.Close() // Check contents of victim folder // // We are only interested in getting 2 files from the victim folder, because if all is well // we expect only one result, the `hello` file. If there is a second result, it cannot // hold the same name `hello` and we assume that a new file got created in the victim folder. // That is enough to detect an archive breakout. 
names, err := f.Readdirnames(2) if err != nil { // codepath taken if victim is not a folder return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) } for _, name := range names { if name != "hello" { // codepath taken if new file was created in victim folder return fmt.Errorf("archive breakout: new file %q", name) } } // Check victim/hello f, err = os.Open(hello) if err != nil { // codepath taken if read permissions were removed return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) } defer f.Close() b, err := ioutil.ReadAll(f) if err != nil { return err } fi, err := f.Stat() if err != nil { return err } if helloStat.IsDir() != fi.IsDir() || // TODO: cannot check for fi.ModTime() change helloStat.Mode() != fi.Mode() || helloStat.Size() != fi.Size() || !bytes.Equal(helloData, b) { // codepath taken if hello has been modified return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) } // Check that nothing in dest/ has the same content as victim/hello. // Since victim/hello was generated with time.Now(), it is safe to assume // that any file whose content matches exactly victim/hello, managed somehow // to access victim/hello. return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { if info.IsDir() { if err != nil { // skip directory if error return filepath.SkipDir } // enter directory return nil } if err != nil { // skip file if error return nil } b, err := ioutil.ReadFile(path) if err != nil { // Houston, we have a problem. Aborting (space)walk. 
return err } if bytes.Equal(helloData, b) { return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) } return nil }) } docker-1.10.3/pkg/archive/whiteouts.go000066400000000000000000000021141267010174400176270ustar00rootroot00000000000000package archive // Whiteouts are files with a special meaning for the layered filesystem. // Docker uses AUFS whiteout files inside exported archives. In other // filesystems these files are generated/handled on tar creation/extraction. // WhiteoutPrefix prefix means file is a whiteout. If this is followed by a // filename this means that file has been removed from the base layer. const WhiteoutPrefix = ".wh." // WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not // for removing an actual file. Normally these files are excluded from exported // archives. const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix // WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other // layers. Normally these should not go into exported archives and all changed // hardlinks should be copied to the top layer. const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" // WhiteoutOpaqueDir file means directory has been made opaque - meaning // readdir calls to this directory do not follow to lower layers. const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" docker-1.10.3/pkg/archive/wrap.go000066400000000000000000000026401267010174400165510ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "io/ioutil" ) // Generate generates a new archive from the content provided // as input. // // `files` is a sequence of path/content pairs. A new file is // added to the archive for each pair. // If the last pair is incomplete, the file is created with an // empty content. 
For example: // // Generate("foo.txt", "hello world", "emptyfile") // // The above call will return an archive with 2 files: // * ./foo.txt with content "hello world" // * ./empty with empty content // // FIXME: stream content instead of buffering // FIXME: specify permissions and other archive metadata func Generate(input ...string) (Archive, error) { files := parseStringPairs(input...) buf := new(bytes.Buffer) tw := tar.NewWriter(buf) for _, file := range files { name, content := file[0], file[1] hdr := &tar.Header{ Name: name, Size: int64(len(content)), } if err := tw.WriteHeader(hdr); err != nil { return nil, err } if _, err := tw.Write([]byte(content)); err != nil { return nil, err } } if err := tw.Close(); err != nil { return nil, err } return ioutil.NopCloser(buf), nil } func parseStringPairs(input ...string) (output [][2]string) { output = make([][2]string, 0, len(input)/2+1) for i := 0; i < len(input); i += 2 { var pair [2]string pair[0] = input[i] if i+1 < len(input) { pair[1] = input[i+1] } output = append(output, pair) } return } docker-1.10.3/pkg/archive/wrap_test.go000066400000000000000000000042051267010174400176070ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "io" "testing" ) func TestGenerateEmptyFile(t *testing.T) { archive, err := Generate("emptyFile") if err != nil { t.Fatal(err) } if archive == nil { t.Fatal("The generated archive should not be nil.") } expectedFiles := [][]string{ {"emptyFile", ""}, } tr := tar.NewReader(archive) actualFiles := make([][]string, 0, 10) i := 0 for { hdr, err := tr.Next() if err == io.EOF { break } if err != nil { t.Fatal(err) } buf := new(bytes.Buffer) buf.ReadFrom(tr) content := buf.String() actualFiles = append(actualFiles, []string{hdr.Name, content}) i++ } if len(actualFiles) != len(expectedFiles) { t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) } for i := 0; i < len(expectedFiles); i++ { actual := actualFiles[i] expected := expectedFiles[i] 
if actual[0] != expected[0] { t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) } if actual[1] != expected[1] { t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) } } } func TestGenerateWithContent(t *testing.T) { archive, err := Generate("file", "content") if err != nil { t.Fatal(err) } if archive == nil { t.Fatal("The generated archive should not be nil.") } expectedFiles := [][]string{ {"file", "content"}, } tr := tar.NewReader(archive) actualFiles := make([][]string, 0, 10) i := 0 for { hdr, err := tr.Next() if err == io.EOF { break } if err != nil { t.Fatal(err) } buf := new(bytes.Buffer) buf.ReadFrom(tr) content := buf.String() actualFiles = append(actualFiles, []string{hdr.Name, content}) i++ } if len(actualFiles) != len(expectedFiles) { t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) } for i := 0; i < len(expectedFiles); i++ { actual := actualFiles[i] expected := expectedFiles[i] if actual[0] != expected[0] { t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) } if actual[1] != expected[1] { t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) } } } docker-1.10.3/pkg/authorization/000077500000000000000000000000001267010174400165265ustar00rootroot00000000000000docker-1.10.3/pkg/authorization/api.go000066400000000000000000000035261267010174400176340ustar00rootroot00000000000000package authorization const ( // AuthZApiRequest is the url for daemon request authorization AuthZApiRequest = "AuthZPlugin.AuthZReq" // AuthZApiResponse is the url for daemon response authorization AuthZApiResponse = "AuthZPlugin.AuthZRes" // AuthZApiImplements is the name of the interface all AuthZ plugins implement AuthZApiImplements = "authz" ) // Request holds data required for authZ plugins type Request struct { // User holds the user extracted by AuthN mechanism User string `json:"User,omitempty"` // UserAuthNMethod holds the mechanism used to 
extract user details (e.g., krb) UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` // RequestMethod holds the HTTP method (GET/POST/PUT) RequestMethod string `json:"RequestMethod,omitempty"` // RequestUri holds the full HTTP uri (e.g., /v1.21/version) RequestURI string `json:"RequestUri,omitempty"` // RequestBody stores the raw request body sent to the docker daemon RequestBody []byte `json:"RequestBody,omitempty"` // RequestHeaders stores the raw request headers sent to the docker daemon RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` // ResponseStatusCode stores the status code returned from docker daemon ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` // ResponseBody stores the raw response body sent from docker daemon ResponseBody []byte `json:"ResponseBody,omitempty"` // ResponseHeaders stores the response headers sent to the docker daemon ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` } // Response represents authZ plugin response type Response struct { // Allow indicating whether the user is allowed or not Allow bool `json:"Allow"` // Msg stores the authorization message Msg string `json:"Msg,omitempty"` // Err stores a message in case there's an error Err string `json:"Err,omitempty"` } docker-1.10.3/pkg/authorization/authz.go000066400000000000000000000115231267010174400202120ustar00rootroot00000000000000package authorization import ( "bufio" "bytes" "fmt" "io" "net/http" "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/ioutils" ) const maxBodySize = 1048576 // 1MB // NewCtx creates new authZ context, it is used to store authorization information related to a specific docker // REST http session // A context provides two method: // Authenticate Request: // Call authZ plugins with current REST request and AuthN response // Request contains full HTTP packet sent to the docker daemon // https://docs.docker.com/reference/api/docker_remote_api/ // // Authenticate Response: // Call 
authZ plugins with full info about current REST request, REST response and AuthN response // The response from this method may contains content that overrides the daemon response // This allows authZ plugins to filter privileged content // // If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results // For response manipulation, the response from each plugin is piped between plugins. Plugin execution order // is determined according to daemon parameters func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { return &Ctx{ plugins: authZPlugins, user: user, userAuthNMethod: userAuthNMethod, requestMethod: requestMethod, requestURI: requestURI, } } // Ctx stores a a single request-response interaction context type Ctx struct { user string userAuthNMethod string requestMethod string requestURI string plugins []Plugin // authReq stores the cached request object for the current transaction authReq *Request } // AuthZRequest authorized the request to the docker daemon using authZ plugins func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { var body []byte if sendBody(ctx.requestURI, r.Header) { if r.ContentLength < maxBodySize { var err error body, r.Body, err = drainBody(r.Body) if err != nil { return err } } } var h bytes.Buffer if err := r.Header.Write(&h); err != nil { return err } ctx.authReq = &Request{ User: ctx.user, UserAuthNMethod: ctx.userAuthNMethod, RequestMethod: ctx.requestMethod, RequestURI: ctx.requestURI, RequestBody: body, RequestHeaders: headers(r.Header), } for _, plugin := range ctx.plugins { logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) authRes, err := plugin.AuthZRequest(ctx.authReq) if err != nil { return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) } if !authRes.Allow { return fmt.Errorf("authorization denied by plugin %s: %s", plugin.Name(), authRes.Msg) } } return nil } // AuthZResponse authorized 
and manipulates the response from docker daemon using authZ plugins func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { ctx.authReq.ResponseStatusCode = rm.StatusCode() ctx.authReq.ResponseHeaders = headers(rm.Header()) if sendBody(ctx.requestURI, rm.Header()) { ctx.authReq.ResponseBody = rm.RawBody() } for _, plugin := range ctx.plugins { logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) authRes, err := plugin.AuthZResponse(ctx.authReq) if err != nil { return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) } if !authRes.Allow { return fmt.Errorf("authorization denied by plugin %s: %s", plugin.Name(), authRes.Msg) } } rm.FlushAll() return nil } // drainBody dump the body, it reads the body data into memory and // see go sources /go/src/net/http/httputil/dump.go func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { bufReader := bufio.NewReaderSize(body, maxBodySize) newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) data, err := bufReader.Peek(maxBodySize) if err != io.EOF { // This means the request is larger than our max if err == bufio.ErrBufferFull { return nil, newBody, nil } // This means we had an error reading return nil, nil, err } return data, newBody, nil } // sendBody returns true when request/response body should be sent to AuthZPlugin func sendBody(url string, header http.Header) bool { // Skip body for auth endpoint if strings.HasSuffix(url, "/auth") { return false } // body is sent only for text or json messages v := header.Get("Content-Type") return strings.HasPrefix(v, "text/") || v == "application/json" } // headers returns flatten version of the http headers excluding authorization func headers(header http.Header) map[string]string { v := make(map[string]string, 0) for k, values := range header { // Skip authorization headers if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, 
"X-Registry-Auth") { continue } for _, val := range values { v[k] = val } } return v } docker-1.10.3/pkg/authorization/authz_test.go000066400000000000000000000135341267010174400212550ustar00rootroot00000000000000package authorization import ( "encoding/json" "io/ioutil" "log" "net" "net/http" "net/http/httptest" "os" "path" "reflect" "testing" "github.com/docker/docker/pkg/plugins" "github.com/docker/go-connections/tlsconfig" "github.com/gorilla/mux" ) const pluginAddress = "authzplugin.sock" func TestAuthZRequestPluginError(t *testing.T) { server := authZPluginTestServer{t: t} go server.start() defer server.stop() authZPlugin := createTestPlugin(t) request := Request{ User: "user", RequestBody: []byte("sample body"), RequestURI: "www.authz.com", RequestMethod: "GET", RequestHeaders: map[string]string{"header": "value"}, } server.replayResponse = Response{ Err: "an error", } actualResponse, err := authZPlugin.AuthZRequest(&request) if err != nil { t.Fatalf("Failed to authorize request %v", err) } if !reflect.DeepEqual(server.replayResponse, *actualResponse) { t.Fatalf("Response must be equal") } if !reflect.DeepEqual(request, server.recordedRequest) { t.Fatalf("Requests must be equal") } } func TestAuthZRequestPlugin(t *testing.T) { server := authZPluginTestServer{t: t} go server.start() defer server.stop() authZPlugin := createTestPlugin(t) request := Request{ User: "user", RequestBody: []byte("sample body"), RequestURI: "www.authz.com", RequestMethod: "GET", RequestHeaders: map[string]string{"header": "value"}, } server.replayResponse = Response{ Allow: true, Msg: "Sample message", } actualResponse, err := authZPlugin.AuthZRequest(&request) if err != nil { t.Fatalf("Failed to authorize request %v", err) } if !reflect.DeepEqual(server.replayResponse, *actualResponse) { t.Fatalf("Response must be equal") } if !reflect.DeepEqual(request, server.recordedRequest) { t.Fatalf("Requests must be equal") } } func TestAuthZResponsePlugin(t *testing.T) { server := 
authZPluginTestServer{t: t} go server.start() defer server.stop() authZPlugin := createTestPlugin(t) request := Request{ User: "user", RequestBody: []byte("sample body"), } server.replayResponse = Response{ Allow: true, Msg: "Sample message", } actualResponse, err := authZPlugin.AuthZResponse(&request) if err != nil { t.Fatalf("Failed to authorize request %v", err) } if !reflect.DeepEqual(server.replayResponse, *actualResponse) { t.Fatalf("Response must be equal") } if !reflect.DeepEqual(request, server.recordedRequest) { t.Fatalf("Requests must be equal") } } func TestResponseModifier(t *testing.T) { r := httptest.NewRecorder() m := NewResponseModifier(r) m.Header().Set("h1", "v1") m.Write([]byte("body")) m.WriteHeader(500) m.FlushAll() if r.Header().Get("h1") != "v1" { t.Fatalf("Header value must exists %s", r.Header().Get("h1")) } if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) { t.Fatalf("Body value must exists %s", r.Body.Bytes()) } if r.Code != 500 { t.Fatalf("Status code must be correct %d", r.Code) } } func TestResponseModifierOverride(t *testing.T) { r := httptest.NewRecorder() m := NewResponseModifier(r) m.Header().Set("h1", "v1") m.Write([]byte("body")) m.WriteHeader(500) overrideHeader := make(http.Header) overrideHeader.Add("h1", "v2") overrideHeaderBytes, err := json.Marshal(overrideHeader) if err != nil { t.Fatalf("override header failed %v", err) } m.OverrideHeader(overrideHeaderBytes) m.OverrideBody([]byte("override body")) m.OverrideStatusCode(404) m.FlushAll() if r.Header().Get("h1") != "v2" { t.Fatalf("Header value must exists %s", r.Header().Get("h1")) } if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) { t.Fatalf("Body value must exists %s", r.Body.Bytes()) } if r.Code != 404 { t.Fatalf("Status code must be correct %d", r.Code) } } // createTestPlugin creates a new sample authorization plugin func createTestPlugin(t *testing.T) *authorizationPlugin { plugin := &plugins.Plugin{Name: "authz"} pwd, err := os.Getwd() if err != 
nil { log.Fatal(err) } plugin.Client, err = plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), tlsconfig.Options{InsecureSkipVerify: true}) if err != nil { t.Fatalf("Failed to create client %v", err) } return &authorizationPlugin{name: "plugin", plugin: plugin} } // AuthZPluginTestServer is a simple server that implements the authZ plugin interface type authZPluginTestServer struct { listener net.Listener t *testing.T // request stores the request sent from the daemon to the plugin recordedRequest Request // response stores the response sent from the plugin to the daemon replayResponse Response } // start starts the test server that implements the plugin func (t *authZPluginTestServer) start() { r := mux.NewRouter() os.Remove(pluginAddress) l, err := net.ListenUnix("unix", &net.UnixAddr{Name: pluginAddress, Net: "unix"}) if err != nil { t.t.Fatalf("Failed to listen %v", err) } t.listener = l r.HandleFunc("/Plugin.Activate", t.activate) r.HandleFunc("/"+AuthZApiRequest, t.auth) r.HandleFunc("/"+AuthZApiResponse, t.auth) t.listener, err = net.Listen("tcp", pluginAddress) server := http.Server{Handler: r, Addr: pluginAddress} server.Serve(l) } // stop stops the test server that implements the plugin func (t *authZPluginTestServer) stop() { os.Remove(pluginAddress) if t.listener != nil { t.listener.Close() } } // auth is a used to record/replay the authentication api messages func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) { t.recordedRequest = Request{} defer r.Body.Close() body, err := ioutil.ReadAll(r.Body) json.Unmarshal(body, &t.recordedRequest) b, err := json.Marshal(t.replayResponse) if err != nil { log.Fatal(err) } w.Write(b) } func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) { b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}}) if err != nil { log.Fatal(err) } w.Write(b) } 
docker-1.10.3/pkg/authorization/plugin.go000066400000000000000000000040201267010174400203470ustar00rootroot00000000000000package authorization import "github.com/docker/docker/pkg/plugins" // Plugin allows third party plugins to authorize requests and responses // in the context of docker API type Plugin interface { // Name returns the registered plugin name Name() string // AuthZRequest authorize the request from the client to the daemon AuthZRequest(*Request) (*Response, error) // AuthZResponse authorize the response from the daemon to the client AuthZResponse(*Request) (*Response, error) } // NewPlugins constructs and initialize the authorization plugins based on plugin names func NewPlugins(names []string) []Plugin { plugins := []Plugin{} pluginsMap := make(map[string]struct{}) for _, name := range names { if _, ok := pluginsMap[name]; ok { continue } pluginsMap[name] = struct{}{} plugins = append(plugins, newAuthorizationPlugin(name)) } return plugins } // authorizationPlugin is an internal adapter to docker plugin system type authorizationPlugin struct { plugin *plugins.Plugin name string } func newAuthorizationPlugin(name string) Plugin { return &authorizationPlugin{name: name} } func (a *authorizationPlugin) Name() string { return a.name } func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { if err := a.initPlugin(); err != nil { return nil, err } authRes := &Response{} if err := a.plugin.Client.Call(AuthZApiRequest, authReq, authRes); err != nil { return nil, err } return authRes, nil } func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { if err := a.initPlugin(); err != nil { return nil, err } authRes := &Response{} if err := a.plugin.Client.Call(AuthZApiResponse, authReq, authRes); err != nil { return nil, err } return authRes, nil } // initPlugin initialize the authorization plugin if needed func (a *authorizationPlugin) initPlugin() error { // Lazy loading of plugins if a.plugin == nil { var 
err error a.plugin, err = plugins.Get(a.name, AuthZApiImplements) if err != nil { return err } } return nil } docker-1.10.3/pkg/authorization/response.go000066400000000000000000000111621267010174400207140ustar00rootroot00000000000000package authorization import ( "bufio" "bytes" "encoding/json" "fmt" "github.com/Sirupsen/logrus" "net" "net/http" ) // ResponseModifier allows authorization plugins to read and modify the content of the http.response type ResponseModifier interface { http.ResponseWriter http.Flusher http.CloseNotifier // RawBody returns the current http content RawBody() []byte // RawHeaders returns the current content of the http headers RawHeaders() ([]byte, error) // StatusCode returns the current status code StatusCode() int // OverrideBody replace the body of the HTTP reply OverrideBody(b []byte) // OverrideHeader replace the headers of the HTTP reply OverrideHeader(b []byte) error // OverrideStatusCode replaces the status code of the HTTP reply OverrideStatusCode(statusCode int) // Flush flushes all data to the HTTP response FlushAll() error // Hijacked indicates the response has been hijacked by the Docker daemon Hijacked() bool } // NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { return &responseModifier{rw: rw, header: make(http.Header)} } // responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore // the http request/response from docker daemon type responseModifier struct { // The original response writer rw http.ResponseWriter r *http.Request status int // body holds the response body body []byte // header holds the response header header http.Header // statusCode holds the response status code statusCode int // hijacked indicates the request has been hijacked hijacked bool } func (rm *responseModifier) Hijacked() bool { return rm.hijacked } // WriterHeader stores the http 
status code func (rm *responseModifier) WriteHeader(s int) { // Use original request if hijacked if rm.hijacked { rm.rw.WriteHeader(s) return } rm.statusCode = s } // Header returns the internal http header func (rm *responseModifier) Header() http.Header { // Use original header if hijacked if rm.hijacked { return rm.rw.Header() } return rm.header } // Header returns the internal http header func (rm *responseModifier) StatusCode() int { return rm.statusCode } // Override replace the body of the HTTP reply func (rm *responseModifier) OverrideBody(b []byte) { rm.body = b } func (rm *responseModifier) OverrideStatusCode(statusCode int) { rm.statusCode = statusCode } // Override replace the headers of the HTTP reply func (rm *responseModifier) OverrideHeader(b []byte) error { header := http.Header{} if err := json.Unmarshal(b, &header); err != nil { return err } rm.header = header return nil } // Write stores the byte array inside content func (rm *responseModifier) Write(b []byte) (int, error) { if rm.hijacked { return rm.rw.Write(b) } rm.body = append(rm.body, b...) 
return len(b), nil } // Body returns the response body func (rm *responseModifier) RawBody() []byte { return rm.body } func (rm *responseModifier) RawHeaders() ([]byte, error) { var b bytes.Buffer if err := rm.header.Write(&b); err != nil { return nil, err } return b.Bytes(), nil } // Hijack returns the internal connection of the wrapped http.ResponseWriter func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { rm.hijacked = true rm.FlushAll() hijacker, ok := rm.rw.(http.Hijacker) if !ok { return nil, nil, fmt.Errorf("Internal reponse writer doesn't support the Hijacker interface") } return hijacker.Hijack() } // CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter func (rm *responseModifier) CloseNotify() <-chan bool { closeNotifier, ok := rm.rw.(http.CloseNotifier) if !ok { logrus.Errorf("Internal reponse writer doesn't support the CloseNotifier interface") return nil } return closeNotifier.CloseNotify() } // Flush uses the internal flush API of the wrapped http.ResponseWriter func (rm *responseModifier) Flush() { flusher, ok := rm.rw.(http.Flusher) if !ok { logrus.Errorf("Internal reponse writer doesn't support the Flusher interface") return } rm.FlushAll() flusher.Flush() } // FlushAll flushes all data to the HTTP response func (rm *responseModifier) FlushAll() error { // Copy the status code if rm.statusCode > 0 { rm.rw.WriteHeader(rm.statusCode) } // Copy the header for k, vv := range rm.header { for _, v := range vv { rm.rw.Header().Add(k, v) } } var err error if len(rm.body) > 0 { // Write body _, err = rm.rw.Write(rm.body) } // Clean previous data rm.body = nil rm.statusCode = 0 rm.header = http.Header{} return err } docker-1.10.3/pkg/broadcaster/000077500000000000000000000000001267010174400161175ustar00rootroot00000000000000docker-1.10.3/pkg/broadcaster/unbuffered.go000066400000000000000000000020141267010174400205700ustar00rootroot00000000000000package broadcaster import ( "io" "sync" ) // Unbuffered 
accumulates multiple io.WriteCloser by stream. type Unbuffered struct { mu sync.Mutex writers []io.WriteCloser } // Add adds new io.WriteCloser. func (w *Unbuffered) Add(writer io.WriteCloser) { w.mu.Lock() w.writers = append(w.writers, writer) w.mu.Unlock() } // Write writes bytes to all writers. Failed writers will be evicted during // this call. func (w *Unbuffered) Write(p []byte) (n int, err error) { w.mu.Lock() var evict []int for i, sw := range w.writers { if n, err := sw.Write(p); err != nil || n != len(p) { // On error, evict the writer evict = append(evict, i) } } for n, i := range evict { w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) } w.mu.Unlock() return len(p), nil } // Clean closes and removes all writers. Last non-eol-terminated part of data // will be saved. func (w *Unbuffered) Clean() error { w.mu.Lock() for _, sw := range w.writers { sw.Close() } w.writers = nil w.mu.Unlock() return nil } docker-1.10.3/pkg/broadcaster/unbuffered_test.go000066400000000000000000000070341267010174400216360ustar00rootroot00000000000000package broadcaster import ( "bytes" "errors" "strings" "testing" ) type dummyWriter struct { buffer bytes.Buffer failOnWrite bool } func (dw *dummyWriter) Write(p []byte) (n int, err error) { if dw.failOnWrite { return 0, errors.New("Fake fail") } return dw.buffer.Write(p) } func (dw *dummyWriter) String() string { return dw.buffer.String() } func (dw *dummyWriter) Close() error { return nil } func TestUnbuffered(t *testing.T) { writer := new(Unbuffered) // Test 1: Both bufferA and bufferB should contain "foo" bufferA := &dummyWriter{} writer.Add(bufferA) bufferB := &dummyWriter{} writer.Add(bufferB) writer.Write([]byte("foo")) if bufferA.String() != "foo" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferB.String() != "foo" { t.Errorf("Buffer contains %v", bufferB.String()) } // Test2: bufferA and bufferB should contain "foobar", // while bufferC should only contain "bar" bufferC := &dummyWriter{} 
writer.Add(bufferC) writer.Write([]byte("bar")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferB.String() != "foobar" { t.Errorf("Buffer contains %v", bufferB.String()) } if bufferC.String() != "bar" { t.Errorf("Buffer contains %v", bufferC.String()) } // Test3: Test eviction on failure bufferA.failOnWrite = true writer.Write([]byte("fail")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferC.String() != "barfail" { t.Errorf("Buffer contains %v", bufferC.String()) } // Even though we reset the flag, no more writes should go in there bufferA.failOnWrite = false writer.Write([]byte("test")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferC.String() != "barfailtest" { t.Errorf("Buffer contains %v", bufferC.String()) } // Test4: Test eviction on multiple simultaneous failures bufferB.failOnWrite = true bufferC.failOnWrite = true bufferD := &dummyWriter{} writer.Add(bufferD) writer.Write([]byte("yo")) writer.Write([]byte("ink")) if strings.Contains(bufferB.String(), "yoink") { t.Errorf("bufferB received write. contents: %q", bufferB) } if strings.Contains(bufferC.String(), "yoink") { t.Errorf("bufferC received write. contents: %q", bufferC) } if g, w := bufferD.String(), "yoink"; g != w { t.Errorf("bufferD = %q, want %q", g, w) } writer.Clean() } type devNullCloser int func (d devNullCloser) Close() error { return nil } func (d devNullCloser) Write(buf []byte) (int, error) { return len(buf), nil } // This test checks for races. It is only useful when run with the race detector. 
func TestRaceUnbuffered(t *testing.T) { writer := new(Unbuffered) c := make(chan bool) go func() { writer.Add(devNullCloser(0)) c <- true }() writer.Write([]byte("hello")) <-c } func BenchmarkUnbuffered(b *testing.B) { writer := new(Unbuffered) setUpWriter := func() { for i := 0; i < 100; i++ { writer.Add(devNullCloser(0)) writer.Add(devNullCloser(0)) writer.Add(devNullCloser(0)) } } testLine := "Line that thinks that it is log line from docker" var buf bytes.Buffer for i := 0; i < 100; i++ { buf.Write([]byte(testLine + "\n")) } // line without eol buf.Write([]byte(testLine)) testText := buf.Bytes() b.SetBytes(int64(5 * len(testText))) b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() setUpWriter() b.StartTimer() for j := 0; j < 5; j++ { if _, err := writer.Write(testText); err != nil { b.Fatal(err) } } b.StopTimer() writer.Clean() b.StartTimer() } } docker-1.10.3/pkg/chrootarchive/000077500000000000000000000000001267010174400164665ustar00rootroot00000000000000docker-1.10.3/pkg/chrootarchive/archive.go000066400000000000000000000057361267010174400204510ustar00rootroot00000000000000package chrootarchive import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/idtools" ) var chrootArchiver = &archive.Archiver{Untar: Untar} // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. 
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } if options == nil { options = &archive.TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) if err != nil { return err } dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { return err } } r := ioutil.NopCloser(tarArchive) if decompress { decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return invokeUnpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func TarUntar(src, dst string) error { return chrootArchiver.TarUntar(src, dst) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. func CopyWithTar(src, dst string) error { return chrootArchiver.CopyWithTar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. 
// // If `dst` ends with a trailing slash '/' ('\' on Windows), the final // destination path will be `dst/base(src)` or `dst\base(src)` func CopyFileWithTar(src, dst string) (err error) { return chrootArchiver.CopyFileWithTar(src, dst) } // UntarPath is a convenience function which looks for an archive // at filesystem path `src`, and unpacks it at `dst`. func UntarPath(src, dst string) error { return chrootArchiver.UntarPath(src, dst) } docker-1.10.3/pkg/chrootarchive/archive_test.go000066400000000000000000000230731267010174400215020ustar00rootroot00000000000000package chrootarchive import ( "bytes" "fmt" "hash/crc32" "io" "io/ioutil" "os" "path/filepath" "strings" "testing" "time" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/system" ) func init() { reexec.Init() } func TestChrootTarUntar(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { t.Fatal(err) } stream, err := archive.Tar(src, archive.Uncompressed) if err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "src") if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { t.Fatal(err) } } // gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of // local images) func TestChrootUntarWithHugeExcludesList(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := 
system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { t.Fatal(err) } stream, err := archive.Tar(src, archive.Uncompressed) if err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } options := &archive.TarOptions{} //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow //on most systems when passed via environment or command line arguments excludes := make([]string, 65534, 65534) for i := 0; i < 65534; i++ { excludes[i] = strings.Repeat(string(i), 64) } options.ExcludePatterns = excludes if err := Untar(stream, dest, options); err != nil { t.Fatal(err) } } func TestChrootUntarEmptyArchive(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := Untar(nil, tmpdir, nil); err == nil { t.Fatal("expected error on empty archive") } } func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeSymLinks { if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func getHash(filename string) (uint32, error) { stream, err := ioutil.ReadFile(filename) if err != nil { return 0, err } hash := crc32.NewIEEE() hash.Write(stream) return hash.Sum32(), nil } func compareDirectories(src string, dest string) error { changes, err := archive.ChangesDirs(dest, src) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("Unexpected differences after untar: %v", changes) } 
return nil } func compareFiles(src string, dest string) error { srcHash, err := getHash(src) if err != nil { return err } destHash, err := getHash(dest) if err != nil { return err } if srcHash != destHash { return fmt.Errorf("%s is different from %s", src, dest) } return nil } func TestChrootTarUntarWithSymlink(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") if err := TarUntar(src, dest); err != nil { t.Fatal(err) } if err := compareDirectories(src, dest); err != nil { t.Fatal(err) } } func TestChrootCopyWithTar(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { t.Fatal(err) } // Copy directory dest := filepath.Join(tmpdir, "dest") if err := CopyWithTar(src, dest); err != nil { t.Fatal(err) } if err := compareDirectories(src, dest); err != nil { t.Fatal(err) } // Copy file srcfile := filepath.Join(src, "file-1") dest = filepath.Join(tmpdir, "destFile") destfile := filepath.Join(dest, "file-1") if err := CopyWithTar(srcfile, destfile); err != nil { t.Fatal(err) } if err := compareFiles(srcfile, destfile); err != nil { t.Fatal(err) } // Copy symbolic link srcLinkfile := filepath.Join(src, "file-1-link") dest = filepath.Join(tmpdir, "destSymlink") destLinkfile := filepath.Join(dest, "file-1-link") if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } if err := compareFiles(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } } func TestChrootCopyFileWithTar(t *testing.T) { 
tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { t.Fatal(err) } // Copy directory dest := filepath.Join(tmpdir, "dest") if err := CopyFileWithTar(src, dest); err == nil { t.Fatal("Expected error on copying directory") } // Copy file srcfile := filepath.Join(src, "file-1") dest = filepath.Join(tmpdir, "destFile") destfile := filepath.Join(dest, "file-1") if err := CopyFileWithTar(srcfile, destfile); err != nil { t.Fatal(err) } if err := compareFiles(srcfile, destfile); err != nil { t.Fatal(err) } // Copy symbolic link srcLinkfile := filepath.Join(src, "file-1-link") dest = filepath.Join(tmpdir, "destSymlink") destLinkfile := filepath.Join(dest, "file-1-link") if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } if err := compareFiles(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } } func TestChrootUntarPath(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") // Untar a directory if err := UntarPath(src, dest); err == nil { t.Fatal("Expected error on untaring a directory") } // Untar a tar file stream, err := archive.Tar(src, archive.Uncompressed) if err != nil { t.Fatal(err) } buf := new(bytes.Buffer) buf.ReadFrom(stream) tarfile := filepath.Join(tmpdir, "src.tar") if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil { t.Fatal(err) } if err := UntarPath(tarfile, dest); err != nil { t.Fatal(err) } if err := compareDirectories(src, dest); err != nil { t.Fatal(err) } } type 
slowEmptyTarReader struct { size int offset int chunkSize int } // Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") func (s *slowEmptyTarReader) Read(p []byte) (int, error) { time.Sleep(100 * time.Millisecond) count := s.chunkSize if len(p) < s.chunkSize { count = len(p) } for i := 0; i < count; i++ { p[i] = 0 } s.offset += count if s.offset > s.size { return count, io.EOF } return count, nil } func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} if err := Untar(stream, dest, nil); err != nil { t.Fatal(err) } } func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} if _, err := ApplyLayer(dest, stream); err != nil { t.Fatal(err) } } func TestChrootApplyDotDotFile(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { t.Fatal(err) } stream, err := archive.Tar(src, archive.Uncompressed) if err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } if _, err := ApplyLayer(dest, stream); err != nil { t.Fatal(err) } } 
docker-1.10.3/pkg/chrootarchive/archive_unix.go000066400000000000000000000044131267010174400215030ustar00rootroot00000000000000// +build !windows package chrootarchive import ( "bytes" "encoding/json" "flag" "fmt" "io" "io/ioutil" "os" "runtime" "syscall" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" ) func chroot(path string) error { if err := syscall.Chroot(path); err != nil { return err } return syscall.Chdir("/") } // untar is the entry-point for docker-untar on re-exec. This is not used on // Windows as it does not support chroot, hence no point sandboxing through // chroot and rexec. func untar() { runtime.LockOSThread() flag.Parse() var options *archive.TarOptions //read the options from the pipe "ExtraFiles" if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { fatal(err) } if err := chroot(flag.Arg(0)); err != nil { fatal(err) } if err := archive.Unpack(os.Stdin, "/", options); err != nil { fatal(err) } // fully consume stdin in case it is zero padded flush(os.Stdin) os.Exit(0) } func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { // We can't pass a potentially large exclude list directly via cmd line // because we easily overrun the kernel's max argument/environment size // when the full image list is passed (e.g. when this is used by // `docker load`). 
We will marshall the options via a pipe to the // child r, w, err := os.Pipe() if err != nil { return fmt.Errorf("Untar pipe failure: %v", err) } cmd := reexec.Command("docker-untar", dest) cmd.Stdin = decompressedArchive cmd.ExtraFiles = append(cmd.ExtraFiles, r) output := bytes.NewBuffer(nil) cmd.Stdout = output cmd.Stderr = output if err := cmd.Start(); err != nil { return fmt.Errorf("Untar error on re-exec cmd: %v", err) } //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { return fmt.Errorf("Untar json encode to pipe failed: %v", err) } w.Close() if err := cmd.Wait(); err != nil { // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, // we need to exhaust `xz`'s output, otherwise the `xz` side will be // pending on write pipe forever io.Copy(ioutil.Discard, decompressedArchive) return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output) } return nil } docker-1.10.3/pkg/chrootarchive/archive_windows.go000066400000000000000000000011351267010174400222100ustar00rootroot00000000000000package chrootarchive import ( "io" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/longpath" ) // chroot is not supported by Windows func chroot(path string) error { return nil } func invokeUnpack(decompressedArchive io.ReadCloser, dest string, options *archive.TarOptions) error { // Windows is different to Linux here because Windows does not support // chroot. Hence there is no point sandboxing a chrooted process to // do the unpack. We call inline instead within the daemon process. return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) } docker-1.10.3/pkg/chrootarchive/diff.go000066400000000000000000000015061267010174400177270ustar00rootroot00000000000000package chrootarchive import "github.com/docker/docker/pkg/archive" // ApplyLayer parses a diff in the standard layer format from `layer`, // and applies it to the directory `dest`. 
The stream `layer` can only be // uncompressed. // Returns the size in bytes of the contents of the layer. func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) } // ApplyUncompressedLayer parses a diff in the standard layer format from // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) { return applyLayerHandler(dest, layer, options, false) } docker-1.10.3/pkg/chrootarchive/diff_unix.go000066400000000000000000000053751267010174400210020ustar00rootroot00000000000000//+build !windows package chrootarchive import ( "bytes" "encoding/json" "flag" "fmt" "io/ioutil" "os" "path/filepath" "runtime" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/system" ) type applyLayerResponse struct { LayerSize int64 `json:"layerSize"` } // applyLayer is the entry-point for docker-applylayer on re-exec. This is not // used on Windows as it does not support chroot, hence no point sandboxing // through chroot and rexec. 
func applyLayer() { var ( tmpDir = "" err error options *archive.TarOptions ) runtime.LockOSThread() flag.Parse() if err := chroot(flag.Arg(0)); err != nil { fatal(err) } // We need to be able to set any perms oldmask, err := system.Umask(0) defer system.Umask(oldmask) if err != nil { fatal(err) } if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { fatal(err) } if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { fatal(err) } os.Setenv("TMPDIR", tmpDir) size, err := archive.UnpackLayer("/", os.Stdin, options) os.RemoveAll(tmpDir) if err != nil { fatal(err) } encoder := json.NewEncoder(os.Stdout) if err := encoder.Encode(applyLayerResponse{size}); err != nil { fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) } flush(os.Stdout) flush(os.Stdin) os.Exit(0) } // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) if decompress { decompressed, err := archive.DecompressStream(layer) if err != nil { return 0, err } defer decompressed.Close() layer = decompressed } if options == nil { options = &archive.TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } data, err := json.Marshal(options) if err != nil { return 0, fmt.Errorf("ApplyLayer json encode: %v", err) } cmd := reexec.Command("docker-applyLayer", dest) cmd.Stdin = layer cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) cmd.Stdout, cmd.Stderr = outBuf, errBuf if err = cmd.Run(); err != nil { return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) } // Stdout should be a valid JSON struct representing an applyLayerResponse. 
response := applyLayerResponse{} decoder := json.NewDecoder(outBuf) if err = decoder.Decode(&response); err != nil { return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) } return response.LayerSize, nil } docker-1.10.3/pkg/chrootarchive/diff_windows.go000066400000000000000000000021441267010174400215000ustar00rootroot00000000000000package chrootarchive import ( "fmt" "io/ioutil" "os" "path/filepath" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/longpath" ) // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) // Ensure it is a Windows-style volume path dest = longpath.AddPrefix(dest) if decompress { decompressed, err := archive.DecompressStream(layer) if err != nil { return 0, err } defer decompressed.Close() layer = decompressed } tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") if err != nil { return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) } s, err := archive.UnpackLayer(dest, layer, nil) os.RemoveAll(tmpDir) if err != nil { return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) } return s, nil } docker-1.10.3/pkg/chrootarchive/init_unix.go000066400000000000000000000006561267010174400210320ustar00rootroot00000000000000// +build !windows package chrootarchive import ( "fmt" "io" "io/ioutil" "os" "github.com/docker/docker/pkg/reexec" ) func init() { reexec.Register("docker-applyLayer", applyLayer) reexec.Register("docker-untar", untar) } func fatal(err error) { fmt.Fprint(os.Stderr, err) os.Exit(1) } // flush consumes all the bytes from the reader discarding // any errors func flush(r io.Reader) { io.Copy(ioutil.Discard, r) } docker-1.10.3/pkg/chrootarchive/init_windows.go000066400000000000000000000000471267010174400215330ustar00rootroot00000000000000package chrootarchive func init() { } docker-1.10.3/pkg/devicemapper/000077500000000000000000000000001267010174400162725ustar00rootroot00000000000000docker-1.10.3/pkg/devicemapper/devmapper.go000066400000000000000000000545001267010174400206100ustar00rootroot00000000000000// +build linux package devicemapper import ( "errors" "fmt" "os" "runtime" "syscall" "unsafe" "github.com/Sirupsen/logrus" ) // DevmapperLogger defines methods for logging with devicemapper. type DevmapperLogger interface { DMLog(level int, file string, line int, dmError int, message string) } const ( deviceCreate TaskType = iota deviceReload deviceRemove deviceRemoveAll deviceSuspend deviceResume deviceInfo deviceDeps deviceRename deviceVersion deviceStatus deviceTable deviceWaitevent deviceList deviceClear deviceMknodes deviceListVersions deviceTargetMsg deviceSetGeometry ) const ( addNodeOnResume AddNodeType = iota addNodeOnCreate ) // List of errors returned when using devicemapper. 
var ( ErrTaskRun = errors.New("dm_task_run failed") ErrTaskSetName = errors.New("dm_task_set_name failed") ErrTaskSetMessage = errors.New("dm_task_set_message failed") ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") ErrTaskSetRo = errors.New("dm_task_set_ro failed") ErrTaskAddTarget = errors.New("dm_task_add_target failed") ErrTaskSetSector = errors.New("dm_task_set_sector failed") ErrTaskGetDeps = errors.New("dm_task_get_deps failed") ErrTaskGetInfo = errors.New("dm_task_get_info failed") ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") ErrNilCookie = errors.New("cookie ptr can't be nil") ErrGetBlockSize = errors.New("Can't get block size") ErrUdevWait = errors.New("wait on udev cookie failed") ErrSetDevDir = errors.New("dm_set_dev_dir failed") ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") ErrRunRemoveDevice = errors.New("running RemoveDevice failed") ErrInvalidAddNode = errors.New("Invalid AddNode type") ErrBusy = errors.New("Device is Busy") ErrDeviceIDExists = errors.New("Device Id Exists") ErrEnxio = errors.New("No such device or address") ) var ( dmSawBusy bool dmSawExist bool dmSawEnxio bool // No Such Device or Address ) type ( // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl // command to execute. Task struct { unmanaged *cdmTask } // Deps represents dependents (layer) of a device. Deps struct { Count uint32 Filler uint32 Device []uint64 } // Info represents information about a device. 
Info struct { Exists int Suspended int LiveTable int InactiveTable int OpenCount int32 EventNr uint32 Major uint32 Minor uint32 ReadOnly int TargetCount int32 DeferredRemove int } // TaskType represents a type of task TaskType int // AddNodeType represents a type of node to be added AddNodeType int ) // DeviceIDExists returns whether error conveys the information about device Id already // exist or not. This will be true if device creation or snap creation // operation fails if device or snap device already exists in pool. // Current implementation is little crude as it scans the error string // for exact pattern match. Replacing it with more robust implementation // is desirable. func DeviceIDExists(err error) bool { return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) } func (t *Task) destroy() { if t != nil { DmTaskDestroy(t.unmanaged) runtime.SetFinalizer(t, nil) } } // TaskCreateNamed is a convenience function for TaskCreate when a name // will be set on the task as well func TaskCreateNamed(t TaskType, name string) (*Task, error) { task := TaskCreate(t) if task == nil { return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) } if err := task.setName(name); err != nil { return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) } return task, nil } // TaskCreate initializes a devicemapper task of tasktype func TaskCreate(tasktype TaskType) *Task { Ctask := DmTaskCreate(int(tasktype)) if Ctask == nil { return nil } task := &Task{unmanaged: Ctask} runtime.SetFinalizer(task, (*Task).destroy) return task } func (t *Task) run() error { if res := DmTaskRun(t.unmanaged); res != 1 { return ErrTaskRun } return nil } func (t *Task) setName(name string) error { if res := DmTaskSetName(t.unmanaged, name); res != 1 { return ErrTaskSetName } return nil } func (t *Task) setMessage(message string) error { if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { return ErrTaskSetMessage } return nil } func (t *Task) setSector(sector 
uint64) error { if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { return ErrTaskSetSector } return nil } func (t *Task) setCookie(cookie *uint, flags uint16) error { if cookie == nil { return ErrNilCookie } if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { return ErrTaskSetCookie } return nil } func (t *Task) setAddNode(addNode AddNodeType) error { if addNode != addNodeOnResume && addNode != addNodeOnCreate { return ErrInvalidAddNode } if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { return ErrTaskSetAddNode } return nil } func (t *Task) setRo() error { if res := DmTaskSetRo(t.unmanaged); res != 1 { return ErrTaskSetRo } return nil } func (t *Task) addTarget(start, size uint64, ttype, params string) error { if res := DmTaskAddTarget(t.unmanaged, start, size, ttype, params); res != 1 { return ErrTaskAddTarget } return nil } func (t *Task) getDeps() (*Deps, error) { var deps *Deps if deps = DmTaskGetDeps(t.unmanaged); deps == nil { return nil, ErrTaskGetDeps } return deps, nil } func (t *Task) getInfo() (*Info, error) { info := &Info{} if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { return nil, ErrTaskGetInfo } return info, nil } func (t *Task) getInfoWithDeferred() (*Info, error) { info := &Info{} if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { return nil, ErrTaskGetInfo } return info, nil } func (t *Task) getDriverVersion() (string, error) { res := DmTaskGetDriverVersion(t.unmanaged) if res == "" { return "", ErrTaskGetDriverVersion } return res, nil } func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, length uint64, targetType string, params string) { return DmGetNextTarget(t.unmanaged, next, &start, &length, &targetType, ¶ms), start, length, targetType, params } // UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
func UdevWait(cookie *uint) error { if res := DmUdevWait(*cookie); res != 1 { logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) return ErrUdevWait } return nil } // LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. func LogInitVerbose(level int) { DmLogInitVerbose(level) } var dmLogger DevmapperLogger // LogInit initializes the logger for the device mapper library. func LogInit(logger DevmapperLogger) { dmLogger = logger LogWithErrnoInit() } // SetDevDir sets the dev folder for the device mapper library (usually /dev). func SetDevDir(dir string) error { if res := DmSetDevDir(dir); res != 1 { logrus.Debugf("devicemapper: Error dm_set_dev_dir") return ErrSetDevDir } return nil } // GetLibraryVersion returns the device mapper library version. func GetLibraryVersion() (string, error) { var version string if res := DmGetLibraryVersion(&version); res != 1 { return "", ErrGetLibraryVersion } return version, nil } // UdevSyncSupported returns whether device-mapper is able to sync with udev // // This is essential otherwise race conditions can arise where both udev and // device-mapper attempt to create and destroy devices. func UdevSyncSupported() bool { return DmUdevGetSyncSupport() != 0 } // UdevSetSyncSupport allows setting whether the udev sync should be enabled. // The return bool indicates the state of whether the sync is enabled. func UdevSetSyncSupport(enable bool) bool { if enable { DmUdevSetSyncSupport(1) } else { DmUdevSetSyncSupport(0) } return UdevSyncSupported() } // CookieSupported returns whether the version of device-mapper supports the // use of cookie's in the tasks. // This is largely a lower level call that other functions use. func CookieSupported() bool { return DmCookieSupported() != 0 } // RemoveDevice is a useful helper for cleaning up a device. 
func RemoveDevice(name string) error { task, err := TaskCreateNamed(deviceRemove, name) if task == nil { return err } var cookie uint if err := task.setCookie(&cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } defer UdevWait(&cookie) dmSawBusy = false // reset before the task is run if err = task.run(); err != nil { if dmSawBusy { return ErrBusy } return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) } return nil } // RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. func RemoveDeviceDeferred(name string) error { logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) task, err := TaskCreateNamed(deviceRemove, name) if task == nil { return err } if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { return ErrTaskDeferredRemove } if err = task.run(); err != nil { return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) } return nil } // CancelDeferredRemove cancels a deferred remove for a device. func CancelDeferredRemove(deviceName string) error { task, err := TaskCreateNamed(deviceTargetMsg, deviceName) if task == nil { return err } if err := task.setSector(0); err != nil { return fmt.Errorf("devicemapper: Can't set sector %s", err) } if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { return fmt.Errorf("devicemapper: Can't set message %s", err) } dmSawBusy = false dmSawEnxio = false if err := task.run(); err != nil { // A device might be being deleted already if dmSawBusy { return ErrBusy } else if dmSawEnxio { return ErrEnxio } return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) } return nil } // GetBlockDeviceSize returns the size of a block device identified by the specified file. 
func GetBlockDeviceSize(file *os.File) (uint64, error) { size, err := ioctlBlkGetSize64(file.Fd()) if err != nil { logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) return 0, ErrGetBlockSize } return uint64(size), nil } // BlockDeviceDiscard runs discard for the given path. // This is used as a workaround for the kernel not discarding block so // on the thin pool when we remove a thinp device, so we do it // manually func BlockDeviceDiscard(path string) error { file, err := os.OpenFile(path, os.O_RDWR, 0) if err != nil { return err } defer file.Close() size, err := GetBlockDeviceSize(file) if err != nil { return err } if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { return err } // Without this sometimes the remove of the device that happens after // discard fails with EBUSY. syscall.Sync() return nil } // CreatePool is the programmatic example of "dmsetup create". // It creates a device with the specified poolName, data and metadata file and block size. func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { task, err := TaskCreateNamed(deviceCreate, poolName) if task == nil { return err } size, err := GetBlockDeviceSize(dataFile) if err != nil { return fmt.Errorf("devicemapper: Can't get data size %s", err) } params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("devicemapper: Can't add target %s", err) } var cookie uint var flags uint16 flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag if err := task.setCookie(&cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } defer UdevWait(&cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) } return nil } // ReloadPool is the programmatic example of 
"dmsetup reload". // It reloads the table with the specified poolName, data and metadata file and block size. func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { task, err := TaskCreateNamed(deviceReload, poolName) if task == nil { return err } size, err := GetBlockDeviceSize(dataFile) if err != nil { return fmt.Errorf("devicemapper: Can't get data size %s", err) } params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("devicemapper: Can't add target %s", err) } if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) } return nil } // GetDeps is the programmatic example of "dmsetup deps". // It outputs a list of devices referenced by the live table for the specified device. func GetDeps(name string) (*Deps, error) { task, err := TaskCreateNamed(deviceDeps, name) if task == nil { return nil, err } if err := task.run(); err != nil { return nil, err } return task.getDeps() } // GetInfo is the programmatic example of "dmsetup info". // It outputs some brief information about the device. func GetInfo(name string) (*Info, error) { task, err := TaskCreateNamed(deviceInfo, name) if task == nil { return nil, err } if err := task.run(); err != nil { return nil, err } return task.getInfo() } // GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. // It outputs some brief information about the device. func GetInfoWithDeferred(name string) (*Info, error) { task, err := TaskCreateNamed(deviceInfo, name) if task == nil { return nil, err } if err := task.run(); err != nil { return nil, err } return task.getInfoWithDeferred() } // GetDriverVersion is the programmatic example of "dmsetup version". // It outputs version information of the driver. 
func GetDriverVersion() (string, error) { task := TaskCreate(deviceVersion) if task == nil { return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") } if err := task.run(); err != nil { return "", err } return task.getDriverVersion() } // GetStatus is the programmatic example of "dmsetup status". // It outputs status information for the specified device name. func GetStatus(name string) (uint64, uint64, string, string, error) { task, err := TaskCreateNamed(deviceStatus, name) if task == nil { logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) return 0, 0, "", "", err } if err := task.run(); err != nil { logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) return 0, 0, "", "", err } devinfo, err := task.getInfo() if err != nil { logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) return 0, 0, "", "", err } if devinfo.Exists == 0 { logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) } _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) return start, length, targetType, params, nil } // GetTable is the programmatic example for "dmsetup table". // It outputs the current table for the specified device name. 
func GetTable(name string) (uint64, uint64, string, string, error) { task, err := TaskCreateNamed(deviceTable, name) if task == nil { logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) return 0, 0, "", "", err } if err := task.run(); err != nil { logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) return 0, 0, "", "", err } devinfo, err := task.getInfo() if err != nil { logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) return 0, 0, "", "", err } if devinfo.Exists == 0 { logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) } _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) return start, length, targetType, params, nil } // SetTransactionID sets a transaction id for the specified device name. func SetTransactionID(poolName string, oldID uint64, newID uint64) error { task, err := TaskCreateNamed(deviceTargetMsg, poolName) if task == nil { return err } if err := task.setSector(0); err != nil { return fmt.Errorf("devicemapper: Can't set sector %s", err) } if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { return fmt.Errorf("devicemapper: Can't set message %s", err) } if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) } return nil } // SuspendDevice is the programmatic example of "dmsetup suspend". // It suspends the specified device. func SuspendDevice(name string) error { task, err := TaskCreateNamed(deviceSuspend, name) if task == nil { return err } if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) } return nil } // ResumeDevice is the programmatic example of "dmsetup resume". // It un-suspends the specified device. 
func ResumeDevice(name string) error { task, err := TaskCreateNamed(deviceResume, name) if task == nil { return err } var cookie uint if err := task.setCookie(&cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } defer UdevWait(&cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceResume %s", err) } return nil } // CreateDevice creates a device with the specified poolName with the specified device id. func CreateDevice(poolName string, deviceID int) error { logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) task, err := TaskCreateNamed(deviceTargetMsg, poolName) if task == nil { return err } if err := task.setSector(0); err != nil { return fmt.Errorf("devicemapper: Can't set sector %s", err) } if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { return fmt.Errorf("devicemapper: Can't set message %s", err) } dmSawExist = false // reset before the task is run if err := task.run(); err != nil { // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. if dmSawExist { return ErrDeviceIDExists } return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) } return nil } // DeleteDevice deletes a device with the specified poolName with the specified device id. 
func DeleteDevice(poolName string, deviceID int) error { task, err := TaskCreateNamed(deviceTargetMsg, poolName) if task == nil { return err } if err := task.setSector(0); err != nil { return fmt.Errorf("devicemapper: Can't set sector %s", err) } if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { return fmt.Errorf("devicemapper: Can't set message %s", err) } dmSawBusy = false if err := task.run(); err != nil { if dmSawBusy { return ErrBusy } return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) } return nil } // ActivateDevice activates the device identified by the specified // poolName, name and deviceID with the specified size. func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { return activateDevice(poolName, name, deviceID, size, "") } // ActivateDeviceWithExternal activates the device identified by the specified // poolName, name and deviceID with the specified size. func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { return activateDevice(poolName, name, deviceID, size, external) } func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { task, err := TaskCreateNamed(deviceCreate, name) if task == nil { return err } var params string if len(external) > 0 { params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) } else { params = fmt.Sprintf("%s %d", poolName, deviceID) } if err := task.addTarget(0, size/512, "thin", params); err != nil { return fmt.Errorf("devicemapper: Can't add target %s", err) } if err := task.setAddNode(addNodeOnCreate); err != nil { return fmt.Errorf("devicemapper: Can't add node %s", err) } var cookie uint if err := task.setCookie(&cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } defer UdevWait(&cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) 
%s", err) } return nil } // CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { devinfo, _ := GetInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 if doSuspend { if err := SuspendDevice(baseName); err != nil { return err } } task, err := TaskCreateNamed(deviceTargetMsg, poolName) if task == nil { if doSuspend { ResumeDevice(baseName) } return err } if err := task.setSector(0); err != nil { if doSuspend { ResumeDevice(baseName) } return fmt.Errorf("devicemapper: Can't set sector %s", err) } if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { if doSuspend { ResumeDevice(baseName) } return fmt.Errorf("devicemapper: Can't set message %s", err) } dmSawExist = false // reset before the task is run if err := task.run(); err != nil { if doSuspend { ResumeDevice(baseName) } // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. if dmSawExist { return ErrDeviceIDExists } return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) } if doSuspend { if err := ResumeDevice(baseName); err != nil { return err } } return nil } docker-1.10.3/pkg/devicemapper/devmapper_log.go000066400000000000000000000014551267010174400214520ustar00rootroot00000000000000// +build linux package devicemapper import "C" import ( "strings" ) // Due to the way cgo works this has to be in a separate file, as devmapper.go has // definitions in the cgo block, which is incompatible with using "//export" // DevmapperLogCallback exports the devmapper log callback for cgo. 
//export DevmapperLogCallback func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { msg := C.GoString(message) if level < 7 { if strings.Contains(msg, "busy") { dmSawBusy = true } if strings.Contains(msg, "File exists") { dmSawExist = true } if strings.Contains(msg, "No such device or address") { dmSawEnxio = true } } if dmLogger != nil { dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) } } docker-1.10.3/pkg/devicemapper/devmapper_wrapper.go000066400000000000000000000152541267010174400223530ustar00rootroot00000000000000// +build linux package devicemapper /* #cgo LDFLAGS: -L. -ldevmapper #include #include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? // FIXME: Can't we find a way to do the logging in pure Go? extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) { char buffer[256]; va_list ap; va_start(ap, f); vsnprintf(buffer, 256, f, ap); va_end(ap); DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); } static void log_with_errno_init() { dm_log_with_errno_init(log_cb); } */ import "C" import ( "reflect" "unsafe" ) type ( cdmTask C.struct_dm_task ) // IOCTL consts const ( BlkGetSize64 = C.BLKGETSIZE64 BlkDiscard = C.BLKDISCARD ) // Devicemapper cookie flags. const ( DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK ) // DeviceMapper mapped functions. 
var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct DmLogInitVerbose = dmLogInitVerboseFct DmSetDevDir = dmSetDevDirFct DmTaskAddTarget = dmTaskAddTargetFct DmTaskCreate = dmTaskCreateFct DmTaskDestroy = dmTaskDestroyFct DmTaskGetDeps = dmTaskGetDepsFct DmTaskGetInfo = dmTaskGetInfoFct DmTaskGetDriverVersion = dmTaskGetDriverVersionFct DmTaskRun = dmTaskRunFct DmTaskSetAddNode = dmTaskSetAddNodeFct DmTaskSetCookie = dmTaskSetCookieFct DmTaskSetMessage = dmTaskSetMessageFct DmTaskSetName = dmTaskSetNameFct DmTaskSetRo = dmTaskSetRoFct DmTaskSetSector = dmTaskSetSectorFct DmUdevWait = dmUdevWaitFct DmUdevSetSyncSupport = dmUdevSetSyncSupportFct DmUdevGetSyncSupport = dmUdevGetSyncSupportFct DmCookieSupported = dmCookieSupportedFct LogWithErrnoInit = logWithErrnoInitFct DmTaskDeferredRemove = dmTaskDeferredRemoveFct DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct ) func free(p *C.char) { C.free(unsafe.Pointer(p)) } func dmTaskDestroyFct(task *cdmTask) { C.dm_task_destroy((*C.struct_dm_task)(task)) } func dmTaskCreateFct(taskType int) *cdmTask { return (*cdmTask)(C.dm_task_create(C.int(taskType))) } func dmTaskRunFct(task *cdmTask) int { ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) return int(ret) } func dmTaskSetNameFct(task *cdmTask, name string) int { Cname := C.CString(name) defer free(Cname) return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) } func dmTaskSetMessageFct(task *cdmTask, message string) int { Cmessage := C.CString(message) defer free(Cmessage) return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) } func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) } func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { cCookie := C.uint32_t(*cookie) defer func() { *cookie = uint(cCookie) }() return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) } 
func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) } func dmTaskSetRoFct(task *cdmTask) int { return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) } func dmTaskAddTargetFct(task *cdmTask, start, size uint64, ttype, params string) int { Cttype := C.CString(ttype) defer free(Cttype) Cparams := C.CString(params) defer free(Cparams) return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) } func dmTaskGetDepsFct(task *cdmTask) *Deps { Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) if Cdeps == nil { return nil } // golang issue: https://github.com/golang/go/issues/11925 hdr := reflect.SliceHeader{ Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), Len: int(Cdeps.count), Cap: int(Cdeps.count), } devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) deps := &Deps{ Count: uint32(Cdeps.count), Filler: uint32(Cdeps.filler), } for _, device := range devices { deps.Device = append(deps.Device, uint64(device)) } return deps } func dmTaskGetInfoFct(task *cdmTask, info *Info) int { Cinfo := C.struct_dm_info{} defer func() { info.Exists = int(Cinfo.exists) info.Suspended = int(Cinfo.suspended) info.LiveTable = int(Cinfo.live_table) info.InactiveTable = int(Cinfo.inactive_table) info.OpenCount = int32(Cinfo.open_count) info.EventNr = uint32(Cinfo.event_nr) info.Major = uint32(Cinfo.major) info.Minor = uint32(Cinfo.minor) info.ReadOnly = int(Cinfo.read_only) info.TargetCount = int32(Cinfo.target_count) }() return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) } func dmTaskGetDriverVersionFct(task *cdmTask) string { buffer := C.malloc(128) defer C.free(buffer) res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) if res == 0 { return "" } return C.GoString((*C.char)(buffer)) } func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, 
start, length *uint64, target, params *string) unsafe.Pointer { var ( Cstart, Clength C.uint64_t CtargetType, Cparams *C.char ) defer func() { *start = uint64(Cstart) *length = uint64(Clength) *target = C.GoString(CtargetType) *params = C.GoString(Cparams) }() nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) return nextp } func dmUdevSetSyncSupportFct(syncWithUdev int) { (C.dm_udev_set_sync_support(C.int(syncWithUdev))) } func dmUdevGetSyncSupportFct() int { return int(C.dm_udev_get_sync_support()) } func dmUdevWaitFct(cookie uint) int { return int(C.dm_udev_wait(C.uint32_t(cookie))) } func dmCookieSupportedFct() int { return int(C.dm_cookie_supported()) } func dmLogInitVerboseFct(level int) { C.dm_log_init_verbose(C.int(level)) } func logWithErrnoInitFct() { C.log_with_errno_init() } func dmSetDevDirFct(dir string) int { Cdir := C.CString(dir) defer free(Cdir) return int(C.dm_set_dev_dir(Cdir)) } func dmGetLibraryVersionFct(version *string) int { buffer := C.CString(string(make([]byte, 128))) defer free(buffer) defer func() { *version = C.GoString(buffer) }() return int(C.dm_get_library_version(buffer, 128)) } docker-1.10.3/pkg/devicemapper/devmapper_wrapper_deferred_remove.go000066400000000000000000000017641267010174400255710ustar00rootroot00000000000000// +build linux,!libdm_no_deferred_remove package devicemapper /* #cgo LDFLAGS: -L. -ldevmapper #include */ import "C" // LibraryDeferredRemovalSupport is supported when statically linked. 
const LibraryDeferredRemovalSupport = true func dmTaskDeferredRemoveFct(task *cdmTask) int { return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) } func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { Cinfo := C.struct_dm_info{} defer func() { info.Exists = int(Cinfo.exists) info.Suspended = int(Cinfo.suspended) info.LiveTable = int(Cinfo.live_table) info.InactiveTable = int(Cinfo.inactive_table) info.OpenCount = int32(Cinfo.open_count) info.EventNr = uint32(Cinfo.event_nr) info.Major = uint32(Cinfo.major) info.Minor = uint32(Cinfo.minor) info.ReadOnly = int(Cinfo.read_only) info.TargetCount = int32(Cinfo.target_count) info.DeferredRemove = int(Cinfo.deferred_remove) }() return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) } docker-1.10.3/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go000066400000000000000000000005571267010174400262640ustar00rootroot00000000000000// +build linux,libdm_no_deferred_remove package devicemapper // LibraryDeferredRemovalsupport is not supported when statically linked. const LibraryDeferredRemovalSupport = false func dmTaskDeferredRemoveFct(task *cdmTask) int { // Error. Nobody should be calling it. 
return -1 } func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { return -1 } docker-1.10.3/pkg/devicemapper/ioctl.go000066400000000000000000000010441267010174400177320ustar00rootroot00000000000000// +build linux package devicemapper import ( "syscall" "unsafe" ) func ioctlBlkGetSize64(fd uintptr) (int64, error) { var size int64 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { return 0, err } return size, nil } func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { var r [2]uint64 r[0] = offset r[1] = length if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { return err } return nil } docker-1.10.3/pkg/devicemapper/log.go000066400000000000000000000004721267010174400174050ustar00rootroot00000000000000package devicemapper // definitions from lvm2 lib/log/log.h const ( LogLevelFatal = 2 + iota // _LOG_FATAL LogLevelErr // _LOG_ERR LogLevelWarn // _LOG_WARN LogLevelNotice // _LOG_NOTICE LogLevelInfo // _LOG_INFO LogLevelDebug // _LOG_DEBUG ) docker-1.10.3/pkg/directory/000077500000000000000000000000001267010174400156325ustar00rootroot00000000000000docker-1.10.3/pkg/directory/directory.go000066400000000000000000000010461267010174400201660ustar00rootroot00000000000000package directory import ( "io/ioutil" "os" "path/filepath" ) // MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path func MoveToSubdir(oldpath, subdir string) error { infos, err := ioutil.ReadDir(oldpath) if err != nil { return err } for _, info := range infos { if info.Name() != subdir { oldName := filepath.Join(oldpath, info.Name()) newName := filepath.Join(oldpath, subdir, info.Name()) if err := os.Rename(oldName, newName); err != nil { return err } } } return nil } docker-1.10.3/pkg/directory/directory_test.go000066400000000000000000000121511267010174400212240ustar00rootroot00000000000000package directory import ( 
"io/ioutil" "os" "path/filepath" "reflect" "sort" "testing" ) // Size of an empty directory should be 0 func TestSizeEmpty(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { t.Fatalf("failed to create directory: %s", err) } var size int64 if size, _ = Size(dir); size != 0 { t.Fatalf("empty directory has size: %d", size) } } // Size of a directory with one empty file should be 0 func TestSizeEmptyFile(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { t.Fatalf("failed to create directory: %s", err) } var file *os.File if file, err = ioutil.TempFile(dir, "file"); err != nil { t.Fatalf("failed to create file: %s", err) } var size int64 if size, _ = Size(file.Name()); size != 0 { t.Fatalf("directory with one file has size: %d", size) } } // Size of a directory with one 5-byte file should be 5 func TestSizeNonemptyFile(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { t.Fatalf("failed to create directory: %s", err) } var file *os.File if file, err = ioutil.TempFile(dir, "file"); err != nil { t.Fatalf("failed to create file: %s", err) } d := []byte{97, 98, 99, 100, 101} file.Write(d) var size int64 if size, _ = Size(file.Name()); size != 5 { t.Fatalf("directory with one 5-byte file has size: %d", size) } } // Size of a directory with one empty directory should be 0 func TestSizeNestedDirectoryEmpty(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { t.Fatalf("failed to create directory: %s", err) } if dir, err = ioutil.TempDir(dir, "nested"); err != nil { t.Fatalf("failed to create nested directory: %s", err) } var size int64 if size, _ = Size(dir); size != 0 { t.Fatalf("directory with one empty directory has size: %d", size) } } // Test directory with 1 file and 1 
empty directory func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { t.Fatalf("failed to create directory: %s", err) } if dir, err = ioutil.TempDir(dir, "nested"); err != nil { t.Fatalf("failed to create nested directory: %s", err) } var file *os.File if file, err = ioutil.TempFile(dir, "file"); err != nil { t.Fatalf("failed to create file: %s", err) } d := []byte{100, 111, 99, 107, 101, 114} file.Write(d) var size int64 if size, _ = Size(dir); size != 6 { t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) } } // Test directory with 1 file and 1 non-empty directory func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { var dir, dirNested string var err error if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { t.Fatalf("failed to create directory: %s", err) } if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { t.Fatalf("failed to create nested directory: %s", err) } var file *os.File if file, err = ioutil.TempFile(dir, "file"); err != nil { t.Fatalf("failed to create file: %s", err) } data := []byte{100, 111, 99, 107, 101, 114} file.Write(data) var nestedFile *os.File if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { t.Fatalf("failed to create file in nested directory: %s", err) } nestedData := []byte{100, 111, 99, 107, 101, 114} nestedFile.Write(nestedData) var size int64 if size, _ = Size(dir); size != 12 { t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) } } // Test migration of directory to a subdir underneath itself func TestMoveToSubdir(t *testing.T) { var outerDir, subDir string var err error if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil { t.Fatalf("failed to create directory: %v", err) } if subDir, err = ioutil.TempDir(outerDir, "testSub"); 
err != nil { t.Fatalf("failed to create subdirectory: %v", err) } // write 4 temp files in the outer dir to get moved filesList := []string{"a", "b", "c", "d"} for _, fName := range filesList { if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil { t.Fatalf("couldn't create temp file %q: %v", fName, err) } else { file.WriteString(fName) file.Close() } } if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil { t.Fatalf("Error during migration of content to subdirectory: %v", err) } // validate that the files were moved to the subdirectory infos, err := ioutil.ReadDir(subDir) if len(infos) != 4 { t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos)) } var results []string for _, info := range infos { results = append(results, info.Name()) } sort.Sort(sort.StringSlice(results)) if !reflect.DeepEqual(filesList, results) { t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results) } } docker-1.10.3/pkg/directory/directory_unix.go000066400000000000000000000015151267010174400212320ustar00rootroot00000000000000// +build linux freebsd package directory import ( "os" "path/filepath" "syscall" ) // Size walks a directory tree and returns its total size in bytes. func Size(dir string) (size int64, err error) { data := make(map[uint64]struct{}) err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { // Ignore directory sizes if fileInfo == nil { return nil } s := fileInfo.Size() if fileInfo.IsDir() || s == 0 { return nil } // Check inode to handle hard links correctly inode := fileInfo.Sys().(*syscall.Stat_t).Ino // inode is not a uint64 on all platforms. Cast it to avoid issues. if _, exists := data[uint64(inode)]; exists { return nil } // inode is not a uint64 on all platforms. Cast it to avoid issues. 
data[uint64(inode)] = struct{}{} size += s return nil }) return } docker-1.10.3/pkg/directory/directory_windows.go000066400000000000000000000011361267010174400217400ustar00rootroot00000000000000// +build windows package directory import ( "os" "path/filepath" "github.com/docker/docker/pkg/longpath" ) // Size walks a directory tree and returns its total size in bytes. func Size(dir string) (size int64, err error) { fixedPath, err := filepath.Abs(dir) if err != nil { return } fixedPath = longpath.AddPrefix(fixedPath) err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { // Ignore directory sizes if fileInfo == nil { return nil } s := fileInfo.Size() if fileInfo.IsDir() || s == 0 { return nil } size += s return nil }) return } docker-1.10.3/pkg/discovery/000077500000000000000000000000001267010174400156355ustar00rootroot00000000000000docker-1.10.3/pkg/discovery/README.md000066400000000000000000000021471267010174400171200ustar00rootroot00000000000000--- page_title: Docker discovery page_description: discovery page_keywords: docker, clustering, discovery --- # Discovery Docker comes with multiple Discovery backends. ## Backends ### Using etcd Point your Docker Engine instances to a common etcd instance. You can specify the address Docker uses to advertise the node using the `--cluster-advertise` flag. ```bash $ docker daemon -H= --cluster-advertise= --cluster-store etcd://,/ ``` ### Using consul Point your Docker Engine instances to a common Consul instance. You can specify the address Docker uses to advertise the node using the `--cluster-advertise` flag. ```bash $ docker daemon -H= --cluster-advertise= --cluster-store consul:/// ``` ### Using zookeeper Point your Docker Engine instances to a common Zookeeper instance. You can specify the address Docker uses to advertise the node using the `--cluster-advertise` flag. 
```bash $ docker daemon -H= --cluster-advertise= --cluster-store zk://,/ ``` docker-1.10.3/pkg/discovery/backends.go000066400000000000000000000056451267010174400177500ustar00rootroot00000000000000package discovery import ( "fmt" "net" "strings" "time" log "github.com/Sirupsen/logrus" ) var ( // Backends is a global map of discovery backends indexed by their // associated scheme. backends = make(map[string]Backend) ) // Register makes a discovery backend available by the provided scheme. // If Register is called twice with the same scheme an error is returned. func Register(scheme string, d Backend) error { if _, exists := backends[scheme]; exists { return fmt.Errorf("scheme already registered %s", scheme) } log.WithField("name", scheme).Debug("Registering discovery service") backends[scheme] = d return nil } func parse(rawurl string) (string, string) { parts := strings.SplitN(rawurl, "://", 2) // nodes:port,node2:port => nodes://node1:port,node2:port if len(parts) == 1 { return "nodes", parts[0] } return parts[0], parts[1] } // ParseAdvertise parses the --cluster-advertise daemon config which accepts // : or : func ParseAdvertise(advertise string) (string, error) { var ( iface *net.Interface addrs []net.Addr err error ) addr, port, err := net.SplitHostPort(advertise) if err != nil { return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) } ip := net.ParseIP(addr) // If it is a valid ip-address, use it as is if ip != nil { return advertise, nil } // If advertise is a valid interface name, get the valid ipv4 address and use it to advertise ifaceName := addr iface, err = net.InterfaceByName(ifaceName) if err != nil { return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) } addrs, err = iface.Addrs() if err != nil { return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) } if addrs == nil || len(addrs) == 0 { return "", fmt.Errorf("no 
available advertise IP address in interface (%s)", advertise) } addr = "" for _, a := range addrs { ip, _, err := net.ParseCIDR(a.String()) if err != nil { return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) } if ip.To4() == nil || ip.IsLoopback() { continue } addr = ip.String() break } if addr == "" { return "", fmt.Errorf("couldnt find a valid ip-address in interface %s", advertise) } addr = fmt.Sprintf("%s:%s", addr, port) return addr, nil } // New returns a new Discovery given a URL, heartbeat and ttl settings. // Returns an error if the URL scheme is not supported. func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { scheme, uri := parse(rawurl) if backend, exists := backends[scheme]; exists { log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service") err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) return backend, err } return nil, ErrNotSupported } docker-1.10.3/pkg/discovery/discovery.go000066400000000000000000000020731267010174400201750ustar00rootroot00000000000000package discovery import ( "errors" "time" ) var ( // ErrNotSupported is returned when a discovery service is not supported. ErrNotSupported = errors.New("discovery service not supported") // ErrNotImplemented is returned when discovery feature is not implemented // by discovery backend. ErrNotImplemented = errors.New("not implemented in this discovery service") ) // Watcher provides watching over a cluster for nodes joining and leaving. type Watcher interface { // Watch the discovery for entry changes. // Returns a channel that will receive changes or an error. // Providing a non-nil stopCh can be used to stop watching. Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) } // Backend is implemented by discovery backends which manage cluster entries. type Backend interface { // Watcher must be provided by every backend. 
Watcher // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. Initialize(string, time.Duration, time.Duration, map[string]string) error // Register to the discovery. Register(string) error } docker-1.10.3/pkg/discovery/discovery_test.go000066400000000000000000000074361267010174400212440ustar00rootroot00000000000000package discovery import ( "testing" "github.com/go-check/check" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } type DiscoverySuite struct{} var _ = check.Suite(&DiscoverySuite{}) func (s *DiscoverySuite) TestNewEntry(c *check.C) { entry, err := NewEntry("127.0.0.1:2375") c.Assert(err, check.IsNil) c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) c.Assert(entry.String(), check.Equals, "127.0.0.1:2375") _, err = NewEntry("127.0.0.1") c.Assert(err, check.NotNil) } func (s *DiscoverySuite) TestParse(c *check.C) { scheme, uri := parse("127.0.0.1:2375") c.Assert(scheme, check.Equals, "nodes") c.Assert(uri, check.Equals, "127.0.0.1:2375") scheme, uri = parse("localhost:2375") c.Assert(scheme, check.Equals, "nodes") c.Assert(uri, check.Equals, "localhost:2375") scheme, uri = parse("scheme://127.0.0.1:2375") c.Assert(scheme, check.Equals, "scheme") c.Assert(uri, check.Equals, "127.0.0.1:2375") scheme, uri = parse("scheme://localhost:2375") c.Assert(scheme, check.Equals, "scheme") c.Assert(uri, check.Equals, "localhost:2375") scheme, uri = parse("") c.Assert(scheme, check.Equals, "nodes") c.Assert(uri, check.Equals, "") } func (s *DiscoverySuite) TestCreateEntries(c *check.C) { entries, err := CreateEntries(nil) c.Assert(entries, check.DeepEquals, Entries{}) c.Assert(err, check.IsNil) entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) c.Assert(err, check.IsNil) expected := Entries{ &Entry{Host: "127.0.0.1", Port: "2375"}, &Entry{Host: "127.0.0.2", Port: "2375"}, } c.Assert(entries.Equals(expected), check.Equals, true) _, err = 
CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) c.Assert(err, check.NotNil) } func (s *DiscoverySuite) TestContainsEntry(c *check.C) { entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) c.Assert(err, check.IsNil) c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false) } func (s *DiscoverySuite) TestEntriesEquality(c *check.C) { entries := Entries{ &Entry{Host: "127.0.0.1", Port: "2375"}, &Entry{Host: "127.0.0.2", Port: "2375"}, } // Same c.Assert(entries.Equals(Entries{ &Entry{Host: "127.0.0.1", Port: "2375"}, &Entry{Host: "127.0.0.2", Port: "2375"}, }), check. Equals, true) // Different size c.Assert(entries.Equals(Entries{ &Entry{Host: "127.0.0.1", Port: "2375"}, &Entry{Host: "127.0.0.2", Port: "2375"}, &Entry{Host: "127.0.0.3", Port: "2375"}, }), check. Equals, false) // Different content c.Assert(entries.Equals(Entries{ &Entry{Host: "127.0.0.1", Port: "2375"}, &Entry{Host: "127.0.0.42", Port: "2375"}, }), check. 
Equals, false) } func (s *DiscoverySuite) TestEntriesDiff(c *check.C) { entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} entries := Entries{entry1, entry2} // No diff added, removed := entries.Diff(Entries{entry2, entry1}) c.Assert(added, check.HasLen, 0) c.Assert(removed, check.HasLen, 0) // Add added, removed = entries.Diff(Entries{entry2, entry3, entry1}) c.Assert(added, check.HasLen, 1) c.Assert(added.Contains(entry3), check.Equals, true) c.Assert(removed, check.HasLen, 0) // Remove added, removed = entries.Diff(Entries{entry2}) c.Assert(added, check.HasLen, 0) c.Assert(removed, check.HasLen, 1) c.Assert(removed.Contains(entry1), check.Equals, true) // Add and remove added, removed = entries.Diff(Entries{entry1, entry3}) c.Assert(added, check.HasLen, 1) c.Assert(added.Contains(entry3), check.Equals, true) c.Assert(removed, check.HasLen, 1) c.Assert(removed.Contains(entry2), check.Equals, true) } docker-1.10.3/pkg/discovery/entry.go000066400000000000000000000036061267010174400173320ustar00rootroot00000000000000package discovery import ( "fmt" "net" ) // NewEntry creates a new entry. func NewEntry(url string) (*Entry, error) { host, port, err := net.SplitHostPort(url) if err != nil { return nil, err } return &Entry{host, port}, nil } // An Entry represents a host. type Entry struct { Host string Port string } // Equals returns true if cmp contains the same data. func (e *Entry) Equals(cmp *Entry) bool { return e.Host == cmp.Host && e.Port == cmp.Port } // String returns the string form of an entry. func (e *Entry) String() string { return fmt.Sprintf("%s:%s", e.Host, e.Port) } // Entries is a list of *Entry with some helpers. type Entries []*Entry // Equals returns true if cmp contains the same data. func (e Entries) Equals(cmp Entries) bool { // Check if the file has really changed. 
if len(e) != len(cmp) { return false } for i := range e { if !e[i].Equals(cmp[i]) { return false } } return true } // Contains returns true if the Entries contain a given Entry. func (e Entries) Contains(entry *Entry) bool { for _, curr := range e { if curr.Equals(entry) { return true } } return false } // Diff compares two entries and returns the added and removed entries. func (e Entries) Diff(cmp Entries) (Entries, Entries) { added := Entries{} for _, entry := range cmp { if !e.Contains(entry) { added = append(added, entry) } } removed := Entries{} for _, entry := range e { if !cmp.Contains(entry) { removed = append(removed, entry) } } return added, removed } // CreateEntries returns an array of entries based on the given addresses. func CreateEntries(addrs []string) (Entries, error) { entries := Entries{} if addrs == nil { return entries, nil } for _, addr := range addrs { if len(addr) == 0 { continue } entry, err := NewEntry(addr) if err != nil { return nil, err } entries = append(entries, entry) } return entries, nil } docker-1.10.3/pkg/discovery/file/000077500000000000000000000000001267010174400165545ustar00rootroot00000000000000docker-1.10.3/pkg/discovery/file/file.go000066400000000000000000000043361267010174400200300ustar00rootroot00000000000000package file import ( "fmt" "io/ioutil" "strings" "time" "github.com/docker/docker/pkg/discovery" ) // Discovery is exported type Discovery struct { heartbeat time.Duration path string } func init() { Init() } // Init is exported func Init() { discovery.Register("file", &Discovery{}) } // Initialize is exported func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { s.path = path s.heartbeat = heartbeat return nil } func parseFileContent(content []byte) []string { var result []string for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { line = strings.TrimSpace(line) // Ignoring line starts with # if strings.HasPrefix(line, "#") { 
continue } // Inlined # comment also ignored. if strings.Contains(line, "#") { line = line[0:strings.Index(line, "#")] // Trim additional spaces caused by above stripping. line = strings.TrimSpace(line) } for _, ip := range discovery.Generate(line) { result = append(result, ip) } } return result } func (s *Discovery) fetch() (discovery.Entries, error) { fileContent, err := ioutil.ReadFile(s.path) if err != nil { return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) } return discovery.CreateEntries(parseFileContent(fileContent)) } // Watch is exported func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { ch := make(chan discovery.Entries) errCh := make(chan error) ticker := time.NewTicker(s.heartbeat) go func() { defer close(errCh) defer close(ch) // Send the initial entries if available. currentEntries, err := s.fetch() if err != nil { errCh <- err } else { ch <- currentEntries } // Periodically send updates. for { select { case <-ticker.C: newEntries, err := s.fetch() if err != nil { errCh <- err continue } // Check if the file has really changed. if !newEntries.Equals(currentEntries) { ch <- newEntries } currentEntries = newEntries case <-stopCh: ticker.Stop() return } } }() return ch, errCh } // Register is exported func (s *Discovery) Register(addr string) error { return discovery.ErrNotImplemented } docker-1.10.3/pkg/discovery/file/file_test.go000066400000000000000000000060101267010174400210560ustar00rootroot00000000000000package file import ( "io/ioutil" "os" "testing" "github.com/docker/docker/pkg/discovery" "github.com/go-check/check" ) // Hook up gocheck into the "go test" runner. 
func Test(t *testing.T) { check.TestingT(t) } type DiscoverySuite struct{} var _ = check.Suite(&DiscoverySuite{}) func (s *DiscoverySuite) TestInitialize(c *check.C) { d := &Discovery{} d.Initialize("/path/to/file", 1000, 0, nil) c.Assert(d.path, check.Equals, "/path/to/file") } func (s *DiscoverySuite) TestNew(c *check.C) { d, err := discovery.New("file:///path/to/file", 0, 0, nil) c.Assert(err, check.IsNil) c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file") } func (s *DiscoverySuite) TestContent(c *check.C) { data := ` 1.1.1.[1:2]:1111 2.2.2.[2:4]:2222 ` ips := parseFileContent([]byte(data)) c.Assert(ips, check.HasLen, 5) c.Assert(ips[0], check.Equals, "1.1.1.1:1111") c.Assert(ips[1], check.Equals, "1.1.1.2:1111") c.Assert(ips[2], check.Equals, "2.2.2.2:2222") c.Assert(ips[3], check.Equals, "2.2.2.3:2222") c.Assert(ips[4], check.Equals, "2.2.2.4:2222") } func (s *DiscoverySuite) TestRegister(c *check.C) { discovery := &Discovery{path: "/path/to/file"} c.Assert(discovery.Register("0.0.0.0"), check.NotNil) } func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) { data := ` ### test ### 1.1.1.1:1111 # inline comment # 2.2.2.2:2222 ### empty line with comment 3.3.3.3:3333 ### test ### ` ips := parseFileContent([]byte(data)) c.Assert(ips, check.HasLen, 2) c.Assert("1.1.1.1:1111", check.Equals, ips[0]) c.Assert("3.3.3.3:3333", check.Equals, ips[1]) } func (s *DiscoverySuite) TestWatch(c *check.C) { data := ` 1.1.1.1:1111 2.2.2.2:2222 ` expected := discovery.Entries{ &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, } // Create a temporary file and remove it. tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") c.Assert(err, check.IsNil) c.Assert(tmp.Close(), check.IsNil) c.Assert(os.Remove(tmp.Name()), check.IsNil) // Set up file discovery. 
d := &Discovery{} d.Initialize(tmp.Name(), 1000, 0, nil) stopCh := make(chan struct{}) ch, errCh := d.Watch(stopCh) // Make sure it fires errors since the file doesn't exist. c.Assert(<-errCh, check.NotNil) // We have to drain the error channel otherwise Watch will get stuck. go func() { for range errCh { } }() // Write the file and make sure we get the expected value back. c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil) c.Assert(<-ch, check.DeepEquals, expected) // Add a new entry and look it up. expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) c.Assert(err, check.IsNil) c.Assert(f, check.NotNil) _, err = f.WriteString("\n3.3.3.3:3333\n") c.Assert(err, check.IsNil) f.Close() c.Assert(<-ch, check.DeepEquals, expected) // Stop and make sure it closes all channels. close(stopCh) c.Assert(<-ch, check.IsNil) c.Assert(<-errCh, check.IsNil) } docker-1.10.3/pkg/discovery/generator.go000066400000000000000000000012131267010174400201470ustar00rootroot00000000000000package discovery import ( "fmt" "regexp" "strconv" ) // Generate takes care of IP generation func Generate(pattern string) []string { re, _ := regexp.Compile(`\[(.+):(.+)\]`) submatch := re.FindStringSubmatch(pattern) if submatch == nil { return []string{pattern} } from, err := strconv.Atoi(submatch[1]) if err != nil { return []string{pattern} } to, err := strconv.Atoi(submatch[2]) if err != nil { return []string{pattern} } template := re.ReplaceAllString(pattern, "%d") var result []string for val := from; val <= to; val++ { entry := fmt.Sprintf(template, val) result = append(result, entry) } return result } docker-1.10.3/pkg/discovery/generator_test.go000066400000000000000000000034211267010174400212110ustar00rootroot00000000000000package discovery import ( "github.com/go-check/check" ) func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) { ips := Generate("127.0.0.1") c.Assert(len(ips), 
check.Equals, 1) c.Assert(ips[0], check.Equals, "127.0.0.1") } func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) { ips := Generate("127.0.0.1:8080") c.Assert(len(ips), check.Equals, 1) c.Assert(ips[0], check.Equals, "127.0.0.1:8080") } func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) { ips := Generate("127.0.0.[1]") c.Assert(len(ips), check.Equals, 1) c.Assert(ips[0], check.Equals, "127.0.0.[1]") } func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) { ips := Generate("127.0.0.[1:11]:2375") c.Assert(len(ips), check.Equals, 11) c.Assert(ips[0], check.Equals, "127.0.0.1:2375") c.Assert(ips[1], check.Equals, "127.0.0.2:2375") c.Assert(ips[2], check.Equals, "127.0.0.3:2375") c.Assert(ips[3], check.Equals, "127.0.0.4:2375") c.Assert(ips[4], check.Equals, "127.0.0.5:2375") c.Assert(ips[5], check.Equals, "127.0.0.6:2375") c.Assert(ips[6], check.Equals, "127.0.0.7:2375") c.Assert(ips[7], check.Equals, "127.0.0.8:2375") c.Assert(ips[8], check.Equals, "127.0.0.9:2375") c.Assert(ips[9], check.Equals, "127.0.0.10:2375") c.Assert(ips[10], check.Equals, "127.0.0.11:2375") } func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) { malformedInput := "127.0.0.[x:11]:2375" ips := Generate(malformedInput) c.Assert(len(ips), check.Equals, 1) c.Assert(ips[0], check.Equals, malformedInput) } func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) { malformedInput := "127.0.0.[1:x]:2375" ips := Generate(malformedInput) c.Assert(len(ips), check.Equals, 1) c.Assert(ips[0], check.Equals, malformedInput) } docker-1.10.3/pkg/discovery/kv/000077500000000000000000000000001267010174400162555ustar00rootroot00000000000000docker-1.10.3/pkg/discovery/kv/kv.go000066400000000000000000000114201267010174400172220ustar00rootroot00000000000000package kv import ( "fmt" "path" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/discovery" "github.com/docker/go-connections/tlsconfig" 
"github.com/docker/libkv" "github.com/docker/libkv/store" "github.com/docker/libkv/store/consul" "github.com/docker/libkv/store/etcd" "github.com/docker/libkv/store/zookeeper" ) const ( defaultDiscoveryPath = "docker/nodes" ) // Discovery is exported type Discovery struct { backend store.Backend store store.Store heartbeat time.Duration ttl time.Duration prefix string path string } func init() { Init() } // Init is exported func Init() { // Register to libkv zookeeper.Register() consul.Register() etcd.Register() // Register to internal discovery service discovery.Register("zk", &Discovery{backend: store.ZK}) discovery.Register("consul", &Discovery{backend: store.CONSUL}) discovery.Register("etcd", &Discovery{backend: store.ETCD}) } // Initialize is exported func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { var ( parts = strings.SplitN(uris, "/", 2) addrs = strings.Split(parts[0], ",") err error ) // A custom prefix to the path can be optionally used. 
if len(parts) == 2 { s.prefix = parts[1] } s.heartbeat = heartbeat s.ttl = ttl // Use a custom path if specified in discovery options dpath := defaultDiscoveryPath if clusterOpts["kv.path"] != "" { dpath = clusterOpts["kv.path"] } s.path = path.Join(s.prefix, dpath) var config *store.Config if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { log.Info("Initializing discovery with TLS") tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ CAFile: clusterOpts["kv.cacertfile"], CertFile: clusterOpts["kv.certfile"], KeyFile: clusterOpts["kv.keyfile"], }) if err != nil { return err } config = &store.Config{ // Set ClientTLS to trigger https (bug in libkv/etcd) ClientTLS: &store.ClientTLSConfig{ CACertFile: clusterOpts["kv.cacertfile"], CertFile: clusterOpts["kv.certfile"], KeyFile: clusterOpts["kv.keyfile"], }, // The actual TLS config that will be used TLS: tlsConfig, } } else { log.Info("Initializing discovery without TLS") } // Creates a new store, will ignore options given // if not supported by the chosen store s.store, err = libkv.NewStore(s.backend, addrs, config) return err } // Watch the store until either there's a store error or we receive a stop request. // Returns false if we shouldn't attempt watching the store anymore (stop request received). func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { for { select { case pairs := <-watchCh: if pairs == nil { return true } log.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) // Convert `KVPair` into `discovery.Entry`. addrs := make([]string, len(pairs)) for _, pair := range pairs { addrs = append(addrs, string(pair.Value)) } entries, err := discovery.CreateEntries(addrs) if err != nil { errCh <- err } else { discoveryCh <- entries } case <-stopCh: // We were requested to stop watching. 
return false } } } // Watch is exported func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { ch := make(chan discovery.Entries) errCh := make(chan error) go func() { defer close(ch) defer close(errCh) // Forever: Create a store watch, watch until we get an error and then try again. // Will only stop if we receive a stopCh request. for { // Create the path to watch if it does not exist yet exists, err := s.store.Exists(s.path) if err != nil { errCh <- err } if !exists { if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { errCh <- err } } // Set up a watch. watchCh, err := s.store.WatchTree(s.path, stopCh) if err != nil { errCh <- err } else { if !s.watchOnce(stopCh, watchCh, ch, errCh) { return } } // If we get here it means the store watch channel was closed. This // is unexpected so let's retry later. errCh <- fmt.Errorf("Unexpected watch error") time.Sleep(s.heartbeat) } }() return ch, errCh } // Register is exported func (s *Discovery) Register(addr string) error { opts := &store.WriteOptions{TTL: s.ttl} return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) } // Store returns the underlying store used by KV discovery. func (s *Discovery) Store() store.Store { return s.store } // Prefix returns the store prefix func (s *Discovery) Prefix() string { return s.prefix } docker-1.10.3/pkg/discovery/kv/kv_test.go000066400000000000000000000243501267010174400202670ustar00rootroot00000000000000package kv import ( "errors" "io/ioutil" "os" "path" "testing" "time" "github.com/docker/docker/pkg/discovery" "github.com/docker/libkv" "github.com/docker/libkv/store" "github.com/go-check/check" ) // Hook up gocheck into the "go test" runner. 
func Test(t *testing.T) { check.TestingT(t) } type DiscoverySuite struct{} var _ = check.Suite(&DiscoverySuite{}) func (ds *DiscoverySuite) TestInitialize(c *check.C) { storeMock := &FakeStore{ Endpoints: []string{"127.0.0.1"}, } d := &Discovery{backend: store.CONSUL} d.Initialize("127.0.0.1", 0, 0, nil) d.store = storeMock s := d.store.(*FakeStore) c.Assert(s.Endpoints, check.HasLen, 1) c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1") c.Assert(d.path, check.Equals, defaultDiscoveryPath) storeMock = &FakeStore{ Endpoints: []string{"127.0.0.1:1234"}, } d = &Discovery{backend: store.CONSUL} d.Initialize("127.0.0.1:1234/path", 0, 0, nil) d.store = storeMock s = d.store.(*FakeStore) c.Assert(s.Endpoints, check.HasLen, 1) c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) storeMock = &FakeStore{ Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, } d = &Discovery{backend: store.CONSUL} d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) d.store = storeMock s = d.store.(*FakeStore) c.Assert(s.Endpoints, check.HasLen, 3) c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234") c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234") c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) } // Extremely limited mock store so we can test initialization type Mock struct { // Endpoints passed to InitializeMock Endpoints []string // Options passed to InitializeMock Options *store.Config } func NewMock(endpoints []string, options *store.Config) (store.Store, error) { s := &Mock{} s.Endpoints = endpoints s.Options = options return s, nil } func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { return errors.New("Put not supported") } func (s *Mock) Get(key string) (*store.KVPair, error) { return nil, errors.New("Get not supported") } func (s *Mock) Delete(key string) error { 
return errors.New("Delete not supported") } // Exists mock func (s *Mock) Exists(key string) (bool, error) { return false, errors.New("Exists not supported") } // Watch mock func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { return nil, errors.New("Watch not supported") } // WatchTree mock func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { return nil, errors.New("WatchTree not supported") } // NewLock mock func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { return nil, errors.New("NewLock not supported") } // List mock func (s *Mock) List(prefix string) ([]*store.KVPair, error) { return nil, errors.New("List not supported") } // DeleteTree mock func (s *Mock) DeleteTree(prefix string) error { return errors.New("DeleteTree not supported") } // AtomicPut mock func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { return false, nil, errors.New("AtomicPut not supported") } // AtomicDelete mock func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { return false, errors.New("AtomicDelete not supported") } // Close mock func (s *Mock) Close() { return } func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { cert := `-----BEGIN CERTIFICATE----- MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds +J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ 
BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz 7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY 8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX FpTxDmJHEV4bzUzh -----END CERTIFICATE----- ` key := `-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 +zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO 1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV 
PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= -----END RSA PRIVATE KEY----- ` certFile, err := ioutil.TempFile("", "cert") c.Assert(err, check.IsNil) defer os.Remove(certFile.Name()) certFile.Write([]byte(cert)) certFile.Close() keyFile, err := ioutil.TempFile("", "key") c.Assert(err, check.IsNil) defer os.Remove(keyFile.Name()) keyFile.Write([]byte(key)) keyFile.Close() libkv.AddStore("mock", NewMock) d := &Discovery{backend: "mock"} err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ "kv.cacertfile": certFile.Name(), "kv.certfile": certFile.Name(), "kv.keyfile": keyFile.Name(), }) c.Assert(err, check.IsNil) s := d.store.(*Mock) c.Assert(s.Options.TLS, check.NotNil) c.Assert(s.Options.TLS.RootCAs, check.NotNil) c.Assert(s.Options.TLS.Certificates, check.HasLen, 1) } func (ds *DiscoverySuite) TestWatch(c *check.C) { mockCh := make(chan []*store.KVPair) storeMock := &FakeStore{ Endpoints: []string{"127.0.0.1:1234"}, mockKVChan: mockCh, } d := &Discovery{backend: store.CONSUL} d.Initialize("127.0.0.1:1234/path", 0, 0, nil) d.store = storeMock expected := discovery.Entries{ &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, } kvs := []*store.KVPair{ {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, } stopCh := make(chan struct{}) ch, errCh := d.Watch(stopCh) // It should fire an error since the first WatchTree call failed. c.Assert(<-errCh, check.ErrorMatches, "test error") // We have to drain the error channel otherwise Watch will get stuck. go func() { for range errCh { } }() // Push the entries into the store channel and make sure discovery emits. mockCh <- kvs c.Assert(<-ch, check.DeepEquals, expected) // Add a new entry. 
expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) mockCh <- kvs c.Assert(<-ch, check.DeepEquals, expected) close(mockCh) // Give it enough time to call WatchTree. time.Sleep(3) // Stop and make sure it closes all channels. close(stopCh) c.Assert(<-ch, check.IsNil) c.Assert(<-errCh, check.IsNil) } // FakeStore implements store.Store methods. It mocks all store // function in a simple, naive way. type FakeStore struct { Endpoints []string Options *store.Config mockKVChan <-chan []*store.KVPair watchTreeCallCount int } func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error { return nil } func (s *FakeStore) Get(key string) (*store.KVPair, error) { return nil, nil } func (s *FakeStore) Delete(key string) error { return nil } func (s *FakeStore) Exists(key string) (bool, error) { return true, nil } func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { return nil, nil } // WatchTree will fail the first time, and return the mockKVchan afterwards. // This is the behavior we need for testing.. If we need 'moar', should update this. 
func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { if s.watchTreeCallCount == 0 { s.watchTreeCallCount = 1 return nil, errors.New("test error") } // First calls error return s.mockKVChan, nil } func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { return nil, nil } func (s *FakeStore) List(directory string) ([]*store.KVPair, error) { return []*store.KVPair{}, nil } func (s *FakeStore) DeleteTree(directory string) error { return nil } func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { return true, nil, nil } func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { return true, nil } func (s *FakeStore) Close() { } docker-1.10.3/pkg/discovery/memory/000077500000000000000000000000001267010174400171455ustar00rootroot00000000000000docker-1.10.3/pkg/discovery/memory/memory.go000066400000000000000000000033121267010174400210030ustar00rootroot00000000000000package memory import ( "time" "github.com/docker/docker/pkg/discovery" ) // Discovery implements a descovery backend that keeps // data in memory. type Discovery struct { heartbeat time.Duration values []string } func init() { Init() } // Init registers the memory backend on demand. func Init() { discovery.Register("memory", &Discovery{}) } // Initialize sets the heartbeat for the memory backend. func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { s.heartbeat = heartbeat s.values = make([]string, 0) return nil } // Watch sends periodic discovery updates to a channel. func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { ch := make(chan discovery.Entries) errCh := make(chan error) ticker := time.NewTicker(s.heartbeat) go func() { defer close(errCh) defer close(ch) // Send the initial entries if available. 
var currentEntries discovery.Entries if len(s.values) > 0 { var err error currentEntries, err = discovery.CreateEntries(s.values) if err != nil { errCh <- err } else { ch <- currentEntries } } // Periodically send updates. for { select { case <-ticker.C: newEntries, err := discovery.CreateEntries(s.values) if err != nil { errCh <- err continue } // Check if the file has really changed. if !newEntries.Equals(currentEntries) { ch <- newEntries } currentEntries = newEntries case <-stopCh: ticker.Stop() return } } }() return ch, errCh } // Register adds a new address to the discovery. func (s *Discovery) Register(addr string) error { s.values = append(s.values, addr) return nil } docker-1.10.3/pkg/discovery/memory/memory_test.go000066400000000000000000000021061267010174400220420ustar00rootroot00000000000000package memory import ( "testing" "github.com/docker/docker/pkg/discovery" "github.com/go-check/check" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } type discoverySuite struct{} var _ = check.Suite(&discoverySuite{}) func (s *discoverySuite) TestWatch(c *check.C) { d := &Discovery{} d.Initialize("foo", 1000, 0, nil) stopCh := make(chan struct{}) ch, errCh := d.Watch(stopCh) // We have to drain the error channel otherwise Watch will get stuck. go func() { for range errCh { } }() expected := discovery.Entries{ &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, } c.Assert(d.Register("1.1.1.1:1111"), check.IsNil) c.Assert(<-ch, check.DeepEquals, expected) expected = discovery.Entries{ &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, } c.Assert(d.Register("2.2.2.2:2222"), check.IsNil) c.Assert(<-ch, check.DeepEquals, expected) // Stop and make sure it closes all channels. 
close(stopCh) c.Assert(<-ch, check.IsNil) c.Assert(<-errCh, check.IsNil) } docker-1.10.3/pkg/discovery/nodes/000077500000000000000000000000001267010174400167455ustar00rootroot00000000000000docker-1.10.3/pkg/discovery/nodes/nodes.go000066400000000000000000000021111267010174400203770ustar00rootroot00000000000000package nodes import ( "fmt" "strings" "time" "github.com/docker/docker/pkg/discovery" ) // Discovery is exported type Discovery struct { entries discovery.Entries } func init() { Init() } // Init is exported func Init() { discovery.Register("nodes", &Discovery{}) } // Initialize is exported func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { for _, input := range strings.Split(uris, ",") { for _, ip := range discovery.Generate(input) { entry, err := discovery.NewEntry(ip) if err != nil { return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) } s.entries = append(s.entries, entry) } } return nil } // Watch is exported func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { ch := make(chan discovery.Entries) go func() { defer close(ch) ch <- s.entries <-stopCh }() return ch, nil } // Register is exported func (s *Discovery) Register(addr string) error { return discovery.ErrNotImplemented } docker-1.10.3/pkg/discovery/nodes/nodes_test.go000066400000000000000000000030021267010174400214360ustar00rootroot00000000000000package nodes import ( "testing" "github.com/docker/docker/pkg/discovery" "github.com/go-check/check" ) // Hook up gocheck into the "go test" runner. 
func Test(t *testing.T) { check.TestingT(t) } type DiscoverySuite struct{} var _ = check.Suite(&DiscoverySuite{}) func (s *DiscoverySuite) TestInitialize(c *check.C) { d := &Discovery{} d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) c.Assert(len(d.entries), check.Equals, 2) c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222") } func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) { d := &Discovery{} d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) c.Assert(len(d.entries), check.Equals, 5) c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111") c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222") c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222") c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222") } func (s *DiscoverySuite) TestWatch(c *check.C) { d := &Discovery{} d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) expected := discovery.Entries{ &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, } ch, _ := d.Watch(nil) c.Assert(expected.Equals(<-ch), check.Equals, true) } func (s *DiscoverySuite) TestRegister(c *check.C) { d := &Discovery{} c.Assert(d.Register("0.0.0.0"), check.NotNil) } docker-1.10.3/pkg/filenotify/000077500000000000000000000000001267010174400157765ustar00rootroot00000000000000docker-1.10.3/pkg/filenotify/filenotify.go000066400000000000000000000022651267010174400205020ustar00rootroot00000000000000// Package filenotify provides a mechanism for watching file(s) for changes. // Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. // These are wrapped up in a common interface so that either can be used interchangeably in your code. 
package filenotify import "gopkg.in/fsnotify.v1" // FileWatcher is an interface for implementing file notification watchers type FileWatcher interface { Events() <-chan fsnotify.Event Errors() <-chan error Add(name string) error Remove(name string) error Close() error } // New tries to use an fs-event watcher, and falls back to the poller if there is an error func New() (FileWatcher, error) { if watcher, err := NewEventWatcher(); err == nil { return watcher, nil } return NewPollingWatcher(), nil } // NewPollingWatcher returns a poll-based file watcher func NewPollingWatcher() FileWatcher { return &filePoller{ events: make(chan fsnotify.Event), errors: make(chan error), } } // NewEventWatcher returns an fs-event based file watcher func NewEventWatcher() (FileWatcher, error) { watcher, err := fsnotify.NewWatcher() if err != nil { return nil, err } return &fsNotifyWatcher{watcher}, nil } docker-1.10.3/pkg/filenotify/fsnotify.go000066400000000000000000000007111267010174400201650ustar00rootroot00000000000000package filenotify import "gopkg.in/fsnotify.v1" // fsNotify wraps the fsnotify package to satisfy the FileNotifer interface type fsNotifyWatcher struct { *fsnotify.Watcher } // GetEvents returns the fsnotify event channel receiver func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { return w.Watcher.Events } // GetErrors returns the fsnotify error channel receiver func (w *fsNotifyWatcher) Errors() <-chan error { return w.Watcher.Errors } docker-1.10.3/pkg/filenotify/poller.go000066400000000000000000000117461267010174400176330ustar00rootroot00000000000000package filenotify import ( "errors" "fmt" "os" "sync" "time" "github.com/Sirupsen/logrus" "gopkg.in/fsnotify.v1" ) var ( // errPollerClosed is returned when the poller is closed errPollerClosed = errors.New("poller is closed") // errNoSuchPoller is returned when trying to remove a watch that doesn't exist errNoSuchWatch = errors.New("poller does not exist") ) // watchWaitTime is the time to wait between file 
poll loops const watchWaitTime = 200 * time.Millisecond // filePoller is used to poll files for changes, especially in cases where fsnotify // can't be run (e.g. when inotify handles are exhausted) // filePoller satisfies the FileWatcher interface type filePoller struct { // watches is the list of files currently being polled, close the associated channel to stop the watch watches map[string]chan struct{} // events is the channel to listen to for watch events events chan fsnotify.Event // errors is the channel to listen to for watch errors errors chan error // mu locks the poller for modification mu sync.Mutex // closed is used to specify when the poller has already closed closed bool } // Add adds a filename to the list of watches // once added the file is polled for changes in a separate goroutine func (w *filePoller) Add(name string) error { w.mu.Lock() defer w.mu.Unlock() if w.closed == true { return errPollerClosed } f, err := os.Open(name) if err != nil { return err } fi, err := os.Stat(name) if err != nil { return err } if w.watches == nil { w.watches = make(map[string]chan struct{}) } if _, exists := w.watches[name]; exists { return fmt.Errorf("watch exists") } chClose := make(chan struct{}) w.watches[name] = chClose go w.watch(f, fi, chClose) return nil } // Remove stops and removes watch with the specified name func (w *filePoller) Remove(name string) error { w.mu.Lock() defer w.mu.Unlock() return w.remove(name) } func (w *filePoller) remove(name string) error { if w.closed == true { return errPollerClosed } chClose, exists := w.watches[name] if !exists { return errNoSuchWatch } close(chClose) delete(w.watches, name) return nil } // Events returns the event channel // This is used for notifications on events about watched files func (w *filePoller) Events() <-chan fsnotify.Event { return w.events } // Errors returns the errors channel // This is used for notifications about errors on watched files func (w *filePoller) Errors() <-chan error { return 
w.errors } // Close closes the poller // All watches are stopped, removed, and the poller cannot be added to func (w *filePoller) Close() error { w.mu.Lock() defer w.mu.Unlock() if w.closed { return nil } w.closed = true for name := range w.watches { w.remove(name) delete(w.watches, name) } // channels will be closed by GC, we don't do it to avoid panic in send // functions // close(w.events) // close(w.errors) return nil } // sendEvent publishes the specified event to the events channel func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { select { case w.events <- e: case <-chClose: return fmt.Errorf("closed") } return nil } // sendErr publishes the specified error to the errors channel func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { select { case w.errors <- e: case <-chClose: return fmt.Errorf("closed") } return nil } // watch is responsible for polling the specified file for changes // upon finding changes to a file or errors, sendEvent/sendErr is called func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { for { time.Sleep(watchWaitTime) select { case <-chClose: logrus.Debugf("watch for %s closed", f.Name()) return default: } fi, err := os.Stat(f.Name()) if err != nil { // if we got an error here and lastFi is not set, we can presume that nothing has changed // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called if lastFi == nil { continue } // If it doesn't exist at this point, it must have been removed // no need to send the error here since this is a valid operation if os.IsNotExist(err) { if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { return } lastFi = nil continue } // at this point, send the error if err := w.sendErr(err, chClose); err != nil { return } continue } if lastFi == nil { if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, 
chClose); err != nil { return } lastFi = fi continue } if fi.Mode() != lastFi.Mode() { if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { return } lastFi = fi continue } if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { return } lastFi = fi continue } } } docker-1.10.3/pkg/filenotify/poller_test.go000066400000000000000000000044711267010174400206670ustar00rootroot00000000000000package filenotify import ( "fmt" "io/ioutil" "os" "testing" "time" "gopkg.in/fsnotify.v1" ) func TestPollerAddRemove(t *testing.T) { w := NewPollingWatcher() if err := w.Add("no-such-file"); err == nil { t.Fatal("should have gotten error when adding a non-existent file") } if err := w.Remove("no-such-file"); err == nil { t.Fatal("should have gotten error when removing non-existent watch") } f, err := ioutil.TempFile("", "asdf") if err != nil { t.Fatal(err) } defer os.RemoveAll(f.Name()) if err := w.Add(f.Name()); err != nil { t.Fatal(err) } if err := w.Remove(f.Name()); err != nil { t.Fatal(err) } } func TestPollerEvent(t *testing.T) { w := NewPollingWatcher() f, err := ioutil.TempFile("", "test-poller") if err != nil { t.Fatal("error creating temp file") } defer os.RemoveAll(f.Name()) f.Close() if err := w.Add(f.Name()); err != nil { t.Fatal(err) } select { case <-w.Events(): t.Fatal("got event before anything happened") case <-w.Errors(): t.Fatal("got error before anything happened") default: } if err := ioutil.WriteFile(f.Name(), []byte("hello"), 644); err != nil { t.Fatal(err) } if err := assertEvent(w, fsnotify.Write); err != nil { t.Fatal(err) } if err := os.Chmod(f.Name(), 600); err != nil { t.Fatal(err) } if err := assertEvent(w, fsnotify.Chmod); err != nil { t.Fatal(err) } if err := os.Remove(f.Name()); err != nil { t.Fatal(err) } if err := assertEvent(w, fsnotify.Remove); err != nil { t.Fatal(err) } } func 
TestPollerClose(t *testing.T) { w := NewPollingWatcher() if err := w.Close(); err != nil { t.Fatal(err) } // test double-close if err := w.Close(); err != nil { t.Fatal(err) } f, err := ioutil.TempFile("", "asdf") if err != nil { t.Fatal(err) } defer os.RemoveAll(f.Name()) if err := w.Add(f.Name()); err == nil { t.Fatal("should have gotten error adding watch for closed watcher") } } func assertEvent(w FileWatcher, eType fsnotify.Op) error { var err error select { case e := <-w.Events(): if e.Op != eType { err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e) } case e := <-w.Errors(): err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e) case <-time.After(watchWaitTime * 3): err = fmt.Errorf("timeout waiting for event %v", eType) } return err } docker-1.10.3/pkg/fileutils/000077500000000000000000000000001267010174400156265ustar00rootroot00000000000000docker-1.10.3/pkg/fileutils/fileutils.go000066400000000000000000000173261267010174400201660ustar00rootroot00000000000000package fileutils import ( "errors" "fmt" "io" "os" "path/filepath" "regexp" "strings" "text/scanner" "github.com/Sirupsen/logrus" ) // exclusion return true if the specified pattern is an exclusion func exclusion(pattern string) bool { return pattern[0] == '!' } // empty return true if the specified pattern is empty func empty(pattern string) bool { return pattern == "" } // CleanPatterns takes a slice of patterns returns a new // slice of patterns cleaned with filepath.Clean, stripped // of any empty patterns and lets the caller know whether the // slice contains any exception patterns (prefixed with !). func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { // Loop over exclusion patterns and: // 1. Clean them up. // 2. Indicate whether we are dealing with any exception rules. // 3. Error if we see a single exclusion marker on it's own (!). 
cleanedPatterns := []string{} patternDirs := [][]string{} exceptions := false for _, pattern := range patterns { // Eliminate leading and trailing whitespace. pattern = strings.TrimSpace(pattern) if empty(pattern) { continue } if exclusion(pattern) { if len(pattern) == 1 { return nil, nil, false, errors.New("Illegal exclusion pattern: !") } exceptions = true } pattern = filepath.Clean(pattern) cleanedPatterns = append(cleanedPatterns, pattern) if exclusion(pattern) { pattern = pattern[1:] } patternDirs = append(patternDirs, strings.Split(pattern, "/")) } return cleanedPatterns, patternDirs, exceptions, nil } // Matches returns true if file matches any of the patterns // and isn't excluded by any of the subsequent patterns. func Matches(file string, patterns []string) (bool, error) { file = filepath.Clean(file) if file == "." { // Don't let them exclude everything, kind of silly. return false, nil } patterns, patDirs, _, err := CleanPatterns(patterns) if err != nil { return false, err } return OptimizedMatches(file, patterns, patDirs) } // OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. // It will assume that the inputs have been preprocessed and therefore the function // doesn't need to do as much error checking and clean-up. This was done to avoid // repeating these steps on each file being checked during the archive process. // The more generic fileutils.Matches() can't make these assumptions. func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { matched := false parentPath := filepath.Dir(file) parentPathDirs := strings.Split(parentPath, "/") for i, pattern := range patterns { negative := false if exclusion(pattern) { negative = true pattern = pattern[1:] } match, err := regexpMatch(pattern, file) if err != nil { return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) } if !match && parentPath != "." { // Check to see if the pattern matches one of our parent dirs. 
if len(patDirs[i]) <= len(parentPathDirs) { match, _ = regexpMatch(strings.Join(patDirs[i], "/"), strings.Join(parentPathDirs[:len(patDirs[i])], "/")) } } if match { matched = !negative } } if matched { logrus.Debugf("Skipping excluded path: %s", file) } return matched, nil } // regexpMatch tries to match the logic of filepath.Match but // does so using regexp logic. We do this so that we can expand the // wildcard set to include other things, like "**" to mean any number // of directories. This means that we should be backwards compatible // with filepath.Match(). We'll end up supporting more stuff, due to // the fact that we're using regexp, but that's ok - it does no harm. func regexpMatch(pattern, path string) (bool, error) { regStr := "^" // Do some syntax checking on the pattern. // filepath's Match() has some really weird rules that are inconsistent // so instead of trying to dup their logic, just call Match() for its // error state and if there is an error in the pattern return it. // If this becomes an issue we can remove this since its really only // needed in the error (syntax) case - which isn't really critical. if _, err := filepath.Match(pattern, path); err != nil { return false, err } // Go through the pattern and convert it to a regexp. // We use a scanner so we can support utf-8 chars. var scan scanner.Scanner scan.Init(strings.NewReader(pattern)) sl := string(os.PathSeparator) escSL := sl if sl == `\` { escSL += `\` } for scan.Peek() != scanner.EOF { ch := scan.Next() if ch == '*' { if scan.Peek() == '*' { // is some flavor of "**" scan.Next() if scan.Peek() == scanner.EOF { // is "**EOF" - to align with .gitignore just accept all regStr += ".*" } else { // is "**" regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" } // Treat **/ as ** so eat the "/" if string(scan.Peek()) == sl { scan.Next() } } else { // is "*" so map it to anything but "/" regStr += "[^" + escSL + "]*" } } else if ch == '?' { // "?" 
is any char except "/" regStr += "[^" + escSL + "]" } else if strings.Index(".$", string(ch)) != -1 { // Escape some regexp special chars that have no meaning // in golang's filepath.Match regStr += `\` + string(ch) } else if ch == '\\' { // escape next char. Note that a trailing \ in the pattern // will be left alone (but need to escape it) if sl == `\` { // On windows map "\" to "\\", meaning an escaped backslash, // and then just continue because filepath.Match on // Windows doesn't allow escaping at all regStr += escSL continue } if scan.Peek() != scanner.EOF { regStr += `\` + string(scan.Next()) } else { regStr += `\` } } else { regStr += string(ch) } } regStr += "$" res, err := regexp.MatchString(regStr, path) // Map regexp's error to filepath's so no one knows we're not using filepath if err != nil { err = filepath.ErrBadPattern } return res, err } // CopyFile copies from src to dst until either EOF is reached // on src or an error occurs. It verifies src exists and remove // the dst if it exists. func CopyFile(src, dst string) (int64, error) { cleanSrc := filepath.Clean(src) cleanDst := filepath.Clean(dst) if cleanSrc == cleanDst { return 0, nil } sf, err := os.Open(cleanSrc) if err != nil { return 0, err } defer sf.Close() if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { return 0, err } df, err := os.Create(cleanDst) if err != nil { return 0, err } defer df.Close() return io.Copy(df, sf) } // ReadSymlinkedDirectory returns the target directory of a symlink. // The target of the symbolic link may not be a file. 
func ReadSymlinkedDirectory(path string) (string, error) { var realPath string var err error if realPath, err = filepath.Abs(path); err != nil { return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) } realPathInfo, err := os.Stat(realPath) if err != nil { return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) } if !realPathInfo.Mode().IsDir() { return "", fmt.Errorf("canonical path points to a file '%s'", realPath) } return realPath, nil } // CreateIfNotExists creates a file or a directory only if it does not already exist. func CreateIfNotExists(path string, isDir bool) error { if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { if isDir { return os.MkdirAll(path, 0755) } if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { return err } f, err := os.OpenFile(path, os.O_CREATE, 0755) if err != nil { return err } f.Close() } } return nil } docker-1.10.3/pkg/fileutils/fileutils_test.go000066400000000000000000000400551267010174400212200ustar00rootroot00000000000000package fileutils import ( "io/ioutil" "os" "path" "path/filepath" "runtime" "strings" "testing" ) // CopyFile with invalid src func TestCopyFileWithInvalidSrc(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) } bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) if err == nil { t.Fatal("Should have fail to copy an invalid src file") } if bytes != 0 { t.Fatal("Should have written 0 bytes") } } // CopyFile with invalid dest func TestCopyFileWithInvalidDest(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) } src := path.Join(tempFolder, "file") err = ioutil.WriteFile(src, []byte("content"), 0740) if 
err != nil { t.Fatal(err) } bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) if err == nil { t.Fatal("Should have fail to copy an invalid src file") } if bytes != 0 { t.Fatal("Should have written 0 bytes") } } // CopyFile with same src and dest func TestCopyFileWithSameSrcAndDest(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) } file := path.Join(tempFolder, "file") err = ioutil.WriteFile(file, []byte("content"), 0740) if err != nil { t.Fatal(err) } bytes, err := CopyFile(file, file) if err != nil { t.Fatal(err) } if bytes != 0 { t.Fatal("Should have written 0 bytes as it is the same file.") } } // CopyFile with same src and dest but path is different and not clean func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) } testFolder := path.Join(tempFolder, "test") err = os.MkdirAll(testFolder, 0740) if err != nil { t.Fatal(err) } file := path.Join(testFolder, "file") sameFile := testFolder + "/../test/file" err = ioutil.WriteFile(file, []byte("content"), 0740) if err != nil { t.Fatal(err) } bytes, err := CopyFile(file, sameFile) if err != nil { t.Fatal(err) } if bytes != 0 { t.Fatal("Should have written 0 bytes as it is the same file.") } } func TestCopyFile(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) } src := path.Join(tempFolder, "src") dest := path.Join(tempFolder, "dest") ioutil.WriteFile(src, []byte("content"), 0777) ioutil.WriteFile(dest, []byte("destContent"), 0777) bytes, err := CopyFile(src, dest) if err != nil { t.Fatal(err) } if bytes != 7 { t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) } actual, err := ioutil.ReadFile(dest) if err != nil { t.Fatal(err) } if string(actual) != "content" { 
t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") } } // Reading a symlink to a directory must return the directory func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { var err error if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { t.Errorf("failed to create directory: %s", err) } if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { t.Errorf("failed to create symlink: %s", err) } var path string if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { t.Fatalf("failed to read symlink to directory: %s", err) } if path != "/tmp/testReadSymlinkToExistingDirectory" { t.Fatalf("symlink returned unexpected directory: %s", path) } if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { t.Errorf("failed to remove temporary directory: %s", err) } if err = os.Remove("/tmp/dirLinkTest"); err != nil { t.Errorf("failed to remove symlink: %s", err) } } // Reading a non-existing symlink must fail func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { var path string var err error if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { t.Fatalf("error expected for non-existing symlink") } if path != "" { t.Fatalf("expected empty path, but '%s' was returned", path) } } // Reading a symlink to a file must fail func TestReadSymlinkedDirectoryToFile(t *testing.T) { var err error var file *os.File if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { t.Fatalf("failed to create file: %s", err) } file.Close() if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { t.Errorf("failed to create symlink: %s", err) } var path string if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") } if path != "" { t.Fatalf("path should've been empty: %s", path) } if err = 
os.Remove("/tmp/testReadSymlinkToFile"); err != nil { t.Errorf("failed to remove file: %s", err) } if err = os.Remove("/tmp/fileLinkTest"); err != nil { t.Errorf("failed to remove symlink: %s", err) } } func TestWildcardMatches(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*"}) if match != true { t.Errorf("failed to get a wildcard match, got %v", match) } } // A simple pattern match should return true. func TestPatternMatches(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*.go"}) if match != true { t.Errorf("failed to get a match, got %v", match) } } // An exclusion followed by an inclusion should return true. func TestExclusionPatternMatchesPatternBefore(t *testing.T) { match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) if match != true { t.Errorf("failed to get true match on exclusion pattern, got %v", match) } } // A folder pattern followed by an exception should return false. func TestPatternMatchesFolderExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) if match != false { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } // A folder pattern followed by an exception should return false. func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) if match != false { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } // A folder pattern followed by an exception should return false. func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) if match != false { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } // A pattern followed by an exclusion should return false. 
func TestExclusionPatternMatchesPatternAfter(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) if match != false { t.Errorf("failed to get false match on exclusion pattern, got %v", match) } } // A filename evaluating to . should return false. func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { match, _ := Matches(".", []string{"*.go"}) if match != false { t.Errorf("failed to get false match on ., got %v", match) } } // A single ! pattern should return an error. func TestSingleExclamationError(t *testing.T) { _, err := Matches("fileutils.go", []string{"!"}) if err == nil { t.Errorf("failed to get an error for a single exclamation point, got %v", err) } } // A string preceded with a ! should return true from Exclusion. func TestExclusion(t *testing.T) { exclusion := exclusion("!") if !exclusion { t.Errorf("failed to get true for a single !, got %v", exclusion) } } // Matches with no patterns func TestMatchesWithNoPatterns(t *testing.T) { matches, err := Matches("/any/path/there", []string{}) if err != nil { t.Fatal(err) } if matches { t.Fatalf("Should not have match anything") } } // Matches with malformed patterns func TestMatchesWithMalformedPatterns(t *testing.T) { matches, err := Matches("/any/path/there", []string{"["}) if err == nil { t.Fatal("Should have failed because of a malformed syntax in the pattern") } if matches { t.Fatalf("Should not have match anything") } } // Test lots of variants of patterns & strings func TestMatches(t *testing.T) { tests := []struct { pattern string text string pass bool }{ {"**", "file", true}, {"**", "file/", true}, {"**/", "file", true}, // weird one {"**/", "file/", true}, {"**", "/", true}, {"**/", "/", true}, {"**", "dir/file", true}, {"**/", "dir/file", false}, {"**", "dir/file/", true}, {"**/", "dir/file/", true}, {"**/**", "dir/file", true}, {"**/**", "dir/file/", true}, {"dir/**", "dir/file", true}, {"dir/**", "dir/file/", true}, {"dir/**", "dir/dir2/file", true}, 
{"dir/**", "dir/dir2/file/", true}, {"**/dir2/*", "dir/dir2/file", true}, {"**/dir2/*", "dir/dir2/file/", false}, {"**/dir2/**", "dir/dir2/dir3/file", true}, {"**/dir2/**", "dir/dir2/dir3/file/", true}, {"**file", "file", true}, {"**file", "dir/file", true}, {"**/file", "dir/file", true}, {"**file", "dir/dir/file", true}, {"**/file", "dir/dir/file", true}, {"**/file*", "dir/dir/file", true}, {"**/file*", "dir/dir/file.txt", true}, {"**/file*txt", "dir/dir/file.txt", true}, {"**/file*.txt", "dir/dir/file.txt", true}, {"**/file*.txt*", "dir/dir/file.txt", true}, {"**/**/*.txt", "dir/dir/file.txt", true}, {"**/**/*.txt2", "dir/dir/file.txt", false}, {"**/*.txt", "file.txt", true}, {"**/**/*.txt", "file.txt", true}, {"a**/*.txt", "a/file.txt", true}, {"a**/*.txt", "a/dir/file.txt", true}, {"a**/*.txt", "a/dir/dir/file.txt", true}, {"a/*.txt", "a/dir/file.txt", false}, {"a/*.txt", "a/file.txt", true}, {"a/*.txt**", "a/file.txt", true}, {"a[b-d]e", "ae", false}, {"a[b-d]e", "ace", true}, {"a[b-d]e", "aae", false}, {"a[^b-d]e", "aze", true}, {".*", ".foo", true}, {".*", "foo", false}, {"abc.def", "abcdef", false}, {"abc.def", "abc.def", true}, {"abc.def", "abcZdef", false}, {"abc?def", "abcZdef", true}, {"abc?def", "abcdef", false}, {"a\\*b", "a*b", true}, {"a\\", "a", false}, {"a\\", "a\\", false}, {"a\\\\", "a\\", true}, {"**/foo/bar", "foo/bar", true}, {"**/foo/bar", "dir/foo/bar", true}, {"**/foo/bar", "dir/dir2/foo/bar", true}, {"abc/**", "abc", false}, {"abc/**", "abc/def", true}, {"abc/**", "abc/def/ghi", true}, } for _, test := range tests { res, _ := regexpMatch(test.pattern, test.text) if res != test.pass { t.Fatalf("Failed: %v - res:%v", test, res) } } } // An empty string should return true from Empty. 
func TestEmpty(t *testing.T) { empty := empty("") if !empty { t.Errorf("failed to get true for an empty string, got %v", empty) } } func TestCleanPatterns(t *testing.T) { cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) if len(cleaned) != 2 { t.Errorf("expected 2 element slice, got %v", len(cleaned)) } } func TestCleanPatternsStripEmptyPatterns(t *testing.T) { cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) if len(cleaned) != 2 { t.Errorf("expected 2 element slice, got %v", len(cleaned)) } } func TestCleanPatternsExceptionFlag(t *testing.T) { _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) if !exceptions { t.Errorf("expected exceptions to be true, got %v", exceptions) } } func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) if !exceptions { t.Errorf("expected exceptions to be true, got %v", exceptions) } } func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) if !exceptions { t.Errorf("expected exceptions to be true, got %v", exceptions) } } func TestCleanPatternsErrorSingleException(t *testing.T) { _, _, _, err := CleanPatterns([]string{"!"}) if err == nil { t.Errorf("expected error on single exclamation point, got %v", err) } } func TestCleanPatternsFolderSplit(t *testing.T) { _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) if dirs[0][0] != "docs" { t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) } if dirs[0][1] != "config" { t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) } } func TestCreateIfNotExistsDir(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) folderToCreate := filepath.Join(tempFolder, "tocreate") if err := CreateIfNotExists(folderToCreate, true); err != nil { 
t.Fatal(err) } fileinfo, err := os.Stat(folderToCreate) if err != nil { t.Fatalf("Should have create a folder, got %v", err) } if !fileinfo.IsDir() { t.Fatalf("Should have been a dir, seems it's not") } } func TestCreateIfNotExistsFile(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) fileToCreate := filepath.Join(tempFolder, "file/to/create") if err := CreateIfNotExists(fileToCreate, false); err != nil { t.Fatal(err) } fileinfo, err := os.Stat(fileToCreate) if err != nil { t.Fatalf("Should have create a file, got %v", err) } if fileinfo.IsDir() { t.Fatalf("Should have been a file, seems it's not") } } // These matchTests are stolen from go's filepath Match tests. type matchTest struct { pattern, s string match bool err error } var matchTests = []matchTest{ {"abc", "abc", true, nil}, {"*", "abc", true, nil}, {"*c", "abc", true, nil}, {"a*", "a", true, nil}, {"a*", "abc", true, nil}, {"a*", "ab/c", false, nil}, {"a*/b", "abc/b", true, nil}, {"a*/b", "a/c/b", false, nil}, {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil}, {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil}, {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil}, {"a*b?c*x", "abxbbxdbxebxczzx", true, nil}, {"a*b?c*x", "abxbbxdbxebxczzy", false, nil}, {"ab[c]", "abc", true, nil}, {"ab[b-d]", "abc", true, nil}, {"ab[e-g]", "abc", false, nil}, {"ab[^c]", "abc", false, nil}, {"ab[^b-d]", "abc", false, nil}, {"ab[^e-g]", "abc", true, nil}, {"a\\*b", "a*b", true, nil}, {"a\\*b", "ab", false, nil}, {"a?b", "a☺b", true, nil}, {"a[^a]b", "a☺b", true, nil}, {"a???b", "a☺b", false, nil}, {"a[^a][^a][^a]b", "a☺b", false, nil}, {"[a-ζ]*", "α", true, nil}, {"*[a-ζ]", "A", false, nil}, {"a?b", "a/b", false, nil}, {"a*b", "a/b", false, nil}, {"[\\]a]", "]", true, nil}, {"[\\-]", "-", true, nil}, {"[x\\-]", "x", true, nil}, {"[x\\-]", "-", true, nil}, {"[x\\-]", "z", false, nil}, {"[\\-x]", "x", 
true, nil}, {"[\\-x]", "-", true, nil}, {"[\\-x]", "a", false, nil}, {"[]a]", "]", false, filepath.ErrBadPattern}, {"[-]", "-", false, filepath.ErrBadPattern}, {"[x-]", "x", false, filepath.ErrBadPattern}, {"[x-]", "-", false, filepath.ErrBadPattern}, {"[x-]", "z", false, filepath.ErrBadPattern}, {"[-x]", "x", false, filepath.ErrBadPattern}, {"[-x]", "-", false, filepath.ErrBadPattern}, {"[-x]", "a", false, filepath.ErrBadPattern}, {"\\", "a", false, filepath.ErrBadPattern}, {"[a-b-c]", "a", false, filepath.ErrBadPattern}, {"[", "a", false, filepath.ErrBadPattern}, {"[^", "a", false, filepath.ErrBadPattern}, {"[^bc", "a", false, filepath.ErrBadPattern}, {"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO its wrong {"a[", "ab", false, filepath.ErrBadPattern}, {"*x", "xxx", true, nil}, } func errp(e error) string { if e == nil { return "" } return e.Error() } // TestMatch test's our version of filepath.Match, called regexpMatch. func TestMatch(t *testing.T) { for _, tt := range matchTests { pattern := tt.pattern s := tt.s if runtime.GOOS == "windows" { if strings.Index(pattern, "\\") >= 0 { // no escape allowed on windows. continue } pattern = filepath.Clean(pattern) s = filepath.Clean(s) } ok, err := regexpMatch(pattern, s) if ok != tt.match || err != tt.err { t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) } } } docker-1.10.3/pkg/fileutils/fileutils_unix.go000066400000000000000000000006711267010174400212240ustar00rootroot00000000000000// +build linux freebsd package fileutils import ( "fmt" "io/ioutil" "os" "github.com/Sirupsen/logrus" ) // GetTotalUsedFds Returns the number of used File Descriptors by // reading it via /proc filesystem. 
func GetTotalUsedFds() int { if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) } return -1 } docker-1.10.3/pkg/fileutils/fileutils_windows.go000066400000000000000000000002321267010174400217240ustar00rootroot00000000000000package fileutils // GetTotalUsedFds Returns the number of used File Descriptors. Not supported // on Windows. func GetTotalUsedFds() int { return -1 } docker-1.10.3/pkg/gitutils/000077500000000000000000000000001267010174400154725ustar00rootroot00000000000000docker-1.10.3/pkg/gitutils/gitutils.go000066400000000000000000000046151267010174400176730ustar00rootroot00000000000000package gitutils import ( "fmt" "io/ioutil" "net/http" "net/url" "os" "os/exec" "path/filepath" "strings" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/urlutil" ) // Clone clones a repository into a newly created directory which // will be under "docker-build-git" func Clone(remoteURL string) (string, error) { if !urlutil.IsGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } root, err := ioutil.TempDir("", "docker-build-git") if err != nil { return "", err } u, err := url.Parse(remoteURL) if err != nil { return "", err } fragment := u.Fragment clone := cloneArgs(u, root) if output, err := git(clone...); err != nil { return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) } return checkoutGit(fragment, root) } func cloneArgs(remoteURL *url.URL, root string) []string { args := []string{"clone", "--recursive"} shallow := len(remoteURL.Fragment) == 0 if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { shallow = false } } if shallow { args = append(args, "--depth", "1") } if remoteURL.Fragment != "" { remoteURL.Fragment = 
"" } return append(args, remoteURL.String(), root) } func checkoutGit(fragment, root string) (string, error) { refAndDir := strings.SplitN(fragment, ":", 2) if len(refAndDir[0]) != 0 { if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) } } if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) if err != nil { return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) } fi, err := os.Stat(newCtx) if err != nil { return "", err } if !fi.IsDir() { return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) } root = newCtx } return root, nil } func gitWithinDir(dir string, args ...string) ([]byte, error) { a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} return git(append(a, args...)...) } func git(args ...string) ([]byte, error) { return exec.Command("git", args...).CombinedOutput() } docker-1.10.3/pkg/gitutils/gitutils_test.go000066400000000000000000000114161267010174400207270ustar00rootroot00000000000000package gitutils import ( "fmt" "io/ioutil" "net/http" "net/http/httptest" "net/url" "os" "path/filepath" "reflect" "testing" ) func TestCloneArgsSmartHttp(t *testing.T) { mux := http.NewServeMux() server := httptest.NewServer(mux) serverURL, _ := url.Parse(server.URL) serverURL.Path = "/repo.git" gitURL := serverURL.String() mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { q := r.URL.Query().Get("service") w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) }) args := cloneArgs(serverURL, "/tmp") exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} if !reflect.DeepEqual(args, exp) { t.Fatalf("Expected %v, got %v", exp, args) } } func TestCloneArgsDumbHttp(t *testing.T) { mux := http.NewServeMux() server := 
httptest.NewServer(mux) serverURL, _ := url.Parse(server.URL) serverURL.Path = "/repo.git" gitURL := serverURL.String() mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") }) args := cloneArgs(serverURL, "/tmp") exp := []string{"clone", "--recursive", gitURL, "/tmp"} if !reflect.DeepEqual(args, exp) { t.Fatalf("Expected %v, got %v", exp, args) } } func TestCloneArgsGit(t *testing.T) { u, _ := url.Parse("git://github.com/docker/docker") args := cloneArgs(u, "/tmp") exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} if !reflect.DeepEqual(args, exp) { t.Fatalf("Expected %v, got %v", exp, args) } } func TestCloneArgsStripFragment(t *testing.T) { u, _ := url.Parse("git://github.com/docker/docker#test") args := cloneArgs(u, "/tmp") exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} if !reflect.DeepEqual(args, exp) { t.Fatalf("Expected %v, got %v", exp, args) } } func TestCheckoutGit(t *testing.T) { root, err := ioutil.TempDir("", "docker-build-git-checkout") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) gitDir := filepath.Join(root, "repo") _, err = git("init", gitDir) if err != nil { t.Fatal(err) } if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { t.Fatal(err) } if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { t.Fatal(err) } if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { t.Fatal(err) } subDir := filepath.Join(gitDir, "subdir") if err = os.Mkdir(subDir, 0755); err != nil { t.Fatal(err) } if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { t.Fatal(err) } if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { t.Fatal(err) } if err = os.Symlink("/subdir", filepath.Join(gitDir, 
"absolutelink")); err != nil { t.Fatal(err) } if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { t.Fatal(err) } if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { t.Fatal(err) } if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { t.Fatal(err) } if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { t.Fatal(err) } if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { t.Fatal(err) } if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { t.Fatal(err) } if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { t.Fatal(err) } if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { t.Fatal(err) } cases := []struct { frag string exp string fail bool }{ {"", "FROM scratch", false}, {"master", "FROM scratch", false}, {":subdir", "FROM scratch\nEXPOSE 5000", false}, {":nosubdir", "", true}, // missing directory error {":Dockerfile", "", true}, // not a directory error {"master:nosubdir", "", true}, {"master:subdir", "FROM scratch\nEXPOSE 5000", false}, {"master:parentlink", "FROM scratch\nEXPOSE 5000", false}, {"master:absolutelink", "FROM scratch\nEXPOSE 5000", false}, {"master:../subdir", "", true}, {"test", "FROM scratch\nEXPOSE 3000", false}, {"test:", "FROM scratch\nEXPOSE 3000", false}, {"test:subdir", "FROM busybox\nEXPOSE 5000", false}, } for _, c := range cases { r, err := checkoutGit(c.frag, gitDir) fail := err != nil if fail != c.fail { t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) } if c.fail { continue } b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) if err != nil { t.Fatal(err) } if string(b) != c.exp { t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) } } } 
docker-1.10.3/pkg/graphdb/000077500000000000000000000000001267010174400152355ustar00rootroot00000000000000docker-1.10.3/pkg/graphdb/conn_sqlite3.go000066400000000000000000000004231267010174400201640ustar00rootroot00000000000000// +build cgo package graphdb import "database/sql" // NewSqliteConn opens a connection to a sqlite // database. func NewSqliteConn(root string) (*Database, error) { conn, err := sql.Open("sqlite3", root) if err != nil { return nil, err } return NewDatabase(conn) } docker-1.10.3/pkg/graphdb/conn_sqlite3_unix.go000066400000000000000000000001511267010174400212250ustar00rootroot00000000000000// +build cgo,!windows package graphdb import ( _ "github.com/mattn/go-sqlite3" // registers sqlite ) docker-1.10.3/pkg/graphdb/conn_sqlite3_windows.go000066400000000000000000000001501267010174400217330ustar00rootroot00000000000000// +build cgo,windows package graphdb import ( _ "github.com/mattn/go-sqlite3" // registers sqlite ) docker-1.10.3/pkg/graphdb/conn_unsupported.go000066400000000000000000000002431267010174400211700ustar00rootroot00000000000000// +build !cgo package graphdb // NewSqliteConn return a new sqlite connection. func NewSqliteConn(root string) (*Database, error) { panic("Not implemented") } docker-1.10.3/pkg/graphdb/graphdb.go000066400000000000000000000275351267010174400172070ustar00rootroot00000000000000package graphdb import ( "database/sql" "fmt" "path" "strings" "sync" ) const ( createEntityTable = ` CREATE TABLE IF NOT EXISTS entity ( id text NOT NULL PRIMARY KEY );` createEdgeTable = ` CREATE TABLE IF NOT EXISTS edge ( "entity_id" text NOT NULL, "parent_id" text NULL, "name" text NOT NULL, CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") ); ` createEdgeIndices = ` CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); ` ) // Entity with a unique id. 
type Entity struct { id string } // An Edge connects two entities together. type Edge struct { EntityID string Name string ParentID string } // Entities stores the list of entities. type Entities map[string]*Entity // Edges stores the relationships between entities. type Edges []*Edge // WalkFunc is a function invoked to process an individual entity. type WalkFunc func(fullPath string, entity *Entity) error // Database is a graph database for storing entities and their relationships. type Database struct { conn *sql.DB mux sync.RWMutex } // IsNonUniqueNameError processes the error to check if it's caused by // a constraint violation. // This is necessary because the error isn't the same across various // sqlite versions. func IsNonUniqueNameError(err error) bool { str := err.Error() // sqlite 3.7.17-1ubuntu1 returns: // Set failure: Abort due to constraint violation: columns parent_id, name are not unique if strings.HasSuffix(str, "name are not unique") { return true } // sqlite-3.8.3-1.fc20 returns: // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { return true } // sqlite-3.6.20-1.el6 returns: // Set failure: Abort due to constraint violation: constraint failed if strings.HasSuffix(str, "constraint failed") { return true } return false } // NewDatabase creates a new graph database initialized with a root entity. 
func NewDatabase(conn *sql.DB) (*Database, error) { if conn == nil { return nil, fmt.Errorf("Database connection cannot be nil") } db := &Database{conn: conn} // Create root entities tx, err := conn.Begin() if err != nil { return nil, err } if _, err := tx.Exec(createEntityTable); err != nil { return nil, err } if _, err := tx.Exec(createEdgeTable); err != nil { return nil, err } if _, err := tx.Exec(createEdgeIndices); err != nil { return nil, err } if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { tx.Rollback() return nil, err } if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { tx.Rollback() return nil, err } if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { tx.Rollback() return nil, err } if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { tx.Rollback() return nil, err } if err := tx.Commit(); err != nil { return nil, err } return db, nil } // Close the underlying connection to the database. func (db *Database) Close() error { return db.conn.Close() } // Set the entity id for a given path. func (db *Database) Set(fullPath, id string) (*Entity, error) { db.mux.Lock() defer db.mux.Unlock() tx, err := db.conn.Begin() if err != nil { return nil, err } var entityID string if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { if err == sql.ErrNoRows { if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { tx.Rollback() return nil, err } } else { tx.Rollback() return nil, err } } e := &Entity{id} parentPath, name := splitPath(fullPath) if err := db.setEdge(parentPath, name, e, tx); err != nil { tx.Rollback() return nil, err } if err := tx.Commit(); err != nil { return nil, err } return e, nil } // Exists returns true if a name already exists in the database. 
func (db *Database) Exists(name string) bool { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return false } return e != nil } func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error { parent, err := db.get(parentPath) if err != nil { return err } if parent.id == e.id { return fmt.Errorf("Cannot set self as child") } if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { return err } return nil } // RootEntity returns the root "/" entity for the database. func (db *Database) RootEntity() *Entity { return &Entity{ id: "0", } } // Get returns the entity for a given path. func (db *Database) Get(name string) *Entity { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return nil } return e } func (db *Database) get(name string) (*Entity, error) { e := db.RootEntity() // We always know the root name so return it if // it is requested if name == "/" { return e, nil } parts := split(name) for i := 1; i < len(parts); i++ { p := parts[i] if p == "" { continue } next := db.child(e, p) if next == nil { return nil, fmt.Errorf("Cannot find child for %s", name) } e = next } return e, nil } // List all entities by from the name. // The key will be the full path of the entity. func (db *Database) List(name string, depth int) Entities { db.mux.RLock() defer db.mux.RUnlock() out := Entities{} e, err := db.get(name) if err != nil { return out } children, err := db.children(e, name, depth, nil) if err != nil { return out } for _, c := range children { out[c.FullPath] = c.Entity } return out } // Walk through the child graph of an entity, calling walkFunc for each child entity. // It is safe for walkFunc to call graph functions. 
func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { children, err := db.Children(name, depth) if err != nil { return err } // Note: the database lock must not be held while calling walkFunc for _, c := range children { if err := walkFunc(c.FullPath, c.Entity); err != nil { return err } } return nil } // Children returns the children of the specified entity. func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return nil, err } return db.children(e, name, depth, nil) } // Parents returns the parents of a specified entity. func (db *Database) Parents(name string) ([]string, error) { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return nil, err } return db.parents(e) } // Refs returns the reference count for a specified id. func (db *Database) Refs(id string) int { db.mux.RLock() defer db.mux.RUnlock() var count int if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { return 0 } return count } // RefPaths returns all the id's path references. func (db *Database) RefPaths(id string) Edges { db.mux.RLock() defer db.mux.RUnlock() refs := Edges{} rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) if err != nil { return refs } defer rows.Close() for rows.Next() { var name string var parentID string if err := rows.Scan(&name, &parentID); err != nil { return refs } refs = append(refs, &Edge{ EntityID: id, Name: name, ParentID: parentID, }) } return refs } // Delete the reference to an entity at a given path. func (db *Database) Delete(name string) error { db.mux.Lock() defer db.mux.Unlock() if name == "/" { return fmt.Errorf("Cannot delete root entity") } parentPath, n := splitPath(name) parent, err := db.get(parentPath) if err != nil { return err } if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? 
AND name = ?;", parent.id, n); err != nil { return err } return nil } // Purge removes the entity with the specified id // Walk the graph to make sure all references to the entity // are removed and return the number of references removed func (db *Database) Purge(id string) (int, error) { db.mux.Lock() defer db.mux.Unlock() tx, err := db.conn.Begin() if err != nil { return -1, err } // Delete all edges rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) if err != nil { tx.Rollback() return -1, err } changes, err := rows.RowsAffected() if err != nil { return -1, err } // Clear who's using this id as parent refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) if err != nil { tx.Rollback() return -1, err } refsCount, err := refs.RowsAffected() if err != nil { return -1, err } // Delete entity if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { tx.Rollback() return -1, err } if err := tx.Commit(); err != nil { return -1, err } return int(changes + refsCount), nil } // Rename an edge for a given path func (db *Database) Rename(currentName, newName string) error { db.mux.Lock() defer db.mux.Unlock() parentPath, name := splitPath(currentName) newParentPath, newEdgeName := splitPath(newName) if parentPath != newParentPath { return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) } parent, err := db.get(parentPath) if err != nil { return err } rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) if err != nil { return err } i, err := rows.RowsAffected() if err != nil { return err } if i == 0 { return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) } return nil } // WalkMeta stores the walk metadata. 
type WalkMeta struct { Parent *Entity Entity *Entity FullPath string Edge *Edge } func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { if e == nil { return entities, nil } rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) if err != nil { return nil, err } defer rows.Close() for rows.Next() { var entityID, entityName string if err := rows.Scan(&entityID, &entityName); err != nil { return nil, err } child := &Entity{entityID} edge := &Edge{ ParentID: e.id, Name: entityName, EntityID: child.id, } meta := WalkMeta{ Parent: e, Entity: child, FullPath: path.Join(name, edge.Name), Edge: edge, } entities = append(entities, meta) if depth != 0 { nDepth := depth if depth != -1 { nDepth-- } entities, err = db.children(child, meta.FullPath, nDepth, entities) if err != nil { return nil, err } } } return entities, nil } func (db *Database) parents(e *Entity) (parents []string, err error) { if e == nil { return parents, nil } rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) if err != nil { return nil, err } defer rows.Close() for rows.Next() { var parentID string if err := rows.Scan(&parentID); err != nil { return nil, err } parents = append(parents, parentID) } return parents, nil } // Return the entity based on the parent path and name. func (db *Database) child(parent *Entity, name string) *Entity { var id string if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { return nil } return &Entity{id} } // ID returns the id used to reference this entity. func (e *Entity) ID() string { return e.id } // Paths returns the paths sorted by depth. 
func (e Entities) Paths() []string { out := make([]string, len(e)) var i int for k := range e { out[i] = k i++ } sortByDepth(out) return out } docker-1.10.3/pkg/graphdb/graphdb_test.go000066400000000000000000000310101267010174400202250ustar00rootroot00000000000000package graphdb import ( "database/sql" "fmt" "os" "path" "strconv" "testing" _ "github.com/mattn/go-sqlite3" ) func newTestDb(t *testing.T) (*Database, string) { p := path.Join(os.TempDir(), "sqlite.db") conn, err := sql.Open("sqlite3", p) db, err := NewDatabase(conn) if err != nil { t.Fatal(err) } return db, p } func destroyTestDb(dbPath string) { os.Remove(dbPath) } func TestNewDatabase(t *testing.T) { db, dbpath := newTestDb(t) if db == nil { t.Fatal("Database should not be nil") } db.Close() defer destroyTestDb(dbpath) } func TestCreateRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) root := db.RootEntity() if root == nil { t.Fatal("Root entity should not be nil") } } func TestGetRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) e := db.Get("/") if e == nil { t.Fatal("Entity should not be nil") } if e.ID() != "0" { t.Fatalf("Entity id should be 0, got %s", e.ID()) } } func TestSetEntityWithDifferentName(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/test", "1") if _, err := db.Set("/other", "1"); err != nil { t.Fatal(err) } } func TestSetDuplicateEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if _, err := db.Set("/foo", "42"); err != nil { t.Fatal(err) } if _, err := db.Set("/foo", "43"); err == nil { t.Fatalf("Creating an entry with a duplicate path did not cause an error") } } func TestCreateChild(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) child, err := db.Set("/db", "1") if err != nil { t.Fatal(err) } if child == nil { t.Fatal("Child should not be nil") } if child.ID() != "1" { t.Fail() } } func TestParents(t *testing.T) { db, dbpath := 
newTestDb(t) defer destroyTestDb(dbpath) for i := 1; i < 6; i++ { a := strconv.Itoa(i) if _, err := db.Set("/"+a, a); err != nil { t.Fatal(err) } } for i := 6; i < 11; i++ { a := strconv.Itoa(i) p := strconv.Itoa(i - 5) key := fmt.Sprintf("/%s/%s", p, a) if _, err := db.Set(key, a); err != nil { t.Fatal(err) } parents, err := db.Parents(key) if err != nil { t.Fatal(err) } if len(parents) != 1 { t.Fatalf("Expected 1 entry for %s got %d", key, len(parents)) } if parents[0] != p { t.Fatalf("ID %s received, %s expected", parents[0], p) } } } func TestChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) str := "/" for i := 1; i < 6; i++ { a := strconv.Itoa(i) if _, err := db.Set(str+a, a); err != nil { t.Fatal(err) } str = str + a + "/" } str = "/" for i := 10; i < 30; i++ { // 20 entities a := strconv.Itoa(i) if _, err := db.Set(str+a, a); err != nil { t.Fatal(err) } str = str + a + "/" } entries, err := db.Children("/", 5) if err != nil { t.Fatal(err) } if len(entries) != 11 { t.Fatalf("Expect 11 entries for / got %d", len(entries)) } entries, err = db.Children("/", 20) if err != nil { t.Fatal(err) } if len(entries) != 25 { t.Fatalf("Expect 25 entries for / got %d", len(entries)) } } func TestListAllRootChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) for i := 1; i < 6; i++ { a := strconv.Itoa(i) if _, err := db.Set("/"+a, a); err != nil { t.Fatal(err) } } entries := db.List("/", -1) if len(entries) != 5 { t.Fatalf("Expect 5 entries for / got %d", len(entries)) } } func TestListAllSubChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := 
db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } entries := db.List("/webapp", 1) if len(entries) != 3 { t.Fatalf("Expect 3 entries for / got %d", len(entries)) } entries = db.List("/webapp", 0) if len(entries) != 2 { t.Fatalf("Expect 2 entries for / got %d", len(entries)) } } func TestAddSelfAsChild(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) child, err := db.Set("/test", "1") if err != nil { t.Fatal(err) } if _, err := db.Set("/test/other", child.ID()); err == nil { t.Fatal("Error should not be nil") } } func TestAddChildToNonExistentRoot(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if _, err := db.Set("/myapp", "1"); err != nil { t.Fatal(err) } if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { t.Fatal("Error should not be nil") } } func TestWalkAll(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/db/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } if err := db.Walk("/", func(p string, e *Entity) error { t.Logf("Path: %s Entity: %s", p, e.ID()) return nil }, -1); err != nil { t.Fatal(err) } } func TestGetEntityByPath(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err 
!= nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } entity := db.Get("/webapp/db/logs") if entity == nil { t.Fatal("Entity should not be nil") } if entity.ID() != "4" { t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) } } func TestEnitiesPaths(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } out := db.List("/", -1) for _, p := range out.Paths() { t.Log(p) } } func TestDeleteRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if err := db.Delete("/"); err == nil { t.Fatal("Error should not be nil") } } func TestDeleteEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err 
!= nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } if err := db.Delete("/webapp/sentry"); err != nil { t.Fatal(err) } entity := db.Get("/webapp/sentry") if entity != nil { t.Fatal("Entity /webapp/sentry should be nil") } } func TestCountRefs(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if db.Refs("1") != 1 { t.Fatal("Expect reference count to be 1") } db.Set("/db", "2") db.Set("/webapp/db", "2") if db.Refs("2") != 2 { t.Fatal("Expect reference count to be 2") } } func TestPurgeId(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if c := db.Refs("1"); c != 1 { t.Fatalf("Expect reference count to be 1, got %d", c) } db.Set("/db", "2") db.Set("/webapp/db", "2") count, err := db.Purge("2") if err != nil { t.Fatal(err) } if count != 2 { t.Fatalf("Expected 2 references to be removed, got %d", count) } } // Regression test https://github.com/docker/docker/issues/12334 func TestPurgeIdRefPaths(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") db.Set("/db", "2") db.Set("/db/webapp", "1") if c := db.Refs("1"); c != 2 { t.Fatalf("Expected 2 reference for webapp, got %d", c) } if c := db.Refs("2"); c != 1 { t.Fatalf("Expected 1 reference for db, got %d", c) } if rp := db.RefPaths("2"); len(rp) != 1 { t.Fatalf("Expected 1 reference path for db, got %d", len(rp)) } count, err := db.Purge("2") if err != nil { t.Fatal(err) } if count != 2 { t.Fatalf("Expected 2 rows to be removed, got %d", count) } if c := db.Refs("2"); 
c != 0 { t.Fatalf("Expected 0 reference for db, got %d", c) } if c := db.Refs("1"); c != 1 { t.Fatalf("Expected 1 reference for webapp, got %d", c) } } func TestRename(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if db.Refs("1") != 1 { t.Fatal("Expect reference count to be 1") } db.Set("/db", "2") db.Set("/webapp/db", "2") if db.Get("/webapp/db") == nil { t.Fatal("Cannot find entity at path /webapp/db") } if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { t.Fatal(err) } if db.Get("/webapp/db") != nil { t.Fatal("Entity should not exist at /webapp/db") } if db.Get("/webapp/newdb") == nil { t.Fatal("Cannot find entity at path /webapp/newdb") } } func TestCreateMultipleNames(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/db", "1") if _, err := db.Set("/myapp", "1"); err != nil { t.Fatal(err) } db.Walk("/", func(p string, e *Entity) error { t.Logf("%s\n", p) return nil }, -1) } func TestRefPaths(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") db.Set("/db", "2") db.Set("/webapp/db", "2") refs := db.RefPaths("2") if len(refs) != 2 { t.Fatalf("Expected reference count to be 2, got %d", len(refs)) } } func TestExistsTrue(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/testing", "1") if !db.Exists("/testing") { t.Fatalf("/tesing should exist") } } func TestExistsFalse(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/toerhe", "1") if db.Exists("/testing") { t.Fatalf("/tesing should not exist") } } func TestGetNameWithTrailingSlash(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/todo", "1") e := db.Get("/todo/") if e == nil { t.Fatalf("Entity should not be nil") } } func TestConcurrentWrites(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) errs := make(chan error, 2) save := func(name string, id string) { if _, err := 
db.Set(fmt.Sprintf("/%s", name), id); err != nil { errs <- err } errs <- nil } purge := func(id string) { if _, err := db.Purge(id); err != nil { errs <- err } errs <- nil } save("/1", "1") go purge("1") go save("/2", "2") any := false for i := 0; i < 2; i++ { if err := <-errs; err != nil { any = true t.Log(err) } } if any { t.Fail() } } docker-1.10.3/pkg/graphdb/sort.go000066400000000000000000000007361267010174400165610ustar00rootroot00000000000000package graphdb import "sort" type pathSorter struct { paths []string by func(i, j string) bool } func sortByDepth(paths []string) { s := &pathSorter{paths, func(i, j string) bool { return PathDepth(i) > PathDepth(j) }} sort.Sort(s) } func (s *pathSorter) Len() int { return len(s.paths) } func (s *pathSorter) Swap(i, j int) { s.paths[i], s.paths[j] = s.paths[j], s.paths[i] } func (s *pathSorter) Less(i, j int) bool { return s.by(s.paths[i], s.paths[j]) } docker-1.10.3/pkg/graphdb/sort_test.go000066400000000000000000000007411267010174400176140ustar00rootroot00000000000000package graphdb import ( "testing" ) func TestSort(t *testing.T) { paths := []string{ "/", "/myreallylongname", "/app/db", } sortByDepth(paths) if len(paths) != 3 { t.Fatalf("Expected 3 parts got %d", len(paths)) } if paths[0] != "/app/db" { t.Fatalf("Expected /app/db got %s", paths[0]) } if paths[1] != "/myreallylongname" { t.Fatalf("Expected /myreallylongname got %s", paths[1]) } if paths[2] != "/" { t.Fatalf("Expected / got %s", paths[2]) } } docker-1.10.3/pkg/graphdb/utils.go000066400000000000000000000007771267010174400167370ustar00rootroot00000000000000package graphdb import ( "path" "strings" ) // Split p on / func split(p string) []string { return strings.Split(p, "/") } // PathDepth returns the depth or number of / in a given path func PathDepth(p string) int { parts := split(p) if len(parts) == 2 && parts[1] == "" { return 1 } return len(parts) } func splitPath(p string) (parent, name string) { if p[0] != '/' { p = "/" + p } parent, name = 
path.Split(p) l := len(parent) if parent[l-1] == '/' { parent = parent[:l-1] } return } docker-1.10.3/pkg/homedir/000077500000000000000000000000001267010174400152555ustar00rootroot00000000000000docker-1.10.3/pkg/homedir/homedir.go000066400000000000000000000017061267010174400172370ustar00rootroot00000000000000package homedir import ( "os" "runtime" "github.com/opencontainers/runc/libcontainer/user" ) // Key returns the env var name for the user's home dir based on // the platform being run on func Key() string { if runtime.GOOS == "windows" { return "USERPROFILE" } return "HOME" } // Get returns the home directory of the current user with the help of // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. func Get() string { home := os.Getenv(Key()) if home == "" && runtime.GOOS != "windows" { if u, err := user.CurrentUser(); err == nil { return u.Home } } return home } // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. 
func GetShortcutString() string { if runtime.GOOS == "windows" { return "%USERPROFILE%" // be careful while using in format functions } return "~" } docker-1.10.3/pkg/homedir/homedir_test.go000066400000000000000000000006251267010174400202750ustar00rootroot00000000000000package homedir import ( "path/filepath" "testing" ) func TestGet(t *testing.T) { home := Get() if home == "" { t.Fatal("returned home directory is empty") } if !filepath.IsAbs(home) { t.Fatalf("returned path is not absolute: %s", home) } } func TestGetShortcutString(t *testing.T) { shortcut := GetShortcutString() if shortcut == "" { t.Fatal("returned shortcut string is empty") } } docker-1.10.3/pkg/httputils/000077500000000000000000000000001267010174400156665ustar00rootroot00000000000000docker-1.10.3/pkg/httputils/httputils.go000066400000000000000000000026201267010174400202550ustar00rootroot00000000000000package httputils import ( "errors" "fmt" "net/http" "regexp" "strings" "github.com/docker/docker/pkg/jsonmessage" ) var ( headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") ) // Download requests a given URL and returns an io.Reader. func Download(url string) (resp *http.Response, err error) { if resp, err = http.Get(url); err != nil { return nil, err } if resp.StatusCode >= 400 { return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) } return resp, nil } // NewHTTPRequestError returns a JSON response error. func NewHTTPRequestError(msg string, res *http.Response) error { return &jsonmessage.JSONError{ Message: msg, Code: res.StatusCode, } } // ServerHeader contains the server information. type ServerHeader struct { App string // docker Ver string // 1.8.0-dev OS string // windows or linux } // ParseServerHeader extracts pieces from an HTTP server header // which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). 
func ParseServerHeader(hdr string) (*ServerHeader, error) { matches := headerRegexp.FindStringSubmatch(hdr) if len(matches) != 4 { return nil, errInvalidHeader } return &ServerHeader{ App: strings.TrimSpace(matches[1]), Ver: strings.TrimSpace(matches[2]), OS: strings.TrimSpace(matches[3]), }, nil } docker-1.10.3/pkg/httputils/httputils_test.go000066400000000000000000000070551267010174400213230ustar00rootroot00000000000000package httputils import ( "fmt" "io/ioutil" "net/http" "net/http/httptest" "strings" "testing" ) func TestDownload(t *testing.T) { expected := "Hello, docker !" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, expected) })) defer ts.Close() response, err := Download(ts.URL) if err != nil { t.Fatal(err) } actual, err := ioutil.ReadAll(response.Body) response.Body.Close() if err != nil || string(actual) != expected { t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) } } func TestDownload400Errors(t *testing.T) { expectedError := "Got HTTP status code >= 400: 403 Forbidden" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // 403 http.Error(w, "something failed (forbidden)", http.StatusForbidden) })) defer ts.Close() // Expected status code = 403 if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { t.Fatalf("Expected the the error %q, got %v", expectedError, err) } } func TestDownloadOtherErrors(t *testing.T) { if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) } } func TestNewHTTPRequestError(t *testing.T) { errorMessage := "Some error message" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // 403 http.Error(w, errorMessage, http.StatusForbidden) })) defer ts.Close() httpResponse, err 
:= http.Get(ts.URL) if err != nil { t.Fatal(err) } if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { t.Fatalf("Expected err to be %q, got %v", errorMessage, err) } } func TestParseServerHeader(t *testing.T) { inputs := map[string][]string{ "bad header": {"error"}, "(bad header)": {"error"}, "(without/spaces)": {"error"}, "(header/with spaces)": {"error"}, "foo/bar (baz)": {"foo", "bar", "baz"}, "foo/bar": {"error"}, "foo": {"error"}, "foo/bar (baz space)": {"foo", "bar", "baz space"}, " f f / b b ( b s ) ": {"f f", "b b", "b s"}, "foo/bar (baz) ignore": {"foo", "bar", "baz"}, "foo/bar ()": {"error"}, "foo/bar()": {"error"}, "foo/bar(baz)": {"foo", "bar", "baz"}, "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, } for header, values := range inputs { serverHeader, err := ParseServerHeader(header) if err != nil { if err != errInvalidHeader { t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) } if values[0] == "error" { continue } t.Fatalf("Header %q failed to parse when it shouldn't have", header) } if values[0] == "error" { t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) } if serverHeader.App != values[0] { t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) } if serverHeader.Ver != values[1] { t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) } if serverHeader.OS != values[2] { t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) } } } docker-1.10.3/pkg/httputils/mimetype.go000066400000000000000000000015451267010174400200530ustar00rootroot00000000000000package httputils import ( "mime" "net/http" ) // MimeTypes stores the MIME content type. 
// DetectContentType returns a best guess representation of the MIME
// content type for the bytes at c. The value detected by
// http.DetectContentType is guaranteed not be nil, defaulting to
// application/octet-stream when a better guess cannot be made. The
// result of this detection is then run through mime.ParseMediaType()
// which separates the actual MIME string from any parameters.
func DetectContentType(c []byte) (string, map[string]string, error) {
	guessed := http.DetectContentType(c)
	mediaType, params, err := mime.ParseMediaType(guessed)
	if err != nil {
		return "", nil, err
	}
	return mediaType, params, nil
}
// ResumableRequestReaderWithInitialResponse makes it possible to resume
// reading the body of an already initiated request.
func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse}
}

// Read implements io.Reader. When no response is in flight it (re)issues the
// request, setting a Range header to resume from the last byte read if a
// previous attempt made progress. Transient request failures return (0, nil)
// until maxFailures-1 retries are exhausted; read errors other than io.EOF
// are logged and cleared so the next Read re-issues the request.
func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
	if r.client == nil || r.request == nil {
		return 0, fmt.Errorf("client and request can't be nil\n")
	}
	isFreshRequest := false
	if r.lastRange != 0 && r.currentResponse == nil {
		// Resuming after progress: ask only for the remaining byte range,
		// and back off before re-contacting the server.
		readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
		r.request.Header.Set("Range", readRange)
		time.Sleep(5 * time.Second)
	}
	if r.currentResponse == nil {
		r.currentResponse, err = r.client.Do(r.request)
		isFreshRequest = true
	}
	if err != nil && r.failures+1 != r.maxFailures {
		// Retry budget remains: swallow the error, back off linearly,
		// and report a zero-byte read so the caller calls Read again.
		r.cleanUpResponse()
		r.failures++
		time.Sleep(5 * time.Duration(r.failures) * time.Second)
		return 0, nil
	} else if err != nil {
		r.cleanUpResponse()
		return 0, err
	}
	if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
		// 416 with everything already read means the transfer is complete.
		r.cleanUpResponse()
		return 0, io.EOF
	} else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
		// We asked for a range but did not get a partial-content reply.
		r.cleanUpResponse()
		return 0, fmt.Errorf("the server doesn't support byte ranges")
	}
	if r.totalSize == 0 {
		r.totalSize = r.currentResponse.ContentLength
	} else if r.totalSize <= 0 {
		r.cleanUpResponse()
		return 0, fmt.Errorf("failed to auto detect content length")
	}
	n, err = r.currentResponse.Body.Read(p)
	r.lastRange += int64(n)
	if err != nil {
		r.cleanUpResponse()
	}
	if err != nil && err != io.EOF {
		// Non-EOF read errors are cleared so the next Read resumes from
		// lastRange instead of aborting the transfer.
		logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
		err = nil
	}
	return n, err
}

// Close releases the current response (if any) and drops the client and
// request references so further Reads fail fast.
func (r *resumableRequestReader) Close() error {
	r.cleanUpResponse()
	r.client = nil
	r.request = nil
	return nil
}

func (r
*resumableRequestReader) cleanUpResponse() { if r.currentResponse != nil { r.currentResponse.Body.Close() r.currentResponse = nil } } docker-1.10.3/pkg/httputils/resumablerequestreader_test.go000066400000000000000000000153201267010174400240300ustar00rootroot00000000000000package httputils import ( "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "strings" "testing" ) func TestResumableRequestHeaderSimpleErrors(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "Hello, world !") })) defer ts.Close() client := &http.Client{} var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } expectedError := "client and request can't be nil\n" resreq := &resumableRequestReader{} _, err = resreq.Read([]byte{}) if err == nil || err.Error() != expectedError { t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) } resreq = &resumableRequestReader{ client: client, request: req, totalSize: -1, } expectedError = "failed to auto detect content length" _, err = resreq.Read([]byte{}) if err == nil || err.Error() != expectedError { t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) } } // Not too much failures, bails out after some wait func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { client := &http.Client{} var badReq *http.Request badReq, err := http.NewRequest("GET", "I'm not an url", nil) if err != nil { t.Fatal(err) } resreq := &resumableRequestReader{ client: client, request: badReq, failures: 0, maxFailures: 2, } read, err := resreq.Read([]byte{}) if err != nil || read != 0 { t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) } } // Too much failures, returns the error func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { client := &http.Client{} var badReq *http.Request badReq, err := http.NewRequest("GET", "I'm not an url", nil) if err != nil { t.Fatal(err) } resreq := 
&resumableRequestReader{ client: client, request: badReq, failures: 0, maxFailures: 1, } defer resreq.Close() expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` read, err := resreq.Read([]byte{}) if err == nil || err.Error() != expectedError || read != 0 { t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) } } type errorReaderCloser struct{} func (errorReaderCloser) Close() error { return nil } func (errorReaderCloser) Read(p []byte) (n int, err error) { return 0, fmt.Errorf("A error occured") } // If a an unknown error is encountered, return 0, nil and log it func TestResumableRequestReaderWithReadError(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", "", nil) if err != nil { t.Fatal(err) } client := &http.Client{} response := &http.Response{ Status: "500 Internal Server", StatusCode: 500, ContentLength: 0, Close: true, Body: errorReaderCloser{}, } resreq := &resumableRequestReader{ client: client, request: req, currentResponse: response, lastRange: 1, totalSize: 1, } defer resreq.Close() buf := make([]byte, 1) read, err := resreq.Read(buf) if err != nil { t.Fatal(err) } if read != 0 { t.Fatalf("Expected to have read nothing, but read %v", read) } } func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", "", nil) if err != nil { t.Fatal(err) } client := &http.Client{} response := &http.Response{ Status: "416 Requested Range Not Satisfiable", StatusCode: 416, ContentLength: 0, Close: true, Body: ioutil.NopCloser(strings.NewReader("")), } resreq := &resumableRequestReader{ client: client, request: req, currentResponse: response, lastRange: 1, totalSize: 1, } defer resreq.Close() buf := make([]byte, 1) _, err = resreq.Read(buf) if err == nil || err != io.EOF { t.Fatalf("Expected an io.EOF error, got %v", err) } } func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("Range") == "" { t.Fatalf("Expected a Range HTTP header, got nothing") } })) defer ts.Close() var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } client := &http.Client{} resreq := &resumableRequestReader{ client: client, request: req, lastRange: 1, } defer resreq.Close() buf := make([]byte, 2) _, err = resreq.Read(buf) if err == nil || err.Error() != "the server doesn't support byte ranges" { t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err) } } func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { srvtxt := "some response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, srvtxt) })) defer ts.Close() var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } client := &http.Client{} retries := uint32(5) resreq := ResumableRequestReader(client, req, retries, 0) defer resreq.Close() data, err := ioutil.ReadAll(resreq) if err != nil { t.Fatal(err) } resstr := strings.TrimSuffix(string(data), "\n") if resstr != srvtxt { t.Errorf("resstr != srvtxt") } } func TestResumableRequestReader(t *testing.T) { srvtxt := "some response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, srvtxt) })) defer ts.Close() var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } client := &http.Client{} retries := uint32(5) imgSize := int64(len(srvtxt)) resreq := ResumableRequestReader(client, req, retries, imgSize) defer resreq.Close() data, err := ioutil.ReadAll(resreq) if err != nil { t.Fatal(err) } resstr := strings.TrimSuffix(string(data), "\n") if resstr != srvtxt { t.Errorf("resstr != srvtxt") } } func TestResumableRequestReaderWithInitialResponse(t *testing.T) { srvtxt := "some 
response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, srvtxt) })) defer ts.Close() var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } client := &http.Client{} retries := uint32(5) imgSize := int64(len(srvtxt)) res, err := client.Do(req) if err != nil { t.Fatal(err) } resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) defer resreq.Close() data, err := ioutil.ReadAll(resreq) if err != nil { t.Fatal(err) } resstr := strings.TrimSuffix(string(data), "\n") if resstr != srvtxt { t.Errorf("resstr != srvtxt") } } docker-1.10.3/pkg/idtools/000077500000000000000000000000001267010174400153035ustar00rootroot00000000000000docker-1.10.3/pkg/idtools/idtools.go000066400000000000000000000134061267010174400173130ustar00rootroot00000000000000package idtools import ( "bufio" "fmt" "os" "sort" "strconv" "strings" ) // IDMap contains a single entry for user namespace range remapping. An array // of IDMap entries represents the structure that will be provided to the Linux // kernel for creating a user namespace. type IDMap struct { ContainerID int `json:"container_id"` HostID int `json:"host_id"` Size int `json:"size"` } type subIDRange struct { Start int Length int } type ranges []subIDRange func (e ranges) Len() int { return len(e) } func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( subuidFileName string = "/etc/subuid" subgidFileName string = "/etc/subgid" ) // MkdirAllAs creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. 
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, true, true) } // MkdirAllNewAs creates a directory (include any along the path) and then modifies // ownership ONLY of newly created directories to the requested uid/gid. If the // directories along the path exist, no change of ownership will be performed func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, true, false) } // MkdirAs creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, false, true) } // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. // If the maps are empty, then the root uid/gid will default to "real" 0/0 func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { var uid, gid int if uidMap != nil { xUID, err := ToHost(0, uidMap) if err != nil { return -1, -1, err } uid = xUID } if gidMap != nil { xGID, err := ToHost(0, gidMap) if err != nil { return -1, -1, err } gid = xGID } return uid, gid, nil } // ToContainer takes an id mapping, and uses it to translate a // host ID to the remapped ID. If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id func ToContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } for _, m := range idMap { if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { contID := m.ContainerID + (hostID - m.HostID) return contID, nil } } return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } // ToHost takes an id mapping and a remapped ID, and translates the // ID to the mapped host ID. 
If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id
strings.TrimSpace(s.Text()) if text == "" || strings.HasPrefix(text, "#") { continue } parts := strings.Split(text, ":") if len(parts) != 3 { return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) } if parts[0] == username { // return the first entry for a user; ignores potential for multiple ranges per user startid, err := strconv.Atoi(parts[1]) if err != nil { return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) } length, err := strconv.Atoi(parts[2]) if err != nil { return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) } rangeList = append(rangeList, subIDRange{startid, length}) } } return rangeList, nil } docker-1.10.3/pkg/idtools/idtools_unix.go000066400000000000000000000033741267010174400203610ustar00rootroot00000000000000// +build !windows package idtools import ( "os" "path/filepath" "github.com/docker/docker/pkg/system" ) func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't // chown the full directory path if it exists var paths []string if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { paths = []string{path} } else if err == nil && chownExisting { if err := os.Chown(path, ownerUID, ownerGID); err != nil { return err } // short-circuit--we were called with an existing directory and chown was requested return nil } else if err == nil { // nothing to do; directory path fully exists already and chown was NOT requested return nil } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { return err } } return nil } docker-1.10.3/pkg/idtools/idtools_unix_test.go000066400000000000000000000161741267010174400214220ustar00rootroot00000000000000// +build !windows package idtools import ( "fmt" "io/ioutil" "os" "path/filepath" "syscall" "testing" ) type node struct { uid int gid int } func TestMkdirAllAs(t *testing.T) { dirName, err := ioutil.TempDir("", "mkdirall") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) } defer os.RemoveAll(dirName) testTree := map[string]node{ "usr": {0, 0}, "usr/bin": {0, 0}, "lib": {33, 33}, "lib/x86_64": {45, 45}, "lib/x86_64/share": {1, 1}, } if err := buildTree(dirName, testTree); err != nil { t.Fatal(err) } // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid if err := 
MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { t.Fatal(err) } testTree["usr/share"] = node{99, 99} verifyTree, err := readTree(dirName, "") if err != nil { t.Fatal(err) } if err := compareTrees(testTree, verifyTree); err != nil { t.Fatal(err) } // test 2-deep new directories--both should be owned by the uid/gid pair if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { t.Fatal(err) } testTree["lib/some"] = node{101, 101} testTree["lib/some/other"] = node{101, 101} verifyTree, err = readTree(dirName, "") if err != nil { t.Fatal(err) } if err := compareTrees(testTree, verifyTree); err != nil { t.Fatal(err) } // test a directory that already exists; should be chowned, but nothing else if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { t.Fatal(err) } testTree["usr"] = node{102, 102} verifyTree, err = readTree(dirName, "") if err != nil { t.Fatal(err) } if err := compareTrees(testTree, verifyTree); err != nil { t.Fatal(err) } } func TestMkdirAllNewAs(t *testing.T) { dirName, err := ioutil.TempDir("", "mkdirnew") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) } defer os.RemoveAll(dirName) testTree := map[string]node{ "usr": {0, 0}, "usr/bin": {0, 0}, "lib": {33, 33}, "lib/x86_64": {45, 45}, "lib/x86_64/share": {1, 1}, } if err := buildTree(dirName, testTree); err != nil { t.Fatal(err) } // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { t.Fatal(err) } testTree["usr/share"] = node{99, 99} verifyTree, err := readTree(dirName, "") if err != nil { t.Fatal(err) } if err := compareTrees(testTree, verifyTree); err != nil { t.Fatal(err) } // test 2-deep new directories--both should be owned by the uid/gid pair if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { t.Fatal(err) } 
testTree["lib/some"] = node{101, 101} testTree["lib/some/other"] = node{101, 101} verifyTree, err = readTree(dirName, "") if err != nil { t.Fatal(err) } if err := compareTrees(testTree, verifyTree); err != nil { t.Fatal(err) } // test a directory that already exists; should NOT be chowned if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { t.Fatal(err) } verifyTree, err = readTree(dirName, "") if err != nil { t.Fatal(err) } if err := compareTrees(testTree, verifyTree); err != nil { t.Fatal(err) } } func TestMkdirAs(t *testing.T) { dirName, err := ioutil.TempDir("", "mkdir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) } defer os.RemoveAll(dirName) testTree := map[string]node{ "usr": {0, 0}, } if err := buildTree(dirName, testTree); err != nil { t.Fatal(err) } // test a directory that already exists; should just chown to the requested uid/gid if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { t.Fatal(err) } testTree["usr"] = node{99, 99} verifyTree, err := readTree(dirName, "") if err != nil { t.Fatal(err) } if err := compareTrees(testTree, verifyTree); err != nil { t.Fatal(err) } // create a subdir under a dir which doesn't exist--should fail if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") } // create a subdir under an existing dir; should only change the ownership of the new subdir if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { t.Fatal(err) } testTree["usr/bin"] = node{102, 102} verifyTree, err = readTree(dirName, "") if err != nil { t.Fatal(err) } if err := compareTrees(testTree, verifyTree); err != nil { t.Fatal(err) } } func buildTree(base string, tree map[string]node) error { for path, node := range tree { fullPath := filepath.Join(base, path) if err := os.MkdirAll(fullPath, 0755); err != nil { 
return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) } if err := os.Chown(fullPath, node.uid, node.gid); err != nil { return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) } } return nil } func readTree(base, root string) (map[string]node, error) { tree := make(map[string]node) dirInfos, err := ioutil.ReadDir(base) if err != nil { return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) } for _, info := range dirInfos { s := &syscall.Stat_t{} if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) } tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} if info.IsDir() { // read the subdirectory subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) if err != nil { return nil, err } for path, nodeinfo := range subtree { tree[path] = nodeinfo } } } return tree, nil } func compareTrees(left, right map[string]node) error { if len(left) != len(right) { return fmt.Errorf("Trees aren't the same size") } for path, nodeLeft := range left { if nodeRight, ok := right[path]; ok { if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { // mismatch return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) } continue } return fmt.Errorf("right tree didn't contain path %q", path) } return nil } func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { tmpDir, err := ioutil.TempDir("", "parsesubid") if err != nil { t.Fatal(err) } fnamePath := filepath.Join(tmpDir, "testsubuid") fcontent := `tss:100000:65536 # empty default subuid/subgid file dockremap:231072:65536` if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil { t.Fatal(err) } ranges, err := parseSubidFile(fnamePath, "dockremap") if err != nil { t.Fatal(err) } if len(ranges) != 1 { 
t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges)) } if ranges[0].Start != 231072 { t.Fatalf("wanted 231072, got %d instead", ranges[0].Start) } if ranges[0].Length != 65536 { t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) } } docker-1.10.3/pkg/idtools/idtools_windows.go000066400000000000000000000006441267010174400210650ustar00rootroot00000000000000// +build windows package idtools import ( "os" "github.com/docker/docker/pkg/system" ) // Platforms such as Windows do not support the UID/GID concept. So make this // just a wrapper around system.MkdirAll. func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { return err } return nil } docker-1.10.3/pkg/idtools/usergroupadd_linux.go000066400000000000000000000112261267010174400215570ustar00rootroot00000000000000package idtools import ( "fmt" "os/exec" "path/filepath" "strings" "syscall" ) // add a user and/or group to Linux /etc/passwd, /etc/group using standard // Linux distribution commands: // adduser --uid --shell /bin/login --no-create-home --disabled-login --ingroup // useradd -M -u -s /bin/nologin -N -g // addgroup --gid // groupadd -g const baseUID int = 10000 const baseGID int = 10000 const idMAX int = 65534 var ( userCommand string groupCommand string cmdTemplates = map[string]string{ "adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s", "useradd": "-M -u %d -s /bin/false -N -g %s %s", "addgroup": "--gid %d %s", "groupadd": "-g %d %s", } ) func init() { // set up which commands are used for adding users/groups dependent on distro if _, err := resolveBinary("adduser"); err == nil { userCommand = "adduser" } else if _, err := resolveBinary("useradd"); err == nil { userCommand = "useradd" } if _, err := resolveBinary("addgroup"); err == nil { groupCommand = "addgroup" } else if _, err := resolveBinary("groupadd"); err == nil { 
groupCommand = "groupadd" } } func resolveBinary(binname string) (string, error) { binaryPath, err := exec.LookPath(binname) if err != nil { return "", err } resolvedPath, err := filepath.EvalSymlinks(binaryPath) if err != nil { return "", err } //only return no error if the final resolved binary basename //matches what was searched for if filepath.Base(resolvedPath) == binname { return resolvedPath, nil } return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) } // AddNamespaceRangesUser takes a name and finds an unused uid, gid pair // and calls the appropriate helper function to add the group and then // the user to the group in /etc/group and /etc/passwd respectively. // This new user's /etc/sub{uid,gid} ranges will be used for user namespace // mapping ranges in containers. func AddNamespaceRangesUser(name string) (int, int, error) { // Find unused uid, gid pair uid, err := findUnusedUID(baseUID) if err != nil { return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) } gid, err := findUnusedGID(baseGID) if err != nil { return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) } // First add the group that we will use if err := addGroup(name, gid); err != nil { return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) } // Add the user as a member of the group if err := addUser(name, uid, name); err != nil { return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) } return uid, gid, nil } func addUser(userName string, uid int, groupName string) error { if userCommand == "" { return fmt.Errorf("Cannot add user; no useradd/adduser binary found") } args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) return execAddCmd(userCommand, args) } func addGroup(groupName string, gid int) error { if groupCommand == "" { return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") } args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) // only 
error out if the error isn't that the group already exists // if the group exists then our needs are already met if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { return err } return nil } func execAddCmd(cmd, args string) error { execCmd := exec.Command(cmd, strings.Split(args, " ")...) out, err := execCmd.CombinedOutput() if err != nil { return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out)) } return nil } func findUnusedUID(startUID int) (int, error) { return findUnused("passwd", startUID) } func findUnusedGID(startGID int) (int, error) { return findUnused("group", startGID) } func findUnused(file string, id int) (int, error) { for { cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id) cmd := exec.Command("sh", "-c", cmdStr) if err := cmd.Run(); err != nil { // if a non-zero return code occurs, then we know the ID was not found // and is usable if exiterr, ok := err.(*exec.ExitError); ok { // The program has exited with an exit code != 0 if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { if status.ExitStatus() == 1 { //no match, we can use this ID return id, nil } } } return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err) } id++ if id > idMAX { return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file) } } } docker-1.10.3/pkg/idtools/usergroupadd_unsupported.go000066400000000000000000000006241267010174400230100ustar00rootroot00000000000000// +build !linux package idtools import "fmt" // AddNamespaceRangesUser takes a name and finds an unused uid, gid pair // and calls the appropriate helper function to add the group and then // the user to the group in /etc/group and /etc/passwd respectively. 
func AddNamespaceRangesUser(name string) (int, int, error) { return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") } docker-1.10.3/pkg/integration/000077500000000000000000000000001267010174400161515ustar00rootroot00000000000000docker-1.10.3/pkg/integration/checker/000077500000000000000000000000001267010174400175555ustar00rootroot00000000000000docker-1.10.3/pkg/integration/checker/checker.go000066400000000000000000000030141267010174400215060ustar00rootroot00000000000000// Package checker provide Docker specific implementations of the go-check.Checker interface. package checker import ( "github.com/go-check/check" "github.com/vdemeester/shakers" ) // As a commodity, we bring all check.Checker variables into the current namespace to avoid having // to think about check.X versus checker.X. var ( DeepEquals = check.DeepEquals ErrorMatches = check.ErrorMatches FitsTypeOf = check.FitsTypeOf HasLen = check.HasLen Implements = check.Implements IsNil = check.IsNil Matches = check.Matches Not = check.Not NotNil = check.NotNil PanicMatches = check.PanicMatches Panics = check.Panics Contains = shakers.Contains ContainsAny = shakers.ContainsAny Count = shakers.Count Equals = shakers.Equals EqualFold = shakers.EqualFold False = shakers.False GreaterOrEqualThan = shakers.GreaterOrEqualThan GreaterThan = shakers.GreaterThan HasPrefix = shakers.HasPrefix HasSuffix = shakers.HasSuffix Index = shakers.Index IndexAny = shakers.IndexAny IsAfter = shakers.IsAfter IsBefore = shakers.IsBefore IsBetween = shakers.IsBetween IsLower = shakers.IsLower IsUpper = shakers.IsUpper LessOrEqualThan = shakers.LessOrEqualThan LessThan = shakers.LessThan TimeEquals = shakers.TimeEquals True = shakers.True TimeIgnore = shakers.TimeIgnore ) docker-1.10.3/pkg/integration/dockerCmd_utils.go000066400000000000000000000056651267010174400216270ustar00rootroot00000000000000package integration import ( "fmt" "os/exec" "strings" "time" "github.com/go-check/check" ) var execCommand = 
exec.Command // DockerCmdWithError executes a docker command that is supposed to fail and returns // the output, the exit code and the error. func DockerCmdWithError(dockerBinary string, args ...string) (string, int, error) { return RunCommandWithOutput(execCommand(dockerBinary, args...)) } // DockerCmdWithStdoutStderr executes a docker command and returns the content of the // stdout, stderr and the exit code. If a check.C is passed, it will fail and stop tests // if the error is not nil. func DockerCmdWithStdoutStderr(dockerBinary string, c *check.C, args ...string) (string, string, int) { stdout, stderr, status, err := RunCommandWithStdoutStderr(execCommand(dockerBinary, args...)) if c != nil { c.Assert(err, check.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(args, " "), stderr, err)) } return stdout, stderr, status } // DockerCmd executes a docker command and returns the output and the exit code. If the // command returns an error, it will fail and stop the tests. func DockerCmd(dockerBinary string, c *check.C, args ...string) (string, int) { out, status, err := RunCommandWithOutput(execCommand(dockerBinary, args...)) c.Assert(err, check.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err)) return out, status } // DockerCmdWithTimeout executes a docker command with a timeout, and returns the output, // the exit code and the error (if any). func DockerCmdWithTimeout(dockerBinary string, timeout time.Duration, args ...string) (string, int, error) { out, status, err := RunCommandWithOutputAndTimeout(execCommand(dockerBinary, args...), timeout) if err != nil { return out, status, fmt.Errorf("%q failed with errors: %v : %q", strings.Join(args, " "), err, out) } return out, status, err } // DockerCmdInDir executes a docker command in a directory and returns the output, the // exit code and the error (if any). 
func DockerCmdInDir(dockerBinary string, path string, args ...string) (string, int, error) { dockerCommand := execCommand(dockerBinary, args...) dockerCommand.Dir = path out, status, err := RunCommandWithOutput(dockerCommand) if err != nil { return out, status, fmt.Errorf("%q failed with errors: %v : %q", strings.Join(args, " "), err, out) } return out, status, err } // DockerCmdInDirWithTimeout executes a docker command in a directory with a timeout and // returns the output, the exit code and the error (if any). func DockerCmdInDirWithTimeout(dockerBinary string, timeout time.Duration, path string, args ...string) (string, int, error) { dockerCommand := execCommand(dockerBinary, args...) dockerCommand.Dir = path out, status, err := RunCommandWithOutputAndTimeout(dockerCommand, timeout) if err != nil { return out, status, fmt.Errorf("%q failed with errors: %v : %q", strings.Join(args, " "), err, out) } return out, status, err } docker-1.10.3/pkg/integration/dockerCmd_utils_test.go000066400000000000000000000304541267010174400226600ustar00rootroot00000000000000package integration import ( "fmt" "os" "os/exec" "testing" "io/ioutil" "strings" "time" "github.com/go-check/check" ) const dockerBinary = "docker" // Setup go-check for this test func Test(t *testing.T) { check.TestingT(t) } func init() { check.Suite(&DockerCmdSuite{}) } type DockerCmdSuite struct{} // Fake the exec.Command to use our mock. func (s *DockerCmdSuite) SetUpTest(c *check.C) { execCommand = fakeExecCommand } // And bring it back to normal after the test. 
func (s *DockerCmdSuite) TearDownTest(c *check.C) { execCommand = exec.Command } // DockerCmdWithError tests func (s *DockerCmdSuite) TestDockerCmdWithError(c *check.C) { cmds := []struct { binary string args []string expectedOut string expectedExitCode int expectedError error }{ { "doesnotexists", []string{}, "Command doesnotexists not found.", 1, fmt.Errorf("exit status 1"), }, { dockerBinary, []string{"an", "error"}, "an error has occurred", 1, fmt.Errorf("exit status 1"), }, { dockerBinary, []string{"an", "exitCode", "127"}, "an error has occurred with exitCode 127", 127, fmt.Errorf("exit status 127"), }, { dockerBinary, []string{"run", "-ti", "ubuntu", "echo", "hello"}, "hello", 0, nil, }, } for _, cmd := range cmds { out, exitCode, error := DockerCmdWithError(cmd.binary, cmd.args...) c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) if cmd.expectedError != nil { c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) } else { c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) } } } // DockerCmdWithStdoutStderr tests type dockerCmdWithStdoutStderrErrorSuite struct{} func (s *dockerCmdWithStdoutStderrErrorSuite) Test(c *check.C) { // Should fail, the test too DockerCmdWithStdoutStderr(dockerBinary, c, "an", "error") } type dockerCmdWithStdoutStderrSuccessSuite struct{} func (s *dockerCmdWithStdoutStderrSuccessSuite) Test(c *check.C) { stdout, stderr, exitCode := DockerCmdWithStdoutStderr(dockerBinary, c, "run", "-ti", "ubuntu", "echo", "hello") c.Assert(stdout, 
check.Equals, "hello") c.Assert(stderr, check.Equals, "") c.Assert(exitCode, check.Equals, 0) } func (s *DockerCmdSuite) TestDockerCmdWithStdoutStderrError(c *check.C) { // Run error suite, should fail. output := String{} result := check.Run(&dockerCmdWithStdoutStderrErrorSuite{}, &check.RunConf{Output: &output}) c.Check(result.Succeeded, check.Equals, 0) c.Check(result.Failed, check.Equals, 1) } func (s *DockerCmdSuite) TestDockerCmdWithStdoutStderrSuccess(c *check.C) { // Run error suite, should fail. output := String{} result := check.Run(&dockerCmdWithStdoutStderrSuccessSuite{}, &check.RunConf{Output: &output}) c.Check(result.Succeeded, check.Equals, 1) c.Check(result.Failed, check.Equals, 0) } // DockerCmd tests type dockerCmdErrorSuite struct{} func (s *dockerCmdErrorSuite) Test(c *check.C) { // Should fail, the test too DockerCmd(dockerBinary, c, "an", "error") } type dockerCmdSuccessSuite struct{} func (s *dockerCmdSuccessSuite) Test(c *check.C) { stdout, exitCode := DockerCmd(dockerBinary, c, "run", "-ti", "ubuntu", "echo", "hello") c.Assert(stdout, check.Equals, "hello") c.Assert(exitCode, check.Equals, 0) } func (s *DockerCmdSuite) TestDockerCmdError(c *check.C) { // Run error suite, should fail. output := String{} result := check.Run(&dockerCmdErrorSuite{}, &check.RunConf{Output: &output}) c.Check(result.Succeeded, check.Equals, 0) c.Check(result.Failed, check.Equals, 1) } func (s *DockerCmdSuite) TestDockerCmdSuccess(c *check.C) { // Run error suite, should fail. 
output := String{} result := check.Run(&dockerCmdSuccessSuite{}, &check.RunConf{Output: &output}) c.Check(result.Succeeded, check.Equals, 1) c.Check(result.Failed, check.Equals, 0) } // DockerCmdWithTimeout tests func (s *DockerCmdSuite) TestDockerCmdWithTimeout(c *check.C) { cmds := []struct { binary string args []string timeout time.Duration expectedOut string expectedExitCode int expectedError error }{ { "doesnotexists", []string{}, 200 * time.Millisecond, `Command doesnotexists not found.`, 1, fmt.Errorf(`"" failed with errors: exit status 1 : "Command doesnotexists not found."`), }, { dockerBinary, []string{"an", "error"}, 200 * time.Millisecond, `an error has occurred`, 1, fmt.Errorf(`"an error" failed with errors: exit status 1 : "an error has occurred"`), }, { dockerBinary, []string{"a", "command", "that", "times", "out"}, 5 * time.Millisecond, "", 0, fmt.Errorf(`"a command that times out" failed with errors: command timed out : ""`), }, { dockerBinary, []string{"run", "-ti", "ubuntu", "echo", "hello"}, 200 * time.Millisecond, "hello", 0, nil, }, } for _, cmd := range cmds { out, exitCode, error := DockerCmdWithTimeout(cmd.binary, cmd.timeout, cmd.args...) 
c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) if cmd.expectedError != nil { c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) } else { c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) } } } // DockerCmdInDir tests func (s *DockerCmdSuite) TestDockerCmdInDir(c *check.C) { tempFolder, err := ioutil.TempDir("", "test-docker-cmd-in-dir") c.Assert(err, check.IsNil) cmds := []struct { binary string args []string expectedOut string expectedExitCode int expectedError error }{ { "doesnotexists", []string{}, `Command doesnotexists not found.`, 1, fmt.Errorf(`"dir:%s" failed with errors: exit status 1 : "Command doesnotexists not found."`, tempFolder), }, { dockerBinary, []string{"an", "error"}, `an error has occurred`, 1, fmt.Errorf(`"dir:%s an error" failed with errors: exit status 1 : "an error has occurred"`, tempFolder), }, { dockerBinary, []string{"run", "-ti", "ubuntu", "echo", "hello"}, "hello", 0, nil, }, } for _, cmd := range cmds { // We prepend the arguments with dir:thefolder.. the fake command will check // that the current workdir is the same as the one we are passing. args := append([]string{"dir:" + tempFolder}, cmd.args...) out, exitCode, error := DockerCmdInDir(cmd.binary, tempFolder, args...) 
c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) if cmd.expectedError != nil { c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) } else { c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) } } } // DockerCmdInDirWithTimeout tests func (s *DockerCmdSuite) TestDockerCmdInDirWithTimeout(c *check.C) { tempFolder, err := ioutil.TempDir("", "test-docker-cmd-in-dir") c.Assert(err, check.IsNil) cmds := []struct { binary string args []string timeout time.Duration expectedOut string expectedExitCode int expectedError error }{ { "doesnotexists", []string{}, 200 * time.Millisecond, `Command doesnotexists not found.`, 1, fmt.Errorf(`"dir:%s" failed with errors: exit status 1 : "Command doesnotexists not found."`, tempFolder), }, { dockerBinary, []string{"an", "error"}, 200 * time.Millisecond, `an error has occurred`, 1, fmt.Errorf(`"dir:%s an error" failed with errors: exit status 1 : "an error has occurred"`, tempFolder), }, { dockerBinary, []string{"a", "command", "that", "times", "out"}, 5 * time.Millisecond, "", 0, fmt.Errorf(`"dir:%s a command that times out" failed with errors: command timed out : ""`, tempFolder), }, { dockerBinary, []string{"run", "-ti", "ubuntu", "echo", "hello"}, 200 * time.Millisecond, "hello", 0, nil, }, } for _, cmd := range cmds { // We prepend the arguments with dir:thefolder.. the fake command will check // that the current workdir is the same as the one we are passing. args := append([]string{"dir:" + tempFolder}, cmd.args...) 
out, exitCode, error := DockerCmdInDirWithTimeout(cmd.binary, cmd.timeout, tempFolder, args...) c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) if cmd.expectedError != nil { c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) } else { c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) } } } // Helpers :) // Type implementing the io.Writer interface for analyzing output. type String struct { value string } // The only function required by the io.Writer interface. Will append // written data to the String.value string. func (s *String) Write(p []byte) (n int, err error) { s.value += string(p) return len(p), nil } // Helper function that mock the exec.Command call (and call the test binary) func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} return cmd } func TestHelperProcess(t *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } args := os.Args // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- cmd, args := args[3], args[4:] // Handle the case where args[0] is dir:... 
if len(args) > 0 && strings.HasPrefix(args[0], "dir:") { expectedCwd := args[0][4:] if len(args) > 1 { args = args[1:] } cwd, err := os.Getwd() if err != nil { fmt.Fprintf(os.Stderr, "Failed to get workingdir: %v", err) os.Exit(1) } // This checks that the given path is the same as the currend working dire if expectedCwd != cwd { fmt.Fprintf(os.Stderr, "Current workdir should be %q, but is %q", expectedCwd, cwd) } } switch cmd { case dockerBinary: argsStr := strings.Join(args, " ") switch argsStr { case "an exitCode 127": fmt.Fprintf(os.Stderr, "an error has occurred with exitCode 127") os.Exit(127) case "an error": fmt.Fprintf(os.Stderr, "an error has occurred") os.Exit(1) case "a command that times out": time.Sleep(10 * time.Second) fmt.Fprintf(os.Stdout, "too long, should be killed") // A random exit code (that should never happened in tests) os.Exit(7) case "run -ti ubuntu echo hello": fmt.Fprintf(os.Stdout, "hello") default: fmt.Fprintf(os.Stdout, "no arguments") } default: fmt.Fprintf(os.Stderr, "Command %s not found.", cmd) os.Exit(1) } // some code here to check arguments perhaps? os.Exit(0) } docker-1.10.3/pkg/integration/utils.go000066400000000000000000000237621267010174400176520ustar00rootroot00000000000000package integration import ( "archive/tar" "bytes" "encoding/json" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "strings" "syscall" "time" "github.com/docker/docker/pkg/stringutils" ) // GetExitCode returns the ExitStatus of the specified error if its type is // exec.ExitError, returns 0 and an error otherwise. func GetExitCode(err error) (int, error) { exitCode := 0 if exiterr, ok := err.(*exec.ExitError); ok { if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { return procExit.ExitStatus(), nil } } return exitCode, fmt.Errorf("failed to get exit code") } // ProcessExitCode process the specified error and returns the exit status code // if the error was of type exec.ExitError, returns nothing otherwise. 
func ProcessExitCode(err error) (exitCode int) { if err != nil { var exiterr error if exitCode, exiterr = GetExitCode(err); exiterr != nil { // TODO: Fix this so we check the error's text. // we've failed to retrieve exit code, so we set it to 127 exitCode = 127 } } return } // IsKilled process the specified error and returns whether the process was killed or not. func IsKilled(err error) bool { if exitErr, ok := err.(*exec.ExitError); ok { status, ok := exitErr.Sys().(syscall.WaitStatus) if !ok { return false } // status.ExitStatus() is required on Windows because it does not // implement Signal() nor Signaled(). Just check it had a bad exit // status could mean it was killed (and in tests we do kill) return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 } return false } // RunCommandWithOutput runs the specified command and returns the combined output (stdout/stderr) // with the exitCode different from 0 and the error if something bad happened func RunCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { exitCode = 0 out, err := cmd.CombinedOutput() exitCode = ProcessExitCode(err) output = string(out) return } // RunCommandWithStdoutStderr runs the specified command and returns stdout and stderr separately // with the exitCode different from 0 and the error if something bad happened func RunCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { var ( stderrBuffer, stdoutBuffer bytes.Buffer ) exitCode = 0 cmd.Stderr = &stderrBuffer cmd.Stdout = &stdoutBuffer err = cmd.Run() exitCode = ProcessExitCode(err) stdout = stdoutBuffer.String() stderr = stderrBuffer.String() return } // RunCommandWithOutputForDuration runs the specified command "timeboxed" by the specified duration. // If the process is still running when the timebox is finished, the process will be killed and . 
// It will returns the output with the exitCode different from 0 and the error if something bad happened // and a boolean whether it has been killed or not. func RunCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) { var outputBuffer bytes.Buffer if cmd.Stdout != nil { err = errors.New("cmd.Stdout already set") return } cmd.Stdout = &outputBuffer if cmd.Stderr != nil { err = errors.New("cmd.Stderr already set") return } cmd.Stderr = &outputBuffer // Start the command in the main thread.. err = cmd.Start() if err != nil { err = fmt.Errorf("Fail to start command %v : %v", cmd, err) } type exitInfo struct { exitErr error exitCode int } done := make(chan exitInfo, 1) go func() { // And wait for it to exit in the goroutine :) info := exitInfo{} info.exitErr = cmd.Wait() info.exitCode = ProcessExitCode(info.exitErr) done <- info }() select { case <-time.After(duration): killErr := cmd.Process.Kill() if killErr != nil { fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr) } timedOut = true case info := <-done: err = info.exitErr exitCode = info.exitCode } output = outputBuffer.String() return } var errCmdTimeout = fmt.Errorf("command timed out") // RunCommandWithOutputAndTimeout runs the specified command "timeboxed" by the specified duration. // It returns the output with the exitCode different from 0 and the error if something bad happened or // if the process timed out (and has been killed). func RunCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) { var timedOut bool output, exitCode, timedOut, err = RunCommandWithOutputForDuration(cmd, timeout) if timedOut { err = errCmdTimeout } return } // RunCommand runs the specified command and returns the exitCode different from 0 // and the error if something bad happened. 
func RunCommand(cmd *exec.Cmd) (exitCode int, err error) { exitCode = 0 err = cmd.Run() exitCode = ProcessExitCode(err) return } // RunCommandPipelineWithOutput runs the array of commands with the output // of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do). // It returns the final output, the exitCode different from 0 and the error // if something bad happened. func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { if len(cmds) < 2 { return "", 0, errors.New("pipeline does not have multiple cmds") } // connect stdin of each cmd to stdout pipe of previous cmd for i, cmd := range cmds { if i > 0 { prevCmd := cmds[i-1] cmd.Stdin, err = prevCmd.StdoutPipe() if err != nil { return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) } } } // start all cmds except the last for _, cmd := range cmds[:len(cmds)-1] { if err = cmd.Start(); err != nil { return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) } } var pipelineError error defer func() { // wait all cmds except the last to release their resources for _, cmd := range cmds[:len(cmds)-1] { if err := cmd.Wait(); err != nil { pipelineError = fmt.Errorf("command %s failed with error: %v", cmd.Path, err) break } } }() if pipelineError != nil { return "", 0, pipelineError } // wait on last cmd return RunCommandWithOutput(cmds[len(cmds)-1]) } // UnmarshalJSON deserialize a JSON in the given interface. func UnmarshalJSON(data []byte, result interface{}) error { if err := json.Unmarshal(data, result); err != nil { return err } return nil } // ConvertSliceOfStringsToMap converts a slices of string in a map // with the strings as key and an empty string as values. 
func ConvertSliceOfStringsToMap(input []string) map[string]struct{} { output := make(map[string]struct{}) for _, v := range input { output[v] = struct{}{} } return output } // CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory) // and returns an error if different. func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { var ( e1Entries = make(map[string]struct{}) e2Entries = make(map[string]struct{}) ) for _, e := range e1 { e1Entries[e.Name()] = struct{}{} } for _, e := range e2 { e2Entries[e.Name()] = struct{}{} } if !reflect.DeepEqual(e1Entries, e2Entries) { return fmt.Errorf("entries differ") } return nil } // ListTar lists the entries of a tar. func ListTar(f io.Reader) ([]string, error) { tr := tar.NewReader(f) var entries []string for { th, err := tr.Next() if err == io.EOF { // end of tar archive return entries, nil } if err != nil { return entries, err } entries = append(entries, th.Name) } } // RandomTmpDirPath provides a temporary path with rand string appended. // does not create or checks if it exists. func RandomTmpDirPath(s string, platform string) string { tmp := "/tmp" if platform == "windows" { tmp = os.Getenv("TEMP") } path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10))) if platform == "windows" { return filepath.FromSlash(path) // Using \ } return filepath.ToSlash(path) // Using / } // ConsumeWithSpeed reads chunkSize bytes from reader before sleeping // for interval duration. Returns total read bytes. Send true to the // stop channel to return before reading to EOF on the reader. 
func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { buffer := make([]byte, chunkSize) for { var readBytes int readBytes, err = reader.Read(buffer) n += readBytes if err != nil { if err == io.EOF { err = nil } return } select { case <-stop: return case <-time.After(interval): } } } // ParseCgroupPaths parses 'procCgroupData', which is output of '/proc//cgroup', and returns // a map which cgroup name as key and path as value. func ParseCgroupPaths(procCgroupData string) map[string]string { cgroupPaths := map[string]string{} for _, line := range strings.Split(procCgroupData, "\n") { parts := strings.Split(line, ":") if len(parts) != 3 { continue } cgroupPaths[parts[1]] = parts[2] } return cgroupPaths } // ChannelBuffer holds a chan of byte array that can be populate in a goroutine. type ChannelBuffer struct { C chan []byte } // Write implements Writer. func (c *ChannelBuffer) Write(b []byte) (int, error) { c.C <- b return len(b), nil } // Close closes the go channel. func (c *ChannelBuffer) Close() error { close(c.C) return nil } // ReadTimeout reads the content of the channel in the specified byte array with // the specified duration as timeout. func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { select { case b := <-c.C: return copy(p[0:], b), nil case <-time.After(n): return -1, fmt.Errorf("timeout reading from channel") } } // RunAtDifferentDate runs the specified function with the given time. // It changes the date of the system, which can led to weird behaviors. func RunAtDifferentDate(date time.Time, block func()) { // Layout for date. 
MMDDhhmmYYYY const timeLayout = "010203042006" // Ensure we bring time back to now now := time.Now().Format(timeLayout) dateReset := exec.Command("date", now) defer RunCommand(dateReset) dateChange := exec.Command("date", date.Format(timeLayout)) RunCommand(dateChange) block() return } docker-1.10.3/pkg/integration/utils_test.go000066400000000000000000000357631267010174400207150ustar00rootroot00000000000000package integration import ( "io" "io/ioutil" "os" "os/exec" "path" "runtime" "strings" "testing" "time" ) func TestIsKilledFalseWithNonKilledProcess(t *testing.T) { lsCmd := exec.Command("ls") lsCmd.Start() // Wait for it to finish err := lsCmd.Wait() if IsKilled(err) { t.Fatalf("Expected the ls command to not be killed, was.") } } func TestIsKilledTrueWithKilledProcess(t *testing.T) { longCmd := exec.Command("top") // Start a command longCmd.Start() // Capture the error when *dying* done := make(chan error, 1) go func() { done <- longCmd.Wait() }() // Then kill it longCmd.Process.Kill() // Get the error err := <-done if !IsKilled(err) { t.Fatalf("Expected the command to be killed, was not.") } } func TestRunCommandWithOutput(t *testing.T) { echoHelloWorldCmd := exec.Command("echo", "hello", "world") out, exitCode, err := RunCommandWithOutput(echoHelloWorldCmd) expected := "hello world\n" if out != expected || exitCode != 0 || err != nil { t.Fatalf("Expected command to output %s, got %s, %v with exitCode %v", expected, out, err, exitCode) } } func TestRunCommandWithOutputError(t *testing.T) { cmd := exec.Command("doesnotexists") out, exitCode, err := RunCommandWithOutput(cmd) expectedError := `exec: "doesnotexists": executable file not found in $PATH` if out != "" || exitCode != 127 || err == nil || err.Error() != expectedError { t.Fatalf("Expected command to output %s, got %s, %v with exitCode %v", expectedError, out, err, exitCode) } wrongLsCmd := exec.Command("ls", "-z") expected := `ls: invalid option -- 'z' Try 'ls --help' for more information. 
` out, exitCode, err = RunCommandWithOutput(wrongLsCmd) if out != expected || exitCode != 2 || err == nil || err.Error() != "exit status 2" { t.Fatalf("Expected command to output %s, got out:%s, err:%v with exitCode %v", expected, out, err, exitCode) } } func TestRunCommandWithStdoutStderr(t *testing.T) { echoHelloWorldCmd := exec.Command("echo", "hello", "world") stdout, stderr, exitCode, err := RunCommandWithStdoutStderr(echoHelloWorldCmd) expected := "hello world\n" if stdout != expected || stderr != "" || exitCode != 0 || err != nil { t.Fatalf("Expected command to output %s, got stdout:%s, stderr:%s, err:%v with exitCode %v", expected, stdout, stderr, err, exitCode) } } func TestRunCommandWithStdoutStderrError(t *testing.T) { cmd := exec.Command("doesnotexists") stdout, stderr, exitCode, err := RunCommandWithStdoutStderr(cmd) expectedError := `exec: "doesnotexists": executable file not found in $PATH` if stdout != "" || stderr != "" || exitCode != 127 || err == nil || err.Error() != expectedError { t.Fatalf("Expected command to output out:%s, stderr:%s, got stdout:%s, stderr:%s, err:%v with exitCode %v", "", "", stdout, stderr, err, exitCode) } wrongLsCmd := exec.Command("ls", "-z") expected := `ls: invalid option -- 'z' Try 'ls --help' for more information. 
` stdout, stderr, exitCode, err = RunCommandWithStdoutStderr(wrongLsCmd) if stdout != "" && stderr != expected || exitCode != 2 || err == nil || err.Error() != "exit status 2" { t.Fatalf("Expected command to output out:%s, stderr:%s, got stdout:%s, stderr:%s, err:%v with exitCode %v", "", expectedError, stdout, stderr, err, exitCode) } } func TestRunCommandWithOutputForDurationFinished(t *testing.T) { cmd := exec.Command("ls") out, exitCode, timedOut, err := RunCommandWithOutputForDuration(cmd, 50*time.Millisecond) if out == "" || exitCode != 0 || timedOut || err != nil { t.Fatalf("Expected the command to run for less 50 milliseconds and thus not time out, but did not : out:[%s], exitCode:[%d], timedOut:[%v], err:[%v]", out, exitCode, timedOut, err) } } func TestRunCommandWithOutputForDurationKilled(t *testing.T) { cmd := exec.Command("sh", "-c", "while true ; do echo 1 ; sleep .1 ; done") out, exitCode, timedOut, err := RunCommandWithOutputForDuration(cmd, 500*time.Millisecond) ones := strings.Split(out, "\n") if len(ones) != 6 || exitCode != 0 || !timedOut || err != nil { t.Fatalf("Expected the command to run for 500 milliseconds (and thus print six lines (five with 1, one empty) and time out, but did not : out:[%s], exitCode:%d, timedOut:%v, err:%v", out, exitCode, timedOut, err) } } func TestRunCommandWithOutputForDurationErrors(t *testing.T) { cmd := exec.Command("ls") cmd.Stdout = os.Stdout if _, _, _, err := RunCommandWithOutputForDuration(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stdout already set" { t.Fatalf("Expected an error as cmd.Stdout was already set, did not (err:%s).", err) } cmd = exec.Command("ls") cmd.Stderr = os.Stderr if _, _, _, err := RunCommandWithOutputForDuration(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stderr already set" { t.Fatalf("Expected an error as cmd.Stderr was already set, did not (err:%s).", err) } } func TestRunCommandWithOutputAndTimeoutFinished(t *testing.T) { cmd := exec.Command("ls") 
out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 50*time.Millisecond) if out == "" || exitCode != 0 || err != nil { t.Fatalf("Expected the command to run for less 50 milliseconds and thus not time out, but did not : out:[%s], exitCode:[%d], err:[%v]", out, exitCode, err) } } func TestRunCommandWithOutputAndTimeoutKilled(t *testing.T) { cmd := exec.Command("sh", "-c", "while true ; do echo 1 ; sleep .1 ; done") out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 500*time.Millisecond) ones := strings.Split(out, "\n") if len(ones) != 6 || exitCode != 0 || err == nil || err.Error() != "command timed out" { t.Fatalf("Expected the command to run for 500 milliseconds (and thus print six lines (five with 1, one empty) and time out with an error 'command timed out', but did not : out:[%s], exitCode:%d, err:%v", out, exitCode, err) } } func TestRunCommandWithOutputAndTimeoutErrors(t *testing.T) { cmd := exec.Command("ls") cmd.Stdout = os.Stdout if _, _, err := RunCommandWithOutputAndTimeout(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stdout already set" { t.Fatalf("Expected an error as cmd.Stdout was already set, did not (err:%s).", err) } cmd = exec.Command("ls") cmd.Stderr = os.Stderr if _, _, err := RunCommandWithOutputAndTimeout(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stderr already set" { t.Fatalf("Expected an error as cmd.Stderr was already set, did not (err:%s).", err) } } func TestRunCommand(t *testing.T) { lsCmd := exec.Command("ls") exitCode, err := RunCommand(lsCmd) if exitCode != 0 || err != nil { t.Fatalf("Expected runCommand to run the command successfully, got: exitCode:%d, err:%v", exitCode, err) } var expectedError string exitCode, err = RunCommand(exec.Command("doesnotexists")) expectedError = `exec: "doesnotexists": executable file not found in $PATH` if exitCode != 127 || err == nil || err.Error() != expectedError { t.Fatalf("Expected runCommand to run the command successfully, got: exitCode:%d, err:%v", 
exitCode, err) } wrongLsCmd := exec.Command("ls", "-z") expected := 2 expectedError = `exit status 2` exitCode, err = RunCommand(wrongLsCmd) if exitCode != expected || err == nil || err.Error() != expectedError { t.Fatalf("Expected runCommand to run the command successfully, got: exitCode:%d, err:%v", exitCode, err) } } func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) { _, _, err := RunCommandPipelineWithOutput(exec.Command("ls")) expectedError := "pipeline does not have multiple cmds" if err == nil || err.Error() != expectedError { t.Fatalf("Expected an error with %s, got err:%s", expectedError, err) } } func TestRunCommandPipelineWithOutputErrors(t *testing.T) { cmd1 := exec.Command("ls") cmd1.Stdout = os.Stdout cmd2 := exec.Command("anything really") _, _, err := RunCommandPipelineWithOutput(cmd1, cmd2) if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" { t.Fatalf("Expected an error, got %v", err) } cmdWithError := exec.Command("doesnotexists") cmdCat := exec.Command("cat") _, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat) if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in $PATH` { t.Fatalf("Expected an error, got %v", err) } } func TestRunCommandPipelineWithOutput(t *testing.T) { cmds := []*exec.Cmd{ // Print 2 characters exec.Command("echo", "-n", "11"), // Count the number or char from stdin (previous command) exec.Command("wc", "-m"), } out, exitCode, err := RunCommandPipelineWithOutput(cmds...) 
expectedOutput := "2\n" if out != expectedOutput || exitCode != 0 || err != nil { t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err) } } // Simple simple test as it is just a passthrough for json.Unmarshal func TestUnmarshalJSON(t *testing.T) { emptyResult := struct{}{} if err := UnmarshalJSON([]byte(""), &emptyResult); err == nil { t.Fatalf("Expected an error, got nothing") } result := struct{ Name string }{} if err := UnmarshalJSON([]byte(`{"name": "name"}`), &result); err != nil { t.Fatal(err) } if result.Name != "name" { t.Fatalf("Expected result.name to be 'name', was '%s'", result.Name) } } func TestConvertSliceOfStringsToMap(t *testing.T) { input := []string{"a", "b"} actual := ConvertSliceOfStringsToMap(input) for _, key := range input { if _, ok := actual[key]; !ok { t.Fatalf("Expected output to contains key %s, did not: %v", key, actual) } } } func TestCompareDirectoryEntries(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) file1 := path.Join(tmpFolder, "file1") file2 := path.Join(tmpFolder, "file2") os.Create(file1) os.Create(file2) fi1, err := os.Stat(file1) if err != nil { t.Fatal(err) } fi1bis, err := os.Stat(file1) if err != nil { t.Fatal(err) } fi2, err := os.Stat(file2) if err != nil { t.Fatal(err) } cases := []struct { e1 []os.FileInfo e2 []os.FileInfo shouldError bool }{ // Empty directories { []os.FileInfo{}, []os.FileInfo{}, false, }, // Same FileInfos { []os.FileInfo{fi1}, []os.FileInfo{fi1}, false, }, // Different FileInfos but same names { []os.FileInfo{fi1}, []os.FileInfo{fi1bis}, false, }, // Different FileInfos, different names { []os.FileInfo{fi1}, []os.FileInfo{fi2}, true, }, } for _, elt := range cases { err := CompareDirectoryEntries(elt.e1, elt.e2) if elt.shouldError && err == nil { t.Fatalf("Should have return an error, did not with %v and %v", elt.e1, elt.e2) } 
if !elt.shouldError && err != nil { t.Fatalf("Should have not returned an error, but did : %v with %v and %v", err, elt.e1, elt.e2) } } } // FIXME make an "unhappy path" test for ListTar without "panicking" :-) func TestListTar(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) // Let's create a Tar file srcFile := path.Join(tmpFolder, "src") tarFile := path.Join(tmpFolder, "src.tar") os.Create(srcFile) cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } reader, err := os.Open(tarFile) if err != nil { t.Fatal(err) } defer reader.Close() entries, err := ListTar(reader) if err != nil { t.Fatal(err) } if len(entries) != 1 && entries[0] != "src" { t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries) } } func TestRandomTmpDirPath(t *testing.T) { path := RandomTmpDirPath("something", runtime.GOOS) prefix := "/tmp/something" if runtime.GOOS == "windows" { prefix = os.Getenv("TEMP") + `\something` } expectedSize := len(prefix) + 11 if !strings.HasPrefix(path, prefix) { t.Fatalf("Expected generated path to have '%s' as prefix, got %s'", prefix, path) } if len(path) != expectedSize { t.Fatalf("Expected generated path to be %d, got %d", expectedSize, len(path)) } } func TestConsumeWithSpeed(t *testing.T) { reader := strings.NewReader("1234567890") chunksize := 2 bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil) if err != nil { t.Fatal(err) } if bytes1 != 10 { t.Fatalf("Expected to have read 10 bytes, got %d", bytes1) } } func TestConsumeWithSpeedWithStop(t *testing.T) { reader := strings.NewReader("1234567890") chunksize := 2 stopIt := make(chan bool) go func() { time.Sleep(1 * time.Millisecond) stopIt <- true }() bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt) if err != nil { t.Fatal(err) } if bytes1 != 2 { t.Fatalf("Expected to 
have read 2 bytes, got %d", bytes1) } } func TestParseCgroupPathsEmpty(t *testing.T) { cgroupMap := ParseCgroupPaths("") if len(cgroupMap) != 0 { t.Fatalf("Expected an empty map, got %v", cgroupMap) } cgroupMap = ParseCgroupPaths("\n") if len(cgroupMap) != 0 { t.Fatalf("Expected an empty map, got %v", cgroupMap) } cgroupMap = ParseCgroupPaths("something:else\nagain:here") if len(cgroupMap) != 0 { t.Fatalf("Expected an empty map, got %v", cgroupMap) } } func TestParseCgroupPaths(t *testing.T) { cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b") if len(cgroupMap) != 2 { t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap) } if value, ok := cgroupMap["memory"]; !ok || value != "/a" { t.Fatalf("Expected cgroupMap to contains an entry for 'memory' with value '/a', got %v", cgroupMap) } if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" { t.Fatalf("Expected cgroupMap to contains an entry for 'cpuset' with value '/b', got %v", cgroupMap) } } func TestChannelBufferTimeout(t *testing.T) { expected := "11" buf := &ChannelBuffer{make(chan []byte, 1)} defer buf.Close() go func() { time.Sleep(100 * time.Millisecond) io.Copy(buf, strings.NewReader(expected)) }() // Wait long enough b := make([]byte, 2) _, err := buf.ReadTimeout(b, 50*time.Millisecond) if err == nil && err.Error() != "timeout reading from channel" { t.Fatalf("Expected an error, got %s", err) } // Wait for the end :) time.Sleep(150 * time.Millisecond) } func TestChannelBuffer(t *testing.T) { expected := "11" buf := &ChannelBuffer{make(chan []byte, 1)} defer buf.Close() go func() { time.Sleep(100 * time.Millisecond) io.Copy(buf, strings.NewReader(expected)) }() // Wait long enough b := make([]byte, 2) _, err := buf.ReadTimeout(b, 200*time.Millisecond) if err != nil { t.Fatal(err) } if string(b) != expected { t.Fatalf("Expected '%s', got '%s'", expected, string(b)) } } // FIXME doesn't work // func TestRunAtDifferentDate(t *testing.T) { // var date string // // Layout for date. 
MMDDhhmmYYYY // const timeLayout = "20060102" // expectedDate := "20100201" // theDate, err := time.Parse(timeLayout, expectedDate) // if err != nil { // t.Fatal(err) // } // RunAtDifferentDate(theDate, func() { // cmd := exec.Command("date", "+%Y%M%d") // out, err := cmd.Output() // if err != nil { // t.Fatal(err) // } // date = string(out) // }) // } docker-1.10.3/pkg/ioutils/000077500000000000000000000000001267010174400153165ustar00rootroot00000000000000docker-1.10.3/pkg/ioutils/bytespipe.go000066400000000000000000000076001267010174400176540ustar00rootroot00000000000000package ioutils import ( "errors" "io" "sync" ) // maxCap is the highest capacity to use in byte slices that buffer data. const maxCap = 1e6 // blockThreshold is the minimum number of bytes in the buffer which will cause // a write to BytesPipe to block when allocating a new slice. const blockThreshold = 1e6 // ErrClosed is returned when Write is called on a closed BytesPipe. var ErrClosed = errors.New("write to closed BytesPipe") // BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). // All written data may be read at most once. Also, BytesPipe allocates // and releases new byte slices to adjust to current needs, so the buffer // won't be overgrown after peak loads. type BytesPipe struct { mu sync.Mutex wait *sync.Cond buf [][]byte // slice of byte-slices of buffered data lastRead int // index in the first slice to a read point bufLen int // length of data buffered over the slices closeErr error // error to return from next Read. set to nil if not closed. } // NewBytesPipe creates new BytesPipe, initialized by specified slice. // If buf is nil, then it will be initialized with slice which cap is 64. // buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). func NewBytesPipe(buf []byte) *BytesPipe { if cap(buf) == 0 { buf = make([]byte, 0, 64) } bp := &BytesPipe{ buf: [][]byte{buf[:0]}, } bp.wait = sync.NewCond(&bp.mu) return bp } // Write writes p to BytesPipe. 
// It can allocate new []byte slices in a process of writing. func (bp *BytesPipe) Write(p []byte) (int, error) { bp.mu.Lock() defer bp.mu.Unlock() written := 0 for { if bp.closeErr != nil { return written, ErrClosed } // write data to the last buffer b := bp.buf[len(bp.buf)-1] // copy data to the current empty allocated area n := copy(b[len(b):cap(b)], p) // increment buffered data length bp.bufLen += n // include written data in last buffer bp.buf[len(bp.buf)-1] = b[:len(b)+n] written += n // if there was enough room to write all then break if len(p) == n { break } // more data: write to the next slice p = p[n:] // block if too much data is still in the buffer for bp.bufLen >= blockThreshold { bp.wait.Wait() } // allocate slice that has twice the size of the last unless maximum reached nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) if nextCap > maxCap { nextCap = maxCap } // add new byte slice to the buffers slice and continue writing bp.buf = append(bp.buf, make([]byte, 0, nextCap)) } bp.wait.Broadcast() return written, nil } // CloseWithError causes further reads from a BytesPipe to return immediately. func (bp *BytesPipe) CloseWithError(err error) error { bp.mu.Lock() if err != nil { bp.closeErr = err } else { bp.closeErr = io.EOF } bp.wait.Broadcast() bp.mu.Unlock() return nil } // Close causes further reads from a BytesPipe to return immediately. func (bp *BytesPipe) Close() error { return bp.CloseWithError(nil) } func (bp *BytesPipe) len() int { return bp.bufLen - bp.lastRead } // Read reads bytes from BytesPipe. // Data could be read only once. func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() defer bp.mu.Unlock() if bp.len() == 0 { if bp.closeErr != nil { return 0, bp.closeErr } bp.wait.Wait() if bp.len() == 0 && bp.closeErr != nil { return 0, bp.closeErr } } for { read := copy(p, bp.buf[0][bp.lastRead:]) n += read bp.lastRead += read if bp.len() == 0 { // we have read everything. reset to the beginning. 
bp.lastRead = 0 bp.bufLen -= len(bp.buf[0]) bp.buf[0] = bp.buf[0][:0] break } // break if everything was read if len(p) == read { break } // more buffered data and more asked. read from next slice. p = p[read:] bp.lastRead = 0 bp.bufLen -= len(bp.buf[0]) bp.buf[0] = nil // throw away old slice bp.buf = bp.buf[1:] // switch to next } bp.wait.Broadcast() return } docker-1.10.3/pkg/ioutils/bytespipe_test.go000066400000000000000000000067151267010174400207210ustar00rootroot00000000000000package ioutils import ( "crypto/sha1" "encoding/hex" "math/rand" "testing" "time" ) func TestBytesPipeRead(t *testing.T) { buf := NewBytesPipe(nil) buf.Write([]byte("12")) buf.Write([]byte("34")) buf.Write([]byte("56")) buf.Write([]byte("78")) buf.Write([]byte("90")) rd := make([]byte, 4) n, err := buf.Read(rd) if err != nil { t.Fatal(err) } if n != 4 { t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) } if string(rd) != "1234" { t.Fatalf("Read %s, but must be %s", rd, "1234") } n, err = buf.Read(rd) if err != nil { t.Fatal(err) } if n != 4 { t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) } if string(rd) != "5678" { t.Fatalf("Read %s, but must be %s", rd, "5679") } n, err = buf.Read(rd) if err != nil { t.Fatal(err) } if n != 2 { t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) } if string(rd[:n]) != "90" { t.Fatalf("Read %s, but must be %s", rd, "90") } } func TestBytesPipeWrite(t *testing.T) { buf := NewBytesPipe(nil) buf.Write([]byte("12")) buf.Write([]byte("34")) buf.Write([]byte("56")) buf.Write([]byte("78")) buf.Write([]byte("90")) if string(buf.buf[0]) != "1234567890" { t.Fatalf("Buffer %s, must be %s", buf.buf, "1234567890") } } // Write and read in different speeds/chunk sizes and check valid data is read. 
func TestBytesPipeWriteRandomChunks(t *testing.T) { cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ {100, 10, 1}, {1000, 10, 5}, {1000, 100, 0}, {1000, 5, 6}, {10000, 50, 25}, } testMessage := []byte("this is a random string for testing") // random slice sizes to read and write writeChunks := []int{25, 35, 15, 20} readChunks := []int{5, 45, 20, 25} for _, c := range cases { // first pass: write directly to hash hash := sha1.New() for i := 0; i < c.iterations*c.writesPerLoop; i++ { if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { t.Fatal(err) } } expected := hex.EncodeToString(hash.Sum(nil)) // write/read through buffer buf := NewBytesPipe(nil) hash.Reset() done := make(chan struct{}) go func() { // random delay before read starts <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) for i := 0; ; i++ { p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) n, _ := buf.Read(p) if n == 0 { break } hash.Write(p[:n]) } close(done) }() for i := 0; i < c.iterations; i++ { for w := 0; w < c.writesPerLoop; w++ { buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) } } buf.Close() <-done actual := hex.EncodeToString(hash.Sum(nil)) if expected != actual { t.Fatalf("BytesPipe returned invalid data. 
Expected checksum %v, got %v", expected, actual) } } } func BenchmarkBytesPipeWrite(b *testing.B) { for i := 0; i < b.N; i++ { readBuf := make([]byte, 1024) buf := NewBytesPipe(nil) go func() { var err error for err == nil { _, err = buf.Read(readBuf) } }() for j := 0; j < 1000; j++ { buf.Write([]byte("pretty short line, because why not?")) } buf.Close() } } func BenchmarkBytesPipeRead(b *testing.B) { rd := make([]byte, 512) for i := 0; i < b.N; i++ { b.StopTimer() buf := NewBytesPipe(nil) for j := 0; j < 500; j++ { buf.Write(make([]byte, 1024)) } b.StartTimer() for j := 0; j < 1000; j++ { if n, _ := buf.Read(rd); n != 512 { b.Fatalf("Wrong number of bytes: %d", n) } } } } docker-1.10.3/pkg/ioutils/fmt.go000066400000000000000000000006771267010174400164450ustar00rootroot00000000000000package ioutils import ( "fmt" "io" ) // FprintfIfNotEmpty prints the string value if it's not empty func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { if value != "" { return fmt.Fprintf(w, format, value) } return 0, nil } // FprintfIfTrue prints the boolean value if it's true func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { if ok { return fmt.Fprintf(w, format, ok) } return 0, nil } docker-1.10.3/pkg/ioutils/fmt_test.go000066400000000000000000000006031267010174400174710ustar00rootroot00000000000000package ioutils import "testing" func TestFprintfIfNotEmpty(t *testing.T) { wc := NewWriteCounter(&NopWriter{}) n, _ := FprintfIfNotEmpty(wc, "foo%s", "") if wc.Count != 0 || n != 0 { t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) } n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") if wc.Count != 6 || n != 6 { t.Errorf("Wrong count: %v vs. %v vs. 
6", wc.Count, n) } } docker-1.10.3/pkg/ioutils/multireader.go000066400000000000000000000114141267010174400201630ustar00rootroot00000000000000package ioutils import ( "bytes" "fmt" "io" "os" ) type pos struct { idx int offset int64 } type multiReadSeeker struct { readers []io.ReadSeeker pos *pos posIdx map[io.ReadSeeker]int } func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { var tmpOffset int64 switch whence { case os.SEEK_SET: for i, rdr := range r.readers { // get size of the current reader s, err := rdr.Seek(0, os.SEEK_END) if err != nil { return -1, err } if offset > tmpOffset+s { if i == len(r.readers)-1 { rdrOffset := s + (offset - tmpOffset) if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { return -1, err } r.pos = &pos{i, rdrOffset} return offset, nil } tmpOffset += s continue } rdrOffset := offset - tmpOffset idx := i rdr.Seek(rdrOffset, os.SEEK_SET) // make sure all following readers are at 0 for _, rdr := range r.readers[i+1:] { rdr.Seek(0, os.SEEK_SET) } if rdrOffset == s && i != len(r.readers)-1 { idx++ rdrOffset = 0 } r.pos = &pos{idx, rdrOffset} return offset, nil } case os.SEEK_END: for _, rdr := range r.readers { s, err := rdr.Seek(0, os.SEEK_END) if err != nil { return -1, err } tmpOffset += s } r.Seek(tmpOffset+offset, os.SEEK_SET) return tmpOffset + offset, nil case os.SEEK_CUR: if r.pos == nil { return r.Seek(offset, os.SEEK_SET) } // Just return the current offset if offset == 0 { return r.getCurOffset() } curOffset, err := r.getCurOffset() if err != nil { return -1, err } rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) if err != nil { return -1, err } r.pos = &pos{r.posIdx[rdr], rdrOffset} return curOffset + offset, nil default: return -1, fmt.Errorf("Invalid whence: %d", whence) } return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) } func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { var rdr io.ReadSeeker var rdrOffset int64 
for i, rdr := range r.readers { offsetTo, err := r.getOffsetToReader(rdr) if err != nil { return nil, -1, err } if offsetTo > offset { rdr = r.readers[i-1] rdrOffset = offsetTo - offset break } if rdr == r.readers[len(r.readers)-1] { rdrOffset = offsetTo + offset break } } return rdr, rdrOffset, nil } func (r *multiReadSeeker) getCurOffset() (int64, error) { var totalSize int64 for _, rdr := range r.readers[:r.pos.idx+1] { if r.posIdx[rdr] == r.pos.idx { totalSize += r.pos.offset break } size, err := getReadSeekerSize(rdr) if err != nil { return -1, fmt.Errorf("error getting seeker size: %v", err) } totalSize += size } return totalSize, nil } func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { var offset int64 for _, r := range r.readers { if r == rdr { break } size, err := getReadSeekerSize(rdr) if err != nil { return -1, err } offset += size } return offset, nil } func (r *multiReadSeeker) Read(b []byte) (int, error) { if r.pos == nil { r.pos = &pos{0, 0} } bCap := int64(cap(b)) buf := bytes.NewBuffer(nil) var rdr io.ReadSeeker for _, rdr = range r.readers[r.pos.idx:] { readBytes, err := io.CopyN(buf, rdr, bCap) if err != nil && err != io.EOF { return -1, err } bCap -= readBytes if bCap == 0 { break } } rdrPos, err := rdr.Seek(0, os.SEEK_CUR) if err != nil { return -1, err } r.pos = &pos{r.posIdx[rdr], rdrPos} return buf.Read(b) } func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { // save the current position pos, err := rdr.Seek(0, os.SEEK_CUR) if err != nil { return -1, err } // get the size size, err := rdr.Seek(0, os.SEEK_END) if err != nil { return -1, err } // reset the position if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { return -1, err } return size, nil } // MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided // input readseekers. After calling this method the initial position is set to the // beginning of the first ReadSeeker. 
At the end of a ReadSeeker, Read always advances // to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. // Seek can be used over the sum of lengths of all readseekers. // // When a MultiReadSeeker is used, no Read and Seek operations should be made on // its ReadSeeker components. Also, users should make no assumption on the state // of individual readseekers while the MultiReadSeeker is used. func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { if len(readers) == 1 { return readers[0] } idx := make(map[io.ReadSeeker]int) for i, rdr := range readers { idx[rdr] = i } return &multiReadSeeker{ readers: readers, posIdx: idx, } } docker-1.10.3/pkg/ioutils/multireader_test.go000066400000000000000000000071511267010174400212250ustar00rootroot00000000000000package ioutils import ( "bytes" "fmt" "io" "io/ioutil" "os" "strings" "testing" ) func TestMultiReadSeekerReadAll(t *testing.T) { str := "hello world" s1 := strings.NewReader(str + " 1") s2 := strings.NewReader(str + " 2") s3 := strings.NewReader(str + " 3") mr := MultiReadSeeker(s1, s2, s3) expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) b, err := ioutil.ReadAll(mr) if err != nil { t.Fatal(err) } expected := "hello world 1hello world 2hello world 3" if string(b) != expected { t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) } size, err := mr.Seek(0, os.SEEK_END) if err != nil { t.Fatal(err) } if size != expectedSize { t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) } // Reset the position and read again pos, err := mr.Seek(0, os.SEEK_SET) if err != nil { t.Fatal(err) } if pos != 0 { t.Fatalf("expected position to be set to 0, got %d", pos) } b, err = ioutil.ReadAll(mr) if err != nil { t.Fatal(err) } if string(b) != expected { t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) } } func TestMultiReadSeekerReadEach(t *testing.T) { str := "hello world" s1 := strings.NewReader(str + " 1") s2 := 
strings.NewReader(str + " 2") s3 := strings.NewReader(str + " 3") mr := MultiReadSeeker(s1, s2, s3) var totalBytes int64 for i, s := range []*strings.Reader{s1, s2, s3} { sLen := int64(s.Len()) buf := make([]byte, s.Len()) expected := []byte(fmt.Sprintf("%s %d", str, i+1)) if _, err := mr.Read(buf); err != nil && err != io.EOF { t.Fatal(err) } if !bytes.Equal(buf, expected) { t.Fatalf("expected %q to be %q", string(buf), string(expected)) } pos, err := mr.Seek(0, os.SEEK_CUR) if err != nil { t.Fatalf("iteration: %d, error: %v", i+1, err) } // check that the total bytes read is the current position of the seeker totalBytes += sLen if pos != totalBytes { t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) } // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well newPos, err := mr.Seek(pos, os.SEEK_SET) if err != nil { t.Fatal(err) } if newPos != pos { t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) } } } func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { str := "hello world" s1 := strings.NewReader(str + " 1") s2 := strings.NewReader(str + " 2") s3 := strings.NewReader(str + " 3") mr := MultiReadSeeker(s1, s2, s3) buf := make([]byte, s1.Len()+3) _, err := mr.Read(buf) if err != nil { t.Fatal(err) } // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string expected := "hello world 1hel" if string(buf) != expected { t.Fatalf("expected %s to be %s", string(buf), expected) } } func TestMultiReadSeekerNegativeSeek(t *testing.T) { str := "hello world" s1 := strings.NewReader(str + " 1") s2 := strings.NewReader(str + " 2") s3 := strings.NewReader(str + " 3") mr := MultiReadSeeker(s1, s2, s3) s1Len := s1.Len() s2Len := s2.Len() s3Len := s3.Len() s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) if err != nil { t.Fatal(err) } if s != 
int64(s1Len+s2Len) { t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) } buf := make([]byte, s3Len) if _, err := mr.Read(buf); err != nil && err != io.EOF { t.Fatal(err) } expected := fmt.Sprintf("%s %d", str, 3) if string(buf) != fmt.Sprintf("%s %d", str, 3) { t.Fatalf("expected %q to be %q", string(buf), expected) } } docker-1.10.3/pkg/ioutils/readers.go000066400000000000000000000062711267010174400173000ustar00rootroot00000000000000package ioutils import ( "crypto/sha256" "encoding/hex" "io" "golang.org/x/net/context" ) type readCloserWrapper struct { io.Reader closer func() error } func (r *readCloserWrapper) Close() error { return r.closer() } // NewReadCloserWrapper returns a new io.ReadCloser. func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { return &readCloserWrapper{ Reader: r, closer: closer, } } type readerErrWrapper struct { reader io.Reader closer func() } func (r *readerErrWrapper) Read(p []byte) (int, error) { n, err := r.reader.Read(p) if err != nil { r.closer() } return n, err } // NewReaderErrWrapper returns a new io.Reader. func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { return &readerErrWrapper{ reader: r, closer: closer, } } // HashData returns the sha256 sum of src. func HashData(src io.Reader) (string, error) { h := sha256.New() if _, err := io.Copy(h, src); err != nil { return "", err } return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil } // OnEOFReader wraps a io.ReadCloser and a function // the function will run at the end of file or close the file. type OnEOFReader struct { Rc io.ReadCloser Fn func() } func (r *OnEOFReader) Read(p []byte) (n int, err error) { n, err = r.Rc.Read(p) if err == io.EOF { r.runFunc() } return } // Close closes the file and run the function. 
func (r *OnEOFReader) Close() error { err := r.Rc.Close() r.runFunc() return err } func (r *OnEOFReader) runFunc() { if fn := r.Fn; fn != nil { fn() r.Fn = nil } } // cancelReadCloser wraps an io.ReadCloser with a context for cancelling read // operations. type cancelReadCloser struct { cancel func() pR *io.PipeReader // Stream to read from pW *io.PipeWriter } // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the // context is cancelled. The returned io.ReadCloser must be closed when it is // no longer needed. func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { pR, pW := io.Pipe() // Create a context used to signal when the pipe is closed doneCtx, cancel := context.WithCancel(context.Background()) p := &cancelReadCloser{ cancel: cancel, pR: pR, pW: pW, } go func() { _, err := io.Copy(pW, in) select { case <-ctx.Done(): // If the context was closed, p.closeWithError // was already called. Calling it again would // change the error that Read returns. default: p.closeWithError(err) } in.Close() }() go func() { for { select { case <-ctx.Done(): p.closeWithError(ctx.Err()) case <-doneCtx.Done(): return } } }() return p } // Read wraps the Read method of the pipe that provides data from the wrapped // ReadCloser. func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { return p.pR.Read(buf) } // closeWithError closes the wrapper and its underlying reader. It will // cause future calls to Read to return err. func (p *cancelReadCloser) closeWithError(err error) { p.pW.CloseWithError(err) p.cancel() } // Close closes the wrapper its underlying reader. It will cause // future calls to Read to return io.EOF. 
func (p *cancelReadCloser) Close() error { p.closeWithError(io.EOF) return nil } docker-1.10.3/pkg/ioutils/readers_test.go000066400000000000000000000046171267010174400203410ustar00rootroot00000000000000package ioutils import ( "fmt" "io/ioutil" "strings" "testing" "time" "golang.org/x/net/context" ) // Implement io.Reader type errorReader struct{} func (r *errorReader) Read(p []byte) (int, error) { return 0, fmt.Errorf("Error reader always fail.") } func TestReadCloserWrapperClose(t *testing.T) { reader := strings.NewReader("A string reader") wrapper := NewReadCloserWrapper(reader, func() error { return fmt.Errorf("This will be called when closing") }) err := wrapper.Close() if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") } } func TestReaderErrWrapperReadOnError(t *testing.T) { called := false reader := &errorReader{} wrapper := NewReaderErrWrapper(reader, func() { called = true }) _, err := wrapper.Read([]byte{}) if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { t.Fatalf("readErrWrapper should returned an error") } if !called { t.Fatalf("readErrWrapper should have call the anonymous function on failure") } } func TestReaderErrWrapperRead(t *testing.T) { reader := strings.NewReader("a string reader.") wrapper := NewReaderErrWrapper(reader, func() { t.Fatalf("readErrWrapper should not have called the anonymous function") }) // Read 20 byte (should be ok with the string above) num, err := wrapper.Read(make([]byte, 20)) if err != nil { t.Fatal(err) } if num != 16 { t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) } } func TestHashData(t *testing.T) { reader := strings.NewReader("hash-me") actual, err := HashData(reader) if err != nil { t.Fatal(err) } expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" if actual != expected { t.Fatalf("Expecting %s, got %s", 
expected, actual) } } type perpetualReader struct{} func (p *perpetualReader) Read(buf []byte) (n int, err error) { for i := 0; i != len(buf); i++ { buf[i] = 'a' } return len(buf), nil } func TestCancelReadCloser(t *testing.T) { ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) for { var buf [128]byte _, err := cancelReadCloser.Read(buf[:]) if err == context.DeadlineExceeded { break } else if err != nil { t.Fatalf("got unexpected error: %v", err) } } } docker-1.10.3/pkg/ioutils/scheduler.go000066400000000000000000000001071267010174400176210ustar00rootroot00000000000000// +build !gccgo package ioutils func callSchedulerIfNecessary() { } docker-1.10.3/pkg/ioutils/scheduler_gccgo.go000066400000000000000000000003671267010174400207730ustar00rootroot00000000000000// +build gccgo package ioutils import ( "runtime" ) func callSchedulerIfNecessary() { //allow or force Go scheduler to switch context, without explicitly //forcing this will make it hang when using gccgo implementation runtime.Gosched() } docker-1.10.3/pkg/ioutils/temp_unix.go000066400000000000000000000003161267010174400176550ustar00rootroot00000000000000// +build !windows package ioutils import "io/ioutil" // TempDir on Unix systems is equivalent to ioutil.TempDir. func TempDir(dir, prefix string) (string, error) { return ioutil.TempDir(dir, prefix) } docker-1.10.3/pkg/ioutils/temp_windows.go000066400000000000000000000005751267010174400203730ustar00rootroot00000000000000// +build windows package ioutils import ( "io/ioutil" "github.com/docker/docker/pkg/longpath" ) // TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. 
func TempDir(dir, prefix string) (string, error) { tempDir, err := ioutil.TempDir(dir, prefix) if err != nil { return "", err } return longpath.AddPrefix(tempDir), nil } docker-1.10.3/pkg/ioutils/writeflusher.go000066400000000000000000000041101267010174400203640ustar00rootroot00000000000000package ioutils import ( "errors" "io" "net/http" "sync" ) // WriteFlusher wraps the Write and Flush operation ensuring that every write // is a flush. In addition, the Close method can be called to intercept // Read/Write calls if the targets lifecycle has already ended. type WriteFlusher struct { mu sync.Mutex w io.Writer flusher http.Flusher flushed bool closed error // TODO(stevvooe): Use channel for closed instead, remove mutex. Using a // channel will allow one to properly order the operations. } var errWriteFlusherClosed = errors.New("writeflusher: closed") func (wf *WriteFlusher) Write(b []byte) (n int, err error) { wf.mu.Lock() defer wf.mu.Unlock() if wf.closed != nil { return 0, wf.closed } n, err = wf.w.Write(b) wf.flush() // every write is a flush. return n, err } // Flush the stream immediately. func (wf *WriteFlusher) Flush() { wf.mu.Lock() defer wf.mu.Unlock() wf.flush() } // flush the stream immediately without taking a lock. Used internally. func (wf *WriteFlusher) flush() { if wf.closed != nil { return } wf.flushed = true wf.flusher.Flush() } // Flushed returns the state of flushed. // If it's flushed, return true, or else it return false. func (wf *WriteFlusher) Flushed() bool { // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to // be used to detect whether or a response code has been issued or not. // Another hook should be used instead. wf.mu.Lock() defer wf.mu.Unlock() return wf.flushed } // Close closes the write flusher, disallowing any further writes to the // target. After the flusher is closed, all calls to write or flush will // result in an error. 
func (wf *WriteFlusher) Close() error { wf.mu.Lock() defer wf.mu.Unlock() if wf.closed != nil { return wf.closed } wf.closed = errWriteFlusherClosed return nil } // NewWriteFlusher returns a new WriteFlusher. func NewWriteFlusher(w io.Writer) *WriteFlusher { var flusher http.Flusher if f, ok := w.(http.Flusher); ok { flusher = f } else { flusher = &NopFlusher{} } return &WriteFlusher{w: w, flusher: flusher} } docker-1.10.3/pkg/ioutils/writers.go000066400000000000000000000026501267010174400173470ustar00rootroot00000000000000package ioutils import "io" // NopWriter represents a type which write operation is nop. type NopWriter struct{} func (*NopWriter) Write(buf []byte) (int, error) { return len(buf), nil } type nopWriteCloser struct { io.Writer } func (w *nopWriteCloser) Close() error { return nil } // NopWriteCloser returns a nopWriteCloser. func NopWriteCloser(w io.Writer) io.WriteCloser { return &nopWriteCloser{w} } // NopFlusher represents a type which flush operation is nop. type NopFlusher struct{} // Flush is a nop operation. func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { io.Writer closer func() error } func (r *writeCloserWrapper) Close() error { return r.closer() } // NewWriteCloserWrapper returns a new io.WriteCloser. func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { return &writeCloserWrapper{ Writer: r, closer: closer, } } // WriteCounter wraps a concrete io.Writer and hold a count of the number // of bytes written to the writer during a "session". // This can be convenient when write return is masked // (e.g., json.Encoder.Encode()) type WriteCounter struct { Count int64 Writer io.Writer } // NewWriteCounter returns a new WriteCounter. 
func NewWriteCounter(w io.Writer) *WriteCounter { return &WriteCounter{ Writer: w, } } func (wc *WriteCounter) Write(p []byte) (count int, err error) { count, err = wc.Writer.Write(p) wc.Count += int64(count) return } docker-1.10.3/pkg/ioutils/writers_test.go000066400000000000000000000024461267010174400204110ustar00rootroot00000000000000package ioutils import ( "bytes" "strings" "testing" ) func TestWriteCloserWrapperClose(t *testing.T) { called := false writer := bytes.NewBuffer([]byte{}) wrapper := NewWriteCloserWrapper(writer, func() error { called = true return nil }) if err := wrapper.Close(); err != nil { t.Fatal(err) } if !called { t.Fatalf("writeCloserWrapper should have call the anonymous function.") } } func TestNopWriteCloser(t *testing.T) { writer := bytes.NewBuffer([]byte{}) wrapper := NopWriteCloser(writer) if err := wrapper.Close(); err != nil { t.Fatal("NopWriteCloser always return nil on Close.") } } func TestNopWriter(t *testing.T) { nw := &NopWriter{} l, err := nw.Write([]byte{'c'}) if err != nil { t.Fatal(err) } if l != 1 { t.Fatalf("Expected 1 got %d", l) } } func TestWriteCounter(t *testing.T) { dummy1 := "This is a dummy string." dummy2 := "This is another dummy string." totalLength := int64(len(dummy1) + len(dummy2)) reader1 := strings.NewReader(dummy1) reader2 := strings.NewReader(dummy2) var buffer bytes.Buffer wc := NewWriteCounter(&buffer) reader1.WriteTo(wc) reader2.WriteTo(wc) if wc.Count != totalLength { t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) } if buffer.String() != dummy1+dummy2 { t.Error("Wrong message written") } } docker-1.10.3/pkg/jsonlog/000077500000000000000000000000001267010174400153015ustar00rootroot00000000000000docker-1.10.3/pkg/jsonlog/jsonlog.go000066400000000000000000000020721267010174400173040ustar00rootroot00000000000000package jsonlog import ( "encoding/json" "fmt" "time" ) // JSONLog represents a log message, typically a single entry from a given log stream. 
// JSONLogs can be easily serialized to and from JSON and support custom formatting. type JSONLog struct { // Log is the log message Log string `json:"log,omitempty"` // Stream is the log source Stream string `json:"stream,omitempty"` // Created is the created timestamp of log Created time.Time `json:"time"` } // Format returns the log formatted according to format // If format is nil, returns the log message // If format is json, returns the log marshaled in json format // By default, returns the log with the log time formatted according to format. func (jl *JSONLog) Format(format string) (string, error) { if format == "" { return jl.Log, nil } if format == "json" { m, err := json.Marshal(jl) return string(m), err } return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil } // Reset resets the log to nil. func (jl *JSONLog) Reset() { jl.Log = "" jl.Stream = "" jl.Created = time.Time{} } docker-1.10.3/pkg/jsonlog/jsonlog_marshalling.go000066400000000000000000000104011267010174400216600ustar00rootroot00000000000000// This code was initially generated by ffjson // This code was generated via the following steps: // $ go get -u github.com/pquerna/ffjson // $ make BIND_DIR=. shell // $ ffjson pkg/jsonlog/jsonlog.go // $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go // // It has been modified to improve the performance of time marshalling to JSON // and to clean it up. 
// Should this code need to be regenerated when the JSONLog struct is changed, // the relevant changes which have been made are: // import ( // "bytes" //- // "unicode/utf8" // ) // // func (mj *JSONLog) MarshalJSON() ([]byte, error) { //@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { // } // return buf.Bytes(), nil // } //+ // func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { //- var err error //- var obj []byte //- var first bool = true //- _ = obj //- _ = err //- _ = first //+ var ( //+ err error //+ timestamp string //+ first bool = true //+ ) // buf.WriteString(`{`) // if len(mj.Log) != 0 { // if first == true { //@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { // buf.WriteString(`,`) // } // buf.WriteString(`"time":`) //- obj, err = mj.Created.MarshalJSON() //+ timestamp, err = FastTimeMarshalJSON(mj.Created) // if err != nil { // return err // } //- buf.Write(obj) //+ buf.WriteString(timestamp) // buf.WriteString(`}`) // return nil // } // @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { // if len(mj.Log) != 0 { // - if first == true { // - first = false // - } else { // - buf.WriteString(`,`) // - } // + first = false // buf.WriteString(`"log":`) // ffjsonWriteJSONString(buf, mj.Log) // } package jsonlog import ( "bytes" "unicode/utf8" ) // MarshalJSON marshals the JSONLog. func (mj *JSONLog) MarshalJSON() ([]byte, error) { var buf bytes.Buffer buf.Grow(1024) if err := mj.MarshalJSONBuf(&buf); err != nil { return nil, err } return buf.Bytes(), nil } // MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. 
func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { var ( err error timestamp string first = true ) buf.WriteString(`{`) if len(mj.Log) != 0 { first = false buf.WriteString(`"log":`) ffjsonWriteJSONString(buf, mj.Log) } if len(mj.Stream) != 0 { if first == true { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"stream":`) ffjsonWriteJSONString(buf, mj.Stream) } if first == true { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"time":`) timestamp, err = FastTimeMarshalJSON(mj.Created) if err != nil { return err } buf.WriteString(timestamp) buf.WriteString(`}`) return nil } func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { const hex = "0123456789abcdef" buf.WriteByte('"') start := 0 for i := 0; i < len(s); { if b := s[i]; b < utf8.RuneSelf { if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { i++ continue } if start < i { buf.WriteString(s[start:i]) } switch b { case '\\', '"': buf.WriteByte('\\') buf.WriteByte(b) case '\n': buf.WriteByte('\\') buf.WriteByte('n') case '\r': buf.WriteByte('\\') buf.WriteByte('r') default: buf.WriteString(`\u00`) buf.WriteByte(hex[b>>4]) buf.WriteByte(hex[b&0xF]) } i++ start = i continue } c, size := utf8.DecodeRuneInString(s[i:]) if c == utf8.RuneError && size == 1 { if start < i { buf.WriteString(s[start:i]) } buf.WriteString(`\ufffd`) i += size start = i continue } if c == '\u2028' || c == '\u2029' { if start < i { buf.WriteString(s[start:i]) } buf.WriteString(`\u202`) buf.WriteByte(hex[c&0xF]) i += size start = i continue } i += size } if start < len(s) { buf.WriteString(s[start:]) } buf.WriteByte('"') } docker-1.10.3/pkg/jsonlog/jsonlog_marshalling_test.go000066400000000000000000000027621267010174400227320ustar00rootroot00000000000000package jsonlog import ( "regexp" "testing" ) func TestJSONLogMarshalJSON(t *testing.T) { logs := map[JSONLog]string{ JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, 
JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, JSONLog{}: `^{\"time\":\".{20,}\"}$`, // These ones are a little weird JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, } for jsonLog, expression := range logs { data, err := jsonLog.MarshalJSON() if err != nil { t.Fatal(err) } res := string(data) t.Logf("Result of WriteLog: %q", res) logRe := regexp.MustCompile(expression) if !logRe.MatchString(res) { t.Fatalf("Log line not in expected format [%v]: %q", expression, res) } } } docker-1.10.3/pkg/jsonlog/jsonlogbytes.go000066400000000000000000000047541267010174400203640ustar00rootroot00000000000000package jsonlog import ( "bytes" "encoding/json" "unicode/utf8" ) // JSONLogs is based on JSONLog. // It allows marshalling JSONLog from Log as []byte // and an already marshalled Created timestamp. type JSONLogs struct { Log []byte `json:"log,omitempty"` Stream string `json:"stream,omitempty"` Created string `json:"time"` // json-encoded bytes RawAttrs json.RawMessage `json:"attrs,omitempty"` } // MarshalJSONBuf is based on the same method from JSONLog // It has been modified to take into account the necessary changes. 
func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { var first = true buf.WriteString(`{`) if len(mj.Log) != 0 { first = false buf.WriteString(`"log":`) ffjsonWriteJSONBytesAsString(buf, mj.Log) } if len(mj.Stream) != 0 { if first == true { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"stream":`) ffjsonWriteJSONString(buf, mj.Stream) } if len(mj.RawAttrs) > 0 { if first == true { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"attrs":`) buf.Write(mj.RawAttrs) } if first == true { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"time":`) buf.WriteString(mj.Created) buf.WriteString(`}`) return nil } // This is based on ffjsonWriteJSONBytesAsString. It has been changed // to accept a string passed as a slice of bytes. func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { const hex = "0123456789abcdef" buf.WriteByte('"') start := 0 for i := 0; i < len(s); { if b := s[i]; b < utf8.RuneSelf { if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { i++ continue } if start < i { buf.Write(s[start:i]) } switch b { case '\\', '"': buf.WriteByte('\\') buf.WriteByte(b) case '\n': buf.WriteByte('\\') buf.WriteByte('n') case '\r': buf.WriteByte('\\') buf.WriteByte('r') default: buf.WriteString(`\u00`) buf.WriteByte(hex[b>>4]) buf.WriteByte(hex[b&0xF]) } i++ start = i continue } c, size := utf8.DecodeRune(s[i:]) if c == utf8.RuneError && size == 1 { if start < i { buf.Write(s[start:i]) } buf.WriteString(`\ufffd`) i += size start = i continue } if c == '\u2028' || c == '\u2029' { if start < i { buf.Write(s[start:i]) } buf.WriteString(`\u202`) buf.WriteByte(hex[c&0xF]) i += size start = i continue } i += size } if start < len(s) { buf.Write(s[start:]) } buf.WriteByte('"') } docker-1.10.3/pkg/jsonlog/jsonlogbytes_test.go000066400000000000000000000036401267010174400214140ustar00rootroot00000000000000package jsonlog import ( "bytes" "regexp" "testing" ) func TestJSONLogsMarshalJSONBuf(t 
*testing.T) { logs := map[*JSONLogs]string{ &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, &JSONLogs{Created: "time"}: `^{\"time\":time}$`, &JSONLogs{}: `^{\"time\":}$`, // These ones are a little weird &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, // with raw attributes &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, } for jsonLog, expression := range logs { var buf bytes.Buffer if err := jsonLog.MarshalJSONBuf(&buf); err != nil { t.Fatal(err) } res := buf.String() t.Logf("Result of WriteLog: %q", res) logRe := regexp.MustCompile(expression) if !logRe.MatchString(res) { t.Fatalf("Log line not in expected format [%v]: %q", expression, res) } } } docker-1.10.3/pkg/jsonlog/time_marshalling.go000066400000000000000000000016001267010174400211440ustar00rootroot00000000000000// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. 
package jsonlog import ( "errors" "time" ) const ( // RFC3339NanoFixed is our own version of RFC339Nano because we want one // that pads the nano seconds part with zeros to ensure // the timestamps are aligned in the logs. RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" // JSONFormat is the format used by FastMarshalJSON JSONFormat = `"` + time.RFC3339Nano + `"` ) // FastTimeMarshalJSON avoids one of the extra allocations that // time.MarshalJSON is making. func FastTimeMarshalJSON(t time.Time) (string, error) { if y := t.Year(); y < 0 || y >= 10000 { // RFC 3339 is clear that years are 4 digits exactly. // See golang.org/issue/4556#c15 for more discussion. return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") } return t.Format(JSONFormat), nil } docker-1.10.3/pkg/jsonlog/time_marshalling_test.go000066400000000000000000000023021267010174400222030ustar00rootroot00000000000000package jsonlog import ( "testing" "time" ) // Testing to ensure 'year' fields is between 0 and 9999 func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) { aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) json, err := FastTimeMarshalJSON(aTime) if err == nil { t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) } anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) json, err = FastTimeMarshalJSON(anotherTime) if err == nil { t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) } } func TestFastTimeMarshalJSON(t *testing.T) { aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) json, err := FastTimeMarshalJSON(aTime) if err != nil { t.Fatal(err) } expected := "\"2015-05-29T11:01:02.000000003Z\"" if json != expected { t.Fatalf("Expected %v, got %v", expected, json) } location, err := time.LoadLocation("Europe/Paris") if err != nil { t.Fatal(err) } aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) json, err = FastTimeMarshalJSON(aTime) if err != nil { t.Fatal(err) } expected = 
"\"2015-05-29T11:01:02.000000003+02:00\"" if json != expected { t.Fatalf("Expected %v, got %v", expected, json) } } docker-1.10.3/pkg/jsonmessage/000077500000000000000000000000001267010174400161445ustar00rootroot00000000000000docker-1.10.3/pkg/jsonmessage/jsonmessage.go000066400000000000000000000146311267010174400210160ustar00rootroot00000000000000package jsonmessage import ( "encoding/json" "fmt" "io" "strings" "time" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/term" "github.com/docker/go-units" ) // JSONError wraps a concrete Code and Message, `Code` is // is a integer error code, `Message` is the error message. type JSONError struct { Code int `json:"code,omitempty"` Message string `json:"message,omitempty"` } func (e *JSONError) Error() string { return e.Message } // JSONProgress describes a Progress. terminalFd is the fd of the current terminal, // Start is the initial value for the operation. Current is the current status and // value of the progress made towards Total. Total is the end value describing when // we made 100% progress for an operation. 
type JSONProgress struct { terminalFd uintptr Current int64 `json:"current,omitempty"` Total int64 `json:"total,omitempty"` Start int64 `json:"start,omitempty"` } func (p *JSONProgress) String() string { var ( width = 200 pbBox string numbersBox string timeLeftBox string ) ws, err := term.GetWinsize(p.terminalFd) if err == nil { width = int(ws.Width) } if p.Current <= 0 && p.Total <= 0 { return "" } current := units.HumanSize(float64(p.Current)) if p.Total <= 0 { return fmt.Sprintf("%8v", current) } total := units.HumanSize(float64(p.Total)) percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 if percentage > 50 { percentage = 50 } if width > 110 { // this number can't be negative gh#7136 numSpaces := 0 if 50-percentage > 0 { numSpaces = 50 - percentage } pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) } numbersBox = fmt.Sprintf("%8v/%v", current, total) if p.Current > p.Total { // remove total display if the reported current is wonky. numbersBox = fmt.Sprintf("%8v", current) } if p.Current > 0 && p.Start > 0 && percentage < 50 { fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) perEntry := fromStart / time.Duration(p.Current) left := time.Duration(p.Total-p.Current) * perEntry left = (left / time.Second) * time.Second if width > 50 { timeLeftBox = " " + left.String() } } return pbBox + numbersBox + timeLeftBox } // JSONMessage defines a message struct. It describes // the created time, where it from, status, ID of the // message. It's used for docker events. 
type JSONMessage struct { Stream string `json:"stream,omitempty"` Status string `json:"status,omitempty"` Progress *JSONProgress `json:"progressDetail,omitempty"` ProgressMessage string `json:"progress,omitempty"` //deprecated ID string `json:"id,omitempty"` From string `json:"from,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` Error *JSONError `json:"errorDetail,omitempty"` ErrorMessage string `json:"error,omitempty"` //deprecated // Aux contains out-of-band data, such as digests for push signing. Aux *json.RawMessage `json:"aux,omitempty"` } // Display displays the JSONMessage to `out`. `isTerminal` describes if `out` // is a terminal. If this is the case, it will erase the entire current line // when displaying the progressbar. func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.Error != nil { if jm.Error.Code == 401 { return fmt.Errorf("Authentication is required.") } return jm.Error } var endl string if isTerminal && jm.Stream == "" && jm.Progress != nil { // [2K = erase entire current line fmt.Fprintf(out, "%c[2K\r", 27) endl = "\r" } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal return nil } if jm.TimeNano != 0 { fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) } else if jm.Time != 0 { fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) } if jm.ID != "" { fmt.Fprintf(out, "%s: ", jm.ID) } if jm.From != "" { fmt.Fprintf(out, "(from %s) ", jm.From) } if jm.Progress != nil && isTerminal { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) } else if jm.Stream != "" { fmt.Fprintf(out, "%s%s", jm.Stream, endl) } else { fmt.Fprintf(out, "%s%s\n", jm.Status, endl) } return nil } // DisplayJSONMessagesStream displays a json message stream from `in` to `out`, 
`isTerminal` // describes if `out` is a terminal. If this is the case, it will print `\n` at the end of // each line and move the cursor while displaying. func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { var ( dec = json.NewDecoder(in) ids = make(map[string]int) ) for { diff := 0 var jm JSONMessage if err := dec.Decode(&jm); err != nil { if err == io.EOF { break } return err } if jm.Aux != nil { if auxCallback != nil { auxCallback(jm.Aux) } continue } if jm.Progress != nil { jm.Progress.terminalFd = terminalFd } if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { line, ok := ids[jm.ID] if !ok { // NOTE: This approach of using len(id) to // figure out the number of lines of history // only works as long as we clear the history // when we output something that's not // accounted for in the map, such as a line // with no ID. line = len(ids) ids[jm.ID] = line if isTerminal { fmt.Fprintf(out, "\n") } } else { diff = len(ids) - line } if isTerminal { // NOTE: this appears to be necessary even if // diff == 0. // [{diff}A = move cursor up diff rows fmt.Fprintf(out, "%c[%dA", 27, diff) } } else { // When outputting something that isn't progress // output, clear the history of previous lines. We // don't want progress entries from some previous // operation to be updated (for example, pull -a // with multiple tags). ids = make(map[string]int) } err := jm.Display(out, isTerminal) if jm.ID != "" && isTerminal { // NOTE: this appears to be necessary even if // diff == 0. 
// [{diff}B = move cursor down diff rows fmt.Fprintf(out, "%c[%dB", 27, diff) } if err != nil { return err } } return nil } docker-1.10.3/pkg/jsonmessage/jsonmessage_test.go000066400000000000000000000150061267010174400220520ustar00rootroot00000000000000package jsonmessage import ( "bytes" "fmt" "strings" "testing" "time" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/term" ) func TestError(t *testing.T) { je := JSONError{404, "Not found"} if je.Error() != "Not found" { t.Fatalf("Expected 'Not found' got '%s'", je.Error()) } } func TestProgress(t *testing.T) { jp := JSONProgress{} if jp.String() != "" { t.Fatalf("Expected empty string, got '%s'", jp.String()) } expected := " 1 B" jp2 := JSONProgress{Current: 1} if jp2.String() != expected { t.Fatalf("Expected %q, got %q", expected, jp2.String()) } expectedStart := "[==========> ] 20 B/100 B" jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} // Just look at the start of the string // (the remaining time is really hard to test -_-) if jp3.String()[:len(expectedStart)] != expectedStart { t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) } expected = "[=========================> ] 50 B/100 B" jp4 := JSONProgress{Current: 50, Total: 100} if jp4.String() != expected { t.Fatalf("Expected %q, got %q", expected, jp4.String()) } // this number can't be negative gh#7136 expected = "[==================================================>] 50 B" jp5 := JSONProgress{Current: 50, Total: 40} if jp5.String() != expected { t.Fatalf("Expected %q, got %q", expected, jp5.String()) } } func TestJSONMessageDisplay(t *testing.T) { now := time.Now() messages := map[JSONMessage][]string{ // Empty JSONMessage{}: {"\n", "\n"}, // Status JSONMessage{ Status: "status", }: { "status\n", "status\n", }, // General JSONMessage{ Time: now.Unix(), ID: "ID", From: "From", Status: "status", }: { fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 
0).Format(jsonlog.RFC3339NanoFixed)), fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), }, // General, with nano precision time JSONMessage{ TimeNano: now.UnixNano(), ID: "ID", From: "From", Status: "status", }: { fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), }, // General, with both times Nano is preferred JSONMessage{ Time: now.Unix(), TimeNano: now.UnixNano(), ID: "ID", From: "From", Status: "status", }: { fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), }, // Stream over status JSONMessage{ Status: "status", Stream: "stream", }: { "stream", "stream", }, // With progress message JSONMessage{ Status: "status", ProgressMessage: "progressMessage", }: { "status progressMessage", "status progressMessage", }, // With progress, stream empty JSONMessage{ Status: "status", Stream: "", Progress: &JSONProgress{Current: 1}, }: { "", fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), }, } // The tests :) for jsonMessage, expectedMessages := range messages { // Without terminal data := bytes.NewBuffer([]byte{}) if err := jsonMessage.Display(data, false); err != nil { t.Fatal(err) } if data.String() != expectedMessages[0] { t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) } // With terminal data = bytes.NewBuffer([]byte{}) if err := jsonMessage.Display(data, true); err != nil { t.Fatal(err) } if data.String() != expectedMessages[1] { t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) } } } // Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. 
func TestJSONMessageDisplayWithJSONError(t *testing.T) { data := bytes.NewBuffer([]byte{}) jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} err := jsonMessage.Display(data, true) if err == nil || err.Error() != "Can't find it" { t.Fatalf("Expected a JSONError 404, got [%v]", err) } jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} err = jsonMessage.Display(data, true) if err == nil || err.Error() != "Authentication is required." { t.Fatalf("Expected an error [Authentication is required.], got [%v]", err) } } func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { var ( inFd uintptr ) data := bytes.NewBuffer([]byte{}) reader := strings.NewReader("This is not a 'valid' JSON []") inFd, _ = term.GetFdInfo(reader) if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil && err.Error()[:17] != "invalid character" { t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err) } } func TestDisplayJSONMessagesStream(t *testing.T) { var ( inFd uintptr ) messages := map[string][]string{ // empty string "": { "", ""}, // Without progress & ID "{ \"status\": \"status\" }": { "status\n", "status\n", }, // Without progress, with ID "{ \"id\": \"ID\",\"status\": \"status\" }": { "ID: status\n", fmt.Sprintf("ID: status\n%c[%dB", 27, 0), }, // With progress "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { "ID: status ProgressMessage", fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 0, 27, 0), }, // With progressDetail "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { "", // progressbar is disabled in non-terminal fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 0, 27, 27, 0), }, } for jsonMessage, expectedMessages := range messages { data := bytes.NewBuffer([]byte{}) reader := strings.NewReader(jsonMessage) inFd, _ = term.GetFdInfo(reader) // Without terminal if err := DisplayJSONMessagesStream(reader, data, inFd, 
false, nil); err != nil { t.Fatal(err) } if data.String() != expectedMessages[0] { t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String()) } // With terminal data = bytes.NewBuffer([]byte{}) reader = strings.NewReader(jsonMessage) if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil { t.Fatal(err) } if data.String() != expectedMessages[1] { t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String()) } } } docker-1.10.3/pkg/locker/000077500000000000000000000000001267010174400151055ustar00rootroot00000000000000docker-1.10.3/pkg/locker/README.md000066400000000000000000000030171267010174400163650ustar00rootroot00000000000000Locker ===== locker provides a mechanism for creating finer-grained locking to help free up more global locks to handle other tasks. The implementation looks close to a sync.Mutex, however the user must provide a reference to use to refer to the underlying lock when locking and unlocking, and unlock may generate an error. If a lock with a given name does not exist when `Lock` is called, one is created. Lock references are automatically cleaned up on `Unlock` if nothing else is waiting for the lock. ## Usage ```go package important import ( "sync" "time" "github.com/docker/docker/pkg/locker" ) type important struct { locks *locker.Locker data map[string]interface{} mu sync.Mutex } func (i *important) Get(name string) interface{} { i.locks.Lock(name) defer i.locks.Unlock(name) return data[name] } func (i *important) Create(name string, data interface{}) { i.locks.Lock(name) defer i.locks.Unlock(name) i.createImportant(data) s.mu.Lock() i.data[name] = data s.mu.Unlock() } func (i *important) createImportant(data interface{}) { time.Sleep(10 * time.Second) } ``` For functions dealing with a given name, always lock at the beginning of the function (or before doing anything with the underlying state), this ensures any other function that is dealing with the same name will block. 
When needing to modify the underlying data, use the global lock to ensure nothing else is modfying it at the same time. Since name lock is already in place, no reads will occur while the modification is being performed. docker-1.10.3/pkg/locker/locker.go000066400000000000000000000053021267010174400167130ustar00rootroot00000000000000/* Package locker provides a mechanism for creating finer-grained locking to help free up more global locks to handle other tasks. The implementation looks close to a sync.Mutex, however the user must provide a reference to use to refer to the underlying lock when locking and unlocking, and unlock may generate an error. If a lock with a given name does not exist when `Lock` is called, one is created. Lock references are automatically cleaned up on `Unlock` if nothing else is waiting for the lock. */ package locker import ( "errors" "sync" "sync/atomic" ) // ErrNoSuchLock is returned when the requested lock does not exist var ErrNoSuchLock = errors.New("no such lock") // Locker provides a locking mechanism based on the passed in reference name type Locker struct { mu sync.Mutex locks map[string]*lockCtr } // lockCtr is used by Locker to represent a lock with a given name. 
type lockCtr struct { mu sync.Mutex // waiters is the number of waiters waiting to acquire the lock // this is int32 instead of uint32 so we can add `-1` in `dec()` waiters int32 } // inc increments the number of waiters waiting for the lock func (l *lockCtr) inc() { atomic.AddInt32(&l.waiters, 1) } // dec decrements the number of waiters waiting on the lock func (l *lockCtr) dec() { atomic.AddInt32(&l.waiters, -1) } // count gets the current number of waiters func (l *lockCtr) count() int32 { return atomic.LoadInt32(&l.waiters) } // Lock locks the mutex func (l *lockCtr) Lock() { l.mu.Lock() } // Unlock unlocks the mutex func (l *lockCtr) Unlock() { l.mu.Unlock() } // New creates a new Locker func New() *Locker { return &Locker{ locks: make(map[string]*lockCtr), } } // Lock locks a mutex with the given name. If it doesn't exist, one is created func (l *Locker) Lock(name string) { l.mu.Lock() if l.locks == nil { l.locks = make(map[string]*lockCtr) } nameLock, exists := l.locks[name] if !exists { nameLock = &lockCtr{} l.locks[name] = nameLock } // increment the nameLock waiters while inside the main mutex // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently nameLock.inc() l.mu.Unlock() // Lock the nameLock outside the main mutex so we don't block other operations // once locked then we can decrement the number of waiters for this lock nameLock.Lock() nameLock.dec() } // Unlock unlocks the mutex with the given name // If the given lock is not being waited on by any other callers, it is deleted func (l *Locker) Unlock(name string) error { l.mu.Lock() nameLock, exists := l.locks[name] if !exists { l.mu.Unlock() return ErrNoSuchLock } if nameLock.count() == 0 { delete(l.locks, name) } nameLock.Unlock() l.mu.Unlock() return nil } docker-1.10.3/pkg/locker/locker_test.go000066400000000000000000000040251267010174400177530ustar00rootroot00000000000000package locker import ( "sync" "testing" "time" ) func TestLockCounter(t *testing.T) 
{ l := &lockCtr{} l.inc() if l.waiters != 1 { t.Fatal("counter inc failed") } l.dec() if l.waiters != 0 { t.Fatal("counter dec failed") } } func TestLockerLock(t *testing.T) { l := New() l.Lock("test") ctr := l.locks["test"] if ctr.count() != 0 { t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) } chDone := make(chan struct{}) go func() { l.Lock("test") close(chDone) }() chWaiting := make(chan struct{}) go func() { for range time.Tick(1 * time.Millisecond) { if ctr.count() == 1 { close(chWaiting) break } } }() select { case <-chWaiting: case <-time.After(3 * time.Second): t.Fatal("timed out waiting for lock waiters to be incremented") } select { case <-chDone: t.Fatal("lock should not have returned while it was still held") default: } if err := l.Unlock("test"); err != nil { t.Fatal(err) } select { case <-chDone: case <-time.After(3 * time.Second): t.Fatalf("lock should have completed") } if ctr.count() != 0 { t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) } } func TestLockerUnlock(t *testing.T) { l := New() l.Lock("test") l.Unlock("test") chDone := make(chan struct{}) go func() { l.Lock("test") close(chDone) }() select { case <-chDone: case <-time.After(3 * time.Second): t.Fatalf("lock should not be blocked") } } func TestLockerConcurrency(t *testing.T) { l := New() var wg sync.WaitGroup for i := 0; i <= 10000; i++ { wg.Add(1) go func() { l.Lock("test") // if there is a concurrency issue, will very likely panic here l.Unlock("test") wg.Done() }() } chDone := make(chan struct{}) go func() { wg.Wait() close(chDone) }() select { case <-chDone: case <-time.After(10 * time.Second): t.Fatal("timeout waiting for locks to complete") } // Since everything has unlocked this should not exist anymore if ctr, exists := l.locks["test"]; exists { t.Fatalf("lock should not exist: %v", ctr) } } 
docker-1.10.3/pkg/longpath/000077500000000000000000000000001267010174400154425ustar00rootroot00000000000000docker-1.10.3/pkg/longpath/longpath.go000066400000000000000000000013411267010174400176040ustar00rootroot00000000000000// longpath introduces some constants and helper functions for handling long paths // in Windows, which are expected to be prepended with `\\?\` and followed by either // a drive letter, a UNC server\share, or a volume identifier. package longpath import ( "strings" ) // Prefix is the longpath prefix for Windows file paths. const Prefix = `\\?\` // AddPrefix will add the Windows long path prefix to the path provided if // it does not already have it. func AddPrefix(path string) string { if !strings.HasPrefix(path, Prefix) { if strings.HasPrefix(path, `\\`) { // This is a UNC path, so we need to add 'UNC' to the path as well. path = Prefix + `UNC` + path[1:] } else { path = Prefix + path } } return path } docker-1.10.3/pkg/longpath/longpath_test.go000066400000000000000000000007761267010174400206560ustar00rootroot00000000000000package longpath import ( "strings" "testing" ) func TestStandardLongPath(t *testing.T) { c := `C:\simple\path` longC := AddPrefix(c) if !strings.EqualFold(longC, `\\?\C:\simple\path`) { t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) } } func TestUNCLongPath(t *testing.T) { c := `\\server\share\path` longC := AddPrefix(c) if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { t.Errorf("Wrong UNC long path returned. 
Original = %s ; Long = %s", c, longC) } } docker-1.10.3/pkg/loopback/000077500000000000000000000000001267010174400154205ustar00rootroot00000000000000docker-1.10.3/pkg/loopback/attach_loopback.go000066400000000000000000000067661267010174400211040ustar00rootroot00000000000000// +build linux package loopback import ( "errors" "fmt" "os" "syscall" "github.com/Sirupsen/logrus" ) // Loopback related errors var ( ErrAttachLoopbackDevice = errors.New("loopback attach failed") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrSetCapacity = errors.New("Unable set loopback capacity") ) func stringToLoopName(src string) [LoNameSize]uint8 { var dst [LoNameSize]uint8 copy(dst[:], src[:]) return dst } func getNextFreeLoopbackIndex() (int, error) { f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) if err != nil { return 0, err } defer f.Close() index, err := ioctlLoopCtlGetFree(f.Fd()) if index < 0 { index = 0 } return index, err } func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { // Start looking for a free /dev/loop for { target := fmt.Sprintf("/dev/loop%d", index) index++ fi, err := os.Stat(target) if err != nil { if os.IsNotExist(err) { logrus.Errorf("There are no more loopback devices available.") } return nil, ErrAttachLoopbackDevice } if fi.Mode()&os.ModeDevice != os.ModeDevice { logrus.Errorf("Loopback device %s is not a block device.", target) continue } // OpenFile adds O_CLOEXEC loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) if err != nil { logrus.Errorf("Error opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice } // Try to attach to the loop file if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { loopFile.Close() // If the error is EBUSY, then try the next loopback if err != syscall.EBUSY { logrus.Errorf("Cannot set up loopback device %s: %s", target, err) return nil, ErrAttachLoopbackDevice } // Otherwise, we keep going with the loop 
continue } // In case of success, we finished. Break the loop. break } // This can't happen, but let's be sure if loopFile == nil { logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) return nil, ErrAttachLoopbackDevice } return loopFile, nil } // AttachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *os.File. func AttachLoopDevice(sparseName string) (loop *os.File, err error) { // Try to retrieve the next available loopback device via syscall. // If it fails, we discard error and start looping for a // loopback from index 0. startIndex, err := getNextFreeLoopbackIndex() if err != nil { logrus.Debugf("Error retrieving the next available loopback: %s", err) } // OpenFile adds O_CLOEXEC sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) if err != nil { logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) return nil, ErrAttachLoopbackDevice } defer sparseFile.Close() loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) if err != nil { return nil, err } // Set the status of the loopback device loopInfo := &loopInfo64{ loFileName: stringToLoopName(loopFile.Name()), loOffset: 0, loFlags: LoFlagsAutoClear, } if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { logrus.Errorf("Cannot set up loopback device info: %s", err) // If the call failed, then free the loopback device if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { logrus.Errorf("Error while cleaning up the loopback device") } loopFile.Close() return nil, ErrAttachLoopbackDevice } return loopFile, nil } docker-1.10.3/pkg/loopback/ioctl.go000066400000000000000000000024021267010174400170570ustar00rootroot00000000000000// +build linux package loopback import ( "syscall" "unsafe" ) func ioctlLoopCtlGetFree(fd uintptr) (int, error) { index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) if err != 0 { return 0, err } return int(index), 
nil } func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { return err } return nil } func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return err } return nil } func ioctlLoopClrFd(loopFd uintptr) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { return err } return nil } func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { loopInfo := &loopInfo64{} if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return nil, err } return loopInfo, nil } func ioctlLoopSetCapacity(loopFd uintptr, value int) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { return err } return nil } docker-1.10.3/pkg/loopback/loop_wrapper.go000066400000000000000000000023651267010174400204660ustar00rootroot00000000000000// +build linux package loopback /* #include // FIXME: present only for defines, maybe we can remove it? 
#ifndef LOOP_CTL_GET_FREE #define LOOP_CTL_GET_FREE 0x4C82 #endif #ifndef LO_FLAGS_PARTSCAN #define LO_FLAGS_PARTSCAN 8 #endif */ import "C" type loopInfo64 struct { loDevice uint64 /* ioctl r/o */ loInode uint64 /* ioctl r/o */ loRdevice uint64 /* ioctl r/o */ loOffset uint64 loSizelimit uint64 /* bytes, 0 == max available */ loNumber uint32 /* ioctl r/o */ loEncryptType uint32 loEncryptKeySize uint32 /* ioctl w/o */ loFlags uint32 /* ioctl r/o */ loFileName [LoNameSize]uint8 loCryptName [LoNameSize]uint8 loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ loInit [2]uint64 } // IOCTL consts const ( LoopSetFd = C.LOOP_SET_FD LoopCtlGetFree = C.LOOP_CTL_GET_FREE LoopGetStatus64 = C.LOOP_GET_STATUS64 LoopSetStatus64 = C.LOOP_SET_STATUS64 LoopClrFd = C.LOOP_CLR_FD LoopSetCapacity = C.LOOP_SET_CAPACITY ) // LOOP consts. const ( LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY LoFlagsPartScan = C.LO_FLAGS_PARTSCAN LoKeySize = C.LO_KEY_SIZE LoNameSize = C.LO_NAME_SIZE ) docker-1.10.3/pkg/loopback/loopback.go000066400000000000000000000026361267010174400175500ustar00rootroot00000000000000// +build linux package loopback import ( "fmt" "os" "syscall" "github.com/Sirupsen/logrus" ) func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { loopInfo, err := ioctlLoopGetStatus64(file.Fd()) if err != nil { logrus.Errorf("Error get loopback backing file: %s", err) return 0, 0, ErrGetLoopbackBackingFile } return loopInfo.loDevice, loopInfo.loInode, nil } // SetCapacity reloads the size for the loopback device. func SetCapacity(file *os.File) error { if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { logrus.Errorf("Error loopbackSetCapacity: %s", err) return ErrSetCapacity } return nil } // FindLoopDeviceFor returns a loopback device file for the specified file which // is backing file of a loop back device. 
func FindLoopDeviceFor(file *os.File) *os.File { stat, err := file.Stat() if err != nil { return nil } targetInode := stat.Sys().(*syscall.Stat_t).Ino targetDevice := stat.Sys().(*syscall.Stat_t).Dev for i := 0; true; i++ { path := fmt.Sprintf("/dev/loop%d", i) file, err := os.OpenFile(path, os.O_RDWR, 0) if err != nil { if os.IsNotExist(err) { return nil } // Ignore all errors until the first not-exist // we want to continue looking for the file continue } dev, inode, err := getLoopbackBackingFile(file) if err == nil && dev == targetDevice && inode == targetInode { return file } file.Close() } return nil } docker-1.10.3/pkg/mflag/000077500000000000000000000000001267010174400147145ustar00rootroot00000000000000docker-1.10.3/pkg/mflag/LICENSE000066400000000000000000000027251267010174400157270ustar00rootroot00000000000000Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-1.10.3/pkg/mflag/README.md000066400000000000000000000015171267010174400161770ustar00rootroot00000000000000Package mflag (aka multiple-flag) implements command-line flag parsing. It's an **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) It adds: * both short and long flag version `./example -s red` `./example --string blue` * multiple names for the same option ``` $>./example -h Usage of example: -s, --string="": a simple string ``` ___ It is very flexible on purpose, so you can do things like: ``` $>./example -h Usage of example: -s, -string, --string="": a simple string ``` Or: ``` $>./example -h Usage of example: -oldflag, --newflag="": a simple string ``` You can also hide some flags from the usage, so if we want only `--newflag`: ``` $>./example -h Usage of example: --newflag="": a simple string $>./example -oldflag str str ``` See [example.go](example/example.go) for more details. 
docker-1.10.3/pkg/mflag/example/000077500000000000000000000000001267010174400163475ustar00rootroot00000000000000docker-1.10.3/pkg/mflag/example/example.go000066400000000000000000000022111267010174400203250ustar00rootroot00000000000000package main import ( "fmt" flag "github.com/docker/docker/pkg/mflag" ) var ( i int str string b, b2, h bool ) func init() { flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp") flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool") flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool") flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool") flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3") flag.Parse() } func main() { if h { flag.PrintDefaults() } else { fmt.Printf("s/#hidden/-string: %s\n", str) fmt.Printf("b: %t\n", b) fmt.Printf("-bool: %t\n", b2) fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) fmt.Printf("ARGS: %v\n", flag.Args()) } } docker-1.10.3/pkg/mflag/flag.go000066400000000000000000001167001267010174400161610ustar00rootroot00000000000000// Copyright 2014-2016 The Docker & Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package mflag implements command-line flag parsing. // // Usage: // // Define flags using flag.String(), Bool(), Int(), etc. // // This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. 
// import "flag /github.com/docker/docker/pkg/mflag" // var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") // If you like, you can bind the flag to a variable using the Var() functions. // var flagvar int // func init() { // // -flaghidden will work, but will be hidden from the usage // flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") // } // Or you can create custom flags that satisfy the Value interface (with // pointer receivers) and couple them to flag parsing by // flag.Var(&flagVal, []string{"name"}, "help message for flagname") // For such flags, the default value is just the initial value of the variable. // // You can also add "deprecated" flags, they are still usable, but are not shown // in the usage and will display a warning when you try to use them. `#` before // an option means this option is deprecated, if there is an following option // without `#` ahead, then that's the replacement, if not, it will just be removed: // var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") // this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or // this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` // var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") // will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` // so you can only use `-f`. // // You can also group one letter flags, bif you declare // var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") // var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") // you will be able to use the -vs or -sv // // After all flags are defined, call // flag.Parse() // to parse the command line into the defined flags. // // Flags may then be used directly. 
If you're using the flags themselves, // they are all pointers; if you bind to variables, they're values. // fmt.Println("ip has value ", *ip) // fmt.Println("flagvar has value ", flagvar) // // After parsing, the arguments after the flag are available as the // slice flag.Args() or individually as flag.Arg(i). // The arguments are indexed from 0 through flag.NArg()-1. // // Command line flag syntax: // -flag // -flag=x // -flag="x" // -flag='x' // -flag x // non-boolean flags only // One or two minus signs may be used; they are equivalent. // The last form is not permitted for boolean flags because the // meaning of the command // cmd -x * // will change if there is a file called 0, false, etc. You must // use the -flag=false form to turn off a boolean flag. // // Flag parsing stops just before the first non-flag argument // ("-" is a non-flag argument) or after the terminator "--". // // Integer flags accept 1234, 0664, 0x1234 and may be negative. // Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. // Duration flags accept any input valid for time.ParseDuration. // // The default set of command-line flags is controlled by // top-level functions. The FlagSet type allows one to define // independent sets of flags, such as to implement subcommands // in a command-line interface. The methods of FlagSet are // analogous to the top-level functions for the command-line // flag set. package mflag import ( "errors" "fmt" "io" "os" "runtime" "sort" "strconv" "strings" "text/tabwriter" "time" "github.com/docker/docker/pkg/homedir" ) // ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. 
var ErrHelp = errors.New("flag: help requested") // ErrRetry is the error returned if you need to try letter by letter var ErrRetry = errors.New("flag: retry") // -- bool Value type boolValue bool func newBoolValue(val bool, p *bool) *boolValue { *p = val return (*boolValue)(p) } func (b *boolValue) Set(s string) error { v, err := strconv.ParseBool(s) *b = boolValue(v) return err } func (b *boolValue) Get() interface{} { return bool(*b) } func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } func (b *boolValue) IsBoolFlag() bool { return true } // optional interface to indicate boolean flags that can be // supplied without "=value" text type boolFlag interface { Value IsBoolFlag() bool } // -- int Value type intValue int func newIntValue(val int, p *int) *intValue { *p = val return (*intValue)(p) } func (i *intValue) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) *i = intValue(v) return err } func (i *intValue) Get() interface{} { return int(*i) } func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } // -- int64 Value type int64Value int64 func newInt64Value(val int64, p *int64) *int64Value { *p = val return (*int64Value)(p) } func (i *int64Value) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) *i = int64Value(v) return err } func (i *int64Value) Get() interface{} { return int64(*i) } func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } // -- uint Value type uintValue uint func newUintValue(val uint, p *uint) *uintValue { *p = val return (*uintValue)(p) } func (i *uintValue) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) *i = uintValue(v) return err } func (i *uintValue) Get() interface{} { return uint(*i) } func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } // -- uint64 Value type uint64Value uint64 func newUint64Value(val uint64, p *uint64) *uint64Value { *p = val return (*uint64Value)(p) } func (i *uint64Value) Set(s string) error { v, err := strconv.ParseUint(s, 
0, 64) *i = uint64Value(v) return err } func (i *uint64Value) Get() interface{} { return uint64(*i) } func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } // -- uint16 Value type uint16Value uint16 func newUint16Value(val uint16, p *uint16) *uint16Value { *p = val return (*uint16Value)(p) } func (i *uint16Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 16) *i = uint16Value(v) return err } func (i *uint16Value) Get() interface{} { return uint16(*i) } func (i *uint16Value) String() string { return fmt.Sprintf("%v", *i) } // -- string Value type stringValue string func newStringValue(val string, p *string) *stringValue { *p = val return (*stringValue)(p) } func (s *stringValue) Set(val string) error { *s = stringValue(val) return nil } func (s *stringValue) Get() interface{} { return string(*s) } func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } // -- float64 Value type float64Value float64 func newFloat64Value(val float64, p *float64) *float64Value { *p = val return (*float64Value)(p) } func (f *float64Value) Set(s string) error { v, err := strconv.ParseFloat(s, 64) *f = float64Value(v) return err } func (f *float64Value) Get() interface{} { return float64(*f) } func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } // -- time.Duration Value type durationValue time.Duration func newDurationValue(val time.Duration, p *time.Duration) *durationValue { *p = val return (*durationValue)(p) } func (d *durationValue) Set(s string) error { v, err := time.ParseDuration(s) *d = durationValue(v) return err } func (d *durationValue) Get() interface{} { return time.Duration(*d) } func (d *durationValue) String() string { return (*time.Duration)(d).String() } // Value is the interface to the dynamic value stored in a flag. // (The default value is represented as a string.) 
// // If a Value has an IsBoolFlag() bool method returning true, // the command-line parser makes -name equivalent to -name=true // rather than using the next command-line argument. type Value interface { String() string Set(string) error } // Getter is an interface that allows the contents of a Value to be retrieved. // It wraps the Value interface, rather than being part of it, because it // appeared after Go 1 and its compatibility rules. All Value types provided // by this package satisfy the Getter interface. type Getter interface { Value Get() interface{} } // ErrorHandling defines how to handle flag parsing errors. type ErrorHandling int // ErrorHandling strategies available when a flag parsing error occurs const ( ContinueOnError ErrorHandling = iota ExitOnError PanicOnError ) // A FlagSet represents a set of defined flags. The zero value of a FlagSet // has no name and has ContinueOnError error handling. type FlagSet struct { // Usage is the function called when an error occurs while parsing flags. // The field is a function (not a method) that may be changed to point to // a custom error handler. Usage func() ShortUsage func() name string parsed bool actual map[string]*Flag formal map[string]*Flag args []string // arguments after flags errorHandling ErrorHandling output io.Writer // nil means stderr; use Out() accessor nArgRequirements []nArgRequirement } // A Flag represents the state of a flag. 
type Flag struct { Names []string // name as it appears on command line Usage string // help message Value Value // value as set DefValue string // default value (as text); for usage message } type flagSlice []string func (p flagSlice) Len() int { return len(p) } func (p flagSlice) Less(i, j int) bool { pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) if lpi != lpj { return lpi < lpj } return pi < pj } func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // sortFlags returns the flags as a slice in lexicographical sorted order. func sortFlags(flags map[string]*Flag) []*Flag { var list flagSlice // The sorted list is based on the first name, when flag map might use the other names. nameMap := make(map[string]string) for n, f := range flags { fName := strings.TrimPrefix(f.Names[0], "#") nameMap[fName] = n if len(f.Names) == 1 { list = append(list, fName) continue } found := false for _, name := range list { if name == fName { found = true break } } if !found { list = append(list, fName) } } sort.Sort(list) result := make([]*Flag, len(list)) for i, name := range list { result[i] = flags[nameMap[name]] } return result } // Name returns the name of the FlagSet. func (fs *FlagSet) Name() string { return fs.name } // Out returns the destination for usage and error messages. func (fs *FlagSet) Out() io.Writer { if fs.output == nil { return os.Stderr } return fs.output } // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (fs *FlagSet) SetOutput(output io.Writer) { fs.output = output } // VisitAll visits the flags in lexicographical order, calling fn for each. // It visits all flags, even those not set. func (fs *FlagSet) VisitAll(fn func(*Flag)) { for _, flag := range sortFlags(fs.formal) { fn(flag) } } // VisitAll visits the command-line flags in lexicographical order, calling // fn for each. It visits all flags, even those not set. 
func VisitAll(fn func(*Flag)) {
	CommandLine.VisitAll(fn)
}

// Visit visits the flags in lexicographical order, calling fn for each.
// It visits only those flags that have been set.
func (fs *FlagSet) Visit(fn func(*Flag)) {
	for _, f := range sortFlags(fs.actual) {
		fn(f)
	}
}

// Visit visits the command-line flags in lexicographical order, calling fn
// for each. It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
	CommandLine.Visit(fn)
}

// Lookup returns the Flag structure of the named flag, returning nil if none exists.
func (fs *FlagSet) Lookup(name string) *Flag {
	return fs.formal[name]
}

// IsSet indicates whether the specified flag is set in the given FlagSet
func (fs *FlagSet) IsSet(name string) bool {
	return fs.actual[name] != nil
}

// Lookup returns the Flag structure of the named command-line flag,
// returning nil if none exists.
func Lookup(name string) *Flag {
	return CommandLine.Lookup(name)
}

// IsSet indicates whether the specified flag was specified at all on the cmd line.
func IsSet(name string) bool {
	return CommandLine.IsSet(name)
}

// nArgRequirementType describes how an argument-count requirement is checked.
type nArgRequirementType int

// Indicator used to pass to BadArgs function
const (
	// Exact requires exactly N arguments.
	Exact nArgRequirementType = iota
	// Max allows at most N arguments.
	Max
	// Min demands at least N arguments.
	Min
)

// nArgRequirement pairs a requirement kind with its argument count.
type nArgRequirement struct {
	Type nArgRequirementType
	N    int
}

// Require adds a requirement about the number of arguments for the FlagSet.
// The first parameter can be Exact, Max, or Min to respectively specify the exact,
// the maximum, or the minimal number of arguments required.
// The actual check is done in FlagSet.CheckArgs().
func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) {
	fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg})
}

// CheckArgs uses the requirements set by FlagSet.Require() to validate
// the number of arguments. If the requirements are not met,
// an error message string is returned.
func (fs *FlagSet) CheckArgs() (message string) { for _, req := range fs.nArgRequirements { var arguments string if req.N == 1 { arguments = "1 argument" } else { arguments = fmt.Sprintf("%d arguments", req.N) } str := func(kind string) string { return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) } switch req.Type { case Exact: if fs.NArg() != req.N { return str("") } case Max: if fs.NArg() > req.N { return str("a maximum of ") } case Min: if fs.NArg() < req.N { return str("a minimum of ") } } } return "" } // Set sets the value of the named flag. func (fs *FlagSet) Set(name, value string) error { flag, ok := fs.formal[name] if !ok { return fmt.Errorf("no such flag -%v", name) } if err := flag.Value.Set(value); err != nil { return err } if fs.actual == nil { fs.actual = make(map[string]*Flag) } fs.actual[name] = flag return nil } // Set sets the value of the named command-line flag. func Set(name, value string) error { return CommandLine.Set(name, value) } // isZeroValue guesses whether the string represents the zero // value for a flag. It is not accurate but in practice works OK. func isZeroValue(value string) bool { switch value { case "false": return true case "": return true case "0": return true } return false } // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. 
func (fs *FlagSet) PrintDefaults() { writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) home := homedir.Get() // Don't substitute when HOME is / if runtime.GOOS != "windows" && home == "/" { home = "" } // Add a blank line between cmd description and list of options if fs.FlagCount() > 0 { fmt.Fprintln(writer, "") } fs.VisitAll(func(flag *Flag) { names := []string{} for _, name := range flag.Names { if name[0] != '#' { names = append(names, name) } } if len(names) > 0 && len(flag.Usage) > 0 { val := flag.DefValue if home != "" && strings.HasPrefix(val, home) { val = homedir.GetShortcutString() + val[len(home):] } if isZeroValue(val) { format := " -%s" fmt.Fprintf(writer, format, strings.Join(names, ", -")) } else { format := " -%s=%s" fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) } for _, line := range strings.Split(flag.Usage, "\n") { fmt.Fprintln(writer, "\t", line) } } }) writer.Flush() } // PrintDefaults prints to standard error the default values of all defined command-line flags. func PrintDefaults() { CommandLine.PrintDefaults() } // defaultUsage is the default function to print a usage message. func defaultUsage(fs *FlagSet) { if fs.name == "" { fmt.Fprintf(fs.Out(), "Usage:\n") } else { fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) } fs.PrintDefaults() } // NOTE: Usage is not just defaultUsage(CommandLine) // because it serves (via godoc flag Usage) as the example // for how to write your own usage function. // Usage prints to standard error a usage message documenting all defined command-line flags. // The function is a variable that may be changed to point to a custom function. var Usage = func() { fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0]) PrintDefaults() } // ShortUsage prints to standard error a usage message documenting the standard command layout // The function is a variable that may be changed to point to a custom function. 
var ShortUsage = func() { fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) } // FlagCount returns the number of flags that have been defined. func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } // FlagCountUndeprecated returns the number of undeprecated flags that have been defined. func (fs *FlagSet) FlagCountUndeprecated() int { count := 0 for _, flag := range sortFlags(fs.formal) { for _, name := range flag.Names { if name[0] != '#' { count++ break } } } return count } // NFlag returns the number of flags that have been set. func (fs *FlagSet) NFlag() int { return len(fs.actual) } // NFlag returns the number of command-line flags that have been set. func NFlag() int { return len(CommandLine.actual) } // Arg returns the i'th argument. Arg(0) is the first remaining argument // after flags have been processed. func (fs *FlagSet) Arg(i int) string { if i < 0 || i >= len(fs.args) { return "" } return fs.args[i] } // Arg returns the i'th command-line argument. Arg(0) is the first remaining argument // after flags have been processed. func Arg(i int) string { return CommandLine.Arg(i) } // NArg is the number of arguments remaining after flags have been processed. func (fs *FlagSet) NArg() int { return len(fs.args) } // NArg is the number of arguments remaining after flags have been processed. func NArg() int { return len(CommandLine.args) } // Args returns the non-flag arguments. func (fs *FlagSet) Args() []string { return fs.args } // Args returns the non-flag command-line arguments. func Args() []string { return CommandLine.args } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { fs.Var(newBoolValue(value, p), names, usage) } // BoolVar defines a bool flag with specified name, default value, and usage string. 
// The argument p points to a bool variable in which to store the value of the flag. func BoolVar(p *bool, names []string, value bool, usage string) { CommandLine.Var(newBoolValue(value, p), names, usage) } // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { p := new(bool) fs.BoolVar(p, names, value, usage) return p } // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. func Bool(names []string, value bool, usage string) *bool { return CommandLine.Bool(names, value, usage) } // IntVar defines an int flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { fs.Var(newIntValue(value, p), names, usage) } // IntVar defines an int flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. func IntVar(p *int, names []string, value int, usage string) { CommandLine.Var(newIntValue(value, p), names, usage) } // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. func (fs *FlagSet) Int(names []string, value int, usage string) *int { p := new(int) fs.IntVar(p, names, value, usage) return p } // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. 
func Int(names []string, value int, usage string) *int { return CommandLine.Int(names, value, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { fs.Var(newInt64Value(value, p), names, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. func Int64Var(p *int64, names []string, value int64, usage string) { CommandLine.Var(newInt64Value(value, p), names, usage) } // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { p := new(int64) fs.Int64Var(p, names, value, usage) return p } // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. func Int64(names []string, value int64, usage string) *int64 { return CommandLine.Int64(names, value, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. // The argument p points to a uint variable in which to store the value of the flag. func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { fs.Var(newUintValue(value, p), names, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. // The argument p points to a uint variable in which to store the value of the flag. func UintVar(p *uint, names []string, value uint, usage string) { CommandLine.Var(newUintValue(value, p), names, usage) } // Uint defines a uint flag with specified name, default value, and usage string. 
// The return value is the address of a uint variable that stores the value of the flag. func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { p := new(uint) fs.UintVar(p, names, value, usage) return p } // Uint defines a uint flag with specified name, default value, and usage string. // The return value is the address of a uint variable that stores the value of the flag. func Uint(names []string, value uint, usage string) *uint { return CommandLine.Uint(names, value, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { fs.Var(newUint64Value(value, p), names, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func Uint64Var(p *uint64, names []string, value uint64, usage string) { CommandLine.Var(newUint64Value(value, p), names, usage) } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { p := new(uint64) fs.Uint64Var(p, names, value, usage) return p } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func Uint64(names []string, value uint64, usage string) *uint64 { return CommandLine.Uint64(names, value, usage) } // Uint16Var defines a uint16 flag with specified name, default value, and usage string. // The argument p points to a uint16 variable in which to store the value of the flag. 
func (fs *FlagSet) Uint16Var(p *uint16, names []string, value uint16, usage string) { fs.Var(newUint16Value(value, p), names, usage) } // Uint16Var defines a uint16 flag with specified name, default value, and usage string. // The argument p points to a uint16 variable in which to store the value of the flag. func Uint16Var(p *uint16, names []string, value uint16, usage string) { CommandLine.Var(newUint16Value(value, p), names, usage) } // Uint16 defines a uint16 flag with specified name, default value, and usage string. // The return value is the address of a uint16 variable that stores the value of the flag. func (fs *FlagSet) Uint16(names []string, value uint16, usage string) *uint16 { p := new(uint16) fs.Uint16Var(p, names, value, usage) return p } // Uint16 defines a uint16 flag with specified name, default value, and usage string. // The return value is the address of a uint16 variable that stores the value of the flag. func Uint16(names []string, value uint16, usage string) *uint16 { return CommandLine.Uint16(names, value, usage) } // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { fs.Var(newStringValue(value, p), names, usage) } // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. func StringVar(p *string, names []string, value string, usage string) { CommandLine.Var(newStringValue(value, p), names, usage) } // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. 
func (fs *FlagSet) String(names []string, value string, usage string) *string { p := new(string) fs.StringVar(p, names, value, usage) return p } // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. func String(names []string, value string, usage string) *string { return CommandLine.String(names, value, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { fs.Var(newFloat64Value(value, p), names, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func Float64Var(p *float64, names []string, value float64, usage string) { CommandLine.Var(newFloat64Value(value, p), names, usage) } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { p := new(float64) fs.Float64Var(p, names, value, usage) return p } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func Float64(names []string, value float64, usage string) *float64 { return CommandLine.Float64(names, value, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. 
func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) {
	fs.Var(newDurationValue(value, p), names, usage)
}

// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
// The argument p points to a time.Duration variable in which to store the value of the flag.
func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) {
	CommandLine.Var(newDurationValue(value, p), names, usage)
}

// Duration defines a time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a time.Duration variable that stores the value of the flag.
func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration {
	p := new(time.Duration)
	fs.DurationVar(p, names, value, usage)
	return p
}

// Duration defines a time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a time.Duration variable that stores the value of the flag.
func Duration(names []string, value time.Duration, usage string) *time.Duration {
	return CommandLine.Duration(names, value, usage)
}

// Var defines a flag with the specified name and usage string. The type and
// value of the flag are represented by the first argument, of type Value, which
// typically holds a user-defined implementation of Value. For instance, the
// caller could create a flag that turns a comma-separated string into a slice
// of strings by giving the slice the methods of Value; in particular, Set would
// decompose the comma-separated string into the slice.
//
// Each entry of names is registered separately (after stripping a leading '#',
// which marks a deprecated alias), all pointing at the same *Flag.
func (fs *FlagSet) Var(value Value, names []string, usage string) {
	// Remember the default value as a string; it won't change.
	flag := &Flag{names, usage, value, value.String()}
	for _, name := range names {
		name = strings.TrimPrefix(name, "#")
		_, alreadythere := fs.formal[name]
		if alreadythere {
			var msg string
			if fs.name == "" {
				msg = fmt.Sprintf("flag redefined: %s", name)
			} else {
				msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name)
			}
			fmt.Fprintln(fs.Out(), msg)
			panic(msg) // Happens only if flags are declared with identical names
		}
		if fs.formal == nil {
			fs.formal = make(map[string]*Flag)
		}
		fs.formal[name] = flag
	}
}

// Var defines a flag with the specified name and usage string. The type and
// value of the flag are represented by the first argument, of type Value, which
// typically holds a user-defined implementation of Value. For instance, the
// caller could create a flag that turns a comma-separated string into a slice
// of strings by giving the slice the methods of Value; in particular, Set would
// decompose the comma-separated string into the slice.
func Var(value Value, names []string, usage string) {
	CommandLine.Var(value, names, usage)
}

// failf prints to standard error a formatted error and usage message and
// returns the error.
func (fs *FlagSet) failf(format string, a ...interface{}) error {
	err := fmt.Errorf(format, a...)
	fmt.Fprintln(fs.Out(), err)
	if os.Args[0] == fs.name {
		fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0])
	} else {
		fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name)
	}
	return err
}

// usage calls the Usage method for the flag set, or the usage function if
// the flag set is CommandLine.
func (fs *FlagSet) usage() {
	if fs == CommandLine {
		Usage()
	} else if fs.Usage == nil {
		defaultUsage(fs)
	} else {
		fs.Usage()
	}
}

// trimQuotes strips one matching pair of outermost quotes (double or single)
// from str, if present. Inner quotes are left untouched.
func trimQuotes(str string) string {
	if len(str) == 0 {
		return str
	}
	type quote struct {
		start, end byte
	}

	// All valid quote types.
	quotes := []quote{
		// Double quotes
		{
			start: '"',
			end:   '"',
		},

		// Single quotes
		{
			start: '\'',
			end:   '\'',
		},
	}

	for _, quote := range quotes {
		// Only strip if outermost match.
		if str[0] == quote.start && str[len(str)-1] == quote.end {
			str = str[1 : len(str)-1]
			break
		}
	}

	return str
}

// parseOne parses one flag. It reports whether a flag was seen.
//
// The three results are (seen, retryName, err): seen is true when a flag was
// consumed; retryName is non-empty (with err == ErrRetry) when an unknown
// token like "-abc" might be a run of combined single-letter flags, which
// Parse then re-tries letter by letter.
func (fs *FlagSet) parseOne() (bool, string, error) {
	if len(fs.args) == 0 {
		return false, "", nil
	}
	s := fs.args[0]
	// A bare "-" or any token not starting with '-' ends flag parsing.
	if len(s) == 0 || s[0] != '-' || len(s) == 1 {
		return false, "", nil
	}
	if s[1] == '-' && len(s) == 2 { // "--" terminates the flags
		fs.args = fs.args[1:]
		return false, "", nil
	}
	name := s[1:]
	if len(name) == 0 || name[0] == '=' {
		return false, "", fs.failf("bad flag syntax: %s", s)
	}

	// it's a flag. does it have an argument?
	fs.args = fs.args[1:]
	hasValue := false
	value := ""
	if i := strings.Index(name, "="); i != -1 {
		value = trimQuotes(name[i+1:])
		hasValue = true
		name = name[:i]
	}

	m := fs.formal
	flag, alreadythere := m[name] // BUG
	if !alreadythere {
		if name == "-help" || name == "help" || name == "h" { // special case for nice help message.
			fs.usage()
			return false, "", ErrHelp
		}
		if len(name) > 0 && name[0] == '-' {
			return false, "", fs.failf("flag provided but not defined: -%s", name)
		}
		// Unknown single-dash token: let Parse retry it as combined
		// single-letter flags.
		return false, name, ErrRetry
	}
	if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
		if hasValue {
			if err := fv.Set(value); err != nil {
				return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err)
			}
		} else {
			fv.Set("true")
		}
	} else {
		// It must have a value, which might be the next argument.
		if !hasValue && len(fs.args) > 0 {
			// value is the next arg
			hasValue = true
			value, fs.args = fs.args[0], fs.args[1:]
		}
		if !hasValue {
			return false, "", fs.failf("flag needs an argument: -%s", name)
		}
		if err := flag.Value.Set(value); err != nil {
			return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err)
		}
	}
	if fs.actual == nil {
		fs.actual = make(map[string]*Flag)
	}
	fs.actual[name] = flag
	// If the name used was a deprecated alias ("#name"), warn and point the
	// user at the first non-deprecated alias, if any.
	for i, n := range flag.Names {
		if n == fmt.Sprintf("#%s", name) {
			replacement := ""
			for j := i; j < len(flag.Names); j++ {
				if flag.Names[j][0] != '#' {
					replacement = flag.Names[j]
					break
				}
			}
			if replacement != "" {
				fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement)
			} else {
				fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name)
			}
		}
	}
	return true, "", nil
}

// Parse parses flag definitions from the argument list, which should not
// include the command name. Must be called after all flags in the FlagSet
// are defined and before flags are accessed by the program.
// The return value will be ErrHelp if -help was set but not defined.
func (fs *FlagSet) Parse(arguments []string) error {
	fs.parsed = true
	fs.args = arguments
	for {
		seen, name, err := fs.parseOne()
		if seen {
			continue
		}
		if err == nil {
			break
		}
		if err == ErrRetry {
			// "-abc" was not a defined flag: retry it as the combined
			// single-letter flags "-a -b -c".
			if len(name) > 1 {
				err = nil
				for _, letter := range strings.Split(name, "") {
					fs.args = append([]string{"-" + letter}, fs.args...)
					seen2, _, err2 := fs.parseOne()
					if seen2 {
						continue
					}
					if err2 != nil {
						err = fs.failf("flag provided but not defined: -%s", name)
						break
					}
				}
				if err == nil {
					continue
				}
			} else {
				err = fs.failf("flag provided but not defined: -%s", name)
			}
		}
		switch fs.errorHandling {
		case ContinueOnError:
			return err
		case ExitOnError:
			// NOTE(review): exits with 125 rather than the stdlib flag
			// package's 2 — presumably a deliberate CLI convention; confirm
			// before changing.
			os.Exit(125)
		case PanicOnError:
			panic(err)
		}
	}
	return nil
}

// ParseFlags is a utility function that adds a help flag if withHelp is true,
// calls fs.Parse(args) and prints a relevant error message if there are
// incorrect number of arguments. It returns error only if error handling is
// set to ContinueOnError and parsing fails. If error handling is set to
// ExitOnError, it's safe to ignore the return value.
func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error {
	var help *bool
	if withHelp {
		help = fs.Bool([]string{"#help", "-help"}, false, "Print usage")
	}
	if err := fs.Parse(args); err != nil {
		return err
	}
	if help != nil && *help {
		fs.SetOutput(os.Stdout)
		fs.Usage()
		os.Exit(0)
	}
	if str := fs.CheckArgs(); str != "" {
		fs.SetOutput(os.Stderr)
		fs.ReportError(str, withHelp)
		fs.ShortUsage()
		os.Exit(1)
	}
	return nil
}

// ReportError is a utility method that prints a user-friendly message
// containing the error that occurred during parsing and a suggestion to get help
func (fs *FlagSet) ReportError(str string, withHelp bool) {
	if withHelp {
		if os.Args[0] == fs.Name() {
			str += ".\nSee '" + os.Args[0] + " --help'"
		} else {
			str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
		}
	}
	fmt.Fprintf(fs.Out(), "docker: %s.\n", str)
}

// Parsed reports whether fs.Parse has been called.
func (fs *FlagSet) Parsed() bool {
	return fs.parsed
}

// Parse parses the command-line flags from os.Args[1:]. Must be called
// after all flags are defined and before flags are accessed by the program.
func Parse() {
	// Ignore errors; CommandLine is set for ExitOnError.
	CommandLine.Parse(os.Args[1:])
}

// Parsed returns true if the command-line flags have been parsed.
func Parsed() bool {
	return CommandLine.Parsed()
}

// CommandLine is the default set of command-line flags, parsed from os.Args.
// The top-level functions such as BoolVar, Arg, and on are wrappers for the
// methods of CommandLine.
var CommandLine = NewFlagSet(os.Args[0], ExitOnError)

// NewFlagSet returns a new, empty flag set with the specified name and
// error handling property.
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
	f := &FlagSet{
		name:          name,
		errorHandling: errorHandling,
	}
	return f
}

// Init sets the name and error handling property for a flag set.
// By default, the zero FlagSet uses an empty name and the
// ContinueOnError error handling policy.
func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) {
	fs.name = name
	fs.errorHandling = errorHandling
}

// mergeVal wraps a Value taken from another FlagSet so that setting it goes
// through the originating FlagSet's Set method (keeping its `actual` map in
// sync). Used by Merge below.
type mergeVal struct {
	Value
	key  string
	fset *FlagSet
}

// Set forwards the assignment to the originating FlagSet.
func (v mergeVal) Set(s string) error {
	return v.fset.Set(v.key, s)
}

// IsBoolFlag reports whether the wrapped value is a boolean flag.
func (v mergeVal) IsBoolFlag() bool {
	if b, ok := v.Value.(boolFlag); ok {
		return b.IsBoolFlag()
	}
	return false
}

// Name returns the name of a mergeVal.
// If the original value had a name, return the original name,
// otherwise, return the key assigned to this mergeVal.
func (v mergeVal) Name() string {
	type namedValue interface {
		Name() string
	}
	if nVal, ok := v.Value.(namedValue); ok {
		return nVal.Name()
	}
	return v.key
}

// Merge is a helper function that merges n FlagSets into a single dest FlagSet
// In case of name collision between the flagsets it will apply
// the destination FlagSet's errorHandling behavior.
func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
	for _, fset := range flagsets {
		if fset.formal == nil {
			continue
		}
		for k, f := range fset.formal {
			if _, ok := dest.formal[k]; ok {
				var err error
				if fset.name == "" {
					err = fmt.Errorf("flag redefined: %s", k)
				} else {
					err = fmt.Errorf("%s flag redefined: %s", fset.name, k)
				}
				fmt.Fprintln(fset.Out(), err.Error())
				// Happens only if flags are declared with identical names
				switch dest.errorHandling {
				case ContinueOnError:
					return err
				case ExitOnError:
					os.Exit(2)
				case PanicOnError:
					panic(err)
				}
			}
			// Copy the flag, rewiring its Value through mergeVal so that
			// setting it updates the source FlagSet's state.
			newF := *f
			newF.Value = mergeVal{f.Value, k, fset}
			if dest.formal == nil {
				dest.formal = make(map[string]*Flag)
			}
			dest.formal[k] = &newF
		}
	}
	return nil
}

// IsEmpty reports if the FlagSet is actually empty.
func (fs *FlagSet) IsEmpty() bool {
	return len(fs.actual) == 0
}
docker-1.10.3/pkg/mflag/flag_test.go000066400000000000000000000337171267010174400172260ustar00rootroot00000000000000// Copyright 2014-2016 The Docker & Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package mflag

import (
	"bytes"
	"fmt"
	"os"
	"sort"
	"strings"
	"testing"
	"time"
)

// ResetForTesting clears all flag state and sets the usage function as directed.
// After calling ResetForTesting, parse errors in flag handling will not
// exit the program.
func ResetForTesting(usage func()) {
	CommandLine = NewFlagSet(os.Args[0], ContinueOnError)
	Usage = usage
}

// boolString maps the numeric default "0" to its boolean spelling "false"
// (anything else to "true"), for the value comparisons in TestEverything.
func boolString(s string) string {
	if s == "0" {
		return "false"
	}
	return "true"
}

// TestEverything registers one flag of every supported type and verifies
// Visit/VisitAll behavior before and after setting them.
func TestEverything(t *testing.T) {
	ResetForTesting(nil)
	Bool([]string{"test_bool"}, false, "bool value")
	Int([]string{"test_int"}, 0, "int value")
	Int64([]string{"test_int64"}, 0, "int64 value")
	Uint([]string{"test_uint"}, 0, "uint value")
	Uint64([]string{"test_uint64"}, 0, "uint64 value")
	String([]string{"test_string"}, "0", "string value")
	Float64([]string{"test_float64"}, 0, "float64 value")
	Duration([]string{"test_duration"}, 0, "time.Duration value")
	m := make(map[string]*Flag)
	desired := "0"
	visitor := func(f *Flag) {
		for _, name := range f.Names {
			if len(name) > 5 && name[0:5] == "test_" {
				m[name] = f
				ok := false
				switch {
				case f.Value.String() == desired:
					ok = true
				case name == "test_bool" && f.Value.String() == boolString(desired):
					ok = true
				case name == "test_duration" && f.Value.String() == desired+"s":
					ok = true
				}
				if !ok {
					t.Error("Visit: bad value", f.Value.String(), "for", name)
				}
			}
		}
	}
	VisitAll(visitor)
	if len(m) != 8 {
		t.Error("VisitAll misses some flags")
		for k, v := range m {
			t.Log(k, *v)
		}
	}
	m = make(map[string]*Flag)
	Visit(visitor)
	if len(m) != 0 {
		t.Errorf("Visit sees unset flags")
		for k, v := range m {
			t.Log(k, *v)
		}
	}
	// Now set all flags
	Set("test_bool", "true")
	Set("test_int", "1")
	Set("test_int64", "1")
	Set("test_uint", "1")
	Set("test_uint64", "1")
	Set("test_string", "1")
	Set("test_float64", "1")
	Set("test_duration", "1s")
	desired = "1"
	Visit(visitor)
	if len(m) != 8 {
		t.Error("Visit fails after set")
		for k, v := range m {
			t.Log(k, *v)
		}
	}
	// Now test they're visited in sort order.
	var flagNames []string
	Visit(func(f *Flag) {
		for _, name := range f.Names {
			flagNames = append(flagNames, name)
		}
	})
	if !sort.StringsAreSorted(flagNames) {
		t.Errorf("flag names not sorted: %v", flagNames)
	}
}

// TestGet verifies that every flag Value also implements Getter and that
// Get returns the typed default that was registered.
func TestGet(t *testing.T) {
	ResetForTesting(nil)
	Bool([]string{"test_bool"}, true, "bool value")
	Int([]string{"test_int"}, 1, "int value")
	Int64([]string{"test_int64"}, 2, "int64 value")
	Uint([]string{"test_uint"}, 3, "uint value")
	Uint64([]string{"test_uint64"}, 4, "uint64 value")
	String([]string{"test_string"}, "5", "string value")
	Float64([]string{"test_float64"}, 6, "float64 value")
	Duration([]string{"test_duration"}, 7, "time.Duration value")
	visitor := func(f *Flag) {
		for _, name := range f.Names {
			if len(name) > 5 && name[0:5] == "test_" {
				g, ok := f.Value.(Getter)
				if !ok {
					t.Errorf("Visit: value does not satisfy Getter: %T", f.Value)
					return
				}
				switch name {
				case "test_bool":
					ok = g.Get() == true
				case "test_int":
					ok = g.Get() == int(1)
				case "test_int64":
					ok = g.Get() == int64(2)
				case "test_uint":
					ok = g.Get() == uint(3)
				case "test_uint64":
					ok = g.Get() == uint64(4)
				case "test_string":
					ok = g.Get() == "5"
				case "test_float64":
					ok = g.Get() == float64(6)
				case "test_duration":
					ok = g.Get() == time.Duration(7)
				}
				if !ok {
					t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name)
				}
			}
		}
	}
	VisitAll(visitor)
}

// testParse exercises FlagSet.Parse with one flag of every supported type.
// NOTE(review): this definition continues beyond the end of this chunk and is
// intentionally left incomplete here.
func testParse(f *FlagSet, t *testing.T) {
	if f.Parsed() {
		t.Error("f.Parse() = true before Parse")
	}
	boolFlag := f.Bool([]string{"bool"}, false, "bool value")
	bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value")
	f.Bool([]string{"bool3"}, false, "bool3 value")
	bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value")
	intFlag := f.Int([]string{"-int"}, 0, "int value")
	int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value")
	uintFlag := f.Uint([]string{"uint"}, 0, "uint value")
	uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value")
	stringFlag := f.String([]string{"string"}, "0", "string value")
	f.String([]string{"string2"}, "0",
"string2 value") singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") extra := "one-extra-argument" args := []string{ "-bool", "-bool2=true", "-bool4=false", "--int", "22", "--int64", "0x23", "-uint", "24", "--uint64", "25", "-string", "hello", "-squote='single'", `-dquote="double"`, `-mquote='mixed"`, `-mquote2="mixed2'`, `-nquote="'single nested'"`, `-nquote2='"double nested"'`, "-float64", "2718e28", "-duration", "2m", extra, } if err := f.Parse(args); err != nil { t.Fatal(err) } if !f.Parsed() { t.Error("f.Parse() = false after Parse") } if *boolFlag != true { t.Error("bool flag should be true, is ", *boolFlag) } if *bool2Flag != true { t.Error("bool2 flag should be true, is ", *bool2Flag) } if !f.IsSet("bool2") { t.Error("bool2 should be marked as set") } if f.IsSet("bool3") { t.Error("bool3 should not be marked as set") } if !f.IsSet("bool4") { t.Error("bool4 should be marked as set") } if *bool4Flag != false { t.Error("bool4 flag should be false, is ", *bool4Flag) } if *intFlag != 22 { t.Error("int flag should be 22, is ", *intFlag) } if *int64Flag != 0x23 { t.Error("int64 flag should be 0x23, is ", *int64Flag) } if *uintFlag != 24 { t.Error("uint flag should be 24, is ", *uintFlag) } if *uint64Flag != 25 { t.Error("uint64 flag should be 25, is ", *uint64Flag) } if *stringFlag != "hello" { t.Error("string flag should be `hello`, is ", *stringFlag) } if !f.IsSet("string") { t.Error("string flag should be marked as 
set") } if f.IsSet("string2") { t.Error("string2 flag should not be marked as set") } if *singleQuoteFlag != "single" { t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) } if *doubleQuoteFlag != "double" { t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) } if *mixedQuoteFlag != `'mixed"` { t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) } if *mixed2QuoteFlag != `"mixed2'` { t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) } if *nestedQuoteFlag != "'single nested'" { t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) } if *nested2QuoteFlag != `"double nested"` { t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) } if *float64Flag != 2718e28 { t.Error("float64 flag should be 2718e28, is ", *float64Flag) } if *durationFlag != 2*time.Minute { t.Error("duration flag should be 2m, is ", *durationFlag) } if len(f.Args()) != 1 { t.Error("expected one argument, got", len(f.Args())) } else if f.Args()[0] != extra { t.Errorf("expected argument %q got %q", extra, f.Args()[0]) } } func testPanic(f *FlagSet, t *testing.T) { f.Int([]string{"-int"}, 0, "int value") if f.Parsed() { t.Error("f.Parse() = true before Parse") } args := []string{ "-int", "21", } f.Parse(args) } func TestParsePanic(t *testing.T) { ResetForTesting(func() {}) testPanic(CommandLine, t) } func TestParse(t *testing.T) { ResetForTesting(func() { t.Error("bad parse") }) testParse(CommandLine, t) } func TestFlagSetParse(t *testing.T) { testParse(NewFlagSet("test", ContinueOnError), t) } // Declare a user-defined flag type. 
type flagVar []string func (f *flagVar) String() string { return fmt.Sprint([]string(*f)) } func (f *flagVar) Set(value string) error { *f = append(*f, value) return nil } func TestUserDefined(t *testing.T) { var flags FlagSet flags.Init("test", ContinueOnError) var v flagVar flags.Var(&v, []string{"v"}, "usage") if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { t.Error(err) } if len(v) != 3 { t.Fatal("expected 3 args; got ", len(v)) } expect := "[1 2 3]" if v.String() != expect { t.Errorf("expected value %q got %q", expect, v.String()) } } // Declare a user-defined boolean flag type. type boolFlagVar struct { count int } func (b *boolFlagVar) String() string { return fmt.Sprintf("%d", b.count) } func (b *boolFlagVar) Set(value string) error { if value == "true" { b.count++ } return nil } func (b *boolFlagVar) IsBoolFlag() bool { return b.count < 4 } func TestUserDefinedBool(t *testing.T) { var flags FlagSet flags.Init("test", ContinueOnError) var b boolFlagVar var err error flags.Var(&b, []string{"b"}, "usage") if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { if b.count < 4 { t.Error(err) } } if b.count != 4 { t.Errorf("want: %d; got: %d", 4, b.count) } if err == nil { t.Error("expected error; got none") } } func TestSetOutput(t *testing.T) { var flags FlagSet var buf bytes.Buffer flags.SetOutput(&buf) flags.Init("test", ContinueOnError) flags.Parse([]string{"-unknown"}) if out := buf.String(); !strings.Contains(out, "-unknown") { t.Logf("expected output mentioning unknown; got %q", out) } } // This tests that one can reset the flags. This still works but not well, and is // superseded by FlagSet. 
func TestChangingArgs(t *testing.T) { ResetForTesting(func() { t.Fatal("bad parse") }) oldArgs := os.Args defer func() { os.Args = oldArgs }() os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} before := Bool([]string{"before"}, false, "") if err := CommandLine.Parse(os.Args[1:]); err != nil { t.Fatal(err) } cmd := Arg(0) os.Args = Args() after := Bool([]string{"after"}, false, "") Parse() args := Args() if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) } } // Test that -help invokes the usage message and returns ErrHelp. func TestHelp(t *testing.T) { var helpCalled = false fs := NewFlagSet("help test", ContinueOnError) fs.Usage = func() { helpCalled = true } var flag bool fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") // Regular flag invocation should work err := fs.Parse([]string{"-flag=true"}) if err != nil { t.Fatal("expected no error; got ", err) } if !flag { t.Error("flag was not set by -flag") } if helpCalled { t.Error("help called for regular flag") helpCalled = false // reset for next test } // Help flag should work as expected. err = fs.Parse([]string{"-help"}) if err == nil { t.Fatal("error expected") } if err != ErrHelp { t.Fatal("expected ErrHelp; got ", err) } if !helpCalled { t.Fatal("help was not called") } // If we define a help flag, that should override. var help bool fs.BoolVar(&help, []string{"help"}, false, "help flag") helpCalled = false err = fs.Parse([]string{"-help"}) if err != nil { t.Fatal("expected no error for defined -help; got ", err) } if helpCalled { t.Fatal("help was called; should not have been for defined help flag") } } // Test the flag count functions. 
func TestFlagCounts(t *testing.T) { fs := NewFlagSet("help test", ContinueOnError) var flag bool fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag") fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag") fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag") fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag") fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag") fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag") if fs.FlagCount() != 6 { t.Fatal("FlagCount wrong. ", fs.FlagCount()) } if fs.FlagCountUndeprecated() != 4 { t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated()) } if fs.NFlag() != 0 { t.Fatal("NFlag wrong. ", fs.NFlag()) } err := fs.Parse([]string{"-fd", "-g", "-flag4"}) if err != nil { t.Fatal("expected no error for defined -help; got ", err) } if fs.NFlag() != 4 { t.Fatal("NFlag wrong. ", fs.NFlag()) } } // Show up bug in sortFlags func TestSortFlags(t *testing.T) { fs := NewFlagSet("help TestSortFlags", ContinueOnError) var err error var b bool fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage") err = fs.Parse([]string{"--banana=true"}) if err != nil { t.Fatal("expected no error; got ", err) } count := 0 fs.VisitAll(func(flag *Flag) { count++ if flag == nil { t.Fatal("VisitAll should not return a nil flag") } }) flagcount := fs.FlagCount() if flagcount != count { t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count) } // Make sure its idempotent if flagcount != fs.FlagCount() { t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount()) } count = 0 fs.Visit(func(flag *Flag) { count++ if flag == nil { t.Fatal("Visit should not return a nil flag") } }) nflag := fs.NFlag() if nflag != count { t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count) } if nflag != fs.NFlag() { t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) } } func 
TestMergeFlags(t *testing.T) { base := NewFlagSet("base", ContinueOnError) base.String([]string{"f"}, "", "") fs := NewFlagSet("test", ContinueOnError) Merge(fs, base) if len(fs.formal) != 1 { t.Fatalf("FlagCount (%d) != number (1) of elements merged", len(fs.formal)) } } docker-1.10.3/pkg/mount/000077500000000000000000000000001267010174400147705ustar00rootroot00000000000000docker-1.10.3/pkg/mount/flags.go000066400000000000000000000046701267010174400164220ustar00rootroot00000000000000package mount import ( "fmt" "strings" ) // Parse fstab type mount options into mount() flags // and device specific data func parseOptions(options string) (int, string) { var ( flag int data []string ) flags := map[string]struct { clear bool flag int }{ "defaults": {false, 0}, "ro": {false, RDONLY}, "rw": {true, RDONLY}, "suid": {true, NOSUID}, "nosuid": {false, NOSUID}, "dev": {true, NODEV}, "nodev": {false, NODEV}, "exec": {true, NOEXEC}, "noexec": {false, NOEXEC}, "sync": {false, SYNCHRONOUS}, "async": {true, SYNCHRONOUS}, "dirsync": {false, DIRSYNC}, "remount": {false, REMOUNT}, "mand": {false, MANDLOCK}, "nomand": {true, MANDLOCK}, "atime": {true, NOATIME}, "noatime": {false, NOATIME}, "diratime": {true, NODIRATIME}, "nodiratime": {false, NODIRATIME}, "bind": {false, BIND}, "rbind": {false, RBIND}, "unbindable": {false, UNBINDABLE}, "runbindable": {false, RUNBINDABLE}, "private": {false, PRIVATE}, "rprivate": {false, RPRIVATE}, "shared": {false, SHARED}, "rshared": {false, RSHARED}, "slave": {false, SLAVE}, "rslave": {false, RSLAVE}, "relatime": {false, RELATIME}, "norelatime": {true, RELATIME}, "strictatime": {false, STRICTATIME}, "nostrictatime": {true, STRICTATIME}, } for _, o := range strings.Split(options, ",") { // If the option does not exist in the flags table or the flag // is not supported on the platform, // then it is a data value for a specific fs type if f, exists := flags[o]; exists && f.flag != 0 { if f.clear { flag &= ^f.flag } else { flag |= f.flag } } else { 
data = append(data, o) } } return flag, strings.Join(data, ",") } // ParseTmpfsOptions parse fstab type mount options into flags and data func ParseTmpfsOptions(options string) (int, string, error) { flags, data := parseOptions(options) validFlags := map[string]bool{ "": true, "size": true, "mode": true, "uid": true, "gid": true, "nr_inodes": true, "nr_blocks": true, "mpol": true, } for _, o := range strings.Split(data, ",") { opt := strings.SplitN(o, "=", 2) if !validFlags[opt[0]] { return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) } } return flags, data, nil } docker-1.10.3/pkg/mount/flags_freebsd.go000066400000000000000000000016661267010174400201160ustar00rootroot00000000000000// +build freebsd,cgo package mount /* #include */ import "C" const ( // RDONLY will mount the filesystem as read-only. RDONLY = C.MNT_RDONLY // NOSUID will not allow set-user-identifier or set-group-identifier bits to // take effect. NOSUID = C.MNT_NOSUID // NOEXEC will not allow execution of any binaries on the mounted file system. NOEXEC = C.MNT_NOEXEC // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. SYNCHRONOUS = C.MNT_SYNCHRONOUS // NOATIME will not update the file access time when reading from a file. NOATIME = C.MNT_NOATIME ) // These flags are unsupported. const ( BIND = 0 DIRSYNC = 0 MANDLOCK = 0 NODEV = 0 NODIRATIME = 0 UNBINDABLE = 0 RUNBINDABLE = 0 PRIVATE = 0 RPRIVATE = 0 SHARED = 0 RSHARED = 0 SLAVE = 0 RSLAVE = 0 RBIND = 0 RELATIVE = 0 RELATIME = 0 REMOUNT = 0 STRICTATIME = 0 ) docker-1.10.3/pkg/mount/flags_linux.go000066400000000000000000000054601267010174400176370ustar00rootroot00000000000000package mount import ( "syscall" ) const ( // RDONLY will mount the file system read-only. RDONLY = syscall.MS_RDONLY // NOSUID will not allow set-user-identifier or set-group-identifier bits to // take effect. NOSUID = syscall.MS_NOSUID // NODEV will not interpret character or block special devices on the file // system. 
NODEV = syscall.MS_NODEV // NOEXEC will not allow execution of any binaries on the mounted file system. NOEXEC = syscall.MS_NOEXEC // SYNCHRONOUS will allow I/O to the file system to be done synchronously. SYNCHRONOUS = syscall.MS_SYNCHRONOUS // DIRSYNC will force all directory updates within the file system to be done // synchronously. This affects the following system calls: create, link, // unlink, symlink, mkdir, rmdir, mknod and rename. DIRSYNC = syscall.MS_DIRSYNC // REMOUNT will attempt to remount an already-mounted file system. This is // commonly used to change the mount flags for a file system, especially to // make a readonly file system writeable. It does not change device or mount // point. REMOUNT = syscall.MS_REMOUNT // MANDLOCK will force mandatory locks on a filesystem. MANDLOCK = syscall.MS_MANDLOCK // NOATIME will not update the file access time when reading from a file. NOATIME = syscall.MS_NOATIME // NODIRATIME will not update the directory access time. NODIRATIME = syscall.MS_NODIRATIME // BIND remounts a subtree somewhere else. BIND = syscall.MS_BIND // RBIND remounts a subtree and all possible submounts somewhere else. RBIND = syscall.MS_BIND | syscall.MS_REC // UNBINDABLE creates a mount which cannot be cloned through a bind operation. UNBINDABLE = syscall.MS_UNBINDABLE // RUNBINDABLE marks the entire mount tree as UNBINDABLE. RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC // PRIVATE creates a mount which carries no propagation abilities. PRIVATE = syscall.MS_PRIVATE // RPRIVATE marks the entire mount tree as PRIVATE. RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC // SLAVE creates a mount which receives propagation from its master, but not // vice versa. SLAVE = syscall.MS_SLAVE // RSLAVE marks the entire mount tree as SLAVE. 
RSLAVE = syscall.MS_SLAVE | syscall.MS_REC // SHARED creates a mount which provides the ability to create mirrors of // that mount such that mounts and unmounts within any of the mirrors // propagate to the other mirrors. SHARED = syscall.MS_SHARED // RSHARED marks the entire mount tree as SHARED. RSHARED = syscall.MS_SHARED | syscall.MS_REC // RELATIME updates inode access times relative to modify or change time. RELATIME = syscall.MS_RELATIME // STRICTATIME allows to explicitly request full atime updates. This makes // it possible for the kernel to default to relatime or noatime but still // allow userspace to override it. STRICTATIME = syscall.MS_STRICTATIME ) docker-1.10.3/pkg/mount/flags_unsupported.go000066400000000000000000000007501267010174400210650ustar00rootroot00000000000000// +build !linux,!freebsd freebsd,!cgo package mount // These flags are unsupported. const ( BIND = 0 DIRSYNC = 0 MANDLOCK = 0 NOATIME = 0 NODEV = 0 NODIRATIME = 0 NOEXEC = 0 NOSUID = 0 UNBINDABLE = 0 RUNBINDABLE = 0 PRIVATE = 0 RPRIVATE = 0 SHARED = 0 RSHARED = 0 SLAVE = 0 RSLAVE = 0 RBIND = 0 RELATIME = 0 RELATIVE = 0 REMOUNT = 0 STRICTATIME = 0 SYNCHRONOUS = 0 RDONLY = 0 ) docker-1.10.3/pkg/mount/mount.go000066400000000000000000000040741267010174400164660ustar00rootroot00000000000000package mount import ( "time" ) // GetMounts retrieves a list of mounts for the current running process. func GetMounts() ([]*Info, error) { return parseMountTable() } // Mounted looks at /proc/self/mountinfo to determine of the specified // mountpoint has been mounted func Mounted(mountpoint string) (bool, error) { entries, err := parseMountTable() if err != nil { return false, err } // Search the table for the mountpoint for _, e := range entries { if e.Mountpoint == mountpoint { return true, nil } } return false, nil } // Mount will mount filesystem according to the specified configuration, on the // condition that the target path is *not* already mounted. 
Options must be // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See // flags.go for supported option flags. func Mount(device, target, mType, options string) error { flag, _ := parseOptions(options) if flag&REMOUNT != REMOUNT { if mounted, err := Mounted(target); err != nil || mounted { return err } } return ForceMount(device, target, mType, options) } // ForceMount will mount a filesystem according to the specified configuration, // *regardless* if the target path is not already mounted. Options must be // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See // flags.go for supported option flags. func ForceMount(device, target, mType, options string) error { flag, data := parseOptions(options) if err := mount(device, target, mType, uintptr(flag), data); err != nil { return err } return nil } // Unmount will unmount the target filesystem, so long as it is mounted. func Unmount(target string) error { if mounted, err := Mounted(target); err != nil || !mounted { return err } return ForceUnmount(target) } // ForceUnmount will force an unmount of the target filesystem, regardless if // it is mounted or not. 
func ForceUnmount(target string) (err error) { // Simple retry logic for unmount for i := 0; i < 10; i++ { if err = unmount(target, 0); err == nil { return nil } time.Sleep(100 * time.Millisecond) } return } docker-1.10.3/pkg/mount/mount_test.go000066400000000000000000000047231267010174400175260ustar00rootroot00000000000000package mount import ( "os" "path" "testing" ) func TestMountOptionsParsing(t *testing.T) { options := "noatime,ro,size=10k" flag, data := parseOptions(options) if data != "size=10k" { t.Fatalf("Expected size=10 got %s", data) } expectedFlag := NOATIME | RDONLY if flag != expectedFlag { t.Fatalf("Expected %d got %d", expectedFlag, flag) } } func TestMounted(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") sourcePath = path.Join(sourceDir, "file.txt") targetPath = path.Join(targetDir, "file.txt") ) os.Mkdir(sourceDir, 0777) os.Mkdir(targetDir, 0777) f, err := os.Create(sourcePath) if err != nil { t.Fatal(err) } f.WriteString("hello") f.Close() f, err = os.Create(targetPath) if err != nil { t.Fatal(err) } f.Close() if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() mounted, err := Mounted(targetDir) if err != nil { t.Fatal(err) } if !mounted { t.Fatalf("Expected %s to be mounted", targetDir) } if _, err := os.Stat(targetDir); err != nil { t.Fatal(err) } } func TestMountReadonly(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") sourcePath = path.Join(sourceDir, "file.txt") targetPath = path.Join(targetDir, "file.txt") ) os.Mkdir(sourceDir, 0777) os.Mkdir(targetDir, 0777) f, err := 
os.Create(sourcePath) if err != nil { t.Fatal(err) } f.WriteString("hello") f.Close() f, err = os.Create(targetPath) if err != nil { t.Fatal(err) } f.Close() if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) if err == nil { t.Fatal("Should not be able to open a ro file as rw") } } func TestGetMounts(t *testing.T) { mounts, err := GetMounts() if err != nil { t.Fatal(err) } root := false for _, entry := range mounts { if entry.Mountpoint == "/" { root = true } } if !root { t.Fatal("/ should be mounted at least") } } docker-1.10.3/pkg/mount/mounter_freebsd.go000066400000000000000000000023711267010174400205050ustar00rootroot00000000000000package mount /* #include #include #include #include #include #include */ import "C" import ( "fmt" "strings" "syscall" "unsafe" ) func allocateIOVecs(options []string) []C.struct_iovec { out := make([]C.struct_iovec, len(options)) for i, option := range options { out[i].iov_base = unsafe.Pointer(C.CString(option)) out[i].iov_len = C.size_t(len(option) + 1) } return out } func mount(device, target, mType string, flag uintptr, data string) error { isNullFS := false xs := strings.Split(data, ",") for _, x := range xs { if x == "bind" { isNullFS = true } } options := []string{"fspath", target} if isNullFS { options = append(options, "fstype", "nullfs", "target", device) } else { options = append(options, "fstype", mType, "from", device) } rawOptions := allocateIOVecs(options) for _, rawOption := range rawOptions { defer C.free(rawOption.iov_base) } if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { reason := C.GoString(C.strerror(*C.__error())) return fmt.Errorf("Failed to call nmount: %s", reason) } return nil } func unmount(target string, flag int) error { return syscall.Unmount(target, flag) } 
docker-1.10.3/pkg/mount/mounter_linux.go000066400000000000000000000010221267010174400202220ustar00rootroot00000000000000package mount import ( "syscall" ) func mount(device, target, mType string, flag uintptr, data string) error { if err := syscall.Mount(device, target, mType, flag, data); err != nil { return err } // If we have a bind mount or remount, remount... if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) } return nil } func unmount(target string, flag int) error { return syscall.Unmount(target, flag) } docker-1.10.3/pkg/mount/mounter_unsupported.go000066400000000000000000000003521267010174400214600ustar00rootroot00000000000000// +build !linux,!freebsd freebsd,!cgo package mount func mount(device, target, mType string, flag uintptr, data string) error { panic("Not implemented") } func unmount(target string, flag int) error { panic("Not implemented") } docker-1.10.3/pkg/mount/mountinfo.go000066400000000000000000000020511267010174400173330ustar00rootroot00000000000000package mount // Info reveals information about a particular mounted filesystem. This // struct is populated from the content in the /proc//mountinfo file. type Info struct { // ID is a unique identifier of the mount (may be reused after umount). ID int // Parent indicates the ID of the mount parent (or of self for the top of the // mount tree). Parent int // Major indicates one half of the device ID which identifies the device class. Major int // Minor indicates one half of the device ID which identifies a specific // instance of device. Minor int // Root of the mount within the filesystem. Root string // Mountpoint indicates the mount point relative to the process's root. Mountpoint string // Opts represents mount-specific options. Opts string // Optional represents optional fields. Optional string // Fstype indicates the type of filesystem, such as EXT3. 
Fstype string // Source indicates filesystem specific information or "none". Source string // VfsOpts represents per super block options. VfsOpts string } docker-1.10.3/pkg/mount/mountinfo_freebsd.go000066400000000000000000000016311267010174400210300ustar00rootroot00000000000000package mount /* #include #include #include */ import "C" import ( "fmt" "reflect" "unsafe" ) // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts. func parseMountTable() ([]*Info, error) { var rawEntries *C.struct_statfs count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) if count == 0 { return nil, fmt.Errorf("Failed to call getmntinfo") } var entries []C.struct_statfs header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) header.Cap = count header.Len = count header.Data = uintptr(unsafe.Pointer(rawEntries)) var out []*Info for _, entry := range entries { var mountinfo Info mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) out = append(out, &mountinfo) } return out, nil } docker-1.10.3/pkg/mount/mountinfo_linux.go000066400000000000000000000051341267010174400205570ustar00rootroot00000000000000// +build linux package mount import ( "bufio" "fmt" "io" "os" "strings" ) const ( /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) (1) mount ID: unique identifier of the mount (may be reused after umount) (2) parent ID: ID of parent (or of self for the top of the mount tree) (3) major:minor: value of st_dev for files on filesystem (4) root: root of the mount within the filesystem (5) mount point: mount point relative to the process's root (6) mount options: per mount options (7) optional fields: zero or more fields of the form "tag[:value]" (8) separator: marks the end of the optional fields (9) filesystem type: name of filesystem of the form "type[.subtype]" (10) 
mount source: filesystem specific information or "none" (11) super options: per super block options*/ mountinfoFormat = "%d %d %d:%d %s %s %s %s" ) // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts func parseMountTable() ([]*Info, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { return nil, err } defer f.Close() return parseInfoFile(f) } func parseInfoFile(r io.Reader) ([]*Info, error) { var ( s = bufio.NewScanner(r) out = []*Info{} ) for s.Scan() { if err := s.Err(); err != nil { return nil, err } var ( p = &Info{} text = s.Text() optionalFields string ) if _, err := fmt.Sscanf(text, mountinfoFormat, &p.ID, &p.Parent, &p.Major, &p.Minor, &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) } // Safe as mountinfo encodes mountpoints with spaces as \040. index := strings.Index(text, " - ") postSeparatorFields := strings.Fields(text[index+3:]) if len(postSeparatorFields) < 3 { return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) } if optionalFields != "-" { p.Optional = optionalFields } p.Fstype = postSeparatorFields[0] p.Source = postSeparatorFields[1] p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") out = append(out, p) } return out, nil } // PidMountInfo collects the mounts for a specific process ID. If the process // ID is unknown, it is better to use `GetMounts` which will inspect // "/proc/self/mountinfo" instead. 
func PidMountInfo(pid int) ([]*Info, error) {
	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// The per-pid mountinfo file has the same format as the self one, so
	// the generic parser is reused.
	return parseInfoFile(f)
}
docker-1.10.3/pkg/mount/mountinfo_linux_test.go000066400000000000000000001713731267010174400216270ustar00rootroot00000000000000// +build linux

package mount

import (
	"bytes"
	"testing"
)

const (
	// fedoraMountinfo is a /proc/self/mountinfo capture from a Fedora host,
	// used as fixture input for parseInfoFile.
	fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
30 23 0:25 / /sys/fs/cgroup/freezer
rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 
/dev/mapper/ssd-root rw,seclabel,data=ordered 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 
rw,seclabel,discard,stripe=16,data=ordered 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 
rw,seclabel,discard,stripe=16,data=ordered 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs 
//foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw 16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered 21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw 23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw 24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw 25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k 26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children 27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw 28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu 29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw 31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct 32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory 33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices 34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer 35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio 36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event 37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb 38 21 
0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd 39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 51 20 0:44 / 
/var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 64 20 0:57 / 
/var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 77 20 0:70 / 
/var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 90 20 0:83 / 
/var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 103 20 0:96 / 
/var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 116 20 0:109 / 
/var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 129 20 0:122 / 
/var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 142 20 0:139 / 
/var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw 22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw 23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw 24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc 26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children 27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children 28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children 29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children 30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children 31 24 0:24 / /sys/fs/cgroup/freezer 
rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children 32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children 33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota 35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw 36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw 42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw 43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw 44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c 85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 89 68 8:6 
/var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c 39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c 40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c 41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c 45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c 46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c 47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c 48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c 49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c 50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c 51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c 52 15 0:3395 / 
/var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c 53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c 54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c 55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c 56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c 57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c 59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c 60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c 61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c 62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c 63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c 64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c 65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c 66 15 0:3409 / 
/var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c 70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c 71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c 72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c 73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c 76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c 77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c 78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c 79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c 80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c 81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c 82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c 83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c 84 15 0:3426 / 
/var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c 94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c 95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c 96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c 97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c 98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c 102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c 103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c 104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c 105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c 106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c 107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c 108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c 109 15 0:3442 / 
/var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c 110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c 111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c 112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c 113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c 114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c 117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c 118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c 119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c 120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c 121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c 122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c 123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c 126 15 0:3459 / 
/var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c 127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c 128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c 130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c 131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c 132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c 133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c 134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c 135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c 136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c 137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c 138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c 139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c 140 15 0:3473 / 
/var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c 141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c 142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c 143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c 144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c 147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c 150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c 151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c 152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c 153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c 154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c 155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c 156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c 157 15 0:3490 / 
/var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c 158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c 159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c 160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c 162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c 163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c 164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c 165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c 166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c 167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c 168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c 169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c 170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c 171 15 0:3504 / 
/var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c 172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c 173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c 174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c 184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c 187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c 188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c 189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c 190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c 191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c 192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c 193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c 194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c 195 15 0:3517 / 
/var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c 196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c 197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c 198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c 199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c 200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c 201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c 202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c 203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c 204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c 205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c 206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c 207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c 208 15 0:3530 / 
/var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c 209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c 210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c 211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c 212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c 213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c 214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c 215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c 216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c 217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c 218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c 219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c 220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c 221 15 0:3543 / 
/var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c 222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c 223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c 224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c 225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c 226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c 227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c 228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c 229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c 230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c 231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c 232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c 233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c 234 15 0:3556 / 
/var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c 235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c 237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c 238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c 239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c 240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c 241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c 242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c 243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c 244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c 245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c 246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c 247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c 249 15 0:3571 / 
/var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c 250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c 251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c 252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c 253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c 254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c 255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c 256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c 257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c 259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c 260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c 261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c 262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c 263 15 0:3585 / 
/var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c 264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c 58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c 67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c 265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c 270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c 273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c 278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c 281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c 286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c 289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c 99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` ) func TestParseFedoraMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(fedoraMountinfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } func TestParseUbuntuMountinfo(t *testing.T) { r 
:= bytes.NewBuffer([]byte(ubuntuMountInfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } func TestParseGentooMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(gentooMountinfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } func TestParseFedoraMountinfoFields(t *testing.T) { r := bytes.NewBuffer([]byte(fedoraMountinfo)) infos, err := parseInfoFile(r) if err != nil { t.Fatal(err) } expectedLength := 58 if len(infos) != expectedLength { t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) } mi := Info{ ID: 15, Parent: 35, Major: 0, Minor: 3, Root: "/", Mountpoint: "/proc", Opts: "rw,nosuid,nodev,noexec,relatime", Optional: "shared:5", Fstype: "proc", Source: "proc", VfsOpts: "rw", } if *infos[0] != mi { t.Fatalf("expected %#v, got %#v", mi, infos[0]) } } docker-1.10.3/pkg/mount/mountinfo_unsupported.go000066400000000000000000000003541267010174400220070ustar00rootroot00000000000000// +build !linux,!freebsd freebsd,!cgo package mount import ( "fmt" "runtime" ) func parseMountTable() ([]*Info, error) { return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } docker-1.10.3/pkg/mount/sharedsubtree_linux.go000066400000000000000000000044331267010174400214020ustar00rootroot00000000000000// +build linux package mount // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeShared(mountPoint string) error { return ensureMountedAs(mountPoint, "shared") } // MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeRShared(mountPoint string) error { return ensureMountedAs(mountPoint, "rshared") } // MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. // See the supported options in flags.go for further reference. 
func MakePrivate(mountPoint string) error { return ensureMountedAs(mountPoint, "private") } // MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option // enabled. See the supported options in flags.go for further reference. func MakeRPrivate(mountPoint string) error { return ensureMountedAs(mountPoint, "rprivate") } // MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeSlave(mountPoint string) error { return ensureMountedAs(mountPoint, "slave") } // MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeRSlave(mountPoint string) error { return ensureMountedAs(mountPoint, "rslave") } // MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option // enabled. See the supported options in flags.go for further reference. func MakeUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "unbindable") } // MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount // option enabled. See the supported options in flags.go for further reference. 
func MakeRUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "runbindable") } func ensureMountedAs(mountPoint, options string) error { mounted, err := Mounted(mountPoint) if err != nil { return err } if !mounted { if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { return err } } mounted, err = Mounted(mountPoint) if err != nil { return err } return ForceMount("", mountPoint, "none", options) } docker-1.10.3/pkg/mount/sharedsubtree_linux_test.go000066400000000000000000000177641267010174400224540ustar00rootroot00000000000000// +build linux package mount import ( "os" "path" "syscall" "testing" ) // nothing is propagated in or out func TestSubtreePrivate(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") outside1Dir = path.Join(tmp, "outside1") outside2Dir = path.Join(tmp, "outside2") outside1Path = path.Join(outside1Dir, "file.txt") outside2Path = path.Join(outside2Dir, "file.txt") outside1CheckPath = path.Join(targetDir, "a", "file.txt") outside2CheckPath = path.Join(sourceDir, "b", "file.txt") ) if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { t.Fatal(err) } if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(targetDir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outside1Dir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outside2Dir, 0777); err != nil { t.Fatal(err) } if err := createFile(outside1Path); err != nil { t.Fatal(err) } if err := createFile(outside2Path); err != nil { t.Fatal(err) } // mount the shared directory to a target if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // next, make the target private if err := MakePrivate(targetDir); 
err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // mount in an outside path to a mounted path inside the _source_ if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(sourceDir, "a")); err != nil { t.Fatal(err) } }() // check that this file _does_not_ show in the _target_ if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not be visible, but is", outside1CheckPath) } // next mount outside2Dir into the _target_ if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(targetDir, "b")); err != nil { t.Fatal(err) } }() // check that this file _does_not_ show in the _source_ if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not be visible, but is", outside2CheckPath) } } // Testing that when a target is a shared mount, // then child mounts propagate to the source func TestSubtreeShared(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") outsideDir = path.Join(tmp, "outside") outsidePath = path.Join(outsideDir, "file.txt") sourceCheckPath = path.Join(sourceDir, "a", "file.txt") ) if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(targetDir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outsideDir, 0777); err != nil { t.Fatal(err) } if err := createFile(outsidePath); err != nil { t.Fatal(err) } // mount the source as shared if err := MakeShared(sourceDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(sourceDir); err != nil { 
t.Fatal(err) } }() // mount the shared directory to a target if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // mount in an outside path to a mounted path inside the target if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(targetDir, "a")); err != nil { t.Fatal(err) } }() // NOW, check that the file from the outside directory is available in the source directory if _, err := os.Stat(sourceCheckPath); err != nil { t.Fatal(err) } } // testing that mounts to a shared source show up in the slave target, // and that mounts into a slave target do _not_ show up in the shared source func TestSubtreeSharedSlave(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") outside1Dir = path.Join(tmp, "outside1") outside2Dir = path.Join(tmp, "outside2") outside1Path = path.Join(outside1Dir, "file.txt") outside2Path = path.Join(outside2Dir, "file.txt") outside1CheckPath = path.Join(targetDir, "a", "file.txt") outside2CheckPath = path.Join(sourceDir, "b", "file.txt") ) if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { t.Fatal(err) } if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(targetDir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outside1Dir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outside2Dir, 0777); err != nil { t.Fatal(err) } if err := createFile(outside1Path); err != nil { t.Fatal(err) } if err := createFile(outside2Path); err != nil { t.Fatal(err) } // mount the source as shared if err := MakeShared(sourceDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(sourceDir); err != nil { 
t.Fatal(err) } }() // mount the shared directory to a target if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // next, make the target slave if err := MakeSlave(targetDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // mount in an outside path to a mounted path inside the _source_ if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(sourceDir, "a")); err != nil { t.Fatal(err) } }() // check that this file _does_ show in the _target_ if _, err := os.Stat(outside1CheckPath); err != nil { t.Fatal(err) } // next mount outside2Dir into the _target_ if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(targetDir, "b")); err != nil { t.Fatal(err) } }() // check that this file _does_not_ show in the _source_ if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not be visible, but is", outside2CheckPath) } } func TestSubtreeUnbindable(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") ) if err := os.MkdirAll(sourceDir, 0777); err != nil { t.Fatal(err) } if err := os.MkdirAll(targetDir, 0777); err != nil { t.Fatal(err) } // next, make the source unbindable if err := MakeUnbindable(sourceDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(sourceDir); err != nil { t.Fatal(err) } }() // then attempt to mount it to target. 
It should fail if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not have been bindable", sourceDir) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() } func createFile(path string) error { f, err := os.Create(path) if err != nil { return err } f.WriteString("hello world!") return f.Close() } docker-1.10.3/pkg/namesgenerator/000077500000000000000000000000001267010174400166405ustar00rootroot00000000000000docker-1.10.3/pkg/namesgenerator/cmd/000077500000000000000000000000001267010174400174035ustar00rootroot00000000000000docker-1.10.3/pkg/namesgenerator/cmd/names-generator/000077500000000000000000000000001267010174400224725ustar00rootroot00000000000000docker-1.10.3/pkg/namesgenerator/cmd/names-generator/main.go000066400000000000000000000002171267010174400237450ustar00rootroot00000000000000package main import ( "fmt" "github.com/docker/docker/pkg/namesgenerator" ) func main() { fmt.Println(namesgenerator.GetRandomName(0)) } docker-1.10.3/pkg/namesgenerator/names-generator.go000066400000000000000000000637511267010174400222720ustar00rootroot00000000000000package namesgenerator import ( "fmt" "github.com/docker/docker/pkg/random" ) var ( left = [...]string{ "admiring", "adoring", "agitated", "amazing", "angry", "awesome", "backstabbing", "berserk", "big", "boring", "clever", "cocky", "compassionate", "condescending", "cranky", "desperate", "determined", "distracted", "dreamy", "drunk", "ecstatic", "elated", "elegant", "evil", "fervent", "focused", "furious", "gigantic", "gloomy", "goofy", "grave", "happy", "high", "hopeful", "hungry", "insane", "jolly", "jovial", "kickass", "lonely", "loving", "mad", "modest", "naughty", "nauseous", "nostalgic", "pedantic", "pensive", "prickly", "reverent", "romantic", "sad", "serene", "sharp", "sick", "silly", "sleepy", "small", "stoic", "stupefied", "suspicious", "tender", "thirsty", "tiny", "trusting", } // 
Docker, starting from 0.7.x, generates names from notable scientists and hackers. // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. right = [...]string{ // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB "albattani", // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen "allen", // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida "almeida", // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes "archimedes", // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli "ardinghelli", // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata "aryabhata", // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin "austin", // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. "babbage", // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach "banach", // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen "bardeen", // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. 
https://en.wikipedia.org/wiki/Jean_Bartik "bartik", // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi "bassi", // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell "bell", // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha "bhabha", // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus "bhaskara", // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell "blackwell", // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. "bohr", // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth "booth", // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg "borg", // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose "bose", // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. 
https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville "boyd", // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero "brahmagupta", // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain "brattain", // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) "brown", // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson "carson", // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar "chandrasekhar", // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden "colden", // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori "cori", // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray "cray", // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. "curie", // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. "darwin", // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. "davinci", // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. 
https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. "dijkstra", // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky "dubinsky", // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley "easley", // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein "einstein", // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion "elion", // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart "engelbart", // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid "euclid", // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler "euler", // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat "fermat", // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. "fermi", // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman "feynman", // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. "franklin", // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei "galileo", // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. 
https://en.wikipedia.org/wiki/Bill_Gates "gates", // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) "goldberg", // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine "goldstine", // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser "goldwasser", // James Golick, all around gangster. "golick", // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall "goodall", // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) "hamilton", // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking "hawking", // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg "heisenberg", // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD "heyrovsky", // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin "hodgkin", // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephon switching method. 
https://en.wikipedia.org/wiki/Erna_Schneider_Hoover "hoover", // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper "hopper", // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle "hugle", // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia "hypatia", // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil "jang", // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik "jennings", // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen "jepsen", // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie "joliot", // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones "jones", // A. P. J. 
Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam "kalam", // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare "kare", // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller "keller", // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana "khorana", // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby "kilby", // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch "kirch", // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth "knuth", // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya "kowalevski", // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande "lalande", // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. 
https://en.wikipedia.org/wiki/Hedy_Lamarr "lamarr", // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey "leakey", // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt "leavitt", // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum "lichterman", // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov "liskov", // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) "lovelace", // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re "lumiere", // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) "mahavira", // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer "mayer", // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) "mccarthy", // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock "mcclintock", // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean "mclean", // Kay McNulty - one of the original programmers of the ENIAC. 
https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli "mcnulty", // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner "meitner", // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky "meninsky", // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf "mestorf", // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani "mirzakhani", // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse "morse", // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock "murdock", // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton "newton", // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel "nobel", // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether "noether", // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 "northcutt", // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. 
- https://en.wikipedia.org/wiki/Robert_Noyce "noyce", // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems "panini", // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 "pare", // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. "pasteur", // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin "payne", // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman "perlman", // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike "pike", // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 "poincare", // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras "poitras", // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy "ptolemy", // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. 
- https://en.wikipedia.org/wiki/C._V._Raman "raman", // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan "ramanujan", // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride "ride", // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie "ritchie", // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen "roentgen", // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin "rosalind", // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha "saha", // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet "sammet", // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) "shaw", // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. 
https://en.wikipedia.org/wiki/Steve_Shirley "shirley", // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley "shockley", // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi "sinoussi", // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton "snyder", // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence "spence", // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman "stallman", // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker "stonebraker", // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson "swanson", // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz "swartz", // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles "swirles", // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. 
https://en.wikipedia.org/wiki/Nikola_Tesla "tesla", // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson "thompson", // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds "torvalds", // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. "turing", // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions "varahamihira", // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya "visvesvaraya", // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer "wescoff", // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams "williams", // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson "wilson", // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing "wing", // Steve Wozniak invented the Apple I and Apple II. 
https://en.wikipedia.org/wiki/Steve_Wozniak "wozniak", // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers "wright", // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow "yalow", // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath "yonath", } ) // GetRandomName generates a random name from the list of adjectives and surnames in this package // formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random // integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` func GetRandomName(retry int) string { rnd := random.Rand begin: name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) if name == "boring_wozniak" /* Steve Wozniak is not boring */ { goto begin } if retry > 0 { name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) } return name } docker-1.10.3/pkg/namesgenerator/names-generator_test.go000066400000000000000000000022571267010174400233230ustar00rootroot00000000000000package namesgenerator import ( "strings" "testing" ) // Make sure the generated names are awesome func TestGenerateAwesomeNames(t *testing.T) { name := GetRandomName(0) if !isAwesome(name) { t.Fatalf("Generated name '%s' is not awesome.", name) } } func TestNameFormat(t *testing.T) { name := GetRandomName(0) if !strings.Contains(name, "_") { t.Fatalf("Generated name does not contain an underscore") } if strings.ContainsAny(name, "0123456789") { t.Fatalf("Generated name contains numbers!") } } func 
TestNameRetries(t *testing.T) { name := GetRandomName(1) if !strings.Contains(name, "_") { t.Fatalf("Generated name does not contain an underscore") } if !strings.ContainsAny(name, "0123456789") { t.Fatalf("Generated name doesn't contain a number") } } // To be awesome, a container name must involve cool inventors, be easy to remember, // be at least mildly funny, and always be politically correct for enterprise adoption. func isAwesome(name string) bool { coolInventorNames := true easyToRemember := true mildlyFunnyOnOccasion := true politicallyCorrect := true return coolInventorNames && easyToRemember && mildlyFunnyOnOccasion && politicallyCorrect } docker-1.10.3/pkg/parsers/000077500000000000000000000000001267010174400153055ustar00rootroot00000000000000docker-1.10.3/pkg/parsers/kernel/000077500000000000000000000000001267010174400165655ustar00rootroot00000000000000docker-1.10.3/pkg/parsers/kernel/kernel.go000066400000000000000000000044331267010174400204000ustar00rootroot00000000000000// +build !windows // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. package kernel import ( "bytes" "errors" "fmt" ) // VersionInfo holds information about the kernel. type VersionInfo struct { Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) } func (k *VersionInfo) String() string { return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) } // CompareKernelVersion compares two kernel.VersionInfo structs. 
// Returns -1 if a < b, 0 if a == b, 1 it a > b func CompareKernelVersion(a, b VersionInfo) int { if a.Kernel < b.Kernel { return -1 } else if a.Kernel > b.Kernel { return 1 } if a.Major < b.Major { return -1 } else if a.Major > b.Major { return 1 } if a.Minor < b.Minor { return -1 } else if a.Minor > b.Minor { return 1 } return 0 } // GetKernelVersion gets the current kernel version. func GetKernelVersion() (*VersionInfo, error) { var ( err error ) uts, err := uname() if err != nil { return nil, err } release := make([]byte, len(uts.Release)) i := 0 for _, c := range uts.Release { release[i] = byte(c) i++ } // Remove the \x00 from the release for Atoi to parse correctly release = release[:bytes.IndexByte(release, 0)] return ParseRelease(string(release)) } // ParseRelease parses a string and creates a VersionInfo based on it. func ParseRelease(release string) (*VersionInfo, error) { var ( kernel, major, minor, parsed int flavor, partial string ) // Ignore error from Sscanf to allow an empty flavor. Instead, just // make sure we got all the version numbers. parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) if parsed < 2 { return nil, errors.New("Can't parse kernel version " + release) } // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) if parsed < 1 { flavor = partial } return &VersionInfo{ Kernel: kernel, Major: major, Minor: minor, Flavor: flavor, }, nil } docker-1.10.3/pkg/parsers/kernel/kernel_unix_test.go000066400000000000000000000061131267010174400224770ustar00rootroot00000000000000// +build !windows package kernel import ( "fmt" "testing" ) func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { var ( a *VersionInfo ) a, _ = ParseRelease(release) if r := CompareKernelVersion(*a, *b); r != result { t.Fatalf("Unexpected kernel version comparison result for (%v,%v). 
Found %d, expected %d", release, b, r, result) } if a.Flavor != b.Flavor { t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) } } // TestParseRelease tests the ParseRelease() function func TestParseRelease(t *testing.T) { assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) // Errors invalids := []string{ "3", "a", "a.a", "a.a.a-a", } for _, invalid := range invalids { expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { } } } func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { if r := CompareKernelVersion(a, b); r != result { t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) } } // TestCompareKernelVersion tests the CompareKernelVersion() function func TestCompareKernelVersion(t *testing.T) { assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, VersionInfo{Kernel: 2, Major: 6, Minor: 0}, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, VersionInfo{Kernel: 2, Major: 6, Minor: 0}, 1) assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 8, Minor: 5}, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 1) assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 0, Minor: 20}, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 7, Minor: 20}, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 8, Minor: 20}, VersionInfo{Kernel: 3, Major: 7, Minor: 0}, 1) assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 8, Minor: 20}, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 1) assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, VersionInfo{Kernel: 3, Major: 8, Minor: 20}, -1) } docker-1.10.3/pkg/parsers/kernel/kernel_windows.go000066400000000000000000000031751267010174400221540ustar00rootroot00000000000000package kernel import ( "fmt" "syscall" "unsafe" ) // VersionInfo holds information about the kernel. type VersionInfo struct { kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) build int // Build number of the kernel version (e.g. 
6.1.7601.17592 -> 17592) } func (k *VersionInfo) String() string { return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) } // GetKernelVersion gets the current kernel version. func GetKernelVersion() (*VersionInfo, error) { var ( h syscall.Handle dwVersion uint32 err error ) KVI := &VersionInfo{"Unknown", 0, 0, 0} if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), 0, syscall.KEY_READ, &h); err != nil { return KVI, err } defer syscall.RegCloseKey(h) var buf [1 << 10]uint16 var typ uint32 n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 if err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr("BuildLabEx"), nil, &typ, (*byte)(unsafe.Pointer(&buf[0])), &n); err != nil { return KVI, err } KVI.kvi = syscall.UTF16ToString(buf[:]) // Important - docker.exe MUST be manifested for this API to return // the correct information. if dwVersion, err = syscall.GetVersion(); err != nil { return KVI, err } KVI.major = int(dwVersion & 0xFF) KVI.minor = int((dwVersion & 0XFF00) >> 8) KVI.build = int((dwVersion & 0xFFFF0000) >> 16) return KVI, nil } docker-1.10.3/pkg/parsers/kernel/uname_linux.go000066400000000000000000000006101267010174400214350ustar00rootroot00000000000000package kernel import ( "syscall" ) // Utsname represents the system name structure. // It is passthgrouh for syscall.Utsname in order to make it portable with // other platforms where it is not available. type Utsname syscall.Utsname func uname() (*syscall.Utsname, error) { uts := &syscall.Utsname{} if err := syscall.Uname(uts); err != nil { return nil, err } return uts, nil } docker-1.10.3/pkg/parsers/kernel/uname_unsupported.go000066400000000000000000000005431267010174400226730ustar00rootroot00000000000000// +build !linux package kernel import ( "errors" ) // Utsname represents the system name structure. 
// It is defined here to make it portable as it is available on linux but not // on windows. type Utsname struct { Release [65]byte } func uname() (*Utsname, error) { return nil, errors.New("Kernel version detection is available only on linux") } docker-1.10.3/pkg/parsers/operatingsystem/000077500000000000000000000000001267010174400205425ustar00rootroot00000000000000docker-1.10.3/pkg/parsers/operatingsystem/operatingsystem_freebsd.go000066400000000000000000000007661267010174400260310ustar00rootroot00000000000000package operatingsystem import ( "errors" ) // GetOperatingSystem gets the name of the current operating system. func GetOperatingSystem() (string, error) { // TODO: Implement OS detection return "", errors.New("Cannot detect OS version") } // IsContainerized returns true if we are running inside a container. // No-op on FreeBSD, always returns false. func IsContainerized() (bool, error) { // TODO: Implement jail detection return false, errors.New("Cannot detect if we are in container") } docker-1.10.3/pkg/parsers/operatingsystem/operatingsystem_linux.go000066400000000000000000000040311267010174400255430ustar00rootroot00000000000000// Package operatingsystem provides helper function to get the operating system // name for different platforms. package operatingsystem import ( "bufio" "bytes" "fmt" "io/ioutil" "os" "strings" "github.com/mattn/go-shellwords" ) var ( // file to use to detect if the daemon is running in a container proc1Cgroup = "/proc/1/cgroup" // file to check to determine Operating System etcOsRelease = "/etc/os-release" // used by stateless systems like Clear Linux altOsRelease = "/usr/lib/os-release" ) // GetOperatingSystem gets the name of the current operating system. 
func GetOperatingSystem() (string, error) { osReleaseFile, err := os.Open(etcOsRelease) if err != nil { if !os.IsNotExist(err) { return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) } osReleaseFile, err = os.Open(altOsRelease) if err != nil { return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) } } defer osReleaseFile.Close() var prettyName string scanner := bufio.NewScanner(osReleaseFile) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, "PRETTY_NAME=") { data := strings.SplitN(line, "=", 2) prettyNames, err := shellwords.Parse(data[1]) if err != nil { return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) } if len(prettyNames) != 1 { return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) } prettyName = prettyNames[0] } } if prettyName != "" { return prettyName, nil } // If not set, defaults to PRETTY_NAME="Linux" // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html return "Linux", nil } // IsContainerized returns true if we are running inside a container. 
func IsContainerized() (bool, error) { b, err := ioutil.ReadFile(proc1Cgroup) if err != nil { return false, err } for _, line := range bytes.Split(b, []byte{'\n'}) { if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { return true, nil } } return false, nil } docker-1.10.3/pkg/parsers/operatingsystem/operatingsystem_unix_test.go000066400000000000000000000136731267010174400264420ustar00rootroot00000000000000// +build linux freebsd package operatingsystem import ( "io/ioutil" "os" "path/filepath" "testing" ) func TestGetOperatingSystem(t *testing.T) { var backup = etcOsRelease invalids := []struct { content string errorExpected string }{ { `PRETTY_NAME=Source Mage GNU/Linux PRETTY_NAME=Ubuntu 14.04.LTS`, "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", }, { `PRETTY_NAME="Ubuntu Linux PRETTY_NAME=Ubuntu 14.04.LTS`, "PRETTY_NAME is invalid: invalid command line string", }, { `PRETTY_NAME=Ubuntu' PRETTY_NAME=Ubuntu 14.04.LTS`, "PRETTY_NAME is invalid: invalid command line string", }, { `PRETTY_NAME' PRETTY_NAME=Ubuntu 14.04.LTS`, "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", }, } valids := []struct { content string expected string }{ { `NAME="Ubuntu" PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" VERSION="14.04, Trusty Tahr" ID=ubuntu ID_LIKE=debian VERSION_ID="14.04" HOME_URL="http://www.ubuntu.com/" SUPPORT_URL="http://help.ubuntu.com/" BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, "Linux", }, { `NAME="Ubuntu" VERSION="14.04, Trusty Tahr" ID=ubuntu ID_LIKE=debian VERSION_ID="14.04" HOME_URL="http://www.ubuntu.com/" SUPPORT_URL="http://help.ubuntu.com/" BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, "Linux", }, { `NAME=Gentoo ID=gentoo PRETTY_NAME="Gentoo/Linux" ANSI_COLOR="1;32" HOME_URL="http://www.gentoo.org/" SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" BUG_REPORT_URL="https://bugs.gentoo.org/" `, "Gentoo/Linux", }, { 
`NAME="Ubuntu" VERSION="14.04, Trusty Tahr" ID=ubuntu ID_LIKE=debian PRETTY_NAME="Ubuntu 14.04 LTS" VERSION_ID="14.04" HOME_URL="http://www.ubuntu.com/" SUPPORT_URL="http://help.ubuntu.com/" BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, "Ubuntu 14.04 LTS", }, { `NAME="Ubuntu" VERSION="14.04, Trusty Tahr" ID=ubuntu ID_LIKE=debian PRETTY_NAME='Ubuntu 14.04 LTS'`, "Ubuntu 14.04 LTS", }, { `PRETTY_NAME=Source NAME="Source Mage"`, "Source", }, { `PRETTY_NAME=Source PRETTY_NAME="Source Mage"`, "Source Mage", }, } dir := os.TempDir() etcOsRelease = filepath.Join(dir, "etcOsRelease") defer func() { os.Remove(etcOsRelease) etcOsRelease = backup }() for _, elt := range invalids { if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { t.Fatalf("failed to write to %s: %v", etcOsRelease, err) } s, err := GetOperatingSystem() if err == nil || err.Error() != elt.errorExpected { t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err) } } for _, elt := range valids { if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { t.Fatalf("failed to write to %s: %v", etcOsRelease, err) } s, err := GetOperatingSystem() if err != nil || s != elt.expected { t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err) } } } func TestIsContainerized(t *testing.T) { var ( backup = proc1Cgroup nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope 8:net_cls,net_prio:/ 7:cpuset:/ 6:freezer:/ 5:devices:/init.scope 4:blkio:/init.scope 3:cpu,cpuacct:/init.scope 2:perf_event:/ 1:name=systemd:/init.scope `) nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ 13:hugetlb:/ 12:net_prio:/ 11:perf_event:/ 10:bfqio:/ 9:blkio:/ 8:net_cls:/ 7:freezer:/ 6:devices:/ 5:memory:/ 4:cpuacct:/ 3:cpu:/ 2:cpuset:/ `) containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 
7:net_cls:/ 6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 1:cpuset:/`) ) dir := os.TempDir() proc1Cgroup = filepath.Join(dir, "proc1Cgroup") defer func() { os.Remove(proc1Cgroup) proc1Cgroup = backup }() if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) } inContainer, err := IsContainerized() if err != nil { t.Fatal(err) } if inContainer { t.Fatal("Wrongly assuming containerized") } if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil { t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) } inContainer, err = IsContainerized() if err != nil { t.Fatal(err) } if inContainer { t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout") } if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) } inContainer, err = IsContainerized() if err != nil { t.Fatal(err) } if !inContainer { t.Fatal("Wrongly assuming non-containerized") } } func TestOsReleaseFallback(t *testing.T) { var backup = etcOsRelease var altBackup = altOsRelease dir := os.TempDir() etcOsRelease = filepath.Join(dir, "etcOsRelease") altOsRelease = filepath.Join(dir, "altOsRelease") defer func() { os.Remove(dir) etcOsRelease = backup altOsRelease = altBackup }() content := `NAME=Gentoo ID=gentoo PRETTY_NAME="Gentoo/Linux" ANSI_COLOR="1;32" HOME_URL="http://www.gentoo.org/" SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" BUG_REPORT_URL="https://bugs.gentoo.org/" ` if err := 
ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil { t.Fatalf("failed to write to %s: %v", etcOsRelease, err) } s, err := GetOperatingSystem() if err != nil || s != "Gentoo/Linux" { t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err) } } docker-1.10.3/pkg/parsers/operatingsystem/operatingsystem_windows.go000066400000000000000000000022041267010174400260760ustar00rootroot00000000000000package operatingsystem import ( "syscall" "unsafe" ) // See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c // for a similar sample // GetOperatingSystem gets the name of the current operating system. func GetOperatingSystem() (string, error) { var h syscall.Handle // Default return value ret := "Unknown Operating System" if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), 0, syscall.KEY_READ, &h); err != nil { return ret, err } defer syscall.RegCloseKey(h) var buf [1 << 10]uint16 var typ uint32 n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 if err := syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr("ProductName"), nil, &typ, (*byte)(unsafe.Pointer(&buf[0])), &n); err != nil { return ret, err } ret = syscall.UTF16ToString(buf[:]) return ret, nil } // IsContainerized returns true if we are running inside a container. // No-op on Windows, always returns false. func IsContainerized() (bool, error) { return false, nil } docker-1.10.3/pkg/parsers/parsers.go000066400000000000000000000035661267010174400173250ustar00rootroot00000000000000// Package parsers provides helper functions to parse and validate different type // of string. It can be hosts, unix addresses, tcp addresses, filters, kernel // operating system versions. 
package parsers import ( "fmt" "strconv" "strings" ) // ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) func ParseKeyValueOpt(opt string) (string, string, error) { parts := strings.SplitN(opt, "=", 2) if len(parts) != 2 { return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } // ParseUintList parses and validates the specified string as the value // found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be // one of the formats below. Note that duplicates are actually allowed in the // input string. It returns a `map[int]bool` with available elements from `val` // set to `true`. // Supported formats: // 7 // 1-6 // 0,3-4,7,8-10 // 0-0,0,1-7 // 03,1-3 <- this is gonna get parsed as [1,2,3] // 3,2,1 // 0-2,3,1 func ParseUintList(val string) (map[int]bool, error) { if val == "" { return map[int]bool{}, nil } availableInts := make(map[int]bool) split := strings.Split(val, ",") errInvalidFormat := fmt.Errorf("invalid format: %s", val) for _, r := range split { if !strings.Contains(r, "-") { v, err := strconv.Atoi(r) if err != nil { return nil, errInvalidFormat } availableInts[v] = true } else { split := strings.SplitN(r, "-", 2) min, err := strconv.Atoi(split[0]) if err != nil { return nil, errInvalidFormat } max, err := strconv.Atoi(split[1]) if err != nil { return nil, errInvalidFormat } if max < min { return nil, errInvalidFormat } for i := min; i <= max; i++ { availableInts[i] = true } } } return availableInts, nil } docker-1.10.3/pkg/parsers/parsers_test.go000066400000000000000000000037641267010174400203640ustar00rootroot00000000000000package parsers import ( "reflect" "testing" ) func TestParseKeyValueOpt(t *testing.T) { invalids := map[string]string{ "": "Unable to parse key/value option: ", "key": "Unable to parse key/value option: key", } for invalid, expectedError := range invalids { if _, _, err := 
ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) } } valids := map[string][]string{ "key=value": {"key", "value"}, " key = value ": {"key", "value"}, "key=value1=value2": {"key", "value1=value2"}, " key = value1 = value2 ": {"key", "value1 = value2"}, } for valid, expectedKeyValue := range valids { key, value, err := ParseKeyValueOpt(valid) if err != nil { t.Fatal(err) } if key != expectedKeyValue[0] || value != expectedKeyValue[1] { t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) } } } func TestParseUintList(t *testing.T) { valids := map[string]map[int]bool{ "": {}, "7": {7: true}, "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, "03,1-3": {1: true, 2: true, 3: true}, "3,2,1": {1: true, 2: true, 3: true}, "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, } for k, v := range valids { out, err := ParseUintList(k) if err != nil { t.Fatalf("Expected not to fail, got %v", err) } if !reflect.DeepEqual(out, v) { t.Fatalf("Expected %v, got %v", v, out) } } invalids := []string{ "this", "1--", "1-10,,10", "10-1", "-1", "-1,0", } for _, v := range invalids { if out, err := ParseUintList(v); err == nil { t.Fatalf("Expected failure with %s but got %v", v, out) } } } docker-1.10.3/pkg/pidfile/000077500000000000000000000000001267010174400152425ustar00rootroot00000000000000docker-1.10.3/pkg/pidfile/pidfile.go000066400000000000000000000023521267010174400172070ustar00rootroot00000000000000// Package pidfile provides structure and helper functions to create and remove // PID file. A PID file is usually a file used to store the process ID of a // running process. 
package pidfile import ( "fmt" "io/ioutil" "os" "path/filepath" "strconv" "strings" ) // PIDFile is a file used to store the process ID of a running process. type PIDFile struct { path string } func checkPIDFileAlreadyExists(path string) error { if pidByte, err := ioutil.ReadFile(path); err == nil { pidString := strings.TrimSpace(string(pidByte)) if pid, err := strconv.Atoi(pidString); err == nil { if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) } } } return nil } // New creates a PIDfile using the specified path. func New(path string) (*PIDFile, error) { if err := checkPIDFileAlreadyExists(path); err != nil { return nil, err } if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { return nil, err } return &PIDFile{path: path}, nil } // Remove removes the PIDFile. func (file PIDFile) Remove() error { if err := os.Remove(file.path); err != nil { return err } return nil } docker-1.10.3/pkg/pidfile/pidfile_test.go000066400000000000000000000012121267010174400202400ustar00rootroot00000000000000package pidfile import ( "io/ioutil" "os" "path/filepath" "testing" ) func TestNewAndRemove(t *testing.T) { dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") if err != nil { t.Fatal("Could not create test directory") } file, err := New(filepath.Join(dir, "testfile")) if err != nil { t.Fatal("Could not create test file", err) } if err := file.Remove(); err != nil { t.Fatal("Could not delete created test file") } } func TestRemoveInvalidPath(t *testing.T) { file := PIDFile{path: filepath.Join("foo", "bar")} if err := file.Remove(); err == nil { t.Fatal("Non-existing file doesn't give an error on delete") } } 
docker-1.10.3/pkg/platform/000077500000000000000000000000001267010174400154525ustar00rootroot00000000000000docker-1.10.3/pkg/platform/architecture_freebsd.go000066400000000000000000000004571267010174400221630ustar00rootroot00000000000000package platform import ( "os/exec" ) // runtimeArchitecture get the name of the current architecture (x86, x86_64, …) func runtimeArchitecture() (string, error) { cmd := exec.Command("uname", "-m") machine, err := cmd.Output() if err != nil { return "", err } return string(machine), nil } docker-1.10.3/pkg/platform/architecture_linux.go000066400000000000000000000006431267010174400217050ustar00rootroot00000000000000// Package platform provides helper function to get the runtime architecture // for different platforms. package platform import ( "syscall" ) // runtimeArchitecture get the name of the current architecture (x86, x86_64, …) func runtimeArchitecture() (string, error) { utsname := &syscall.Utsname{} if err := syscall.Uname(utsname); err != nil { return "", err } return charsToString(utsname.Machine), nil } docker-1.10.3/pkg/platform/architecture_windows.go000066400000000000000000000027411267010174400222410ustar00rootroot00000000000000package platform import ( "fmt" "syscall" "unsafe" ) var ( modkernel32 = syscall.NewLazyDLL("kernel32.dll") procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") ) // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx type systeminfo struct { wProcessorArchitecture uint16 wReserved uint16 dwPageSize uint32 lpMinimumApplicationAddress uintptr lpMaximumApplicationAddress uintptr dwActiveProcessorMask uintptr dwNumberOfProcessors uint32 dwProcessorType uint32 dwAllocationGranularity uint32 wProcessorLevel uint16 wProcessorRevision uint16 } // Constants const ( ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL ProcessorArchitectureArm 
= 5 // PROCESSOR_ARCHITECTURE_ARM ) var sysinfo systeminfo // runtimeArchitecture get the name of the current architecture (x86, x86_64, …) func runtimeArchitecture() (string, error) { syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) switch sysinfo.wProcessorArchitecture { case ProcessorArchitecture64, ProcessorArchitectureIA64: return "x86_64", nil case ProcessorArchitecture32: return "i686", nil case ProcessorArchitectureArm: return "arm", nil default: return "", fmt.Errorf("Unknown processor architecture") } } docker-1.10.3/pkg/platform/platform.go000066400000000000000000000006751267010174400176350ustar00rootroot00000000000000package platform import ( "runtime" "github.com/Sirupsen/logrus" ) var ( // Architecture holds the runtime architecture of the process. Architecture string // OSType holds the runtime operating system type (Linux, …) of the process. OSType string ) func init() { var err error Architecture, err = runtimeArchitecture() if err != nil { logrus.Errorf("Could no read system architecture info: %v", err) } OSType = runtime.GOOS } docker-1.10.3/pkg/platform/utsname_int8.go000066400000000000000000000006611267010174400204220ustar00rootroot00000000000000// +build linux,386 linux,amd64 linux,arm64 // see golang's sources src/syscall/ztypes_linux_*.go that use int8 package platform // Convert the OS/ARCH-specific utsname.Machine to string // given as an array of signed int8 func charsToString(ca [65]int8) string { s := make([]byte, len(ca)) var lens int for ; lens < len(ca); lens++ { if ca[lens] == 0 { break } s[lens] = uint8(ca[lens]) } return string(s[0:lens]) } docker-1.10.3/pkg/platform/utsname_uint8.go000066400000000000000000000006671267010174400206150ustar00rootroot00000000000000// +build linux,arm linux,ppc64 linux,ppc64le s390x // see golang's sources src/syscall/ztypes_linux_*.go that use uint8 package platform // Convert the OS/ARCH-specific utsname.Machine to string // given as an array of unsigned uint8 
func charsToString(ca [65]uint8) string { s := make([]byte, len(ca)) var lens int for ; lens < len(ca); lens++ { if ca[lens] == 0 { break } s[lens] = ca[lens] } return string(s[0:lens]) } docker-1.10.3/pkg/plugins/000077500000000000000000000000001267010174400153075ustar00rootroot00000000000000docker-1.10.3/pkg/plugins/client.go000066400000000000000000000101711267010174400171140ustar00rootroot00000000000000package plugins import ( "bytes" "encoding/json" "io" "io/ioutil" "net/http" "strings" "time" "github.com/Sirupsen/logrus" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" ) const ( versionMimetype = "application/vnd.docker.plugins.v1.2+json" defaultTimeOut = 30 ) // NewClient creates a new plugin client (http). func NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, error) { tr := &http.Transport{} c, err := tlsconfig.Client(tlsConfig) if err != nil { return nil, err } tr.TLSClientConfig = c protoAndAddr := strings.Split(addr, "://") sockets.ConfigureTCPTransport(tr, protoAndAddr[0], protoAndAddr[1]) scheme := protoAndAddr[0] if scheme != "https" { scheme = "http" } return &Client{&http.Client{Transport: tr}, scheme, protoAndAddr[1]}, nil } // Client represents a plugin client. type Client struct { http *http.Client // http client to use scheme string // scheme protocol of the plugin addr string // http address of the plugin } // Call calls the specified method with the specified arguments for the plugin. // It will retry for 30 seconds if a failure occurs when calling. 
func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { var buf bytes.Buffer if args != nil { if err := json.NewEncoder(&buf).Encode(args); err != nil { return err } } body, err := c.callWithRetry(serviceMethod, &buf, true) if err != nil { return err } defer body.Close() if ret != nil { if err := json.NewDecoder(body).Decode(&ret); err != nil { logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) return err } } return nil } // Stream calls the specified method with the specified arguments for the plugin and returns the response body func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(args); err != nil { return nil, err } return c.callWithRetry(serviceMethod, &buf, true) } // SendFile calls the specified method, and passes through the IO stream func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { body, err := c.callWithRetry(serviceMethod, data, true) if err != nil { return err } if err := json.NewDecoder(body).Decode(&ret); err != nil { logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) return err } return nil } func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { req, err := http.NewRequest("POST", "/"+serviceMethod, data) if err != nil { return nil, err } req.Header.Add("Accept", versionMimetype) req.URL.Scheme = c.scheme req.URL.Host = c.addr var retries int start := time.Now() for { resp, err := c.http.Do(req) if err != nil { if !retry { return nil, err } timeOff := backoff(retries) if abort(start, timeOff) { return nil, err } retries++ logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff) time.Sleep(timeOff) continue } if resp.StatusCode != http.StatusOK { b, err := ioutil.ReadAll(resp.Body) resp.Body.Close() if err != nil { return nil, &statusError{resp.StatusCode, serviceMethod, 
err.Error()} } // Plugins' Response(s) should have an Err field indicating what went // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just // return the string(body) type responseErr struct { Err string } remoteErr := responseErr{} if err := json.Unmarshal(b, &remoteErr); err == nil { if remoteErr.Err != "" { return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} } } // old way... return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} } return resp.Body, nil } } func backoff(retries int) time.Duration { b, max := 1, defaultTimeOut for b < max && retries > 0 { b *= 2 retries-- } if b > max { b = max } return time.Duration(b) * time.Second } func abort(start time.Time, timeOff time.Duration) bool { return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second } docker-1.10.3/pkg/plugins/client_test.go000066400000000000000000000052631267010174400201610ustar00rootroot00000000000000package plugins import ( "io" "net/http" "net/http/httptest" "reflect" "testing" "time" "github.com/docker/go-connections/tlsconfig" ) var ( mux *http.ServeMux server *httptest.Server ) func setupRemotePluginServer() string { mux = http.NewServeMux() server = httptest.NewServer(mux) return server.URL } func teardownRemotePluginServer() { if server != nil { server.Close() } } func TestFailedConnection(t *testing.T) { c, _ := NewClient("tcp://127.0.0.1:1", tlsconfig.Options{InsecureSkipVerify: true}) _, err := c.callWithRetry("Service.Method", nil, false) if err == nil { t.Fatal("Unexpected successful connection") } } func TestEchoInputOutput(t *testing.T) { addr := setupRemotePluginServer() defer teardownRemotePluginServer() m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { t.Fatalf("Expected POST, got %s\n", r.Method) } header := w.Header() header.Set("Content-Type", versionMimetype) io.Copy(w, r.Body) }) c, _ := 
NewClient(addr, tlsconfig.Options{InsecureSkipVerify: true}) var output Manifest err := c.Call("Test.Echo", m, &output) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(output, m) { t.Fatalf("Expected %v, was %v\n", m, output) } err = c.Call("Test.Echo", nil, nil) if err != nil { t.Fatal(err) } } func TestBackoff(t *testing.T) { cases := []struct { retries int expTimeOff time.Duration }{ {0, time.Duration(1)}, {1, time.Duration(2)}, {2, time.Duration(4)}, {4, time.Duration(16)}, {6, time.Duration(30)}, {10, time.Duration(30)}, } for _, c := range cases { s := c.expTimeOff * time.Second if d := backoff(c.retries); d != s { t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) } } } func TestAbortRetry(t *testing.T) { cases := []struct { timeOff time.Duration expAbort bool }{ {time.Duration(1), false}, {time.Duration(2), false}, {time.Duration(10), false}, {time.Duration(30), true}, {time.Duration(40), true}, } for _, c := range cases { s := c.timeOff * time.Second if a := abort(time.Now(), s); a != c.expAbort { t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) } } } func TestClientScheme(t *testing.T) { cases := map[string]string{ "tcp://127.0.0.1:8080": "http", "unix:///usr/local/plugins/foo": "http", "http://127.0.0.1:8080": "http", "https://127.0.0.1:8080": "https", } for addr, scheme := range cases { c, _ := NewClient(addr, tlsconfig.Options{InsecureSkipVerify: true}) if c.scheme != scheme { t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, c.scheme) } } } docker-1.10.3/pkg/plugins/discovery.go000066400000000000000000000055611267010174400176540ustar00rootroot00000000000000package plugins import ( "encoding/json" "errors" "fmt" "io/ioutil" "net/url" "os" "path/filepath" "strings" ) var ( // ErrNotFound plugin not found ErrNotFound = errors.New("plugin not found") socketsPath = "/run/docker/plugins" specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} ) // localRegistry defines a registry that is local (using 
unix socket). type localRegistry struct{} func newLocalRegistry() localRegistry { return localRegistry{} } // Scan scans all the plugin paths and returns all the names it found func Scan() ([]string, error) { var names []string if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { if err != nil { return nil } if fi.Mode()&os.ModeSocket != 0 { name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) names = append(names, name) } return nil }); err != nil { return nil, err } for _, path := range specsPaths { if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { if err != nil || fi.IsDir() { return nil } name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) names = append(names, name) return nil }); err != nil { return nil, err } } return names, nil } // Plugin returns the plugin registered with the given name (or returns an error). func (l *localRegistry) Plugin(name string) (*Plugin, error) { socketpaths := pluginPaths(socketsPath, name, ".sock") for _, p := range socketpaths { if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { return newLocalPlugin(name, "unix://"+p), nil } } var txtspecpaths []string for _, p := range specsPaths { txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
} for _, p := range txtspecpaths { if _, err := os.Stat(p); err == nil { if strings.HasSuffix(p, ".json") { return readPluginJSONInfo(name, p) } return readPluginInfo(name, p) } } return nil, ErrNotFound } func readPluginInfo(name, path string) (*Plugin, error) { content, err := ioutil.ReadFile(path) if err != nil { return nil, err } addr := strings.TrimSpace(string(content)) u, err := url.Parse(addr) if err != nil { return nil, err } if len(u.Scheme) == 0 { return nil, fmt.Errorf("Unknown protocol") } return newLocalPlugin(name, addr), nil } func readPluginJSONInfo(name, path string) (*Plugin, error) { f, err := os.Open(path) if err != nil { return nil, err } defer f.Close() var p Plugin if err := json.NewDecoder(f).Decode(&p); err != nil { return nil, err } p.Name = name if len(p.TLSConfig.CAFile) == 0 { p.TLSConfig.InsecureSkipVerify = true } return &p, nil } func pluginPaths(base, name, ext string) []string { return []string{ filepath.Join(base, name+ext), filepath.Join(base, name, name+ext), } } docker-1.10.3/pkg/plugins/discovery_test.go000066400000000000000000000077601267010174400207160ustar00rootroot00000000000000package plugins import ( "fmt" "io/ioutil" "net" "os" "path/filepath" "reflect" "testing" ) func setup(t *testing.T) (string, func()) { tmpdir, err := ioutil.TempDir("", "docker-test") if err != nil { t.Fatal(err) } backup := socketsPath socketsPath = tmpdir specsPaths = []string{tmpdir} return tmpdir, func() { socketsPath = backup os.RemoveAll(tmpdir) } } func TestLocalSocket(t *testing.T) { tmpdir, unregister := setup(t) defer unregister() cases := []string{ filepath.Join(tmpdir, "echo.sock"), filepath.Join(tmpdir, "echo", "echo.sock"), } for _, c := range cases { if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil { t.Fatal(err) } l, err := net.Listen("unix", c) if err != nil { t.Fatal(err) } r := newLocalRegistry() p, err := r.Plugin("echo") if err != nil { t.Fatal(err) } pp, err := r.Plugin("echo") if err != nil { t.Fatal(err) } if 
!reflect.DeepEqual(p, pp) { t.Fatalf("Expected %v, was %v\n", p, pp) } if p.Name != "echo" { t.Fatalf("Expected plugin `echo`, got %s\n", p.Name) } addr := fmt.Sprintf("unix://%s", c) if p.Addr != addr { t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) } if p.TLSConfig.InsecureSkipVerify != true { t.Fatalf("Expected TLS verification to be skipped") } l.Close() } } func TestFileSpecPlugin(t *testing.T) { tmpdir, unregister := setup(t) defer unregister() cases := []struct { path string name string addr string fail bool }{ {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport } for _, c := range cases { if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { t.Fatal(err) } r := newLocalRegistry() p, err := r.Plugin(c.name) if c.fail && err == nil { continue } if err != nil { t.Fatal(err) } if p.Name != c.name { t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.Name) } if p.Addr != c.addr { t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) } if p.TLSConfig.InsecureSkipVerify != true { t.Fatalf("Expected TLS verification to be skipped") } } } func TestFileJSONSpecPlugin(t *testing.T) { tmpdir, unregister := setup(t) defer unregister() p := filepath.Join(tmpdir, "example.json") spec := `{ "Name": "plugin-example", "Addr": "https://example.com/docker/plugin", "TLSConfig": { "CAFile": "/usr/shared/docker/certs/example-ca.pem", "CertFile": "/usr/shared/docker/certs/example-cert.pem", "KeyFile": "/usr/shared/docker/certs/example-key.pem" } }` if err := 
ioutil.WriteFile(p, []byte(spec), 0644); err != nil { t.Fatal(err) } r := newLocalRegistry() plugin, err := r.Plugin("example") if err != nil { t.Fatal(err) } if plugin.Name != "example" { t.Fatalf("Expected plugin `plugin-example`, got %s\n", plugin.Name) } if plugin.Addr != "https://example.com/docker/plugin" { t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) } if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" { t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) } if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" { t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) } if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" { t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) } } docker-1.10.3/pkg/plugins/errors.go000066400000000000000000000011441267010174400171520ustar00rootroot00000000000000package plugins import ( "fmt" "net/http" ) type statusError struct { status int method string err string } // Error returns a formated string for this error type func (e *statusError) Error() string { return fmt.Sprintf("%s: %v", e.method, e.err) } // IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin func IsNotFound(err error) bool { return isStatusError(err, http.StatusNotFound) } func isStatusError(err error, status int) bool { if err == nil { return false } e, ok := err.(*statusError) if !ok { return false } return e.status == status } docker-1.10.3/pkg/plugins/pluginrpc-gen/000077500000000000000000000000001267010174400200615ustar00rootroot00000000000000docker-1.10.3/pkg/plugins/pluginrpc-gen/README.md000066400000000000000000000040561267010174400213450ustar00rootroot00000000000000Plugin RPC Generator ==================== Generates go code 
from a Go interface definition for proxying between the plugin API and the subsystem being extended. ## Usage Given an interface definition: ```go type volumeDriver interface { Create(name string, opts opts) (err error) Remove(name string) (err error) Path(name string) (mountpoint string, err error) Mount(name string) (mountpoint string, err error) Unmount(name string) (err error) } ``` **Note**: All function options and return values must be named in the definition. Run the generator: ```bash $ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go ``` Where: - `--type` is the name of the interface to use - `--name` is the subsystem that the plugin "Implements" - `-i` is the input file containing the interface definition - `-o` is the output file where the the generated code should go **Note**: The generated code will use the same package name as the one defined in the input file Optionally, you can skip functions on the interface that should not be implemented in the generated proxy code by passing in the function name to `--skip`. This flag can be specified multiple times. You can also add build tags that should be prepended to the generated code by supplying `--tag`. This flag can be specified multiple times. ## Known issues The parser can currently only handle types which are not specifically a map or a slice. You can, however, create a type that uses a map or a slice internally, for instance: ```go type opts map[string]string ``` This `opts` type will work, whreas using a `map[string]string` directly will not. ## go-generate You can also use this with go-generate, which is pretty awesome. 
To do so, place the code at the top of the file which contains the interface definition (i.e., the input file): ```go //go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver ``` Then cd to the package dir and run `go generate` **Note**: the `pluginrpc-gen` binary must be within your `$PATH` docker-1.10.3/pkg/plugins/pluginrpc-gen/fixtures/000077500000000000000000000000001267010174400217325ustar00rootroot00000000000000docker-1.10.3/pkg/plugins/pluginrpc-gen/fixtures/foo.go000066400000000000000000000013121267010174400230410ustar00rootroot00000000000000package foo type wobble struct { Some string Val string Inception *wobble } // Fooer is an empty interface used for tests. type Fooer interface{} // Fooer2 is an interface used for tests. type Fooer2 interface { Foo() } // Fooer3 is an interface used for tests. type Fooer3 interface { Foo() Bar(a string) Baz(a string) (err error) Qux(a, b string) (val string, err error) Wobble() (w *wobble) Wiggle() (w wobble) } // Fooer4 is an interface used for tests. type Fooer4 interface { Foo() error } // Bar is an interface used for tests. type Bar interface { Boo(a string, b string) (s string, err error) } // Fooer5 is an interface used for tests. 
type Fooer5 interface { Foo() Bar } docker-1.10.3/pkg/plugins/pluginrpc-gen/main.go000066400000000000000000000040401267010174400213320ustar00rootroot00000000000000package main import ( "bytes" "flag" "fmt" "go/format" "io/ioutil" "os" "unicode" "unicode/utf8" ) type stringSet struct { values map[string]struct{} } func (s stringSet) String() string { return "" } func (s stringSet) Set(value string) error { s.values[value] = struct{}{} return nil } func (s stringSet) GetValues() map[string]struct{} { return s.values } var ( typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") rpcName = flag.String("name", *typeName, "RPC name, set if different from type") inputFile = flag.String("i", "", "input file path") outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") skipFuncs map[string]struct{} flSkipFuncs = stringSet{make(map[string]struct{})} flBuildTags = stringSet{make(map[string]struct{})} ) func errorOut(msg string, err error) { if err == nil { return } fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) os.Exit(1) } func checkFlags() error { if *outputFile == "" { return fmt.Errorf("missing required flag `-o`") } if *inputFile == "" { return fmt.Errorf("missing required flag `-i`") } return nil } func main() { flag.Var(flSkipFuncs, "skip", "skip parsing for function") flag.Var(flBuildTags, "tag", "build tags to add to generated files") flag.Parse() skipFuncs = flSkipFuncs.GetValues() errorOut("error", checkFlags()) pkg, err := Parse(*inputFile, *typeName) errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err) var analysis = struct { InterfaceType string RPCName string BuildTags map[string]struct{} *ParsedPkg }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg} var buf bytes.Buffer errorOut("parser error", generatedTempl.Execute(&buf, analysis)) src, err := format.Source(buf.Bytes()) errorOut("error formating generated source", err) errorOut("error writing file", ioutil.WriteFile(*outputFile, 
src, 0644)) } func toLower(s string) string { if s == "" { return "" } r, n := utf8.DecodeRuneInString(s) return string(unicode.ToLower(r)) + s[n:] } docker-1.10.3/pkg/plugins/pluginrpc-gen/parser.go000066400000000000000000000072601267010174400217110ustar00rootroot00000000000000package main import ( "errors" "fmt" "go/ast" "go/parser" "go/token" "reflect" ) var errBadReturn = errors.New("found return arg with no name: all args must be named") type errUnexpectedType struct { expected string actual interface{} } func (e errUnexpectedType) Error() string { return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual)) } // ParsedPkg holds information about a package that has been parsed, // its name and the list of functions. type ParsedPkg struct { Name string Functions []function } type function struct { Name string Args []arg Returns []arg Doc string } type arg struct { Name string ArgType string } func (a *arg) String() string { return a.Name + " " + a.ArgType } // Parse parses the given file for an interface definition with the given name. 
func Parse(filePath string, objName string) (*ParsedPkg, error) { fs := token.NewFileSet() pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors) if err != nil { return nil, err } p := &ParsedPkg{} p.Name = pkg.Name.Name obj, exists := pkg.Scope.Objects[objName] if !exists { return nil, fmt.Errorf("could not find object %s in %s", objName, filePath) } if obj.Kind != ast.Typ { return nil, fmt.Errorf("exected type, got %s", obj.Kind) } spec, ok := obj.Decl.(*ast.TypeSpec) if !ok { return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl} } iface, ok := spec.Type.(*ast.InterfaceType) if !ok { return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type} } p.Functions, err = parseInterface(iface) if err != nil { return nil, err } return p, nil } func parseInterface(iface *ast.InterfaceType) ([]function, error) { var functions []function for _, field := range iface.Methods.List { switch f := field.Type.(type) { case *ast.FuncType: method, err := parseFunc(field) if err != nil { return nil, err } if method == nil { continue } functions = append(functions, *method) case *ast.Ident: spec, ok := f.Obj.Decl.(*ast.TypeSpec) if !ok { return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} } iface, ok := spec.Type.(*ast.InterfaceType) if !ok { return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type} } funcs, err := parseInterface(iface) if err != nil { fmt.Println(err) continue } functions = append(functions, funcs...) 
default: return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f} } } return functions, nil } func parseFunc(field *ast.Field) (*function, error) { f := field.Type.(*ast.FuncType) method := &function{Name: field.Names[0].Name} if _, exists := skipFuncs[method.Name]; exists { fmt.Println("skipping:", method.Name) return nil, nil } if f.Params != nil { args, err := parseArgs(f.Params.List) if err != nil { return nil, err } method.Args = args } if f.Results != nil { returns, err := parseArgs(f.Results.List) if err != nil { return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) } method.Returns = returns } return method, nil } func parseArgs(fields []*ast.Field) ([]arg, error) { var args []arg for _, f := range fields { if len(f.Names) == 0 { return nil, errBadReturn } for _, name := range f.Names { var typeName string switch argType := f.Type.(type) { case *ast.Ident: typeName = argType.Name case *ast.StarExpr: i, ok := argType.X.(*ast.Ident) if !ok { return nil, errUnexpectedType{"*ast.Ident", f.Type} } typeName = "*" + i.Name default: return nil, errUnexpectedType{"*ast.Ident or *ast.StarExpr", f.Type} } args = append(args, arg{name.Name, typeName}) } } return args, nil } docker-1.10.3/pkg/plugins/pluginrpc-gen/parser_test.go000066400000000000000000000076411267010174400227530ustar00rootroot00000000000000package main import ( "fmt" "path/filepath" "runtime" "strings" "testing" ) const testFixture = "fixtures/foo.go" func TestParseEmptyInterface(t *testing.T) { pkg, err := Parse(testFixture, "Fooer") if err != nil { t.Fatal(err) } assertName(t, "foo", pkg.Name) assertNum(t, 0, len(pkg.Functions)) } func TestParseNonInterfaceType(t *testing.T) { _, err := Parse(testFixture, "wobble") if _, ok := err.(errUnexpectedType); !ok { t.Fatal("expected type error when parsing non-interface type") } } func TestParseWithOneFunction(t *testing.T) { pkg, err := Parse(testFixture, "Fooer2") if err != nil { t.Fatal(err) } assertName(t, "foo", 
pkg.Name) assertNum(t, 1, len(pkg.Functions)) assertName(t, "Foo", pkg.Functions[0].Name) assertNum(t, 0, len(pkg.Functions[0].Args)) assertNum(t, 0, len(pkg.Functions[0].Returns)) } func TestParseWithMultipleFuncs(t *testing.T) { pkg, err := Parse(testFixture, "Fooer3") if err != nil { t.Fatal(err) } assertName(t, "foo", pkg.Name) assertNum(t, 6, len(pkg.Functions)) f := pkg.Functions[0] assertName(t, "Foo", f.Name) assertNum(t, 0, len(f.Args)) assertNum(t, 0, len(f.Returns)) f = pkg.Functions[1] assertName(t, "Bar", f.Name) assertNum(t, 1, len(f.Args)) assertNum(t, 0, len(f.Returns)) arg := f.Args[0] assertName(t, "a", arg.Name) assertName(t, "string", arg.ArgType) f = pkg.Functions[2] assertName(t, "Baz", f.Name) assertNum(t, 1, len(f.Args)) assertNum(t, 1, len(f.Returns)) arg = f.Args[0] assertName(t, "a", arg.Name) assertName(t, "string", arg.ArgType) arg = f.Returns[0] assertName(t, "err", arg.Name) assertName(t, "error", arg.ArgType) f = pkg.Functions[3] assertName(t, "Qux", f.Name) assertNum(t, 2, len(f.Args)) assertNum(t, 2, len(f.Returns)) arg = f.Args[0] assertName(t, "a", f.Args[0].Name) assertName(t, "string", f.Args[0].ArgType) arg = f.Args[1] assertName(t, "b", arg.Name) assertName(t, "string", arg.ArgType) arg = f.Returns[0] assertName(t, "val", arg.Name) assertName(t, "string", arg.ArgType) arg = f.Returns[1] assertName(t, "err", arg.Name) assertName(t, "error", arg.ArgType) f = pkg.Functions[4] assertName(t, "Wobble", f.Name) assertNum(t, 0, len(f.Args)) assertNum(t, 1, len(f.Returns)) arg = f.Returns[0] assertName(t, "w", arg.Name) assertName(t, "*wobble", arg.ArgType) f = pkg.Functions[5] assertName(t, "Wiggle", f.Name) assertNum(t, 0, len(f.Args)) assertNum(t, 1, len(f.Returns)) arg = f.Returns[0] assertName(t, "w", arg.Name) assertName(t, "wobble", arg.ArgType) } func TestParseWithUnamedReturn(t *testing.T) { _, err := Parse(testFixture, "Fooer4") if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { t.Fatalf("expected ErrBadReturn, got 
%v", err) } } func TestEmbeddedInterface(t *testing.T) { pkg, err := Parse(testFixture, "Fooer5") if err != nil { t.Fatal(err) } assertName(t, "foo", pkg.Name) assertNum(t, 2, len(pkg.Functions)) f := pkg.Functions[0] assertName(t, "Foo", f.Name) assertNum(t, 0, len(f.Args)) assertNum(t, 0, len(f.Returns)) f = pkg.Functions[1] assertName(t, "Boo", f.Name) assertNum(t, 2, len(f.Args)) assertNum(t, 2, len(f.Returns)) arg := f.Args[0] assertName(t, "a", arg.Name) assertName(t, "string", arg.ArgType) arg = f.Args[1] assertName(t, "b", arg.Name) assertName(t, "string", arg.ArgType) arg = f.Returns[0] assertName(t, "s", arg.Name) assertName(t, "string", arg.ArgType) arg = f.Returns[1] assertName(t, "err", arg.Name) assertName(t, "error", arg.ArgType) } func assertName(t *testing.T, expected, actual string) { if expected != actual { fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) } } func assertNum(t *testing.T, expected, actual int) { if expected != actual { fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) } } func fatalOut(t *testing.T, msg string) { _, file, ln, _ := runtime.Caller(2) t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) } docker-1.10.3/pkg/plugins/pluginrpc-gen/template.go000066400000000000000000000041451267010174400222270ustar00rootroot00000000000000package main import ( "strings" "text/template" ) func printArgs(args []arg) string { var argStr []string for _, arg := range args { argStr = append(argStr, arg.String()) } return strings.Join(argStr, ", ") } func marshalType(t string) string { switch t { case "error": // convert error types to plain strings to ensure the values are encoded/decoded properly return "string" default: return t } } func isErr(t string) bool { switch t { case "error": return true default: return false } } // Need to use this helper due to issues with go-vet func buildTag(s string) string { return "+build " + s } var templFuncs = template.FuncMap{ "printArgs": 
printArgs, "marshalType": marshalType, "isErr": isErr, "lower": strings.ToLower, "title": strings.Title, "tag": buildTag, } var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(` // generated code - DO NOT EDIT {{ range $k, $v := .BuildTags }} // {{ tag $k }} {{ end }} package {{ .Name }} import "errors" type client interface{ Call(string, interface{}, interface{}) error } type {{ .InterfaceType }}Proxy struct { client } {{ range .Functions }} type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{ {{ range .Args }} {{ title .Name }} {{ .ArgType }} {{ end }} } type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{ {{ range .Returns }} {{ title .Name }} {{ marshalType .ArgType }} {{ end }} } func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) { var( req {{ $.InterfaceType }}Proxy{{ .Name }}Request ret {{ $.InterfaceType }}Proxy{{ .Name }}Response ) {{ range .Args }} req.{{ title .Name }} = {{ lower .Name }} {{ end }} if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil { return } {{ range $r := .Returns }} {{ if isErr .ArgType }} if ret.{{ title .Name }} != "" { {{ lower .Name }} = errors.New(ret.{{ title .Name }}) } {{ end }} {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }} return } {{ end }} `)) docker-1.10.3/pkg/plugins/plugins.go000066400000000000000000000116051267010174400173220ustar00rootroot00000000000000// Package plugins provides structures and helper functions to manage Docker // plugins. // // Docker discovers plugins by looking for them in the plugin directory whenever // a user or container tries to use one by name. UNIX domain socket files must // be located under /run/docker/plugins, whereas spec files can be located // either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled // by the Registry interface, which lets you list all plugins or get a plugin by // its name if it exists. 
// // The plugins need to implement an HTTP server and bind this to the UNIX socket // or the address specified in the spec files. // A handshake is send at /Plugin.Activate, and plugins are expected to return // a Manifest with a list of of Docker subsystems which this plugin implements. // // In order to use a plugins, you can use the ``Get`` with the name of the // plugin and the subsystem it implements. // // plugin, err := plugins.Get("example", "VolumeDriver") // if err != nil { // return fmt.Errorf("Error looking up volume plugin example: %v", err) // } package plugins import ( "errors" "sync" "time" "github.com/Sirupsen/logrus" "github.com/docker/go-connections/tlsconfig" ) var ( // ErrNotImplements is returned if the plugin does not implement the requested driver. ErrNotImplements = errors.New("Plugin does not implement the requested driver") ) type plugins struct { sync.Mutex plugins map[string]*Plugin } var ( storage = plugins{plugins: make(map[string]*Plugin)} extpointHandlers = make(map[string]func(string, *Client)) ) // Manifest lists what a plugin implements. type Manifest struct { // List of subsystem the plugin implements. Implements []string } // Plugin is the definition of a docker plugin. 
type Plugin struct { // Name of the plugin Name string `json:"-"` // Address of the plugin Addr string // TLS configuration of the plugin TLSConfig tlsconfig.Options // Client attached to the plugin Client *Client `json:"-"` // Manifest of the plugin (see above) Manifest *Manifest `json:"-"` activatErr error activateOnce sync.Once } func newLocalPlugin(name, addr string) *Plugin { return &Plugin{ Name: name, Addr: addr, TLSConfig: tlsconfig.Options{InsecureSkipVerify: true}, } } func (p *Plugin) activate() error { p.activateOnce.Do(func() { p.activatErr = p.activateWithLock() }) return p.activatErr } func (p *Plugin) activateWithLock() error { c, err := NewClient(p.Addr, p.TLSConfig) if err != nil { return err } p.Client = c m := new(Manifest) if err = p.Client.Call("Plugin.Activate", nil, m); err != nil { return err } p.Manifest = m for _, iface := range m.Implements { handler, handled := extpointHandlers[iface] if !handled { continue } handler(p.Name, p.Client) } return nil } func (p *Plugin) implements(kind string) bool { for _, driver := range p.Manifest.Implements { if driver == kind { return true } } return false } func load(name string) (*Plugin, error) { return loadWithRetry(name, true) } func loadWithRetry(name string, retry bool) (*Plugin, error) { registry := newLocalRegistry() start := time.Now() var retries int for { pl, err := registry.Plugin(name) if err != nil { if !retry { return nil, err } timeOff := backoff(retries) if abort(start, timeOff) { return nil, err } retries++ logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) time.Sleep(timeOff) continue } storage.Lock() storage.plugins[name] = pl storage.Unlock() err = pl.activate() if err != nil { storage.Lock() delete(storage.plugins, name) storage.Unlock() } return pl, err } } func get(name string) (*Plugin, error) { storage.Lock() pl, ok := storage.plugins[name] storage.Unlock() if ok { return pl, pl.activate() } return load(name) } // Get returns the plugin given the 
specified name and requested implementation. func Get(name, imp string) (*Plugin, error) { pl, err := get(name) if err != nil { return nil, err } if pl.implements(imp) { logrus.Debugf("%s implements: %s", name, imp) return pl, nil } return nil, ErrNotImplements } // Handle adds the specified function to the extpointHandlers. func Handle(iface string, fn func(string, *Client)) { extpointHandlers[iface] = fn } // GetAll returns all the plugins for the specified implementation func GetAll(imp string) ([]*Plugin, error) { pluginNames, err := Scan() if err != nil { return nil, err } type plLoad struct { pl *Plugin err error } chPl := make(chan plLoad, len(pluginNames)) for _, name := range pluginNames { go func(name string) { pl, err := loadWithRetry(name, false) chPl <- plLoad{pl, err} }(name) } var out []*Plugin for i := 0; i < len(pluginNames); i++ { pl := <-chPl if pl.err != nil { logrus.Error(err) continue } if pl.pl.implements(imp) { out = append(out, pl.pl) } } return out, nil } docker-1.10.3/pkg/pools/000077500000000000000000000000001267010174400147625ustar00rootroot00000000000000docker-1.10.3/pkg/pools/pools.go000066400000000000000000000067311267010174400164540ustar00rootroot00000000000000// Package pools provides a collection of pools which provide various // data types with buffers. These can be used to lower the number of // memory allocations and reuse buffers. // // New pools should be added to this package to allow them to be // shared across packages. // // Utility functions which operate on pools should be added to this // package to allow them to be reused. package pools import ( "bufio" "io" "sync" "github.com/docker/docker/pkg/ioutils" ) var ( // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. BufioReader32KPool *BufioReaderPool // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. 
BufioWriter32KPool *BufioWriterPool ) const buffer32K = 32 * 1024 // BufioReaderPool is a bufio reader that uses sync.Pool. type BufioReaderPool struct { pool sync.Pool } func init() { BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) } // newBufioReaderPoolWithSize is unexported because new pools should be // added here to be shared where required. func newBufioReaderPoolWithSize(size int) *BufioReaderPool { pool := sync.Pool{ New: func() interface{} { return bufio.NewReaderSize(nil, size) }, } return &BufioReaderPool{pool: pool} } // Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { buf := bufPool.pool.Get().(*bufio.Reader) buf.Reset(r) return buf } // Put puts the bufio.Reader back into the pool. func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { b.Reset(nil) bufPool.pool.Put(b) } // Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. func Copy(dst io.Writer, src io.Reader) (written int64, err error) { buf := BufioReader32KPool.Get(src) written, err = io.Copy(dst, buf) BufioReader32KPool.Put(buf) return } // NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back // into the pool and closes the reader if it's an io.ReadCloser. func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { return ioutils.NewReadCloserWrapper(r, func() error { if readCloser, ok := r.(io.ReadCloser); ok { readCloser.Close() } bufPool.Put(buf) return nil }) } // BufioWriterPool is a bufio writer that uses sync.Pool. type BufioWriterPool struct { pool sync.Pool } // newBufioWriterPoolWithSize is unexported because new pools should be // added here to be shared where required. 
func newBufioWriterPoolWithSize(size int) *BufioWriterPool { pool := sync.Pool{ New: func() interface{} { return bufio.NewWriterSize(nil, size) }, } return &BufioWriterPool{pool: pool} } // Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { buf := bufPool.pool.Get().(*bufio.Writer) buf.Reset(w) return buf } // Put puts the bufio.Writer back into the pool. func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { b.Reset(nil) bufPool.pool.Put(b) } // NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back // into the pool and closes the writer if it's an io.Writecloser. func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { return ioutils.NewWriteCloserWrapper(w, func() error { buf.Flush() if writeCloser, ok := w.(io.WriteCloser); ok { writeCloser.Close() } bufPool.Put(buf) return nil }) } docker-1.10.3/pkg/pools/pools_test.go000066400000000000000000000076471267010174400175220ustar00rootroot00000000000000package pools import ( "bufio" "bytes" "io" "strings" "testing" ) func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { reader := BufioReader32KPool.Get(nil) if reader == nil { t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") } } func TestBufioReaderPoolPutAndGet(t *testing.T) { sr := bufio.NewReader(strings.NewReader("foobar")) reader := BufioReader32KPool.Get(sr) if reader == nil { t.Fatalf("BufioReaderPool should not return a nil reader.") } // verify the first 3 byte buf1 := make([]byte, 3) _, err := reader.Read(buf1) if err != nil { t.Fatal(err) } if actual := string(buf1); actual != "foo" { t.Fatalf("The first letter should have been 'foo' but was %v", actual) } BufioReader32KPool.Put(reader) // Try to read the next 3 bytes _, err = sr.Read(make([]byte, 3)) if err == nil || err != io.EOF { t.Fatalf("The buffer should have been empty, issue an EOF error.") } } 
type simpleReaderCloser struct { io.Reader closed bool } func (r *simpleReaderCloser) Close() error { r.closed = true return nil } func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { br := bufio.NewReader(strings.NewReader("")) sr := &simpleReaderCloser{ Reader: strings.NewReader("foobar"), closed: false, } reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) if reader == nil { t.Fatalf("NewReadCloserWrapper should not return a nil reader.") } // Verify the content of reader buf := make([]byte, 3) _, err := reader.Read(buf) if err != nil { t.Fatal(err) } if actual := string(buf); actual != "foo" { t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) } reader.Close() // Read 3 more bytes "bar" _, err = reader.Read(buf) if err != nil { t.Fatal(err) } if actual := string(buf); actual != "bar" { t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) } if !sr.closed { t.Fatalf("The ReaderCloser should have been closed, it is not.") } } func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { writer := BufioWriter32KPool.Get(nil) if writer == nil { t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") } } func TestBufioWriterPoolPutAndGet(t *testing.T) { buf := new(bytes.Buffer) bw := bufio.NewWriter(buf) writer := BufioWriter32KPool.Get(bw) if writer == nil { t.Fatalf("BufioReaderPool should not return a nil writer.") } written, err := writer.Write([]byte("foobar")) if err != nil { t.Fatal(err) } if written != 6 { t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) } // Make sure we Flush all the way ? 
writer.Flush() bw.Flush() if len(buf.Bytes()) != 6 { t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) } // Reset the buffer buf.Reset() BufioWriter32KPool.Put(writer) // Try to write something written, err = writer.Write([]byte("barfoo")) if err != nil { t.Fatal(err) } // If we now try to flush it, it should panic (the writer is nil) // recover it defer func() { if r := recover(); r == nil { t.Fatal("Trying to flush the writter should have 'paniced', did not.") } }() writer.Flush() } type simpleWriterCloser struct { io.Writer closed bool } func (r *simpleWriterCloser) Close() error { r.closed = true return nil } func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { buf := new(bytes.Buffer) bw := bufio.NewWriter(buf) sw := &simpleWriterCloser{ Writer: new(bytes.Buffer), closed: false, } bw.Flush() writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) if writer == nil { t.Fatalf("BufioReaderPool should not return a nil writer.") } written, err := writer.Write([]byte("foobar")) if err != nil { t.Fatal(err) } if written != 6 { t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) } writer.Close() if !sw.closed { t.Fatalf("The ReaderCloser should have been closed, it is not.") } } docker-1.10.3/pkg/progress/000077500000000000000000000000001267010174400154725ustar00rootroot00000000000000docker-1.10.3/pkg/progress/progress.go000066400000000000000000000037321267010174400176720ustar00rootroot00000000000000package progress import ( "fmt" ) // Progress represents the progress of a transfer. type Progress struct { ID string // Progress contains a Message or... Message string // ...progress of an action Action string Current int64 Total int64 // Aux contains extra information not presented to the user, such as // digests for push signing. Aux interface{} LastUpdate bool } // Output is an interface for writing progress information. 
It's // like a writer for progress, but we don't call it Writer because // that would be confusing next to ProgressReader (also, because it // doesn't implement the io.Writer interface). type Output interface { WriteProgress(Progress) error } type chanOutput chan<- Progress func (out chanOutput) WriteProgress(p Progress) error { out <- p return nil } // ChanOutput returns a Output that writes progress updates to the // supplied channel. func ChanOutput(progressChan chan<- Progress) Output { return chanOutput(progressChan) } // Update is a convenience function to write a progress update to the channel. func Update(out Output, id, action string) { out.WriteProgress(Progress{ID: id, Action: action}) } // Updatef is a convenience function to write a printf-formatted progress update // to the channel. func Updatef(out Output, id, format string, a ...interface{}) { Update(out, id, fmt.Sprintf(format, a...)) } // Message is a convenience function to write a progress message to the channel. func Message(out Output, id, message string) { out.WriteProgress(Progress{ID: id, Message: message}) } // Messagef is a convenience function to write a printf-formatted progress // message to the channel. func Messagef(out Output, id, format string, a ...interface{}) { Message(out, id, fmt.Sprintf(format, a...)) } // Aux sends auxiliary information over a progress interface, which will not be // formatted for the UI. This is used for things such as push signing. func Aux(out Output, a interface{}) { out.WriteProgress(Progress{Aux: a}) } docker-1.10.3/pkg/progress/progressreader.go000066400000000000000000000026401267010174400210520ustar00rootroot00000000000000package progress import ( "io" ) // Reader is a Reader with progress bar. type Reader struct { in io.ReadCloser // Stream to read from out Output // Where to send progress bar to size int64 current int64 lastUpdate int64 id string action string } // NewProgressReader creates a new ProgressReader. 
func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { return &Reader{ in: in, out: out, size: size, id: id, action: action, } } func (p *Reader) Read(buf []byte) (n int, err error) { read, err := p.in.Read(buf) p.current += int64(read) updateEvery := int64(1024 * 512) //512kB if p.size > 0 { // Update progress for every 1% read if 1% < 512kB if increment := int64(0.01 * float64(p.size)); increment < updateEvery { updateEvery = increment } } if p.current-p.lastUpdate > updateEvery || err != nil { p.updateProgress(err != nil && read == 0) p.lastUpdate = p.current } return read, err } // Close closes the progress reader and its underlying reader. func (p *Reader) Close() error { if p.current < p.size { // print a full progress bar when closing prematurely p.current = p.size p.updateProgress(false) } return p.in.Close() } func (p *Reader) updateProgress(last bool) { p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) } docker-1.10.3/pkg/progress/progressreader_test.go000066400000000000000000000024601267010174400221110ustar00rootroot00000000000000package progress import ( "bytes" "io" "io/ioutil" "testing" ) func TestOutputOnPrematureClose(t *testing.T) { content := []byte("TESTING") reader := ioutil.NopCloser(bytes.NewReader(content)) progressChan := make(chan Progress, 10) pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") part := make([]byte, 4, 4) _, err := io.ReadFull(pr, part) if err != nil { pr.Close() t.Fatal(err) } drainLoop: for { select { case <-progressChan: default: break drainLoop } } pr.Close() select { case <-progressChan: default: t.Fatalf("Expected some output when closing prematurely") } } func TestCompleteSilently(t *testing.T) { content := []byte("TESTING") reader := ioutil.NopCloser(bytes.NewReader(content)) progressChan := make(chan Progress, 10) pr := NewProgressReader(reader, ChanOutput(progressChan), 
int64(len(content)), "Test", "Read") out, err := ioutil.ReadAll(pr) if err != nil { pr.Close() t.Fatal(err) } if string(out) != "TESTING" { pr.Close() t.Fatalf("Unexpected output %q from reader", string(out)) } drainLoop: for { select { case <-progressChan: default: break drainLoop } } pr.Close() select { case <-progressChan: t.Fatalf("Should have closed silently when read is complete") default: } } docker-1.10.3/pkg/promise/000077500000000000000000000000001267010174400153045ustar00rootroot00000000000000docker-1.10.3/pkg/promise/promise.go000066400000000000000000000004351267010174400173130ustar00rootroot00000000000000package promise // Go is a basic promise implementation: it wraps calls a function in a goroutine, // and returns a channel which will later return the function's return value. func Go(f func() error) chan error { ch := make(chan error, 1) go func() { ch <- f() }() return ch } docker-1.10.3/pkg/proxy/000077500000000000000000000000001267010174400150075ustar00rootroot00000000000000docker-1.10.3/pkg/proxy/network_proxy_test.go000066400000000000000000000125541267010174400213360ustar00rootroot00000000000000package proxy import ( "bytes" "fmt" "io" "net" "strings" "testing" "time" ) var testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo") var testBufSize = len(testBuf) type EchoServer interface { Run() Close() LocalAddr() net.Addr } type TCPEchoServer struct { listener net.Listener testCtx *testing.T } type UDPEchoServer struct { conn net.PacketConn testCtx *testing.T } func NewEchoServer(t *testing.T, proto, address string) EchoServer { var server EchoServer if strings.HasPrefix(proto, "tcp") { listener, err := net.Listen(proto, address) if err != nil { t.Fatal(err) } server = &TCPEchoServer{listener: listener, testCtx: t} } else { socket, err := net.ListenPacket(proto, address) if err != nil { t.Fatal(err) } server = &UDPEchoServer{conn: socket, testCtx: t} } return server } func (server *TCPEchoServer) Run() { go func() { 
for { client, err := server.listener.Accept() if err != nil { return } go func(client net.Conn) { if _, err := io.Copy(client, client); err != nil { server.testCtx.Logf("can't echo to the client: %v\n", err.Error()) } client.Close() }(client) } }() } func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() } func (server *TCPEchoServer) Close() { server.listener.Addr() } func (server *UDPEchoServer) Run() { go func() { readBuf := make([]byte, 1024) for { read, from, err := server.conn.ReadFrom(readBuf) if err != nil { return } for i := 0; i != read; { written, err := server.conn.WriteTo(readBuf[i:read], from) if err != nil { break } i += written } } }() } func (server *UDPEchoServer) LocalAddr() net.Addr { return server.conn.LocalAddr() } func (server *UDPEchoServer) Close() { server.conn.Close() } func testProxyAt(t *testing.T, proto string, proxy Proxy, addr string) { defer proxy.Close() go proxy.Run() client, err := net.Dial(proto, addr) if err != nil { t.Fatalf("Can't connect to the proxy: %v", err) } defer client.Close() client.SetDeadline(time.Now().Add(10 * time.Second)) if _, err = client.Write(testBuf); err != nil { t.Fatal(err) } recvBuf := make([]byte, testBufSize) if _, err = client.Read(recvBuf); err != nil { t.Fatal(err) } if !bytes.Equal(testBuf, recvBuf) { t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) } } func testProxy(t *testing.T, proto string, proxy Proxy) { testProxyAt(t, proto, proxy, proxy.FrontendAddr().String()) } func TestTCP4Proxy(t *testing.T) { backend := NewEchoServer(t, "tcp", "127.0.0.1:0") defer backend.Close() backend.Run() frontendAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "tcp", proxy) } func TestTCP6Proxy(t *testing.T) { backend := NewEchoServer(t, "tcp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} proxy, 
err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "tcp", proxy) } func TestTCPDualStackProxy(t *testing.T) { // If I understand `godoc -src net favoriteAddrFamily` (used by the // net.Listen* functions) correctly this should work, but it doesn't. t.Skip("No support for dual stack yet") backend := NewEchoServer(t, "tcp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } ipv4ProxyAddr := &net.TCPAddr{ IP: net.IPv4(127, 0, 0, 1), Port: proxy.FrontendAddr().(*net.TCPAddr).Port, } testProxyAt(t, "tcp", proxy, ipv4ProxyAddr.String()) } func TestUDP4Proxy(t *testing.T) { backend := NewEchoServer(t, "udp", "127.0.0.1:0") defer backend.Close() backend.Run() frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "udp", proxy) } func TestUDP6Proxy(t *testing.T) { backend := NewEchoServer(t, "udp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := &net.UDPAddr{IP: net.IPv6loopback, Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "udp", proxy) } func TestUDPWriteError(t *testing.T) { frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} // Hopefully, this port will be free: */ backendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 25587} proxy, err := NewProxy(frontendAddr, backendAddr) if err != nil { t.Fatal(err) } defer proxy.Close() go proxy.Run() client, err := net.Dial("udp", "127.0.0.1:25587") if err != nil { t.Fatalf("Can't connect to the proxy: %v", err) } defer client.Close() // Make sure the proxy doesn't stop when there is no actual backend: client.Write(testBuf) client.Write(testBuf) backend := NewEchoServer(t, "udp", "127.0.0.1:25587") defer backend.Close() backend.Run() 
client.SetDeadline(time.Now().Add(10 * time.Second)) if _, err = client.Write(testBuf); err != nil { t.Fatal(err) } recvBuf := make([]byte, testBufSize) if _, err = client.Read(recvBuf); err != nil { t.Fatal(err) } if !bytes.Equal(testBuf, recvBuf) { t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) } } docker-1.10.3/pkg/proxy/proxy.go000066400000000000000000000023631267010174400165230ustar00rootroot00000000000000// Package proxy provides a network Proxy interface and implementations for TCP // and UDP. package proxy import ( "fmt" "net" ) // Proxy defines the behavior of a proxy. It forwards traffic back and forth // between two endpoints : the frontend and the backend. // It can be used to do software port-mapping between two addresses. // e.g. forward all traffic between the frontend (host) 127.0.0.1:3000 // to the backend (container) at 172.17.42.108:4000. type Proxy interface { // Run starts forwarding traffic back and forth between the front // and back-end addresses. Run() // Close stops forwarding traffic and close both ends of the Proxy. Close() // FrontendAddr returns the address on which the proxy is listening. FrontendAddr() net.Addr // BackendAddr returns the proxied address. BackendAddr() net.Addr } // NewProxy creates a Proxy according to the specified frontendAddr and backendAddr. func NewProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { switch frontendAddr.(type) { case *net.UDPAddr: return NewUDPProxy(frontendAddr.(*net.UDPAddr), backendAddr.(*net.UDPAddr)) case *net.TCPAddr: return NewTCPProxy(frontendAddr.(*net.TCPAddr), backendAddr.(*net.TCPAddr)) default: panic(fmt.Errorf("Unsupported protocol")) } } docker-1.10.3/pkg/proxy/stub_proxy.go000066400000000000000000000012721267010174400175560ustar00rootroot00000000000000package proxy import ( "net" ) // StubProxy is a proxy that is a stub (does nothing). type StubProxy struct { frontendAddr net.Addr backendAddr net.Addr } // Run does nothing. 
func (p *StubProxy) Run() {} // Close does nothing. func (p *StubProxy) Close() {} // FrontendAddr returns the frontend address. func (p *StubProxy) FrontendAddr() net.Addr { return p.frontendAddr } // BackendAddr returns the backend address. func (p *StubProxy) BackendAddr() net.Addr { return p.backendAddr } // NewStubProxy creates a new StubProxy func NewStubProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { return &StubProxy{ frontendAddr: frontendAddr, backendAddr: backendAddr, }, nil } docker-1.10.3/pkg/proxy/tcp_proxy.go000066400000000000000000000050461267010174400173720ustar00rootroot00000000000000package proxy import ( "io" "net" "syscall" "github.com/Sirupsen/logrus" ) // TCPProxy is a proxy for TCP connections. It implements the Proxy interface to // handle TCP traffic forwarding between the frontend and backend addresses. type TCPProxy struct { listener *net.TCPListener frontendAddr *net.TCPAddr backendAddr *net.TCPAddr } // NewTCPProxy creates a new TCPProxy. func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) { listener, err := net.ListenTCP("tcp", frontendAddr) if err != nil { return nil, err } // If the port in frontendAddr was 0 then ListenTCP will have a picked // a port to listen on, hence the call to Addr to get that actual port: return &TCPProxy{ listener: listener, frontendAddr: listener.Addr().(*net.TCPAddr), backendAddr: backendAddr, }, nil } func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { backend, err := net.DialTCP("tcp", nil, proxy.backendAddr) if err != nil { logrus.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err) client.Close() return } event := make(chan int64) var broker = func(to, from *net.TCPConn) { written, err := io.Copy(to, from) if err != nil { // If the socket we are writing to is shutdown with // SHUT_WR, forward it to the other end of the pipe: if err, ok := err.(*net.OpError); ok && err.Err == syscall.EPIPE { from.CloseWrite() } } 
to.CloseRead() event <- written } go broker(client, backend) go broker(backend, client) var transferred int64 for i := 0; i < 2; i++ { select { case written := <-event: transferred += written case <-quit: // Interrupt the two brokers and "join" them. client.Close() backend.Close() for ; i < 2; i++ { transferred += <-event } return } } client.Close() backend.Close() } // Run starts forwarding the traffic using TCP. func (proxy *TCPProxy) Run() { quit := make(chan bool) defer close(quit) for { client, err := proxy.listener.Accept() if err != nil { logrus.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) return } go proxy.clientLoop(client.(*net.TCPConn), quit) } } // Close stops forwarding the traffic. func (proxy *TCPProxy) Close() { proxy.listener.Close() } // FrontendAddr returns the TCP address on which the proxy is listening. func (proxy *TCPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } // BackendAddr returns the TCP proxied address. 
func (proxy *TCPProxy) BackendAddr() net.Addr { return proxy.backendAddr } docker-1.10.3/pkg/proxy/udp_proxy.go000066400000000000000000000107451267010174400173760ustar00rootroot00000000000000package proxy import ( "encoding/binary" "net" "strings" "sync" "syscall" "time" "github.com/Sirupsen/logrus" ) const ( // UDPConnTrackTimeout is the timeout used for UDP connection tracking UDPConnTrackTimeout = 90 * time.Second // UDPBufSize is the buffer size for the UDP proxy UDPBufSize = 65507 ) // A net.Addr where the IP is split into two fields so you can use it as a key // in a map: type connTrackKey struct { IPHigh uint64 IPLow uint64 Port int } func newConnTrackKey(addr *net.UDPAddr) *connTrackKey { if len(addr.IP) == net.IPv4len { return &connTrackKey{ IPHigh: 0, IPLow: uint64(binary.BigEndian.Uint32(addr.IP)), Port: addr.Port, } } return &connTrackKey{ IPHigh: binary.BigEndian.Uint64(addr.IP[:8]), IPLow: binary.BigEndian.Uint64(addr.IP[8:]), Port: addr.Port, } } type connTrackMap map[connTrackKey]*net.UDPConn // UDPProxy is proxy for which handles UDP datagrams. It implements the Proxy // interface to handle UDP traffic forwarding between the frontend and backend // addresses. type UDPProxy struct { listener *net.UDPConn frontendAddr *net.UDPAddr backendAddr *net.UDPAddr connTrackTable connTrackMap connTrackLock sync.Mutex } // NewUDPProxy creates a new UDPProxy. 
func NewUDPProxy(frontendAddr, backendAddr *net.UDPAddr) (*UDPProxy, error) { listener, err := net.ListenUDP("udp", frontendAddr) if err != nil { return nil, err } return &UDPProxy{ listener: listener, frontendAddr: listener.LocalAddr().(*net.UDPAddr), backendAddr: backendAddr, connTrackTable: make(connTrackMap), }, nil } func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) { defer func() { proxy.connTrackLock.Lock() delete(proxy.connTrackTable, *clientKey) proxy.connTrackLock.Unlock() proxyConn.Close() }() readBuf := make([]byte, UDPBufSize) for { proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout)) again: read, err := proxyConn.Read(readBuf) if err != nil { if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED { // This will happen if the last write failed // (e.g: nothing is actually listening on the // proxied port on the container), ignore it // and continue until UDPConnTrackTimeout // expires: goto again } return } for i := 0; i != read; { written, err := proxy.listener.WriteToUDP(readBuf[i:read], clientAddr) if err != nil { return } i += written } } } // Run starts forwarding the traffic using UDP. 
func (proxy *UDPProxy) Run() { readBuf := make([]byte, UDPBufSize) for { read, from, err := proxy.listener.ReadFromUDP(readBuf) if err != nil { // NOTE: Apparently ReadFrom doesn't return // ECONNREFUSED like Read do (see comment in // UDPProxy.replyLoop) if !isClosedError(err) { logrus.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) } break } fromKey := newConnTrackKey(from) proxy.connTrackLock.Lock() proxyConn, hit := proxy.connTrackTable[*fromKey] if !hit { proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr) if err != nil { logrus.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) proxy.connTrackLock.Unlock() continue } proxy.connTrackTable[*fromKey] = proxyConn go proxy.replyLoop(proxyConn, from, fromKey) } proxy.connTrackLock.Unlock() for i := 0; i != read; { written, err := proxyConn.Write(readBuf[i:read]) if err != nil { logrus.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) break } i += written } } } // Close stops forwarding the traffic. func (proxy *UDPProxy) Close() { proxy.listener.Close() proxy.connTrackLock.Lock() defer proxy.connTrackLock.Unlock() for _, conn := range proxy.connTrackTable { conn.Close() } } // FrontendAddr returns the UDP address on which the proxy is listening. func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } // BackendAddr returns the proxied UDP address. func (proxy *UDPProxy) BackendAddr() net.Addr { return proxy.backendAddr } func isClosedError(err error) bool { /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. 
* See: * http://golang.org/src/pkg/net/net.go * https://code.google.com/p/go/issues/detail?id=4337 * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ */ return strings.HasSuffix(err.Error(), "use of closed network connection") } docker-1.10.3/pkg/pubsub/000077500000000000000000000000001267010174400151265ustar00rootroot00000000000000docker-1.10.3/pkg/pubsub/publisher.go000066400000000000000000000047431267010174400174620ustar00rootroot00000000000000package pubsub import ( "sync" "time" ) // NewPublisher creates a new pub/sub publisher to broadcast messages. // The duration is used as the send timeout as to not block the publisher publishing // messages to other clients if one client is slow or unresponsive. // The buffer is used when creating new channels for subscribers. func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { return &Publisher{ buffer: buffer, timeout: publishTimeout, subscribers: make(map[subscriber]topicFunc), } } type subscriber chan interface{} type topicFunc func(v interface{}) bool // Publisher is basic pub/sub structure. Allows to send events and subscribe // to them. Can be safely used from multiple goroutines. type Publisher struct { m sync.RWMutex buffer int timeout time.Duration subscribers map[subscriber]topicFunc } // Len returns the number of subscribers for the publisher func (p *Publisher) Len() int { p.m.RLock() i := len(p.subscribers) p.m.RUnlock() return i } // Subscribe adds a new subscriber to the publisher returning the channel. func (p *Publisher) Subscribe() chan interface{} { return p.SubscribeTopic(nil) } // SubscribeTopic adds a new subscriber that filters messages sent by a topic. func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { ch := make(chan interface{}, p.buffer) p.m.Lock() p.subscribers[ch] = topic p.m.Unlock() return ch } // Evict removes the specified subscriber from receiving any more messages. 
func (p *Publisher) Evict(sub chan interface{}) { p.m.Lock() delete(p.subscribers, sub) close(sub) p.m.Unlock() } // Publish sends the data in v to all subscribers currently registered with the publisher. func (p *Publisher) Publish(v interface{}) { p.m.RLock() wg := new(sync.WaitGroup) for sub, topic := range p.subscribers { wg.Add(1) go p.sendTopic(sub, topic, v, wg) } wg.Wait() p.m.RUnlock() } // Close closes the channels to all subscribers registered with the publisher. func (p *Publisher) Close() { p.m.Lock() for sub := range p.subscribers { delete(p.subscribers, sub) close(sub) } p.m.Unlock() } func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { defer wg.Done() if topic != nil && !topic(v) { return } // send under a select as to not block if the receiver is unavailable if p.timeout > 0 { select { case sub <- v: case <-time.After(p.timeout): } return } select { case sub <- v: default: } } docker-1.10.3/pkg/pubsub/publisher_test.go000066400000000000000000000052421267010174400205140ustar00rootroot00000000000000package pubsub import ( "fmt" "testing" "time" ) func TestSendToOneSub(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) c := p.Subscribe() p.Publish("hi") msg := <-c if msg.(string) != "hi" { t.Fatalf("expected message hi but received %v", msg) } } func TestSendToMultipleSubs(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) subs := []chan interface{}{} subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) p.Publish("hi") for _, c := range subs { msg := <-c if msg.(string) != "hi" { t.Fatalf("expected message hi but received %v", msg) } } } func TestEvictOneSub(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) s1 := p.Subscribe() s2 := p.Subscribe() p.Evict(s1) p.Publish("hi") if _, ok := <-s1; ok { t.Fatal("expected s1 to not receive the published message") } msg := <-s2 if msg.(string) != "hi" { t.Fatalf("expected message hi but received %v", msg) } } func 
TestClosePublisher(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) subs := []chan interface{}{} subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) p.Close() for _, c := range subs { if _, ok := <-c; ok { t.Fatal("expected all subscriber channels to be closed") } } } const sampleText = "test" type testSubscriber struct { dataCh chan interface{} ch chan error } func (s *testSubscriber) Wait() error { return <-s.ch } func newTestSubscriber(p *Publisher) *testSubscriber { ts := &testSubscriber{ dataCh: p.Subscribe(), ch: make(chan error), } go func() { for data := range ts.dataCh { s, ok := data.(string) if !ok { ts.ch <- fmt.Errorf("Unexpected type %T", data) break } if s != sampleText { ts.ch <- fmt.Errorf("Unexpected text %s", s) break } } close(ts.ch) }() return ts } // for testing with -race func TestPubSubRace(t *testing.T) { p := NewPublisher(0, 1024) var subs [](*testSubscriber) for j := 0; j < 50; j++ { subs = append(subs, newTestSubscriber(p)) } for j := 0; j < 1000; j++ { p.Publish(sampleText) } time.AfterFunc(1*time.Second, func() { for _, s := range subs { p.Evict(s.dataCh) } }) for _, s := range subs { s.Wait() } } func BenchmarkPubSub(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() p := NewPublisher(0, 1024) var subs [](*testSubscriber) for j := 0; j < 50; j++ { subs = append(subs, newTestSubscriber(p)) } b.StartTimer() for j := 0; j < 1000; j++ { p.Publish(sampleText) } time.AfterFunc(1*time.Second, func() { for _, s := range subs { p.Evict(s.dataCh) } }) for _, s := range subs { if err := s.Wait(); err != nil { b.Fatal(err) } } } } docker-1.10.3/pkg/random/000077500000000000000000000000001267010174400151065ustar00rootroot00000000000000docker-1.10.3/pkg/random/random.go000066400000000000000000000025221267010174400167160ustar00rootroot00000000000000package random import ( cryptorand "crypto/rand" "io" "math" "math/big" "math/rand" "sync" "time" ) // Rand is a global *rand.Rand instance, which initialized with 
NewSource() source. var Rand = rand.New(NewSource()) // Reader is a global, shared instance of a pseudorandom bytes generator. // It doesn't consume entropy. var Reader io.Reader = &reader{rnd: Rand} // copypaste from standard math/rand type lockedSource struct { lk sync.Mutex src rand.Source } func (r *lockedSource) Int63() (n int64) { r.lk.Lock() n = r.src.Int63() r.lk.Unlock() return } func (r *lockedSource) Seed(seed int64) { r.lk.Lock() r.src.Seed(seed) r.lk.Unlock() } // NewSource returns math/rand.Source safe for concurrent use and initialized // with current unix-nano timestamp func NewSource() rand.Source { var seed int64 if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { // This should not happen, but worst-case fallback to time-based seed. seed = time.Now().UnixNano() } else { seed = cryptoseed.Int64() } return &lockedSource{ src: rand.NewSource(seed), } } type reader struct { rnd *rand.Rand } func (r *reader) Read(b []byte) (int, error) { i := 0 for { val := r.rnd.Int63() for val > 0 { b[i] = byte(val) i++ if i == len(b) { return i, nil } val >>= 8 } } } docker-1.10.3/pkg/random/random_test.go000066400000000000000000000004261267010174400177560ustar00rootroot00000000000000package random import ( "math/rand" "sync" "testing" ) // for go test -v -race func TestConcurrency(t *testing.T) { rnd := rand.New(NewSource()) var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func() { rnd.Int63() wg.Done() }() } wg.Wait() } docker-1.10.3/pkg/reexec/000077500000000000000000000000001267010174400151015ustar00rootroot00000000000000docker-1.10.3/pkg/reexec/README.md000066400000000000000000000004411267010174400163570ustar00rootroot00000000000000## reexec The `reexec` package facilitates the busybox style reexec of the docker binary that we require because of the forking limitations of using Go. 
Handlers can be registered with a name and the argv 0 of the exec of the binary will be used to find and execute custom init paths. docker-1.10.3/pkg/reexec/command_freebsd.go000066400000000000000000000007031267010174400205400ustar00rootroot00000000000000// +build freebsd package reexec import ( "os/exec" ) // Self returns the path to the current process's binary. // Uses os.Args[0]. func Self() string { return naiveSelf() } // Command returns *exec.Cmd which have Path as current binary. // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // be set to "/usr/bin/docker". func Command(args ...string) *exec.Cmd { return &exec.Cmd{ Path: Self(), Args: args, } } docker-1.10.3/pkg/reexec/command_linux.go000066400000000000000000000012001267010174400202560ustar00rootroot00000000000000// +build linux package reexec import ( "os/exec" "syscall" ) // Self returns the path to the current process's binary. // Returns "/proc/self/exe". func Self() string { return "/proc/self/exe" } // Command returns *exec.Cmd which have Path as current binary. Also it setting // SysProcAttr.Pdeathsig to SIGTERM. // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { return &exec.Cmd{ Path: Self(), Args: args, SysProcAttr: &syscall.SysProcAttr{ Pdeathsig: syscall.SIGTERM, }, } } docker-1.10.3/pkg/reexec/command_unsupported.go000066400000000000000000000003171267010174400215170ustar00rootroot00000000000000// +build !linux,!windows,!freebsd package reexec import ( "os/exec" ) // Command is unsupported on operating systems apart from Linux and Windows. func Command(args ...string) *exec.Cmd { return nil } docker-1.10.3/pkg/reexec/command_windows.go000066400000000000000000000006771267010174400206320ustar00rootroot00000000000000// +build windows package reexec import ( "os/exec" ) // Self returns the path to the current process's binary. 
// Uses os.Args[0]. func Self() string { return naiveSelf() } // Command returns *exec.Cmd which have Path as current binary. // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func Command(args ...string) *exec.Cmd { return &exec.Cmd{ Path: Self(), Args: args, } } docker-1.10.3/pkg/reexec/reexec.go000066400000000000000000000020641267010174400167050ustar00rootroot00000000000000package reexec import ( "fmt" "os" "os/exec" "path/filepath" ) var registeredInitializers = make(map[string]func()) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { if _, exists := registeredInitializers[name]; exists { panic(fmt.Sprintf("reexec func already registred under name %q", name)) } registeredInitializers[name] = initializer } // Init is called as the first part of the exec process and returns true if an // initialization function was called. func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] if exists { initializer() return true } return false } func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { if lp, err := exec.LookPath(name); err == nil { return lp } } // handle conversion of relative paths to absolute if absName, err := filepath.Abs(name); err == nil { return absName } // if we couldn't get absolute name, return original // (NOTE: Go only errors on Abs() if os.Getwd fails) return name } docker-1.10.3/pkg/registrar/000077500000000000000000000000001267010174400156305ustar00rootroot00000000000000docker-1.10.3/pkg/registrar/registrar.go000066400000000000000000000056711267010174400201720ustar00rootroot00000000000000// Package registrar provides name registration. It reserves a name to a given key. 
package registrar import ( "errors" "sync" ) var ( // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved ErrNameReserved = errors.New("name is reserved") // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved ErrNameNotReserved = errors.New("name is not reserved") // ErrNoSuchKey is returned when trying to find the names for a key which is not known ErrNoSuchKey = errors.New("provided key does not exist") ) // Registrar stores indexes a list of keys and their registered names as well as indexes names and the key that they are registred to // Names must be unique. // Registrar is safe for concurrent access. type Registrar struct { idx map[string][]string names map[string]string mu sync.Mutex } // NewRegistrar creates a new Registrar with the an empty index func NewRegistrar() *Registrar { return &Registrar{ idx: make(map[string][]string), names: make(map[string]string), } } // Reserve registers a key to a name // Reserve is idempotent // Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` // A name reservation is globally unique func (r *Registrar) Reserve(name, key string) error { r.mu.Lock() defer r.mu.Unlock() if k, exists := r.names[name]; exists { if k != key { return ErrNameReserved } return nil } r.idx[key] = append(r.idx[key], name) r.names[name] = key return nil } // Release releases the reserved name // Once released, a name can be reserved again func (r *Registrar) Release(name string) { r.mu.Lock() defer r.mu.Unlock() key, exists := r.names[name] if !exists { return } for i, n := range r.idx[key] { if n != name { continue } r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) break } delete(r.names, name) if len(r.idx[key]) == 0 { delete(r.idx, key) } } // Delete removes all reservations for the passed in key. // All names reserved to this key are released. 
func (r *Registrar) Delete(key string) { r.mu.Lock() for _, name := range r.idx[key] { delete(r.names, name) } delete(r.idx, key) r.mu.Unlock() } // GetNames lists all the reserved names for the given key func (r *Registrar) GetNames(key string) ([]string, error) { r.mu.Lock() defer r.mu.Unlock() names, exists := r.idx[key] if !exists { return nil, ErrNoSuchKey } return names, nil } // Get returns the key that the passed in name is reserved to func (r *Registrar) Get(name string) (string, error) { r.mu.Lock() key, exists := r.names[name] r.mu.Unlock() if !exists { return "", ErrNameNotReserved } return key, nil } // GetAll returns all registered names func (r *Registrar) GetAll() map[string][]string { out := make(map[string][]string) r.mu.Lock() // copy index into out for id, names := range r.idx { out[id] = names } r.mu.Unlock() return out } docker-1.10.3/pkg/registrar/registrar_test.go000066400000000000000000000042021267010174400212160ustar00rootroot00000000000000package registrar import ( "reflect" "testing" ) func TestReserve(t *testing.T) { r := NewRegistrar() obj := "test1" if err := r.Reserve("test", obj); err != nil { t.Fatal(err) } if err := r.Reserve("test", obj); err != nil { t.Fatal(err) } obj2 := "test2" err := r.Reserve("test", obj2) if err == nil { t.Fatalf("expected error when reserving an already reserved name to another object") } if err != ErrNameReserved { t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name") } } func TestRelease(t *testing.T) { r := NewRegistrar() obj := "testing" if err := r.Reserve("test", obj); err != nil { t.Fatal(err) } r.Release("test") r.Release("test") // Ensure there is no panic here if err := r.Reserve("test", obj); err != nil { t.Fatal(err) } } func TestGetNames(t *testing.T) { r := NewRegistrar() obj := "testing" names := []string{"test1", "test2"} for _, name := range names { if err := r.Reserve(name, obj); err != nil { t.Fatal(err) } } r.Reserve("test3", "other") names2, 
err := r.GetNames(obj) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(names, names2) { t.Fatalf("Exepected: %v, Got: %v", names, names2) } } func TestDelete(t *testing.T) { r := NewRegistrar() obj := "testing" names := []string{"test1", "test2"} for _, name := range names { if err := r.Reserve(name, obj); err != nil { t.Fatal(err) } } r.Reserve("test3", "other") r.Delete(obj) _, err := r.GetNames(obj) if err == nil { t.Fatal("expected error getting names for deleted key") } if err != ErrNoSuchKey { t.Fatal("expected `ErrNoSuchKey`") } } func TestGet(t *testing.T) { r := NewRegistrar() obj := "testing" name := "test" _, err := r.Get(name) if err == nil { t.Fatal("expected error when key does not exist") } if err != ErrNameNotReserved { t.Fatal(err) } if err := r.Reserve(name, obj); err != nil { t.Fatal(err) } if _, err = r.Get(name); err != nil { t.Fatal(err) } r.Delete(obj) _, err = r.Get(name) if err == nil { t.Fatal("expected error when key does not exist") } if err != ErrNameNotReserved { t.Fatal(err) } } docker-1.10.3/pkg/signal/000077500000000000000000000000001267010174400151035ustar00rootroot00000000000000docker-1.10.3/pkg/signal/README.md000066400000000000000000000001401267010174400163550ustar00rootroot00000000000000This package provides helper functions for dealing with signals across various operating systemsdocker-1.10.3/pkg/signal/signal.go000066400000000000000000000025001267010174400167040ustar00rootroot00000000000000// Package signal provides helper functions for dealing with signals across // various operating systems. package signal import ( "fmt" "os" "os/signal" "strconv" "strings" "syscall" ) // CatchAll catches all signals and relays them to the specified channel. func CatchAll(sigc chan os.Signal) { handledSigs := []os.Signal{} for _, s := range SignalMap { handledSigs = append(handledSigs, s) } signal.Notify(sigc, handledSigs...) } // StopCatch stops catching the signals and closes the specified channel. 
func StopCatch(sigc chan os.Signal) { signal.Stop(sigc) close(sigc) } // ParseSignal translates a string to a valid syscall signal. // It returns an error if the signal map doesn't include the given signal. func ParseSignal(rawSignal string) (syscall.Signal, error) { s, err := strconv.Atoi(rawSignal) if err == nil { if s == 0 { return -1, fmt.Errorf("Invalid signal: %s", rawSignal) } return syscall.Signal(s), nil } signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] if !ok { return -1, fmt.Errorf("Invalid signal: %s", rawSignal) } return signal, nil } // ValidSignalForPlatform returns true if a signal is valid on the platform func ValidSignalForPlatform(sig syscall.Signal) bool { for _, v := range SignalMap { if v == sig { return true } } return false } docker-1.10.3/pkg/signal/signal_darwin.go000066400000000000000000000017651267010174400202640ustar00rootroot00000000000000package signal import ( "syscall" ) // SignalMap is a map of Darwin signals. var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUG": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CONT": syscall.SIGCONT, "EMT": syscall.SIGEMT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INFO": syscall.SIGINFO, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "PIPE": syscall.SIGPIPE, "PROF": syscall.SIGPROF, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU": syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, } docker-1.10.3/pkg/signal/signal_freebsd.go000066400000000000000000000020541267010174400204020ustar00rootroot00000000000000package signal import ( "syscall" ) // SignalMap is a map 
of FreeBSD signals. var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUF": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CONT": syscall.SIGCONT, "EMT": syscall.SIGEMT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INFO": syscall.SIGINFO, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "LWP": syscall.SIGLWP, "PIPE": syscall.SIGPIPE, "PROF": syscall.SIGPROF, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "THR": syscall.SIGTHR, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU": syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, } docker-1.10.3/pkg/signal/signal_linux.go000066400000000000000000000040021267010174400201220ustar00rootroot00000000000000package signal import ( "syscall" ) const ( sigrtmin = 34 sigrtmax = 64 ) // SignalMap is a map of Linux signals. 
var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUS": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CLD": syscall.SIGCLD, "CONT": syscall.SIGCONT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "PIPE": syscall.SIGPIPE, "POLL": syscall.SIGPOLL, "PROF": syscall.SIGPROF, "PWR": syscall.SIGPWR, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STKFLT": syscall.SIGSTKFLT, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "UNUSED": syscall.SIGUNUSED, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU": syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, "RTMIN": sigrtmin, "RTMIN+1": sigrtmin + 1, "RTMIN+2": sigrtmin + 2, "RTMIN+3": sigrtmin + 3, "RTMIN+4": sigrtmin + 4, "RTMIN+5": sigrtmin + 5, "RTMIN+6": sigrtmin + 6, "RTMIN+7": sigrtmin + 7, "RTMIN+8": sigrtmin + 8, "RTMIN+9": sigrtmin + 9, "RTMIN+10": sigrtmin + 10, "RTMIN+11": sigrtmin + 11, "RTMIN+12": sigrtmin + 12, "RTMIN+13": sigrtmin + 13, "RTMIN+14": sigrtmin + 14, "RTMIN+15": sigrtmin + 15, "RTMAX-14": sigrtmax - 14, "RTMAX-13": sigrtmax - 13, "RTMAX-12": sigrtmax - 12, "RTMAX-11": sigrtmax - 11, "RTMAX-10": sigrtmax - 10, "RTMAX-9": sigrtmax - 9, "RTMAX-8": sigrtmax - 8, "RTMAX-7": sigrtmax - 7, "RTMAX-6": sigrtmax - 6, "RTMAX-5": sigrtmax - 5, "RTMAX-4": sigrtmax - 4, "RTMAX-3": sigrtmax - 3, "RTMAX-2": sigrtmax - 2, "RTMAX-1": sigrtmax - 1, "RTMAX": sigrtmax, } docker-1.10.3/pkg/signal/signal_unix.go000066400000000000000000000010641267010174400177530ustar00rootroot00000000000000// +build !windows package signal import ( "syscall" ) // Signals used in api/client (no windows equivalent, use // invalid signals so they don't get handled) const ( // SIGCHLD is a 
signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. SIGCHLD = syscall.SIGCHLD // SIGWINCH is a signal sent to a process when its controlling terminal changes its size SIGWINCH = syscall.SIGWINCH // DefaultStopSignal is the syscall signal used to stop a container in unix systems. DefaultStopSignal = "SIGTERM" ) docker-1.10.3/pkg/signal/signal_unsupported.go000066400000000000000000000003011267010174400213510ustar00rootroot00000000000000// +build !linux,!darwin,!freebsd,!windows package signal import ( "syscall" ) // SignalMap is an empty map of signals for unsupported platform. var SignalMap = map[string]syscall.Signal{} docker-1.10.3/pkg/signal/signal_windows.go000066400000000000000000000014401267010174400204600ustar00rootroot00000000000000// +build windows package signal import ( "syscall" ) // Signals used in api/client (no windows equivalent, use // invalid signals so they don't get handled) const ( SIGCHLD = syscall.Signal(0xff) SIGWINCH = syscall.Signal(0xff) // DefaultStopSignal is the syscall signal used to stop a container in windows systems. DefaultStopSignal = "15" ) // SignalMap is a map of "supported" signals. As per the comment in GOLang's // ztypes_windows.go: "More invented values for signals". Windows doesn't // really support signals in any way, shape or form that Unix does. // // We have these so that docker kill can be used to gracefully (TERM) and // forcibly (KILL) terminate a container on Windows. 
var SignalMap = map[string]syscall.Signal{ "KILL": syscall.SIGKILL, "TERM": syscall.SIGTERM, } docker-1.10.3/pkg/signal/trap.go000066400000000000000000000044101267010174400163770ustar00rootroot00000000000000package signal import ( "os" gosignal "os/signal" "runtime" "sync/atomic" "syscall" "github.com/Sirupsen/logrus" ) // Trap sets up a simplified signal "trap", appropriate for common // behavior expected from a vanilla unix command-line tool in general // (and the Docker engine in particular). // // * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. // * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is // skipped and the process is terminated immediately (allows force quit of stuck daemon) // * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. // func Trap(cleanup func()) { c := make(chan os.Signal, 1) // we will handle INT, TERM, QUIT here signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT} gosignal.Notify(c, signals...) go func() { interruptCount := uint32(0) for sig := range c { go func(sig os.Signal) { logrus.Infof("Processing signal '%v'", sig) switch sig { case os.Interrupt, syscall.SIGTERM: if atomic.LoadUint32(&interruptCount) < 3 { // Initiate the cleanup only once if atomic.AddUint32(&interruptCount, 1) == 1 { // Call the provided cleanup handler cleanup() os.Exit(0) } else { return } } else { // 3 SIGTERM/INT signals received; force exit without cleanup logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received") } case syscall.SIGQUIT: DumpStacks() logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT") } //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # os.Exit(128 + int(sig.(syscall.Signal))) }(sig) } }() } // DumpStacks dumps the runtime stack. 
func DumpStacks() { var ( buf []byte stackSize int ) bufferLen := 16384 for stackSize == len(buf) { buf = make([]byte, bufferLen) stackSize = runtime.Stack(buf, true) bufferLen *= 2 } buf = buf[:stackSize] // Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine // traces won't show up in the log. logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) } docker-1.10.3/pkg/stdcopy/000077500000000000000000000000001267010174400153135ustar00rootroot00000000000000docker-1.10.3/pkg/stdcopy/stdcopy.go000066400000000000000000000113731267010174400173340ustar00rootroot00000000000000package stdcopy import ( "encoding/binary" "errors" "fmt" "io" "github.com/Sirupsen/logrus" ) // StdType is the type of standard stream // a writer can multiplex to. type StdType byte const ( // Stdin represents standard input stream type. Stdin StdType = iota // Stdout represents standard output stream type. Stdout // Stderr represents standard error steam type. Stderr stdWriterPrefixLen = 8 stdWriterFdIndex = 0 stdWriterSizeIndex = 4 startingBufLen = 32*1024 + stdWriterPrefixLen + 1 ) // stdWriter is wrapper of io.Writer with extra customized info. type stdWriter struct { io.Writer prefix byte } // Write sends the buffer to the underneath writer. // It insert the prefix header before the buffer, // so stdcopy.StdCopy knows where to multiplex the output. // It makes stdWriter to implement io.Writer. func (w *stdWriter) Write(buf []byte) (n int, err error) { if w == nil || w.Writer == nil { return 0, errors.New("Writer not instantiated") } if buf == nil { return 0, nil } header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(buf))) line := append(header[:], buf...) n, err = w.Writer.Write(line) n -= stdWriterPrefixLen if n < 0 { n = 0 } return } // NewStdWriter instantiates a new Writer. 
// Everything written to it will be encapsulated using a custom format, // and written to the underlying `w` stream. // This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. // `t` indicates the id of the stream to encapsulate. // It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. func NewStdWriter(w io.Writer, t StdType) io.Writer { return &stdWriter{ Writer: w, prefix: byte(t), } } // StdCopy is a modified version of io.Copy. // // StdCopy will demultiplex `src`, assuming that it contains two streams, // previously multiplexed together using a StdWriter instance. // As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. // // StdCopy will read until it hits EOF on `src`. It will then return a nil error. // In other words: if `err` is non nil, it indicates a real underlying error. // // `written` will hold the total number of bytes written to `dstout` and `dsterr`. func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { var ( buf = make([]byte, startingBufLen) bufLen = len(buf) nr, nw int er, ew error out io.Writer frameSize int ) for { // Make sure we have at least a full header for nr < stdWriterPrefixLen { var nr2 int nr2, er = src.Read(buf[nr:]) nr += nr2 if er == io.EOF { if nr < stdWriterPrefixLen { logrus.Debugf("Corrupted prefix: %v", buf[:nr]) return written, nil } break } if er != nil { logrus.Debugf("Error reading header: %s", er) return 0, er } } // Check the first byte to know where to write switch StdType(buf[stdWriterFdIndex]) { case Stdin: fallthrough case Stdout: // Write on stdout out = dstout case Stderr: // Write on stderr out = dsterr default: logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex]) return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) } // Retrieve the size of the frame frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) logrus.Debugf("framesize: %d", frameSize) 
// Check if the buffer is big enough to read the frame. // Extend it if necessary. if frameSize+stdWriterPrefixLen > bufLen { logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf)) buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) bufLen = len(buf) } // While the amount of bytes read is less than the size of the frame + header, we keep reading for nr < frameSize+stdWriterPrefixLen { var nr2 int nr2, er = src.Read(buf[nr:]) nr += nr2 if er == io.EOF { if nr < frameSize+stdWriterPrefixLen { logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr]) return written, nil } break } if er != nil { logrus.Debugf("Error reading frame: %s", er) return 0, er } } // Write the retrieved frame (without header) nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) if ew != nil { logrus.Debugf("Error writing frame: %s", ew) return 0, ew } // If the frame has not been fully written: error if nw != frameSize { logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize) return 0, io.ErrShortWrite } written += int64(nw) // Move the rest of the buffer to the beginning copy(buf, buf[frameSize+stdWriterPrefixLen:]) // Move the index nr -= frameSize + stdWriterPrefixLen } } docker-1.10.3/pkg/stdcopy/stdcopy_test.go000066400000000000000000000155511267010174400203750ustar00rootroot00000000000000package stdcopy import ( "bytes" "errors" "io" "io/ioutil" "strings" "testing" ) func TestNewStdWriter(t *testing.T) { writer := NewStdWriter(ioutil.Discard, Stdout) if writer == nil { t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") } } func TestWriteWithUnitializedStdWriter(t *testing.T) { writer := stdWriter{ Writer: nil, prefix: byte(Stdout), } n, err := writer.Write([]byte("Something here")) if n != 0 || err == nil { t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") } } func TestWriteWithNilBytes(t *testing.T) { writer := 
NewStdWriter(ioutil.Discard, Stdout) n, err := writer.Write(nil) if err != nil { t.Fatalf("Shouldn't have fail when given no data") } if n > 0 { t.Fatalf("Write should have written 0 byte, but has written %d", n) } } func TestWrite(t *testing.T) { writer := NewStdWriter(ioutil.Discard, Stdout) data := []byte("Test StdWrite.Write") n, err := writer.Write(data) if err != nil { t.Fatalf("Error while writing with StdWrite") } if n != len(data) { t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) } } type errWriter struct { n int err error } func (f *errWriter) Write(buf []byte) (int, error) { return f.n, f.err } func TestWriteWithWriterError(t *testing.T) { expectedError := errors.New("expected") expectedReturnedBytes := 10 writer := NewStdWriter(&errWriter{ n: stdWriterPrefixLen + expectedReturnedBytes, err: expectedError}, Stdout) data := []byte("This won't get written, sigh") n, err := writer.Write(data) if err != expectedError { t.Fatalf("Didn't get expected error.") } if n != expectedReturnedBytes { t.Fatalf("Didn't get expected writen bytes %d, got %d.", expectedReturnedBytes, n) } } func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { writer := NewStdWriter(&errWriter{n: -1}, Stdout) data := []byte("This won't get written, sigh") actual, _ := writer.Write(data) if actual != 0 { t.Fatalf("Expected returned written bytes equal to 0, got %d", actual) } } func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { buffer = new(bytes.Buffer) dstOut := NewStdWriter(buffer, Stdout) _, err = dstOut.Write(stdOutBytes) if err != nil { return } dstErr := NewStdWriter(buffer, Stderr) _, err = dstErr.Write(stdErrBytes) return } func TestStdCopyWriteAndRead(t *testing.T) { stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) if err != nil { t.Fatal(err) } written, err := StdCopy(ioutil.Discard, 
ioutil.Discard, buffer) if err != nil { t.Fatal(err) } expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) if written != int64(expectedTotalWritten) { t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) } } type customReader struct { n int err error totalCalls int correctCalls int src *bytes.Buffer } func (f *customReader) Read(buf []byte) (int, error) { f.totalCalls++ if f.totalCalls <= f.correctCalls { return f.src.Read(buf) } return f.n, f.err } func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { expectedError := errors.New("error") reader := &customReader{ err: expectedError} written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) if written != 0 { t.Fatalf("Expected 0 bytes read, got %d", written) } if err != expectedError { t.Fatalf("Didn't get expected error") } } func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { expectedError := errors.New("error") stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) if err != nil { t.Fatal(err) } reader := &customReader{ correctCalls: 1, n: stdWriterPrefixLen + 1, err: expectedError, src: buffer} written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) if written != 0 { t.Fatalf("Expected 0 bytes read, got %d", written) } if err != expectedError { t.Fatalf("Didn't get expected error") } } func TestStdCopyDetectsCorruptedFrame(t *testing.T) { stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) if err != nil { t.Fatal(err) } reader := &customReader{ correctCalls: 1, n: stdWriterPrefixLen + 1, err: io.EOF, src: buffer} written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) if written != startingBufLen { t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) } if err != nil { t.Fatal("Didn't 
get nil error") } } func TestStdCopyWithInvalidInputHeader(t *testing.T) { dstOut := NewStdWriter(ioutil.Discard, Stdout) dstErr := NewStdWriter(ioutil.Discard, Stderr) src := strings.NewReader("Invalid input") _, err := StdCopy(dstOut, dstErr, src) if err == nil { t.Fatal("StdCopy with invalid input header should fail.") } } func TestStdCopyWithCorruptedPrefix(t *testing.T) { data := []byte{0x01, 0x02, 0x03} src := bytes.NewReader(data) written, err := StdCopy(nil, nil, src) if err != nil { t.Fatalf("StdCopy should not return an error with corrupted prefix.") } if written != 0 { t.Fatalf("StdCopy should have written 0, but has written %d", written) } } func TestStdCopyReturnsWriteErrors(t *testing.T) { stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) if err != nil { t.Fatal(err) } expectedError := errors.New("expected") dstOut := &errWriter{err: expectedError} written, err := StdCopy(dstOut, ioutil.Discard, buffer) if written != 0 { t.Fatalf("StdCopy should have written 0, but has written %d", written) } if err != expectedError { t.Fatalf("Didn't get expected error, got %v", err) } } func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) if err != nil { t.Fatal(err) } dstOut := &errWriter{n: startingBufLen - 10} written, err := StdCopy(dstOut, ioutil.Discard, buffer) if written != 0 { t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) } if err != io.ErrShortWrite { t.Fatalf("Didn't get expected io.ErrShortWrite error") } } func BenchmarkWrite(b *testing.B) { w := NewStdWriter(ioutil.Discard, Stdout) data := []byte("Test line for testing stdwriter performance\n") data = bytes.Repeat(data, 100) b.SetBytes(int64(len(data))) b.ResetTimer() for 
i := 0; i < b.N; i++ { if _, err := w.Write(data); err != nil { b.Fatal(err) } } } docker-1.10.3/pkg/streamformatter/000077500000000000000000000000001267010174400170455ustar00rootroot00000000000000docker-1.10.3/pkg/streamformatter/streamformatter.go000066400000000000000000000110501267010174400226100ustar00rootroot00000000000000// Package streamformatter provides helper functions to format a stream. package streamformatter import ( "encoding/json" "fmt" "io" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/progress" ) // StreamFormatter formats a stream, optionally using JSON. type StreamFormatter struct { json bool } // NewStreamFormatter returns a simple StreamFormatter func NewStreamFormatter() *StreamFormatter { return &StreamFormatter{} } // NewJSONStreamFormatter returns a StreamFormatter configured to stream json func NewJSONStreamFormatter() *StreamFormatter { return &StreamFormatter{true} } const streamNewline = "\r\n" var streamNewlineBytes = []byte(streamNewline) // FormatStream formats the specified stream. func (sf *StreamFormatter) FormatStream(str string) []byte { if sf.json { b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) if err != nil { return sf.FormatError(err) } return append(b, streamNewlineBytes...) } return []byte(str + "\r") } // FormatStatus formats the specified objects according to the specified format (and id). func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { str := fmt.Sprintf(format, a...) if sf.json { b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) if err != nil { return sf.FormatError(err) } return append(b, streamNewlineBytes...) } return []byte(str + streamNewline) } // FormatError formats the specified error. 
func (sf *StreamFormatter) FormatError(err error) []byte { if sf.json { jsonError, ok := err.(*jsonmessage.JSONError) if !ok { jsonError = &jsonmessage.JSONError{Message: err.Error()} } if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { return append(b, streamNewlineBytes...) } return []byte("{\"error\":\"format error\"}" + streamNewline) } return []byte("Error: " + err.Error() + streamNewline) } // FormatProgress formats the progress information for a specified action. func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { if progress == nil { progress = &jsonmessage.JSONProgress{} } if sf.json { var auxJSON *json.RawMessage if aux != nil { auxJSONBytes, err := json.Marshal(aux) if err != nil { return nil } auxJSON = new(json.RawMessage) *auxJSON = auxJSONBytes } b, err := json.Marshal(&jsonmessage.JSONMessage{ Status: action, ProgressMessage: progress.String(), Progress: progress, ID: id, Aux: auxJSON, }) if err != nil { return nil } return append(b, streamNewlineBytes...) } endl := "\r" if progress.String() == "" { endl += "\n" } return []byte(action + " " + progress.String() + endl) } // NewProgressOutput returns a progress.Output object that can be passed to // progress.NewProgressReader. func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { return &progressOutput{ sf: sf, out: out, newLines: newLines, } } type progressOutput struct { sf *StreamFormatter out io.Writer newLines bool } // WriteProgress formats progress information from a ProgressReader. 
func (out *progressOutput) WriteProgress(prog progress.Progress) error { var formatted []byte if prog.Message != "" { formatted = out.sf.FormatStatus(prog.ID, prog.Message) } else { jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) } _, err := out.out.Write(formatted) if err != nil { return err } if out.newLines && prog.LastUpdate { _, err = out.out.Write(out.sf.FormatStatus("", "")) return err } return nil } // StdoutFormatter is a streamFormatter that writes to the standard output. type StdoutFormatter struct { io.Writer *StreamFormatter } func (sf *StdoutFormatter) Write(buf []byte) (int, error) { formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) n, err := sf.Writer.Write(formattedBuf) if n != len(formattedBuf) { return n, io.ErrShortWrite } return len(buf), err } // StderrFormatter is a streamFormatter that writes to the standard error. type StderrFormatter struct { io.Writer *StreamFormatter } func (sf *StderrFormatter) Write(buf []byte) (int, error) { formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") n, err := sf.Writer.Write(formattedBuf) if n != len(formattedBuf) { return n, io.ErrShortWrite } return len(buf), err } docker-1.10.3/pkg/streamformatter/streamformatter_test.go000066400000000000000000000045311267010174400236550ustar00rootroot00000000000000package streamformatter import ( "encoding/json" "errors" "reflect" "testing" "github.com/docker/docker/pkg/jsonmessage" ) func TestFormatStream(t *testing.T) { sf := NewStreamFormatter() res := sf.FormatStream("stream") if string(res) != "stream"+"\r" { t.Fatalf("%q", res) } } func TestFormatJSONStatus(t *testing.T) { sf := NewStreamFormatter() res := sf.FormatStatus("ID", "%s%d", "a", 1) if string(res) != "a1\r\n" { t.Fatalf("%q", res) } } func TestFormatSimpleError(t *testing.T) { sf := NewStreamFormatter() res := sf.FormatError(errors.New("Error for 
formatter")) if string(res) != "Error: Error for formatter\r\n" { t.Fatalf("%q", res) } } func TestJSONFormatStream(t *testing.T) { sf := NewJSONStreamFormatter() res := sf.FormatStream("stream") if string(res) != `{"stream":"stream"}`+"\r\n" { t.Fatalf("%q", res) } } func TestJSONFormatStatus(t *testing.T) { sf := NewJSONStreamFormatter() res := sf.FormatStatus("ID", "%s%d", "a", 1) if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { t.Fatalf("%q", res) } } func TestJSONFormatSimpleError(t *testing.T) { sf := NewJSONStreamFormatter() res := sf.FormatError(errors.New("Error for formatter")) if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { t.Fatalf("%q", res) } } func TestJSONFormatJSONError(t *testing.T) { sf := NewJSONStreamFormatter() err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} res := sf.FormatError(err) if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { t.Fatalf("%q", res) } } func TestJSONFormatProgress(t *testing.T) { sf := NewJSONStreamFormatter() progress := &jsonmessage.JSONProgress{ Current: 15, Total: 30, Start: 1, } res := sf.FormatProgress("id", "action", progress, nil) msg := &jsonmessage.JSONMessage{} if err := json.Unmarshal(res, msg); err != nil { t.Fatal(err) } if msg.ID != "id" { t.Fatalf("ID must be 'id', got: %s", msg.ID) } if msg.Status != "action" { t.Fatalf("Status must be 'action', got: %s", msg.Status) } if msg.ProgressMessage != progress.String() { t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage) } if !reflect.DeepEqual(msg.Progress, progress) { t.Fatal("Original progress not equals progress from FormatProgress") } } docker-1.10.3/pkg/stringid/000077500000000000000000000000001267010174400154515ustar00rootroot00000000000000docker-1.10.3/pkg/stringid/README.md000066400000000000000000000001131267010174400167230ustar00rootroot00000000000000This package provides helper 
functions for dealing with string identifiers docker-1.10.3/pkg/stringid/stringid.go000066400000000000000000000033301267010174400176220ustar00rootroot00000000000000// Package stringid provides helper functions for dealing with string identifiers package stringid import ( "crypto/rand" "encoding/hex" "io" "regexp" "strconv" "strings" "github.com/docker/docker/pkg/random" ) const shortLen = 12 var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") // IsShortID determines if an arbitrary string *looks like* a short ID. func IsShortID(id string) bool { return validShortID.MatchString(id) } // TruncateID returns a shorthand version of a string identifier for convenience. // A collision with other shorthands is very unlikely, but possible. // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller // will need to use a langer prefix, or the full-length Id. func TruncateID(id string) string { if i := strings.IndexRune(id, ':'); i >= 0 { id = id[i+1:] } trimTo := shortLen if len(id) < shortLen { trimTo = len(id) } return id[:trimTo] } func generateID(crypto bool) string { b := make([]byte, 32) r := random.Reader if crypto { r = rand.Reader } for { if _, err := io.ReadFull(r, b); err != nil { panic(err) // This shouldn't happen } id := hex.EncodeToString(b) // if we try to parse the truncated for as an int and we don't have // an error then the value is all numeric and causes issues when // used as a hostname. ref #3869 if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { continue } return id } } // GenerateRandomID returns an unique id. func GenerateRandomID() string { return generateID(true) } // GenerateNonCryptoID generates unique id without using cryptographically // secure sources of random. // It helps you to save entropy. 
func GenerateNonCryptoID() string { return generateID(false) } docker-1.10.3/pkg/stringid/stringid_test.go000066400000000000000000000022221267010174400206600ustar00rootroot00000000000000package stringid import ( "strings" "testing" ) func TestGenerateRandomID(t *testing.T) { id := GenerateRandomID() if len(id) != 64 { t.Fatalf("Id returned is incorrect: %s", id) } } func TestShortenId(t *testing.T) { id := GenerateRandomID() truncID := TruncateID(id) if len(truncID) != 12 { t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) } } func TestShortenIdEmpty(t *testing.T) { id := "" truncID := TruncateID(id) if len(truncID) > len(id) { t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) } } func TestShortenIdInvalid(t *testing.T) { id := "1234" truncID := TruncateID(id) if len(truncID) != len(id) { t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) } } func TestIsShortIDNonHex(t *testing.T) { id := "some non-hex value" if IsShortID(id) { t.Fatalf("%s is not a short ID", id) } } func TestIsShortIDNotCorrectSize(t *testing.T) { id := strings.Repeat("a", shortLen+1) if IsShortID(id) { t.Fatalf("%s is not a short ID", id) } id = strings.Repeat("a", shortLen-1) if IsShortID(id) { t.Fatalf("%s is not a short ID", id) } } docker-1.10.3/pkg/stringutils/000077500000000000000000000000001267010174400162155ustar00rootroot00000000000000docker-1.10.3/pkg/stringutils/README.md000066400000000000000000000001001267010174400174630ustar00rootroot00000000000000This package provides helper functions for dealing with strings docker-1.10.3/pkg/stringutils/stringutils.go000066400000000000000000000040431267010174400211340ustar00rootroot00000000000000// Package stringutils provides helper functions for dealing with strings. package stringutils import ( "bytes" "math/rand" "strings" "github.com/docker/docker/pkg/random" ) // GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. 
func GenerateRandomAlphaOnlyString(n int) string { // make a really long string letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]byte, n) for i := range b { b[i] = letters[random.Rand.Intn(len(letters))] } return string(b) } // GenerateRandomASCIIString generates an ASCII random stirng with length n. func GenerateRandomASCIIString(n int) string { chars := "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " res := make([]byte, n) for i := 0; i < n; i++ { res[i] = chars[rand.Intn(len(chars))] } return string(res) } // Truncate truncates a string to maxlen. func Truncate(s string, maxlen int) string { if len(s) <= maxlen { return s } return s[:maxlen] } // InSlice tests whether a string is contained in a slice of strings or not. // Comparison is case insensitive func InSlice(slice []string, s string) bool { for _, ss := range slice { if strings.ToLower(s) == strings.ToLower(ss) { return true } } return false } func quote(word string, buf *bytes.Buffer) { // Bail out early for "simple" strings if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { buf.WriteString(word) return } buf.WriteString("'") for i := 0; i < len(word); i++ { b := word[i] if b == '\'' { // Replace literal ' with a close ', a \', and a open ' buf.WriteString("'\\''") } else { buf.WriteByte(b) } } buf.WriteString("'") } // ShellQuoteArguments takes a list of strings and escapes them so they will be // handled right when passed as arguments to an program via a shell func ShellQuoteArguments(args []string) string { var buf bytes.Buffer for i, arg := range args { if i != 0 { buf.WriteByte(' ') } quote(arg, &buf) } return buf.String() } docker-1.10.3/pkg/stringutils/stringutils_test.go000066400000000000000000000051111267010174400221700ustar00rootroot00000000000000package stringutils import "testing" func testLengthHelper(generator func(int) string, t *testing.T) { expectedLength := 20 s := generator(expectedLength) if len(s) != expectedLength { t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) } } func testUniquenessHelper(generator func(int) string, t *testing.T) { repeats := 25 set := make(map[string]struct{}, repeats) for i := 0; i < repeats; i = i + 1 { str := generator(64) if len(str) != 64 { t.Fatalf("Id returned is incorrect: %s", str) } if _, ok := set[str]; ok { t.Fatalf("Random number is repeated") } set[str] = struct{}{} } } func isASCII(s string) bool { for _, c := range s { if c > 127 { return false } } return true } func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { testLengthHelper(GenerateRandomAlphaOnlyString, t) } func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { testUniquenessHelper(GenerateRandomAlphaOnlyString, t) } func TestGenerateRandomAsciiStringLength(t *testing.T) { testLengthHelper(GenerateRandomASCIIString, t) } func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { testUniquenessHelper(GenerateRandomASCIIString, t) } func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { str := GenerateRandomASCIIString(64) if !isASCII(str) { t.Fatalf("%s 
contained non-ascii characters", str) } } func TestTruncate(t *testing.T) { str := "teststring" newstr := Truncate(str, 4) if newstr != "test" { t.Fatalf("Expected test, got %s", newstr) } newstr = Truncate(str, 20) if newstr != "teststring" { t.Fatalf("Expected teststring, got %s", newstr) } } func TestInSlice(t *testing.T) { slice := []string{"test", "in", "slice"} test := InSlice(slice, "test") if !test { t.Fatalf("Expected string test to be in slice") } test = InSlice(slice, "SLICE") if !test { t.Fatalf("Expected string SLICE to be in slice") } test = InSlice(slice, "notinslice") if test { t.Fatalf("Expected string notinslice not to be in slice") } } func TestShellQuoteArgumentsEmpty(t *testing.T) { actual := ShellQuoteArguments([]string{}) expected := "" if actual != expected { t.Fatalf("Expected an empty string") } } func TestShellQuoteArguments(t *testing.T) { simpleString := "simpleString" complexString := "This is a 'more' complex $tring with some special char *" actual := ShellQuoteArguments([]string{simpleString, complexString}) expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'" if actual != expected { t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) } } docker-1.10.3/pkg/symlink/000077500000000000000000000000001267010174400153145ustar00rootroot00000000000000docker-1.10.3/pkg/symlink/LICENSE.APACHE000066400000000000000000000250131267010174400172420ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2014-2016 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-1.10.3/pkg/symlink/LICENSE.BSD000066400000000000000000000027251267010174400167360ustar00rootroot00000000000000Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-1.10.3/pkg/symlink/README.md000066400000000000000000000006021267010174400165710ustar00rootroot00000000000000Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, as well as a Windows long-path aware version of filepath.EvalSymlinks from the [Go standard library](https://golang.org/pkg/path/filepath). The code from filepath.EvalSymlinks has been adapted in fs.go. Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. docker-1.10.3/pkg/symlink/fs.go000066400000000000000000000114221267010174400162530ustar00rootroot00000000000000// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.BSD file. // This code is a modified version of path/filepath/symlink.go from the Go standard library. package symlink import ( "bytes" "errors" "os" "path/filepath" "strings" "github.com/docker/docker/pkg/system" ) // FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an // absolute path. This function handles paths in a platform-agnostic manner. 
func FollowSymlinkInScope(path, root string) (string, error) { path, err := filepath.Abs(filepath.FromSlash(path)) if err != nil { return "", err } root, err = filepath.Abs(filepath.FromSlash(root)) if err != nil { return "", err } return evalSymlinksInScope(path, root) } // evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return // a result guaranteed to be contained within the scope `root`, at the time of the call. // Symlinks in `root` are not evaluated and left as-is. // Errors encountered while attempting to evaluate symlinks in path will be returned. // Non-existing paths are valid and do not constitute an error. // `path` has to contain `root` as a prefix, or else an error will be returned. // Trying to break out from `root` does not constitute an error. // // Example: // If /foo/bar -> /outside, // FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" // // IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks // are created and not to create subsequently, additional symlinks that could potentially make a // previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") // would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should // no longer be considered safely contained in "/foo". 
func evalSymlinksInScope(path, root string) (string, error) { root = filepath.Clean(root) if path == root { return path, nil } if !strings.HasPrefix(path, root) { return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) } const maxIter = 255 originalPath := path // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" path = path[len(root):] if root == string(filepath.Separator) { path = string(filepath.Separator) + path } if !strings.HasPrefix(path, string(filepath.Separator)) { return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) } path = filepath.Clean(path) // consume path by taking each frontmost path element, // expanding it if it's a symlink, and appending it to b var b bytes.Buffer // b here will always be considered to be the "current absolute path inside // root" when we append paths to it, we also append a slash and use // filepath.Clean after the loop to trim the trailing slash for n := 0; path != ""; n++ { if n > maxIter { return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) } // find next path component, p i := strings.IndexRune(path, filepath.Separator) var p string if i == -1 { p, path = path, "" } else { p, path = path[:i], path[i+1:] } if p == "" { continue } // this takes a b.String() like "b/../" and a p like "c" and turns it // into "/b/../c" which then gets filepath.Cleaned into "/c" and then // root gets prepended and we Clean again (to remove any trailing slash // if the first Clean gave us just "/") cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) if cleanP == string(filepath.Separator) { // never Lstat "/" itself b.Reset() continue } fullP := filepath.Clean(root + cleanP) fi, err := os.Lstat(fullP) if os.IsNotExist(err) { // if p does not exist, accept it b.WriteString(p) b.WriteRune(filepath.Separator) continue } if err != nil { return "", err } if fi.Mode()&os.ModeSymlink == 0 { b.WriteString(p + string(filepath.Separator)) 
continue } // it's a symlink, put it at the front of path dest, err := os.Readlink(fullP) if err != nil { return "", err } if system.IsAbs(dest) { b.Reset() } path = dest + string(filepath.Separator) + path } // see note above on "fullP := ..." for why this is double-cleaned and // what's happening here return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil } // EvalSymlinks returns the path name after the evaluation of any symbolic // links. // If path is relative the result will be relative to the current directory, // unless one of the components is an absolute symbolic link. // This version has been updated to support long paths prepended with `\\?\`. func EvalSymlinks(path string) (string, error) { return evalSymlinks(path) } docker-1.10.3/pkg/symlink/fs_test.go000066400000000000000000000252001267010174400173110ustar00rootroot00000000000000// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE package symlink import ( "fmt" "io/ioutil" "os" "path/filepath" "testing" ) type dirOrLink struct { path string target string } func makeFs(tmpdir string, fs []dirOrLink) error { for _, s := range fs { s.path = filepath.Join(tmpdir, s.path) if s.target == "" { os.MkdirAll(s.path, 0755) continue } if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { return err } if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { return err } } return nil } func testSymlink(tmpdir, path, expected, scope string) error { rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) if err != nil { return err } expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) if err != nil { return err } if expected != rewrite { return fmt.Errorf("Expected %q got %q", expected, rewrite) } return nil } func TestFollowSymlinkAbsolute(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := 
makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { t.Fatal(err) } } func TestFollowSymlinkRelativePath(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { t.Fatal(err) } } func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "linkdir", target: "realdir"}, {path: "linkdir/foo/bar"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { t.Fatal(err) } } func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { t.Fatal("expected an error") } } func TestFollowSymlinkLastLink(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { t.Fatal(err) } } func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); 
err != nil { t.Fatal(err) } // avoid letting allowing symlink e lead us to ../b // normalize to the "testdata/fs/a" if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { t.Fatal(err) } } func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { t.Fatal(err) } // avoid letting symlink f lead us out of the "testdata" scope // we don't normalize because symlink f is in scope and there is no // information leak if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { t.Fatal(err) } // avoid letting symlink f lead us out of the "testdata/fs" scope // we don't normalize because symlink f is in scope and there is no // information leak if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { t.Fatal(err) } } func TestFollowSymlinkRelativeLinkChain(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) // avoid letting symlink g (pointed at by symlink h) take out of scope // TODO: we should probably normalize to scope here because ../[....]/root // is out of scope and we leak information if err := makeFs(tmpdir, []dirOrLink{ {path: "testdata/fs/b/h", target: "../g"}, {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { t.Fatal(err) } } func TestFollowSymlinkBreakoutPath(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) // avoid letting symlink -> ../directory/file escape from 
scope // normalize to "testdata/fs/j" if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { t.Fatal(err) } } func TestFollowSymlinkToRoot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) // make sure we don't allow escaping to / // normalize to dir if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } } func TestFollowSymlinkSlashDotdot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) tmpdir = filepath.Join(tmpdir, "dir", "subdir") // make sure we don't allow escaping to / // normalize to dir if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } } func TestFollowSymlinkDotdot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) tmpdir = filepath.Join(tmpdir, "dir", "subdir") // make sure we stay in scope without leaking information // this also checks for escaping to / // normalize to dir if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } } func TestFollowSymlinkRelativePath2(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { 
t.Fatal(err) } } func TestFollowSymlinkScopeLink(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "root2"}, {path: "root", target: "root2"}, {path: "root2/foo", target: "../bar"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { t.Fatal(err) } } func TestFollowSymlinkRootScope(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) expected, err := filepath.EvalSymlinks(tmpdir) if err != nil { t.Fatal(err) } rewrite, err := FollowSymlinkInScope(tmpdir, "/") if err != nil { t.Fatal(err) } if rewrite != expected { t.Fatalf("expected %q got %q", expected, rewrite) } } func TestFollowSymlinkEmpty(t *testing.T) { res, err := FollowSymlinkInScope("", "") if err != nil { t.Fatal(err) } wd, err := os.Getwd() if err != nil { t.Fatal(err) } if res != wd { t.Fatalf("expected %q got %q", wd, res) } } func TestFollowSymlinkCircular(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { t.Fatal("expected an error for foo -> foo") } if err := makeFs(tmpdir, []dirOrLink{ {path: "root/bar", target: "baz"}, {path: "root/baz", target: "../bak"}, {path: "root/bak", target: "/bar"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { t.Fatal("expected an error for bar -> baz -> bak -> bar") } } func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") if err != nil { t.Fatal(err) } defer 
os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "root2"}, {path: "root", target: "root2"}, {path: "root/a", target: "r/s"}, {path: "root/r", target: "../root/t"}, {path: "root/root/t/s/b", target: "/../u"}, {path: "root/u/c", target: "."}, {path: "root/u/x/y", target: "../v"}, {path: "root/u/v", target: "/../w"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { t.Fatal(err) } } func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "root/slash", target: "/"}, {path: "root/sym", target: "/idontexist/../slash"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { t.Fatal(err) } } func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "root/sym", target: "/foo/bar"}, {path: "root/hello", target: "/sym/../baz"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { t.Fatal(err) } } docker-1.10.3/pkg/symlink/fs_unix.go000066400000000000000000000002311267010174400173120ustar00rootroot00000000000000// +build !windows package symlink import ( "path/filepath" ) func evalSymlinks(path string) (string, error) { return filepath.EvalSymlinks(path) } docker-1.10.3/pkg/symlink/fs_windows.go000066400000000000000000000070711267010174400200320ustar00rootroot00000000000000package symlink import ( "bytes" "errors" "os" "path/filepath" "strings" "syscall" "github.com/docker/docker/pkg/longpath" ) func toShort(path string) (string, error) { p, err := syscall.UTF16FromString(path) if err != nil { return "", err } b := p // 
GetShortPathName says we can reuse buffer n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } if n > uint32(len(b)) { b = make([]uint16, n) n, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } } return syscall.UTF16ToString(b), nil } func toLong(path string) (string, error) { p, err := syscall.UTF16FromString(path) if err != nil { return "", err } b := p // GetLongPathName says we can reuse buffer n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } if n > uint32(len(b)) { b = make([]uint16, n) n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } } b = b[:n] return syscall.UTF16ToString(b), nil } func evalSymlinks(path string) (string, error) { path, err := walkSymlinks(path) if err != nil { return "", err } p, err := toShort(path) if err != nil { return "", err } p, err = toLong(p) if err != nil { return "", err } // syscall.GetLongPathName does not change the case of the drive letter, // but the result of EvalSymlinks must be unique, so we have // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). // Make drive letter upper case. if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { p = string(p[0]+'A'-'a') + p[1:] } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { p = p[:3] + string(p[4]+'A'-'a') + p[5:] } return filepath.Clean(p), nil } const utf8RuneSelf = 0x80 func walkSymlinks(path string) (string, error) { const maxIter = 255 originalPath := path // consume path by taking each frontmost path element, // expanding it if it's a symlink, and appending it to b var b bytes.Buffer for n := 0; path != ""; n++ { if n > maxIter { return "", errors.New("EvalSymlinks: too many links in " + originalPath) } // A path beginning with `\\?\` represents the root, so automatically // skip that part and begin processing the next segment. 
if strings.HasPrefix(path, longpath.Prefix) { b.WriteString(longpath.Prefix) path = path[4:] continue } // find next path component, p var i = -1 for j, c := range path { if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { i = j break } } var p string if i == -1 { p, path = path, "" } else { p, path = path[:i], path[i+1:] } if p == "" { if b.Len() == 0 { // must be absolute path b.WriteRune(filepath.Separator) } continue } // If this is the first segment after the long path prefix, accept the // current segment as a volume root or UNC share and move on to the next. if b.String() == longpath.Prefix { b.WriteString(p) b.WriteRune(filepath.Separator) continue } fi, err := os.Lstat(b.String() + p) if err != nil { return "", err } if fi.Mode()&os.ModeSymlink == 0 { b.WriteString(p) if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { b.WriteRune(filepath.Separator) } continue } // it's a symlink, put it at the front of path dest, err := os.Readlink(b.String() + p) if err != nil { return "", err } if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { b.Reset() } path = dest + string(filepath.Separator) + path } return filepath.Clean(b.String()), nil } docker-1.10.3/pkg/sysinfo/000077500000000000000000000000001267010174400153205ustar00rootroot00000000000000docker-1.10.3/pkg/sysinfo/README.md000066400000000000000000000001031267010174400165710ustar00rootroot00000000000000SysInfo stores information about which features a kernel supports. docker-1.10.3/pkg/sysinfo/sysinfo.go000066400000000000000000000063121267010174400173430ustar00rootroot00000000000000package sysinfo import "github.com/docker/docker/pkg/parsers" // SysInfo stores information about which features a kernel supports. // TODO Windows: Factor out platform specific capabilities. 
type SysInfo struct { // Whether the kernel supports AppArmor or not AppArmor bool // Whether the kernel supports Seccomp or not Seccomp bool cgroupMemInfo cgroupCPUInfo cgroupBlkioInfo cgroupCpusetInfo // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work IPv4ForwardingDisabled bool // Whether bridge-nf-call-iptables is supported or not BridgeNfCallIptablesDisabled bool // Whether bridge-nf-call-ip6tables is supported or not BridgeNfCallIP6tablesDisabled bool // Whether the cgroup has the mountpoint of "devices" or not CgroupDevicesEnabled bool } type cgroupMemInfo struct { // Whether memory limit is supported or not MemoryLimit bool // Whether swap limit is supported or not SwapLimit bool // Whether soft limit is supported or not MemoryReservation bool // Whether OOM killer disable is supported or not OomKillDisable bool // Whether memory swappiness is supported or not MemorySwappiness bool // Whether kernel memory limit is supported or not KernelMemory bool } type cgroupCPUInfo struct { // Whether CPU shares is supported or not CPUShares bool // Whether CPU CFS(Completely Fair Scheduler) period is supported or not CPUCfsPeriod bool // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not CPUCfsQuota bool } type cgroupBlkioInfo struct { // Whether Block IO weight is supported or not BlkioWeight bool // Whether Block IO weight_device is supported or not BlkioWeightDevice bool // Whether Block IO read limit in bytes per second is supported or not BlkioReadBpsDevice bool // Whether Block IO write limit in bytes per second is supported or not BlkioWriteBpsDevice bool // Whether Block IO read limit in IO per second is supported or not BlkioReadIOpsDevice bool // Whether Block IO write limit in IO per second is supported or not BlkioWriteIOpsDevice bool } type cgroupCpusetInfo struct { // Whether Cpuset is supported or not Cpuset bool // Available Cpuset's cpus Cpus string // Available Cpuset's memory nodes Mems 
string } // IsCpusetCpusAvailable returns `true` if the provided string set is contained // in cgroup's cpuset.cpus set, `false` otherwise. // If error is not nil a parsing error occurred. func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { return isCpusetListAvailable(provided, c.Cpus) } // IsCpusetMemsAvailable returns `true` if the provided string set is contained // in cgroup's cpuset.mems set, `false` otherwise. // If error is not nil a parsing error occurred. func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { return isCpusetListAvailable(provided, c.Mems) } func isCpusetListAvailable(provided, available string) (bool, error) { parsedProvided, err := parsers.ParseUintList(provided) if err != nil { return false, err } parsedAvailable, err := parsers.ParseUintList(available) if err != nil { return false, err } for k := range parsedProvided { if !parsedAvailable[k] { return false, nil } } return true, nil } docker-1.10.3/pkg/sysinfo/sysinfo_freebsd.go000066400000000000000000000002171267010174400210330ustar00rootroot00000000000000package sysinfo // New returns an empty SysInfo for freebsd for now. func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} return sysInfo } docker-1.10.3/pkg/sysinfo/sysinfo_linux.go000066400000000000000000000146451267010174400205720ustar00rootroot00000000000000package sysinfo import ( "io/ioutil" "os" "path" "strings" "syscall" "github.com/Sirupsen/logrus" "github.com/opencontainers/runc/libcontainer/cgroups" ) const ( // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. SeccompModeFilter = uintptr(2) ) // New returns a new SysInfo, using the filesystem to detect which features // the kernel supports. If `quiet` is `false` warnings are printed in logs // whenever an error occurs or misconfigurations are present. 
func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} sysInfo.cgroupMemInfo = checkCgroupMem(quiet) sysInfo.cgroupCPUInfo = checkCgroupCPU(quiet) sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(quiet) sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(quiet) _, err := cgroups.FindCgroupMountpoint("devices") sysInfo.CgroupDevicesEnabled = err == nil sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") sysInfo.BridgeNfCallIptablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") sysInfo.BridgeNfCallIP6tablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") // Check if AppArmor is supported. if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { sysInfo.AppArmor = true } // Check if Seccomp is supported, via CONFIG_SECCOMP. if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { // Make sure the kernel has CONFIG_SECCOMP_FILTER. if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { sysInfo.Seccomp = true } } return sysInfo } // checkCgroupMem reads the memory information from the memory cgroup mount point. 
func checkCgroupMem(quiet bool) cgroupMemInfo { mountPoint, err := cgroups.FindCgroupMountpoint("memory") if err != nil { if !quiet { logrus.Warnf("Your kernel does not support cgroup memory limit: %v", err) } return cgroupMemInfo{} } swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") if !quiet && !swapLimit { logrus.Warn("Your kernel does not support swap memory limit.") } memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") if !quiet && !memoryReservation { logrus.Warn("Your kernel does not support memory reservation.") } oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") if !quiet && !oomKillDisable { logrus.Warnf("Your kernel does not support oom control.") } memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") if !quiet && !memorySwappiness { logrus.Warnf("Your kernel does not support memory swappiness.") } kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") if !quiet && !kernelMemory { logrus.Warnf("Your kernel does not support kernel memory limit.") } return cgroupMemInfo{ MemoryLimit: true, SwapLimit: swapLimit, MemoryReservation: memoryReservation, OomKillDisable: oomKillDisable, MemorySwappiness: memorySwappiness, KernelMemory: kernelMemory, } } // checkCgroupCPU reads the cpu information from the cpu cgroup mount point. 
func checkCgroupCPU(quiet bool) cgroupCPUInfo { mountPoint, err := cgroups.FindCgroupMountpoint("cpu") if err != nil { if !quiet { logrus.Warn(err) } return cgroupCPUInfo{} } cpuShares := cgroupEnabled(mountPoint, "cpu.shares") if !quiet && !cpuShares { logrus.Warn("Your kernel does not support cgroup cpu shares") } cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") if !quiet && !cpuCfsPeriod { logrus.Warn("Your kernel does not support cgroup cfs period") } cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") if !quiet && !cpuCfsQuota { logrus.Warn("Your kernel does not support cgroup cfs quotas") } return cgroupCPUInfo{ CPUShares: cpuShares, CPUCfsPeriod: cpuCfsPeriod, CPUCfsQuota: cpuCfsQuota, } } // checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. func checkCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { mountPoint, err := cgroups.FindCgroupMountpoint("blkio") if err != nil { if !quiet { logrus.Warn(err) } return cgroupBlkioInfo{} } weight := cgroupEnabled(mountPoint, "blkio.weight") if !quiet && !weight { logrus.Warn("Your kernel does not support cgroup blkio weight") } weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") if !quiet && !weightDevice { logrus.Warn("Your kernel does not support cgroup blkio weight_device") } readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") if !quiet && !readBpsDevice { logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") } writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") if !quiet && !writeBpsDevice { logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") } readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") if !quiet && !readIOpsDevice { logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") } writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") if !quiet && 
!writeIOpsDevice { logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") } return cgroupBlkioInfo{ BlkioWeight: weight, BlkioWeightDevice: weightDevice, BlkioReadBpsDevice: readBpsDevice, BlkioWriteBpsDevice: writeBpsDevice, BlkioReadIOpsDevice: readIOpsDevice, BlkioWriteIOpsDevice: writeIOpsDevice, } } // checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. func checkCgroupCpusetInfo(quiet bool) cgroupCpusetInfo { mountPoint, err := cgroups.FindCgroupMountpoint("cpuset") if err != nil { if !quiet { logrus.Warn(err) } return cgroupCpusetInfo{} } cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) if err != nil { return cgroupCpusetInfo{} } mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) if err != nil { return cgroupCpusetInfo{} } return cgroupCpusetInfo{ Cpuset: true, Cpus: strings.TrimSpace(string(cpus)), Mems: strings.TrimSpace(string(mems)), } } func cgroupEnabled(mountPoint, name string) bool { _, err := os.Stat(path.Join(mountPoint, name)) return err == nil } func readProcBool(path string) bool { val, err := ioutil.ReadFile(path) if err != nil { return false } return strings.TrimSpace(string(val)) == "1" } docker-1.10.3/pkg/sysinfo/sysinfo_linux_test.go000066400000000000000000000022531267010174400216210ustar00rootroot00000000000000package sysinfo import ( "io/ioutil" "os" "path" "path/filepath" "testing" ) func TestReadProcBool(t *testing.T) { tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) procFile := filepath.Join(tmpDir, "read-proc-bool") if err := ioutil.WriteFile(procFile, []byte("1"), 644); err != nil { t.Fatal(err) } if !readProcBool(procFile) { t.Fatal("expected proc bool to be true, got false") } if err := ioutil.WriteFile(procFile, []byte("0"), 644); err != nil { t.Fatal(err) } if readProcBool(procFile) { t.Fatal("expected proc bool to be false, got false") } if 
readProcBool(path.Join(tmpDir, "no-exist")) { t.Fatal("should be false for non-existent entry") } } func TestCgroupEnabled(t *testing.T) { cgroupDir, err := ioutil.TempDir("", "cgroup-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(cgroupDir) if cgroupEnabled(cgroupDir, "test") { t.Fatal("cgroupEnabled should be false") } if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 644); err != nil { t.Fatal(err) } if !cgroupEnabled(cgroupDir, "test") { t.Fatal("cgroupEnabled should be true") } } docker-1.10.3/pkg/sysinfo/sysinfo_test.go000066400000000000000000000012501267010174400203760ustar00rootroot00000000000000package sysinfo import "testing" func TestIsCpusetListAvailable(t *testing.T) { cases := []struct { provided string available string res bool err bool }{ {"1", "0-4", true, false}, {"01,3", "0-4", true, false}, {"", "0-7", true, false}, {"1--42", "0-7", false, true}, {"1-42", "00-1,8,,9", false, true}, {"1,41-42", "43,45", false, false}, {"0-3", "", false, false}, } for _, c := range cases { r, err := isCpusetListAvailable(c.provided, c.available) if (c.err && err == nil) && r != c.res { t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, (c.err && err == nil), r) } } } docker-1.10.3/pkg/sysinfo/sysinfo_windows.go000066400000000000000000000002171267010174400211130ustar00rootroot00000000000000package sysinfo // New returns an empty SysInfo for windows for now. 
func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} return sysInfo } docker-1.10.3/pkg/system/000077500000000000000000000000001267010174400151525ustar00rootroot00000000000000docker-1.10.3/pkg/system/chtimes.go000066400000000000000000000017331267010174400171410ustar00rootroot00000000000000package system import ( "os" "syscall" "time" "unsafe" ) var ( maxTime time.Time ) func init() { if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { // This is a 64 bit timespec // os.Chtimes limits time to the following maxTime = time.Unix(0, 1<<63-1) } else { // This is a 32 bit timespec maxTime = time.Unix(1<<31-1, 0) } } // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) unixMaxTime := maxTime // If the modified time is prior to the Unix Epoch, or after the // end of Unix Time, os.Chtimes has undefined behavior // default to Unix Epoch in this case, just in case if atime.Before(unixMinTime) || atime.After(unixMaxTime) { atime = unixMinTime } if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { mtime = unixMinTime } if err := os.Chtimes(name, atime, mtime); err != nil { return err } return nil } docker-1.10.3/pkg/system/chtimes_test.go000066400000000000000000000042761267010174400202050ustar00rootroot00000000000000package system import ( "io/ioutil" "os" "path/filepath" "testing" "time" ) // prepareTempFile creates a temporary file in a temporary directory. func prepareTempFile(t *testing.T) (string, string) { dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { t.Fatal(err) } file := filepath.Join(dir, "exist") if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { t.Fatal(err) } return file, dir } // TestChtimes tests Chtimes on a tempfile. 
Test only mTime, because aTime is OS dependent func TestChtimes(t *testing.T) { file, dir := prepareTempFile(t) defer os.RemoveAll(dir) beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) unixEpochTime := time.Unix(0, 0) afterUnixEpochTime := time.Unix(100, 0) unixMaxTime := maxTime // Test both aTime and mTime set to Unix Epoch Chtimes(file, unixEpochTime, unixEpochTime) f, err := os.Stat(file) if err != nil { t.Fatal(err) } if f.ModTime() != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) } // Test aTime before Unix Epoch and mTime set to Unix Epoch Chtimes(file, beforeUnixEpochTime, unixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } if f.ModTime() != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) } // Test aTime set to Unix Epoch and mTime before Unix Epoch Chtimes(file, unixEpochTime, beforeUnixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } if f.ModTime() != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) } // Test both aTime and mTime set to after Unix Epoch (valid time) Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } if f.ModTime() != afterUnixEpochTime { t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) } // Test both aTime and mTime set to Unix max time Chtimes(file, unixMaxTime, unixMaxTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) } } docker-1.10.3/pkg/system/chtimes_unix_test.go000066400000000000000000000043651267010174400212470ustar00rootroot00000000000000// +build linux freebsd package system import ( "os" "syscall" "testing" "time" ) // TestChtimes tests Chtimes access time on a tempfile on Linux func TestChtimesLinux(t *testing.T) { file, dir := 
prepareTempFile(t) defer os.RemoveAll(dir) beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) unixEpochTime := time.Unix(0, 0) afterUnixEpochTime := time.Unix(100, 0) unixMaxTime := maxTime // Test both aTime and mTime set to Unix Epoch Chtimes(file, unixEpochTime, unixEpochTime) f, err := os.Stat(file) if err != nil { t.Fatal(err) } stat := f.Sys().(*syscall.Stat_t) aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) if aTime != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) } // Test aTime before Unix Epoch and mTime set to Unix Epoch Chtimes(file, beforeUnixEpochTime, unixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } stat = f.Sys().(*syscall.Stat_t) aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) if aTime != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) } // Test aTime set to Unix Epoch and mTime before Unix Epoch Chtimes(file, unixEpochTime, beforeUnixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } stat = f.Sys().(*syscall.Stat_t) aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) if aTime != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) } // Test both aTime and mTime set to after Unix Epoch (valid time) Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } stat = f.Sys().(*syscall.Stat_t) aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) if aTime != afterUnixEpochTime { t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) } // Test both aTime and mTime set to Unix max time Chtimes(file, unixMaxTime, unixMaxTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } stat = f.Sys().(*syscall.Stat_t) aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) } 
} docker-1.10.3/pkg/system/chtimes_windows_test.go000066400000000000000000000043361267010174400217540ustar00rootroot00000000000000// +build windows package system import ( "os" "syscall" "testing" "time" ) // TestChtimes tests Chtimes access time on a tempfile on Windows func TestChtimesWindows(t *testing.T) { file, dir := prepareTempFile(t) defer os.RemoveAll(dir) beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) unixEpochTime := time.Unix(0, 0) afterUnixEpochTime := time.Unix(100, 0) unixMaxTime := maxTime // Test both aTime and mTime set to Unix Epoch Chtimes(file, unixEpochTime, unixEpochTime) f, err := os.Stat(file) if err != nil { t.Fatal(err) } aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) if aTime != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) } // Test aTime before Unix Epoch and mTime set to Unix Epoch Chtimes(file, beforeUnixEpochTime, unixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) if aTime != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) } // Test aTime set to Unix Epoch and mTime before Unix Epoch Chtimes(file, unixEpochTime, beforeUnixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) if aTime != unixEpochTime { t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) } // Test both aTime and mTime set to after Unix Epoch (valid time) Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) f, err = os.Stat(file) if err != nil { t.Fatal(err) } aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) if aTime != afterUnixEpochTime { t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) } // Test both aTime and mTime set to Unix max time Chtimes(file, unixMaxTime, unixMaxTime) f, err 
= os.Stat(file) if err != nil { t.Fatal(err) } aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) } } docker-1.10.3/pkg/system/errors.go000066400000000000000000000003031267010174400170110ustar00rootroot00000000000000package system import ( "errors" ) var ( // ErrNotSupportedPlatform means the platform is not supported. ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") ) docker-1.10.3/pkg/system/events_windows.go000066400000000000000000000044641267010174400205670ustar00rootroot00000000000000package system // This file implements syscalls for Win32 events which are not implemented // in golang. import ( "syscall" "unsafe" ) var ( procCreateEvent = modkernel32.NewProc("CreateEventW") procOpenEvent = modkernel32.NewProc("OpenEventW") procSetEvent = modkernel32.NewProc("SetEvent") procResetEvent = modkernel32.NewProc("ResetEvent") procPulseEvent = modkernel32.NewProc("PulseEvent") ) // CreateEvent implements win32 CreateEventW func in golang. It will create an event object. func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { namep, _ := syscall.UTF16PtrFromString(name) var _p1 uint32 if manualReset { _p1 = 1 } var _p2 uint32 if initialState { _p2 = 1 } r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) use(unsafe.Pointer(namep)) handle = syscall.Handle(r0) if handle == syscall.InvalidHandle { err = e1 } return } // OpenEvent implements win32 OpenEventW func in golang. It opens an event object. 
func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { namep, _ := syscall.UTF16PtrFromString(name) var _p1 uint32 if inheritHandle { _p1 = 1 } r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) use(unsafe.Pointer(namep)) handle = syscall.Handle(r0) if handle == syscall.InvalidHandle { err = e1 } return } // SetEvent implements win32 SetEvent func in golang. func SetEvent(handle syscall.Handle) (err error) { return setResetPulse(handle, procSetEvent) } // ResetEvent implements win32 ResetEvent func in golang. func ResetEvent(handle syscall.Handle) (err error) { return setResetPulse(handle, procResetEvent) } // PulseEvent implements win32 PulseEvent func in golang. func PulseEvent(handle syscall.Handle) (err error) { return setResetPulse(handle, procPulseEvent) } func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { r0, _, _ := proc.Call(uintptr(handle)) if r0 != 0 { err = syscall.Errno(r0) } return } var temp unsafe.Pointer // use ensures a variable is kept alive without the GC freeing while still needed func use(p unsafe.Pointer) { temp = p } docker-1.10.3/pkg/system/filesys.go000066400000000000000000000006521267010174400171620ustar00rootroot00000000000000// +build !windows package system import ( "os" "path/filepath" ) // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all dir created. func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } // IsAbs is a platform-specific wrapper for filepath.IsAbs. 
func IsAbs(path string) bool { return filepath.IsAbs(path) } docker-1.10.3/pkg/system/filesys_windows.go000066400000000000000000000037301267010174400207340ustar00rootroot00000000000000// +build windows package system import ( "os" "path/filepath" "regexp" "strings" "syscall" ) // MkdirAll implementation that is volume path aware for Windows. func MkdirAll(path string, perm os.FileMode) error { if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { return nil } // The rest of this method is copied from os.MkdirAll and should be kept // as-is to ensure compatibility. // Fast path: if we can tell whether path is a directory or file, stop with success or error. dir, err := os.Stat(path) if err == nil { if dir.IsDir() { return nil } return &os.PathError{ Op: "mkdir", Path: path, Err: syscall.ENOTDIR, } } // Slow path: make sure parent exists and then call Mkdir for path. i := len(path) for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. i-- } j := i for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. j-- } if j > 1 { // Create parent err = MkdirAll(path[0:j-1], perm) if err != nil { return err } } // Parent now exists; invoke Mkdir and use its result. err = os.Mkdir(path, perm) if err != nil { // Handle arguments like "foo/." by // double-checking that directory doesn't exist. dir, err1 := os.Lstat(path) if err1 == nil && dir.IsDir() { return nil } return err } return nil } // IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, // golang filepath.IsAbs does not consider a path \windows\system32 as absolute // as it doesn't start with a drive-letter/colon combination. However, in // docker we need to verify things such as WORKDIR /windows/system32 in // a Dockerfile (which gets translated to \windows\system32 when being processed // by the daemon. This SHOULD be treated as absolute from a docker processing // perspective. 
func IsAbs(path string) bool { if !filepath.IsAbs(path) { if !strings.HasPrefix(path, string(os.PathSeparator)) { return false } } return true } docker-1.10.3/pkg/system/lstat.go000066400000000000000000000005461267010174400166350ustar00rootroot00000000000000// +build !windows package system import ( "syscall" ) // Lstat takes a path to a file and returns // a system.StatT type pertaining to that file. // // Throws an error if the file does not exist func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { return nil, err } return fromStatT(s) } docker-1.10.3/pkg/system/lstat_unix_test.go000066400000000000000000000010731267010174400207330ustar00rootroot00000000000000// +build linux freebsd package system import ( "os" "testing" ) // TestLstat tests Lstat for existing and non existing files func TestLstat(t *testing.T) { file, invalid, _, dir := prepareFiles(t) defer os.RemoveAll(dir) statFile, err := Lstat(file) if err != nil { t.Fatal(err) } if statFile == nil { t.Fatal("returned empty stat for existing file") } statInvalid, err := Lstat(invalid) if err == nil { t.Fatal("did not return error for non-existing file") } if statInvalid != nil { t.Fatal("returned non-nil stat for non-existing file") } } docker-1.10.3/pkg/system/lstat_windows.go000066400000000000000000000010531267010174400204010ustar00rootroot00000000000000// +build windows package system import ( "os" ) // Lstat calls os.Lstat to get a fileinfo interface back. // This is then copied into our own locally defined structure. // Note the Linux version uses fromStatT to do the copy back, // but that not strictly necessary when already in an OS specific module. 
func Lstat(path string) (*StatT, error) { fi, err := os.Lstat(path) if err != nil { return nil, err } return &StatT{ name: fi.Name(), size: fi.Size(), mode: fi.Mode(), modTime: fi.ModTime(), isDir: fi.IsDir()}, nil } docker-1.10.3/pkg/system/meminfo.go000066400000000000000000000006001267010174400171270ustar00rootroot00000000000000package system // MemInfo contains memory statistics of the host system. type MemInfo struct { // Total usable RAM (i.e. physical RAM minus a few reserved bits and the // kernel binary code). MemTotal int64 // Amount of free memory. MemFree int64 // Total amount of swap space available. SwapTotal int64 // Amount of swap space that is currently unused. SwapFree int64 } docker-1.10.3/pkg/system/meminfo_linux.go000066400000000000000000000025321267010174400203540ustar00rootroot00000000000000package system import ( "bufio" "io" "os" "strconv" "strings" "github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a // MemInfo type. func ReadMemInfo() (*MemInfo, error) { file, err := os.Open("/proc/meminfo") if err != nil { return nil, err } defer file.Close() return parseMemInfo(file) } // parseMemInfo parses the /proc/meminfo file into // a MemInfo object given a io.Reader to the file. // // Throws error if there are problems reading from the file func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo := &MemInfo{} scanner := bufio.NewScanner(reader) for scanner.Scan() { // Expected format: ["MemTotal:", "1234", "kB"] parts := strings.Fields(scanner.Text()) // Sanity checks: Skip malformed entries. if len(parts) < 3 || parts[2] != "kB" { continue } // Convert to bytes. 
size, err := strconv.Atoi(parts[1]) if err != nil { continue } bytes := int64(size) * units.KiB switch parts[0] { case "MemTotal:": meminfo.MemTotal = bytes case "MemFree:": meminfo.MemFree = bytes case "SwapTotal:": meminfo.SwapTotal = bytes case "SwapFree:": meminfo.SwapFree = bytes } } // Handle errors that may have occurred during the reading of the file. if err := scanner.Err(); err != nil { return nil, err } return meminfo, nil } docker-1.10.3/pkg/system/meminfo_unix_test.go000066400000000000000000000015341267010174400212400ustar00rootroot00000000000000// +build linux freebsd package system import ( "strings" "testing" "github.com/docker/go-units" ) // TestMemInfo tests parseMemInfo with a static meminfo string func TestMemInfo(t *testing.T) { const input = ` MemTotal: 1 kB MemFree: 2 kB SwapTotal: 3 kB SwapFree: 4 kB Malformed1: Malformed2: 1 Malformed3: 2 MB Malformed4: X kB ` meminfo, err := parseMemInfo(strings.NewReader(input)) if err != nil { t.Fatal(err) } if meminfo.MemTotal != 1*units.KiB { t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) } if meminfo.MemFree != 2*units.KiB { t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) } if meminfo.SwapTotal != 3*units.KiB { t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) } if meminfo.SwapFree != 4*units.KiB { t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) } } docker-1.10.3/pkg/system/meminfo_unsupported.go000066400000000000000000000003041267010174400216000ustar00rootroot00000000000000// +build !linux,!windows package system // ReadMemInfo is not supported on platforms other than linux and windows. 
func ReadMemInfo() (*MemInfo, error) { return nil, ErrNotSupportedPlatform } docker-1.10.3/pkg/system/meminfo_windows.go000066400000000000000000000021731267010174400207100ustar00rootroot00000000000000package system import ( "syscall" "unsafe" ) var ( modkernel32 = syscall.NewLazyDLL("kernel32.dll") procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") ) // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx type memorystatusex struct { dwLength uint32 dwMemoryLoad uint32 ullTotalPhys uint64 ullAvailPhys uint64 ullTotalPageFile uint64 ullAvailPageFile uint64 ullTotalVirtual uint64 ullAvailVirtual uint64 ullAvailExtendedVirtual uint64 } // ReadMemInfo retrieves memory statistics of the host system and returns a // MemInfo type. func ReadMemInfo() (*MemInfo, error) { msi := &memorystatusex{ dwLength: 64, } r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) if r1 == 0 { return &MemInfo{}, nil } return &MemInfo{ MemTotal: int64(msi.ullTotalPhys), MemFree: int64(msi.ullAvailPhys), SwapTotal: int64(msi.ullTotalPageFile), SwapFree: int64(msi.ullAvailPageFile), }, nil } docker-1.10.3/pkg/system/mknod.go000066400000000000000000000014201267010174400166060ustar00rootroot00000000000000// +build !windows package system import ( "syscall" ) // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. func Mknod(path string, mode uint32, dev int) error { return syscall.Mknod(path, mode, dev) } // Mkdev is used to build the value of linux devices (in /dev/) which specifies major // and minor number of the newly created device special file. // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. 
func Mkdev(major int64, minor int64) uint32 { return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) } docker-1.10.3/pkg/system/mknod_windows.go000066400000000000000000000004501267010174400203620ustar00rootroot00000000000000// +build windows package system // Mknod is not implemented on Windows. func Mknod(path string, mode uint32, dev int) error { return ErrNotSupportedPlatform } // Mkdev is not implemented on Windows. func Mkdev(major int64, minor int64) uint32 { panic("Mkdev not implemented on Windows.") } docker-1.10.3/pkg/system/path_unix.go000066400000000000000000000004241267010174400175000ustar00rootroot00000000000000// +build !windows package system // DefaultPathEnv is unix style list of directories to search for // executables. Each directory is separated from the next by a colon // ':' character . const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" docker-1.10.3/pkg/system/path_windows.go000066400000000000000000000003371267010174400202120ustar00rootroot00000000000000// +build windows package system // DefaultPathEnv is deliberately empty on Windows as the default path will be set by // the container. Docker has no context of what the default path should be. const DefaultPathEnv = "" docker-1.10.3/pkg/system/stat.go000066400000000000000000000017231267010174400164570ustar00rootroot00000000000000// +build !windows package system import ( "syscall" ) // StatT type contains status of a file. It contains metadata // like permission, owner, group, size, etc about a file. type StatT struct { mode uint32 uid uint32 gid uint32 rdev uint64 size int64 mtim syscall.Timespec } // Mode returns file's permission mode. func (s StatT) Mode() uint32 { return s.mode } // UID returns file's user id of owner. func (s StatT) UID() uint32 { return s.uid } // GID returns file's group id of owner. func (s StatT) GID() uint32 { return s.gid } // Rdev returns file's device ID (if it's special file). 
func (s StatT) Rdev() uint64 { return s.rdev } // Size returns file's size. func (s StatT) Size() int64 { return s.size } // Mtim returns file's last modification time. func (s StatT) Mtim() syscall.Timespec { return s.mtim } // GetLastModification returns file's last modification time. func (s StatT) GetLastModification() syscall.Timespec { return s.Mtim() } docker-1.10.3/pkg/system/stat_freebsd.go000066400000000000000000000011201267010174400201400ustar00rootroot00000000000000package system import ( "syscall" ) // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil } // Stat takes a path to a file and returns // a system.Stat_t type pertaining to that file. // // Throws an error if the file does not exist func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { return nil, err } return fromStatT(s) } docker-1.10.3/pkg/system/stat_linux.go000066400000000000000000000013331267010174400176730ustar00rootroot00000000000000package system import ( "syscall" ) // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, rdev: s.Rdev, mtim: s.Mtim}, nil } // FromStatT exists only on linux, and loads a system.StatT from a // syscal.Stat_t. func FromStatT(s *syscall.Stat_t) (*StatT, error) { return fromStatT(s) } // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. 
// // Throws an error if the file does not exist func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { return nil, err } return fromStatT(s) } docker-1.10.3/pkg/system/stat_solaris.go000066400000000000000000000004641267010174400202140ustar00rootroot00000000000000// +build solaris package system import ( "syscall" ) // fromStatT creates a system.StatT type from a syscall.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtim}, nil } docker-1.10.3/pkg/system/stat_unix_test.go000066400000000000000000000012141267010174400205540ustar00rootroot00000000000000// +build linux freebsd package system import ( "os" "syscall" "testing" ) // TestFromStatT tests fromStatT for a tempfile func TestFromStatT(t *testing.T) { file, _, _, dir := prepareFiles(t) defer os.RemoveAll(dir) stat := &syscall.Stat_t{} err := syscall.Lstat(file, stat) s, err := fromStatT(stat) if err != nil { t.Fatal(err) } if stat.Mode != s.Mode() { t.Fatal("got invalid mode") } if stat.Uid != s.UID() { t.Fatal("got invalid uid") } if stat.Gid != s.GID() { t.Fatal("got invalid gid") } if stat.Rdev != s.Rdev() { t.Fatal("got invalid rdev") } if stat.Mtim != s.Mtim() { t.Fatal("got invalid mtim") } } docker-1.10.3/pkg/system/stat_unsupported.go000066400000000000000000000005231267010174400211240ustar00rootroot00000000000000// +build !linux,!windows,!freebsd,!solaris package system import ( "syscall" ) // fromStatT creates a system.StatT type from a syscall.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil } docker-1.10.3/pkg/system/stat_windows.go000066400000000000000000000013451267010174400202310ustar00rootroot00000000000000// +build windows package system import ( "os" "time" ) // StatT type contains status 
of a file. It contains metadata // like name, permission, size, etc about a file. type StatT struct { name string size int64 mode os.FileMode modTime time.Time isDir bool } // Name returns file's name. func (s StatT) Name() string { return s.name } // Size returns file's size. func (s StatT) Size() int64 { return s.size } // Mode returns file's permission mode. func (s StatT) Mode() os.FileMode { return s.mode } // ModTime returns file's last modification time. func (s StatT) ModTime() time.Time { return s.modTime } // IsDir returns whether file is actually a directory. func (s StatT) IsDir() bool { return s.isDir } docker-1.10.3/pkg/system/syscall_unix.go000066400000000000000000000003221267010174400202130ustar00rootroot00000000000000// +build linux freebsd package system import "syscall" // Unmount is a platform-specific helper function to call // the unmount syscall. func Unmount(dest string) error { return syscall.Unmount(dest, 0) } docker-1.10.3/pkg/system/syscall_windows.go000066400000000000000000000016611267010174400207310ustar00rootroot00000000000000package system import ( "fmt" "syscall" ) // OSVersion is a wrapper for Windows version information // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx type OSVersion struct { Version uint32 MajorVersion uint8 MinorVersion uint8 Build uint16 } // GetOSVersion gets the operating system version on Windows. Note that // docker.exe must be manifested to get the correct version information. func GetOSVersion() (OSVersion, error) { var err error osv := OSVersion{} osv.Version, err = syscall.GetVersion() if err != nil { return osv, fmt.Errorf("Failed to call GetVersion()") } osv.MajorVersion = uint8(osv.Version & 0xFF) osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) osv.Build = uint16(osv.Version >> 16) return osv, nil } // Unmount is a platform-specific helper function to call // the unmount syscall. 
Not supported on Windows func Unmount(dest string) error { return nil } docker-1.10.3/pkg/system/umask.go000066400000000000000000000003561267010174400166250ustar00rootroot00000000000000// +build !windows package system import ( "syscall" ) // Umask sets current process's file mode creation mask to newmask // and return oldmask. func Umask(newmask int) (oldmask int, err error) { return syscall.Umask(newmask), nil } docker-1.10.3/pkg/system/umask_windows.go000066400000000000000000000003301267010174400203670ustar00rootroot00000000000000// +build windows package system // Umask is not supported on the windows platform. func Umask(newmask int) (oldmask int, err error) { // should not be called on cli code path return 0, ErrNotSupportedPlatform } docker-1.10.3/pkg/system/utimes_darwin.go000066400000000000000000000002651267010174400203560ustar00rootroot00000000000000package system import "syscall" // LUtimesNano is not supported by darwin platform. func LUtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } docker-1.10.3/pkg/system/utimes_freebsd.go000066400000000000000000000011051267010174400204760ustar00rootroot00000000000000package system import ( "syscall" "unsafe" ) // LUtimesNano is used to change access and modification time of the specified path. // It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { var _path *byte _path, err := syscall.BytePtrFromString(path) if err != nil { return err } if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { return err } return nil } docker-1.10.3/pkg/system/utimes_linux.go000066400000000000000000000013301267010174400202230ustar00rootroot00000000000000package system import ( "syscall" "unsafe" ) // LUtimesNano is used to change access and modification time of the specified path. 
// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { // These are not currently available in syscall atFdCwd := -100 atSymLinkNoFollow := 0x100 var _path *byte _path, err := syscall.BytePtrFromString(path) if err != nil { return err } if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { return err } return nil } docker-1.10.3/pkg/system/utimes_unix_test.go000066400000000000000000000026671267010174400211240ustar00rootroot00000000000000// +build linux freebsd package system import ( "io/ioutil" "os" "path/filepath" "syscall" "testing" ) // prepareFiles creates files for testing in the temp directory func prepareFiles(t *testing.T) (string, string, string, string) { dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { t.Fatal(err) } file := filepath.Join(dir, "exist") if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { t.Fatal(err) } invalid := filepath.Join(dir, "doesnt-exist") symlink := filepath.Join(dir, "symlink") if err := os.Symlink(file, symlink); err != nil { t.Fatal(err) } return file, invalid, symlink, dir } func TestLUtimesNano(t *testing.T) { file, invalid, symlink, dir := prepareFiles(t) defer os.RemoveAll(dir) before, err := os.Stat(file) if err != nil { t.Fatal(err) } ts := []syscall.Timespec{{0, 0}, {0, 0}} if err := LUtimesNano(symlink, ts); err != nil { t.Fatal(err) } symlinkInfo, err := os.Lstat(symlink) if err != nil { t.Fatal(err) } if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { t.Fatal("The modification time of the symlink should be different") } fileInfo, err := os.Stat(file) if err != nil { t.Fatal(err) } if before.ModTime().Unix() != fileInfo.ModTime().Unix() { t.Fatal("The modification time of the file should be same") } if err := 
LUtimesNano(invalid, ts); err == nil { t.Fatal("Doesn't return an error on a non-existing file") } } docker-1.10.3/pkg/system/utimes_unsupported.go000066400000000000000000000003671267010174400214650ustar00rootroot00000000000000// +build !linux,!freebsd,!darwin package system import "syscall" // LUtimesNano is not supported on platforms other than linux, freebsd and darwin. func LUtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } docker-1.10.3/pkg/system/xattrs_linux.go000066400000000000000000000035451267010174400202540ustar00rootroot00000000000000package system import ( "syscall" "unsafe" ) // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. // It will returns a nil slice and nil error if the xattr is not set. func Lgetxattr(path string, attr string) ([]byte, error) { pathBytes, err := syscall.BytePtrFromString(path) if err != nil { return nil, err } attrBytes, err := syscall.BytePtrFromString(attr) if err != nil { return nil, err } dest := make([]byte, 128) destBytes := unsafe.Pointer(&dest[0]) sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) if errno == syscall.ENODATA { return nil, nil } if errno == syscall.ERANGE { dest = make([]byte, sz) destBytes := unsafe.Pointer(&dest[0]) sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) } if errno != 0 { return nil, errno } return dest[:sz], nil } var _zero uintptr // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. 
func Lsetxattr(path string, attr string, data []byte, flags int) error { pathBytes, err := syscall.BytePtrFromString(path) if err != nil { return err } attrBytes, err := syscall.BytePtrFromString(attr) if err != nil { return err } var dataBytes unsafe.Pointer if len(data) > 0 { dataBytes = unsafe.Pointer(&data[0]) } else { dataBytes = unsafe.Pointer(&_zero) } _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) if errno != 0 { return errno } return nil } docker-1.10.3/pkg/system/xattrs_unsupported.go000066400000000000000000000005521267010174400215000ustar00rootroot00000000000000// +build !linux package system // Lgetxattr is not supported on platforms other than linux. func Lgetxattr(path string, attr string) ([]byte, error) { return nil, ErrNotSupportedPlatform } // Lsetxattr is not supported on platforms other than linux. func Lsetxattr(path string, attr string, data []byte, flags int) error { return ErrNotSupportedPlatform } docker-1.10.3/pkg/tailfile/000077500000000000000000000000001267010174400154175ustar00rootroot00000000000000docker-1.10.3/pkg/tailfile/tailfile.go000066400000000000000000000026701267010174400175440ustar00rootroot00000000000000// Package tailfile provides helper functions to read the nth lines of any // ReadSeeker. package tailfile import ( "bytes" "errors" "io" "os" ) const blockSize = 1024 var eol = []byte("\n") // ErrNonPositiveLinesNumber is an error returned if the lines number was negative. var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") //TailFile returns last n lines of reader f (could be a fil). 
func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { if n <= 0 { return nil, ErrNonPositiveLinesNumber } size, err := f.Seek(0, os.SEEK_END) if err != nil { return nil, err } block := -1 var data []byte var cnt int for { var b []byte step := int64(block * blockSize) left := size + step // how many bytes to beginning if left < 0 { if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, err } b = make([]byte, blockSize+left) if _, err := f.Read(b); err != nil { return nil, err } data = append(b, data...) break } else { b = make([]byte, blockSize) if _, err := f.Seek(step, os.SEEK_END); err != nil { return nil, err } if _, err := f.Read(b); err != nil { return nil, err } data = append(b, data...) } cnt += bytes.Count(b, eol) if cnt > n { break } block-- } lines := bytes.Split(data, eol) if n < len(lines) { return lines[len(lines)-n-1 : len(lines)-1], nil } return lines[:len(lines)-1], nil } docker-1.10.3/pkg/tailfile/tailfile_test.go000066400000000000000000000056761267010174400206140ustar00rootroot00000000000000package tailfile import ( "io/ioutil" "os" "testing" ) func TestTailFile(t *testing.T) { f, err := ioutil.TempFile("", "tail-test") if err != nil { t.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) testFile := []byte(`first line second line third line fourth line fifth line next first line next second line next third line next fourth line next fifth line last first line next first line next second line next third line next fourth line next fifth line next first line next second line next third line next fourth line next fifth line last second line last third line last fourth line last fifth line truncated line`) if _, err := f.Write(testFile); err != nil { t.Fatal(err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { t.Fatal(err) } expected := []string{"last fourth line", "last fifth line"} res, err := TailFile(f, 2) if err != nil { t.Fatal(err) } for i, l := range res { t.Logf("%s", l) if expected[i] != string(l) { t.Fatalf("Expected 
line %s, got %s", expected[i], l) } } } func TestTailFileManyLines(t *testing.T) { f, err := ioutil.TempFile("", "tail-test") if err != nil { t.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) testFile := []byte(`first line second line truncated line`) if _, err := f.Write(testFile); err != nil { t.Fatal(err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { t.Fatal(err) } expected := []string{"first line", "second line"} res, err := TailFile(f, 10000) if err != nil { t.Fatal(err) } for i, l := range res { t.Logf("%s", l) if expected[i] != string(l) { t.Fatalf("Expected line %s, got %s", expected[i], l) } } } func TestTailEmptyFile(t *testing.T) { f, err := ioutil.TempFile("", "tail-test") if err != nil { t.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) res, err := TailFile(f, 10000) if err != nil { t.Fatal(err) } if len(res) != 0 { t.Fatal("Must be empty slice from empty file") } } func TestTailNegativeN(t *testing.T) { f, err := ioutil.TempFile("", "tail-test") if err != nil { t.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) testFile := []byte(`first line second line truncated line`) if _, err := f.Write(testFile); err != nil { t.Fatal(err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { t.Fatal(err) } if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) } if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) } } func BenchmarkTail(b *testing.B) { f, err := ioutil.TempFile("", "tail-test") if err != nil { b.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) for i := 0; i < 10000; i++ { if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { b.Fatal(err) } } b.ResetTimer() for i := 0; i < b.N; i++ { if _, err := TailFile(f, 1000); err != nil { b.Fatal(err) } } } 
docker-1.10.3/pkg/tarsum/000077500000000000000000000000001267010174400151415ustar00rootroot00000000000000docker-1.10.3/pkg/tarsum/builder_context.go000066400000000000000000000012051267010174400206600ustar00rootroot00000000000000package tarsum // BuilderContext is an interface extending TarSum by adding the Remove method. // In general there was concern about adding this method to TarSum itself // so instead it is being added just to "BuilderContext" which will then // only be used during the .dockerignore file processing // - see builder/evaluator.go type BuilderContext interface { TarSum Remove(string) } func (bc *tarSum) Remove(filename string) { for i, fis := range bc.sums { if fis.Name() == filename { bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) // Note, we don't just return because there could be // more than one with this name } } } docker-1.10.3/pkg/tarsum/builder_context_test.go000066400000000000000000000027601267010174400217260ustar00rootroot00000000000000package tarsum import ( "io" "io/ioutil" "os" "testing" ) // Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing func TestTarSumRemoveNonExistent(t *testing.T) { filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" reader, err := os.Open(filename) if err != nil { t.Fatal(err) } ts, err := NewTarSum(reader, false, Version0) if err != nil { t.Fatal(err) } // Read and discard bytes so that it populates sums _, err = io.Copy(ioutil.Discard, ts) if err != nil { t.Errorf("failed to read from %s: %s", filename, err) } expected := len(ts.GetSums()) ts.(BuilderContext).Remove("") ts.(BuilderContext).Remove("Anything") if len(ts.GetSums()) != expected { t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) } } // Remove a tarsum (in the BuilderContext) func TestTarSumRemove(t *testing.T) { filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" reader, err := os.Open(filename) if err != 
nil { t.Fatal(err) } ts, err := NewTarSum(reader, false, Version0) if err != nil { t.Fatal(err) } // Read and discard bytes so that it populates sums _, err = io.Copy(ioutil.Discard, ts) if err != nil { t.Errorf("failed to read from %s: %s", filename, err) } expected := len(ts.GetSums()) - 1 ts.(BuilderContext).Remove("etc/sudoers") if len(ts.GetSums()) != expected { t.Fatalf("Expected %v sums, go %v.", expected, len(ts.GetSums())) } } docker-1.10.3/pkg/tarsum/fileinfosums.go000066400000000000000000000063641267010174400202040ustar00rootroot00000000000000package tarsum import "sort" // FileInfoSumInterface provides an interface for accessing file checksum // information within a tar file. This info is accessed through interface // so the actual name and sum cannot be melded with. type FileInfoSumInterface interface { // File name Name() string // Checksum of this particular file and its headers Sum() string // Position of file in the tar Pos() int64 } type fileInfoSum struct { name string sum string pos int64 } func (fis fileInfoSum) Name() string { return fis.name } func (fis fileInfoSum) Sum() string { return fis.sum } func (fis fileInfoSum) Pos() int64 { return fis.pos } // FileInfoSums provides a list of FileInfoSumInterfaces. type FileInfoSums []FileInfoSumInterface // GetFile returns the first FileInfoSumInterface with a matching name. func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { for i := range fis { if fis[i].Name() == name { return fis[i] } } return nil } // GetAllFile returns a FileInfoSums with all matching names. func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { f := FileInfoSums{} for i := range fis { if fis[i].Name() == name { f = append(f, fis[i]) } } return f } // GetDuplicatePaths returns a FileInfoSums with all duplicated paths. func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. 
for i := range fis { f := fis[i] if _, ok := seen[f.Name()]; ok { dups = append(dups, f) } else { seen[f.Name()] = 0 } } return dups } // Len returns the size of the FileInfoSums. func (fis FileInfoSums) Len() int { return len(fis) } // Swap swaps two FileInfoSum values if a FileInfoSums list. func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } // SortByPos sorts FileInfoSums content by position. func (fis FileInfoSums) SortByPos() { sort.Sort(byPos{fis}) } // SortByNames sorts FileInfoSums content by name. func (fis FileInfoSums) SortByNames() { sort.Sort(byName{fis}) } // SortBySums sorts FileInfoSums content by sums. func (fis FileInfoSums) SortBySums() { dups := fis.GetDuplicatePaths() if len(dups) > 0 { sort.Sort(bySum{fis, dups}) } else { sort.Sort(bySum{fis, nil}) } } // byName is a sort.Sort helper for sorting by file names. // If names are the same, order them by their appearance in the tar archive type byName struct{ FileInfoSums } func (bn byName) Less(i, j int) bool { if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() } return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() } // bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive type bySum struct { FileInfoSums dups FileInfoSums } func (bs bySum) Less(i, j int) bool { if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() } return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() } // byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order type byPos struct{ FileInfoSums } func (bp byPos) Less(i, j int) bool { return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() } docker-1.10.3/pkg/tarsum/fileinfosums_test.go000066400000000000000000000032471267010174400212400ustar00rootroot00000000000000package tarsum import "testing" func 
newFileInfoSums() FileInfoSums { return FileInfoSums{ fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, } } func TestSortFileInfoSums(t *testing.T) { dups := newFileInfoSums().GetAllFile("dup1") if len(dups) != 2 { t.Errorf("expected length 2, got %d", len(dups)) } dups.SortByNames() if dups[0].Pos() != 4 { t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) } fis := newFileInfoSums() expected := "0abcdef1234567890" fis.SortBySums() got := fis[0].Sum() if got != expected { t.Errorf("Expected %q, got %q", expected, got) } fis = newFileInfoSums() expected = "dup1" fis.SortByNames() gotFis := fis[0] if gotFis.Name() != expected { t.Errorf("Expected %q, got %q", expected, gotFis.Name()) } // since a duplicate is first, ensure it is ordered first by position too if gotFis.Pos() != 4 { t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) } fis = newFileInfoSums() fis.SortByPos() if fis[0].Pos() != 0 { t.Errorf("sorted fileInfoSums by Pos should order them by position.") } fis = newFileInfoSums() expected = "deadbeef1" gotFileInfoSum := fis.GetFile("dup1") if gotFileInfoSum.Sum() != expected { t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) } if fis.GetFile("noPresent") != nil { t.Errorf("Should have return nil if name not found.") } } docker-1.10.3/pkg/tarsum/tarsum.go000066400000000000000000000175001267010174400170060ustar00rootroot00000000000000// Package tarsum provides algorithms to perform checksum calculation on // filesystem layers. // // The transportation of filesystems, regarding Docker, is done with tar(1) // archives. 
There are a variety of tar serialization formats [2], and a key // concern here is ensuring a repeatable checksum given a set of inputs from a // generic tar archive. Types of transportation include distribution to and from a // registry endpoint, saving and loading through commands or Docker daemon APIs, // transferring the build context from client to Docker daemon, and committing the // filesystem of a container to become an image. // // As tar archives are used for transit, but not preserved in many situations, the // focus of the algorithm is to ensure the integrity of the preserved filesystem, // while maintaining a deterministic accountability. This includes neither // constraining the ordering or manipulation of the files during the creation or // unpacking of the archive, nor include additional metadata state about the file // system attributes. package tarsum import ( "archive/tar" "bytes" "compress/gzip" "crypto" "crypto/sha256" "encoding/hex" "errors" "fmt" "hash" "io" "strings" ) const ( buf8K = 8 * 1024 buf16K = 16 * 1024 buf32K = 32 * 1024 ) // NewTarSum creates a new interface for calculating a fixed time checksum of a // tar archive. // // This is used for calculating checksums of layers of an image, in some cases // including the byte payload of the image's json metadata as well, and for // calculating the checksums for buildcache. func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { return NewTarSumHash(r, dc, v, DefaultTHash) } // NewTarSumHash creates a new TarSum, providing a THash to use rather than // the DefaultTHash. func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { headerSelector, err := getTarHeaderSelector(v) if err != nil { return nil, err } ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} err = ts.initTarSum() return ts, err } // NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. 
func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { parts := strings.SplitN(label, "+", 2) if len(parts) != 2 { return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") } versionName, hashName := parts[0], parts[1] version, ok := tarSumVersionsByName[versionName] if !ok { return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) } hashConfig, ok := standardHashConfigs[hashName] if !ok { return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) } tHash := NewTHash(hashConfig.name, hashConfig.hash.New) return NewTarSumHash(r, disableCompression, version, tHash) } // TarSum is the generic interface for calculating fixed time // checksums of a tar archive. type TarSum interface { io.Reader GetSums() FileInfoSums Sum([]byte) string Version() Version Hash() THash } // tarSum struct is the structure for a Version0 checksum calculation. type tarSum struct { io.Reader tarR *tar.Reader tarW *tar.Writer writer writeCloseFlusher bufTar *bytes.Buffer bufWriter *bytes.Buffer bufData []byte h hash.Hash tHash THash sums FileInfoSums fileCounter int64 currentFile string finished bool first bool DisableCompression bool // false by default. When false, the output gzip compressed. tarSumVersion Version // this field is not exported so it can not be mutated during use headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive } func (ts tarSum) Hash() THash { return ts.tHash } func (ts tarSum) Version() Version { return ts.tarSumVersion } // THash provides a hash.Hash type generator and its name. type THash interface { Hash() hash.Hash Name() string } // NewTHash is a convenience method for creating a THash. func NewTHash(name string, h func() hash.Hash) THash { return simpleTHash{n: name, h: h} } type tHashConfig struct { name string hash crypto.Hash } var ( // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. 
standardHashConfigs = map[string]tHashConfig{ "sha256": {name: "sha256", hash: crypto.SHA256}, "sha512": {name: "sha512", hash: crypto.SHA512}, } ) // DefaultTHash is default TarSum hashing algorithm - "sha256". var DefaultTHash = NewTHash("sha256", sha256.New) type simpleTHash struct { n string h func() hash.Hash } func (sth simpleTHash) Name() string { return sth.n } func (sth simpleTHash) Hash() hash.Hash { return sth.h() } func (ts *tarSum) encodeHeader(h *tar.Header) error { for _, elem := range ts.headerSelector.selectHeaders(h) { if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { return err } } return nil } func (ts *tarSum) initTarSum() error { ts.bufTar = bytes.NewBuffer([]byte{}) ts.bufWriter = bytes.NewBuffer([]byte{}) ts.tarR = tar.NewReader(ts.Reader) ts.tarW = tar.NewWriter(ts.bufTar) if !ts.DisableCompression { ts.writer = gzip.NewWriter(ts.bufWriter) } else { ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} } if ts.tHash == nil { ts.tHash = DefaultTHash } ts.h = ts.tHash.Hash() ts.h.Reset() ts.first = true ts.sums = FileInfoSums{} return nil } func (ts *tarSum) Read(buf []byte) (int, error) { if ts.finished { return ts.bufWriter.Read(buf) } if len(ts.bufData) < len(buf) { switch { case len(buf) <= buf8K: ts.bufData = make([]byte, buf8K) case len(buf) <= buf16K: ts.bufData = make([]byte, buf16K) case len(buf) <= buf32K: ts.bufData = make([]byte, buf32K) default: ts.bufData = make([]byte, len(buf)) } } buf2 := ts.bufData[:len(buf)] n, err := ts.tarR.Read(buf2) if err != nil { if err == io.EOF { if _, err := ts.h.Write(buf2[:n]); err != nil { return 0, err } if !ts.first { ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) ts.fileCounter++ ts.h.Reset() } else { ts.first = false } currentHeader, err := ts.tarR.Next() if err != nil { if err == io.EOF { if err := ts.tarW.Close(); err != nil { return 0, err } if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { 
return 0, err } if err := ts.writer.Close(); err != nil { return 0, err } ts.finished = true return n, nil } return n, err } ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") if err := ts.encodeHeader(currentHeader); err != nil { return 0, err } if err := ts.tarW.WriteHeader(currentHeader); err != nil { return 0, err } if _, err := ts.tarW.Write(buf2[:n]); err != nil { return 0, err } ts.tarW.Flush() if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } ts.writer.Flush() return ts.bufWriter.Read(buf) } return n, err } // Filling the hash buffer if _, err = ts.h.Write(buf2[:n]); err != nil { return 0, err } // Filling the tar writer if _, err = ts.tarW.Write(buf2[:n]); err != nil { return 0, err } ts.tarW.Flush() // Filling the output writer if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } ts.writer.Flush() return ts.bufWriter.Read(buf) } func (ts *tarSum) Sum(extra []byte) string { ts.sums.SortBySums() h := ts.tHash.Hash() if extra != nil { h.Write(extra) } for _, fis := range ts.sums { h.Write([]byte(fis.Sum())) } checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) return checksum } func (ts *tarSum) GetSums() FileInfoSums { return ts.sums } docker-1.10.3/pkg/tarsum/tarsum_spec.md000066400000000000000000000214021267010174400200070ustar00rootroot00000000000000page_title: TarSum checksum specification page_description: Documentation for algorithms used in the TarSum checksum calculation page_keywords: docker, checksum, validation, tarsum # TarSum Checksum Specification ## Abstract This document describes the algorithms used in performing the TarSum checksum calculation on filesystem layers, the need for this method over existing methods, and the versioning of this calculation. ## Warning This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. 
This is _not_ a cryptographic attestation, and should not be considered secure. ## Introduction The transportation of filesystems, regarding Docker, is done with tar(1) archives. There are a variety of tar serialization formats [2], and a key concern here is ensuring a repeatable checksum given a set of inputs from a generic tar archive. Types of transportation include distribution to and from a registry endpoint, saving and loading through commands or Docker daemon APIs, transferring the build context from client to Docker daemon, and committing the filesystem of a container to become an image. As tar archives are used for transit, but not preserved in many situations, the focus of the algorithm is to ensure the integrity of the preserved filesystem, while maintaining a deterministic accountability. This includes neither constraining the ordering or manipulation of the files during the creation or unpacking of the archive, nor include additional metadata state about the file system attributes. ## Intended Audience This document is outlining the methods used for consistent checksum calculation for filesystems transported via tar archives. Auditing these methodologies is an open and iterative process. This document should accommodate the review of source code. Ultimately, this document should be the starting point of further refinements to the algorithm and its future versions. ## Concept The checksum mechanism must ensure the integrity and assurance of the filesystem payload. ## Checksum Algorithm Profile A checksum mechanism must define the following operations and attributes: * Associated hashing cipher - used to checksum each file payload and attribute information. * Checksum list - each file of the filesystem archive has its checksum calculated from the payload and attributes of the file. The final checksum is calculated from this list, with specific ordering. 
* Version - as the algorithm adapts to requirements, there are behaviors of the algorithm to manage by versioning. * Archive being calculated - the tar archive having its checksum calculated ## Elements of TarSum checksum The calculated sum output is a text string. The elements included in the output of the calculated sum comprise the information needed for validation of the sum (TarSum version and hashing cipher used) and the expected checksum in hexadecimal form. There are two delimiters used: * '+' separates TarSum version from hashing cipher * ':' separates calculation mechanics from expected hash Example: ``` "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" | | \ | | | \ | |_version_|_cipher__|__ | | \ | |_calculation_mechanics_|______________________expected_sum_______________________| ``` ## Versioning Versioning was introduced [0] to accommodate differences in calculation needed, and ability to maintain reverse compatibility. The general algorithm will be describe further in the 'Calculation'. ### Version0 This is the initial version of TarSum. Its element in the TarSum checksum string is `tarsum`. ### Version1 Its element in the TarSum checksum is `tarsum.v1`. The notable changes in this version: * Exclusion of file `mtime` from the file information headers, in each file checksum calculation * Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax tar file info headers) keys and values in each file checksum calculation ### VersionDev *Do not use unless validating refinements to the checksum algorithm* Its element in the TarSum checksum is `tarsum.dev`. This is a floating place holder for a next version and grounds for testing changes. The methods used for calculation are subject to change without notice, and this version is for testing and not for production use. ## Ciphers The official default and standard hashing cipher used in the calculation mechanic is `sha256`. 
This refers to SHA256 hash algorithm as defined in FIPS 180-4. Though the TarSum algorithm itself is not exclusively bound to the single hashing cipher `sha256`, support for alternate hashing ciphers was later added [1]. Use cases for alternate cipher could include future-proofing TarSum checksum format and using faster cipher hashes for tar filesystem checksums. ## Calculation ### Requirement As mentioned earlier, the calculation is such that it takes into consideration the lifecycle of the tar archive. In that the tar archive is not an immutable, permanent artifact. Otherwise options like relying on a known hashing cipher checksum of the archive itself would be reliable enough. The tar archive of the filesystem is used as a transportation medium for Docker images, and the archive is discarded once its contents are extracted. Therefore, for consistent validation items such as order of files in the tar archive and time stamps are subject to change once an image is received. ### Process The method is typically iterative due to reading tar info headers from the archive stream, though this is not a strict requirement. #### Files Each file in the tar archive have their contents (headers and body) checksummed individually using the designated associated hashing cipher. The ordered headers of the file are written to the checksum calculation first, and then the payload of the file body. The resulting checksum of the file is appended to the list of file sums. The sum is encoded as a string of the hexadecimal digest. Additionally, the file name and position in the archive is kept as reference for special ordering. 
#### Headers The following headers are read, in this order ( and the corresponding representation of its value): * 'name' - string * 'mode' - string of the base10 integer * 'uid' - string of the integer * 'gid' - string of the integer * 'size' - string of the integer * 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC * 'typeflag' - string of the char * 'linkname' - string * 'uname' - string * 'gname' - string * 'devmajor' - string of the integer * 'devminor' - string of the integer For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax headers) included after the above list. These xattrs key/values are first sorted by the keys. #### Header Format The ordered headers are written to the hash in the format of "{.key}{.value}" with no newline. #### Body After the order headers of the file have been added to the checksum for the file, the body of the file is written to the hash. #### List of file sums The list of file sums is sorted by the string of the hexadecimal digest. If there are two files in the tar with matching paths, the order of occurrence for that path is reflected for the sums of the corresponding file header and body. #### Final Checksum Begin with a fresh or initial state of the associated hash cipher. If there is additional payload to include in the TarSum calculation for the archive, it is written first. Then each checksum from the ordered list of file sums is written to the hash. The resulting digest is formatted per the Elements of TarSum checksum, including the TarSum version, the associated hash cipher and the hexadecimal encoded checksum digest. ## Security Considerations The initial version of TarSum has undergone one update that could invalidate handcrafted tar archives. The tar archive format supports appending of files with same names as prior files in the archive. The latter file will clobber the prior file of the same path. 
Due to this the algorithm now accounts for files with matching paths, and orders the list of file sums accordingly [3]. ## Footnotes * [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 * [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e * [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 * [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 ## Acknowledgements Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the TarSum calculation. docker-1.10.3/pkg/tarsum/tarsum_test.go000066400000000000000000000407031267010174400200460ustar00rootroot00000000000000package tarsum import ( "archive/tar" "bytes" "compress/gzip" "crypto/md5" "crypto/rand" "crypto/sha1" "crypto/sha256" "crypto/sha512" "encoding/hex" "fmt" "io" "io/ioutil" "os" "strings" "testing" ) type testLayer struct { filename string options *sizedOptions jsonfile string gzip bool tarsum string version Version hash THash } var testLayers = []testLayer{ { filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", version: Version0, tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, { filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", version: VersionDev, tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, { filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", gzip: true, tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, { 
// Tests existing version of TarSum when xattrs are present filename: "testdata/xattr/layer.tar", jsonfile: "testdata/xattr/json", version: Version0, tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, { // Tests next version of TarSum when xattrs are present filename: "testdata/xattr/layer.tar", jsonfile: "testdata/xattr/json", version: VersionDev, tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, { filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, { // this tar has two files with the same path filename: "testdata/collision/collision-0.tar", tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, { // this tar has the same two files (with the same path), but reversed order. 
ensuring is has different hash than above filename: "testdata/collision/collision-1.tar", tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, { // this tar has newer of collider-0.tar, ensuring is has different hash filename: "testdata/collision/collision-2.tar", tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, { // this tar has newer of collider-1.tar, ensuring is has different hash filename: "testdata/collision/collision-3.tar", tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", hash: md5THash, }, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", hash: sha1Hash, }, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", hash: sha224Hash, }, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", hash: sha384Hash, }, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", hash: sha512Hash, }, } type sizedOptions struct { num int64 size int64 isRand bool realFile bool } // make a tar: // * num is the number of files the tar should have // * size is the bytes per file // * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) // * realFile will write to a TempFile, instead of an in memory buffer func sizedTar(opts sizedOptions) io.Reader { var ( fh 
io.ReadWriter err error ) if opts.realFile { fh, err = ioutil.TempFile("", "tarsum") if err != nil { return nil } } else { fh = bytes.NewBuffer([]byte{}) } tarW := tar.NewWriter(fh) defer tarW.Close() for i := int64(0); i < opts.num; i++ { err := tarW.WriteHeader(&tar.Header{ Name: fmt.Sprintf("/testdata%d", i), Mode: 0755, Uid: 0, Gid: 0, Size: opts.size, }) if err != nil { return nil } var rBuf []byte if opts.isRand { rBuf = make([]byte, 8) _, err = rand.Read(rBuf) if err != nil { return nil } } else { rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} } for i := int64(0); i < opts.size/int64(8); i++ { tarW.Write(rBuf) } } return fh } func emptyTarSum(gzip bool) (TarSum, error) { reader, writer := io.Pipe() tarWriter := tar.NewWriter(writer) // Immediately close tarWriter and write-end of the // Pipe in a separate goroutine so we don't block. go func() { tarWriter.Close() writer.Close() }() return NewTarSum(reader, !gzip, Version0) } // Test errors on NewTarsumForLabel func TestNewTarSumForLabelInvalid(t *testing.T) { reader := strings.NewReader("") if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { t.Fatalf("Expected an error, got nothing.") } if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { t.Fatalf("Expected an error, got nothing.") } if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { t.Fatalf("Expected an error, got nothing.") } } func TestNewTarSumForLabel(t *testing.T) { layer := testLayers[0] reader, err := os.Open(layer.filename) if err != nil { t.Fatal(err) } label := strings.Split(layer.tarsum, ":")[0] ts, err := NewTarSumForLabel(reader, false, label) if err != nil { t.Fatal(err) } // Make sure it actually worked by reading a little bit of it nbByteToRead := 8 * 1024 dBuf := make([]byte, nbByteToRead) _, err = ts.Read(dBuf) if err != nil { t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) } } // TestEmptyTar tests that tarsum does not fail to read an empty 
tar // and correctly returns the hex digest of an empty hash. func TestEmptyTar(t *testing.T) { // Test without gzip. ts, err := emptyTarSum(false) if err != nil { t.Fatal(err) } zeroBlock := make([]byte, 1024) buf := new(bytes.Buffer) n, err := io.Copy(buf, ts) if err != nil { t.Fatal(err) } if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) } expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) resultSum := ts.Sum(nil) if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } // Test with gzip. ts, err = emptyTarSum(true) if err != nil { t.Fatal(err) } buf.Reset() n, err = io.Copy(buf, ts) if err != nil { t.Fatal(err) } bufgz := new(bytes.Buffer) gz := gzip.NewWriter(bufgz) n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) gz.Close() gzBytes := bufgz.Bytes() if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) } resultSum = ts.Sum(nil) if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } // Test without ever actually writing anything. 
if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { t.Fatal(err) } resultSum = ts.Sum(nil) if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } } var ( md5THash = NewTHash("md5", md5.New) sha1Hash = NewTHash("sha1", sha1.New) sha224Hash = NewTHash("sha224", sha256.New224) sha384Hash = NewTHash("sha384", sha512.New384) sha512Hash = NewTHash("sha512", sha512.New) ) // Test all the build-in read size : buf8K, buf16K, buf32K and more func TestTarSumsReadSize(t *testing.T) { // Test always on the same layer (that is big enough) layer := testLayers[0] for i := 0; i < 5; i++ { reader, err := os.Open(layer.filename) if err != nil { t.Fatal(err) } ts, err := NewTarSum(reader, false, layer.version) if err != nil { t.Fatal(err) } // Read and discard bytes so that it populates sums nbByteToRead := (i + 1) * 8 * 1024 dBuf := make([]byte, nbByteToRead) _, err = ts.Read(dBuf) if err != nil { t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) continue } } } func TestTarSums(t *testing.T) { for _, layer := range testLayers { var ( fh io.Reader err error ) if len(layer.filename) > 0 { fh, err = os.Open(layer.filename) if err != nil { t.Errorf("failed to open %s: %s", layer.filename, err) continue } } else if layer.options != nil { fh = sizedTar(*layer.options) } else { // What else is there to test? t.Errorf("what to do with %#v", layer) continue } if file, ok := fh.(*os.File); ok { defer file.Close() } var ts TarSum if layer.hash == nil { // double negatives! 
ts, err = NewTarSum(fh, !layer.gzip, layer.version) } else { ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) } if err != nil { t.Errorf("%q :: %q", err, layer.filename) continue } // Read variable number of bytes to test dynamic buffer dBuf := make([]byte, 1) _, err = ts.Read(dBuf) if err != nil { t.Errorf("failed to read 1B from %s: %s", layer.filename, err) continue } dBuf = make([]byte, 16*1024) _, err = ts.Read(dBuf) if err != nil { t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) continue } // Read and discard remaining bytes _, err = io.Copy(ioutil.Discard, ts) if err != nil { t.Errorf("failed to copy from %s: %s", layer.filename, err) continue } var gotSum string if len(layer.jsonfile) > 0 { jfh, err := os.Open(layer.jsonfile) if err != nil { t.Errorf("failed to open %s: %s", layer.jsonfile, err) continue } buf, err := ioutil.ReadAll(jfh) if err != nil { t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) continue } gotSum = ts.Sum(buf) } else { gotSum = ts.Sum(nil) } if layer.tarsum != gotSum { t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) } var expectedHashName string if layer.hash != nil { expectedHashName = layer.hash.Name() } else { expectedHashName = DefaultTHash.Name() } if expectedHashName != ts.Hash().Name() { t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) } } } func TestIteration(t *testing.T) { headerTests := []struct { expectedSum string // TODO(vbatts) it would be nice to get individual sums of each version Version hdr *tar.Header data []byte }{ { "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", Version0, &tar.Header{ Name: "file.txt", Size: 0, Typeflag: tar.TypeReg, Devminor: 0, Devmajor: 0, }, []byte(""), }, { "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", VersionDev, &tar.Header{ Name: "file.txt", Size: 0, Typeflag: tar.TypeReg, Devminor: 0, Devmajor: 0, }, []byte(""), }, { 
"tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", VersionDev, &tar.Header{ Name: "another.txt", Uid: 1000, Gid: 1000, Uname: "slartibartfast", Gname: "users", Size: 4, Typeflag: tar.TypeReg, Devminor: 0, Devmajor: 0, }, []byte("test"), }, { "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", VersionDev, &tar.Header{ Name: "xattrs.txt", Uid: 1000, Gid: 1000, Uname: "slartibartfast", Gname: "users", Size: 4, Typeflag: tar.TypeReg, Xattrs: map[string]string{ "user.key1": "value1", "user.key2": "value2", }, }, []byte("test"), }, { "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", VersionDev, &tar.Header{ Name: "xattrs.txt", Uid: 1000, Gid: 1000, Uname: "slartibartfast", Gname: "users", Size: 4, Typeflag: tar.TypeReg, Xattrs: map[string]string{ "user.KEY1": "value1", // adding different case to ensure different sum "user.key2": "value2", }, }, []byte("test"), }, { "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", Version0, &tar.Header{ Name: "xattrs.txt", Uid: 1000, Gid: 1000, Uname: "slartibartfast", Gname: "users", Size: 4, Typeflag: tar.TypeReg, Xattrs: map[string]string{ "user.NOT": "CALCULATED", }, }, []byte("test"), }, } for _, htest := range headerTests { s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) if err != nil { t.Fatal(err) } if s != htest.expectedSum { t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) } } } func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { buf := bytes.NewBuffer(nil) // first build our test tar tw := tar.NewWriter(buf) if err := tw.WriteHeader(h); err != nil { return "", err } if _, err := tw.Write(data); err != nil { return "", err } tw.Close() ts, err := NewTarSum(buf, true, v) if err != nil { return "", err } tr := tar.NewReader(ts) for { hdr, err := tr.Next() if hdr == nil || err == io.EOF { // Signals the end of the archive. 
break } if err != nil { return "", err } if _, err = io.Copy(ioutil.Discard, tr); err != nil { return "", err } } return ts.Sum(nil), nil } func Benchmark9kTar(b *testing.B) { buf := bytes.NewBuffer([]byte{}) fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") if err != nil { b.Error(err) return } n, err := io.Copy(buf, fh) fh.Close() reader := bytes.NewReader(buf.Bytes()) b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { reader.Seek(0, 0) ts, err := NewTarSum(reader, true, Version0) if err != nil { b.Error(err) return } io.Copy(ioutil.Discard, ts) ts.Sum(nil) } } func Benchmark9kTarGzip(b *testing.B) { buf := bytes.NewBuffer([]byte{}) fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") if err != nil { b.Error(err) return } n, err := io.Copy(buf, fh) fh.Close() reader := bytes.NewReader(buf.Bytes()) b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { reader.Seek(0, 0) ts, err := NewTarSum(reader, false, Version0) if err != nil { b.Error(err) return } io.Copy(ioutil.Discard, ts) ts.Sum(nil) } } // this is a single big file in the tar archive func Benchmark1mbSingleFileTar(b *testing.B) { benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) } // this is a single big file in the tar archive func Benchmark1mbSingleFileTarGzip(b *testing.B) { benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) } // this is 1024 1k files in the tar archive func Benchmark1kFilesTar(b *testing.B) { benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) } // this is 1024 1k files in the tar archive func Benchmark1kFilesTarGzip(b *testing.B) { benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) } func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { var fh *os.File tarReader := sizedTar(opts) if br, ok := tarReader.(*os.File); ok { fh = br } defer os.Remove(fh.Name()) defer fh.Close() b.SetBytes(opts.size * opts.num) 
b.ResetTimer() for i := 0; i < b.N; i++ { ts, err := NewTarSum(fh, !isGzip, Version0) if err != nil { b.Error(err) return } io.Copy(ioutil.Discard, ts) ts.Sum(nil) fh.Seek(0, 0) } } docker-1.10.3/pkg/tarsum/testdata/000077500000000000000000000000001267010174400167525ustar00rootroot00000000000000docker-1.10.3/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/000077500000000000000000000000001267010174400275635ustar00rootroot00000000000000json000066400000000000000000000026061267010174400304040ustar00rootroot00000000000000docker-1.10.3/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' 
/etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} layer.tar000066400000000000000000000220001267010174400313220ustar00rootroot00000000000000docker-1.10.3/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457dev/0040755000000000000000000000000012320410536010022 5ustar0000000000000000dev/core0120777000000000000000000000000012320410536012741 2/proc/kcoreustar0000000000000000dev/stderr0120777000000000000000000000000012320410536013674 2/proc/self/fd/2ustar0000000000000000dev/stdout0120777000000000000000000000000012320410536013712 2/proc/self/fd/1ustar0000000000000000dev/fd0120777000000000000000000000000012320410536012621 2/proc/self/fdustar0000000000000000dev/ptmx0120777000000000000000000000000012320410536012452 2pts/ptmxustar0000000000000000dev/stdin0120777000000000000000000000000012320410536013510 2/proc/self/fd/0ustar0000000000000000etc/0040755000000000000000000000000012320410536010017 5ustar0000000000000000etc/sudoers0100440000000000000000000000642412320410536011423 0ustar0000000000000000## Sudoers allows particular users to run various commands as ## the root user, without needing the root password. 
## ## Examples are provided at the bottom of the file for collections ## of related commands, which can then be delegated out to particular ## users or groups. ## ## This file must be edited with the 'visudo' command. ## Host Aliases ## Groups of machines. You may prefer to use hostnames (perhaps using ## wildcards for entire domains) or IP addresses instead. # Host_Alias FILESERVERS = fs1, fs2 # Host_Alias MAILSERVERS = smtp, smtp2 ## User Aliases ## These aren't often necessary, as you can use regular groups ## (ie, from files, LDAP, NIS, etc) in this file - just use %groupname ## rather than USERALIAS # User_Alias ADMINS = jsmith, mikem ## Command Aliases ## These are groups of related commands... ## Networking # Cmnd_Alias NETWORKING = /sbin/route, /sbin/ifconfig, /bin/ping, /sbin/dhclient, /usr/bin/net, /sbin/iptables, /usr/bin/rfcomm, /usr/bin/wvdial, /sbin/iwconfig, /sbin/mii-tool ## Installation and management of software # Cmnd_Alias SOFTWARE = /bin/rpm, /usr/bin/up2date, /usr/bin/yum ## Services # Cmnd_Alias SERVICES = /sbin/service, /sbin/chkconfig ## Updating the locate database # Cmnd_Alias LOCATE = /usr/bin/updatedb ## Storage # Cmnd_Alias STORAGE = /sbin/fdisk, /sbin/sfdisk, /sbin/parted, /sbin/partprobe, /bin/mount, /bin/umount ## Delegating permissions # Cmnd_Alias DELEGATING = /usr/sbin/visudo, /bin/chown, /bin/chmod, /bin/chgrp ## Processes # Cmnd_Alias PROCESSES = /bin/nice, /bin/kill, /usr/bin/kill, /usr/bin/killall ## Drivers # Cmnd_Alias DRIVERS = /sbin/modprobe # Defaults specification # # Disable "ssh hostname sudo ", because it will show the password in clear. # You have to run "ssh -t hostname sudo ". 
# Defaults requiretty Defaults env_reset Defaults env_keep = "COLORS DISPLAY HOSTNAME HISTSIZE INPUTRC KDEDIR LS_COLORS" Defaults env_keep += "MAIL PS1 PS2 QTDIR USERNAME LANG LC_ADDRESS LC_CTYPE" Defaults env_keep += "LC_COLLATE LC_IDENTIFICATION LC_MEASUREMENT LC_MESSAGES" Defaults env_keep += "LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE" Defaults env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY" Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin ## Next comes the main part: which users can run what software on ## which machines (the sudoers file can be shared between multiple ## systems). ## Syntax: ## ## user MACHINE=COMMANDS ## ## The COMMANDS section may have other options added to it. ## ## Allow root to run any commands anywhere root ALL=(ALL) ALL ## Allows members of the 'sys' group to run networking, software, ## service management apps and more. # %sys ALL = NETWORKING, SOFTWARE, SERVICES, STORAGE, DELEGATING, PROCESSES, LOCATE, DRIVERS ## Allows people in group wheel to run all commands %wheel ALL=(ALL) NOPASSWD: ALL ## Same thing without a password # %wheel ALL=(ALL) NOPASSWD: ALL ## Allows members of the users group to mount and unmount the ## cdrom as root # %users ALL=/sbin/mount /mnt/cdrom, /sbin/umount /mnt/cdrom ## Allows members of the users group to shutdown this system # %users localhost=/sbin/shutdown -h now ## Read drop-in files from /etc/sudoers.d (the # here does not mean a comment) #includedir /etc/sudoers.d docker-1.10.3/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/000077500000000000000000000000001267010174400272315ustar00rootroot00000000000000json000066400000000000000000000010551267010174400300470ustar00rootroot00000000000000docker-1.10.3/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from 
-","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0} layer.tar000066400000000000000000000030001267010174400307670ustar00rootroot00000000000000docker-1.10.3/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158./0040755000000000000000000000000012156431635007413 5ustar0000000000000000docker-1.10.3/pkg/tarsum/testdata/collision/000077500000000000000000000000001267010174400207455ustar00rootroot00000000000000docker-1.10.3/pkg/tarsum/testdata/collision/collision-0.tar000066400000000000000000000240001267010174400236010ustar00rootroot00000000000000file0000644000175000017500000000000612402144462011303 0ustar vbattsvbattsfarts file0000644000175000017500000000000612402144551011302 0ustar vbattsvbattsHOWDY docker-1.10.3/pkg/tarsum/testdata/collision/collision-1.tar000066400000000000000000000240001267010174400236020ustar00rootroot00000000000000file0000644000175000017500000000000612402144551011302 0ustar vbattsvbattsHOWDY file0000644000175000017500000000000612402144462011303 0ustar vbattsvbattsfarts docker-1.10.3/pkg/tarsum/testdata/collision/collision-2.tar000066400000000000000000000240001267010174400236030ustar00rootroot00000000000000file0000644000175000017500000000000612402144551011302 0ustar vbattsvbattsHOWDY docker-1.10.3/pkg/tarsum/testdata/collision/collision-3.tar000066400000000000000000000240001267010174400236040ustar00rootroot00000000000000file0000644000175000017500000000000612402144462011303 0ustar vbattsvbattsfarts 
docker-1.10.3/pkg/tarsum/testdata/xattr/000077500000000000000000000000001267010174400201145ustar00rootroot00000000000000docker-1.10.3/pkg/tarsum/testdata/xattr/json000066400000000000000000000026431267010174400210150ustar00rootroot00000000000000{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap ./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0} docker-1.10.3/pkg/tarsum/testdata/xattr/layer.tar000066400000000000000000000050001267010174400217330ustar00rootroot00000000000000PaxHeaders.12099/file0000000000000000000000000000007112335444166012715 xustar000000000000000057 SCHILY.xattr.security.capability= file0100644000000000000000000000000012335444166010105 
0ustar0000000000000000docker-1.10.3/pkg/tarsum/versioning.go000066400000000000000000000100241267010174400176500ustar00rootroot00000000000000package tarsum import ( "archive/tar" "errors" "sort" "strconv" "strings" ) // Version is used for versioning of the TarSum algorithm // based on the prefix of the hash used // i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" type Version int // Prefix of "tarsum" const ( Version0 Version = iota Version1 // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation VersionDev ) // VersionLabelForChecksum returns the label for the given tarsum // checksum, i.e., everything before the first `+` character in // the string or an empty string if no label separator is found. func VersionLabelForChecksum(checksum string) string { // Checksums are in the form: {versionLabel}+{hashID}:{hex} sepIndex := strings.Index(checksum, "+") if sepIndex < 0 { return "" } return checksum[:sepIndex] } // GetVersions gets a list of all known tarsum versions. func GetVersions() []Version { v := []Version{} for k := range tarSumVersions { v = append(v, k) } return v } var ( tarSumVersions = map[Version]string{ Version0: "tarsum", Version1: "tarsum.v1", VersionDev: "tarsum.dev", } tarSumVersionsByName = map[string]Version{ "tarsum": Version0, "tarsum.v1": Version1, "tarsum.dev": VersionDev, } ) func (tsv Version) String() string { return tarSumVersions[tsv] } // GetVersionFromTarsum returns the Version from the provided string. 
func GetVersionFromTarsum(tarsum string) (Version, error) { tsv := tarsum if strings.Contains(tarsum, "+") { tsv = strings.SplitN(tarsum, "+", 2)[0] } for v, s := range tarSumVersions { if s == tsv { return v, nil } } return -1, ErrNotVersion } // Errors that may be returned by functions in this package var ( ErrNotVersion = errors.New("string does not include a TarSum Version") ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") ) // tarHeaderSelector is the interface which different versions // of tarsum should use for selecting and ordering tar headers // for each item in the archive. type tarHeaderSelector interface { selectHeaders(h *tar.Header) (orderedHeaders [][2]string) } type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { return f(h) } func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { return [][2]string{ {"name", h.Name}, {"mode", strconv.FormatInt(h.Mode, 10)}, {"uid", strconv.Itoa(h.Uid)}, {"gid", strconv.Itoa(h.Gid)}, {"size", strconv.FormatInt(h.Size, 10)}, {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, {"typeflag", string([]byte{h.Typeflag})}, {"linkname", h.Linkname}, {"uname", h.Uname}, {"gname", h.Gname}, {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, {"devminor", strconv.FormatInt(h.Devminor, 10)}, } } func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { // Get extended attributes. xAttrKeys := make([]string, len(h.Xattrs)) for k := range h.Xattrs { xAttrKeys = append(xAttrKeys, k) } sort.Strings(xAttrKeys) // Make the slice with enough capacity to hold the 11 basic headers // we want from the v0 selector plus however many xattrs we have. orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) // Copy all headers from v0 excluding the 'mtime' header (the 5th element). v0headers := v0TarHeaderSelect(h) orderedHeaders = append(orderedHeaders, v0headers[0:5]...) 
orderedHeaders = append(orderedHeaders, v0headers[6:]...) // Finally, append the sorted xattrs. for _, k := range xAttrKeys { orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) } return } var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ Version0: v0TarHeaderSelect, Version1: v1TarHeaderSelect, VersionDev: v1TarHeaderSelect, } func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { headerSelector, ok := registeredHeaderSelectors[v] if !ok { return nil, ErrVersionNotImplemented } return headerSelector, nil } docker-1.10.3/pkg/tarsum/versioning_test.go000066400000000000000000000046341267010174400207210ustar00rootroot00000000000000package tarsum import ( "testing" ) func TestVersionLabelForChecksum(t *testing.T) { version := VersionLabelForChecksum("tarsum+sha256:deadbeef") if version != "tarsum" { t.Fatalf("Version should have been 'tarsum', was %v", version) } version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") if version != "tarsum.v1" { t.Fatalf("Version should have been 'tarsum.v1', was %v", version) } version = VersionLabelForChecksum("something+somethingelse") if version != "something" { t.Fatalf("Version should have been 'something', was %v", version) } version = VersionLabelForChecksum("invalidChecksum") if version != "" { t.Fatalf("Version should have been empty, was %v", version) } } func TestVersion(t *testing.T) { expected := "tarsum" var v Version if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } expected = "tarsum.v1" v = 1 if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } expected = "tarsum.dev" v = 2 if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } } func TestGetVersion(t *testing.T) { testSet := []struct { Str string Expected Version }{ {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, {"tarsum+sha256", Version0}, {"tarsum", Version0}, {"tarsum.dev", 
VersionDev}, {"tarsum.dev+sha256:deadbeef", VersionDev}, } for _, ts := range testSet { v, err := GetVersionFromTarsum(ts.Str) if err != nil { t.Fatalf("%q : %s", err, ts.Str) } if v != ts.Expected { t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) } } // test one that does not exist, to ensure it errors str := "weak+md5:abcdeabcde" _, err := GetVersionFromTarsum(str) if err != ErrNotVersion { t.Fatalf("%q : %s", err, str) } } func TestGetVersions(t *testing.T) { expected := []Version{ Version0, Version1, VersionDev, } versions := GetVersions() if len(versions) != len(expected) { t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) } if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { t.Fatalf("Expected [%v], got [%v]", expected, versions) } } func containsVersion(versions []Version, version Version) bool { for _, v := range versions { if v == version { return true } } return false } docker-1.10.3/pkg/tarsum/writercloser.go000066400000000000000000000004041267010174400202120ustar00rootroot00000000000000package tarsum import ( "io" ) type writeCloseFlusher interface { io.WriteCloser Flush() error } type nopCloseFlusher struct { io.Writer } func (n *nopCloseFlusher) Close() error { return nil } func (n *nopCloseFlusher) Flush() error { return nil } docker-1.10.3/pkg/term/000077500000000000000000000000001267010174400145755ustar00rootroot00000000000000docker-1.10.3/pkg/term/ascii.go000066400000000000000000000020261267010174400162140ustar00rootroot00000000000000package term import ( "fmt" "strings" ) // ASCII list the possible supported ASCII key sequence var ASCII = []string{ "ctrl-@", "ctrl-a", "ctrl-b", "ctrl-c", "ctrl-d", "ctrl-e", "ctrl-f", "ctrl-g", "ctrl-h", "ctrl-i", "ctrl-j", "ctrl-k", "ctrl-l", "ctrl-m", "ctrl-n", "ctrl-o", "ctrl-p", "ctrl-q", "ctrl-r", "ctrl-s", "ctrl-t", "ctrl-u", "ctrl-v", "ctrl-w", "ctrl-x", "ctrl-y", "ctrl-z", "ctrl-[", 
"ctrl-\\", "ctrl-]", "ctrl-^", "ctrl-_", } // ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. func ToBytes(keys string) ([]byte, error) { codes := []byte{} next: for _, key := range strings.Split(keys, ",") { if len(key) != 1 { for code, ctrl := range ASCII { if ctrl == key { codes = append(codes, byte(code)) continue next } } if key == "DEL" { codes = append(codes, 127) } else { return nil, fmt.Errorf("Unknown character: '%s'", key) } } else { codes = append(codes, byte(key[0])) } } return codes, nil } docker-1.10.3/pkg/term/ascii_test.go000066400000000000000000000017701267010174400172600ustar00rootroot00000000000000package term import "testing" func TestToBytes(t *testing.T) { codes, err := ToBytes("ctrl-a,a") if err != nil { t.Fatal(err) } if len(codes) != 2 { t.Fatalf("Expected 2 codes, got %d", len(codes)) } if codes[0] != 1 || codes[1] != 97 { t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) } codes, err = ToBytes("shift-z") if err == nil { t.Fatalf("Expected error, got none") } codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") if err != nil { t.Fatal(err) } if len(codes) != 4 { t.Fatalf("Expected 4 codes, got %d", len(codes)) } if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) } codes, err = ToBytes("DEL,+") if err != nil { t.Fatal(err) } if len(codes) != 2 { t.Fatalf("Expected 2 codes, got %d", len(codes)) } if codes[0] != 127 || codes[1] != 43 { t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1]) } } docker-1.10.3/pkg/term/tc_linux_cgo.go000066400000000000000000000023201267010174400175760ustar00rootroot00000000000000// +build linux,cgo package term import ( "syscall" "unsafe" ) // #include import "C" // Termios is the Unix API for terminal I/O. 
// It is passthgrouh for syscall.Termios in order to make it portable with // other platforms where it is not available or handled differently. type Termios syscall.Termios // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { var oldState State if err := tcget(fd, &oldState.termios); err != 0 { return nil, err } newState := oldState.termios C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) newState.Oflag = newState.Oflag | C.OPOST if err := tcset(fd, &newState); err != 0 { return nil, err } return &oldState, nil } func tcget(fd uintptr, p *Termios) syscall.Errno { ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) if ret != 0 { return err.(syscall.Errno) } return 0 } func tcset(fd uintptr, p *Termios) syscall.Errno { ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) if ret != 0 { return err.(syscall.Errno) } return 0 } docker-1.10.3/pkg/term/tc_other.go000066400000000000000000000006371267010174400167410ustar00rootroot00000000000000// +build !windows // +build !linux !cgo package term import ( "syscall" "unsafe" ) func tcget(fd uintptr, p *Termios) syscall.Errno { _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) return err } func tcset(fd uintptr, p *Termios) syscall.Errno { _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) return err } docker-1.10.3/pkg/term/term.go000066400000000000000000000062551267010174400161030ustar00rootroot00000000000000// +build !windows // Package term provides provides structures and helper functions to work with // terminal (state, sizes). package term import ( "errors" "io" "os" "os/signal" "syscall" "unsafe" ) var ( // ErrInvalidState is returned if the state of the terminal is invalid. 
ErrInvalidState = errors.New("Invalid terminal state") ) // State represents the state of the terminal. type State struct { termios Termios } // Winsize represents the size of the terminal window. type Winsize struct { Height uint16 Width uint16 x uint16 y uint16 } // StdStreams returns the standard streams (stdin, stdout, stedrr). func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { return os.Stdin, os.Stdout, os.Stderr } // GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. func GetFdInfo(in interface{}) (uintptr, bool) { var inFd uintptr var isTerminalIn bool if file, ok := in.(*os.File); ok { inFd = file.Fd() isTerminalIn = IsTerminal(inFd) } return inFd, isTerminalIn } // GetWinsize returns the window size based on the specified file descriptor. func GetWinsize(fd uintptr) (*Winsize, error) { ws := &Winsize{} _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) // Skip errno = 0 if err == 0 { return ws, nil } return ws, err } // SetWinsize tries to set the specified window size for the specified file descriptor. func SetWinsize(fd uintptr, ws *Winsize) error { _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) // Skip errno = 0 if err == 0 { return nil } return err } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { var termios Termios return tcget(fd, &termios) == 0 } // RestoreTerminal restores the terminal connected to the given file descriptor // to a previous state. func RestoreTerminal(fd uintptr, state *State) error { if state == nil { return ErrInvalidState } if err := tcset(fd, &state.termios); err != 0 { return err } return nil } // SaveState saves the state of the terminal connected to the given file descriptor. 
func SaveState(fd uintptr) (*State, error) { var oldState State if err := tcget(fd, &oldState.termios); err != 0 { return nil, err } return &oldState, nil } // DisableEcho applies the specified state to the terminal connected to the file // descriptor, with echo disabled. func DisableEcho(fd uintptr, state *State) error { newState := state.termios newState.Lflag &^= syscall.ECHO if err := tcset(fd, &newState); err != 0 { return err } handleInterrupt(fd, state) return nil } // SetRawTerminal puts the terminal connected to the given file descriptor into // raw mode and returns the previous state. func SetRawTerminal(fd uintptr) (*State, error) { oldState, err := MakeRaw(fd) if err != nil { return nil, err } handleInterrupt(fd, oldState) return oldState, err } func handleInterrupt(fd uintptr, state *State) { sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, os.Interrupt) go func() { _ = <-sigchan RestoreTerminal(fd, state) os.Exit(0) }() } docker-1.10.3/pkg/term/term_windows.go000066400000000000000000000170421267010174400176510ustar00rootroot00000000000000// +build windows package term import ( "fmt" "io" "os" "os/signal" "syscall" "github.com/Azure/go-ansiterm/winterm" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/term/windows" ) // State holds the console mode for the terminal. type State struct { mode uint32 } // Winsize is used for window size. type Winsize struct { Height uint16 Width uint16 x uint16 y uint16 } // StdStreams returns the standard streams (stdin, stdout, stedrr). func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { switch { case os.Getenv("ConEmuANSI") == "ON": // The ConEmu terminal emulates ANSI on output streams well. return windows.ConEmuStreams() case os.Getenv("MSYSTEM") != "": // MSYS (mingw) does not emulate ANSI well. 
return windows.ConsoleStreams() default: if useNativeConsole() { return os.Stdin, os.Stdout, os.Stderr } return windows.ConsoleStreams() } } // useNativeConsole determines if the docker client should use the built-in // console which supports ANSI emulation, or fall-back to the golang emulator // (github.com/azure/go-ansiterm). func useNativeConsole() bool { osv, err := system.GetOSVersion() if err != nil { return false } // Native console is not available major version 10 if osv.MajorVersion < 10 { return false } // Must have a late pre-release TP4 build of Windows Server 2016/Windows 10 TH2 or later if osv.Build < 10578 { return false } // Environment variable override if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" { if e == "1" { return true } return false } // Get the handle to stdout stdOutHandle, err := syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE) if err != nil { return false } // Get the console mode from the consoles stdout handle var mode uint32 if err := syscall.GetConsoleMode(stdOutHandle, &mode); err != nil { return false } // Legacy mode does not have native ANSI emulation. // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx const enableVirtualTerminalProcessing = 0x0004 if mode&enableVirtualTerminalProcessing == 0 { return false } // TODO Windows (Post TP4). The native emulator still has issues which // mean it shouldn't be enabled for everyone. Change this next line to true // to change the default to "enable if available". In the meantime, users // can still try it out by using USE_NATIVE_CONSOLE env variable. return false } // GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. func GetFdInfo(in interface{}) (uintptr, bool) { return windows.GetHandleInfo(in) } // GetWinsize returns the window size based on the specified file descriptor. 
func GetWinsize(fd uintptr) (*Winsize, error) { info, err := winterm.GetConsoleScreenBufferInfo(fd) if err != nil { return nil, err } winsize := &Winsize{ Width: uint16(info.Window.Right - info.Window.Left + 1), Height: uint16(info.Window.Bottom - info.Window.Top + 1), x: 0, y: 0} // Note: GetWinsize is called frequently -- uncomment only for excessive details // logrus.Debugf("[windows] GetWinsize: Console(%v)", info.String()) // logrus.Debugf("[windows] GetWinsize: Width(%v), Height(%v), x(%v), y(%v)", winsize.Width, winsize.Height, winsize.x, winsize.y) return winsize, nil } // SetWinsize tries to set the specified window size for the specified file descriptor. func SetWinsize(fd uintptr, ws *Winsize) error { // Ensure the requested dimensions are no larger than the maximum window size info, err := winterm.GetConsoleScreenBufferInfo(fd) if err != nil { return err } if ws.Width == 0 || ws.Height == 0 || ws.Width > uint16(info.MaximumWindowSize.X) || ws.Height > uint16(info.MaximumWindowSize.Y) { return fmt.Errorf("Illegal window size: (%v,%v) -- Maximum allow: (%v,%v)", ws.Width, ws.Height, info.MaximumWindowSize.X, info.MaximumWindowSize.Y) } // Narrow the sizes to that used by Windows width := winterm.SHORT(ws.Width) height := winterm.SHORT(ws.Height) // Set the dimensions while ensuring they remain within the bounds of the backing console buffer // -- Shrinking will always succeed. Growing may push the edges past the buffer boundary. When that occurs, // shift the upper left just enough to keep the new window within the buffer. 
rect := info.Window if width < rect.Right-rect.Left+1 { rect.Right = rect.Left + width - 1 } else if width > rect.Right-rect.Left+1 { rect.Right = rect.Left + width - 1 if rect.Right >= info.Size.X { rect.Left = info.Size.X - width rect.Right = info.Size.X - 1 } } if height < rect.Bottom-rect.Top+1 { rect.Bottom = rect.Top + height - 1 } else if height > rect.Bottom-rect.Top+1 { rect.Bottom = rect.Top + height - 1 if rect.Bottom >= info.Size.Y { rect.Top = info.Size.Y - height rect.Bottom = info.Size.Y - 1 } } logrus.Debugf("[windows] SetWinsize: Requested((%v,%v)) Actual(%v)", ws.Width, ws.Height, rect) return winterm.SetConsoleWindowInfo(fd, true, rect) } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { return windows.IsConsole(fd) } // RestoreTerminal restores the terminal connected to the given file descriptor // to a previous state. func RestoreTerminal(fd uintptr, state *State) error { return winterm.SetConsoleMode(fd, state.mode) } // SaveState saves the state of the terminal connected to the given file descriptor. func SaveState(fd uintptr) (*State, error) { mode, e := winterm.GetConsoleMode(fd) if e != nil { return nil, e } return &State{mode}, nil } // DisableEcho disables echo for the terminal connected to the given file descriptor. // -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx func DisableEcho(fd uintptr, state *State) error { mode := state.mode mode &^= winterm.ENABLE_ECHO_INPUT mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT err := winterm.SetConsoleMode(fd, mode) if err != nil { return err } // Register an interrupt handler to catch and restore prior state restoreAtInterrupt(fd, state) return nil } // SetRawTerminal puts the terminal connected to the given file descriptor into raw // mode and returns the previous state. 
func SetRawTerminal(fd uintptr) (*State, error) { state, err := MakeRaw(fd) if err != nil { return nil, err } // Register an interrupt handler to catch and restore prior state restoreAtInterrupt(fd, state) return state, err } // MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be restored. func MakeRaw(fd uintptr) (*State, error) { state, err := SaveState(fd) if err != nil { return nil, err } // See // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx mode := state.mode // Disable these modes mode &^= winterm.ENABLE_ECHO_INPUT mode &^= winterm.ENABLE_LINE_INPUT mode &^= winterm.ENABLE_MOUSE_INPUT mode &^= winterm.ENABLE_WINDOW_INPUT mode &^= winterm.ENABLE_PROCESSED_INPUT // Enable these modes mode |= winterm.ENABLE_EXTENDED_FLAGS mode |= winterm.ENABLE_INSERT_MODE mode |= winterm.ENABLE_QUICK_EDIT_MODE err = winterm.SetConsoleMode(fd, mode) if err != nil { return nil, err } return state, nil } func restoreAtInterrupt(fd uintptr, state *State) { sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, os.Interrupt) go func() { _ = <-sigchan RestoreTerminal(fd, state) os.Exit(0) }() } docker-1.10.3/pkg/term/termios_darwin.go000066400000000000000000000032351267010174400201550ustar00rootroot00000000000000package term import ( "syscall" "unsafe" ) const ( getTermios = syscall.TIOCGETA setTermios = syscall.TIOCSETA ) // Termios magic numbers, passthrough to the ones defined in syscall. 
const ( IGNBRK = syscall.IGNBRK PARMRK = syscall.PARMRK INLCR = syscall.INLCR IGNCR = syscall.IGNCR ECHONL = syscall.ECHONL CSIZE = syscall.CSIZE ICRNL = syscall.ICRNL ISTRIP = syscall.ISTRIP PARENB = syscall.PARENB ECHO = syscall.ECHO ICANON = syscall.ICANON ISIG = syscall.ISIG IXON = syscall.IXON BRKINT = syscall.BRKINT INPCK = syscall.INPCK OPOST = syscall.OPOST CS8 = syscall.CS8 IEXTEN = syscall.IEXTEN ) // Termios is the Unix API for terminal I/O. type Termios struct { Iflag uint64 Oflag uint64 Cflag uint64 Lflag uint64 Cc [20]byte Ispeed uint64 Ospeed uint64 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { var oldState State if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) newState.Oflag &^= OPOST newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) newState.Cflag &^= (CSIZE | PARENB) newState.Cflag |= CS8 newState.Cc[syscall.VMIN] = 1 newState.Cc[syscall.VTIME] = 0 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err } return &oldState, nil } docker-1.10.3/pkg/term/termios_freebsd.go000066400000000000000000000032351267010174400203030ustar00rootroot00000000000000package term import ( "syscall" "unsafe" ) const ( getTermios = syscall.TIOCGETA setTermios = syscall.TIOCSETA ) // Termios magic numbers, passthrough to the ones defined in syscall. 
const ( IGNBRK = syscall.IGNBRK PARMRK = syscall.PARMRK INLCR = syscall.INLCR IGNCR = syscall.IGNCR ECHONL = syscall.ECHONL CSIZE = syscall.CSIZE ICRNL = syscall.ICRNL ISTRIP = syscall.ISTRIP PARENB = syscall.PARENB ECHO = syscall.ECHO ICANON = syscall.ICANON ISIG = syscall.ISIG IXON = syscall.IXON BRKINT = syscall.BRKINT INPCK = syscall.INPCK OPOST = syscall.OPOST CS8 = syscall.CS8 IEXTEN = syscall.IEXTEN ) // Termios is the Unix API for terminal I/O. type Termios struct { Iflag uint32 Oflag uint32 Cflag uint32 Lflag uint32 Cc [20]byte Ispeed uint32 Ospeed uint32 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { var oldState State if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) newState.Oflag &^= OPOST newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) newState.Cflag &^= (CSIZE | PARENB) newState.Cflag |= CS8 newState.Cc[syscall.VMIN] = 1 newState.Cc[syscall.VTIME] = 0 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err } return &oldState, nil } docker-1.10.3/pkg/term/termios_linux.go000066400000000000000000000023341267010174400200270ustar00rootroot00000000000000// +build !cgo package term import ( "syscall" "unsafe" ) const ( getTermios = syscall.TCGETS setTermios = syscall.TCSETS ) // Termios is the Unix API for terminal I/O. type Termios struct { Iflag uint32 Oflag uint32 Cflag uint32 Lflag uint32 Cc [20]byte Ispeed uint32 Ospeed uint32 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. 
func MakeRaw(fd uintptr) (*State, error) { var oldState State if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) newState.Oflag &^= syscall.OPOST newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) newState.Cflag |= syscall.CS8 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err } return &oldState, nil } docker-1.10.3/pkg/term/windows/000077500000000000000000000000001267010174400162675ustar00rootroot00000000000000docker-1.10.3/pkg/term/windows/ansi_reader.go000066400000000000000000000167541267010174400211070ustar00rootroot00000000000000// +build windows package windows import ( "bytes" "errors" "fmt" "os" "strings" "unsafe" ansiterm "github.com/Azure/go-ansiterm" "github.com/Azure/go-ansiterm/winterm" ) const ( escapeSequence = ansiterm.KEY_ESC_CSI ) // ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. type ansiReader struct { file *os.File fd uintptr buffer []byte cbBuffer int command []byte } func newAnsiReader(nFile int) *ansiReader { file, fd := winterm.GetStdFile(nFile) return &ansiReader{ file: file, fd: fd, command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), buffer: make([]byte, 0), } } // Close closes the wrapped file. func (ar *ansiReader) Close() (err error) { return ar.file.Close() } // Fd returns the file descriptor of the wrapped file. func (ar *ansiReader) Fd() uintptr { return ar.fd } // Read reads up to len(p) bytes of translated input events into p. 
func (ar *ansiReader) Read(p []byte) (int, error) { if len(p) == 0 { return 0, nil } // Previously read bytes exist, read as much as we can and return if len(ar.buffer) > 0 { logger.Debugf("Reading previously cached bytes") originalLength := len(ar.buffer) copiedLength := copy(p, ar.buffer) if copiedLength == originalLength { ar.buffer = make([]byte, 0, len(p)) } else { ar.buffer = ar.buffer[copiedLength:] } logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) return copiedLength, nil } // Read and translate key events events, err := readInputEvents(ar.fd, len(p)) if err != nil { return 0, err } else if len(events) == 0 { logger.Debug("No input events detected") return 0, nil } keyBytes := translateKeyEvents(events, []byte(escapeSequence)) // Save excess bytes and right-size keyBytes if len(keyBytes) > len(p) { logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) ar.buffer = keyBytes[len(p):] keyBytes = keyBytes[:len(p)] } else if len(keyBytes) == 0 { logger.Debug("No key bytes returned from the translator") return 0, nil } copiedLength := copy(p, keyBytes) if copiedLength != len(keyBytes) { return 0, errors.New("Unexpected copy length encountered.") } logger.Debugf("Read p[%d]: % x", copiedLength, p) logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) return copiedLength, nil } // readInputEvents polls until at least one event is available. func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { // Determine the maximum number of records to retrieve // -- Cast around the type system to obtain the size of a single INPUT_RECORD. // unsafe.Sizeof requires an expression vs. a type-reference; the casting // tricks the type system into believing it has such an expression. 
recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) countRecords := maxBytes / recordSize if countRecords > ansiterm.MAX_INPUT_EVENTS { countRecords = ansiterm.MAX_INPUT_EVENTS } logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) // Wait for and read input events events := make([]winterm.INPUT_RECORD, countRecords) nEvents := uint32(0) eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) if err != nil { return nil, err } if eventsExist { err = winterm.ReadConsoleInput(fd, events, &nEvents) if err != nil { return nil, err } } // Return a slice restricted to the number of returned records logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) return events[:nEvents], nil } // KeyEvent Translation Helpers var arrowKeyMapPrefix = map[winterm.WORD]string{ winterm.VK_UP: "%s%sA", winterm.VK_DOWN: "%s%sB", winterm.VK_RIGHT: "%s%sC", winterm.VK_LEFT: "%s%sD", } var keyMapPrefix = map[winterm.WORD]string{ winterm.VK_UP: "\x1B[%sA", winterm.VK_DOWN: "\x1B[%sB", winterm.VK_RIGHT: "\x1B[%sC", winterm.VK_LEFT: "\x1B[%sD", winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 winterm.VK_INSERT: "\x1B[2%s~", winterm.VK_DELETE: "\x1B[3%s~", winterm.VK_PRIOR: "\x1B[5%s~", winterm.VK_NEXT: "\x1B[6%s~", winterm.VK_F1: "", winterm.VK_F2: "", winterm.VK_F3: "\x1B[13%s~", winterm.VK_F4: "\x1B[14%s~", winterm.VK_F5: "\x1B[15%s~", winterm.VK_F6: "\x1B[17%s~", winterm.VK_F7: "\x1B[18%s~", winterm.VK_F8: "\x1B[19%s~", winterm.VK_F9: "\x1B[20%s~", winterm.VK_F10: "\x1B[21%s~", winterm.VK_F11: "\x1B[23%s~", winterm.VK_F12: "\x1B[24%s~", } // translateKeyEvents converts the input events into the appropriate ANSI string. 
func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { var buffer bytes.Buffer for _, event := range events { if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) } } return buffer.Bytes() } // keyToString maps the given input event record to the corresponding string. func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { if keyEvent.UnicodeChar == 0 { return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) } _, alt, control := getControlKeys(keyEvent.ControlKeyState) if control { // TODO(azlinux): Implement following control sequences // -D Signals the end of input from the keyboard; also exits current shell. // -H Deletes the first character to the left of the cursor. Also called the ERASE key. // -Q Restarts printing after it has been stopped with -s. // -S Suspends printing on the screen (does not stop the program). // -U Deletes all characters on the current line. Also called the KILL key. // -E Quits current command and creates a core } // +Key generates ESC N Key if !control && alt { return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) } return string(keyEvent.UnicodeChar) } // formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. func formatVirtualKey(key winterm.WORD, controlState winterm.DWORD, escapeSequence []byte) string { shift, alt, control := getControlKeys(controlState) modifier := getControlKeysModifier(shift, alt, control) if format, ok := arrowKeyMapPrefix[key]; ok { return fmt.Sprintf(format, escapeSequence, modifier) } if format, ok := keyMapPrefix[key]; ok { return fmt.Sprintf(format, modifier) } return "" } // getControlKeys extracts the shift, alt, and ctrl key states. 
func getControlKeys(controlState winterm.DWORD) (shift, alt, control bool) { shift = 0 != (controlState & winterm.SHIFT_PRESSED) alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) return shift, alt, control } // getControlKeysModifier returns the ANSI modifier for the given combination of control keys. func getControlKeysModifier(shift, alt, control bool) string { if shift && alt && control { return ansiterm.KEY_CONTROL_PARAM_8 } if alt && control { return ansiterm.KEY_CONTROL_PARAM_7 } if shift && control { return ansiterm.KEY_CONTROL_PARAM_6 } if control { return ansiterm.KEY_CONTROL_PARAM_5 } if shift && alt { return ansiterm.KEY_CONTROL_PARAM_4 } if alt { return ansiterm.KEY_CONTROL_PARAM_3 } if shift { return ansiterm.KEY_CONTROL_PARAM_2 } return "" } docker-1.10.3/pkg/term/windows/ansi_writer.go000066400000000000000000000033571267010174400211540ustar00rootroot00000000000000// +build windows package windows import ( "io/ioutil" "os" ansiterm "github.com/Azure/go-ansiterm" "github.com/Azure/go-ansiterm/winterm" "github.com/Sirupsen/logrus" ) var logger *logrus.Logger // ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. 
type ansiWriter struct { file *os.File fd uintptr infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO command []byte escapeSequence []byte inAnsiSequence bool parser *ansiterm.AnsiParser } func newAnsiWriter(nFile int) *ansiWriter { logFile := ioutil.Discard if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { logFile, _ = os.Create("ansiReaderWriter.log") } logger = &logrus.Logger{ Out: logFile, Formatter: new(logrus.TextFormatter), Level: logrus.DebugLevel, } file, fd := winterm.GetStdFile(nFile) info, err := winterm.GetConsoleScreenBufferInfo(fd) if err != nil { return nil } parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) logger.Infof("newAnsiWriter: parser %p", parser) aw := &ansiWriter{ file: file, fd: fd, infoReset: info, command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), escapeSequence: []byte(ansiterm.KEY_ESC_CSI), parser: parser, } logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) logger.Infof("newAnsiWriter: %v", aw) return aw } func (aw *ansiWriter) Fd() uintptr { return aw.fd } // Write writes len(p) bytes from p to the underlying data stream. func (aw *ansiWriter) Write(p []byte) (total int, err error) { if len(p) == 0 { return 0, nil } logger.Infof("Write: % x", p) logger.Infof("Write: %s", string(p)) return aw.parser.Parse(p) } docker-1.10.3/pkg/term/windows/console.go000066400000000000000000000043371267010174400202670ustar00rootroot00000000000000// +build windows package windows import ( "io" "os" "syscall" "github.com/Azure/go-ansiterm/winterm" ansiterm "github.com/Azure/go-ansiterm" "github.com/Sirupsen/logrus" "io/ioutil" ) // ConEmuStreams returns prepared versions of console streams, // for proper use in ConEmu terminal. // The ConEmu terminal emulates ANSI on output streams well by default. 
func ConEmuStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { if IsConsole(os.Stdin.Fd()) { stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) } else { stdIn = os.Stdin } stdOut = os.Stdout stdErr = os.Stderr // WARNING (BEGIN): sourced from newAnsiWriter logFile := ioutil.Discard if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { logFile, _ = os.Create("ansiReaderWriter.log") } logger = &logrus.Logger{ Out: logFile, Formatter: new(logrus.TextFormatter), Level: logrus.DebugLevel, } // WARNING (END): sourced from newAnsiWriter return stdIn, stdOut, stdErr } // ConsoleStreams returns a wrapped version for each standard stream referencing a console, // that handles ANSI character sequences. func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { if IsConsole(os.Stdin.Fd()) { stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) } else { stdIn = os.Stdin } if IsConsole(os.Stdout.Fd()) { stdOut = newAnsiWriter(syscall.STD_OUTPUT_HANDLE) } else { stdOut = os.Stdout } if IsConsole(os.Stderr.Fd()) { stdErr = newAnsiWriter(syscall.STD_ERROR_HANDLE) } else { stdErr = os.Stderr } return stdIn, stdOut, stdErr } // GetHandleInfo returns file descriptor and bool indicating whether the file is a console. func GetHandleInfo(in interface{}) (uintptr, bool) { switch t := in.(type) { case *ansiReader: return t.Fd(), true case *ansiWriter: return t.Fd(), true } var inFd uintptr var isTerminal bool if file, ok := in.(*os.File); ok { inFd = file.Fd() isTerminal = IsConsole(inFd) } return inFd, isTerminal } // IsConsole returns true if the given file descriptor is a Windows Console. // The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. 
func IsConsole(fd uintptr) bool { _, e := winterm.GetConsoleMode(fd) return e == nil } docker-1.10.3/pkg/term/windows/windows.go000066400000000000000000000004641267010174400203140ustar00rootroot00000000000000// These files implement ANSI-aware input and output streams for use by the Docker Windows client. // When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create // and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. package windows docker-1.10.3/pkg/term/windows/windows_test.go000066400000000000000000000001051267010174400213430ustar00rootroot00000000000000// This file is necessary to pass the Docker tests. package windows docker-1.10.3/pkg/tlsconfig/000077500000000000000000000000001267010174400156165ustar00rootroot00000000000000docker-1.10.3/pkg/tlsconfig/config.go000066400000000000000000000110031267010174400174050ustar00rootroot00000000000000// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. // // As a reminder from https://golang.org/pkg/crypto/tls/#Config: // A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. // A Config may be reused; the tls package will also not modify it. package tlsconfig import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "os" "github.com/Sirupsen/logrus" ) // Options represents the information needed to create client and server TLS configurations. type Options struct { CAFile string // If either CertFile or KeyFile is empty, Client() will not load them // preventing the client from authenticating to the server. // However, Server() requires them and will error out if they are empty. 
CertFile string KeyFile string // client-only option InsecureSkipVerify bool // server-only option ClientAuth tls.ClientAuthType } // Extra (server-side) accepted CBC cipher suites - will phase out in the future var acceptedCBCCiphers = []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA, } // Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) var clientCipherSuites = []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, } // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls // options struct but wants to use a commonly accepted set of TLS cipher suites, with // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) // ServerDefault is a secure-enough TLS configuration for the server TLS configuration. var ServerDefault = tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: DefaultServerAcceptedCiphers, } // ClientDefault is a secure-enough TLS configuration for the client TLS configuration. var ClientDefault = tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. 
func certPool(caFile string) (*x509.CertPool, error) { // If we should verify the server, we need to load a trusted ca certPool := x509.NewCertPool() pem, err := ioutil.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) } if !certPool.AppendCertsFromPEM(pem) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } s := certPool.Subjects() subjects := make([]string, len(s)) for i, subject := range s { subjects[i] = string(subject) } logrus.Debugf("Trusting certs with subjects: %v", subjects) return certPool, nil } // Client returns a TLS configuration meant to be used by a client. func Client(options Options) (*tls.Config, error) { tlsConfig := ClientDefault tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify if !options.InsecureSkipVerify { CAs, err := certPool(options.CAFile) if err != nil { return nil, err } tlsConfig.RootCAs = CAs } if options.CertFile != "" && options.KeyFile != "" { tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} } return &tlsConfig, nil } // Server returns a TLS configuration meant to be used by a server. func Server(options Options) (*tls.Config, error) { tlsConfig := ServerDefault tlsConfig.ClientAuth = options.ClientAuth tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { if os.IsNotExist(err) { return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. 
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} if options.ClientAuth >= tls.VerifyClientCertIfGiven { CAs, err := certPool(options.CAFile) if err != nil { return nil, err } tlsConfig.ClientCAs = CAs } return &tlsConfig, nil } docker-1.10.3/pkg/truncindex/000077500000000000000000000000001267010174400160115ustar00rootroot00000000000000docker-1.10.3/pkg/truncindex/truncindex.go000066400000000000000000000065771267010174400205420ustar00rootroot00000000000000// Package truncindex provides a general 'index tree', used by Docker // in order to be able to reference containers by only a few unambiguous // characters of their id. package truncindex import ( "errors" "fmt" "strings" "sync" "github.com/tchap/go-patricia/patricia" ) var ( // ErrEmptyPrefix is an error returned if the prefix was empty. ErrEmptyPrefix = errors.New("Prefix can't be empty") // ErrAmbiguousPrefix is returned if the prefix was ambiguous // (multiple ids for the prefix). ErrAmbiguousPrefix = errors.New("Multiple IDs found with provided prefix") // ErrIllegalChar is returned when a space is in the ID ErrIllegalChar = errors.New("illegal character: ' '") // ErrNotExist is returned when ID or its prefix not found in index. ErrNotExist = errors.New("ID does not exist") ) // TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. // This is used to retrieve image and container IDs by more convenient shorthand prefixes. type TruncIndex struct { sync.RWMutex trie *patricia.Trie ids map[string]struct{} } // NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. 
func NewTruncIndex(ids []string) (idx *TruncIndex) { idx = &TruncIndex{ ids: make(map[string]struct{}), // Change patricia max prefix per node length, // because our len(ID) always 64 trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), } for _, id := range ids { idx.addID(id) } return } func (idx *TruncIndex) addID(id string) error { if strings.Contains(id, " ") { return ErrIllegalChar } if id == "" { return ErrEmptyPrefix } if _, exists := idx.ids[id]; exists { return fmt.Errorf("id already exists: '%s'", id) } idx.ids[id] = struct{}{} if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { return fmt.Errorf("failed to insert id: %s", id) } return nil } // Add adds a new ID to the TruncIndex. func (idx *TruncIndex) Add(id string) error { idx.Lock() defer idx.Unlock() if err := idx.addID(id); err != nil { return err } return nil } // Delete removes an ID from the TruncIndex. If there are multiple IDs // with the given prefix, an error is thrown. func (idx *TruncIndex) Delete(id string) error { idx.Lock() defer idx.Unlock() if _, exists := idx.ids[id]; !exists || id == "" { return fmt.Errorf("no such id: '%s'", id) } delete(idx.ids, id) if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { return fmt.Errorf("no such id: '%s'", id) } return nil } // Get retrieves an ID from the TruncIndex. If there are multiple IDs // with the given prefix, an error is thrown. 
func (idx *TruncIndex) Get(s string) (string, error) { if s == "" { return "", ErrEmptyPrefix } var ( id string ) subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { if id != "" { // we haven't found the ID if there are two or more IDs id = "" return ErrAmbiguousPrefix } id = string(prefix) return nil } idx.RLock() defer idx.RUnlock() if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { return "", err } if id != "" { return id, nil } return "", ErrNotExist } // Iterate iterates over all stored IDs, and passes each of them to the given handler. func (idx *TruncIndex) Iterate(handler func(id string)) { idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { handler(string(prefix)) return nil }) } docker-1.10.3/pkg/truncindex/truncindex_test.go000066400000000000000000000241041267010174400215630ustar00rootroot00000000000000package truncindex import ( "math/rand" "testing" "github.com/docker/docker/pkg/stringid" ) // Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. 
func TestTruncIndex(t *testing.T) { ids := []string{} index := NewTruncIndex(ids) // Get on an empty index if _, err := index.Get("foobar"); err == nil { t.Fatal("Get on an empty index should return an error") } // Spaces should be illegal in an id if err := index.Add("I have a space"); err == nil { t.Fatalf("Adding an id with ' ' should return an error") } id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" // Add an id if err := index.Add(id); err != nil { t.Fatal(err) } // Add an empty id (should fail) if err := index.Add(""); err == nil { t.Fatalf("Adding an empty id should return an error") } // Get a non-existing id assertIndexGet(t, index, "abracadabra", "", true) // Get an empty id assertIndexGet(t, index, "", "", true) // Get the exact id assertIndexGet(t, index, id, id, false) // The first letter should match assertIndexGet(t, index, id[:1], id, false) // The first half should match assertIndexGet(t, index, id[:len(id)/2], id, false) // The second half should NOT match assertIndexGet(t, index, id[len(id)/2:], "", true) id2 := id[:6] + "blabla" // Add an id if err := index.Add(id2); err != nil { t.Fatal(err) } // Both exact IDs should work assertIndexGet(t, index, id, id, false) assertIndexGet(t, index, id2, id2, false) // 6 characters or less should conflict assertIndexGet(t, index, id[:6], "", true) assertIndexGet(t, index, id[:4], "", true) assertIndexGet(t, index, id[:1], "", true) // An ambiguous id prefix should return an error if _, err := index.Get(id[:4]); err == nil { t.Fatal("An ambiguous id prefix should return an error") } // 7 characters should NOT conflict assertIndexGet(t, index, id[:7], id, false) assertIndexGet(t, index, id2[:7], id2, false) // Deleting a non-existing id should return an error if err := index.Delete("non-existing"); err == nil { t.Fatalf("Deleting a non-existing id should return an error") } // Deleting an empty id should return an error if err := index.Delete(""); err == nil { t.Fatal("Deleting an empty id should return an 
error") } // Deleting id2 should remove conflicts if err := index.Delete(id2); err != nil { t.Fatal(err) } // id2 should no longer work assertIndexGet(t, index, id2, "", true) assertIndexGet(t, index, id2[:7], "", true) assertIndexGet(t, index, id2[:11], "", true) // conflicts between id and id2 should be gone assertIndexGet(t, index, id[:6], id, false) assertIndexGet(t, index, id[:4], id, false) assertIndexGet(t, index, id[:1], id, false) // non-conflicting substrings should still not conflict assertIndexGet(t, index, id[:7], id, false) assertIndexGet(t, index, id[:15], id, false) assertIndexGet(t, index, id, id, false) assertIndexIterate(t) } func assertIndexIterate(t *testing.T) { ids := []string{ "19b36c2c326ccc11e726eee6ee78a0baf166ef96", "28b36c2c326ccc11e726eee6ee78a0baf166ef96", "37b36c2c326ccc11e726eee6ee78a0baf166ef96", "46b36c2c326ccc11e726eee6ee78a0baf166ef96", } index := NewTruncIndex(ids) index.Iterate(func(targetId string) { for _, id := range ids { if targetId == id { return } } t.Fatalf("An unknown ID '%s'", targetId) }) } func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { if result, err := index.Get(input); err != nil && !expectError { t.Fatalf("Unexpected error getting '%s': %s", input, err) } else if err == nil && expectError { t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) } else if result != expectedResult { t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) } } func BenchmarkTruncIndexAdd100(b *testing.B) { var testSet []string for i := 0; i < 100; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexAdd250(b *testing.B) { var testSet []string for i := 0; i < 250; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } 
b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexAdd500(b *testing.B) { var testSet []string for i := 0; i < 500; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexGet100(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 100; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexGet250(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 250; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexGet500(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 500; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func 
BenchmarkTruncIndexDelete100(b *testing.B) { var testSet []string for i := 0; i < 100; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } b.StartTimer() for _, id := range testSet { if err := index.Delete(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexDelete250(b *testing.B) { var testSet []string for i := 0; i < 250; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } b.StartTimer() for _, id := range testSet { if err := index.Delete(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexDelete500(b *testing.B) { var testSet []string for i := 0; i < 500; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } b.StartTimer() for _, id := range testSet { if err := index.Delete(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexNew100(b *testing.B) { var testSet []string for i := 0; i < 100; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } b.ResetTimer() for i := 0; i < b.N; i++ { NewTruncIndex(testSet) } } func BenchmarkTruncIndexNew250(b *testing.B) { var testSet []string for i := 0; i < 250; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } b.ResetTimer() for i := 0; i < b.N; i++ { NewTruncIndex(testSet) } } func BenchmarkTruncIndexNew500(b *testing.B) { var testSet []string for i := 0; i < 500; i++ { testSet = append(testSet, stringid.GenerateNonCryptoID()) } b.ResetTimer() for i := 0; i < b.N; i++ { NewTruncIndex(testSet) } } func 
BenchmarkTruncIndexAddGet100(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 500; i++ { id := stringid.GenerateNonCryptoID() testSet = append(testSet, id) l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexAddGet250(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 500; i++ { id := stringid.GenerateNonCryptoID() testSet = append(testSet, id) l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexAddGet500(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 500; i++ { id := stringid.GenerateNonCryptoID() testSet = append(testSet, id) l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } docker-1.10.3/pkg/urlutil/000077500000000000000000000000001267010174400153265ustar00rootroot00000000000000docker-1.10.3/pkg/urlutil/urlutil.go000066400000000000000000000025651267010174400173650ustar00rootroot00000000000000// Package urlutil provides helper function to check urls kind. 
// It supports http urls, git urls and transport url (tcp://, …) package urlutil import ( "regexp" "strings" ) var ( validPrefixes = map[string][]string{ "url": {"http://", "https://"}, "git": {"git://", "github.com/", "git@"}, "transport": {"tcp://", "tcp+tls://", "udp://", "unix://"}, } urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") ) // IsURL returns true if the provided str is an HTTP(S) URL. func IsURL(str string) bool { return checkURL(str, "url") } // IsGitURL returns true if the provided str is a git repository URL. func IsGitURL(str string) bool { if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { return true } return checkURL(str, "git") } // IsGitTransport returns true if the provided str is a git transport by inspecting // the prefix of the string for known protocols used in git. func IsGitTransport(str string) bool { return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") } // IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. 
func IsTransportURL(str string) bool { return checkURL(str, "transport") } func checkURL(str, kind string) bool { for _, prefix := range validPrefixes[kind] { if strings.HasPrefix(str, prefix) { return true } } return false } docker-1.10.3/pkg/urlutil/urlutil_test.go000066400000000000000000000031011267010174400204070ustar00rootroot00000000000000package urlutil import "testing" var ( gitUrls = []string{ "git://github.com/docker/docker", "git@github.com:docker/docker.git", "git@bitbucket.org:atlassianlabs/atlassian-docker.git", "https://github.com/docker/docker.git", "http://github.com/docker/docker.git", "http://github.com/docker/docker.git#branch", "http://github.com/docker/docker.git#:dir", } incompleteGitUrls = []string{ "github.com/docker/docker", } invalidGitUrls = []string{ "http://github.com/docker/docker.git:#branch", } transportUrls = []string{ "tcp://example.com", "tcp+tls://example.com", "udp://example.com", "unix:///example", } ) func TestValidGitTransport(t *testing.T) { for _, url := range gitUrls { if IsGitTransport(url) == false { t.Fatalf("%q should be detected as valid Git prefix", url) } } for _, url := range incompleteGitUrls { if IsGitTransport(url) == true { t.Fatalf("%q should not be detected as valid Git prefix", url) } } } func TestIsGIT(t *testing.T) { for _, url := range gitUrls { if IsGitURL(url) == false { t.Fatalf("%q should be detected as valid Git url", url) } } for _, url := range incompleteGitUrls { if IsGitURL(url) == false { t.Fatalf("%q should be detected as valid Git url", url) } } for _, url := range invalidGitUrls { if IsGitURL(url) == true { t.Fatalf("%q should not be detected as valid Git prefix", url) } } } func TestIsTransport(t *testing.T) { for _, url := range transportUrls { if IsTransportURL(url) == false { t.Fatalf("%q should be detected as valid Transport url", url) } } } 
docker-1.10.3/pkg/useragent/000077500000000000000000000000001267010174400156235ustar00rootroot00000000000000docker-1.10.3/pkg/useragent/README.md000066400000000000000000000001441267010174400171010ustar00rootroot00000000000000This package provides helper functions to pack version information into a single User-Agent header. docker-1.10.3/pkg/useragent/useragent.go000066400000000000000000000026101267010174400201460ustar00rootroot00000000000000// Package useragent provides helper functions to pack // version information into a single User-Agent header. package useragent import ( "strings" ) // VersionInfo is used to model UserAgent versions. type VersionInfo struct { Name string Version string } func (vi *VersionInfo) isValid() bool { const stopChars = " \t\r\n/" name := vi.Name vers := vi.Version if len(name) == 0 || strings.ContainsAny(name, stopChars) { return false } if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { return false } return true } // AppendVersions converts versions to a string and appends the string to the string base. // // Each VersionInfo will be converted to a string in the format of // "product/version", where the "product" is get from the name field, while // version is get from the version field. Several pieces of version information // will be concatenated and separated by space. // // Example: // AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) // results in "base foo/1.0 bar/2.0". 
func AppendVersions(base string, versions ...VersionInfo) string { if len(versions) == 0 { return base } verstrs := make([]string, 0, 1+len(versions)) if len(base) > 0 { verstrs = append(verstrs, base) } for _, v := range versions { if !v.isValid() { continue } verstrs = append(verstrs, v.Name+"/"+v.Version) } return strings.Join(verstrs, " ") } docker-1.10.3/pkg/useragent/useragent_test.go000066400000000000000000000012041267010174400212030ustar00rootroot00000000000000package useragent import "testing" func TestVersionInfo(t *testing.T) { vi := VersionInfo{"foo", "bar"} if !vi.isValid() { t.Fatalf("VersionInfo should be valid") } vi = VersionInfo{"", "bar"} if vi.isValid() { t.Fatalf("Expected VersionInfo to be invalid") } vi = VersionInfo{"foo", ""} if vi.isValid() { t.Fatalf("Expected VersionInfo to be invalid") } } func TestAppendVersions(t *testing.T) { vis := []VersionInfo{ {"foo", "1.0"}, {"bar", "0.1"}, {"pi", "3.1.4"}, } v := AppendVersions("base", vis...) expect := "base foo/1.0 bar/0.1 pi/3.1.4" if v != expect { t.Fatalf("expected %q, got %q", expect, v) } } docker-1.10.3/pkg/version/000077500000000000000000000000001267010174400153135ustar00rootroot00000000000000docker-1.10.3/pkg/version/version.go000066400000000000000000000027301267010174400173310ustar00rootroot00000000000000package version import ( "strconv" "strings" ) // Version provides utility methods for comparing versions. 
type Version string func (v Version) compareTo(other Version) int { var ( currTab = strings.Split(string(v), ".") otherTab = strings.Split(string(other), ".") ) max := len(currTab) if len(otherTab) > max { max = len(otherTab) } for i := 0; i < max; i++ { var currInt, otherInt int if len(currTab) > i { currInt, _ = strconv.Atoi(currTab[i]) } if len(otherTab) > i { otherInt, _ = strconv.Atoi(otherTab[i]) } if currInt > otherInt { return 1 } if otherInt > currInt { return -1 } } return 0 } // String returns the version string func (v Version) String() string { return string(v) } // LessThan checks if a version is less than another func (v Version) LessThan(other Version) bool { return v.compareTo(other) == -1 } // LessThanOrEqualTo checks if a version is less than or equal to another func (v Version) LessThanOrEqualTo(other Version) bool { return v.compareTo(other) <= 0 } // GreaterThan checks if a version is greater than another func (v Version) GreaterThan(other Version) bool { return v.compareTo(other) == 1 } // GreaterThanOrEqualTo checks if a version is greater than or equal to another func (v Version) GreaterThanOrEqualTo(other Version) bool { return v.compareTo(other) >= 0 } // Equal checks if a version is equal to another func (v Version) Equal(other Version) bool { return v.compareTo(other) == 0 } docker-1.10.3/pkg/version/version_test.go000066400000000000000000000013641267010174400203720ustar00rootroot00000000000000package version import ( "testing" ) func assertVersion(t *testing.T, a, b string, result int) { if r := Version(a).compareTo(Version(b)); r != result { t.Fatalf("Unexpected version comparison result. 
Found %d, expected %d", r, result) } } func TestCompareVersion(t *testing.T) { assertVersion(t, "1.12", "1.12", 0) assertVersion(t, "1.0.0", "1", 0) assertVersion(t, "1", "1.0.0", 0) assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) assertVersion(t, "1", "1.0.1", -1) assertVersion(t, "1.0.1", "1", 1) assertVersion(t, "1.0.1", "1.0.2", -1) assertVersion(t, "1.0.2", "1.0.3", -1) assertVersion(t, "1.0.3", "1.1", -1) assertVersion(t, "1.1", "1.1.1", -1) assertVersion(t, "1.1.1", "1.1.2", -1) assertVersion(t, "1.1.2", "1.2", -1) } docker-1.10.3/project/000077500000000000000000000000001267010174400145135ustar00rootroot00000000000000docker-1.10.3/project/ARM.md000066400000000000000000000022361267010174400154570ustar00rootroot00000000000000# ARM support The ARM support should be considered experimental. It will be extended step by step in the coming weeks. Building a Docker Development Image works in the same fashion as for Intel platform (x86-64). Currently we have initial support for 32bit ARMv7 devices. To work with the Docker Development Image you have to clone the Docker/Docker repo on a supported device. It needs to have a Docker Engine installed to build the Docker Development Image. From the root of the Docker/Docker repo one can use make to execute the following make targets: - make validate - make binary - make build - make bundles - make default - make shell - make The Makefile does include logic to determine on which OS and architecture the Docker Development Image is built. Based on OS and architecture it chooses the correct Dockerfile. For the ARM 32bit architecture it uses `Dockerfile.arm`. So for example in order to build a Docker binary one has to 1. clone the Docker/Docker repository on an ARM device `git clone git@github.com:docker/docker.git` 2. change into the checked out repository with `cd docker` 3. 
execute `make binary` to create a Docker Engine binary for ARM docker-1.10.3/project/BRANCHES-AND-TAGS.md000066400000000000000000000025541267010174400173640ustar00rootroot00000000000000Branches and tags ================= Note: details of the release process for the Engine are documented in the [RELEASE-CHECKLIST](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). # Branches The docker/docker repository should normally have only three living branches at all time, including the regular `master` branch: ## `docs` branch The `docs` branch supports documentation updates between product releases. This branch allow us to decouple documentation releases from product releases. ## `release` branch The `release` branch contains the last _released_ version of the code for the project. The `release` branch is only updated at each public release of the project. The mechanism for this is that the release is materialized by a pull request against the `release` branch which lives for the duration of the code freeze period. When this pull request is merged, the `release` branch gets updated, and its new state is tagged accordingly. # Tags Any public release of a compiled binary, with the logical exception of nightly builds, should have a corresponding tag in the repository. 
The general format of a tag is `vX.Y.Z[-suffix[N]]`: - All of `X`, `Y`, `Z` must be specified (example: `v1.0.0`) - First release candidate for version `1.8.0` should be tagged `v1.8.0-rc1` - Second alpha release of a product should be tagged `v1.0.0-alpha1` docker-1.10.3/project/CONTRIBUTORS.md000077700000000000000000000000001267010174400214312../CONTRIBUTING.mdustar00rootroot00000000000000docker-1.10.3/project/GOVERNANCE.md000066400000000000000000000014301267010174400164620ustar00rootroot00000000000000# Docker Governance Advisory Board Meetings In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at [Google Docs Folder](https://goo.gl/Alfj8r) These include: * First Meeting Notes * DGAB Charter * Presentation 1: Introductory Presentation, including State of The Project * Presentation 2: Overall Contribution Structure/Docker Project Core Proposal * Presentation 3: Long Term Roadmap/Statement of Direction docker-1.10.3/project/IRC-ADMINISTRATION.md000066400000000000000000000033511267010174400175770ustar00rootroot00000000000000# Freenode IRC Administration Guidelines and Tips This is not meant to be a general "Here's how to IRC" document, so if you're looking for that, check Google instead. ♥ If you've been charged with helping maintain one of Docker's now many IRC channels, this might turn out to be useful. If there's information that you wish you'd known about how a particular channel is organized, you should add deets here! :) ## `ChanServ` Most channel maintenance happens by talking to Freenode's `ChanServ` bot. 
For example, `/msg ChanServ ACCESS LIST` will show you a list of everyone with "access" privileges for a particular channel. A similar command is used to give someone a particular access level. For example, to add a new maintainer to the `#docker-maintainers` access list so that they can contribute to the discussions (after they've been merged appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ ACCESS #docker-maintainers ADD maintainer`. To setup a new channel with a similar `maintainer` access template, use a command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS` for more details). ## Troubleshooting The most common cause of not-getting-auto-`+v` woes is people not being `IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS ADD` request with something like `xyz is not registered.`. This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword` followed by `/msg NickServ GROUP` to group the two nicknames together. See `/msg NickServ HELP GROUP` for more information. docker-1.10.3/project/ISSUE-TRIAGE.md000066400000000000000000000141761267010174400167070ustar00rootroot00000000000000Triaging of issues ------------------ Triage provides an important way to contribute to an open source project. Triage helps ensure issues resolve quickly by: - Describing the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences an problem and what actions they took. - Giving a contributor the information they need before they commit to resolving an issue. - Lowering the issue count by preventing duplicate issues. - Streamlining the development process by preventing duplicate discussions. If you don't have time to code, consider helping with triage. 
The community will thank you for saving them time by spending some of yours. ### 1. Ensure the issue contains basic information Before triaging an issue very far, make sure that the issue's author provided the standard issue information. This will help you make an educated recommendation on how this to categorize the issue. Standard information that *must* be included in most issues are things such as: - the output of `docker version` - the output of `docker info` - the output of `uname -a` - a reproducible case if this is a bug, Dockerfiles FTW - host distribution and version ( ubuntu 14.04, RHEL, fedora 23 ) - page URL if this is a docs issue or the name of a man page Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. If the author does not respond requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. ### 2. Classify the Issue An issue can have multiple of the following labels. #### Issue kind | Kind | Description | |------------------|---------------------------------------------------------------------------------------------------------------------------------| | kind/bug | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate. | | kind/docs | Writing documentation, man pages, articles, blogs, or other significant word-driven task. 
Features are new and shiny.
That should be all the information required for a new or existing contributor to come in and resolve an issue.
## Package Name If possible, your package should be called "docker". If that name is already taken, a second choice is "docker-engine". Another possible choice is "docker.io". ## Official Build vs Distro Build The Docker project maintains its own build and release toolchain. It is pretty neat and entirely based on Docker (surprise!). This toolchain is the canonical way to build Docker. We encourage you to give it a try, and if the circumstances allow you to use it, we recommend that you do. You might not be able to use the official build toolchain - usually because your distribution has a toolchain and packaging policy of its own. We get it! Your house, your rules. The rest of this document should give you the information you need to package Docker your way, without denaturing it in the process. ## Build Dependencies To build Docker, you will need the following: * A recent version of Git and Mercurial * Go version 1.4 or later (Go version 1.5 or later required for hardware signing support in Docker Content Trust) * A clean checkout of the source added to a valid [Go workspace](https://golang.org/doc/code.html#Workspaces) under the path *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, explained in more detail below) To build the Docker daemon, you will additionally need: * An amd64/x86_64 machine running Linux * SQLite version 3.7.9 or later * libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version 2.02.89 or later * btrfs-progs version 3.16.1 or later (unless using an older version is absolutely necessary, in which case 3.8 is the minimum) * libseccomp version 2.2.1 or later (for build tag seccomp) * yubico-piv-tool version 1.1.0 or later (for experimental) Be sure to also check out Docker's Dockerfile for the most up-to-date list of these build-time dependencies. ### Go Dependencies All Go dependencies are vendored under "./vendor". 
They are used by the official build, so the source of truth for the current version of each dependency is whatever is in "./vendor". To use the vendored dependencies, simply make sure the path to "./vendor" is included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). If you would rather (or must, due to distro policy) package these dependencies yourself, take a look at "./hack/vendor.sh" for an easy-to-parse list of the exact version for each. NOTE: if you're not able to package the exact version (to the exact commit) of a given dependency, please get in touch so we can remediate! Who knows what discrepancies can be caused by even the slightest deviation. We promise to do our best to make everybody happy. ## Stripping Binaries Please, please, please do not strip any compiled binaries. This is really important. In our own testing, stripping the resulting binaries sometimes results in a binary that appears to work, but more often causes random panics, segfaults, and other issues. Even if the binary appears to work, please don't strip. See the following quotes from Dave Cheney, which explain this position better from the upstream Golang perspective. ### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) > Super super important: Do not strip go binaries or archives. It isn't tested, > often breaks, and doesn't work. 
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) > To quote myself: "Please do not strip Go binaries, it is not supported, not > tested, is often broken, and doesn't do what you want" > > To unpack that a bit > > * not supported, as in, we don't support it, and recommend against it when > asked > * not tested, we don't test stripped binaries as part of the build CI process > * is often broken, stripping a go binary will produce anywhere from no, to > subtle, to outright execution failure, see above ### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) > To clarify my previous statements. > > * I do not disagree with the debian policy, it is there for a good reason > * Having said that, it stripping Go binaries doesn't work, and nobody is > looking at making it work, so there is that. > > Thanks for patching the build formula. ## Building Docker Please use our build script ("./hack/make.sh") for all your compilation of Docker. If there's something you need that it isn't doing, or something it could be doing to make your life as a packager easier, please get in touch with Tianon and help us rectify the situation. Chances are good that other packagers have probably run into the same problems and a fix might already be in the works, but none of us will know for sure unless you harass Tianon about it. :) All the commands listed within this section should be run with the Docker source checkout as the current working directory. 
### `AUTO_GOPATH` If you'd rather not be bothered with the hassles that setting up `GOPATH` appropriately can be, and prefer to just get a "build that works", you should add something similar to this to whatever script or process you're using to build Docker: ```bash export AUTO_GOPATH=1 ``` This will cause the build scripts to set up a reasonable `GOPATH` that automatically and properly includes both docker/docker from the local directory, and the local "./vendor" directory as necessary. ### `DOCKER_BUILDTAGS` If you're building a binary that may need to be used on platforms that include AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: ```bash export DOCKER_BUILDTAGS='apparmor' ``` If you're building a binary that may need to be used on platforms that include SELinux, you will need to use the `selinux` build tag: ```bash export DOCKER_BUILDTAGS='selinux' ``` There are build tags for disabling graphdrivers as well. By default, support for all graphdrivers are built in. To disable btrfs: ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' ``` To disable devicemapper: ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' ``` To disable aufs: ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' ``` NOTE: if you need to set more than one build tag, space separate them: ```bash export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' ``` ### Static Daemon If it is feasible within the constraints of your distribution, you should seriously consider packaging Docker as a single static binary. A good comparison is Busybox, which is often packaged statically as a feature to enable mass portability. Because of the unique way Docker operates, being similarly static is a "feature". 
To build a static Docker daemon binary, run the following command (first ensuring that all the necessary libraries are available in static form for linking - see the "Build Dependencies" section above, and the relevant lines within Docker's own Dockerfile that set up our official build environment): ```bash ./hack/make.sh binary ``` This will create a static binary under "./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of the file "./VERSION". This binary is usually installed somewhere like "/usr/bin/docker". ### Dynamic Daemon / Client-only Binary If you are only interested in a Docker client binary, set `DOCKER_CLIENTONLY` to a non-empty value using something similar to the following: (which will prevent the extra step of compiling dockerinit) ```bash export DOCKER_CLIENTONLY=1 ``` If you need to (due to distro policy, distro library availability, or for other reasons) create a dynamically compiled daemon binary, or if you are only interested in creating a client binary for Docker, use something similar to the following: ```bash ./hack/make.sh dynbinary ``` This will create "./bundles/$VERSION/dynbinary/docker-$VERSION", which for client-only builds is the important file to grab and install as appropriate. For daemon builds, you will also need to grab and install "./bundles/$VERSION/dynbinary/dockerinit-$VERSION", which is created from the minimal set of Docker's codebase that _must_ be compiled statically (and is thus a pure static binary). 
The acceptable locations Docker will search for this file are as follows (in order): * as "dockerinit" in the same directory as the daemon binary (ie, if docker is installed at "/usr/bin/docker", then "/usr/bin/dockerinit" will be the first place this file is searched for) * "/usr/libexec/docker/dockerinit" or "/usr/local/libexec/docker/dockerinit" ([FHS 3.0 Draft](https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec)) * "/usr/lib/docker/dockerinit" or "/usr/local/lib/docker/dockerinit" ([FHS 2.3](https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA)) If (and please, only if) one of the paths above is insufficient due to distro policy or similar issues, you may use the `DOCKER_INITPATH` environment variable at compile-time as follows to set a different path for Docker to search: ```bash export DOCKER_INITPATH=/usr/lib/docker.io/dockerinit ``` If you find yourself needing this, please don't hesitate to reach out to Tianon to see if it would be reasonable or helpful to add more paths to Docker's list, especially if there's a relevant standard worth referencing (such as the FHS). Also, it goes without saying, but for the purposes of the daemon please consider these two binaries ("docker" and "dockerinit") as if they were a single unit. Mixing and matching can cause undesired consequences, and will fail to run properly. 
## System Dependencies ### Runtime Dependencies To function properly, the Docker daemon needs the following software to be installed and available at runtime: * iptables version 1.4 or later * procps (or similar provider of a "ps" executable) * e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs) * xfsprogs (in use: mkfs.xfs) * XZ Utils version 4.9 or later * a [properly mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point [is](https://github.com/docker/docker/issues/2683) [not](https://github.com/docker/docker/issues/3485) [sufficient](https://github.com/docker/docker/issues/4568)) Additionally, the Docker client needs the following software to be installed and available at runtime: * Git version 1.7 or later ### Kernel Requirements The Docker daemon has very specific kernel requirements. Most pre-packaged kernels already include the necessary options enabled. If you are building your own kernel, you will either need to discover the options necessary via trial and error, or check out the [Gentoo ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), in which a list is maintained (and if there are any issues or discrepancies in that list, please contact Tianon so they can be rectified). Note that in client mode, there are no specific kernel requirements, and that the client will even run on alternative platforms such as Mac OS X / Darwin. ### Optional Dependencies Some of Docker's features are activated by using optional command-line flags or by having support for them in the kernel or userspace. 
* The fewer moving parts in a container, the better.
- Only a P0 can justify proceeding with the patch release.
Security releases are developed in a private > repository, released and tested under embargo before they become > publicly available. docker-1.10.3/project/PRINCIPLES.md000066400000000000000000000021401267010174400165020ustar00rootroot00000000000000# Docker principles In the design and development of Docker we try to follow these principles: (Work in progress) * Don't try to replace every tool. Instead, be an ingredient to improve them. * Less code is better. * Fewer components are better. Do you really need to add one more class? * 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. * Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. * When hesitating between 2 options, choose the one that is easier to reverse. * No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later. * Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. * The less moving parts in a container, the better. * Don't merge it unless you document it. * Don't document it unless you can keep it up-to-date. * Don't merge it unless you test it! * Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. docker-1.10.3/project/README.md000066400000000000000000000015141267010174400157730ustar00rootroot00000000000000# Hacking on Docker The `project/` directory holds information and tools for everyone involved in the process of creating and distributing Docker, specifically: ## Guides If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTORS.md](../CONTRIBUTING.md). If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS). If you're a *packager* or aspiring packager, you should read [PACKAGERS.md](./PACKAGERS.md). 
If you're a maintainer in charge of a *release*, you should read [RELEASE-CHECKLIST.md](./RELEASE-CHECKLIST.md). ## Roadmap A high-level roadmap is available at [ROADMAP.md](../ROADMAP.md). ## Build tools [hack/make.sh](../hack/make.sh) is the primary build tool for docker. It is used for compiling the official binary, running the test suite, and pushing releases. docker-1.10.3/project/RELEASE-CHECKLIST.md000066400000000000000000000334211267010174400174270ustar00rootroot00000000000000# Release Checklist ## A maintainer's guide to releasing Docker So you're in charge of a Docker release? Cool. Here's what to do. If your experience deviates from this document, please document the changes to keep it up-to-date. It is important to note that this document assumes that the git remote in your repository that corresponds to "https://github.com/docker/docker" is named "origin". If yours is not (for example, if you've chosen to name it "upstream" or something similar instead), be sure to adjust the listed snippets for your local environment accordingly. If you are not sure what your upstream remote is named, use a command like `git remote -v` to find out. If you don't have an upstream remote, you can add one easily using something like: ```bash export GITHUBUSER="YOUR_GITHUB_USER" git remote add origin https://github.com/docker/docker.git git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git ``` ### 1. Pull from master and create a release branch All releases version numbers will be of the form: vX.Y.Z where X is the major version number, Y is the minor version number and Z is the patch release version number. #### Major releases The release branch name is just vX.Y because it's going to be the basis for all .Z releases. ```bash export BASE=vX.Y export VERSION=vX.Y.Z git fetch origin git checkout --track origin/master git checkout -b release/$BASE ``` This new branch is going to be the base for the release. 
We need to push it to origin so we can track the cherry-picked changes and the version bump: ```bash git push origin release/$BASE ``` When you have the major release branch in origin, we need to create the bump fork branch that we'll push to our fork: ```bash git checkout -b bump_$VERSION ``` #### Patch releases If we have the release branch in origin, we can create the forked bump branch from it directly: ```bash export VERSION=vX.Y.Z export PATCH=vX.Y.Z+1 git fetch origin git checkout --track origin/release/$BASE git checkout -b bump_$PATCH ``` We cherry-pick only the commits we want into the bump branch: ```bash # get the commits ids we want to cherry-pick git log # cherry-pick the commits starting from the oldest one, without including merge commits git cherry-pick git cherry-pick ... ``` ### 2. Bump the API version on master We don't want to stop contributions to master just because we are releasing. At the same time, now that the release branch exists, we don't want API changes to go to the now frozen API version. Create a new entry in `docs/reference/api/` by copying the latest and bumping the version number (in both the file's name and content), and submit this in a PR against master. ### 3. Update CHANGELOG.md You can run this command for reference with git 2.0: ```bash git fetch --tags LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) git log --stat $LAST_VERSION..bump_$VERSION ``` If you don't have git 2.0 but have a sort command that supports `-V`: ```bash git fetch --tags LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) git log --stat $LAST_VERSION..bump_$VERSION ``` If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. ```markdown #### Notable features since * New docker command to do something useful * Remote API change (deprecating old version) * Performance improvements in some usecases * ... 
``` For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. Each change should be listed under a category heading formatted as `#### CATEGORY`. `CATEGORY` should describe which part of the project is affected. Valid categories are: * Builder * Documentation * Hack * Packaging * Remote API * Runtime * Other (please use this category sparingly) Each change should be formatted as `BULLET DESCRIPTION`, given: * BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or upgrade, respectively. * DESCRIPTION: a concise description of the change that is relevant to the end-user, using the present tense. Changes should be described in terms of how they affect the user, for example "Add new feature X which allows Y", "Fix bug which caused X", "Increase performance of Y". EXAMPLES: ```markdown ## 0.3.6 (1995-12-25) #### Builder + 'docker build -t FOO .' applies the tag FOO to the newly built image #### Remote API - Fix a bug in the optional unix socket transport #### Runtime * Improve detection of kernel version ``` If you need a list of contributors between the last major release and the current bump branch, use something like: ```bash git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf ``` Obviously, you'll need to adjust version numbers as necessary. If you just need a count, add a simple `| wc -l`. ### 4. Change the contents of the VERSION file Before the big thing, you'll want to make successive release candidates and get people to test. The release candidate number `N` should be part of the version: ```bash export RC_VERSION=${VERSION}-rcN echo ${RC_VERSION#v} > VERSION ``` ### 5. Test the docs Make sure that your tree includes documentation for any modified or new features, syntax or semantic changes. 
To test locally: ```bash make docs ``` To make a shared test at https://beta-docs.docker.io: (You will need the `awsconfig` file added to the `docs/` dir) ```bash make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release ``` ### 6. Commit and create a pull request to the "release" branch ```bash git add VERSION CHANGELOG.md git commit -m "Bump version to $VERSION" git push $GITHUBUSER bump_$VERSION echo "https://github.com/$GITHUBUSER/docker/compare/docker:release/$BASE...$GITHUBUSER:bump_$VERSION?expand=1" ``` That last command will give you the proper link to visit to ensure that you open the PR against the "release" branch instead of accidentally against "master" (like so many brave souls before you already have). ### 7. Build release candidate rpms and debs ```bash docker build -t docker . docker run \ --rm -t --privileged \ -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ docker \ hack/make.sh binary build-deb build-rpm ``` ### 8. Publish release candidate binaries To run this you will need access to the release credentials. Get them from the Core maintainers. Replace "..." with the respective credentials: ```bash docker build -t docker . docker run \ -e AWS_S3_BUCKET=test.docker.com \ # static binaries are still pushed to s3 -e AWS_ACCESS_KEY="..." \ -e AWS_SECRET_KEY="..." \ -i -t --privileged \ docker \ hack/release.sh ``` It will run the test suite, build the binaries and upload to the specified bucket, so this is a good time to verify that you're running against **test**.docker.com. After the binaries are uploaded to test.docker.com and the packages are on apt.dockerproject.org and yum.dockerproject.org, make sure they get tested in both Ubuntu and Debian for any obvious installation issues or runtime issues. 
If everything looks good, it's time to create a git tag for this candidate: ```bash git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION git push origin $RC_VERSION ``` Announcing on multiple medias is the best way to get some help testing! An easy way to get some useful links for sharing: ```bash echo "Ubuntu/Debian: curl -sSL https://test.docker.com/ | sh" echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" echo "Windows 64bit client binary: https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe" echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe" ``` We recommend announcing the release candidate on: - IRC on #docker, #docker-dev, #docker-maintainers - In a comment on the pull request to notify subscribed people on GitHub - The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group - The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group - Any social media that can bring some attention to the release candidate ### 9. Iterate on successive release candidates Spend several days along with the community explicitly investing time and resources to try and break Docker in every possible way, documenting any findings pertinent to the release. This time should be spent testing and finding ways in which the release might have caused various features or upgrade environments to have issues, not coding. During this time, the release is in code freeze, and any additional code changes will be pushed out to the next release. It should include various levels of breaking Docker, beyond just using Docker by the book. 
Any issues found may still remain issues for this release, but they should be documented and give appropriate warnings. During this phase, the `bump_$VERSION` branch will keep evolving as you will produce new release candidates. The frequency of new candidates is up to the release manager: use your best judgement taking into account the severity of reported issues, testers availability, and time to scheduled release date. Each time you'll want to produce a new release candidate, you will start by adding commits to the branch, usually by cherry-picking from master: ```bash git cherry-pick -x -m0 ``` You want your "bump commit" (the one that updates the CHANGELOG and VERSION files) to remain on top, so you'll have to `git rebase -i` to bring it back up. Now that your bump commit is back on top, you will need to update the CHANGELOG file (if appropriate for this particular release candidate), and update the VERSION file to increment the RC number: ```bash export RC_VERSION=$VERSION-rcN echo $RC_VERSION > VERSION ``` You can now amend your last commit and update the bump branch: ```bash git commit --amend git push -f $GITHUBUSER bump_$VERSION ``` Repeat step 6 to tag the code, publish new binaries, announce availability, and get help testing. ### 10. Finalize the bump branch When you're happy with the quality of a release candidate, you can move on and create the real thing. You will first have to amend the "bump commit" to drop the release candidate suffix in the VERSION file: ```bash echo $VERSION > VERSION git add VERSION git commit --amend ``` You will then repeat step 6 to publish the binaries to test ### 11. Get 2 other maintainers to validate the pull request ### 12. Build final rpms and debs ```bash docker build -t docker . docker run \ --rm -t --privileged \ -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ docker \ hack/make.sh binary build-deb build-rpm ``` ### 13. 
Publish final binaries Once they're tested and reasonably believed to be working, run against get.docker.com: ```bash docker build -t docker . docker run \ -e AWS_S3_BUCKET=get.docker.com \ # static binaries are still pushed to s3 -e AWS_ACCESS_KEY="..." \ -e AWS_SECRET_KEY="..." \ -i -t --privileged \ docker \ hack/release.sh ``` ### 14. Apply tag and create release It's very important that we don't make the tag until after the official release is uploaded to get.docker.com! ```bash git tag -a $VERSION -m $VERSION bump_$VERSION git push origin $VERSION ``` Once the tag is pushed, go to GitHub and create a [new release](https://github.com/docker/docker/releases/new). If the tag is for an RC make sure you check `This is a pre-release` at the bottom of the form. Select the tag that you just pushed as the version and paste the changelog in the description of the release. You can see examples in this two links: https://github.com/docker/docker/releases/tag/v1.8.0 https://github.com/docker/docker/releases/tag/v1.8.0-rc3 ### 15. Go to github to merge the `bump_$VERSION` branch into release Don't forget to push that pretty blue button to delete the leftover branch afterwards! ### 16. Update the docs branch You will need to point the docs branch to the newly created release tag: ```bash git checkout origin/docs git reset --hard origin/$VERSION git push -f origin docs ``` The docs will appear on https://docs.docker.com/ (though there may be cached versions, so its worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/). For more information about documentation releases, see `docs/README.md`. Note that the new docs will not appear live on the site until the cache (a complex, distributed CDN system) is flushed. The `make docs-release` command will do this _if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run and you can check its progress with the CDN Cloudfront Chrome addon. ### 17. 
Create a new pull request to merge your bump commit back into master ```bash git checkout master git fetch git reset --hard origin/master git cherry-pick $VERSION git push $GITHUBUSER merge_release_$VERSION echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1" ``` Again, get two maintainers to validate, then merge, then push that pretty blue button to delete your branch. ### 18. Update the VERSION files Now that version X.Y.Z is out, time to start working on the next! Update the content of the `VERSION` file to be the next minor (incrementing Y) and add the `-dev` suffix. For example, after 1.5.0 release, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the making"). ### 19. Rejoice and Evangelize! Congratulations! You're done. Go forth and announce the glad tidings of the new release in `#docker`, `#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev), the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce), and on Twitter! docker-1.10.3/project/RELEASE-PROCESS.md000066400000000000000000000071761267010174400172440ustar00rootroot00000000000000# Docker Release Process This document describes how the Docker project is released. The Docker project release process targets the Engine, Compose, Kitematic, Machine, Swarm, Distribution, Notary and their underlying dependencies (libnetwork, libkv, etc...). Step-by-step technical details of the process are described in [RELEASE-CHECKLIST.md](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). ## Release cycle The Docker project follows a **time-based release cycle** and ships every nine weeks. A release cycle starts the same day the previous release cycle ends. The first six weeks of the cycle are dedicated to development and review. 
During this phase, new features and bugfixes submitted to any of the projects are **eligible** to be shipped as part of the next release. No changeset submitted during this period is however guaranteed to be merged for the current release cycle. ## The freeze period Six weeks after the beginning of the cycle, the codebase is officially frozen and the codebase reaches a state close to the final release. A Release Candidate (RC) gets created at the same time. The freeze period is used to find bugs and get feedback on the state of the RC before the release. During this freeze period, while the `master` branch will continue its normal development cycle, no new features are accepted into the RC. As bugs are fixed in `master` the release owner will selectively 'cherry-pick' critical ones to be included into the RC. As the RC changes, new ones are made available for the community to test and review. This period lasts for three weeks. ## How to maximize chances of being merged before the freeze date? First of all, there is never a guarantee that a specific changeset is going to be merged. However there are different actions to follow to maximize the chances for a changeset to be merged: - The team gives priority to review the PRs aligned with the Roadmap (usually defined by a ROADMAP.md file at the root of the repository). - The earlier a PR is opened, the more time the maintainers have to review. For example, if a PR is opened the day before the freeze date, it’s very unlikely that it will be merged for the release. - Constant communication with the maintainers (mailing-list, IRC, Github issues, etc.) allows to get early feedback on the design before getting into the implementation, which usually reduces the time needed to discuss a changeset. 
- If the code is commented, fully tested and by extension follows every single rules defined by the [CONTRIBUTING guide]( https://github.com/docker/docker/blob/master/CONTRIBUTING.md), this will help the maintainers by speeding up the review. ## The release At the end of the freeze (nine weeks after the start of the cycle), all the projects are released together. ``` Codebase Release Start of is frozen (end of the the Cycle (7th week) 9th week) +---------------------------------------+---------------------+ | | | | Development phase | Freeze phase | | | | +---------------------------------------+---------------------+ 6 weeks 3 weeks <---------------------------------------><--------------------> ``` ## Exceptions If a critical issue is found at the end of the freeze period and more time is needed to address it, the release will be pushed back. When a release gets pushed back, the next release cycle gets delayed as well. docker-1.10.3/project/REVIEWING.md000066400000000000000000000173321267010174400164020ustar00rootroot00000000000000Pull request reviewing process ============================== # Labels Labels are carefully picked to optimize for: - Readability: maintainers must immediately know the state of a PR - Filtering simplicity: different labels represent many different aspects of the reviewing work, and can even be targeted at different maintainers groups. A pull request should only be attributed labels documented in this section: other labels that may exist on the repository should apply to issues. 
## DCO labels * `dco/no`: automatically set by a bot when one of the commits lacks proper signature ## Status labels * `status/0-triage` * `status/1-design-review` * `status/2-code-review` * `status/3-docs-review` * `status/4-ready-to-merge` Special status labels: * `status/needs-attention`: calls for a collective discussion during a review session ## Specialty group labels Those labels are used to raise awareness of a particular specialty group, either because we need help in reviewing the PR, or because of the potential impact of the PR on their work: * `group/distribution` * `group/networking` * `group/security` * `group/windows` ## Impact labels (apply to merged pull requests) * `impact/api` * `impact/changelog` * `impact/cli` * `impact/deprecation` * `impact/distribution` * `impact/dockerfile` # Workflow An opened pull request can be in 1 of 5 distinct states, for each of which there is a corresponding label that needs to be applied. ## Triage - `status/0-triage` Maintainers are expected to triage new incoming pull requests by removing the `status/0-triage` label and adding the correct labels (e.g. `status/1-design-review`) before any other interaction with the PR. The starting label may potentially skip some steps depending on the kind of pull request: use your best judgement. Maintainers should perform an initial, high-level, overview of the pull request before moving it to the next appropriate stage: - Has DCO - Contains sufficient justification (e.g., usecases) for the proposed change - References the Github issue it fixes (if any) in the commit or the first Github comment Possible transitions from this state: * Close: e.g., unresponsive contributor without DCO * `status/1-design-review`: general case * `status/2-code-review`: e.g. trivial bugfix * `status/3-docs-review`: non-proposal documentation-only change ## Design review - `status/1-design-review` Maintainers are expected to comment on the design of the pull request. 
Review of documentation is expected only in the context of design validation, not for stylistic changes. Ideally, documentation should reflect the expected behavior of the code. No code review should take place in this step. There are no strict rules on the way a design is validated: we usually aim for a consensus, although a single maintainer approval is often sufficient for obviously reasonable changes. In general, strong disagreement expressed by any of the maintainers should not be taken lightly. Once design is approved, a maintainer should make sure to remove this label and add the next one. Possible transitions from this state: * Close: design rejected * `status/2-code-review`: general case * `status/3-docs-review`: proposals with only documentation changes ## Code review - `status/2-code-review` Maintainers are expected to review the code and ensure that it is good quality and in accordance with the documentation in the PR. New testcases are expected to be added. Ideally, those testcases should fail when the new code is absent, and pass when present. The testcases should strive to test as many variants, code paths, as possible to ensure maximum coverage. Changes to code must be reviewed and approved (LGTM'd) by a minimum of two code maintainers. When the author of a PR is a maintainer, he still needs the approval of two other maintainers. Once code is approved according to the rules of the subsystem, a maintainer should make sure to remove this label and add the next one. If documentation is absent but expected, maintainers should ask for documentation and move to status `status/3-docs-review` for docs maintainer to follow. 
Possible transitions from this state: * Close * `status/1-design-review`: new design concerns are raised * `status/3-docs-review`: general case * `status/4-ready-to-merge`: change not impacting documentation ## Docs review - `status/3-docs-review` Maintainers are expected to review the documentation in its bigger context, ensuring consistency, completeness, validity, and breadth of coverage across all existing and new documentation. They should ask for any editorial change that makes the documentation more consistent and easier to understand. Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs sub-project maintainers. If the docs change originates with a docs maintainer, only one additional LGTM is required (since we assume a docs maintainer approves of their own PR). Once documentation is approved (see below), a maintainer should make sure to remove this label and add the next one. Possible transitions from this state: * Close * `status/1-design-review`: new design concerns are raised * `status/2-code-review`: requires more code changes * `status/4-ready-to-merge`: general case ## Merge - `status/4-ready-to-merge` Maintainers are expected to merge this pull request as soon as possible. They can ask for a rebase or carry the pull request themselves. Possible transitions from this state: * Merge: general case * Close: carry PR After merging a pull request, the maintainer should consider applying one or multiple impact labels to ease future classification: * `impact/api` signifies the patch impacted the remote API * `impact/changelog` signifies the change is significant enough to make it in the changelog * `impact/cli` signifies the patch impacted a CLI command * `impact/dockerfile` signifies the patch impacted the Dockerfile syntax * `impact/deprecation` signifies the patch participates in deprecating an existing feature ## Close If a pull request is closed it is expected that sufficient justification will be provided. 
In particular, if there are alternative ways of achieving the same net result then those needs to be spelled out. If the pull request is trying to solve a use case that is not one that we (as a community) want to support then a justification for why should be provided. The number of maintainers it takes to decide and close a PR is deliberately left unspecified. We assume that the group of maintainers is bound by mutual trust and respect, and that opposition from any single maintainer should be taken into consideration. Similarly, we expect maintainers to justify their reasoning and to accept debating. # Escalation process Despite the previously described reviewing process, some PR might not show any progress for various reasons: - No strong opinion for or against the proposed patch - Debates about the proper way to solve the problem at hand - Lack of consensus - ... All these will eventually lead to stalled PR, where no apparent progress is made across several weeks, or even months. Maintainers should use their best judgement and apply the `status/needs-attention` label. It must be used sparingly, as each PR with such label will be discussed by a group of maintainers during a review session. The goal of that session is to agree on one of the following outcomes for the PR: * Close, explaining the rationale for not pursuing further * Continue, either by pushing the PR further in the workflow, or by deciding to carry the patch (ideally, a maintainer should be immediately assigned to make sure that the PR keeps continued attention) * Escalate to Solomon by formulating a few specific questions on which his answers will allow maintainers to decide. docker-1.10.3/project/TOOLS.md000066400000000000000000000055231267010174400157420ustar00rootroot00000000000000# Tools This page describes the tools we use and infrastructure that is in place for the Docker project. 
### CI The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our continuous integration server. Each Pull Request to Docker is tested by running the equivalent of `make all`. We chose Jenkins because we can host it ourselves and we run Docker in Docker to test. #### Leeroy Leeroy is a Go application which integrates Jenkins with GitHub pull requests. Leeroy uses [GitHub hooks](https://developer.github.com/v3/repos/hooks/) to listen for pull request notifications and starts jobs on your Jenkins server. Using the Jenkins [notification plugin][jnp], Leeroy updates the pull request using GitHub's [status API](https://developer.github.com/v3/repos/statuses/) with pending, success, failure, or error statuses. The leeroy repository is maintained at [github.com/docker/leeroy](https://github.com/docker/leeroy). #### GordonTheTurtle IRC Bot The GordonTheTurtle IRC Bot lives in the [#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel on Freenode. He is built in Go and is based off the project at [github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot). His main command is `!rebuild`, which rebuilds a given Pull Request for a repository. This command works by integrating with Leroy. He has a few other commands too, such as `!gif` or `!godoc`, but we are always looking for more fun commands to add. The gordon-bot repository is maintained at [github.com/jfrazelle/gordon-bot](https://github.com/jfrazelle/gordon-bot) ### NSQ We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project infrastucture. #### Hooks The hooks project, [github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks), is a small Go application that manages web hooks from github, hub.docker.com, or other third party services. It can be used for listening to github webhooks & pushing them to a queue, archiving hooks to rethinkdb for processing, and broadcasting hooks to various jobs. 
#### Docker Master Binaries One of the things queued from the Hooks are the building of the Master Binaries. This happens on every push to the master branch of Docker. The repository for this is maintained at [github.com/jfrazelle/docker-bb](https://github.com/jfrazelle/docker-bb). #### Docker Master Docs The master build of the docs gets queued from the Hooks as well. They are built using [github.com/jfrazelle/nsqexec](https://github.com/jfrazelle/nsqexec). #### Patch Parser Bot The bot, also named GordonTheTurtle, that labels and comments on Pull Requests listens on Hooks as well. He is capable of knowing if a Pull Request needs to be signed, or gofmt'd, as well as rebased. The repository for this is maintained at [github.com/jfrazelle/gh-patch-parser](https://github.com/jfrazelle/gh-patch-parser). docker-1.10.3/reference/000077500000000000000000000000001267010174400150035ustar00rootroot00000000000000docker-1.10.3/reference/reference.go000066400000000000000000000133301267010174400172700ustar00rootroot00000000000000package reference import ( "errors" "fmt" "strings" "github.com/docker/distribution/digest" distreference "github.com/docker/distribution/reference" "github.com/docker/docker/image/v1" ) const ( // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified DefaultTag = "latest" // DefaultHostname is the default built-in hostname DefaultHostname = "docker.io" // LegacyDefaultHostname is automatically converted to DefaultHostname LegacyDefaultHostname = "index.docker.io" // DefaultRepoPrefix is the prefix used for default repositories in default host DefaultRepoPrefix = "library/" ) // Named is an object with a full name type Named interface { // Name returns normalized repository name, like "ubuntu". Name() string // String returns full reference, like "ubuntu@sha256:abcdef..." 
String() string // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" FullName() string // Hostname returns hostname for the reference, like "docker.io" Hostname() string // RemoteName returns the repository component of the full name, like "library/ubuntu" RemoteName() string } // NamedTagged is an object including a name and tag. type NamedTagged interface { Named Tag() string } // Canonical reference is an object with a fully unique // name including a name with hostname and digest type Canonical interface { Named Digest() digest.Digest } // ParseNamed parses s and returns a syntactically valid reference implementing // the Named interface. The reference must have a name, otherwise an error is // returned. // If an error was encountered it is returned, along with a nil Reference. func ParseNamed(s string) (Named, error) { named, err := distreference.ParseNamed(s) if err != nil { return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s) } r, err := WithName(named.Name()) if err != nil { return nil, err } if canonical, isCanonical := named.(distreference.Canonical); isCanonical { return WithDigest(r, canonical.Digest()) } if tagged, isTagged := named.(distreference.NamedTagged); isTagged { return WithTag(r, tagged.Tag()) } return r, nil } // WithName returns a named object representing the given string. If the input // is invalid ErrReferenceInvalidFormat will be returned. func WithName(name string) (Named, error) { name, err := normalize(name) if err != nil { return nil, err } if err := validateName(name); err != nil { return nil, err } r, err := distreference.WithName(name) if err != nil { return nil, err } return &namedRef{r}, nil } // WithTag combines the name from "name" and the tag from "tag" to form a // reference incorporating both the name and the tag. 
func WithTag(name Named, tag string) (NamedTagged, error) { r, err := distreference.WithTag(name, tag) if err != nil { return nil, err } return &taggedRef{namedRef{r}}, nil } // WithDigest combines the name from "name" and the digest from "digest" to form // a reference incorporating both the name and the digest. func WithDigest(name Named, digest digest.Digest) (Canonical, error) { r, err := distreference.WithDigest(name, digest) if err != nil { return nil, err } return &canonicalRef{namedRef{r}}, nil } type namedRef struct { distreference.Named } type taggedRef struct { namedRef } type canonicalRef struct { namedRef } func (r *namedRef) FullName() string { hostname, remoteName := splitHostname(r.Name()) return hostname + "/" + remoteName } func (r *namedRef) Hostname() string { hostname, _ := splitHostname(r.Name()) return hostname } func (r *namedRef) RemoteName() string { _, remoteName := splitHostname(r.Name()) return remoteName } func (r *taggedRef) Tag() string { return r.namedRef.Named.(distreference.NamedTagged).Tag() } func (r *canonicalRef) Digest() digest.Digest { return r.namedRef.Named.(distreference.Canonical).Digest() } // WithDefaultTag adds a default tag to a reference if it only has a repo name. func WithDefaultTag(ref Named) Named { if IsNameOnly(ref) { ref, _ = WithTag(ref, DefaultTag) } return ref } // IsNameOnly returns true if reference only contains a repo name. func IsNameOnly(ref Named) bool { if _, ok := ref.(NamedTagged); ok { return false } if _, ok := ref.(Canonical); ok { return false } return true } // splitHostname splits a repository name to hostname and remotename string. // If no valid hostname is found, the default hostname is used. Repository name // needs to be already validated before. 
func splitHostname(name string) (hostname, remoteName string) { i := strings.IndexRune(name, '/') if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { hostname, remoteName = DefaultHostname, name } else { hostname, remoteName = name[:i], name[i+1:] } if hostname == LegacyDefaultHostname { hostname = DefaultHostname } if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { remoteName = DefaultRepoPrefix + remoteName } return } // normalize returns a repository name in its normalized form, meaning it // will not contain default hostname nor library/ prefix for official images. func normalize(name string) (string, error) { host, remoteName := splitHostname(name) if strings.ToLower(remoteName) != remoteName { return "", errors.New("invalid reference format: repository name must be lowercase") } if host == DefaultHostname { if strings.HasPrefix(remoteName, DefaultRepoPrefix) { return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil } return remoteName, nil } return name, nil } func validateName(name string) error { if err := v1.ValidateID(name); err == nil { return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) } return nil } docker-1.10.3/reference/reference_test.go000066400000000000000000000173011267010174400203310ustar00rootroot00000000000000package reference import ( "testing" "github.com/docker/distribution/digest" ) func TestValidateReferenceName(t *testing.T) { validRepoNames := []string{ "docker/docker", "library/debian", "debian", "docker.io/docker/docker", "docker.io/library/debian", "docker.io/debian", "index.docker.io/docker/docker", "index.docker.io/library/debian", "index.docker.io/debian", "127.0.0.1:5000/docker/docker", "127.0.0.1:5000/library/debian", "127.0.0.1:5000/debian", "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", } invalidRepoNames := []string{ "https://github.com/docker/docker", "docker/Docker", "-docker", "-docker/docker", 
"-docker.io/docker/docker", "docker///docker", "docker.io/docker/Docker", "docker.io/docker///docker", "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", } for _, name := range invalidRepoNames { _, err := ParseNamed(name) if err == nil { t.Fatalf("Expected invalid repo name for %q", name) } } for _, name := range validRepoNames { _, err := ParseNamed(name) if err != nil { t.Fatalf("Error parsing repo name %s, got: %q", name, err) } } } func TestValidateRemoteName(t *testing.T) { validRepositoryNames := []string{ // Sanity check. "docker/docker", // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", // Allow embedded hyphens. "docker-rules/docker", // Allow multiple hyphens as well. "docker---rules/docker", //Username doc and image name docker being tested. "doc/docker", // single character names are now allowed. "d/docker", "jess/t", // Consecutive underscores. "dock__er/docker", } for _, repositoryName := range validRepositoryNames { _, err := ParseNamed(repositoryName) if err != nil { t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) } } invalidRepositoryNames := []string{ // Disallow capital letters. "docker/Docker", // Only allow one slash. "docker///docker", // Disallow 64-character hexadecimal. "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", // Disallow leading and trailing hyphens in namespace. "-docker/docker", "docker-/docker", "-docker-/docker", // Don't allow underscores everywhere (as opposed to hyphens). "____/____", "_docker/_docker", // Disallow consecutive periods. "dock..er/docker", "dock_.er/docker", "dock-.er/docker", // No repository. 
"docker/", //namespace too long "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", } for _, repositoryName := range invalidRepositoryNames { if _, err := ParseNamed(repositoryName); err == nil { t.Errorf("Repository name should be invalid: %v", repositoryName) } } } func TestParseRepositoryInfo(t *testing.T) { type tcase struct { RemoteName, NormalizedName, FullName, AmbiguousName, Hostname string } tcases := []tcase{ { RemoteName: "fooo/bar", NormalizedName: "fooo/bar", FullName: "docker.io/fooo/bar", AmbiguousName: "index.docker.io/fooo/bar", Hostname: "docker.io", }, { RemoteName: "library/ubuntu", NormalizedName: "ubuntu", FullName: "docker.io/library/ubuntu", AmbiguousName: "library/ubuntu", Hostname: "docker.io", }, { RemoteName: "nonlibrary/ubuntu", NormalizedName: "nonlibrary/ubuntu", FullName: "docker.io/nonlibrary/ubuntu", AmbiguousName: "", Hostname: "docker.io", }, { RemoteName: "other/library", NormalizedName: "other/library", FullName: "docker.io/other/library", AmbiguousName: "", Hostname: "docker.io", }, { RemoteName: "private/moonbase", NormalizedName: "127.0.0.1:8000/private/moonbase", FullName: "127.0.0.1:8000/private/moonbase", AmbiguousName: "", Hostname: "127.0.0.1:8000", }, { RemoteName: "privatebase", NormalizedName: "127.0.0.1:8000/privatebase", FullName: "127.0.0.1:8000/privatebase", AmbiguousName: "", Hostname: "127.0.0.1:8000", }, { RemoteName: "private/moonbase", NormalizedName: "example.com/private/moonbase", FullName: "example.com/private/moonbase", AmbiguousName: "", Hostname: "example.com", }, { RemoteName: "privatebase", NormalizedName: "example.com/privatebase", FullName: "example.com/privatebase", AmbiguousName: "", Hostname: "example.com", }, { RemoteName: "private/moonbase", NormalizedName: 
"example.com:8000/private/moonbase", FullName: "example.com:8000/private/moonbase", AmbiguousName: "", Hostname: "example.com:8000", }, { RemoteName: "privatebasee", NormalizedName: "example.com:8000/privatebasee", FullName: "example.com:8000/privatebasee", AmbiguousName: "", Hostname: "example.com:8000", }, { RemoteName: "library/ubuntu-12.04-base", NormalizedName: "ubuntu-12.04-base", FullName: "docker.io/library/ubuntu-12.04-base", AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", Hostname: "docker.io", }, } for _, tcase := range tcases { refStrings := []string{tcase.NormalizedName, tcase.FullName} if tcase.AmbiguousName != "" { refStrings = append(refStrings, tcase.AmbiguousName) } var refs []Named for _, r := range refStrings { named, err := ParseNamed(r) if err != nil { t.Fatal(err) } refs = append(refs, named) named, err = WithName(r) if err != nil { t.Fatal(err) } refs = append(refs, named) } for _, r := range refs { if expected, actual := tcase.NormalizedName, r.Name(); expected != actual { t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) } if expected, actual := tcase.FullName, r.FullName(); expected != actual { t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) } if expected, actual := tcase.Hostname, r.Hostname(); expected != actual { t.Fatalf("Invalid hostname for %q. Expected %q, got %q", r, expected, actual) } if expected, actual := tcase.RemoteName, r.RemoteName(); expected != actual { t.Fatalf("Invalid remoteName for %q. 
Expected %q, got %q", r, expected, actual) } } } } func TestParseReferenceWithTagAndDigest(t *testing.T) { ref, err := ParseNamed("busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa") if err != nil { t.Fatal(err) } if _, isTagged := ref.(NamedTagged); isTagged { t.Fatalf("Reference from %q should not support tag", ref) } if _, isCanonical := ref.(Canonical); !isCanonical { t.Fatalf("Reference from %q should not support digest", ref) } if expected, actual := "busybox@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa", ref.String(); actual != expected { t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) } } func TestInvalidReferenceComponents(t *testing.T) { if _, err := WithName("-foo"); err == nil { t.Fatal("Expected WithName to detect invalid name") } ref, err := WithName("busybox") if err != nil { t.Fatal(err) } if _, err := WithTag(ref, "-foo"); err == nil { t.Fatal("Expected WithName to detect invalid tag") } if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { t.Fatal("Expected WithName to detect invalid digest") } } docker-1.10.3/reference/store.go000066400000000000000000000171411267010174400164720ustar00rootroot00000000000000package reference import ( "encoding/json" "errors" "fmt" "io/ioutil" "os" "path/filepath" "sort" "sync" "github.com/docker/distribution/digest" "github.com/docker/docker/image" ) var ( // ErrDoesNotExist is returned if a reference is not found in the // store. ErrDoesNotExist = errors.New("reference does not exist") ) // An Association is a tuple associating a reference with an image ID. type Association struct { Ref Named ImageID image.ID } // Store provides the set of methods which can operate on a tag store. 
type Store interface {
	// References returns all references pointing at the given image ID.
	References(id image.ID) []Named
	// ReferencesByName returns the reference/image-ID associations within
	// a repository name.
	ReferencesByName(ref Named) []Association
	// AddTag associates a tag reference with an image ID.
	AddTag(ref Named, id image.ID, force bool) error
	// AddDigest associates a digest reference with an image ID.
	AddDigest(ref Canonical, id image.ID, force bool) error
	// Delete removes a reference, reporting whether a deletion happened.
	Delete(ref Named) (bool, error)
	// Get looks up the image ID a reference points at.
	Get(ref Named) (image.ID, error)
}

// store is the file-backed implementation of Store.
type store struct {
	mu sync.RWMutex
	// jsonPath is the path to the file where the serialized tag data is
	// stored.
	jsonPath string
	// Repositories is a map of repositories, indexed by name.
	Repositories map[string]repository
	// referencesByIDCache is a cache of references indexed by ID, to speed
	// up References.
	referencesByIDCache map[image.ID]map[string]Named
}

// Repository maps tags to image IDs. The key is a stringified Reference,
// including the repository name.
type repository map[string]image.ID

// lexicalRefs sorts a []Named lexically by full reference string.
type lexicalRefs []Named

func (a lexicalRefs) Len() int           { return len(a) }
func (a lexicalRefs) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() }

// lexicalAssociations sorts a []Association lexically by reference string.
type lexicalAssociations []Association

func (a lexicalAssociations) Len() int           { return len(a) }
func (a lexicalAssociations) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() }

// NewReferenceStore creates a new reference store, tied to a file path where
// the set of references are serialized in JSON format.
func NewReferenceStore(jsonPath string) (Store, error) {
	abspath, err := filepath.Abs(jsonPath)
	if err != nil {
		return nil, err
	}

	store := &store{
		jsonPath:            abspath,
		Repositories:        make(map[string]repository),
		referencesByIDCache: make(map[image.ID]map[string]Named),
	}
	// Load the json file if it exists, otherwise create it.
	if err := store.reload(); os.IsNotExist(err) {
		if err := store.save(); err != nil {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}
	return store, nil
}

// AddTag adds a tag reference to the store.
// If force is set to true, existing
// references can be overwritten. This only works for tags, not digests.
func (store *store) AddTag(ref Named, id image.ID, force bool) error {
	// Tagging by digest is rejected; digest references go through AddDigest.
	if _, isCanonical := ref.(Canonical); isCanonical {
		return errors.New("refusing to create a tag with a digest reference")
	}
	return store.addReference(WithDefaultTag(ref), id, force)
}

// AddDigest adds a digest reference to the store.
func (store *store) AddDigest(ref Canonical, id image.ID, force bool) error {
	return store.addReference(ref, id, force)
}

// addReference records ref -> id in the repository map and the by-ID cache,
// then persists the store. force only permits overwriting tag references.
func (store *store) addReference(ref Named, id image.ID, force bool) error {
	// Reject a repository literally named after the canonical digest
	// algorithm (e.g. "sha256"), which would make references ambiguous.
	if ref.Name() == string(digest.Canonical) {
		return errors.New("refusing to create an ambiguous tag using digest algorithm as name")
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	repository, exists := store.Repositories[ref.Name()]
	if !exists || repository == nil {
		repository = make(map[string]image.ID)
		store.Repositories[ref.Name()] = repository
	}

	refStr := ref.String()
	oldID, exists := repository[refStr]

	if exists {
		// force only works for tags
		if digested, isDigest := ref.(Canonical); isDigest {
			return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String())
		}

		if !force {
			return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String())
		}

		// Drop the stale cache entry for the image the tag used to point at.
		if store.referencesByIDCache[oldID] != nil {
			delete(store.referencesByIDCache[oldID], refStr)
			if len(store.referencesByIDCache[oldID]) == 0 {
				delete(store.referencesByIDCache, oldID)
			}
		}
	}

	repository[refStr] = id
	if store.referencesByIDCache[id] == nil {
		store.referencesByIDCache[id] = make(map[string]Named)
	}
	store.referencesByIDCache[id][refStr] = ref

	return store.save()
}

// Delete deletes a reference from the store. It returns true if a deletion
// happened, or false otherwise.
func (store *store) Delete(ref Named) (bool, error) {
	// A bare name is normalized, so deleting "repo" deletes "repo:latest".
	ref = WithDefaultTag(ref)

	store.mu.Lock()
	defer store.mu.Unlock()

	repoName := ref.Name()

	repository, exists := store.Repositories[repoName]
	if !exists {
		return false, ErrDoesNotExist
	}

	refStr := ref.String()
	if id, exists := repository[refStr]; exists {
		delete(repository, refStr)
		// Drop the repository entry entirely once its last reference is gone.
		if len(repository) == 0 {
			delete(store.Repositories, repoName)
		}
		// Keep the by-ID cache consistent with the repository map.
		if store.referencesByIDCache[id] != nil {
			delete(store.referencesByIDCache[id], refStr)
			if len(store.referencesByIDCache[id]) == 0 {
				delete(store.referencesByIDCache, id)
			}
		}
		return true, store.save()
	}

	return false, ErrDoesNotExist
}

// Get retrieves an item from the store by reference (the default tag is
// applied first if ref carries no tag or digest).
func (store *store) Get(ref Named) (image.ID, error) {
	ref = WithDefaultTag(ref)

	store.mu.RLock()
	defer store.mu.RUnlock()

	repository, exists := store.Repositories[ref.Name()]
	if !exists || repository == nil {
		return "", ErrDoesNotExist
	}

	id, exists := repository[ref.String()]
	if !exists {
		return "", ErrDoesNotExist
	}

	return id, nil
}

// References returns a slice of references to the given image ID. The slice
// will be nil if there are no references to this image ID.
func (store *store) References(id image.ID) []Named {
	store.mu.RLock()
	defer store.mu.RUnlock()

	// Convert the internal map to an array for two reasons:
	// 1) We must not return a mutable internal structure to the caller.
	// 2) It would be ugly to expose the extraneous map keys to callers.
	var references []Named
	for _, ref := range store.referencesByIDCache[id] {
		references = append(references, ref)
	}

	// Sorting makes the result deterministic despite random map iteration order.
	sort.Sort(lexicalRefs(references))

	return references
}

// ReferencesByName returns the references for a given repository name.
// If there are no references known for this repository name,
// ReferencesByName returns nil.
func (store *store) ReferencesByName(ref Named) []Association { store.mu.RLock() defer store.mu.RUnlock() repository, exists := store.Repositories[ref.Name()] if !exists { return nil } var associations []Association for refStr, refID := range repository { ref, err := ParseNamed(refStr) if err != nil { // Should never happen return nil } associations = append(associations, Association{ Ref: ref, ImageID: refID, }) } sort.Sort(lexicalAssociations(associations)) return associations } func (store *store) save() error { // Store the json jsonData, err := json.Marshal(store) if err != nil { return err } tempFilePath := store.jsonPath + ".tmp" if err := ioutil.WriteFile(tempFilePath, jsonData, 0600); err != nil { return err } if err := os.Rename(tempFilePath, store.jsonPath); err != nil { return err } return nil } func (store *store) reload() error { f, err := os.Open(store.jsonPath) if err != nil { return err } defer f.Close() if err := json.NewDecoder(f).Decode(&store); err != nil { return err } for _, repository := range store.Repositories { for refStr, refID := range repository { ref, err := ParseNamed(refStr) if err != nil { // Should never happen continue } if store.referencesByIDCache[refID] == nil { store.referencesByIDCache[refID] = make(map[string]Named) } store.referencesByIDCache[refID][refStr] = ref } } return nil } docker-1.10.3/reference/store_test.go000066400000000000000000000275301267010174400175340ustar00rootroot00000000000000package reference import ( "bytes" "io/ioutil" "os" "path/filepath" "strings" "testing" "github.com/docker/docker/image" ) var ( saveLoadTestCases = map[string]image.ID{ "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", "registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", "registry:5000/foobar:master": 
"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", } marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) ) func TestLoad(t *testing.T) { jsonFile, err := ioutil.TempFile("", "tag-store-test") if err != nil { t.Fatalf("error creating temp file: %v", err) } defer os.RemoveAll(jsonFile.Name()) // Write canned json to the temp file _, err = jsonFile.Write(marshalledSaveLoadTestCases) if err != nil { t.Fatalf("error writing to temp file: %v", err) } jsonFile.Close() store, err := NewReferenceStore(jsonFile.Name()) if err != nil { t.Fatalf("error creating tag store: %v", err) } for refStr, expectedID := range saveLoadTestCases { ref, err := ParseNamed(refStr) if err != nil { t.Fatalf("failed to parse reference: %v", err) } id, err := store.Get(ref) if err != nil { t.Fatalf("could 
not find reference %s: %v", refStr, err) } if id != expectedID { t.Fatalf("expected %s - got %s", expectedID, id) } } } func TestSave(t *testing.T) { jsonFile, err := ioutil.TempFile("", "tag-store-test") if err != nil { t.Fatalf("error creating temp file: %v", err) } _, err = jsonFile.Write([]byte(`{}`)) jsonFile.Close() defer os.RemoveAll(jsonFile.Name()) store, err := NewReferenceStore(jsonFile.Name()) if err != nil { t.Fatalf("error creating tag store: %v", err) } for refStr, id := range saveLoadTestCases { ref, err := ParseNamed(refStr) if err != nil { t.Fatalf("failed to parse reference: %v", err) } if canonical, ok := ref.(Canonical); ok { err = store.AddDigest(canonical, id, false) if err != nil { t.Fatalf("could not add digest reference %s: %v", refStr, err) } } else { err = store.AddTag(ref, id, false) if err != nil { t.Fatalf("could not add reference %s: %v", refStr, err) } } } jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) if err != nil { t.Fatalf("could not read json file: %v", err) } if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, jsonBytes) } } func TestAddDeleteGet(t *testing.T) { jsonFile, err := ioutil.TempFile("", "tag-store-test") if err != nil { t.Fatalf("error creating temp file: %v", err) } _, err = jsonFile.Write([]byte(`{}`)) jsonFile.Close() defer os.RemoveAll(jsonFile.Name()) store, err := NewReferenceStore(jsonFile.Name()) if err != nil { t.Fatalf("error creating tag store: %v", err) } testImageID1 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") testImageID2 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") testImageID3 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") // Try adding a reference with no tag or digest nameOnly, err := WithName("username/repo") if err != nil { t.Fatalf("could not parse reference: 
%v", err) } if err = store.AddTag(nameOnly, testImageID1, false); err != nil { t.Fatalf("error adding to store: %v", err) } // Add a few references ref1, err := ParseNamed("username/repo1:latest") if err != nil { t.Fatalf("could not parse reference: %v", err) } if err = store.AddTag(ref1, testImageID1, false); err != nil { t.Fatalf("error adding to store: %v", err) } ref2, err := ParseNamed("username/repo1:old") if err != nil { t.Fatalf("could not parse reference: %v", err) } if err = store.AddTag(ref2, testImageID2, false); err != nil { t.Fatalf("error adding to store: %v", err) } ref3, err := ParseNamed("username/repo1:alias") if err != nil { t.Fatalf("could not parse reference: %v", err) } if err = store.AddTag(ref3, testImageID1, false); err != nil { t.Fatalf("error adding to store: %v", err) } ref4, err := ParseNamed("username/repo2:latest") if err != nil { t.Fatalf("could not parse reference: %v", err) } if err = store.AddTag(ref4, testImageID2, false); err != nil { t.Fatalf("error adding to store: %v", err) } ref5, err := ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") if err != nil { t.Fatalf("could not parse reference: %v", err) } if err = store.AddDigest(ref5.(Canonical), testImageID2, false); err != nil { t.Fatalf("error adding to store: %v", err) } // Attempt to overwrite with force == false if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") { t.Fatalf("did not get expected error on overwrite attempt - got %v", err) } // Repeat to overwrite with force == true if err = store.AddTag(ref4, testImageID3, true); err != nil { t.Fatalf("failed to force tag overwrite: %v", err) } // Check references so far id, err := store.Get(nameOnly) if err != nil { t.Fatalf("Get returned error: %v", err) } if id != testImageID1 { t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) } id, err = store.Get(ref1) if err != nil { 
t.Fatalf("Get returned error: %v", err) } if id != testImageID1 { t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) } id, err = store.Get(ref2) if err != nil { t.Fatalf("Get returned error: %v", err) } if id != testImageID2 { t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String()) } id, err = store.Get(ref3) if err != nil { t.Fatalf("Get returned error: %v", err) } if id != testImageID1 { t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) } id, err = store.Get(ref4) if err != nil { t.Fatalf("Get returned error: %v", err) } if id != testImageID3 { t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) } id, err = store.Get(ref5) if err != nil { t.Fatalf("Get returned error: %v", err) } if id != testImageID2 { t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) } // Get should return ErrDoesNotExist for a nonexistent repo nonExistRepo, err := ParseNamed("username/nonexistrepo:latest") if err != nil { t.Fatalf("could not parse reference: %v", err) } if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } // Get should return ErrDoesNotExist for a nonexistent tag nonExistTag, err := ParseNamed("username/repo1:nonexist") if err != nil { t.Fatalf("could not parse reference: %v", err) } if _, err = store.Get(nonExistTag); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } // Check References refs := store.References(testImageID1) if len(refs) != 3 { t.Fatal("unexpected number of references") } // Looking for the references in this order verifies that they are // returned lexically sorted. 
if refs[0].String() != ref3.String() { t.Fatalf("unexpected reference: %v", refs[0].String()) } if refs[1].String() != ref1.String() { t.Fatalf("unexpected reference: %v", refs[1].String()) } if refs[2].String() != nameOnly.String()+":latest" { t.Fatalf("unexpected reference: %v", refs[2].String()) } // Check ReferencesByName repoName, err := WithName("username/repo1") if err != nil { t.Fatalf("could not parse reference: %v", err) } associations := store.ReferencesByName(repoName) if len(associations) != 3 { t.Fatal("unexpected number of associations") } // Looking for the associations in this order verifies that they are // returned lexically sorted. if associations[0].Ref.String() != ref3.String() { t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) } if associations[0].ImageID != testImageID1 { t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) } if associations[1].Ref.String() != ref1.String() { t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) } if associations[1].ImageID != testImageID1 { t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) } if associations[2].Ref.String() != ref2.String() { t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) } if associations[2].ImageID != testImageID2 { t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) } // Delete should return ErrDoesNotExist for a nonexistent repo if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Delete") } // Delete should return ErrDoesNotExist for a nonexistent tag if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Delete") } // Delete a few references if deleted, err := store.Delete(ref1); err != nil || deleted != true { t.Fatal("Delete failed") } if _, err := store.Get(ref1); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } if deleted, err := store.Delete(ref5); err != nil || 
deleted != true { t.Fatal("Delete failed") } if _, err := store.Get(ref5); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { t.Fatal("Delete failed") } if _, err := store.Get(nameOnly); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } } func TestInvalidTags(t *testing.T) { tmpDir, err := ioutil.TempDir("", "tag-store-test") defer os.RemoveAll(tmpDir) store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) if err != nil { t.Fatalf("error creating tag store: %v", err) } id := image.ID("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") // sha256 as repo name ref, err := ParseNamed("sha256:abc") if err != nil { t.Fatal(err) } err = store.AddTag(ref, id, true) if err == nil { t.Fatalf("expected setting tag %q to fail", ref) } // setting digest as a tag ref, err = ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") if err != nil { t.Fatal(err) } err = store.AddTag(ref, id, true) if err == nil { t.Fatalf("expected setting digest %q to fail", ref) } } docker-1.10.3/registry/000077500000000000000000000000001267010174400147155ustar00rootroot00000000000000docker-1.10.3/registry/auth.go000066400000000000000000000211111267010174400162010ustar00rootroot00000000000000package registry import ( "encoding/json" "fmt" "io/ioutil" "net/http" "strings" "github.com/Sirupsen/logrus" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" ) // Login tries to register/login to the registry server. func Login(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string, error) { // Separates the v2 registry login logic from the v1 logic. 
if registryEndpoint.Version == APIVersion2 { return loginV2(authConfig, registryEndpoint, "" /* scope */) } return loginV1(authConfig, registryEndpoint) } // loginV1 tries to register/login to the v1 registry server. func loginV1(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string, error) { var ( status string respBody []byte err error respStatusCode = 0 serverAddress = authConfig.ServerAddress ) logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint) if serverAddress == "" { return "", fmt.Errorf("Server Error: Server Address not set.") } loginAgainstOfficialIndex := serverAddress == IndexServer // to avoid sending the server address to the server it should be removed before being marshaled authCopy := *authConfig authCopy.ServerAddress = "" jsonBody, err := json.Marshal(authCopy) if err != nil { return "", fmt.Errorf("Config Error: %s", err) } // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. b := strings.NewReader(string(jsonBody)) resp1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b) if err != nil { return "", fmt.Errorf("Server Error: %s", err) } defer resp1.Body.Close() respStatusCode = resp1.StatusCode respBody, err = ioutil.ReadAll(resp1.Body) if err != nil { return "", fmt.Errorf("Server Error: [%#v] %s", respStatusCode, err) } if respStatusCode == 201 { if loginAgainstOfficialIndex { status = "Account created. Please use the confirmation link we sent" + " to your e-mail to activate it." } else { // *TODO: Use registry configuration to determine what this says, if anything? status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." 
} } else if respStatusCode == 400 { if string(respBody) == "\"Username or email already exists\"" { req, err := http.NewRequest("GET", serverAddress+"users/", nil) req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := registryEndpoint.client.Do(req) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } if resp.StatusCode == 200 { return "Login Succeeded", nil } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else if resp.StatusCode == 403 { if loginAgainstOfficialIndex { return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") } // *TODO: Use registry configuration to determine what this says, if anything? return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) } else if resp.StatusCode == 500 { // Issue #14326 logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) return "", fmt.Errorf("Internal Server Error") } return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } return "", fmt.Errorf("Registration: %s", respBody) } else if respStatusCode == 401 { // This case would happen with private registries where /v1/users is // protected, so people can use `docker login` as an auth check. 
req, err := http.NewRequest("GET", serverAddress+"users/", nil) req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := registryEndpoint.client.Do(req) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } if resp.StatusCode == 200 { return "Login Succeeded", nil } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else { return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } } else { return "", fmt.Errorf("Unexpected status code [%d] : %s", respStatusCode, respBody) } return status, nil } // loginV2 tries to login to the v2 registry server. The given registry endpoint has been // pinged or setup with a list of authorization challenges. Each of these challenges are // tried until one of them succeeds. Currently supported challenge schemes are: // HTTP Basic Authorization // Token Authorization with a separate token issuing server // NOTE: the v2 logic does not attempt to create a user account if one doesn't exist. For // now, users should create their account through other means like directly from a web page // served by the v2 registry service provider. Whether this will be supported in the future // is to be determined. 
func loginV2(authConfig *types.AuthConfig, registryEndpoint *Endpoint, scope string) (string, error) { logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint) var ( err error allErrors []error ) for _, challenge := range registryEndpoint.AuthChallenges { params := make(map[string]string, len(challenge.Parameters)+1) for k, v := range challenge.Parameters { params[k] = v } params["scope"] = scope logrus.Debugf("trying %q auth challenge with params %v", challenge.Scheme, params) switch strings.ToLower(challenge.Scheme) { case "basic": err = tryV2BasicAuthLogin(authConfig, params, registryEndpoint) case "bearer": err = tryV2TokenAuthLogin(authConfig, params, registryEndpoint) default: // Unsupported challenge types are explicitly skipped. err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme) } if err == nil { return "Login Succeeded", nil } logrus.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err) allErrors = append(allErrors, err) } return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors) } func tryV2BasicAuthLogin(authConfig *types.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) if err != nil { return err } req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := registryEndpoint.client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("basic auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) } return nil } func tryV2TokenAuthLogin(authConfig *types.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint) if err != nil { return err } req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) if 
err != nil { return err } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) resp, err := registryEndpoint.client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("token auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) } return nil } // ResolveAuthConfig matches an auth configuration to a server address or a URL func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { configKey := GetAuthConfigKey(index) // First try the happy case if c, found := authConfigs[configKey]; found || index.Official { return c } convertToHostname := func(url string) string { stripped := url if strings.HasPrefix(url, "http://") { stripped = strings.Replace(url, "http://", "", 1) } else if strings.HasPrefix(url, "https://") { stripped = strings.Replace(url, "https://", "", 1) } nameParts := strings.SplitN(stripped, "/", 2) return nameParts[0] } // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing for registry, ac := range authConfigs { if configKey == convertToHostname(registry) { return ac } } // When all else fails, return an empty auth config return types.AuthConfig{} } docker-1.10.3/registry/auth_test.go000066400000000000000000000061631267010174400172520ustar00rootroot00000000000000package registry import ( "testing" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" ) func buildAuthConfigs() map[string]types.AuthConfig { authConfigs := map[string]types.AuthConfig{} for _, registry := range []string{"testIndex", IndexServer} { authConfigs[registry] = types.AuthConfig{ Username: "docker-user", Password: "docker-pass", Email: "docker@docker.io", } } return authConfigs } func TestSameAuthDataPostSave(t *testing.T) { authConfigs := buildAuthConfigs() 
authConfig := authConfigs["testIndex"] if authConfig.Username != "docker-user" { t.Fail() } if authConfig.Password != "docker-pass" { t.Fail() } if authConfig.Email != "docker@docker.io" { t.Fail() } if authConfig.Auth != "" { t.Fail() } } func TestResolveAuthConfigIndexServer(t *testing.T) { authConfigs := buildAuthConfigs() indexConfig := authConfigs[IndexServer] officialIndex := ®istrytypes.IndexInfo{ Official: true, } privateIndex := ®istrytypes.IndexInfo{ Official: false, } resolved := ResolveAuthConfig(authConfigs, officialIndex) assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") resolved = ResolveAuthConfig(authConfigs, privateIndex) assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") } func TestResolveAuthConfigFullURL(t *testing.T) { authConfigs := buildAuthConfigs() registryAuth := types.AuthConfig{ Username: "foo-user", Password: "foo-pass", Email: "foo@example.com", } localAuth := types.AuthConfig{ Username: "bar-user", Password: "bar-pass", Email: "bar@example.com", } officialAuth := types.AuthConfig{ Username: "baz-user", Password: "baz-pass", Email: "baz@example.com", } authConfigs[IndexServer] = officialAuth expectedAuths := map[string]types.AuthConfig{ "registry.example.com": registryAuth, "localhost:8000": localAuth, "registry.com": localAuth, } validRegistries := map[string][]string{ "registry.example.com": { "https://registry.example.com/v1/", "http://registry.example.com/v1/", "registry.example.com", "registry.example.com/v1/", }, "localhost:8000": { "https://localhost:8000/v1/", "http://localhost:8000/v1/", "localhost:8000", "localhost:8000/v1/", }, "registry.com": { "https://registry.com/v1/", "http://registry.com/v1/", "registry.com", "registry.com/v1/", }, } for configKey, registries := range validRegistries { configured, ok := expectedAuths[configKey] if !ok || configured.Email == "" { t.Fail() } index := ®istrytypes.IndexInfo{ Name: configKey, } for _, 
registry := range registries { authConfigs[registry] = configured resolved := ResolveAuthConfig(authConfigs, index) if resolved.Email != configured.Email { t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) } delete(authConfigs, registry) resolved = ResolveAuthConfig(authConfigs, index) if resolved.Email == configured.Email { t.Errorf("%s -> %q == %q\n", registry, resolved.Email, configured.Email) } } } } docker-1.10.3/registry/authchallenge.go000066400000000000000000000064411267010174400200550ustar00rootroot00000000000000package registry import ( "net/http" "strings" ) // Octet types from RFC 2616. type octetType byte // AuthorizationChallenge carries information // from a WWW-Authenticate response header. type AuthorizationChallenge struct { Scheme string Parameters map[string]string } var octetTypes [256]octetType const ( isToken octetType = 1 << iota isSpace ) func init() { // OCTET = // CHAR = // CTL = // CR = // LF = // SP = // HT = // <"> = // CRLF = CR LF // LWS = [CRLF] 1*( SP | HT ) // TEXT = // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT // token = 1* // qdtext = > for c := 0; c < 256; c++ { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { t |= isSpace } if isChar && !isCtl && !isSeparator { t |= isToken } octetTypes[c] = t } } func parseAuthHeader(header http.Header) []*AuthorizationChallenge { var challenges []*AuthorizationChallenge for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p}) } } return challenges } func parseValueAndParams(header string) (value string, params map[string]string) { params = make(map[string]string) value, s := expectToken(header) if value == "" { return } value = strings.ToLower(value) s = "," + skipSpace(s) for strings.HasPrefix(s, ",") { var pkey string pkey, s = expectToken(skipSpace(s[1:])) if pkey == "" { return } if !strings.HasPrefix(s, "=") { return } var pvalue string pvalue, s = expectTokenOrQuoted(s[1:]) if pvalue == "" { return } pkey = strings.ToLower(pkey) params[pkey] = pvalue s = skipSpace(s) } return } func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isSpace == 0 { break } } return s[i:] } func expectToken(s string) (token, rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isToken == 0 { break } } return s[:i], s[i:] } func expectTokenOrQuoted(s string) (value string, rest string) { if !strings.HasPrefix(s, "\"") { return expectToken(s) } s = s[1:] for i := 0; i < len(s); i++ { switch s[i] { case '"': return s[:i], s[i+1:] case '\\': p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true for i = i + i; i < len(s); i++ { b := s[i] switch { case escape: escape = false p[j] = b j++ case b == '\\': escape = true case b == '"': return string(p[:j]), s[i+1:] default: p[j] = b j++ } 
} return "", "" } } return "", "" } docker-1.10.3/registry/config.go000066400000000000000000000210461267010174400165140ustar00rootroot00000000000000package registry import ( "errors" "fmt" "net" "net/url" "strings" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/reference" registrytypes "github.com/docker/engine-api/types/registry" ) // Options holds command line options. type Options struct { Mirrors opts.ListOpts InsecureRegistries opts.ListOpts } const ( // DefaultNamespace is the default namespace DefaultNamespace = "docker.io" // DefaultRegistryVersionHeader is the name of the default HTTP header // that carries Registry version info DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" // IndexServer is the v1 registry server used for user auth + account creation IndexServer = DefaultV1Registry + "/v1/" // IndexName is the name of the index IndexName = "docker.io" // NotaryServer is the endpoint serving the Notary trust server NotaryServer = "https://notary.docker.io" // IndexServer = "https://registry-stage.hub.docker.com/v1/" ) var ( // ErrInvalidRepositoryName is an error returned if the repository name did // not have the correct form ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") emptyServiceConfig = NewServiceConfig(nil) // V2Only controls access to legacy registries. If it is set to true via the // command line flag the daemon will not attempt to contact v1 legacy registries V2Only = false ) // InstallFlags adds command-line options to the top-level flag parser for // the current process. 
func (options *Options) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { options.Mirrors = opts.NewListOpts(ValidateMirror) cmd.Var(&options.Mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror")) options.InsecureRegistries = opts.NewListOpts(ValidateIndexName) cmd.Var(&options.InsecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication")) cmd.BoolVar(&V2Only, []string{"-disable-legacy-registry"}, false, usageFn("Do not contact legacy registries")) } // NewServiceConfig returns a new instance of ServiceConfig func NewServiceConfig(options *Options) *registrytypes.ServiceConfig { if options == nil { options = &Options{ Mirrors: opts.NewListOpts(nil), InsecureRegistries: opts.NewListOpts(nil), } } // Localhost is by default considered as an insecure registry // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). // // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change // daemon flags on boot2docker? options.InsecureRegistries.Set("127.0.0.0/8") config := ®istrytypes.ServiceConfig{ InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), // Hack: Bypass setting the mirrors to IndexConfigs since they are going away // and Mirrors are only for the official registry anyways. Mirrors: options.Mirrors.GetAll(), } // Split --insecure-registry into CIDR and registry-specific settings. for _, r := range options.InsecureRegistries.GetAll() { // Check if CIDR was passed to --insecure-registry _, ipnet, err := net.ParseCIDR(r) if err == nil { // Valid CIDR. config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*registrytypes.NetIPNet)(ipnet)) } else { // Assume `host:port` if not CIDR. 
config.IndexConfigs[r] = ®istrytypes.IndexInfo{ Name: r, Mirrors: make([]string, 0), Secure: false, Official: false, } } } // Configure public registry. config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ Name: IndexName, Mirrors: config.Mirrors, Secure: true, Official: true, } return config } // isSecureIndex returns false if the provided indexName is part of the list of insecure registries // Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. // // The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. // If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered // insecure. // // indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name // or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained // in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element // of insecureRegistries. func isSecureIndex(config *registrytypes.ServiceConfig, indexName string) bool { // Check for configured index, first. This is needed in case isSecureIndex // is called from anything besides newIndexInfo, in order to honor per-index configurations. if index, ok := config.IndexConfigs[indexName]; ok { return index.Secure } host, _, err := net.SplitHostPort(indexName) if err != nil { // assume indexName is of the form `host` without the port and go on. host = indexName } addrs, err := lookupIP(host) if err != nil { ip := net.ParseIP(host) if ip != nil { addrs = []net.IP{ip} } // if ip == nil, then `host` is neither an IP nor it could be looked up, // either because the index is unreachable, or because the index is behind an HTTP proxy. // So, len(addrs) == 0 and we're not aborting. } // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
for _, addr := range addrs { for _, ipnet := range config.InsecureRegistryCIDRs { // check if the addr falls in the subnet if (*net.IPNet)(ipnet).Contains(addr) { return false } } } return true } // ValidateMirror validates an HTTP(S) registry mirror func ValidateMirror(val string) (string, error) { uri, err := url.Parse(val) if err != nil { return "", fmt.Errorf("%s is not a valid URI", val) } if uri.Scheme != "http" && uri.Scheme != "https" { return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) } if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") } return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil } // ValidateIndexName validates an index name. func ValidateIndexName(val string) (string, error) { if val == reference.LegacyDefaultHostname { val = reference.DefaultHostname } if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) } return val, nil } func validateNoSchema(reposName string) error { if strings.Contains(reposName, "://") { // It cannot contain a scheme! return ErrInvalidRepositoryName } return nil } // newIndexInfo returns IndexInfo configuration from indexName func newIndexInfo(config *registrytypes.ServiceConfig, indexName string) (*registrytypes.IndexInfo, error) { var err error indexName, err = ValidateIndexName(indexName) if err != nil { return nil, err } // Return any configured index info, first. if index, ok := config.IndexConfigs[indexName]; ok { return index, nil } // Construct a non-configured index info. index := ®istrytypes.IndexInfo{ Name: indexName, Mirrors: make([]string, 0), Official: false, } index.Secure = isSecureIndex(config, indexName) return index, nil } // GetAuthConfigKey special-cases using the full index address of the official // index as the AuthConfig key, and uses the (host)name[:port] for private indexes. 
func GetAuthConfigKey(index *registrytypes.IndexInfo) string { if index.Official { return IndexServer } return index.Name } // newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo func newRepositoryInfo(config *registrytypes.ServiceConfig, name reference.Named) (*RepositoryInfo, error) { index, err := newIndexInfo(config, name.Hostname()) if err != nil { return nil, err } official := !strings.ContainsRune(name.Name(), '/') return &RepositoryInfo{name, index, official}, nil } // ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but // lacks registry configuration. func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { return newRepositoryInfo(emptyServiceConfig, reposName) } // ParseSearchIndexInfo will use repository name to get back an indexInfo. func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { indexName, _ := splitReposSearchTerm(reposName) indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) if err != nil { return nil, err } return indexInfo, nil } docker-1.10.3/registry/config_test.go000066400000000000000000000021771267010174400175570ustar00rootroot00000000000000package registry import ( "testing" ) func TestValidateMirror(t *testing.T) { valid := []string{ "http://mirror-1.com", "https://mirror-1.com", "http://localhost", "https://localhost", "http://localhost:5000", "https://localhost:5000", "http://127.0.0.1", "https://127.0.0.1", "http://127.0.0.1:5000", "https://127.0.0.1:5000", } invalid := []string{ "!invalid!://%as%", "ftp://mirror-1.com", "http://mirror-1.com/", "http://mirror-1.com/?q=foo", "http://mirror-1.com/v1/", "http://mirror-1.com/v1/?q=foo", "http://mirror-1.com/v1/?q=foo#frag", "http://mirror-1.com?q=foo", "https://mirror-1.com#frag", "https://mirror-1.com/", "https://mirror-1.com/#frag", "https://mirror-1.com/v1/", "https://mirror-1.com/v1/#", "https://mirror-1.com?q", } for _, address := range valid { if ret, 
err := ValidateMirror(address); err != nil || ret == "" { t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) } } for _, address := range invalid { if ret, err := ValidateMirror(address); err == nil || ret != "" { t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) } } } docker-1.10.3/registry/config_unix.go000066400000000000000000000012661267010174400175610ustar00rootroot00000000000000// +build !windows package registry const ( // DefaultV1Registry is the URI of the default v1 registry DefaultV1Registry = "https://index.docker.io" // DefaultV2Registry is the URI of the default v2 registry DefaultV2Registry = "https://registry-1.docker.io" ) var ( // CertsDir is the directory where certificates are stored CertsDir = "/etc/docker/certs.d" ) // cleanPath is used to ensure that a directory name is valid on the target // platform. It will be passed in something *similar* to a URL such as // https:/index.docker.io/v1. Not all platforms support directory names // which contain those characters (such as : on Windows) func cleanPath(s string) string { return s } docker-1.10.3/registry/config_windows.go000066400000000000000000000017241267010174400202670ustar00rootroot00000000000000package registry import ( "os" "path/filepath" "strings" ) const ( // DefaultV1Registry is the URI of the default v1 registry DefaultV1Registry = "https://registry-win-tp3.docker.io" // DefaultV2Registry is the URI of the default (official) v2 registry. // This is the windows-specific endpoint. // // Currently it is a TEMPORARY link that allows Microsoft to continue // development of Docker Engine for Windows. DefaultV2Registry = "https://registry-win-tp3.docker.io" ) // CertsDir is the directory where certificates are stored var CertsDir = os.Getenv("programdata") + `\docker\certs.d` // cleanPath is used to ensure that a directory name is valid on the target // platform. It will be passed in something *similar* to a URL such as // https:\index.docker.io\v1. 
Not all platforms support directory names // which contain those characters (such as : on Windows) func cleanPath(s string) string { return filepath.FromSlash(strings.Replace(s, ":", "", -1)) } docker-1.10.3/registry/endpoint.go000066400000000000000000000204631267010174400170710ustar00rootroot00000000000000package registry import ( "crypto/tls" "encoding/json" "fmt" "io/ioutil" "net" "net/http" "net/url" "strings" "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" registrytypes "github.com/docker/engine-api/types/registry" ) // for mocking in unit tests var lookupIP = net.LookupIP // scans string for api version in the URL path. returns the trimmed address, if version found, string and API version. func scanForAPIVersion(address string) (string, APIVersion) { var ( chunks []string apiVersionStr string ) if strings.HasSuffix(address, "/") { address = address[:len(address)-1] } chunks = strings.Split(address, "/") apiVersionStr = chunks[len(chunks)-1] for k, v := range apiVersions { if apiVersionStr == v { address = strings.Join(chunks[:len(chunks)-1], "/") return address, k } } return address, APIVersionUnknown } // NewEndpoint parses the given address to return a registry endpoint. 
v can be used to // specify a specific endpoint version func NewEndpoint(index *registrytypes.IndexInfo, metaHeaders http.Header, v APIVersion) (*Endpoint, error) { tlsConfig, err := newTLSConfig(index.Name, index.Secure) if err != nil { return nil, err } endpoint, err := newEndpoint(GetAuthConfigKey(index), tlsConfig, metaHeaders) if err != nil { return nil, err } if v != APIVersionUnknown { endpoint.Version = v } if err := validateEndpoint(endpoint); err != nil { return nil, err } return endpoint, nil } func validateEndpoint(endpoint *Endpoint) error { logrus.Debugf("pinging registry endpoint %s", endpoint) // Try HTTPS ping to registry endpoint.URL.Scheme = "https" if _, err := endpoint.Ping(); err != nil { if endpoint.IsSecure { // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) } // If registry is insecure and HTTPS failed, fallback to HTTP. logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) endpoint.URL.Scheme = "http" var err2 error if _, err2 = endpoint.Ping(); err2 == nil { return nil } return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. 
HTTP attempt: %v", endpoint, err, err2) } return nil } func newEndpoint(address string, tlsConfig *tls.Config, metaHeaders http.Header) (*Endpoint, error) { var ( endpoint = new(Endpoint) trimmedAddress string err error ) if !strings.HasPrefix(address, "http") { address = "https://" + address } endpoint.IsSecure = (tlsConfig == nil || !tlsConfig.InsecureSkipVerify) trimmedAddress, endpoint.Version = scanForAPIVersion(address) if endpoint.URL, err = url.Parse(trimmedAddress); err != nil { return nil, err } // TODO(tiborvass): make sure a ConnectTimeout transport is used tr := NewTransport(tlsConfig) endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...)) return endpoint, nil } // Endpoint stores basic information about a registry endpoint. type Endpoint struct { client *http.Client URL *url.URL Version APIVersion IsSecure bool AuthChallenges []*AuthorizationChallenge URLBuilder *v2.URLBuilder } // Get the formatted URL for the root of this registry Endpoint func (e *Endpoint) String() string { return fmt.Sprintf("%s/v%d/", e.URL, e.Version) } // VersionString returns a formatted string of this // endpoint address using the given API Version. func (e *Endpoint) VersionString(version APIVersion) string { return fmt.Sprintf("%s/v%d/", e.URL, version) } // Path returns a formatted string for the URL // of this endpoint with the given path appended. func (e *Endpoint) Path(path string) string { return fmt.Sprintf("%s/v%d/%s", e.URL, e.Version, path) } // Ping pings the remote endpoint with v2 and v1 pings to determine the API // version. It returns a PingResult containing the discovered version. The // PingResult also indicates whether the registry is standalone or not. func (e *Endpoint) Ping() (PingResult, error) { // The ping logic to use is determined by the registry endpoint version. switch e.Version { case APIVersion1: return e.pingV1() case APIVersion2: return e.pingV2() } // APIVersionUnknown // We should try v2 first... 
e.Version = APIVersion2 regInfo, errV2 := e.pingV2() if errV2 == nil { return regInfo, nil } // ... then fallback to v1. e.Version = APIVersion1 regInfo, errV1 := e.pingV1() if errV1 == nil { return regInfo, nil } e.Version = APIVersionUnknown return PingResult{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1) } func (e *Endpoint) pingV1() (PingResult, error) { logrus.Debugf("attempting v1 ping for registry endpoint %s", e) if e.String() == IndexServer { // Skip the check, we know this one is valid // (and we never want to fallback to http in case of error) return PingResult{Standalone: false}, nil } req, err := http.NewRequest("GET", e.Path("_ping"), nil) if err != nil { return PingResult{Standalone: false}, err } resp, err := e.client.Do(req) if err != nil { return PingResult{Standalone: false}, err } defer resp.Body.Close() jsonString, err := ioutil.ReadAll(resp.Body) if err != nil { return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) } // If the header is absent, we assume true for compatibility with earlier // versions of the registry. default to true info := PingResult{ Standalone: true, } if err := json.Unmarshal(jsonString, &info); err != nil { logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) // don't stop here. Just assume sane defaults } if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { logrus.Debugf("Registry version header: '%s'", hdr) info.Version = hdr } logrus.Debugf("PingResult.Version: %q", info.Version) standalone := resp.Header.Get("X-Docker-Registry-Standalone") logrus.Debugf("Registry standalone header: '%s'", standalone) // Accepted values are "true" (case-insensitive) and "1". 
if strings.EqualFold(standalone, "true") || standalone == "1" { info.Standalone = true } else if len(standalone) > 0 { // there is a header set, and it is not "true" or "1", so assume fails info.Standalone = false } logrus.Debugf("PingResult.Standalone: %t", info.Standalone) return info, nil } func (e *Endpoint) pingV2() (PingResult, error) { logrus.Debugf("attempting v2 ping for registry endpoint %s", e) req, err := http.NewRequest("GET", e.Path(""), nil) if err != nil { return PingResult{}, err } resp, err := e.client.Do(req) if err != nil { return PingResult{}, err } defer resp.Body.Close() // The endpoint may have multiple supported versions. // Ensure it supports the v2 Registry API. var supportsV2 bool HeaderLoop: for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { for _, versionName := range strings.Fields(supportedVersions) { if versionName == "registry/2.0" { supportsV2 = true break HeaderLoop } } } if !supportsV2 { return PingResult{}, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e) } if resp.StatusCode == http.StatusOK { // It would seem that no authentication/authorization is required. // So we don't need to parse/add any authorization schemes. return PingResult{Standalone: true}, nil } if resp.StatusCode == http.StatusUnauthorized { // Parse the WWW-Authenticate Header and store the challenges // on this endpoint object. 
e.AuthChallenges = parseAuthHeader(resp.Header) return PingResult{}, nil } return PingResult{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode)) } docker-1.10.3/registry/endpoint_test.go000066400000000000000000000051201267010174400201210ustar00rootroot00000000000000package registry import ( "net/http" "net/http/httptest" "net/url" "testing" ) func TestEndpointParse(t *testing.T) { testData := []struct { str string expected string }{ {IndexServer, IndexServer}, {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, {"http://0.0.0.0:5000/v2/", "http://0.0.0.0:5000/v2/"}, {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v0/"}, {"0.0.0.0:5000", "https://0.0.0.0:5000/v0/"}, } for _, td := range testData { e, err := newEndpoint(td.str, nil, nil) if err != nil { t.Errorf("%q: %s", td.str, err) } if e == nil { t.Logf("something's fishy, endpoint for %q is nil", td.str) continue } if e.String() != td.expected { t.Errorf("expected %q, got %q", td.expected, e.String()) } } } // Ensure that a registry endpoint that responds with a 401 only is determined // to be a v1 registry unless it includes a valid v2 API header. func TestValidateEndpointAmbiguousAPIVersion(t *testing.T) { requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) w.WriteHeader(http.StatusUnauthorized) }) requireBasicAuthHandlerV2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // This mock server supports v2.0, v2.1, v42.0, and v100.0 w.Header().Add("Docker-Distribution-API-Version", "registry/100.0 registry/42.0") w.Header().Add("Docker-Distribution-API-Version", "registry/2.0 registry/2.1") requireBasicAuthHandler.ServeHTTP(w, r) }) // Make a test server which should validate as a v1 server. 
testServer := httptest.NewServer(requireBasicAuthHandler) defer testServer.Close() testServerURL, err := url.Parse(testServer.URL) if err != nil { t.Fatal(err) } testEndpoint := Endpoint{ URL: testServerURL, Version: APIVersionUnknown, client: HTTPClient(NewTransport(nil)), } if err = validateEndpoint(&testEndpoint); err != nil { t.Fatal(err) } if testEndpoint.Version != APIVersion1 { t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion1, testEndpoint.Version) } // Make a test server which should validate as a v2 server. testServer = httptest.NewServer(requireBasicAuthHandlerV2) defer testServer.Close() testServerURL, err = url.Parse(testServer.URL) if err != nil { t.Fatal(err) } testEndpoint.URL = testServerURL testEndpoint.Version = APIVersionUnknown if err = validateEndpoint(&testEndpoint); err != nil { t.Fatal(err) } if testEndpoint.Version != APIVersion2 { t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion2, testEndpoint.Version) } } docker-1.10.3/registry/reference.go000066400000000000000000000026531267010174400172100ustar00rootroot00000000000000package registry import ( "strings" "github.com/docker/distribution/digest" ) // Reference represents a tag or digest within a repository type Reference interface { // HasDigest returns whether the reference has a verifiable // content addressable reference which may be considered secure. 
HasDigest() bool // ImageName returns an image name for the given repository ImageName(string) string // Returns a string representation of the reference String() string } type tagReference struct { tag string } func (tr tagReference) HasDigest() bool { return false } func (tr tagReference) ImageName(repo string) string { return repo + ":" + tr.tag } func (tr tagReference) String() string { return tr.tag } type digestReference struct { digest digest.Digest } func (dr digestReference) HasDigest() bool { return true } func (dr digestReference) ImageName(repo string) string { return repo + "@" + dr.String() } func (dr digestReference) String() string { return dr.digest.String() } // ParseReference parses a reference into either a digest or tag reference func ParseReference(ref string) Reference { if strings.Contains(ref, ":") { dgst, err := digest.ParseDigest(ref) if err == nil { return digestReference{digest: dgst} } } return tagReference{tag: ref} } // DigestReference creates a digest reference using a digest func DigestReference(dgst digest.Digest) Reference { return digestReference{digest: dgst} } docker-1.10.3/registry/registry.go000066400000000000000000000167711267010174400171300ustar00rootroot00000000000000// Package registry contains client primitives to interact with a remote Docker registry. 
package registry import ( "crypto/tls" "crypto/x509" "errors" "fmt" "io/ioutil" "net" "net/http" "os" "path/filepath" "runtime" "strings" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/useragent" "github.com/docker/go-connections/tlsconfig" ) var ( // ErrAlreadyExists is an error returned if an image being pushed // already exists on the remote side ErrAlreadyExists = errors.New("Image already exists") ) // dockerUserAgent is the User-Agent the Docker client uses to identify itself. // It is populated on init(), comprising version information of different components. var dockerUserAgent string func init() { httpVersion := make([]useragent.VersionInfo, 0, 6) httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: dockerversion.Version}) httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: dockerversion.GitCommit}) if kernelVersion, err := kernel.GetKernelVersion(); err == nil { httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) } httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) dockerUserAgent = useragent.AppendVersions("", httpVersion...) 
if runtime.GOOS != "linux" { V2Only = true } } func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { // PreferredServerCipherSuites should have no effect tlsConfig := tlsconfig.ServerDefault tlsConfig.InsecureSkipVerify = !isSecure if isSecure && CertsDir != "" { hostDir := filepath.Join(CertsDir, cleanPath(hostname)) logrus.Debugf("hostDir: %s", hostDir) if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil { return nil, err } } return &tlsConfig, nil } func hasFile(files []os.FileInfo, name string) bool { for _, f := range files { if f.Name() == name { return true } } return false } // ReadCertsDirectory reads the directory for TLS certificates // including roots and certificate pairs and updates the // provided TLS configuration. func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { fs, err := ioutil.ReadDir(directory) if err != nil && !os.IsNotExist(err) { return err } for _, f := range fs { if strings.HasSuffix(f.Name(), ".crt") { if tlsConfig.RootCAs == nil { // TODO(dmcgowan): Copy system pool tlsConfig.RootCAs = x509.NewCertPool() } logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) if err != nil { return err } tlsConfig.RootCAs.AppendCertsFromPEM(data) } if strings.HasSuffix(f.Name(), ".cert") { certName := f.Name() keyName := certName[:len(certName)-5] + ".key" logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, keyName) { return fmt.Errorf("Missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt.", keyName, certName) } cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) if err != nil { return err } tlsConfig.Certificates = append(tlsConfig.Certificates, cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() certName := keyName[:len(keyName)-4] + ".cert" logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, certName) { return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) } } } return nil } // DockerHeaders returns request modifiers that ensure requests have // the User-Agent header set to dockerUserAgent and that metaHeaders // are added. func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier { modifiers := []transport.RequestModifier{ transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}), } if metaHeaders != nil { modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) } return modifiers } // HTTPClient returns a HTTP client structure which uses the given transport // and contains the necessary headers for redirected requests func HTTPClient(transport http.RoundTripper) *http.Client { return &http.Client{ Transport: transport, CheckRedirect: addRequiredHeadersToRedirectedRequests, } } func trustedLocation(req *http.Request) bool { var ( trusteds = []string{"docker.com", "docker.io"} hostname = strings.SplitN(req.Host, ":", 2)[0] ) if req.URL.Scheme != "https" { return false } for _, trusted := range trusteds { if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { return true } } return false } // addRequiredHeadersToRedirectedRequests adds the necessary redirection headers // for redirected requests func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { if via != nil && via[0] != nil { if trustedLocation(req) && trustedLocation(via[0]) { req.Header = 
via[0].Header return nil } for k, v := range via[0].Header { if k != "Authorization" { for _, vv := range v { req.Header.Add(k, vv) } } } } return nil } // ShouldV2Fallback returns true if this error is a reason to fall back to v1. func ShouldV2Fallback(err errcode.Error) bool { switch err.Code { case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: return true } return false } // ErrNoSupport is an error type used for errors indicating that an operation // is not supported. It encapsulates a more specific error. type ErrNoSupport struct{ Err error } func (e ErrNoSupport) Error() string { if e.Err == nil { return "not supported" } return e.Err.Error() } // ContinueOnError returns true if we should fallback to the next endpoint // as a result of this error. func ContinueOnError(err error) bool { switch v := err.(type) { case errcode.Errors: if len(v) == 0 { return true } return ContinueOnError(v[0]) case ErrNoSupport: return ContinueOnError(v.Err) case errcode.Error: return ShouldV2Fallback(v) case *client.UnexpectedHTTPResponseError: return true case error: return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) } // let's be nice and fallback if the error is a completely // unexpected one. // If new errors have to be handled in some way, please // add them to the switch above. return true } // NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the // default TLS configuration. 
func NewTransport(tlsConfig *tls.Config) *http.Transport { if tlsConfig == nil { var cfg = tlsconfig.ServerDefault tlsConfig = &cfg } return &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, }).Dial, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: tlsConfig, // TODO(dmcgowan): Call close idle connections when complete and use keep alive DisableKeepAlives: true, } } docker-1.10.3/registry/registry_mock_test.go000066400000000000000000000351431267010174400211720ustar00rootroot00000000000000package registry import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/httptest" "net/url" "strconv" "strings" "testing" "time" "github.com/docker/docker/opts" "github.com/docker/docker/reference" registrytypes "github.com/docker/engine-api/types/registry" "github.com/gorilla/mux" "github.com/Sirupsen/logrus" ) var ( testHTTPServer *httptest.Server testHTTPSServer *httptest.Server testLayers = map[string]map[string]string{ "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, "Tty":false,"OpenStdin":false,"StdinOnce":false, "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, "VolumesFrom":"","Entrypoint":null},"Size":424242}`, "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, "layer": string([]byte{ 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x74, 0x61, 
0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, }), }, "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, "Tty":false,"OpenStdin":false,"StdinOnce":false, "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, "VolumesFrom":"","Entrypoint":null},"Size":424242}`, "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, "layer": string([]byte{ 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 
0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, }), }, } testRepositories = map[string]map[string]string{ "foo42/bar": { "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", }, } mockHosts = map[string][]net.IP{ "": {net.ParseIP("0.0.0.0")}, "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, "example.com": {net.ParseIP("42.42.42.42")}, "other.com": {net.ParseIP("43.43.43.43")}, } ) func init() { r := mux.NewRouter() // /v1/ r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") 
r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") r.HandleFunc("/v1/search", handlerSearch).Methods("GET") // /v2/ r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") testHTTPServer = httptest.NewServer(handlerAccessLog(r)) testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) // override net.LookupIP lookupIP = func(host string) ([]net.IP, error) { if host == "127.0.0.1" { // I believe in future Go versions this will fail, so let's fix it later return net.LookupIP(host) } for h, addrs := range mockHosts { if host == h { return addrs, nil } for _, addr := range addrs { if addr.String() == host { return []net.IP{addr}, nil } } } return nil, errors.New("lookup: no such host") } } func handlerAccessLog(handler http.Handler) http.Handler { logHandler := func(w http.ResponseWriter, r *http.Request) { logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) } return http.HandlerFunc(logHandler) } func makeURL(req string) string { return testHTTPServer.URL + req } func makeHTTPSURL(req string) string { return testHTTPSServer.URL + req } func makeIndex(req string) *registrytypes.IndexInfo { index := ®istrytypes.IndexInfo{ Name: makeURL(req), } return index } func makeHTTPSIndex(req string) *registrytypes.IndexInfo { index := ®istrytypes.IndexInfo{ Name: makeHTTPSURL(req), } return index } func makePublicIndex() *registrytypes.IndexInfo { index := ®istrytypes.IndexInfo{ Name: IndexServer, Secure: true, Official: true, } return index } func makeServiceConfig(mirrors []string, insecureRegistries []string) *registrytypes.ServiceConfig { options := &Options{ Mirrors: opts.NewListOpts(nil), InsecureRegistries: opts.NewListOpts(nil), } if mirrors != nil { for _, mirror := range mirrors { options.Mirrors.Set(mirror) } } if insecureRegistries != nil { for _, insecureRegistries := range insecureRegistries { options.InsecureRegistries.Set(insecureRegistries) } } return NewServiceConfig(options) } func 
writeHeaders(w http.ResponseWriter) { h := w.Header() h.Add("Server", "docker-tests/mock") h.Add("Expires", "-1") h.Add("Content-Type", "application/json") h.Add("Pragma", "no-cache") h.Add("Cache-Control", "no-cache") h.Add("X-Docker-Registry-Version", "0.0.0") h.Add("X-Docker-Registry-Config", "mock") } func writeResponse(w http.ResponseWriter, message interface{}, code int) { writeHeaders(w) w.WriteHeader(code) body, err := json.Marshal(message) if err != nil { io.WriteString(w, err.Error()) return } w.Write(body) } func readJSON(r *http.Request, dest interface{}) error { body, err := ioutil.ReadAll(r.Body) if err != nil { return err } return json.Unmarshal(body, dest) } func apiError(w http.ResponseWriter, message string, code int) { body := map[string]string{ "error": message, } writeResponse(w, body, code) } func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { if a == b { return } if len(message) == 0 { message = fmt.Sprintf("%v != %v", a, b) } t.Fatal(message) } func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { if a != b { return } if len(message) == 0 { message = fmt.Sprintf("%v == %v", a, b) } t.Fatal(message) } // Similar to assertEqual, but does not stop test func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { if a == b { return } message := fmt.Sprintf("%v != %v", a, b) if len(messagePrefix) != 0 { message = messagePrefix + ": " + message } t.Error(message) } // Similar to assertNotEqual, but does not stop test func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { if a != b { return } message := fmt.Sprintf("%v == %v", a, b) if len(messagePrefix) != 0 { message = messagePrefix + ": " + message } t.Error(message) } func requiresAuth(w http.ResponseWriter, r *http.Request) bool { writeCookie := func() { value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} 
http.SetCookie(w, cookie) //FIXME(sam): this should be sent only on Index routes value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) w.Header().Add("X-Docker-Token", value) } if len(r.Cookies()) > 0 { writeCookie() return true } if len(r.Header.Get("Authorization")) > 0 { writeCookie() return true } w.Header().Add("WWW-Authenticate", "token") apiError(w, "Wrong auth", 401) return false } func handlerGetPing(w http.ResponseWriter, r *http.Request) { writeResponse(w, true, 200) } func handlerGetImage(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) layer, exists := testLayers[vars["image_id"]] if !exists { http.NotFound(w, r) return } writeHeaders(w) layerSize := len(layer["layer"]) w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) io.WriteString(w, layer[vars["action"]]) } func handlerPutImage(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) imageID := vars["image_id"] action := vars["action"] layer, exists := testLayers[imageID] if !exists { if action != "json" { http.NotFound(w, r) return } layer = make(map[string]string) testLayers[imageID] = layer } if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { apiError(w, "Wrong checksum", 400) return } } body, err := ioutil.ReadAll(r.Body) if err != nil { apiError(w, fmt.Sprintf("Error: %s", err), 500) return } layer[action] = string(body) writeResponse(w, true, 200) } func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } repositoryName, err := reference.WithName(mux.Vars(r)["repository"]) if err != nil { apiError(w, "Could not parse repository", 400) return } tags, exists := testRepositories[repositoryName.String()] if !exists { apiError(w, "Repository not found", 404) return } if r.Method == "DELETE" { delete(testRepositories, repositoryName.String()) writeResponse(w, 
true, 200) return } writeResponse(w, tags, 200) } func handlerGetTag(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) repositoryName, err := reference.WithName(vars["repository"]) if err != nil { apiError(w, "Could not parse repository", 400) return } tagName := vars["tag"] tags, exists := testRepositories[repositoryName.String()] if !exists { apiError(w, "Repository not found", 404) return } tag, exists := tags[tagName] if !exists { apiError(w, "Tag not found", 404) return } writeResponse(w, tag, 200) } func handlerPutTag(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) repositoryName, err := reference.WithName(vars["repository"]) if err != nil { apiError(w, "Could not parse repository", 400) return } tagName := vars["tag"] tags, exists := testRepositories[repositoryName.String()] if !exists { tags = make(map[string]string) testRepositories[repositoryName.String()] = tags } tagValue := "" readJSON(r, tagValue) tags[tagName] = tagValue writeResponse(w, true, 200) } func handlerUsers(w http.ResponseWriter, r *http.Request) { code := 200 if r.Method == "POST" { code = 201 } else if r.Method == "PUT" { code = 204 } writeResponse(w, "", code) } func handlerImages(w http.ResponseWriter, r *http.Request) { u, _ := url.Parse(testHTTPServer.URL) w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) if r.Method == "PUT" { if strings.HasSuffix(r.URL.Path, "images") { writeResponse(w, "", 204) return } writeResponse(w, "", 200) return } if r.Method == "DELETE" { writeResponse(w, "", 204) return } images := []map[string]string{} for imageID, layer := range testLayers { image := make(map[string]string) image["id"] = imageID image["checksum"] = layer["checksum_tarsum"] image["Tag"] = "latest" images = append(images, image) } writeResponse(w, images, 200) } func 
handlerAuth(w http.ResponseWriter, r *http.Request) { writeResponse(w, "OK", 200) } func handlerSearch(w http.ResponseWriter, r *http.Request) { result := ®istrytypes.SearchResults{ Query: "fakequery", NumResults: 1, Results: []registrytypes.SearchResult{{Name: "fakeimage", StarCount: 42}}, } writeResponse(w, result, 200) } func TestPing(t *testing.T) { res, err := http.Get(makeURL("/v1/_ping")) if err != nil { t.Fatal(err) } assertEqual(t, res.StatusCode, 200, "") assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", "This is not a Mocked Registry") } /* Uncomment this to test Mocked Registry locally with curl * WARNING: Don't push on the repos uncommented, it'll block the tests * func TestWait(t *testing.T) { logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) c := make(chan int) <-c } //*/ docker-1.10.3/registry/registry_test.go000066400000000000000000000634651267010174400201710ustar00rootroot00000000000000package registry import ( "fmt" "net/http" "net/http/httputil" "net/url" "strings" "testing" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" ) var ( token = []string{"fake-token"} ) const ( imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" REPO = "foo42/bar" ) func spawnTestRegistrySession(t *testing.T) *Session { authConfig := &types.AuthConfig{} endpoint, err := NewEndpoint(makeIndex("/v1/"), nil, APIVersionUnknown) if err != nil { t.Fatal(err) } var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(nil)...) 
client := HTTPClient(tr) r, err := NewSession(client, authConfig, endpoint) if err != nil { t.Fatal(err) } // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` // header while authenticating, in order to retrieve a token that can be later used to // perform authenticated actions. // // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. // // Because we know that the client's transport is an `*authTransport` we simply cast it, // in order to set the internal cached token to the fake token, and thus send that fake token // upon every subsequent requests. r.client.Transport.(*authTransport).token = token return r } func TestPingRegistryEndpoint(t *testing.T) { testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) { ep, err := NewEndpoint(index, nil, APIVersionUnknown) if err != nil { t.Fatal(err) } regInfo, err := ep.Ping() if err != nil { t.Fatal(err) } assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) } testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") testPing(makePublicIndex(), false, "Expected standalone to be false for public index") } func TestEndpoint(t *testing.T) { // Simple wrapper to fail test if err != nil expandEndpoint := func(index *registrytypes.IndexInfo) *Endpoint { endpoint, err := NewEndpoint(index, nil, APIVersionUnknown) if err != nil { t.Fatal(err) } return endpoint } assertInsecureIndex := func(index *registrytypes.IndexInfo) { index.Secure = true _, err := NewEndpoint(index, nil, APIVersionUnknown) assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure 
index") index.Secure = false } assertSecureIndex := func(index *registrytypes.IndexInfo) { index.Secure = true _, err := NewEndpoint(index, nil, APIVersionUnknown) assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") index.Secure = false } index := ®istrytypes.IndexInfo{} index.Name = makeURL("/v1/") endpoint := expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertInsecureIndex(index) index.Name = makeURL("") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertInsecureIndex(index) httpURL := makeURL("") index.Name = strings.SplitN(httpURL, "://", 2)[1] endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertInsecureIndex(index) index.Name = makeHTTPSURL("/v1/") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertSecureIndex(index) index.Name = makeHTTPSURL("") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertSecureIndex(index) httpsURL := makeHTTPSURL("") index.Name = strings.SplitN(httpsURL, "://", 2)[1] endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": 
Expected endpoint to be "+httpsURL+"/v1/") if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertSecureIndex(index) badEndpoints := []string{ "http://127.0.0.1/v1/", "https://127.0.0.1/v1/", "http://127.0.0.1", "https://127.0.0.1", "127.0.0.1", } for _, address := range badEndpoints { index.Name = address _, err := NewEndpoint(index, nil, APIVersionUnknown) checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") } } func TestGetRemoteHistory(t *testing.T) { r := spawnTestRegistrySession(t) hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/")) if err != nil { t.Fatal(err) } assertEqual(t, len(hist), 2, "Expected 2 images in history") assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "Unexpected second ancestry") } func TestLookupRemoteImage(t *testing.T) { r := spawnTestRegistrySession(t) err := r.LookupRemoteImage(imageID, makeURL("/v1/")) assertEqual(t, err, nil, "Expected error of remote lookup to nil") if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { t.Fatal("Expected error of remote lookup to not nil") } } func TestGetRemoteImageJSON(t *testing.T) { r := spawnTestRegistrySession(t) json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) if err != nil { t.Fatal(err) } assertEqual(t, size, int64(154), "Expected size 154") if len(json) <= 0 { t.Fatal("Expected non-empty json") } _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) if err == nil { t.Fatal("Expected image not found error") } } func TestGetRemoteImageLayer(t *testing.T) { r := spawnTestRegistrySession(t) data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) if err != nil { t.Fatal(err) } if data == nil { t.Fatal("Expected non-nil data result") } _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) if err == nil { t.Fatal("Expected image not found error") } } func 
TestGetRemoteTag(t *testing.T) { r := spawnTestRegistrySession(t) repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test") if err != nil { t.Fatal(err) } assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) bazRef, err := reference.ParseNamed("foo42/baz") if err != nil { t.Fatal(err) } _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo") if err != ErrRepoNotFound { t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") } } func TestGetRemoteTags(t *testing.T) { r := spawnTestRegistrySession(t) repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef) if err != nil { t.Fatal(err) } assertEqual(t, len(tags), 2, "Expected two tags") assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) bazRef, err := reference.ParseNamed("foo42/baz") if err != nil { t.Fatal(err) } _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef) if err != ErrRepoNotFound { t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") } } func TestGetRepositoryData(t *testing.T) { r := spawnTestRegistrySession(t) parsedURL, err := url.Parse(makeURL("/v1/")) if err != nil { t.Fatal(err) } host := "http://" + parsedURL.Host + "/v1/" repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } data, err := r.GetRepositoryData(repoRef) if err != nil { t.Fatal(err) } assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") assertEqual(t, len(data.Endpoints), 2, fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) assertEqual(t, data.Endpoints[0], host, fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) assertEqual(t, data.Endpoints[1], 
"http://test.example.com/v1/", fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) } func TestPushImageJSONRegistry(t *testing.T) { r := spawnTestRegistrySession(t) imgData := &ImgData{ ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", } err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/")) if err != nil { t.Fatal(err) } } func TestPushImageLayerRegistry(t *testing.T) { r := spawnTestRegistrySession(t) layer := strings.NewReader("") _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{}) if err != nil { t.Fatal(err) } } func TestParseRepositoryInfo(t *testing.T) { type staticRepositoryInfo struct { Index *registrytypes.IndexInfo RemoteName string CanonicalName string LocalName string Official bool } expectedRepoInfos := map[string]staticRepositoryInfo{ "fooo/bar": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "fooo/bar", LocalName: "fooo/bar", CanonicalName: "docker.io/fooo/bar", Official: false, }, "library/ubuntu": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu", LocalName: "ubuntu", CanonicalName: "docker.io/library/ubuntu", Official: true, }, "nonlibrary/ubuntu": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "nonlibrary/ubuntu", LocalName: "nonlibrary/ubuntu", CanonicalName: "docker.io/nonlibrary/ubuntu", Official: false, }, "ubuntu": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu", LocalName: "ubuntu", CanonicalName: "docker.io/library/ubuntu", Official: true, }, "other/library": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "other/library", LocalName: "other/library", CanonicalName: "docker.io/other/library", Official: false, }, 
"127.0.0.1:8000/private/moonbase": { Index: ®istrytypes.IndexInfo{ Name: "127.0.0.1:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "127.0.0.1:8000/private/moonbase", CanonicalName: "127.0.0.1:8000/private/moonbase", Official: false, }, "127.0.0.1:8000/privatebase": { Index: ®istrytypes.IndexInfo{ Name: "127.0.0.1:8000", Official: false, }, RemoteName: "privatebase", LocalName: "127.0.0.1:8000/privatebase", CanonicalName: "127.0.0.1:8000/privatebase", Official: false, }, "localhost:8000/private/moonbase": { Index: ®istrytypes.IndexInfo{ Name: "localhost:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "localhost:8000/private/moonbase", CanonicalName: "localhost:8000/private/moonbase", Official: false, }, "localhost:8000/privatebase": { Index: ®istrytypes.IndexInfo{ Name: "localhost:8000", Official: false, }, RemoteName: "privatebase", LocalName: "localhost:8000/privatebase", CanonicalName: "localhost:8000/privatebase", Official: false, }, "example.com/private/moonbase": { Index: ®istrytypes.IndexInfo{ Name: "example.com", Official: false, }, RemoteName: "private/moonbase", LocalName: "example.com/private/moonbase", CanonicalName: "example.com/private/moonbase", Official: false, }, "example.com/privatebase": { Index: ®istrytypes.IndexInfo{ Name: "example.com", Official: false, }, RemoteName: "privatebase", LocalName: "example.com/privatebase", CanonicalName: "example.com/privatebase", Official: false, }, "example.com:8000/private/moonbase": { Index: ®istrytypes.IndexInfo{ Name: "example.com:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "example.com:8000/private/moonbase", CanonicalName: "example.com:8000/private/moonbase", Official: false, }, "example.com:8000/privatebase": { Index: ®istrytypes.IndexInfo{ Name: "example.com:8000", Official: false, }, RemoteName: "privatebase", LocalName: "example.com:8000/privatebase", CanonicalName: "example.com:8000/privatebase", Official: false, }, 
"localhost/private/moonbase": { Index: ®istrytypes.IndexInfo{ Name: "localhost", Official: false, }, RemoteName: "private/moonbase", LocalName: "localhost/private/moonbase", CanonicalName: "localhost/private/moonbase", Official: false, }, "localhost/privatebase": { Index: ®istrytypes.IndexInfo{ Name: "localhost", Official: false, }, RemoteName: "privatebase", LocalName: "localhost/privatebase", CanonicalName: "localhost/privatebase", Official: false, }, IndexName + "/public/moonbase": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "public/moonbase", LocalName: "public/moonbase", CanonicalName: "docker.io/public/moonbase", Official: false, }, "index." + IndexName + "/public/moonbase": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "public/moonbase", LocalName: "public/moonbase", CanonicalName: "docker.io/public/moonbase", Official: false, }, "ubuntu-12.04-base": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "docker.io/library/ubuntu-12.04-base", Official: true, }, IndexName + "/ubuntu-12.04-base": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "docker.io/library/ubuntu-12.04-base", Official: true, }, "index." 
+ IndexName + "/ubuntu-12.04-base": { Index: ®istrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "docker.io/library/ubuntu-12.04-base", Official: true, }, } for reposName, expectedRepoInfo := range expectedRepoInfos { named, err := reference.WithName(reposName) if err != nil { t.Error(err) } repoInfo, err := ParseRepositoryInfo(named) if err != nil { t.Error(err) } else { checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) checkEqual(t, repoInfo.RemoteName(), expectedRepoInfo.RemoteName, reposName) checkEqual(t, repoInfo.Name(), expectedRepoInfo.LocalName, reposName) checkEqual(t, repoInfo.FullName(), expectedRepoInfo.CanonicalName, reposName) checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) } } } func TestNewIndexInfo(t *testing.T) { testIndexInfo := func(config *registrytypes.ServiceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) { for indexName, expectedIndexInfo := range expectedIndexInfos { index, err := newIndexInfo(config, indexName) if err != nil { t.Fatal(err) } else { checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") } } } config := NewServiceConfig(nil) noMirrors := []string{} expectedIndexInfos := map[string]*registrytypes.IndexInfo{ IndexName: { Name: IndexName, Official: true, Secure: true, Mirrors: noMirrors, }, "index." 
+ IndexName: { Name: IndexName, Official: true, Secure: true, Mirrors: noMirrors, }, "example.com": { Name: "example.com", Official: false, Secure: true, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} config = makeServiceConfig(publicMirrors, []string{"example.com"}) expectedIndexInfos = map[string]*registrytypes.IndexInfo{ IndexName: { Name: IndexName, Official: true, Secure: true, Mirrors: publicMirrors, }, "index." + IndexName: { Name: IndexName, Official: true, Secure: true, Mirrors: publicMirrors, }, "example.com": { Name: "example.com", Official: false, Secure: false, Mirrors: noMirrors, }, "example.com:5000": { Name: "example.com:5000", Official: false, Secure: true, Mirrors: noMirrors, }, "127.0.0.1": { Name: "127.0.0.1", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "other.com": { Name: "other.com", Official: false, Secure: true, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) expectedIndexInfos = map[string]*registrytypes.IndexInfo{ "example.com": { Name: "example.com", Official: false, Secure: false, Mirrors: noMirrors, }, "example.com:5000": { Name: "example.com:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1": { Name: "127.0.0.1", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "other.com": { Name: "other.com", Official: false, Secure: true, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) } func TestMirrorEndpointLookup(t *testing.T) { containsMirror := func(endpoints []APIEndpoint) bool { for _, pe := range endpoints { if pe.URL == 
"my.mirror" { return true } } return false } s := Service{Config: makeServiceConfig([]string{"my.mirror"}, nil)} imageName, err := reference.WithName(IndexName + "/test/image") if err != nil { t.Error(err) } pushAPIEndpoints, err := s.LookupPushEndpoints(imageName) if err != nil { t.Fatal(err) } if containsMirror(pushAPIEndpoints) { t.Fatal("Push endpoint should not contain mirror") } pullAPIEndpoints, err := s.LookupPullEndpoints(imageName) if err != nil { t.Fatal(err) } if !containsMirror(pullAPIEndpoints) { t.Fatal("Pull endpoint should contain mirror") } } func TestPushRegistryTag(t *testing.T) { r := spawnTestRegistrySession(t) repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/")) if err != nil { t.Fatal(err) } } func TestPushImageJSONIndex(t *testing.T) { r := spawnTestRegistrySession(t) imgData := []*ImgData{ { ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", }, { ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", }, } repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()}) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } } func TestSearchRepositories(t *testing.T) { r := spawnTestRegistrySession(t) results, err := r.SearchRepositories("fakequery") if err != nil { t.Fatal(err) } if results == nil { t.Fatal("Expected non-nil SearchResults object") } assertEqual(t, results.NumResults, 1, "Expected 1 search results") assertEqual(t, 
results.Query, "fakequery", "Expected 'fakequery' as query") assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") } func TestTrustedLocation(t *testing.T) { for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { req, _ := http.NewRequest("GET", url, nil) if trustedLocation(req) == true { t.Fatalf("'%s' shouldn't be detected as a trusted location", url) } } for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { req, _ := http.NewRequest("GET", url, nil) if trustedLocation(req) == false { t.Fatalf("'%s' should be detected as a trusted location", url) } } } func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { for _, urls := range [][]string{ {"http://docker.io", "https://docker.com"}, {"https://foo.docker.io:7777", "http://bar.docker.com"}, {"https://foo.docker.io", "https://example.com"}, } { reqFrom, _ := http.NewRequest("GET", urls[0], nil) reqFrom.Header.Add("Content-Type", "application/json") reqFrom.Header.Add("Authorization", "super_secret") reqTo, _ := http.NewRequest("GET", urls[1], nil) addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) if len(reqTo.Header) != 1 { t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) } if reqTo.Header.Get("Content-Type") != "application/json" { t.Fatal("'Content-Type' should be 'application/json'") } if reqTo.Header.Get("Authorization") != "" { t.Fatal("'Authorization' should be empty") } } for _, urls := range [][]string{ {"https://docker.io", "https://docker.com"}, {"https://foo.docker.io:7777", "https://bar.docker.com"}, } { reqFrom, _ := http.NewRequest("GET", urls[0], nil) reqFrom.Header.Add("Content-Type", "application/json") reqFrom.Header.Add("Authorization", "super_secret") reqTo, _ := http.NewRequest("GET", urls[1], nil) addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) if len(reqTo.Header) != 2 { 
t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) } if reqTo.Header.Get("Content-Type") != "application/json" { t.Fatal("'Content-Type' should be 'application/json'") } if reqTo.Header.Get("Authorization") != "super_secret" { t.Fatal("'Authorization' should be 'super_secret'") } } } func TestIsSecureIndex(t *testing.T) { tests := []struct { addr string insecureRegistries []string expected bool }{ {IndexName, nil, true}, {"example.com", []string{}, true}, {"example.com", []string{"example.com"}, false}, {"localhost", []string{"localhost:5000"}, false}, {"localhost:5000", []string{"localhost:5000"}, false}, {"localhost", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, {"localhost", nil, false}, {"localhost:5000", nil, false}, {"127.0.0.1", nil, false}, {"localhost", []string{"example.com"}, false}, {"127.0.0.1", []string{"example.com"}, false}, {"example.com", nil, true}, {"example.com", []string{"example.com"}, false}, {"127.0.0.1", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"example.com"}, false}, {"example.com:5000", []string{"42.42.0.0/16"}, false}, {"example.com", []string{"42.42.0.0/16"}, false}, {"example.com:5000", []string{"42.42.42.42/8"}, false}, {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, {"invalid.domain.com", []string{"invalid.domain.com"}, false}, {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, } for _, tt := range tests { config := makeServiceConfig(nil, tt.insecureRegistries) if sec := isSecureIndex(config, tt.addr); sec != tt.expected { t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) } } } type debugTransport struct { http.RoundTripper log func(...interface{}) } func (tr debugTransport) RoundTrip(req *http.Request) 
(*http.Response, error) { dump, err := httputil.DumpRequestOut(req, false) if err != nil { tr.log("could not dump request") } tr.log(string(dump)) resp, err := tr.RoundTripper.RoundTrip(req) if err != nil { return nil, err } dump, err = httputil.DumpResponse(resp, false) if err != nil { tr.log("could not dump response") } tr.log(string(dump)) return resp, err } docker-1.10.3/registry/service.go000066400000000000000000000125451267010174400167130ustar00rootroot00000000000000package registry import ( "crypto/tls" "net/http" "net/url" "strings" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" ) // Service is a registry service. It tracks configuration data such as a list // of mirrors. type Service struct { Config *registrytypes.ServiceConfig } // NewService returns a new instance of Service ready to be // installed into an engine. func NewService(options *Options) *Service { return &Service{ Config: NewServiceConfig(options), } } // Auth contacts the public registry with the provided credentials, // and returns OK if authentication was successful. // It can be used to verify the validity of a client's credentials. func (s *Service) Auth(authConfig *types.AuthConfig) (string, error) { addr := authConfig.ServerAddress if addr == "" { // Use the official registry address if not specified. 
addr = IndexServer } index, err := s.ResolveIndex(addr) if err != nil { return "", err } endpointVersion := APIVersion(APIVersionUnknown) if V2Only { // Override the endpoint to only attempt a v2 ping endpointVersion = APIVersion2 } endpoint, err := NewEndpoint(index, nil, endpointVersion) if err != nil { return "", err } authConfig.ServerAddress = endpoint.String() return Login(authConfig, endpoint) } // splitReposSearchTerm breaks a search term into an index name and remote name func splitReposSearchTerm(reposName string) (string, string) { nameParts := strings.SplitN(reposName, "/", 2) var indexName, remoteName string if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { // This is a Docker Index repos (ex: samalba/hipache or ubuntu) // 'docker.io' indexName = IndexName remoteName = reposName } else { indexName = nameParts[0] remoteName = nameParts[1] } return indexName, remoteName } // Search queries the public registry for images matching the specified // search terms, and returns the results. func (s *Service) Search(term string, authConfig *types.AuthConfig, headers map[string][]string) (*registrytypes.SearchResults, error) { if err := validateNoSchema(term); err != nil { return nil, err } indexName, remoteName := splitReposSearchTerm(term) index, err := newIndexInfo(s.Config, indexName) if err != nil { return nil, err } // *TODO: Search multiple indexes. 
endpoint, err := NewEndpoint(index, http.Header(headers), APIVersionUnknown) if err != nil { return nil, err } r, err := NewSession(endpoint.client, authConfig, endpoint) if err != nil { return nil, err } if index.Official { localName := remoteName if strings.HasPrefix(localName, "library/") { // If pull "library/foo", it's stored locally under "foo" localName = strings.SplitN(localName, "/", 2)[1] } return r.SearchRepositories(localName) } return r.SearchRepositories(remoteName) } // ResolveRepository splits a repository name into its components // and configuration of the associated registry. func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { return newRepositoryInfo(s.Config, name) } // ResolveIndex takes indexName and returns index info func (s *Service) ResolveIndex(name string) (*registrytypes.IndexInfo, error) { return newIndexInfo(s.Config, name) } // APIEndpoint represents a remote API endpoint type APIEndpoint struct { Mirror bool URL string Version APIVersion Official bool TrimHostname bool TLSConfig *tls.Config } // ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint func (e APIEndpoint) ToV1Endpoint(metaHeaders http.Header) (*Endpoint, error) { return newEndpoint(e.URL, e.TLSConfig, metaHeaders) } // TLSConfig constructs a client TLS configuration based on server defaults func (s *Service) TLSConfig(hostname string) (*tls.Config, error) { return newTLSConfig(hostname, isSecureIndex(s.Config, hostname)) } func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) { mirrorURL, err := url.Parse(mirror) if err != nil { return nil, err } return s.TLSConfig(mirrorURL.Host) } // LookupPullEndpoints creates an list of endpoints to try to pull from, in order of preference. // It gives preference to v2 endpoints over v1, mirrors over the actual // registry, and HTTPS over plain HTTP. 
func (s *Service) LookupPullEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { return s.lookupEndpoints(repoName) } // LookupPushEndpoints creates an list of endpoints to try to push to, in order of preference. // It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. // Mirrors are not included. func (s *Service) LookupPushEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { allEndpoints, err := s.lookupEndpoints(repoName) if err == nil { for _, endpoint := range allEndpoints { if !endpoint.Mirror { endpoints = append(endpoints, endpoint) } } } return endpoints, err } func (s *Service) lookupEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { endpoints, err = s.lookupV2Endpoints(repoName) if err != nil { return nil, err } if V2Only { return endpoints, nil } legacyEndpoints, err := s.lookupV1Endpoints(repoName) if err != nil { return nil, err } endpoints = append(endpoints, legacyEndpoints...) return endpoints, nil } docker-1.10.3/registry/service_v1.go000066400000000000000000000024721267010174400173170ustar00rootroot00000000000000package registry import ( "fmt" "strings" "github.com/docker/docker/reference" "github.com/docker/go-connections/tlsconfig" ) func (s *Service) lookupV1Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { var cfg = tlsconfig.ServerDefault tlsConfig := &cfg nameString := repoName.FullName() if strings.HasPrefix(nameString, DefaultNamespace+"/") { endpoints = append(endpoints, APIEndpoint{ URL: DefaultV1Registry, Version: APIVersion1, Official: true, TrimHostname: true, TLSConfig: tlsConfig, }) return endpoints, nil } slashIndex := strings.IndexRune(nameString, '/') if slashIndex <= 0 { return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString) } hostname := nameString[:slashIndex] tlsConfig, err = s.TLSConfig(hostname) if err != nil { return nil, err } endpoints = []APIEndpoint{ { URL: "https://" + hostname, Version: 
APIVersion1, TrimHostname: true, TLSConfig: tlsConfig, }, } if tlsConfig.InsecureSkipVerify { endpoints = append(endpoints, APIEndpoint{ // or this URL: "http://" + hostname, Version: APIVersion1, TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify TLSConfig: tlsConfig, }) } return endpoints, nil } docker-1.10.3/registry/service_v2.go000066400000000000000000000032711267010174400173160ustar00rootroot00000000000000package registry import ( "fmt" "strings" "github.com/docker/docker/reference" "github.com/docker/go-connections/tlsconfig" ) func (s *Service) lookupV2Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { var cfg = tlsconfig.ServerDefault tlsConfig := &cfg nameString := repoName.FullName() if strings.HasPrefix(nameString, DefaultNamespace+"/") { // v2 mirrors for _, mirror := range s.Config.Mirrors { mirrorTLSConfig, err := s.tlsConfigForMirror(mirror) if err != nil { return nil, err } endpoints = append(endpoints, APIEndpoint{ URL: mirror, // guess mirrors are v2 Version: APIVersion2, Mirror: true, TrimHostname: true, TLSConfig: mirrorTLSConfig, }) } // v2 registry endpoints = append(endpoints, APIEndpoint{ URL: DefaultV2Registry, Version: APIVersion2, Official: true, TrimHostname: true, TLSConfig: tlsConfig, }) return endpoints, nil } slashIndex := strings.IndexRune(nameString, '/') if slashIndex <= 0 { return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString) } hostname := nameString[:slashIndex] tlsConfig, err = s.TLSConfig(hostname) if err != nil { return nil, err } endpoints = []APIEndpoint{ { URL: "https://" + hostname, Version: APIVersion2, TrimHostname: true, TLSConfig: tlsConfig, }, } if tlsConfig.InsecureSkipVerify { endpoints = append(endpoints, APIEndpoint{ URL: "http://" + hostname, Version: APIVersion2, TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify TLSConfig: tlsConfig, }) } return endpoints, nil } 
docker-1.10.3/registry/session.go000066400000000000000000000601331267010174400167320ustar00rootroot00000000000000package registry import ( "bytes" "crypto/sha256" "errors" "sync" // this is required for some certificates _ "crypto/sha512" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/cookiejar" "net/url" "strconv" "strings" "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" ) var ( // ErrRepoNotFound is returned if the repository didn't exist on the // remote side ErrRepoNotFound = errors.New("Repository not found") ) // A Session is used to communicate with a V1 registry type Session struct { indexEndpoint *Endpoint client *http.Client // TODO(tiborvass): remove authConfig authConfig *types.AuthConfig id string } type authTransport struct { http.RoundTripper *types.AuthConfig alwaysSetBasicAuth bool token []string mu sync.Mutex // guards modReq modReq map[*http.Request]*http.Request // original -> modified } // AuthTransport handles the auth layer when communicating with a v1 registry (private or official) // // For private v1 registries, set alwaysSetBasicAuth to true. // // For the official v1 registry, if there isn't already an Authorization header in the request, // but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. // After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing // a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent // requests. // // If the server sends a token without the client having requested it, it is ignored. 
// // This RoundTripper also has a CancelRequest method important for correct timeout handling. func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { if base == nil { base = http.DefaultTransport } return &authTransport{ RoundTripper: base, AuthConfig: authConfig, alwaysSetBasicAuth: alwaysSetBasicAuth, modReq: make(map[*http.Request]*http.Request), } } // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { // shallow copy of the struct r2 := new(http.Request) *r2 = *r // deep copy of the Header r2.Header = make(http.Header, len(r.Header)) for k, s := range r.Header { r2.Header[k] = append([]string(nil), s...) } return r2 } // RoundTrip changes a HTTP request's headers to add the necessary // authentication-related headers func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { // Authorization should not be set on 302 redirect for untrusted locations. // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. // As the authorization logic is currently implemented in RoundTrip, // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. // This is safe as Docker doesn't set Referrer in other scenarios. 
if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { return tr.RoundTripper.RoundTrip(orig) } req := cloneRequest(orig) tr.mu.Lock() tr.modReq[orig] = req tr.mu.Unlock() if tr.alwaysSetBasicAuth { if tr.AuthConfig == nil { return nil, errors.New("unexpected error: empty auth config") } req.SetBasicAuth(tr.Username, tr.Password) return tr.RoundTripper.RoundTrip(req) } // Don't override if req.Header.Get("Authorization") == "" { if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { req.SetBasicAuth(tr.Username, tr.Password) } else if len(tr.token) > 0 { req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) } } resp, err := tr.RoundTripper.RoundTrip(req) if err != nil { delete(tr.modReq, orig) return nil, err } if len(resp.Header["X-Docker-Token"]) > 0 { tr.token = resp.Header["X-Docker-Token"] } resp.Body = &ioutils.OnEOFReader{ Rc: resp.Body, Fn: func() { tr.mu.Lock() delete(tr.modReq, orig) tr.mu.Unlock() }, } return resp, nil } // CancelRequest cancels an in-flight request by closing its connection. func (tr *authTransport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } if cr, ok := tr.RoundTripper.(canceler); ok { tr.mu.Lock() modReq := tr.modReq[req] delete(tr.modReq, req) tr.mu.Unlock() cr.CancelRequest(modReq) } } // NewSession creates a new session // TODO(tiborvass): remove authConfig param once registry client v2 is vendored func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *Endpoint) (r *Session, err error) { r = &Session{ authConfig: authConfig, client: client, indexEndpoint: endpoint, id: stringid.GenerateRandomID(), } var alwaysSetBasicAuth bool // If we're working with a standalone private registry over HTTPS, send Basic Auth headers // alongside all our requests. 
if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" { info, err := endpoint.Ping() if err != nil { return nil, err } if info.Standalone && authConfig != nil { logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) alwaysSetBasicAuth = true } } // Annotate the transport unconditionally so that v2 can // properly fallback on v1 when an image is not found. client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) jar, err := cookiejar.New(nil) if err != nil { return nil, errors.New("cookiejar.New is not supposed to return an error") } client.Jar = jar return r, nil } // ID returns this registry session's ID. func (r *Session) ID() string { return r.id } // GetRemoteHistory retrieves the history of a given image from the registry. // It returns a list of the parent's JSON files (including the requested image). func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { return nil, errcode.ErrorCodeUnauthorized.WithArgs() } return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } var history []string if err := json.NewDecoder(res.Body).Decode(&history); err != nil { return nil, fmt.Errorf("Error while reading the http response: %v", err) } logrus.Debugf("Ancestry: %v", history) return history, nil } // LookupRemoteImage checks if an image exists in the registry func (r *Session) LookupRemoteImage(imgID, registry string) error { res, err := r.client.Get(registry + "images/" + imgID + "/json") if err != nil { return err } res.Body.Close() if res.StatusCode != 200 { return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } return nil } // GetRemoteImageJSON retrieves 
an image's JSON metadata from the registry. func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { res, err := r.client.Get(registry + "images/" + imgID + "/json") if err != nil { return nil, -1, fmt.Errorf("Failed to download json: %s", err) } defer res.Body.Close() if res.StatusCode != 200 { return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } // if the size header is not present, then set it to '-1' imageSize := int64(-1) if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { imageSize, err = strconv.ParseInt(hdr, 10, 64) if err != nil { return nil, -1, err } } jsonString, err := ioutil.ReadAll(res.Body) if err != nil { return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) } return jsonString, imageSize, nil } // GetRemoteImageLayer retrieves an image layer from the registry func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { var ( statusCode = 0 res *http.Response err error imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) ) req, err := http.NewRequest("GET", imageURL, nil) if err != nil { return nil, fmt.Errorf("Error while getting from the server: %v", err) } statusCode = 0 res, err = r.client.Do(req) if err != nil { logrus.Debugf("Error contacting registry %s: %v", registry, err) if res != nil { if res.Body != nil { res.Body.Close() } statusCode = res.StatusCode } return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", statusCode, imgID) } if res.StatusCode != 200 { res.Body.Close() return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", res.StatusCode, imgID) } if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { logrus.Debugf("server supports resume") return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil } logrus.Debugf("server doesn't support resume") return res.Body, nil } // 
GetRemoteTag retrieves the tag named in the askedTag argument from the given // repository. It queries each of the registries supplied in the registries // argument, and returns data from the first one that answers the query // successfully. func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { repository := repositoryRef.RemoteName() if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on // the "library" namespace repository = "library/" + repository } for _, host := range registries { endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) res, err := r.client.Get(endpoint) if err != nil { return "", err } logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) defer res.Body.Close() if res.StatusCode == 404 { return "", ErrRepoNotFound } if res.StatusCode != 200 { continue } var tagID string if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { return "", err } return tagID, nil } return "", fmt.Errorf("Could not reach any registry endpoint") } // GetRemoteTags retrieves all tags from the given repository. It queries each // of the registries supplied in the registries argument, and returns data from // the first one that answers the query successfully. It returns a map with // tag names as the keys and image IDs as the values. 
func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { repository := repositoryRef.RemoteName() if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on // the "library" namespace repository = "library/" + repository } for _, host := range registries { endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) res, err := r.client.Get(endpoint) if err != nil { return nil, err } logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) defer res.Body.Close() if res.StatusCode == 404 { return nil, ErrRepoNotFound } if res.StatusCode != 200 { continue } result := make(map[string]string) if err := json.NewDecoder(res.Body).Decode(&result); err != nil { return nil, err } return result, nil } return nil, fmt.Errorf("Could not reach any registry endpoint") } func buildEndpointsList(headers []string, indexEp string) ([]string, error) { var endpoints []string parsedURL, err := url.Parse(indexEp) if err != nil { return nil, err } var urlScheme = parsedURL.Scheme // The registry's URL scheme has to match the Index' for _, ep := range headers { epList := strings.Split(ep, ",") for _, epListElement := range epList { endpoints = append( endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) } } return endpoints, nil } // GetRepositoryData returns lists of images and endpoints for the repository func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), name.RemoteName()) logrus.Debugf("[registry] Calling GET %s", repositoryTarget) req, err := http.NewRequest("GET", repositoryTarget, nil) if err != nil { return nil, err } // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests req.Header.Set("X-Docker-Token", "true") res, err := 
r.client.Do(req) if err != nil { // check if the error is because of i/o timeout // and return a non-obtuse error message for users // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" // was a top search on the docker user forum if isTimeout(err) { return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) } return nil, fmt.Errorf("Error while pulling image: %v", err) } defer res.Body.Close() if res.StatusCode == 401 { return nil, errcode.ErrorCodeUnauthorized.WithArgs() } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. if res.StatusCode == 404 { return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) } else if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) } var endpoints []string if res.Header.Get("X-Docker-Endpoints") != "" { endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) if err != nil { return nil, err } } else { // Assume the endpoint is on the same host endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) } remoteChecksums := []*ImgData{} if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { return nil, err } // Forge a better object from the retrieved data imgsData := make(map[string]*ImgData, len(remoteChecksums)) for _, elem := range remoteChecksums { imgsData[elem.ID] = elem } return &RepositoryData{ ImgList: imgsData, Endpoints: endpoints, }, nil } // PushImageChecksumRegistry uploads checksums for an image func (r *Session) 
PushImageChecksumRegistry(imgData *ImgData, registry string) error { u := registry + "images/" + imgData.ID + "/checksum" logrus.Debugf("[registry] Calling PUT %s", u) req, err := http.NewRequest("PUT", u, nil) if err != nil { return err } req.Header.Set("X-Docker-Checksum", imgData.Checksum) req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) res, err := r.client.Do(req) if err != nil { return fmt.Errorf("Failed to upload metadata: %v", err) } defer res.Body.Close() if len(res.Cookies()) > 0 { r.client.Jar.SetCookies(req.URL, res.Cookies()) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { errBody = []byte(err.Error()) } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) } return nil } // PushImageJSONRegistry pushes JSON metadata for a local image to the registry func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { u := registry + "images/" + imgData.ID + "/json" logrus.Debugf("[registry] Calling PUT %s", u) req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) if err != nil { return err } req.Header.Add("Content-type", "application/json") res, err := r.client.Do(req) if err != nil { return fmt.Errorf("Failed to upload metadata: %s", err) } defer res.Body.Close() if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when 
trying to parse response body: %s", res.StatusCode, err), res) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { errBody = []byte(err.Error()) } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) } return nil } // PushImageLayerRegistry sends the checksum of an image layer to the registry func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { u := registry + "images/" + imgID + "/layer" logrus.Debugf("[registry] Calling PUT %s", u) tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) if err != nil { return "", "", err } h := sha256.New() h.Write(jsonRaw) h.Write([]byte{'\n'}) checksumLayer := io.TeeReader(tarsumLayer, h) req, err := http.NewRequest("PUT", u, checksumLayer) if err != nil { return "", "", err } req.Header.Add("Content-Type", "application/octet-stream") req.ContentLength = -1 req.TransferEncoding = []string{"chunked"} res, err := r.client.Do(req) if err != nil { return "", "", fmt.Errorf("Failed to upload layer: %v", err) } if rc, ok := layer.(io.Closer); ok { if err := rc.Close(); err != nil { return "", "", err } } defer res.Body.Close() if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) } checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) return tarsumLayer.Sum(jsonRaw), checksumPayload, nil } // PushRegistryTag pushes a tag on the registry. 
// Remote has the format '/ func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { // "jsonify" the string revision = "\"" + revision + "\"" path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) if err != nil { return err } req.Header.Add("Content-type", "application/json") req.ContentLength = int64(len(revision)) res, err := r.client.Do(req) if err != nil { return err } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) } return nil } // PushImageJSONIndex uploads an image list to the repository func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { cleanImgList := []*ImgData{} if validate { for _, elem := range imgList { if elem.Checksum != "" { cleanImgList = append(cleanImgList, elem) } } } else { cleanImgList = imgList } imgListJSON, err := json.Marshal(cleanImgList) if err != nil { return nil, err } var suffix string if validate { suffix = "images" } u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote.RemoteName(), suffix) logrus.Debugf("[registry] PUT %s", u) logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) headers := map[string][]string{ "Content-type": {"application/json"}, // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests "X-Docker-Token": {"true"}, } if validate { headers["X-Docker-Endpoints"] = regs } // Redirect if necessary var res *http.Response for { if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { return nil, err } if !shouldRedirect(res) { break } res.Body.Close() u = res.Header.Get("Location") logrus.Debugf("Redirected to %s", 
u) } defer res.Body.Close() if res.StatusCode == 401 { return nil, errcode.ErrorCodeUnauthorized.WithArgs() } var tokens, endpoints []string if !validate { if res.StatusCode != 200 && res.StatusCode != 201 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) } tokens = res.Header["X-Docker-Token"] logrus.Debugf("Auth token: %v", tokens) if res.Header.Get("X-Docker-Endpoints") == "" { return nil, fmt.Errorf("Index response didn't contain any endpoints") } endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) if err != nil { return nil, err } } else { if res.StatusCode != 204 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) } } return &RepositoryData{ Endpoints: endpoints, }, nil } func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { req, err := http.NewRequest("PUT", u, bytes.NewReader(body)) if err != nil { return nil, err } req.ContentLength = int64(len(body)) for k, v := range headers { req.Header[k] = v } response, err := r.client.Do(req) if err != nil { return nil, err } return response, nil } func shouldRedirect(response *http.Response) bool { return response.StatusCode >= 300 && response.StatusCode < 400 } // SearchRepositories performs a search against the remote repository func (r *Session) SearchRepositories(term string) (*registrytypes.SearchResults, error) { logrus.Debugf("Index server: %s", r.indexEndpoint) u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) req, err := 
http.NewRequest("GET", u, nil) if err != nil { return nil, fmt.Errorf("Error while getting from the server: %v", err) } // Have the AuthTransport send authentication, when logged in. req.Header.Set("X-Docker-Token", "true") res, err := r.client.Do(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) } result := new(registrytypes.SearchResults) return result, json.NewDecoder(res.Body).Decode(result) } // GetAuthConfig returns the authentication settings for a session // TODO(tiborvass): remove this once registry client v2 is vendored func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { password := "" if withPasswd { password = r.authConfig.Password } return &types.AuthConfig{ Username: r.authConfig.Username, Password: password, Email: r.authConfig.Email, } } func isTimeout(err error) bool { type timeout interface { Timeout() bool } e := err switch urlErr := err.(type) { case *url.Error: e = urlErr.Err } t, ok := e.(timeout) return ok && t.Timeout() } docker-1.10.3/registry/token.go000066400000000000000000000033611267010174400163670ustar00rootroot00000000000000package registry import ( "encoding/json" "errors" "fmt" "net/http" "net/url" "strings" ) type tokenResponse struct { Token string `json:"token"` } func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (string, error) { realm, ok := params["realm"] if !ok { return "", errors.New("no realm specified for token auth challenge") } realmURL, err := url.Parse(realm) if err != nil { return "", fmt.Errorf("invalid token auth challenge realm: %s", err) } if realmURL.Scheme == "" { if registryEndpoint.IsSecure { realmURL.Scheme = "https" } else { realmURL.Scheme = "http" } } req, err := http.NewRequest("GET", realmURL.String(), nil) if err != nil { return "", err } reqParams := req.URL.Query() service := params["service"] 
scope := params["scope"] if service != "" { reqParams.Add("service", service) } for _, scopeField := range strings.Fields(scope) { reqParams.Add("scope", scopeField) } if username != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) } req.URL.RawQuery = reqParams.Encode() resp, err := registryEndpoint.client.Do(req) if err != nil { return "", err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("token auth attempt for registry %s: %s request failed with status: %d %s", registryEndpoint, req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) } decoder := json.NewDecoder(resp.Body) tr := new(tokenResponse) if err = decoder.Decode(tr); err != nil { return "", fmt.Errorf("unable to decode token response: %s", err) } if tr.Token == "" { return "", errors.New("authorization server did not include a token in the response") } return tr.Token, nil } docker-1.10.3/registry/types.go000066400000000000000000000037361267010174400164210ustar00rootroot00000000000000package registry import ( "github.com/docker/docker/reference" registrytypes "github.com/docker/engine-api/types/registry" ) // RepositoryData tracks the image list, list of endpoints, and list of tokens // for a repository type RepositoryData struct { // ImgList is a list of images in the repository ImgList map[string]*ImgData // Endpoints is a list of endpoints returned in X-Docker-Endpoints Endpoints []string // Tokens is currently unused (remove it?) Tokens []string } // ImgData is used to transfer image checksums to and from the registry type ImgData struct { // ID is an opaque string that identifies the image ID string `json:"id"` Checksum string `json:"checksum,omitempty"` ChecksumPayload string `json:"-"` Tag string `json:",omitempty"` } // PingResult contains the information returned when pinging a registry. It // indicates the registry's version and whether the registry claims to be a // standalone registry. 
type PingResult struct { // Version is the registry version supplied by the registry in a HTTP // header Version string `json:"version"` // Standalone is set to true if the registry indicates it is a // standalone registry in the X-Docker-Registry-Standalone // header Standalone bool `json:"standalone"` } // APIVersion is an integral representation of an API version (presently // either 1 or 2) type APIVersion int func (av APIVersion) String() string { return apiVersions[av] } var apiVersions = map[APIVersion]string{ 1: "v1", 2: "v2", } // API Version identifiers. const ( APIVersionUnknown = iota APIVersion1 APIVersion2 ) // RepositoryInfo describes a repository type RepositoryInfo struct { reference.Named // Index points to registry information Index *registrytypes.IndexInfo // Official indicates whether the repository is considered official. // If the registry is official, and the normalized name does not // contain a '/' (e.g. "foo"), then it is considered an official repo. Official bool } docker-1.10.3/runconfig/000077500000000000000000000000001267010174400150375ustar00rootroot00000000000000docker-1.10.3/runconfig/compare.go000066400000000000000000000026141267010174400170170ustar00rootroot00000000000000package runconfig import "github.com/docker/engine-api/types/container" // Compare two Config struct. 
Do not compare the "Image" nor "Hostname" fields // If OpenStdin is set, then it differs func Compare(a, b *container.Config) bool { if a == nil || b == nil || a.OpenStdin || b.OpenStdin { return false } if a.AttachStdout != b.AttachStdout || a.AttachStderr != b.AttachStderr || a.User != b.User || a.OpenStdin != b.OpenStdin || a.Tty != b.Tty { return false } if a.Cmd.Len() != b.Cmd.Len() || len(a.Env) != len(b.Env) || len(a.Labels) != len(b.Labels) || len(a.ExposedPorts) != len(b.ExposedPorts) || a.Entrypoint.Len() != b.Entrypoint.Len() || len(a.Volumes) != len(b.Volumes) { return false } aCmd := a.Cmd.Slice() bCmd := b.Cmd.Slice() for i := 0; i < len(aCmd); i++ { if aCmd[i] != bCmd[i] { return false } } for i := 0; i < len(a.Env); i++ { if a.Env[i] != b.Env[i] { return false } } for k, v := range a.Labels { if v != b.Labels[k] { return false } } for k := range a.ExposedPorts { if _, exists := b.ExposedPorts[k]; !exists { return false } } aEntrypoint := a.Entrypoint.Slice() bEntrypoint := b.Entrypoint.Slice() for i := 0; i < len(aEntrypoint); i++ { if aEntrypoint[i] != bEntrypoint[i] { return false } } for key := range a.Volumes { if _, exists := b.Volumes[key]; !exists { return false } } return true } docker-1.10.3/runconfig/compare_test.go000066400000000000000000000102501267010174400200510ustar00rootroot00000000000000package runconfig import ( "testing" "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/strslice" "github.com/docker/go-connections/nat" ) // Just to make life easier func newPortNoError(proto, port string) nat.Port { p, _ := nat.NewPort(proto, port) return p } func TestCompare(t *testing.T) { ports1 := make(nat.PortSet) ports1[newPortNoError("tcp", "1111")] = struct{}{} ports1[newPortNoError("tcp", "2222")] = struct{}{} ports2 := make(nat.PortSet) ports2[newPortNoError("tcp", "3333")] = struct{}{} ports2[newPortNoError("tcp", "4444")] = struct{}{} ports3 := make(nat.PortSet) ports3[newPortNoError("tcp", "1111")] = 
struct{}{} ports3[newPortNoError("tcp", "2222")] = struct{}{} ports3[newPortNoError("tcp", "5555")] = struct{}{} volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} volumes2 := make(map[string]struct{}) volumes2["/test2"] = struct{}{} volumes3 := make(map[string]struct{}) volumes3["/test1"] = struct{}{} volumes3["/test3"] = struct{}{} envs1 := []string{"ENV1=value1", "ENV2=value2"} envs2 := []string{"ENV1=value1", "ENV3=value3"} entrypoint1 := strslice.New("/bin/sh", "-c") entrypoint2 := strslice.New("/bin/sh", "-d") entrypoint3 := strslice.New("/bin/sh", "-c", "echo") cmd1 := strslice.New("/bin/sh", "-c") cmd2 := strslice.New("/bin/sh", "-d") cmd3 := strslice.New("/bin/sh", "-c", "echo") labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} sameConfigs := map[*container.Config]*container.Config{ // Empty config &container.Config{}: {}, // Does not compare hostname, domainname & image &container.Config{ Hostname: "host1", Domainname: "domain1", Image: "image1", User: "user", }: { Hostname: "host2", Domainname: "domain2", Image: "image2", User: "user", }, // only OpenStdin &container.Config{OpenStdin: false}: {OpenStdin: false}, // only env &container.Config{Env: envs1}: {Env: envs1}, // only cmd &container.Config{Cmd: cmd1}: {Cmd: cmd1}, // only labels &container.Config{Labels: labels1}: {Labels: labels1}, // only exposedPorts &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, // only entrypoints &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, // only volumes &container.Config{Volumes: volumes1}: {Volumes: volumes1}, } differentConfigs := map[*container.Config]*container.Config{ nil: nil, &container.Config{ Hostname: "host1", Domainname: "domain1", Image: "image1", User: "user1", }: { Hostname: "host1", Domainname: "domain1", Image: 
"image1", User: "user2", }, // only OpenStdin &container.Config{OpenStdin: false}: {OpenStdin: true}, &container.Config{OpenStdin: true}: {OpenStdin: false}, // only env &container.Config{Env: envs1}: {Env: envs2}, // only cmd &container.Config{Cmd: cmd1}: {Cmd: cmd2}, // not the same number of parts &container.Config{Cmd: cmd1}: {Cmd: cmd3}, // only labels &container.Config{Labels: labels1}: {Labels: labels2}, // not the same number of labels &container.Config{Labels: labels1}: {Labels: labels3}, // only exposedPorts &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, // not the same number of ports &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, // only entrypoints &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, // not the same number of parts &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, // only volumes &container.Config{Volumes: volumes1}: {Volumes: volumes2}, // not the same number of labels &container.Config{Volumes: volumes1}: {Volumes: volumes3}, } for config1, config2 := range sameConfigs { if !Compare(config1, config2) { t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) } } for config1, config2 := range differentConfigs { if Compare(config1, config2) { t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) } } } docker-1.10.3/runconfig/config.go000066400000000000000000000041751267010174400166420ustar00rootroot00000000000000package runconfig import ( "encoding/json" "fmt" "io" "github.com/docker/docker/volume" "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" ) // DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper // struct and returns both a Config and an HostConfig struct // Be aware this function is not checking whether the resulted structs are nil, // it's your business to do so func DecodeContainerConfig(src io.Reader) (*container.Config, 
*container.HostConfig, *networktypes.NetworkingConfig, error) { var w ContainerConfigWrapper decoder := json.NewDecoder(src) if err := decoder.Decode(&w); err != nil { return nil, nil, nil, err } hc := w.getHostConfig() // Perform platform-specific processing of Volumes and Binds. if w.Config != nil && hc != nil { // Initialize the volumes map if currently nil if w.Config.Volumes == nil { w.Config.Volumes = make(map[string]struct{}) } // Now validate all the volumes and binds if err := validateVolumesAndBindSettings(w.Config, hc); err != nil { return nil, nil, nil, err } } // Certain parameters need daemon-side validation that cannot be done // on the client, as only the daemon knows what is valid for the platform. if err := ValidateNetMode(w.Config, hc); err != nil { return nil, nil, nil, err } // Validate the isolation level if err := ValidateIsolationLevel(hc); err != nil { return nil, nil, nil, err } return w.Config, hc, w.NetworkingConfig, nil } // validateVolumesAndBindSettings validates each of the volumes and bind settings // passed by the caller to ensure they are valid. func validateVolumesAndBindSettings(c *container.Config, hc *container.HostConfig) error { // Ensure all volumes and binds are valid. 
for spec := range c.Volumes { if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { return fmt.Errorf("Invalid volume spec %q: %v", spec, err) } } for _, spec := range hc.Binds { if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { return fmt.Errorf("Invalid bind mount spec %q: %v", spec, err) } } return nil } docker-1.10.3/runconfig/config_test.go000066400000000000000000000063171267010174400177010ustar00rootroot00000000000000package runconfig import ( "bytes" "encoding/json" "fmt" "io/ioutil" "runtime" "strings" "testing" "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" "github.com/docker/engine-api/types/strslice" ) type f struct { file string entrypoint *strslice.StrSlice } func TestDecodeContainerConfig(t *testing.T) { var ( fixtures []f image string ) if runtime.GOOS != "windows" { image = "ubuntu" fixtures = []f{ {"fixtures/unix/container_config_1_14.json", strslice.New()}, {"fixtures/unix/container_config_1_17.json", strslice.New("bash")}, {"fixtures/unix/container_config_1_19.json", strslice.New("bash")}, } } else { image = "windows" fixtures = []f{ {"fixtures/windows/container_config_1_19.json", strslice.New("cmd")}, } } for _, f := range fixtures { b, err := ioutil.ReadFile(f.file) if err != nil { t.Fatal(err) } c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) if err != nil { t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) } if c.Image != image { t.Fatalf("Expected %s image, found %s\n", image, c.Image) } if c.Entrypoint.Len() != f.entrypoint.Len() { t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) } if h != nil && h.Memory != 1000 { t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) } } } // TestDecodeContainerConfigIsolation validates the isolation level passed // to the daemon in the hostConfig structure. Note this is platform specific // as to what level of container isolation is supported. 
func TestDecodeContainerConfigIsolation(t *testing.T) { // An invalid isolation level if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { t.Fatal(err) } } // Blank isolation level (== default) if _, _, _, err := callDecodeContainerConfigIsolation(""); err != nil { t.Fatal("Blank isolation should have succeeded") } // Default isolation level if _, _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { t.Fatal("default isolation should have succeeded") } // Hyper-V Containers isolation level (Valid on Windows only) if runtime.GOOS == "windows" { if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { t.Fatal("hyperv isolation should have succeeded") } } else { if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { t.Fatal(err) } } } } // callDecodeContainerConfigIsolation is a utility function to call // DecodeContainerConfig for validating isolation levels func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { var ( b []byte err error ) w := ContainerConfigWrapper{ Config: &container.Config{}, HostConfig: &container.HostConfig{ NetworkMode: "none", Isolation: container.IsolationLevel(isolation)}, } if b, err = json.Marshal(w); err != nil { return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) } return DecodeContainerConfig(bytes.NewReader(b)) } docker-1.10.3/runconfig/config_unix.go000066400000000000000000000037211267010174400177010ustar00rootroot00000000000000// +build !windows package runconfig import ( "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" ) // ContainerConfigWrapper is a Config wrapper that hold the container Config (portable) // and the corresponding HostConfig 
(non-portable). type ContainerConfigWrapper struct { *container.Config InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. } // getHostConfig gets the HostConfig of the Config. // It's mostly there to handle Deprecated fields of the ContainerConfigWrapper func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { hc := w.HostConfig if hc == nil && w.InnerHostConfig != nil { hc = w.InnerHostConfig } else if w.InnerHostConfig != nil { if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { w.InnerHostConfig.Memory = hc.Memory } if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { w.InnerHostConfig.MemorySwap = hc.MemorySwap } if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { w.InnerHostConfig.CPUShares = hc.CPUShares } if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { w.InnerHostConfig.CpusetCpus = hc.CpusetCpus } if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { w.InnerHostConfig.VolumeDriver = hc.VolumeDriver } hc = w.InnerHostConfig } if hc != nil { if w.Cpuset != "" && hc.CpusetCpus == "" { hc.CpusetCpus = w.Cpuset } } // Make sure NetworkMode has an acceptable value. We do this to ensure // backwards compatible API behavior. hc = SetDefaultNetModeIfBlank(hc) return hc } docker-1.10.3/runconfig/config_windows.go000066400000000000000000000012111267010174400204000ustar00rootroot00000000000000package runconfig import ( "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" ) // ContainerConfigWrapper is a Config wrapper that hold the container Config (portable) // and the corresponding HostConfig (non-portable). 
type ContainerConfigWrapper struct { *container.Config HostConfig *container.HostConfig `json:"HostConfig,omitempty"` NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` } // getHostConfig gets the HostConfig of the Config. func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { return w.HostConfig } docker-1.10.3/runconfig/errors.go000066400000000000000000000062301267010174400167030ustar00rootroot00000000000000package runconfig import ( "fmt" ) var ( // ErrConflictContainerNetworkAndLinks conflict between --net=container and links ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") // ErrConflictSharedNetwork conflict between private and other networks ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. 
ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") // ErrConflictNoNetwork conflict between private and other networks ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") // ErrConflictNetworkAndDNS conflict between --dns and the network mode ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") // ErrConflictNetworkHostname conflict between the hostname and the network mode ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") // ErrConflictHostNetworkAndLinks conflict between --net=host and links ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. This would result in undefined behavior") // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") // ErrConflictNetworkHosts conflict between add-host and the network mode ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") // ErrConflictNetworkExposePorts conflict between the expose option and the network mode ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") // ErrUnsupportedNetworkAndIP conflict between network mode and preferred ip address ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and preferred ip address 
ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") // ErrUnsupportedNetworkAndAlias conflict between network mode and alias ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") ) docker-1.10.3/runconfig/fixtures/000077500000000000000000000000001267010174400167105ustar00rootroot00000000000000docker-1.10.3/runconfig/fixtures/unix/000077500000000000000000000000001267010174400176735ustar00rootroot00000000000000docker-1.10.3/runconfig/fixtures/unix/container_config_1_14.json000066400000000000000000000011271267010174400246220ustar00rootroot00000000000000{ "Hostname":"", "Domainname": "", "User":"", "Memory": 1000, "MemorySwap":0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, "PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":[ "bash" ], "Image":"ubuntu", "Volumes":{ "/tmp": {} }, "WorkingDir":"", "NetworkDisabled": false, "ExposedPorts":{ "22/tcp": {} }, "RestartPolicy": { "Name": "always" } } docker-1.10.3/runconfig/fixtures/unix/container_config_1_17.json000066400000000000000000000023611267010174400246260ustar00rootroot00000000000000{ "Hostname": "", "Domainname": "", "User": "", "Memory": 1000, "MemorySwap": 0, "CpuShares": 512, "Cpuset": "0,1", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Entrypoint": "bash", "Image": "ubuntu", "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "SecurityOpt": [""], "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, 
"ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "DnsOptions": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [] } } docker-1.10.3/runconfig/fixtures/unix/container_config_1_19.json000066400000000000000000000030061267010174400246250ustar00rootroot00000000000000{ "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Entrypoint": "bash", "Image": "ubuntu", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Volumes": { "/tmp": {} }, "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "HostConfig": { "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "Memory": 1000, "MemorySwap": 0, "CpuShares": 512, "CpusetCpus": "0,1", "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "DnsOptions": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [], "Ulimits": [{}], "LogConfig": { "Type": "json-file", "Config": {} }, "SecurityOpt": [""], "CgroupParent": "" } } docker-1.10.3/runconfig/fixtures/unix/container_hostconfig_1_14.json000066400000000000000000000005711267010174400255220ustar00rootroot00000000000000{ "Binds": ["/tmp:/tmp"], "ContainerIDFile": "", "LxcConf": [], "Privileged": false, "PortBindings": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "49153" } ] }, "Links": ["/name:alias"], "PublishAllPorts": false, "CapAdd": ["NET_ADMIN"], "CapDrop": 
["MKNOD"] } docker-1.10.3/runconfig/fixtures/unix/container_hostconfig_1_19.json000066400000000000000000000015101267010174400255210ustar00rootroot00000000000000{ "Binds": ["/tmp:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "Memory": 0, "MemorySwap": 0, "CpuShares": 512, "CpuPeriod": 100000, "CpusetCpus": "0,1", "CpusetMems": "0,1", "BlkioWeight": 300, "OomKillDisable": false, "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "bridge", "Devices": [], "Ulimits": [{}], "LogConfig": { "Type": "json-file", "Config": {} }, "SecurityOpt": [""], "CgroupParent": "" } docker-1.10.3/runconfig/fixtures/windows/000077500000000000000000000000001267010174400204025ustar00rootroot00000000000000docker-1.10.3/runconfig/fixtures/windows/container_config_1_19.json000066400000000000000000000030251267010174400253350ustar00rootroot00000000000000{ "Hostname": "", "Domainname": "", "User": "", "AttachStdin": false, "AttachStdout": true, "AttachStderr": true, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": null, "Cmd": [ "date" ], "Entrypoint": "cmd", "Image": "windows", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "Volumes": { "c:/windows": {} }, "WorkingDir": "", "NetworkDisabled": false, "MacAddress": "12:34:56:78:9a:bc", "ExposedPorts": { "22/tcp": {} }, "HostConfig": { "Binds": ["c:/windows:d:/tmp"], "Links": ["redis3:redis"], "LxcConf": {"lxc.utsname":"docker"}, "Memory": 1000, "MemorySwap": 0, "CpuShares": 512, "CpusetCpus": "0,1", "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], 
"DnsOptions": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], "CapDrop": ["MKNOD"], "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, "NetworkMode": "default", "Devices": [], "Ulimits": [{}], "LogConfig": { "Type": "json-file", "Config": {} }, "SecurityOpt": [""], "CgroupParent": "" } } docker-1.10.3/runconfig/hostconfig.go000066400000000000000000000017011267010174400175300ustar00rootroot00000000000000package runconfig import ( "encoding/json" "io" "github.com/docker/engine-api/types/container" ) // DecodeHostConfig creates a HostConfig based on the specified Reader. // It assumes the content of the reader will be JSON, and decodes it. func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { decoder := json.NewDecoder(src) var w ContainerConfigWrapper if err := decoder.Decode(&w); err != nil { return nil, err } hc := w.getHostConfig() return hc, nil } // SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure // to default if it is not populated. This ensures backwards compatibility after // the validation of the network mode was moved from the docker CLI to the // docker daemon. func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { if hc != nil { if hc.NetworkMode == container.NetworkMode("") { hc.NetworkMode = container.NetworkMode("default") } } return hc } docker-1.10.3/runconfig/hostconfig_test.go000066400000000000000000000166071267010174400206020ustar00rootroot00000000000000// +build !windows package runconfig import ( "bytes" "fmt" "io/ioutil" "testing" "github.com/docker/engine-api/types/container" ) // TODO Windows: This will need addressing for a Windows daemon. 
func TestNetworkModeTest(t *testing.T) { networkModes := map[container.NetworkMode][]bool{ // private, bridge, host, container, none, default "": {true, false, false, false, false, false}, "something:weird": {true, false, false, false, false, false}, "bridge": {true, true, false, false, false, false}, DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, "host": {false, false, true, false, false, false}, "container:name": {false, false, false, true, false, false}, "none": {true, false, false, false, true, false}, "default": {true, false, false, false, false, true}, } networkModeNames := map[container.NetworkMode]string{ "": "", "something:weird": "something:weird", "bridge": "bridge", DefaultDaemonNetworkMode(): "bridge", "host": "host", "container:name": "container", "none": "none", "default": "default", } for networkMode, state := range networkModes { if networkMode.IsPrivate() != state[0] { t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) } if networkMode.IsBridge() != state[1] { t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) } if networkMode.IsHost() != state[2] { t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) } if networkMode.IsContainer() != state[3] { t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) } if networkMode.IsNone() != state[4] { t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) } if networkMode.IsDefault() != state[5] { t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) } if networkMode.NetworkName() != networkModeNames[networkMode] { t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], 
networkMode.NetworkName()) } } } func TestIpcModeTest(t *testing.T) { ipcModes := map[container.IpcMode][]bool{ // private, host, container, valid "": {true, false, false, true}, "something:weird": {true, false, false, false}, ":weird": {true, false, false, true}, "host": {false, true, false, true}, "container:name": {false, false, true, true}, "container:name:something": {false, false, true, false}, "container:": {false, false, true, false}, } for ipcMode, state := range ipcModes { if ipcMode.IsPrivate() != state[0] { t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) } if ipcMode.IsHost() != state[1] { t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) } if ipcMode.IsContainer() != state[2] { t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) } if ipcMode.Valid() != state[3] { t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) } } containerIpcModes := map[container.IpcMode]string{ "": "", "something": "", "something:weird": "weird", "container": "", "container:": "", "container:name": "name", "container:name1:name2": "name1:name2", } for ipcMode, container := range containerIpcModes { if ipcMode.Container() != container { t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) } } } func TestUTSModeTest(t *testing.T) { utsModes := map[container.UTSMode][]bool{ // private, host, valid "": {true, false, true}, "something:weird": {true, false, false}, "host": {false, true, true}, "host:name": {true, false, true}, } for utsMode, state := range utsModes { if utsMode.IsPrivate() != state[0] { t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) } if utsMode.IsHost() != state[1] { t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], 
utsMode.IsHost()) } if utsMode.Valid() != state[2] { t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) } } } func TestPidModeTest(t *testing.T) { pidModes := map[container.PidMode][]bool{ // private, host, valid "": {true, false, true}, "something:weird": {true, false, false}, "host": {false, true, true}, "host:name": {true, false, true}, } for pidMode, state := range pidModes { if pidMode.IsPrivate() != state[0] { t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) } if pidMode.IsHost() != state[1] { t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) } if pidMode.Valid() != state[2] { t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) } } } func TestRestartPolicy(t *testing.T) { restartPolicies := map[container.RestartPolicy][]bool{ // none, always, failure container.RestartPolicy{}: {false, false, false}, container.RestartPolicy{"something", 0}: {false, false, false}, container.RestartPolicy{"no", 0}: {true, false, false}, container.RestartPolicy{"always", 0}: {false, true, false}, container.RestartPolicy{"on-failure", 0}: {false, false, true}, } for restartPolicy, state := range restartPolicies { if restartPolicy.IsNone() != state[0] { t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) } if restartPolicy.IsAlways() != state[1] { t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) } if restartPolicy.IsOnFailure() != state[2] { t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) } } } func TestDecodeHostConfig(t *testing.T) { fixtures := []struct { file string }{ {"fixtures/unix/container_hostconfig_1_14.json"}, 
{"fixtures/unix/container_hostconfig_1_19.json"}, } for _, f := range fixtures { b, err := ioutil.ReadFile(f.file) if err != nil { t.Fatal(err) } c, err := DecodeHostConfig(bytes.NewReader(b)) if err != nil { t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) } if c.Privileged != false { t.Fatalf("Expected privileged false, found %v\n", c.Privileged) } if l := len(c.Binds); l != 1 { t.Fatalf("Expected 1 bind, found %d\n", l) } if c.CapAdd.Len() != 1 && c.CapAdd.Slice()[0] != "NET_ADMIN" { t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) } if c.CapDrop.Len() != 1 && c.CapDrop.Slice()[0] != "NET_ADMIN" { t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) } } } docker-1.10.3/runconfig/hostconfig_unix.go000066400000000000000000000047351267010174400206050ustar00rootroot00000000000000// +build !windows package runconfig import ( "fmt" "runtime" "strings" "github.com/docker/engine-api/types/container" ) // DefaultDaemonNetworkMode returns the default network stack the daemon should // use. func DefaultDaemonNetworkMode() container.NetworkMode { return container.NetworkMode("bridge") } // IsPreDefinedNetwork indicates if a network is predefined by the daemon func IsPreDefinedNetwork(network string) bool { n := container.NetworkMode(network) return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() } // ValidateNetMode ensures that the various combinations of requested // network settings are valid. 
func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } parts := strings.Split(string(hc.NetworkMode), ":") if parts[0] == "container" { if len(parts) < 2 || parts[1] == "" { return fmt.Errorf("--net: invalid net mode: invalid container format container:") } } if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && c.Hostname != "" { return ErrConflictNetworkHostname } if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { return ErrConflictHostNetworkAndLinks } if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { return ErrConflictContainerNetworkAndLinks } if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && len(hc.DNS) > 0 { return ErrConflictNetworkAndDNS } if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && len(hc.ExtraHosts) > 0 { return ErrConflictNetworkHosts } if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { return ErrConflictContainerNetworkAndMac } if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { return ErrConflictNetworkPublishPorts } if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { return ErrConflictNetworkExposePorts } return nil } // ValidateIsolationLevel performs platform specific validation of the // isolation level in the hostconfig structure. 
Linux only supports "default" // which is LXC container isolation func ValidateIsolationLevel(hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } if !hc.Isolation.IsValid() { return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) } return nil } docker-1.10.3/runconfig/hostconfig_windows.go000066400000000000000000000026171267010174400213110ustar00rootroot00000000000000package runconfig import ( "fmt" "strings" "github.com/docker/engine-api/types/container" ) // DefaultDaemonNetworkMode returns the default network stack the daemon should // use. func DefaultDaemonNetworkMode() container.NetworkMode { return container.NetworkMode("default") } // IsPreDefinedNetwork indicates if a network is predefined by the daemon func IsPreDefinedNetwork(network string) bool { return false } // ValidateNetMode ensures that the various combinations of requested // network settings are valid. func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } parts := strings.Split(string(hc.NetworkMode), ":") switch mode := parts[0]; mode { case "default", "none": default: return fmt.Errorf("invalid --net: %s", hc.NetworkMode) } return nil } // ValidateIsolationLevel performs platform specific validation of the // isolation level in the hostconfig structure. Windows supports 'default' (or // blank), 'process', or 'hyperv'. func ValidateIsolationLevel(hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } if !hc.Isolation.IsValid() { return fmt.Errorf("invalid --isolation: %q. 
Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) } return nil } docker-1.10.3/runconfig/opts/000077500000000000000000000000001267010174400160245ustar00rootroot00000000000000docker-1.10.3/runconfig/opts/envfile.go000066400000000000000000000040771267010174400200130ustar00rootroot00000000000000package opts import ( "bufio" "fmt" "os" "strings" ) // ParseEnvFile reads a file with environment variables enumerated by lines // // ``Environment variable names used by the utilities in the Shell and // Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase // letters, digits, and the '_' (underscore) from the characters defined in // Portable Character Set and do not begin with a digit. *But*, other // characters may be permitted by an implementation; applications shall // tolerate the presence of such names.'' // -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html // // As of #16585, it's up to application inside docker to validate or not // environment variables, that's why we just strip leading whitespace and // nothing more. func ParseEnvFile(filename string) ([]string, error) { fh, err := os.Open(filename) if err != nil { return []string{}, err } defer fh.Close() lines := []string{} scanner := bufio.NewScanner(fh) for scanner.Scan() { // trim the line from all leading whitespace first line := strings.TrimLeft(scanner.Text(), whiteSpaces) // line is not empty, and not starting with '#' if len(line) > 0 && !strings.HasPrefix(line, "#") { data := strings.SplitN(line, "=", 2) // trim the front of a variable, but nothing else variable := strings.TrimLeft(data[0], whiteSpaces) if strings.ContainsAny(variable, whiteSpaces) { return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} } if len(data) > 1 { // pass the value through, no trimming lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) } else { // if only a pass-through variable is given, clean it up. 
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) } } } return lines, scanner.Err() } var whiteSpaces = " \t" // ErrBadEnvVariable typed error for bad environment variable type ErrBadEnvVariable struct { msg string } func (e ErrBadEnvVariable) Error() string { return fmt.Sprintf("poorly formatted environment: %s", e.msg) } docker-1.10.3/runconfig/opts/envfile_test.go000066400000000000000000000066211267010174400210470ustar00rootroot00000000000000package opts import ( "bufio" "fmt" "io/ioutil" "os" "reflect" "strings" "testing" ) func tmpFileWithContent(content string, t *testing.T) string { tmpFile, err := ioutil.TempFile("", "envfile-test") if err != nil { t.Fatal(err) } defer tmpFile.Close() tmpFile.WriteString(content) return tmpFile.Name() } // Test ParseEnvFile for a file with a few well formatted lines func TestParseEnvFileGoodFile(t *testing.T) { content := `foo=bar baz=quux # comment _foobar=foobaz with.dots=working and_underscore=working too ` // Adding a newline + a line with pure whitespace. // This is being done like this instead of the block above // because it's common for editors to trim trailing whitespace // from lines, which becomes annoying since that's the // exact thing we need to test. 
content += "\n \t " tmpFile := tmpFileWithContent(content, t) defer os.Remove(tmpFile) lines, err := ParseEnvFile(tmpFile) if err != nil { t.Fatal(err) } expectedLines := []string{ "foo=bar", "baz=quux", "_foobar=foobaz", "with.dots=working", "and_underscore=working too", } if !reflect.DeepEqual(lines, expectedLines) { t.Fatal("lines not equal to expected_lines") } } // Test ParseEnvFile for an empty file func TestParseEnvFileEmptyFile(t *testing.T) { tmpFile := tmpFileWithContent("", t) defer os.Remove(tmpFile) lines, err := ParseEnvFile(tmpFile) if err != nil { t.Fatal(err) } if len(lines) != 0 { t.Fatal("lines not empty; expected empty") } } // Test ParseEnvFile for a non existent file func TestParseEnvFileNonExistentFile(t *testing.T) { _, err := ParseEnvFile("foo_bar_baz") if err == nil { t.Fatal("ParseEnvFile succeeded; expected failure") } if _, ok := err.(*os.PathError); !ok { t.Fatalf("Expected a PathError, got [%v]", err) } } // Test ParseEnvFile for a badly formatted file func TestParseEnvFileBadlyFormattedFile(t *testing.T) { content := `foo=bar f =quux ` tmpFile := tmpFileWithContent(content, t) defer os.Remove(tmpFile) _, err := ParseEnvFile(tmpFile) if err == nil { t.Fatalf("Expected a ErrBadEnvVariable, got nothing") } if _, ok := err.(ErrBadEnvVariable); !ok { t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err) } expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" if err.Error() != expectedMessage { t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) } } // Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize func TestParseEnvFileLineTooLongFile(t *testing.T) { content := strings.Repeat("a", bufio.MaxScanTokenSize+42) content = fmt.Sprint("foo=", content) tmpFile := tmpFileWithContent(content, t) defer os.Remove(tmpFile) _, err := ParseEnvFile(tmpFile) if err == nil { t.Fatal("ParseEnvFile succeeded; expected failure") } } // ParseEnvFile with a random file, pass through func 
TestParseEnvFileRandomFile(t *testing.T) { content := `first line another invalid line` tmpFile := tmpFileWithContent(content, t) defer os.Remove(tmpFile) _, err := ParseEnvFile(tmpFile) if err == nil { t.Fatalf("Expected a ErrBadEnvVariable, got nothing") } if _, ok := err.(ErrBadEnvVariable); !ok { t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err) } expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" if err.Error() != expectedMessage { t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) } } docker-1.10.3/runconfig/opts/fixtures/000077500000000000000000000000001267010174400176755ustar00rootroot00000000000000docker-1.10.3/runconfig/opts/fixtures/valid.env000066400000000000000000000000141267010174400215010ustar00rootroot00000000000000ENV1=value1 docker-1.10.3/runconfig/opts/fixtures/valid.label000066400000000000000000000000161267010174400217720ustar00rootroot00000000000000LABEL1=value1 docker-1.10.3/runconfig/opts/opts.go000066400000000000000000000036411267010174400173440ustar00rootroot00000000000000package opts import ( "fmt" fopts "github.com/docker/docker/opts" "net" "os" "strings" ) // ValidateAttach validates that the specified string is a valid attach option. func ValidateAttach(val string) (string, error) { s := strings.ToLower(val) for _, str := range []string{"stdin", "stdout", "stderr"} { if s == str { return s, nil } } return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") } // ValidateEnv validates an environment variable and returns it. // If no value is specified, it returns the current value using os.Getenv. // // As on ParseEnvFile and related to #16585, environment variable names // are not validate what so ever, it's up to application inside docker // to validate them or not. 
func ValidateEnv(val string) (string, error) { arr := strings.Split(val, "=") if len(arr) > 1 { return val, nil } if !doesEnvExist(val) { return val, nil } return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil } func doesEnvExist(name string) bool { for _, entry := range os.Environ() { parts := strings.SplitN(entry, "=", 2) if parts[0] == name { return true } } return false } // ValidateExtraHost validates that the specified string is a valid extrahost and returns it. // ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). func ValidateExtraHost(val string) (string, error) { // allow for IPv6 addresses in extra hosts by only splitting on first ":" arr := strings.SplitN(val, ":", 2) if len(arr) != 2 || len(arr[0]) == 0 { return "", fmt.Errorf("bad format for add-host: %q", val) } if _, err := fopts.ValidateIPAddress(arr[1]); err != nil { return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) } return val, nil } // ValidateMACAddress validates a MAC address. 
func ValidateMACAddress(val string) (string, error) { _, err := net.ParseMAC(strings.TrimSpace(val)) if err != nil { return "", err } return val, nil } docker-1.10.3/runconfig/opts/opts_test.go000066400000000000000000000055771267010174400204150ustar00rootroot00000000000000package opts import ( "fmt" "os" "strings" "testing" ) func TestValidateAttach(t *testing.T) { valid := []string{ "stdin", "stdout", "stderr", "STDIN", "STDOUT", "STDERR", } if _, err := ValidateAttach("invalid"); err == nil { t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") } for _, attach := range valid { value, err := ValidateAttach(attach) if err != nil { t.Fatal(err) } if value != strings.ToLower(attach) { t.Fatalf("Expected [%v], got [%v]", attach, value) } } } func TestValidateEnv(t *testing.T) { valids := map[string]string{ "a": "a", "something": "something", "_=a": "_=a", "env1=value1": "env1=value1", "_env1=value1": "_env1=value1", "env2=value2=value3": "env2=value2=value3", "env3=abc!qwe": "env3=abc!qwe", "env_4=value 4": "env_4=value 4", "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), "PATH=something": "PATH=something", "asd!qwe": "asd!qwe", "1asd": "1asd", "123": "123", "some space": "some space", " some space before": " some space before", "some space after ": "some space after ", } for value, expected := range valids { actual, err := ValidateEnv(value) if err != nil { t.Fatal(err) } if actual != expected { t.Fatalf("Expected [%v], got [%v]", expected, actual) } } } func TestValidateExtraHosts(t *testing.T) { valid := []string{ `myhost:192.168.0.1`, `thathost:10.0.2.1`, `anipv6host:2003:ab34:e::1`, `ipv6local:::1`, } invalid := map[string]string{ `myhost:192.notanipaddress.1`: `invalid IP`, `thathost-nosemicolon10.0.0.1`: `bad format`, `anipv6host:::::1`: `invalid IP`, `ipv6local:::0::`: `invalid IP`, } for _, extrahost := range valid { if _, err := ValidateExtraHost(extrahost); err != nil { t.Fatalf("ValidateExtraHost(`"+extrahost+"`) 
should succeed: error %v", err) } } for extraHost, expectedError := range invalid { if _, err := ValidateExtraHost(extraHost); err == nil { t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) } else { if !strings.Contains(err.Error(), expectedError) { t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) } } } } func TestValidateMACAddress(t *testing.T) { if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) } if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") } if _, err := ValidateMACAddress(`random invalid string`); err == nil { t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") } } docker-1.10.3/runconfig/opts/parse.go000066400000000000000000000647201267010174400174760ustar00rootroot00000000000000package opts import ( "bytes" "encoding/json" "fmt" "io/ioutil" "path" "strconv" "strings" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/signal" "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" "github.com/docker/engine-api/types/strslice" "github.com/docker/go-connections/nat" "github.com/docker/go-units" ) // Parse parses the specified args for the specified command and generates a Config, // a HostConfig and returns them with the specified command. // If the specified args are not valid, it will return an error. func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, *flag.FlagSet, error) { var ( // FIXME: use utils.ListOpts for attach and volumes? 
flAttach = opts.NewListOpts(ValidateAttach) flVolumes = opts.NewListOpts(nil) flTmpfs = opts.NewListOpts(nil) flBlkioWeightDevice = NewWeightdeviceOpt(ValidateWeightDevice) flDeviceReadBps = NewThrottledeviceOpt(ValidateThrottleBpsDevice) flDeviceWriteBps = NewThrottledeviceOpt(ValidateThrottleBpsDevice) flLinks = opts.NewListOpts(ValidateLink) flAliases = opts.NewListOpts(nil) flDeviceReadIOps = NewThrottledeviceOpt(ValidateThrottleIOpsDevice) flDeviceWriteIOps = NewThrottledeviceOpt(ValidateThrottleIOpsDevice) flEnv = opts.NewListOpts(ValidateEnv) flLabels = opts.NewListOpts(ValidateEnv) flDevices = opts.NewListOpts(ValidateDevice) flUlimits = NewUlimitOpt(nil) flPublish = opts.NewListOpts(nil) flExpose = opts.NewListOpts(nil) flDNS = opts.NewListOpts(opts.ValidateIPAddress) flDNSSearch = opts.NewListOpts(opts.ValidateDNSSearch) flDNSOptions = opts.NewListOpts(nil) flExtraHosts = opts.NewListOpts(ValidateExtraHost) flVolumesFrom = opts.NewListOpts(nil) flEnvFile = opts.NewListOpts(nil) flCapAdd = opts.NewListOpts(nil) flCapDrop = opts.NewListOpts(nil) flGroupAdd = opts.NewListOpts(nil) flSecurityOpt = opts.NewListOpts(nil) flLabelsFile = opts.NewListOpts(nil) flLoggingOpts = opts.NewListOpts(nil) flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to this container") flPidMode = cmd.String([]string{"-pid"}, "", "PID namespace to use") flUTSMode = cmd.String([]string{"-uts"}, "", "UTS namespace to use") flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports") flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") flOomKillDisable = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer") flOomScoreAdj = cmd.Int([]string{"-oom-score-adj"}, 0, "Tune host's OOM preferences (-1000 to 1000)") flContainerIDFile = cmd.String([]string{"-cidfile"}, "", "Write the 
container ID to the file") flEntrypoint = cmd.String([]string{"-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemoryReservation = cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") flMemorySwap = cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") flKernelMemory = cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCPUShares = cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCPUPeriod = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") flCPUQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") flCpusetCpus = cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") flBlkioWeight = cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") flSwappiness = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tune container memory swappiness (0 to 100)") flNetMode = cmd.String([]string{"-net"}, "default", "Connect a container to a network") flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") flIPv4Address = cmd.String([]string{"-ip"}, "", "Container IPv4 address (e.g. 172.30.100.104)") flIPv6Address = cmd.String([]string{"-ip6"}, "", "Container IPv6 address (e.g. 
2001:db8::33)") flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") flRestartPolicy = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits") flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only") flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container") flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container") flStopSignal = cmd.String([]string{"-stop-signal"}, signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) flIsolation = cmd.String([]string{"-isolation"}, "", "Container isolation level") flShmSize = cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB") ) cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR") cmd.Var(&flBlkioWeightDevice, []string{"-blkio-weight-device"}, "Block IO weight (relative device weight)") cmd.Var(&flDeviceReadBps, []string{"-device-read-bps"}, "Limit read rate (bytes per second) from a device") cmd.Var(&flDeviceWriteBps, []string{"-device-write-bps"}, "Limit write rate (bytes per second) to a device") cmd.Var(&flDeviceReadIOps, []string{"-device-read-iops"}, "Limit read rate (IO per second) from a device") cmd.Var(&flDeviceWriteIOps, []string{"-device-write-iops"}, "Limit write rate (IO per second) to a device") cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume") cmd.Var(&flTmpfs, []string{"-tmpfs"}, "Mount a tmpfs directory") cmd.Var(&flLinks, []string{"-link"}, "Add link to another container") cmd.Var(&flAliases, []string{"-net-alias"}, "Add network-scoped alias for the container") cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container") cmd.Var(&flLabels, []string{"l", "-label"}, "Set meta data on 
a container") cmd.Var(&flLabelsFile, []string{"-label-file"}, "Read in a line delimited file of labels") cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a file of environment variables") cmd.Var(&flPublish, []string{"p", "-publish"}, "Publish a container's port(s) to the host") cmd.Var(&flExpose, []string{"-expose"}, "Expose a port or a range of ports") cmd.Var(&flDNS, []string{"-dns"}, "Set custom DNS servers") cmd.Var(&flDNSSearch, []string{"-dns-search"}, "Set custom DNS search domains") cmd.Var(&flDNSOptions, []string{"-dns-opt"}, "Set DNS options") cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") cmd.Var(&flVolumesFrom, []string{"-volumes-from"}, "Mount volumes from the specified container(s)") cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") cmd.Var(&flGroupAdd, []string{"-group-add"}, "Add additional groups to join") cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options") cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options") cmd.Require(flag.Min, 1) if err := cmd.ParseFlags(args, true); err != nil { return nil, nil, nil, cmd, err } var ( attachStdin = flAttach.Get("stdin") attachStdout = flAttach.Get("stdout") attachStderr = flAttach.Get("stderr") ) // Validate the input mac address if *flMacAddress != "" { if _, err := ValidateMACAddress(*flMacAddress); err != nil { return nil, nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress) } } if *flStdin { attachStdin = true } // If -a is not set attach to the output stdio if flAttach.Len() == 0 { attachStdout = true attachStderr = true } var err error var flMemory int64 if *flMemoryString != "" { flMemory, err = units.RAMInBytes(*flMemoryString) if err != nil { return nil, nil, nil, cmd, err } } var 
MemoryReservation int64 if *flMemoryReservation != "" { MemoryReservation, err = units.RAMInBytes(*flMemoryReservation) if err != nil { return nil, nil, nil, cmd, err } } var memorySwap int64 if *flMemorySwap != "" { if *flMemorySwap == "-1" { memorySwap = -1 } else { memorySwap, err = units.RAMInBytes(*flMemorySwap) if err != nil { return nil, nil, nil, cmd, err } } } var KernelMemory int64 if *flKernelMemory != "" { KernelMemory, err = units.RAMInBytes(*flKernelMemory) if err != nil { return nil, nil, nil, cmd, err } } swappiness := *flSwappiness if swappiness != -1 && (swappiness < 0 || swappiness > 100) { return nil, nil, nil, cmd, fmt.Errorf("Invalid value: %d. Valid memory swappiness range is 0-100", swappiness) } var shmSize int64 if *flShmSize != "" { shmSize, err = units.RAMInBytes(*flShmSize) if err != nil { return nil, nil, nil, cmd, err } } var binds []string // add any bind targets to the list of container volumes for bind := range flVolumes.GetMap() { if arr := volumeSplitN(bind, 2); len(arr) > 1 { // after creating the bind mount we want to delete it from the flVolumes values because // we do not want bind mounts being committed to image configs binds = append(binds, bind) flVolumes.Delete(bind) } } // Can't evaluate options passed into --tmpfs until we actually mount tmpfs := make(map[string]string) for _, t := range flTmpfs.GetAll() { if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { if _, _, err := mount.ParseTmpfsOptions(arr[1]); err != nil { return nil, nil, nil, cmd, err } tmpfs[arr[0]] = arr[1] } else { tmpfs[arr[0]] = "" } } var ( parsedArgs = cmd.Args() runCmd *strslice.StrSlice entrypoint *strslice.StrSlice image = cmd.Arg(0) ) if len(parsedArgs) > 1 { runCmd = strslice.New(parsedArgs[1:]...) 
} if *flEntrypoint != "" { entrypoint = strslice.New(*flEntrypoint) } var ( domainname string hostname = *flHostname parts = strings.SplitN(hostname, ".", 2) ) if len(parts) > 1 { hostname = parts[0] domainname = parts[1] } ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) if err != nil { return nil, nil, nil, cmd, err } // Merge in exposed ports to the map of published ports for _, e := range flExpose.GetAll() { if strings.Contains(e, ":") { return nil, nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) } //support two formats for expose, original format /[] or /[] proto, port := nat.SplitProtoPort(e) //parse the start and end port and create a sequence of ports to expose //if expose a port, the start and end port are the same start, end, err := nat.ParsePortRange(port) if err != nil { return nil, nil, nil, cmd, fmt.Errorf("Invalid range format for --expose: %s, error: %s", e, err) } for i := start; i <= end; i++ { p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) if err != nil { return nil, nil, nil, cmd, err } if _, exists := ports[p]; !exists { ports[p] = struct{}{} } } } // parse device mappings deviceMappings := []container.DeviceMapping{} for _, device := range flDevices.GetAll() { deviceMapping, err := ParseDevice(device) if err != nil { return nil, nil, nil, cmd, err } deviceMappings = append(deviceMappings, deviceMapping) } // collect all the environment variables for the container envVariables, err := readKVStrings(flEnvFile.GetAll(), flEnv.GetAll()) if err != nil { return nil, nil, nil, cmd, err } // collect all the labels for the container labels, err := readKVStrings(flLabelsFile.GetAll(), flLabels.GetAll()) if err != nil { return nil, nil, nil, cmd, err } ipcMode := container.IpcMode(*flIpcMode) if !ipcMode.Valid() { return nil, nil, nil, cmd, fmt.Errorf("--ipc: invalid IPC mode") } pidMode := container.PidMode(*flPidMode) if !pidMode.Valid() { return nil, nil, nil, cmd, fmt.Errorf("--pid: invalid PID mode") } 
utsMode := container.UTSMode(*flUTSMode) if !utsMode.Valid() { return nil, nil, nil, cmd, fmt.Errorf("--uts: invalid UTS mode") } restartPolicy, err := ParseRestartPolicy(*flRestartPolicy) if err != nil { return nil, nil, nil, cmd, err } loggingOpts, err := parseLoggingOpts(*flLoggingDriver, flLoggingOpts.GetAll()) if err != nil { return nil, nil, nil, cmd, err } securityOpts, err := parseSecurityOpts(flSecurityOpt.GetAll()) if err != nil { return nil, nil, nil, cmd, err } resources := container.Resources{ CgroupParent: *flCgroupParent, Memory: flMemory, MemoryReservation: MemoryReservation, MemorySwap: memorySwap, MemorySwappiness: flSwappiness, KernelMemory: KernelMemory, OomKillDisable: flOomKillDisable, CPUShares: *flCPUShares, CPUPeriod: *flCPUPeriod, CpusetCpus: *flCpusetCpus, CpusetMems: *flCpusetMems, CPUQuota: *flCPUQuota, BlkioWeight: *flBlkioWeight, BlkioWeightDevice: flBlkioWeightDevice.GetList(), BlkioDeviceReadBps: flDeviceReadBps.GetList(), BlkioDeviceWriteBps: flDeviceWriteBps.GetList(), BlkioDeviceReadIOps: flDeviceReadIOps.GetList(), BlkioDeviceWriteIOps: flDeviceWriteIOps.GetList(), Ulimits: flUlimits.GetList(), Devices: deviceMappings, } config := &container.Config{ Hostname: hostname, Domainname: domainname, ExposedPorts: ports, User: *flUser, Tty: *flTty, // TODO: deprecated, it comes from -n, --networking // it's still needed internally to set the network to disabled // if e.g. 
bridge is none in daemon opts, and in inspect NetworkDisabled: false, OpenStdin: *flStdin, AttachStdin: attachStdin, AttachStdout: attachStdout, AttachStderr: attachStderr, Env: envVariables, Cmd: runCmd, Image: image, Volumes: flVolumes.GetMap(), MacAddress: *flMacAddress, Entrypoint: entrypoint, WorkingDir: *flWorkingDir, Labels: ConvertKVStringsToMap(labels), StopSignal: *flStopSignal, } hostConfig := &container.HostConfig{ Binds: binds, ContainerIDFile: *flContainerIDFile, OomScoreAdj: *flOomScoreAdj, Privileged: *flPrivileged, PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, // Make sure the dns fields are never nil. // New containers don't ever have those fields nil, // but pre created containers can still have those nil values. // See https://github.com/docker/docker/pull/17779 // for a more detailed explanation on why we don't want that. DNS: flDNS.GetAllOrEmpty(), DNSSearch: flDNSSearch.GetAllOrEmpty(), DNSOptions: flDNSOptions.GetAllOrEmpty(), ExtraHosts: flExtraHosts.GetAll(), VolumesFrom: flVolumesFrom.GetAll(), NetworkMode: container.NetworkMode(*flNetMode), IpcMode: ipcMode, PidMode: pidMode, UTSMode: utsMode, CapAdd: strslice.New(flCapAdd.GetAll()...), CapDrop: strslice.New(flCapDrop.GetAll()...), GroupAdd: flGroupAdd.GetAll(), RestartPolicy: restartPolicy, SecurityOpt: securityOpts, ReadonlyRootfs: *flReadonlyRootfs, LogConfig: container.LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, VolumeDriver: *flVolumeDriver, Isolation: container.IsolationLevel(*flIsolation), ShmSize: shmSize, Resources: resources, Tmpfs: tmpfs, } // When allocating stdin in attached mode, close stdin at client disconnect if config.OpenStdin && config.AttachStdin { config.StdinOnce = true } networkingConfig := &networktypes.NetworkingConfig{ EndpointsConfig: make(map[string]*networktypes.EndpointSettings), } if *flIPv4Address != "" || *flIPv6Address != "" { networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = 
&networktypes.EndpointSettings{ IPAMConfig: &networktypes.EndpointIPAMConfig{ IPv4Address: *flIPv4Address, IPv6Address: *flIPv6Address, }, } } if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] if epConfig == nil { epConfig = &networktypes.EndpointSettings{} } epConfig.Links = make([]string, len(hostConfig.Links)) copy(epConfig.Links, hostConfig.Links) networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig } if flAliases.Len() > 0 { epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] if epConfig == nil { epConfig = &networktypes.EndpointSettings{} } epConfig.Aliases = make([]string, flAliases.Len()) copy(epConfig.Aliases, flAliases.GetAll()) networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig } return config, hostConfig, networkingConfig, cmd, nil } // reads a file of line terminated key=value pairs and override that with override parameter func readKVStrings(files []string, override []string) ([]string, error) { envVariables := []string{} for _, ef := range files { parsedVars, err := ParseEnvFile(ef) if err != nil { return nil, err } envVariables = append(envVariables, parsedVars...) } // parse the '-e' and '--env' after, to allow override envVariables = append(envVariables, override...) 
return envVariables, nil } // ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} func ConvertKVStringsToMap(values []string) map[string]string { result := make(map[string]string, len(values)) for _, value := range values { kv := strings.SplitN(value, "=", 2) if len(kv) == 1 { result[kv[0]] = "" } else { result[kv[0]] = kv[1] } } return result } func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { loggingOptsMap := ConvertKVStringsToMap(loggingOpts) if loggingDriver == "none" && len(loggingOpts) > 0 { return map[string]string{}, fmt.Errorf("Invalid logging opts for driver %s", loggingDriver) } return loggingOptsMap, nil } // takes a local seccomp daemon, reads the file contents for sending to the daemon func parseSecurityOpts(securityOpts []string) ([]string, error) { for key, opt := range securityOpts { con := strings.SplitN(opt, ":", 2) if len(con) == 1 { return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) } if con[0] == "seccomp" && con[1] != "unconfined" { f, err := ioutil.ReadFile(con[1]) if err != nil { return securityOpts, fmt.Errorf("Opening seccomp profile (%s) failed: %v", con[1], err) } b := bytes.NewBuffer(nil) if err := json.Compact(b, f); err != nil { return securityOpts, fmt.Errorf("Compacting json for seccomp profile (%s) failed: %v", con[1], err) } securityOpts[key] = fmt.Sprintf("seccomp:%s", b.Bytes()) } } return securityOpts, nil } // ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { p := container.RestartPolicy{} if policy == "" { return p, nil } var ( parts = strings.Split(policy, ":") name = parts[0] ) p.Name = name switch name { case "always", "unless-stopped": if len(parts) > 1 { return p, fmt.Errorf("maximum restart count not valid with restart policy of \"%s\"", name) } case "no": // do nothing case "on-failure": if len(parts) > 2 { return p, fmt.Errorf("restart 
count format is not valid, usage: 'on-failure:N' or 'on-failure'") } if len(parts) == 2 { count, err := strconv.Atoi(parts[1]) if err != nil { return p, err } p.MaximumRetryCount = count } default: return p, fmt.Errorf("invalid restart policy %s", name) } return p, nil } // ParseDevice parses a device mapping string to a container.DeviceMapping struct func ParseDevice(device string) (container.DeviceMapping, error) { src := "" dst := "" permissions := "rwm" arr := strings.Split(device, ":") switch len(arr) { case 3: permissions = arr[2] fallthrough case 2: if ValidDeviceMode(arr[1]) { permissions = arr[1] } else { dst = arr[1] } fallthrough case 1: src = arr[0] default: return container.DeviceMapping{}, fmt.Errorf("Invalid device specification: %s", device) } if dst == "" { dst = src } deviceMapping := container.DeviceMapping{ PathOnHost: src, PathInContainer: dst, CgroupPermissions: permissions, } return deviceMapping, nil } // ParseLink parses and validates the specified string as a link format (name:alias) func ParseLink(val string) (string, string, error) { if val == "" { return "", "", fmt.Errorf("empty string specified for links") } arr := strings.Split(val, ":") if len(arr) > 2 { return "", "", fmt.Errorf("bad format for links: %s", val) } if len(arr) == 1 { return val, val, nil } // This is kept because we can actually get an HostConfig with links // from an already created container and the format is not `foo:bar` // but `/foo:/c1/bar` if strings.HasPrefix(arr[0], "/") { _, alias := path.Split(arr[1]) return arr[0][1:], alias, nil } return arr[0], arr[1], nil } // ValidateLink validates that the specified string has a valid link format (containerName:alias). func ValidateLink(val string) (string, error) { if _, _, err := ParseLink(val); err != nil { return val, err } return val, nil } // ValidDeviceMode checks if the mode for device is valid or not. // Valid mode is a composition of r (read), w (write), and m (mknod). 
func ValidDeviceMode(mode string) bool { var legalDeviceMode = map[rune]bool{ 'r': true, 'w': true, 'm': true, } if mode == "" { return false } for _, c := range mode { if !legalDeviceMode[c] { return false } legalDeviceMode[c] = false } return true } // ValidateDevice validates a path for devices // It will make sure 'val' is in the form: // [host-dir:]container-path[:mode] // It also validates the device mode. func ValidateDevice(val string) (string, error) { return validatePath(val, ValidDeviceMode) } func validatePath(val string, validator func(string) bool) (string, error) { var containerPath string var mode string if strings.Count(val, ":") > 2 { return val, fmt.Errorf("bad format for path: %s", val) } split := strings.SplitN(val, ":", 3) if split[0] == "" { return val, fmt.Errorf("bad format for path: %s", val) } switch len(split) { case 1: containerPath = split[0] val = path.Clean(containerPath) case 2: if isValid := validator(split[1]); isValid { containerPath = split[0] mode = split[1] val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) } else { containerPath = split[1] val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) } case 3: containerPath = split[1] mode = split[2] if isValid := validator(split[2]); !isValid { return val, fmt.Errorf("bad mode specified: %s", mode) } val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) } if !path.IsAbs(containerPath) { return val, fmt.Errorf("%s is not an absolute path", containerPath) } return val, nil } // SplitN splits raw into a maximum of n parts, separated by a separator colon. // A separator colon is the last `:` character in the regex `[/:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). // This allows to correctly split strings such as `C:\foo:D:\:rw`. 
func volumeSplitN(raw string, n int) []string { var array []string if len(raw) == 0 || raw[0] == ':' { // invalid return nil } // numberOfParts counts the number of parts separated by a separator colon numberOfParts := 0 // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. left := 0 // right represents the right-most cursor in raw incremented with the loop. Note this // starts at index 1 as index 0 is already handle above as a special case. for right := 1; right < len(raw); right++ { // stop parsing if reached maximum number of parts if n >= 0 && numberOfParts >= n { break } if raw[right] != ':' { continue } potentialDriveLetter := raw[right-1] if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { if right > 1 { beforePotentialDriveLetter := raw[right-2] if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '/' && beforePotentialDriveLetter != '\\' { // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. array = append(array, raw[left:right]) left = right + 1 numberOfParts++ } // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. } // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. } else { // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. array = append(array, raw[left:right]) left = right + 1 numberOfParts++ } } // need to take care of the last part if left < len(raw) { if n >= 0 && numberOfParts >= n { // if the maximum number of parts is reached, just append the rest to the last part // left-1 is at the last `:` that needs to be included since not considered a separator. 
array[n-1] += raw[left-1:] } else { array = append(array, raw[left:]) } } return array } docker-1.10.3/runconfig/opts/parse_test.go000066400000000000000000001020431267010174400205240ustar00rootroot00000000000000package opts import ( "bytes" "encoding/json" "fmt" "io/ioutil" "os" "runtime" "strings" "testing" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/runconfig" "github.com/docker/engine-api/types/container" networktypes "github.com/docker/engine-api/types/network" "github.com/docker/go-connections/nat" ) func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil return Parse(cmd, args) } func parse(t *testing.T, args string) (*container.Config, *container.HostConfig, error) { config, hostConfig, _, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) return config, hostConfig, err } func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { config, hostConfig, err := parse(t, args) if err != nil { t.Fatal(err) } return config, hostConfig } func TestParseRunLinks(t *testing.T) { if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) } if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) } if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { t.Fatalf("Error parsing links. 
No link expected, received: %v", hostConfig.Links) } } func TestParseRunAttach(t *testing.T) { if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if _, _, err := parse(t, "-a"); err == nil { t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") } if _, _, err := parse(t, "-a invalid"); err == nil { t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") } if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") } if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") } if _, _, err := parse(t, "-a stdin -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") } if _, _, err := parse(t, "-a stdout -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") } if _, _, err := parse(t, "-a stderr -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") } if _, _, err := parse(t, "-d --rm"); err == nil { t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") } } func TestParseRunVolumes(t *testing.T) { // A single volume arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) } else if _, exists := config.Volumes[arr[0]]; !exists { t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) } // Two volumes arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. 
Received %v", tryit, hostConfig.Binds) } else if _, exists := config.Volumes[arr[0]]; !exists { t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) } else if _, exists := config.Volumes[arr[1]]; !exists { t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) } // A single bind-mount arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) } // Two bind-mounts. arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) } // Two bind-mounts, first read-only, second read-write. // TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4 arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. 
Received %v", arr[0], arr[1], hostConfig.Binds) } // Similar to previous test but with alternate modes which are only supported by Linux if runtime.GOOS != "windows" { arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) } arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) } } // One bind mount and one volume arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) } else if _, exists := config.Volumes[arr[1]]; !exists { t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) } // Root to non-c: drive letter (Windows specific) if runtime.GOOS == "windows" { arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { t.Fatalf("Error parsing %s. 
Should have a single bind mount and no volumes", arr[0]) } } } // This tests the cases for binds which are generated through // DecodeContainerConfig rather than Parse() func TestDecodeContainerConfigVolumes(t *testing.T) { // Root to root bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("volume %v should have failed", bindsOrVols) } // No destination path bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } // // No destination path or mode bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } // A whole lot of nothing bindsOrVols = []string{`:`} if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } // A whole lot of nothing with no mode bindsOrVols = []string{`::`} if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } // 
Too much including an invalid mode wTmp := os.Getenv("TEMP") bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } // Windows specific error tests if runtime.GOOS == "windows" { // Volume which does not include a drive letter bindsOrVols = []string{`\tmp`} if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } // Root to C-Drive bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } // Container path that does not include a drive letter bindsOrVols = []string{`c:\windows:\somewhere`} if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } } // Linux-specific error tests if runtime.GOOS != "windows" { // Just root bindsOrVols = []string{`/`} if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { t.Fatalf("binds %v should have failed", bindsOrVols) } // A single volume that looks like a bind mount passed in Volumes. 
// This should be handled as a bind mount, not a volume. vols := []string{`/foo:/bar`} if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { t.Fatal("Volume /foo:/bar should have succeeded as a volume name") } else if hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes[vols[0]]; !exists { t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) } } } // callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes // to call DecodeContainerConfig. It effectively does what a client would // do when calling the daemon by constructing a JSON stream of a // ContainerConfigWrapper which is populated by the set of volume specs // passed into it. It returns a config and a hostconfig which can be // validated to ensure DecodeContainerConfig has manipulated the structures // correctly. 
func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) { var ( b []byte err error c *container.Config h *container.HostConfig ) w := runconfig.ContainerConfigWrapper{ Config: &container.Config{ Volumes: map[string]struct{}{}, }, HostConfig: &container.HostConfig{ NetworkMode: "none", Binds: binds, }, } for _, v := range volumes { w.Config.Volumes[v] = struct{}{} } if b, err = json.Marshal(w); err != nil { return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) } c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b)) if err != nil { return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) } if c == nil || h == nil { return nil, nil, fmt.Errorf("Empty config or hostconfig") } return c, h, err } // check if (a == c && b == d) || (a == d && b == c) // because maps are randomized func compareRandomizedStrings(a, b, c, d string) error { if a == c && b == d { return nil } if a == d && b == c { return nil } return fmt.Errorf("strings don't match") } // setupPlatformVolume takes two arrays of volume specs - a Unix style // spec and a Windows style spec. Depending on the platform being unit tested, // it returns one of them, along with a volume string that would be passed // on the docker CLI (eg -v /bar -v /foo). 
func setupPlatformVolume(u []string, w []string) ([]string, string) { var a []string if runtime.GOOS == "windows" { a = w } else { a = u } s := "" for _, v := range a { s = s + "-v " + v + " " } return a, s } // Simple parse with MacAddress validation func TestParseWithMacAddress(t *testing.T) { invalidMacAddress := "--mac-address=invalidMacAddress" validMacAddress := "--mac-address=92:d0:c6:0a:29:33" if _, _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) } if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) } } func TestParseWithMemory(t *testing.T) { invalidMemory := "--memory=invalid" validMemory := "--memory=1G" if _, _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) } if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) } } func TestParseWithMemorySwap(t *testing.T) { invalidMemory := "--memory-swap=invalid" validMemory := "--memory-swap=1G" anotherValidMemory := "--memory-swap=-1" if _, _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) } if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) } if _, hostconfig := mustParse(t, anotherValidMemory); hostconfig.MemorySwap != -1 { t.Fatalf("Expected the config to 
have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) } } func TestParseHostname(t *testing.T) { hostname := "--hostname=hostname" hostnameWithDomain := "--hostname=hostname.domainname" hostnameWithDomainTld := "--hostname=hostname.domainname.tld" if config, _ := mustParse(t, hostname); config.Hostname != "hostname" && config.Domainname != "" { t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) } if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname" && config.Domainname != "domainname" { t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) } if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname" && config.Domainname != "domainname.tld" { t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) } } func TestParseWithExpose(t *testing.T) { invalids := map[string]string{ ":": "Invalid port format for --expose: :", "8080:9090": "Invalid port format for --expose: 8080:9090", "/tcp": "Invalid range format for --expose: /tcp, error: Empty string specified for ports.", "/udp": "Invalid range format for --expose: /udp, error: Empty string specified for ports.", "NaN/tcp": `Invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, "NaN-NaN/tcp": `Invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, "8080-NaN/tcp": `Invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, "1234567890-8080/tcp": `Invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, } valids := map[string][]nat.Port{ "8080/tcp": {"8080/tcp"}, "8080/udp": {"8080/udp"}, "8080/ncp": {"8080/ncp"}, "8080-8080/udp": {"8080/udp"}, "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, } for expose, expectedError := range invalids { if _, 
_, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) } } for expose, exposedPorts := range valids { config, _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.ExposedPorts) != len(exposedPorts) { t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) } for _, port := range exposedPorts { if _, ok := config.ExposedPorts[port]; !ok { t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) } } } // Merge with actual published port config, _, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.ExposedPorts) != 2 { t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) } ports := []nat.Port{"80/tcp", "81/tcp"} for _, port := range ports { if _, ok := config.ExposedPorts[port]; !ok { t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) } } } func TestParseDevice(t *testing.T) { valids := map[string]container.DeviceMapping{ "/dev/snd": { PathOnHost: "/dev/snd", PathInContainer: "/dev/snd", CgroupPermissions: "rwm", }, "/dev/snd:rw": { PathOnHost: "/dev/snd", PathInContainer: "/dev/snd", CgroupPermissions: "rw", }, "/dev/snd:/something": { PathOnHost: "/dev/snd", PathInContainer: "/something", CgroupPermissions: "rwm", }, "/dev/snd:/something:rw": { PathOnHost: "/dev/snd", PathInContainer: "/something", CgroupPermissions: "rw", }, } for device, deviceMapping := range valids { _, hostconfig, _, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) if err != nil { t.Fatal(err) } if len(hostconfig.Devices) != 1 { t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) } if hostconfig.Devices[0] != deviceMapping { t.Fatalf("Expected %v, got %v", deviceMapping, 
hostconfig.Devices) } } } func TestParseModes(t *testing.T) { // ipc ko if _, _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) } // ipc ok _, hostconfig, _, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) if err != nil { t.Fatal(err) } if !hostconfig.IpcMode.Valid() { t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) } // pid ko if _, _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) } // pid ok _, hostconfig, _, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) if err != nil { t.Fatal(err) } if !hostconfig.PidMode.Valid() { t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) } // uts ko if _, _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) } // uts ok _, hostconfig, _, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) if err != nil { t.Fatal(err) } if !hostconfig.UTSMode.Valid() { t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) } // shm-size ko if _, _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != "invalid size: 'a128m'" { t.Fatalf("Expected an error with message 'invalid size: a128m', got %v", err) } // shm-size ok _, hostconfig, _, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"}) if err != nil { t.Fatal(err) } if hostconfig.ShmSize != 134217728 { t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) } } func TestParseRestartPolicy(t *testing.T) { invalids := map[string]string{ "something": "invalid restart policy something", "always:2": "maximum restart 
count not valid with restart policy of \"always\"", "always:2:3": "maximum restart count not valid with restart policy of \"always\"", "on-failure:invalid": `strconv.ParseInt: parsing "invalid": invalid syntax`, "on-failure:2:5": "restart count format is not valid, usage: 'on-failure:N' or 'on-failure'", } valids := map[string]container.RestartPolicy{ "": {}, "always": { Name: "always", MaximumRetryCount: 0, }, "on-failure:1": { Name: "on-failure", MaximumRetryCount: 1, }, } for restart, expectedError := range invalids { if _, _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) } } for restart, expected := range valids { _, hostconfig, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) if err != nil { t.Fatal(err) } if hostconfig.RestartPolicy != expected { t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) } } } func TestParseLoggingOpts(t *testing.T) { // logging opts ko if _, _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "Invalid logging opts for driver none" { t.Fatalf("Expected an error with message 'Invalid logging opts for driver none', got %v", err) } // logging opts ok _, hostconfig, _, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) if err != nil { t.Fatal(err) } if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) } } func TestParseEnvfileVariables(t *testing.T) { e := "open nonexistent: no such file or directory" if runtime.GOOS == "windows" { e = "open nonexistent: The system cannot find the file specified." 
} // env ko if _, _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { t.Fatalf("Expected an error with message '%s', got %v", e, err) } // env ok config, _, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { t.Fatalf("Expected a a config with [ENV1=value1], got %v", config.Env) } config, _, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { t.Fatalf("Expected a a config with [ENV1=value1 ENV2=value2], got %v", config.Env) } } func TestParseLabelfileVariables(t *testing.T) { e := "open nonexistent: no such file or directory" if runtime.GOOS == "windows" { e = "open nonexistent: The system cannot find the file specified." } // label ko if _, _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { t.Fatalf("Expected an error with message '%s', got %v", e, err) } // label ok config, _, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { t.Fatalf("Expected a a config with [LABEL1:value1], got %v", config.Labels) } config, _, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"}) if err != nil { t.Fatal(err) } if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { t.Fatalf("Expected a a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) } } func TestParseEntryPoint(t *testing.T) { config, _, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) if err != nil { t.Fatal(err) } if config.Entrypoint.Len() != 1 && 
config.Entrypoint.Slice()[0] != "anything" { t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) } } func TestValidateLink(t *testing.T) { valid := []string{ "name", "dcdfbe62ecd0:alias", "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", "angry_torvalds:linus", } invalid := map[string]string{ "": "empty string specified for links", "too:much:of:it": "bad format for links: too:much:of:it", } for _, link := range valid { if _, err := ValidateLink(link); err != nil { t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) } } for link, expectedError := range invalid { if _, err := ValidateLink(link); err == nil { t.Fatalf("ValidateLink(`%q`) should have failed validation", link) } else { if !strings.Contains(err.Error(), expectedError) { t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) } } } } func TestParseLink(t *testing.T) { name, alias, err := ParseLink("name:alias") if err != nil { t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) } if name != "name" { t.Fatalf("Link name should have been name, got %s instead", name) } if alias != "alias" { t.Fatalf("Link alias should have been alias, got %s instead", alias) } // short format definition name, alias, err = ParseLink("name") if err != nil { t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) } if name != "name" { t.Fatalf("Link name should have been name, got %s instead", name) } if alias != "name" { t.Fatalf("Link alias should have been name, got %s instead", alias) } // empty string link definition is not allowed if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) } // more than two colons are not allowed if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { 
t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) } } func TestValidateDevice(t *testing.T) { valid := []string{ "/home", "/home:/home", "/home:/something/else", "/with space", "/home:/with space", "relative:/absolute-path", "hostPath:/containerPath:r", "/hostPath:/containerPath:rw", "/hostPath:/containerPath:mrw", } invalid := map[string]string{ "": "bad format for path: ", "./": "./ is not an absolute path", "../": "../ is not an absolute path", "/:../": "../ is not an absolute path", "/:path": "path is not an absolute path", ":": "bad format for path: :", "/tmp:": " is not an absolute path", ":test": "bad format for path: :test", ":/test": "bad format for path: :/test", "tmp:": " is not an absolute path", ":test:": "bad format for path: :test:", "::": "bad format for path: ::", ":::": "bad format for path: :::", "/tmp:::": "bad format for path: /tmp:::", ":/tmp::": "bad format for path: :/tmp::", "path:ro": "ro is not an absolute path", "path:rr": "rr is not an absolute path", "a:/b:ro": "bad mode specified: ro", "a:/b:rr": "bad mode specified: rr", } for _, path := range valid { if _, err := ValidateDevice(path); err != nil { t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) } } for path, expectedError := range invalid { if _, err := ValidateDevice(path); err == nil { t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) } else { if err.Error() != expectedError { t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) } } } } func TestVolumeSplitN(t *testing.T) { for _, x := range []struct { input string n int expected []string }{ {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, {`:C:\foo:d:`, -1, nil}, {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, {`d:\`, -1, []string{`d:\`}}, {`d:`, -1, []string{`d:`}}, {`d:\path`, -1, []string{`d:\path`}}, 
{`d:\path with space`, -1, []string{`d:\path with space`}}, {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, {`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, {`name:D:`, -1, []string{`name`, `D:`}}, {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, {`c:\Windows`, -1, []string{`c:\Windows`}}, {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, {``, -1, nil}, {`.`, -1, []string{`.`}}, {`..\`, -1, []string{`..\`}}, {`c:\:..\`, -1, []string{`c:\`, `..\`}}, {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, } { res := volumeSplitN(x.input, x.n) if len(res) < len(x.expected) { t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) } for i, e := range res { if e != x.expected[i] { t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) } } } } docker-1.10.3/runconfig/opts/throttledevice.go000066400000000000000000000065001267010174400214010ustar00rootroot00000000000000package opts import ( "fmt" "strconv" "strings" "github.com/docker/engine-api/types/blkiodev" "github.com/docker/go-units" ) // ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) // ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. 
func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { split := strings.SplitN(val, ":", 2) if len(split) != 2 { return nil, fmt.Errorf("bad format: %s", val) } if !strings.HasPrefix(split[0], "/dev/") { return nil, fmt.Errorf("bad format for device path: %s", val) } rate, err := units.RAMInBytes(split[1]) if err != nil { return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) } if rate < 0 { return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) } return &blkiodev.ThrottleDevice{ Path: split[0], Rate: uint64(rate), }, nil } // ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { split := strings.SplitN(val, ":", 2) if len(split) != 2 { return nil, fmt.Errorf("bad format: %s", val) } if !strings.HasPrefix(split[0], "/dev/") { return nil, fmt.Errorf("bad format for device path: %s", val) } rate, err := strconv.ParseUint(split[1], 10, 64) if err != nil { return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) } if rate < 0 { return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. 
Number must be a positive integer", val) } return &blkiodev.ThrottleDevice{ Path: split[0], Rate: uint64(rate), }, nil } // ThrottledeviceOpt defines a map of ThrottleDevices type ThrottledeviceOpt struct { values []*blkiodev.ThrottleDevice validator ValidatorThrottleFctType } // NewThrottledeviceOpt creates a new ThrottledeviceOpt func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { values := []*blkiodev.ThrottleDevice{} return ThrottledeviceOpt{ values: values, validator: validator, } } // Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt func (opt *ThrottledeviceOpt) Set(val string) error { var value *blkiodev.ThrottleDevice if opt.validator != nil { v, err := opt.validator(val) if err != nil { return err } value = v } (opt.values) = append((opt.values), value) return nil } // String returns ThrottledeviceOpt values as a string. func (opt *ThrottledeviceOpt) String() string { var out []string for _, v := range opt.values { out = append(out, v.String()) } return fmt.Sprintf("%v", out) } // GetList returns a slice of pointers to ThrottleDevices. 
func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { var throttledevice []*blkiodev.ThrottleDevice for _, v := range opt.values { throttledevice = append(throttledevice, v) } return throttledevice } docker-1.10.3/runconfig/opts/ulimit.go000066400000000000000000000017251267010174400176630ustar00rootroot00000000000000package opts import ( "fmt" "github.com/docker/go-units" ) // UlimitOpt defines a map of Ulimits type UlimitOpt struct { values *map[string]*units.Ulimit } // NewUlimitOpt creates a new UlimitOpt func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { if ref == nil { ref = &map[string]*units.Ulimit{} } return &UlimitOpt{ref} } // Set validates a Ulimit and sets its name as a key in UlimitOpt func (o *UlimitOpt) Set(val string) error { l, err := units.ParseUlimit(val) if err != nil { return err } (*o.values)[l.Name] = l return nil } // String returns Ulimit values as a string. func (o *UlimitOpt) String() string { var out []string for _, v := range *o.values { out = append(out, v.String()) } return fmt.Sprintf("%v", out) } // GetList returns a slice of pointers to Ulimits. 
func (o *UlimitOpt) GetList() []*units.Ulimit { var ulimits []*units.Ulimit for _, v := range *o.values { ulimits = append(ulimits, v) } return ulimits } docker-1.10.3/runconfig/opts/ulimit_test.go000066400000000000000000000020161267010174400207140ustar00rootroot00000000000000package opts import ( "testing" "github.com/docker/go-units" ) func TestUlimitOpt(t *testing.T) { ulimitMap := map[string]*units.Ulimit{ "nofile": {"nofile", 1024, 512}, } ulimitOpt := NewUlimitOpt(&ulimitMap) expected := "[nofile=512:1024]" if ulimitOpt.String() != expected { t.Fatalf("Expected %v, got %v", expected, ulimitOpt) } // Valid ulimit append to opts if err := ulimitOpt.Set("core=1024:1024"); err != nil { t.Fatal(err) } // Invalid ulimit type returns an error and do not append to opts if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { t.Fatalf("Expected error on invalid ulimit type") } expected = "[nofile=512:1024 core=1024:1024]" expected2 := "[core=1024:1024 nofile=512:1024]" result := ulimitOpt.String() if result != expected && result != expected2 { t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) } // And test GetList ulimits := ulimitOpt.GetList() if len(ulimits) != 2 { t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) } } docker-1.10.3/runconfig/opts/weightdevice.go000066400000000000000000000042621267010174400210260ustar00rootroot00000000000000package opts import ( "fmt" "strconv" "strings" "github.com/docker/engine-api/types/blkiodev" ) // ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) // ValidateWeightDevice validates that the specified string has a valid device-weight format. 
func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { split := strings.SplitN(val, ":", 2) if len(split) != 2 { return nil, fmt.Errorf("bad format: %s", val) } if !strings.HasPrefix(split[0], "/dev/") { return nil, fmt.Errorf("bad format for device path: %s", val) } weight, err := strconv.ParseUint(split[1], 10, 0) if err != nil { return nil, fmt.Errorf("invalid weight for device: %s", val) } if weight > 0 && (weight < 10 || weight > 1000) { return nil, fmt.Errorf("invalid weight for device: %s", val) } return &blkiodev.WeightDevice{ Path: split[0], Weight: uint16(weight), }, nil } // WeightdeviceOpt defines a map of WeightDevices type WeightdeviceOpt struct { values []*blkiodev.WeightDevice validator ValidatorWeightFctType } // NewWeightdeviceOpt creates a new WeightdeviceOpt func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { values := []*blkiodev.WeightDevice{} return WeightdeviceOpt{ values: values, validator: validator, } } // Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt func (opt *WeightdeviceOpt) Set(val string) error { var value *blkiodev.WeightDevice if opt.validator != nil { v, err := opt.validator(val) if err != nil { return err } value = v } (opt.values) = append((opt.values), value) return nil } // String returns WeightdeviceOpt values as a string. func (opt *WeightdeviceOpt) String() string { var out []string for _, v := range opt.values { out = append(out, v.String()) } return fmt.Sprintf("%v", out) } // GetList returns a slice of pointers to WeightDevices. 
func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { var weightdevice []*blkiodev.WeightDevice for _, v := range opt.values { weightdevice = append(weightdevice, v) } return weightdevice } docker-1.10.3/runconfig/streams.go000066400000000000000000000064711267010174400170540ustar00rootroot00000000000000package runconfig import ( "fmt" "io" "io/ioutil" "strings" "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/ioutils" ) // StreamConfig holds information about I/O streams managed together. // // streamConfig.StdinPipe returns a WriteCloser which can be used to feed data // to the standard input of the streamConfig's active process. // streamConfig.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser // which can be used to retrieve the standard output (and error) generated // by the container's active process. The output (and error) are actually // copied and delivered to all StdoutPipe and StderrPipe consumers, using // a kind of "broadcaster". type StreamConfig struct { stdout *broadcaster.Unbuffered stderr *broadcaster.Unbuffered stdin io.ReadCloser stdinPipe io.WriteCloser } // NewStreamConfig creates a stream config and initializes // the standard err and standard out to new unbuffered broadcasters. func NewStreamConfig() *StreamConfig { return &StreamConfig{ stderr: new(broadcaster.Unbuffered), stdout: new(broadcaster.Unbuffered), } } // Stdout returns the standard output in the configuration. func (streamConfig *StreamConfig) Stdout() *broadcaster.Unbuffered { return streamConfig.stdout } // Stderr returns the standard error in the configuration. func (streamConfig *StreamConfig) Stderr() *broadcaster.Unbuffered { return streamConfig.stderr } // Stdin returns the standard input in the configuration. func (streamConfig *StreamConfig) Stdin() io.ReadCloser { return streamConfig.stdin } // StdinPipe returns an input writer pipe as an io.WriteCloser. 
func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { return streamConfig.stdinPipe } // StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. // It adds this new out pipe to the Stdout broadcaster. func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { bytesPipe := ioutils.NewBytesPipe(nil) streamConfig.stdout.Add(bytesPipe) return bytesPipe } // StderrPipe creates a new io.ReadCloser with an empty bytes pipe. // It adds this new err pipe to the Stderr broadcaster. func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { bytesPipe := ioutils.NewBytesPipe(nil) streamConfig.stderr.Add(bytesPipe) return bytesPipe } // NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. func (streamConfig *StreamConfig) NewInputPipes() { streamConfig.stdin, streamConfig.stdinPipe = io.Pipe() } // NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. func (streamConfig *StreamConfig) NewNopInputPipe() { streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) } // CloseStreams ensures that the configured streams are properly closed. 
func (streamConfig *StreamConfig) CloseStreams() error { var errors []string if streamConfig.stdin != nil { if err := streamConfig.stdin.Close(); err != nil { errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) } } if err := streamConfig.stdout.Clean(); err != nil { errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) } if err := streamConfig.stderr.Clean(); err != nil { errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) } if len(errors) > 0 { return fmt.Errorf(strings.Join(errors, "\n")) } return nil } docker-1.10.3/utils/000077500000000000000000000000001267010174400142055ustar00rootroot00000000000000docker-1.10.3/utils/debug.go000066400000000000000000000010431267010174400156200ustar00rootroot00000000000000package utils import ( "os" "github.com/Sirupsen/logrus" ) // EnableDebug sets the DEBUG env var to true // and makes the logger to log at debug level. func EnableDebug() { os.Setenv("DEBUG", "1") logrus.SetLevel(logrus.DebugLevel) } // DisableDebug sets the DEBUG env var to false // and makes the logger to log at info level. func DisableDebug() { os.Setenv("DEBUG", "") logrus.SetLevel(logrus.InfoLevel) } // IsDebugEnabled checks whether the debug flag is set or not. func IsDebugEnabled() bool { return os.Getenv("DEBUG") != "" } docker-1.10.3/utils/experimental.go000066400000000000000000000003071267010174400172310ustar00rootroot00000000000000// +build experimental package utils // ExperimentalBuild is a stub which always returns true for // builds that include the "experimental" build tag func ExperimentalBuild() bool { return true } docker-1.10.3/utils/names.go000066400000000000000000000011731267010174400156410ustar00rootroot00000000000000package utils import "regexp" // RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. 
const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` // RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. var RestrictedNamePattern = regexp.MustCompile(`^/?` + RestrictedNameChars + `+$`) // RestrictedVolumeNamePattern is a regular expression to validate volume names against the collection of restricted characters. var RestrictedVolumeNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) docker-1.10.3/utils/stubs.go000066400000000000000000000003211267010174400156700ustar00rootroot00000000000000// +build !experimental package utils // ExperimentalBuild is a stub which always returns false for // builds that do not include the "experimental" build tag func ExperimentalBuild() bool { return false } docker-1.10.3/utils/utils.go000066400000000000000000000131541267010174400157000ustar00rootroot00000000000000package utils import ( "crypto/sha1" "encoding/hex" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strings" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/stringid" ) // SelfPath figures out the absolute path of our own binary (if it's still around). 
func SelfPath() string { path, err := exec.LookPath(os.Args[0]) if err != nil { if os.IsNotExist(err) { return "" } if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { return "" } panic(err) } path, err = filepath.Abs(path) if err != nil { if os.IsNotExist(err) { return "" } panic(err) } return path } func dockerInitSha1(target string) string { f, err := os.Open(target) if err != nil { return "" } defer f.Close() h := sha1.New() _, err = io.Copy(h, f) if err != nil { return "" } return hex.EncodeToString(h.Sum(nil)) } func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) if target == "" { return false } if dockerversion.IAmStatic == "true" { if selfPath == "" { return false } if target == selfPath { return true } targetFileInfo, err := os.Lstat(target) if err != nil { return false } selfPathFileInfo, err := os.Lstat(selfPath) if err != nil { return false } return os.SameFile(targetFileInfo, selfPathFileInfo) } return dockerversion.InitSHA1 != "" && dockerInitSha1(target) == dockerversion.InitSHA1 } // DockerInitPath figures out the path of our dockerinit (which may be SelfPath()) func DockerInitPath(localCopy string) string { selfPath := SelfPath() if isValidDockerInitPath(selfPath, selfPath) { // if we're valid, don't bother checking anything else return selfPath } var possibleInits = []string{ localCopy, dockerversion.InitPath, filepath.Join(filepath.Dir(selfPath), "dockerinit"), // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." 
// https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec "/usr/libexec/docker/dockerinit", "/usr/local/libexec/docker/dockerinit", // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." // https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA "/usr/lib/docker/dockerinit", "/usr/local/lib/docker/dockerinit", } for _, dockerInit := range possibleInits { if dockerInit == "" { continue } path, err := exec.LookPath(dockerInit) if err == nil { path, err = filepath.Abs(path) if err != nil { // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? panic(err) } if isValidDockerInitPath(path, selfPath) { return path } } } return "" } var globalTestID string // TestDirectory creates a new temporary directory and returns its path. // The contents of directory at path `templateDir` is copied into the // new directory. func TestDirectory(templateDir string) (dir string, err error) { if globalTestID == "" { globalTestID = stringid.GenerateNonCryptoID()[:4] } prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) if prefix == "" { prefix = "docker-test-" } dir, err = ioutil.TempDir("", prefix) if err = os.Remove(dir); err != nil { return } if templateDir != "" { if err = archive.CopyWithTar(templateDir, dir); err != nil { return } } return } // GetCallerName introspects the call stack and returns the name of the // function `depth` levels down in the stack. func GetCallerName(depth int) string { // Use the caller function name as a prefix. // This helps trace temp directories back to their test. 
pc, _, _, _ := runtime.Caller(depth + 1) callerLongName := runtime.FuncForPC(pc).Name() parts := strings.Split(callerLongName, ".") callerShortName := parts[len(parts)-1] return callerShortName } // ReplaceOrAppendEnvValues returns the defaults with the overrides either // replaced by env key or appended to the list func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { cache := make(map[string]int, len(defaults)) for i, e := range defaults { parts := strings.SplitN(e, "=", 2) cache[parts[0]] = i } for _, value := range overrides { // Values w/o = means they want this env to be removed/unset. if !strings.Contains(value, "=") { if i, exists := cache[value]; exists { defaults[i] = "" // Used to indicate it should be removed } continue } // Just do a normal set/update parts := strings.SplitN(value, "=", 2) if i, exists := cache[parts[0]]; exists { defaults[i] = value } else { defaults = append(defaults, value) } } // Now remove all entries that we want to "unset" for i := 0; i < len(defaults); i++ { if defaults[i] == "" { defaults = append(defaults[:i], defaults[i+1:]...) i-- } } return defaults } // GetErrorMessage returns the human readable message associated with // the passed-in error. In some cases the default Error() func returns // something that is less than useful so based on its types this func // will go and get a better piece of text. 
func GetErrorMessage(err error) string { switch err.(type) { case errcode.Error: e, _ := err.(errcode.Error) return e.Message case errcode.ErrorCode: ec, _ := err.(errcode.ErrorCode) return ec.Message() default: return err.Error() } } docker-1.10.3/utils/utils_test.go000066400000000000000000000006701267010174400167360ustar00rootroot00000000000000package utils import "testing" func TestReplaceAndAppendEnvVars(t *testing.T) { var ( d = []string{"HOME=/"} o = []string{"HOME=/root", "TERM=xterm"} ) env := ReplaceOrAppendEnvValues(d, o) if len(env) != 2 { t.Fatalf("expected len of 2 got %d", len(env)) } if env[0] != "HOME=/root" { t.Fatalf("expected HOME=/root got '%s'", env[0]) } if env[1] != "TERM=xterm" { t.Fatalf("expected TERM=xterm got '%s'", env[1]) } } docker-1.10.3/vendor/000077500000000000000000000000001267010174400143425ustar00rootroot00000000000000docker-1.10.3/vendor/src/000077500000000000000000000000001267010174400151315ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/000077500000000000000000000000001267010174400171705ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Azure/000077500000000000000000000000001267010174400202565ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/000077500000000000000000000000001267010174400225035ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/LICENSE000066400000000000000000000021001267010174400235010ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2015 Microsoft Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above 
copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/README.md000066400000000000000000000017621267010174400237700ustar00rootroot00000000000000# go-ansiterm This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). See parser_test.go for examples exercising the state machine and generating appropriate function calls. 
docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/constants.go000066400000000000000000000137461267010174400250610ustar00rootroot00000000000000package ansiterm const LogEnv = "DEBUG_TERMINAL" // ANSI constants // References: // -- http://www.ecma-international.org/publications/standards/Ecma-048.htm // -- http://man7.org/linux/man-pages/man4/console_codes.4.html // -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html // -- http://en.wikipedia.org/wiki/ANSI_escape_code // -- http://vt100.net/emu/dec_ansi_parser // -- http://vt100.net/emu/vt500_parser.svg // -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html // -- http://www.inwap.com/pdp10/ansicode.txt const ( // ECMA-48 Set Graphics Rendition // Note: // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved // -- Fonts could possibly be supported via SetCurrentConsoleFontEx // -- Windows does not expose the per-window cursor (i.e., caret) blink times ANSI_SGR_RESET = 0 ANSI_SGR_BOLD = 1 ANSI_SGR_DIM = 2 _ANSI_SGR_ITALIC = 3 ANSI_SGR_UNDERLINE = 4 _ANSI_SGR_BLINKSLOW = 5 _ANSI_SGR_BLINKFAST = 6 ANSI_SGR_REVERSE = 7 _ANSI_SGR_INVISIBLE = 8 _ANSI_SGR_LINETHROUGH = 9 _ANSI_SGR_FONT_00 = 10 _ANSI_SGR_FONT_01 = 11 _ANSI_SGR_FONT_02 = 12 _ANSI_SGR_FONT_03 = 13 _ANSI_SGR_FONT_04 = 14 _ANSI_SGR_FONT_05 = 15 _ANSI_SGR_FONT_06 = 16 _ANSI_SGR_FONT_07 = 17 _ANSI_SGR_FONT_08 = 18 _ANSI_SGR_FONT_09 = 19 _ANSI_SGR_FONT_10 = 20 _ANSI_SGR_DOUBLEUNDERLINE = 21 ANSI_SGR_BOLD_DIM_OFF = 22 _ANSI_SGR_ITALIC_OFF = 23 ANSI_SGR_UNDERLINE_OFF = 24 _ANSI_SGR_BLINK_OFF = 25 _ANSI_SGR_RESERVED_00 = 26 ANSI_SGR_REVERSE_OFF = 27 _ANSI_SGR_INVISIBLE_OFF = 28 _ANSI_SGR_LINETHROUGH_OFF = 29 ANSI_SGR_FOREGROUND_BLACK = 30 ANSI_SGR_FOREGROUND_RED = 31 ANSI_SGR_FOREGROUND_GREEN = 32 ANSI_SGR_FOREGROUND_YELLOW = 33 ANSI_SGR_FOREGROUND_BLUE = 34 ANSI_SGR_FOREGROUND_MAGENTA = 35 ANSI_SGR_FOREGROUND_CYAN = 36 ANSI_SGR_FOREGROUND_WHITE = 37 _ANSI_SGR_RESERVED_01 = 38 ANSI_SGR_FOREGROUND_DEFAULT 
= 39 ANSI_SGR_BACKGROUND_BLACK = 40 ANSI_SGR_BACKGROUND_RED = 41 ANSI_SGR_BACKGROUND_GREEN = 42 ANSI_SGR_BACKGROUND_YELLOW = 43 ANSI_SGR_BACKGROUND_BLUE = 44 ANSI_SGR_BACKGROUND_MAGENTA = 45 ANSI_SGR_BACKGROUND_CYAN = 46 ANSI_SGR_BACKGROUND_WHITE = 47 _ANSI_SGR_RESERVED_02 = 48 ANSI_SGR_BACKGROUND_DEFAULT = 49 // 50 - 65: Unsupported ANSI_MAX_CMD_LENGTH = 4096 MAX_INPUT_EVENTS = 128 DEFAULT_WIDTH = 80 DEFAULT_HEIGHT = 24 ANSI_BEL = 0x07 ANSI_BACKSPACE = 0x08 ANSI_TAB = 0x09 ANSI_LINE_FEED = 0x0A ANSI_VERTICAL_TAB = 0x0B ANSI_FORM_FEED = 0x0C ANSI_CARRIAGE_RETURN = 0x0D ANSI_ESCAPE_PRIMARY = 0x1B ANSI_ESCAPE_SECONDARY = 0x5B ANSI_OSC_STRING_ENTRY = 0x5D ANSI_COMMAND_FIRST = 0x40 ANSI_COMMAND_LAST = 0x7E DCS_ENTRY = 0x90 CSI_ENTRY = 0x9B OSC_STRING = 0x9D ANSI_PARAMETER_SEP = ";" ANSI_CMD_G0 = '(' ANSI_CMD_G1 = ')' ANSI_CMD_G2 = '*' ANSI_CMD_G3 = '+' ANSI_CMD_DECPNM = '>' ANSI_CMD_DECPAM = '=' ANSI_CMD_OSC = ']' ANSI_CMD_STR_TERM = '\\' KEY_CONTROL_PARAM_2 = ";2" KEY_CONTROL_PARAM_3 = ";3" KEY_CONTROL_PARAM_4 = ";4" KEY_CONTROL_PARAM_5 = ";5" KEY_CONTROL_PARAM_6 = ";6" KEY_CONTROL_PARAM_7 = ";7" KEY_CONTROL_PARAM_8 = ";8" KEY_ESC_CSI = "\x1B[" KEY_ESC_N = "\x1BN" KEY_ESC_O = "\x1BO" FILL_CHARACTER = ' ' ) func getByteRange(start byte, end byte) []byte { bytes := make([]byte, 0, 32) for i := start; i <= end; i++ { bytes = append(bytes, byte(i)) } return bytes } var ToGroundBytes = getToGroundBytes() var Executors = getExecuteBytes() // SPACE 20+A0 hex Always and everywhere a blank space // Intermediate 20-2F hex !"#$%&'()*+,-./ var Intermeds = getByteRange(0x20, 0x2F) // Parameters 30-3F hex 0123456789:;<=>? // CSI Parameters 30-39, 3B hex 0123456789; var CsiParams = getByteRange(0x30, 0x3F) var CsiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) 
// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ var UpperCase = getByteRange(0x40, 0x5F) // Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ var LowerCase = getByteRange(0x60, 0x7E) // Alphabetics 40-7E hex (all of upper and lower case) var Alphabetics = append(UpperCase, LowerCase...) var Printables = getByteRange(0x20, 0x7F) var EscapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) var EscapeToGroundBytes = getEscapeToGroundBytes() // See http://www.vt100.net/emu/vt500_parser.png for description of the complex // byte ranges below func getEscapeToGroundBytes() []byte { escapeToGroundBytes := getByteRange(0x30, 0x4F) escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) escapeToGroundBytes = append(escapeToGroundBytes, 0x59) escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) return escapeToGroundBytes } func getExecuteBytes() []byte { executeBytes := getByteRange(0x00, 0x17) executeBytes = append(executeBytes, 0x19) executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) return executeBytes } func getToGroundBytes() []byte { groundBytes := []byte{0x18} groundBytes = append(groundBytes, 0x1A) groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
groundBytes = append(groundBytes, 0x99) groundBytes = append(groundBytes, 0x9A) groundBytes = append(groundBytes, 0x9C) return groundBytes } // Delete 7F hex Always and everywhere ignored // C1 Control 80-9F hex 32 additional control characters // G1 Displayable A1-FE hex 94 additional displayable characters // Special A0+FF hex Same as SPACE and DELETE docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/context.go000066400000000000000000000001501267010174400245120ustar00rootroot00000000000000package ansiterm type AnsiContext struct { currentChar byte paramBuffer []byte interBuffer []byte } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go000066400000000000000000000022121267010174400262260ustar00rootroot00000000000000package ansiterm type CsiEntryState struct { BaseState } func (csiState CsiEntryState) Handle(b byte) (s State, e error) { logger.Infof("CsiEntry::Handle %#x", b) nextState, err := csiState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { case sliceContains(Alphabetics, b): return csiState.parser.Ground, nil case sliceContains(CsiCollectables, b): return csiState.parser.CsiParam, nil case sliceContains(Executors, b): return csiState, csiState.parser.execute() } return csiState, nil } func (csiState CsiEntryState) Transition(s State) error { logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) csiState.BaseState.Transition(s) switch s { case csiState.parser.Ground: return csiState.parser.csiDispatch() case csiState.parser.CsiParam: switch { case sliceContains(CsiParams, csiState.parser.context.currentChar): csiState.parser.collectParam() case sliceContains(Intermeds, csiState.parser.context.currentChar): csiState.parser.collectInter() } } return nil } func (csiState CsiEntryState) Enter() error { csiState.parser.clear() return nil } 
docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go000066400000000000000000000015061267010174400261720ustar00rootroot00000000000000package ansiterm type CsiParamState struct { BaseState } func (csiState CsiParamState) Handle(b byte) (s State, e error) { logger.Infof("CsiParam::Handle %#x", b) nextState, err := csiState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { case sliceContains(Alphabetics, b): return csiState.parser.Ground, nil case sliceContains(CsiCollectables, b): csiState.parser.collectParam() return csiState, nil case sliceContains(Executors, b): return csiState, csiState.parser.execute() } return csiState, nil } func (csiState CsiParamState) Transition(s State) error { logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) csiState.BaseState.Transition(s) switch s { case csiState.parser.Ground: return csiState.parser.csiDispatch() } return nil } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go000066400000000000000000000016111267010174400302230ustar00rootroot00000000000000package ansiterm type EscapeIntermediateState struct { BaseState } func (escState EscapeIntermediateState) Handle(b byte) (s State, e error) { logger.Infof("EscapeIntermediateState::Handle %#x", b) nextState, err := escState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { case sliceContains(Intermeds, b): return escState, escState.parser.collectInter() case sliceContains(Executors, b): return escState, escState.parser.execute() case sliceContains(EscapeIntermediateToGroundBytes, b): return escState.parser.Ground, nil } return escState, nil } func (escState EscapeIntermediateState) Transition(s State) error { logger.Infof("EscapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) escState.BaseState.Transition(s) switch s { case escState.parser.Ground: return escState.parser.escDispatch() } return nil } 
docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/escape_state.go000066400000000000000000000021641267010174400254750ustar00rootroot00000000000000package ansiterm type EscapeState struct { BaseState } func (escState EscapeState) Handle(b byte) (s State, e error) { logger.Infof("EscapeState::Handle %#x", b) nextState, err := escState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { case b == ANSI_ESCAPE_SECONDARY: return escState.parser.CsiEntry, nil case b == ANSI_OSC_STRING_ENTRY: return escState.parser.OscString, nil case sliceContains(Executors, b): return escState, escState.parser.execute() case sliceContains(EscapeToGroundBytes, b): return escState.parser.Ground, nil case sliceContains(Intermeds, b): return escState.parser.EscapeIntermediate, nil } return escState, nil } func (escState EscapeState) Transition(s State) error { logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) escState.BaseState.Transition(s) switch s { case escState.parser.Ground: return escState.parser.escDispatch() case escState.parser.EscapeIntermediate: return escState.parser.collectInter() } return nil } func (escState EscapeState) Enter() error { escState.parser.clear() return nil } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/event_handler.go000066400000000000000000000023251267010174400256520ustar00rootroot00000000000000package ansiterm type AnsiEventHandler interface { // Print Print(b byte) error // Execute C0 commands Execute(b byte) error // CUrsor Up CUU(int) error // CUrsor Down CUD(int) error // CUrsor Forward CUF(int) error // CUrsor Backward CUB(int) error // Cursor to Next Line CNL(int) error // Cursor to Previous Line CPL(int) error // Cursor Horizontal position Absolute CHA(int) error // Vertical line Position Absolute VPA(int) error // CUrsor Position CUP(int, int) error // Horizontal and Vertical Position (depends on PUM) HVP(int, int) error // Text Cursor Enable Mode DECTCEM(bool) error // Origin 
Mode DECOM(bool) error // 132 Column Mode DECCOLM(bool) error // Erase in Display ED(int) error // Erase in Line EL(int) error // Insert Line IL(int) error // Delete Line DL(int) error // Insert Character ICH(int) error // Delete Character DCH(int) error // Set Graphics Rendition SGR([]int) error // Pan Down SU(int) error // Pan Up SD(int) error // Device Attributes DA([]string) error // Set Top and Bottom Margins DECSTBM(int, int) error // Index IND() error // Reverse Index RI() error // Flush updates from previous commands Flush() error } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/ground_state.go000066400000000000000000000006531267010174400255340ustar00rootroot00000000000000package ansiterm type GroundState struct { BaseState } func (gs GroundState) Handle(b byte) (s State, e error) { gs.parser.context.currentChar = b nextState, err := gs.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { case sliceContains(Printables, b): return gs, gs.parser.print() case sliceContains(Executors, b): return gs, gs.parser.execute() } return gs, nil } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go000066400000000000000000000011441267010174400264040ustar00rootroot00000000000000package ansiterm type OscStringState struct { BaseState } func (oscState OscStringState) Handle(b byte) (s State, e error) { logger.Infof("OscString::Handle %#x", b) nextState, err := oscState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { case isOscStringTerminator(b): return oscState.parser.Ground, nil } return oscState, nil } // See below for OSC string terminators for linux // http://man7.org/linux/man-pages/man4/console_codes.4.html func isOscStringTerminator(b byte) bool { if b == ANSI_BEL || b == 0x5C { return true } return false } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/parser.go000066400000000000000000000063371267010174400243370ustar00rootroot00000000000000package 
ansiterm import ( "errors" "fmt" "io/ioutil" "os" "github.com/Sirupsen/logrus" ) var logger *logrus.Logger type AnsiParser struct { currState State eventHandler AnsiEventHandler context *AnsiContext CsiEntry State CsiParam State DcsEntry State Escape State EscapeIntermediate State Error State Ground State OscString State stateMap []State } func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { logFile := ioutil.Discard if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { logFile, _ = os.Create("ansiParser.log") } logger = &logrus.Logger{ Out: logFile, Formatter: new(logrus.TextFormatter), Level: logrus.InfoLevel, } parser := &AnsiParser{ eventHandler: evtHandler, context: &AnsiContext{}, } parser.CsiEntry = CsiEntryState{BaseState{name: "CsiEntry", parser: parser}} parser.CsiParam = CsiParamState{BaseState{name: "CsiParam", parser: parser}} parser.DcsEntry = DcsEntryState{BaseState{name: "DcsEntry", parser: parser}} parser.Escape = EscapeState{BaseState{name: "Escape", parser: parser}} parser.EscapeIntermediate = EscapeIntermediateState{BaseState{name: "EscapeIntermediate", parser: parser}} parser.Error = ErrorState{BaseState{name: "Error", parser: parser}} parser.Ground = GroundState{BaseState{name: "Ground", parser: parser}} parser.OscString = OscStringState{BaseState{name: "OscString", parser: parser}} parser.stateMap = []State{ parser.CsiEntry, parser.CsiParam, parser.DcsEntry, parser.Escape, parser.EscapeIntermediate, parser.Error, parser.Ground, parser.OscString, } parser.currState = getState(initialState, parser.stateMap) logger.Infof("CreateParser: parser %p", parser) return parser } func getState(name string, states []State) State { for _, el := range states { if el.Name() == name { return el } } return nil } func (ap *AnsiParser) Parse(bytes []byte) (int, error) { for i, b := range bytes { if err := ap.handle(b); err != nil { return i, err } } return len(bytes), ap.eventHandler.Flush() } func (ap *AnsiParser) handle(b byte) 
error { ap.context.currentChar = b newState, err := ap.currState.Handle(b) if err != nil { return err } if newState == nil { logger.Warning("newState is nil") return errors.New(fmt.Sprintf("New state of 'nil' is invalid.")) } if newState != ap.currState { if err := ap.changeState(newState); err != nil { return err } } return nil } func (ap *AnsiParser) changeState(newState State) error { logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) // Exit old state if err := ap.currState.Exit(); err != nil { logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) return err } // Perform transition action if err := ap.currState.Transition(newState); err != nil { logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) return err } // Enter new state if err := newState.Enter(); err != nil { logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) return err } ap.currState = newState return nil } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go000066400000000000000000000036541267010174400274150ustar00rootroot00000000000000package ansiterm import ( "strconv" ) func parseParams(bytes []byte) ([]string, error) { paramBuff := make([]byte, 0, 0) params := []string{} for _, v := range bytes { if v == ';' { if len(paramBuff) > 0 { // Completed parameter, append it to the list s := string(paramBuff) params = append(params, s) paramBuff = make([]byte, 0, 0) } } else { paramBuff = append(paramBuff, v) } } // Last parameter may not be terminated with ';' if len(paramBuff) > 0 { s := string(paramBuff) params = append(params, s) } logger.Infof("Parsed params: %v with length: %d", params, len(params)) return params, nil } func parseCmd(context AnsiContext) (string, error) { return string(context.currentChar), nil } func getInt(params []string, dflt int) int { i := getInts(params, 1, dflt)[0] logger.Infof("getInt: %v", i) return i } func getInts(params 
[]string, minCount int, dflt int) []int { ints := []int{} for _, v := range params { i, _ := strconv.Atoi(v) // Zero is mapped to the default value in VT100. if i == 0 { i = dflt } ints = append(ints, i) } if len(ints) < minCount { remaining := minCount - len(ints) for i := 0; i < remaining; i++ { ints = append(ints, dflt) } } logger.Infof("getInts: %v", ints) return ints } func (ap *AnsiParser) modeDispatch(param string, set bool) error { switch param { case "?3": return ap.eventHandler.DECCOLM(set) case "?6": return ap.eventHandler.DECOM(set) case "?25": return ap.eventHandler.DECTCEM(set) } return nil } func (ap *AnsiParser) hDispatch(params []string) error { if len(params) == 1 { return ap.modeDispatch(params[0], true) } return nil } func (ap *AnsiParser) lDispatch(params []string) error { if len(params) == 1 { return ap.modeDispatch(params[0], false) } return nil } func getEraseParam(params []string) int { param := getInt(params, 0) if param < 0 || 3 < param { param = 0 } return param } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/parser_actions.go000066400000000000000000000060351267010174400260520ustar00rootroot00000000000000package ansiterm import ( "fmt" ) func (ap *AnsiParser) collectParam() error { currChar := ap.context.currentChar logger.Infof("collectParam %#x", currChar) ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) return nil } func (ap *AnsiParser) collectInter() error { currChar := ap.context.currentChar logger.Infof("collectInter %#x", currChar) ap.context.paramBuffer = append(ap.context.interBuffer, currChar) return nil } func (ap *AnsiParser) escDispatch() error { cmd, _ := parseCmd(*ap.context) intermeds := ap.context.interBuffer logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar) logger.Infof("escDispatch: %v(%v)", cmd, intermeds) switch cmd { case "D": // IND return ap.eventHandler.IND() case "E": // NEL, equivalent to CRLF err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) if err == nil { err = 
ap.eventHandler.Execute(ANSI_LINE_FEED) } return err case "M": // RI return ap.eventHandler.RI() } return nil } func (ap *AnsiParser) csiDispatch() error { cmd, _ := parseCmd(*ap.context) params, _ := parseParams(ap.context.paramBuffer) logger.Infof("csiDispatch: %v(%v)", cmd, params) switch cmd { case "@": return ap.eventHandler.ICH(getInt(params, 1)) case "A": return ap.eventHandler.CUU(getInt(params, 1)) case "B": return ap.eventHandler.CUD(getInt(params, 1)) case "C": return ap.eventHandler.CUF(getInt(params, 1)) case "D": return ap.eventHandler.CUB(getInt(params, 1)) case "E": return ap.eventHandler.CNL(getInt(params, 1)) case "F": return ap.eventHandler.CPL(getInt(params, 1)) case "G": return ap.eventHandler.CHA(getInt(params, 1)) case "H": ints := getInts(params, 2, 1) x, y := ints[0], ints[1] return ap.eventHandler.CUP(x, y) case "J": param := getEraseParam(params) return ap.eventHandler.ED(param) case "K": param := getEraseParam(params) return ap.eventHandler.EL(param) case "L": return ap.eventHandler.IL(getInt(params, 1)) case "M": return ap.eventHandler.DL(getInt(params, 1)) case "P": return ap.eventHandler.DCH(getInt(params, 1)) case "S": return ap.eventHandler.SU(getInt(params, 1)) case "T": return ap.eventHandler.SD(getInt(params, 1)) case "c": return ap.eventHandler.DA(params) case "d": return ap.eventHandler.VPA(getInt(params, 1)) case "f": ints := getInts(params, 2, 1) x, y := ints[0], ints[1] return ap.eventHandler.HVP(x, y) case "h": return ap.hDispatch(params) case "l": return ap.lDispatch(params) case "m": return ap.eventHandler.SGR(getInts(params, 1, 0)) case "r": ints := getInts(params, 2, 1) top, bottom := ints[0], ints[1] return ap.eventHandler.DECSTBM(top, bottom) default: logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) return nil } } func (ap *AnsiParser) print() error { return ap.eventHandler.Print(ap.context.currentChar) } func (ap *AnsiParser) clear() error { ap.context = 
&AnsiContext{} return nil } func (ap *AnsiParser) execute() error { return ap.eventHandler.Execute(ap.context.currentChar) } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/parser_test_helpers.go000066400000000000000000000121361267010174400271120ustar00rootroot00000000000000package ansiterm import ( "fmt" "testing" ) func getStateNames() []string { parser, _ := createTestParser("Ground") stateNames := []string{} for _, state := range parser.stateMap { stateNames = append(stateNames, state.Name()) } return stateNames } func stateTransitionHelper(t *testing.T, start string, end string, bytes []byte) { for _, b := range bytes { bytes := []byte{byte(b)} parser, _ := createTestParser(start) parser.Parse(bytes) validateState(t, parser.currState, end) } } func anyToXHelper(t *testing.T, bytes []byte, expectedState string) { for _, s := range getStateNames() { stateTransitionHelper(t, s, expectedState, bytes) } } func funcCallParamHelper(t *testing.T, bytes []byte, start string, expected string, expectedCalls []string) { parser, evtHandler := createTestParser(start) parser.Parse(bytes) validateState(t, parser.currState, expected) validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) } func parseParamsHelper(t *testing.T, bytes []byte, expectedParams []string) { params, err := parseParams(bytes) if err != nil { t.Errorf("Parameter parse error: %v", err) return } if len(params) != len(expectedParams) { t.Errorf("Parsed parameters: %v", params) t.Errorf("Expected parameters: %v", expectedParams) t.Errorf("Parameter length failure: %d != %d", len(params), len(expectedParams)) return } for i, v := range expectedParams { if v != params[i] { t.Errorf("Parsed parameters: %v", params) t.Errorf("Expected parameters: %v", expectedParams) t.Errorf("Parameter parse failure: %s != %s at position %d", v, params[i], i) } } } func cursorSingleParamHelper(t *testing.T, command byte, funcName string) { funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", 
[]string{fmt.Sprintf("%s([1])", funcName)}) funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23])", funcName)}) funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) } func cursorTwoParamHelper(t *testing.T, command byte, funcName string) { funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 1])", funcName)}) funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23 1])", funcName)}) funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) } func eraseHelper(t *testing.T, command byte, funcName string) { funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) funcCallParamHelper(t, []byte{'3', command}, "CsiEntry", "Ground", 
[]string{fmt.Sprintf("%s([3])", funcName)}) funcCallParamHelper(t, []byte{'4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) funcCallParamHelper(t, []byte{'1', ';', '2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) } func scrollHelper(t *testing.T, command byte, funcName string) { funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) funcCallParamHelper(t, []byte{'5', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([5])", funcName)}) funcCallParamHelper(t, []byte{'4', ';', '6', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([4])", funcName)}) } func clearOnStateChangeHelper(t *testing.T, start string, end string, bytes []byte) { p, _ := createTestParser(start) fillContext(p.context) p.Parse(bytes) validateState(t, p.currState, end) validateEmptyContext(t, p.context) } func c0Helper(t *testing.T, bytes []byte, expectedState string, expectedCalls []string) { parser, evtHandler := createTestParser("Ground") parser.Parse(bytes) validateState(t, parser.currState, expectedState) validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/parser_test_utilities.go000066400000000000000000000033561267010174400274670ustar00rootroot00000000000000package ansiterm import ( "testing" ) func createTestParser(s string) (*AnsiParser, *TestAnsiEventHandler) { evtHandler := CreateTestAnsiEventHandler() parser := CreateParser(s, evtHandler) return parser, evtHandler } func validateState(t *testing.T, actualState State, expectedStateName string) { actualName := "Nil" if actualState != nil { actualName = actualState.Name() } if actualName != expectedStateName { t.Errorf("Invalid 
State: '%s' != '%s'", actualName, expectedStateName) } } func validateFuncCalls(t *testing.T, actualCalls []string, expectedCalls []string) { actualCount := len(actualCalls) expectedCount := len(expectedCalls) if actualCount != expectedCount { t.Errorf("Actual calls: %v", actualCalls) t.Errorf("Expected calls: %v", expectedCalls) t.Errorf("Call count error: %d != %d", actualCount, expectedCount) return } for i, v := range actualCalls { if v != expectedCalls[i] { t.Errorf("Actual calls: %v", actualCalls) t.Errorf("Expected calls: %v", expectedCalls) t.Errorf("Mismatched calls: %s != %s with lengths %d and %d", v, expectedCalls[i], len(v), len(expectedCalls[i])) } } } func fillContext(context *AnsiContext) { context.currentChar = 'A' context.paramBuffer = []byte{'C', 'D', 'E'} context.interBuffer = []byte{'F', 'G', 'H'} } func validateEmptyContext(t *testing.T, context *AnsiContext) { var expectedCurrChar byte = 0x0 if context.currentChar != expectedCurrChar { t.Errorf("Currentchar mismatch '%#x' != '%#x'", context.currentChar, expectedCurrChar) } if len(context.paramBuffer) != 0 { t.Errorf("Non-empty parameter buffer: %v", context.paramBuffer) } if len(context.paramBuffer) != 0 { t.Errorf("Non-empty intermediate buffer: %v", context.interBuffer) } } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/states.go000066400000000000000000000024661267010174400243450ustar00rootroot00000000000000package ansiterm type StateId int type State interface { Enter() error Exit() error Handle(byte) (State, error) Name() string Transition(State) error } type BaseState struct { name string parser *AnsiParser } func (base BaseState) Enter() error { return nil } func (base BaseState) Exit() error { return nil } func (base BaseState) Handle(b byte) (s State, e error) { switch { case b == CSI_ENTRY: return base.parser.CsiEntry, nil case b == DCS_ENTRY: return base.parser.DcsEntry, nil case b == ANSI_ESCAPE_PRIMARY: return base.parser.Escape, nil case b == OSC_STRING: return 
base.parser.OscString, nil case sliceContains(ToGroundBytes, b): return base.parser.Ground, nil } return nil, nil } func (base BaseState) Name() string { return base.name } func (base BaseState) Transition(s State) error { if s == base.parser.Ground { execBytes := []byte{0x18} execBytes = append(execBytes, 0x1A) execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) execBytes = append(execBytes, getByteRange(0x91, 0x97)...) execBytes = append(execBytes, 0x99) execBytes = append(execBytes, 0x9A) if sliceContains(execBytes, base.parser.context.currentChar) { return base.parser.execute() } } return nil } type DcsEntryState struct { BaseState } type ErrorState struct { BaseState } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/test_event_handler.go000066400000000000000000000077111267010174400267150ustar00rootroot00000000000000package ansiterm import ( "fmt" "strconv" ) type TestAnsiEventHandler struct { FunctionCalls []string } func CreateTestAnsiEventHandler() *TestAnsiEventHandler { evtHandler := TestAnsiEventHandler{} evtHandler.FunctionCalls = make([]string, 0) return &evtHandler } func (h *TestAnsiEventHandler) recordCall(call string, params []string) { s := fmt.Sprintf("%s(%v)", call, params) h.FunctionCalls = append(h.FunctionCalls, s) } func (h *TestAnsiEventHandler) Print(b byte) error { h.recordCall("Print", []string{string(b)}) return nil } func (h *TestAnsiEventHandler) Execute(b byte) error { h.recordCall("Execute", []string{string(b)}) return nil } func (h *TestAnsiEventHandler) CUU(param int) error { h.recordCall("CUU", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) CUD(param int) error { h.recordCall("CUD", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) CUF(param int) error { h.recordCall("CUF", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) CUB(param int) error { h.recordCall("CUB", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) 
CNL(param int) error { h.recordCall("CNL", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) CPL(param int) error { h.recordCall("CPL", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) CHA(param int) error { h.recordCall("CHA", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) VPA(param int) error { h.recordCall("VPA", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) CUP(x int, y int) error { xS, yS := strconv.Itoa(x), strconv.Itoa(y) h.recordCall("CUP", []string{xS, yS}) return nil } func (h *TestAnsiEventHandler) HVP(x int, y int) error { xS, yS := strconv.Itoa(x), strconv.Itoa(y) h.recordCall("HVP", []string{xS, yS}) return nil } func (h *TestAnsiEventHandler) DECTCEM(visible bool) error { h.recordCall("DECTCEM", []string{strconv.FormatBool(visible)}) return nil } func (h *TestAnsiEventHandler) DECOM(visible bool) error { h.recordCall("DECOM", []string{strconv.FormatBool(visible)}) return nil } func (h *TestAnsiEventHandler) DECCOLM(use132 bool) error { h.recordCall("DECOLM", []string{strconv.FormatBool(use132)}) return nil } func (h *TestAnsiEventHandler) ED(param int) error { h.recordCall("ED", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) EL(param int) error { h.recordCall("EL", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) IL(param int) error { h.recordCall("IL", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) DL(param int) error { h.recordCall("DL", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) ICH(param int) error { h.recordCall("ICH", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) DCH(param int) error { h.recordCall("DCH", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) SGR(params []int) error { strings := []string{} for _, v := range params { strings = append(strings, strconv.Itoa(v)) } 
h.recordCall("SGR", strings) return nil } func (h *TestAnsiEventHandler) SU(param int) error { h.recordCall("SU", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) SD(param int) error { h.recordCall("SD", []string{strconv.Itoa(param)}) return nil } func (h *TestAnsiEventHandler) DA(params []string) error { h.recordCall("DA", params) return nil } func (h *TestAnsiEventHandler) DECSTBM(top int, bottom int) error { topS, bottomS := strconv.Itoa(top), strconv.Itoa(bottom) h.recordCall("DECSTBM", []string{topS, bottomS}) return nil } func (h *TestAnsiEventHandler) RI() error { h.recordCall("RI", nil) return nil } func (h *TestAnsiEventHandler) IND() error { h.recordCall("IND", nil) return nil } func (h *TestAnsiEventHandler) Flush() error { return nil } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/utilities.go000066400000000000000000000004221267010174400250430ustar00rootroot00000000000000package ansiterm import ( "strconv" ) func sliceContains(bytes []byte, b byte) bool { for _, v := range bytes { if v == b { return true } } return false } func convertBytesToInteger(bytes []byte) int { s := string(bytes) i, _ := strconv.Atoi(s) return i } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/000077500000000000000000000000001267010174400241705ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go000066400000000000000000000110511267010174400254470ustar00rootroot00000000000000// +build windows package winterm import ( "fmt" "os" "strconv" "strings" "syscall" . "github.com/Azure/go-ansiterm" ) // Windows keyboard constants // See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. 
const ( VK_PRIOR = 0x21 // PAGE UP key VK_NEXT = 0x22 // PAGE DOWN key VK_END = 0x23 // END key VK_HOME = 0x24 // HOME key VK_LEFT = 0x25 // LEFT ARROW key VK_UP = 0x26 // UP ARROW key VK_RIGHT = 0x27 // RIGHT ARROW key VK_DOWN = 0x28 // DOWN ARROW key VK_SELECT = 0x29 // SELECT key VK_PRINT = 0x2A // PRINT key VK_EXECUTE = 0x2B // EXECUTE key VK_SNAPSHOT = 0x2C // PRINT SCREEN key VK_INSERT = 0x2D // INS key VK_DELETE = 0x2E // DEL key VK_HELP = 0x2F // HELP key VK_F1 = 0x70 // F1 key VK_F2 = 0x71 // F2 key VK_F3 = 0x72 // F3 key VK_F4 = 0x73 // F4 key VK_F5 = 0x74 // F5 key VK_F6 = 0x75 // F6 key VK_F7 = 0x76 // F7 key VK_F8 = 0x77 // F8 key VK_F9 = 0x78 // F9 key VK_F10 = 0x79 // F10 key VK_F11 = 0x7A // F11 key VK_F12 = 0x7B // F12 key RIGHT_ALT_PRESSED = 0x0001 LEFT_ALT_PRESSED = 0x0002 RIGHT_CTRL_PRESSED = 0x0004 LEFT_CTRL_PRESSED = 0x0008 SHIFT_PRESSED = 0x0010 NUMLOCK_ON = 0x0020 SCROLLLOCK_ON = 0x0040 CAPSLOCK_ON = 0x0080 ENHANCED_KEY = 0x0100 ) type ansiCommand struct { CommandBytes []byte Command string Parameters []string IsSpecial bool } func newAnsiCommand(command []byte) *ansiCommand { if isCharacterSelectionCmdChar(command[1]) { // Is Character Set Selection commands return &ansiCommand{ CommandBytes: command, Command: string(command), IsSpecial: true, } } // last char is command character lastCharIndex := len(command) - 1 ac := &ansiCommand{ CommandBytes: command, Command: string(command[lastCharIndex]), IsSpecial: false, } // more than a single escape if lastCharIndex != 0 { start := 1 // skip if double char escape sequence if command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_ESCAPE_SECONDARY { start++ } // convert this to GetNextParam method ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP) } return ac } func (ac *ansiCommand) paramAsSHORT(index int, defaultValue SHORT) SHORT { if index < 0 || index >= len(ac.Parameters) { return defaultValue } param, err := strconv.ParseInt(ac.Parameters[index], 10, 
16) if err != nil { return defaultValue } return SHORT(param) } func (ac *ansiCommand) String() string { return fmt.Sprintf("0x%v \"%v\" (\"%v\")", bytesToHex(ac.CommandBytes), ac.Command, strings.Join(ac.Parameters, "\",\"")) } // isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. // See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. func isAnsiCommandChar(b byte) bool { switch { case ANSI_COMMAND_FIRST <= b && b <= ANSI_COMMAND_LAST && b != ANSI_ESCAPE_SECONDARY: return true case b == ANSI_CMD_G1 || b == ANSI_CMD_OSC || b == ANSI_CMD_DECPAM || b == ANSI_CMD_DECPNM: // non-CSI escape sequence terminator return true case b == ANSI_CMD_STR_TERM || b == ANSI_BEL: // String escape sequence terminator return true } return false } func isXtermOscSequence(command []byte, current byte) bool { return (len(command) >= 2 && command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_CMD_OSC && current != ANSI_BEL) } func isCharacterSelectionCmdChar(b byte) bool { return (b == ANSI_CMD_G0 || b == ANSI_CMD_G1 || b == ANSI_CMD_G2 || b == ANSI_CMD_G3) } // bytesToHex converts a slice of bytes to a human-readable string. func bytesToHex(b []byte) string { hex := make([]string, len(b)) for i, ch := range b { hex[i] = fmt.Sprintf("%X", ch) } return strings.Join(hex, "") } // ensureInRange adjusts the passed value, if necessary, to ensure it is within // the passed min / max range. 
func ensureInRange(n SHORT, min SHORT, max SHORT) SHORT { if n < min { return min } else if n > max { return max } else { return n } } func GetStdFile(nFile int) (*os.File, uintptr) { var file *os.File switch nFile { case syscall.STD_INPUT_HANDLE: file = os.Stdin case syscall.STD_OUTPUT_HANDLE: file = os.Stdout case syscall.STD_ERROR_HANDLE: file = os.Stderr default: panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) } fd, err := syscall.GetStdHandle(nFile) if err != nil { panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) } return file, uintptr(fd) } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/api.go000066400000000000000000000300331267010174400252670ustar00rootroot00000000000000// +build windows package winterm import ( "fmt" "syscall" "unsafe" ) //=========================================================================================================== // IMPORTANT NOTE: // // The methods below make extensive use of the "unsafe" package to obtain the required pointers. // Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack // variables) the pointers reference *before* the API completes. // // As a result, in those cases, the code must hint that the variables remain in active by invoking the // dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer // require unsafe pointers. // // If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform // the garbage collector the variables remain in use if: // // -- The value is not a pointer (e.g., int32, struct) // -- The value is not referenced by the method after passing the pointer to Windows // // See http://golang.org/doc/go1.3. 
//=========================================================================================================== var ( kernel32DLL = syscall.NewLazyDLL("kernel32.dll") getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") ) // Windows Console constants const ( // Console modes // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. ENABLE_PROCESSED_INPUT = 0x0001 ENABLE_LINE_INPUT = 0x0002 ENABLE_ECHO_INPUT = 0x0004 ENABLE_WINDOW_INPUT = 0x0008 ENABLE_MOUSE_INPUT = 0x0010 ENABLE_INSERT_MODE = 0x0020 ENABLE_QUICK_EDIT_MODE = 0x0040 ENABLE_EXTENDED_FLAGS = 0x0080 ENABLE_PROCESSED_OUTPUT = 0x0001 ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 // Character attributes // Note: // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). // Clearing all foreground or background colors results in black; setting all creates white. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. 
FOREGROUND_BLUE WORD = 0x0001 FOREGROUND_GREEN WORD = 0x0002 FOREGROUND_RED WORD = 0x0004 FOREGROUND_INTENSITY WORD = 0x0008 FOREGROUND_MASK WORD = 0x000F BACKGROUND_BLUE WORD = 0x0010 BACKGROUND_GREEN WORD = 0x0020 BACKGROUND_RED WORD = 0x0040 BACKGROUND_INTENSITY WORD = 0x0080 BACKGROUND_MASK WORD = 0x00F0 COMMON_LVB_MASK WORD = 0xFF00 COMMON_LVB_REVERSE_VIDEO WORD = 0x4000 COMMON_LVB_UNDERSCORE WORD = 0x8000 // Input event types // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. KEY_EVENT = 0x0001 MOUSE_EVENT = 0x0002 WINDOW_BUFFER_SIZE_EVENT = 0x0004 MENU_EVENT = 0x0008 FOCUS_EVENT = 0x0010 // WaitForSingleObject return codes WAIT_ABANDONED = 0x00000080 WAIT_FAILED = 0xFFFFFFFF WAIT_SIGNALED = 0x0000000 WAIT_TIMEOUT = 0x00000102 // WaitForSingleObject wait duration WAIT_INFINITE = 0xFFFFFFFF WAIT_ONE_SECOND = 1000 WAIT_HALF_SECOND = 500 WAIT_QUARTER_SECOND = 250 ) // Windows API Console types // -- See https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751(v=vs.85).aspx for core types (e.g., SHORT) // -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) // -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment type ( SHORT int16 BOOL int32 WORD uint16 WCHAR uint16 DWORD uint32 CHAR_INFO struct { UnicodeChar WCHAR Attributes WORD } CONSOLE_CURSOR_INFO struct { Size DWORD Visible BOOL } CONSOLE_SCREEN_BUFFER_INFO struct { Size COORD CursorPosition COORD Attributes WORD Window SMALL_RECT MaximumWindowSize COORD } COORD struct { X SHORT Y SHORT } SMALL_RECT struct { Left SHORT Top SHORT Right SHORT Bottom SHORT } // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
INPUT_RECORD struct { EventType WORD KeyEvent KEY_EVENT_RECORD } KEY_EVENT_RECORD struct { KeyDown BOOL RepeatCount WORD VirtualKeyCode WORD VirtualScanCode WORD UnicodeChar WCHAR ControlKeyState DWORD } WINDOW_BUFFER_SIZE struct { Size COORD } ) // boolToBOOL converts a Go bool into a Windows BOOL. func boolToBOOL(f bool) BOOL { if f { return BOOL(1) } else { return BOOL(0) } } // GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) return checkError(r1, r2, err) } // SetConsoleCursorInfo sets the size and visiblity of the console cursor. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) return checkError(r1, r2, err) } // SetConsoleCursorPosition location of the console cursor. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. func SetConsoleCursorPosition(handle uintptr, coord COORD) error { r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) use(coord) return checkError(r1, r2, err) } // GetConsoleMode gets the console mode for given file descriptor // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. func GetConsoleMode(handle uintptr) (mode uint32, err error) { err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) return mode, err } // SetConsoleMode sets the console mode for given file descriptor // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
func SetConsoleMode(handle uintptr, mode uint32) error { r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) use(mode) return checkError(r1, r2, err) } // GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { info := CONSOLE_SCREEN_BUFFER_INFO{} err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) if err != nil { return nil, err } return &info, nil } func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) use(scrollRect) use(clipRect) use(destOrigin) use(char) return checkError(r1, r2, err) } // SetConsoleScreenBufferSize sets the size of the console screen buffer. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) use(coord) return checkError(r1, r2, err) } // SetConsoleTextAttribute sets the attributes of characters written to the // console screen buffer by the WriteFile or WriteConsole function. // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. func SetConsoleTextAttribute(handle uintptr, attribute WORD) error { r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) use(attribute) return checkError(r1, r2, err) } // SetConsoleWindowInfo sets the size and position of the console screen buffer's window. 
// Note that the size and location must be within and no larger than the backing console screen buffer. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) use(isAbsolute) use(rect) return checkError(r1, r2, err) } // WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) use(buffer) use(bufferSize) use(bufferCoord) return checkError(r1, r2, err) } // ReadConsoleInput reads (and removes) data from the console input buffer. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) use(buffer) return checkError(r1, r2, err) } // WaitForSingleObject waits for the passed handle to be signaled. // It returns true if the handle was signaled; false otherwise. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. 
func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(DWORD(msWait))) switch r1 { case WAIT_ABANDONED, WAIT_TIMEOUT: return false, nil case WAIT_SIGNALED: return true, nil } use(msWait) return false, err } // String helpers func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) } func (coord COORD) String() string { return fmt.Sprintf("%v,%v", coord.X, coord.Y) } func (rect SMALL_RECT) String() string { return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) } // checkError evaluates the results of a Windows API call and returns the error if it failed. func checkError(r1, r2 uintptr, err error) error { // Windows APIs return non-zero to indicate success if r1 != 0 { return nil } // Return the error if provided, otherwise default to EINVAL if err != nil { return err } return syscall.EINVAL } // coordToPointer converts a COORD into a uintptr (by fooling the type system). func coordToPointer(c COORD) uintptr { // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to DWORD is just to get a pointer to pass. return uintptr(*((*DWORD)(unsafe.Pointer(&c)))) } // use is a no-op, but the compiler cannot see that it is. // Calling use(p) ensures that p is kept live until that point. func use(p interface{}) {} docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go000066400000000000000000000065131267010174400301140ustar00rootroot00000000000000// +build windows package winterm import ( . 
"github.com/Azure/go-ansiterm" ) const ( FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE ) // collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the // request represented by the passed ANSI mode. func collectAnsiIntoWindowsAttributes(windowsMode WORD, inverted bool, baseMode WORD, ansiMode SHORT) (WORD, bool) { switch ansiMode { // Mode styles case ANSI_SGR_BOLD: windowsMode = windowsMode | FOREGROUND_INTENSITY case ANSI_SGR_DIM, ANSI_SGR_BOLD_DIM_OFF: windowsMode &^= FOREGROUND_INTENSITY case ANSI_SGR_UNDERLINE: windowsMode = windowsMode | COMMON_LVB_UNDERSCORE case ANSI_SGR_REVERSE: inverted = true case ANSI_SGR_REVERSE_OFF: inverted = false case ANSI_SGR_UNDERLINE_OFF: windowsMode &^= COMMON_LVB_UNDERSCORE // Foreground colors case ANSI_SGR_FOREGROUND_DEFAULT: windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) case ANSI_SGR_FOREGROUND_BLACK: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) case ANSI_SGR_FOREGROUND_RED: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED case ANSI_SGR_FOREGROUND_GREEN: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN case ANSI_SGR_FOREGROUND_YELLOW: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN case ANSI_SGR_FOREGROUND_BLUE: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE case ANSI_SGR_FOREGROUND_MAGENTA: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE case ANSI_SGR_FOREGROUND_CYAN: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE case ANSI_SGR_FOREGROUND_WHITE: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE // Background colors case ANSI_SGR_BACKGROUND_DEFAULT: // Black with no intensity windowsMode = (windowsMode 
&^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) case ANSI_SGR_BACKGROUND_BLACK: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) case ANSI_SGR_BACKGROUND_RED: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED case ANSI_SGR_BACKGROUND_GREEN: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN case ANSI_SGR_BACKGROUND_YELLOW: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN case ANSI_SGR_BACKGROUND_BLUE: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE case ANSI_SGR_BACKGROUND_MAGENTA: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE case ANSI_SGR_BACKGROUND_CYAN: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE case ANSI_SGR_BACKGROUND_WHITE: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE } return windowsMode, inverted } // invertAttributes inverts the foreground and background colors of a Windows attributes value func invertAttributes(windowsMode WORD) WORD { return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go000066400000000000000000000043451267010174400275640ustar00rootroot00000000000000// +build windows package winterm const ( Horizontal = iota Vertical ) func (h *WindowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { if h.originMode { sr := h.effectiveSr(info.Window) return SMALL_RECT{ Top: sr.top, Bottom: sr.bottom, Left: 0, Right: info.Size.X - 1, } } else { return SMALL_RECT{ Top: info.Window.Top, Bottom: info.Window.Bottom, Left: 0, Right: info.Size.X - 1, } } } // setCursorPosition sets the cursor to the specified position, bounded to the screen size func (h *WindowsAnsiEventHandler) setCursorPosition(position COORD, window 
SMALL_RECT) error { position.X = ensureInRange(position.X, window.Left, window.Right) position.Y = ensureInRange(position.Y, window.Top, window.Bottom) err := SetConsoleCursorPosition(h.fd, position) if err != nil { return err } logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) return err } func (h *WindowsAnsiEventHandler) moveCursorVertical(param int) error { return h.moveCursor(Vertical, param) } func (h *WindowsAnsiEventHandler) moveCursorHorizontal(param int) error { return h.moveCursor(Horizontal, param) } func (h *WindowsAnsiEventHandler) moveCursor(moveMode int, param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } position := info.CursorPosition switch moveMode { case Horizontal: position.X += SHORT(param) case Vertical: position.Y += SHORT(param) } if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { return err } return nil } func (h *WindowsAnsiEventHandler) moveCursorLine(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } position := info.CursorPosition position.X = 0 position.Y += SHORT(param) if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { return err } return nil } func (h *WindowsAnsiEventHandler) moveCursorColumn(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } position := info.CursorPosition position.X = SHORT(param) - 1 if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { return err } return nil } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go000066400000000000000000000034761267010174400273520ustar00rootroot00000000000000// +build windows package winterm import ( . 
"github.com/Azure/go-ansiterm" ) func (h *WindowsAnsiEventHandler) clearRange(attributes WORD, fromCoord COORD, toCoord COORD) error { // Ignore an invalid (negative area) request if toCoord.Y < fromCoord.Y { return nil } var err error var coordStart = COORD{} var coordEnd = COORD{} xCurrent, yCurrent := fromCoord.X, fromCoord.Y xEnd, yEnd := toCoord.X, toCoord.Y // Clear any partial initial line if xCurrent > 0 { coordStart.X, coordStart.Y = xCurrent, yCurrent coordEnd.X, coordEnd.Y = xEnd, yCurrent err = h.clearRect(attributes, coordStart, coordEnd) if err != nil { return err } xCurrent = 0 yCurrent += 1 } // Clear intervening rectangular section if yCurrent < yEnd { coordStart.X, coordStart.Y = xCurrent, yCurrent coordEnd.X, coordEnd.Y = xEnd, yEnd-1 err = h.clearRect(attributes, coordStart, coordEnd) if err != nil { return err } xCurrent = 0 yCurrent = yEnd } // Clear remaining partial ending line coordStart.X, coordStart.Y = xCurrent, yCurrent coordEnd.X, coordEnd.Y = xEnd, yEnd err = h.clearRect(attributes, coordStart, coordEnd) if err != nil { return err } return nil } func (h *WindowsAnsiEventHandler) clearRect(attributes WORD, fromCoord COORD, toCoord COORD) error { region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} width := toCoord.X - fromCoord.X + 1 height := toCoord.Y - fromCoord.Y + 1 size := uint32(width) * uint32(height) if size <= 0 { return nil } buffer := make([]CHAR_INFO, size) char := CHAR_INFO{WCHAR(FILL_CHARACTER), attributes} for i := 0; i < int(size); i++ { buffer[i] = char } err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) if err != nil { return err } return nil } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go000066400000000000000000000061471267010174400273640ustar00rootroot00000000000000// +build windows package winterm // effectiveSr gets the current effective scroll region in buffer coordinates func (h 
*WindowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { top := AddInRange(window.Top, h.sr.top, window.Top, window.Bottom) bottom := AddInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) if top >= bottom { top = window.Top bottom = window.Bottom } return scrollRegion{top: top, bottom: bottom} } func (h *WindowsAnsiEventHandler) scrollUp(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } sr := h.effectiveSr(info.Window) return h.scroll(param, sr, info) } func (h *WindowsAnsiEventHandler) scrollDown(param int) error { return h.scrollUp(-param) } func (h *WindowsAnsiEventHandler) deleteLines(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } start := info.CursorPosition.Y sr := h.effectiveSr(info.Window) // Lines cannot be inserted or deleted outside the scrolling region. if start >= sr.top && start <= sr.bottom { sr.top = start return h.scroll(param, sr, info) } else { return nil } } func (h *WindowsAnsiEventHandler) insertLines(param int) error { return h.deleteLines(-param) } // scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
func (h *WindowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) // Copy from and clip to the scroll region (full buffer width) scrollRect := SMALL_RECT{ Top: sr.top, Bottom: sr.bottom, Left: 0, Right: info.Size.X - 1, } // Origin to which area should be copied destOrigin := COORD{ X: 0, Y: sr.top - SHORT(param), } char := CHAR_INFO{ UnicodeChar: ' ', Attributes: h.attributes, } if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { return err } return nil } func (h *WindowsAnsiEventHandler) deleteCharacters(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } return h.scrollLine(param, info.CursorPosition, info) } func (h *WindowsAnsiEventHandler) insertCharacters(param int) error { return h.deleteCharacters(-param) } // scrollLine scrolls a line horizontally starting at the provided position by a number of columns. 
func (h *WindowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { // Copy from and clip to the scroll region (full buffer width) scrollRect := SMALL_RECT{ Top: position.Y, Bottom: position.Y, Left: position.X, Right: info.Size.X - 1, } // Origin to which area should be copied destOrigin := COORD{ X: position.X - SHORT(columns), Y: position.Y, } char := CHAR_INFO{ UnicodeChar: ' ', Attributes: h.attributes, } if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { return err } return nil } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go000066400000000000000000000004431267010174400265330ustar00rootroot00000000000000// +build windows package winterm // AddInRange increments a value by the passed quantity while ensuring the values // always remain within the supplied min / max range. func AddInRange(n SHORT, increment SHORT, min SHORT, max SHORT) SHORT { return ensureInRange(n+increment, min, max) } docker-1.10.3/vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go000066400000000000000000000412321267010174400302140ustar00rootroot00000000000000// +build windows package winterm import ( "bytes" "io/ioutil" "os" "strconv" . 
"github.com/Azure/go-ansiterm" "github.com/Sirupsen/logrus" ) var logger *logrus.Logger type WindowsAnsiEventHandler struct { fd uintptr file *os.File infoReset *CONSOLE_SCREEN_BUFFER_INFO sr scrollRegion buffer bytes.Buffer attributes WORD inverted bool wrapNext bool drewMarginByte bool originMode bool marginByte byte curInfo *CONSOLE_SCREEN_BUFFER_INFO curPos COORD } func CreateWinEventHandler(fd uintptr, file *os.File) AnsiEventHandler { logFile := ioutil.Discard if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { logFile, _ = os.Create("winEventHandler.log") } logger = &logrus.Logger{ Out: logFile, Formatter: new(logrus.TextFormatter), Level: logrus.DebugLevel, } infoReset, err := GetConsoleScreenBufferInfo(fd) if err != nil { return nil } return &WindowsAnsiEventHandler{ fd: fd, file: file, infoReset: infoReset, attributes: infoReset.Attributes, } } type scrollRegion struct { top SHORT bottom SHORT } // simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the // current cursor position and scroll region settings, in which case it returns // true. If no special handling is necessary, then it does nothing and returns // false. // // In the false case, the caller should ensure that a carriage return // and line feed are inserted or that the text is otherwise wrapped. func (h *WindowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { if h.wrapNext { if err := h.Flush(); err != nil { return false, err } h.clearWrap() } pos, info, err := h.getCurrentInfo() if err != nil { return false, err } sr := h.effectiveSr(info.Window) if pos.Y == sr.bottom { // Scrolling is necessary. Let Windows automatically scroll if the scrolling region // is the full window. if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { if includeCR { pos.X = 0 h.updatePos(pos) } return false, nil } else { // A custom scroll region is active. Scroll the window manually to simulate // the LF. 
if err := h.Flush(); err != nil { return false, err } logger.Info("Simulating LF inside scroll region") if err := h.scrollUp(1); err != nil { return false, err } if includeCR { pos.X = 0 if err := SetConsoleCursorPosition(h.fd, pos); err != nil { return false, err } } return true, nil } } else if pos.Y < info.Window.Bottom { // Let Windows handle the LF. pos.Y++ if includeCR { pos.X = 0 } h.updatePos(pos) return false, nil } else { // The cursor is at the bottom of the screen but outside the scroll // region. Skip the LF. logger.Info("Simulating LF outside scroll region") if includeCR { if err := h.Flush(); err != nil { return false, err } pos.X = 0 if err := SetConsoleCursorPosition(h.fd, pos); err != nil { return false, err } } return true, nil } } // executeLF executes a LF without a CR. func (h *WindowsAnsiEventHandler) executeLF() error { handled, err := h.simulateLF(false) if err != nil { return err } if !handled { // Windows LF will reset the cursor column position. Write the LF // and restore the cursor position. pos, _, err := h.getCurrentInfo() if err != nil { return err } h.buffer.WriteByte(ANSI_LINE_FEED) if pos.X != 0 { if err := h.Flush(); err != nil { return err } logger.Info("Resetting cursor position for LF without CR") if err := SetConsoleCursorPosition(h.fd, pos); err != nil { return err } } } return nil } func (h *WindowsAnsiEventHandler) Print(b byte) error { if h.wrapNext { h.buffer.WriteByte(h.marginByte) h.clearWrap() if _, err := h.simulateLF(true); err != nil { return err } } pos, info, err := h.getCurrentInfo() if err != nil { return err } if pos.X == info.Size.X-1 { h.wrapNext = true h.marginByte = b } else { pos.X++ h.updatePos(pos) h.buffer.WriteByte(b) } return nil } func (h *WindowsAnsiEventHandler) Execute(b byte) error { switch b { case ANSI_TAB: logger.Info("Execute(TAB)") // Move to the next tab stop, but preserve auto-wrap if already set. 
if !h.wrapNext { pos, info, err := h.getCurrentInfo() if err != nil { return err } pos.X = (pos.X + 8) - pos.X%8 if pos.X >= info.Size.X { pos.X = info.Size.X - 1 } if err := h.Flush(); err != nil { return err } if err := SetConsoleCursorPosition(h.fd, pos); err != nil { return err } } return nil case ANSI_BEL: h.buffer.WriteByte(ANSI_BEL) return nil case ANSI_BACKSPACE: if h.wrapNext { if err := h.Flush(); err != nil { return err } h.clearWrap() } pos, _, err := h.getCurrentInfo() if err != nil { return err } if pos.X > 0 { pos.X-- h.updatePos(pos) h.buffer.WriteByte(ANSI_BACKSPACE) } return nil case ANSI_VERTICAL_TAB, ANSI_FORM_FEED: // Treat as true LF. return h.executeLF() case ANSI_LINE_FEED: // Simulate a CR and LF for now since there is no way in go-ansiterm // to tell if the LF should include CR (and more things break when it's // missing than when it's incorrectly added). handled, err := h.simulateLF(true) if handled || err != nil { return err } return h.buffer.WriteByte(ANSI_LINE_FEED) case ANSI_CARRIAGE_RETURN: if h.wrapNext { if err := h.Flush(); err != nil { return err } h.clearWrap() } pos, _, err := h.getCurrentInfo() if err != nil { return err } if pos.X != 0 { pos.X = 0 h.updatePos(pos) h.buffer.WriteByte(ANSI_CARRIAGE_RETURN) } return nil default: return nil } } func (h *WindowsAnsiEventHandler) CUU(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorVertical(-param) } func (h *WindowsAnsiEventHandler) CUD(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorVertical(param) } func (h *WindowsAnsiEventHandler) CUF(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorHorizontal(param) } func (h *WindowsAnsiEventHandler) CUB(param int) error { if 
err := h.Flush(); err != nil { return err } logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorHorizontal(-param) } func (h *WindowsAnsiEventHandler) CNL(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorLine(param) } func (h *WindowsAnsiEventHandler) CPL(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorLine(-param) } func (h *WindowsAnsiEventHandler) CHA(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorColumn(param) } func (h *WindowsAnsiEventHandler) VPA(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("VPA: [[%d]]", param) h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } window := h.getCursorWindow(info) position := info.CursorPosition position.Y = window.Top + SHORT(param) - 1 return h.setCursorPosition(position, window) } func (h *WindowsAnsiEventHandler) CUP(row int, col int) error { if err := h.Flush(); err != nil { return err } logger.Infof("CUP: [[%d %d]]", row, col) h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } window := h.getCursorWindow(info) position := COORD{window.Left + SHORT(col) - 1, window.Top + SHORT(row) - 1} return h.setCursorPosition(position, window) } func (h *WindowsAnsiEventHandler) HVP(row int, col int) error { if err := h.Flush(); err != nil { return err } logger.Infof("HVP: [[%d %d]]", row, col) h.clearWrap() return h.CUP(row, col) } func (h *WindowsAnsiEventHandler) DECTCEM(visible bool) error { if err := h.Flush(); err != nil { return err } logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) h.clearWrap() return nil } func (h *WindowsAnsiEventHandler) 
DECOM(enable bool) error { if err := h.Flush(); err != nil { return err } logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) h.clearWrap() h.originMode = enable return h.CUP(1, 1) } func (h *WindowsAnsiEventHandler) DECCOLM(use132 bool) error { if err := h.Flush(); err != nil { return err } logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) h.clearWrap() if err := h.ED(2); err != nil { return err } info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } targetWidth := SHORT(80) if use132 { targetWidth = 132 } if info.Size.X < targetWidth { if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { logger.Info("set buffer failed:", err) return err } } window := info.Window window.Left = 0 window.Right = targetWidth - 1 if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { logger.Info("set window failed:", err) return err } if info.Size.X > targetWidth { if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { logger.Info("set buffer failed:", err) return err } } return SetConsoleCursorPosition(h.fd, COORD{0, 0}) } func (h *WindowsAnsiEventHandler) ED(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() // [J -- Erases from the cursor to the end of the screen, including the cursor position. // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. // [2J -- Erases the complete display. The cursor does not move. 
// Notes: // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } var start COORD var end COORD switch param { case 0: start = info.CursorPosition end = COORD{info.Size.X - 1, info.Size.Y - 1} case 1: start = COORD{0, 0} end = info.CursorPosition case 2: start = COORD{0, 0} end = COORD{info.Size.X - 1, info.Size.Y - 1} } err = h.clearRange(h.attributes, start, end) if err != nil { return err } // If the whole buffer was cleared, move the window to the top while preserving // the window-relative cursor position. if param == 2 { pos := info.CursorPosition window := info.Window pos.Y -= window.Top window.Bottom -= window.Top window.Top = 0 if err := SetConsoleCursorPosition(h.fd, pos); err != nil { return err } if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { return err } } return nil } func (h *WindowsAnsiEventHandler) EL(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("EL: [%v]", strconv.Itoa(param)) h.clearWrap() // [K -- Erases from the cursor to the end of the line, including the cursor position. // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. // [2K -- Erases the complete line. 
info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } var start COORD var end COORD switch param { case 0: start = info.CursorPosition end = COORD{info.Size.X, info.CursorPosition.Y} case 1: start = COORD{0, info.CursorPosition.Y} end = info.CursorPosition case 2: start = COORD{0, info.CursorPosition.Y} end = COORD{info.Size.X, info.CursorPosition.Y} } err = h.clearRange(h.attributes, start, end) if err != nil { return err } return nil } func (h *WindowsAnsiEventHandler) IL(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("IL: [%v]", strconv.Itoa(param)) h.clearWrap() return h.insertLines(param) } func (h *WindowsAnsiEventHandler) DL(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("DL: [%v]", strconv.Itoa(param)) h.clearWrap() return h.deleteLines(param) } func (h *WindowsAnsiEventHandler) ICH(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("ICH: [%v]", strconv.Itoa(param)) h.clearWrap() return h.insertCharacters(param) } func (h *WindowsAnsiEventHandler) DCH(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("DCH: [%v]", strconv.Itoa(param)) h.clearWrap() return h.deleteCharacters(param) } func (h *WindowsAnsiEventHandler) SGR(params []int) error { if err := h.Flush(); err != nil { return err } strings := []string{} for _, v := range params { strings = append(strings, strconv.Itoa(v)) } logger.Infof("SGR: [%v]", strings) if len(params) <= 0 { h.attributes = h.infoReset.Attributes h.inverted = false } else { for _, attr := range params { if attr == ANSI_SGR_RESET { h.attributes = h.infoReset.Attributes h.inverted = false continue } h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, SHORT(attr)) } } attributes := h.attributes if h.inverted { attributes = invertAttributes(attributes) } err := SetConsoleTextAttribute(h.fd, attributes) if err != nil { return err } return 
nil } func (h *WindowsAnsiEventHandler) SU(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.scrollUp(param) } func (h *WindowsAnsiEventHandler) SD(param int) error { if err := h.Flush(); err != nil { return err } logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.scrollDown(param) } func (h *WindowsAnsiEventHandler) DA(params []string) error { logger.Infof("DA: [%v]", params) // DA cannot be implemented because it must send data on the VT100 input stream, // which is not available to go-ansiterm. return nil } func (h *WindowsAnsiEventHandler) DECSTBM(top int, bottom int) error { if err := h.Flush(); err != nil { return err } logger.Infof("DECSTBM: [%d, %d]", top, bottom) // Windows is 0 indexed, Linux is 1 indexed h.sr.top = SHORT(top - 1) h.sr.bottom = SHORT(bottom - 1) // This command also moves the cursor to the origin. h.clearWrap() return h.CUP(1, 1) } func (h *WindowsAnsiEventHandler) RI() error { if err := h.Flush(); err != nil { return err } logger.Info("RI: []") h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } sr := h.effectiveSr(info.Window) if info.CursorPosition.Y == sr.top { return h.scrollDown(1) } else { return h.moveCursorVertical(-1) } } func (h *WindowsAnsiEventHandler) IND() error { logger.Info("IND: []") return h.executeLF() } func (h *WindowsAnsiEventHandler) Flush() error { h.curInfo = nil if h.buffer.Len() > 0 { logger.Infof("Flush: [%s]", h.buffer.Bytes()) if _, err := h.buffer.WriteTo(h.file); err != nil { return err } } if h.wrapNext && !h.drewMarginByte { logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } charInfo := []CHAR_INFO{{UnicodeChar: WCHAR(h.marginByte), Attributes: info.Attributes}} size := COORD{1, 1} position := COORD{0, 0} region := SMALL_RECT{Left: info.CursorPosition.X, Top: 
info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { return err } h.drewMarginByte = true } return nil } // cacheConsoleInfo ensures that the current console screen information has been queried // since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. func (h *WindowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { if h.curInfo == nil { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return COORD{}, nil, err } h.curInfo = info h.curPos = info.CursorPosition } return h.curPos, h.curInfo, nil } func (h *WindowsAnsiEventHandler) updatePos(pos COORD) { if h.curInfo == nil { panic("failed to call getCurrentInfo before calling updatePos") } h.curPos = pos } // clearWrap clears the state where the cursor is in the margin // waiting for the next character before wrapping the line. This must // be done before most operations that act on the cursor. func (h *WindowsAnsiEventHandler) clearWrap() { h.wrapNext = false h.drewMarginByte = false } docker-1.10.3/vendor/src/github.com/BurntSushi/000077500000000000000000000000001267010174400212765ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/BurntSushi/toml/000077500000000000000000000000001267010174400222515ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/BurntSushi/toml/.gitignore000066400000000000000000000000571267010174400242430ustar00rootroot00000000000000TAGS tags .*.swp tomlcheck/tomlcheck toml.test docker-1.10.3/vendor/src/github.com/BurntSushi/toml/.travis.yml000066400000000000000000000002621267010174400243620ustar00rootroot00000000000000language: go go: - 1.1 - 1.2 - tip install: - go install ./... 
- go get github.com/BurntSushi/toml-test script: - export PATH="$PATH:$HOME/gopath/bin" - make test docker-1.10.3/vendor/src/github.com/BurntSushi/toml/COMPATIBLE000066400000000000000000000001541267010174400236130ustar00rootroot00000000000000Compatible with TOML version [v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) docker-1.10.3/vendor/src/github.com/BurntSushi/toml/COPYING000066400000000000000000000007441267010174400233110ustar00rootroot00000000000000 DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE Version 2, December 2004 Copyright (C) 2004 Sam Hocevar Everyone is permitted to copy and distribute verbatim or modified copies of this license document, and changing it is allowed as long as the name is changed. DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. You just DO WHAT THE FUCK YOU WANT TO. docker-1.10.3/vendor/src/github.com/BurntSushi/toml/Makefile000066400000000000000000000004411267010174400237100ustar00rootroot00000000000000install: go install ./... test: install go test -v toml-test toml-test-decoder toml-test -encoder toml-test-encoder fmt: gofmt -w *.go */*.go colcheck *.go */*.go tags: find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS push: git push origin master git push github master docker-1.10.3/vendor/src/github.com/BurntSushi/toml/README.md000066400000000000000000000101611267010174400235270ustar00rootroot00000000000000## TOML parser and encoder for Go with reflection TOML stands for Tom's Obvious, Minimal Language. This Go package provides a reflection interface similar to Go's standard library `json` and `xml` packages. This package also supports the `encoding.TextUnmarshaler` and `encoding.TextMarshaler` interfaces so that you can define custom data representations. (There is an example of this below.) 
Spec: https://github.com/mojombo/toml Compatible with TOML version [v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) Documentation: http://godoc.org/github.com/BurntSushi/toml Installation: ```bash go get github.com/BurntSushi/toml ``` Try the toml validator: ```bash go get github.com/BurntSushi/toml/cmd/tomlv tomlv some-toml-file.toml ``` [![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) ### Testing This package passes all tests in [toml-test](https://github.com/BurntSushi/toml-test) for both the decoder and the encoder. ### Examples This package works similarly to how the Go standard library handles `XML` and `JSON`. Namely, data is loaded into Go values via reflection. For the simplest example, consider some TOML file as just a list of keys and values: ```toml Age = 25 Cats = [ "Cauchy", "Plato" ] Pi = 3.14 Perfection = [ 6, 28, 496, 8128 ] DOB = 1987-07-05T05:45:00Z ``` Which could be defined in Go as: ```go type Config struct { Age int Cats []string Pi float64 Perfection []int DOB time.Time // requires `import time` } ``` And then decoded with: ```go var conf Config if _, err := toml.Decode(tomlData, &conf); err != nil { // handle error } ``` You can also use struct tags if your struct field name doesn't map to a TOML key value directly: ```toml some_key_NAME = "wat" ``` ```go type TOML struct { ObscureKey string `toml:"some_key_NAME"` } ``` ### Using the `encoding.TextUnmarshaler` interface Here's an example that automatically parses duration strings into `time.Duration` values: ```toml [[song]] name = "Thunder Road" duration = "4m49s" [[song]] name = "Stairway to Heaven" duration = "8m03s" ``` Which can be decoded with: ```go type song struct { Name string Duration duration } type songs struct { Song []song } var favorites songs if _, err := toml.Decode(blob, &favorites); err != nil { log.Fatal(err) } for _, s := range favorites.Song { fmt.Printf("%s (%s)\n", s.Name, 
s.Duration) } ``` And you'll also need a `duration` type that satisfies the `encoding.TextUnmarshaler` interface: ```go type duration struct { time.Duration } func (d *duration) UnmarshalText(text []byte) error { var err error d.Duration, err = time.ParseDuration(string(text)) return err } ``` ### More complex usage Here's an example of how to load the example from the official spec page: ```toml # This is a TOML document. Boom. title = "TOML Example" [owner] name = "Tom Preston-Werner" organization = "GitHub" bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." dob = 1979-05-27T07:32:00Z # First class dates? Why not? [database] server = "192.168.1.1" ports = [ 8001, 8001, 8002 ] connection_max = 5000 enabled = true [servers] # You can indent as you please. Tabs or spaces. TOML don't care. [servers.alpha] ip = "10.0.0.1" dc = "eqdc10" [servers.beta] ip = "10.0.0.2" dc = "eqdc10" [clients] data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it # Line breaks are OK when inside arrays hosts = [ "alpha", "omega" ] ``` And the corresponding Go types are: ```go type tomlConfig struct { Title string Owner ownerInfo DB database `toml:"database"` Servers map[string]server Clients clients } type ownerInfo struct { Name string Org string `toml:"organization"` Bio string DOB time.Time } type database struct { Server string Ports []int ConnMax int `toml:"connection_max"` Enabled bool } type server struct { IP string DC string } type clients struct { Data [][]interface{} Hosts []string } ``` Note that a case insensitive match will be tried if an exact match can't be found. A working example of the above can be found in `_examples/example.{go,toml}`. 
docker-1.10.3/vendor/src/github.com/BurntSushi/toml/cmd/000077500000000000000000000000001267010174400230145ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/000077500000000000000000000000001267010174400263475ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING000066400000000000000000000007441267010174400274070ustar00rootroot00000000000000 DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE Version 2, December 2004 Copyright (C) 2004 Sam Hocevar Everyone is permitted to copy and distribute verbatim or modified copies of this license document, and changing it is allowed as long as the name is changed. DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. You just DO WHAT THE FUCK YOU WANT TO. docker-1.10.3/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/000077500000000000000000000000001267010174400263615ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING000066400000000000000000000007441267010174400274210ustar00rootroot00000000000000 DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE Version 2, December 2004 Copyright (C) 2004 Sam Hocevar Everyone is permitted to copy and distribute verbatim or modified copies of this license document, and changing it is allowed as long as the name is changed. DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. You just DO WHAT THE FUCK YOU WANT TO. 
docker-1.10.3/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/000077500000000000000000000000001267010174400241555ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING000066400000000000000000000007441267010174400252150ustar00rootroot00000000000000 DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE Version 2, December 2004 Copyright (C) 2004 Sam Hocevar Everyone is permitted to copy and distribute verbatim or modified copies of this license document, and changing it is allowed as long as the name is changed. DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. You just DO WHAT THE FUCK YOU WANT TO. docker-1.10.3/vendor/src/github.com/BurntSushi/toml/decode.go000066400000000000000000000335621267010174400240340ustar00rootroot00000000000000package toml import ( "fmt" "io" "io/ioutil" "math" "reflect" "strings" "time" ) var e = fmt.Errorf // Unmarshaler is the interface implemented by objects that can unmarshal a // TOML description of themselves. type Unmarshaler interface { UnmarshalTOML(interface{}) error } // Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. func Unmarshal(p []byte, v interface{}) error { _, err := Decode(string(p), v) return err } // Primitive is a TOML value that hasn't been decoded into a Go value. // When using the various `Decode*` functions, the type `Primitive` may // be given to any value, and its decoding will be delayed. // // A `Primitive` value can be decoded using the `PrimitiveDecode` function. // // The underlying representation of a `Primitive` value is subject to change. // Do not rely on it. // // N.B. Primitive values are still parsed, so using them will only avoid // the overhead of reflection. They can be useful when you don't know the // exact type of TOML data until run time. type Primitive struct { undecoded interface{} context Key } // DEPRECATED! // // Use MetaData.PrimitiveDecode instead. 
func PrimitiveDecode(primValue Primitive, v interface{}) error { md := MetaData{decoded: make(map[string]bool)} return md.unify(primValue.undecoded, rvalue(v)) } // PrimitiveDecode is just like the other `Decode*` functions, except it // decodes a TOML value that has already been parsed. Valid primitive values // can *only* be obtained from values filled by the decoder functions, // including this method. (i.e., `v` may contain more `Primitive` // values.) // // Meta data for primitive values is included in the meta data returned by // the `Decode*` functions with one exception: keys returned by the Undecoded // method will only reflect keys that were decoded. Namely, any keys hidden // behind a Primitive will be considered undecoded. Executing this method will // update the undecoded keys in the meta data. (See the example.) func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { md.context = primValue.context defer func() { md.context = nil }() return md.unify(primValue.undecoded, rvalue(v)) } // Decode will decode the contents of `data` in TOML format into a pointer // `v`. // // TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be // used interchangeably.) // // TOML arrays of tables correspond to either a slice of structs or a slice // of maps. // // TOML datetimes correspond to Go `time.Time` values. // // All other TOML types (float, string, int, bool and array) correspond // to the obvious Go types. // // An exception to the above rules is if a type implements the // encoding.TextUnmarshaler interface. In this case, any primitive TOML value // (floats, strings, integers, booleans and datetimes) will be converted to // a byte string and given to the value's UnmarshalText method. See the // Unmarshaler example for a demonstration with time duration strings. // // Key mapping // // TOML keys can map to either keys in a Go map or field names in a Go // struct. 
The special `toml` struct tag may be used to map TOML keys to // struct fields that don't match the key name exactly. (See the example.) // A case insensitive match to struct names will be tried if an exact match // can't be found. // // The mapping between TOML values and Go values is loose. That is, there // may exist TOML values that cannot be placed into your representation, and // there may be parts of your representation that do not correspond to // TOML values. This loose mapping can be made stricter by using the IsDefined // and/or Undecoded methods on the MetaData returned. // // This decoder will not handle cyclic types. If a cyclic type is passed, // `Decode` will not terminate. func Decode(data string, v interface{}) (MetaData, error) { p, err := parse(data) if err != nil { return MetaData{}, err } md := MetaData{ p.mapping, p.types, p.ordered, make(map[string]bool, len(p.ordered)), nil, } return md, md.unify(p.mapping, rvalue(v)) } // DecodeFile is just like Decode, except it will automatically read the // contents of the file at `fpath` and decode it for you. func DecodeFile(fpath string, v interface{}) (MetaData, error) { bs, err := ioutil.ReadFile(fpath) if err != nil { return MetaData{}, err } return Decode(string(bs), v) } // DecodeReader is just like Decode, except it will consume all bytes // from the reader and decode it for you. func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { bs, err := ioutil.ReadAll(r) if err != nil { return MetaData{}, err } return Decode(string(bs), v) } // unify performs a sort of type unification based on the structure of `rv`, // which is the client representation. // // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. func (md *MetaData) unify(data interface{}, rv reflect.Value) error { // Special case. Look for a `Primitive` value. 
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { // Save the undecoded data and the key context into the primitive // value. context := make(Key, len(md.context)) copy(context, md.context) rv.Set(reflect.ValueOf(Primitive{ undecoded: data, context: context, })) return nil } // Special case. Unmarshaler Interface support. if rv.CanAddr() { if v, ok := rv.Addr().Interface().(Unmarshaler); ok { return v.UnmarshalTOML(data) } } // Special case. Handle time.Time values specifically. // TODO: Remove this code when we decide to drop support for Go 1.1. // This isn't necessary in Go 1.2 because time.Time satisfies the encoding // interfaces. if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { return md.unifyDatetime(data, rv) } // Special case. Look for a value satisfying the TextUnmarshaler interface. if v, ok := rv.Interface().(TextUnmarshaler); ok { return md.unifyText(data, v) } // BUG(burntsushi) // The behavior here is incorrect whenever a Go type satisfies the // encoding.TextUnmarshaler interface but also corresponds to a TOML // hash or array. In particular, the unmarshaler should only be applied // to primitive TOML values. But at this point, it will be applied to // all kinds of values and produce an incorrect error whenever those values // are hashes or arrays (including arrays of tables). k := rv.Kind() // laziness if k >= reflect.Int && k <= reflect.Uint64 { return md.unifyInt(data, rv) } switch k { case reflect.Ptr: elem := reflect.New(rv.Type().Elem()) err := md.unify(data, reflect.Indirect(elem)) if err != nil { return err } rv.Set(elem) return nil case reflect.Struct: return md.unifyStruct(data, rv) case reflect.Map: return md.unifyMap(data, rv) case reflect.Array: return md.unifyArray(data, rv) case reflect.Slice: return md.unifySlice(data, rv) case reflect.String: return md.unifyString(data, rv) case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: // we only support empty interfaces. 
if rv.NumMethod() > 0 { return e("Unsupported type '%s'.", rv.Kind()) } return md.unifyAnything(data, rv) case reflect.Float32: fallthrough case reflect.Float64: return md.unifyFloat64(data, rv) } return e("Unsupported type '%s'.", rv.Kind()) } func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { tmap, ok := mapping.(map[string]interface{}) if !ok { return mismatch(rv, "map", mapping) } for key, datum := range tmap { var f *field fields := cachedTypeFields(rv.Type()) for i := range fields { ff := &fields[i] if ff.name == key { f = ff break } if f == nil && strings.EqualFold(ff.name, key) { f = ff } } if f != nil { subv := rv for _, i := range f.index { subv = indirect(subv.Field(i)) } if isUnifiable(subv) { md.decoded[md.context.add(key).String()] = true md.context = append(md.context, key) if err := md.unify(datum, subv); err != nil { return e("Type mismatch for '%s.%s': %s", rv.Type().String(), f.name, err) } md.context = md.context[0 : len(md.context)-1] } else if f.name != "" { // Bad user! No soup for you! 
return e("Field '%s.%s' is unexported, and therefore cannot "+ "be loaded with reflection.", rv.Type().String(), f.name) } } } return nil } func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { tmap, ok := mapping.(map[string]interface{}) if !ok { return badtype("map", mapping) } if rv.IsNil() { rv.Set(reflect.MakeMap(rv.Type())) } for k, v := range tmap { md.decoded[md.context.add(k).String()] = true md.context = append(md.context, k) rvkey := indirect(reflect.New(rv.Type().Key())) rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) if err := md.unify(v, rvval); err != nil { return err } md.context = md.context[0 : len(md.context)-1] rvkey.SetString(k) rv.SetMapIndex(rvkey, rvval) } return nil } func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { datav := reflect.ValueOf(data) if datav.Kind() != reflect.Slice { return badtype("slice", data) } sliceLen := datav.Len() if sliceLen != rv.Len() { return e("expected array length %d; got TOML array of length %d", rv.Len(), sliceLen) } return md.unifySliceArray(datav, rv) } func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { datav := reflect.ValueOf(data) if datav.Kind() != reflect.Slice { return badtype("slice", data) } sliceLen := datav.Len() if rv.IsNil() { rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen)) } return md.unifySliceArray(datav, rv) } func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { sliceLen := data.Len() for i := 0; i < sliceLen; i++ { v := data.Index(i).Interface() sliceval := indirect(rv.Index(i)) if err := md.unify(v, sliceval); err != nil { return err } } return nil } func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { if _, ok := data.(time.Time); ok { rv.Set(reflect.ValueOf(data)) return nil } return badtype("time.Time", data) } func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { if s, ok := data.(string); ok { rv.SetString(s) return nil } return 
badtype("string", data) } func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { if num, ok := data.(float64); ok { switch rv.Kind() { case reflect.Float32: fallthrough case reflect.Float64: rv.SetFloat(num) default: panic("bug") } return nil } return badtype("float", data) } func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { if num, ok := data.(int64); ok { if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { switch rv.Kind() { case reflect.Int, reflect.Int64: // No bounds checking necessary. case reflect.Int8: if num < math.MinInt8 || num > math.MaxInt8 { return e("Value '%d' is out of range for int8.", num) } case reflect.Int16: if num < math.MinInt16 || num > math.MaxInt16 { return e("Value '%d' is out of range for int16.", num) } case reflect.Int32: if num < math.MinInt32 || num > math.MaxInt32 { return e("Value '%d' is out of range for int32.", num) } } rv.SetInt(num) } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { unum := uint64(num) switch rv.Kind() { case reflect.Uint, reflect.Uint64: // No bounds checking necessary. 
case reflect.Uint8: if num < 0 || unum > math.MaxUint8 { return e("Value '%d' is out of range for uint8.", num) } case reflect.Uint16: if num < 0 || unum > math.MaxUint16 { return e("Value '%d' is out of range for uint16.", num) } case reflect.Uint32: if num < 0 || unum > math.MaxUint32 { return e("Value '%d' is out of range for uint32.", num) } } rv.SetUint(unum) } else { panic("unreachable") } return nil } return badtype("integer", data) } func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { if b, ok := data.(bool); ok { rv.SetBool(b) return nil } return badtype("boolean", data) } func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { rv.Set(reflect.ValueOf(data)) return nil } func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { var s string switch sdata := data.(type) { case TextMarshaler: text, err := sdata.MarshalText() if err != nil { return err } s = string(text) case fmt.Stringer: s = sdata.String() case string: s = sdata case bool: s = fmt.Sprintf("%v", sdata) case int64: s = fmt.Sprintf("%d", sdata) case float64: s = fmt.Sprintf("%f", sdata) default: return badtype("primitive (string-like)", data) } if err := v.UnmarshalText([]byte(s)); err != nil { return err } return nil } // rvalue returns a reflect.Value of `v`. All pointers are resolved. func rvalue(v interface{}) reflect.Value { return indirect(reflect.ValueOf(v)) } // indirect returns the value pointed to by a pointer. // Pointers are followed until the value is not a pointer. // New values are allocated for each nil pointer. // // An exception to this rule is if the value satisfies an interface of // interest to us (like encoding.TextUnmarshaler). 
func indirect(v reflect.Value) reflect.Value { if v.Kind() != reflect.Ptr { if v.CanAddr() { pv := v.Addr() if _, ok := pv.Interface().(TextUnmarshaler); ok { return pv } } return v } if v.IsNil() { v.Set(reflect.New(v.Type().Elem())) } return indirect(reflect.Indirect(v)) } func isUnifiable(rv reflect.Value) bool { if rv.CanSet() { return true } if _, ok := rv.Interface().(TextUnmarshaler); ok { return true } return false } func badtype(expected string, data interface{}) error { return e("Expected %s but found '%T'.", expected, data) } func mismatch(user reflect.Value, expected string, data interface{}) error { return e("Type mismatch for %s. Expected %s but found '%T'.", user.Type().String(), expected, data) } docker-1.10.3/vendor/src/github.com/BurntSushi/toml/decode_meta.go000066400000000000000000000062151267010174400250350ustar00rootroot00000000000000package toml import "strings" // MetaData allows access to meta information about TOML data that may not // be inferrable via reflection. In particular, whether a key has been defined // and the TOML type of a key. type MetaData struct { mapping map[string]interface{} types map[string]tomlType keys []Key decoded map[string]bool context Key // Used only during decoding. } // IsDefined returns true if the key given exists in the TOML data. The key // should be specified hierarchially. e.g., // // // access the TOML key 'a.b.c' // IsDefined("a", "b", "c") // // IsDefined will return false if an empty key given. Keys are case sensitive. func (md *MetaData) IsDefined(key ...string) bool { if len(key) == 0 { return false } var hash map[string]interface{} var ok bool var hashOrVal interface{} = md.mapping for _, k := range key { if hash, ok = hashOrVal.(map[string]interface{}); !ok { return false } if hashOrVal, ok = hash[k]; !ok { return false } } return true } // Type returns a string representation of the type of the key specified. 
// // Type will return the empty string if given an empty key or a key that // does not exist. Keys are case sensitive. func (md *MetaData) Type(key ...string) string { fullkey := strings.Join(key, ".") if typ, ok := md.types[fullkey]; ok { return typ.typeString() } return "" } // Key is the type of any TOML key, including key groups. Use (MetaData).Keys // to get values of this type. type Key []string func (k Key) String() string { return strings.Join(k, ".") } func (k Key) maybeQuotedAll() string { var ss []string for i := range k { ss = append(ss, k.maybeQuoted(i)) } return strings.Join(ss, ".") } func (k Key) maybeQuoted(i int) string { quote := false for _, c := range k[i] { if !isBareKeyChar(c) { quote = true break } } if quote { return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" } else { return k[i] } } func (k Key) add(piece string) Key { newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece return newKey } // Keys returns a slice of every key in the TOML data, including key groups. // Each key is itself a slice, where the first element is the top of the // hierarchy and the last is the most specific. // // The list will have the same order as the keys appeared in the TOML data. // // All keys returned are non-empty. func (md *MetaData) Keys() []Key { return md.keys } // Undecoded returns all keys that have not been decoded in the order in which // they appear in the original TOML document. // // This includes keys that haven't been decoded because of a Primitive value. // Once the Primitive value is decoded, the keys will be considered decoded. // // Also note that decoding into an empty interface will result in no decoding, // and so no keys will be considered decoded. // // In this sense, the Undecoded keys correspond to keys in the TOML document // that do not have a concrete type in your representation. 
func (md *MetaData) Undecoded() []Key { undecoded := make([]Key, 0, len(md.keys)) for _, key := range md.keys { if !md.decoded[key.String()] { undecoded = append(undecoded, key) } } return undecoded } docker-1.10.3/vendor/src/github.com/BurntSushi/toml/doc.go000066400000000000000000000021161267010174400233450ustar00rootroot00000000000000/* Package toml provides facilities for decoding and encoding TOML configuration files via reflection. There is also support for delaying decoding with the Primitive type, and querying the set of keys in a TOML document with the MetaData type. The specification implemented: https://github.com/mojombo/toml The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify whether a file is a valid TOML document. It can also be used to print the type of each key in a TOML document. Testing There are two important types of tests used for this package. The first is contained inside '*_test.go' files and uses the standard Go unit testing framework. These tests are primarily devoted to holistically testing the decoder and encoder. The second type of testing is used to verify the implementation's adherence to the TOML specification. These tests have been factored into their own project: https://github.com/BurntSushi/toml-test The reason the tests are in a separate project is so that they can be used by any implementation of TOML. Namely, it is language agnostic. 
*/ package toml docker-1.10.3/vendor/src/github.com/BurntSushi/toml/encode.go000066400000000000000000000320601267010174400240360ustar00rootroot00000000000000package toml import ( "bufio" "errors" "fmt" "io" "reflect" "sort" "strconv" "strings" "time" ) type tomlEncodeError struct{ error } var ( errArrayMixedElementTypes = errors.New( "can't encode array with mixed element types") errArrayNilElement = errors.New( "can't encode array with nil element") errNonString = errors.New( "can't encode a map with non-string key type") errAnonNonStruct = errors.New( "can't encode an anonymous field that is not a struct") errArrayNoTable = errors.New( "TOML array element can't contain a table") errNoKey = errors.New( "top-level values must be a Go map or struct") errAnything = errors.New("") // used in testing ) var quotedReplacer = strings.NewReplacer( "\t", "\\t", "\n", "\\n", "\r", "\\r", "\"", "\\\"", "\\", "\\\\", ) // Encoder controls the encoding of Go values to a TOML document to some // io.Writer. // // The indentation level can be controlled with the Indent field. type Encoder struct { // A single indentation level. By default it is two spaces. Indent string // hasWritten is whether we have written any output to w yet. hasWritten bool w *bufio.Writer } // NewEncoder returns a TOML encoder that encodes Go values to the io.Writer // given. By default, a single indentation level is 2 spaces. func NewEncoder(w io.Writer) *Encoder { return &Encoder{ w: bufio.NewWriter(w), Indent: " ", } } // Encode writes a TOML representation of the Go value to the underlying // io.Writer. If the value given cannot be encoded to a valid TOML document, // then an error is returned. // // The mapping between Go values and TOML values should be precisely the same // as for the Decode* functions. Similarly, the TextMarshaler interface is // supported by encoding the resulting bytes as strings. 
(If you want to write // arbitrary binary data then you will need to use something like base64 since // TOML does not have any binary types.) // // When encoding TOML hashes (i.e., Go maps or structs), keys without any // sub-hashes are encoded first. // // If a Go map is encoded, then its keys are sorted alphabetically for // deterministic output. More control over this behavior may be provided if // there is demand for it. // // Encoding Go values without a corresponding TOML representation---like map // types with non-string keys---will cause an error to be returned. Similarly // for mixed arrays/slices, arrays/slices with nil elements, embedded // non-struct types and nested slices containing maps or structs. // (e.g., [][]map[string]string is not allowed but []map[string]string is OK // and so is []map[string][]string.) func (enc *Encoder) Encode(v interface{}) error { rv := eindirect(reflect.ValueOf(v)) if err := enc.safeEncode(Key([]string{}), rv); err != nil { return err } return enc.w.Flush() } func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { defer func() { if r := recover(); r != nil { if terr, ok := r.(tomlEncodeError); ok { err = terr.error return } panic(r) } }() enc.encode(key, rv) return nil } func (enc *Encoder) encode(key Key, rv reflect.Value) { // Special case. Time needs to be in ISO8601 format. // Special case. If we can marshal the type to text, then we used that. // Basically, this prevents the encoder for handling these types as // generic structs (or whatever the underlying type of a TextMarshaler is). 
switch rv.Interface().(type) { case time.Time, TextMarshaler: enc.keyEqElement(key, rv) return } k := rv.Kind() switch k { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: enc.keyEqElement(key, rv) case reflect.Array, reflect.Slice: if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { enc.eArrayOfTables(key, rv) } else { enc.keyEqElement(key, rv) } case reflect.Interface: if rv.IsNil() { return } enc.encode(key, rv.Elem()) case reflect.Map: if rv.IsNil() { return } enc.eTable(key, rv) case reflect.Ptr: if rv.IsNil() { return } enc.encode(key, rv.Elem()) case reflect.Struct: enc.eTable(key, rv) default: panic(e("Unsupported type for key '%s': %s", key, k)) } } // eElement encodes any value that can be an array element (primitives and // arrays). func (enc *Encoder) eElement(rv reflect.Value) { switch v := rv.Interface().(type) { case time.Time: // Special case time.Time as a primitive. Has to come before // TextMarshaler below because time.Time implements // encoding.TextMarshaler, but we need to always use UTC. enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z")) return case TextMarshaler: // Special case. Use text marshaler if it's available for this value. 
if s, err := v.MarshalText(); err != nil { encPanic(err) } else { enc.writeQuoted(string(s)) } return } switch rv.Kind() { case reflect.Bool: enc.wf(strconv.FormatBool(rv.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: enc.wf(strconv.FormatInt(rv.Int(), 10)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: enc.wf(strconv.FormatUint(rv.Uint(), 10)) case reflect.Float32: enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) case reflect.Float64: enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) case reflect.Array, reflect.Slice: enc.eArrayOrSliceElement(rv) case reflect.Interface: enc.eElement(rv.Elem()) case reflect.String: enc.writeQuoted(rv.String()) default: panic(e("Unexpected primitive type: %s", rv.Kind())) } } // By the TOML spec, all floats must have a decimal with at least one // number on either side. func floatAddDecimal(fstr string) string { if !strings.Contains(fstr, ".") { return fstr + ".0" } return fstr } func (enc *Encoder) writeQuoted(s string) { enc.wf("\"%s\"", quotedReplacer.Replace(s)) } func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() enc.wf("[") for i := 0; i < length; i++ { elem := rv.Index(i) enc.eElement(elem) if i != length-1 { enc.wf(", ") } } enc.wf("]") } func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { if len(key) == 0 { encPanic(errNoKey) } for i := 0; i < rv.Len(); i++ { trv := rv.Index(i) if isNil(trv) { continue } panicIfInvalidKey(key) enc.newline() enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) enc.newline() enc.eMapOrStruct(key, trv) } } func (enc *Encoder) eTable(key Key, rv reflect.Value) { panicIfInvalidKey(key) if len(key) == 1 { // Output an extra new line between top-level tables. // (The newline isn't written if nothing else has been written though.) 
enc.newline() } if len(key) > 0 { enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) enc.newline() } enc.eMapOrStruct(key, rv) } func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { switch rv := eindirect(rv); rv.Kind() { case reflect.Map: enc.eMap(key, rv) case reflect.Struct: enc.eStruct(key, rv) default: panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) } } func (enc *Encoder) eMap(key Key, rv reflect.Value) { rt := rv.Type() if rt.Key().Kind() != reflect.String { encPanic(errNonString) } // Sort keys so that we have deterministic output. And write keys directly // underneath this key first, before writing sub-structs or sub-maps. var mapKeysDirect, mapKeysSub []string for _, mapKey := range rv.MapKeys() { k := mapKey.String() if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { mapKeysSub = append(mapKeysSub, k) } else { mapKeysDirect = append(mapKeysDirect, k) } } var writeMapKeys = func(mapKeys []string) { sort.Strings(mapKeys) for _, mapKey := range mapKeys { mrv := rv.MapIndex(reflect.ValueOf(mapKey)) if isNil(mrv) { // Don't write anything for nil fields. continue } enc.encode(key.add(mapKey), mrv) } } writeMapKeys(mapKeysDirect) writeMapKeys(mapKeysSub) } func (enc *Encoder) eStruct(key Key, rv reflect.Value) { // Write keys for fields directly under this key first, because if we write // a field that creates a new table, then all keys under it will be in that // table (not the one we're writing here). 
rt := rv.Type() var fieldsDirect, fieldsSub [][]int var addFields func(rt reflect.Type, rv reflect.Value, start []int) addFields = func(rt reflect.Type, rv reflect.Value, start []int) { for i := 0; i < rt.NumField(); i++ { f := rt.Field(i) // skip unexporded fields if f.PkgPath != "" { continue } frv := rv.Field(i) if f.Anonymous { frv := eindirect(frv) t := frv.Type() if t.Kind() != reflect.Struct { encPanic(errAnonNonStruct) } addFields(t, frv, f.Index) } else if typeIsHash(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { fieldsDirect = append(fieldsDirect, append(start, f.Index...)) } } } addFields(rt, rv, nil) var writeFields = func(fields [][]int) { for _, fieldIndex := range fields { sft := rt.FieldByIndex(fieldIndex) sf := rv.FieldByIndex(fieldIndex) if isNil(sf) { // Don't write anything for nil fields. continue } keyName := sft.Tag.Get("toml") if keyName == "-" { continue } if keyName == "" { keyName = sft.Name } enc.encode(key.add(keyName), sf) } } writeFields(fieldsDirect) writeFields(fieldsSub) } // tomlTypeName returns the TOML type name of the Go value's type. It is // used to determine whether the types of array elements are mixed (which is // forbidden). If the Go value is nil, then it is illegal for it to be an array // element, and valueIsNil is returned as true. // Returns the TOML type of a Go value. The type may be `nil`, which means // no concrete TOML type could be found. 
func tomlTypeOfGo(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() { return nil } switch rv.Kind() { case reflect.Bool: return tomlBool case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return tomlInteger case reflect.Float32, reflect.Float64: return tomlFloat case reflect.Array, reflect.Slice: if typeEqual(tomlHash, tomlArrayType(rv)) { return tomlArrayHash } else { return tomlArray } case reflect.Ptr, reflect.Interface: return tomlTypeOfGo(rv.Elem()) case reflect.String: return tomlString case reflect.Map: return tomlHash case reflect.Struct: switch rv.Interface().(type) { case time.Time: return tomlDatetime case TextMarshaler: return tomlString default: return tomlHash } default: panic("unexpected reflect.Kind: " + rv.Kind().String()) } } // tomlArrayType returns the element type of a TOML array. The type returned // may be nil if it cannot be determined (e.g., a nil slice or a zero length // slize). This function may also panic if it finds a type that cannot be // expressed in TOML (such as nil elements, heterogeneous arrays or directly // nested arrays of tables). func tomlArrayType(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { return nil } firstType := tomlTypeOfGo(rv.Index(0)) if firstType == nil { encPanic(errArrayNilElement) } rvlen := rv.Len() for i := 1; i < rvlen; i++ { elem := rv.Index(i) switch elemType := tomlTypeOfGo(elem); { case elemType == nil: encPanic(errArrayNilElement) case !typeEqual(firstType, elemType): encPanic(errArrayMixedElementTypes) } } // If we have a nested array, then we must make sure that the nested // array contains ONLY primitives. // This checks arbitrarily nested arrays. 
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { nest := tomlArrayType(eindirect(rv.Index(0))) if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { encPanic(errArrayNoTable) } } return firstType } func (enc *Encoder) newline() { if enc.hasWritten { enc.wf("\n") } } func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { if len(key) == 0 { encPanic(errNoKey) } panicIfInvalidKey(key) enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) enc.newline() } func (enc *Encoder) wf(format string, v ...interface{}) { if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { encPanic(err) } enc.hasWritten = true } func (enc *Encoder) indentStr(key Key) string { return strings.Repeat(enc.Indent, len(key)-1) } func encPanic(err error) { panic(tomlEncodeError{err}) } func eindirect(v reflect.Value) reflect.Value { switch v.Kind() { case reflect.Ptr, reflect.Interface: return eindirect(v.Elem()) default: return v } } func isNil(rv reflect.Value) bool { switch rv.Kind() { case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: return rv.IsNil() default: return false } } func panicIfInvalidKey(key Key) { for _, k := range key { if len(k) == 0 { encPanic(e("Key '%s' is not a valid table name. Key names "+ "cannot be empty.", key.maybeQuotedAll())) } } } func isValidKeyName(s string) bool { return len(s) != 0 } docker-1.10.3/vendor/src/github.com/BurntSushi/toml/encoding_types.go000066400000000000000000000010351267010174400256110ustar00rootroot00000000000000// +build go1.2 package toml // In order to support Go 1.1, we define our own TextMarshaler and // TextUnmarshaler types. For Go 1.2+, we just alias them with the // standard library interfaces. import ( "encoding" ) // TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here // so that Go 1.1 can be supported. type TextMarshaler encoding.TextMarshaler // TextUnmarshaler is a synonym for encoding.TextUnmarshaler. 
It is defined // here so that Go 1.1 can be supported. type TextUnmarshaler encoding.TextUnmarshaler docker-1.10.3/vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go000066400000000000000000000007731267010174400262000ustar00rootroot00000000000000// +build !go1.2 package toml // These interfaces were introduced in Go 1.2, so we add them manually when // compiling for Go 1.1. // TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here // so that Go 1.1 can be supported. type TextMarshaler interface { MarshalText() (text []byte, err error) } // TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined // here so that Go 1.1 can be supported. type TextUnmarshaler interface { UnmarshalText(text []byte) error } docker-1.10.3/vendor/src/github.com/BurntSushi/toml/lex.go000066400000000000000000000476121267010174400234020ustar00rootroot00000000000000package toml import ( "fmt" "strings" "unicode/utf8" ) type itemType int const ( itemError itemType = iota itemNIL // used in the parser to indicate no type itemEOF itemText itemString itemRawString itemMultilineString itemRawMultilineString itemBool itemInteger itemFloat itemDatetime itemArray // the start of an array itemArrayEnd itemTableStart itemTableEnd itemArrayTableStart itemArrayTableEnd itemKeyStart itemCommentStart ) const ( eof = 0 tableStart = '[' tableEnd = ']' arrayTableStart = '[' arrayTableEnd = ']' tableSep = '.' keySep = '=' arrayStart = '[' arrayEnd = ']' arrayValTerm = ',' commentStart = '#' stringStart = '"' stringEnd = '"' rawStringStart = '\'' rawStringEnd = '\'' ) type stateFn func(lx *lexer) stateFn type lexer struct { input string start int pos int width int line int state stateFn items chan item // A stack of state functions used to maintain context. // The idea is to reuse parts of the state machine in various places. // For example, values can appear at the top level or within arbitrarily // nested arrays. 
The last state on the stack is used after a value has // been lexed. Similarly for comments. stack []stateFn } type item struct { typ itemType val string line int } func (lx *lexer) nextItem() item { for { select { case item := <-lx.items: return item default: lx.state = lx.state(lx) } } } func lex(input string) *lexer { lx := &lexer{ input: input + "\n", state: lexTop, line: 1, items: make(chan item, 10), stack: make([]stateFn, 0, 10), } return lx } func (lx *lexer) push(state stateFn) { lx.stack = append(lx.stack, state) } func (lx *lexer) pop() stateFn { if len(lx.stack) == 0 { return lx.errorf("BUG in lexer: no states to pop.") } last := lx.stack[len(lx.stack)-1] lx.stack = lx.stack[0 : len(lx.stack)-1] return last } func (lx *lexer) current() string { return lx.input[lx.start:lx.pos] } func (lx *lexer) emit(typ itemType) { lx.items <- item{typ, lx.current(), lx.line} lx.start = lx.pos } func (lx *lexer) emitTrim(typ itemType) { lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} lx.start = lx.pos } func (lx *lexer) next() (r rune) { if lx.pos >= len(lx.input) { lx.width = 0 return eof } if lx.input[lx.pos] == '\n' { lx.line++ } r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) lx.pos += lx.width return r } // ignore skips over the pending input before this point. func (lx *lexer) ignore() { lx.start = lx.pos } // backup steps back one rune. Can be called only once per call of next. func (lx *lexer) backup() { lx.pos -= lx.width if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { lx.line-- } } // accept consumes the next rune if it's equal to `valid`. func (lx *lexer) accept(valid rune) bool { if lx.next() == valid { return true } lx.backup() return false } // peek returns but does not consume the next rune in the input. func (lx *lexer) peek() rune { r := lx.next() lx.backup() return r } // errorf stops all lexing by emitting an error and returning `nil`. 
// Note that any value that is a character is escaped if it's a special // character (new lines, tabs, etc.). func (lx *lexer) errorf(format string, values ...interface{}) stateFn { lx.items <- item{ itemError, fmt.Sprintf(format, values...), lx.line, } return nil } // lexTop consumes elements at the top level of TOML data. func lexTop(lx *lexer) stateFn { r := lx.next() if isWhitespace(r) || isNL(r) { return lexSkip(lx, lexTop) } switch r { case commentStart: lx.push(lexTop) return lexCommentStart case tableStart: return lexTableStart case eof: if lx.pos > lx.start { return lx.errorf("Unexpected EOF.") } lx.emit(itemEOF) return nil } // At this point, the only valid item can be a key, so we back up // and let the key lexer do the rest. lx.backup() lx.push(lexTopEnd) return lexKeyStart } // lexTopEnd is entered whenever a top-level item has been consumed. (A value // or a table.) It must see only whitespace, and will turn back to lexTop // upon a new line. If it sees EOF, it will quit the lexer successfully. func lexTopEnd(lx *lexer) stateFn { r := lx.next() switch { case r == commentStart: // a comment will read to a new line for us. lx.push(lexTop) return lexCommentStart case isWhitespace(r): return lexTopEnd case isNL(r): lx.ignore() return lexTop case r == eof: lx.ignore() return lexTop } return lx.errorf("Expected a top-level item to end with a new line, "+ "comment or EOF, but got %q instead.", r) } // lexTable lexes the beginning of a table. Namely, it makes sure that // it starts with a character other than '.' and ']'. // It assumes that '[' has already been consumed. // It also handles the case that this is an item in an array of tables. // e.g., '[[name]]'. 
func lexTableStart(lx *lexer) stateFn { if lx.peek() == arrayTableStart { lx.next() lx.emit(itemArrayTableStart) lx.push(lexArrayTableEnd) } else { lx.emit(itemTableStart) lx.push(lexTableEnd) } return lexTableNameStart } func lexTableEnd(lx *lexer) stateFn { lx.emit(itemTableEnd) return lexTopEnd } func lexArrayTableEnd(lx *lexer) stateFn { if r := lx.next(); r != arrayTableEnd { return lx.errorf("Expected end of table array name delimiter %q, "+ "but got %q instead.", arrayTableEnd, r) } lx.emit(itemArrayTableEnd) return lexTopEnd } func lexTableNameStart(lx *lexer) stateFn { switch r := lx.peek(); { case r == tableEnd || r == eof: return lx.errorf("Unexpected end of table name. (Table names cannot " + "be empty.)") case r == tableSep: return lx.errorf("Unexpected table separator. (Table names cannot " + "be empty.)") case r == stringStart || r == rawStringStart: lx.ignore() lx.push(lexTableNameEnd) return lexValue // reuse string lexing case isWhitespace(r): return lexTableNameStart default: return lexBareTableName } } // lexTableName lexes the name of a table. It assumes that at least one // valid character for the table has already been read. func lexBareTableName(lx *lexer) stateFn { switch r := lx.next(); { case isBareKeyChar(r): return lexBareTableName case r == tableSep || r == tableEnd: lx.backup() lx.emitTrim(itemText) return lexTableNameEnd default: return lx.errorf("Bare keys cannot contain %q.", r) } } // lexTableNameEnd reads the end of a piece of a table name, optionally // consuming whitespace. func lexTableNameEnd(lx *lexer) stateFn { switch r := lx.next(); { case isWhitespace(r): return lexTableNameEnd case r == tableSep: lx.ignore() return lexTableNameStart case r == tableEnd: return lx.pop() default: return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ "instead.", r) } } // lexKeyStart consumes a key name up until the first non-whitespace character. // lexKeyStart will ignore whitespace. 
func lexKeyStart(lx *lexer) stateFn { r := lx.peek() switch { case r == keySep: return lx.errorf("Unexpected key separator %q.", keySep) case isWhitespace(r) || isNL(r): lx.next() return lexSkip(lx, lexKeyStart) case r == stringStart || r == rawStringStart: lx.ignore() lx.emit(itemKeyStart) lx.push(lexKeyEnd) return lexValue // reuse string lexing default: lx.ignore() lx.emit(itemKeyStart) return lexBareKey } } // lexBareKey consumes the text of a bare key. Assumes that the first character // (which is not whitespace) has not yet been consumed. func lexBareKey(lx *lexer) stateFn { switch r := lx.next(); { case isBareKeyChar(r): return lexBareKey case isWhitespace(r): lx.emitTrim(itemText) return lexKeyEnd case r == keySep: lx.backup() lx.emitTrim(itemText) return lexKeyEnd default: return lx.errorf("Bare keys cannot contain %q.", r) } } // lexKeyEnd consumes the end of a key and trims whitespace (up to the key // separator). func lexKeyEnd(lx *lexer) stateFn { switch r := lx.next(); { case r == keySep: return lexSkip(lx, lexValue) case isWhitespace(r): return lexSkip(lx, lexKeyEnd) default: return lx.errorf("Expected key separator %q, but got %q instead.", keySep, r) } } // lexValue starts the consumption of a value anywhere a value is expected. // lexValue will ignore whitespace. // After a value is lexed, the last state on the next is popped and returned. func lexValue(lx *lexer) stateFn { // We allow whitespace to precede a value, but NOT new lines. // In array syntax, the array states are responsible for ignoring new // lines. 
r := lx.next() if isWhitespace(r) { return lexSkip(lx, lexValue) } switch { case r == arrayStart: lx.ignore() lx.emit(itemArray) return lexArrayValue case r == stringStart: if lx.accept(stringStart) { if lx.accept(stringStart) { lx.ignore() // Ignore """ return lexMultilineString } lx.backup() } lx.ignore() // ignore the '"' return lexString case r == rawStringStart: if lx.accept(rawStringStart) { if lx.accept(rawStringStart) { lx.ignore() // Ignore """ return lexMultilineRawString } lx.backup() } lx.ignore() // ignore the "'" return lexRawString case r == 't': return lexTrue case r == 'f': return lexFalse case r == '-': return lexNumberStart case isDigit(r): lx.backup() // avoid an extra state and use the same as above return lexNumberOrDateStart case r == '.': // special error case, be kind to users return lx.errorf("Floats must start with a digit, not '.'.") } return lx.errorf("Expected value but found %q instead.", r) } // lexArrayValue consumes one value in an array. It assumes that '[' or ',' // have already been consumed. All whitespace and new lines are ignored. func lexArrayValue(lx *lexer) stateFn { r := lx.next() switch { case isWhitespace(r) || isNL(r): return lexSkip(lx, lexArrayValue) case r == commentStart: lx.push(lexArrayValue) return lexCommentStart case r == arrayValTerm: return lx.errorf("Unexpected array value terminator %q.", arrayValTerm) case r == arrayEnd: return lexArrayEnd } lx.backup() lx.push(lexArrayValueEnd) return lexValue } // lexArrayValueEnd consumes the cruft between values of an array. Namely, // it ignores whitespace and expects either a ',' or a ']'. 
func lexArrayValueEnd(lx *lexer) stateFn { r := lx.next() switch { case isWhitespace(r) || isNL(r): return lexSkip(lx, lexArrayValueEnd) case r == commentStart: lx.push(lexArrayValueEnd) return lexCommentStart case r == arrayValTerm: lx.ignore() return lexArrayValue // move on to the next value case r == arrayEnd: return lexArrayEnd } return lx.errorf("Expected an array value terminator %q or an array "+ "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) } // lexArrayEnd finishes the lexing of an array. It assumes that a ']' has // just been consumed. func lexArrayEnd(lx *lexer) stateFn { lx.ignore() lx.emit(itemArrayEnd) return lx.pop() } // lexString consumes the inner contents of a string. It assumes that the // beginning '"' has already been consumed and ignored. func lexString(lx *lexer) stateFn { r := lx.next() switch { case isNL(r): return lx.errorf("Strings cannot contain new lines.") case r == '\\': lx.push(lexString) return lexStringEscape case r == stringEnd: lx.backup() lx.emit(itemString) lx.next() lx.ignore() return lx.pop() } return lexString } // lexMultilineString consumes the inner contents of a string. It assumes that // the beginning '"""' has already been consumed and ignored. func lexMultilineString(lx *lexer) stateFn { r := lx.next() switch { case r == '\\': return lexMultilineStringEscape case r == stringEnd: if lx.accept(stringEnd) { if lx.accept(stringEnd) { lx.backup() lx.backup() lx.backup() lx.emit(itemMultilineString) lx.next() lx.next() lx.next() lx.ignore() return lx.pop() } lx.backup() } } return lexMultilineString } // lexRawString consumes a raw string. Nothing can be escaped in such a string. // It assumes that the beginning "'" has already been consumed and ignored. 
func lexRawString(lx *lexer) stateFn { r := lx.next() switch { case isNL(r): return lx.errorf("Strings cannot contain new lines.") case r == rawStringEnd: lx.backup() lx.emit(itemRawString) lx.next() lx.ignore() return lx.pop() } return lexRawString } // lexMultilineRawString consumes a raw string. Nothing can be escaped in such // a string. It assumes that the beginning "'" has already been consumed and // ignored. func lexMultilineRawString(lx *lexer) stateFn { r := lx.next() switch { case r == rawStringEnd: if lx.accept(rawStringEnd) { if lx.accept(rawStringEnd) { lx.backup() lx.backup() lx.backup() lx.emit(itemRawMultilineString) lx.next() lx.next() lx.next() lx.ignore() return lx.pop() } lx.backup() } } return lexMultilineRawString } // lexMultilineStringEscape consumes an escaped character. It assumes that the // preceding '\\' has already been consumed. func lexMultilineStringEscape(lx *lexer) stateFn { // Handle the special case first: if isNL(lx.next()) { lx.next() return lexMultilineString } else { lx.backup() lx.push(lexMultilineString) return lexStringEscape(lx) } } func lexStringEscape(lx *lexer) stateFn { r := lx.next() switch r { case 'b': fallthrough case 't': fallthrough case 'n': fallthrough case 'f': fallthrough case 'r': fallthrough case '"': fallthrough case '\\': return lx.pop() case 'u': return lexShortUnicodeEscape case 'U': return lexLongUnicodeEscape } return lx.errorf("Invalid escape character %q. 
Only the following "+ "escape characters are allowed: "+ "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ "\\uXXXX and \\UXXXXXXXX.", r) } func lexShortUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 4; i++ { r = lx.next() if !isHexadecimal(r) { return lx.errorf("Expected four hexadecimal digits after '\\u', "+ "but got '%s' instead.", lx.current()) } } return lx.pop() } func lexLongUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 8; i++ { r = lx.next() if !isHexadecimal(r) { return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ "but got '%s' instead.", lx.current()) } } return lx.pop() } // lexNumberOrDateStart consumes either a (positive) integer, float or // datetime. It assumes that NO negative sign has been consumed. func lexNumberOrDateStart(lx *lexer) stateFn { r := lx.next() if !isDigit(r) { if r == '.' { return lx.errorf("Floats must start with a digit, not '.'.") } else { return lx.errorf("Expected a digit but got %q.", r) } } return lexNumberOrDate } // lexNumberOrDate consumes either a (positive) integer, float or datetime. func lexNumberOrDate(lx *lexer) stateFn { r := lx.next() switch { case r == '-': if lx.pos-lx.start != 5 { return lx.errorf("All ISO8601 dates must be in full Zulu form.") } return lexDateAfterYear case isDigit(r): return lexNumberOrDate case r == '.': return lexFloatStart } lx.backup() lx.emit(itemInteger) return lx.pop() } // lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format. // It assumes that "YYYY-" has already been consumed. func lexDateAfterYear(lx *lexer) stateFn { formats := []rune{ // digits are '0'. // everything else is direct equality. 
'0', '0', '-', '0', '0', 'T', '0', '0', ':', '0', '0', ':', '0', '0', 'Z', } for _, f := range formats { r := lx.next() if f == '0' { if !isDigit(r) { return lx.errorf("Expected digit in ISO8601 datetime, "+ "but found %q instead.", r) } } else if f != r { return lx.errorf("Expected %q in ISO8601 datetime, "+ "but found %q instead.", f, r) } } lx.emit(itemDatetime) return lx.pop() } // lexNumberStart consumes either an integer or a float. It assumes that // a negative sign has already been read, but that *no* digits have been // consumed. lexNumberStart will move to the appropriate integer or float // states. func lexNumberStart(lx *lexer) stateFn { // we MUST see a digit. Even floats have to start with a digit. r := lx.next() if !isDigit(r) { if r == '.' { return lx.errorf("Floats must start with a digit, not '.'.") } else { return lx.errorf("Expected a digit but got %q.", r) } } return lexNumber } // lexNumber consumes an integer or a float after seeing the first digit. func lexNumber(lx *lexer) stateFn { r := lx.next() switch { case isDigit(r): return lexNumber case r == '.': return lexFloatStart } lx.backup() lx.emit(itemInteger) return lx.pop() } // lexFloatStart starts the consumption of digits of a float after a '.'. // Namely, at least one digit is required. func lexFloatStart(lx *lexer) stateFn { r := lx.next() if !isDigit(r) { return lx.errorf("Floats must have a digit after the '.', but got "+ "%q instead.", r) } return lexFloat } // lexFloat consumes the digits of a float after a '.'. // Assumes that one digit has been consumed after a '.' already. func lexFloat(lx *lexer) stateFn { r := lx.next() if isDigit(r) { return lexFloat } lx.backup() lx.emit(itemFloat) return lx.pop() } // lexConst consumes the s[1:] in s. It assumes that s[0] has already been // consumed. 
func lexConst(lx *lexer, s string) stateFn { for i := range s[1:] { if r := lx.next(); r != rune(s[i+1]) { return lx.errorf("Expected %q, but found %q instead.", s[:i+1], s[:i]+string(r)) } } return nil } // lexTrue consumes the "rue" in "true". It assumes that 't' has already // been consumed. func lexTrue(lx *lexer) stateFn { if fn := lexConst(lx, "true"); fn != nil { return fn } lx.emit(itemBool) return lx.pop() } // lexFalse consumes the "alse" in "false". It assumes that 'f' has already // been consumed. func lexFalse(lx *lexer) stateFn { if fn := lexConst(lx, "false"); fn != nil { return fn } lx.emit(itemBool) return lx.pop() } // lexCommentStart begins the lexing of a comment. It will emit // itemCommentStart and consume no characters, passing control to lexComment. func lexCommentStart(lx *lexer) stateFn { lx.ignore() lx.emit(itemCommentStart) return lexComment } // lexComment lexes an entire comment. It assumes that '#' has been consumed. // It will consume *up to* the first new line character, and pass control // back to the last state on the stack. func lexComment(lx *lexer) stateFn { r := lx.peek() if isNL(r) || r == eof { lx.emit(itemText) return lx.pop() } lx.next() return lexComment } // lexSkip ignores all slurped input and moves on to the next state. func lexSkip(lx *lexer, nextState stateFn) stateFn { return func(lx *lexer) stateFn { lx.ignore() return nextState } } // isWhitespace returns true if `r` is a whitespace character according // to the spec. 
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } func isNL(r rune) bool { return r == '\n' || r == '\r' } func isDigit(r rune) bool { return r >= '0' && r <= '9' } func isHexadecimal(r rune) bool { return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') } func isBareKeyChar(r rune) bool { return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' } func (itype itemType) String() string { switch itype { case itemError: return "Error" case itemNIL: return "NIL" case itemEOF: return "EOF" case itemText: return "Text" case itemString: return "String" case itemRawString: return "String" case itemMultilineString: return "String" case itemRawMultilineString: return "String" case itemBool: return "Bool" case itemInteger: return "Integer" case itemFloat: return "Float" case itemDatetime: return "DateTime" case itemTableStart: return "TableStart" case itemTableEnd: return "TableEnd" case itemKeyStart: return "KeyStart" case itemArray: return "Array" case itemArrayEnd: return "ArrayEnd" case itemCommentStart: return "CommentStart" } panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) } func (item item) String() string { return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) } docker-1.10.3/vendor/src/github.com/BurntSushi/toml/parse.go000066400000000000000000000323511267010174400237160ustar00rootroot00000000000000package toml import ( "fmt" "log" "strconv" "strings" "time" "unicode" "unicode/utf8" ) type parser struct { mapping map[string]interface{} types map[string]tomlType lx *lexer // A list of keys in the order that they appear in the TOML data. ordered []Key // the full key for the current hash in scope context Key // the base key name for everything except hashes currentKey string // rough approximation of line number approxLine int // A map of 'key.group.names' to whether they were created implicitly. 
implicits map[string]bool } type parseError string func (pe parseError) Error() string { return string(pe) } func parse(data string) (p *parser, err error) { defer func() { if r := recover(); r != nil { var ok bool if err, ok = r.(parseError); ok { return } panic(r) } }() p = &parser{ mapping: make(map[string]interface{}), types: make(map[string]tomlType), lx: lex(data), ordered: make([]Key, 0), implicits: make(map[string]bool), } for { item := p.next() if item.typ == itemEOF { break } p.topLevel(item) } return p, nil } func (p *parser) panicf(format string, v ...interface{}) { msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", p.approxLine, p.current(), fmt.Sprintf(format, v...)) panic(parseError(msg)) } func (p *parser) next() item { it := p.lx.nextItem() if it.typ == itemError { p.panicf("%s", it.val) } return it } func (p *parser) bug(format string, v ...interface{}) { log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...)) } func (p *parser) expect(typ itemType) item { it := p.next() p.assertEqual(typ, it.typ) return it } func (p *parser) assertEqual(expected, got itemType) { if expected != got { p.bug("Expected '%s' but got '%s'.", expected, got) } } func (p *parser) topLevel(item item) { switch item.typ { case itemCommentStart: p.approxLine = item.line p.expect(itemText) case itemTableStart: kg := p.next() p.approxLine = kg.line var key Key for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { key = append(key, p.keyString(kg)) } p.assertEqual(itemTableEnd, kg.typ) p.establishContext(key, false) p.setType("", tomlHash) p.ordered = append(p.ordered, key) case itemArrayTableStart: kg := p.next() p.approxLine = kg.line var key Key for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { key = append(key, p.keyString(kg)) } p.assertEqual(itemArrayTableEnd, kg.typ) p.establishContext(key, true) p.setType("", tomlArrayHash) p.ordered = append(p.ordered, key) case itemKeyStart: kname := p.next() p.approxLine = kname.line 
p.currentKey = p.keyString(kname) val, typ := p.value(p.next()) p.setValue(p.currentKey, val) p.setType(p.currentKey, typ) p.ordered = append(p.ordered, p.context.add(p.currentKey)) p.currentKey = "" default: p.bug("Unexpected type at top level: %s", item.typ) } } // Gets a string for a key (or part of a key in a table name). func (p *parser) keyString(it item) string { switch it.typ { case itemText: return it.val case itemString, itemMultilineString, itemRawString, itemRawMultilineString: s, _ := p.value(it) return s.(string) default: p.bug("Unexpected key type: %s", it.typ) panic("unreachable") } } // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. func (p *parser) value(it item) (interface{}, tomlType) { switch it.typ { case itemString: return p.replaceEscapes(it.val), p.typeOfPrimitive(it) case itemMultilineString: trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: return stripFirstNewline(it.val), p.typeOfPrimitive(it) case itemBool: switch it.val { case "true": return true, p.typeOfPrimitive(it) case "false": return false, p.typeOfPrimitive(it) } p.bug("Expected boolean value, but got '%s'.", it.val) case itemInteger: num, err := strconv.ParseInt(it.val, 10, 64) if err != nil { // See comment below for floats describing why we make a // distinction between a bug and a user error. if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { p.panicf("Integer '%s' is out of the range of 64-bit "+ "signed integers.", it.val) } else { p.bug("Expected integer value, but got '%s'.", it.val) } } return num, p.typeOfPrimitive(it) case itemFloat: num, err := strconv.ParseFloat(it.val, 64) if err != nil { // Distinguish float values. 
Normally, it'd be a bug if the lexer // provides an invalid float, but it's possible that the float is // out of range of valid values (which the lexer cannot determine). // So mark the former as a bug but the latter as a legitimate user // error. // // This is also true for integers. if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { p.panicf("Float '%s' is out of the range of 64-bit "+ "IEEE-754 floating-point numbers.", it.val) } else { p.bug("Expected float value, but got '%s'.", it.val) } } return num, p.typeOfPrimitive(it) case itemDatetime: t, err := time.Parse("2006-01-02T15:04:05Z", it.val) if err != nil { p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val) } return t, p.typeOfPrimitive(it) case itemArray: array := make([]interface{}, 0) types := make([]tomlType, 0) for it = p.next(); it.typ != itemArrayEnd; it = p.next() { if it.typ == itemCommentStart { p.expect(itemText) continue } val, typ := p.value(it) array = append(array, val) types = append(types, typ) } return array, p.typeOfArray(types) } p.bug("Unexpected value type: %s", it.typ) panic("unreachable") } // establishContext sets the current context of the parser, // where the context is either a hash or an array of hashes. Which one is // set depends on the value of the `array` parameter. // // Establishing the context also makes sure that the key isn't a duplicate, and // will create implicit hashes automatically. func (p *parser) establishContext(key Key, array bool) { var ok bool // Always start at the top level and drill down for our context. hashContext := p.mapping keyContext := make(Key, 0) // We only need implicit hashes for key[0:-1] for _, k := range key[0 : len(key)-1] { _, ok = hashContext[k] keyContext = append(keyContext, k) // No key? Make an implicit hash and move on. 
if !ok { p.addImplicit(keyContext) hashContext[k] = make(map[string]interface{}) } // If the hash context is actually an array of tables, then set // the hash context to the last element in that array. // // Otherwise, it better be a table, since this MUST be a key group (by // virtue of it not being the last element in a key). switch t := hashContext[k].(type) { case []map[string]interface{}: hashContext = t[len(t)-1] case map[string]interface{}: hashContext = t default: p.panicf("Key '%s' was already created as a hash.", keyContext) } } p.context = keyContext if array { // If this is the first element for this array, then allocate a new // list of tables for it. k := key[len(key)-1] if _, ok := hashContext[k]; !ok { hashContext[k] = make([]map[string]interface{}, 0, 5) } // Add a new table. But make sure the key hasn't already been used // for something else. if hash, ok := hashContext[k].([]map[string]interface{}); ok { hashContext[k] = append(hash, make(map[string]interface{})) } else { p.panicf("Key '%s' was already created and cannot be used as "+ "an array.", keyContext) } } else { p.setValue(key[len(key)-1], make(map[string]interface{})) } p.context = append(p.context, key[len(key)-1]) } // setValue sets the given key to the given value in the current context. // It will make sure that the key hasn't already been defined, account for // implicit key groups. func (p *parser) setValue(key string, value interface{}) { var tmpHash interface{} var ok bool hash := p.mapping keyContext := make(Key, 0) for _, k := range p.context { keyContext = append(keyContext, k) if tmpHash, ok = hash[k]; !ok { p.bug("Context for key '%s' has not been established.", keyContext) } switch t := tmpHash.(type) { case []map[string]interface{}: // The context is a table of hashes. Pick the most recent table // defined as the current hash. 
hash = t[len(t)-1] case map[string]interface{}: hash = t default: p.bug("Expected hash to have type 'map[string]interface{}', but "+ "it has '%T' instead.", tmpHash) } } keyContext = append(keyContext, key) if _, ok := hash[key]; ok { // Typically, if the given key has already been set, then we have // to raise an error since duplicate keys are disallowed. However, // it's possible that a key was previously defined implicitly. In this // case, it is allowed to be redefined concretely. (See the // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) // // But we have to make sure to stop marking it as an implicit. (So that // another redefinition provokes an error.) // // Note that since it has already been defined (as a hash), we don't // want to overwrite it. So our business is done. if p.isImplicit(keyContext) { p.removeImplicit(keyContext) return } // Otherwise, we have a concrete key trying to override a previous // key, which is *always* wrong. p.panicf("Key '%s' has already been defined.", keyContext) } hash[key] = value } // setType sets the type of a particular value at a given key. // It should be called immediately AFTER setValue. // // Note that if `key` is empty, then the type given will be applied to the // current context (which is either a table or an array of tables). func (p *parser) setType(key string, typ tomlType) { keyContext := make(Key, 0, len(p.context)+1) for _, k := range p.context { keyContext = append(keyContext, k) } if len(key) > 0 { // allow type setting for hashes keyContext = append(keyContext, key) } p.types[keyContext.String()] = typ } // addImplicit sets the given Key as having been created implicitly. func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true } // removeImplicit stops tagging the given key as having been implicitly // created. 
func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false } // isImplicit returns true if the key group pointed to by the key was created // implicitly. func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] } // current returns the full key name of the current context. func (p *parser) current() string { if len(p.currentKey) == 0 { return p.context.String() } if len(p.context) == 0 { return p.currentKey } return fmt.Sprintf("%s.%s", p.context, p.currentKey) } func stripFirstNewline(s string) string { if len(s) == 0 || s[0] != '\n' { return s } return s[1:len(s)] } func stripEscapedWhitespace(s string) string { esc := strings.Split(s, "\\\n") if len(esc) > 1 { for i := 1; i < len(esc); i++ { esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) } } return strings.Join(esc, "") } func (p *parser) replaceEscapes(str string) string { var replaced []rune s := []byte(str) r := 0 for r < len(s) { if s[r] != '\\' { c, size := utf8.DecodeRune(s[r:]) r += size replaced = append(replaced, c) continue } r += 1 if r >= len(s) { p.bug("Escape sequence at end of string.") return "" } switch s[r] { default: p.bug("Expected valid escape code after \\, but got %q.", s[r]) return "" case 'b': replaced = append(replaced, rune(0x0008)) r += 1 case 't': replaced = append(replaced, rune(0x0009)) r += 1 case 'n': replaced = append(replaced, rune(0x000A)) r += 1 case 'f': replaced = append(replaced, rune(0x000C)) r += 1 case 'r': replaced = append(replaced, rune(0x000D)) r += 1 case '"': replaced = append(replaced, rune(0x0022)) r += 1 case '\\': replaced = append(replaced, rune(0x005C)) r += 1 case 'u': // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+5). (Because the lexer guarantees this // for us.) escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) replaced = append(replaced, escaped) r += 5 case 'U': // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+9). 
(Because the lexer guarantees this // for us.) escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) replaced = append(replaced, escaped) r += 9 } } return string(replaced) } func (p *parser) asciiEscapeToUnicode(bs []byte) rune { s := string(bs) hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { p.bug("Could not parse '%s' as a hexadecimal number, but the "+ "lexer claims it's OK: %s", s, err) } // BUG(burntsushi) // I honestly don't understand how this works. I can't seem // to find a way to make this fail. I figured this would fail on invalid // UTF-8 characters like U+DCFF, but it doesn't. if !utf8.ValidString(string(rune(hex))) { p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) } return rune(hex) } func isStringType(ty itemType) bool { return ty == itemString || ty == itemMultilineString || ty == itemRawString || ty == itemRawMultilineString } docker-1.10.3/vendor/src/github.com/BurntSushi/toml/session.vim000066400000000000000000000000671267010174400244540ustar00rootroot00000000000000au BufWritePost *.go silent!make tags > /dev/null 2>&1 docker-1.10.3/vendor/src/github.com/BurntSushi/toml/type_check.go000066400000000000000000000047021267010174400247210ustar00rootroot00000000000000package toml // tomlType represents any Go type that corresponds to a TOML type. // While the first draft of the TOML spec has a simplistic type system that // probably doesn't need this level of sophistication, we seem to be militating // toward adding real composite types. type tomlType interface { typeString() string } // typeEqual accepts any two types and returns true if they are equal. 
func typeEqual(t1, t2 tomlType) bool { if t1 == nil || t2 == nil { return false } return t1.typeString() == t2.typeString() } func typeIsHash(t tomlType) bool { return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) } type tomlBaseType string func (btype tomlBaseType) typeString() string { return string(btype) } func (btype tomlBaseType) String() string { return btype.typeString() } var ( tomlInteger tomlBaseType = "Integer" tomlFloat tomlBaseType = "Float" tomlDatetime tomlBaseType = "Datetime" tomlString tomlBaseType = "String" tomlBool tomlBaseType = "Bool" tomlArray tomlBaseType = "Array" tomlHash tomlBaseType = "Hash" tomlArrayHash tomlBaseType = "ArrayHash" ) // typeOfPrimitive returns a tomlType of any primitive value in TOML. // Primitive values are: Integer, Float, Datetime, String and Bool. // // Passing a lexer item other than the following will cause a BUG message // to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. func (p *parser) typeOfPrimitive(lexItem item) tomlType { switch lexItem.typ { case itemInteger: return tomlInteger case itemFloat: return tomlFloat case itemDatetime: return tomlDatetime case itemString: return tomlString case itemMultilineString: return tomlString case itemRawString: return tomlString case itemRawMultilineString: return tomlString case itemBool: return tomlBool } p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) panic("unreachable") } // typeOfArray returns a tomlType for an array given a list of types of its // values. // // In the current spec, if an array is homogeneous, then its type is always // "Array". If the array is not homogeneous, an error is generated. func (p *parser) typeOfArray(types []tomlType) tomlType { // Empty arrays are cool. 
if len(types) == 0 { return tomlArray } theType := types[0] for _, t := range types[1:] { if !typeEqual(theType, t) { p.panicf("Array contains values of type '%s' and '%s', but "+ "arrays must be homogeneous.", theType, t) } } return tomlArray } docker-1.10.3/vendor/src/github.com/BurntSushi/toml/type_fields.go000066400000000000000000000144271267010174400251170ustar00rootroot00000000000000package toml // Struct field handling is adapted from code in encoding/json: // // Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the Go distribution. import ( "reflect" "sort" "sync" ) // A field represents a single field found in a struct. type field struct { name string // the name of the field (`toml` tag included) tag bool // whether field has a `toml` tag index []int // represents the depth of an anonymous field typ reflect.Type // the type of the field } // byName sorts field by name, breaking ties with depth, // then breaking ties with "name came from toml tag", then // breaking ties with index sequence. type byName []field func (x byName) Len() int { return len(x) } func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byName) Less(i, j int) bool { if x[i].name != x[j].name { return x[i].name < x[j].name } if len(x[i].index) != len(x[j].index) { return len(x[i].index) < len(x[j].index) } if x[i].tag != x[j].tag { return x[i].tag } return byIndex(x).Less(i, j) } // byIndex sorts field by index sequence. type byIndex []field func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { return false } if xik != x[j].index[k] { return xik < x[j].index[k] } } return len(x[i].index) < len(x[j].index) } // typeFields returns a list of fields that TOML should recognize for the given // type. 
The algorithm is breadth-first search over the set of structs to // include - the top struct and then any reachable anonymous structs. func typeFields(t reflect.Type) []field { // Anonymous fields to explore at the current level and the next. current := []field{} next := []field{{typ: t}} // Count of queued names for current level and the next. count := map[reflect.Type]int{} nextCount := map[reflect.Type]int{} // Types already visited at an earlier level. visited := map[reflect.Type]bool{} // Fields found. var fields []field for len(next) > 0 { current, next = next, current[:0] count, nextCount = nextCount, map[reflect.Type]int{} for _, f := range current { if visited[f.typ] { continue } visited[f.typ] = true // Scan f.typ for fields to include. for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) if sf.PkgPath != "" { // unexported continue } name := sf.Tag.Get("toml") if name == "-" { continue } index := make([]int, len(f.index)+1) copy(index, f.index) index[len(f.index)] = i ft := sf.Type if ft.Name() == "" && ft.Kind() == reflect.Ptr { // Follow pointer. ft = ft.Elem() } // Record found field and index sequence. if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { tagged := name != "" if name == "" { name = sf.Name } fields = append(fields, field{name, tagged, index, ft}) if count[f.typ] > 1 { // If there were multiple instances, add a second, // so that the annihilation code will see a duplicate. // It only cares about the distinction between 1 or 2, // so don't bother generating any more copies. fields = append(fields, fields[len(fields)-1]) } continue } // Record new anonymous struct to explore in next round. nextCount[ft]++ if nextCount[ft] == 1 { f := field{name: ft.Name(), index: index, typ: ft} next = append(next, f) } } } } sort.Sort(byName(fields)) // Delete all fields that are hidden by the Go rules for embedded fields, // except that fields with TOML tags are promoted. 
// The fields are sorted in primary order of name, secondary order // of field index length. Loop over names; for each name, delete // hidden fields by choosing the one dominant field that survives. out := fields[:0] for advance, i := 0, 0; i < len(fields); i += advance { // One iteration per name. // Find the sequence of fields with the name of this first field. fi := fields[i] name := fi.name for advance = 1; i+advance < len(fields); advance++ { fj := fields[i+advance] if fj.name != name { break } } if advance == 1 { // Only one field with this name out = append(out, fi) continue } dominant, ok := dominantField(fields[i : i+advance]) if ok { out = append(out, dominant) } } fields = out sort.Sort(byIndex(fields)) return fields } // dominantField looks through the fields, all of which are known to // have the same name, to find the single field that dominates the // others using Go's embedding rules, modified by the presence of // TOML tags. If there are multiple top-level fields, the boolean // will be false: This condition is an error in Go and we skip all // the fields. func dominantField(fields []field) (field, bool) { // The fields are sorted in increasing index-length order. The winner // must therefore be one with the shortest index length. Drop all // longer entries, which is easy: just truncate the slice. length := len(fields[0].index) tagged := -1 // Index of first tagged field. for i, f := range fields { if len(f.index) > length { fields = fields[:i] break } if f.tag { if tagged >= 0 { // Multiple tagged fields at the same level: conflict. // Return no field. return field{}, false } tagged = i } } if tagged >= 0 { return fields[tagged], true } // All remaining fields have the same length. If there's more than one, // we have a conflict (two fields named "X" at the same level) and we // return no field. 
if len(fields) > 1 { return field{}, false } return fields[0], true } var fieldCache struct { sync.RWMutex m map[reflect.Type][]field } // cachedTypeFields is like typeFields but uses a cache to avoid repeated work. func cachedTypeFields(t reflect.Type) []field { fieldCache.RLock() f := fieldCache.m[t] fieldCache.RUnlock() if f != nil { return f } // Compute fields without lock. // Might duplicate effort but won't hold other computations back. f = typeFields(t) if f == nil { f = []field{} } fieldCache.Lock() if fieldCache.m == nil { fieldCache.m = map[reflect.Type][]field{} } fieldCache.m[t] = f fieldCache.Unlock() return f } docker-1.10.3/vendor/src/github.com/Graylog2/000077500000000000000000000000001267010174400206565ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Graylog2/go-gelf/000077500000000000000000000000001267010174400221765ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Graylog2/go-gelf/LICENSE000066400000000000000000000020331267010174400232010ustar00rootroot00000000000000Copyright 2012 SocialCode Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. docker-1.10.3/vendor/src/github.com/Graylog2/go-gelf/gelf/000077500000000000000000000000001267010174400231135ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go000066400000000000000000000062521267010174400247110ustar00rootroot00000000000000// Copyright 2012 SocialCode. All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package gelf import ( "bytes" "compress/gzip" "compress/zlib" "encoding/json" "fmt" "io" "net" "strings" "sync" ) type Reader struct { mu sync.Mutex conn net.Conn } func NewReader(addr string) (*Reader, error) { var err error udpAddr, err := net.ResolveUDPAddr("udp", addr) if err != nil { return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err) } conn, err := net.ListenUDP("udp", udpAddr) if err != nil { return nil, fmt.Errorf("ListenUDP: %s", err) } r := new(Reader) r.conn = conn return r, nil } func (r *Reader) Addr() string { return r.conn.LocalAddr().String() } // FIXME: this will discard data if p isn't big enough to hold the // full message. 
func (r *Reader) Read(p []byte) (int, error) { msg, err := r.ReadMessage() if err != nil { return -1, err } var data string if msg.Full == "" { data = msg.Short } else { data = msg.Full } return strings.NewReader(data).Read(p) } func (r *Reader) ReadMessage() (*Message, error) { cBuf := make([]byte, ChunkSize) var ( err error n, length int buf bytes.Buffer cid, ocid []byte seq, total uint8 cHead []byte cReader io.Reader chunks [][]byte ) for got := 0; got < 128 && (total == 0 || got < int(total)); got++ { if n, err = r.conn.Read(cBuf); err != nil { return nil, fmt.Errorf("Read: %s", err) } cHead, cBuf = cBuf[:2], cBuf[:n] if bytes.Equal(cHead, magicChunked) { //fmt.Printf("chunked %v\n", cBuf[:14]) cid, seq, total = cBuf[2:2+8], cBuf[2+8], cBuf[2+8+1] if ocid != nil && !bytes.Equal(cid, ocid) { return nil, fmt.Errorf("out-of-band message %v (awaited %v)", cid, ocid) } else if ocid == nil { ocid = cid chunks = make([][]byte, total) } n = len(cBuf) - chunkedHeaderLen //fmt.Printf("setting chunks[%d]: %d\n", seq, n) chunks[seq] = append(make([]byte, 0, n), cBuf[chunkedHeaderLen:]...) length += n } else { //not chunked if total > 0 { return nil, fmt.Errorf("out-of-band message (not chunked)") } break } } //fmt.Printf("\nchunks: %v\n", chunks) if length > 0 { if cap(cBuf) < length { cBuf = append(cBuf, make([]byte, 0, length-cap(cBuf))...) } cBuf = cBuf[:0] for i := range chunks { //fmt.Printf("appending %d %v\n", i, chunks[i]) cBuf = append(cBuf, chunks[i]...) 
} cHead = cBuf[:2] } // the data we get from the wire is compressed if bytes.Equal(cHead, magicGzip) { cReader, err = gzip.NewReader(bytes.NewReader(cBuf)) } else if cHead[0] == magicZlib[0] && (int(cHead[0])*256+int(cHead[1]))%31 == 0 { // zlib is slightly more complicated, but correct cReader, err = zlib.NewReader(bytes.NewReader(cBuf)) } else { return nil, fmt.Errorf("unknown magic: %x %v", cHead, cHead) } if err != nil { return nil, fmt.Errorf("NewReader: %s", err) } if _, err = io.Copy(&buf, cReader); err != nil { return nil, fmt.Errorf("io.Copy: %s", err) } msg := new(Message) if err := json.Unmarshal(buf.Bytes(), &msg); err != nil { return nil, fmt.Errorf("json.Unmarshal: %s", err) } return msg, nil } docker-1.10.3/vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go000066400000000000000000000217361267010174400247670ustar00rootroot00000000000000// Copyright 2012 SocialCode. All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package gelf import ( "bytes" "compress/flate" "compress/gzip" "compress/zlib" "crypto/rand" "encoding/json" "fmt" "io" "net" "os" "path" "runtime" "strings" "sync" "time" ) // Writer implements io.Writer and is used to send both discrete // messages to a graylog2 server, or data from a stream-oriented // interface (like the functions in log). type Writer struct { mu sync.Mutex conn net.Conn hostname string Facility string // defaults to current process name CompressionLevel int // one of the consts from compress/flate CompressionType CompressType } // What compression type the writer should use when sending messages // to the graylog2 server type CompressType int const ( CompressGzip CompressType = iota CompressZlib ) // Message represents the contents of the GELF message. It is gzipped // before sending. 
type Message struct { Version string `json:"version"` Host string `json:"host"` Short string `json:"short_message"` Full string `json:"full_message"` TimeUnix float64 `json:"timestamp"` Level int32 `json:"level"` Facility string `json:"facility"` Extra map[string]interface{} `json:"-"` } type innerMessage Message //against circular (Un)MarshalJSON // Used to control GELF chunking. Should be less than (MTU - len(UDP // header)). // // TODO: generate dynamically using Path MTU Discovery? const ( ChunkSize = 1420 chunkedHeaderLen = 12 chunkedDataLen = ChunkSize - chunkedHeaderLen ) var ( magicChunked = []byte{0x1e, 0x0f} magicZlib = []byte{0x78} magicGzip = []byte{0x1f, 0x8b} ) // Syslog severity levels const ( LOG_EMERG = int32(0) LOG_ALERT = int32(1) LOG_CRIT = int32(2) LOG_ERR = int32(3) LOG_WARNING = int32(4) LOG_NOTICE = int32(5) LOG_INFO = int32(6) LOG_DEBUG = int32(7) ) // numChunks returns the number of GELF chunks necessary to transmit // the given compressed buffer. func numChunks(b []byte) int { lenB := len(b) if lenB <= ChunkSize { return 1 } return len(b)/chunkedDataLen + 1 } // New returns a new GELF Writer. This writer can be used to send the // output of the standard Go log functions to a central GELF server by // passing it to log.SetOutput() func NewWriter(addr string) (*Writer, error) { var err error w := new(Writer) w.CompressionLevel = flate.BestSpeed if w.conn, err = net.Dial("udp", addr); err != nil { return nil, err } if w.hostname, err = os.Hostname(); err != nil { return nil, err } w.Facility = path.Base(os.Args[0]) return w, nil } // writes the gzip compressed byte array to the connection as a series // of GELF chunked messages. 
The header format is documented at // https://github.com/Graylog2/graylog2-docs/wiki/GELF as: // // 2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte // total, chunk-data func (w *Writer) writeChunked(zBytes []byte) (err error) { b := make([]byte, 0, ChunkSize) buf := bytes.NewBuffer(b) nChunksI := numChunks(zBytes) if nChunksI > 255 { return fmt.Errorf("msg too large, would need %d chunks", nChunksI) } nChunks := uint8(nChunksI) // use urandom to get a unique message id msgId := make([]byte, 8) n, err := io.ReadFull(rand.Reader, msgId) if err != nil || n != 8 { return fmt.Errorf("rand.Reader: %d/%s", n, err) } bytesLeft := len(zBytes) for i := uint8(0); i < nChunks; i++ { buf.Reset() // manually write header. Don't care about // host/network byte order, because the spec only // deals in individual bytes. buf.Write(magicChunked) //magic buf.Write(msgId) buf.WriteByte(i) buf.WriteByte(nChunks) // slice out our chunk from zBytes chunkLen := chunkedDataLen if chunkLen > bytesLeft { chunkLen = bytesLeft } off := int(i) * chunkedDataLen chunk := zBytes[off : off+chunkLen] buf.Write(chunk) // write this chunk, and make sure the write was good n, err := w.conn.Write(buf.Bytes()) if err != nil { return fmt.Errorf("Write (chunk %d/%d): %s", i, nChunks, err) } if n != len(buf.Bytes()) { return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)", i, nChunks, n, len(buf.Bytes())) } bytesLeft -= chunkLen } if bytesLeft != 0 { return fmt.Errorf("error: %d bytes left after sending", bytesLeft) } return nil } // WriteMessage sends the specified message to the GELF server // specified in the call to New(). It assumes all the fields are // filled out appropriately. In general, clients will want to use // Write, rather than WriteMessage. 
func (w *Writer) WriteMessage(m *Message) (err error) { mBytes, err := json.Marshal(m) if err != nil { return } var zBuf bytes.Buffer var zw io.WriteCloser switch w.CompressionType { case CompressGzip: zw, err = gzip.NewWriterLevel(&zBuf, w.CompressionLevel) case CompressZlib: zw, err = zlib.NewWriterLevel(&zBuf, w.CompressionLevel) default: panic(fmt.Sprintf("unknown compression type %d", w.CompressionType)) } if err != nil { return } if _, err = zw.Write(mBytes); err != nil { return } zw.Close() zBytes := zBuf.Bytes() if numChunks(zBytes) > 1 { return w.writeChunked(zBytes) } n, err := w.conn.Write(zBytes) if err != nil { return } if n != len(zBytes) { return fmt.Errorf("bad write (%d/%d)", n, len(zBytes)) } return nil } // Close connection and interrupt blocked Read or Write operations func (w *Writer) Close() (error) { return w.conn.Close() } /* func (w *Writer) Alert(m string) (err error) func (w *Writer) Close() error func (w *Writer) Crit(m string) (err error) func (w *Writer) Debug(m string) (err error) func (w *Writer) Emerg(m string) (err error) func (w *Writer) Err(m string) (err error) func (w *Writer) Info(m string) (err error) func (w *Writer) Notice(m string) (err error) func (w *Writer) Warning(m string) (err error) */ // getCaller returns the filename and the line info of a function // further down in the call stack. Passing 0 in as callDepth would // return info on the function calling getCallerIgnoringLog, 1 the // parent function, and so on. Any suffixes passed to getCaller are // path fragments like "/pkg/log/log.go", and functions in the call // stack from that file are ignored. func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) { // bump by 1 to ignore the getCaller (this) stackframe callDepth++ outer: for { var ok bool _, file, line, ok = runtime.Caller(callDepth) if !ok { file = "???" 
line = 0 break } for _, s := range suffixesToIgnore { if strings.HasSuffix(file, s) { callDepth++ continue outer } } break } return } func getCallerIgnoringLogMulti(callDepth int) (string, int) { // the +1 is to ignore this (getCallerIgnoringLogMulti) frame return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go") } // Write encodes the given string in a GELF message and sends it to // the server specified in New(). func (w *Writer) Write(p []byte) (n int, err error) { // 1 for the function that called us. file, line := getCallerIgnoringLogMulti(1) // remove trailing and leading whitespace p = bytes.TrimSpace(p) // If there are newlines in the message, use the first line // for the short message and set the full message to the // original input. If the input has no newlines, stick the // whole thing in Short. short := p full := []byte("") if i := bytes.IndexRune(p, '\n'); i > 0 { short = p[:i] full = p } m := Message{ Version: "1.1", Host: w.hostname, Short: string(short), Full: string(full), TimeUnix: float64(time.Now().Unix()), Level: 6, // info Facility: w.Facility, Extra: map[string]interface{}{ "_file": file, "_line": line, }, } if err = w.WriteMessage(&m); err != nil { return 0, err } return len(p), nil } func (m *Message) MarshalJSON() ([]byte, error) { var err error var b, eb []byte extra := m.Extra b, err = json.Marshal((*innerMessage)(m)) m.Extra = extra if err != nil { return nil, err } if len(extra) == 0 { return b, nil } if eb, err = json.Marshal(extra); err != nil { return nil, err } // merge serialized message + serialized extra map b[len(b)-1] = ',' return append(b, eb[1:len(eb)]...), nil } func (m *Message) UnmarshalJSON(data []byte) error { i := make(map[string]interface{}, 16) if err := json.Unmarshal(data, &i); err != nil { return err } for k, v := range i { if k[0] == '_' { if m.Extra == nil { m.Extra = make(map[string]interface{}, 1) } m.Extra[k] = v continue } switch k { case "version": m.Version = v.(string) case "host": m.Host = 
v.(string) case "short_message": m.Short = v.(string) case "full_message": m.Full = v.(string) case "timestamp": m.TimeUnix = v.(float64) case "level": m.Level = int32(v.(float64)) case "facility": m.Facility = v.(string) } } return nil } docker-1.10.3/vendor/src/github.com/RackSec/000077500000000000000000000000001267010174400205035ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/RackSec/srslog/000077500000000000000000000000001267010174400220145ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/RackSec/srslog/.gitignore000066400000000000000000000000071267010174400240010ustar00rootroot00000000000000.cover docker-1.10.3/vendor/src/github.com/RackSec/srslog/.travis.yml000066400000000000000000000014761267010174400241350ustar00rootroot00000000000000sudo: required dist: trusty group: edge language: go go: - 1.5 script: - | go get ./... go test -v ./... notifications: slack: secure: dtDue9gP6CRR1jYjEf6raXXFak3QKGcCFvCf5mfvv5XScdpmc3udwgqc5TdyjC0goaC9OK/4jTcCD30dYZm/u6ux3E9mo3xwMl2xRLHx76p5r9rSQtloH19BDwA2+A+bpDfFQVz05k2YXuTiGSvNMMdwzx+Dr294Sl/z43RFB4+b9/R/6LlFpRW89IwftvpLAFnBy4K/ZcspQzKM+rQfQTL5Kk+iZ/KBsuR/VziDq6MoJ8t43i4ee8vwS06vFBKDbUiZ4FIZpLgc2RAL5qso5aWRKYXL6waXfoKHZWKPe0w4+9IY1rDJxG1jEb7YGgcbLaF9xzPRRs2b2yO/c87FKpkh6PDgYHfLjpgXotCoojZrL4p1x6MI1ldJr3NhARGPxS9r4liB9n6Y5nD+ErXi1IMf55fuUHcPY27Jc0ySeLFeM6cIWJ8OhFejCgGw6a5DnnmJo0PqopsaBDHhadpLejT1+K6bL2iGkT4SLcVNuRGLs+VyuNf1+5XpkWZvy32vquO7SZOngLLBv+GIem+t3fWm0Z9s/0i1uRCQei1iUutlYjoV/LBd35H2rhob4B5phIuJin9kb0zbHf6HnaoN0CtN8r0d8G5CZiInVlG5Xcid5Byb4dddf5U2EJTDuCMVyyiM7tcnfjqw9UbVYNxtYM9SzcqIq+uVqM8pYL9xSec= docker-1.10.3/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md000066400000000000000000000045251267010174400246210ustar00rootroot00000000000000# Contributor Code of Conduct As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, 
updating documentation, submitting pull requests or patches, and other activities. We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality. Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery * Personal attacks * Trolling or insulting/derogatory comments * Public or private harassment * Publishing other's private information, such as physical or electronic addresses, without explicit permission * Other unethical or unprofessional conduct Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team. This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a project maintainer at [sirsean@gmail.com]. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. Maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. 
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.3.0, available at [http://contributor-covenant.org/version/1/3/0/][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/3/0/ docker-1.10.3/vendor/src/github.com/RackSec/srslog/LICENSE000066400000000000000000000027021267010174400230220ustar00rootroot00000000000000Copyright (c) 2015 Rackspace. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
docker-1.10.3/vendor/src/github.com/RackSec/srslog/README.md000066400000000000000000000062521267010174400233000ustar00rootroot00000000000000[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog) # srslog Go has a `syslog` package in the standard library, but it has the following shortcomings: 1. It doesn't have TLS support 2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716) I agree that it doesn't need to be in the standard library. So, I've followed Brad's suggestion and have made a separate project to handle syslog. This code was taken directly from the Go project as a base to start from. However, this _does_ have TLS support. # Usage Basic usage retains the same interface as the original `syslog` package. We only added to the interface where required to support new functionality. Switch from the standard library: ``` import( //"log/syslog" syslog "github.com/RackSec/srslog" ) ``` You can still use it for local syslog: ``` w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag") ``` Or to unencrypted UDP: ``` w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag") ``` Or to unencrypted TCP: ``` w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag") ``` But now you can also send messages via TLS-encrypted TCP: ``` w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem") ``` And if you need more control over your TLS configuration : ``` pool := x509.NewCertPool() serverCert, err := ioutil.ReadFile("/path/to/servercert.pem") if err != nil { return nil, err } pool.AppendCertsFromPEM(serverCert) config := tls.Config{ RootCAs: pool, } w, err := DialWithTLSConfig(network, raddr, priority, tag, &config) ``` (Note that in both TLS cases, this uses a self-signed certificate, where the remote syslog server has the keypair 
and the client has only the public key.) And then to write log messages, continue like so: ``` if err != nil { log.Fatal("failed to connect to syslog:", err) } defer w.Close() w.Alert("this is an alert") w.Crit("this is critical") w.Err("this is an error") w.Warning("this is a warning") w.Notice("this is a notice") w.Info("this is info") w.Debug("this is debug") w.Write([]byte("these are some bytes")) ``` # Generating TLS Certificates We've provided a script that you can use to generate a self-signed keypair: ``` pip install cryptography python script/gen-certs.py ``` That outputs the public key and private key to standard out. Put those into `.pem` files. (And don't put them into any source control. The certificate in the `test` directory is used by the unit tests, and please do not actually use it anywhere else.) # Running Tests Run the tests as usual: ``` go test ``` But we've also provided a test coverage script that will show you which lines of code are not covered: ``` script/coverage --html ``` That will open a new browser tab showing coverage information. # License This project uses the New BSD License, the same as the Go project itself. # Code of Conduct Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms. docker-1.10.3/vendor/src/github.com/RackSec/srslog/constants.go000066400000000000000000000021601267010174400243560ustar00rootroot00000000000000package srslog import ( "errors" ) // Priority is a combination of the syslog facility and // severity. For example, LOG_ALERT | LOG_FTP sends an alert severity // message from the FTP facility. The default severity is LOG_EMERG; // the default facility is LOG_KERN. type Priority int const severityMask = 0x07 const facilityMask = 0xf8 const ( // Severity. // From /usr/include/sys/syslog.h. // These are the same on Linux, BSD, and OS X. 
LOG_EMERG Priority = iota LOG_ALERT LOG_CRIT LOG_ERR LOG_WARNING LOG_NOTICE LOG_INFO LOG_DEBUG ) const ( // Facility. // From /usr/include/sys/syslog.h. // These are the same up to LOG_FTP on Linux, BSD, and OS X. LOG_KERN Priority = iota << 3 LOG_USER LOG_MAIL LOG_DAEMON LOG_AUTH LOG_SYSLOG LOG_LPR LOG_NEWS LOG_UUCP LOG_CRON LOG_AUTHPRIV LOG_FTP _ // unused _ // unused _ // unused _ // unused LOG_LOCAL0 LOG_LOCAL1 LOG_LOCAL2 LOG_LOCAL3 LOG_LOCAL4 LOG_LOCAL5 LOG_LOCAL6 LOG_LOCAL7 ) func validatePriority(p Priority) error { if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { return errors.New("log/syslog: invalid priority") } else { return nil } } docker-1.10.3/vendor/src/github.com/RackSec/srslog/dialer.go000066400000000000000000000020571267010174400236070ustar00rootroot00000000000000package srslog import ( "crypto/tls" "net" ) func (w Writer) getDialer() func() (serverConn, string, error) { dialers := map[string]func() (serverConn, string, error){ "": w.unixDialer, "tcp+tls": w.tlsDialer, } dialer, ok := dialers[w.network] if !ok { dialer = w.basicDialer } return dialer } func (w Writer) unixDialer() (serverConn, string, error) { sc, err := unixSyslog() hostname := w.hostname if hostname == "" { hostname = "localhost" } return sc, hostname, err } func (w Writer) tlsDialer() (serverConn, string, error) { c, err := tls.Dial("tcp", w.raddr, w.tlsConfig) var sc serverConn hostname := w.hostname if err == nil { sc = &netConn{conn: c} if hostname == "" { hostname = c.LocalAddr().String() } } return sc, hostname, err } func (w Writer) basicDialer() (serverConn, string, error) { c, err := net.Dial(w.network, w.raddr) var sc serverConn hostname := w.hostname if err == nil { sc = &netConn{conn: c} if hostname == "" { hostname = c.LocalAddr().String() } } return sc, hostname, err } docker-1.10.3/vendor/src/github.com/RackSec/srslog/net_conn.go000066400000000000000000000006171267010174400241520ustar00rootroot00000000000000package srslog import ( "fmt" "net" "os" "time" ) type netConn 
struct { conn net.Conn } func (n *netConn) writeString(p Priority, hostname, tag, msg string) error { timestamp := time.Now().Format(time.RFC3339) _, err := fmt.Fprintf(n.conn, "<%d>%s %s %s[%d]: %s", p, timestamp, hostname, tag, os.Getpid(), msg) return err } func (n *netConn) close() error { return n.conn.Close() } docker-1.10.3/vendor/src/github.com/RackSec/srslog/srslog.go000066400000000000000000000055211267010174400236570ustar00rootroot00000000000000package srslog import ( "crypto/tls" "crypto/x509" "io/ioutil" "log" "os" ) // This interface and the separate syslog_unix.go file exist for // Solaris support as implemented by gccgo. On Solaris you can not // simply open a TCP connection to the syslog daemon. The gccgo // sources have a syslog_solaris.go file that implements unixSyslog to // return a type that satisfies this interface and simply calls the C // library syslog function. type serverConn interface { writeString(p Priority, hostname, tag, s string) error close() error } // New establishes a new connection to the system log daemon. Each // write to the returned Writer sends a log message with the given // priority and prefix. func New(priority Priority, tag string) (w *Writer, err error) { return Dial("", "", priority, tag) } // Dial establishes a connection to a log daemon by connecting to // address raddr on the specified network. Each write to the returned // Writer sends a log message with the given facility, severity and // tag. // If network is empty, Dial will connect to the local syslog server. func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) { return DialWithTLSConfig(network, raddr, priority, tag, nil) } // DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to // address raddr on the specified network. It uses certPath to load TLS certificates and configure // the secure connection. 
func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) { pool := x509.NewCertPool() serverCert, err := ioutil.ReadFile(certPath) if err != nil { return nil, err } pool.AppendCertsFromPEM(serverCert) config := tls.Config{ RootCAs: pool, } return DialWithTLSConfig(network, raddr, priority, tag, &config) } // DialWithTLSConfig establishes a secure connection to a log daemon by connecting to // address raddr on the specified network. It uses tlsConfig to configure the secure connection. func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) { if err := validatePriority(priority); err != nil { return nil, err } if tag == "" { tag = os.Args[0] } hostname, _ := os.Hostname() w := &Writer{ priority: priority, tag: tag, hostname: hostname, network: network, raddr: raddr, tlsConfig: tlsConfig, } w.Lock() defer w.Unlock() err := w.connect() if err != nil { return nil, err } return w, err } // NewLogger creates a log.Logger whose output is written to // the system log service with the specified priority. The logFlag // argument is the flag set passed through to log.New to create // the Logger. func NewLogger(p Priority, logFlag int) (*log.Logger, error) { s, err := New(p, "") if err != nil { return nil, err } return log.New(s, "", logFlag), nil } docker-1.10.3/vendor/src/github.com/RackSec/srslog/srslog_unix.go000066400000000000000000000021161267010174400247170ustar00rootroot00000000000000package srslog import ( "errors" "fmt" "net" "os" "time" ) // unixSyslog opens a connection to the syslog daemon running on the // local machine using a Unix domain socket. 
func unixSyslog() (conn serverConn, err error) { logTypes := []string{"unixgram", "unix"} logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} for _, network := range logTypes { for _, path := range logPaths { conn, err := net.Dial(network, path) if err != nil { continue } else { return &localConn{conn: conn}, nil } } } return nil, errors.New("Unix syslog delivery error") } type localConn struct { conn net.Conn } func (n *localConn) writeString(p Priority, hostname, tag, msg string) error { // Compared to the network form at srslog.netConn, the changes are: // 1. Use time.Stamp instead of time.RFC3339. // 2. Drop the hostname field from the Fprintf. timestamp := time.Now().Format(time.Stamp) _, err := fmt.Fprintf(n.conn, "<%d>%s %s[%d]: %s", p, timestamp, tag, os.Getpid(), msg) return err } func (n *localConn) close() error { return n.conn.Close() } docker-1.10.3/vendor/src/github.com/RackSec/srslog/writer.go000066400000000000000000000075771267010174400236770ustar00rootroot00000000000000package srslog import ( "crypto/tls" "strings" "sync" ) // A Writer is a connection to a syslog server. type Writer struct { sync.Mutex // guards conn priority Priority tag string hostname string network string raddr string tlsConfig *tls.Config conn serverConn } // connect makes a connection to the syslog server. // It must be called with w.mu held. func (w *Writer) connect() (err error) { if w.conn != nil { // ignore err from close, it makes sense to continue anyway w.conn.close() w.conn = nil } var conn serverConn var hostname string dialer := w.getDialer() conn, hostname, err = dialer() if err == nil { w.conn = conn w.hostname = hostname } return } // Write sends a log message to the syslog daemon using the default priority // passed into `srslog.New` or the `srslog.Dial*` functions. func (w *Writer) Write(b []byte) (int, error) { return w.writeAndRetry(w.priority, string(b)) } // Close closes a connection to the syslog daemon. 
func (w *Writer) Close() error { w.Lock() defer w.Unlock() if w.conn != nil { err := w.conn.close() w.conn = nil return err } return nil } // Emerg logs a message with severity LOG_EMERG; this overrides the default // priority passed to `srslog.New` and the `srslog.Dial*` functions. func (w *Writer) Emerg(m string) (err error) { _, err = w.writeAndRetry(LOG_EMERG, m) return err } // Alert logs a message with severity LOG_ALERT; this overrides the default // priority passed to `srslog.New` and the `srslog.Dial*` functions. func (w *Writer) Alert(m string) (err error) { _, err = w.writeAndRetry(LOG_ALERT, m) return err } // Crit logs a message with severity LOG_CRIT; this overrides the default // priority passed to `srslog.New` and the `srslog.Dial*` functions. func (w *Writer) Crit(m string) (err error) { _, err = w.writeAndRetry(LOG_CRIT, m) return err } // Err logs a message with severity LOG_ERR; this overrides the default // priority passed to `srslog.New` and the `srslog.Dial*` functions. func (w *Writer) Err(m string) (err error) { _, err = w.writeAndRetry(LOG_ERR, m) return err } // Warning logs a message with severity LOG_WARNING; this overrides the default // priority passed to `srslog.New` and the `srslog.Dial*` functions. func (w *Writer) Warning(m string) (err error) { _, err = w.writeAndRetry(LOG_WARNING, m) return err } // Notice logs a message with severity LOG_NOTICE; this overrides the default // priority passed to `srslog.New` and the `srslog.Dial*` functions. func (w *Writer) Notice(m string) (err error) { _, err = w.writeAndRetry(LOG_NOTICE, m) return err } // Info logs a message with severity LOG_INFO; this overrides the default // priority passed to `srslog.New` and the `srslog.Dial*` functions. func (w *Writer) Info(m string) (err error) { _, err = w.writeAndRetry(LOG_INFO, m) return err } // Debug logs a message with severity LOG_DEBUG; this overrides the default // priority passed to `srslog.New` and the `srslog.Dial*` functions. 
func (w *Writer) Debug(m string) (err error) { _, err = w.writeAndRetry(LOG_DEBUG, m) return err } func (w *Writer) writeAndRetry(p Priority, s string) (int, error) { pr := (w.priority & facilityMask) | (p & severityMask) w.Lock() defer w.Unlock() if w.conn != nil { if n, err := w.write(pr, s); err == nil { return n, err } } if err := w.connect(); err != nil { return 0, err } return w.write(pr, s) } // write generates and writes a syslog formatted string. The // format is as follows: TIMESTAMP HOSTNAME TAG[PID]: MSG func (w *Writer) write(p Priority, msg string) (int, error) { // ensure it ends in a \n if !strings.HasSuffix(msg, "\n") { msg += "\n" } err := w.conn.writeString(p, w.hostname, w.tag, msg) if err != nil { return 0, err } // Note: return the length of the input, not the number of // bytes printed by Fprintf, because this must behave like // an io.Writer. return len(msg), nil } docker-1.10.3/vendor/src/github.com/Sirupsen/000077500000000000000000000000001267010174400210005ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/000077500000000000000000000000001267010174400223135ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/.gitignore000066400000000000000000000000071267010174400243000ustar00rootroot00000000000000logrus docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/.travis.yml000066400000000000000000000001161267010174400244220ustar00rootroot00000000000000language: go go: - 1.2 - 1.3 - 1.4 - tip install: - go get -t ./... 
docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md000066400000000000000000000006141267010174400241250ustar00rootroot00000000000000# 0.8.2 logrus: fix more Fatal family functions # 0.8.1 logrus: fix not exiting on `Fatalf` and `Fatalln` # 0.8.0 logrus: defaults to stderr instead of stdout hooks/sentry: add special field for `*http.Request` formatter/text: ignore Windows for colors # 0.7.3 formatter/\*: allow configuration of timestamp layout # 0.7.2 formatter/text: Add configuration option for time format (#158) docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/LICENSE000066400000000000000000000020721267010174400233210ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2014 Simon Eskildsen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/README.md000066400000000000000000000301671267010174400236010ustar00rootroot00000000000000# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not yet stable (pre 1.0). Logrus itself is completely stable and has been used in many large deployments. The core API is unlikely to change much but please version control your Logrus to make sure you aren't fetching latest `master` on every build.** Nicely color-coded in development (when a TTY is attached, otherwise just plain text): ![Colored](http://i.imgur.com/PY7qMwd.png) With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash or Splunk: ```json {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} {"level":"warning","msg":"The group's number increased tremendously!", "number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} {"animal":"walrus","level":"info","msg":"A giant walrus appears!", "size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} {"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", "size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} {"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, "time":"2014-03-10 19:57:38.562543128 -0400 EDT"} ``` With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not attached, the output is compatible with the [logfmt](http://godoc.org/github.com/kr/logfmt) format: ```text time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 time="2015-03-26T01:27:38-04:00" level=info 
msg="A group of walrus emerges from the ocean" animal=walrus size=10 time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true exit status 1 ``` #### Example The simplest way to use Logrus is simply the package-level exported logger: ```go package main import ( log "github.com/Sirupsen/logrus" ) func main() { log.WithFields(log.Fields{ "animal": "walrus", }).Info("A walrus appears") } ``` Note that it's completely api-compatible with the stdlib logger, so you can replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` and you'll now have the flexibility of Logrus. You can customize it all you want: ```go package main import ( "os" log "github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus/hooks/airbrake" ) func init() { // Log as JSON instead of the default ASCII formatter. log.SetFormatter(&log.JSONFormatter{}) // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. You can create custom hooks, see the Hooks section. log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) // Output to stderr instead of stdout, could also be a file. log.SetOutput(os.Stderr) // Only log the warning severity or above. 
log.SetLevel(log.WarnLevel) } func main() { log.WithFields(log.Fields{ "animal": "walrus", "size": 10, }).Info("A group of walrus emerges from the ocean") log.WithFields(log.Fields{ "omg": true, "number": 122, }).Warn("The group's number increased tremendously!") log.WithFields(log.Fields{ "omg": true, "number": 100, }).Fatal("The ice breaks!") // A common pattern is to re-use fields between logging statements by re-using // the logrus.Entry returned from WithFields() contextLogger := log.WithFields(log.Fields{ "common": "this is a common field", "other": "I also should be logged always", }) contextLogger.Info("I'll be logged with common and other field") contextLogger.Info("Me too") } ``` For more advanced usage such as logging to multiple locations from the same application, you can also create an instance of the `logrus` Logger: ```go package main import ( "github.com/Sirupsen/logrus" ) // Create a new instance of the logger. You can have any number of instances. var log = logrus.New() func main() { // The API for setting attributes is a little different than the package level // exported logger. See Godoc. log.Out = os.Stderr log.WithFields(logrus.Fields{ "animal": "walrus", "size": 10, }).Info("A group of walrus emerges from the ocean") } ``` #### Fields Logrus encourages careful, structured logging though logging fields instead of long, unparseable error messages. For example, instead of: `log.Fatalf("Failed to send event %s to topic %s with key %d")`, you should log the much more discoverable: ```go log.WithFields(log.Fields{ "event": event, "topic": topic, "key": key, }).Fatal("Failed to send event") ``` We've found this API forces you to think about logging in a way that produces much more useful logging messages. We've been in countless situations where just a single added field to a log statement that was already there would've saved us hours. The `WithFields` call is optional. 
In general, with Logrus using any of the `printf`-family functions should be seen as a hint you should add a field, however, you can still use the `printf`-family functions with Logrus. #### Hooks You can add hooks for logging levels. For example to send errors to an exception tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to multiple places simultaneously, e.g. syslog. Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in `init`: ```go import ( log "github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus/hooks/airbrake" "github.com/Sirupsen/logrus/hooks/syslog" "log/syslog" ) func init() { log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { log.Error("Unable to connect to local syslog daemon") } else { log.AddHook(hook) } } ``` | Hook | Description | | ----- | ----------- | | [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | | [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | | [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | | [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | | [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | | [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | | [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. 
| | [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | | [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | | [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | #### Level logging Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. ```go log.Debug("Useful debugging information.") log.Info("Something noteworthy happened!") log.Warn("You should probably take a look at this.") log.Error("Something failed but I'm not quitting.") // Calls os.Exit(1) after logging log.Fatal("Bye.") // Calls panic() after logging log.Panic("I'm bailing.") ``` You can set the logging level on a `Logger`, then it will only log entries with that severity or anything above it: ```go // Will log anything that is info or above (warn, error, fatal, panic). Default. log.SetLevel(log.InfoLevel) ``` It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. #### Entries Besides the fields added with `WithField` or `WithFields` some fields are automatically added to all logging events: 1. `time`. The timestamp when the entry was created. 2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after the `AddFields` call. E.g. `Failed to send event.` 3. `level`. The logging level. E.g. `info`. #### Environments Logrus has no notion of environment. If you wish for hooks and formatters to only be used in specific environments, you should handle that yourself. 
For example, if your application has a global variable `Environment`, which is a string representation of the environment you could do: ```go import ( log "github.com/Sirupsen/logrus" ) init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { log.SetFormatter(logrus.JSONFormatter) } else { // The TextFormatter is default, you don't actually have to do this. log.SetFormatter(&log.TextFormatter{}) } } ``` This configuration is how `logrus` was intended to be used, but JSON in production is mostly only useful if you do log aggregation with tools like Splunk or Logstash. #### Formatters The built-in logging formatters are: * `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise without colors. * *Note:* to force colored output when there is no TTY, set the `ForceColors` field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true` * `logrus.JSONFormatter`. Logs fields as JSON. * `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). ```go logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) ``` Third party logging formatters: * [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a `Fields` type (`map[string]interface{}`) with all your fields as well as the default ones (see Entries section above): ```go type MyJSONFormatter struct { } log.SetFormatter(new(MyJSONFormatter)) func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { // Note this doesn't include Time, Level and Message which are available on // the Entry. Consult `godoc` on information about those fields or read the // source of the official loggers. 
serialized, err := json.Marshal(entry.Data) if err != nil { return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) } return append(serialized, '\n'), nil } ``` #### Logger as an `io.Writer` Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. ```go w := logger.Writer() defer w.Close() srv := http.Server{ // create a stdlib log.Logger that writes to // logrus.Logger. ErrorLog: log.New(w, "", 0), } ``` Each line written to that writer will be printed the usual way, using formatters and hooks. The level for those entries is `info`. #### Rotation Log rotation is not provided with Logrus. Log rotation should be done by an external program (like `logrotate(8)`) that can compress and delete old log entries. It should not be a feature of the application-level logger. [godoc]: https://godoc.org/github.com/Sirupsen/logrus docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/entry.go000066400000000000000000000140301267010174400240010ustar00rootroot00000000000000package logrus import ( "bytes" "fmt" "io" "os" "time" ) // An entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Debug, Info, // Warn, Error, Fatal or Panic is called on it. These objects can be reused and // passed around as much as you wish to avoid field duplication. type Entry struct { Logger *Logger // Contains all the fields set by the user. Data Fields // Time at which the log entry was created Time time.Time // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic Level Level // Message passed to Debug, Info, Warn, Error, Fatal or Panic Message string } func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, // Default is three fields, give a little extra room Data: make(Fields, 5), } } // Returns a reader for the entry, which is a proxy to the formatter. 
func (entry *Entry) Reader() (*bytes.Buffer, error) { serialized, err := entry.Logger.Formatter.Format(entry) return bytes.NewBuffer(serialized), err } // Returns the string representation from the reader and ultimately the // formatter. func (entry *Entry) String() (string, error) { reader, err := entry.Reader() if err != nil { return "", err } return reader.String(), err } // Add a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) } // Add a map of fields to the Entry. func (entry *Entry) WithFields(fields Fields) *Entry { data := Fields{} for k, v := range entry.Data { data[k] = v } for k, v := range fields { data[k] = v } return &Entry{Logger: entry.Logger, Data: data} } func (entry *Entry) log(level Level, msg string) { entry.Time = time.Now() entry.Level = level entry.Message = msg if err := entry.Logger.Hooks.Fire(level, entry); err != nil { entry.Logger.mu.Lock() fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) entry.Logger.mu.Unlock() } reader, err := entry.Reader() if err != nil { entry.Logger.mu.Lock() fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) entry.Logger.mu.Unlock() } entry.Logger.mu.Lock() defer entry.Logger.mu.Unlock() _, err = io.Copy(entry.Logger.Out, reader) if err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } // To avoid Entry#log() returning a value that only would make sense for // panic() to use in Entry#Panic(), we avoid the allocation by checking // directly here. if level <= PanicLevel { panic(entry) } } func (entry *Entry) Debug(args ...interface{}) { if entry.Logger.Level >= DebugLevel { entry.log(DebugLevel, fmt.Sprint(args...)) } } func (entry *Entry) Print(args ...interface{}) { entry.Info(args...) 
} func (entry *Entry) Info(args ...interface{}) { if entry.Logger.Level >= InfoLevel { entry.log(InfoLevel, fmt.Sprint(args...)) } } func (entry *Entry) Warn(args ...interface{}) { if entry.Logger.Level >= WarnLevel { entry.log(WarnLevel, fmt.Sprint(args...)) } } func (entry *Entry) Warning(args ...interface{}) { entry.Warn(args...) } func (entry *Entry) Error(args ...interface{}) { if entry.Logger.Level >= ErrorLevel { entry.log(ErrorLevel, fmt.Sprint(args...)) } } func (entry *Entry) Fatal(args ...interface{}) { if entry.Logger.Level >= FatalLevel { entry.log(FatalLevel, fmt.Sprint(args...)) } os.Exit(1) } func (entry *Entry) Panic(args ...interface{}) { if entry.Logger.Level >= PanicLevel { entry.log(PanicLevel, fmt.Sprint(args...)) } panic(fmt.Sprint(args...)) } // Entry Printf family functions func (entry *Entry) Debugf(format string, args ...interface{}) { if entry.Logger.Level >= DebugLevel { entry.Debug(fmt.Sprintf(format, args...)) } } func (entry *Entry) Infof(format string, args ...interface{}) { if entry.Logger.Level >= InfoLevel { entry.Info(fmt.Sprintf(format, args...)) } } func (entry *Entry) Printf(format string, args ...interface{}) { entry.Infof(format, args...) } func (entry *Entry) Warnf(format string, args ...interface{}) { if entry.Logger.Level >= WarnLevel { entry.Warn(fmt.Sprintf(format, args...)) } } func (entry *Entry) Warningf(format string, args ...interface{}) { entry.Warnf(format, args...) 
} func (entry *Entry) Errorf(format string, args ...interface{}) { if entry.Logger.Level >= ErrorLevel { entry.Error(fmt.Sprintf(format, args...)) } } func (entry *Entry) Fatalf(format string, args ...interface{}) { if entry.Logger.Level >= FatalLevel { entry.Fatal(fmt.Sprintf(format, args...)) } os.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { if entry.Logger.Level >= PanicLevel { entry.Panic(fmt.Sprintf(format, args...)) } } // Entry Println family functions func (entry *Entry) Debugln(args ...interface{}) { if entry.Logger.Level >= DebugLevel { entry.Debug(entry.sprintlnn(args...)) } } func (entry *Entry) Infoln(args ...interface{}) { if entry.Logger.Level >= InfoLevel { entry.Info(entry.sprintlnn(args...)) } } func (entry *Entry) Println(args ...interface{}) { entry.Infoln(args...) } func (entry *Entry) Warnln(args ...interface{}) { if entry.Logger.Level >= WarnLevel { entry.Warn(entry.sprintlnn(args...)) } } func (entry *Entry) Warningln(args ...interface{}) { entry.Warnln(args...) } func (entry *Entry) Errorln(args ...interface{}) { if entry.Logger.Level >= ErrorLevel { entry.Error(entry.sprintlnn(args...)) } } func (entry *Entry) Fatalln(args ...interface{}) { if entry.Logger.Level >= FatalLevel { entry.Fatal(entry.sprintlnn(args...)) } os.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { if entry.Logger.Level >= PanicLevel { entry.Panic(entry.sprintlnn(args...)) } } // Sprintlnn => Sprint no newline. This is to get the behavior of how // fmt.Sprintln where spaces are always added between operands, regardless of // their type. Instead of vendoring the Sprintln implementation to spare a // string allocation, we do the simplest thing. func (entry *Entry) sprintlnn(args ...interface{}) string { msg := fmt.Sprintln(args...) 
return msg[:len(msg)-1] } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/exported.go000066400000000000000000000111051267010174400244720ustar00rootroot00000000000000package logrus import ( "io" ) var ( // std is the name of the standard logger in stdlib `log` std = New() ) func StandardLogger() *Logger { return std } // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { std.mu.Lock() defer std.mu.Unlock() std.Out = out } // SetFormatter sets the standard logger formatter. func SetFormatter(formatter Formatter) { std.mu.Lock() defer std.mu.Unlock() std.Formatter = formatter } // SetLevel sets the standard logger level. func SetLevel(level Level) { std.mu.Lock() defer std.mu.Unlock() std.Level = level } // GetLevel returns the standard logger level. func GetLevel() Level { std.mu.Lock() defer std.mu.Unlock() return std.Level } // AddHook adds a hook to the standard logger hooks. func AddHook(hook Hook) { std.mu.Lock() defer std.mu.Unlock() std.Hooks.Add(hook) } // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal // or Panic on the Entry it returns. func WithField(key string, value interface{}) *Entry { return std.WithField(key, value) } // WithFields creates an entry from the standard logger and adds multiple // fields to it. This is simply a helper for `WithField`, invoking it // once for each field. // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal // or Panic on the Entry it returns. func WithFields(fields Fields) *Entry { return std.WithFields(fields) } // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) } // Print logs a message at level Info on the standard logger. func Print(args ...interface{}) { std.Print(args...) } // Info logs a message at level Info on the standard logger. 
func Info(args ...interface{}) { std.Info(args...) } // Warn logs a message at level Warn on the standard logger. func Warn(args ...interface{}) { std.Warn(args...) } // Warning logs a message at level Warn on the standard logger. func Warning(args ...interface{}) { std.Warning(args...) } // Error logs a message at level Error on the standard logger. func Error(args ...interface{}) { std.Error(args...) } // Panic logs a message at level Panic on the standard logger. func Panic(args ...interface{}) { std.Panic(args...) } // Fatal logs a message at level Fatal on the standard logger. func Fatal(args ...interface{}) { std.Fatal(args...) } // Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) } // Printf logs a message at level Info on the standard logger. func Printf(format string, args ...interface{}) { std.Printf(format, args...) } // Infof logs a message at level Info on the standard logger. func Infof(format string, args ...interface{}) { std.Infof(format, args...) } // Warnf logs a message at level Warn on the standard logger. func Warnf(format string, args ...interface{}) { std.Warnf(format, args...) } // Warningf logs a message at level Warn on the standard logger. func Warningf(format string, args ...interface{}) { std.Warningf(format, args...) } // Errorf logs a message at level Error on the standard logger. func Errorf(format string, args ...interface{}) { std.Errorf(format, args...) } // Panicf logs a message at level Panic on the standard logger. func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) } // Fatalf logs a message at level Fatal on the standard logger. func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } // Debugln logs a message at level Debug on the standard logger. func Debugln(args ...interface{}) { std.Debugln(args...) } // Println logs a message at level Info on the standard logger. 
func Println(args ...interface{}) { std.Println(args...) } // Infoln logs a message at level Info on the standard logger. func Infoln(args ...interface{}) { std.Infoln(args...) } // Warnln logs a message at level Warn on the standard logger. func Warnln(args ...interface{}) { std.Warnln(args...) } // Warningln logs a message at level Warn on the standard logger. func Warningln(args ...interface{}) { std.Warningln(args...) } // Errorln logs a message at level Error on the standard logger. func Errorln(args ...interface{}) { std.Errorln(args...) } // Panicln logs a message at level Panic on the standard logger. func Panicln(args ...interface{}) { std.Panicln(args...) } // Fatalln logs a message at level Fatal on the standard logger. func Fatalln(args ...interface{}) { std.Fatalln(args...) } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/formatter.go000066400000000000000000000025671267010174400246570ustar00rootroot00000000000000package logrus import "time" const DefaultTimestampFormat = time.RFC3339 // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: // // * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. // * `entry.Data["time"]`. The timestamp. // * `entry.Data["level"]. The level the entry was logged at. // // Any additional fields added with `WithField` or `WithFields` are also in // `entry.Data`. Format is expected to return an array of bytes which are then // logged to `logger.Out`. type Formatter interface { Format(*Entry) ([]byte, error) } // This is to not silently overwrite `time`, `msg` and `level` fields when // dumping it. If this code wasn't there doing: // // logrus.WithField("level", 1).Info("hello") // // Would just silently drop the user provided level. 
Instead with this code // it'll logged as: // // {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. func prefixFieldClashes(data Fields) { _, ok := data["time"] if ok { data["fields.time"] = data["time"] } _, ok = data["msg"] if ok { data["fields.msg"] = data["msg"] } _, ok = data["level"] if ok { data["fields.level"] = data["level"] } } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/hooks.go000066400000000000000000000021151267010174400237640ustar00rootroot00000000000000package logrus // A hook to be fired when logging on the logging levels returned from // `Levels()` on your implementation of the interface. Note that this is not // fired in a goroutine or a channel with workers, you should handle such // functionality yourself if your call is non-blocking and you don't wish for // the logging calls for levels returned from `Levels()` to block. type Hook interface { Levels() []Level Fire(*Entry) error } // Internal type for storing the hooks on a logger instance. type levelHooks map[Level][]Hook // Add a hook to an instance of logger. This is called with // `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. func (hooks levelHooks) Add(hook Hook) { for _, level := range hook.Levels() { hooks[level] = append(hooks[level], hook) } } // Fire all the hooks for the passed level. Used by `entry.log` to fire // appropriate hooks for a log entry. 
func (hooks levelHooks) Fire(level Level, entry *Entry) error { for _, hook := range hooks[level] { if err := hook.Fire(entry); err != nil { return err } } return nil } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/json_formatter.go000066400000000000000000000016541267010174400257040ustar00rootroot00000000000000package logrus import ( "encoding/json" "fmt" ) type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. TimestampFormat string } func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields, len(entry.Data)+3) for k, v := range entry.Data { switch v := v.(type) { case error: // Otherwise errors are ignored by `encoding/json` // https://github.com/Sirupsen/logrus/issues/137 data[k] = v.Error() default: data[k] = v } } prefixFieldClashes(data) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = DefaultTimestampFormat } data["time"] = entry.Time.Format(timestampFormat) data["msg"] = entry.Message data["level"] = entry.Level.String() serialized, err := json.Marshal(data) if err != nil { return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) } return append(serialized, '\n'), nil } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/logger.go000066400000000000000000000125021267010174400241210ustar00rootroot00000000000000package logrus import ( "io" "os" "sync" ) type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // file, or leave it default which is `os.Stdout`. You can also set this to // something more adventorous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking // service, log to StatsD or dump the core on fatal errors. Hooks levelHooks // All log entries pass through the formatter before logged to Out. 
The // included formatters are `TextFormatter` and `JSONFormatter` for which // TextFormatter is the default. In development (when a TTY is attached) it // logs with colors, but to a file it wouldn't. You can easily implement your // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. `logrus.Debug` is useful in Level Level // Used to sync writing to the log. mu sync.Mutex } // Creates a new logger. Configuration should be set by changing `Formatter`, // `Out` and `Hooks` directly on the default logger instance. You can also just // instantiate your own: // // var log = &Logger{ // Out: os.Stderr, // Formatter: new(JSONFormatter), // Hooks: make(levelHooks), // Level: logrus.DebugLevel, // } // // It's recommended to make this a global instance called `log`. func New() *Logger { return &Logger{ Out: os.Stderr, Formatter: new(TextFormatter), Hooks: make(levelHooks), Level: InfoLevel, } } // Adds a field to the log entry, note that you it doesn't log until you call // Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. // Ff you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { return NewEntry(logger).WithField(key, value) } // Adds a struct of fields to the log entry. All it does is call `WithField` for // each `Field`. func (logger *Logger) WithFields(fields Fields) *Entry { return NewEntry(logger).WithFields(fields) } func (logger *Logger) Debugf(format string, args ...interface{}) { if logger.Level >= DebugLevel { NewEntry(logger).Debugf(format, args...) } } func (logger *Logger) Infof(format string, args ...interface{}) { if logger.Level >= InfoLevel { NewEntry(logger).Infof(format, args...) 
} } func (logger *Logger) Printf(format string, args ...interface{}) { NewEntry(logger).Printf(format, args...) } func (logger *Logger) Warnf(format string, args ...interface{}) { if logger.Level >= WarnLevel { NewEntry(logger).Warnf(format, args...) } } func (logger *Logger) Warningf(format string, args ...interface{}) { if logger.Level >= WarnLevel { NewEntry(logger).Warnf(format, args...) } } func (logger *Logger) Errorf(format string, args ...interface{}) { if logger.Level >= ErrorLevel { NewEntry(logger).Errorf(format, args...) } } func (logger *Logger) Fatalf(format string, args ...interface{}) { if logger.Level >= FatalLevel { NewEntry(logger).Fatalf(format, args...) } os.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { if logger.Level >= PanicLevel { NewEntry(logger).Panicf(format, args...) } } func (logger *Logger) Debug(args ...interface{}) { if logger.Level >= DebugLevel { NewEntry(logger).Debug(args...) } } func (logger *Logger) Info(args ...interface{}) { if logger.Level >= InfoLevel { NewEntry(logger).Info(args...) } } func (logger *Logger) Print(args ...interface{}) { NewEntry(logger).Info(args...) } func (logger *Logger) Warn(args ...interface{}) { if logger.Level >= WarnLevel { NewEntry(logger).Warn(args...) } } func (logger *Logger) Warning(args ...interface{}) { if logger.Level >= WarnLevel { NewEntry(logger).Warn(args...) } } func (logger *Logger) Error(args ...interface{}) { if logger.Level >= ErrorLevel { NewEntry(logger).Error(args...) } } func (logger *Logger) Fatal(args ...interface{}) { if logger.Level >= FatalLevel { NewEntry(logger).Fatal(args...) } os.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { if logger.Level >= PanicLevel { NewEntry(logger).Panic(args...) } } func (logger *Logger) Debugln(args ...interface{}) { if logger.Level >= DebugLevel { NewEntry(logger).Debugln(args...) 
} } func (logger *Logger) Infoln(args ...interface{}) { if logger.Level >= InfoLevel { NewEntry(logger).Infoln(args...) } } func (logger *Logger) Println(args ...interface{}) { NewEntry(logger).Println(args...) } func (logger *Logger) Warnln(args ...interface{}) { if logger.Level >= WarnLevel { NewEntry(logger).Warnln(args...) } } func (logger *Logger) Warningln(args ...interface{}) { if logger.Level >= WarnLevel { NewEntry(logger).Warnln(args...) } } func (logger *Logger) Errorln(args ...interface{}) { if logger.Level >= ErrorLevel { NewEntry(logger).Errorln(args...) } } func (logger *Logger) Fatalln(args ...interface{}) { if logger.Level >= FatalLevel { NewEntry(logger).Fatalln(args...) } os.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { if logger.Level >= PanicLevel { NewEntry(logger).Panicln(args...) } } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/logrus.go000066400000000000000000000045471267010174400241670ustar00rootroot00000000000000package logrus import ( "fmt" "log" ) // Fields type, used to pass to `WithFields`. type Fields map[string]interface{} // Level type type Level uint8 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { switch level { case DebugLevel: return "debug" case InfoLevel: return "info" case WarnLevel: return "warning" case ErrorLevel: return "error" case FatalLevel: return "fatal" case PanicLevel: return "panic" } return "unknown" } // ParseLevel takes a string level and returns the Logrus log level constant. func ParseLevel(lvl string) (Level, error) { switch lvl { case "panic": return PanicLevel, nil case "fatal": return FatalLevel, nil case "error": return ErrorLevel, nil case "warn", "warning": return WarnLevel, nil case "info": return InfoLevel, nil case "debug": return DebugLevel, nil } var l Level return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } // These are the different logging levels. 
You can set the logging level to log // on your instance of logger, obtained with `logrus.New()`. const ( // PanicLevel level, highest level of severity. Logs and then calls panic with the // message passed to Debug, Info, ... PanicLevel Level = iota // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the // logging level is set to Panic. FatalLevel // ErrorLevel level. Logs. Used for errors that should definitely be noted. // Commonly used for hooks to send errors to an error tracking service. ErrorLevel // WarnLevel level. Non-critical entries that deserve eyes. WarnLevel // InfoLevel level. General operational entries about what's going on inside the // application. InfoLevel // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel ) // Won't compile if StdLogger can't be realized by a log.Logger var _ StdLogger = &log.Logger{} // StdLogger is what your logrus-enabled library should take, that way // it'll accept a stdlib logger and a logrus logger. There's no standard // interface, this is the closest we get, unfortunately. type StdLogger interface { Print(...interface{}) Printf(string, ...interface{}) Println(...interface{}) Fatal(...interface{}) Fatalf(string, ...interface{}) Fatalln(...interface{}) Panic(...interface{}) Panicf(string, ...interface{}) Panicln(...interface{}) } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/terminal_darwin.go000066400000000000000000000004441267010174400260230ustar00rootroot00000000000000// Based on ssh/terminal: // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package logrus import "syscall" const ioctlReadTermios = syscall.TIOCGETA type Termios syscall.Termios docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/terminal_freebsd.go000066400000000000000000000005251267010174400261510ustar00rootroot00000000000000/* Go 1.2 doesn't include Termios for FreeBSD. 
This should be added in 1.3 and this could be merged with terminal_darwin. */ package logrus import ( "syscall" ) const ioctlReadTermios = syscall.TIOCGETA type Termios struct { Iflag uint32 Oflag uint32 Cflag uint32 Lflag uint32 Cc [20]uint8 Ispeed uint32 Ospeed uint32 } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go000066400000000000000000000004421267010174400256740ustar00rootroot00000000000000// Based on ssh/terminal: // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package logrus import "syscall" const ioctlReadTermios = syscall.TCGETS type Termios syscall.Termios docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go000066400000000000000000000010561267010174400267520ustar00rootroot00000000000000// Based on ssh/terminal: // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build linux darwin freebsd openbsd package logrus import ( "syscall" "unsafe" ) // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal() bool { fd := syscall.Stdout var termios Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go000066400000000000000000000001521267010174400261650ustar00rootroot00000000000000package logrus import "syscall" const ioctlReadTermios = syscall.TIOCGETA type Termios syscall.Termios docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go000066400000000000000000000011731267010174400262310ustar00rootroot00000000000000// Based on ssh/terminal: // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// +build windows package logrus import ( "syscall" "unsafe" ) var kernel32 = syscall.NewLazyDLL("kernel32.dll") var ( procGetConsoleMode = kernel32.NewProc("GetConsoleMode") ) // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal() bool { fd := syscall.Stdout var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/text_formatter.go000066400000000000000000000065331267010174400257200ustar00rootroot00000000000000package logrus import ( "bytes" "fmt" "runtime" "sort" "strings" "time" ) const ( nocolor = 0 red = 31 green = 32 yellow = 33 blue = 34 gray = 37 ) var ( baseTimestamp time.Time isTerminal bool ) func init() { baseTimestamp = time.Now() isTerminal = IsTerminal() } func miniTS() int { return int(time.Since(baseTimestamp) / time.Second) } type TextFormatter struct { // Set to true to bypass checking for a TTY before outputting colors. ForceColors bool // Force disabling colors. DisableColors bool // Disable timestamp logging. useful when output is redirected to logging // system that already adds timestamps. DisableTimestamp bool // Enable logging the full timestamp when a TTY is attached instead of just // the time passed since beginning of execution. FullTimestamp bool // TimestampFormat to use for display when a full timestamp is printed TimestampFormat string // The fields are sorted by default for a consistent output. For applications // that log extremely frequently and don't use the JSON formatter this may not // be desired. 
DisableSorting bool } func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { var keys []string = make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } if !f.DisableSorting { sort.Strings(keys) } b := &bytes.Buffer{} prefixFieldClashes(entry.Data) isColorTerminal := isTerminal && (runtime.GOOS != "windows") isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors if f.TimestampFormat == "" { f.TimestampFormat = DefaultTimestampFormat } if isColored { f.printColored(b, entry, keys) } else { if !f.DisableTimestamp { f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) } f.appendKeyValue(b, "level", entry.Level.String()) f.appendKeyValue(b, "msg", entry.Message) for _, key := range keys { f.appendKeyValue(b, key, entry.Data[key]) } } b.WriteByte('\n') return b.Bytes(), nil } func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { var levelColor int switch entry.Level { case DebugLevel: levelColor = gray case WarnLevel: levelColor = yellow case ErrorLevel, FatalLevel, PanicLevel: levelColor = red default: levelColor = blue } levelText := strings.ToUpper(entry.Level.String())[0:4] if !f.FullTimestamp { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) } else { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) } for _, k := range keys { v := entry.Data[k] fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) } } func needsQuoting(text string) bool { for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || ch == '-' || ch == '.') { return false } } return true } func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { switch value.(type) { case string: if needsQuoting(value.(string)) { fmt.Fprintf(b, "%v=%s ", key, value) } else { fmt.Fprintf(b, "%v=%q ", key, value) } case 
error: if needsQuoting(value.(error).Error()) { fmt.Fprintf(b, "%v=%s ", key, value) } else { fmt.Fprintf(b, "%v=%q ", key, value) } default: fmt.Fprintf(b, "%v=%v ", key, value) } } docker-1.10.3/vendor/src/github.com/Sirupsen/logrus/writer.go000066400000000000000000000010721267010174400241560ustar00rootroot00000000000000package logrus import ( "bufio" "io" "runtime" ) func (logger *Logger) Writer() *io.PipeWriter { reader, writer := io.Pipe() go logger.writerScanner(reader) runtime.SetFinalizer(writer, writerFinalizer) return writer } func (logger *Logger) writerScanner(reader *io.PipeReader) { scanner := bufio.NewScanner(reader) for scanner.Scan() { logger.Print(scanner.Text()) } if err := scanner.Err(); err != nil { logger.Errorf("Error while reading from Writer: %s", err) } reader.Close() } func writerFinalizer(writer *io.PipeWriter) { writer.Close() } docker-1.10.3/vendor/src/github.com/agl/000077500000000000000000000000001267010174400177335ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/agl/ed25519/000077500000000000000000000000001267010174400207315ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/agl/ed25519/LICENSE000066400000000000000000000027071267010174400217440ustar00rootroot00000000000000Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-1.10.3/vendor/src/github.com/agl/ed25519/ed25519.go000066400000000000000000000060221267010174400222560ustar00rootroot00000000000000// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package ed25519 implements the Ed25519 signature algorithm. See // http://ed25519.cr.yp.to/. package ed25519 // This code is a port of the public domain, "ref10" implementation of ed25519 // from SUPERCOP. import ( "crypto/sha512" "crypto/subtle" "io" "github.com/agl/ed25519/edwards25519" ) const ( PublicKeySize = 32 PrivateKeySize = 64 SignatureSize = 64 ) // GenerateKey generates a public/private key pair using randomness from rand. 
func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) { privateKey = new([64]byte) publicKey = new([32]byte) _, err = io.ReadFull(rand, privateKey[:32]) if err != nil { return nil, nil, err } h := sha512.New() h.Write(privateKey[:32]) digest := h.Sum(nil) digest[0] &= 248 digest[31] &= 127 digest[31] |= 64 var A edwards25519.ExtendedGroupElement var hBytes [32]byte copy(hBytes[:], digest) edwards25519.GeScalarMultBase(&A, &hBytes) A.ToBytes(publicKey) copy(privateKey[32:], publicKey[:]) return } // Sign signs the message with privateKey and returns a signature. func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte { h := sha512.New() h.Write(privateKey[:32]) var digest1, messageDigest, hramDigest [64]byte var expandedSecretKey [32]byte h.Sum(digest1[:0]) copy(expandedSecretKey[:], digest1[:]) expandedSecretKey[0] &= 248 expandedSecretKey[31] &= 63 expandedSecretKey[31] |= 64 h.Reset() h.Write(digest1[32:]) h.Write(message) h.Sum(messageDigest[:0]) var messageDigestReduced [32]byte edwards25519.ScReduce(&messageDigestReduced, &messageDigest) var R edwards25519.ExtendedGroupElement edwards25519.GeScalarMultBase(&R, &messageDigestReduced) var encodedR [32]byte R.ToBytes(&encodedR) h.Reset() h.Write(encodedR[:]) h.Write(privateKey[32:]) h.Write(message) h.Sum(hramDigest[:0]) var hramDigestReduced [32]byte edwards25519.ScReduce(&hramDigestReduced, &hramDigest) var s [32]byte edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) signature := new([64]byte) copy(signature[:], encodedR[:]) copy(signature[32:], s[:]) return signature } // Verify returns true iff sig is a valid signature of message by publicKey. 
func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool { if sig[63]&224 != 0 { return false } var A edwards25519.ExtendedGroupElement if !A.FromBytes(publicKey) { return false } h := sha512.New() h.Write(sig[:32]) h.Write(publicKey[:]) h.Write(message) var digest [64]byte h.Sum(digest[:0]) var hReduced [32]byte edwards25519.ScReduce(&hReduced, &digest) var R edwards25519.ProjectiveGroupElement var b [32]byte copy(b[:], sig[32:]) edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) var checkR [32]byte R.ToBytes(&checkR) return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 } docker-1.10.3/vendor/src/github.com/agl/ed25519/edwards25519/000077500000000000000000000000001267010174400227705ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/agl/ed25519/edwards25519/const.go000066400000000000000000002733131267010174400244560ustar00rootroot00000000000000// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package edwards25519 var d = FieldElement{ -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, } var d2 = FieldElement{ -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, } var SqrtM1 = FieldElement{ -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, } var A = FieldElement{ 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, } var bi = [8]PreComputedGroupElement{ { FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, }, { FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, }, { FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, }, { FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, }, { FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, 
FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, }, { FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, }, { FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, }, { FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, }, } var base = [32][8]PreComputedGroupElement{ { { FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, }, { FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, 
-9541118, -15472047, -4166697}, }, { FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, }, { FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, }, { FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, }, { FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, }, { FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, }, { FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, 
FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, }, }, { { FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, }, { FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, }, { FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, }, { FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, }, { FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, }, { FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, FieldElement{-23244140, 
-294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, }, { FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, }, { FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, }, }, { { FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, }, { FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, }, { FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, }, { FieldElement{-16160072, 5472695, 31895588, 4744994, 
8823515, 10365685, -27224800, 9448613, -28774454, 366295}, FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, }, { FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, }, { FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, }, { FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, }, { FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, }, }, { { FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, 
-2369172, -5877341}, }, { FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, }, { FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, }, { FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, }, { FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, }, { FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, }, { FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, 
FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, }, { FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, }, }, { { FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, }, { FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, }, { FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, }, { FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, }, { FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, FieldElement{-1580388, -11729417, 
-25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, }, { FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, }, { FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, }, { FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, }, }, { { FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, }, { FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, }, { FieldElement{-10433389, 
-14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, }, { FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, }, { FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, }, { FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, }, { FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, }, { FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, 
-11018068, -15209202, -15051267, -9164929, 6580396}, }, }, { { FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, }, { FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, }, { FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, }, { FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, }, { FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, }, { FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 
7031275, 7589640, 8945490}, FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, }, { FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, }, { FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, }, }, { { FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, }, { FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, }, { FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, }, { FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, 
FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, }, { FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, }, { FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, }, { FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, }, { FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, }, }, { { FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, }, { FieldElement{-16420968, -1113305, -327719, 
-12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, }, { FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, }, { FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, }, { FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, }, { FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, }, { FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, FieldElement{15308788, 5320727, -30113809, -14318877, 
22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, }, { FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, }, }, { { FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, }, { FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, }, { FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, }, { FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, }, { FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, 
-5118685, -4096706, 29120153, 13924425}, FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, }, { FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, }, { FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, }, { FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, }, }, { { FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, }, { FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, }, { FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, 
-20314580, -1305992}, FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, }, { FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, }, { FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, }, { FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, }, { FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, }, { FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, }, }, { 
{ FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, }, { FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, }, { FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, }, { FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, }, { FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, }, { FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, FieldElement{6744077, 2427284, 26042789, 2720740, 
-847906, 1118974, 32324614, 7406442, 12420155, 1994844}, }, { FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, }, { FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, }, }, { { FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, }, { FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, }, { FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, }, { FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, 
-16111345, 6493122, -19384511, 7639714}, FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, }, { FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, }, { FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, }, { FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, }, { FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, }, }, { { FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, }, { FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 
1449224, 13082861}, FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, }, { FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, }, { FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, }, { FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, }, { FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, }, { FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, }, { 
FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, }, }, { { FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, }, { FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, }, { FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, }, { FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, }, { FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, 
FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, }, { FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, }, { FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, }, { FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, }, }, { { FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, }, { FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, }, { FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, FieldElement{9681908, 
-6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, }, { FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, }, { FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, }, { FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, }, { FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, }, { FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, }, }, { { FieldElement{11374242, 12660715, 17861383, 
-12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, }, { FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, }, { FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, }, { FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, }, { FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, }, { FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 
12361135, -18685978, 4578290}, }, { FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, }, { FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, }, }, { { FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, }, { FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, }, { FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, }, { FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 
15138866}, FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, }, { FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, }, { FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, }, { FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, }, { FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, }, }, { { FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, }, { FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, FieldElement{-1323882, 
3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, }, { FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, }, { FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, }, { FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, }, { FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, }, { FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, }, { FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, 
-11931514, -7342816, -9985397, -32349517, 7392473}, FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, }, }, { { FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, }, { FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, }, { FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, }, { FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, }, { FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 
32247248, -14389861}, }, { FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, }, { FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, }, { FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, }, }, { { FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, }, { FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, }, { FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, 
FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, }, { FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, }, { FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, }, { FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, }, { FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, }, { FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, }, }, { { FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, FieldElement{10758205, 15755439, 
-4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, }, { FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, }, { FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, }, { FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, }, { FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, }, { FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, }, { FieldElement{-9141284, 6049714, -19531061, -4341411, 
-31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, }, { FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, }, }, { { FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, }, { FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, }, { FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, }, { FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, 
-16571957, 40450, -4431835, 4862400, 1133}, }, { FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, }, { FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, }, { FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, }, { FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, }, }, { { FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, }, { FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, 
-11574335, -10601610}, FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, }, { FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, }, { FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, }, { FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, }, { FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, }, { FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, }, { FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, 
-573935}, FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, }, }, { { FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, }, { FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, }, { FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, }, { FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, }, { FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, }, { 
FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, }, { FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, }, { FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, }, }, { { FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, }, { FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, }, { FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, FieldElement{-17516482, 
5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, }, { FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, }, { FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, }, { FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, }, { FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, }, { FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, }, }, { { FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, FieldElement{-27343522, -16515468, -27151524, 
-10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, }, { FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, }, { FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, }, { FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, }, { FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, }, { FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, }, { FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 
11829573, 7467844, -28822128, 929275}, FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, }, { FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, }, }, { { FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, }, { FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, }, { FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, }, { FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, 
-27505566, 15087184}, }, { FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, }, { FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, }, { FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, }, { FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, }, }, { { FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, }, { FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 
8464118}, FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, }, { FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, }, { FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, }, { FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, }, { FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, }, { FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, }, { FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, FieldElement{13054562, 
-2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, }, }, { { FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, }, { FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, }, { FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, }, { FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, }, { FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, }, { FieldElement{2101391, -4930054, 19702731, 2367575, 
-15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, }, { FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, }, { FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, }, }, { { FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, }, { FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, }, { FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, 
-5269471, -9725556, -30701573, -16479657}, }, { FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, }, { FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, }, { FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, }, { FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, }, { FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, }, }, { { FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 
15970762, 4099461}, FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, }, { FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, }, { FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, }, { FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, }, { FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, }, { FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, }, { FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, 
-15486313}, FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, }, { FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, }, }, } docker-1.10.3/vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go000066400000000000000000001461411267010174400253650ustar00rootroot00000000000000// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package edwards25519 implements operations in GF(2**255-19) and on an // Edwards curve that is isomorphic to curve25519. See // http://ed25519.cr.yp.to/. package edwards25519 // This code is a port of the public domain, "ref10" implementation of ed25519 // from SUPERCOP. // FieldElement represents an element of the field GF(2^255 - 19). An element // t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 // t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on // context. type FieldElement [10]int32 func FeZero(fe *FieldElement) { for i := range fe { fe[i] = 0 } } func FeOne(fe *FieldElement) { FeZero(fe) fe[0] = 1 } func FeAdd(dst, a, b *FieldElement) { for i := range dst { dst[i] = a[i] + b[i] } } func FeSub(dst, a, b *FieldElement) { for i := range dst { dst[i] = a[i] - b[i] } } func FeCopy(dst, src *FieldElement) { for i := range dst { dst[i] = src[i] } } // Replace (f,g) with (g,g) if b == 1; // replace (f,g) with (f,g) if b == 0. // // Preconditions: b in {0,1}. 
func FeCMove(f, g *FieldElement, b int32) { var x FieldElement b = -b for i := range x { x[i] = b & (f[i] ^ g[i]) } for i := range f { f[i] ^= x[i] } } func load3(in []byte) int64 { var r int64 r = int64(in[0]) r |= int64(in[1]) << 8 r |= int64(in[2]) << 16 return r } func load4(in []byte) int64 { var r int64 r = int64(in[0]) r |= int64(in[1]) << 8 r |= int64(in[2]) << 16 r |= int64(in[3]) << 24 return r } func FeFromBytes(dst *FieldElement, src *[32]byte) { h0 := load4(src[:]) h1 := load3(src[4:]) << 6 h2 := load3(src[7:]) << 5 h3 := load3(src[10:]) << 3 h4 := load3(src[13:]) << 2 h5 := load4(src[16:]) h6 := load3(src[20:]) << 7 h7 := load3(src[23:]) << 5 h8 := load3(src[26:]) << 4 h9 := (load3(src[29:]) & 8388607) << 2 var carry [10]int64 carry[9] = (h9 + 1<<24) >> 25 h0 += carry[9] * 19 h9 -= carry[9] << 25 carry[1] = (h1 + 1<<24) >> 25 h2 += carry[1] h1 -= carry[1] << 25 carry[3] = (h3 + 1<<24) >> 25 h4 += carry[3] h3 -= carry[3] << 25 carry[5] = (h5 + 1<<24) >> 25 h6 += carry[5] h5 -= carry[5] << 25 carry[7] = (h7 + 1<<24) >> 25 h8 += carry[7] h7 -= carry[7] << 25 carry[0] = (h0 + 1<<25) >> 26 h1 += carry[0] h0 -= carry[0] << 26 carry[2] = (h2 + 1<<25) >> 26 h3 += carry[2] h2 -= carry[2] << 26 carry[4] = (h4 + 1<<25) >> 26 h5 += carry[4] h4 -= carry[4] << 26 carry[6] = (h6 + 1<<25) >> 26 h7 += carry[6] h6 -= carry[6] << 26 carry[8] = (h8 + 1<<25) >> 26 h9 += carry[8] h8 -= carry[8] << 26 dst[0] = int32(h0) dst[1] = int32(h1) dst[2] = int32(h2) dst[3] = int32(h3) dst[4] = int32(h4) dst[5] = int32(h5) dst[6] = int32(h6) dst[7] = int32(h7) dst[8] = int32(h8) dst[9] = int32(h9) } // FeToBytes marshals h to s. // Preconditions: // |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. // // Write p=2^255-19; q=floor(h/p). // Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). // // Proof: // Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. // Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. 
// // Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). // Then 0> 25 q = (h[0] + q) >> 26 q = (h[1] + q) >> 25 q = (h[2] + q) >> 26 q = (h[3] + q) >> 25 q = (h[4] + q) >> 26 q = (h[5] + q) >> 25 q = (h[6] + q) >> 26 q = (h[7] + q) >> 25 q = (h[8] + q) >> 26 q = (h[9] + q) >> 25 // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. h[0] += 19 * q // Goal: Output h-2^255 q, which is between 0 and 2^255-20. carry[0] = h[0] >> 26 h[1] += carry[0] h[0] -= carry[0] << 26 carry[1] = h[1] >> 25 h[2] += carry[1] h[1] -= carry[1] << 25 carry[2] = h[2] >> 26 h[3] += carry[2] h[2] -= carry[2] << 26 carry[3] = h[3] >> 25 h[4] += carry[3] h[3] -= carry[3] << 25 carry[4] = h[4] >> 26 h[5] += carry[4] h[4] -= carry[4] << 26 carry[5] = h[5] >> 25 h[6] += carry[5] h[5] -= carry[5] << 25 carry[6] = h[6] >> 26 h[7] += carry[6] h[6] -= carry[6] << 26 carry[7] = h[7] >> 25 h[8] += carry[7] h[7] -= carry[7] << 25 carry[8] = h[8] >> 26 h[9] += carry[8] h[8] -= carry[8] << 26 carry[9] = h[9] >> 25 h[9] -= carry[9] << 25 // h10 = carry9 // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; // evidently 2^255 h10-2^255 q = 0. // Goal: Output h[0]+...+2^230 h[9]. 
s[0] = byte(h[0] >> 0) s[1] = byte(h[0] >> 8) s[2] = byte(h[0] >> 16) s[3] = byte((h[0] >> 24) | (h[1] << 2)) s[4] = byte(h[1] >> 6) s[5] = byte(h[1] >> 14) s[6] = byte((h[1] >> 22) | (h[2] << 3)) s[7] = byte(h[2] >> 5) s[8] = byte(h[2] >> 13) s[9] = byte((h[2] >> 21) | (h[3] << 5)) s[10] = byte(h[3] >> 3) s[11] = byte(h[3] >> 11) s[12] = byte((h[3] >> 19) | (h[4] << 6)) s[13] = byte(h[4] >> 2) s[14] = byte(h[4] >> 10) s[15] = byte(h[4] >> 18) s[16] = byte(h[5] >> 0) s[17] = byte(h[5] >> 8) s[18] = byte(h[5] >> 16) s[19] = byte((h[5] >> 24) | (h[6] << 1)) s[20] = byte(h[6] >> 7) s[21] = byte(h[6] >> 15) s[22] = byte((h[6] >> 23) | (h[7] << 3)) s[23] = byte(h[7] >> 5) s[24] = byte(h[7] >> 13) s[25] = byte((h[7] >> 21) | (h[8] << 4)) s[26] = byte(h[8] >> 4) s[27] = byte(h[8] >> 12) s[28] = byte((h[8] >> 20) | (h[9] << 6)) s[29] = byte(h[9] >> 2) s[30] = byte(h[9] >> 10) s[31] = byte(h[9] >> 18) } func FeIsNegative(f *FieldElement) byte { var s [32]byte FeToBytes(&s, f) return s[0] & 1 } func FeIsNonZero(f *FieldElement) int32 { var s [32]byte FeToBytes(&s, f) var x uint8 for _, b := range s { x |= b } x |= x >> 4 x |= x >> 2 x |= x >> 1 return int32(x & 1) } // FeNeg sets h = -f // // Preconditions: // |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. // // Postconditions: // |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. func FeNeg(h, f *FieldElement) { for i := range h { h[i] = -f[i] } } // FeMul calculates h = f * g // Can overlap h with f or g. // // Preconditions: // |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. // |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. // // Postconditions: // |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. // // Notes on implementation strategy: // // Using schoolbook multiplication. // Karatsuba would save a little in some cost models. // // Most multiplications by 2 and 19 are 32-bit precomputations; // cheaper than 64-bit postcomputations. 
// // There is one remaining multiplication by 19 in the carry chain; // one *19 precomputation can be merged into this, // but the resulting data flow is considerably less clean. // // There are 12 carries below. // 10 of them are 2-way parallelizable and vectorizable. // Can get away with 11 carries, but then data flow is much deeper. // // With tighter constraints on inputs can squeeze carries into int32. func FeMul(h, f, g *FieldElement) { f0 := f[0] f1 := f[1] f2 := f[2] f3 := f[3] f4 := f[4] f5 := f[5] f6 := f[6] f7 := f[7] f8 := f[8] f9 := f[9] g0 := g[0] g1 := g[1] g2 := g[2] g3 := g[3] g4 := g[4] g5 := g[5] g6 := g[6] g7 := g[7] g8 := g[8] g9 := g[9] g1_19 := 19 * g1 /* 1.4*2^29 */ g2_19 := 19 * g2 /* 1.4*2^30; still ok */ g3_19 := 19 * g3 g4_19 := 19 * g4 g5_19 := 19 * g5 g6_19 := 19 * g6 g7_19 := 19 * g7 g8_19 := 19 * g8 g9_19 := 19 * g9 f1_2 := 2 * f1 f3_2 := 2 * f3 f5_2 := 2 * f5 f7_2 := 2 * f7 f9_2 := 2 * f9 f0g0 := int64(f0) * int64(g0) f0g1 := int64(f0) * int64(g1) f0g2 := int64(f0) * int64(g2) f0g3 := int64(f0) * int64(g3) f0g4 := int64(f0) * int64(g4) f0g5 := int64(f0) * int64(g5) f0g6 := int64(f0) * int64(g6) f0g7 := int64(f0) * int64(g7) f0g8 := int64(f0) * int64(g8) f0g9 := int64(f0) * int64(g9) f1g0 := int64(f1) * int64(g0) f1g1_2 := int64(f1_2) * int64(g1) f1g2 := int64(f1) * int64(g2) f1g3_2 := int64(f1_2) * int64(g3) f1g4 := int64(f1) * int64(g4) f1g5_2 := int64(f1_2) * int64(g5) f1g6 := int64(f1) * int64(g6) f1g7_2 := int64(f1_2) * int64(g7) f1g8 := int64(f1) * int64(g8) f1g9_38 := int64(f1_2) * int64(g9_19) f2g0 := int64(f2) * int64(g0) f2g1 := int64(f2) * int64(g1) f2g2 := int64(f2) * int64(g2) f2g3 := int64(f2) * int64(g3) f2g4 := int64(f2) * int64(g4) f2g5 := int64(f2) * int64(g5) f2g6 := int64(f2) * int64(g6) f2g7 := int64(f2) * int64(g7) f2g8_19 := int64(f2) * int64(g8_19) f2g9_19 := int64(f2) * int64(g9_19) f3g0 := int64(f3) * int64(g0) f3g1_2 := int64(f3_2) * int64(g1) f3g2 := int64(f3) * int64(g2) f3g3_2 := int64(f3_2) * int64(g3) 
f3g4 := int64(f3) * int64(g4) f3g5_2 := int64(f3_2) * int64(g5) f3g6 := int64(f3) * int64(g6) f3g7_38 := int64(f3_2) * int64(g7_19) f3g8_19 := int64(f3) * int64(g8_19) f3g9_38 := int64(f3_2) * int64(g9_19) f4g0 := int64(f4) * int64(g0) f4g1 := int64(f4) * int64(g1) f4g2 := int64(f4) * int64(g2) f4g3 := int64(f4) * int64(g3) f4g4 := int64(f4) * int64(g4) f4g5 := int64(f4) * int64(g5) f4g6_19 := int64(f4) * int64(g6_19) f4g7_19 := int64(f4) * int64(g7_19) f4g8_19 := int64(f4) * int64(g8_19) f4g9_19 := int64(f4) * int64(g9_19) f5g0 := int64(f5) * int64(g0) f5g1_2 := int64(f5_2) * int64(g1) f5g2 := int64(f5) * int64(g2) f5g3_2 := int64(f5_2) * int64(g3) f5g4 := int64(f5) * int64(g4) f5g5_38 := int64(f5_2) * int64(g5_19) f5g6_19 := int64(f5) * int64(g6_19) f5g7_38 := int64(f5_2) * int64(g7_19) f5g8_19 := int64(f5) * int64(g8_19) f5g9_38 := int64(f5_2) * int64(g9_19) f6g0 := int64(f6) * int64(g0) f6g1 := int64(f6) * int64(g1) f6g2 := int64(f6) * int64(g2) f6g3 := int64(f6) * int64(g3) f6g4_19 := int64(f6) * int64(g4_19) f6g5_19 := int64(f6) * int64(g5_19) f6g6_19 := int64(f6) * int64(g6_19) f6g7_19 := int64(f6) * int64(g7_19) f6g8_19 := int64(f6) * int64(g8_19) f6g9_19 := int64(f6) * int64(g9_19) f7g0 := int64(f7) * int64(g0) f7g1_2 := int64(f7_2) * int64(g1) f7g2 := int64(f7) * int64(g2) f7g3_38 := int64(f7_2) * int64(g3_19) f7g4_19 := int64(f7) * int64(g4_19) f7g5_38 := int64(f7_2) * int64(g5_19) f7g6_19 := int64(f7) * int64(g6_19) f7g7_38 := int64(f7_2) * int64(g7_19) f7g8_19 := int64(f7) * int64(g8_19) f7g9_38 := int64(f7_2) * int64(g9_19) f8g0 := int64(f8) * int64(g0) f8g1 := int64(f8) * int64(g1) f8g2_19 := int64(f8) * int64(g2_19) f8g3_19 := int64(f8) * int64(g3_19) f8g4_19 := int64(f8) * int64(g4_19) f8g5_19 := int64(f8) * int64(g5_19) f8g6_19 := int64(f8) * int64(g6_19) f8g7_19 := int64(f8) * int64(g7_19) f8g8_19 := int64(f8) * int64(g8_19) f8g9_19 := int64(f8) * int64(g9_19) f9g0 := int64(f9) * int64(g0) f9g1_38 := int64(f9_2) * int64(g1_19) f9g2_19 := 
int64(f9) * int64(g2_19) f9g3_38 := int64(f9_2) * int64(g3_19) f9g4_19 := int64(f9) * int64(g4_19) f9g5_38 := int64(f9_2) * int64(g5_19) f9g6_19 := int64(f9) * int64(g6_19) f9g7_38 := int64(f9_2) * int64(g7_19) f9g8_19 := int64(f9) * int64(g8_19) f9g9_38 := int64(f9_2) * int64(g9_19) h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 var carry [10]int64 /* |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 */ carry[0] = (h0 + (1 << 25)) >> 26 h1 += carry[0] h0 -= carry[0] << 26 carry[4] = (h4 + (1 << 25)) >> 26 h5 += carry[4] h4 -= carry[4] << 26 /* |h0| <= 2^25 */ /* |h4| <= 2^25 */ /* |h1| <= 1.51*2^58 */ /* |h5| <= 1.51*2^58 */ carry[1] = (h1 + (1 << 24)) >> 25 h2 += carry[1] h1 -= carry[1] << 25 carry[5] = (h5 + (1 << 24)) >> 25 h6 += carry[5] h5 -= carry[5] << 25 /* |h1| <= 2^24; from now on fits into int32 */ /* |h5| <= 2^24; from now on fits into int32 */ /* |h2| <= 1.21*2^59 */ /* |h6| <= 1.21*2^59 */ carry[2] = (h2 + (1 << 25)) >> 26 h3 += carry[2] h2 -= carry[2] << 26 carry[6] = (h6 + (1 << 25)) >> 26 h7 += carry[6] h6 -= carry[6] << 26 /* |h2| <= 2^25; from now on fits into int32 unchanged */ /* |h6| <= 2^25; from now on fits into int32 unchanged */ /* |h3| <= 1.51*2^58 */ /* |h7| <= 1.51*2^58 */ carry[3] = (h3 + (1 << 24)) >> 25 h4 += carry[3] h3 -= carry[3] << 25 carry[7] = (h7 + (1 << 24)) >> 25 h8 += carry[7] h7 -= carry[7] << 25 /* |h3| <= 2^24; from now on fits into int32 unchanged */ /* |h7| <= 2^24; from now on fits into int32 unchanged */ /* |h4| <= 1.52*2^33 */ /* |h8| <= 1.52*2^33 */ carry[4] = (h4 + (1 << 25)) >> 26 h5 += carry[4] h4 -= carry[4] << 26 carry[8] = (h8 + (1 << 25)) >> 26 h9 += carry[8] h8 -= carry[8] << 26 /* |h4| <= 2^25; from now on fits into int32 unchanged */ /* |h8| <= 2^25; from now on fits into int32 unchanged */ /* |h5| <= 1.01*2^24 */ /* |h9| <= 1.51*2^58 */ carry[9] = (h9 + (1 << 24)) >> 25 h0 += carry[9] * 19 h9 -= carry[9] << 25 /* |h9| <= 2^24; from now on fits into int32 unchanged */ /* |h0| <= 1.8*2^37 */ carry[0] = (h0 + (1 << 25)) >> 26 h1 += carry[0] h0 -= carry[0] << 26 /* |h0| <= 2^25; from now on fits into int32 unchanged */ /* |h1| <= 1.01*2^24 */ h[0] = int32(h0) h[1] = int32(h1) h[2] = int32(h2) h[3] = int32(h3) h[4] = int32(h4) h[5] = int32(h5) h[6] = int32(h6) h[7] = int32(h7) h[8] = int32(h8) h[9] = int32(h9) } // FeSquare calculates h = f*f. 
Can overlap h with f. // // Preconditions: // |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. // // Postconditions: // |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. func FeSquare(h, f *FieldElement) { f0 := f[0] f1 := f[1] f2 := f[2] f3 := f[3] f4 := f[4] f5 := f[5] f6 := f[6] f7 := f[7] f8 := f[8] f9 := f[9] f0_2 := 2 * f0 f1_2 := 2 * f1 f2_2 := 2 * f2 f3_2 := 2 * f3 f4_2 := 2 * f4 f5_2 := 2 * f5 f6_2 := 2 * f6 f7_2 := 2 * f7 f5_38 := 38 * f5 // 1.31*2^30 f6_19 := 19 * f6 // 1.31*2^30 f7_38 := 38 * f7 // 1.31*2^30 f8_19 := 19 * f8 // 1.31*2^30 f9_38 := 38 * f9 // 1.31*2^30 f0f0 := int64(f0) * int64(f0) f0f1_2 := int64(f0_2) * int64(f1) f0f2_2 := int64(f0_2) * int64(f2) f0f3_2 := int64(f0_2) * int64(f3) f0f4_2 := int64(f0_2) * int64(f4) f0f5_2 := int64(f0_2) * int64(f5) f0f6_2 := int64(f0_2) * int64(f6) f0f7_2 := int64(f0_2) * int64(f7) f0f8_2 := int64(f0_2) * int64(f8) f0f9_2 := int64(f0_2) * int64(f9) f1f1_2 := int64(f1_2) * int64(f1) f1f2_2 := int64(f1_2) * int64(f2) f1f3_4 := int64(f1_2) * int64(f3_2) f1f4_2 := int64(f1_2) * int64(f4) f1f5_4 := int64(f1_2) * int64(f5_2) f1f6_2 := int64(f1_2) * int64(f6) f1f7_4 := int64(f1_2) * int64(f7_2) f1f8_2 := int64(f1_2) * int64(f8) f1f9_76 := int64(f1_2) * int64(f9_38) f2f2 := int64(f2) * int64(f2) f2f3_2 := int64(f2_2) * int64(f3) f2f4_2 := int64(f2_2) * int64(f4) f2f5_2 := int64(f2_2) * int64(f5) f2f6_2 := int64(f2_2) * int64(f6) f2f7_2 := int64(f2_2) * int64(f7) f2f8_38 := int64(f2_2) * int64(f8_19) f2f9_38 := int64(f2) * int64(f9_38) f3f3_2 := int64(f3_2) * int64(f3) f3f4_2 := int64(f3_2) * int64(f4) f3f5_4 := int64(f3_2) * int64(f5_2) f3f6_2 := int64(f3_2) * int64(f6) f3f7_76 := int64(f3_2) * int64(f7_38) f3f8_38 := int64(f3_2) * int64(f8_19) f3f9_76 := int64(f3_2) * int64(f9_38) f4f4 := int64(f4) * int64(f4) f4f5_2 := int64(f4_2) * int64(f5) f4f6_38 := int64(f4_2) * int64(f6_19) f4f7_38 := int64(f4) * int64(f7_38) f4f8_38 := int64(f4_2) * int64(f8_19) f4f9_38 := int64(f4) * int64(f9_38) f5f5_38 := 
int64(f5) * int64(f5_38) f5f6_38 := int64(f5_2) * int64(f6_19) f5f7_76 := int64(f5_2) * int64(f7_38) f5f8_38 := int64(f5_2) * int64(f8_19) f5f9_76 := int64(f5_2) * int64(f9_38) f6f6_19 := int64(f6) * int64(f6_19) f6f7_38 := int64(f6) * int64(f7_38) f6f8_38 := int64(f6_2) * int64(f8_19) f6f9_38 := int64(f6) * int64(f9_38) f7f7_38 := int64(f7) * int64(f7_38) f7f8_38 := int64(f7_2) * int64(f8_19) f7f9_76 := int64(f7_2) * int64(f9_38) f8f8_19 := int64(f8) * int64(f8_19) f8f9_38 := int64(f8) * int64(f9_38) f9f9_38 := int64(f9) * int64(f9_38) h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 var carry [10]int64 carry[0] = (h0 + (1 << 25)) >> 26 h1 += carry[0] h0 -= carry[0] << 26 carry[4] = (h4 + (1 << 25)) >> 26 h5 += carry[4] h4 -= carry[4] << 26 carry[1] = (h1 + (1 << 24)) >> 25 h2 += carry[1] h1 -= carry[1] << 25 carry[5] = (h5 + (1 << 24)) >> 25 h6 += carry[5] h5 -= carry[5] << 25 carry[2] = (h2 + (1 << 25)) >> 26 h3 += carry[2] h2 -= carry[2] << 26 carry[6] = (h6 + (1 << 25)) >> 26 h7 += carry[6] h6 -= carry[6] << 26 carry[3] = (h3 + (1 << 24)) >> 25 h4 += carry[3] h3 -= carry[3] << 25 carry[7] = (h7 + (1 << 24)) >> 25 h8 += carry[7] h7 -= carry[7] << 25 carry[4] = (h4 + (1 << 25)) >> 26 h5 += carry[4] h4 -= carry[4] << 26 carry[8] = (h8 + (1 << 25)) >> 26 h9 += carry[8] h8 -= carry[8] << 26 carry[9] = (h9 + (1 << 24)) >> 25 h0 += carry[9] * 19 h9 -= carry[9] << 25 carry[0] = (h0 + (1 << 25)) >> 26 h1 += carry[0] h0 -= carry[0] << 26 h[0] = int32(h0) h[1] = int32(h1) 
h[2] = int32(h2) h[3] = int32(h3) h[4] = int32(h4) h[5] = int32(h5) h[6] = int32(h6) h[7] = int32(h7) h[8] = int32(h8) h[9] = int32(h9) } // FeSquare2 sets h = 2 * f * f // // Can overlap h with f. // // Preconditions: // |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. // // Postconditions: // |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. // See fe_mul.c for discussion of implementation strategy. func FeSquare2(h, f *FieldElement) { f0 := f[0] f1 := f[1] f2 := f[2] f3 := f[3] f4 := f[4] f5 := f[5] f6 := f[6] f7 := f[7] f8 := f[8] f9 := f[9] f0_2 := 2 * f0 f1_2 := 2 * f1 f2_2 := 2 * f2 f3_2 := 2 * f3 f4_2 := 2 * f4 f5_2 := 2 * f5 f6_2 := 2 * f6 f7_2 := 2 * f7 f5_38 := 38 * f5 // 1.959375*2^30 f6_19 := 19 * f6 // 1.959375*2^30 f7_38 := 38 * f7 // 1.959375*2^30 f8_19 := 19 * f8 // 1.959375*2^30 f9_38 := 38 * f9 // 1.959375*2^30 f0f0 := int64(f0) * int64(f0) f0f1_2 := int64(f0_2) * int64(f1) f0f2_2 := int64(f0_2) * int64(f2) f0f3_2 := int64(f0_2) * int64(f3) f0f4_2 := int64(f0_2) * int64(f4) f0f5_2 := int64(f0_2) * int64(f5) f0f6_2 := int64(f0_2) * int64(f6) f0f7_2 := int64(f0_2) * int64(f7) f0f8_2 := int64(f0_2) * int64(f8) f0f9_2 := int64(f0_2) * int64(f9) f1f1_2 := int64(f1_2) * int64(f1) f1f2_2 := int64(f1_2) * int64(f2) f1f3_4 := int64(f1_2) * int64(f3_2) f1f4_2 := int64(f1_2) * int64(f4) f1f5_4 := int64(f1_2) * int64(f5_2) f1f6_2 := int64(f1_2) * int64(f6) f1f7_4 := int64(f1_2) * int64(f7_2) f1f8_2 := int64(f1_2) * int64(f8) f1f9_76 := int64(f1_2) * int64(f9_38) f2f2 := int64(f2) * int64(f2) f2f3_2 := int64(f2_2) * int64(f3) f2f4_2 := int64(f2_2) * int64(f4) f2f5_2 := int64(f2_2) * int64(f5) f2f6_2 := int64(f2_2) * int64(f6) f2f7_2 := int64(f2_2) * int64(f7) f2f8_38 := int64(f2_2) * int64(f8_19) f2f9_38 := int64(f2) * int64(f9_38) f3f3_2 := int64(f3_2) * int64(f3) f3f4_2 := int64(f3_2) * int64(f4) f3f5_4 := int64(f3_2) * int64(f5_2) f3f6_2 := int64(f3_2) * int64(f6) f3f7_76 := int64(f3_2) * int64(f7_38) f3f8_38 := int64(f3_2) * 
int64(f8_19) f3f9_76 := int64(f3_2) * int64(f9_38) f4f4 := int64(f4) * int64(f4) f4f5_2 := int64(f4_2) * int64(f5) f4f6_38 := int64(f4_2) * int64(f6_19) f4f7_38 := int64(f4) * int64(f7_38) f4f8_38 := int64(f4_2) * int64(f8_19) f4f9_38 := int64(f4) * int64(f9_38) f5f5_38 := int64(f5) * int64(f5_38) f5f6_38 := int64(f5_2) * int64(f6_19) f5f7_76 := int64(f5_2) * int64(f7_38) f5f8_38 := int64(f5_2) * int64(f8_19) f5f9_76 := int64(f5_2) * int64(f9_38) f6f6_19 := int64(f6) * int64(f6_19) f6f7_38 := int64(f6) * int64(f7_38) f6f8_38 := int64(f6_2) * int64(f8_19) f6f9_38 := int64(f6) * int64(f9_38) f7f7_38 := int64(f7) * int64(f7_38) f7f8_38 := int64(f7_2) * int64(f8_19) f7f9_76 := int64(f7_2) * int64(f9_38) f8f8_19 := int64(f8) * int64(f8_19) f8f9_38 := int64(f8) * int64(f9_38) f9f9_38 := int64(f9) * int64(f9_38) h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 var carry [10]int64 h0 += h0 h1 += h1 h2 += h2 h3 += h3 h4 += h4 h5 += h5 h6 += h6 h7 += h7 h8 += h8 h9 += h9 carry[0] = (h0 + (1 << 25)) >> 26 h1 += carry[0] h0 -= carry[0] << 26 carry[4] = (h4 + (1 << 25)) >> 26 h5 += carry[4] h4 -= carry[4] << 26 carry[1] = (h1 + (1 << 24)) >> 25 h2 += carry[1] h1 -= carry[1] << 25 carry[5] = (h5 + (1 << 24)) >> 25 h6 += carry[5] h5 -= carry[5] << 25 carry[2] = (h2 + (1 << 25)) >> 26 h3 += carry[2] h2 -= carry[2] << 26 carry[6] = (h6 + (1 << 25)) >> 26 h7 += carry[6] h6 -= carry[6] << 26 carry[3] = (h3 + (1 << 24)) >> 25 h4 += carry[3] h3 -= carry[3] << 25 carry[7] = (h7 + (1 << 24)) 
>> 25 h8 += carry[7] h7 -= carry[7] << 25 carry[4] = (h4 + (1 << 25)) >> 26 h5 += carry[4] h4 -= carry[4] << 26 carry[8] = (h8 + (1 << 25)) >> 26 h9 += carry[8] h8 -= carry[8] << 26 carry[9] = (h9 + (1 << 24)) >> 25 h0 += carry[9] * 19 h9 -= carry[9] << 25 carry[0] = (h0 + (1 << 25)) >> 26 h1 += carry[0] h0 -= carry[0] << 26 h[0] = int32(h0) h[1] = int32(h1) h[2] = int32(h2) h[3] = int32(h3) h[4] = int32(h4) h[5] = int32(h5) h[6] = int32(h6) h[7] = int32(h7) h[8] = int32(h8) h[9] = int32(h9) } func FeInvert(out, z *FieldElement) { var t0, t1, t2, t3 FieldElement var i int FeSquare(&t0, z) // 2^1 FeSquare(&t1, &t0) // 2^2 for i = 1; i < 2; i++ { // 2^3 FeSquare(&t1, &t1) } FeMul(&t1, z, &t1) // 2^3 + 2^0 FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 FeSquare(&t2, &t1) // 5,4,3,2,1 for i = 1; i < 5; i++ { // 9,8,7,6,5 FeSquare(&t2, &t2) } FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 FeSquare(&t2, &t1) // 10..1 for i = 1; i < 10; i++ { // 19..10 FeSquare(&t2, &t2) } FeMul(&t2, &t2, &t1) // 19..0 FeSquare(&t3, &t2) // 20..1 for i = 1; i < 20; i++ { // 39..20 FeSquare(&t3, &t3) } FeMul(&t2, &t3, &t2) // 39..0 FeSquare(&t2, &t2) // 40..1 for i = 1; i < 10; i++ { // 49..10 FeSquare(&t2, &t2) } FeMul(&t1, &t2, &t1) // 49..0 FeSquare(&t2, &t1) // 50..1 for i = 1; i < 50; i++ { // 99..50 FeSquare(&t2, &t2) } FeMul(&t2, &t2, &t1) // 99..0 FeSquare(&t3, &t2) // 100..1 for i = 1; i < 100; i++ { // 199..100 FeSquare(&t3, &t3) } FeMul(&t2, &t3, &t2) // 199..0 FeSquare(&t2, &t2) // 200..1 for i = 1; i < 50; i++ { // 249..50 FeSquare(&t2, &t2) } FeMul(&t1, &t2, &t1) // 249..0 FeSquare(&t1, &t1) // 250..1 for i = 1; i < 5; i++ { // 254..5 FeSquare(&t1, &t1) } FeMul(out, &t1, &t0) // 254..5,3,1,0 } func fePow22523(out, z *FieldElement) { var t0, t1, t2 FieldElement var i int FeSquare(&t0, z) for i = 1; i < 1; i++ { FeSquare(&t0, &t0) } FeSquare(&t1, &t0) for i = 1; i < 2; i++ { FeSquare(&t1, &t1) } 
FeMul(&t1, z, &t1) FeMul(&t0, &t0, &t1) FeSquare(&t0, &t0) for i = 1; i < 1; i++ { FeSquare(&t0, &t0) } FeMul(&t0, &t1, &t0) FeSquare(&t1, &t0) for i = 1; i < 5; i++ { FeSquare(&t1, &t1) } FeMul(&t0, &t1, &t0) FeSquare(&t1, &t0) for i = 1; i < 10; i++ { FeSquare(&t1, &t1) } FeMul(&t1, &t1, &t0) FeSquare(&t2, &t1) for i = 1; i < 20; i++ { FeSquare(&t2, &t2) } FeMul(&t1, &t2, &t1) FeSquare(&t1, &t1) for i = 1; i < 10; i++ { FeSquare(&t1, &t1) } FeMul(&t0, &t1, &t0) FeSquare(&t1, &t0) for i = 1; i < 50; i++ { FeSquare(&t1, &t1) } FeMul(&t1, &t1, &t0) FeSquare(&t2, &t1) for i = 1; i < 100; i++ { FeSquare(&t2, &t2) } FeMul(&t1, &t2, &t1) FeSquare(&t1, &t1) for i = 1; i < 50; i++ { FeSquare(&t1, &t1) } FeMul(&t0, &t1, &t0) FeSquare(&t0, &t0) for i = 1; i < 2; i++ { FeSquare(&t0, &t0) } FeMul(out, &t0, z) } // Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * // y^2 where d = -121665/121666. // // Several representations are used: // ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z // ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT // CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T // PreComputedGroupElement: (y+x,y-x,2dxy) type ProjectiveGroupElement struct { X, Y, Z FieldElement } type ExtendedGroupElement struct { X, Y, Z, T FieldElement } type CompletedGroupElement struct { X, Y, Z, T FieldElement } type PreComputedGroupElement struct { yPlusX, yMinusX, xy2d FieldElement } type CachedGroupElement struct { yPlusX, yMinusX, Z, T2d FieldElement } func (p *ProjectiveGroupElement) Zero() { FeZero(&p.X) FeOne(&p.Y) FeOne(&p.Z) } func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { var t0 FieldElement FeSquare(&r.X, &p.X) FeSquare(&r.Z, &p.Y) FeSquare2(&r.T, &p.Z) FeAdd(&r.Y, &p.X, &p.Y) FeSquare(&t0, &r.Y) FeAdd(&r.Y, &r.Z, &r.X) FeSub(&r.Z, &r.Z, &r.X) FeSub(&r.X, &t0, &r.Y) FeSub(&r.T, &r.T, &r.Z) } func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { var recip, x, y FieldElement 
FeInvert(&recip, &p.Z) FeMul(&x, &p.X, &recip) FeMul(&y, &p.Y, &recip) FeToBytes(s, &y) s[31] ^= FeIsNegative(&x) << 7 } func (p *ExtendedGroupElement) Zero() { FeZero(&p.X) FeOne(&p.Y) FeOne(&p.Z) FeZero(&p.T) } func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { var q ProjectiveGroupElement p.ToProjective(&q) q.Double(r) } func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { FeAdd(&r.yPlusX, &p.Y, &p.X) FeSub(&r.yMinusX, &p.Y, &p.X) FeCopy(&r.Z, &p.Z) FeMul(&r.T2d, &p.T, &d2) } func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { FeCopy(&r.X, &p.X) FeCopy(&r.Y, &p.Y) FeCopy(&r.Z, &p.Z) } func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { var recip, x, y FieldElement FeInvert(&recip, &p.Z) FeMul(&x, &p.X, &recip) FeMul(&y, &p.Y, &recip) FeToBytes(s, &y) s[31] ^= FeIsNegative(&x) << 7 } func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { var u, v, v3, vxx, check FieldElement FeFromBytes(&p.Y, s) FeOne(&p.Z) FeSquare(&u, &p.Y) FeMul(&v, &u, &d) FeSub(&u, &u, &p.Z) // y = y^2-1 FeAdd(&v, &v, &p.Z) // v = dy^2+1 FeSquare(&v3, &v) FeMul(&v3, &v3, &v) // v3 = v^3 FeSquare(&p.X, &v3) FeMul(&p.X, &p.X, &v) FeMul(&p.X, &p.X, &u) // x = uv^7 fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) FeMul(&p.X, &p.X, &v3) FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) var tmpX, tmp2 [32]byte FeSquare(&vxx, &p.X) FeMul(&vxx, &vxx, &v) FeSub(&check, &vxx, &u) // vx^2-u if FeIsNonZero(&check) == 1 { FeAdd(&check, &vxx, &u) // vx^2+u if FeIsNonZero(&check) == 1 { return false } FeMul(&p.X, &p.X, &SqrtM1) FeToBytes(&tmpX, &p.X) for i, v := range tmpX { tmp2[31-i] = v } } if FeIsNegative(&p.X) == (s[31] >> 7) { FeNeg(&p.X, &p.X) } FeMul(&p.T, &p.X, &p.Y) return true } func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { FeMul(&r.X, &p.X, &p.T) FeMul(&r.Y, &p.Y, &p.Z) FeMul(&r.Z, &p.Z, &p.T) } func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { FeMul(&r.X, &p.X, &p.T) FeMul(&r.Y, &p.Y, &p.Z) 
FeMul(&r.Z, &p.Z, &p.T) FeMul(&r.T, &p.X, &p.Y) } func (p *PreComputedGroupElement) Zero() { FeOne(&p.yPlusX) FeOne(&p.yMinusX) FeZero(&p.xy2d) } func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { var t0 FieldElement FeAdd(&r.X, &p.Y, &p.X) FeSub(&r.Y, &p.Y, &p.X) FeMul(&r.Z, &r.X, &q.yPlusX) FeMul(&r.Y, &r.Y, &q.yMinusX) FeMul(&r.T, &q.T2d, &p.T) FeMul(&r.X, &p.Z, &q.Z) FeAdd(&t0, &r.X, &r.X) FeSub(&r.X, &r.Z, &r.Y) FeAdd(&r.Y, &r.Z, &r.Y) FeAdd(&r.Z, &t0, &r.T) FeSub(&r.T, &t0, &r.T) } func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { var t0 FieldElement FeAdd(&r.X, &p.Y, &p.X) FeSub(&r.Y, &p.Y, &p.X) FeMul(&r.Z, &r.X, &q.yMinusX) FeMul(&r.Y, &r.Y, &q.yPlusX) FeMul(&r.T, &q.T2d, &p.T) FeMul(&r.X, &p.Z, &q.Z) FeAdd(&t0, &r.X, &r.X) FeSub(&r.X, &r.Z, &r.Y) FeAdd(&r.Y, &r.Z, &r.Y) FeSub(&r.Z, &t0, &r.T) FeAdd(&r.T, &t0, &r.T) } func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { var t0 FieldElement FeAdd(&r.X, &p.Y, &p.X) FeSub(&r.Y, &p.Y, &p.X) FeMul(&r.Z, &r.X, &q.yPlusX) FeMul(&r.Y, &r.Y, &q.yMinusX) FeMul(&r.T, &q.xy2d, &p.T) FeAdd(&t0, &p.Z, &p.Z) FeSub(&r.X, &r.Z, &r.Y) FeAdd(&r.Y, &r.Z, &r.Y) FeAdd(&r.Z, &t0, &r.T) FeSub(&r.T, &t0, &r.T) } func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { var t0 FieldElement FeAdd(&r.X, &p.Y, &p.X) FeSub(&r.Y, &p.Y, &p.X) FeMul(&r.Z, &r.X, &q.yMinusX) FeMul(&r.Y, &r.Y, &q.yPlusX) FeMul(&r.T, &q.xy2d, &p.T) FeAdd(&t0, &p.Z, &p.Z) FeSub(&r.X, &r.Z, &r.Y) FeAdd(&r.Y, &r.Z, &r.Y) FeSub(&r.Z, &t0, &r.T) FeAdd(&r.T, &t0, &r.T) } func slide(r *[256]int8, a *[32]byte) { for i := range r { r[i] = int8(1 & (a[i>>3] >> uint(i&7))) } for i := range r { if r[i] != 0 { for b := 1; b <= 6 && i+b < 256; b++ { if r[i+b] != 0 { if r[i]+(r[i+b]<= -15 { r[i] -= r[i+b] << uint(b) for k := i + b; k < 256; k++ { if r[k] == 0 { r[k] = 1 break } r[k] = 0 } } else { break } } } } } } // 
GeDoubleScalarMultVartime sets r = a*A + b*B // where a = a[0]+256*a[1]+...+256^31 a[31]. // and b = b[0]+256*b[1]+...+256^31 b[31]. // B is the Ed25519 base point (x,4/5) with x positive. func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { var aSlide, bSlide [256]int8 var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A var t CompletedGroupElement var u, A2 ExtendedGroupElement var i int slide(&aSlide, a) slide(&bSlide, b) A.ToCached(&Ai[0]) A.Double(&t) t.ToExtended(&A2) for i := 0; i < 7; i++ { geAdd(&t, &A2, &Ai[i]) t.ToExtended(&u) u.ToCached(&Ai[i+1]) } r.Zero() for i = 255; i >= 0; i-- { if aSlide[i] != 0 || bSlide[i] != 0 { break } } for ; i >= 0; i-- { r.Double(&t) if aSlide[i] > 0 { t.ToExtended(&u) geAdd(&t, &u, &Ai[aSlide[i]/2]) } else if aSlide[i] < 0 { t.ToExtended(&u) geSub(&t, &u, &Ai[(-aSlide[i])/2]) } if bSlide[i] > 0 { t.ToExtended(&u) geMixedAdd(&t, &u, &bi[bSlide[i]/2]) } else if bSlide[i] < 0 { t.ToExtended(&u) geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) } t.ToProjective(r) } } // equal returns 1 if b == c and 0 otherwise. func equal(b, c int32) int32 { x := uint32(b ^ c) x-- return int32(x >> 31) } // negative returns 1 if b < 0 and 0 otherwise. 
func negative(b int32) int32 { return (b >> 31) & 1 } func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { FeCMove(&t.yPlusX, &u.yPlusX, b) FeCMove(&t.yMinusX, &u.yMinusX, b) FeCMove(&t.xy2d, &u.xy2d, b) } func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { var minusT PreComputedGroupElement bNegative := negative(b) bAbs := b - (((-bNegative) & b) << 1) t.Zero() for i := int32(0); i < 8; i++ { PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) } FeCopy(&minusT.yPlusX, &t.yMinusX) FeCopy(&minusT.yMinusX, &t.yPlusX) FeNeg(&minusT.xy2d, &t.xy2d) PreComputedGroupElementCMove(t, &minusT, bNegative) } // GeScalarMultBase computes h = a*B, where // a = a[0]+256*a[1]+...+256^31 a[31] // B is the Ed25519 base point (x,4/5) with x positive. // // Preconditions: // a[31] <= 127 func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { var e [64]int8 for i, v := range a { e[2*i] = int8(v & 15) e[2*i+1] = int8((v >> 4) & 15) } // each e[i] is between 0 and 15 and e[63] is between 0 and 7. carry := int8(0) for i := 0; i < 63; i++ { e[i] += carry carry = (e[i] + 8) >> 4 e[i] -= carry << 4 } e[63] += carry // each e[i] is between -8 and 8. h.Zero() var t PreComputedGroupElement var r CompletedGroupElement for i := int32(1); i < 64; i += 2 { selectPoint(&t, i/2, int32(e[i])) geMixedAdd(&r, h, &t) r.ToExtended(h) } var s ProjectiveGroupElement h.Double(&r) r.ToProjective(&s) s.Double(&r) r.ToProjective(&s) s.Double(&r) r.ToProjective(&s) s.Double(&r) r.ToExtended(h) for i := int32(0); i < 64; i += 2 { selectPoint(&t, i/2, int32(e[i])) geMixedAdd(&r, h, &t) r.ToExtended(h) } } // The scalars are GF(2^252 + 27742317777372353535851937790883648493). // Input: // a[0]+256*a[1]+...+256^31*a[31] = a // b[0]+256*b[1]+...+256^31*b[31] = b // c[0]+256*c[1]+...+256^31*c[31] = c // // Output: // s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l // where l = 2^252 + 27742317777372353535851937790883648493. 
func ScMulAdd(s, a, b, c *[32]byte) { a0 := 2097151 & load3(a[:]) a1 := 2097151 & (load4(a[2:]) >> 5) a2 := 2097151 & (load3(a[5:]) >> 2) a3 := 2097151 & (load4(a[7:]) >> 7) a4 := 2097151 & (load4(a[10:]) >> 4) a5 := 2097151 & (load3(a[13:]) >> 1) a6 := 2097151 & (load4(a[15:]) >> 6) a7 := 2097151 & (load3(a[18:]) >> 3) a8 := 2097151 & load3(a[21:]) a9 := 2097151 & (load4(a[23:]) >> 5) a10 := 2097151 & (load3(a[26:]) >> 2) a11 := (load4(a[28:]) >> 7) b0 := 2097151 & load3(b[:]) b1 := 2097151 & (load4(b[2:]) >> 5) b2 := 2097151 & (load3(b[5:]) >> 2) b3 := 2097151 & (load4(b[7:]) >> 7) b4 := 2097151 & (load4(b[10:]) >> 4) b5 := 2097151 & (load3(b[13:]) >> 1) b6 := 2097151 & (load4(b[15:]) >> 6) b7 := 2097151 & (load3(b[18:]) >> 3) b8 := 2097151 & load3(b[21:]) b9 := 2097151 & (load4(b[23:]) >> 5) b10 := 2097151 & (load3(b[26:]) >> 2) b11 := (load4(b[28:]) >> 7) c0 := 2097151 & load3(c[:]) c1 := 2097151 & (load4(c[2:]) >> 5) c2 := 2097151 & (load3(c[5:]) >> 2) c3 := 2097151 & (load4(c[7:]) >> 7) c4 := 2097151 & (load4(c[10:]) >> 4) c5 := 2097151 & (load3(c[13:]) >> 1) c6 := 2097151 & (load4(c[15:]) >> 6) c7 := 2097151 & (load3(c[18:]) >> 3) c8 := 2097151 & load3(c[21:]) c9 := 2097151 & (load4(c[23:]) >> 5) c10 := 2097151 & (load3(c[26:]) >> 2) c11 := (load4(c[28:]) >> 7) var carry [23]int64 s0 := c0 + a0*b0 s1 := c1 + a0*b1 + a1*b0 s2 := c2 + a0*b2 + a1*b1 + a2*b0 s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + 
a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 s20 := a9*b11 + a10*b10 + a11*b9 s21 := a10*b11 + a11*b10 s22 := a11 * b11 s23 := int64(0) carry[0] = (s0 + (1 << 20)) >> 21 s1 += carry[0] s0 -= carry[0] << 21 carry[2] = (s2 + (1 << 20)) >> 21 s3 += carry[2] s2 -= carry[2] << 21 carry[4] = (s4 + (1 << 20)) >> 21 s5 += carry[4] s4 -= carry[4] << 21 carry[6] = (s6 + (1 << 20)) >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[8] = (s8 + (1 << 20)) >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[10] = (s10 + (1 << 20)) >> 21 s11 += carry[10] s10 -= carry[10] << 21 carry[12] = (s12 + (1 << 20)) >> 21 s13 += carry[12] s12 -= carry[12] << 21 carry[14] = (s14 + (1 << 20)) >> 21 s15 += carry[14] s14 -= carry[14] << 21 carry[16] = (s16 + (1 << 20)) >> 21 s17 += carry[16] s16 -= carry[16] << 21 carry[18] = (s18 + (1 << 20)) >> 21 s19 += carry[18] s18 -= carry[18] << 21 carry[20] = (s20 + (1 << 20)) >> 21 s21 += carry[20] s20 -= carry[20] << 21 carry[22] = (s22 + (1 << 20)) >> 21 s23 += carry[22] s22 -= carry[22] << 21 carry[1] = (s1 + (1 << 20)) >> 21 s2 += carry[1] s1 -= carry[1] << 21 carry[3] = (s3 + (1 << 20)) >> 21 s4 += carry[3] s3 -= carry[3] << 21 carry[5] = (s5 + (1 << 20)) >> 21 s6 += carry[5] s5 -= carry[5] << 21 carry[7] = (s7 + (1 << 20)) >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[9] = (s9 + (1 << 20)) >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[11] = (s11 + (1 << 20)) >> 21 s12 += carry[11] s11 -= 
carry[11] << 21 carry[13] = (s13 + (1 << 20)) >> 21 s14 += carry[13] s13 -= carry[13] << 21 carry[15] = (s15 + (1 << 20)) >> 21 s16 += carry[15] s15 -= carry[15] << 21 carry[17] = (s17 + (1 << 20)) >> 21 s18 += carry[17] s17 -= carry[17] << 21 carry[19] = (s19 + (1 << 20)) >> 21 s20 += carry[19] s19 -= carry[19] << 21 carry[21] = (s21 + (1 << 20)) >> 21 s22 += carry[21] s21 -= carry[21] << 21 s11 += s23 * 666643 s12 += s23 * 470296 s13 += s23 * 654183 s14 -= s23 * 997805 s15 += s23 * 136657 s16 -= s23 * 683901 s23 = 0 s10 += s22 * 666643 s11 += s22 * 470296 s12 += s22 * 654183 s13 -= s22 * 997805 s14 += s22 * 136657 s15 -= s22 * 683901 s22 = 0 s9 += s21 * 666643 s10 += s21 * 470296 s11 += s21 * 654183 s12 -= s21 * 997805 s13 += s21 * 136657 s14 -= s21 * 683901 s21 = 0 s8 += s20 * 666643 s9 += s20 * 470296 s10 += s20 * 654183 s11 -= s20 * 997805 s12 += s20 * 136657 s13 -= s20 * 683901 s20 = 0 s7 += s19 * 666643 s8 += s19 * 470296 s9 += s19 * 654183 s10 -= s19 * 997805 s11 += s19 * 136657 s12 -= s19 * 683901 s19 = 0 s6 += s18 * 666643 s7 += s18 * 470296 s8 += s18 * 654183 s9 -= s18 * 997805 s10 += s18 * 136657 s11 -= s18 * 683901 s18 = 0 carry[6] = (s6 + (1 << 20)) >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[8] = (s8 + (1 << 20)) >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[10] = (s10 + (1 << 20)) >> 21 s11 += carry[10] s10 -= carry[10] << 21 carry[12] = (s12 + (1 << 20)) >> 21 s13 += carry[12] s12 -= carry[12] << 21 carry[14] = (s14 + (1 << 20)) >> 21 s15 += carry[14] s14 -= carry[14] << 21 carry[16] = (s16 + (1 << 20)) >> 21 s17 += carry[16] s16 -= carry[16] << 21 carry[7] = (s7 + (1 << 20)) >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[9] = (s9 + (1 << 20)) >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[11] = (s11 + (1 << 20)) >> 21 s12 += carry[11] s11 -= carry[11] << 21 carry[13] = (s13 + (1 << 20)) >> 21 s14 += carry[13] s13 -= carry[13] << 21 carry[15] = (s15 + (1 << 20)) >> 21 s16 += carry[15] s15 -= carry[15] << 21 s5 += s17 * 666643 s6 += s17 * 
470296 s7 += s17 * 654183 s8 -= s17 * 997805 s9 += s17 * 136657 s10 -= s17 * 683901 s17 = 0 s4 += s16 * 666643 s5 += s16 * 470296 s6 += s16 * 654183 s7 -= s16 * 997805 s8 += s16 * 136657 s9 -= s16 * 683901 s16 = 0 s3 += s15 * 666643 s4 += s15 * 470296 s5 += s15 * 654183 s6 -= s15 * 997805 s7 += s15 * 136657 s8 -= s15 * 683901 s15 = 0 s2 += s14 * 666643 s3 += s14 * 470296 s4 += s14 * 654183 s5 -= s14 * 997805 s6 += s14 * 136657 s7 -= s14 * 683901 s14 = 0 s1 += s13 * 666643 s2 += s13 * 470296 s3 += s13 * 654183 s4 -= s13 * 997805 s5 += s13 * 136657 s6 -= s13 * 683901 s13 = 0 s0 += s12 * 666643 s1 += s12 * 470296 s2 += s12 * 654183 s3 -= s12 * 997805 s4 += s12 * 136657 s5 -= s12 * 683901 s12 = 0 carry[0] = (s0 + (1 << 20)) >> 21 s1 += carry[0] s0 -= carry[0] << 21 carry[2] = (s2 + (1 << 20)) >> 21 s3 += carry[2] s2 -= carry[2] << 21 carry[4] = (s4 + (1 << 20)) >> 21 s5 += carry[4] s4 -= carry[4] << 21 carry[6] = (s6 + (1 << 20)) >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[8] = (s8 + (1 << 20)) >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[10] = (s10 + (1 << 20)) >> 21 s11 += carry[10] s10 -= carry[10] << 21 carry[1] = (s1 + (1 << 20)) >> 21 s2 += carry[1] s1 -= carry[1] << 21 carry[3] = (s3 + (1 << 20)) >> 21 s4 += carry[3] s3 -= carry[3] << 21 carry[5] = (s5 + (1 << 20)) >> 21 s6 += carry[5] s5 -= carry[5] << 21 carry[7] = (s7 + (1 << 20)) >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[9] = (s9 + (1 << 20)) >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[11] = (s11 + (1 << 20)) >> 21 s12 += carry[11] s11 -= carry[11] << 21 s0 += s12 * 666643 s1 += s12 * 470296 s2 += s12 * 654183 s3 -= s12 * 997805 s4 += s12 * 136657 s5 -= s12 * 683901 s12 = 0 carry[0] = s0 >> 21 s1 += carry[0] s0 -= carry[0] << 21 carry[1] = s1 >> 21 s2 += carry[1] s1 -= carry[1] << 21 carry[2] = s2 >> 21 s3 += carry[2] s2 -= carry[2] << 21 carry[3] = s3 >> 21 s4 += carry[3] s3 -= carry[3] << 21 carry[4] = s4 >> 21 s5 += carry[4] s4 -= carry[4] << 21 carry[5] = s5 >> 21 s6 += carry[5] s5 -= 
carry[5] << 21 carry[6] = s6 >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[7] = s7 >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[8] = s8 >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[9] = s9 >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[10] = s10 >> 21 s11 += carry[10] s10 -= carry[10] << 21 carry[11] = s11 >> 21 s12 += carry[11] s11 -= carry[11] << 21 s0 += s12 * 666643 s1 += s12 * 470296 s2 += s12 * 654183 s3 -= s12 * 997805 s4 += s12 * 136657 s5 -= s12 * 683901 s12 = 0 carry[0] = s0 >> 21 s1 += carry[0] s0 -= carry[0] << 21 carry[1] = s1 >> 21 s2 += carry[1] s1 -= carry[1] << 21 carry[2] = s2 >> 21 s3 += carry[2] s2 -= carry[2] << 21 carry[3] = s3 >> 21 s4 += carry[3] s3 -= carry[3] << 21 carry[4] = s4 >> 21 s5 += carry[4] s4 -= carry[4] << 21 carry[5] = s5 >> 21 s6 += carry[5] s5 -= carry[5] << 21 carry[6] = s6 >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[7] = s7 >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[8] = s8 >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[9] = s9 >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[10] = s10 >> 21 s11 += carry[10] s10 -= carry[10] << 21 s[0] = byte(s0 >> 0) s[1] = byte(s0 >> 8) s[2] = byte((s0 >> 16) | (s1 << 5)) s[3] = byte(s1 >> 3) s[4] = byte(s1 >> 11) s[5] = byte((s1 >> 19) | (s2 << 2)) s[6] = byte(s2 >> 6) s[7] = byte((s2 >> 14) | (s3 << 7)) s[8] = byte(s3 >> 1) s[9] = byte(s3 >> 9) s[10] = byte((s3 >> 17) | (s4 << 4)) s[11] = byte(s4 >> 4) s[12] = byte(s4 >> 12) s[13] = byte((s4 >> 20) | (s5 << 1)) s[14] = byte(s5 >> 7) s[15] = byte((s5 >> 15) | (s6 << 6)) s[16] = byte(s6 >> 2) s[17] = byte(s6 >> 10) s[18] = byte((s6 >> 18) | (s7 << 3)) s[19] = byte(s7 >> 5) s[20] = byte(s7 >> 13) s[21] = byte(s8 >> 0) s[22] = byte(s8 >> 8) s[23] = byte((s8 >> 16) | (s9 << 5)) s[24] = byte(s9 >> 3) s[25] = byte(s9 >> 11) s[26] = byte((s9 >> 19) | (s10 << 2)) s[27] = byte(s10 >> 6) s[28] = byte((s10 >> 14) | (s11 << 7)) s[29] = byte(s11 >> 1) s[30] = byte(s11 >> 9) s[31] = byte(s11 >> 17) } // Input: // 
s[0]+256*s[1]+...+256^63*s[63] = s // // Output: // s[0]+256*s[1]+...+256^31*s[31] = s mod l // where l = 2^252 + 27742317777372353535851937790883648493. func ScReduce(out *[32]byte, s *[64]byte) { s0 := 2097151 & load3(s[:]) s1 := 2097151 & (load4(s[2:]) >> 5) s2 := 2097151 & (load3(s[5:]) >> 2) s3 := 2097151 & (load4(s[7:]) >> 7) s4 := 2097151 & (load4(s[10:]) >> 4) s5 := 2097151 & (load3(s[13:]) >> 1) s6 := 2097151 & (load4(s[15:]) >> 6) s7 := 2097151 & (load3(s[18:]) >> 3) s8 := 2097151 & load3(s[21:]) s9 := 2097151 & (load4(s[23:]) >> 5) s10 := 2097151 & (load3(s[26:]) >> 2) s11 := 2097151 & (load4(s[28:]) >> 7) s12 := 2097151 & (load4(s[31:]) >> 4) s13 := 2097151 & (load3(s[34:]) >> 1) s14 := 2097151 & (load4(s[36:]) >> 6) s15 := 2097151 & (load3(s[39:]) >> 3) s16 := 2097151 & load3(s[42:]) s17 := 2097151 & (load4(s[44:]) >> 5) s18 := 2097151 & (load3(s[47:]) >> 2) s19 := 2097151 & (load4(s[49:]) >> 7) s20 := 2097151 & (load4(s[52:]) >> 4) s21 := 2097151 & (load3(s[55:]) >> 1) s22 := 2097151 & (load4(s[57:]) >> 6) s23 := (load4(s[60:]) >> 3) s11 += s23 * 666643 s12 += s23 * 470296 s13 += s23 * 654183 s14 -= s23 * 997805 s15 += s23 * 136657 s16 -= s23 * 683901 s23 = 0 s10 += s22 * 666643 s11 += s22 * 470296 s12 += s22 * 654183 s13 -= s22 * 997805 s14 += s22 * 136657 s15 -= s22 * 683901 s22 = 0 s9 += s21 * 666643 s10 += s21 * 470296 s11 += s21 * 654183 s12 -= s21 * 997805 s13 += s21 * 136657 s14 -= s21 * 683901 s21 = 0 s8 += s20 * 666643 s9 += s20 * 470296 s10 += s20 * 654183 s11 -= s20 * 997805 s12 += s20 * 136657 s13 -= s20 * 683901 s20 = 0 s7 += s19 * 666643 s8 += s19 * 470296 s9 += s19 * 654183 s10 -= s19 * 997805 s11 += s19 * 136657 s12 -= s19 * 683901 s19 = 0 s6 += s18 * 666643 s7 += s18 * 470296 s8 += s18 * 654183 s9 -= s18 * 997805 s10 += s18 * 136657 s11 -= s18 * 683901 s18 = 0 var carry [17]int64 carry[6] = (s6 + (1 << 20)) >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[8] = (s8 + (1 << 20)) >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[10] = (s10 
+ (1 << 20)) >> 21 s11 += carry[10] s10 -= carry[10] << 21 carry[12] = (s12 + (1 << 20)) >> 21 s13 += carry[12] s12 -= carry[12] << 21 carry[14] = (s14 + (1 << 20)) >> 21 s15 += carry[14] s14 -= carry[14] << 21 carry[16] = (s16 + (1 << 20)) >> 21 s17 += carry[16] s16 -= carry[16] << 21 carry[7] = (s7 + (1 << 20)) >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[9] = (s9 + (1 << 20)) >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[11] = (s11 + (1 << 20)) >> 21 s12 += carry[11] s11 -= carry[11] << 21 carry[13] = (s13 + (1 << 20)) >> 21 s14 += carry[13] s13 -= carry[13] << 21 carry[15] = (s15 + (1 << 20)) >> 21 s16 += carry[15] s15 -= carry[15] << 21 s5 += s17 * 666643 s6 += s17 * 470296 s7 += s17 * 654183 s8 -= s17 * 997805 s9 += s17 * 136657 s10 -= s17 * 683901 s17 = 0 s4 += s16 * 666643 s5 += s16 * 470296 s6 += s16 * 654183 s7 -= s16 * 997805 s8 += s16 * 136657 s9 -= s16 * 683901 s16 = 0 s3 += s15 * 666643 s4 += s15 * 470296 s5 += s15 * 654183 s6 -= s15 * 997805 s7 += s15 * 136657 s8 -= s15 * 683901 s15 = 0 s2 += s14 * 666643 s3 += s14 * 470296 s4 += s14 * 654183 s5 -= s14 * 997805 s6 += s14 * 136657 s7 -= s14 * 683901 s14 = 0 s1 += s13 * 666643 s2 += s13 * 470296 s3 += s13 * 654183 s4 -= s13 * 997805 s5 += s13 * 136657 s6 -= s13 * 683901 s13 = 0 s0 += s12 * 666643 s1 += s12 * 470296 s2 += s12 * 654183 s3 -= s12 * 997805 s4 += s12 * 136657 s5 -= s12 * 683901 s12 = 0 carry[0] = (s0 + (1 << 20)) >> 21 s1 += carry[0] s0 -= carry[0] << 21 carry[2] = (s2 + (1 << 20)) >> 21 s3 += carry[2] s2 -= carry[2] << 21 carry[4] = (s4 + (1 << 20)) >> 21 s5 += carry[4] s4 -= carry[4] << 21 carry[6] = (s6 + (1 << 20)) >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[8] = (s8 + (1 << 20)) >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[10] = (s10 + (1 << 20)) >> 21 s11 += carry[10] s10 -= carry[10] << 21 carry[1] = (s1 + (1 << 20)) >> 21 s2 += carry[1] s1 -= carry[1] << 21 carry[3] = (s3 + (1 << 20)) >> 21 s4 += carry[3] s3 -= carry[3] << 21 carry[5] = (s5 + (1 << 20)) >> 21 s6 += 
carry[5] s5 -= carry[5] << 21 carry[7] = (s7 + (1 << 20)) >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[9] = (s9 + (1 << 20)) >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[11] = (s11 + (1 << 20)) >> 21 s12 += carry[11] s11 -= carry[11] << 21 s0 += s12 * 666643 s1 += s12 * 470296 s2 += s12 * 654183 s3 -= s12 * 997805 s4 += s12 * 136657 s5 -= s12 * 683901 s12 = 0 carry[0] = s0 >> 21 s1 += carry[0] s0 -= carry[0] << 21 carry[1] = s1 >> 21 s2 += carry[1] s1 -= carry[1] << 21 carry[2] = s2 >> 21 s3 += carry[2] s2 -= carry[2] << 21 carry[3] = s3 >> 21 s4 += carry[3] s3 -= carry[3] << 21 carry[4] = s4 >> 21 s5 += carry[4] s4 -= carry[4] << 21 carry[5] = s5 >> 21 s6 += carry[5] s5 -= carry[5] << 21 carry[6] = s6 >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[7] = s7 >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[8] = s8 >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[9] = s9 >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[10] = s10 >> 21 s11 += carry[10] s10 -= carry[10] << 21 carry[11] = s11 >> 21 s12 += carry[11] s11 -= carry[11] << 21 s0 += s12 * 666643 s1 += s12 * 470296 s2 += s12 * 654183 s3 -= s12 * 997805 s4 += s12 * 136657 s5 -= s12 * 683901 s12 = 0 carry[0] = s0 >> 21 s1 += carry[0] s0 -= carry[0] << 21 carry[1] = s1 >> 21 s2 += carry[1] s1 -= carry[1] << 21 carry[2] = s2 >> 21 s3 += carry[2] s2 -= carry[2] << 21 carry[3] = s3 >> 21 s4 += carry[3] s3 -= carry[3] << 21 carry[4] = s4 >> 21 s5 += carry[4] s4 -= carry[4] << 21 carry[5] = s5 >> 21 s6 += carry[5] s5 -= carry[5] << 21 carry[6] = s6 >> 21 s7 += carry[6] s6 -= carry[6] << 21 carry[7] = s7 >> 21 s8 += carry[7] s7 -= carry[7] << 21 carry[8] = s8 >> 21 s9 += carry[8] s8 -= carry[8] << 21 carry[9] = s9 >> 21 s10 += carry[9] s9 -= carry[9] << 21 carry[10] = s10 >> 21 s11 += carry[10] s10 -= carry[10] << 21 out[0] = byte(s0 >> 0) out[1] = byte(s0 >> 8) out[2] = byte((s0 >> 16) | (s1 << 5)) out[3] = byte(s1 >> 3) out[4] = byte(s1 >> 11) out[5] = byte((s1 >> 19) | (s2 << 2)) out[6] = byte(s2 >> 6) out[7] = 
byte((s2 >> 14) | (s3 << 7)) out[8] = byte(s3 >> 1) out[9] = byte(s3 >> 9) out[10] = byte((s3 >> 17) | (s4 << 4)) out[11] = byte(s4 >> 4) out[12] = byte(s4 >> 12) out[13] = byte((s4 >> 20) | (s5 << 1)) out[14] = byte(s5 >> 7) out[15] = byte((s5 >> 15) | (s6 << 6)) out[16] = byte(s6 >> 2) out[17] = byte(s6 >> 10) out[18] = byte((s6 >> 18) | (s7 << 3)) out[19] = byte(s7 >> 5) out[20] = byte(s7 >> 13) out[21] = byte(s8 >> 0) out[22] = byte(s8 >> 8) out[23] = byte((s8 >> 16) | (s9 << 5)) out[24] = byte(s9 >> 3) out[25] = byte(s9 >> 11) out[26] = byte((s9 >> 19) | (s10 << 2)) out[27] = byte(s10 >> 6) out[28] = byte((s10 >> 14) | (s11 << 7)) out[29] = byte(s11 >> 1) out[30] = byte(s11 >> 9) out[31] = byte(s11 >> 17) } docker-1.10.3/vendor/src/github.com/armon/000077500000000000000000000000001267010174400203045ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/armon/go-metrics/000077500000000000000000000000001267010174400223555ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/armon/go-metrics/.gitignore000077500000000000000000000003741267010174400243540ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe docker-1.10.3/vendor/src/github.com/armon/go-metrics/LICENSE000066400000000000000000000020671267010174400233670ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2013 Armon Dadgar Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The 
above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. docker-1.10.3/vendor/src/github.com/armon/go-metrics/README.md000066400000000000000000000045571267010174400236470ustar00rootroot00000000000000go-metrics ========== This library provides a `metrics` package which can be used to instrument code, expose application metrics, and profile runtime performance in a flexible manner. Sinks ===== The `metrics` package makes use of a `MetricSink` interface to support delivery to any type of backend. Currently the following sinks are provided: * StatsiteSink : Sinks to a statsite instance (TCP) * StatsdSink: Sinks to a statsd / statsite instance (UDP) * InmemSink : Provides in-memory aggregation, can be used to export stats * FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. * BlackholeSink : Sinks to nowhere In addition to the sinks, the `InmemSignal` can be used to catch a signal, and dump a formatted output of recent metrics. For example, when a process gets a SIGUSR1, it can dump to stderr recent performance metrics for debugging. 
Examples ======== Here is an example of using the package: func SlowMethod() { // Profiling the runtime of a method defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) } // Configure a statsite sink as the global metrics sink sink, _ := metrics.NewStatsiteSink("statsite:8125") metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) // Emit a Key/Value pair metrics.EmitKey([]string{"questions", "meaning of life"}, 42) Here is an example of setting up an signal handler: // Setup the inmem sink and signal handler inm := NewInmemSink(10*time.Second, time.Minute) sig := DefaultInmemSignal(inm) metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) // Run some code inm.SetGauge([]string{"foo"}, 42) inm.EmitKey([]string{"bar"}, 30) inm.IncrCounter([]string{"baz"}, 42) inm.IncrCounter([]string{"baz"}, 1) inm.IncrCounter([]string{"baz"}, 80) inm.AddSample([]string{"method", "wow"}, 42) inm.AddSample([]string{"method", "wow"}, 100) inm.AddSample([]string{"method", "wow"}, 22) .... 
When a signal comes in, output like the following will be dumped to stderr: [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 docker-1.10.3/vendor/src/github.com/armon/go-metrics/const_unix.go000066400000000000000000000002311267010174400250710ustar00rootroot00000000000000// +build !windows package metrics import ( "syscall" ) const ( // DefaultSignal is used with DefaultInmemSignal DefaultSignal = syscall.SIGUSR1 ) docker-1.10.3/vendor/src/github.com/armon/go-metrics/const_windows.go000066400000000000000000000003041267010174400256010ustar00rootroot00000000000000// +build windows package metrics import ( "syscall" ) const ( // DefaultSignal is used with DefaultInmemSignal // Windows has no SIGUSR1, use SIGBREAK DefaultSignal = syscall.Signal(21) ) docker-1.10.3/vendor/src/github.com/armon/go-metrics/inmem.go000066400000000000000000000132031267010174400240100ustar00rootroot00000000000000package metrics import ( "fmt" "math" "strings" "sync" "time" ) // InmemSink provides a MetricSink that does in-memory aggregation // without sending metrics over a network. It can be embedded within // an application to provide profiling information. type InmemSink struct { // How long is each aggregation interval interval time.Duration // Retain controls how many metrics interval we keep retain time.Duration // maxIntervals is the maximum length of intervals. // It is retain / interval. 
maxIntervals int // intervals is a slice of the retained intervals intervals []*IntervalMetrics intervalLock sync.RWMutex } // IntervalMetrics stores the aggregated metrics // for a specific interval type IntervalMetrics struct { sync.RWMutex // The start time of the interval Interval time.Time // Gauges maps the key to the last set value Gauges map[string]float32 // Points maps the string to the list of emitted values // from EmitKey Points map[string][]float32 // Counters maps the string key to a sum of the counter // values Counters map[string]*AggregateSample // Samples maps the key to an AggregateSample, // which has the rolled up view of a sample Samples map[string]*AggregateSample } // NewIntervalMetrics creates a new IntervalMetrics for a given interval func NewIntervalMetrics(intv time.Time) *IntervalMetrics { return &IntervalMetrics{ Interval: intv, Gauges: make(map[string]float32), Points: make(map[string][]float32), Counters: make(map[string]*AggregateSample), Samples: make(map[string]*AggregateSample), } } // AggregateSample is used to hold aggregate metrics // about a sample type AggregateSample struct { Count int // The count of emitted pairs Sum float64 // The sum of values SumSq float64 // The sum of squared values Min float64 // Minimum value Max float64 // Maximum value } // Computes a Stddev of the values func (a *AggregateSample) Stddev() float64 { num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) div := float64(a.Count * (a.Count - 1)) if div == 0 { return 0 } return math.Sqrt(num / div) } // Computes a mean of the values func (a *AggregateSample) Mean() float64 { if a.Count == 0 { return 0 } return a.Sum / float64(a.Count) } // Ingest is used to update a sample func (a *AggregateSample) Ingest(v float64) { a.Count++ a.Sum += v a.SumSq += (v * v) if v < a.Min || a.Count == 1 { a.Min = v } if v > a.Max || a.Count == 1 { a.Max = v } } func (a *AggregateSample) String() string { if a.Count == 0 { return "Count: 0" } else if a.Stddev() == 0 
{ return fmt.Sprintf("Count: %d Sum: %0.3f", a.Count, a.Sum) } else { return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f", a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum) } } // NewInmemSink is used to construct a new in-memory sink. // Uses an aggregation interval and maximum retention period. func NewInmemSink(interval, retain time.Duration) *InmemSink { i := &InmemSink{ interval: interval, retain: retain, maxIntervals: int(retain / interval), } i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) return i } func (i *InmemSink) SetGauge(key []string, val float32) { k := i.flattenKey(key) intv := i.getInterval() intv.Lock() defer intv.Unlock() intv.Gauges[k] = val } func (i *InmemSink) EmitKey(key []string, val float32) { k := i.flattenKey(key) intv := i.getInterval() intv.Lock() defer intv.Unlock() vals := intv.Points[k] intv.Points[k] = append(vals, val) } func (i *InmemSink) IncrCounter(key []string, val float32) { k := i.flattenKey(key) intv := i.getInterval() intv.Lock() defer intv.Unlock() agg := intv.Counters[k] if agg == nil { agg = &AggregateSample{} intv.Counters[k] = agg } agg.Ingest(float64(val)) } func (i *InmemSink) AddSample(key []string, val float32) { k := i.flattenKey(key) intv := i.getInterval() intv.Lock() defer intv.Unlock() agg := intv.Samples[k] if agg == nil { agg = &AggregateSample{} intv.Samples[k] = agg } agg.Ingest(float64(val)) } // Data is used to retrieve all the aggregated metrics // Intervals may be in use, and a read lock should be acquired func (i *InmemSink) Data() []*IntervalMetrics { // Get the current interval, forces creation i.getInterval() i.intervalLock.RLock() defer i.intervalLock.RUnlock() intervals := make([]*IntervalMetrics, len(i.intervals)) copy(intervals, i.intervals) return intervals } func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { i.intervalLock.RLock() defer i.intervalLock.RUnlock() n := len(i.intervals) if n > 0 && 
i.intervals[n-1].Interval == intv { return i.intervals[n-1] } return nil } func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics { i.intervalLock.Lock() defer i.intervalLock.Unlock() // Check for an existing interval n := len(i.intervals) if n > 0 && i.intervals[n-1].Interval == intv { return i.intervals[n-1] } // Add the current interval current := NewIntervalMetrics(intv) i.intervals = append(i.intervals, current) n++ // Truncate the intervals if they are too long if n >= i.maxIntervals { copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) i.intervals = i.intervals[:i.maxIntervals] } return current } // getInterval returns the current interval to write to func (i *InmemSink) getInterval() *IntervalMetrics { intv := time.Now().Truncate(i.interval) if m := i.getExistingInterval(intv); m != nil { return m } return i.createInterval(intv) } // Flattens the key for formatting, removes spaces func (i *InmemSink) flattenKey(parts []string) string { joined := strings.Join(parts, ".") return strings.Replace(joined, " ", "_", -1) } docker-1.10.3/vendor/src/github.com/armon/go-metrics/inmem_signal.go000066400000000000000000000043311267010174400253470ustar00rootroot00000000000000package metrics import ( "bytes" "fmt" "io" "os" "os/signal" "sync" "syscall" ) // InmemSignal is used to listen for a given signal, and when received, // to dump the current metrics from the InmemSink to an io.Writer type InmemSignal struct { signal syscall.Signal inm *InmemSink w io.Writer sigCh chan os.Signal stop bool stopCh chan struct{} stopLock sync.Mutex } // NewInmemSignal creates a new InmemSignal which listens for a given signal, // and dumps the current metrics out to a writer func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { i := &InmemSignal{ signal: sig, inm: inmem, w: w, sigCh: make(chan os.Signal, 1), stopCh: make(chan struct{}), } signal.Notify(i.sigCh, sig) go i.run() return i } // DefaultInmemSignal returns a new InmemSignal that 
responds to SIGUSR1 // and writes output to stderr. Windows uses SIGBREAK func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { return NewInmemSignal(inmem, DefaultSignal, os.Stderr) } // Stop is used to stop the InmemSignal from listening func (i *InmemSignal) Stop() { i.stopLock.Lock() defer i.stopLock.Unlock() if i.stop { return } i.stop = true close(i.stopCh) signal.Stop(i.sigCh) } // run is a long running routine that handles signals func (i *InmemSignal) run() { for { select { case <-i.sigCh: i.dumpStats() case <-i.stopCh: return } } } // dumpStats is used to dump the data to output writer func (i *InmemSignal) dumpStats() { buf := bytes.NewBuffer(nil) data := i.inm.Data() // Skip the last period which is still being aggregated for i := 0; i < len(data)-1; i++ { intv := data[i] intv.RLock() for name, val := range intv.Gauges { fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val) } for name, vals := range intv.Points { for _, val := range vals { fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) } } for name, agg := range intv.Counters { fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg) } for name, agg := range intv.Samples { fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg) } intv.RUnlock() } // Write out the bytes i.w.Write(buf.Bytes()) } docker-1.10.3/vendor/src/github.com/armon/go-metrics/metrics.go000077500000000000000000000054441267010174400243640ustar00rootroot00000000000000package metrics import ( "runtime" "time" ) func (m *Metrics) SetGauge(key []string, val float32) { if m.HostName != "" && m.EnableHostname { key = insert(0, m.HostName, key) } if m.EnableTypePrefix { key = insert(0, "gauge", key) } if m.ServiceName != "" { key = insert(0, m.ServiceName, key) } m.sink.SetGauge(key, val) } func (m *Metrics) EmitKey(key []string, val float32) { if m.EnableTypePrefix { key = insert(0, "kv", key) } if m.ServiceName != "" { key = insert(0, m.ServiceName, key) } m.sink.EmitKey(key, val) } 
func (m *Metrics) IncrCounter(key []string, val float32) { if m.EnableTypePrefix { key = insert(0, "counter", key) } if m.ServiceName != "" { key = insert(0, m.ServiceName, key) } m.sink.IncrCounter(key, val) } func (m *Metrics) AddSample(key []string, val float32) { if m.EnableTypePrefix { key = insert(0, "sample", key) } if m.ServiceName != "" { key = insert(0, m.ServiceName, key) } m.sink.AddSample(key, val) } func (m *Metrics) MeasureSince(key []string, start time.Time) { if m.EnableTypePrefix { key = insert(0, "timer", key) } if m.ServiceName != "" { key = insert(0, m.ServiceName, key) } now := time.Now() elapsed := now.Sub(start) msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) m.sink.AddSample(key, msec) } // Periodically collects runtime stats to publish func (m *Metrics) collectStats() { for { time.Sleep(m.ProfileInterval) m.emitRuntimeStats() } } // Emits various runtime statsitics func (m *Metrics) emitRuntimeStats() { // Export number of Goroutines numRoutines := runtime.NumGoroutine() m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) // Export memory stats var stats runtime.MemStats runtime.ReadMemStats(&stats) m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) // Export info about the last few GC runs num := stats.NumGC // Handle wrap around if num < m.lastNumGC { m.lastNumGC = 0 } // Ensure we don't scan more than 256 if num-m.lastNumGC >= 256 { m.lastNumGC = num - 255 } for i := m.lastNumGC; i < num; i++ { pause := stats.PauseNs[i%256] 
m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) } m.lastNumGC = num } // Inserts a string value at an index into the slice func insert(i int, v string, s []string) []string { s = append(s, "") copy(s[i+1:], s[i:]) s[i] = v return s } docker-1.10.3/vendor/src/github.com/armon/go-metrics/sink.go000077500000000000000000000025511267010174400236560ustar00rootroot00000000000000package metrics // The MetricSink interface is used to transmit metrics information // to an external system type MetricSink interface { // A Gauge should retain the last value it is set to SetGauge(key []string, val float32) // Should emit a Key/Value pair for each call EmitKey(key []string, val float32) // Counters should accumulate values IncrCounter(key []string, val float32) // Samples are for timing information, where quantiles are used AddSample(key []string, val float32) } // BlackholeSink is used to just blackhole messages type BlackholeSink struct{} func (*BlackholeSink) SetGauge(key []string, val float32) {} func (*BlackholeSink) EmitKey(key []string, val float32) {} func (*BlackholeSink) IncrCounter(key []string, val float32) {} func (*BlackholeSink) AddSample(key []string, val float32) {} // FanoutSink is used to sink to fanout values to multiple sinks type FanoutSink []MetricSink func (fh FanoutSink) SetGauge(key []string, val float32) { for _, s := range fh { s.SetGauge(key, val) } } func (fh FanoutSink) EmitKey(key []string, val float32) { for _, s := range fh { s.EmitKey(key, val) } } func (fh FanoutSink) IncrCounter(key []string, val float32) { for _, s := range fh { s.IncrCounter(key, val) } } func (fh FanoutSink) AddSample(key []string, val float32) { for _, s := range fh { s.AddSample(key, val) } } docker-1.10.3/vendor/src/github.com/armon/go-metrics/start.go000077500000000000000000000052711267010174400240510ustar00rootroot00000000000000package metrics import ( "os" "time" ) // Config is used to configure metrics settings type Config struct { ServiceName string 
// Prefixed with keys to seperate services HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname EnableHostname bool // Enable prefixing gauge values with hostname EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") TimerGranularity time.Duration // Granularity of timers. ProfileInterval time.Duration // Interval to profile runtime metrics } // Metrics represents an instance of a metrics sink that can // be used to emit type Metrics struct { Config lastNumGC uint32 sink MetricSink } // Shared global metrics instance var globalMetrics *Metrics func init() { // Initialize to a blackhole sink to avoid errors globalMetrics = &Metrics{sink: &BlackholeSink{}} } // DefaultConfig provides a sane default configuration func DefaultConfig(serviceName string) *Config { c := &Config{ ServiceName: serviceName, // Use client provided service HostName: "", EnableHostname: true, // Enable hostname prefix EnableRuntimeMetrics: true, // Enable runtime profiling EnableTypePrefix: false, // Disable type prefix TimerGranularity: time.Millisecond, // Timers are in milliseconds ProfileInterval: time.Second, // Poll runtime every second } // Try to get the hostname name, _ := os.Hostname() c.HostName = name return c } // New is used to create a new instance of Metrics func New(conf *Config, sink MetricSink) (*Metrics, error) { met := &Metrics{} met.Config = *conf met.sink = sink // Start the runtime collector if conf.EnableRuntimeMetrics { go met.collectStats() } return met, nil } // NewGlobal is the same as New, but it assigns the metrics object to be // used globally as well as returning it. 
func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { metrics, err := New(conf, sink) if err == nil { globalMetrics = metrics } return metrics, err } // Proxy all the methods to the globalMetrics instance func SetGauge(key []string, val float32) { globalMetrics.SetGauge(key, val) } func EmitKey(key []string, val float32) { globalMetrics.EmitKey(key, val) } func IncrCounter(key []string, val float32) { globalMetrics.IncrCounter(key, val) } func AddSample(key []string, val float32) { globalMetrics.AddSample(key, val) } func MeasureSince(key []string, start time.Time) { globalMetrics.MeasureSince(key, start) } docker-1.10.3/vendor/src/github.com/armon/go-metrics/statsd.go000066400000000000000000000060251267010174400242110ustar00rootroot00000000000000package metrics import ( "bytes" "fmt" "log" "net" "strings" "time" ) const ( // statsdMaxLen is the maximum size of a packet // to send to statsd statsdMaxLen = 1400 ) // StatsdSink provides a MetricSink that can be used // with a statsite or statsd metrics server. It uses // only UDP packets, while StatsiteSink uses TCP. 
type StatsdSink struct { addr string metricQueue chan string } // NewStatsdSink is used to create a new StatsdSink func NewStatsdSink(addr string) (*StatsdSink, error) { s := &StatsdSink{ addr: addr, metricQueue: make(chan string, 4096), } go s.flushMetrics() return s, nil } // Close is used to stop flushing to statsd func (s *StatsdSink) Shutdown() { close(s.metricQueue) } func (s *StatsdSink) SetGauge(key []string, val float32) { flatKey := s.flattenKey(key) s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) } func (s *StatsdSink) EmitKey(key []string, val float32) { flatKey := s.flattenKey(key) s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) } func (s *StatsdSink) IncrCounter(key []string, val float32) { flatKey := s.flattenKey(key) s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) } func (s *StatsdSink) AddSample(key []string, val float32) { flatKey := s.flattenKey(key) s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) } // Flattens the key for formatting, removes spaces func (s *StatsdSink) flattenKey(parts []string) string { joined := strings.Join(parts, ".") return strings.Map(func(r rune) rune { switch r { case ':': fallthrough case ' ': return '_' default: return r } }, joined) } // Does a non-blocking push to the metrics queue func (s *StatsdSink) pushMetric(m string) { select { case s.metricQueue <- m: default: } } // Flushes metrics func (s *StatsdSink) flushMetrics() { var sock net.Conn var err error var wait <-chan time.Time ticker := time.NewTicker(flushInterval) defer ticker.Stop() CONNECT: // Create a buffer buf := bytes.NewBuffer(nil) // Attempt to connect sock, err = net.Dial("udp", s.addr) if err != nil { log.Printf("[ERR] Error connecting to statsd! 
Err: %s", err) goto WAIT } for { select { case metric, ok := <-s.metricQueue: // Get a metric from the queue if !ok { goto QUIT } // Check if this would overflow the packet size if len(metric)+buf.Len() > statsdMaxLen { _, err := sock.Write(buf.Bytes()) buf.Reset() if err != nil { log.Printf("[ERR] Error writing to statsd! Err: %s", err) goto WAIT } } // Append to the buffer buf.WriteString(metric) case <-ticker.C: if buf.Len() == 0 { continue } _, err := sock.Write(buf.Bytes()) buf.Reset() if err != nil { log.Printf("[ERR] Error flushing to statsd! Err: %s", err) goto WAIT } } } WAIT: // Wait for a while wait = time.After(time.Duration(5) * time.Second) for { select { // Dequeue the messages to avoid backlog case _, ok := <-s.metricQueue: if !ok { goto QUIT } case <-wait: goto CONNECT } } QUIT: s.metricQueue = nil } docker-1.10.3/vendor/src/github.com/armon/go-metrics/statsite.go000077500000000000000000000056421267010174400245560ustar00rootroot00000000000000package metrics import ( "bufio" "fmt" "log" "net" "strings" "time" ) const ( // We force flush the statsite metrics after this period of // inactivity. Prevents stats from getting stuck in a buffer // forever. 
flushInterval = 100 * time.Millisecond ) // StatsiteSink provides a MetricSink that can be used with a // statsite metrics server type StatsiteSink struct { addr string metricQueue chan string } // NewStatsiteSink is used to create a new StatsiteSink func NewStatsiteSink(addr string) (*StatsiteSink, error) { s := &StatsiteSink{ addr: addr, metricQueue: make(chan string, 4096), } go s.flushMetrics() return s, nil } // Close is used to stop flushing to statsite func (s *StatsiteSink) Shutdown() { close(s.metricQueue) } func (s *StatsiteSink) SetGauge(key []string, val float32) { flatKey := s.flattenKey(key) s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) } func (s *StatsiteSink) EmitKey(key []string, val float32) { flatKey := s.flattenKey(key) s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) } func (s *StatsiteSink) IncrCounter(key []string, val float32) { flatKey := s.flattenKey(key) s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) } func (s *StatsiteSink) AddSample(key []string, val float32) { flatKey := s.flattenKey(key) s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) } // Flattens the key for formatting, removes spaces func (s *StatsiteSink) flattenKey(parts []string) string { joined := strings.Join(parts, ".") return strings.Map(func(r rune) rune { switch r { case ':': fallthrough case ' ': return '_' default: return r } }, joined) } // Does a non-blocking push to the metrics queue func (s *StatsiteSink) pushMetric(m string) { select { case s.metricQueue <- m: default: } } // Flushes metrics func (s *StatsiteSink) flushMetrics() { var sock net.Conn var err error var wait <-chan time.Time var buffered *bufio.Writer ticker := time.NewTicker(flushInterval) defer ticker.Stop() CONNECT: // Attempt to connect sock, err = net.Dial("tcp", s.addr) if err != nil { log.Printf("[ERR] Error connecting to statsite! 
Err: %s", err) goto WAIT } // Create a buffered writer buffered = bufio.NewWriter(sock) for { select { case metric, ok := <-s.metricQueue: // Get a metric from the queue if !ok { goto QUIT } // Try to send to statsite _, err := buffered.Write([]byte(metric)) if err != nil { log.Printf("[ERR] Error writing to statsite! Err: %s", err) goto WAIT } case <-ticker.C: if err := buffered.Flush(); err != nil { log.Printf("[ERR] Error flushing to statsite! Err: %s", err) goto WAIT } } } WAIT: // Wait for a while wait = time.After(time.Duration(5) * time.Second) for { select { // Dequeue the messages to avoid backlog case _, ok := <-s.metricQueue: if !ok { goto QUIT } case <-wait: goto CONNECT } } QUIT: s.metricQueue = nil } docker-1.10.3/vendor/src/github.com/aws/000077500000000000000000000000001267010174400177625ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/000077500000000000000000000000001267010174400217365ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt000066400000000000000000000261361267010174400235710ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/000077500000000000000000000000001267010174400225305ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/000077500000000000000000000000001267010174400240335ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go000066400000000000000000000067151267010174400255240ustar00rootroot00000000000000// Package awserr represents API error interface accessors for the SDK. package awserr // An Error wraps lower level errors with code, message and an original error. // The underlying concrete error type may also satisfy other interfaces which // can be to used to obtain more specific information about the error. // // Calling Error() or String() will always include the full information about // an error based on its underlying type. // // Example: // // output, err := s3manage.Upload(svc, input, opts) // if err != nil { // if awsErr, ok := err.(awserr.Error); ok { // // Get error details // log.Println("Error:", err.Code(), err.Message()) // // // Prints out full error message, including original error if there was one. // log.Println("Error:", err.Error()) // // // Get original error // if origErr := err.Err(); origErr != nil { // // operate on original error. // } // } else { // fmt.Println(err.Error()) // } // } // type Error interface { // Satisfy the generic error interface. 
error // Returns the short phrase depicting the classification of the error. Code() string // Returns the error details message. Message() string // Returns the original error if one was set. Nil is returned if not set. OrigErr() error } // New returns an Error object described by the code, message, and origErr. // // If origErr satisfies the Error interface it will not be wrapped within a new // Error object and will instead be returned. func New(code, message string, origErr error) Error { if e, ok := origErr.(Error); ok && e != nil { return e } return newBaseError(code, message, origErr) } // A RequestFailure is an interface to extract request failure information from // an Error such as the request ID of the failed request returned by a service. // RequestFailures may not always have a requestID value if the request failed // prior to reaching the service such as a connection error. // // Example: // // output, err := s3manage.Upload(svc, input, opts) // if err != nil { // if reqerr, ok := err.(RequestFailure); ok { // log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) // } else { // log.Printf("Error:", err.Error() // } // } // // Combined with awserr.Error: // // output, err := s3manage.Upload(svc, input, opts) // if err != nil { // if awsErr, ok := err.(awserr.Error); ok { // // Generic AWS Error with Code, Message, and original error (if any) // fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) // // if reqErr, ok := err.(awserr.RequestFailure); ok { // // A service error occurred // fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) // } // } else { // fmt.Println(err.Error()) // } // } // type RequestFailure interface { Error // The status code of the HTTP response. StatusCode() int // The request ID returned by the service for a request failure. This will // be empty if no request ID is available such as the request failed due // to a connection error. 
RequestID() string } // NewRequestFailure returns a new request error wrapper for the given Error // provided. func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { return newRequestError(err, statusCode, reqID) } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go000066400000000000000000000075211267010174400255330ustar00rootroot00000000000000package awserr import "fmt" // SprintError returns a string of the formatted error code. // // Both extra and origErr are optional. If they are included their lines // will be added, but if they are not included their lines will be ignored. func SprintError(code, message, extra string, origErr error) string { msg := fmt.Sprintf("%s: %s", code, message) if extra != "" { msg = fmt.Sprintf("%s\n\t%s", msg, extra) } if origErr != nil { msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) } return msg } // A baseError wraps the code and message which defines an error. It also // can be used to wrap an original error object. // // Should be used as the root for errors satisfying the awserr.Error. Also // for any error which does not fit into a specific error wrapper type. type baseError struct { // Classification of error code string // Detailed information about error message string // Optional original error this error is based off of. Allows building // chained errors. origErr error } // newBaseError returns an error object for the code, message, and err. // // code is a short no whitespace phrase depicting the classification of // the error that is being created. // // message is the free flow string containing detailed information about the error. // // origErr is the error object which will be nested under the new error to be returned. func newBaseError(code, message string, origErr error) *baseError { return &baseError{ code: code, message: message, origErr: origErr, } } // Error returns the string representation of the error. // // See ErrorWithExtra for formatting. 
// // Satisfies the error interface. func (b baseError) Error() string { return SprintError(b.code, b.message, "", b.origErr) } // String returns the string representation of the error. // Alias for Error to satisfy the stringer interface. func (b baseError) String() string { return b.Error() } // Code returns the short phrase depicting the classification of the error. func (b baseError) Code() string { return b.code } // Message returns the error details message. func (b baseError) Message() string { return b.message } // OrigErr returns the original error if one was set. Nil is returned if no error // was set. func (b baseError) OrigErr() error { return b.origErr } // So that the Error interface type can be included as an anonymous field // in the requestError struct and not conflict with the error.Error() method. type awsError Error // A requestError wraps a request or service error. // // Composed of baseError for code, message, and original error. type requestError struct { awsError statusCode int requestID string } // newRequestError returns a wrapped error with additional information for request // status code, and service requestID. // // Should be used to wrap all request which involve service requests. Even if // the request failed without a service response, but had an HTTP status code // that may be meaningful. // // Also wraps original errors via the baseError. func newRequestError(err Error, statusCode int, requestID string) *requestError { return &requestError{ awsError: err, statusCode: statusCode, requestID: requestID, } } // Error returns the string representation of the error. // Satisfies the error interface. func (r requestError) Error() string { extra := fmt.Sprintf("status code: %d, request id: %s", r.statusCode, r.requestID) return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) } // String returns the string representation of the error. // Alias for Error to satisfy the stringer interface. 
func (r requestError) String() string { return r.Error() } // StatusCode returns the wrapped status code for the error func (r requestError) StatusCode() int { return r.statusCode } // RequestID returns the wrapped requestID func (r requestError) RequestID() string { return r.requestID } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/000077500000000000000000000000001267010174400242205ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go000066400000000000000000000050271267010174400255250ustar00rootroot00000000000000package awsutil import ( "io" "reflect" ) // Copy deeply copies a src structure to dst. Useful for copying request and // response structures. // // Can copy between structs of different type, but will only copy fields which // are assignable, and exist in both structs. Fields which are not assignable, // or do not exist in both structs are ignored. func Copy(dst, src interface{}) { dstval := reflect.ValueOf(dst) if !dstval.IsValid() { panic("Copy dst cannot be nil") } rcopy(dstval, reflect.ValueOf(src), true) } // CopyOf returns a copy of src while also allocating the memory for dst. // src must be a pointer type or this operation will fail. func CopyOf(src interface{}) (dst interface{}) { dsti := reflect.New(reflect.TypeOf(src).Elem()) dst = dsti.Interface() rcopy(dsti, reflect.ValueOf(src), true) return } // rcopy performs a recursive copy of values from the source to destination. // // root is used to skip certain aspects of the copy which are not valid // for the root node of a object. 
func rcopy(dst, src reflect.Value, root bool) { if !src.IsValid() { return } switch src.Kind() { case reflect.Ptr: if _, ok := src.Interface().(io.Reader); ok { if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { dst.Elem().Set(src) } else if dst.CanSet() { dst.Set(src) } } else { e := src.Type().Elem() if dst.CanSet() && !src.IsNil() { dst.Set(reflect.New(e)) } if src.Elem().IsValid() { // Keep the current root state since the depth hasn't changed rcopy(dst.Elem(), src.Elem(), root) } } case reflect.Struct: if !root { dst.Set(reflect.New(src.Type()).Elem()) } t := dst.Type() for i := 0; i < t.NumField(); i++ { name := t.Field(i).Name srcval := src.FieldByName(name) if srcval.IsValid() { rcopy(dst.FieldByName(name), srcval, false) } } case reflect.Slice: if src.IsNil() { break } s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) dst.Set(s) for i := 0; i < src.Len(); i++ { rcopy(dst.Index(i), src.Index(i), false) } case reflect.Map: if src.IsNil() { break } s := reflect.MakeMap(src.Type()) dst.Set(s) for _, k := range src.MapKeys() { v := src.MapIndex(k) v2 := reflect.New(v.Type()).Elem() rcopy(v2, v, false) dst.SetMapIndex(k, v2) } default: // Assign the value if possible. If its not assignable, the value would // need to be converted and the impact of that may be unexpected, or is // not compatible with the dst type. if src.Type().AssignableTo(dst.Type()) { dst.Set(src) } } } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go000066400000000000000000000111231267010174400266750ustar00rootroot00000000000000package awsutil import ( "reflect" "regexp" "strconv" "strings" ) var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) // rValuesAtPath returns a slice of values found in value v. The values // in v are explored recursively so all nested values are collected. 
func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value { pathparts := strings.Split(path, "||") if len(pathparts) > 1 { for _, pathpart := range pathparts { vals := rValuesAtPath(v, pathpart, create, caseSensitive) if vals != nil && len(vals) > 0 { return vals } } return nil } values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} components := strings.Split(path, ".") for len(values) > 0 && len(components) > 0 { var index *int64 var indexStar bool c := strings.TrimSpace(components[0]) if c == "" { // no actual component, illegal syntax return nil } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { // TODO normalize case for user return nil // don't support unexported fields } // parse this component if m := indexRe.FindStringSubmatch(c); m != nil { c = m[1] if m[2] == "" { index = nil indexStar = true } else { i, _ := strconv.ParseInt(m[2], 10, 32) index = &i indexStar = false } } nextvals := []reflect.Value{} for _, value := range values { // pull component name out of struct member if value.Kind() != reflect.Struct { continue } if c == "*" { // pull all members for i := 0; i < value.NumField(); i++ { if f := reflect.Indirect(value.Field(i)); f.IsValid() { nextvals = append(nextvals, f) } } continue } value = value.FieldByNameFunc(func(name string) bool { if c == name { return true } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { return true } return false }) if create && value.Kind() == reflect.Ptr && value.IsNil() { value.Set(reflect.New(value.Type().Elem())) value = value.Elem() } else { value = reflect.Indirect(value) } if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { if !create && value.IsNil() { value = reflect.ValueOf(nil) } } if value.IsValid() { nextvals = append(nextvals, value) } } values = nextvals if indexStar || index != nil { nextvals = []reflect.Value{} for _, value := range values { value := reflect.Indirect(value) if value.Kind() != 
reflect.Slice { continue } if indexStar { // grab all indices for i := 0; i < value.Len(); i++ { idx := reflect.Indirect(value.Index(i)) if idx.IsValid() { nextvals = append(nextvals, idx) } } continue } // pull out index i := int(*index) if i >= value.Len() { // check out of bounds if create { // TODO resize slice } else { continue } } else if i < 0 { // support negative indexing i = value.Len() + i } value = reflect.Indirect(value.Index(i)) if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { if !create && value.IsNil() { value = reflect.ValueOf(nil) } } if value.IsValid() { nextvals = append(nextvals, value) } } values = nextvals } components = components[1:] } return values } // ValuesAtPath returns a list of objects at the lexical path inside of a structure func ValuesAtPath(i interface{}, path string) []interface{} { if rvals := rValuesAtPath(i, path, false, true); rvals != nil { vals := make([]interface{}, len(rvals)) for i, rval := range rvals { vals[i] = rval.Interface() } return vals } return nil } // ValuesAtAnyPath returns a list of objects at the case-insensitive lexical // path inside of a structure func ValuesAtAnyPath(i interface{}, path string) []interface{} { if rvals := rValuesAtPath(i, path, false, false); rvals != nil { vals := make([]interface{}, len(rvals)) for i, rval := range rvals { vals[i] = rval.Interface() } return vals } return nil } // SetValueAtPath sets an object at the lexical path inside of a structure func SetValueAtPath(i interface{}, path string, v interface{}) { if rvals := rValuesAtPath(i, path, true, true); rvals != nil { for _, rval := range rvals { rval.Set(reflect.ValueOf(v)) } } } // SetValueAtAnyPath sets an object at the case insensitive lexical path inside // of a structure func SetValueAtAnyPath(i interface{}, path string, v interface{}) { if rvals := rValuesAtPath(i, path, true, false); rvals != nil { for _, rval := range rvals { rval.Set(reflect.ValueOf(v)) } } } 
docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go000066400000000000000000000044161267010174400264220ustar00rootroot00000000000000package awsutil import ( "bytes" "fmt" "io" "reflect" "strings" ) // Prettify returns the string representation of a value. func Prettify(i interface{}) string { var buf bytes.Buffer prettify(reflect.ValueOf(i), 0, &buf) return buf.String() } // prettify will recursively walk value v to build a textual // representation of the value. func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { for v.Kind() == reflect.Ptr { v = v.Elem() } switch v.Kind() { case reflect.Struct: strtype := v.Type().String() if strtype == "time.Time" { fmt.Fprintf(buf, "%s", v.Interface()) break } else if strings.HasPrefix(strtype, "io.") { buf.WriteString("") break } buf.WriteString("{\n") names := []string{} for i := 0; i < v.Type().NumField(); i++ { name := v.Type().Field(i).Name f := v.Field(i) if name[0:1] == strings.ToLower(name[0:1]) { continue // ignore unexported fields } if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { continue // ignore unset fields } names = append(names, name) } for i, n := range names { val := v.FieldByName(n) buf.WriteString(strings.Repeat(" ", indent+2)) buf.WriteString(n + ": ") prettify(val, indent+2, buf) if i < len(names)-1 { buf.WriteString(",\n") } } buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") case reflect.Slice: nl, id, id2 := "", "", "" if v.Len() > 3 { nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) } buf.WriteString("[" + nl) for i := 0; i < v.Len(); i++ { buf.WriteString(id2) prettify(v.Index(i), indent+2, buf) if i < v.Len()-1 { buf.WriteString("," + nl) } } buf.WriteString(nl + id + "]") case reflect.Map: buf.WriteString("{\n") for i, k := range v.MapKeys() { buf.WriteString(strings.Repeat(" ", indent+2)) buf.WriteString(k.String() + ": ") prettify(v.MapIndex(k), indent+2, buf) if i < 
v.Len()-1 { buf.WriteString(",\n") } } buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") default: format := "%v" switch v.Interface().(type) { case string: format = "%q" case io.ReadSeeker, io.Reader: format = "buffer(%p)" } fmt.Fprintf(buf, format, v.Interface()) } } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/config.go000066400000000000000000000151221267010174400243250ustar00rootroot00000000000000package aws import ( "net/http" "time" "github.com/aws/aws-sdk-go/aws/credentials" ) // The default number of retries for a service. The value of -1 indicates that // the service specific retry default will be used. const DefaultRetries = -1 // A Config provides service configuration for service clients. By default, // all clients will use the {defaults.DefaultConfig} structure. type Config struct { // The credentials object to use when signing requests. Defaults to // {defaults.DefaultChainCredentials}. Credentials *credentials.Credentials // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default generated endpoint for a client. Set this // to `""` to use the default generated endpoint. // // @note You must still provide a `Region` value when specifying an // endpoint for a client. Endpoint *string // The region to send requests to. This parameter is required and must // be configured globally or on a per-client basis unless otherwise // noted. A full list of regions is found in the "Regions and Endpoints" // document. // // @see http://docs.aws.amazon.com/general/latest/gr/rande.html // AWS Regions and Endpoints Region *string // Set this to `true` to disable SSL when sending requests. Defaults // to `false`. DisableSSL *bool // The HTTP client to use when sending requests. Defaults to // `http.DefaultClient`. HTTPClient *http.Client // An integer value representing the logging level. The default log level // is zero (LogOff), which represents no logging. To enable logging set // to a LogLevel Value. 
LogLevel *LogLevelType // The logger writer interface to write logging messages to. Defaults to // standard out. Logger Logger // The maximum number of times that a request will be retried for failures. // Defaults to -1, which defers the max retry setting to the service specific // configuration. MaxRetries *int // Disables semantic parameter validation, which validates input for missing // required fields and/or other semantic request input errors. DisableParamValidation *bool // Disables the computation of request and response checksums, e.g., // CRC32 checksums in Amazon DynamoDB. DisableComputeChecksums *bool // Set this to `true` to force the request to use path-style addressing, // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will // use virtual hosted bucket addressing when possible // (`http://BUCKET.s3.amazonaws.com/KEY`). // // @note This configuration option is specific to the Amazon S3 service. // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html // Amazon S3: Virtual Hosting of Buckets S3ForcePathStyle *bool SleepDelay func(time.Duration) } // NewConfig returns a new Config pointer that can be chained with builder methods to // set multiple configuration values inline without using pointers. // // svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) // func NewConfig() *Config { return &Config{} } // WithCredentials sets a config Credentials value returning a Config pointer // for chaining. func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { c.Credentials = creds return c } // WithEndpoint sets a config Endpoint value returning a Config pointer for // chaining. func (c *Config) WithEndpoint(endpoint string) *Config { c.Endpoint = &endpoint return c } // WithRegion sets a config Region value returning a Config pointer for // chaining. 
func (c *Config) WithRegion(region string) *Config { c.Region = ®ion return c } // WithDisableSSL sets a config DisableSSL value returning a Config pointer // for chaining. func (c *Config) WithDisableSSL(disable bool) *Config { c.DisableSSL = &disable return c } // WithHTTPClient sets a config HTTPClient value returning a Config pointer // for chaining. func (c *Config) WithHTTPClient(client *http.Client) *Config { c.HTTPClient = client return c } // WithMaxRetries sets a config MaxRetries value returning a Config pointer // for chaining. func (c *Config) WithMaxRetries(max int) *Config { c.MaxRetries = &max return c } // WithDisableParamValidation sets a config DisableParamValidation value // returning a Config pointer for chaining. func (c *Config) WithDisableParamValidation(disable bool) *Config { c.DisableParamValidation = &disable return c } // WithDisableComputeChecksums sets a config DisableComputeChecksums value // returning a Config pointer for chaining. func (c *Config) WithDisableComputeChecksums(disable bool) *Config { c.DisableComputeChecksums = &disable return c } // WithLogLevel sets a config LogLevel value returning a Config pointer for // chaining. func (c *Config) WithLogLevel(level LogLevelType) *Config { c.LogLevel = &level return c } // WithLogger sets a config Logger value returning a Config pointer for // chaining. func (c *Config) WithLogger(logger Logger) *Config { c.Logger = logger return c } // WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config // pointer for chaining. func (c *Config) WithS3ForcePathStyle(force bool) *Config { c.S3ForcePathStyle = &force return c } // WithSleepDelay overrides the function used to sleep while waiting for the // next retry. Defaults to time.Sleep. func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { c.SleepDelay = fn return c } // Merge returns a new Config with the other Config's attribute values merged into // this Config. 
If the other Config's attribute is nil it will not be merged into // the new Config to be returned. func (c Config) Merge(other *Config) *Config { if other == nil { return &c } dst := c if other.Credentials != nil { dst.Credentials = other.Credentials } if other.Endpoint != nil { dst.Endpoint = other.Endpoint } if other.Region != nil { dst.Region = other.Region } if other.DisableSSL != nil { dst.DisableSSL = other.DisableSSL } if other.HTTPClient != nil { dst.HTTPClient = other.HTTPClient } if other.LogLevel != nil { dst.LogLevel = other.LogLevel } if other.Logger != nil { dst.Logger = other.Logger } if other.MaxRetries != nil { dst.MaxRetries = other.MaxRetries } if other.DisableParamValidation != nil { dst.DisableParamValidation = other.DisableParamValidation } if other.DisableComputeChecksums != nil { dst.DisableComputeChecksums = other.DisableComputeChecksums } if other.S3ForcePathStyle != nil { dst.S3ForcePathStyle = other.S3ForcePathStyle } if other.SleepDelay != nil { dst.SleepDelay = other.SleepDelay } return &dst } // Copy will return a shallow copy of the Config object. func (c Config) Copy() *Config { dst := c return &dst } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go000066400000000000000000000172111267010174400257650ustar00rootroot00000000000000package aws import "time" // String returns a pointer to of the string value passed in. func String(v string) *string { return &v } // StringValue returns the value of the string pointer passed in or // "" if the pointer is nil. 
func StringValue(v *string) string { if v != nil { return *v } return "" } // StringSlice converts a slice of string values into a slice of // string pointers func StringSlice(src []string) []*string { dst := make([]*string, len(src)) for i := 0; i < len(src); i++ { dst[i] = &(src[i]) } return dst } // StringValueSlice converts a slice of string pointers into a slice of // string values func StringValueSlice(src []*string) []string { dst := make([]string, len(src)) for i := 0; i < len(src); i++ { if src[i] != nil { dst[i] = *(src[i]) } } return dst } // StringMap converts a string map of string values into a string // map of string pointers func StringMap(src map[string]string) map[string]*string { dst := make(map[string]*string) for k, val := range src { v := val dst[k] = &v } return dst } // StringValueMap converts a string map of string pointers into a string // map of string values func StringValueMap(src map[string]*string) map[string]string { dst := make(map[string]string) for k, val := range src { if val != nil { dst[k] = *val } } return dst } // Bool returns a pointer to of the bool value passed in. func Bool(v bool) *bool { return &v } // BoolValue returns the value of the bool pointer passed in or // false if the pointer is nil. 
func BoolValue(v *bool) bool { if v != nil { return *v } return false } // BoolSlice converts a slice of bool values into a slice of // bool pointers func BoolSlice(src []bool) []*bool { dst := make([]*bool, len(src)) for i := 0; i < len(src); i++ { dst[i] = &(src[i]) } return dst } // BoolValueSlice converts a slice of bool pointers into a slice of // bool values func BoolValueSlice(src []*bool) []bool { dst := make([]bool, len(src)) for i := 0; i < len(src); i++ { if src[i] != nil { dst[i] = *(src[i]) } } return dst } // BoolMap converts a string map of bool values into a string // map of bool pointers func BoolMap(src map[string]bool) map[string]*bool { dst := make(map[string]*bool) for k, val := range src { v := val dst[k] = &v } return dst } // BoolValueMap converts a string map of bool pointers into a string // map of bool values func BoolValueMap(src map[string]*bool) map[string]bool { dst := make(map[string]bool) for k, val := range src { if val != nil { dst[k] = *val } } return dst } // Int returns a pointer to of the int value passed in. func Int(v int) *int { return &v } // IntValue returns the value of the int pointer passed in or // 0 if the pointer is nil. 
func IntValue(v *int) int { if v != nil { return *v } return 0 } // IntSlice converts a slice of int values into a slice of // int pointers func IntSlice(src []int) []*int { dst := make([]*int, len(src)) for i := 0; i < len(src); i++ { dst[i] = &(src[i]) } return dst } // IntValueSlice converts a slice of int pointers into a slice of // int values func IntValueSlice(src []*int) []int { dst := make([]int, len(src)) for i := 0; i < len(src); i++ { if src[i] != nil { dst[i] = *(src[i]) } } return dst } // IntMap converts a string map of int values into a string // map of int pointers func IntMap(src map[string]int) map[string]*int { dst := make(map[string]*int) for k, val := range src { v := val dst[k] = &v } return dst } // IntValueMap converts a string map of int pointers into a string // map of int values func IntValueMap(src map[string]*int) map[string]int { dst := make(map[string]int) for k, val := range src { if val != nil { dst[k] = *val } } return dst } // Int64 returns a pointer to of the int64 value passed in. func Int64(v int64) *int64 { return &v } // Int64Value returns the value of the int64 pointer passed in or // 0 if the pointer is nil. 
func Int64Value(v *int64) int64 { if v != nil { return *v } return 0 } // Int64Slice converts a slice of int64 values into a slice of // int64 pointers func Int64Slice(src []int64) []*int64 { dst := make([]*int64, len(src)) for i := 0; i < len(src); i++ { dst[i] = &(src[i]) } return dst } // Int64ValueSlice converts a slice of int64 pointers into a slice of // int64 values func Int64ValueSlice(src []*int64) []int64 { dst := make([]int64, len(src)) for i := 0; i < len(src); i++ { if src[i] != nil { dst[i] = *(src[i]) } } return dst } // Int64Map converts a string map of int64 values into a string // map of int64 pointers func Int64Map(src map[string]int64) map[string]*int64 { dst := make(map[string]*int64) for k, val := range src { v := val dst[k] = &v } return dst } // Int64ValueMap converts a string map of int64 pointers into a string // map of int64 values func Int64ValueMap(src map[string]*int64) map[string]int64 { dst := make(map[string]int64) for k, val := range src { if val != nil { dst[k] = *val } } return dst } // Float64 returns a pointer to of the float64 value passed in. func Float64(v float64) *float64 { return &v } // Float64Value returns the value of the float64 pointer passed in or // 0 if the pointer is nil. 
func Float64Value(v *float64) float64 { if v != nil { return *v } return 0 } // Float64Slice converts a slice of float64 values into a slice of // float64 pointers func Float64Slice(src []float64) []*float64 { dst := make([]*float64, len(src)) for i := 0; i < len(src); i++ { dst[i] = &(src[i]) } return dst } // Float64ValueSlice converts a slice of float64 pointers into a slice of // float64 values func Float64ValueSlice(src []*float64) []float64 { dst := make([]float64, len(src)) for i := 0; i < len(src); i++ { if src[i] != nil { dst[i] = *(src[i]) } } return dst } // Float64Map converts a string map of float64 values into a string // map of float64 pointers func Float64Map(src map[string]float64) map[string]*float64 { dst := make(map[string]*float64) for k, val := range src { v := val dst[k] = &v } return dst } // Float64ValueMap converts a string map of float64 pointers into a string // map of float64 values func Float64ValueMap(src map[string]*float64) map[string]float64 { dst := make(map[string]float64) for k, val := range src { if val != nil { dst[k] = *val } } return dst } // Time returns a pointer to of the time.Time value passed in. func Time(v time.Time) *time.Time { return &v } // TimeValue returns the value of the time.Time pointer passed in or // time.Time{} if the pointer is nil. 
func TimeValue(v *time.Time) time.Time { if v != nil { return *v } return time.Time{} } // TimeSlice converts a slice of time.Time values into a slice of // time.Time pointers func TimeSlice(src []time.Time) []*time.Time { dst := make([]*time.Time, len(src)) for i := 0; i < len(src); i++ { dst[i] = &(src[i]) } return dst } // TimeValueSlice converts a slice of time.Time pointers into a slice of // time.Time values func TimeValueSlice(src []*time.Time) []time.Time { dst := make([]time.Time, len(src)) for i := 0; i < len(src); i++ { if src[i] != nil { dst[i] = *(src[i]) } } return dst } // TimeMap converts a string map of time.Time values into a string // map of time.Time pointers func TimeMap(src map[string]time.Time) map[string]*time.Time { dst := make(map[string]*time.Time) for k, val := range src { v := val dst[k] = &v } return dst } // TimeValueMap converts a string map of time.Time pointers into a string // map of time.Time values func TimeValueMap(src map[string]*time.Time) map[string]time.Time { dst := make(map[string]time.Time) for k, val := range src { if val != nil { dst[k] = *val } } return dst } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/000077500000000000000000000000001267010174400252015ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go000066400000000000000000000111041267010174400273250ustar00rootroot00000000000000package corehandlers import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "net/url" "regexp" "strconv" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" ) // Interface for matching types which also have a Len method. type lener interface { Len() int } // BuildContentLength builds the content length of a request based on the body, // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable // to determine request body length and no "Content-Length" was specified it will panic. 
var BuildContentLengthHandler = request.NamedHandler{"core.BuildContentLengthHandler", func(r *request.Request) { if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { length, _ := strconv.ParseInt(slength, 10, 64) r.HTTPRequest.ContentLength = length return } var length int64 switch body := r.Body.(type) { case nil: length = 0 case lener: length = int64(body.Len()) case io.Seeker: r.BodyStart, _ = body.Seek(0, 1) end, _ := body.Seek(0, 2) body.Seek(r.BodyStart, 0) // make sure to seek back to original location length = end - r.BodyStart default: panic("Cannot get length of body, must provide `ContentLength`") } r.HTTPRequest.ContentLength = length r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) }} // UserAgentHandler is a request handler for injecting User agent into requests. var UserAgentHandler = request.NamedHandler{"core.UserAgentHandler", func(r *request.Request) { r.HTTPRequest.Header.Set("User-Agent", aws.SDKName+"/"+aws.SDKVersion) }} var reStatusCode = regexp.MustCompile(`^(\d{3})`) // SendHandler is a request handler to send service request using HTTP client. var SendHandler = request.NamedHandler{"core.SendHandler", func(r *request.Request) { var err error r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest) if err != nil { // Capture the case where url.Error is returned for error processing // response. e.g. 301 without location header comes back as string // error and r.HTTPResponse is nil. Other url redirect errors will // comeback in a similar method. if e, ok := err.(*url.Error); ok && e.Err != nil { if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { code, _ := strconv.ParseInt(s[1], 10, 64) r.HTTPResponse = &http.Response{ StatusCode: int(code), Status: http.StatusText(int(code)), Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } return } } if r.HTTPResponse == nil { // Add a dummy request response object to ensure the HTTPResponse // value is consistent. 
r.HTTPResponse = &http.Response{ StatusCode: int(0), Status: http.StatusText(int(0)), Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } // Catch all other request errors. r.Error = awserr.New("RequestError", "send request failed", err) r.Retryable = aws.Bool(true) // network errors are retryable } }} // ValidateResponseHandler is a request handler to validate service response. var ValidateResponseHandler = request.NamedHandler{"core.ValidateResponseHandler", func(r *request.Request) { if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { // this may be replaced by an UnmarshalError handler r.Error = awserr.New("UnknownError", "unknown error", nil) } }} // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. var AfterRetryHandler = request.NamedHandler{"core.AfterRetryHandler", func(r *request.Request) { // If one of the other handlers already set the retry state // we don't want to override it based on the service's state if r.Retryable == nil { r.Retryable = aws.Bool(r.ShouldRetry(r)) } if r.WillRetry() { r.RetryDelay = r.RetryRules(r) r.Service.Config.SleepDelay(r.RetryDelay) // when the expired token exception occurs the credentials // need to be expired locally so that the next request to // get credentials will trigger a credentials refresh. if r.IsErrorExpired() { r.Service.Config.Credentials.Expire() } r.RetryCount++ r.Error = nil } }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. Will set r.Error if the endpoint or // region is not valid. 
var ValidateEndpointHandler = request.NamedHandler{"core.ValidateEndpointHandler", func(r *request.Request) { if r.Service.SigningRegion == "" && aws.StringValue(r.Service.Config.Region) == "" { r.Error = aws.ErrMissingRegion } else if r.Service.Endpoint == "" { r.Error = aws.ErrMissingEndpoint } }} docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go000066400000000000000000000071651267010174400307060ustar00rootroot00000000000000package corehandlers import ( "fmt" "reflect" "strconv" "strings" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" ) // ValidateParameters is a request handler to validate the input parameters. // Validating parameters only has meaning if done prior to the request being sent. var ValidateParametersHandler = request.NamedHandler{"core.ValidateParametersHandler", func(r *request.Request) { if r.ParamsFilled() { v := validator{errors: []string{}} v.validateAny(reflect.ValueOf(r.Params), "") if count := len(v.errors); count > 0 { format := "%d validation errors:\n- %s" msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) r.Error = awserr.New("InvalidParameter", msg, nil) } } }} // A validator validates values. Collects validations errors which occurs. type validator struct { errors []string } // validateAny will validate any struct, slice or map type. All validations // are also performed recursively for nested types. func (v *validator) validateAny(value reflect.Value, path string) { value = reflect.Indirect(value) if !value.IsValid() { return } switch value.Kind() { case reflect.Struct: v.validateStruct(value, path) case reflect.Slice: for i := 0; i < value.Len(); i++ { v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) } case reflect.Map: for _, n := range value.MapKeys() { v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) } } } // validateStruct will validate the struct value's fields. 
If the structure has // nested types those types will be validated also. func (v *validator) validateStruct(value reflect.Value, path string) { prefix := "." if path == "" { prefix = "" } for i := 0; i < value.Type().NumField(); i++ { f := value.Type().Field(i) if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { continue } fvalue := value.FieldByName(f.Name) err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) if err != nil { v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) continue } v.validateAny(fvalue, path+prefix+f.Name) } } type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { for _, fn := range funcs { if err := fn(f, fvalue); err != nil { return err } } return nil } // Validates that a field has a valid value provided for required fields. func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { if f.Tag.Get("required") == "" { return nil } switch fvalue.Kind() { case reflect.Ptr, reflect.Slice, reflect.Map: if fvalue.IsNil() { return fmt.Errorf("missing required parameter") } default: if !fvalue.IsValid() { return fmt.Errorf("missing required parameter") } } return nil } // Validates that if a value is provided for a field, that value must be at // least a minimum length. 
func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { minStr := f.Tag.Get("min") if minStr == "" { return nil } min, _ := strconv.ParseInt(minStr, 10, 64) kind := fvalue.Kind() if kind == reflect.Ptr { if fvalue.IsNil() { return nil } fvalue = fvalue.Elem() } switch fvalue.Kind() { case reflect.String: if int64(fvalue.Len()) < min { return fmt.Errorf("field too short, minimum length %d", min) } case reflect.Slice, reflect.Map: if fvalue.IsNil() { return nil } if int64(fvalue.Len()) < min { return fmt.Errorf("field too short, minimum length %d", min) } // TODO min can also apply to number minimum value. } return nil } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/000077500000000000000000000000001267010174400250255ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go000066400000000000000000000053231267010174400303530ustar00rootroot00000000000000package credentials import ( "github.com/aws/aws-sdk-go/aws/awserr" ) var ( // ErrNoValidProvidersFoundInChain Is returned when there are no valid // providers in the ChainProvider. // // @readonly ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil) ) // A ChainProvider will search for a provider which returns credentials // and cache that provider until Retrieve is called again. // // The ChainProvider provides a way of chaining multiple providers together // which will pick the first available using priority order of the Providers // in the list. // // If none of the Providers retrieve valid credentials Value, ChainProvider's // Retrieve() will return the error ErrNoValidProvidersFoundInChain. // // If a Provider is found which returns valid credentials Value ChainProvider // will cache that Provider for all calls to IsExpired(), until Retrieve is // called again. // // Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. 
// In this example EnvProvider will first check if any credentials are available // vai the environment variables. If there are none ChainProvider will check // the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider // does not return any credentials ChainProvider will return the error // ErrNoValidProvidersFoundInChain // // creds := NewChainCredentials( // []Provider{ // &EnvProvider{}, // &EC2RoleProvider{}, // }) // // // Usage of ChainCredentials with aws.Config // svc := ec2.New(&aws.Config{Credentials: creds}) // type ChainProvider struct { Providers []Provider curr Provider } // NewChainCredentials returns a pointer to a new Credentials object // wrapping a chain of providers. func NewChainCredentials(providers []Provider) *Credentials { return NewCredentials(&ChainProvider{ Providers: append([]Provider{}, providers...), }) } // Retrieve returns the credentials value or error if no provider returned // without error. // // If a provider is found it will be cached and any calls to IsExpired() // will return the expired state of the cached provider. func (c *ChainProvider) Retrieve() (Value, error) { for _, p := range c.Providers { if creds, err := p.Retrieve(); err == nil { c.curr = p return creds, nil } } c.curr = nil // TODO better error reporting. maybe report error for each failed retrieve? return Value{}, ErrNoValidProvidersFoundInChain } // IsExpired will returned the expired state of the currently cached provider // if there is one. If there is no current provider, true will be returned. func (c *ChainProvider) IsExpired() bool { if c.curr != nil { return c.curr.IsExpired() } return true } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go000066400000000000000000000154501267010174400276560ustar00rootroot00000000000000// Package credentials provides credential retrieval and management // // The Credentials is the primary method of getting access to and managing // credentials Values. 
Using dependency injection retrieval of the credential // values is handled by a object which satisfies the Provider interface. // // By default the Credentials.Get() will cache the successful result of a // Provider's Retrieve() until Provider.IsExpired() returns true. At which // point Credentials will call Provider's Retrieve() to get new credential Value. // // The Provider is responsible for determining when credentials Value have expired. // It is also important to note that Credentials will always call Retrieve the // first time Credentials.Get() is called. // // Example of using the environment variable credentials. // // creds := NewEnvCredentials() // // // Retrieve the credentials value // credValue, err := creds.Get() // if err != nil { // // handle error // } // // Example of forcing credentials to expire and be refreshed on the next Get(). // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // // creds := NewCredentials(&EC2RoleProvider{}) // creds.Expire() // credsValue, err := creds.Get() // // New credentials will be retrieved instead of from cache. // // // Custom Provider // // Each Provider built into this package also provides a helper method to generate // a Credentials pointer setup with the provider. To use a custom Provider just // create a type which satisfies the Provider interface and pass it to the // NewCredentials method. // // type MyProvider struct{} // func (m *MyProvider) Retrieve() (Value, error) {...} // func (m *MyProvider) IsExpired() bool {...} // // creds := NewCredentials(&MyProvider{}) // credValue, err := creds.Get() // package credentials import ( "sync" "time" ) // Create an empty Credential object that can be used as dummy placeholder // credentials for requests that do not need signed. // // This Credentials can be used to configure a service to not sign requests // when making service API calls. 
For example, when accessing public // s3 buckets. // // svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) // // Access public S3 buckets. // // @readonly var AnonymousCredentials = NewStaticCredentials("", "", "") // A Value is the AWS credentials value for individual credential fields. type Value struct { // AWS Access key ID AccessKeyID string // AWS Secret Access Key SecretAccessKey string // AWS Session Token SessionToken string } // A Provider is the interface for any component which will provide credentials // Value. A provider is required to manage its own Expired state, and what to // be expired means. // // The Provider should not need to implement its own mutexes, because // that will be managed by Credentials. type Provider interface { // Refresh returns nil if it successfully retrieved the value. // Error is returned if the value were not obtainable, or empty. Retrieve() (Value, error) // IsExpired returns if the credentials are no longer valid, and need // to be retrieved. IsExpired() bool } // A Expiry provides shared expiration logic to be used by credentials // providers to implement expiry functionality. // // The best method to use this struct is as an anonymous field within the // provider's struct. // // Example: // type EC2RoleProvider struct { // Expiry // ... // } type Expiry struct { // The date/time when to expire on expiration time.Time // If set will be used by IsExpired to determine the current time. // Defaults to time.Now if CurrentTime is not set. Available for testing // to be able to mock out the current time. CurrentTime func() time.Time } // SetExpiration sets the expiration IsExpired will check when called. // // If window is greater than 0 the expiration time will be reduced by the // window value. // // Using a window is helpful to trigger credentials to expire sooner than // the expiration time given to ensure no requests are made with expired // tokens. 
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { e.expiration = expiration if window > 0 { e.expiration = e.expiration.Add(-window) } } // IsExpired returns if the credentials are expired. func (e *Expiry) IsExpired() bool { if e.CurrentTime == nil { e.CurrentTime = time.Now } return e.expiration.Before(e.CurrentTime()) } // A Credentials provides synchronous safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. // // Credentials is safe to use across multiple goroutines and will manage the // synchronous state so the Providers do not need to implement their own // synchronization. // // The first Credentials.Get() will always call Provider.Retrieve() to get the // first instance of the credentials Value. All calls to Get() after that // will return the cached credentials Value until IsExpired() returns true. type Credentials struct { creds Value forceRefresh bool m sync.Mutex provider Provider } // NewCredentials returns a pointer to a new Credentials with the provider set. func NewCredentials(provider Provider) *Credentials { return &Credentials{ provider: provider, forceRefresh: true, } } // Get returns the credentials value, or error if the credentials Value failed // to be retrieved. // // Will return the cached credentials Value if it has not expired. If the // credentials Value has expired the Provider's Retrieve() will be called // to refresh the credentials. // // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. 
func (c *Credentials) Get() (Value, error) { c.m.Lock() defer c.m.Unlock() if c.isExpired() { creds, err := c.provider.Retrieve() if err != nil { return Value{}, err } c.creds = creds c.forceRefresh = false } return c.creds, nil } // Expire expires the credentials and forces them to be retrieved on the // next call to Get(). // // This will override the Provider's expired state, and force Credentials // to call the Provider's Retrieve(). func (c *Credentials) Expire() { c.m.Lock() defer c.m.Unlock() c.forceRefresh = true } // IsExpired returns if the credentials are no longer valid, and need // to be retrieved. // // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { c.m.Lock() defer c.m.Unlock() return c.isExpired() } // isExpired helper method wrapping the definition of expired credentials. func (c *Credentials) isExpired() bool { return c.forceRefresh || c.provider.IsExpired() } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/000077500000000000000000000000001267010174400274015ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go000066400000000000000000000122771267010174400333450ustar00rootroot00000000000000package ec2rolecreds import ( "bufio" "encoding/json" "fmt" "path" "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" ) // A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if // those credentials are expired. // // Example how to configure the EC2RoleProvider with custom http Client, Endpoint // or ExpiryWindow // // p := &ec2rolecreds.EC2RoleProvider{ // // Pass in a custom timeout to be used when requesting // // IAM EC2 Role credentials. 
// Client: &http.Client{ // Timeout: 10 * time.Second, // }, // // Use default EC2 Role metadata endpoint, Alternate endpoints can be // // specified setting Endpoint to something else. // Endpoint: "", // // Do not use early expiry of credentials. If a non zero value is // // specified the credentials will be expired early // ExpiryWindow: 0, // } type EC2RoleProvider struct { credentials.Expiry // EC2Metadata client to use when connecting to EC2 metadata service Client *ec2metadata.Client // ExpiryWindow will allow the credentials to trigger refreshing prior to // the credentials actually expiring. This is beneficial so race conditions // with expiring credentials do not cause request to fail unexpectedly // due to ExpiredTokenException exceptions. // // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true // 10 seconds before the credentials are actually expired. // // If ExpiryWindow is 0 or less it will be ignored. ExpiryWindow time.Duration } // NewCredentials returns a pointer to a new Credentials object // wrapping the EC2RoleProvider. // // Takes a custom http.Client which can be configured for custom handling of // things such as timeout. // // Endpoint is the URL that the EC2RoleProvider will connect to when retrieving // role and credentials. // // Window is the expiry window that will be subtracted from the expiry returned // by the role credential request. This is done so that the credentials will // expire sooner than their actual lifespan. func NewCredentials(client *ec2metadata.Client, window time.Duration) *credentials.Credentials { return credentials.NewCredentials(&EC2RoleProvider{ Client: client, ExpiryWindow: window, }) } // Retrieve retrieves credentials from the EC2 service. // Error will be returned if the request fails, or unable to extract // the desired credentials. 
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { if m.Client == nil { m.Client = ec2metadata.New(nil) } credsList, err := requestCredList(m.Client) if err != nil { return credentials.Value{}, err } if len(credsList) == 0 { return credentials.Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) } credsName := credsList[0] roleCreds, err := requestCred(m.Client, credsName) if err != nil { return credentials.Value{}, err } m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) return credentials.Value{ AccessKeyID: roleCreds.AccessKeyID, SecretAccessKey: roleCreds.SecretAccessKey, SessionToken: roleCreds.Token, }, nil } // A ec2RoleCredRespBody provides the shape for deserializing credential // request responses. type ec2RoleCredRespBody struct { // Success State Expiration time.Time AccessKeyID string SecretAccessKey string Token string // Error state Code string Message string } const iamSecurityCredsPath = "/iam/security-credentials" // requestCredList requests a list of credentials from the EC2 service. // If there are no credentials, or there is an error making or receiving the request func requestCredList(client *ec2metadata.Client) ([]string, error) { resp, err := client.GetMetadata(iamSecurityCredsPath) if err != nil { return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err) } credsList := []string{} s := bufio.NewScanner(strings.NewReader(resp)) for s.Scan() { credsList = append(credsList, s.Text()) } if err := s.Err(); err != nil { return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err) } return credsList, nil } // requestCred requests the credentials for a specific credentials from the EC2 service. // // If the credentials cannot be found, or there is an error reading the response // and error will be returned. 
func requestCred(client *ec2metadata.Client, credsName string) (ec2RoleCredRespBody, error) { resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", fmt.Sprintf("failed to get %s EC2 Role credentials", credsName), err) } respCreds := ec2RoleCredRespBody{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, awserr.New("SerializationError", fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName), err) } if respCreds.Code != "Success" { // If an error code was returned something failed requesting the role. return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) } return respCreds, nil } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go000066400000000000000000000035431267010174400300630ustar00rootroot00000000000000package credentials import ( "os" "github.com/aws/aws-sdk-go/aws/awserr" ) var ( // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be // found in the process's environment. // // @readonly ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key // can't be found in the process's environment. // // @readonly ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) ) // A EnvProvider retrieves credentials from the environment variables of the // running process. Environment credentials never expire. 
// // Environment variables used: // // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY type EnvProvider struct { retrieved bool } // NewEnvCredentials returns a pointer to a new Credentials object // wrapping the environment variable provider. func NewEnvCredentials() *Credentials { return NewCredentials(&EnvProvider{}) } // Retrieve retrieves the keys from the environment. func (e *EnvProvider) Retrieve() (Value, error) { e.retrieved = false id := os.Getenv("AWS_ACCESS_KEY_ID") if id == "" { id = os.Getenv("AWS_ACCESS_KEY") } secret := os.Getenv("AWS_SECRET_ACCESS_KEY") if secret == "" { secret = os.Getenv("AWS_SECRET_KEY") } if id == "" { return Value{}, ErrAccessKeyIDNotFound } if secret == "" { return Value{}, ErrSecretAccessKeyNotFound } e.retrieved = true return Value{ AccessKeyID: id, SecretAccessKey: secret, SessionToken: os.Getenv("AWS_SESSION_TOKEN"), }, nil } // IsExpired returns if the credentials have been retrieved. func (e *EnvProvider) IsExpired() bool { return !e.retrieved } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini000066400000000000000000000002521267010174400271600ustar00rootroot00000000000000[default] aws_access_key_id = accessKey aws_secret_access_key = secret aws_session_token = token [no_token] aws_access_key_id = accessKey aws_secret_access_key = secret docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go000066400000000000000000000077771267010174400331330ustar00rootroot00000000000000package credentials import ( "fmt" "os" "path/filepath" "github.com/vaughan0/go-ini" "github.com/aws/aws-sdk-go/aws/awserr" ) var ( // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. 
// // @readonly ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) // A SharedCredentialsProvider retrieves credentials from the current user's home // directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type SharedCredentialsProvider struct { // Path to the shared credentials file. // // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the // env value is empty will default to current user's home directory. // Linux/OSX: "$HOME/.aws/credentials" // Windows: "%USERPROFILE%\.aws\credentials" Filename string // AWS Profile to extract credentials from the shared credentials file. If empty // will default to environment variable "AWS_PROFILE" or "default" if // environment variable is also not set. Profile string // retrieved states if the credentials have been successfully retrieved. retrieved bool } // NewSharedCredentials returns a pointer to a new Credentials object // wrapping the Profile file provider. func NewSharedCredentials(filename, profile string) *Credentials { return NewCredentials(&SharedCredentialsProvider{ Filename: filename, Profile: profile, }) } // Retrieve reads and extracts the shared credentials from the current // users home directory. func (p *SharedCredentialsProvider) Retrieve() (Value, error) { p.retrieved = false filename, err := p.filename() if err != nil { return Value{}, err } creds, err := loadProfile(filename, p.profile()) if err != nil { return Value{}, err } p.retrieved = true return creds, nil } // IsExpired returns if the shared credentials have expired. func (p *SharedCredentialsProvider) IsExpired() bool { return !p.retrieved } // loadProfiles loads from the file pointed to by shared credentials filename for profile. // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. 
func loadProfile(filename, profile string) (Value, error) { config, err := ini.LoadFile(filename) if err != nil { return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) } iniProfile := config.Section(profile) id, ok := iniProfile["aws_access_key_id"] if !ok { return Value{}, awserr.New("SharedCredsAccessKey", fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), nil) } secret, ok := iniProfile["aws_secret_access_key"] if !ok { return Value{}, awserr.New("SharedCredsSecret", fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), nil) } token := iniProfile["aws_session_token"] return Value{ AccessKeyID: id, SecretAccessKey: secret, SessionToken: token, }, nil } // filename returns the filename to use to read AWS shared credentials. // // Will return an error if the user's home directory path cannot be found. func (p *SharedCredentialsProvider) filename() (string, error) { if p.Filename == "" { if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { return p.Filename, nil } homeDir := os.Getenv("HOME") // *nix if homeDir == "" { // Windows homeDir = os.Getenv("USERPROFILE") } if homeDir == "" { return "", ErrSharedCredentialsHomeNotFound } p.Filename = filepath.Join(homeDir, ".aws", "credentials") } return p.Filename, nil } // profile returns the AWS shared credentials profile. If empty will read // environment variable "AWS_PROFILE". If that is not set profile will // return "default". 
func (p *SharedCredentialsProvider) profile() string { if p.Profile == "" { p.Profile = os.Getenv("AWS_PROFILE") } if p.Profile == "" { p.Profile = "default" } return p.Profile } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go000066400000000000000000000022071267010174400305560ustar00rootroot00000000000000package credentials import ( "github.com/aws/aws-sdk-go/aws/awserr" ) var ( // ErrStaticCredentialsEmpty is emitted when static credentials are empty. // // @readonly ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) ) // A StaticProvider is a set of credentials which are set pragmatically, // and will never expire. type StaticProvider struct { Value } // NewStaticCredentials returns a pointer to a new Credentials object // wrapping a static credentials value provider. func NewStaticCredentials(id, secret, token string) *Credentials { return NewCredentials(&StaticProvider{Value: Value{ AccessKeyID: id, SecretAccessKey: secret, SessionToken: token, }}) } // Retrieve returns the credentials or error if the credentials are invalid. func (s *StaticProvider) Retrieve() (Value, error) { if s.AccessKeyID == "" || s.SecretAccessKey == "" { return Value{}, ErrStaticCredentialsEmpty } return s.Value, nil } // IsExpired returns if the credentials are expired. // // For StaticProvider, the credentials never expired. 
func (s *StaticProvider) IsExpired() bool { return false } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/defaults/000077500000000000000000000000001267010174400243375ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go000066400000000000000000000026311267010174400264770ustar00rootroot00000000000000package defaults import ( "net/http" "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" ) // DefaultChainCredentials is a Credentials which will find the first available // credentials Value from the list of Providers. // // This should be used in the default case. Once the type of credentials are // known switching to the specific Credentials will be more efficient. var DefaultChainCredentials = credentials.NewChainCredentials( []credentials.Provider{ &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, &ec2rolecreds.EC2RoleProvider{ExpiryWindow: 5 * time.Minute}, }) // DefaultConfig is the default all service configuration will be based off of. // By default, all clients use this structure for initialization options unless // a custom configuration object is passed in. // // You may modify this global structure to change all default configuration // in the SDK. Note that configuration options are copied by value, so any // modifications must happen before constructing a client. var DefaultConfig = aws.NewConfig(). WithCredentials(DefaultChainCredentials). WithRegion(os.Getenv("AWS_REGION")). WithHTTPClient(http.DefaultClient). WithMaxRetries(aws.DefaultRetries). WithLogger(aws.NewDefaultLogger()). WithLogLevel(aws.LogOff). 
WithSleepDelay(time.Sleep) docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/000077500000000000000000000000001267010174400247025ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go000066400000000000000000000021321267010174400260000ustar00rootroot00000000000000package ec2metadata import ( "path" "github.com/aws/aws-sdk-go/aws/request" ) // GetMetadata uses the path provided to request func (c *Client) GetMetadata(p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", HTTPPath: path.Join("/", "meta-data", p), } output := &metadataOutput{} req := request.New(c.Service.ServiceInfo, c.Service.Handlers, c.Service.Retryer, op, nil, output) return output.Content, req.Send() } // Region returns the region the instance is running in. func (c *Client) Region() (string, error) { resp, err := c.GetMetadata("placement/availability-zone") if err != nil { return "", err } // returns region without the suffix. Eg: us-west-2a becomes us-west-2 return resp[:len(resp)-1], nil } // Available returns if the application has access to the EC2 Metadata service. // Can be used to determine if application is running within an EC2 Instance and // the metadata service is available. func (c *Client) Available() bool { if _, err := c.GetMetadata("instance-id"); err != nil { return false } return true } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go000066400000000000000000000074311267010174400266760ustar00rootroot00000000000000package ec2metadata import ( "io/ioutil" "net/http" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/service" "github.com/aws/aws-sdk-go/aws/service/serviceinfo" ) // DefaultRetries states the default number of times the service client will // attempt to retry a failed request before failing. 
const DefaultRetries = 3 // A Config provides the configuration for the EC2 Metadata service. type Config struct { // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default service endpoint for a client. Set this // to nil, or `""` to use the default service endpoint. Endpoint *string // The HTTP client to use when sending requests. Defaults to // `http.DefaultClient`. HTTPClient *http.Client // An integer value representing the logging level. The default log level // is zero (LogOff), which represents no logging. To enable logging set // to a LogLevel Value. Logger aws.Logger // The logger writer interface to write logging messages to. Defaults to // standard out. LogLevel *aws.LogLevelType // The maximum number of times that a request will be retried for failures. // Defaults to DefaultRetries for the number of retries to be performed // per request. MaxRetries *int } // A Client is an EC2 Metadata service Client. type Client struct { *service.Service } // New creates a new instance of the EC2 Metadata service client. // // In the general use case the configuration for this service client should not // be needed and `nil` can be provided. Configuration is only needed if the // `ec2metadata.Config` defaults need to be overridden. Eg. Setting LogLevel. // // @note This configuration will NOT be merged with the default AWS service // client configuration `defaults.DefaultConfig`. Due to circular dependencies // with the defaults package and credentials EC2 Role Provider. 
func New(config *Config) *Client { service := &service.Service{ ServiceInfo: serviceinfo.ServiceInfo{ Config: copyConfig(config), ServiceName: "Client", Endpoint: "http://169.254.169.254/latest", APIVersion: "latest", }, } service.Initialize() service.Handlers.Unmarshal.PushBack(unmarshalHandler) service.Handlers.UnmarshalError.PushBack(unmarshalError) service.Handlers.Validate.Clear() service.Handlers.Validate.PushBack(validateEndpointHandler) return &Client{service} } func copyConfig(config *Config) *aws.Config { if config == nil { config = &Config{} } c := &aws.Config{ Credentials: credentials.AnonymousCredentials, Endpoint: config.Endpoint, HTTPClient: config.HTTPClient, Logger: config.Logger, LogLevel: config.LogLevel, MaxRetries: config.MaxRetries, } if c.HTTPClient == nil { c.HTTPClient = http.DefaultClient } if c.Logger == nil { c.Logger = aws.NewDefaultLogger() } if c.LogLevel == nil { c.LogLevel = aws.LogLevel(aws.LogOff) } if c.MaxRetries == nil { c.MaxRetries = aws.Int(DefaultRetries) } return c } type metadataOutput struct { Content string } func unmarshalHandler(r *request.Request) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) } data := r.Data.(*metadataOutput) data.Content = string(b) } func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() _, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) } // TODO extract the error... 
} func validateEndpointHandler(r *request.Request) { if r.Service.Endpoint == "" { r.Error = aws.ErrMissingEndpoint } } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/errors.go000066400000000000000000000010101267010174400243630ustar00rootroot00000000000000package aws import "github.com/aws/aws-sdk-go/aws/awserr" var ( // ErrMissingRegion is an error that is returned if region configuration is // not found. // // @readonly ErrMissingRegion error = awserr.New("MissingRegion", "could not find region configuration", nil) // ErrMissingEndpoint is an error that is returned if an endpoint cannot be // resolved for a service. // // @readonly ErrMissingEndpoint error = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) ) docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/logger.go000066400000000000000000000062651267010174400243470ustar00rootroot00000000000000package aws import ( "log" "os" ) // A LogLevelType defines the level logging should be performed at. Used to instruct // the SDK which statements should be logged. type LogLevelType uint // LogLevel returns the pointer to a LogLevel. Should be used to workaround // not being able to take the address of a non-composite literal. func LogLevel(l LogLevelType) *LogLevelType { return &l } // Value returns the LogLevel value or the default value LogOff if the LogLevel // is nil. Safe to use on nil value LogLevelTypes. func (l *LogLevelType) Value() LogLevelType { if l != nil { return *l } return LogOff } // Matches returns true if the v LogLevel is enabled by this LogLevel. Should be // used with logging sub levels. Is safe to use on nil value LogLevelTypes. If // LogLevel is nill, will default to LogOff comparison. func (l *LogLevelType) Matches(v LogLevelType) bool { c := l.Value() return c&v == v } // AtLeast returns true if this LogLevel is at least high enough to satisfies v. // Is safe to use on nil value LogLevelTypes. 
If LogLevel is nill, will default // to LogOff comparison. func (l *LogLevelType) AtLeast(v LogLevelType) bool { c := l.Value() return c >= v } const ( // LogOff states that no logging should be performed by the SDK. This is the // default state of the SDK, and should be use to disable all logging. LogOff LogLevelType = iota * 0x1000 // LogDebug state that debug output should be logged by the SDK. This should // be used to inspect request made and responses received. LogDebug ) // Debug Logging Sub Levels const ( // LogDebugWithSigning states that the SDK should log request signing and // presigning events. This should be used to log the signing details of // requests for debugging. Will also enable LogDebug. LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) // LogDebugWithHTTPBody states the SDK should log HTTP request and response // HTTP bodys in addition to the headers and path. This should be used to // see the body content of requests and responses made while using the SDK // Will also enable LogDebug. LogDebugWithHTTPBody // LogDebugWithRequestRetries states the SDK should log when service requests will // be retried. This should be used to log when you want to log when service // requests are being retried. Will also enable LogDebug. LogDebugWithRequestRetries // LogDebugWithRequestErrors states the SDK should log when service requests fail // to build, send, validate, or unmarshal. LogDebugWithRequestErrors ) // A Logger is a minimalistic interface for the SDK to log messages to. Should // be used to provide custom logging writers for the SDK to use. type Logger interface { Log(...interface{}) } // NewDefaultLogger returns a Logger which will write log messages to stdout, and // use same formatting runes as the stdlib log.Logger func NewDefaultLogger() Logger { return &defaultLogger{ logger: log.New(os.Stdout, "", log.LstdFlags), } } // A defaultLogger provides a minimalistic logger satisfying the Logger interface. 
type defaultLogger struct { logger *log.Logger } // Log logs the parameters to the stdlib logger. See log.Println. func (l defaultLogger) Log(args ...interface{}) { l.logger.Println(args...) } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/request/000077500000000000000000000000001267010174400242205ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go000066400000000000000000000055511267010174400263550ustar00rootroot00000000000000package request // A Handlers provides a collection of request handlers for various // stages of handling requests. type Handlers struct { Validate HandlerList Build HandlerList Sign HandlerList Send HandlerList ValidateResponse HandlerList Unmarshal HandlerList UnmarshalMeta HandlerList UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList } // Copy returns of this handler's lists. func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), Build: h.Build.copy(), Sign: h.Sign.copy(), Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), Unmarshal: h.Unmarshal.copy(), UnmarshalError: h.UnmarshalError.copy(), UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), } } // Clear removes callback functions for all handlers func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() h.UnmarshalMeta.Clear() h.UnmarshalError.Clear() h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() } // A HandlerList manages zero or more handlers in a list. type HandlerList struct { list []NamedHandler } // A NamedHandler is a struct that contains a name and function callback. type NamedHandler struct { Name string Fn func(*Request) } // copy creates a copy of the handler list. func (l *HandlerList) copy() HandlerList { var n HandlerList n.list = append([]NamedHandler{}, l.list...) return n } // Clear clears the handler list. 
func (l *HandlerList) Clear() { l.list = []NamedHandler{} } // Len returns the number of handlers in the list. func (l *HandlerList) Len() int { return len(l.list) } // PushBack pushes handler f to the back of the handler list. func (l *HandlerList) PushBack(f func(*Request)) { l.list = append(l.list, NamedHandler{"__anonymous", f}) } // PushFront pushes handler f to the front of the handler list. func (l *HandlerList) PushFront(f func(*Request)) { l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) } // PushBackNamed pushes named handler f to the back of the handler list. func (l *HandlerList) PushBackNamed(n NamedHandler) { l.list = append(l.list, n) } // PushFrontNamed pushes named handler f to the front of the handler list. func (l *HandlerList) PushFrontNamed(n NamedHandler) { l.list = append([]NamedHandler{n}, l.list...) } // Remove removes a NamedHandler n func (l *HandlerList) Remove(n NamedHandler) { newlist := []NamedHandler{} for _, m := range l.list { if m.Name != n.Name { newlist = append(newlist, m) } } l.list = newlist } // Run executes all handlers in the list with a given request object. func (l *HandlerList) Run(r *Request) { for _, f := range l.list { f.Fn(r) } } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go000066400000000000000000000221701267010174400262410ustar00rootroot00000000000000package request import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "net/url" "reflect" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/service/serviceinfo" ) // A Request is the service request to be made. 
type Request struct { Retryer Service serviceinfo.ServiceInfo Handlers Handlers Time time.Time ExpireTime time.Duration Operation *Operation HTTPRequest *http.Request HTTPResponse *http.Response Body io.ReadSeeker BodyStart int64 // offset from beginning of Body that the request body starts Params interface{} Error error Data interface{} RequestID string RetryCount uint Retryable *bool RetryDelay time.Duration built bool } // An Operation is the service API operation to be made. type Operation struct { Name string HTTPMethod string HTTPPath string *Paginator } // Paginator keeps track of pagination configuration for an API operation. type Paginator struct { InputTokens []string OutputTokens []string LimitToken string TruncationToken string } // New returns a new Request pointer for the service API // operation and parameters. // // Params is any value of input parameters to be the request payload. // Data is pointer value to an object which the request's response // payload will be deserialized to. func New(service serviceinfo.ServiceInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { method := operation.HTTPMethod if method == "" { method = "POST" } p := operation.HTTPPath if p == "" { p = "/" } httpReq, _ := http.NewRequest(method, "", nil) httpReq.URL, _ = url.Parse(service.Endpoint + p) r := &Request{ Retryer: retryer, Service: service, Handlers: handlers.Copy(), Time: time.Now(), ExpireTime: 0, Operation: operation, HTTPRequest: httpReq, Body: nil, Params: params, Error: nil, Data: data, } r.SetBufferBody([]byte{}) return r } // WillRetry returns if the request's can be retried. func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. 
func (r *Request) ParamsFilled() bool { return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() } // DataFilled returns true if the request's data for response deserialization // target has been set and is a valid. False is returned if data is not // set, or is invalid. func (r *Request) DataFilled() bool { return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() } // SetBufferBody will set the request's body bytes that will be sent to // the service API. func (r *Request) SetBufferBody(buf []byte) { r.SetReaderBody(bytes.NewReader(buf)) } // SetStringBody sets the body of the request to be backed by a string. func (r *Request) SetStringBody(s string) { r.SetReaderBody(strings.NewReader(s)) } // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.HTTPRequest.Body = ioutil.NopCloser(reader) r.Body = reader } // Presign returns the request's signed URL. Error will be returned // if the signing fails. func (r *Request) Presign(expireTime time.Duration) (string, error) { r.ExpireTime = expireTime r.Sign() if r.Error != nil { return "", r.Error } return r.HTTPRequest.URL.String(), nil } func debugLogReqError(r *Request, stage string, retrying bool, err error) { if !r.Service.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } retryStr := "not retrying" if retrying { retryStr = "will retry" } r.Service.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.Service.ServiceName, r.Operation.Name, retryStr, err)) } // Build will build the request's object so it can be signed and sent // to the service. Build will also validate all the request's parameters. // Anny additional build Handlers set on this request will be run // in the order they were set. // // The request will only be built once. Multiple calls to build will have // no effect. // // If any Validate or Build errors occur the build will stop and the error // which occurred will be returned. 
func (r *Request) Build() error { if !r.built { r.Error = nil r.Handlers.Validate.Run(r) if r.Error != nil { debugLogReqError(r, "Validate Request", false, r.Error) return r.Error } r.Handlers.Build.Run(r) r.built = true } return r.Error } // Sign will sign the request retuning error if errors are encountered. // // Send will build the request prior to signing. All Sign Handlers will // be executed in the order they were set. func (r *Request) Sign() error { r.Build() if r.Error != nil { debugLogReqError(r, "Build Request", false, r.Error) return r.Error } r.Handlers.Sign.Run(r) return r.Error } // Send will send the request returning error if errors are encountered. // // Send will sign the request prior to sending. All Send Handlers will // be executed in the order they were set. func (r *Request) Send() error { for { r.Sign() if r.Error != nil { return r.Error } if aws.BoolValue(r.Retryable) { if r.Service.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Service.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", r.Service.ServiceName, r.Operation.Name, r.RetryCount)) } // Re-seek the body back to the original point in for a retry so that // send will send the body's contents again in the upcoming request. 
r.Body.Seek(r.BodyStart, 0) r.HTTPRequest.Body = ioutil.NopCloser(r.Body) } r.Retryable = nil r.Handlers.Send.Run(r) if r.Error != nil { err := r.Error r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { debugLogReqError(r, "Send Request", false, r.Error) return r.Error } debugLogReqError(r, "Send Request", true, err) continue } r.Handlers.UnmarshalMeta.Run(r) r.Handlers.ValidateResponse.Run(r) if r.Error != nil { err := r.Error r.Handlers.UnmarshalError.Run(r) r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { debugLogReqError(r, "Validate Response", false, r.Error) return r.Error } debugLogReqError(r, "Validate Response", true, err) continue } r.Handlers.Unmarshal.Run(r) if r.Error != nil { err := r.Error r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { debugLogReqError(r, "Unmarshal Response", false, r.Error) return r.Error } debugLogReqError(r, "Unmarshal Response", true, err) continue } break } return nil } // HasNextPage returns true if this request has more pages of data available. func (r *Request) HasNextPage() bool { return r.nextPageTokens() != nil } // nextPageTokens returns the tokens to use when asking for the next page of // data. func (r *Request) nextPageTokens() []interface{} { if r.Operation.Paginator == nil { return nil } if r.Operation.TruncationToken != "" { tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken) if tr == nil || len(tr) == 0 { return nil } switch v := tr[0].(type) { case bool: if v == false { return nil } } } found := false tokens := make([]interface{}, len(r.Operation.OutputTokens)) for i, outtok := range r.Operation.OutputTokens { v := awsutil.ValuesAtAnyPath(r.Data, outtok) if v != nil && len(v) > 0 { found = true tokens[i] = v[0] } } if found { return tokens } return nil } // NextPage returns a new Request that can be executed to return the next // page of result data. Call .Send() on this request to execute it. 
func (r *Request) NextPage() *Request { tokens := r.nextPageTokens() if tokens == nil { return nil } data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() nr := New(r.Service, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) for i, intok := range nr.Operation.InputTokens { awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i]) } return nr } // EachPage iterates over each page of a paginated request object. The fn // parameter should be a function with the following sample signature: // // func(page *T, lastPage bool) bool { // return true // return false to stop iterating // } // // Where "T" is the structure type matching the output structure of the given // operation. For example, a request object generated by // DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput // as the structure "T". The lastPage value represents whether the page is // the last page of data or not. The return value of this function should // return true to keep iterating or false to stop. func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { for page := r; page != nil; page = page.NextPage() { page.Send() shouldContinue := fn(page.Data, !page.HasNextPage()) if page.Error != nil || !shouldContinue { return page.Error } } return nil } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go000066400000000000000000000037731267010174400262550ustar00rootroot00000000000000package request import ( "time" "github.com/aws/aws-sdk-go/aws/awserr" ) // Retryer is an interface to control retry logic for a given service. // The default implementation used by most services is the service.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. type Retryer interface { RetryRules(*Request) time.Duration ShouldRetry(*Request) bool MaxRetries() uint } // retryableCodes is a collection of service response codes which are retry-able // without any further action. 
var retryableCodes = map[string]struct{}{ "RequestError": {}, "ProvisionedThroughputExceededException": {}, "Throttling": {}, "ThrottlingException": {}, "RequestLimitExceeded": {}, "RequestThrottled": {}, } // credsExpiredCodes is a collection of error codes which signify the credentials // need to be refreshed. Expired tokens require refreshing of credentials, and // resigning before the request can be retried. var credsExpiredCodes = map[string]struct{}{ "ExpiredToken": {}, "ExpiredTokenException": {}, "RequestExpired": {}, // EC2 Only } func isCodeRetryable(code string) bool { if _, ok := retryableCodes[code]; ok { return true } return isCodeExpiredCreds(code) } func isCodeExpiredCreds(code string) bool { _, ok := credsExpiredCodes[code] return ok } // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if the request has no Error set. func (r *Request) IsErrorRetryable() bool { if r.Error != nil { if err, ok := r.Error.(awserr.Error); ok { return isCodeRetryable(err.Code()) } } return false } // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. func (r *Request) IsErrorExpired() bool { if r.Error != nil { if err, ok := r.Error.(awserr.Error); ok { return isCodeExpiredCreds(err.Code()) } } return false } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/service/000077500000000000000000000000001267010174400241705ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/service/default_retryer.go000066400000000000000000000030001267010174400277100ustar00rootroot00000000000000package service import ( "math" "math/rand" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" ) // DefaultRetryer implements basic retry logic using exponential backoff for // most services. 
If you want to implement custom retry logic, implement the // request.Retryer interface or create a structure type that composes this // struct and override the specific methods. For example, to override only // the MaxRetries method: // // type retryer struct { // service.DefaultRetryer // } // // // This implementation always has 100 max retries // func (d retryer) MaxRetries() uint { return 100 } type DefaultRetryer struct { *Service } // MaxRetries returns the number of maximum returns the service will use to make // an individual API request. func (d DefaultRetryer) MaxRetries() uint { if aws.IntValue(d.Service.Config.MaxRetries) < 0 { return d.DefaultMaxRetries } return uint(aws.IntValue(d.Service.Config.MaxRetries)) } var seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { delay := int(math.Pow(2, float64(r.RetryCount))) * (seededRand.Intn(30) + 30) return time.Duration(delay) * time.Millisecond } // ShouldRetry returns if the request should be retried. func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { if r.HTTPResponse.StatusCode >= 500 { return true } return r.IsErrorRetryable() } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/service/service.go000066400000000000000000000100761267010174400261630ustar00rootroot00000000000000package service import ( "fmt" "io/ioutil" "net/http" "net/http/httputil" "regexp" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/service/serviceinfo" "github.com/aws/aws-sdk-go/internal/endpoints" ) // A Service implements the base service request and response handling // used by all services. 
type Service struct { serviceinfo.ServiceInfo request.Retryer DefaultMaxRetries uint Handlers request.Handlers } var schemeRE = regexp.MustCompile("^([^:]+)://") // New will return a pointer to a new Server object initialized. func New(config *aws.Config) *Service { svc := &Service{ServiceInfo: serviceinfo.ServiceInfo{Config: config}} svc.Initialize() return svc } // Initialize initializes the service. func (s *Service) Initialize() { if s.Config == nil { s.Config = &aws.Config{} } if s.Config.HTTPClient == nil { s.Config.HTTPClient = http.DefaultClient } if s.Config.SleepDelay == nil { s.Config.SleepDelay = time.Sleep } s.Retryer = DefaultRetryer{s} s.DefaultMaxRetries = 3 s.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) s.Handlers.Build.PushBackNamed(corehandlers.UserAgentHandler) s.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) s.Handlers.Send.PushBackNamed(corehandlers.SendHandler) s.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) s.Handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) if !aws.BoolValue(s.Config.DisableParamValidation) { s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) } s.AddDebugHandlers() s.buildEndpoint() } // NewRequest returns a new Request pointer for the service API // operation and parameters. func (s *Service) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { return request.New(s.ServiceInfo, s.Handlers, s.Retryer, operation, params, data) } // buildEndpoint builds the endpoint values the service will use to make requests with. 
func (s *Service) buildEndpoint() { if aws.StringValue(s.Config.Endpoint) != "" { s.Endpoint = *s.Config.Endpoint } else if s.Endpoint == "" { s.Endpoint, s.SigningRegion = endpoints.EndpointForRegion(s.ServiceName, aws.StringValue(s.Config.Region)) } if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) { scheme := "https" if aws.BoolValue(s.Config.DisableSSL) { scheme = "http" } s.Endpoint = scheme + "://" + s.Endpoint } } // AddDebugHandlers injects debug logging handlers into the service to log request // debug information. func (s *Service) AddDebugHandlers() { if !s.Config.LogLevel.AtLeast(aws.LogDebug) { return } s.Handlers.Send.PushFront(logRequest) s.Handlers.Send.PushBack(logResponse) } const logReqMsg = `DEBUG: Request %s/%s Details: ---[ REQUEST POST-SIGN ]----------------------------- %s -----------------------------------------------------` func logRequest(r *request.Request) { logBody := r.Service.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) if logBody { // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's // Body as a NoOpCloser and will not be reset after read by the HTTP // client reader. 
r.Body.Seek(r.BodyStart, 0) r.HTTPRequest.Body = ioutil.NopCloser(r.Body) } r.Service.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.Service.ServiceName, r.Operation.Name, string(dumpedBody))) } const logRespMsg = `DEBUG: Response %s/%s Details: ---[ RESPONSE ]-------------------------------------- %s -----------------------------------------------------` func logResponse(r *request.Request) { var msg = "no reponse data" if r.HTTPResponse != nil { logBody := r.Service.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) msg = string(dumpedBody) } else if r.Error != nil { msg = r.Error.Error() } r.Service.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.Service.ServiceName, r.Operation.Name, msg)) } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/service/serviceinfo/000077500000000000000000000000001267010174400265045ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/service/serviceinfo/service_info.go000066400000000000000000000005261267010174400315110ustar00rootroot00000000000000package serviceinfo import "github.com/aws/aws-sdk-go/aws" // ServiceInfo wraps immutable data from the service.Service structure. type ServiceInfo struct { Config *aws.Config ServiceName string APIVersion string Endpoint string SigningName string SigningRegion string JSONVersion string TargetPrefix string } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/types.go000066400000000000000000000046571267010174400242370ustar00rootroot00000000000000package aws import ( "io" "sync" ) // ReadSeekCloser wraps a io.Reader returning a ReaderSeakerCloser func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } // ReaderSeekerCloser represents a reader that can also delegate io.Seeker and // io.Closer interfaces to the underlying object if they are available. type ReaderSeekerCloser struct { r io.Reader } // Read reads from the reader up to size of p. 
The number of bytes read, and // error if it occurred will be returned. // // If the reader is not an io.Reader zero bytes read, and nil error will be returned. // // Performs the same functionality as io.Reader Read func (r ReaderSeekerCloser) Read(p []byte) (int, error) { switch t := r.r.(type) { case io.Reader: return t.Read(p) } return 0, nil } // Seek sets the offset for the next Read to offset, interpreted according to // whence: 0 means relative to the origin of the file, 1 means relative to the // current offset, and 2 means relative to the end. Seek returns the new offset // and an error, if any. // // If the ReaderSeekerCloser is not an io.Seeker nothing will be done. func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { switch t := r.r.(type) { case io.Seeker: return t.Seek(offset, whence) } return int64(0), nil } // Close closes the ReaderSeekerCloser. // // If the ReaderSeekerCloser is not an io.Closer nothing will be done. func (r ReaderSeekerCloser) Close() error { switch t := r.r.(type) { case io.Closer: return t.Close() } return nil } // A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface // Can be used with the s3manager.Downloader to download content to a buffer // in memory. Safe to use concurrently. type WriteAtBuffer struct { buf []byte m sync.Mutex } // WriteAt writes a slice of bytes to a buffer starting at the position provided // The number of bytes written will be returned, or error. Can overwrite previous // written slices if the write ats overlap. func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { b.m.Lock() defer b.m.Unlock() expLen := pos + int64(len(p)) if int64(len(b.buf)) < expLen { newBuf := make([]byte, expLen) copy(newBuf, b.buf) b.buf = newBuf } copy(b.buf[pos:], p) return len(p), nil } // Bytes returns a slice of bytes written to the buffer. 
func (b *WriteAtBuffer) Bytes() []byte { b.m.Lock() defer b.m.Unlock() return b.buf[:len(b.buf):len(b.buf)] } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/aws/version.go000066400000000000000000000003461267010174400245470ustar00rootroot00000000000000// Package aws provides core functionality for making requests to AWS services. package aws // SDKName is the name of this AWS SDK const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK const SDKVersion = "0.9.9" docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/000077500000000000000000000000001267010174400235525ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/000077500000000000000000000000001267010174400255555ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go000066400000000000000000000015721267010174400301140ustar00rootroot00000000000000// Package endpoints validates regional endpoints for services. package endpoints //go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go //go:generate gofmt -s -w endpoints_map.go import "strings" // EndpointForRegion returns an endpoint and its signing region for a service and region. // if the service and region pair are not found endpoint and signingRegion will be empty. 
func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) { derivedKeys := []string{ region + "/" + svcName, region + "/*", "*/" + svcName, "*/*", } for _, key := range derivedKeys { if val, ok := endpointsMap.Endpoints[key]; ok { ep := val.Endpoint ep = strings.Replace(ep, "{region}", region, -1) ep = strings.Replace(ep, "{service}", svcName, -1) endpoint = ep signingRegion = val.SigningRegion return } } return } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json000066400000000000000000000035721267010174400304620ustar00rootroot00000000000000{ "version": 2, "endpoints": { "*/*": { "endpoint": "{service}.{region}.amazonaws.com" }, "cn-north-1/*": { "endpoint": "{service}.{region}.amazonaws.com.cn", "signatureVersion": "v4" }, "us-gov-west-1/iam": { "endpoint": "iam.us-gov.amazonaws.com" }, "us-gov-west-1/sts": { "endpoint": "sts.us-gov-west-1.amazonaws.com" }, "us-gov-west-1/s3": { "endpoint": "s3-{region}.amazonaws.com" }, "*/cloudfront": { "endpoint": "cloudfront.amazonaws.com", "signingRegion": "us-east-1" }, "*/cloudsearchdomain": { "endpoint": "", "signingRegion": "us-east-1" }, "*/iam": { "endpoint": "iam.amazonaws.com", "signingRegion": "us-east-1" }, "*/importexport": { "endpoint": "importexport.amazonaws.com", "signingRegion": "us-east-1" }, "*/route53": { "endpoint": "route53.amazonaws.com", "signingRegion": "us-east-1" }, "*/sts": { "endpoint": "sts.amazonaws.com", "signingRegion": "us-east-1" }, "us-east-1/sdb": { "endpoint": "sdb.amazonaws.com", "signingRegion": "us-east-1" }, "us-east-1/s3": { "endpoint": "s3.amazonaws.com" }, "us-west-1/s3": { "endpoint": "s3-{region}.amazonaws.com" }, "us-west-2/s3": { "endpoint": "s3-{region}.amazonaws.com" }, "eu-west-1/s3": { "endpoint": "s3-{region}.amazonaws.com" }, "ap-southeast-1/s3": { "endpoint": "s3-{region}.amazonaws.com" }, "ap-southeast-2/s3": { "endpoint": "s3-{region}.amazonaws.com" }, "ap-northeast-1/s3": { "endpoint": "s3-{region}.amazonaws.com" 
}, "sa-east-1/s3": { "endpoint": "s3-{region}.amazonaws.com" }, "eu-central-1/s3": { "endpoint": "{service}.{region}.amazonaws.com", "signatureVersion": "v4" } } } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go000066400000000000000000000036561267010174400307560ustar00rootroot00000000000000package endpoints // THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. type endpointStruct struct { Version int Endpoints map[string]endpointEntry } type endpointEntry struct { Endpoint string SigningRegion string } var endpointsMap = endpointStruct{ Version: 2, Endpoints: map[string]endpointEntry{ "*/*": { Endpoint: "{service}.{region}.amazonaws.com", }, "*/cloudfront": { Endpoint: "cloudfront.amazonaws.com", SigningRegion: "us-east-1", }, "*/cloudsearchdomain": { Endpoint: "", SigningRegion: "us-east-1", }, "*/iam": { Endpoint: "iam.amazonaws.com", SigningRegion: "us-east-1", }, "*/importexport": { Endpoint: "importexport.amazonaws.com", SigningRegion: "us-east-1", }, "*/route53": { Endpoint: "route53.amazonaws.com", SigningRegion: "us-east-1", }, "*/sts": { Endpoint: "sts.amazonaws.com", SigningRegion: "us-east-1", }, "ap-northeast-1/s3": { Endpoint: "s3-{region}.amazonaws.com", }, "ap-southeast-1/s3": { Endpoint: "s3-{region}.amazonaws.com", }, "ap-southeast-2/s3": { Endpoint: "s3-{region}.amazonaws.com", }, "cn-north-1/*": { Endpoint: "{service}.{region}.amazonaws.com.cn", }, "eu-central-1/s3": { Endpoint: "{service}.{region}.amazonaws.com", }, "eu-west-1/s3": { Endpoint: "s3-{region}.amazonaws.com", }, "sa-east-1/s3": { Endpoint: "s3-{region}.amazonaws.com", }, "us-east-1/s3": { Endpoint: "s3.amazonaws.com", }, "us-east-1/sdb": { Endpoint: "sdb.amazonaws.com", SigningRegion: "us-east-1", }, "us-gov-west-1/iam": { Endpoint: "iam.us-gov.amazonaws.com", }, "us-gov-west-1/s3": { Endpoint: "s3-{region}.amazonaws.com", }, "us-gov-west-1/sts": { Endpoint: "sts.us-gov-west-1.amazonaws.com", }, "us-west-1/s3": { Endpoint: 
"s3-{region}.amazonaws.com", }, "us-west-2/s3": { Endpoint: "s3-{region}.amazonaws.com", }, }, } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/000077500000000000000000000000001267010174400254135ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/000077500000000000000000000000001267010174400263645ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/000077500000000000000000000000001267010174400302335ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/build.go000066400000000000000000000112361267010174400316640ustar00rootroot00000000000000// Package jsonutil provides JSON serialisation of AWS requests and responses. package jsonutil import ( "bytes" "encoding/base64" "fmt" "reflect" "sort" "strconv" "strings" "time" ) // BuildJSON builds a JSON string for a given object v. func BuildJSON(v interface{}) ([]byte, error) { var buf bytes.Buffer err := buildAny(reflect.ValueOf(v), &buf, "") return buf.Bytes(), err } func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { value = reflect.Indirect(value) if !value.IsValid() { return nil } vtype := value.Type() t := tag.Get("type") if t == "" { switch vtype.Kind() { case reflect.Struct: // also it can't be a time object if _, ok := value.Interface().(time.Time); !ok { t = "structure" } case reflect.Slice: // also it can't be a byte slice if _, ok := value.Interface().([]byte); !ok { t = "list" } case reflect.Map: t = "map" } } switch t { case "structure": if field, ok := vtype.FieldByName("SDKShapeTraits"); ok { tag = field.Tag } return buildStruct(value, buf, tag) case "list": return buildList(value, buf, tag) case "map": return buildMap(value, buf, tag) default: return buildScalar(value, buf, tag) } } func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { if !value.IsValid() 
{ return nil } // unwrap payloads if payload := tag.Get("payload"); payload != "" { field, _ := value.Type().FieldByName(payload) tag = field.Tag value = elemOf(value.FieldByName(payload)) if !value.IsValid() { return nil } } buf.WriteString("{") t, fields := value.Type(), []*reflect.StructField{} for i := 0; i < t.NumField(); i++ { field := t.Field(i) member := value.FieldByName(field.Name) if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { continue // ignore unset fields } if c := field.Name[0:1]; strings.ToLower(c) == c { continue // ignore unexported fields } if field.Tag.Get("location") != "" { continue // ignore non-body elements } fields = append(fields, &field) } for i, field := range fields { member := value.FieldByName(field.Name) // figure out what this field is called name := field.Name if locName := field.Tag.Get("locationName"); locName != "" { name = locName } buf.WriteString(fmt.Sprintf("%q:", name)) err := buildAny(member, buf, field.Tag) if err != nil { return err } if i < len(fields)-1 { buf.WriteString(",") } } buf.WriteString("}") return nil } func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { buf.WriteString("[") for i := 0; i < value.Len(); i++ { buildAny(value.Index(i), buf, "") if i < value.Len()-1 { buf.WriteString(",") } } buf.WriteString("]") return nil } func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { buf.WriteString("{") keys := make([]string, value.Len()) for i, n := range value.MapKeys() { keys[i] = n.String() } sort.Strings(keys) for i, k := range keys { buf.WriteString(fmt.Sprintf("%q:", k)) buildAny(value.MapIndex(reflect.ValueOf(k)), buf, "") if i < len(keys)-1 { buf.WriteString(",") } } buf.WriteString("}") return nil } func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { switch converted := value.Interface().(type) { case string: writeString(converted, 
buf) case []byte: if !value.IsNil() { buf.WriteString(fmt.Sprintf("%q", base64.StdEncoding.EncodeToString(converted))) } case bool: buf.WriteString(strconv.FormatBool(converted)) case int64: buf.WriteString(strconv.FormatInt(converted, 10)) case float64: buf.WriteString(strconv.FormatFloat(converted, 'f', -1, 64)) case time.Time: buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) default: return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) } return nil } func writeString(s string, buf *bytes.Buffer) { buf.WriteByte('"') for _, r := range s { if r == '"' { buf.WriteString(`\"`) } else if r == '\\' { buf.WriteString(`\\`) } else if r == '\b' { buf.WriteString(`\b`) } else if r == '\f' { buf.WriteString(`\f`) } else if r == '\r' { buf.WriteString(`\r`) } else if r == '\t' { buf.WriteString(`\t`) } else if r == '\n' { buf.WriteString(`\n`) } else if r < 32 { fmt.Fprintf(buf, "\\u%0.4x", r) } else { buf.WriteRune(r) } } buf.WriteByte('"') } // Returns the reflection element of a value, if it is a pointer. func elemOf(value reflect.Value) reflect.Value { for value.Kind() == reflect.Ptr { value = value.Elem() } return value } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil/unmarshal.go000066400000000000000000000106131267010174400325550ustar00rootroot00000000000000package jsonutil import ( "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "reflect" "time" ) // UnmarshalJSON reads a stream and unmarshals the results in object v. 
func UnmarshalJSON(v interface{}, stream io.Reader) error { var out interface{} b, err := ioutil.ReadAll(stream) if err != nil { return err } if len(b) == 0 { return nil } if err := json.Unmarshal(b, &out); err != nil { return err } return unmarshalAny(reflect.ValueOf(v), out, "") } func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { vtype := value.Type() if vtype.Kind() == reflect.Ptr { vtype = vtype.Elem() // check kind of actual element type } t := tag.Get("type") if t == "" { switch vtype.Kind() { case reflect.Struct: // also it can't be a time object if _, ok := value.Interface().(*time.Time); !ok { t = "structure" } case reflect.Slice: // also it can't be a byte slice if _, ok := value.Interface().([]byte); !ok { t = "list" } case reflect.Map: t = "map" } } switch t { case "structure": if field, ok := vtype.FieldByName("SDKShapeTraits"); ok { tag = field.Tag } return unmarshalStruct(value, data, tag) case "list": return unmarshalList(value, data, tag) case "map": return unmarshalMap(value, data, tag) default: return unmarshalScalar(value, data, tag) } } func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } mapData, ok := data.(map[string]interface{}) if !ok { return fmt.Errorf("JSON value is not a structure (%#v)", data) } t := value.Type() if value.Kind() == reflect.Ptr { if value.IsNil() { // create the structure if it's nil s := reflect.New(value.Type().Elem()) value.Set(s) value = s } value = value.Elem() t = t.Elem() } // unwrap any payloads if payload := tag.Get("payload"); payload != "" { field, _ := t.FieldByName(payload) return unmarshalAny(value.FieldByName(payload), data, field.Tag) } for i := 0; i < t.NumField(); i++ { field := t.Field(i) if field.PkgPath != "" { continue // ignore unexported fields } // figure out what this field is called name := field.Name if locName := field.Tag.Get("locationName"); locName != "" { name = locName } member := 
value.FieldByName(field.Name) err := unmarshalAny(member, mapData[name], field.Tag) if err != nil { return err } } return nil } func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } listData, ok := data.([]interface{}) if !ok { return fmt.Errorf("JSON value is not a list (%#v)", data) } if value.IsNil() { l := len(listData) value.Set(reflect.MakeSlice(value.Type(), l, l)) } for i, c := range listData { err := unmarshalAny(value.Index(i), c, "") if err != nil { return err } } return nil } func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } mapData, ok := data.(map[string]interface{}) if !ok { return fmt.Errorf("JSON value is not a map (%#v)", data) } if value.IsNil() { value.Set(reflect.MakeMap(value.Type())) } for k, v := range mapData { kvalue := reflect.ValueOf(k) vvalue := reflect.New(value.Type().Elem()).Elem() unmarshalAny(vvalue, v, "") value.SetMapIndex(kvalue, vvalue) } return nil } func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { errf := func() error { return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) } switch d := data.(type) { case nil: return nil // nothing to do here case string: switch value.Interface().(type) { case *string: value.Set(reflect.ValueOf(&d)) case []byte: b, err := base64.StdEncoding.DecodeString(d) if err != nil { return err } value.Set(reflect.ValueOf(b)) default: return errf() } case float64: switch value.Interface().(type) { case *int64: di := int64(d) value.Set(reflect.ValueOf(&di)) case *float64: value.Set(reflect.ValueOf(&d)) case *time.Time: t := time.Unix(int64(d), 0).UTC() value.Set(reflect.ValueOf(&t)) default: return errf() } case bool: switch value.Interface().(type) { case *bool: value.Set(reflect.ValueOf(&d)) default: return errf() } default: return fmt.Errorf("unsupported JSON value (%v)", data) } return nil } 
docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/jsonrpc/000077500000000000000000000000001267010174400270715ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/jsonrpc/jsonrpc.go000066400000000000000000000055121267010174400311010ustar00rootroot00000000000000// Package jsonrpc provides JSON RPC utilities for serialisation of AWS // requests and responses. package jsonrpc //go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/json.json build_test.go //go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/json.json unmarshal_test.go import ( "encoding/json" "io/ioutil" "strings" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil" "github.com/aws/aws-sdk-go/internal/protocol/rest" ) var emptyJSON = []byte("{}") // Build builds a JSON payload for a JSON RPC request. func Build(req *request.Request) { var buf []byte var err error if req.ParamsFilled() { buf, err = jsonutil.BuildJSON(req.Params) if err != nil { req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) return } } else { buf = emptyJSON } if req.Service.TargetPrefix != "" || string(buf) != "{}" { req.SetBufferBody(buf) } if req.Service.TargetPrefix != "" { target := req.Service.TargetPrefix + "." + req.Operation.Name req.HTTPRequest.Header.Add("X-Amz-Target", target) } if req.Service.JSONVersion != "" { jsonVersion := req.Service.JSONVersion req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) } } // Unmarshal unmarshals a response for a JSON RPC service. 
func Unmarshal(req *request.Request) { defer req.HTTPResponse.Body.Close() if req.DataFilled() { err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) if err != nil { req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) } } return } // UnmarshalMeta unmarshals headers from a response for a JSON RPC service. func UnmarshalMeta(req *request.Request) { rest.UnmarshalMeta(req) } // UnmarshalError unmarshals an error response for a JSON RPC service. func UnmarshalError(req *request.Request) { defer req.HTTPResponse.Body.Close() bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) if err != nil { req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) return } if len(bodyBytes) == 0 { req.Error = awserr.NewRequestFailure( awserr.New("SerializationError", req.HTTPResponse.Status, nil), req.HTTPResponse.StatusCode, "", ) return } var jsonErr jsonErrorResponse if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) return } codes := strings.SplitN(jsonErr.Code, "#", 2) req.Error = awserr.NewRequestFailure( awserr.New(codes[len(codes)-1], jsonErr.Message, nil), req.HTTPResponse.StatusCode, req.RequestID, ) } type jsonErrorResponse struct { Code string `json:"__type"` Message string `json:"message"` } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/000077500000000000000000000000001267010174400263705ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go000066400000000000000000000125771267010174400300320ustar00rootroot00000000000000// Package rest provides RESTful serialization of AWS requests and responses. 
package rest import ( "bytes" "encoding/base64" "fmt" "io" "net/url" "path" "reflect" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" ) // RFC822 returns an RFC822 formatted timestamp for AWS protocols const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool func init() { for i := 0; i < len(noEscape); i++ { // AWS expects every character except these to be escaped noEscape[i] = (i >= 'A' && i <= 'Z') || (i >= 'a' && i <= 'z') || (i >= '0' && i <= '9') || i == '-' || i == '.' || i == '_' || i == '~' } } // Build builds the REST component of a service request. func Build(r *request.Request) { if r.ParamsFilled() { v := reflect.ValueOf(r.Params).Elem() buildLocationElements(r, v) buildBody(r, v) } } func buildLocationElements(r *request.Request, v reflect.Value) { query := r.HTTPRequest.URL.Query() for i := 0; i < v.NumField(); i++ { m := v.Field(i) if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { continue } if m.IsValid() { field := v.Type().Field(i) name := field.Tag.Get("locationName") if name == "" { name = field.Name } if m.Kind() == reflect.Ptr { m = m.Elem() } if !m.IsValid() { continue } switch field.Tag.Get("location") { case "headers": // header maps buildHeaderMap(r, m, field.Tag.Get("locationName")) case "header": buildHeader(r, m, name) case "uri": buildURI(r, m, name) case "querystring": buildQueryString(r, m, name, query) } } if r.Error != nil { return } } r.HTTPRequest.URL.RawQuery = query.Encode() updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) } func buildBody(r *request.Request, v reflect.Value) { if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { pfield, _ := v.Type().FieldByName(payloadName) if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { payload := 
reflect.Indirect(v.FieldByName(payloadName)) if payload.IsValid() && payload.Interface() != nil { switch reader := payload.Interface().(type) { case io.ReadSeeker: r.SetReaderBody(reader) case []byte: r.SetBufferBody(reader) case string: r.SetStringBody(reader) default: r.Error = awserr.New("SerializationError", "failed to encode REST request", fmt.Errorf("unknown payload type %s", payload.Type())) } } } } } } func buildHeader(r *request.Request, v reflect.Value, name string) { str, err := convertType(v) if err != nil { r.Error = awserr.New("SerializationError", "failed to encode REST request", err) } else if str != nil { r.HTTPRequest.Header.Add(name, *str) } } func buildHeaderMap(r *request.Request, v reflect.Value, prefix string) { for _, key := range v.MapKeys() { str, err := convertType(v.MapIndex(key)) if err != nil { r.Error = awserr.New("SerializationError", "failed to encode REST request", err) } else if str != nil { r.HTTPRequest.Header.Add(prefix+key.String(), *str) } } } func buildURI(r *request.Request, v reflect.Value, name string) { value, err := convertType(v) if err != nil { r.Error = awserr.New("SerializationError", "failed to encode REST request", err) } else if value != nil { uri := r.HTTPRequest.URL.Path uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1) uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1) r.HTTPRequest.URL.Path = uri } } func buildQueryString(r *request.Request, v reflect.Value, name string, query url.Values) { str, err := convertType(v) if err != nil { r.Error = awserr.New("SerializationError", "failed to encode REST request", err) } else if str != nil { query.Set(name, *str) } } func updatePath(url *url.URL, urlPath string) { scheme, query := url.Scheme, url.RawQuery hasSlash := strings.HasSuffix(urlPath, "/") // clean up path urlPath = path.Clean(urlPath) if hasSlash && !strings.HasSuffix(urlPath, "/") { urlPath += "/" } // get formatted URL minus scheme so we can build this into 
Opaque url.Scheme, url.Path, url.RawQuery = "", "", "" s := url.String() url.Scheme = scheme url.RawQuery = query // build opaque URI url.Opaque = s + urlPath } // EscapePath escapes part of a URL path in Amazon style func EscapePath(path string, encodeSep bool) string { var buf bytes.Buffer for i := 0; i < len(path); i++ { c := path[i] if noEscape[c] || (c == '/' && !encodeSep) { buf.WriteByte(c) } else { buf.WriteByte('%') buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16))) } } return buf.String() } func convertType(v reflect.Value) (*string, error) { v = reflect.Indirect(v) if !v.IsValid() { return nil, nil } var str string switch value := v.Interface().(type) { case string: str = value case []byte: str = base64.StdEncoding.EncodeToString(value) case bool: str = strconv.FormatBool(value) case int64: str = strconv.FormatInt(value, 10) case float64: str = strconv.FormatFloat(value, 'f', -1, 64) case time.Time: str = value.UTC().Format(RFC822) default: err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) return nil, err } return &str, nil } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go000066400000000000000000000021561267010174400303540ustar00rootroot00000000000000package rest import "reflect" // PayloadMember returns the payload field member of i if there is one, or nil. func PayloadMember(i interface{}) interface{} { if i == nil { return nil } v := reflect.ValueOf(i).Elem() if !v.IsValid() { return nil } if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { field, _ := v.Type().FieldByName(payloadName) if field.Tag.Get("type") != "structure" { return nil } payload := v.FieldByName(payloadName) if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { return payload.Interface() } } } return nil } // PayloadType returns the type of a payload field member of i if there is one, or "". 
func PayloadType(i interface{}) string { v := reflect.Indirect(reflect.ValueOf(i)) if !v.IsValid() { return "" } if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { if member, ok := v.Type().FieldByName(payloadName); ok { return member.Tag.Get("type") } } } return "" } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go000066400000000000000000000111361267010174400307130ustar00rootroot00000000000000package rest import ( "encoding/base64" "fmt" "io/ioutil" "net/http" "reflect" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" ) // Unmarshal unmarshals the REST component of a response in a REST service. func Unmarshal(r *request.Request) { if r.DataFilled() { v := reflect.Indirect(reflect.ValueOf(r.Data)) unmarshalBody(r, v) } } // UnmarshalMeta unmarshals the REST metadata of a response in a REST service func UnmarshalMeta(r *request.Request) { r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") if r.DataFilled() { v := reflect.Indirect(reflect.ValueOf(r.Data)) unmarshalLocationElements(r, v) } } func unmarshalBody(r *request.Request, v reflect.Value) { if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { pfield, _ := v.Type().FieldByName(payloadName) if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { payload := v.FieldByName(payloadName) if payload.IsValid() { switch payload.Interface().(type) { case []byte: b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) } else { payload.Set(reflect.ValueOf(b)) } case *string: b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) } else { str := string(b) 
payload.Set(reflect.ValueOf(&str)) } default: switch payload.Type().String() { case "io.ReadSeeker": payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) case "aws.ReadSeekCloser", "io.ReadCloser": payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) default: r.Error = awserr.New("SerializationError", "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } } } } } } } func unmarshalLocationElements(r *request.Request, v reflect.Value) { for i := 0; i < v.NumField(); i++ { m, field := v.Field(i), v.Type().Field(i) if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { continue } if m.IsValid() { name := field.Tag.Get("locationName") if name == "" { name = field.Name } switch field.Tag.Get("location") { case "statusCode": unmarshalStatusCode(m, r.HTTPResponse.StatusCode) case "header": err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) break } case "headers": prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) break } } } if r.Error != nil { return } } } func unmarshalStatusCode(v reflect.Value, statusCode int) { if !v.IsValid() { return } switch v.Interface().(type) { case *int64: s := int64(statusCode) v.Set(reflect.ValueOf(&s)) } } func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} for k, v := range headers { k = http.CanonicalHeaderKey(k) if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { out[k[len(prefix):]] = &v[0] } } r.Set(reflect.ValueOf(out)) } return nil } func unmarshalHeader(v reflect.Value, header string) error { if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { 
return nil } switch v.Interface().(type) { case *string: v.Set(reflect.ValueOf(&header)) case []byte: b, err := base64.StdEncoding.DecodeString(header) if err != nil { return err } v.Set(reflect.ValueOf(&b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { return err } v.Set(reflect.ValueOf(&b)) case *int64: i, err := strconv.ParseInt(header, 10, 64) if err != nil { return err } v.Set(reflect.ValueOf(&i)) case *float64: f, err := strconv.ParseFloat(header, 64) if err != nil { return err } v.Set(reflect.ValueOf(&f)) case *time.Time: t, err := time.Parse(RFC822, header) if err != nil { return err } v.Set(reflect.ValueOf(&t)) default: err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) return err } return nil } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/signer/000077500000000000000000000000001267010174400250415ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/signer/v4/000077500000000000000000000000001267010174400253725ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go000066400000000000000000000220751267010174400262600ustar00rootroot00000000000000// Package v4 implements signing for AWS V4 signer package v4 import ( "crypto/hmac" "crypto/sha256" "encoding/hex" "fmt" "io" "net/http" "net/url" "sort" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/protocol/rest" ) const ( authHeaderPrefix = "AWS4-HMAC-SHA256" timeFormat = "20060102T150405Z" shortTimeFormat = "20060102" ) var ignoredHeaders = map[string]bool{ "Authorization": true, "Content-Type": true, "Content-Length": true, "User-Agent": true, } type signer struct { Request *http.Request Time time.Time ExpireTime time.Duration ServiceName string Region string CredValues credentials.Value Credentials *credentials.Credentials Query 
url.Values Body io.ReadSeeker Debug aws.LogLevelType Logger aws.Logger isPresign bool formattedTime string formattedShortTime string signedHeaders string canonicalHeaders string canonicalString string credentialString string stringToSign string signature string authorization string } // Sign requests with signature version 4. // // Will sign the requests with the service config's Credentials object // Signing is skipped if the credentials is the credentials.AnonymousCredentials // object. func Sign(req *request.Request) { // If the request does not need to be signed ignore the signing of the // request if the AnonymousCredentials object is used. if req.Service.Config.Credentials == credentials.AnonymousCredentials { return } region := req.Service.SigningRegion if region == "" { region = aws.StringValue(req.Service.Config.Region) } name := req.Service.SigningName if name == "" { name = req.Service.ServiceName } s := signer{ Request: req.HTTPRequest, Time: req.Time, ExpireTime: req.ExpireTime, Query: req.HTTPRequest.URL.Query(), Body: req.Body, ServiceName: name, Region: region, Credentials: req.Service.Config.Credentials, Debug: req.Service.Config.LogLevel.Value(), Logger: req.Service.Config.Logger, } req.Error = s.sign() } func (v4 *signer) sign() error { if v4.ExpireTime != 0 { v4.isPresign = true } if v4.isRequestSigned() { if !v4.Credentials.IsExpired() { // If the request is already signed, and the credentials have not // expired yet ignore the signing request. return nil } // The credentials have expired for this request. The current signing // is invalid, and needs to be request because the request will fail. if v4.isPresign { v4.removePresign() // Update the request's query string to ensure the values stays in // sync in the case retrieving the new credentials fails. 
v4.Request.URL.RawQuery = v4.Query.Encode() } } var err error v4.CredValues, err = v4.Credentials.Get() if err != nil { return err } if v4.isPresign { v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) if v4.CredValues.SessionToken != "" { v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) } else { v4.Query.Del("X-Amz-Security-Token") } } else if v4.CredValues.SessionToken != "" { v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) } v4.build() if v4.Debug.Matches(aws.LogDebugWithSigning) { v4.logSigningInfo() } return nil } const logSignInfoMsg = `DEBUG: Request Signiture: ---[ CANONICAL STRING ]----------------------------- %s ---[ STRING TO SIGN ]-------------------------------- %s%s -----------------------------------------------------` const logSignedURLMsg = ` ---[ SIGNED URL ]------------------------------------ %s` func (v4 *signer) logSigningInfo() { signedURLMsg := "" if v4.isPresign { signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) } msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) v4.Logger.Log(msg) } func (v4 *signer) build() { v4.buildTime() // no depends v4.buildCredentialString() // no depends if v4.isPresign { v4.buildQuery() // no depends } v4.buildCanonicalHeaders() // depends on cred string v4.buildCanonicalString() // depends on canon headers / signed headers v4.buildStringToSign() // depends on canon string v4.buildSignature() // depends on string to sign if v4.isPresign { v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature } else { parts := []string{ authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, "SignedHeaders=" + v4.signedHeaders, "Signature=" + v4.signature, } v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) } } func (v4 *signer) buildTime() { v4.formattedTime = v4.Time.UTC().Format(timeFormat) v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) if v4.isPresign { 
duration := int64(v4.ExpireTime / time.Second) v4.Query.Set("X-Amz-Date", v4.formattedTime) v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) } else { v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) } } func (v4 *signer) buildCredentialString() { v4.credentialString = strings.Join([]string{ v4.formattedShortTime, v4.Region, v4.ServiceName, "aws4_request", }, "/") if v4.isPresign { v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) } } func (v4 *signer) buildQuery() { for k, h := range v4.Request.Header { if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") { continue // never hoist x-amz-* headers, they must be signed } if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { continue // never hoist ignored headers } v4.Request.Header.Del(k) v4.Query.Del(k) for _, v := range h { v4.Query.Add(k, v) } } } func (v4 *signer) buildCanonicalHeaders() { var headers []string headers = append(headers, "host") for k := range v4.Request.Header { if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { continue // ignored header } headers = append(headers, strings.ToLower(k)) } sort.Strings(headers) v4.signedHeaders = strings.Join(headers, ";") if v4.isPresign { v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) } headerValues := make([]string, len(headers)) for i, k := range headers { if k == "host" { headerValues[i] = "host:" + v4.Request.URL.Host } else { headerValues[i] = k + ":" + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") } } v4.canonicalHeaders = strings.Join(headerValues, "\n") } func (v4 *signer) buildCanonicalString() { v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) uri := v4.Request.URL.Opaque if uri != "" { uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") } else { uri = v4.Request.URL.Path } if uri == "" { uri = "/" } if v4.ServiceName != "s3" { uri = rest.EscapePath(uri, false) } v4.canonicalString = strings.Join([]string{ 
v4.Request.Method, uri, v4.Request.URL.RawQuery, v4.canonicalHeaders + "\n", v4.signedHeaders, v4.bodyDigest(), }, "\n") } func (v4 *signer) buildStringToSign() { v4.stringToSign = strings.Join([]string{ authHeaderPrefix, v4.formattedTime, v4.credentialString, hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), }, "\n") } func (v4 *signer) buildSignature() { secret := v4.CredValues.SecretAccessKey date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) region := makeHmac(date, []byte(v4.Region)) service := makeHmac(region, []byte(v4.ServiceName)) credentials := makeHmac(service, []byte("aws4_request")) signature := makeHmac(credentials, []byte(v4.stringToSign)) v4.signature = hex.EncodeToString(signature) } func (v4 *signer) bodyDigest() string { hash := v4.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { if v4.isPresign && v4.ServiceName == "s3" { hash = "UNSIGNED-PAYLOAD" } else if v4.Body == nil { hash = hex.EncodeToString(makeSha256([]byte{})) } else { hash = hex.EncodeToString(makeSha256Reader(v4.Body)) } v4.Request.Header.Add("X-Amz-Content-Sha256", hash) } return hash } // isRequestSigned returns if the request is currently signed or presigned func (v4 *signer) isRequestSigned() bool { if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { return true } if v4.Request.Header.Get("Authorization") != "" { return true } return false } // unsign removes signing flags for both signed and presigned requests. 
func (v4 *signer) removePresign() { v4.Query.Del("X-Amz-Algorithm") v4.Query.Del("X-Amz-Signature") v4.Query.Del("X-Amz-Security-Token") v4.Query.Del("X-Amz-Date") v4.Query.Del("X-Amz-Expires") v4.Query.Del("X-Amz-Credential") v4.Query.Del("X-Amz-SignedHeaders") } func makeHmac(key []byte, data []byte) []byte { hash := hmac.New(sha256.New, key) hash.Write(data) return hash.Sum(nil) } func makeSha256(data []byte) []byte { hash := sha256.New() hash.Write(data) return hash.Sum(nil) } func makeSha256Reader(reader io.ReadSeeker) []byte { hash := sha256.New() start, _ := reader.Seek(0, 1) defer reader.Seek(start, 0) io.Copy(hash, reader) return hash.Sum(nil) } docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/service/000077500000000000000000000000001267010174400233765ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/000077500000000000000000000000001267010174400264205ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go000066400000000000000000002567731267010174400275440ustar00rootroot00000000000000// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. // Package cloudwatchlogs provides a client for Amazon CloudWatch Logs. package cloudwatchlogs import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) const opCancelExportTask = "CancelExportTask" // CancelExportTaskRequest generates a request for the CancelExportTask operation. func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { op := &request.Operation{ Name: opCancelExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CancelExportTaskInput{} } req = c.newRequest(op, input, output) output = &CancelExportTaskOutput{} req.Data = output return } // Cancels an export task if it is in PENDING or RUNNING state. 
func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { req, out := c.CancelExportTaskRequest(input) err := req.Send() return out, err } const opCreateExportTask = "CreateExportTask" // CreateExportTaskRequest generates a request for the CreateExportTask operation. func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { op := &request.Operation{ Name: opCreateExportTask, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateExportTaskInput{} } req = c.newRequest(op, input, output) output = &CreateExportTaskOutput{} req.Data = output return } // Creates an ExportTask which allows you to efficiently export data from a // Log Group to your Amazon S3 bucket. // // This is an asynchronous call. If all the required information is provided, // this API will initiate an export task and respond with the task Id. Once // started, DescribeExportTasks can be used to get the status of an export task. func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { req, out := c.CreateExportTaskRequest(input) err := req.Send() return out, err } const opCreateLogGroup = "CreateLogGroup" // CreateLogGroupRequest generates a request for the CreateLogGroup operation. func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { op := &request.Operation{ Name: opCreateLogGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateLogGroupInput{} } req = c.newRequest(op, input, output) output = &CreateLogGroupOutput{} req.Data = output return } // Creates a new log group with the specified name. The name of the log group // must be unique within a region for an AWS account. You can create up to 500 // log groups per account. 
// // You must use the following guidelines when naming a log group: Log group // names can be between 1 and 512 characters long. Allowed characters are a-z, // A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { req, out := c.CreateLogGroupRequest(input) err := req.Send() return out, err } const opCreateLogStream = "CreateLogStream" // CreateLogStreamRequest generates a request for the CreateLogStream operation. func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { op := &request.Operation{ Name: opCreateLogStream, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateLogStreamInput{} } req = c.newRequest(op, input, output) output = &CreateLogStreamOutput{} req.Data = output return } // Creates a new log stream in the specified log group. The name of the log // stream must be unique within the log group. There is no limit on the number // of log streams that can exist in a log group. // // You must use the following guidelines when naming a log stream: Log stream // names can be between 1 and 512 characters long. The ':' colon character is // not allowed. func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { req, out := c.CreateLogStreamRequest(input) err := req.Send() return out, err } const opDeleteDestination = "DeleteDestination" // DeleteDestinationRequest generates a request for the DeleteDestination operation. 
func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { op := &request.Operation{ Name: opDeleteDestination, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteDestinationInput{} } req = c.newRequest(op, input, output) output = &DeleteDestinationOutput{} req.Data = output return } // Deletes the destination with the specified name and eventually disables all // the subscription filters that publish to it. This will not delete the physical // resource encapsulated by the destination. func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) { req, out := c.DeleteDestinationRequest(input) err := req.Send() return out, err } const opDeleteLogGroup = "DeleteLogGroup" // DeleteLogGroupRequest generates a request for the DeleteLogGroup operation. func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { op := &request.Operation{ Name: opDeleteLogGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteLogGroupInput{} } req = c.newRequest(op, input, output) output = &DeleteLogGroupOutput{} req.Data = output return } // Deletes the log group with the specified name and permanently deletes all // the archived log events associated with it. func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) { req, out := c.DeleteLogGroupRequest(input) err := req.Send() return out, err } const opDeleteLogStream = "DeleteLogStream" // DeleteLogStreamRequest generates a request for the DeleteLogStream operation. 
func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { op := &request.Operation{ Name: opDeleteLogStream, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteLogStreamInput{} } req = c.newRequest(op, input, output) output = &DeleteLogStreamOutput{} req.Data = output return } // Deletes a log stream and permanently deletes all the archived log events // associated with it. func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) { req, out := c.DeleteLogStreamRequest(input) err := req.Send() return out, err } const opDeleteMetricFilter = "DeleteMetricFilter" // DeleteMetricFilterRequest generates a request for the DeleteMetricFilter operation. func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { op := &request.Operation{ Name: opDeleteMetricFilter, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteMetricFilterInput{} } req = c.newRequest(op, input, output) output = &DeleteMetricFilterOutput{} req.Data = output return } // Deletes a metric filter associated with the specified log group. func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) { req, out := c.DeleteMetricFilterRequest(input) err := req.Send() return out, err } const opDeleteRetentionPolicy = "DeleteRetentionPolicy" // DeleteRetentionPolicyRequest generates a request for the DeleteRetentionPolicy operation. 
func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { op := &request.Operation{ Name: opDeleteRetentionPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteRetentionPolicyInput{} } req = c.newRequest(op, input, output) output = &DeleteRetentionPolicyOutput{} req.Data = output return } // Deletes the retention policy of the specified log group. Log events would // not expire if they belong to log groups without a retention policy. func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { req, out := c.DeleteRetentionPolicyRequest(input) err := req.Send() return out, err } const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" // DeleteSubscriptionFilterRequest generates a request for the DeleteSubscriptionFilter operation. func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { op := &request.Operation{ Name: opDeleteSubscriptionFilter, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteSubscriptionFilterInput{} } req = c.newRequest(op, input, output) output = &DeleteSubscriptionFilterOutput{} req.Data = output return } // Deletes a subscription filter associated with the specified log group. func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { req, out := c.DeleteSubscriptionFilterRequest(input) err := req.Send() return out, err } const opDescribeDestinations = "DescribeDestinations" // DescribeDestinationsRequest generates a request for the DescribeDestinations operation. 
func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { op := &request.Operation{ Name: opDescribeDestinations, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, LimitToken: "limit", TruncationToken: "", }, } if input == nil { input = &DescribeDestinationsInput{} } req = c.newRequest(op, input, output) output = &DescribeDestinationsOutput{} req.Data = output return } // Returns all the destinations that are associated with the AWS account making // the request. The list returned in the response is ASCII-sorted by destination // name. // // By default, this operation returns up to 50 destinations. If there are // more destinations to list, the response would contain a nextToken value in // the response body. You can also limit the number of destinations returned // in the response by specifying the limit parameter in the request. func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { req, out := c.DescribeDestinationsRequest(input) err := req.Send() return out, err } func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDestinationsRequest(input) return page.EachPage(func(p interface{}, lastPage bool) bool { return fn(p.(*DescribeDestinationsOutput), lastPage) }) } const opDescribeExportTasks = "DescribeExportTasks" // DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. 
func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { op := &request.Operation{ Name: opDescribeExportTasks, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeExportTasksInput{} } req = c.newRequest(op, input, output) output = &DescribeExportTasksOutput{} req.Data = output return } // Returns all the export tasks that are associated with the AWS account making // the request. The export tasks can be filtered based on TaskId or TaskStatus. // // By default, this operation returns up to 50 export tasks that satisfy the // specified filters. If there are more export tasks to list, the response would // contain a nextToken value in the response body. You can also limit the number // of export tasks returned in the response by specifying the limit parameter // in the request. func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { req, out := c.DescribeExportTasksRequest(input) err := req.Send() return out, err } const opDescribeLogGroups = "DescribeLogGroups" // DescribeLogGroupsRequest generates a request for the DescribeLogGroups operation. func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { op := &request.Operation{ Name: opDescribeLogGroups, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, LimitToken: "limit", TruncationToken: "", }, } if input == nil { input = &DescribeLogGroupsInput{} } req = c.newRequest(op, input, output) output = &DescribeLogGroupsOutput{} req.Data = output return } // Returns all the log groups that are associated with the AWS account making // the request. The list returned in the response is ASCII-sorted by log group // name. // // By default, this operation returns up to 50 log groups. 
If there are more // log groups to list, the response would contain a nextToken value in the response // body. You can also limit the number of log groups returned in the response // by specifying the limit parameter in the request. func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { req, out := c.DescribeLogGroupsRequest(input) err := req.Send() return out, err } func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeLogGroupsRequest(input) return page.EachPage(func(p interface{}, lastPage bool) bool { return fn(p.(*DescribeLogGroupsOutput), lastPage) }) } const opDescribeLogStreams = "DescribeLogStreams" // DescribeLogStreamsRequest generates a request for the DescribeLogStreams operation. func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { op := &request.Operation{ Name: opDescribeLogStreams, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, LimitToken: "limit", TruncationToken: "", }, } if input == nil { input = &DescribeLogStreamsInput{} } req = c.newRequest(op, input, output) output = &DescribeLogStreamsOutput{} req.Data = output return } // Returns all the log streams that are associated with the specified log group. // The list returned in the response is ASCII-sorted by log stream name. // // By default, this operation returns up to 50 log streams. If there are more // log streams to list, the response would contain a nextToken value in the // response body. You can also limit the number of log streams returned in the // response by specifying the limit parameter in the request. This operation // has a limit of five transactions per second, after which transactions are // throttled. 
func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) { req, out := c.DescribeLogStreamsRequest(input) err := req.Send() return out, err } func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeLogStreamsRequest(input) return page.EachPage(func(p interface{}, lastPage bool) bool { return fn(p.(*DescribeLogStreamsOutput), lastPage) }) } const opDescribeMetricFilters = "DescribeMetricFilters" // DescribeMetricFiltersRequest generates a request for the DescribeMetricFilters operation. func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) { op := &request.Operation{ Name: opDescribeMetricFilters, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, LimitToken: "limit", TruncationToken: "", }, } if input == nil { input = &DescribeMetricFiltersInput{} } req = c.newRequest(op, input, output) output = &DescribeMetricFiltersOutput{} req.Data = output return } // Returns all the metrics filters associated with the specified log group. // The list returned in the response is ASCII-sorted by filter name. // // By default, this operation returns up to 50 metric filters. If there are // more metric filters to list, the response would contain a nextToken value // in the response body. You can also limit the number of metric filters returned // in the response by specifying the limit parameter in the request. 
func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) { req, out := c.DescribeMetricFiltersRequest(input) err := req.Send() return out, err } func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeMetricFiltersRequest(input) return page.EachPage(func(p interface{}, lastPage bool) bool { return fn(p.(*DescribeMetricFiltersOutput), lastPage) }) } const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" // DescribeSubscriptionFiltersRequest generates a request for the DescribeSubscriptionFilters operation. func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { op := &request.Operation{ Name: opDescribeSubscriptionFilters, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, LimitToken: "limit", TruncationToken: "", }, } if input == nil { input = &DescribeSubscriptionFiltersInput{} } req = c.newRequest(op, input, output) output = &DescribeSubscriptionFiltersOutput{} req.Data = output return } // Returns all the subscription filters associated with the specified log group. // The list returned in the response is ASCII-sorted by filter name. // // By default, this operation returns up to 50 subscription filters. If there // are more subscription filters to list, the response would contain a nextToken // value in the response body. You can also limit the number of subscription // filters returned in the response by specifying the limit parameter in the // request. 
func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) { req, out := c.DescribeSubscriptionFiltersRequest(input) err := req.Send() return out, err } func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeSubscriptionFiltersRequest(input) return page.EachPage(func(p interface{}, lastPage bool) bool { return fn(p.(*DescribeSubscriptionFiltersOutput), lastPage) }) } const opFilterLogEvents = "FilterLogEvents" // FilterLogEventsRequest generates a request for the FilterLogEvents operation. func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) { op := &request.Operation{ Name: opFilterLogEvents, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, LimitToken: "limit", TruncationToken: "", }, } if input == nil { input = &FilterLogEventsInput{} } req = c.newRequest(op, input, output) output = &FilterLogEventsOutput{} req.Data = output return } // Retrieves log events, optionally filtered by a filter pattern from the specified // log group. You can provide an optional time range to filter the results on // the event timestamp. You can limit the streams searched to an explicit list // of logStreamNames. // // By default, this operation returns as much matching log events as can fit // in a response size of 1MB, up to 10,000 log events, or all the events found // within a time-bounded scan window. If the response includes a nextToken, // then there is more data to search, and the search can be resumed with a new // request providing the nextToken. 
The response will contain a list of searchedLogStreams // that contains information about which streams were searched in the request // and whether they have been searched completely or require further pagination. // The limit parameter in the request. can be used to specify the maximum number // of events to return in a page. func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) { req, out := c.FilterLogEventsRequest(input) err := req.Send() return out, err } func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.FilterLogEventsRequest(input) return page.EachPage(func(p interface{}, lastPage bool) bool { return fn(p.(*FilterLogEventsOutput), lastPage) }) } const opGetLogEvents = "GetLogEvents" // GetLogEventsRequest generates a request for the GetLogEvents operation. func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) { op := &request.Operation{ Name: opGetLogEvents, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextForwardToken"}, LimitToken: "limit", TruncationToken: "", }, } if input == nil { input = &GetLogEventsInput{} } req = c.newRequest(op, input, output) output = &GetLogEventsOutput{} req.Data = output return } // Retrieves log events from the specified log stream. You can provide an optional // time range to filter the results on the event timestamp. // // By default, this operation returns as much log events as can fit in a response // size of 1MB, up to 10,000 log events. The response will always include a // nextForwardToken and a nextBackwardToken in the response body. You can use // any of these tokens in subsequent GetLogEvents requests to paginate through // events in either forward or backward direction. 
You can also limit the number // of log events returned in the response by specifying the limit parameter // in the request. func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) { req, out := c.GetLogEventsRequest(input) err := req.Send() return out, err } func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetLogEventsRequest(input) return page.EachPage(func(p interface{}, lastPage bool) bool { return fn(p.(*GetLogEventsOutput), lastPage) }) } const opPutDestination = "PutDestination" // PutDestinationRequest generates a request for the PutDestination operation. func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) { op := &request.Operation{ Name: opPutDestination, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &PutDestinationInput{} } req = c.newRequest(op, input, output) output = &PutDestinationOutput{} req.Data = output return } // Creates or updates a Destination. A destination encapsulates a physical resource // (such as a Kinesis stream) and allows you to subscribe to a real-time stream // of log events of a different account, ingested through PutLogEvents requests. // Currently, the only supported physical resource is a Amazon Kinesis stream // belonging to the same account as the destination. // // A destination controls what is written to its Amazon Kinesis stream through // an access policy. By default, PutDestination does not set any access policy // with the destination, which means a cross-account user will not be able to // call PutSubscriptionFilter against this destination. To enable that, the // destination owner must call PutDestinationPolicy after PutDestination. 
func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) { req, out := c.PutDestinationRequest(input) err := req.Send() return out, err } const opPutDestinationPolicy = "PutDestinationPolicy" // PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation. func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) { op := &request.Operation{ Name: opPutDestinationPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &PutDestinationPolicyInput{} } req = c.newRequest(op, input, output) output = &PutDestinationPolicyOutput{} req.Data = output return } // Creates or updates an access policy associated with an existing Destination. // An access policy is an IAM policy document (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) // that is used to authorize claims to register a subscription filter against // a given destination. func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) { req, out := c.PutDestinationPolicyRequest(input) err := req.Send() return out, err } const opPutLogEvents = "PutLogEvents" // PutLogEventsRequest generates a request for the PutLogEvents operation. func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) { op := &request.Operation{ Name: opPutLogEvents, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &PutLogEventsInput{} } req = c.newRequest(op, input, output) output = &PutLogEventsOutput{} req.Data = output return } // Uploads a batch of log events to the specified log stream. // // Every PutLogEvents request must include the sequenceToken obtained from // the response of the previous request. An upload in a newly created log stream // does not require a sequenceToken. 
// // The batch of events must satisfy the following constraints: The maximum // batch size is 1,048,576 bytes, and this size is calculated as the sum of // all event messages in UTF-8, plus 26 bytes for each log event. None of the // log events in the batch can be more than 2 hours in the future. None of the // log events in the batch can be older than 14 days or the retention period // of the log group. The log events in the batch must be in chronological ordered // by their timestamp. The maximum number of log events in a batch is 10,000. func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) { req, out := c.PutLogEventsRequest(input) err := req.Send() return out, err } const opPutMetricFilter = "PutMetricFilter" // PutMetricFilterRequest generates a request for the PutMetricFilter operation. func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) { op := &request.Operation{ Name: opPutMetricFilter, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &PutMetricFilterInput{} } req = c.newRequest(op, input, output) output = &PutMetricFilterOutput{} req.Data = output return } // Creates or updates a metric filter and associates it with the specified log // group. Metric filters allow you to configure rules to extract metric data // from log events ingested through PutLogEvents requests. // // The maximum number of metric filters that can be associated with a log // group is 100. func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) { req, out := c.PutMetricFilterRequest(input) err := req.Send() return out, err } const opPutRetentionPolicy = "PutRetentionPolicy" // PutRetentionPolicyRequest generates a request for the PutRetentionPolicy operation. 
func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { op := &request.Operation{ Name: opPutRetentionPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &PutRetentionPolicyInput{} } req = c.newRequest(op, input, output) output = &PutRetentionPolicyOutput{} req.Data = output return } // Sets the retention of the specified log group. A retention policy allows // you to configure the number of days you want to retain log events in the // specified log group. func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) { req, out := c.PutRetentionPolicyRequest(input) err := req.Send() return out, err } const opPutSubscriptionFilter = "PutSubscriptionFilter" // PutSubscriptionFilterRequest generates a request for the PutSubscriptionFilter operation. func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) { op := &request.Operation{ Name: opPutSubscriptionFilter, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &PutSubscriptionFilterInput{} } req = c.newRequest(op, input, output) output = &PutSubscriptionFilterOutput{} req.Data = output return } // Creates or updates a subscription filter and associates it with the specified // log group. Subscription filters allow you to subscribe to a real-time stream // of log events ingested through PutLogEvents requests and have them delivered // to a specific destination. Currently, the supported destinations are: A // Amazon Kinesis stream belonging to the same account as the subscription filter, // for same-account delivery. A logical destination (used via an ARN of Destination) // belonging to a different account, for cross-account delivery. // // Currently there can only be one subscription filter associated with a log // group. 
func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) { req, out := c.PutSubscriptionFilterRequest(input) err := req.Send() return out, err } const opTestMetricFilter = "TestMetricFilter" // TestMetricFilterRequest generates a request for the TestMetricFilter operation. func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) { op := &request.Operation{ Name: opTestMetricFilter, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &TestMetricFilterInput{} } req = c.newRequest(op, input, output) output = &TestMetricFilterOutput{} req.Data = output return } // Tests the filter pattern of a metric filter against a sample of log event // messages. You can use this operation to validate the correctness of a metric // filter pattern. func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) { req, out := c.TestMetricFilterRequest(input) err := req.Send() return out, err } type CancelExportTaskInput struct { // Id of the export task to cancel. 
TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` metadataCancelExportTaskInput `json:"-" xml:"-"` } type metadataCancelExportTaskInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s CancelExportTaskInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CancelExportTaskInput) GoString() string { return s.String() } type CancelExportTaskOutput struct { metadataCancelExportTaskOutput `json:"-" xml:"-"` } type metadataCancelExportTaskOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s CancelExportTaskOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CancelExportTaskOutput) GoString() string { return s.String() } type CreateExportTaskInput struct { // Name of Amazon S3 bucket to which the log data will be exported. NOTE: Only // buckets in the same AWS region are supported Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` // Prefix that will be used as the start of Amazon S3 key for every object exported. // If not specified, this defaults to 'exportedlogs'. DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` // A unix timestamp indicating the start time of the range for the request. // Events with a timestamp prior to this time will not be exported. From *int64 `locationName:"from" type:"long" required:"true"` // The name of the log group to export. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // Will only export log streams that match the provided logStreamNamePrefix. // If you don't specify a value, no prefix filter is applied. LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` // The name of the export task. 
TaskName *string `locationName:"taskName" min:"1" type:"string"` // A unix timestamp indicating the end time of the range for the request. Events // with a timestamp later than this time will not be exported. To *int64 `locationName:"to" type:"long" required:"true"` metadataCreateExportTaskInput `json:"-" xml:"-"` } type metadataCreateExportTaskInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s CreateExportTaskInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateExportTaskInput) GoString() string { return s.String() } type CreateExportTaskOutput struct { // Id of the export task that got created. TaskId *string `locationName:"taskId" min:"1" type:"string"` metadataCreateExportTaskOutput `json:"-" xml:"-"` } type metadataCreateExportTaskOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s CreateExportTaskOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateExportTaskOutput) GoString() string { return s.String() } type CreateLogGroupInput struct { // The name of the log group to create. 
LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` metadataCreateLogGroupInput `json:"-" xml:"-"` } type metadataCreateLogGroupInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s CreateLogGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateLogGroupInput) GoString() string { return s.String() } type CreateLogGroupOutput struct { metadataCreateLogGroupOutput `json:"-" xml:"-"` } type metadataCreateLogGroupOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s CreateLogGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateLogGroupOutput) GoString() string { return s.String() } type CreateLogStreamInput struct { // The name of the log group under which the log stream is to be created. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // The name of the log stream to create. 
LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` metadataCreateLogStreamInput `json:"-" xml:"-"` } type metadataCreateLogStreamInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s CreateLogStreamInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateLogStreamInput) GoString() string { return s.String() } type CreateLogStreamOutput struct { metadataCreateLogStreamOutput `json:"-" xml:"-"` } type metadataCreateLogStreamOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s CreateLogStreamOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateLogStreamOutput) GoString() string { return s.String() } type DeleteDestinationInput struct { // The name of destination to delete. DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` metadataDeleteDestinationInput `json:"-" xml:"-"` } type metadataDeleteDestinationInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteDestinationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteDestinationInput) GoString() string { return s.String() } type DeleteDestinationOutput struct { metadataDeleteDestinationOutput `json:"-" xml:"-"` } type metadataDeleteDestinationOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteDestinationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteDestinationOutput) GoString() string { return s.String() } type DeleteLogGroupInput struct { // The name of the log group to delete. 
LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` metadataDeleteLogGroupInput `json:"-" xml:"-"` } type metadataDeleteLogGroupInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteLogGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteLogGroupInput) GoString() string { return s.String() } type DeleteLogGroupOutput struct { metadataDeleteLogGroupOutput `json:"-" xml:"-"` } type metadataDeleteLogGroupOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteLogGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteLogGroupOutput) GoString() string { return s.String() } type DeleteLogStreamInput struct { // The name of the log group under which the log stream to delete belongs. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // The name of the log stream to delete. 
LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` metadataDeleteLogStreamInput `json:"-" xml:"-"` } type metadataDeleteLogStreamInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteLogStreamInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteLogStreamInput) GoString() string { return s.String() } type DeleteLogStreamOutput struct { metadataDeleteLogStreamOutput `json:"-" xml:"-"` } type metadataDeleteLogStreamOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteLogStreamOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteLogStreamOutput) GoString() string { return s.String() } type DeleteMetricFilterInput struct { // The name of the metric filter to delete. FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` // The name of the log group that is associated with the metric filter to delete. 
LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` metadataDeleteMetricFilterInput `json:"-" xml:"-"` } type metadataDeleteMetricFilterInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteMetricFilterInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteMetricFilterInput) GoString() string { return s.String() } type DeleteMetricFilterOutput struct { metadataDeleteMetricFilterOutput `json:"-" xml:"-"` } type metadataDeleteMetricFilterOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteMetricFilterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteMetricFilterOutput) GoString() string { return s.String() } type DeleteRetentionPolicyInput struct { // The name of the log group that is associated with the retention policy to // delete. 
LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` metadataDeleteRetentionPolicyInput `json:"-" xml:"-"` } type metadataDeleteRetentionPolicyInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteRetentionPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteRetentionPolicyInput) GoString() string { return s.String() } type DeleteRetentionPolicyOutput struct { metadataDeleteRetentionPolicyOutput `json:"-" xml:"-"` } type metadataDeleteRetentionPolicyOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteRetentionPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteRetentionPolicyOutput) GoString() string { return s.String() } type DeleteSubscriptionFilterInput struct { // The name of the subscription filter to delete. FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` // The name of the log group that is associated with the subscription filter // to delete. 
LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` metadataDeleteSubscriptionFilterInput `json:"-" xml:"-"` } type metadataDeleteSubscriptionFilterInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteSubscriptionFilterInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteSubscriptionFilterInput) GoString() string { return s.String() } type DeleteSubscriptionFilterOutput struct { metadataDeleteSubscriptionFilterOutput `json:"-" xml:"-"` } type metadataDeleteSubscriptionFilterOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DeleteSubscriptionFilterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteSubscriptionFilterOutput) GoString() string { return s.String() } type DescribeDestinationsInput struct { // Will only return destinations that match the provided destinationNamePrefix. // If you don't specify a value, no prefix is applied. DestinationNamePrefix *string `min:"1" type:"string"` // The maximum number of results to return. Limit *int64 `locationName:"limit" min:"1" type:"integer"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. 
NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeDestinationsInput `json:"-" xml:"-"` } type metadataDescribeDestinationsInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeDestinationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeDestinationsInput) GoString() string { return s.String() } type DescribeDestinationsOutput struct { Destinations []*Destination `locationName:"destinations" type:"list"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeDestinationsOutput `json:"-" xml:"-"` } type metadataDescribeDestinationsOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeDestinationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeDestinationsOutput) GoString() string { return s.String() } type DescribeExportTasksInput struct { // The maximum number of items returned in the response. If you don't specify // a value, the request would return up to 50 items. Limit *int64 `locationName:"limit" min:"1" type:"integer"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous DescribeExportTasks // request. NextToken *string `locationName:"nextToken" min:"1" type:"string"` // All export tasks that matches the specified status code will be returned. // This can return zero or more export tasks. StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"` // Export task that matches the specified task Id will be returned. 
This can // result in zero or one export task. TaskId *string `locationName:"taskId" min:"1" type:"string"` metadataDescribeExportTasksInput `json:"-" xml:"-"` } type metadataDescribeExportTasksInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeExportTasksInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeExportTasksInput) GoString() string { return s.String() } type DescribeExportTasksOutput struct { // A list of export tasks. ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeExportTasksOutput `json:"-" xml:"-"` } type metadataDescribeExportTasksOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeExportTasksOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeExportTasksOutput) GoString() string { return s.String() } type DescribeLogGroupsInput struct { // The maximum number of items returned in the response. If you don't specify // a value, the request would return up to 50 items. Limit *int64 `locationName:"limit" min:"1" type:"integer"` // Will only return log groups that match the provided logGroupNamePrefix. If // you don't specify a value, no prefix filter is applied. LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous DescribeLogGroups // request. 
NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeLogGroupsInput `json:"-" xml:"-"` } type metadataDescribeLogGroupsInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeLogGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeLogGroupsInput) GoString() string { return s.String() } type DescribeLogGroupsOutput struct { // A list of log groups. LogGroups []*LogGroup `locationName:"logGroups" type:"list"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeLogGroupsOutput `json:"-" xml:"-"` } type metadataDescribeLogGroupsOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeLogGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeLogGroupsOutput) GoString() string { return s.String() } type DescribeLogStreamsInput struct { // If set to true, results are returned in descending order. If you don't specify // a value or set it to false, results are returned in ascending order. Descending *bool `locationName:"descending" type:"boolean"` // The maximum number of items returned in the response. If you don't specify // a value, the request would return up to 50 items. Limit *int64 `locationName:"limit" min:"1" type:"integer"` // The log group name for which log streams are to be listed. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // Will only return log streams that match the provided logStreamNamePrefix. // If you don't specify a value, no prefix filter is applied. 
LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous DescribeLogStreams // request. NextToken *string `locationName:"nextToken" min:"1" type:"string"` // Specifies what to order the returned log streams by. Valid arguments are // 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results // are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot // also contain a logStreamNamePrefix. OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` metadataDescribeLogStreamsInput `json:"-" xml:"-"` } type metadataDescribeLogStreamsInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeLogStreamsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeLogStreamsInput) GoString() string { return s.String() } type DescribeLogStreamsOutput struct { // A list of log streams. LogStreams []*LogStream `locationName:"logStreams" type:"list"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeLogStreamsOutput `json:"-" xml:"-"` } type metadataDescribeLogStreamsOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeLogStreamsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeLogStreamsOutput) GoString() string { return s.String() } type DescribeMetricFiltersInput struct { // Will only return metric filters that match the provided filterNamePrefix. 
// If you don't specify a value, no prefix filter is applied. FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` // The maximum number of items returned in the response. If you don't specify // a value, the request would return up to 50 items. Limit *int64 `locationName:"limit" min:"1" type:"integer"` // The log group name for which metric filters are to be listed. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous DescribeMetricFilters // request. NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeMetricFiltersInput `json:"-" xml:"-"` } type metadataDescribeMetricFiltersInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeMetricFiltersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeMetricFiltersInput) GoString() string { return s.String() } type DescribeMetricFiltersOutput struct { MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. 
NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeMetricFiltersOutput `json:"-" xml:"-"` } type metadataDescribeMetricFiltersOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeMetricFiltersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeMetricFiltersOutput) GoString() string { return s.String() } type DescribeSubscriptionFiltersInput struct { // Will only return subscription filters that match the provided filterNamePrefix. // If you don't specify a value, no prefix filter is applied. FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` // The maximum number of results to return. Limit *int64 `locationName:"limit" min:"1" type:"integer"` // The log group name for which subscription filters are to be listed. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. NextToken *string `locationName:"nextToken" min:"1" type:"string"` metadataDescribeSubscriptionFiltersInput `json:"-" xml:"-"` } type metadataDescribeSubscriptionFiltersInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeSubscriptionFiltersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeSubscriptionFiltersInput) GoString() string { return s.String() } type DescribeSubscriptionFiltersOutput struct { // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. 
NextToken *string `locationName:"nextToken" min:"1" type:"string"` SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"` metadataDescribeSubscriptionFiltersOutput `json:"-" xml:"-"` } type metadataDescribeSubscriptionFiltersOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s DescribeSubscriptionFiltersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeSubscriptionFiltersOutput) GoString() string { return s.String() } // A cross account destination that is the recipient of subscription log events. type Destination struct { // An IAM policy document that governs which AWS accounts can create subscription // filters against this destination. AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"` // ARN of this destination. Arn *string `locationName:"arn" type:"string"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC specifying when this destination was created. CreationTime *int64 `locationName:"creationTime" type:"long"` // Name of the destination. DestinationName *string `locationName:"destinationName" min:"1" type:"string"` // A role for impersonation for delivering log events to the target. RoleArn *string `locationName:"roleArn" min:"1" type:"string"` // ARN of the physical target where the log events will be delivered (eg. ARN // of a Kinesis stream). TargetArn *string `locationName:"targetArn" min:"1" type:"string"` metadataDestination `json:"-" xml:"-"` } type metadataDestination struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s Destination) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Destination) GoString() string { return s.String() } // Represents an export task. 
type ExportTask struct { // Name of Amazon S3 bucket to which the log data was exported. Destination *string `locationName:"destination" min:"1" type:"string"` // Prefix that was used as the start of Amazon S3 key for every object exported. DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` // Execution info about the export task. ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"` // A unix timestamp indicating the start time of the range for the request. // Events with a timestamp prior to this time were not exported. From *int64 `locationName:"from" type:"long"` // The name of the log group from which logs data was exported. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` // Status of the export task. Status *ExportTaskStatus `locationName:"status" type:"structure"` // Id of the export task. TaskId *string `locationName:"taskId" min:"1" type:"string"` // The name of the export task. TaskName *string `locationName:"taskName" min:"1" type:"string"` // A unix timestamp indicating the end time of the range for the request. Events // with a timestamp later than this time were not exported. To *int64 `locationName:"to" type:"long"` metadataExportTask `json:"-" xml:"-"` } type metadataExportTask struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s ExportTask) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ExportTask) GoString() string { return s.String() } // Represents the status of an export task. type ExportTaskExecutionInfo struct { // A point in time when the export task got completed. CompletionTime *int64 `locationName:"completionTime" type:"long"` // A point in time when the export task got created. 
CreationTime *int64 `locationName:"creationTime" type:"long"` metadataExportTaskExecutionInfo `json:"-" xml:"-"` } type metadataExportTaskExecutionInfo struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s ExportTaskExecutionInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ExportTaskExecutionInfo) GoString() string { return s.String() } // Represents the status of an export task. type ExportTaskStatus struct { // Status code of the export task. Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"` // Status message related to the code. Message *string `locationName:"message" type:"string"` metadataExportTaskStatus `json:"-" xml:"-"` } type metadataExportTaskStatus struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s ExportTaskStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ExportTaskStatus) GoString() string { return s.String() } type FilterLogEventsInput struct { // A unix timestamp indicating the end time of the range for the request. If // provided, events with a timestamp later than this time will not be returned. EndTime *int64 `locationName:"endTime" type:"long"` // A valid CloudWatch Logs filter pattern to use for filtering the response. // If not provided, all the events are matched. FilterPattern *string `locationName:"filterPattern" type:"string"` // If provided, the API will make a best effort to provide responses that contain // events from multiple log streams within the log group interleaved in a single // response. If not provided, all the matched log events in the first log stream // will be searched first, then those in the next log stream, etc. Interleaved *bool `locationName:"interleaved" type:"boolean"` // The maximum number of events to return in a page of results. Default is 10,000 // events. 
Limit *int64 `locationName:"limit" min:"1" type:"integer"` // The name of the log group to query. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // Optional list of log stream names within the specified log group to search. // Defaults to all the log streams in the log group. LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` // A pagination token obtained from a FilterLogEvents response to continue paginating // the FilterLogEvents results. NextToken *string `locationName:"nextToken" min:"1" type:"string"` // A unix timestamp indicating the start time of the range for the request. // If provided, events with a timestamp prior to this time will not be returned. StartTime *int64 `locationName:"startTime" type:"long"` metadataFilterLogEventsInput `json:"-" xml:"-"` } type metadataFilterLogEventsInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s FilterLogEventsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s FilterLogEventsInput) GoString() string { return s.String() } type FilterLogEventsOutput struct { // A list of FilteredLogEvent objects representing the matched events from the // request. Events []*FilteredLogEvent `locationName:"events" type:"list"` // A pagination token obtained from a FilterLogEvents response to continue paginating // the FilterLogEvents results. NextToken *string `locationName:"nextToken" min:"1" type:"string"` // A list of SearchedLogStream objects indicating which log streams have been // searched in this request and whether each has been searched completely or // still has more to be paginated. 
SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` metadataFilterLogEventsOutput `json:"-" xml:"-"` } type metadataFilterLogEventsOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s FilterLogEventsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s FilterLogEventsOutput) GoString() string { return s.String() } // Represents a matched event from a FilterLogEvents request. type FilteredLogEvent struct { // A unique identifier for this event. EventId *string `locationName:"eventId" type:"string"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. IngestionTime *int64 `locationName:"ingestionTime" type:"long"` // The name of the log stream this event belongs to. LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` // The data contained in the log event. Message *string `locationName:"message" min:"1" type:"string"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. Timestamp *int64 `locationName:"timestamp" type:"long"` metadataFilteredLogEvent `json:"-" xml:"-"` } type metadataFilteredLogEvent struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s FilteredLogEvent) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s FilteredLogEvent) GoString() string { return s.String() } type GetLogEventsInput struct { // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. EndTime *int64 `locationName:"endTime" type:"long"` // The maximum number of log events returned in the response. If you don't specify // a value, the request would return as many log events as can fit in a response // size of 1MB, up to 10,000 log events. 
Limit *int64 `locationName:"limit" min:"1" type:"integer"` // The name of the log group to query. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // The name of the log stream to query. LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the nextForwardToken or nextBackwardToken // fields in the response of the previous GetLogEvents request. NextToken *string `locationName:"nextToken" min:"1" type:"string"` // If set to true, the earliest log events would be returned first. The default // is false (the latest log events are returned first). StartFromHead *bool `locationName:"startFromHead" type:"boolean"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. StartTime *int64 `locationName:"startTime" type:"long"` metadataGetLogEventsInput `json:"-" xml:"-"` } type metadataGetLogEventsInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s GetLogEventsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s GetLogEventsInput) GoString() string { return s.String() } type GetLogEventsOutput struct { Events []*OutputLogEvent `locationName:"events" type:"list"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` // A string token used for pagination that points to the next page of results. // It must be a value obtained from the response of the previous request. The // token expires after 24 hours. 
NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"` metadataGetLogEventsOutput `json:"-" xml:"-"` } type metadataGetLogEventsOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s GetLogEventsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s GetLogEventsOutput) GoString() string { return s.String() } // A log event is a record of some activity that was recorded by the application // or resource being monitored. The log event record that Amazon CloudWatch // Logs understands contains two properties: the timestamp of when the event // occurred, and the raw event message. type InputLogEvent struct { Message *string `locationName:"message" min:"1" type:"string" required:"true"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"` metadataInputLogEvent `json:"-" xml:"-"` } type metadataInputLogEvent struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s InputLogEvent) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s InputLogEvent) GoString() string { return s.String() } type LogGroup struct { Arn *string `locationName:"arn" type:"string"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. CreationTime *int64 `locationName:"creationTime" type:"long"` LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` // The number of metric filters associated with the log group. MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"` // Specifies the number of days you want to retain log events in the specified // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, // 365, 400, 545, 731, 1827, 3653. 
RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"` StoredBytes *int64 `locationName:"storedBytes" type:"long"` metadataLogGroup `json:"-" xml:"-"` } type metadataLogGroup struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s LogGroup) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s LogGroup) GoString() string { return s.String() } // A log stream is sequence of log events from a single emitter of logs. type LogStream struct { Arn *string `locationName:"arn" type:"string"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. CreationTime *int64 `locationName:"creationTime" type:"long"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"` LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` StoredBytes *int64 `locationName:"storedBytes" type:"long"` // A string token used for making PutLogEvents requests. A sequenceToken can // only be used once, and PutLogEvents requests must include the sequenceToken // obtained from the response of the previous request. 
UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` metadataLogStream `json:"-" xml:"-"` } type metadataLogStream struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s LogStream) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s LogStream) GoString() string { return s.String() } // Metric filters can be used to express how Amazon CloudWatch Logs would extract // metric observations from ingested log events and transform them to metric // data in a CloudWatch metric. type MetricFilter struct { // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. CreationTime *int64 `locationName:"creationTime" type:"long"` // A name for a metric or subscription filter. FilterName *string `locationName:"filterName" min:"1" type:"string"` // A symbolic description of how Amazon CloudWatch Logs should interpret the // data in each log event. For example, a log event may contain timestamps, // IP addresses, strings, and so on. You use the filter pattern to specify what // to look for in the log event message. 
FilterPattern *string `locationName:"filterPattern" type:"string"` MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` metadataMetricFilter `json:"-" xml:"-"` } type metadataMetricFilter struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s MetricFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MetricFilter) GoString() string { return s.String() } type MetricFilterMatchRecord struct { EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` EventNumber *int64 `locationName:"eventNumber" type:"long"` ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"` metadataMetricFilterMatchRecord `json:"-" xml:"-"` } type metadataMetricFilterMatchRecord struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s MetricFilterMatchRecord) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MetricFilterMatchRecord) GoString() string { return s.String() } type MetricTransformation struct { // The name of the CloudWatch metric to which the monitored log information // should be published. For example, you may publish to a metric called ErrorCount. MetricName *string `locationName:"metricName" type:"string" required:"true"` // The destination namespace of the new CloudWatch metric. MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` // What to publish to the metric. For example, if you're counting the occurrences // of a particular term like "Error", the value will be "1" for each occurrence. // If you're counting the bytes transferred the published value will be the // value in the log event. 
MetricValue *string `locationName:"metricValue" type:"string" required:"true"` metadataMetricTransformation `json:"-" xml:"-"` } type metadataMetricTransformation struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s MetricTransformation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MetricTransformation) GoString() string { return s.String() } type OutputLogEvent struct { // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. IngestionTime *int64 `locationName:"ingestionTime" type:"long"` Message *string `locationName:"message" min:"1" type:"string"` // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. Timestamp *int64 `locationName:"timestamp" type:"long"` metadataOutputLogEvent `json:"-" xml:"-"` } type metadataOutputLogEvent struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s OutputLogEvent) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s OutputLogEvent) GoString() string { return s.String() } type PutDestinationInput struct { // A name for the destination. DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` // The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to // do Amazon Kinesis PutRecord requests on the desitnation stream. RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` // The ARN of an Amazon Kinesis stream to deliver matching log events to. 
TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"` metadataPutDestinationInput `json:"-" xml:"-"` } type metadataPutDestinationInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutDestinationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutDestinationInput) GoString() string { return s.String() } type PutDestinationOutput struct { // A cross account destination that is the recipient of subscription log events. Destination *Destination `locationName:"destination" type:"structure"` metadataPutDestinationOutput `json:"-" xml:"-"` } type metadataPutDestinationOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutDestinationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutDestinationOutput) GoString() string { return s.String() } type PutDestinationPolicyInput struct { // An IAM policy document that authorizes cross-account users to deliver their // log events to associated destination. AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"` // A name for an existing destination. 
DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` metadataPutDestinationPolicyInput `json:"-" xml:"-"` } type metadataPutDestinationPolicyInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutDestinationPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutDestinationPolicyInput) GoString() string { return s.String() } type PutDestinationPolicyOutput struct { metadataPutDestinationPolicyOutput `json:"-" xml:"-"` } type metadataPutDestinationPolicyOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutDestinationPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutDestinationPolicyOutput) GoString() string { return s.String() } type PutLogEventsInput struct { // A list of log events belonging to a log stream. LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` // The name of the log group to put log events to. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // The name of the log stream to put log events to. LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` // A string token that must be obtained from the response of the previous PutLogEvents // request. 
SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` metadataPutLogEventsInput `json:"-" xml:"-"` } type metadataPutLogEventsInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutLogEventsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutLogEventsInput) GoString() string { return s.String() } type PutLogEventsOutput struct { // A string token used for making PutLogEvents requests. A sequenceToken can // only be used once, and PutLogEvents requests must include the sequenceToken // obtained from the response of the previous request. NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` metadataPutLogEventsOutput `json:"-" xml:"-"` } type metadataPutLogEventsOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutLogEventsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutLogEventsOutput) GoString() string { return s.String() } type PutMetricFilterInput struct { // A name for the metric filter. FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` // A valid CloudWatch Logs filter pattern for extracting metric data out of // ingested log events. FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` // The name of the log group to associate the metric filter with. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // A collection of information needed to define how metric data gets emitted. 
MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` metadataPutMetricFilterInput `json:"-" xml:"-"` } type metadataPutMetricFilterInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutMetricFilterInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutMetricFilterInput) GoString() string { return s.String() } type PutMetricFilterOutput struct { metadataPutMetricFilterOutput `json:"-" xml:"-"` } type metadataPutMetricFilterOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutMetricFilterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutMetricFilterOutput) GoString() string { return s.String() } type PutRetentionPolicyInput struct { // The name of the log group to associate the retention policy with. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // Specifies the number of days you want to retain log events in the specified // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, // 365, 400, 545, 731, 1827, 3653. 
RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"` metadataPutRetentionPolicyInput `json:"-" xml:"-"` } type metadataPutRetentionPolicyInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutRetentionPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutRetentionPolicyInput) GoString() string { return s.String() } type PutRetentionPolicyOutput struct { metadataPutRetentionPolicyOutput `json:"-" xml:"-"` } type metadataPutRetentionPolicyOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutRetentionPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutRetentionPolicyOutput) GoString() string { return s.String() } type PutSubscriptionFilterInput struct { // The ARN of the destination to deliver matching log events to. Currently, // the supported destinations are: A Amazon Kinesis stream belonging to the // same account as the subscription filter, for same-account delivery. A logical // destination (used via an ARN of Destination) belonging to a different account, // for cross-account delivery. DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` // A name for the subscription filter. FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` // A valid CloudWatch Logs filter pattern for subscribing to a filtered stream // of log events. FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` // The name of the log group to associate the subscription filter with. LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` // The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to // deliver ingested log events to the destination stream. 
You don't need to // provide the ARN when you are working with a logical destination (used via // an ARN of Destination) for cross-account delivery. RoleArn *string `locationName:"roleArn" min:"1" type:"string"` metadataPutSubscriptionFilterInput `json:"-" xml:"-"` } type metadataPutSubscriptionFilterInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutSubscriptionFilterInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutSubscriptionFilterInput) GoString() string { return s.String() } type PutSubscriptionFilterOutput struct { metadataPutSubscriptionFilterOutput `json:"-" xml:"-"` } type metadataPutSubscriptionFilterOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s PutSubscriptionFilterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutSubscriptionFilterOutput) GoString() string { return s.String() } type RejectedLogEventsInfo struct { ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` metadataRejectedLogEventsInfo `json:"-" xml:"-"` } type metadataRejectedLogEventsInfo struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s RejectedLogEventsInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RejectedLogEventsInfo) GoString() string { return s.String() } // An object indicating the search status of a log stream in a FilterLogEvents // request. type SearchedLogStream struct { // The name of the log stream. 
LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` // Indicates whether all the events in this log stream were searched or more // data exists to search by paginating further. SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"` metadataSearchedLogStream `json:"-" xml:"-"` } type metadataSearchedLogStream struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s SearchedLogStream) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SearchedLogStream) GoString() string { return s.String() } type SubscriptionFilter struct { // A point in time expressed as the number of milliseconds since Jan 1, 1970 // 00:00:00 UTC. CreationTime *int64 `locationName:"creationTime" type:"long"` DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` // A name for a metric or subscription filter. FilterName *string `locationName:"filterName" min:"1" type:"string"` // A symbolic description of how Amazon CloudWatch Logs should interpret the // data in each log event. For example, a log event may contain timestamps, // IP addresses, strings, and so on. You use the filter pattern to specify what // to look for in the log event message. FilterPattern *string `locationName:"filterPattern" type:"string"` LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` RoleArn *string `locationName:"roleArn" min:"1" type:"string"` metadataSubscriptionFilter `json:"-" xml:"-"` } type metadataSubscriptionFilter struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s SubscriptionFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SubscriptionFilter) GoString() string { return s.String() } type TestMetricFilterInput struct { // A symbolic description of how Amazon CloudWatch Logs should interpret the // data in each log event. 
For example, a log event may contain timestamps, // IP addresses, strings, and so on. You use the filter pattern to specify what // to look for in the log event message. FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` // A list of log event messages to test. LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` metadataTestMetricFilterInput `json:"-" xml:"-"` } type metadataTestMetricFilterInput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s TestMetricFilterInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TestMetricFilterInput) GoString() string { return s.String() } type TestMetricFilterOutput struct { Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"` metadataTestMetricFilterOutput `json:"-" xml:"-"` } type metadataTestMetricFilterOutput struct { SDKShapeTraits bool `type:"structure"` } // String returns the string representation func (s TestMetricFilterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TestMetricFilterOutput) GoString() string { return s.String() } const ( // @enum ExportTaskStatusCode ExportTaskStatusCodeCancelled = "CANCELLED" // @enum ExportTaskStatusCode ExportTaskStatusCodeCompleted = "COMPLETED" // @enum ExportTaskStatusCode ExportTaskStatusCodeFailed = "FAILED" // @enum ExportTaskStatusCode ExportTaskStatusCodePending = "PENDING" // @enum ExportTaskStatusCode ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" // @enum ExportTaskStatusCode ExportTaskStatusCodeRunning = "RUNNING" ) const ( // @enum OrderBy OrderByLogStreamName = "LogStreamName" // @enum OrderBy OrderByLastEventTime = "LastEventTime" ) docker-1.10.3/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go000066400000000000000000000100731267010174400304100ustar00rootroot00000000000000// THIS 
FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package cloudwatchlogs import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/service" "github.com/aws/aws-sdk-go/aws/service/serviceinfo" "github.com/aws/aws-sdk-go/internal/protocol/jsonrpc" "github.com/aws/aws-sdk-go/internal/signer/v4" ) // This is the Amazon CloudWatch Logs API Reference. Amazon CloudWatch Logs // enables you to monitor, store, and access your system, application, and custom // log files. This guide provides detailed information about Amazon CloudWatch // Logs actions, data types, parameters, and errors. For detailed information // about Amazon CloudWatch Logs features and their associated API calls, go // to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide). // // Use the following links to get started using the Amazon CloudWatch Logs // API Reference: // // Actions (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Operations.html): // An alphabetical list of all Amazon CloudWatch Logs actions. Data Types (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Types.html): // An alphabetical list of all Amazon CloudWatch Logs data types. Common Parameters // (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonParameters.html): // Parameters that all Query actions can use. Common Errors (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonErrors.html): // Client and server errors that all actions can return. Regions and Endpoints // (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized // regions and endpoints for all AWS products. In addition to using the Amazon // CloudWatch Logs API, you can also use the following SDKs and third-party // libraries to access Amazon CloudWatch Logs programmatically. 
// // AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/) // AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/) // AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/) // AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/) // Developers in the AWS developer community also provide their own libraries, // which you can find at the following AWS developer centers: // // AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer // Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/) // AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET // Developer Center (http://aws.amazon.com/net/) type CloudWatchLogs struct { *service.Service } // Used for custom service initialization logic var initService func(*service.Service) // Used for custom request initialization logic var initRequest func(*request.Request) // New returns a new CloudWatchLogs client. func New(config *aws.Config) *CloudWatchLogs { service := &service.Service{ ServiceInfo: serviceinfo.ServiceInfo{ Config: defaults.DefaultConfig.Merge(config), ServiceName: "logs", APIVersion: "2014-03-28", JSONVersion: "1.1", TargetPrefix: "Logs_20140328", }, } service.Initialize() // Handlers service.Handlers.Sign.PushBack(v4.Sign) service.Handlers.Build.PushBack(jsonrpc.Build) service.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal) service.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta) service.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError) // Run custom service initialization if present if initService != nil { initService(service) } return &CloudWatchLogs{service} } // newRequest creates a new request for a CloudWatchLogs operation and runs any // custom request initialization. 
func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) // Run custom request initialization if present if initRequest != nil { initRequest(req) } return req } docker-1.10.3/vendor/src/github.com/boltdb/000077500000000000000000000000001267010174400204365ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/boltdb/bolt/000077500000000000000000000000001267010174400213765ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/boltdb/bolt/.gitignore000066400000000000000000000000321267010174400233610ustar00rootroot00000000000000*.prof *.test *.swp /bin/ docker-1.10.3/vendor/src/github.com/boltdb/bolt/LICENSE000066400000000000000000000020661267010174400224070ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2013 Ben Johnson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
docker-1.10.3/vendor/src/github.com/boltdb/bolt/Makefile000066400000000000000000000022431267010174400230370ustar00rootroot00000000000000TEST=. BENCH=. COVERPROFILE=/tmp/c.out BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" default: build bench: go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) # http://cloc.sourceforge.net/ cloc: @cloc --not-match-f='Makefile|_test.go' . cover: fmt go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . go tool cover -html=$(COVERPROFILE) rm $(COVERPROFILE) cpuprofile: fmt @go test -c @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof # go get github.com/kisielk/errcheck errcheck: @echo "=== errcheck ===" @errcheck github.com/boltdb/bolt fmt: @go fmt ./... get: @go get -d ./... build: get @mkdir -p bin @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt test: fmt @go get github.com/stretchr/testify/assert @echo "=== TESTS ===" @go test -v -cover -test.run=$(TEST) @echo "" @echo "" @echo "=== CLI ===" @go test -v -test.run=$(TEST) ./cmd/bolt @echo "" @echo "" @echo "=== RACE DETECTOR ===" @go test -v -race -test.run="TestSimulate_(100op|1000op)" .PHONY: bench cloc cover cpuprofile fmt memprofile test docker-1.10.3/vendor/src/github.com/boltdb/bolt/README.md000066400000000000000000000576571267010174400227010ustar00rootroot00000000000000Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) ==== Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] and the [LMDB project][lmdb]. 
The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database server such as Postgres or MySQL. Since Bolt is meant to be used as such a low-level piece of functionality, simplicity is key. The API will be small and only focus on getting values and setting values. That's it. [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ ## Project Status Bolt is stable and the API is fixed. Full unit test coverage and randomized black box testing are used to ensure database consistency and thread safety. Bolt is currently in high-load production environments serving databases as large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed services every day. ## Getting Started ### Installing To start using Bolt, install Go and run `go get`: ```sh $ go get github.com/boltdb/bolt/... ``` This will retrieve the library and install the `bolt` command line utility into your `$GOBIN` path. ### Opening a database The top-level object in Bolt is a `DB`. It is represented as a single file on your disk and represents a consistent snapshot of your data. To open your database, simply use the `bolt.Open()` function: ```go package main import ( "log" "github.com/boltdb/bolt" ) func main() { // Open the my.db data file in your current directory. // It will be created if it doesn't exist. db, err := bolt.Open("my.db", 0600, nil) if err != nil { log.Fatal(err) } defer db.Close() ... } ``` Please note that Bolt obtains a file lock on the data file so multiple processes cannot open the same database at the same time. Opening an already open Bolt database will cause it to hang until the other process closes it. 
To prevent an indefinite wait you can pass a timeout option to the `Open()` function: ```go db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) ``` ### Transactions Bolt allows only one read-write transaction at a time but allows as many read-only transactions as you want at a time. Each transaction has a consistent view of the data as it existed when the transaction started. Individual transactions and all objects created from them (e.g. buckets, keys) are not thread safe. To work with data in multiple goroutines you must start a transaction for each one or use locking to ensure only one goroutine accesses a transaction at a time. Creating transaction from the `DB` is thread safe. Read-only transactions and read-write transactions should not depend on one another and generally shouldn't be opened simultaneously in the same goroutine. This can cause a deadlock as the read-write transaction needs to periodically re-map the data file but it cannot do so while a read-only transaction is open. #### Read-write transactions To start a read-write transaction, you can use the `DB.Update()` function: ```go err := db.Update(func(tx *bolt.Tx) error { ... return nil }) ``` Inside the closure, you have a consistent view of the database. You commit the transaction by returning `nil` at the end. You can also rollback the transaction at any point by returning an error. All database operations are allowed inside a read-write transaction. Always check the return error as it will report any disk failures that can cause your transaction to not complete. If you return an error within your closure it will be passed through. #### Read-only transactions To start a read-only transaction, you can use the `DB.View()` function: ```go err := db.View(func(tx *bolt.Tx) error { ... return nil }) ``` You also get a consistent view of the database within this closure, however, no mutating operations are allowed within a read-only transaction. 
You can only retrieve buckets, retrieve values, and copy the database within a read-only transaction. #### Batch read-write transactions Each `DB.Update()` waits for disk to commit the writes. This overhead can be minimized by combining multiple updates with the `DB.Batch()` function: ```go err := db.Batch(func(tx *bolt.Tx) error { ... return nil }) ``` Concurrent Batch calls are opportunistically combined into larger transactions. Batch is only useful when there are multiple goroutines calling it. The trade-off is that `Batch` can call the given function multiple times, if parts of the transaction fail. The function must be idempotent and side effects must take effect only after a successful return from `DB.Batch()`. For example: don't display messages from inside the function, instead set variables in the enclosing scope: ```go var id uint64 err := db.Batch(func(tx *bolt.Tx) error { // Find last key in bucket, decode as bigendian uint64, increment // by one, encode back to []byte, and add new key. ... id = newValue return nil }) if err != nil { return ... } fmt.Println("Allocated ID %d", id) ``` #### Managing transactions manually The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` function. These helper functions will start the transaction, execute a function, and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. You can use the `Tx.Begin()` function directly but _please_ be sure to close the transaction. ```go // Start a writable transaction. tx, err := db.Begin(true) if err != nil { return err } defer tx.Rollback() // Use the transaction... _, err := tx.CreateBucket([]byte("MyBucket")) if err != nil { return err } // Commit the transaction and check for error. 
if err := tx.Commit(); err != nil { return err } ``` The first argument to `DB.Begin()` is a boolean stating if the transaction should be writable. ### Using buckets Buckets are collections of key/value pairs within the database. All keys in a bucket must be unique. You can create a bucket using the `DB.CreateBucket()` function: ```go db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("MyBucket")) if err != nil { return fmt.Errorf("create bucket: %s", err) } return nil }) ``` You can also create a bucket only if it doesn't exist by using the `Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this function for all your top-level buckets after you open your database so you can guarantee that they exist for future transactions. To delete a bucket, simply call the `Tx.DeleteBucket()` function. ### Using key/value pairs To save a key/value pair to a bucket, use the `Bucket.Put()` function: ```go db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("MyBucket")) err := b.Put([]byte("answer"), []byte("42")) return err }) ``` This will set the value of the `"answer"` key to `"42"` in the `MyBucket` bucket. To retrieve this value, we can use the `Bucket.Get()` function: ```go db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("MyBucket")) v := b.Get([]byte("answer")) fmt.Printf("The answer is: %s\n", v) return nil }) ``` The `Get()` function does not return an error because its operation is guaranteed to work (unless there is some kind of system failure). If the key exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. Use the `Bucket.Delete()` function to delete a key from the bucket. Please note that values returned from `Get()` are only valid while the transaction is open. 
If you need to use a value outside of the transaction then you must use `copy()` to copy it to another byte slice. ### Autoincrementing integer for the bucket By using the NextSequence() function, you can let Bolt determine a sequence which can be used as the unique identifier for your key/value pairs. See the example below. ```go // CreateUser saves u to the store. The new user ID is set on u once the data is persisted. func (s *Store) CreateUser(u *User) error { return s.db.Update(func(tx *bolt.Tx) error { // Retrieve the users bucket. // This should be created when the DB is first opened. b := tx.Bucket([]byte("users")) // Generate ID for the user. // This returns an error only if the Tx is closed or not writeable. // That can't happen in an Update() call so I ignore the error check. id, _ = b.NextSequence() u.ID = int(id) // Marshal user data into bytes. buf, err := json.Marshal(u) if err != nil { return err } // Persist bytes to users bucket. return b.Put(itob(u.ID), buf) }) } // itob returns an 8-byte big endian representation of v. func itob(v int) []byte { b := make([]byte, 8) binary.BigEndian.PutUint64(b, uint64(v)) return b } type User struct { ID int ... } ``` ### Iterating over keys Bolt stores its keys in byte-sorted order within a bucket. This makes sequential iteration over these keys extremely fast. To iterate over keys we'll use a `Cursor`: ```go db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("MyBucket")) c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { fmt.Printf("key=%s, value=%s\n", k, v) } return nil }) ``` The cursor allows you to move to a specific point in the list of keys and move forward or backward through the keys one at a time. The following functions are available on the cursor: ``` First() Move to the first key. Last() Move to the last key. Seek() Move to a specific key. Next() Move to the next key. Prev() Move to the previous key. 
``` When you have iterated to the end of the cursor then `Next()` will return `nil`. You must seek to a position using `First()`, `Last()`, or `Seek()` before calling `Next()` or `Prev()`. If you do not seek to a position then these functions will return `nil`. #### Prefix scans To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: ```go db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { fmt.Printf("key=%s, value=%s\n", k, v) } return nil }) ``` #### Range scans Another common use case is scanning over a range such as a time range. If you use a sortable time encoding such as RFC3339 then you can query a specific date range like this: ```go db.View(func(tx *bolt.Tx) error { // Assume our events bucket has RFC3339 encoded time keys. c := tx.Bucket([]byte("Events")).Cursor() // Our time range spans the 90's decade. min := []byte("1990-01-01T00:00:00Z") max := []byte("2000-01-01T00:00:00Z") // Iterate over the 90's. for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { fmt.Printf("%s: %s\n", k, v) } return nil }) ``` #### ForEach() You can also use the function `ForEach()` if you know you'll be iterating over all the keys in a bucket: ```go db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("MyBucket")) b.ForEach(func(k, v []byte) error { fmt.Printf("key=%s, value=%s\n", k, v) return nil }) return nil }) ``` ### Nested buckets You can also store a bucket in a key to create nested buckets. The API is the same as the bucket management API on the `DB` object: ```go func (*Bucket) CreateBucket(key []byte) (*Bucket, error) func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) func (*Bucket) DeleteBucket(key []byte) error ``` ### Database backups Bolt is a single file so it's easy to backup. 
You can use the `Tx.WriteTo()` function to write a consistent view of the database to a writer. If you call this from a read-only transaction, it will perform a hot backup and not block your other database reads and writes. It will also use `O_DIRECT` when available to prevent page cache trashing. One common use case is to backup over HTTP so you can use tools like `cURL` to do database backups: ```go func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { err := db.View(func(tx *bolt.Tx) error { w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) _, err := tx.WriteTo(w) return err }) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } } ``` Then you can backup using this command: ```sh $ curl http://localhost/backup > my.db ``` Or you can open your browser to `http://localhost/backup` and it will download automatically. If you want to backup to another file you can use the `Tx.CopyFile()` helper function. ### Statistics The database keeps a running count of many of the internal operations it performs so you can better understand what's going on. By grabbing a snapshot of these stats at two points in time we can see what operations were performed in that time range. For example, we could start a goroutine to log stats every 10 seconds: ```go go func() { // Grab the initial stats. prev := db.Stats() for { // Wait for 10s. time.Sleep(10 * time.Second) // Grab the current stats and diff them. stats := db.Stats() diff := stats.Sub(&prev) // Encode stats to JSON and print to STDERR. json.NewEncoder(os.Stderr).Encode(diff) // Save stats for the next loop. prev = stats } }() ``` It's also useful to pipe these stats to a service such as statsd for monitoring or to provide an HTTP endpoint that will perform a fixed-length sample. 
### Read-Only Mode Sometimes it is useful to create a shared, read-only Bolt database. To this, set the `Options.ReadOnly` flag when opening your database. Read-only mode uses a shared lock to allow multiple processes to read from the database but it will block any processes from opening the database in read-write mode. ```go db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) if err != nil { log.Fatal(err) } ``` ## Resources For more information on getting started with Bolt, check out the following articles: * [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). * [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville ## Comparison with other databases ### Postgres, MySQL, & other relational databases Relational databases structure data into rows and are only accessible through the use of SQL. This approach provides flexibility in how you store and query your data but also incurs overhead in parsing and planning SQL statements. Bolt accesses all data by a byte slice key. This makes Bolt fast to read and write data by key but provides no built-in support for joining values together. Most relational databases (with the exception of SQLite) are standalone servers that run separately from your application. This gives your systems flexibility to connect multiple application servers to a single database server but also adds overhead in serializing and transporting data over the network. Bolt runs as a library included in your application so all data access has to go through your application's process. This brings data closer to your application but limits multi-process access to the data. 
### LevelDB, RocksDB LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that they are libraries bundled into the application, however, their underlying structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes random writes by using a write ahead log and multi-tiered, sorted files called SSTables. Bolt uses a B+tree internally and only a single file. Both approaches have trade offs. If you require a high random write throughput (>10,000 w/sec) or you need to use spinning disks then LevelDB could be a good choice. If your application is read-heavy or does a lot of range scans then Bolt could be a good choice. One other important consideration is that LevelDB does not have transactions. It supports batch writing of key/values pairs and it supports read snapshots but it will not give you the ability to do a compare-and-swap operation safely. Bolt supports fully serializable ACID transactions. ### LMDB Bolt was originally a port of LMDB so it is architecturally similar. Both use a B+tree, have ACID semantics with fully serializable transactions, and support lock-free MVCC using a single writer and multiple readers. The two projects have somewhat diverged. LMDB heavily focuses on raw performance while Bolt has focused on simplicity and ease of use. For example, LMDB allows several unsafe actions such as direct writes for the sake of performance. Bolt opts to disallow actions which can leave the database in a corrupted state. The only exception to this in Bolt is `DB.NoSync`. There are also a few differences in API. LMDB requires a maximum mmap size when opening an `mdb_env` whereas Bolt will handle incremental mmap resizing automatically. LMDB overloads the getter and setter functions with multiple flags whereas Bolt splits these specialized cases into their own functions. ## Caveats & Limitations It's important to pick the right tool for the job and Bolt is no exception. 
Here are a few things to note when evaluating and using Bolt: * Bolt is good for read intensive workloads. Sequential write performance is also fast but random writes can be slow. You can add a write-ahead log or [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt to mitigate this issue. * Bolt uses a B+tree internally so there can be a lot of random page access. SSDs provide a significant performance boost over spinning disks. * Try to avoid long running read transactions. Bolt uses copy-on-write so old pages cannot be reclaimed while an old transaction is using them. * Byte slices returned from Bolt are only valid during a transaction. Once the transaction has been committed or rolled back then the memory they point to can be reused by a new page or can be unmapped from virtual memory and you'll see an `unexpected fault address` panic when accessing it. * Be careful when using `Bucket.FillPercent`. Setting a high fill percent for buckets that have random inserts will cause your database to have very poor page utilization. * Use larger buckets in general. Smaller buckets causes poor page utilization once they become larger than the page size (typically 4KB). * Bulk loading a lot of random writes into a new bucket can be slow as the page will not split until the transaction is committed. Randomly inserting more than 100,000 key/value pairs into a single new bucket in a single transaction is not advised. * Bolt uses a memory-mapped file so the underlying operating system handles the caching of the data. Typically, the OS will cache as much of the file as it can in memory and will release memory as needed to other processes. This means that Bolt can show very high memory usage when working with large databases. However, this is expected and the OS will release memory as needed. Bolt can handle databases much larger than the available physical RAM, provided its memory-map fits in the process virtual address space. 
It may be problematic on 32-bits systems. * The data structures in the Bolt database are memory mapped so the data file will be endian specific. This means that you cannot copy a Bolt file from a little endian machine to a big endian machine and have it work. For most users this is not a concern since most modern CPUs are little endian. * Because of the way pages are laid out on disk, Bolt cannot truncate data files and return free pages back to the disk. Instead, Bolt maintains a free list of unused pages within its data file. These free pages can be reused by later transactions. This works well for many use cases as databases generally tend to grow. However, it's important to note that deleting large chunks of data will not allow you to reclaim that space on disk. For more information on page allocation, [see this comment][page-allocation]. [page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 ## Other Projects Using Bolt Below is a list of public, open source projects that use Bolt: * [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. * [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. * [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. * [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. 
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. * [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. * [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read. * [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics. * [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. 
Distributed, highly available, and datacenter-aware. * [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. * [stow](https://github.com/djherbis/stow) - a persistence manager for objects backed by boltdb. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. If you are using Bolt in a project please send a pull request to add it to the list. docker-1.10.3/vendor/src/github.com/boltdb/bolt/batch.go000066400000000000000000000063161267010174400230140ustar00rootroot00000000000000package bolt import ( "errors" "fmt" "sync" "time" ) // Batch calls fn as part of a batch. It behaves similar to Update, // except: // // 1. concurrent Batch calls can be combined into a single Bolt // transaction. // // 2. the function passed to Batch may be called multiple times, // regardless of whether it returns error or not. // // This means that Batch function side effects must be idempotent and // take permanent effect only after a successful return is seen in // caller. // // The maximum batch size and delay can be adjusted with DB.MaxBatchSize // and DB.MaxBatchDelay, respectively. // // Batch is only useful when there are multiple goroutines calling it. func (db *DB) Batch(fn func(*Tx) error) error { errCh := make(chan error, 1) db.batchMu.Lock() if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { // There is no existing batch, or the existing batch is full; start a new one. 
db.batch = &batch{ db: db, } db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) } db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) if len(db.batch.calls) >= db.MaxBatchSize { // wake up batch, it's ready to run go db.batch.trigger() } db.batchMu.Unlock() err := <-errCh if err == trySolo { err = db.Update(fn) } return err } type call struct { fn func(*Tx) error err chan<- error } type batch struct { db *DB timer *time.Timer start sync.Once calls []call } // trigger runs the batch if it hasn't already been run. func (b *batch) trigger() { b.start.Do(b.run) } // run performs the transactions in the batch and communicates results // back to DB.Batch. func (b *batch) run() { b.db.batchMu.Lock() b.timer.Stop() // Make sure no new work is added to this batch, but don't break // other batches. if b.db.batch == b { b.db.batch = nil } b.db.batchMu.Unlock() retry: for len(b.calls) > 0 { var failIdx = -1 err := b.db.Update(func(tx *Tx) error { for i, c := range b.calls { if err := safelyCall(c.fn, tx); err != nil { failIdx = i return err } } return nil }) if failIdx >= 0 { // take the failing transaction out of the batch. it's // safe to shorten b.calls here because db.batch no longer // points to us, and we hold the mutex anyway. c := b.calls[failIdx] b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] // tell the submitter re-run it solo, continue with the rest of the batch c.err <- trySolo continue retry } // pass success, or bolt internal errors, to all callers for _, c := range b.calls { if c.err != nil { c.err <- err } } break retry } } // trySolo is a special sentinel error value used for signaling that a // transaction function should be re-run. It should never be seen by // callers. 
var trySolo = errors.New("batch function returned an error and should be re-run solo") type panicked struct { reason interface{} } func (p panicked) Error() string { if err, ok := p.reason.(error); ok { return err.Error() } return fmt.Sprintf("panic: %v", p.reason) } func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { defer func() { if p := recover(); p != nil { err = panicked{p} } }() return fn(tx) } docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_386.go000066400000000000000000000003241267010174400232640ustar00rootroot00000000000000package bolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_amd64.go000066400000000000000000000003331267010174400236570ustar00rootroot00000000000000package bolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_arm.go000066400000000000000000000003241267010174400235230ustar00rootroot00000000000000package bolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_arm64.go000066400000000000000000000003541267010174400237000ustar00rootroot00000000000000// +build arm64 package bolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. 
const maxAllocSize = 0x7FFFFFFF docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_linux.go000066400000000000000000000003131267010174400241010ustar00rootroot00000000000000package bolt import ( "syscall" ) var odirect = syscall.O_DIRECT // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { return syscall.Fdatasync(int(db.file.Fd())) } docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_openbsd.go000066400000000000000000000010271267010174400243770ustar00rootroot00000000000000package bolt import ( "syscall" "unsafe" ) const ( msAsync = 1 << iota // perform asynchronous writes msSync // perform synchronous writes msInvalidate // invalidate cached data ) var odirect int func msync(db *DB) error { _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) if errno != 0 { return errno } return nil } func fdatasync(db *DB) error { if db.data != nil { return msync(db) } return db.file.Sync() } docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go000066400000000000000000000003561267010174400242260ustar00rootroot00000000000000// +build ppc64le package bolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_s390x.go000066400000000000000000000003541267010174400236350ustar00rootroot00000000000000// +build s390x package bolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. 
const maxAllocSize = 0x7FFFFFFF docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_unix.go000066400000000000000000000046671267010174400237450ustar00rootroot00000000000000// +build !windows,!plan9,!solaris package bolt import ( "fmt" "os" "syscall" "time" "unsafe" ) // flock acquires an advisory lock on a file descriptor. func flock(f *os.File, exclusive bool, timeout time.Duration) error { var t time.Time for { // If we're beyond our timeout then return an error. // This can only occur after we've attempted a flock once. if t.IsZero() { t = time.Now() } else if timeout > 0 && time.Since(t) > timeout { return ErrTimeout } flag := syscall.LOCK_SH if exclusive { flag = syscall.LOCK_EX } // Otherwise attempt to obtain an exclusive lock. err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB) if err == nil { return nil } else if err != syscall.EWOULDBLOCK { return err } // Wait for a bit and try again. time.Sleep(50 * time.Millisecond) } } // funlock releases an advisory lock on a file descriptor. func funlock(f *os.File) error { return syscall.Flock(int(f.Fd()), syscall.LOCK_UN) } // mmap memory maps a DB's data file. func mmap(db *DB, sz int) error { // Truncate and fsync to ensure file size metadata is flushed. // https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("file resize error: %s", err) } if err := db.file.Sync(); err != nil { return fmt.Errorf("file sync error: %s", err) } } // Map the data file to memory. b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) if err != nil { return err } // Advise the kernel that the mmap is accessed randomly. if err := madvise(b, syscall.MADV_RANDOM); err != nil { return fmt.Errorf("madvise: %s", err) } // Save the original byte slice and convert to a byte array pointer. 
db.dataref = b db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } // munmap unmaps a DB's data file from memory. func munmap(db *DB) error { // Ignore the unmap if we have no mapped data. if db.dataref == nil { return nil } // Unmap using the original byte slice. err := syscall.Munmap(db.dataref) db.dataref = nil db.data = nil db.datasz = 0 return err } // NOTE: This function is copied from stdlib because it is not available on darwin. func madvise(b []byte, advice int) (err error) { _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) if e1 != 0 { err = e1 } return } docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go000066400000000000000000000045121267010174400254660ustar00rootroot00000000000000 package bolt import ( "fmt" "os" "syscall" "time" "unsafe" "golang.org/x/sys/unix" ) // flock acquires an advisory lock on a file descriptor. func flock(f *os.File, exclusive bool, timeout time.Duration) error { var t time.Time for { // If we're beyond our timeout then return an error. // This can only occur after we've attempted a flock once. if t.IsZero() { t = time.Now() } else if timeout > 0 && time.Since(t) > timeout { return ErrTimeout } var lock syscall.Flock_t lock.Start = 0 lock.Len = 0 lock.Pid = 0 lock.Whence = 0 lock.Pid = 0 if exclusive { lock.Type = syscall.F_WRLCK } else { lock.Type = syscall.F_RDLCK } err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock) if err == nil { return nil } else if err != syscall.EAGAIN { return err } // Wait for a bit and try again. time.Sleep(50 * time.Millisecond) } } // funlock releases an advisory lock on a file descriptor. func funlock(f *os.File) error { var lock syscall.Flock_t lock.Start = 0 lock.Len = 0 lock.Type = syscall.F_UNLCK lock.Whence = 0 return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock) } // mmap memory maps a DB's data file. 
func mmap(db *DB, sz int) error { // Truncate and fsync to ensure file size metadata is flushed. // https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("file resize error: %s", err) } if err := db.file.Sync(); err != nil { return fmt.Errorf("file sync error: %s", err) } } // Map the data file to memory. b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) if err != nil { return err } // Advise the kernel that the mmap is accessed randomly. if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { return fmt.Errorf("madvise: %s", err) } // Save the original byte slice and convert to a byte array pointer. db.dataref = b db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } // munmap unmaps a DB's data file from memory. func munmap(db *DB) error { // Ignore the unmap if we have no mapped data. if db.dataref == nil { return nil } // Unmap using the original byte slice. err := unix.Munmap(db.dataref) db.dataref = nil db.data = nil db.datasz = 0 return err } docker-1.10.3/vendor/src/github.com/boltdb/bolt/bolt_windows.go000066400000000000000000000033621267010174400244430ustar00rootroot00000000000000package bolt import ( "fmt" "os" "syscall" "time" "unsafe" ) var odirect int // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { return db.file.Sync() } // flock acquires an advisory lock on a file descriptor. func flock(f *os.File, _ bool, _ time.Duration) error { return nil } // funlock releases an advisory lock on a file descriptor. func funlock(f *os.File) error { return nil } // mmap memory maps a DB's data file. // Based on: https://github.com/edsrzf/mmap-go func mmap(db *DB, sz int) error { if !db.readOnly { // Truncate the database to the size of the mmap. if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("truncate: %s", err) } } // Open a file mapping handle. 
sizelo := uint32(sz >> 32) sizehi := uint32(sz) & 0xffffffff h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) if h == 0 { return os.NewSyscallError("CreateFileMapping", errno) } // Create the memory map. addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) if addr == 0 { return os.NewSyscallError("MapViewOfFile", errno) } // Close mapping handle. if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { return os.NewSyscallError("CloseHandle", err) } // Convert to a byte array. db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) db.datasz = sz return nil } // munmap unmaps a pointer from a file. // Based on: https://github.com/edsrzf/mmap-go func munmap(db *DB) error { if db.data == nil { return nil } addr := (uintptr)(unsafe.Pointer(&db.data[0])) if err := syscall.UnmapViewOfFile(addr); err != nil { return os.NewSyscallError("UnmapViewOfFile", err) } return nil } docker-1.10.3/vendor/src/github.com/boltdb/bolt/boltsync_unix.go000066400000000000000000000002721267010174400246260ustar00rootroot00000000000000// +build !windows,!plan9,!linux,!openbsd package bolt var odirect int // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { return db.file.Sync() } docker-1.10.3/vendor/src/github.com/boltdb/bolt/bucket.go000066400000000000000000000477141267010174400232170ustar00rootroot00000000000000package bolt import ( "bytes" "fmt" "unsafe" ) const ( // MaxKeySize is the maximum length of a key, in bytes. MaxKeySize = 32768 // MaxValueSize is the maximum length of a value, in bytes. MaxValueSize = 4294967295 ) const ( maxUint = ^uint(0) minUint = 0 maxInt = int(^uint(0) >> 1) minInt = -maxInt - 1 ) const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) const ( minFillPercent = 0.1 maxFillPercent = 1.0 ) // DefaultFillPercent is the percentage that split pages are filled. // This value can be changed by setting Bucket.FillPercent. 
const DefaultFillPercent = 0.5 // Bucket represents a collection of key/value pairs inside the database. type Bucket struct { *bucket tx *Tx // the associated transaction buckets map[string]*Bucket // subbucket cache page *page // inline page reference rootNode *node // materialized node for the root page. nodes map[pgid]*node // node cache // Sets the threshold for filling nodes when they split. By default, // the bucket will fill to 50% but it can be useful to increase this // amount if you know that your write workloads are mostly append-only. // // This is non-persisted across transactions so it must be set in every Tx. FillPercent float64 } // bucket represents the on-file representation of a bucket. // This is stored as the "value" of a bucket key. If the bucket is small enough, // then its root page can be stored inline in the "value", after the bucket // header. In the case of inline buckets, the "root" will be 0. type bucket struct { root pgid // page id of the bucket's root-level page sequence uint64 // monotonically incrementing, used by NextSequence() } // newBucket returns a new bucket associated with a transaction. func newBucket(tx *Tx) Bucket { var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} if tx.writable { b.buckets = make(map[string]*Bucket) b.nodes = make(map[pgid]*node) } return b } // Tx returns the tx of the bucket. func (b *Bucket) Tx() *Tx { return b.tx } // Root returns the root of the bucket. func (b *Bucket) Root() pgid { return b.root } // Writable returns whether the bucket is writable. func (b *Bucket) Writable() bool { return b.tx.writable } // Cursor creates a cursor associated with the bucket. // The cursor is only valid as long as the transaction is open. // Do not use a cursor after the transaction is closed. func (b *Bucket) Cursor() *Cursor { // Update transaction statistics. b.tx.stats.CursorCount++ // Allocate and return a cursor. 
return &Cursor{ bucket: b, stack: make([]elemRef, 0), } } // Bucket retrieves a nested bucket by name. // Returns nil if the bucket does not exist. // The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) Bucket(name []byte) *Bucket { if b.buckets != nil { if child := b.buckets[string(name)]; child != nil { return child } } // Move cursor to key. c := b.Cursor() k, v, flags := c.seek(name) // Return nil if the key doesn't exist or it is not a bucket. if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { return nil } // Otherwise create a bucket and cache it. var child = b.openBucket(v) if b.buckets != nil { b.buckets[string(name)] = child } return child } // Helper method that re-interprets a sub-bucket value // from a parent into a Bucket func (b *Bucket) openBucket(value []byte) *Bucket { var child = newBucket(b.tx) // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. if b.tx.writable { child.bucket = &bucket{} *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) } else { child.bucket = (*bucket)(unsafe.Pointer(&value[0])) } // Save a reference to the inline page if the bucket is inline. if child.root == 0 { child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) } return &child } // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if b.tx.db == nil { return nil, ErrTxClosed } else if !b.tx.writable { return nil, ErrTxNotWritable } else if len(key) == 0 { return nil, ErrBucketNameRequired } // Move cursor to correct position. c := b.Cursor() k, _, flags := c.seek(key) // Return an error if there is an existing key. 
if bytes.Equal(key, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists } else { return nil, ErrIncompatibleValue } } // Create empty, inline bucket. var bucket = Bucket{ bucket: &bucket{}, rootNode: &node{isLeaf: true}, FillPercent: DefaultFillPercent, } var value = bucket.write() // Insert into node. key = cloneBytes(key) c.node().put(key, key, value, 0, bucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket // to be treated as a regular, non-inline bucket for the rest of the tx. b.page = nil return b.Bucket(key), nil } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { child, err := b.CreateBucket(key) if err == ErrBucketExists { return b.Bucket(key), nil } else if err != nil { return nil, err } return child, nil } // DeleteBucket deletes a bucket at the given key. // Returns an error if the bucket does not exists, or if the key represents a non-bucket value. func (b *Bucket) DeleteBucket(key []byte) error { if b.tx.db == nil { return ErrTxClosed } else if !b.Writable() { return ErrTxNotWritable } // Move cursor to correct position. c := b.Cursor() k, _, flags := c.seek(key) // Return an error if bucket doesn't exist or is not a bucket. if !bytes.Equal(key, k) { return ErrBucketNotFound } else if (flags & bucketLeafFlag) == 0 { return ErrIncompatibleValue } // Recursively delete all child buckets. child := b.Bucket(key) err := child.ForEach(func(k, v []byte) error { if v == nil { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } } return nil }) if err != nil { return err } // Remove cached copy. 
delete(b.buckets, string(key)) // Release all bucket pages to freelist. child.nodes = nil child.rootNode = nil child.free() // Delete the node if we have a matching key. c.node().del(key) return nil } // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. func (b *Bucket) Get(key []byte) []byte { k, v, flags := b.Cursor().seek(key) // Return nil if this is a bucket. if (flags & bucketLeafFlag) != 0 { return nil } // If our target node isn't the same key as what's passed in then return nil. if !bytes.Equal(key, k) { return nil } return v } // Put sets the value for a key in the bucket. // If the key exist then its previous value will be overwritten. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. func (b *Bucket) Put(key []byte, value []byte) error { if b.tx.db == nil { return ErrTxClosed } else if !b.Writable() { return ErrTxNotWritable } else if len(key) == 0 { return ErrKeyRequired } else if len(key) > MaxKeySize { return ErrKeyTooLarge } else if int64(len(value)) > MaxValueSize { return ErrValueTooLarge } // Move cursor to correct position. c := b.Cursor() k, _, flags := c.seek(key) // Return an error if there is an existing key with a bucket value. if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { return ErrIncompatibleValue } // Insert into node. key = cloneBytes(key) c.node().put(key, key, value, 0, 0) return nil } // Delete removes a key from the bucket. // If the key does not exist then nothing is done and a nil error is returned. // Returns an error if the bucket was created from a read-only transaction. func (b *Bucket) Delete(key []byte) error { if b.tx.db == nil { return ErrTxClosed } else if !b.Writable() { return ErrTxNotWritable } // Move cursor to correct position. 
c := b.Cursor() _, _, flags := c.seek(key) // Return an error if there is already existing bucket value. if (flags & bucketLeafFlag) != 0 { return ErrIncompatibleValue } // Delete the node if we have a matching key. c.node().del(key) return nil } // NextSequence returns an autoincrementing integer for the bucket. func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { return 0, ErrTxClosed } else if !b.Writable() { return 0, ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { _ = b.node(b.root, nil) } // Increment and return the sequence. b.bucket.sequence++ return b.bucket.sequence, nil } // ForEach executes a function for each key/value pair in a bucket. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. The provided function must not modify // the bucket; this will result in undefined behavior. func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { return ErrTxClosed } c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { if err := fn(k, v); err != nil { return err } } return nil } // Stat returns stats on a bucket. func (b *Bucket) Stats() BucketStats { var s, subStats BucketStats pageSize := b.tx.db.pageSize s.BucketN += 1 if b.root == 0 { s.InlineBucketN += 1 } b.forEachPage(func(p *page, depth int) { if (p.flags & leafPageFlag) != 0 { s.KeyN += int(p.count) // used totals the used bytes for the page used := pageHeaderSize if p.count != 0 { // If page has any elements, add all element headers. used += leafPageElementSize * int(p.count-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position // of the last element's key/value equals to the total of the sizes // of all previous elements' keys and values. // It also includes the last element's header. 
lastElement := p.leafPageElement(p.count - 1) used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) } if b.root == 0 { // For inlined bucket just update the inline stats s.InlineBucketInuse += used } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ s.LeafInuse += used s.LeafOverflowN += int(p.overflow) // Collect stats from sub-buckets. // Do that by iterating over all element headers // looking for the ones with the bucketLeafFlag. for i := uint16(0); i < p.count; i++ { e := p.leafPageElement(i) if (e.flags & bucketLeafFlag) != 0 { // For any bucket element, open the element value // and recursively call Stats on the contained bucket. subStats.Add(b.openBucket(e.value()).Stats()) } } } } else if (p.flags & branchPageFlag) != 0 { s.BranchPageN++ lastElement := p.branchPageElement(p.count - 1) // used totals the used bytes for the page // Add header and all element headers. used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals to // the total of key, value sizes of all previous elements. used += int(lastElement.pos + lastElement.ksize) s.BranchInuse += used s.BranchOverflowN += int(p.overflow) } // Keep track of maximum page depth. if depth+1 > s.Depth { s.Depth = (depth + 1) } }) // Alloc stats can be computed from page counts and pageSize. s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize // Add the max depth of sub-buckets to get total nested depth. s.Depth += subStats.Depth // Add the stats for all sub-buckets s.Add(subStats) return s } // forEachPage iterates over every page in a bucket, including inline pages. func (b *Bucket) forEachPage(fn func(*page, int)) { // If we have an inline page then just use that. if b.page != nil { fn(b.page, 0) return } // Otherwise traverse the page hierarchy. 
b.tx.forEachPage(b.root, 0, fn) } // forEachPageNode iterates over every page (or node) in a bucket. // This also includes inline pages. func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { // If we have an inline page or root node then just use that. if b.page != nil { fn(b.page, nil, 0) return } b._forEachPageNode(b.root, 0, fn) } func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { var p, n = b.pageNode(pgid) // Execute function. fn(p, n, depth) // Recursively loop over children. if p != nil { if (p.flags & branchPageFlag) != 0 { for i := 0; i < int(p.count); i++ { elem := p.branchPageElement(uint16(i)) b._forEachPageNode(elem.pgid, depth+1, fn) } } } else { if !n.isLeaf { for _, inode := range n.inodes { b._forEachPageNode(inode.pgid, depth+1, fn) } } } } // spill writes all the nodes for this bucket to dirty pages. func (b *Bucket) spill() error { // Spill all child buckets first. for name, child := range b.buckets { // If the child bucket is small enough and it has no child buckets then // write it inline into the parent bucket's page. Otherwise spill it // like a normal bucket and make the parent value a pointer to the page. var value []byte if child.inlineable() { child.free() value = child.write() } else { if err := child.spill(); err != nil { return err } // Update the child bucket header in this bucket. value = make([]byte, unsafe.Sizeof(bucket{})) var bucket = (*bucket)(unsafe.Pointer(&value[0])) *bucket = *child.bucket } // Skip writing the bucket if there are no materialized nodes. if child.rootNode == nil { continue } // Update parent node. 
var c = b.Cursor() k, _, flags := c.seek([]byte(name)) if !bytes.Equal([]byte(name), k) { panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) } if flags&bucketLeafFlag == 0 { panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) } c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) } // Ignore if there's not a materialized root node. if b.rootNode == nil { return nil } // Spill nodes. if err := b.rootNode.spill(); err != nil { return err } b.rootNode = b.rootNode.root() // Update the root node for this bucket. if b.rootNode.pgid >= b.tx.meta.pgid { panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) } b.root = b.rootNode.pgid return nil } // inlineable returns true if a bucket is small enough to be written inline // and if it contains no subbuckets. Otherwise returns false. func (b *Bucket) inlineable() bool { var n = b.rootNode // Bucket must only contain a single leaf node. if n == nil || !n.isLeaf { return false } // Bucket is not inlineable if it contains subbuckets or if it goes beyond // our threshold for inline bucket size. var size = pageHeaderSize for _, inode := range n.inodes { size += leafPageElementSize + len(inode.key) + len(inode.value) if inode.flags&bucketLeafFlag != 0 { return false } else if size > b.maxInlineBucketSize() { return false } } return true } // Returns the maximum total size of a bucket to make it a candidate for inlining. func (b *Bucket) maxInlineBucketSize() int { return b.tx.db.pageSize / 4 } // write allocates and writes a bucket to a byte slice. func (b *Bucket) write() []byte { // Allocate the appropriate size. var n = b.rootNode var value = make([]byte, bucketHeaderSize+n.size()) // Write a bucket header. var bucket = (*bucket)(unsafe.Pointer(&value[0])) *bucket = *b.bucket // Convert byte slice to a fake page and write the root node. 
var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) n.write(p) return value } // rebalance attempts to balance all nodes. func (b *Bucket) rebalance() { for _, n := range b.nodes { n.rebalance() } for _, child := range b.buckets { child.rebalance() } } // node creates a node from a page and associates it with a given parent. func (b *Bucket) node(pgid pgid, parent *node) *node { _assert(b.nodes != nil, "nodes map expected") // Retrieve node if it's already been created. if n := b.nodes[pgid]; n != nil { return n } // Otherwise create a node and cache it. n := &node{bucket: b, parent: parent} if parent == nil { b.rootNode = n } else { parent.children = append(parent.children, n) } // Use the inline page if this is an inline bucket. var p = b.page if p == nil { p = b.tx.page(pgid) } // Read the page into the node and cache it. n.read(p) b.nodes[pgid] = n // Update statistics. b.tx.stats.NodeCount++ return n } // free recursively frees all pages in the bucket. func (b *Bucket) free() { if b.root == 0 { return } var tx = b.tx b.forEachPageNode(func(p *page, n *node, _ int) { if p != nil { tx.db.freelist.free(tx.meta.txid, p) } else { n.free() } }) b.root = 0 } // dereference removes all references to the old mmap. func (b *Bucket) dereference() { if b.rootNode != nil { b.rootNode.root().dereference() } for _, child := range b.buckets { child.dereference() } } // pageNode returns the in-memory node, if it exists. // Otherwise returns the underlying page. func (b *Bucket) pageNode(id pgid) (*page, *node) { // Inline buckets have a fake page embedded in their value so treat them // differently. We'll return the rootNode (if available) or the fake page. if b.root == 0 { if id != 0 { panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) } if b.rootNode != nil { return nil, b.rootNode } return b.page, nil } // Check the node cache for non-inline buckets. 
if b.nodes != nil { if n := b.nodes[id]; n != nil { return nil, n } } // Finally lookup the page from the transaction if no node is materialized. return b.tx.page(id), nil } // BucketStats records statistics about resources used by a bucket. type BucketStats struct { // Page count statistics. BranchPageN int // number of logical branch pages BranchOverflowN int // number of physical branch overflow pages LeafPageN int // number of logical leaf pages LeafOverflowN int // number of physical leaf overflow pages // Tree statistics. KeyN int // number of keys/value pairs Depth int // number of levels in B+tree // Page size utilization. BranchAlloc int // bytes allocated for physical branch pages BranchInuse int // bytes actually used for branch data LeafAlloc int // bytes allocated for physical leaf pages LeafInuse int // bytes actually used for leaf data // Bucket statistics BucketN int // total number of buckets including the top bucket InlineBucketN int // total number on inlined buckets InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) } func (s *BucketStats) Add(other BucketStats) { s.BranchPageN += other.BranchPageN s.BranchOverflowN += other.BranchOverflowN s.LeafPageN += other.LeafPageN s.LeafOverflowN += other.LeafOverflowN s.KeyN += other.KeyN if s.Depth < other.Depth { s.Depth = other.Depth } s.BranchAlloc += other.BranchAlloc s.BranchInuse += other.BranchInuse s.LeafAlloc += other.LeafAlloc s.LeafInuse += other.LeafInuse s.BucketN += other.BucketN s.InlineBucketN += other.InlineBucketN s.InlineBucketInuse += other.InlineBucketInuse } // cloneBytes returns a copy of a given slice. 
func cloneBytes(v []byte) []byte { var clone = make([]byte, len(v)) copy(clone, v) return clone } docker-1.10.3/vendor/src/github.com/boltdb/bolt/cursor.go000066400000000000000000000253471267010174400232550ustar00rootroot00000000000000package bolt import ( "bytes" "fmt" "sort" ) // Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. // Cursors see nested buckets with value == nil. // Cursors can be obtained from a transaction and are valid as long as the transaction is open. // // Keys and values returned from the cursor are only valid for the life of the transaction. // // Changing data while traversing with a cursor may cause it to be invalidated // and return unexpected keys and/or values. You must reposition your cursor // after mutating data. type Cursor struct { bucket *Bucket stack []elemRef } // Bucket returns the bucket that this cursor was created from. func (c *Cursor) Bucket() *Bucket { return c.bucket } // First moves the cursor to the first item in the bucket and returns its key and value. // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) First() (key []byte, value []byte) { _assert(c.bucket.tx.db != nil, "tx closed") c.stack = c.stack[:0] p, n := c.bucket.pageNode(c.bucket.root) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.first() k, v, flags := c.keyValue() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil } return k, v } // Last moves the cursor to the last item in the bucket and returns its key and value. // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. 
func (c *Cursor) Last() (key []byte, value []byte) { _assert(c.bucket.tx.db != nil, "tx closed") c.stack = c.stack[:0] p, n := c.bucket.pageNode(c.bucket.root) ref := elemRef{page: p, node: n} ref.index = ref.count() - 1 c.stack = append(c.stack, ref) c.last() k, v, flags := c.keyValue() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil } return k, v } // Next moves the cursor to the next item in the bucket and returns its key and value. // If the cursor is at the end of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Next() (key []byte, value []byte) { _assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.next() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil } return k, v } // Prev moves the cursor to the previous item in the bucket and returns its key and value. // If the cursor is at the beginning of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Prev() (key []byte, value []byte) { _assert(c.bucket.tx.db != nil, "tx closed") // Attempt to move back one element until we're successful. // Move up the stack as we hit the beginning of each page in our stack. for i := len(c.stack) - 1; i >= 0; i-- { elem := &c.stack[i] if elem.index > 0 { elem.index-- break } c.stack = c.stack[:i] } // If we've hit the end then return nil. if len(c.stack) == 0 { return nil, nil } // Move down the stack to find the last element of the last leaf under this branch. c.last() k, v, flags := c.keyValue() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil } return k, v } // Seek moves the cursor to a given key and returns it. // If the key does not exist then the next key is used. If no keys // follow, a nil key is returned. // The returned key and value are only valid for the life of the transaction. 
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { k, v, flags := c.seek(seek) // If we ended up after the last element of a page then move to the next one. if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { k, v, flags = c.next() } if k == nil { return nil, nil } else if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil } return k, v } // Delete removes the current key/value under the cursor from the bucket. // Delete fails if current key/value is a bucket or if the transaction is not writable. func (c *Cursor) Delete() error { if c.bucket.tx.db == nil { return ErrTxClosed } else if !c.bucket.Writable() { return ErrTxNotWritable } key, _, flags := c.keyValue() // Return an error if current value is a bucket. if (flags & bucketLeafFlag) != 0 { return ErrIncompatibleValue } c.node().del(key) return nil } // seek moves the cursor to a given key and returns it. // If the key does not exist then the next key is used. func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { _assert(c.bucket.tx.db != nil, "tx closed") // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] c.search(seek, c.bucket.root) ref := &c.stack[len(c.stack)-1] // If the cursor is pointing to the end of page/node then return nil. if ref.index >= ref.count() { return nil, nil, 0 } // If this is a bucket then return a nil value. return c.keyValue() } // first moves the cursor to the first leaf element under the last page in the stack. func (c *Cursor) first() { for { // Exit when we hit a leaf page. var ref = &c.stack[len(c.stack)-1] if ref.isLeaf() { break } // Keep adding pages pointing to the first element to the stack. 
var pgid pgid if ref.node != nil { pgid = ref.node.inodes[ref.index].pgid } else { pgid = ref.page.branchPageElement(uint16(ref.index)).pgid } p, n := c.bucket.pageNode(pgid) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) } } // last moves the cursor to the last leaf element under the last page in the stack. func (c *Cursor) last() { for { // Exit when we hit a leaf page. ref := &c.stack[len(c.stack)-1] if ref.isLeaf() { break } // Keep adding pages pointing to the last element in the stack. var pgid pgid if ref.node != nil { pgid = ref.node.inodes[ref.index].pgid } else { pgid = ref.page.branchPageElement(uint16(ref.index)).pgid } p, n := c.bucket.pageNode(pgid) var nextRef = elemRef{page: p, node: n} nextRef.index = nextRef.count() - 1 c.stack = append(c.stack, nextRef) } } // next moves to the next leaf element and returns the key and value. // If the cursor is at the last leaf element then it stays there and returns nil. func (c *Cursor) next() (key []byte, value []byte, flags uint32) { // Attempt to move over one element until we're successful. // Move up the stack as we hit the end of each page in our stack. var i int for i = len(c.stack) - 1; i >= 0; i-- { elem := &c.stack[i] if elem.index < elem.count()-1 { elem.index++ break } } // If we've hit the root page then stop and return. This will leave the // cursor on the last element of the last page. if i == -1 { return nil, nil, 0 } // Otherwise start from where we left off in the stack and find the // first element of the first leaf page. c.stack = c.stack[:i+1] c.first() return c.keyValue() } // search recursively performs a binary search against a given page/node until it finds a given key. 
func (c *Cursor) search(key []byte, pgid pgid) { p, n := c.bucket.pageNode(pgid) if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) } e := elemRef{page: p, node: n} c.stack = append(c.stack, e) // If we're on a leaf page/node then find the specific node. if e.isLeaf() { c.nsearch(key) return } if n != nil { c.searchNode(key, n) return } c.searchPage(key, p) } func (c *Cursor) searchNode(key []byte, n *node) { var exact bool index := sort.Search(len(n.inodes), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. ret := bytes.Compare(n.inodes[i].key, key) if ret == 0 { exact = true } return ret != -1 }) if !exact && index > 0 { index-- } c.stack[len(c.stack)-1].index = index // Recursively search to the next page. c.search(key, n.inodes[index].pgid) } func (c *Cursor) searchPage(key []byte, p *page) { // Binary search for the correct range. inodes := p.branchPageElements() var exact bool index := sort.Search(int(p.count), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. ret := bytes.Compare(inodes[i].key(), key) if ret == 0 { exact = true } return ret != -1 }) if !exact && index > 0 { index-- } c.stack[len(c.stack)-1].index = index // Recursively search to the next page. c.search(key, inodes[index].pgid) } // nsearch searches the leaf node on the top of the stack for a key. func (c *Cursor) nsearch(key []byte) { e := &c.stack[len(c.stack)-1] p, n := e.page, e.node // If we have a node then search its inodes. if n != nil { index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) e.index = index return } // If we have a page then search its leaf elements. 
inodes := p.leafPageElements() index := sort.Search(int(p.count), func(i int) bool { return bytes.Compare(inodes[i].key(), key) != -1 }) e.index = index } // keyValue returns the key and value of the current leaf element. func (c *Cursor) keyValue() ([]byte, []byte, uint32) { ref := &c.stack[len(c.stack)-1] if ref.count() == 0 || ref.index >= ref.count() { return nil, nil, 0 } // Retrieve value from node. if ref.node != nil { inode := &ref.node.inodes[ref.index] return inode.key, inode.value, inode.flags } // Or retrieve value from page. elem := ref.page.leafPageElement(uint16(ref.index)) return elem.key(), elem.value(), elem.flags } // node returns the node that the cursor is currently positioned on. func (c *Cursor) node() *node { _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") // If the top of the stack is a leaf node then just return it. if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { return ref.node } // Start from root and traverse down the hierarchy. var n = c.stack[0].node if n == nil { n = c.bucket.node(c.stack[0].page.id, nil) } for _, ref := range c.stack[:len(c.stack)-1] { _assert(!n.isLeaf, "expected branch node") n = n.childAt(int(ref.index)) } _assert(n.isLeaf, "expected leaf node") return n } // elemRef represents a reference to an element on a given page/node. type elemRef struct { page *page node *node index int } // isLeaf returns whether the ref is pointing at a leaf page/node. func (r *elemRef) isLeaf() bool { if r.node != nil { return r.node.isLeaf } return (r.page.flags & leafPageFlag) != 0 } // count returns the number of inodes or page elements. 
func (r *elemRef) count() int { if r.node != nil { return len(r.node.inodes) } return int(r.page.count) } docker-1.10.3/vendor/src/github.com/boltdb/bolt/db.go000066400000000000000000000515611267010174400223220ustar00rootroot00000000000000package bolt import ( "fmt" "hash/fnv" "os" "runtime" "runtime/debug" "strings" "sync" "time" "unsafe" ) // The largest step that can be taken when remapping the mmap. const maxMmapStep = 1 << 30 // 1GB // The data file format version. const version = 2 // Represents a marker value to indicate that a file is a Bolt DB. const magic uint32 = 0xED0CDAED // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes // must be synchronzied using the msync(2) syscall. const IgnoreNoSync = runtime.GOOS == "openbsd" // Default values if not set in a DB instance. const ( DefaultMaxBatchSize int = 1000 DefaultMaxBatchDelay = 10 * time.Millisecond ) // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. type DB struct { // When enabled, the database will perform a Check() after every commit. // A panic is issued if the database is in an inconsistent state. This // flag has a large performance impact so it should only be used for // debugging purposes. StrictMode bool // Setting the NoSync flag will cause the database to skip fsync() // calls after each commit. This can be useful when bulk loading data // into a database and you can restart the bulk load in the event of // a system failure or database corruption. Do not set this flag for // normal use. // // If the package global IgnoreNoSync constant is true, this value is // ignored. See the comment on that constant for more details. 
// // THIS IS UNSAFE. PLEASE USE WITH CAUTION. NoSync bool // When true, skips the truncate call when growing the database. // Setting this to true is only safe on non-ext3/ext4 systems. // Skipping truncation avoids preallocation of hard drive space and // bypasses a truncate() and fsync() syscall on remapping. // // https://github.com/boltdb/bolt/issues/284 NoGrowSync bool // MaxBatchSize is the maximum size of a batch. Default value is // copied from DefaultMaxBatchSize in Open. // // If <=0, disables batching. // // Do not change concurrently with calls to Batch. MaxBatchSize int // MaxBatchDelay is the maximum delay before a batch starts. // Default value is copied from DefaultMaxBatchDelay in Open. // // If <=0, effectively disables batching. // // Do not change concurrently with calls to Batch. MaxBatchDelay time.Duration path string file *os.File dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int meta0 *meta meta1 *meta pageSize int opened bool rwtx *Tx txs []*Tx freelist *freelist stats Stats batchMu sync.Mutex batch *batch rwlock sync.Mutex // Allows only one writer at a time. metalock sync.Mutex // Protects meta page access. mmaplock sync.RWMutex // Protects mmap access during remapping. statlock sync.RWMutex // Protects stats access. ops struct { writeAt func(b []byte, off int64) (n int, err error) } // Read only mode. // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. readOnly bool } // Path returns the path to currently open database file. func (db *DB) Path() string { return db.path } // GoString returns the Go string representation of the database. func (db *DB) GoString() string { return fmt.Sprintf("bolt.DB{path:%q}", db.path) } // String returns the string representation of the database. func (db *DB) String() string { return fmt.Sprintf("DB<%q>", db.path) } // Open creates and opens a database at the given path. // If the file does not exist then it will be created automatically. 
// Passing in nil options will cause Bolt to open the database with the default options. func Open(path string, mode os.FileMode, options *Options) (*DB, error) { var db = &DB{opened: true} // Set default options if no options are provided. if options == nil { options = DefaultOptions } db.NoGrowSync = options.NoGrowSync // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize db.MaxBatchDelay = DefaultMaxBatchDelay flag := os.O_RDWR if options.ReadOnly { flag = os.O_RDONLY db.readOnly = true } // Open data file and separate sync handler for metadata writes. db.path = path var err error if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { _ = db.close() return nil, err } // Lock file so that other processes using Bolt in read-write mode cannot // use the database at the same time. This would cause corruption since // the two processes would write meta pages and free pages separately. // The database file is locked exclusively (only one process can grab the lock) // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). if err := flock(db.file, !db.readOnly, options.Timeout); err != nil { _ = db.close() return nil, err } // Default values for test hooks db.ops.writeAt = db.file.WriteAt // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { return nil, fmt.Errorf("stat error: %s", err) } else if info.Size() == 0 { // Initialize new files with meta pages. if err := db.init(); err != nil { return nil, err } } else { // Read the first meta page to determine the page size. var buf [0x1000]byte if _, err := db.file.ReadAt(buf[:], 0); err == nil { m := db.pageInBuffer(buf[:], 0).meta() if err := m.validate(); err != nil { return nil, fmt.Errorf("meta0 error: %s", err) } db.pageSize = int(m.pageSize) } } // Memory map the data file. 
if err := db.mmap(0); err != nil { _ = db.close() return nil, err } // Read in the freelist. db.freelist = newFreelist() db.freelist.read(db.page(db.meta().freelist)) // Mark the database as opened and return. return db, nil } // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) error { db.mmaplock.Lock() defer db.mmaplock.Unlock() info, err := db.file.Stat() if err != nil { return fmt.Errorf("mmap stat error: %s", err) } else if int(info.Size()) < db.pageSize*2 { return fmt.Errorf("file size too small") } // Ensure the size is at least the minimum size. var size = int(info.Size()) if size < minsz { size = minsz } size, err = db.mmapSize(size) if err != nil { return err } // Dereference all mmap references before unmapping. if db.rwtx != nil { db.rwtx.root.dereference() } // Unmap existing data before continuing. if err := db.munmap(); err != nil { return err } // Memory-map the data file as a byte slice. if err := mmap(db, size); err != nil { return err } // Save references to the meta pages. db.meta0 = db.page(0).meta() db.meta1 = db.page(1).meta() // Validate the meta pages. if err := db.meta0.validate(); err != nil { return fmt.Errorf("meta0 error: %s", err) } if err := db.meta1.validate(); err != nil { return fmt.Errorf("meta1 error: %s", err) } return nil } // munmap unmaps the data file from memory. func (db *DB) munmap() error { if err := munmap(db); err != nil { return fmt.Errorf("unmap error: " + err.Error()) } return nil } // mmapSize determines the appropriate size for the mmap given the current size // of the database. The minimum size is 1MB and doubles until it reaches 1GB. // Returns an error if the new mmap size is greater than the max allowed. func (db *DB) mmapSize(size int) (int, error) { // Double the size from 32KB until 1GB. 
for i := uint(15); i <= 30; i++ { if size <= 1< maxMapSize { return 0, fmt.Errorf("mmap too large") } // If larger than 1GB then grow by 1GB at a time. sz := int64(size) if remainder := sz % int64(maxMmapStep); remainder > 0 { sz += int64(maxMmapStep) - remainder } // Ensure that the mmap size is a multiple of the page size. // This should always be true since we're incrementing in MBs. pageSize := int64(db.pageSize) if (sz % pageSize) != 0 { sz = ((sz / pageSize) + 1) * pageSize } // If we've exceeded the max size then only grow up to the max size. if sz > maxMapSize { sz = maxMapSize } return int(sz), nil } // init creates a new database file and initializes its meta pages. func (db *DB) init() error { // Set the page size to the OS page size. db.pageSize = os.Getpagesize() // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { p := db.pageInBuffer(buf[:], pgid(i)) p.id = pgid(i) p.flags = metaPageFlag // Initialize the meta page. m := p.meta() m.magic = magic m.version = version m.pageSize = uint32(db.pageSize) m.freelist = 2 m.root = bucket{root: 3} m.pgid = 4 m.txid = txid(i) } // Write an empty freelist at page 3. p := db.pageInBuffer(buf[:], pgid(2)) p.id = pgid(2) p.flags = freelistPageFlag p.count = 0 // Write an empty leaf page at page 4. p = db.pageInBuffer(buf[:], pgid(3)) p.id = pgid(3) p.flags = leafPageFlag p.count = 0 // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { return err } if err := fdatasync(db); err != nil { return err } return nil } // Close releases all database resources. // All transactions must be closed before closing the database. func (db *DB) Close() error { db.rwlock.Lock() defer db.rwlock.Unlock() db.metalock.Lock() defer db.metalock.Unlock() db.mmaplock.RLock() defer db.mmaplock.RUnlock() return db.close() } func (db *DB) close() error { db.opened = false db.freelist = nil db.path = "" // Clear ops. db.ops.writeAt = nil // Close the mmap. 
if err := db.munmap(); err != nil { return err } // Close file handles. if db.file != nil { // No need to unlock read-only file. if !db.readOnly { // Unlock the file. _ = funlock(db.file) } // Close the file descriptor. if err := db.file.Close(); err != nil { return fmt.Errorf("db file close: %s", err) } db.file = nil } return nil } // Begin starts a new transaction. // Multiple read-only transactions can be used concurrently but only one // write transaction can be used at a time. Starting multiple write transactions // will cause the calls to block and be serialized until the current write // transaction finishes. // // Transactions should not be depedent on one another. Opening a read // transaction and a write transaction in the same goroutine can cause the // writer to deadlock because the database periodically needs to re-mmap itself // as it grows and it cannot do that while a read transaction is open. // // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. func (db *DB) Begin(writable bool) (*Tx, error) { if writable { return db.beginRWTx() } return db.beginTx() } func (db *DB) beginTx() (*Tx, error) { // Lock the meta pages while we initialize the transaction. We obtain // the meta lock before the mmap lock because that's the order that the // write transaction will obtain them. db.metalock.Lock() // Obtain a read-only lock on the mmap. When the mmap is remapped it will // obtain a write lock so all transactions must finish before it can be // remapped. db.mmaplock.RLock() // Exit if the database is not open yet. if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() return nil, ErrDatabaseNotOpen } // Create a transaction associated with the database. t := &Tx{} t.init(db) // Keep track of transaction until it closes. db.txs = append(db.txs, t) n := len(db.txs) // Unlock the meta pages. db.metalock.Unlock() // Update the transaction stats. 
db.statlock.Lock() db.stats.TxN++ db.stats.OpenTxN = n db.statlock.Unlock() return t, nil } func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. if db.readOnly { return nil, ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. // This enforces only one writer transaction at a time. db.rwlock.Lock() // Once we have the writer lock then we can lock the meta pages so that // we can set up the transaction. db.metalock.Lock() defer db.metalock.Unlock() // Exit if the database is not open yet. if !db.opened { db.rwlock.Unlock() return nil, ErrDatabaseNotOpen } // Create a transaction associated with the database. t := &Tx{writable: true} t.init(db) db.rwtx = t // Free any pages associated with closed read-only transactions. var minid txid = 0xFFFFFFFFFFFFFFFF for _, t := range db.txs { if t.meta.txid < minid { minid = t.meta.txid } } if minid > 0 { db.freelist.release(minid - 1) } return t, nil } // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. db.mmaplock.RUnlock() // Use the meta lock to restrict access to the DB object. db.metalock.Lock() // Remove the transaction. for i, t := range db.txs { if t == tx { db.txs = append(db.txs[:i], db.txs[i+1:]...) break } } n := len(db.txs) // Unlock the meta pages. db.metalock.Unlock() // Merge statistics. db.statlock.Lock() db.stats.OpenTxN = n db.stats.TxStats.add(&tx.stats) db.statlock.Unlock() } // Update executes a function within the context of a read-write managed transaction. // If no error is returned from the function then the transaction is committed. // If an error is returned then the entire transaction is rolled back. // Any error that is returned from the function or returned from the commit is // returned from the Update() method. // // Attempting to manually commit or rollback within the function will cause a panic. 
func (db *DB) Update(fn func(*Tx) error) error { t, err := db.Begin(true) if err != nil { return err } // Make sure the transaction rolls back in the event of a panic. defer func() { if t.db != nil { t.rollback() } }() // Mark as a managed tx so that the inner function cannot manually commit. t.managed = true // If an error is returned from the function then rollback and return error. err = fn(t) t.managed = false if err != nil { _ = t.Rollback() return err } return t.Commit() } // View executes a function within the context of a managed read-only transaction. // Any error that is returned from the function is returned from the View() method. // // Attempting to manually rollback within the function will cause a panic. func (db *DB) View(fn func(*Tx) error) error { t, err := db.Begin(false) if err != nil { return err } // Make sure the transaction rolls back in the event of a panic. defer func() { if t.db != nil { t.rollback() } }() // Mark as a managed tx so that the inner function cannot manually rollback. t.managed = true // If an error is returned from the function then pass it through. err = fn(t) t.managed = false if err != nil { _ = t.Rollback() return err } if err := t.Rollback(); err != nil { return err } return nil } // Sync executes fdatasync() against the database file handle. // // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. func (db *DB) Sync() error { return fdatasync(db) } // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. func (db *DB) Stats() Stats { db.statlock.RLock() defer db.statlock.RUnlock() return db.stats } // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. 
func (db *DB) Info() *Info { return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } // page retrieves a page reference from the mmap based on the current page size. func (db *DB) page(id pgid) *page { pos := id * pgid(db.pageSize) return (*page)(unsafe.Pointer(&db.data[pos])) } // pageInBuffer retrieves a page reference from a given byte array based on the current page size. func (db *DB) pageInBuffer(b []byte, id pgid) *page { return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) } // meta retrieves the current meta page reference. func (db *DB) meta() *meta { if db.meta0.txid > db.meta1.txid { return db.meta0 } return db.meta1 } // allocate returns a contiguous block of memory starting at a given page. func (db *DB) allocate(count int) (*page, error) { // Allocate a temporary buffer for the page. buf := make([]byte, count*db.pageSize) p := (*page)(unsafe.Pointer(&buf[0])) p.overflow = uint32(count - 1) // Use pages from the freelist if they are available. if p.id = db.freelist.allocate(count); p.id != 0 { return p, nil } // Resize mmap() if we're at the end. p.id = db.rwtx.meta.pgid var minsz = int((p.id+pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { return nil, fmt.Errorf("mmap allocate error: %s", err) } } // Move the page id high water mark. db.rwtx.meta.pgid += pgid(count) return p, nil } func (db *DB) IsReadOnly() bool { return db.readOnly } // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. // When set to zero it will wait indefinitely. This option is only // available on Darwin and Linux. Timeout time.Duration // Sets the DB.NoGrowSync flag before memory mapping the file. NoGrowSync bool // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool } // DefaultOptions represent the options used if nil options are passed into Open(). 
// No timeout is used which will cause Bolt to wait indefinitely for a lock. var DefaultOptions = &Options{ Timeout: 0, NoGrowSync: false, } // Stats represents statistics about the database. type Stats struct { // Freelist stats FreePageN int // total number of free pages on the freelist PendingPageN int // total number of pending pages on the freelist FreeAlloc int // total bytes allocated in free pages FreelistInuse int // total bytes used by the freelist // Transaction stats TxN int // total number of started read transactions OpenTxN int // number of currently open read transactions TxStats TxStats // global, ongoing stats. } // Sub calculates and returns the difference between two sets of database stats. // This is useful when obtaining stats at two different points and time and // you need the performance counters that occurred within that time span. func (s *Stats) Sub(other *Stats) Stats { if other == nil { return *s } var diff Stats diff.FreePageN = s.FreePageN diff.PendingPageN = s.PendingPageN diff.FreeAlloc = s.FreeAlloc diff.FreelistInuse = s.FreelistInuse diff.TxN = other.TxN - s.TxN diff.TxStats = s.TxStats.Sub(&other.TxStats) return diff } func (s *Stats) add(other *Stats) { s.TxStats.add(&other.TxStats) } type Info struct { Data uintptr PageSize int } type meta struct { magic uint32 version uint32 pageSize uint32 flags uint32 root bucket freelist pgid pgid pgid txid txid checksum uint64 } // validate checks the marker bytes and version of the meta page to ensure it matches this binary. func (m *meta) validate() error { if m.checksum != 0 && m.checksum != m.sum64() { return ErrChecksum } else if m.magic != magic { return ErrInvalid } else if m.version != version { return ErrVersionMismatch } return nil } // copy copies one meta object to another. func (m *meta) copy(dest *meta) { *dest = *m } // write writes the meta onto a page. 
func (m *meta) write(p *page) { if m.root.root >= m.pgid { panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) } else if m.freelist >= m.pgid { panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) } // Page id is either going to be 0 or 1 which we can determine by the transaction ID. p.id = pgid(m.txid % 2) p.flags |= metaPageFlag // Calculate the checksum. m.checksum = m.sum64() m.copy(p.meta()) } // generates the checksum for the meta. func (m *meta) sum64() uint64 { var h = fnv.New64a() _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) return h.Sum64() } // _assert will panic with a given formatted message if the given condition is false. func _assert(condition bool, msg string, v ...interface{}) { if !condition { panic(fmt.Sprintf("assertion failed: "+msg, v...)) } } func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } func printstack() { stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") fmt.Fprintln(os.Stderr, stack) } docker-1.10.3/vendor/src/github.com/boltdb/bolt/doc.go000066400000000000000000000033651267010174400225010ustar00rootroot00000000000000/* Package bolt implements a low-level key/value store in pure Go. It supports fully serializable transactions, ACID semantics, and lock-free MVCC with multiple readers and a single writer. Bolt can be used for projects that want a simple data store without the need to add large dependencies such as Postgres or MySQL. Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is optimized for fast read access and does not require recovery in the event of a system crash. Transactions which have not finished committing will simply be rolled back in the event of a crash. The design of Bolt is based on Howard Chu's LMDB database project. 
Bolt currently works on Windows, Mac OS X, and Linux. Basics There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is a collection of buckets and is represented by a single file on disk. A bucket is a collection of unique keys that are associated with values. Transactions provide either read-only or read-write access to the database. Read-only transactions can retrieve key/value pairs and can use Cursors to iterate over the dataset sequentially. Read-write transactions can create and delete buckets and can insert and remove keys. Only one read-write transaction is allowed at a time. Caveats The database uses a read-only, memory-mapped data file to ensure that applications cannot corrupt the database, however, this means that keys and values returned from Bolt cannot be changed. Writing to a read-only byte slice will cause Go to panic. Keys and values retrieved from the database are only valid for the life of the transaction. When used outside the transaction, these byte slices can point to different data or can point to invalid memory which will cause a panic. */ package bolt docker-1.10.3/vendor/src/github.com/boltdb/bolt/errors.go000066400000000000000000000051741267010174400232500ustar00rootroot00000000000000package bolt import "errors" // These errors can be returned when opening or calling methods on a DB. var ( // ErrDatabaseNotOpen is returned when a DB instance is accessed before it // is opened or after it is closed. ErrDatabaseNotOpen = errors.New("database not open") // ErrDatabaseOpen is returned when opening a database that is // already open. ErrDatabaseOpen = errors.New("database already open") // ErrInvalid is returned when a data file is not a Bolt-formatted database. ErrInvalid = errors.New("invalid database") // ErrVersionMismatch is returned when the data file was created with a // different version of Bolt. 
ErrVersionMismatch = errors.New("version mismatch") // ErrChecksum is returned when either meta page checksum does not match. ErrChecksum = errors.New("checksum error") // ErrTimeout is returned when a database cannot obtain an exclusive lock // on the data file after the timeout passed to Open(). ErrTimeout = errors.New("timeout") ) // These errors can occur when beginning or committing a Tx. var ( // ErrTxNotWritable is returned when performing a write operation on a // read-only transaction. ErrTxNotWritable = errors.New("tx not writable") // ErrTxClosed is returned when committing or rolling back a transaction // that has already been committed or rolled back. ErrTxClosed = errors.New("tx closed") // ErrDatabaseReadOnly is returned when a mutating transaction is started on a // read-only database. ErrDatabaseReadOnly = errors.New("database is in read-only mode") ) // These errors can occur when putting or deleting a value or a bucket. var ( // ErrBucketNotFound is returned when trying to access a bucket that has // not been created yet. ErrBucketNotFound = errors.New("bucket not found") // ErrBucketExists is returned when creating a bucket that already exists. ErrBucketExists = errors.New("bucket already exists") // ErrBucketNameRequired is returned when creating a bucket with a blank name. ErrBucketNameRequired = errors.New("bucket name required") // ErrKeyRequired is returned when inserting a zero-length key. ErrKeyRequired = errors.New("key required") // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. ErrKeyTooLarge = errors.New("key too large") // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. ErrValueTooLarge = errors.New("value too large") // ErrIncompatibleValue is returned when trying create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. 
ErrIncompatibleValue = errors.New("incompatible value") ) docker-1.10.3/vendor/src/github.com/boltdb/bolt/freelist.go000066400000000000000000000143561267010174400235530ustar00rootroot00000000000000package bolt import ( "fmt" "sort" "unsafe" ) // freelist represents a list of all pages that are available for allocation. // It also tracks pages that have been freed but are still in use by open transactions. type freelist struct { ids []pgid // all free and available free page ids. pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. cache map[pgid]bool // fast lookup of all free and pending page ids. } // newFreelist returns an empty, initialized freelist. func newFreelist() *freelist { return &freelist{ pending: make(map[txid][]pgid), cache: make(map[pgid]bool), } } // size returns the size of the page after serialization. func (f *freelist) size() int { return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) } // count returns count of pages on the freelist func (f *freelist) count() int { return f.free_count() + f.pending_count() } // free_count returns count of free pages func (f *freelist) free_count() int { return len(f.ids) } // pending_count returns count of pending pages func (f *freelist) pending_count() int { var count int for _, list := range f.pending { count += len(list) } return count } // all returns a list of all free ids and all pending ids in one sorted list. func (f *freelist) all() []pgid { m := make(pgids, 0) for _, list := range f.pending { m = append(m, list...) } sort.Sort(m) return pgids(f.ids).merge(m) } // allocate returns the starting page id of a contiguous list of pages of a given size. // If a contiguous block cannot be found then 0 is returned. func (f *freelist) allocate(n int) pgid { if len(f.ids) == 0 { return 0 } var initial, previd pgid for i, id := range f.ids { if id <= 1 { panic(fmt.Sprintf("invalid page allocation: %d", id)) } // Reset initial page if this is not contiguous. 
if previd == 0 || id-previd != 1 { initial = id } // If we found a contiguous block then remove it and return it. if (id-initial)+1 == pgid(n) { // If we're allocating off the beginning then take the fast path // and just adjust the existing slice. This will use extra memory // temporarily but the append() in free() will realloc the slice // as is necessary. if (i + 1) == n { f.ids = f.ids[i+1:] } else { copy(f.ids[i-n+1:], f.ids[i+1:]) f.ids = f.ids[:len(f.ids)-n] } // Remove from the free cache. for i := pgid(0); i < pgid(n); i++ { delete(f.cache, initial+i) } return initial } previd = id } return 0 } // free releases a page and its overflow for a given transaction id. // If the page is already free then a panic will occur. func (f *freelist) free(txid txid, p *page) { if p.id <= 1 { panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) } // Free page and all its overflow pages. var ids = f.pending[txid] for id := p.id; id <= p.id+pgid(p.overflow); id++ { // Verify that page is not already free. if f.cache[id] { panic(fmt.Sprintf("page %d already freed", id)) } // Add to the freelist and cache. ids = append(ids, id) f.cache[id] = true } f.pending[txid] = ids } // release moves all page ids for a transaction id (or older) to the freelist. func (f *freelist) release(txid txid) { m := make(pgids, 0) for tid, ids := range f.pending { if tid <= txid { // Move transaction's pending pages to the available freelist. // Don't remove from the cache since the page is still free. m = append(m, ids...) delete(f.pending, tid) } } sort.Sort(m) f.ids = pgids(f.ids).merge(m) } // rollback removes the pages from a given pending tx. func (f *freelist) rollback(txid txid) { // Remove page ids from cache. for _, id := range f.pending[txid] { delete(f.cache, id) } // Remove pages from pending list. delete(f.pending, txid) } // freed returns whether a given page is in the free list. 
func (f *freelist) freed(pgid pgid) bool { return f.cache[pgid] } // read initializes the freelist from a freelist page. func (f *freelist) read(p *page) { // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. idx, count := 0, int(p.count) if count == 0xFFFF { idx = 1 count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) } // Copy the list of page ids from the freelist. ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] f.ids = make([]pgid, len(ids)) copy(f.ids, ids) // Make sure they're sorted. sort.Sort(pgids(f.ids)) // Rebuild the page cache. f.reindex() } // write writes the page ids onto a freelist page. All free and pending ids are // saved to disk since in the event of a program crash, all pending ids will // become free. func (f *freelist) write(p *page) error { // Combine the old free pgids and pgids waiting on an open transaction. ids := f.all() // Update the header flag. p.flags |= freelistPageFlag // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. if len(ids) < 0xFFFF { p.count = uint16(len(ids)) copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) } else { p.count = 0xFFFF ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) } return nil } // reload reads the freelist from a page and filters out pending items. func (f *freelist) reload(p *page) { f.read(p) // Build a cache of only pending pages. pcache := make(map[pgid]bool) for _, pendingIDs := range f.pending { for _, pendingID := range pendingIDs { pcache[pendingID] = true } } // Check each page in the freelist and build a new available freelist // with any pages not in the pending lists. 
var a []pgid for _, id := range f.ids { if !pcache[id] { a = append(a, id) } } f.ids = a // Once the available list is rebuilt then rebuild the free cache so that // it includes the available and pending free pages. f.reindex() } // reindex rebuilds the free cache based on available and pending free lists. func (f *freelist) reindex() { f.cache = make(map[pgid]bool) for _, id := range f.ids { f.cache[id] = true } for _, pendingIDs := range f.pending { for _, pendingID := range pendingIDs { f.cache[pendingID] = true } } } docker-1.10.3/vendor/src/github.com/boltdb/bolt/node.go000066400000000000000000000417201267010174400226560ustar00rootroot00000000000000package bolt import ( "bytes" "fmt" "sort" "unsafe" ) // node represents an in-memory, deserialized page. type node struct { bucket *Bucket isLeaf bool unbalanced bool spilled bool key []byte pgid pgid parent *node children nodes inodes inodes } // root returns the top-level node this node is attached to. func (n *node) root() *node { if n.parent == nil { return n } return n.parent.root() } // minKeys returns the minimum number of inodes this node should have. func (n *node) minKeys() int { if n.isLeaf { return 1 } return 2 } // size returns the size of the node after serialization. func (n *node) size() int { sz, elsz := pageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] sz += elsz + len(item.key) + len(item.value) } return sz } // sizeLessThan returns true if the node is less than a given size. // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. func (n *node) sizeLessThan(v int) bool { sz, elsz := pageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] sz += elsz + len(item.key) + len(item.value) if sz >= v { return false } } return true } // pageElementSize returns the size of each page element based on the type of node. 
func (n *node) pageElementSize() int { if n.isLeaf { return leafPageElementSize } return branchPageElementSize } // childAt returns the child node at a given index. func (n *node) childAt(index int) *node { if n.isLeaf { panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) } return n.bucket.node(n.inodes[index].pgid, n) } // childIndex returns the index of a given child node. func (n *node) childIndex(child *node) int { index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) return index } // numChildren returns the number of children. func (n *node) numChildren() int { return len(n.inodes) } // nextSibling returns the next node with the same parent. func (n *node) nextSibling() *node { if n.parent == nil { return nil } index := n.parent.childIndex(n) if index >= n.parent.numChildren()-1 { return nil } return n.parent.childAt(index + 1) } // prevSibling returns the previous node with the same parent. func (n *node) prevSibling() *node { if n.parent == nil { return nil } index := n.parent.childIndex(n) if index == 0 { return nil } return n.parent.childAt(index - 1) } // put inserts a key/value. func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { if pgid >= n.bucket.tx.meta.pgid { panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) } else if len(oldKey) <= 0 { panic("put: zero-length old key") } else if len(newKey) <= 0 { panic("put: zero-length new key") } // Find insertion index. index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) // Add capacity and shift nodes if we don't have an exact match and need to insert. 
exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) if !exact { n.inodes = append(n.inodes, inode{}) copy(n.inodes[index+1:], n.inodes[index:]) } inode := &n.inodes[index] inode.flags = flags inode.key = newKey inode.value = value inode.pgid = pgid _assert(len(inode.key) > 0, "put: zero-length inode key") } // del removes a key from the node. func (n *node) del(key []byte) { // Find index of key. index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) // Exit if the key isn't found. if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { return } // Delete inode from the node. n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) // Mark the node as needing rebalancing. n.unbalanced = true } // read initializes the node from a page. func (n *node) read(p *page) { n.pgid = p.id n.isLeaf = ((p.flags & leafPageFlag) != 0) n.inodes = make(inodes, int(p.count)) for i := 0; i < int(p.count); i++ { inode := &n.inodes[i] if n.isLeaf { elem := p.leafPageElement(uint16(i)) inode.flags = elem.flags inode.key = elem.key() inode.value = elem.value() } else { elem := p.branchPageElement(uint16(i)) inode.pgid = elem.pgid inode.key = elem.key() } _assert(len(inode.key) > 0, "read: zero-length inode key") } // Save first key so we can find the node in the parent when we spill. if len(n.inodes) > 0 { n.key = n.inodes[0].key _assert(len(n.key) > 0, "read: zero-length node key") } else { n.key = nil } } // write writes the items onto one or more pages. func (n *node) write(p *page) { // Initialize page. if n.isLeaf { p.flags |= leafPageFlag } else { p.flags |= branchPageFlag } if len(n.inodes) >= 0xFFFF { panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) } p.count = uint16(len(n.inodes)) // Loop over each item and write it to the page. 
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] for i, item := range n.inodes { _assert(len(item.key) > 0, "write: zero-length inode key") // Write the page element. if n.isLeaf { elem := p.leafPageElement(uint16(i)) elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) elem.flags = item.flags elem.ksize = uint32(len(item.key)) elem.vsize = uint32(len(item.value)) } else { elem := p.branchPageElement(uint16(i)) elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) elem.ksize = uint32(len(item.key)) elem.pgid = item.pgid _assert(elem.pgid != p.id, "write: circular dependency occurred") } // If the length of key+value is larger than the max allocation size // then we need to reallocate the byte array pointer. // // See: https://github.com/boltdb/bolt/pull/335 klen, vlen := len(item.key), len(item.value) if len(b) < klen+vlen { b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] } // Write data for the element to the end of the page. copy(b[0:], item.key) b = b[klen:] copy(b[0:], item.value) b = b[vlen:] } // DEBUG ONLY: n.dump() } // split breaks up a node into multiple smaller nodes, if appropriate. // This should only be called from the spill() function. func (n *node) split(pageSize int) []*node { var nodes []*node node := n for { // Split node into two. a, b := node.splitTwo(pageSize) nodes = append(nodes, a) // If we can't split then exit the loop. if b == nil { break } // Set node to b so it gets split on the next iteration. node = b } return nodes } // splitTwo breaks up a node into two smaller nodes, if appropriate. // This should only be called from the split() function. func (n *node) splitTwo(pageSize int) (*node, *node) { // Ignore the split if the page doesn't have at least enough nodes for // two pages or if the nodes can fit in a single page. 
if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { return n, nil } // Determine the threshold before starting a new node. var fillPercent = n.bucket.FillPercent if fillPercent < minFillPercent { fillPercent = minFillPercent } else if fillPercent > maxFillPercent { fillPercent = maxFillPercent } threshold := int(float64(pageSize) * fillPercent) // Determine split position and sizes of the two pages. splitIndex, _ := n.splitIndex(threshold) // Split node into two separate nodes. // If there's no parent then we'll need to create one. if n.parent == nil { n.parent = &node{bucket: n.bucket, children: []*node{n}} } // Create a new node and add it to the parent. next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} n.parent.children = append(n.parent.children, next) // Split inodes across two nodes. next.inodes = n.inodes[splitIndex:] n.inodes = n.inodes[:splitIndex] // Update the statistics. n.bucket.tx.stats.Split++ return n, next } // splitIndex finds the position where a page will fill a given threshold. // It returns the index as well as the size of the first page. // This is only be called from split(). func (n *node) splitIndex(threshold int) (index, sz int) { sz = pageHeaderSize // Loop until we only have the minimum number of keys required for the second page. for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { index = i inode := n.inodes[i] elsize := n.pageElementSize() + len(inode.key) + len(inode.value) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. if i >= minKeysPerPage && sz+elsize > threshold { break } // Add the element size to the total size. sz += elsize } return } // spill writes the nodes to dirty pages and splits nodes as it goes. // Returns an error if dirty pages cannot be allocated. func (n *node) spill() error { var tx = n.bucket.tx if n.spilled { return nil } // Spill child nodes first. 
Child nodes can materialize sibling nodes in // the case of split-merge so we cannot use a range loop. We have to check // the children size on every loop iteration. sort.Sort(n.children) for i := 0; i < len(n.children); i++ { if err := n.children[i].spill(); err != nil { return err } } // We no longer need the child list because it's only used for spill tracking. n.children = nil // Split nodes into appropriate sizes. The first node will always be n. var nodes = n.split(tx.db.pageSize) for _, node := range nodes { // Add node's page to the freelist if it's not new. if node.pgid > 0 { tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) node.pgid = 0 } // Allocate contiguous space for the node. p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) if err != nil { return err } // Write the node. if p.id >= tx.meta.pgid { panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) } node.pgid = p.id node.write(p) node.spilled = true // Insert into parent inodes. if node.parent != nil { var key = node.key if key == nil { key = node.inodes[0].key } node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) node.key = node.inodes[0].key _assert(len(node.key) > 0, "spill: zero-length node key") } // Update the statistics. tx.stats.Spill++ } // If the root node split and created a new root then we need to spill that // as well. We'll clear out the children to make sure it doesn't try to respill. if n.parent != nil && n.parent.pgid == 0 { n.children = nil return n.parent.spill() } return nil } // rebalance attempts to combine the node with sibling nodes if the node fill // size is below a threshold or if there are not enough keys. func (n *node) rebalance() { if !n.unbalanced { return } n.unbalanced = false // Update statistics. n.bucket.tx.stats.Rebalance++ // Ignore if node is above threshold (25%) and has enough keys. 
var threshold = n.bucket.tx.db.pageSize / 4 if n.size() > threshold && len(n.inodes) > n.minKeys() { return } // Root node has special handling. if n.parent == nil { // If root node is a branch and only has one node then collapse it. if !n.isLeaf && len(n.inodes) == 1 { // Move root's child up. child := n.bucket.node(n.inodes[0].pgid, n) n.isLeaf = child.isLeaf n.inodes = child.inodes[:] n.children = child.children // Reparent all child nodes being moved. for _, inode := range n.inodes { if child, ok := n.bucket.nodes[inode.pgid]; ok { child.parent = n } } // Remove old child. child.parent = nil delete(n.bucket.nodes, child.pgid) child.free() } return } // If node has no keys then just remove it. if n.numChildren() == 0 { n.parent.del(n.key) n.parent.removeChild(n) delete(n.bucket.nodes, n.pgid) n.free() n.parent.rebalance() return } _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") // Destination node is right sibling if idx == 0, otherwise left sibling. var target *node var useNextSibling = (n.parent.childIndex(n) == 0) if useNextSibling { target = n.nextSibling() } else { target = n.prevSibling() } // If target node has extra nodes then just move one over. if target.numChildren() > target.minKeys() { if useNextSibling { // Reparent and move node. if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok { child.parent.removeChild(child) child.parent = n child.parent.children = append(child.parent.children, child) } n.inodes = append(n.inodes, target.inodes[0]) target.inodes = target.inodes[1:] // Update target key on parent. target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0) target.key = target.inodes[0].key _assert(len(target.key) > 0, "rebalance(1): zero-length node key") } else { // Reparent and move node. 
if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok { child.parent.removeChild(child) child.parent = n child.parent.children = append(child.parent.children, child) } n.inodes = append(n.inodes, inode{}) copy(n.inodes[1:], n.inodes) n.inodes[0] = target.inodes[len(target.inodes)-1] target.inodes = target.inodes[:len(target.inodes)-1] } // Update parent key for node. n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0) n.key = n.inodes[0].key _assert(len(n.key) > 0, "rebalance(2): zero-length node key") return } // If both this node and the target node are too small then merge them. if useNextSibling { // Reparent all child nodes being moved. for _, inode := range target.inodes { if child, ok := n.bucket.nodes[inode.pgid]; ok { child.parent.removeChild(child) child.parent = n child.parent.children = append(child.parent.children, child) } } // Copy over inodes from target and remove target. n.inodes = append(n.inodes, target.inodes...) n.parent.del(target.key) n.parent.removeChild(target) delete(n.bucket.nodes, target.pgid) target.free() } else { // Reparent all child nodes being moved. for _, inode := range n.inodes { if child, ok := n.bucket.nodes[inode.pgid]; ok { child.parent.removeChild(child) child.parent = target child.parent.children = append(child.parent.children, child) } } // Copy over inodes to target and remove node. target.inodes = append(target.inodes, n.inodes...) n.parent.del(n.key) n.parent.removeChild(n) delete(n.bucket.nodes, n.pgid) n.free() } // Either this node or the target node was deleted from the parent so rebalance it. n.parent.rebalance() } // removes a node from the list of in-memory children. // This does not affect the inodes. func (n *node) removeChild(target *node) { for i, child := range n.children { if child == target { n.children = append(n.children[:i], n.children[i+1:]...) return } } } // dereference causes the node to copy all its inode key/value references to heap memory. 
// This is required when the mmap is reallocated so inodes are not pointing to stale data. func (n *node) dereference() { if n.key != nil { key := make([]byte, len(n.key)) copy(key, n.key) n.key = key _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") } for i := range n.inodes { inode := &n.inodes[i] key := make([]byte, len(inode.key)) copy(key, inode.key) inode.key = key _assert(len(inode.key) > 0, "dereference: zero-length inode key") value := make([]byte, len(inode.value)) copy(value, inode.value) inode.value = value } // Recursively dereference children. for _, child := range n.children { child.dereference() } // Update statistics. n.bucket.tx.stats.NodeDeref++ } // free adds the node's underlying page to the freelist. func (n *node) free() { if n.pgid != 0 { n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) n.pgid = 0 } } // dump writes the contents of the node to STDERR for debugging purposes. /* func (n *node) dump() { // Write node header. var typ = "branch" if n.isLeaf { typ = "leaf" } warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) // Write out abbreviated version of each item. for _, item := range n.inodes { if n.isLeaf { if item.flags&bucketLeafFlag != 0 { bucket := (*bucket)(unsafe.Pointer(&item.value[0])) warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) } else { warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) } } else { warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) } } warn("") } */ type nodes []*node func (s nodes) Len() int { return len(s) } func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } // inode represents an internal node inside of a node. // It can be used to point to elements in a page or point // to an element which hasn't been added to a page yet. 
type inode struct { flags uint32 pgid pgid key []byte value []byte } type inodes []inode docker-1.10.3/vendor/src/github.com/boltdb/bolt/page.go000066400000000000000000000104511267010174400226420ustar00rootroot00000000000000package bolt import ( "fmt" "os" "sort" "unsafe" ) const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) const minKeysPerPage = 2 const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) const ( branchPageFlag = 0x01 leafPageFlag = 0x02 metaPageFlag = 0x04 freelistPageFlag = 0x10 ) const ( bucketLeafFlag = 0x01 ) type pgid uint64 type page struct { id pgid flags uint16 count uint16 overflow uint32 ptr uintptr } // typ returns a human readable page type string used for debugging. func (p *page) typ() string { if (p.flags & branchPageFlag) != 0 { return "branch" } else if (p.flags & leafPageFlag) != 0 { return "leaf" } else if (p.flags & metaPageFlag) != 0 { return "meta" } else if (p.flags & freelistPageFlag) != 0 { return "freelist" } return fmt.Sprintf("unknown<%02x>", p.flags) } // meta returns a pointer to the metadata section of the page. func (p *page) meta() *meta { return (*meta)(unsafe.Pointer(&p.ptr)) } // leafPageElement retrieves the leaf node by index func (p *page) leafPageElement(index uint16) *leafPageElement { n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] return n } // leafPageElements retrieves a list of leaf nodes. func (p *page) leafPageElements() []leafPageElement { return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] } // branchPageElement retrieves the branch node by index func (p *page) branchPageElement(index uint16) *branchPageElement { return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] } // branchPageElements retrieves a list of branch nodes. 
func (p *page) branchPageElements() []branchPageElement { return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] } // dump writes n bytes of the page to STDERR as hex output. func (p *page) hexdump(n int) { buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] fmt.Fprintf(os.Stderr, "%x\n", buf) } type pages []*page func (s pages) Len() int { return len(s) } func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } // branchPageElement represents a node on a branch page. type branchPageElement struct { pos uint32 ksize uint32 pgid pgid } // key returns a byte slice of the node key. func (n *branchPageElement) key() []byte { buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] } // leafPageElement represents a node on a leaf page. type leafPageElement struct { flags uint32 pos uint32 ksize uint32 vsize uint32 } // key returns a byte slice of the node key. func (n *leafPageElement) key() []byte { buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] } // value returns a byte slice of the node value. func (n *leafPageElement) value() []byte { buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] } // PageInfo represents human readable information about a page. type PageInfo struct { ID int Type string Count int OverflowCount int } type pgids []pgid func (s pgids) Len() int { return len(s) } func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s pgids) Less(i, j int) bool { return s[i] < s[j] } // merge returns the sorted union of a and b. func (a pgids) merge(b pgids) pgids { // Return the opposite slice if one is nil. if len(a) == 0 { return b } else if len(b) == 0 { return a } // Create a list to hold all elements from both lists. 
merged := make(pgids, 0, len(a)+len(b)) // Assign lead to the slice with a lower starting value, follow to the higher value. lead, follow := a, b if b[0] < a[0] { lead, follow = b, a } // Continue while there are elements in the lead. for len(lead) > 0 { // Merge largest prefix of lead that is ahead of follow[0]. n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) merged = append(merged, lead[:n]...) if n >= len(lead) { break } // Swap lead and follow. lead, follow = follow, lead[n:] } // Append what's left in follow. merged = append(merged, follow...) return merged } docker-1.10.3/vendor/src/github.com/boltdb/bolt/tx.go000066400000000000000000000410511267010174400223610ustar00rootroot00000000000000package bolt import ( "fmt" "io" "os" "sort" "time" "unsafe" ) // txid represents the internal transaction identifier. type txid uint64 // Tx represents a read-only or read/write transaction on the database. // Read-only transactions can be used for retrieving values for keys and creating cursors. // Read/write transactions can create and remove buckets and create and remove keys. // // IMPORTANT: You must commit or rollback transactions when you are done with // them. Pages can not be reclaimed by the writer until no more transactions // are using them. A long running read transaction can cause the database to // quickly grow. type Tx struct { writable bool managed bool db *DB meta *meta root Bucket pages map[pgid]*page stats TxStats commitHandlers []func() } // init initializes the transaction. func (tx *Tx) init(db *DB) { tx.db = db tx.pages = nil // Copy the meta page since it can be changed by the writer. tx.meta = &meta{} db.meta().copy(tx.meta) // Copy over the root bucket. tx.root = newBucket(tx) tx.root.bucket = &bucket{} *tx.root.bucket = tx.meta.root // Increment the transaction id and add a page cache for writable transactions. 
if tx.writable { tx.pages = make(map[pgid]*page) tx.meta.txid += txid(1) } } // ID returns the transaction id. func (tx *Tx) ID() int { return int(tx.meta.txid) } // DB returns a reference to the database that created the transaction. func (tx *Tx) DB() *DB { return tx.db } // Size returns current database size in bytes as seen by this transaction. func (tx *Tx) Size() int64 { return int64(tx.meta.pgid) * int64(tx.db.pageSize) } // Writable returns whether the transaction can perform write operations. func (tx *Tx) Writable() bool { return tx.writable } // Cursor creates a cursor associated with the root bucket. // All items in the cursor will return a nil value because all root bucket keys point to buckets. // The cursor is only valid as long as the transaction is open. // Do not use a cursor after the transaction is closed. func (tx *Tx) Cursor() *Cursor { return tx.root.Cursor() } // Stats retrieves a copy of the current transaction statistics. func (tx *Tx) Stats() TxStats { return tx.stats } // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. // The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) Bucket(name []byte) *Bucket { return tx.root.Bucket(name) } // CreateBucket creates a new bucket. // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { return tx.root.CreateBucket(name) } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist. // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { return tx.root.CreateBucketIfNotExists(name) } // DeleteBucket deletes a bucket. 
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } // ForEach executes a function for each bucket in the root. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { return tx.root.ForEach(func(k, v []byte) error { if err := fn(k, tx.root.Bucket(k)); err != nil { return err } return nil }) } // OnCommit adds a handler function to be executed after the transaction successfully commits. func (tx *Tx) OnCommit(fn func()) { tx.commitHandlers = append(tx.commitHandlers, fn) } // Commit writes all changes to disk and updates the meta page. // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. func (tx *Tx) Commit() error { _assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { return ErrTxClosed } else if !tx.writable { return ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. // Rebalance nodes which have had deletions. var startTime = time.Now() tx.root.rebalance() if tx.stats.Rebalance > 0 { tx.stats.RebalanceTime += time.Since(startTime) } // spill data onto dirty pages. startTime = time.Now() if err := tx.root.spill(); err != nil { tx.rollback() return err } tx.stats.SpillTime += time.Since(startTime) // Free the old root bucket. tx.meta.root.root = tx.root.root // Free the freelist and allocate new pages for it. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). 
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) if err != nil { tx.rollback() return err } if err := tx.db.freelist.write(p); err != nil { tx.rollback() return err } tx.meta.freelist = p.id // Write dirty pages to disk. startTime = time.Now() if err := tx.write(); err != nil { tx.rollback() return err } // If strict mode is enabled then perform a consistency check. // Only the first consistency error is reported in the panic. if tx.db.StrictMode { if err, ok := <-tx.Check(); ok { panic("check fail: " + err.Error()) } } // Write meta to disk. if err := tx.writeMeta(); err != nil { tx.rollback() return err } tx.stats.WriteTime += time.Since(startTime) // Finalize the transaction. tx.close() // Execute commit handlers now that the locks have been removed. for _, fn := range tx.commitHandlers { fn() } return nil } // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { _assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { return ErrTxClosed } tx.rollback() return nil } func (tx *Tx) rollback() { if tx.db == nil { return } if tx.writable { tx.db.freelist.rollback(tx.meta.txid) tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) } tx.close() } func (tx *Tx) close() { if tx.db == nil { return } if tx.writable { // Grab freelist stats. var freelistFreeN = tx.db.freelist.free_count() var freelistPendingN = tx.db.freelist.pending_count() var freelistAlloc = tx.db.freelist.size() // Remove transaction ref & writer lock. tx.db.rwtx = nil tx.db.rwlock.Unlock() // Merge statistics. 
tx.db.statlock.Lock() tx.db.stats.FreePageN = freelistFreeN tx.db.stats.PendingPageN = freelistPendingN tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize tx.db.stats.FreelistInuse = freelistAlloc tx.db.stats.TxStats.add(&tx.stats) tx.db.statlock.Unlock() } else { tx.db.removeTx(tx) } // Clear all references. tx.db = nil tx.meta = nil tx.root = Bucket{tx: tx} tx.pages = nil } // Copy writes the entire database to a writer. // This function exists for backwards compatibility. Use WriteTo() in func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err } // WriteTo writes the entire database to a writer. // If err == nil then exactly tx.Size() bytes will be written into the writer. func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { // Attempt to open reader directly. var f *os.File if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil { // Fallback to a regular open if that doesn't work. if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil { return 0, err } } // Copy the meta pages. tx.db.metalock.Lock() n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) tx.db.metalock.Unlock() if err != nil { _ = f.Close() return n, fmt.Errorf("meta copy: %s", err) } // Copy data pages. wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) n += wn if err != nil { _ = f.Close() return n, err } return n, f.Close() } // CopyFile copies the entire database to file at the given path. // A reader transaction is maintained during the copy so it is safe to continue // using the database while a copy is in progress. func (tx *Tx) CopyFile(path string, mode os.FileMode) error { f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) if err != nil { return err } err = tx.Copy(f) if err != nil { _ = f.Close() return err } return f.Close() } // Check performs several consistency checks on the database for this transaction. // An error is returned if any inconsistency is found. 
// // It can be safely run concurrently on a writable transaction. However, this // incurs a high cost for large databases and databases with a lot of subbuckets // because of caching. This overhead can be removed if running on a read-only // transaction, however, it is not safe to execute other writer transactions at // the same time. func (tx *Tx) Check() <-chan error { ch := make(chan error) go tx.check(ch) return ch } func (tx *Tx) check(ch chan error) { // Check if any pages are double freed. freed := make(map[pgid]bool) for _, id := range tx.db.freelist.all() { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) } freed[id] = true } // Track every reachable page. reachable := make(map[pgid]*page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) } // Recursively check buckets. tx.checkBucket(&tx.root, reachable, freed, ch) // Ensure all pages below high water mark are either reachable or freed. for i := pgid(0); i < tx.meta.pgid; i++ { _, isReachable := reachable[i] if !isReachable && !freed[i] { ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) } } // Close the channel to signal completion. close(ch) } func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { // Ignore inline buckets. if b.root == 0 { return } // Check every page used by this bucket. b.tx.forEachPage(b.root, 0, func(p *page, _ int) { if p.id > tx.meta.pgid { ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) } // Ensure each page is only referenced once. for i := pgid(0); i <= pgid(p.overflow); i++ { var id = p.id + i if _, ok := reachable[id]; ok { ch <- fmt.Errorf("page %d: multiple references", int(id)) } reachable[id] = p } // We should only encounter un-freed leaf and branch pages. 
if freed[p.id] { ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) } }) // Check each bucket within this bucket. _ = b.ForEach(func(k, v []byte) error { if child := b.Bucket(k); child != nil { tx.checkBucket(child, reachable, freed, ch) } return nil }) } // allocate returns a contiguous block of memory starting at a given page. func (tx *Tx) allocate(count int) (*page, error) { p, err := tx.db.allocate(count) if err != nil { return nil, err } // Save to our page cache. tx.pages[p.id] = p // Update statistics. tx.stats.PageCount++ tx.stats.PageAlloc += count * tx.db.pageSize return p, nil } // write writes any dirty pages to disk. func (tx *Tx) write() error { // Sort pages by id. pages := make(pages, 0, len(tx.pages)) for _, p := range tx.pages { pages = append(pages, p) } sort.Sort(pages) // Write pages to disk in order. for _, p := range pages { size := (int(p.overflow) + 1) * tx.db.pageSize offset := int64(p.id) * int64(tx.db.pageSize) // Write out page in "max allocation" sized chunks. ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) for { // Limit our write to our max allocation size. sz := size if sz > maxAllocSize-1 { sz = maxAllocSize - 1 } // Write chunk to disk. buf := ptr[:sz] if _, err := tx.db.ops.writeAt(buf, offset); err != nil { return err } // Update statistics. tx.stats.Write++ // Exit inner for loop if we've written all the chunks. size -= sz if size == 0 { break } // Otherwise move offset forward and move pointer to next chunk. offset += int64(sz) ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) } } // Ignore file sync if flag is set on DB. if !tx.db.NoSync || IgnoreNoSync { if err := fdatasync(tx.db); err != nil { return err } } // Clear out page cache. tx.pages = make(map[pgid]*page) return nil } // writeMeta writes the meta to the disk. 
func (tx *Tx) writeMeta() error { // Create a temporary buffer for the meta page. buf := make([]byte, tx.db.pageSize) p := tx.db.pageInBuffer(buf, 0) tx.meta.write(p) // Write the meta page to file. if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { return err } if !tx.db.NoSync || IgnoreNoSync { if err := fdatasync(tx.db); err != nil { return err } } // Update statistics. tx.stats.Write++ return nil } // page returns a reference to the page with a given id. // If page has been written to then a temporary bufferred page is returned. func (tx *Tx) page(id pgid) *page { // Check the dirty pages first. if tx.pages != nil { if p, ok := tx.pages[id]; ok { return p } } // Otherwise return directly from the mmap. return tx.db.page(id) } // forEachPage iterates over every page within a given page and executes a function. func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { p := tx.page(pgid) // Execute function. fn(p, depth) // Recursively loop over children. if (p.flags & branchPageFlag) != 0 { for i := 0; i < int(p.count); i++ { elem := p.branchPageElement(uint16(i)) tx.forEachPage(elem.pgid, depth+1, fn) } } } // Page returns page information for a given page number. // This is only safe for concurrent use when used by a writable transaction. func (tx *Tx) Page(id int) (*PageInfo, error) { if tx.db == nil { return nil, ErrTxClosed } else if pgid(id) >= tx.meta.pgid { return nil, nil } // Build the page info. p := tx.db.page(pgid(id)) info := &PageInfo{ ID: id, Count: int(p.count), OverflowCount: int(p.overflow), } // Determine the type (or if it's free). if tx.db.freelist.freed(pgid(id)) { info.Type = "free" } else { info.Type = p.typ() } return info, nil } // TxStats represents statistics about the actions performed by the transaction. type TxStats struct { // Page statistics. PageCount int // number of page allocations PageAlloc int // total bytes allocated // Cursor statistics. 
CursorCount int // number of cursors created // Node statistics NodeCount int // number of node allocations NodeDeref int // number of node dereferences // Rebalance statistics. Rebalance int // number of node rebalances RebalanceTime time.Duration // total time spent rebalancing // Split/Spill statistics. Split int // number of nodes split Spill int // number of nodes spilled SpillTime time.Duration // total time spent spilling // Write statistics. Write int // number of writes performed WriteTime time.Duration // total time spent writing to disk } func (s *TxStats) add(other *TxStats) { s.PageCount += other.PageCount s.PageAlloc += other.PageAlloc s.CursorCount += other.CursorCount s.NodeCount += other.NodeCount s.NodeDeref += other.NodeDeref s.Rebalance += other.Rebalance s.RebalanceTime += other.RebalanceTime s.Split += other.Split s.Spill += other.Spill s.SpillTime += other.SpillTime s.Write += other.Write s.WriteTime += other.WriteTime } // Sub calculates and returns the difference between two sets of transaction stats. // This is useful when obtaining stats at two different points and time and // you need the performance counters that occurred within that time span. 
func (s *TxStats) Sub(other *TxStats) TxStats { var diff TxStats diff.PageCount = s.PageCount - other.PageCount diff.PageAlloc = s.PageAlloc - other.PageAlloc diff.CursorCount = s.CursorCount - other.CursorCount diff.NodeCount = s.NodeCount - other.NodeCount diff.NodeDeref = s.NodeDeref - other.NodeDeref diff.Rebalance = s.Rebalance - other.Rebalance diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime diff.Split = s.Split - other.Split diff.Spill = s.Spill - other.Spill diff.SpillTime = s.SpillTime - other.SpillTime diff.Write = s.Write - other.Write diff.WriteTime = s.WriteTime - other.WriteTime return diff } docker-1.10.3/vendor/src/github.com/coreos/000077500000000000000000000000001267010174400204625ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/etcd/000077500000000000000000000000001267010174400214015ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/etcd/LICENSE000066400000000000000000000261361267010174400224160ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-1.10.3/vendor/src/github.com/coreos/etcd/client/000077500000000000000000000000001267010174400226575ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/etcd/client/README.md000066400000000000000000000075731267010174400241520ustar00rootroot00000000000000# etcd/client etcd/client is the Go client library for etcd. [![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) ## Install ```bash go get github.com/coreos/etcd/client ``` ## Usage ```go package main import ( "log" "time" "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" "github.com/coreos/etcd/client" ) func main() { cfg := client.Config{ Endpoints: []string{"http://127.0.0.1:2379"}, Transport: client.DefaultTransport, // set timeout per request to fail fast when the target endpoint is unavailable HeaderTimeoutPerRequest: time.Second, } c, err := client.New(cfg) if err != nil { log.Fatal(err) } kapi := client.NewKeysAPI(c) resp, err := kapi.Set(context.Background(), "foo", "bar", nil) if err != nil { log.Fatal(err) } } ``` ## Error Handling etcd client might return three types of errors. - context error Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. 
If the context is canceled or reaches its deadline, the responding context error will be returned no matter what internal errors the API call has already encountered. - cluster error Each API call tries to send request to the cluster endpoints one by one until it successfully gets a response. If a requests to an endpoint fails, due to exceeding per request timeout or connection issues, the error will be added into a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned. - response error If the response gets from the cluster is invalid, a plain string error will be returned. For example, it might be a invalid JSON error. Here is the example code to handle client errors: ```go cfg := client.Config{Endpoints: []string{"http://etcd1:2379,http://etcd2:2379,http://etcd3:2379"}} c, err := client.New(cfg) if err != nil { log.Fatal(err) } kapi := client.NewKeysAPI(c) resp, err := kapi.Set(ctx, "test", "bar", nil) if err != nil { if err == context.Canceled { // ctx is canceled by another routine } else if err == context.DeadlineExceeded { // ctx is attached with a deadline and it exceeded } else if cerr, ok := err.(*client.ClusterError); ok { // process (cerr.Errors) } else { // bad cluster endpoints, which are not etcd servers } } ``` ## Caveat 1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process. 2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. 
If it fails to talk to all available endpoints, it will return all errors happened. 3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention. 4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users could monitor experimental /health endpoint for member healthy information. We are improving it at [#3265](https://github.com/coreos/etcd/issues/3265). docker-1.10.3/vendor/src/github.com/coreos/etcd/client/auth_role.go000066400000000000000000000131211267010174400251660ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package client import ( "bytes" "encoding/json" "net/http" "net/url" "golang.org/x/net/context" ) type Role struct { Role string `json:"role"` Permissions Permissions `json:"permissions"` Grant *Permissions `json:"grant,omitempty"` Revoke *Permissions `json:"revoke,omitempty"` } type Permissions struct { KV rwPermission `json:"kv"` } type rwPermission struct { Read []string `json:"read"` Write []string `json:"write"` } type PermissionType int const ( ReadPermission PermissionType = iota WritePermission ReadWritePermission ) // NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to // interact with etcd's role creation and modification features. func NewAuthRoleAPI(c Client) AuthRoleAPI { return &httpAuthRoleAPI{ client: c, } } type AuthRoleAPI interface { // Add a role. AddRole(ctx context.Context, role string) error // Remove a role. RemoveRole(ctx context.Context, role string) error // Get role details. GetRole(ctx context.Context, role string) (*Role, error) // Grant a role some permission prefixes for the KV store. GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) // Revoke some some permission prefixes for a role on the KV store. RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) // List roles. 
ListRoles(ctx context.Context) ([]string, error) } type httpAuthRoleAPI struct { client httpClient } type authRoleAPIAction struct { verb string name string role *Role } type authRoleAPIList struct{} func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { u := v2AuthURL(ep, "roles", "") req, _ := http.NewRequest("GET", u.String(), nil) req.Header.Set("Content-Type", "application/json") return req } func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { u := v2AuthURL(ep, "roles", l.name) if l.role == nil { req, _ := http.NewRequest(l.verb, u.String(), nil) return req } b, err := json.Marshal(l.role) if err != nil { panic(err) } body := bytes.NewReader(b) req, _ := http.NewRequest(l.verb, u.String(), body) req.Header.Set("Content-Type", "application/json") return req } func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) if err != nil { return nil, err } if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { return nil, err } var userList struct { Roles []string `json:"roles"` } err = json.Unmarshal(body, &userList) if err != nil { return nil, err } return userList.Roles, nil } func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { role := &Role{ Role: rolename, } return r.addRemoveRole(ctx, &authRoleAPIAction{ verb: "PUT", name: rolename, role: role, }) } func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { return r.addRemoveRole(ctx, &authRoleAPIAction{ verb: "DELETE", name: rolename, }) } func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { resp, body, err := r.client.Do(ctx, req) if err != nil { return err } if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { var sec authError err := json.Unmarshal(body, &sec) if err != nil { return err } return sec } return nil } func (r *httpAuthRoleAPI) GetRole(ctx 
context.Context, rolename string) (*Role, error) { return r.modRole(ctx, &authRoleAPIAction{ verb: "GET", name: rolename, }) } func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { var out rwPermission switch permType { case ReadPermission: out.Read = prefixes case WritePermission: out.Write = prefixes case ReadWritePermission: out.Read = prefixes out.Write = prefixes } return out } func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { rwp := buildRWPermission(prefixes, permType) role := &Role{ Role: rolename, Grant: &Permissions{ KV: rwp, }, } return r.modRole(ctx, &authRoleAPIAction{ verb: "PUT", name: rolename, role: role, }) } func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { rwp := buildRWPermission(prefixes, permType) role := &Role{ Role: rolename, Revoke: &Permissions{ KV: rwp, }, } return r.modRole(ctx, &authRoleAPIAction{ verb: "PUT", name: rolename, role: role, }) } func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { resp, body, err := r.client.Do(ctx, req) if err != nil { return nil, err } if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { var sec authError err := json.Unmarshal(body, &sec) if err != nil { return nil, err } return nil, sec } var role Role err = json.Unmarshal(body, &role) if err != nil { return nil, err } return &role, nil } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/auth_user.go000066400000000000000000000157211267010174400252130ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client import ( "bytes" "encoding/json" "net/http" "net/url" "path" "golang.org/x/net/context" ) var ( defaultV2AuthPrefix = "/v2/auth" ) type User struct { User string `json:"user"` Password string `json:"password,omitempty"` Roles []string `json:"roles"` Grant []string `json:"grant,omitempty"` Revoke []string `json:"revoke,omitempty"` } func v2AuthURL(ep url.URL, action string, name string) *url.URL { if name != "" { ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) return &ep } ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) return &ep } // NewAuthAPI constructs a new AuthAPI that uses HTTP to // interact with etcd's general auth features. func NewAuthAPI(c Client) AuthAPI { return &httpAuthAPI{ client: c, } } type AuthAPI interface { // Enable auth. Enable(ctx context.Context) error // Disable auth. 
Disable(ctx context.Context) error } type httpAuthAPI struct { client httpClient } func (s *httpAuthAPI) Enable(ctx context.Context) error { return s.enableDisable(ctx, &authAPIAction{"PUT"}) } func (s *httpAuthAPI) Disable(ctx context.Context) error { return s.enableDisable(ctx, &authAPIAction{"DELETE"}) } func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { resp, body, err := s.client.Do(ctx, req) if err != nil { return err } if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { var sec authError err := json.Unmarshal(body, &sec) if err != nil { return err } return sec } return nil } type authAPIAction struct { verb string } func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { u := v2AuthURL(ep, "enable", "") req, _ := http.NewRequest(l.verb, u.String(), nil) return req } type authError struct { Message string `json:"message"` Code int `json:"-"` } func (e authError) Error() string { return e.Message } // NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to // interact with etcd's user creation and modification features. func NewAuthUserAPI(c Client) AuthUserAPI { return &httpAuthUserAPI{ client: c, } } type AuthUserAPI interface { // Add a user. AddUser(ctx context.Context, username string, password string) error // Remove a user. RemoveUser(ctx context.Context, username string) error // Get user details. GetUser(ctx context.Context, username string) (*User, error) // Grant a user some permission roles. GrantUser(ctx context.Context, username string, roles []string) (*User, error) // Revoke some permission roles from a user. RevokeUser(ctx context.Context, username string, roles []string) (*User, error) // Change the user's password. ChangePassword(ctx context.Context, username string, password string) (*User, error) // List users. 
ListUsers(ctx context.Context) ([]string, error) } type httpAuthUserAPI struct { client httpClient } type authUserAPIAction struct { verb string username string user *User } type authUserAPIList struct{} func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { u := v2AuthURL(ep, "users", "") req, _ := http.NewRequest("GET", u.String(), nil) req.Header.Set("Content-Type", "application/json") return req } func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { u := v2AuthURL(ep, "users", l.username) if l.user == nil { req, _ := http.NewRequest(l.verb, u.String(), nil) return req } b, err := json.Marshal(l.user) if err != nil { panic(err) } body := bytes.NewReader(b) req, _ := http.NewRequest(l.verb, u.String(), body) req.Header.Set("Content-Type", "application/json") return req } func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { resp, body, err := u.client.Do(ctx, &authUserAPIList{}) if err != nil { return nil, err } if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { var sec authError err := json.Unmarshal(body, &sec) if err != nil { return nil, err } return nil, sec } var userList struct { Users []string `json:"users"` } err = json.Unmarshal(body, &userList) if err != nil { return nil, err } return userList.Users, nil } func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { user := &User{ User: username, Password: password, } return u.addRemoveUser(ctx, &authUserAPIAction{ verb: "PUT", username: username, user: user, }) } func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { return u.addRemoveUser(ctx, &authUserAPIAction{ verb: "DELETE", username: username, }) } func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { resp, body, err := u.client.Do(ctx, req) if err != nil { return err } if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { var sec 
authError err := json.Unmarshal(body, &sec) if err != nil { return err } return sec } return nil } func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { return u.modUser(ctx, &authUserAPIAction{ verb: "GET", username: username, }) } func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { user := &User{ User: username, Grant: roles, } return u.modUser(ctx, &authUserAPIAction{ verb: "PUT", username: username, user: user, }) } func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { user := &User{ User: username, Revoke: roles, } return u.modUser(ctx, &authUserAPIAction{ verb: "PUT", username: username, user: user, }) } func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { user := &User{ User: username, Password: password, } return u.modUser(ctx, &authUserAPIAction{ verb: "PUT", username: username, user: user, }) } func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { resp, body, err := u.client.Do(ctx, req) if err != nil { return nil, err } if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { var sec authError err := json.Unmarshal(body, &sec) if err != nil { return nil, err } return nil, sec } var user User err = json.Unmarshal(body, &user) if err != nil { return nil, err } return &user, nil } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/cancelreq.go000066400000000000000000000006461267010174400251510ustar00rootroot00000000000000// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// borrowed from golang/net/context/ctxhttp/cancelreq.go // +build go1.5 package client import "net/http" func requestCanceler(tr CancelableTransport, req *http.Request) func() { ch := make(chan struct{}) req.Cancel = ch return func() { close(ch) } } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go000066400000000000000000000006131267010174400257750ustar00rootroot00000000000000// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // borrowed from golang/net/context/ctxhttp/cancelreq_go14.go // +build !go1.5 package client import "net/http" func requestCanceler(tr CancelableTransport, req *http.Request) func() { return func() { tr.CancelRequest(req) } } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/client.go000066400000000000000000000313011267010174400244620ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package client import ( "errors" "fmt" "io/ioutil" "math/rand" "net" "net/http" "net/url" "reflect" "sort" "sync" "time" "golang.org/x/net/context" ) var ( ErrNoEndpoints = errors.New("client: no endpoints available") ErrTooManyRedirects = errors.New("client: too many redirects") ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") errTooManyRedirectChecks = errors.New("client: too many redirect checks") ) var DefaultRequestTimeout = 5 * time.Second var DefaultTransport CancelableTransport = &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, TLSHandshakeTimeout: 10 * time.Second, } type Config struct { // Endpoints defines a set of URLs (schemes, hosts and ports only) // that can be used to communicate with a logical etcd cluster. For // example, a three-node cluster could be provided like so: // // Endpoints: []string{ // "http://node1.example.com:2379", // "http://node2.example.com:2379", // "http://node3.example.com:2379", // } // // If multiple endpoints are provided, the Client will attempt to // use them all in the event that one or more of them are unusable. // // If Client.Sync is ever called, the Client may cache an alternate // set of endpoints to continue operation. Endpoints []string // Transport is used by the Client to drive HTTP requests. If not // provided, DefaultTransport will be used. Transport CancelableTransport // CheckRedirect specifies the policy for handling HTTP redirects. // If CheckRedirect is not nil, the Client calls it before // following an HTTP redirect. The sole argument is the number of // requests that have alrady been made. If CheckRedirect returns // an error, Client.Do will not make any further requests and return // the error back it to the caller. // // If CheckRedirect is nil, the Client uses its default policy, // which is to stop after 10 consecutive requests. 
CheckRedirect CheckRedirectFunc // Username specifies the user credential to add as an authorization header Username string // Password is the password for the specified user to add as an authorization header // to the request. Password string // HeaderTimeoutPerRequest specifies the time limit to wait for response // header in a single request made by the Client. The timeout includes // connection time, any redirects, and header wait time. // // For non-watch GET request, server returns the response body immediately. // For PUT/POST/DELETE request, server will attempt to commit request // before responding, which is expected to take `100ms + 2 * RTT`. // For watch request, server returns the header immediately to notify Client // watch start. But if server is behind some kind of proxy, the response // header may be cached at proxy, and Client cannot rely on this behavior. // // One API call may send multiple requests to different etcd servers until it // succeeds. Use context of the API to specify the overall timeout. // // A HeaderTimeoutPerRequest of zero means no timeout. HeaderTimeoutPerRequest time.Duration } func (cfg *Config) transport() CancelableTransport { if cfg.Transport == nil { return DefaultTransport } return cfg.Transport } func (cfg *Config) checkRedirect() CheckRedirectFunc { if cfg.CheckRedirect == nil { return DefaultCheckRedirect } return cfg.CheckRedirect } // CancelableTransport mimics net/http.Transport, but requires that // the object also support request cancellation. type CancelableTransport interface { http.RoundTripper CancelRequest(req *http.Request) } type CheckRedirectFunc func(via int) error // DefaultCheckRedirect follows up to 10 redirects, but no more. var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { if via > 10 { return ErrTooManyRedirects } return nil } type Client interface { // Sync updates the internal cache of the etcd cluster's membership. 
Sync(context.Context) error // AutoSync periodically calls Sync() every given interval. // The recommended sync interval is 10 seconds to 1 minute, which does // not bring too much overhead to server and makes client catch up the // cluster change in time. // // The example to use it: // // for { // err := client.AutoSync(ctx, 10*time.Second) // if err == context.DeadlineExceeded || err == context.Canceled { // break // } // log.Print(err) // } AutoSync(context.Context, time.Duration) error // Endpoints returns a copy of the current set of API endpoints used // by Client to resolve HTTP requests. If Sync has ever been called, // this may differ from the initial Endpoints provided in the Config. Endpoints() []string httpClient } func New(cfg Config) (Client, error) { c := &httpClusterClient{ clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), } if cfg.Username != "" { c.credentials = &credentials{ username: cfg.Username, password: cfg.Password, } } if err := c.reset(cfg.Endpoints); err != nil { return nil, err } return c, nil } type httpClient interface { Do(context.Context, httpAction) (*http.Response, []byte, error) } func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { return func(ep url.URL) httpClient { return &redirectFollowingHTTPClient{ checkRedirect: cr, client: &simpleHTTPClient{ transport: tr, endpoint: ep, headerTimeout: headerTimeout, }, } } } type credentials struct { username string password string } type httpClientFactory func(url.URL) httpClient type httpAction interface { HTTPRequest(url.URL) *http.Request } type httpClusterClient struct { clientFactory httpClientFactory endpoints []url.URL pinned int credentials *credentials sync.RWMutex rand *rand.Rand } func (c *httpClusterClient) reset(eps []string) error { if len(eps) == 0 { return ErrNoEndpoints } neps := 
make([]url.URL, len(eps)) for i, ep := range eps { u, err := url.Parse(ep) if err != nil { return err } neps[i] = *u } c.endpoints = shuffleEndpoints(c.rand, neps) // TODO: pin old endpoint if possible, and rebalance when new endpoint appears c.pinned = 0 return nil } func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { action := act c.RLock() leps := len(c.endpoints) eps := make([]url.URL, leps) n := copy(eps, c.endpoints) pinned := c.pinned if c.credentials != nil { action = &authedAction{ act: act, credentials: *c.credentials, } } c.RUnlock() if leps == 0 { return nil, nil, ErrNoEndpoints } if leps != n { return nil, nil, errors.New("unable to pick endpoint: copy failed") } var resp *http.Response var body []byte var err error cerr := &ClusterError{} for i := pinned; i < leps+pinned; i++ { k := i % leps hc := c.clientFactory(eps[k]) resp, body, err = hc.Do(ctx, action) if err != nil { cerr.Errors = append(cerr.Errors, err) // mask previous errors with context error, which is controlled by user if err == context.Canceled || err == context.DeadlineExceeded { return nil, nil, err } continue } if resp.StatusCode/100 == 5 { switch resp.StatusCode { case http.StatusInternalServerError, http.StatusServiceUnavailable: // TODO: make sure this is a no leader response cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) default: cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) } continue } if k != pinned { c.Lock() c.pinned = k c.Unlock() } return resp, body, nil } return nil, nil, cerr } func (c *httpClusterClient) Endpoints() []string { c.RLock() defer c.RUnlock() eps := make([]string, len(c.endpoints)) for i, ep := range c.endpoints { eps[i] = ep.String() } return eps } func (c *httpClusterClient) Sync(ctx context.Context) error { mAPI := NewMembersAPI(c) ms, err := 
mAPI.List(ctx) if err != nil { return err } c.Lock() defer c.Unlock() eps := make([]string, 0) for _, m := range ms { eps = append(eps, m.ClientURLs...) } sort.Sort(sort.StringSlice(eps)) ceps := make([]string, len(c.endpoints)) for i, cep := range c.endpoints { ceps[i] = cep.String() } sort.Sort(sort.StringSlice(ceps)) // fast path if no change happens // this helps client to pin the endpoint when no cluster change if reflect.DeepEqual(eps, ceps) { return nil } return c.reset(eps) } func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { ticker := time.NewTicker(interval) defer ticker.Stop() for { err := c.Sync(ctx) if err != nil { return err } select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } } type roundTripResponse struct { resp *http.Response err error } type simpleHTTPClient struct { transport CancelableTransport endpoint url.URL headerTimeout time.Duration } func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { req := act.HTTPRequest(c.endpoint) if err := printcURL(req); err != nil { return nil, nil, err } hctx, hcancel := context.WithCancel(ctx) if c.headerTimeout > 0 { hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) } defer hcancel() reqcancel := requestCanceler(c.transport, req) rtchan := make(chan roundTripResponse, 1) go func() { resp, err := c.transport.RoundTrip(req) rtchan <- roundTripResponse{resp: resp, err: err} close(rtchan) }() var resp *http.Response var err error select { case rtresp := <-rtchan: resp, err = rtresp.resp, rtresp.err case <-hctx.Done(): // cancel and wait for request to actually exit before continuing reqcancel() rtresp := <-rtchan resp = rtresp.resp switch { case ctx.Err() != nil: err = ctx.Err() case hctx.Err() != nil: err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) default: panic("failed to get error from context") } } // always check for resp nil-ness to deal with possible // race 
conditions between channels above defer func() { if resp != nil { resp.Body.Close() } }() if err != nil { return nil, nil, err } var body []byte done := make(chan struct{}) go func() { body, err = ioutil.ReadAll(resp.Body) done <- struct{}{} }() select { case <-ctx.Done(): resp.Body.Close() <-done return nil, nil, ctx.Err() case <-done: } return resp, body, err } type authedAction struct { act httpAction credentials credentials } func (a *authedAction) HTTPRequest(url url.URL) *http.Request { r := a.act.HTTPRequest(url) r.SetBasicAuth(a.credentials.username, a.credentials.password) return r } type redirectFollowingHTTPClient struct { client httpClient checkRedirect CheckRedirectFunc } func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { next := act for i := 0; i < 100; i++ { if i > 0 { if err := r.checkRedirect(i); err != nil { return nil, nil, err } } resp, body, err := r.client.Do(ctx, next) if err != nil { return nil, nil, err } if resp.StatusCode/100 == 3 { hdr := resp.Header.Get("Location") if hdr == "" { return nil, nil, fmt.Errorf("Location header not set") } loc, err := url.Parse(hdr) if err != nil { return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) } next = &redirectedHTTPAction{ action: act, location: *loc, } continue } return resp, body, nil } return nil, nil, errTooManyRedirectChecks } type redirectedHTTPAction struct { action httpAction location url.URL } func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { orig := r.action.HTTPRequest(ep) orig.URL = &r.location return orig } func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { p := r.Perm(len(eps)) neps := make([]url.URL, len(eps)) for i, k := range p { neps[i] = eps[k] } return neps } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/cluster_error.go000066400000000000000000000015711267010174400261040ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client import "fmt" type ClusterError struct { Errors []error } func (ce *ClusterError) Error() string { return ErrClusterUnavailable.Error() } func (ce *ClusterError) Detail() string { s := "" for i, e := range ce.Errors { s += fmt.Sprintf("error #%d: %s\n", i, e) } return s } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/curl.go000066400000000000000000000027341267010174400241610ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client import ( "bytes" "fmt" "io/ioutil" "net/http" "os" ) var ( cURLDebug = false ) func EnablecURLDebug() { cURLDebug = true } func DisablecURLDebug() { cURLDebug = false } // printcURL prints the cURL equivalent request to stderr. // It returns an error if the body of the request cannot // be read. // The caller MUST cancel the request if there is an error. 
func printcURL(req *http.Request) error { if !cURLDebug { return nil } var ( command string b []byte err error ) if req.URL != nil { command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) } if req.Body != nil { b, err = ioutil.ReadAll(req.Body) if err != nil { return err } command += fmt.Sprintf(" -d %q", string(b)) } fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) // reset body body := bytes.NewBuffer(b) req.Body = ioutil.NopCloser(body) return nil } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/discover.go000066400000000000000000000014331267010174400250250ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client // Discoverer is an interface that wraps the Discover method. type Discoverer interface { // Dicover looks up the etcd servers for the domain. Discover(domain string) ([]string, error) } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/doc.go000066400000000000000000000034071267010174400237570ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package client provides bindings for the etcd APIs. Create a Config and exchange it for a Client: import ( "net/http" "github.com/coreos/etcd/client" "golang.org/x/net/context" ) cfg := client.Config{ Endpoints: []string{"http://127.0.0.1:2379"}, Transport: DefaultTransport, } c, err := client.New(cfg) if err != nil { // handle error } Create a KeysAPI using the Client, then use it to interact with etcd: kAPI := client.NewKeysAPI(c) // create a new key /foo with the value "bar" _, err = kAPI.Create(context.Background(), "/foo", "bar") if err != nil { // handle error } // delete the newly created key only if the value is still "bar" _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) if err != nil { // handle error } Use a custom context to set timeouts on your operations: import "time" ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // set a new key, ignoring it's previous state _, err := kAPI.Set(ctx, "/ping", "pong", nil) if err != nil { if err == context.DeadlineExceeded { // request took longer than 5s } else { // handle error } } */ package client docker-1.10.3/vendor/src/github.com/coreos/etcd/client/keys.generated.go000066400000000000000000000412141267010174400261200ustar00rootroot00000000000000// ************************************************************ // DO NOT EDIT. // THIS FILE IS AUTO-GENERATED BY codecgen. 
// ************************************************************ package client import ( "errors" "fmt" codec1978 "github.com/ugorji/go/codec" "reflect" "runtime" "time" ) const ( codecSelferC_UTF85311 = 1 codecSelferC_RAW5311 = 0 codecSelverValueTypeArray5311 = 10 codecSelverValueTypeMap5311 = 9 ) var ( codecSelferBitsize5311 = uint8(reflect.TypeOf(uint(0)).Bits()) codecSelferOnlyMapOrArrayEncodeToStructErr5311 = errors.New(`only encoded map or array can be decoded into a struct`) ) type codecSelfer5311 struct{} func init() { if codec1978.GenVersion != 2 { _, file, _, _ := runtime.Caller(0) err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", 2, codec1978.GenVersion, file) panic(err) } if false { // reference the types, but skip this branch at build/run time var v0 time.Time _ = v0 } } func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer5311 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { yysep1 := !z.EncBinary() yy2arr1 := z.EncBasicHandle().StructToArray var yyfirst1 bool var yyq1 [3]bool _, _, _, _ = yysep1, yyfirst1, yyq1, yy2arr1 const yyr1 bool = false if yyr1 || yy2arr1 { r.EncodeArrayStart(3) } else { var yynn1 int = 3 for _, b := range yyq1 { if b { yynn1++ } } r.EncodeMapStart(yynn1) } if yyr1 || yy2arr1 { r.EncodeString(codecSelferC_UTF85311, string(x.Action)) } else { yyfirst1 = true r.EncodeString(codecSelferC_UTF85311, string("action")) if yysep1 { r.EncodeMapKVSeparator() } r.EncodeString(codecSelferC_UTF85311, string(x.Action)) } if yyr1 || yy2arr1 { if yysep1 { r.EncodeArrayEntrySeparator() } if x.Node == nil { r.EncodeNil() } else { x.Node.CodecEncodeSelf(e) } } else { if yyfirst1 { r.EncodeMapEntrySeparator() } else { yyfirst1 = true } r.EncodeString(codecSelferC_UTF85311, string("node")) if yysep1 { r.EncodeMapKVSeparator() } if x.Node == nil { r.EncodeNil() } else { x.Node.CodecEncodeSelf(e) } } if yyr1 || yy2arr1 { if yysep1 { 
r.EncodeArrayEntrySeparator() } if x.PrevNode == nil { r.EncodeNil() } else { x.PrevNode.CodecEncodeSelf(e) } } else { if yyfirst1 { r.EncodeMapEntrySeparator() } else { yyfirst1 = true } r.EncodeString(codecSelferC_UTF85311, string("prevNode")) if yysep1 { r.EncodeMapKVSeparator() } if x.PrevNode == nil { r.EncodeNil() } else { x.PrevNode.CodecEncodeSelf(e) } } if yysep1 { if yyr1 || yy2arr1 { r.EncodeArrayEnd() } else { r.EncodeMapEnd() } } } } func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer5311 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r if r.IsContainerType(codecSelverValueTypeMap5311) { yyl5 := r.ReadMapStart() if yyl5 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl5, d) } } else if r.IsContainerType(codecSelverValueTypeArray5311) { yyl5 := r.ReadArrayStart() if yyl5 == 0 { r.ReadArrayEnd() } else { x.codecDecodeSelfFromArray(yyl5, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr5311) } } func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer5311 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yys6Slc = z.DecScratchBuffer() // default slice to decode into _ = yys6Slc var yyhl6 bool = l >= 0 for yyj6 := 0; ; yyj6++ { if yyhl6 { if yyj6 >= l { break } } else { if r.CheckBreak() { break } if yyj6 > 0 { r.ReadMapEntrySeparator() } } yys6Slc = r.DecodeBytes(yys6Slc, true, true) yys6 := string(yys6Slc) if !yyhl6 { r.ReadMapKVSeparator() } switch yys6 { case "action": if r.TryDecodeAsNil() { x.Action = "" } else { x.Action = string(r.DecodeString()) } case "node": if r.TryDecodeAsNil() { if x.Node != nil { x.Node = nil } } else { if x.Node == nil { x.Node = new(Node) } x.Node.CodecDecodeSelf(d) } case "prevNode": if r.TryDecodeAsNil() { if x.PrevNode != nil { x.PrevNode = nil } } else { if x.PrevNode == nil { x.PrevNode = new(Node) } x.PrevNode.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys6) } // end switch yys6 } // end for yyj6 if 
!yyhl6 { r.ReadMapEnd() } } func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer5311 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj10 int var yyb10 bool var yyhl10 bool = l >= 0 yyj10++ if yyhl10 { yyb10 = yyj10 > l } else { yyb10 = r.CheckBreak() } if yyb10 { r.ReadArrayEnd() return } if r.TryDecodeAsNil() { x.Action = "" } else { x.Action = string(r.DecodeString()) } yyj10++ if yyhl10 { yyb10 = yyj10 > l } else { yyb10 = r.CheckBreak() } if yyb10 { r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { if x.Node != nil { x.Node = nil } } else { if x.Node == nil { x.Node = new(Node) } x.Node.CodecDecodeSelf(d) } yyj10++ if yyhl10 { yyb10 = yyj10 > l } else { yyb10 = r.CheckBreak() } if yyb10 { r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { if x.PrevNode != nil { x.PrevNode = nil } } else { if x.PrevNode == nil { x.PrevNode = new(Node) } x.PrevNode.CodecDecodeSelf(d) } for { yyj10++ if yyhl10 { yyb10 = yyj10 > l } else { yyb10 = r.CheckBreak() } if yyb10 { break } if yyj10 > 1 { r.ReadArrayEntrySeparator() } z.DecStructFieldNotFound(yyj10-1, "") } r.ReadArrayEnd() } func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer5311 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { yysep14 := !z.EncBinary() yy2arr14 := z.EncBasicHandle().StructToArray var yyfirst14 bool var yyq14 [8]bool _, _, _, _ = yysep14, yyfirst14, yyq14, yy2arr14 const yyr14 bool = false yyq14[1] = x.Dir != false yyq14[6] = x.Expiration != nil yyq14[7] = x.TTL != 0 if yyr14 || yy2arr14 { r.EncodeArrayStart(8) } else { var yynn14 int = 5 for _, b := range yyq14 { if b { yynn14++ } } r.EncodeMapStart(yynn14) } if yyr14 || yy2arr14 { r.EncodeString(codecSelferC_UTF85311, string(x.Key)) } else { yyfirst14 = true r.EncodeString(codecSelferC_UTF85311, string("key")) if yysep14 { r.EncodeMapKVSeparator() } 
r.EncodeString(codecSelferC_UTF85311, string(x.Key)) } if yyr14 || yy2arr14 { if yysep14 { r.EncodeArrayEntrySeparator() } if yyq14[1] { r.EncodeBool(bool(x.Dir)) } else { r.EncodeBool(false) } } else { if yyq14[1] { if yyfirst14 { r.EncodeMapEntrySeparator() } else { yyfirst14 = true } r.EncodeString(codecSelferC_UTF85311, string("dir")) if yysep14 { r.EncodeMapKVSeparator() } r.EncodeBool(bool(x.Dir)) } } if yyr14 || yy2arr14 { if yysep14 { r.EncodeArrayEntrySeparator() } r.EncodeString(codecSelferC_UTF85311, string(x.Value)) } else { if yyfirst14 { r.EncodeMapEntrySeparator() } else { yyfirst14 = true } r.EncodeString(codecSelferC_UTF85311, string("value")) if yysep14 { r.EncodeMapKVSeparator() } r.EncodeString(codecSelferC_UTF85311, string(x.Value)) } if yyr14 || yy2arr14 { if yysep14 { r.EncodeArrayEntrySeparator() } if x.Nodes == nil { r.EncodeNil() } else { h.encSlicePtrtoNode(([]*Node)(x.Nodes), e) } } else { if yyfirst14 { r.EncodeMapEntrySeparator() } else { yyfirst14 = true } r.EncodeString(codecSelferC_UTF85311, string("nodes")) if yysep14 { r.EncodeMapKVSeparator() } if x.Nodes == nil { r.EncodeNil() } else { h.encSlicePtrtoNode(([]*Node)(x.Nodes), e) } } if yyr14 || yy2arr14 { if yysep14 { r.EncodeArrayEntrySeparator() } r.EncodeUint(uint64(x.CreatedIndex)) } else { if yyfirst14 { r.EncodeMapEntrySeparator() } else { yyfirst14 = true } r.EncodeString(codecSelferC_UTF85311, string("createdIndex")) if yysep14 { r.EncodeMapKVSeparator() } r.EncodeUint(uint64(x.CreatedIndex)) } if yyr14 || yy2arr14 { if yysep14 { r.EncodeArrayEntrySeparator() } r.EncodeUint(uint64(x.ModifiedIndex)) } else { if yyfirst14 { r.EncodeMapEntrySeparator() } else { yyfirst14 = true } r.EncodeString(codecSelferC_UTF85311, string("modifiedIndex")) if yysep14 { r.EncodeMapKVSeparator() } r.EncodeUint(uint64(x.ModifiedIndex)) } if yyr14 || yy2arr14 { if yysep14 { r.EncodeArrayEntrySeparator() } if yyq14[6] { if x.Expiration == nil { r.EncodeNil() } else { z.EncFallback(x.Expiration) 
} } else { r.EncodeNil() } } else { if yyq14[6] { if yyfirst14 { r.EncodeMapEntrySeparator() } else { yyfirst14 = true } r.EncodeString(codecSelferC_UTF85311, string("expiration")) if yysep14 { r.EncodeMapKVSeparator() } if x.Expiration == nil { r.EncodeNil() } else { z.EncFallback(x.Expiration) } } } if yyr14 || yy2arr14 { if yysep14 { r.EncodeArrayEntrySeparator() } if yyq14[7] { r.EncodeInt(int64(x.TTL)) } else { r.EncodeInt(0) } } else { if yyq14[7] { if yyfirst14 { r.EncodeMapEntrySeparator() } else { yyfirst14 = true } r.EncodeString(codecSelferC_UTF85311, string("ttl")) if yysep14 { r.EncodeMapKVSeparator() } r.EncodeInt(int64(x.TTL)) } } if yysep14 { if yyr14 || yy2arr14 { r.EncodeArrayEnd() } else { r.EncodeMapEnd() } } } } func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer5311 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r if r.IsContainerType(codecSelverValueTypeMap5311) { yyl23 := r.ReadMapStart() if yyl23 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl23, d) } } else if r.IsContainerType(codecSelverValueTypeArray5311) { yyl23 := r.ReadArrayStart() if yyl23 == 0 { r.ReadArrayEnd() } else { x.codecDecodeSelfFromArray(yyl23, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr5311) } } func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer5311 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yys24Slc = z.DecScratchBuffer() // default slice to decode into _ = yys24Slc var yyhl24 bool = l >= 0 for yyj24 := 0; ; yyj24++ { if yyhl24 { if yyj24 >= l { break } } else { if r.CheckBreak() { break } if yyj24 > 0 { r.ReadMapEntrySeparator() } } yys24Slc = r.DecodeBytes(yys24Slc, true, true) yys24 := string(yys24Slc) if !yyhl24 { r.ReadMapKVSeparator() } switch yys24 { case "key": if r.TryDecodeAsNil() { x.Key = "" } else { x.Key = string(r.DecodeString()) } case "dir": if r.TryDecodeAsNil() { x.Dir = false } else { x.Dir = bool(r.DecodeBool()) } case "value": if 
r.TryDecodeAsNil() { x.Value = "" } else { x.Value = string(r.DecodeString()) } case "nodes": if r.TryDecodeAsNil() { x.Nodes = nil } else { yyv28 := &x.Nodes h.decSlicePtrtoNode((*[]*Node)(yyv28), d) } case "createdIndex": if r.TryDecodeAsNil() { x.CreatedIndex = 0 } else { x.CreatedIndex = uint64(r.DecodeUint(64)) } case "modifiedIndex": if r.TryDecodeAsNil() { x.ModifiedIndex = 0 } else { x.ModifiedIndex = uint64(r.DecodeUint(64)) } case "expiration": if r.TryDecodeAsNil() { if x.Expiration != nil { x.Expiration = nil } } else { if x.Expiration == nil { x.Expiration = new(time.Time) } z.DecFallback(x.Expiration, false) } case "ttl": if r.TryDecodeAsNil() { x.TTL = 0 } else { x.TTL = int64(r.DecodeInt(64)) } default: z.DecStructFieldNotFound(-1, yys24) } // end switch yys24 } // end for yyj24 if !yyhl24 { r.ReadMapEnd() } } func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer5311 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj33 int var yyb33 bool var yyhl33 bool = l >= 0 yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { r.ReadArrayEnd() return } if r.TryDecodeAsNil() { x.Key = "" } else { x.Key = string(r.DecodeString()) } yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { x.Dir = false } else { x.Dir = bool(r.DecodeBool()) } yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { x.Value = "" } else { x.Value = string(r.DecodeString()) } yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { x.Nodes = nil } else { yyv37 := &x.Nodes h.decSlicePtrtoNode((*[]*Node)(yyv37), d) } yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { 
r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { x.CreatedIndex = 0 } else { x.CreatedIndex = uint64(r.DecodeUint(64)) } yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { x.ModifiedIndex = 0 } else { x.ModifiedIndex = uint64(r.DecodeUint(64)) } yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { if x.Expiration != nil { x.Expiration = nil } } else { if x.Expiration == nil { x.Expiration = new(time.Time) } z.DecFallback(x.Expiration, false) } yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { r.ReadArrayEnd() return } r.ReadArrayEntrySeparator() if r.TryDecodeAsNil() { x.TTL = 0 } else { x.TTL = int64(r.DecodeInt(64)) } for { yyj33++ if yyhl33 { yyb33 = yyj33 > l } else { yyb33 = r.CheckBreak() } if yyb33 { break } if yyj33 > 1 { r.ReadArrayEntrySeparator() } z.DecStructFieldNotFound(yyj33-1, "") } r.ReadArrayEnd() } func (x codecSelfer5311) encSlicePtrtoNode(v []*Node, e *codec1978.Encoder) { var h codecSelfer5311 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) yys42 := !z.EncBinary() if yys42 { for yyi42, yyv42 := range v { if yyi42 > 0 { r.EncodeArrayEntrySeparator() } if yyv42 == nil { r.EncodeNil() } else { yyv42.CodecEncodeSelf(e) } } r.EncodeArrayEnd() } else { for _, yyv42 := range v { if yyv42 == nil { r.EncodeNil() } else { yyv42.CodecEncodeSelf(e) } } } } func (x codecSelfer5311) decSlicePtrtoNode(v *[]*Node, d *codec1978.Decoder) { var h codecSelfer5311 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r yyv43 := *v yyh43, yyl43 := z.DecSliceHelperStart() var yyc43 bool _ = yyc43 if yyv43 == nil { if yyl43 <= 0 { yyv43 = make([]*Node, 0) } else { yyv43 = make([]*Node, yyl43) } yyc43 = true } if yyl43 == 0 { if len(yyv43) != 0 { yyv43 = 
yyv43[:0] yyc43 = true } } else if yyl43 > 0 { yyn43 := yyl43 if yyl43 > cap(yyv43) { yyv43 = make([]*Node, yyl43, yyl43) yyc43 = true } else if yyl43 != len(yyv43) { yyv43 = yyv43[:yyl43] yyc43 = true } yyj43 := 0 for ; yyj43 < yyn43; yyj43++ { if r.TryDecodeAsNil() { if yyv43[yyj43] != nil { *yyv43[yyj43] = Node{} } } else { if yyv43[yyj43] == nil { yyv43[yyj43] = new(Node) } yyw44 := yyv43[yyj43] yyw44.CodecDecodeSelf(d) } } } else { for yyj43 := 0; !r.CheckBreak(); yyj43++ { if yyj43 >= len(yyv43) { yyv43 = append(yyv43, nil) // var yyz43 *Node yyc43 = true } if yyj43 > 0 { yyh43.Sep(yyj43) } if yyj43 < len(yyv43) { if r.TryDecodeAsNil() { if yyv43[yyj43] != nil { *yyv43[yyj43] = Node{} } } else { if yyv43[yyj43] == nil { yyv43[yyj43] = new(Node) } yyw45 := yyv43[yyj43] yyw45.CodecDecodeSelf(d) } } else { z.DecSwallow() } } yyh43.End() } if yyc43 { *v = yyv43 } } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/keys.go000066400000000000000000000432751267010174400241740ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package client //go:generate codecgen -r "Node|Response" -o keys.generated.go keys.go import ( "encoding/json" "errors" "fmt" "net/http" "net/url" "strconv" "strings" "time" "github.com/ugorji/go/codec" "golang.org/x/net/context" "github.com/coreos/etcd/pkg/pathutil" ) const ( ErrorCodeKeyNotFound = 100 ErrorCodeTestFailed = 101 ErrorCodeNotFile = 102 ErrorCodeNotDir = 104 ErrorCodeNodeExist = 105 ErrorCodeRootROnly = 107 ErrorCodeDirNotEmpty = 108 ErrorCodeUnauthorized = 110 ErrorCodePrevValueRequired = 201 ErrorCodeTTLNaN = 202 ErrorCodeIndexNaN = 203 ErrorCodeInvalidField = 209 ErrorCodeInvalidForm = 210 ErrorCodeRaftInternal = 300 ErrorCodeLeaderElect = 301 ErrorCodeWatcherCleared = 400 ErrorCodeEventIndexCleared = 401 ) type Error struct { Code int `json:"errorCode"` Message string `json:"message"` Cause string `json:"cause"` Index uint64 `json:"index"` } func (e Error) Error() string { return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) } var ( ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") ErrEmptyBody = errors.New("client: response body is empty") ) // PrevExistType is used to define an existence condition when setting // or deleting Nodes. type PrevExistType string const ( PrevIgnore = PrevExistType("") PrevExist = PrevExistType("true") PrevNoExist = PrevExistType("false") ) var ( defaultV2KeysPrefix = "/v2/keys" ) // NewKeysAPI builds a KeysAPI that interacts with etcd's key-value // API over HTTP. func NewKeysAPI(c Client) KeysAPI { return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) } // NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller // to provide a custom base URL path. This should only be used in // very rare cases. 
func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { return &httpKeysAPI{ client: c, prefix: p, } } type KeysAPI interface { // Get retrieves a set of Nodes from etcd Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) // Set assigns a new value to a Node identified by a given key. The caller // may define a set of conditions in the SetOptions. If SetOptions.Dir=true // than value is ignored. Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) // Delete removes a Node identified by the given key, optionally destroying // all of its children as well. The caller may define a set of required // conditions in an DeleteOptions object. Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) // Create is an alias for Set w/ PrevExist=false Create(ctx context.Context, key, value string) (*Response, error) // CreateInOrder is used to atomically create in-order keys within the given directory. CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) // Update is an alias for Set w/ PrevExist=true Update(ctx context.Context, key, value string) (*Response, error) // Watcher builds a new Watcher targeted at a specific Node identified // by the given key. The Watcher may be configured at creation time // through a WatcherOptions object. The returned Watcher is designed // to emit events that happen to a Node, and optionally to its children. Watcher(key string, opts *WatcherOptions) Watcher } type WatcherOptions struct { // AfterIndex defines the index after-which the Watcher should // start emitting events. For example, if a value of 5 is // provided, the first event will have an index >= 6. // // Setting AfterIndex to 0 (default) means that the Watcher // should start watching for events starting at the current // index, whatever that may be. 
AfterIndex uint64 // Recursive specifies whether or not the Watcher should emit // events that occur in children of the given keyspace. If set // to false (default), events will be limited to those that // occur for the exact key. Recursive bool } type CreateInOrderOptions struct { // TTL defines a period of time after-which the Node should // expire and no longer exist. Values <= 0 are ignored. Given // that the zero-value is ignored, TTL cannot be used to set // a TTL of 0. TTL time.Duration } type SetOptions struct { // PrevValue specifies what the current value of the Node must // be in order for the Set operation to succeed. // // Leaving this field empty means that the caller wishes to // ignore the current value of the Node. This cannot be used // to compare the Node's current value to an empty string. // // PrevValue is ignored if Dir=true PrevValue string // PrevIndex indicates what the current ModifiedIndex of the // Node must be in order for the Set operation to succeed. // // If PrevIndex is set to 0 (default), no comparison is made. PrevIndex uint64 // PrevExist specifies whether the Node must currently exist // (PrevExist) or not (PrevNoExist). If the caller does not // care about existence, set PrevExist to PrevIgnore, or simply // leave it unset. PrevExist PrevExistType // TTL defines a period of time after-which the Node should // expire and no longer exist. Values <= 0 are ignored. Given // that the zero-value is ignored, TTL cannot be used to set // a TTL of 0. TTL time.Duration // Dir specifies whether or not this Node should be created as a directory. Dir bool } type GetOptions struct { // Recursive defines whether or not all children of the Node // should be returned. Recursive bool // Sort instructs the server whether or not to sort the Nodes. // If true, the Nodes are sorted alphabetically by key in // ascending order (A to z). If false (default), the Nodes will // not be sorted and the ordering used should not be considered // predictable. 
Sort bool // Quorum specifies whether it gets the latest committed value that // has been applied in quorum of members, which ensures external // consistency (or linearizability). Quorum bool } type DeleteOptions struct { // PrevValue specifies what the current value of the Node must // be in order for the Delete operation to succeed. // // Leaving this field empty means that the caller wishes to // ignore the current value of the Node. This cannot be used // to compare the Node's current value to an empty string. PrevValue string // PrevIndex indicates what the current ModifiedIndex of the // Node must be in order for the Delete operation to succeed. // // If PrevIndex is set to 0 (default), no comparison is made. PrevIndex uint64 // Recursive defines whether or not all children of the Node // should be deleted. If set to true, all children of the Node // identified by the given key will be deleted. If left unset // or explicitly set to false, only a single Node will be // deleted. Recursive bool // Dir specifies whether or not this Node should be removed as a directory. Dir bool } type Watcher interface { // Next blocks until an etcd event occurs, then returns a Response // represeting that event. The behavior of Next depends on the // WatcherOptions used to construct the Watcher. Next is designed to // be called repeatedly, each time blocking until a subsequent event // is available. // // If the provided context is cancelled, Next will return a non-nil // error. Any other failures encountered while waiting for the next // event (connection issues, deserialization failures, etc) will // also result in a non-nil error. Next(context.Context) (*Response, error) } type Response struct { // Action is the name of the operation that occurred. Possible values // include get, set, delete, update, create, compareAndSwap, // compareAndDelete and expire. Action string `json:"action"` // Node represents the state of the relevant etcd Node. 
Node *Node `json:"node"` // PrevNode represents the previous state of the Node. PrevNode is non-nil // only if the Node existed before the action occurred and the action // caused a change to the Node. PrevNode *Node `json:"prevNode"` // Index holds the cluster-level index at the time the Response was generated. // This index is not tied to the Node(s) contained in this Response. Index uint64 `json:"-"` } type Node struct { // Key represents the unique location of this Node (e.g. "/foo/bar"). Key string `json:"key"` // Dir reports whether node describes a directory. Dir bool `json:"dir,omitempty"` // Value is the current data stored on this Node. If this Node // is a directory, Value will be empty. Value string `json:"value"` // Nodes holds the children of this Node, only if this Node is a directory. // This slice of will be arbitrarily deep (children, grandchildren, great- // grandchildren, etc.) if a recursive Get or Watch request were made. Nodes []*Node `json:"nodes"` // CreatedIndex is the etcd index at-which this Node was created. CreatedIndex uint64 `json:"createdIndex"` // ModifiedIndex is the etcd index at-which this Node was last modified. ModifiedIndex uint64 `json:"modifiedIndex"` // Expiration is the server side expiration time of the key. Expiration *time.Time `json:"expiration,omitempty"` // TTL is the time to live of the key in second. 
TTL int64 `json:"ttl,omitempty"` } func (n *Node) String() string { return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) } // TTLDuration returns the Node's TTL as a time.Duration object func (n *Node) TTLDuration() time.Duration { return time.Duration(n.TTL) * time.Second } type httpKeysAPI struct { client httpClient prefix string } func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { act := &setAction{ Prefix: k.prefix, Key: key, Value: val, } if opts != nil { act.PrevValue = opts.PrevValue act.PrevIndex = opts.PrevIndex act.PrevExist = opts.PrevExist act.TTL = opts.TTL act.Dir = opts.Dir } resp, body, err := k.client.Do(ctx, act) if err != nil { return nil, err } return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) } func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) } func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { act := &createInOrderAction{ Prefix: k.prefix, Dir: dir, Value: val, } if opts != nil { act.TTL = opts.TTL } resp, body, err := k.client.Do(ctx, act) if err != nil { return nil, err } return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) } func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) } func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { act := &deleteAction{ Prefix: k.prefix, Key: key, } if opts != nil { act.PrevValue = opts.PrevValue act.PrevIndex = opts.PrevIndex act.Dir = opts.Dir act.Recursive = opts.Recursive } resp, body, err := k.client.Do(ctx, act) if err != nil { return nil, err } return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) } func (k *httpKeysAPI) 
Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { act := &getAction{ Prefix: k.prefix, Key: key, } if opts != nil { act.Recursive = opts.Recursive act.Sorted = opts.Sort act.Quorum = opts.Quorum } resp, body, err := k.client.Do(ctx, act) if err != nil { return nil, err } return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) } func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { act := waitAction{ Prefix: k.prefix, Key: key, } if opts != nil { act.Recursive = opts.Recursive if opts.AfterIndex > 0 { act.WaitIndex = opts.AfterIndex + 1 } } return &httpWatcher{ client: k.client, nextWait: act, } } type httpWatcher struct { client httpClient nextWait waitAction } func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { for { httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) if err != nil { return nil, err } resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) if err != nil { if err == ErrEmptyBody { continue } return nil, err } hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 return resp, nil } } // v2KeysURL forms a URL representing the location of a key. // The endpoint argument represents the base URL of an etcd // server. The prefix is the path needed to route from the // provided endpoint's path to the root of the keys API // (typically "/v2/keys"). func v2KeysURL(ep url.URL, prefix, key string) *url.URL { // We concatenate all parts together manually. We cannot use // path.Join because it does not reserve trailing slash. // We call CanonicalURLPath to further cleanup the path. 
if prefix != "" && prefix[0] != '/' { prefix = "/" + prefix } if key != "" && key[0] != '/' { key = "/" + key } ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) return &ep } type getAction struct { Prefix string Key string Recursive bool Sorted bool Quorum bool } func (g *getAction) HTTPRequest(ep url.URL) *http.Request { u := v2KeysURL(ep, g.Prefix, g.Key) params := u.Query() params.Set("recursive", strconv.FormatBool(g.Recursive)) params.Set("sorted", strconv.FormatBool(g.Sorted)) params.Set("quorum", strconv.FormatBool(g.Quorum)) u.RawQuery = params.Encode() req, _ := http.NewRequest("GET", u.String(), nil) return req } type waitAction struct { Prefix string Key string WaitIndex uint64 Recursive bool } func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { u := v2KeysURL(ep, w.Prefix, w.Key) params := u.Query() params.Set("wait", "true") params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) params.Set("recursive", strconv.FormatBool(w.Recursive)) u.RawQuery = params.Encode() req, _ := http.NewRequest("GET", u.String(), nil) return req } type setAction struct { Prefix string Key string Value string PrevValue string PrevIndex uint64 PrevExist PrevExistType TTL time.Duration Dir bool } func (a *setAction) HTTPRequest(ep url.URL) *http.Request { u := v2KeysURL(ep, a.Prefix, a.Key) params := u.Query() form := url.Values{} // we're either creating a directory or setting a key if a.Dir { params.Set("dir", strconv.FormatBool(a.Dir)) } else { // These options are only valid for setting a key if a.PrevValue != "" { params.Set("prevValue", a.PrevValue) } form.Add("value", a.Value) } // Options which apply to both setting a key and creating a dir if a.PrevIndex != 0 { params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) } if a.PrevExist != PrevIgnore { params.Set("prevExist", string(a.PrevExist)) } if a.TTL > 0 { form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) } u.RawQuery = params.Encode() body := 
strings.NewReader(form.Encode()) req, _ := http.NewRequest("PUT", u.String(), body) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") return req } type deleteAction struct { Prefix string Key string PrevValue string PrevIndex uint64 Dir bool Recursive bool } func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request { u := v2KeysURL(ep, a.Prefix, a.Key) params := u.Query() if a.PrevValue != "" { params.Set("prevValue", a.PrevValue) } if a.PrevIndex != 0 { params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) } if a.Dir { params.Set("dir", "true") } if a.Recursive { params.Set("recursive", "true") } u.RawQuery = params.Encode() req, _ := http.NewRequest("DELETE", u.String(), nil) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") return req } type createInOrderAction struct { Prefix string Dir string Value string TTL time.Duration } func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { u := v2KeysURL(ep, a.Prefix, a.Dir) form := url.Values{} form.Add("value", a.Value) if a.TTL > 0 { form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) } body := strings.NewReader(form.Encode()) req, _ := http.NewRequest("POST", u.String(), body) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") return req } func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { switch code { case http.StatusOK, http.StatusCreated: if len(body) == 0 { return nil, ErrEmptyBody } res, err = unmarshalSuccessfulKeysResponse(header, body) default: err = unmarshalFailedKeysResponse(body) } return } func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { var res Response err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) if err != nil { return nil, ErrInvalidJSON } if header.Get("X-Etcd-Index") != "" { res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) if err != nil { return nil, err } } return &res, 
nil } func unmarshalFailedKeysResponse(body []byte) error { var etcdErr Error if err := json.Unmarshal(body, &etcdErr); err != nil { return ErrInvalidJSON } return etcdErr } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/members.go000066400000000000000000000145551267010174400246520ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client import ( "bytes" "encoding/json" "fmt" "net/http" "net/url" "path" "golang.org/x/net/context" "github.com/coreos/etcd/pkg/types" ) var ( defaultV2MembersPrefix = "/v2/members" ) type Member struct { // ID is the unique identifier of this Member. ID string `json:"id"` // Name is a human-readable, non-unique identifier of this Member. Name string `json:"name"` // PeerURLs represents the HTTP(S) endpoints this Member uses to // participate in etcd's consensus protocol. PeerURLs []string `json:"peerURLs"` // ClientURLs represents the HTTP(S) endpoints on which this Member // serves it's client-facing APIs. 
ClientURLs []string `json:"clientURLs"` } type memberCollection []Member func (c *memberCollection) UnmarshalJSON(data []byte) error { d := struct { Members []Member }{} if err := json.Unmarshal(data, &d); err != nil { return err } if d.Members == nil { *c = make([]Member, 0) return nil } *c = d.Members return nil } type memberCreateOrUpdateRequest struct { PeerURLs types.URLs } func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { s := struct { PeerURLs []string `json:"peerURLs"` }{ PeerURLs: make([]string, len(m.PeerURLs)), } for i, u := range m.PeerURLs { s.PeerURLs[i] = u.String() } return json.Marshal(&s) } // NewMembersAPI constructs a new MembersAPI that uses HTTP to // interact with etcd's membership API. func NewMembersAPI(c Client) MembersAPI { return &httpMembersAPI{ client: c, } } type MembersAPI interface { // List enumerates the current cluster membership. List(ctx context.Context) ([]Member, error) // Add instructs etcd to accept a new Member into the cluster. Add(ctx context.Context, peerURL string) (*Member, error) // Remove demotes an existing Member out of the cluster. Remove(ctx context.Context, mID string) error // Update instructs etcd to update an existing Member in the cluster. 
Update(ctx context.Context, mID string, peerURLs []string) error } type httpMembersAPI struct { client httpClient } func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { req := &membersAPIActionList{} resp, body, err := m.client.Do(ctx, req) if err != nil { return nil, err } if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { return nil, err } var mCollection memberCollection if err := json.Unmarshal(body, &mCollection); err != nil { return nil, err } return []Member(mCollection), nil } func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { urls, err := types.NewURLs([]string{peerURL}) if err != nil { return nil, err } req := &membersAPIActionAdd{peerURLs: urls} resp, body, err := m.client.Do(ctx, req) if err != nil { return nil, err } if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { return nil, err } if resp.StatusCode != http.StatusCreated { var merr membersError if err := json.Unmarshal(body, &merr); err != nil { return nil, err } return nil, merr } var memb Member if err := json.Unmarshal(body, &memb); err != nil { return nil, err } return &memb, nil } func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { urls, err := types.NewURLs(peerURLs) if err != nil { return err } req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} resp, body, err := m.client.Do(ctx, req) if err != nil { return err } if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { return err } if resp.StatusCode != http.StatusNoContent { var merr membersError if err := json.Unmarshal(body, &merr); err != nil { return err } return merr } return nil } func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error { req := &membersAPIActionRemove{memberID: memberID} resp, _, err := m.client.Do(ctx, req) if err != nil { return err } return 
assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) } type membersAPIActionList struct{} func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { u := v2MembersURL(ep) req, _ := http.NewRequest("GET", u.String(), nil) return req } type membersAPIActionRemove struct { memberID string } func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { u := v2MembersURL(ep) u.Path = path.Join(u.Path, d.memberID) req, _ := http.NewRequest("DELETE", u.String(), nil) return req } type membersAPIActionAdd struct { peerURLs types.URLs } func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { u := v2MembersURL(ep) m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} b, _ := json.Marshal(&m) req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) req.Header.Set("Content-Type", "application/json") return req } type membersAPIActionUpdate struct { memberID string peerURLs types.URLs } func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { u := v2MembersURL(ep) m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} u.Path = path.Join(u.Path, a.memberID) b, _ := json.Marshal(&m) req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) req.Header.Set("Content-Type", "application/json") return req } func assertStatusCode(got int, want ...int) (err error) { for _, w := range want { if w == got { return nil } } return fmt.Errorf("unexpected status code %d", got) } // v2MembersURL add the necessary path to the provided endpoint // to route requests to the default v2 members API. func v2MembersURL(ep url.URL) *url.URL { ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) return &ep } type membersError struct { Message string `json:"message"` Code int `json:"-"` } func (e membersError) Error() string { return e.Message } docker-1.10.3/vendor/src/github.com/coreos/etcd/client/srv.go000066400000000000000000000032341267010174400240220ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client import ( "fmt" "net" "net/url" ) var ( // indirection for testing lookupSRV = net.LookupSRV ) type srvDiscover struct{} // NewSRVDiscover constructs a new Dicoverer that uses the stdlib to lookup SRV records. func NewSRVDiscover() Discoverer { return &srvDiscover{} } // Discover looks up the etcd servers for the domain. func (d *srvDiscover) Discover(domain string) ([]string, error) { var urls []*url.URL updateURLs := func(service, scheme string) error { _, addrs, err := lookupSRV(service, "tcp", domain) if err != nil { return err } for _, srv := range addrs { urls = append(urls, &url.URL{ Scheme: scheme, Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), }) } return nil } errHTTPS := updateURLs("etcd-server-ssl", "https") errHTTP := updateURLs("etcd-server", "http") if errHTTPS != nil && errHTTP != nil { return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) } endpoints := make([]string, len(urls)) for i := range urls { endpoints[i] = urls[i].String() } return endpoints, nil } 
docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/000077500000000000000000000000001267010174400221625ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/pathutil/000077500000000000000000000000001267010174400240145ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go000066400000000000000000000014701267010174400253010ustar00rootroot00000000000000// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package pathutil import "path" // CanonicalURLPath returns the canonical url path for p, which follows the rules: // 1. the path always starts with "/" // 2. replace multiple slashes with a single slash // 3. replace each '.' '..' path name element with equivalent one // 4. keep the trailing slash // The function is borrowed from stdlib http.cleanPath in server.go. func CanonicalURLPath(p string) string { if p == "" { return "/" } if p[0] != '/' { p = "/" + p } np := path.Clean(p) // path.Clean removes trailing slash except for root, // put the trailing slash back if necessary. if p[len(p)-1] == '/' && np != "/" { np += "/" } return np } docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/types/000077500000000000000000000000001267010174400233265ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/types/id.go000066400000000000000000000024151267010174400242530ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package types import ( "strconv" ) // ID represents a generic identifier which is canonically // stored as a uint64 but is typically represented as a // base-16 string for input/output type ID uint64 func (i ID) String() string { return strconv.FormatUint(uint64(i), 16) } // IDFromString attempts to create an ID from a base-16 string. func IDFromString(s string) (ID, error) { i, err := strconv.ParseUint(s, 16, 64) return ID(i), err } // IDSlice implements the sort interface type IDSlice []ID func (p IDSlice) Len() int { return len(p) } func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/types/set.go000066400000000000000000000073321267010174400244550ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "reflect" "sort" "sync" ) type Set interface { Add(string) Remove(string) Contains(string) bool Equals(Set) bool Length() int Values() []string Copy() Set Sub(Set) Set } func NewUnsafeSet(values ...string) *unsafeSet { set := &unsafeSet{make(map[string]struct{})} for _, v := range values { set.Add(v) } return set } func NewThreadsafeSet(values ...string) *tsafeSet { us := NewUnsafeSet(values...) 
return &tsafeSet{us, sync.RWMutex{}} } type unsafeSet struct { d map[string]struct{} } // Add adds a new value to the set (no-op if the value is already present) func (us *unsafeSet) Add(value string) { us.d[value] = struct{}{} } // Remove removes the given value from the set func (us *unsafeSet) Remove(value string) { delete(us.d, value) } // Contains returns whether the set contains the given value func (us *unsafeSet) Contains(value string) (exists bool) { _, exists = us.d[value] return } // ContainsAll returns whether the set contains all given values func (us *unsafeSet) ContainsAll(values []string) bool { for _, s := range values { if !us.Contains(s) { return false } } return true } // Equals returns whether the contents of two sets are identical func (us *unsafeSet) Equals(other Set) bool { v1 := sort.StringSlice(us.Values()) v2 := sort.StringSlice(other.Values()) v1.Sort() v2.Sort() return reflect.DeepEqual(v1, v2) } // Length returns the number of elements in the set func (us *unsafeSet) Length() int { return len(us.d) } // Values returns the values of the Set in an unspecified order. 
func (us *unsafeSet) Values() (values []string) { values = make([]string, 0) for val := range us.d { values = append(values, val) } return } // Copy creates a new Set containing the values of the first func (us *unsafeSet) Copy() Set { cp := NewUnsafeSet() for val := range us.d { cp.Add(val) } return cp } // Sub removes all elements in other from the set func (us *unsafeSet) Sub(other Set) Set { oValues := other.Values() result := us.Copy().(*unsafeSet) for _, val := range oValues { if _, ok := result.d[val]; !ok { continue } delete(result.d, val) } return result } type tsafeSet struct { us *unsafeSet m sync.RWMutex } func (ts *tsafeSet) Add(value string) { ts.m.Lock() defer ts.m.Unlock() ts.us.Add(value) } func (ts *tsafeSet) Remove(value string) { ts.m.Lock() defer ts.m.Unlock() ts.us.Remove(value) } func (ts *tsafeSet) Contains(value string) (exists bool) { ts.m.RLock() defer ts.m.RUnlock() return ts.us.Contains(value) } func (ts *tsafeSet) Equals(other Set) bool { ts.m.RLock() defer ts.m.RUnlock() return ts.us.Equals(other) } func (ts *tsafeSet) Length() int { ts.m.RLock() defer ts.m.RUnlock() return ts.us.Length() } func (ts *tsafeSet) Values() (values []string) { ts.m.RLock() defer ts.m.RUnlock() return ts.us.Values() } func (ts *tsafeSet) Copy() Set { ts.m.RLock() defer ts.m.RUnlock() usResult := ts.us.Copy().(*unsafeSet) return &tsafeSet{usResult, sync.RWMutex{}} } func (ts *tsafeSet) Sub(other Set) Set { ts.m.RLock() defer ts.m.RUnlock() usResult := ts.us.Sub(other).(*unsafeSet) return &tsafeSet{usResult, sync.RWMutex{}} } docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/types/slice.go000066400000000000000000000015411267010174400247550ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types // Uint64Slice implements sort interface type Uint64Slice []uint64 func (p Uint64Slice) Len() int { return len(p) } func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/types/urls.go000066400000000000000000000034671267010174400246540ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types import ( "errors" "fmt" "net" "net/url" "sort" "strings" ) type URLs []url.URL func NewURLs(strs []string) (URLs, error) { all := make([]url.URL, len(strs)) if len(all) == 0 { return nil, errors.New("no valid URLs given") } for i, in := range strs { in = strings.TrimSpace(in) u, err := url.Parse(in) if err != nil { return nil, err } if u.Scheme != "http" && u.Scheme != "https" { return nil, fmt.Errorf("URL scheme must be http or https: %s", in) } if _, _, err := net.SplitHostPort(u.Host); err != nil { return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) } if u.Path != "" { return nil, fmt.Errorf("URL must not contain a path: %s", in) } all[i] = *u } us := URLs(all) us.Sort() return us, nil } func (us URLs) String() string { return strings.Join(us.StringSlice(), ",") } func (us *URLs) Sort() { sort.Sort(us) } func (us URLs) Len() int { return len(us) } func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } func (us URLs) StringSlice() []string { out := make([]string, len(us)) for i := range us { out[i] = us[i].String() } return out } docker-1.10.3/vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go000066400000000000000000000037111267010174400253420ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types import ( "fmt" "net/url" "sort" "strings" ) type URLsMap map[string]URLs // NewURLsMap returns a URLsMap instantiated from the given string, // which consists of discovery-formatted names-to-URLs, like: // mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 func NewURLsMap(s string) (URLsMap, error) { cl := URLsMap{} v, err := url.ParseQuery(strings.Replace(s, ",", "&", -1)) if err != nil { return nil, err } for name, urls := range v { if len(urls) == 0 || urls[0] == "" { return nil, fmt.Errorf("empty URL given for %q", name) } us, err := NewURLs(urls) if err != nil { return nil, err } cl[name] = us } return cl, nil } // String returns NameURLPairs into discovery-formatted name-to-URLs sorted by name. func (c URLsMap) String() string { pairs := make([]string, 0) for name, urls := range c { for _, url := range urls { pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) } } sort.Strings(pairs) return strings.Join(pairs, ",") } // URLs returns a list of all URLs. // The returned list is sorted in ascending lexicographical order. func (c URLsMap) URLs() []string { urls := make([]string, 0) for _, us := range c { for _, u := range us { urls = append(urls, u.String()) } } sort.Strings(urls) return urls } func (c URLsMap) Len() int { return len(c) } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/000077500000000000000000000000001267010174400225555ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/go-systemd/LICENSE000066400000000000000000000240411267010174400235630ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: You must give any other recipients of the Work or Derivative Works a copy of this License; and You must cause any modified files to carry prominent notices stating that You changed the files; and You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; 
within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-1.10.3/vendor/src/github.com/coreos/go-systemd/activation/000077500000000000000000000000001267010174400247165ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/go-systemd/activation/files.go000066400000000000000000000025231267010174400263510ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Package activation implements primitives for systemd socket activation. package activation import ( "os" "strconv" "syscall" ) // based on: https://gist.github.com/alberts/4640792 const ( listenFdsStart = 3 ) func Files(unsetEnv bool) []*os.File { if unsetEnv { defer os.Unsetenv("LISTEN_PID") defer os.Unsetenv("LISTEN_FDS") } pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) if err != nil || pid != os.Getpid() { return nil } nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) if err != nil || nfds == 0 { return nil } files := make([]*os.File, 0, nfds) for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { syscall.CloseOnExec(fd) files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) } return files } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/activation/listeners.go000066400000000000000000000036561267010174400272670ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package activation import ( "crypto/tls" "net" ) // Listeners returns a slice containing a net.Listener for each matching socket type // passed to this process. // // The order of the file descriptors is preserved in the returned slice. // Nil values are used to fill any gaps. 
For example if systemd were to return file descriptors // corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener} func Listeners(unsetEnv bool) ([]net.Listener, error) { files := Files(unsetEnv) listeners := make([]net.Listener, len(files)) for i, f := range files { if pc, err := net.FileListener(f); err == nil { listeners[i] = pc } } return listeners, nil } // TLSListeners returns a slice containing a net.listener for each matching TCP socket type // passed to this process. // It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig. func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) { listeners, err := Listeners(unsetEnv) if listeners == nil || err != nil { return nil, err } if tlsConfig != nil && err == nil { tlsConfig.NextProtos = []string{"http/1.1"} for i, l := range listeners { // Activate TLS only for TCP sockets if l.Addr().Network() == "tcp" { listeners[i] = tls.NewListener(l, tlsConfig) } } } return listeners, err } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/activation/packetconns.go000066400000000000000000000023771267010174400275660ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package activation import ( "net" ) // PacketConns returns a slice containing a net.PacketConn for each matching socket type // passed to this process. 
// // The order of the file descriptors is preserved in the returned slice. // Nil values are used to fill any gaps. For example if systemd were to return file descriptors // corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} func PacketConns(unsetEnv bool) ([]net.PacketConn, error) { files := Files(unsetEnv) conns := make([]net.PacketConn, len(files)) for i, f := range files { if pc, err := net.FilePacketConn(f); err == nil { conns[i] = pc } } return conns, nil } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/daemon/000077500000000000000000000000001267010174400240205ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go000066400000000000000000000010641267010174400262070ustar00rootroot00000000000000// Code forked from Docker project package daemon import ( "errors" "net" "os" ) var SdNotifyNoSocket = errors.New("No socket") // SdNotify sends a message to the init daemon. It is common to ignore the error. func SdNotify(state string) error { socketAddr := &net.UnixAddr{ Name: os.Getenv("NOTIFY_SOCKET"), Net: "unixgram", } if socketAddr.Name == "" { return SdNotifyNoSocket } conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) if err != nil { return err } defer conn.Close() _, err = conn.Write([]byte(state)) return err } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/dbus/000077500000000000000000000000001267010174400235125ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go000066400000000000000000000117111267010174400247770ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ package dbus import ( "fmt" "os" "strconv" "strings" "sync" "github.com/godbus/dbus" ) const ( alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` num = `0123456789` alphanum = alpha + num signalBuffer = 100 ) // needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped func needsEscape(i int, b byte) bool { // Escape everything that is not a-z-A-Z-0-9 // Also escape 0-9 if it's the first character return strings.IndexByte(alphanum, b) == -1 || (i == 0 && strings.IndexByte(num, b) != -1) } // PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the // rules that systemd uses for serializing special characters. func PathBusEscape(path string) string { // Special case the empty string if len(path) == 0 { return "_" } n := []byte{} for i := 0; i < len(path); i++ { c := path[i] if needsEscape(i, c) { e := fmt.Sprintf("_%x", c) n = append(n, []byte(e)...) } else { n = append(n, c) } } return string(n) } // Conn is a connection to systemd's dbus endpoint. 
type Conn struct { // sysconn/sysobj are only used to call dbus methods sysconn *dbus.Conn sysobj dbus.BusObject // sigconn/sigobj are only used to receive dbus signals sigconn *dbus.Conn sigobj dbus.BusObject jobListener struct { jobs map[dbus.ObjectPath]chan<- string sync.Mutex } subscriber struct { updateCh chan<- *SubStateUpdate errCh chan<- error sync.Mutex ignore map[dbus.ObjectPath]int64 cleanIgnore int64 } } // New establishes a connection to the system bus and authenticates. // Callers should call Close() when done with the connection. func New() (*Conn, error) { return newConnection(func() (*dbus.Conn, error) { return dbusAuthHelloConnection(dbus.SystemBusPrivate) }) } // NewUserConnection establishes a connection to the session bus and // authenticates. This can be used to connect to systemd user instances. // Callers should call Close() when done with the connection. func NewUserConnection() (*Conn, error) { return newConnection(func() (*dbus.Conn, error) { return dbusAuthHelloConnection(dbus.SessionBusPrivate) }) } // NewSystemdConnection establishes a private, direct connection to systemd. // This can be used for communicating with systemd without a dbus daemon. // Callers should call Close() when done with the connection. func NewSystemdConnection() (*Conn, error) { return newConnection(func() (*dbus.Conn, error) { // We skip Hello when talking directly to systemd. 
return dbusAuthConnection(func() (*dbus.Conn, error) { return dbus.Dial("unix:path=/run/systemd/private") }) }) } // Close closes an established connection func (c *Conn) Close() { c.sysconn.Close() c.sigconn.Close() } func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) { sysconn, err := createBus() if err != nil { return nil, err } sigconn, err := createBus() if err != nil { sysconn.Close() return nil, err } c := &Conn{ sysconn: sysconn, sysobj: systemdObject(sysconn), sigconn: sigconn, sigobj: systemdObject(sigconn), } c.subscriber.ignore = make(map[dbus.ObjectPath]int64) c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) // Setup the listeners on jobs so that we can get completions c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") c.dispatch() return c, nil } func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { conn, err := createBus() if err != nil { return nil, err } // Only use EXTERNAL method, and hardcode the uid (not username) // to avoid a username lookup (which requires a dynamically linked // libc) methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} err = conn.Auth(methods) if err != nil { conn.Close() return nil, err } return conn, nil } func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { conn, err := dbusAuthConnection(createBus) if err != nil { return nil, err } if err = conn.Hello(); err != nil { conn.Close() return nil, err } return conn, nil } func systemdObject(conn *dbus.Conn) dbus.BusObject { return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/dbus/methods.go000066400000000000000000000374321267010174400255150ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dbus import ( "errors" "path" "strconv" "github.com/godbus/dbus" ) func (c *Conn) jobComplete(signal *dbus.Signal) { var id uint32 var job dbus.ObjectPath var unit string var result string dbus.Store(signal.Body, &id, &job, &unit, &result) c.jobListener.Lock() out, ok := c.jobListener.jobs[job] if ok { out <- result delete(c.jobListener.jobs, job) } c.jobListener.Unlock() } func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { if ch != nil { c.jobListener.Lock() defer c.jobListener.Unlock() } var p dbus.ObjectPath err := c.sysobj.Call(job, 0, args...).Store(&p) if err != nil { return 0, err } if ch != nil { c.jobListener.jobs[p] = ch } // ignore error since 0 is fine if conversion fails jobID, _ := strconv.Atoi(path.Base(string(p))) return jobID, nil } // StartUnit enqueues a start job and depending jobs, if any (unless otherwise // specified by the mode string). // // Takes the unit to activate, plus a mode string. The mode needs to be one of // replace, fail, isolate, ignore-dependencies, ignore-requirements. If // "replace" the call will start the unit and its dependencies, possibly // replacing already queued jobs that conflict with this. If "fail" the call // will start the unit and its dependencies, but will fail if this would change // an already queued job. If "isolate" the call will start the unit in question // and terminate all units that aren't dependencies of it. 
If // "ignore-dependencies" it will start a unit but ignore all its dependencies. // If "ignore-requirements" it will start a unit but only ignore the // requirement dependencies. It is not recommended to make use of the latter // two options. // // If the provided channel is non-nil, a result string will be sent to it upon // job completion: one of done, canceled, timeout, failed, dependency, skipped. // done indicates successful execution of a job. canceled indicates that a job // has been canceled before it finished execution. timeout indicates that the // job timeout was reached. failed indicates that the job failed. dependency // indicates that a job this job has been depending on failed and the job hence // has been removed too. skipped indicates that a job was skipped because it // didn't apply to the units current state. // // If no error occurs, the ID of the underlying systemd job will be returned. There // does exist the possibility for no error to be returned, but for the returned job // ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint // should not be considered authoritative. // // If an error does occur, it will be returned to the user alongside a job ID of 0. func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) } // StopUnit is similar to StartUnit but stops the specified unit rather // than starting it. func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) } // ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) } // RestartUnit restarts a service. 
If a service is restarted that isn't // running it will be started. func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) } // TryRestartUnit is like RestartUnit, except that a service that isn't running // is not affected by the restart. func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) } // ReloadOrRestart attempts a reload if the unit supports it and use a restart // otherwise. func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) } // ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" // flavored restart otherwise. func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) } // StartTransientUnit() may be used to create and start a transient unit, which // will be released as soon as it is not running or referenced anymore or the // system is rebooted. name is the unit name including suffix, and must be // unique. mode is the same as in StartUnit(), properties contains properties // of the unit. func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) } // KillUnit takes the unit name and a UNIX signal number to send. All of the unit's // processes are killed. 
func (c *Conn) KillUnit(name string, signal int32) { c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() } // ResetFailedUnit resets the "failed" state of a specific unit. func (c *Conn) ResetFailedUnit(name string) error { return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() } // getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { var err error var props map[string]dbus.Variant path := unitPath(unit) if !path.IsValid() { return nil, errors.New("invalid unit name: " + unit) } obj := c.sysconn.Object("org.freedesktop.systemd1", path) err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) if err != nil { return nil, err } out := make(map[string]interface{}, len(props)) for k, v := range props { out[k] = v.Value() } return out, nil } // GetUnitProperties takes the unit name and returns all of its dbus object properties. func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { return c.getProperties(unit, "org.freedesktop.systemd1.Unit") } func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { var err error var prop dbus.Variant path := unitPath(unit) if !path.IsValid() { return nil, errors.New("invalid unit name: " + unit) } obj := c.sysconn.Object("org.freedesktop.systemd1", path) err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) if err != nil { return nil, err } return &Property{Name: propertyName, Value: prop}, nil } func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) } // GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. 
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope // return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) } // SetUnitProperties() may be used to modify certain unit properties at runtime. // Not all properties may be changed at runtime, but many resource management // settings (primarily those in systemd.cgroup(5)) may. The changes are applied // instantly, and stored on disk for future boots, unless runtime is true, in which // case the settings only apply until the next reboot. name is the name of the unit // to modify. properties are the settings to set, encoded as an array of property // name and value pairs. func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() } func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) } // ListUnits returns an array with all currently loaded units. Note that // units may be known by multiple names at the same time, and hence there might // be more unit names loaded than actual units behind them. 
func (c *Conn) ListUnits() ([]UnitStatus, error) { result := make([][]interface{}, 0) err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result) if err != nil { return nil, err } resultInterface := make([]interface{}, len(result)) for i := range result { resultInterface[i] = result[i] } status := make([]UnitStatus, len(result)) statusInterface := make([]interface{}, len(status)) for i := range status { statusInterface[i] = &status[i] } err = dbus.Store(resultInterface, statusInterface...) if err != nil { return nil, err } return status, nil } type UnitStatus struct { Name string // The primary unit name as string Description string // The human readable description string LoadState string // The load state (i.e. whether the unit file has been loaded successfully) ActiveState string // The active state (i.e. whether the unit is currently started or not) SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. Path dbus.ObjectPath // The unit object path JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise JobType string // The job type as string JobPath dbus.ObjectPath // The job object path } type LinkUnitFileChange EnableUnitFileChange // LinkUnitFiles() links unit files (that are located outside of the // usual unit search paths) into the unit search path. // // It takes a list of absolute paths to unit files to link and two // booleans. The first boolean controls whether the unit shall be // enabled for runtime only (true, /run), or persistently (false, // /etc). // The second controls whether symlinks pointing to other units shall // be replaced if necessary. // // This call returns a list of the changes made. 
The list consists of // structures with three strings: the type of the change (one of symlink // or unlink), the file name of the symlink and the destination of the // symlink. func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { result := make([][]interface{}, 0) err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) if err != nil { return nil, err } resultInterface := make([]interface{}, len(result)) for i := range result { resultInterface[i] = result[i] } changes := make([]LinkUnitFileChange, len(result)) changesInterface := make([]interface{}, len(changes)) for i := range changes { changesInterface[i] = &changes[i] } err = dbus.Store(resultInterface, changesInterface...) if err != nil { return nil, err } return changes, nil } // EnableUnitFiles() may be used to enable one or more units in the system (by // creating symlinks to them in /etc or /run). // // It takes a list of unit files to enable (either just file names or full // absolute paths if the unit files are residing outside the usual unit // search paths), and two booleans: the first controls whether the unit shall // be enabled for runtime only (true, /run), or persistently (false, /etc). // The second one controls whether symlinks pointing to other units shall // be replaced if necessary. // // This call returns one boolean and an array with the changes made. The // boolean signals whether the unit files contained any enablement // information (i.e. an [Install]) section. The changes list consists of // structures with three strings: the type of the change (one of symlink // or unlink), the file name of the symlink and the destination of the // symlink. 
func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { var carries_install_info bool result := make([][]interface{}, 0) err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) if err != nil { return false, nil, err } resultInterface := make([]interface{}, len(result)) for i := range result { resultInterface[i] = result[i] } changes := make([]EnableUnitFileChange, len(result)) changesInterface := make([]interface{}, len(changes)) for i := range changes { changesInterface[i] = &changes[i] } err = dbus.Store(resultInterface, changesInterface...) if err != nil { return false, nil, err } return carries_install_info, changes, nil } type EnableUnitFileChange struct { Type string // Type of the change (one of symlink or unlink) Filename string // File name of the symlink Destination string // Destination of the symlink } // DisableUnitFiles() may be used to disable one or more units in the system (by // removing symlinks to them from /etc or /run). // // It takes a list of unit files to disable (either just file names or full // absolute paths if the unit files are residing outside the usual unit // search paths), and one boolean: whether the unit was enabled for runtime // only (true, /run), or persistently (false, /etc). // // This call returns an array with the changes made. The changes list // consists of structures with three strings: the type of the change (one of // symlink or unlink), the file name of the symlink and the destination of the // symlink. 
func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { result := make([][]interface{}, 0) err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } resultInterface := make([]interface{}, len(result)) for i := range result { resultInterface[i] = result[i] } changes := make([]DisableUnitFileChange, len(result)) changesInterface := make([]interface{}, len(changes)) for i := range changes { changesInterface[i] = &changes[i] } err = dbus.Store(resultInterface, changesInterface...) if err != nil { return nil, err } return changes, nil } type DisableUnitFileChange struct { Type string // Type of the change (one of symlink or unlink) Filename string // File name of the symlink Destination string // Destination of the symlink } // Reload instructs systemd to scan for and reload unit files. This is // equivalent to a 'systemctl daemon-reload'. func (c *Conn) Reload() error { return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() } func unitPath(name string) dbus.ObjectPath { return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/dbus/properties.go000066400000000000000000000176351267010174400262510ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package dbus import ( "github.com/godbus/dbus" ) // From the systemd docs: // // The properties array of StartTransientUnit() may take many of the settings // that may also be configured in unit files. Not all parameters are currently // accepted though, but we plan to cover more properties with future release. // Currently you may set the Description, Slice and all dependency types of // units, as well as RemainAfterExit, ExecStart for service units, // TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, // BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, // BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, // DevicePolicy, DeviceAllow for services/scopes/slices. These fields map // directly to their counterparts in unit files and as normal D-Bus object // properties. The exception here is the PIDs field of scope units which is // used for construction of the scope only and specifies the initial PIDs to // add to the scope object. type Property struct { Name string Value dbus.Variant } type PropertyCollection struct { Name string Properties []Property } type execStart struct { Path string // the binary path to execute Args []string // an array with all arguments to pass to the executed command, starting with argument 0 UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly } // PropExecStart sets the ExecStart service property. The first argument is a // slice with the binary path to execute followed by the arguments to pass to // the executed command. See // http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= func PropExecStart(command []string, uncleanIsFailure bool) Property { execStarts := []execStart{ execStart{ Path: command[0], Args: command, UncleanIsFailure: uncleanIsFailure, }, } return Property{ Name: "ExecStart", Value: dbus.MakeVariant(execStarts), } } // PropRemainAfterExit sets the RemainAfterExit service property. 
See // http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= func PropRemainAfterExit(b bool) Property { return Property{ Name: "RemainAfterExit", Value: dbus.MakeVariant(b), } } // PropDescription sets the Description unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= func PropDescription(desc string) Property { return Property{ Name: "Description", Value: dbus.MakeVariant(desc), } } func propDependency(name string, units []string) Property { return Property{ Name: name, Value: dbus.MakeVariant(units), } } // PropRequires sets the Requires unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= func PropRequires(units ...string) Property { return propDependency("Requires", units) } // PropRequiresOverridable sets the RequiresOverridable unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= func PropRequiresOverridable(units ...string) Property { return propDependency("RequiresOverridable", units) } // PropRequisite sets the Requisite unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= func PropRequisite(units ...string) Property { return propDependency("Requisite", units) } // PropRequisiteOverridable sets the RequisiteOverridable unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= func PropRequisiteOverridable(units ...string) Property { return propDependency("RequisiteOverridable", units) } // PropWants sets the Wants unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= func PropWants(units ...string) Property { return propDependency("Wants", units) } // PropBindsTo sets the BindsTo unit property. 
See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= func PropBindsTo(units ...string) Property { return propDependency("BindsTo", units) } // PropRequiredBy sets the RequiredBy unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= func PropRequiredBy(units ...string) Property { return propDependency("RequiredBy", units) } // PropRequiredByOverridable sets the RequiredByOverridable unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= func PropRequiredByOverridable(units ...string) Property { return propDependency("RequiredByOverridable", units) } // PropWantedBy sets the WantedBy unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= func PropWantedBy(units ...string) Property { return propDependency("WantedBy", units) } // PropBoundBy sets the BoundBy unit property. See // http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= func PropBoundBy(units ...string) Property { return propDependency("BoundBy", units) } // PropConflicts sets the Conflicts unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= func PropConflicts(units ...string) Property { return propDependency("Conflicts", units) } // PropConflictedBy sets the ConflictedBy unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= func PropConflictedBy(units ...string) Property { return propDependency("ConflictedBy", units) } // PropBefore sets the Before unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= func PropBefore(units ...string) Property { return propDependency("Before", units) } // PropAfter sets the After unit property. 
See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= func PropAfter(units ...string) Property { return propDependency("After", units) } // PropOnFailure sets the OnFailure unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= func PropOnFailure(units ...string) Property { return propDependency("OnFailure", units) } // PropTriggers sets the Triggers unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= func PropTriggers(units ...string) Property { return propDependency("Triggers", units) } // PropTriggeredBy sets the TriggeredBy unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= func PropTriggeredBy(units ...string) Property { return propDependency("TriggeredBy", units) } // PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= func PropPropagatesReloadTo(units ...string) Property { return propDependency("PropagatesReloadTo", units) } // PropRequiresMountsFor sets the RequiresMountsFor unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= func PropRequiresMountsFor(units ...string) Property { return propDependency("RequiresMountsFor", units) } // PropSlice sets the Slice unit property. See // http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= func PropSlice(slice string) Property { return Property{ Name: "Slice", Value: dbus.MakeVariant(slice), } } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/dbus/set.go000066400000000000000000000021011267010174400246260ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dbus type set struct { data map[string]bool } func (s *set) Add(value string) { s.data[value] = true } func (s *set) Remove(value string) { delete(s.data, value) } func (s *set) Contains(value string) (exists bool) { _, exists = s.data[value] return } func (s *set) Length() int { return len(s.data) } func (s *set) Values() (values []string) { for val, _ := range s.data { values = append(values, val) } return } func newSet() *set { return &set{make(map[string]bool)} } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go000066400000000000000000000165141267010174400265740ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dbus import ( "errors" "time" "github.com/godbus/dbus" ) const ( cleanIgnoreInterval = int64(10 * time.Second) ignoreInterval = int64(30 * time.Millisecond) ) // Subscribe sets up this connection to subscribe to all systemd dbus events. // This is required before calling SubscribeUnits. 
When the connection closes // systemd will automatically stop sending signals so there is no need to // explicitly call Unsubscribe(). func (c *Conn) Subscribe() error { c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() if err != nil { return err } return nil } // Unsubscribe this connection from systemd dbus events. func (c *Conn) Unsubscribe() error { err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() if err != nil { return err } return nil } func (c *Conn) dispatch() { ch := make(chan *dbus.Signal, signalBuffer) c.sigconn.Signal(ch) go func() { for { signal, ok := <-ch if !ok { return } if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { c.jobComplete(signal) } if c.subscriber.updateCh == nil { continue } var unitPath dbus.ObjectPath switch signal.Name { case "org.freedesktop.systemd1.Manager.JobRemoved": unitName := signal.Body[2].(string) c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) case "org.freedesktop.systemd1.Manager.UnitNew": unitPath = signal.Body[1].(dbus.ObjectPath) case "org.freedesktop.DBus.Properties.PropertiesChanged": if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { unitPath = signal.Path } } if unitPath == dbus.ObjectPath("") { continue } c.sendSubStateUpdate(unitPath) } }() } // Returns two unbuffered channels which will receive all changed units every // interval. Deleted units are sent as nil. 
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) } // SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer // size of the channels, the comparison function for detecting changes and a filter // function for cutting down on the noise that your channel receives. func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { old := make(map[string]*UnitStatus) statusChan := make(chan map[string]*UnitStatus, buffer) errChan := make(chan error, buffer) go func() { for { timerChan := time.After(interval) units, err := c.ListUnits() if err == nil { cur := make(map[string]*UnitStatus) for i := range units { if filterUnit != nil && filterUnit(units[i].Name) { continue } cur[units[i].Name] = &units[i] } // add all new or changed units changed := make(map[string]*UnitStatus) for n, u := range cur { if oldU, ok := old[n]; !ok || isChanged(oldU, u) { changed[n] = u } delete(old, n) } // add all deleted units for oldN := range old { changed[oldN] = nil } old = cur if len(changed) != 0 { statusChan <- changed } } else { errChan <- err } <-timerChan } }() return statusChan, errChan } type SubStateUpdate struct { UnitName string SubState string } // SetSubStateSubscriber writes to updateCh when any unit's substate changes. // Although this writes to updateCh on every state change, the reported state // may be more recent than the change that generated it (due to an unavoidable // race in the systemd dbus interface). That is, this method provides a good // way to keep a current view of all units' states, but is not guaranteed to // show every state transition they go through. Furthermore, state changes // will only be written to the channel with non-blocking writes. 
If updateCh // is full, it attempts to write an error to errCh; if errCh is full, the error // passes silently. func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { c.subscriber.Lock() defer c.subscriber.Unlock() c.subscriber.updateCh = updateCh c.subscriber.errCh = errCh } func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { c.subscriber.Lock() defer c.subscriber.Unlock() if c.shouldIgnore(path) { return } info, err := c.GetUnitProperties(string(path)) if err != nil { select { case c.subscriber.errCh <- err: default: } } name := info["Id"].(string) substate := info["SubState"].(string) update := &SubStateUpdate{name, substate} select { case c.subscriber.updateCh <- update: default: select { case c.subscriber.errCh <- errors.New("update channel full!"): default: } } c.updateIgnore(path, info) } // The ignore functions work around a wart in the systemd dbus interface. // Requesting the properties of an unloaded unit will cause systemd to send a // pair of UnitNew/UnitRemoved signals. Because we need to get a unit's // properties on UnitNew (as that's the only indication of a new unit coming up // for the first time), we would enter an infinite loop if we did not attempt // to detect and ignore these spurious signals. The signal themselves are // indistinguishable from relevant ones, so we (somewhat hackishly) ignore an // unloaded unit's signals for a short time after requesting its properties. // This means that we will miss e.g. a transient unit being restarted // *immediately* upon failure and also a transient unit being started // immediately after requesting its status (with systemctl status, for example, // because this causes a UnitNew signal to be sent which then causes us to fetch // the properties). 
func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { t, ok := c.subscriber.ignore[path] return ok && t >= time.Now().UnixNano() } func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { c.cleanIgnore() // unit is unloaded - it will trigger bad systemd dbus behavior if info["LoadState"].(string) == "not-found" { c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval } } // without this, ignore would grow unboundedly over time func (c *Conn) cleanIgnore() { now := time.Now().UnixNano() if c.subscriber.cleanIgnore < now { c.subscriber.cleanIgnore = now + cleanIgnoreInterval for p, t := range c.subscriber.ignore { if t < now { delete(c.subscriber.ignore, p) } } } } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go000066400000000000000000000036121267010174400274420ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dbus import ( "time" ) // SubscriptionSet returns a subscription set which is like conn.Subscribe but // can filter to only return events for a set of units. type SubscriptionSet struct { *set conn *Conn } func (s *SubscriptionSet) filter(unit string) bool { return !s.Contains(unit) } // Subscribe starts listening for dbus events for all of the units in the set. // Returns channels identical to conn.SubscribeUnits. 
func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { // TODO: Make fully evented by using systemd 209 with properties changed values return s.conn.SubscribeUnitsCustom(time.Second, 0, mismatchUnitStatus, func(unit string) bool { return s.filter(unit) }, ) } // NewSubscriptionSet returns a new subscription set. func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { return &SubscriptionSet{newSet(), conn} } // mismatchUnitStatus returns true if the provided UnitStatus objects // are not equivalent. false is returned if the objects are equivalent. // Only the Name, Description and state-related fields are used in // the comparison. func mismatchUnitStatus(u1, u2 *UnitStatus) bool { return u1.Name != u2.Name || u1.Description != u2.Description || u1.LoadState != u2.LoadState || u1.ActiveState != u2.ActiveState || u1.SubState != u2.SubState } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/journal/000077500000000000000000000000001267010174400242275ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/go-systemd/journal/journal.go000066400000000000000000000111241267010174400262270ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package journal provides write bindings to the local systemd journal. // It is implemented in pure Go and connects to the journal directly over its // unix socket. 
// // To read from the journal, see the "sdjournal" package, which wraps the // sd-journal a C API. // // http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html package journal import ( "bytes" "encoding/binary" "errors" "fmt" "io" "io/ioutil" "net" "os" "strconv" "strings" "syscall" ) // Priority of a journal message type Priority int const ( PriEmerg Priority = iota PriAlert PriCrit PriErr PriWarning PriNotice PriInfo PriDebug ) var conn net.Conn func init() { var err error conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") if err != nil { conn = nil } } // Enabled returns true if the local systemd journal is available for logging func Enabled() bool { return conn != nil } // Send a message to the local systemd journal. vars is a map of journald // fields to values. Fields must be composed of uppercase letters, numbers, // and underscores, but must not start with an underscore. Within these // restrictions, any arbitrary field name may be used. Some names have special // significance: see the journalctl documentation // (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) // for more details. vars may be nil. 
func Send(message string, priority Priority, vars map[string]string) error { if conn == nil { return journalError("could not connect to journald socket") } data := new(bytes.Buffer) appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) appendVariable(data, "MESSAGE", message) for k, v := range vars { appendVariable(data, k, v) } _, err := io.Copy(conn, data) if err != nil && isSocketSpaceError(err) { file, err := tempFd() if err != nil { return journalError(err.Error()) } _, err = io.Copy(file, data) if err != nil { return journalError(err.Error()) } rights := syscall.UnixRights(int(file.Fd())) /* this connection should always be a UnixConn, but better safe than sorry */ unixConn, ok := conn.(*net.UnixConn) if !ok { return journalError("can't send file through non-Unix connection") } unixConn.WriteMsgUnix([]byte{}, rights, nil) } else if err != nil { return journalError(err.Error()) } return nil } // Print prints a message to the local systemd journal using Send(). func Print(priority Priority, format string, a ...interface{}) error { return Send(fmt.Sprintf(format, a...), priority, nil) } func appendVariable(w io.Writer, name, value string) { if !validVarName(name) { journalError("variable name contains invalid character, ignoring") } if strings.ContainsRune(value, '\n') { /* When the value contains a newline, we write: * - the variable name, followed by a newline * - the size (in 64bit little endian format) * - the data, followed by a newline */ fmt.Fprintln(w, name) binary.Write(w, binary.LittleEndian, uint64(len(value))) fmt.Fprintln(w, value) } else { /* just write the variable and value all on one line */ fmt.Fprintf(w, "%s=%s\n", name, value) } } func validVarName(name string) bool { /* The variable name must be in uppercase and consist only of characters, * numbers and underscores, and may not begin with an underscore. 
(from the docs) */ valid := name[0] != '_' for _, c := range name { valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' } return valid } func isSocketSpaceError(err error) bool { opErr, ok := err.(*net.OpError) if !ok { return false } sysErr, ok := opErr.Err.(syscall.Errno) if !ok { return false } return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS } func tempFd() (*os.File, error) { file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") if err != nil { return nil, err } syscall.Unlink(file.Name()) if err != nil { return nil, err } return file, nil } func journalError(s string) error { s = "journal error: " + s fmt.Fprintln(os.Stderr, s) return errors.New(s) } docker-1.10.3/vendor/src/github.com/coreos/go-systemd/util/000077500000000000000000000000001267010174400235325ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/coreos/go-systemd/util/util.go000066400000000000000000000022371267010174400250420ustar00rootroot00000000000000// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package util contains utility functions related to systemd that applications // can use to check things like whether systemd is running. package util import ( "os" ) // IsRunningSystemd checks whether the host was booted with systemd as its init // system. This functions similar to systemd's `sd_booted(3)`: internally, it // checks whether /run/systemd/system/ exists and is a directory. 
// http://www.freedesktop.org/software/systemd/man/sd_booted.html func IsRunningSystemd() bool { fi, err := os.Lstat("/run/systemd/system") if err != nil { return false } return fi.IsDir() } docker-1.10.3/vendor/src/github.com/deckarep/000077500000000000000000000000001267010174400207465ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/deckarep/golang-set/000077500000000000000000000000001267010174400230065ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/deckarep/golang-set/.gitignore000066400000000000000000000003741267010174400250020ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe docker-1.10.3/vendor/src/github.com/deckarep/golang-set/.travis.yml000066400000000000000000000001251267010174400251150ustar00rootroot00000000000000language: go go: - 1.2 script: - go test ./... #- go test -race ./... docker-1.10.3/vendor/src/github.com/deckarep/golang-set/LICENSE000066400000000000000000000022111267010174400240070ustar00rootroot00000000000000Open Source Initiative OSI - The MIT License (MIT):Licensing The MIT License (MIT) Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.docker-1.10.3/vendor/src/github.com/deckarep/golang-set/README.md000066400000000000000000000110241267010174400242630ustar00rootroot00000000000000[![Build Status](https://travis-ci.org/deckarep/golang-set.png?branch=master)](https://travis-ci.org/deckarep/golang-set) [![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.png)](http://godoc.org/github.com/deckarep/golang-set) ## golang-set The missing set collection for the Go language. Until Go has sets built-in...use this. Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python. You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository and carry-on and to the rest that find this useful please contribute in helping me make it better by: * Helping to make more idiomatic improvements to the code. * Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~ * Helping to make the unit-tests more robust and kick-ass. * Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set) * Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.) 
I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang) *Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types. ## Features (as of 9/22/2014) * a CartesionProduct() method has been added with unit-tests: [Read more about the cartesion product](http://en.wikipedia.org/wiki/Cartesian_product) ## Features (as of 9/15/2014) * a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set) ## Features (as of 4/22/2014) * One common interface to both implementations * Two set implementations to choose from * a thread-safe implementation designed for concurrent use * a non-thread-safe implementation designed for performance * 75 benchmarks for both implementations * 35 unit tests for both implementations * 14 concurrent tests for the thread-safe implementation Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind however that the Python set is a built-in type and supports additional features and syntax that make it awesome. 
## Examples but not exhaustive: ```go requiredClasses := mapset.NewSet() requiredClasses.Add("Cooking") requiredClasses.Add("English") requiredClasses.Add("Math") requiredClasses.Add("Biology") scienceSlice := []interface{}{"Biology", "Chemistry"} scienceClasses := mapset.NewSetFromSlice(scienceSlice) electiveClasses := mapset.NewSet() electiveClasses.Add("Welding") electiveClasses.Add("Music") electiveClasses.Add("Automotive") bonusClasses := mapset.NewSet() bonusClasses.Add("Go Programming") bonusClasses.Add("Python Programming") //Show me all the available classes I can take allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses) fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming} //Is cooking considered a science class? fmt.Println(scienceClasses.Contains("Cooking")) //false //Show me all classes that are not science classes, since I hate science. fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding} //Which science classes are also required classes? fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology} //How many bonus classes do you offer? fmt.Println(bonusClasses.Cardinality()) //2 //Do you have the following classes? Welding, Automotive and English? fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true ``` Thanks! 
-Ralph [![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge") [![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon) docker-1.10.3/vendor/src/github.com/deckarep/golang-set/set.go000066400000000000000000000116741267010174400241410ustar00rootroot00000000000000/* Open Source Initiative OSI - The MIT License (MIT):Licensing The MIT License (MIT) Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ // Package mapset implements a simple and generic set collection. // Items stored within it are unordered and unique. It supports // typical set operations: membership testing, intersection, union, // difference, symmetric difference and cloning. // // Package mapset provides two implementations. The default // implementation is safe for concurrent access. 
There is a non-threadsafe // implementation which is slightly more performant. package mapset type Set interface { // Adds an element to the set. Returns whether // the item was added. Add(i interface{}) bool // Returns the number of elements in the set. Cardinality() int // Removes all elements from the set, leaving // the emtpy set. Clear() // Returns a clone of the set using the same // implementation, duplicating all keys. Clone() Set // Returns whether the given items // are all in the set. Contains(i ...interface{}) bool // Returns the difference between this set // and other. The returned set will contain // all elements of this set that are not also // elements of other. // // Note that the argument to Difference // must be of the same type as the receiver // of the method. Otherwise, Difference will // panic. Difference(other Set) Set // Determines if two sets are equal to each // other. If they have the same cardinality // and contain the same elements, they are // considered equal. The order in which // the elements were added is irrelevant. // // Note that the argument to Equal must be // of the same type as the receiver of the // method. Otherwise, Equal will panic. Equal(other Set) bool // Returns a new set containing only the elements // that exist only in both sets. // // Note that the argument to Intersect // must be of the same type as the receiver // of the method. Otherwise, Intersect will // panic. Intersect(other Set) Set // Determines if every element in the other set // is in this set. // // Note that the argument to IsSubset // must be of the same type as the receiver // of the method. Otherwise, IsSubset will // panic. IsSubset(other Set) bool // Determines if every element in this set is in // the other set. // // Note that the argument to IsSuperset // must be of the same type as the receiver // of the method. Otherwise, IsSuperset will // panic. IsSuperset(other Set) bool // Returns a channel of elements that you can // range over. 
Iter() <-chan interface{} // Remove a single element from the set. Remove(i interface{}) // Provides a convenient string representation // of the current state of the set. String() string // Returns a new set with all elements which are // in either this set or the other set but not in both. // // Note that the argument to SymmetricDifference // must be of the same type as the receiver // of the method. Otherwise, SymmetricDifference // will panic. SymmetricDifference(other Set) Set // Returns a new set with all elements in both sets. // // Note that the argument to Union must be of the // same type as the receiver of the method. // Otherwise, IsSuperset will panic. Union(other Set) Set // Returns all subsets of a given set (Power Set). PowerSet() Set // Returns the Cartesian Product of two sets. CartesianProduct(other Set) Set // Returns the members of the set as a slice. ToSlice() []interface{} } // Creates and returns a reference to an empty set. func NewSet() Set { set := newThreadSafeSet() return &set } // Creates and returns a reference to a set from an existing slice func NewSetFromSlice(s []interface{}) Set { a := NewSet() for _, item := range s { a.Add(item) } return a } func NewThreadUnsafeSet() Set { set := newThreadUnsafeSet() return &set } func NewThreadUnsafeSetFromSlice(s []interface{}) Set { a := NewThreadUnsafeSet() for _, item := range s { a.Add(item) } return a } docker-1.10.3/vendor/src/github.com/deckarep/golang-set/threadsafe.go000066400000000000000000000104431267010174400254450ustar00rootroot00000000000000/* Open Source Initiative OSI - The MIT License (MIT):Licensing The MIT License (MIT) Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package mapset import "sync" type threadSafeSet struct { s threadUnsafeSet sync.RWMutex } func newThreadSafeSet() threadSafeSet { return threadSafeSet{s: newThreadUnsafeSet()} } func (set *threadSafeSet) Add(i interface{}) bool { set.Lock() ret := set.s.Add(i) set.Unlock() return ret } func (set *threadSafeSet) Contains(i ...interface{}) bool { set.RLock() ret := set.s.Contains(i...) 
set.RUnlock() return ret } func (set *threadSafeSet) IsSubset(other Set) bool { o := other.(*threadSafeSet) set.RLock() o.RLock() ret := set.s.IsSubset(&o.s) set.RUnlock() o.RUnlock() return ret } func (set *threadSafeSet) IsSuperset(other Set) bool { return other.IsSubset(set) } func (set *threadSafeSet) Union(other Set) Set { o := other.(*threadSafeSet) set.RLock() o.RLock() unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet) ret := &threadSafeSet{s: *unsafeUnion} set.RUnlock() o.RUnlock() return ret } func (set *threadSafeSet) Intersect(other Set) Set { o := other.(*threadSafeSet) set.RLock() o.RLock() unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet) ret := &threadSafeSet{s: *unsafeIntersection} set.RUnlock() o.RUnlock() return ret } func (set *threadSafeSet) Difference(other Set) Set { o := other.(*threadSafeSet) set.RLock() o.RLock() unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet) ret := &threadSafeSet{s: *unsafeDifference} set.RUnlock() o.RUnlock() return ret } func (set *threadSafeSet) SymmetricDifference(other Set) Set { o := other.(*threadSafeSet) unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet) return &threadSafeSet{s: *unsafeDifference} } func (set *threadSafeSet) Clear() { set.Lock() set.s = newThreadUnsafeSet() set.Unlock() } func (set *threadSafeSet) Remove(i interface{}) { set.Lock() delete(set.s, i) set.Unlock() } func (set *threadSafeSet) Cardinality() int { set.RLock() defer set.RUnlock() return len(set.s) } func (set *threadSafeSet) Iter() <-chan interface{} { ch := make(chan interface{}) go func() { set.RLock() for elem := range set.s { ch <- elem } close(ch) set.RUnlock() }() return ch } func (set *threadSafeSet) Equal(other Set) bool { o := other.(*threadSafeSet) set.RLock() o.RLock() ret := set.s.Equal(&o.s) set.RUnlock() o.RUnlock() return ret } func (set *threadSafeSet) Clone() Set { set.RLock() unsafeClone := set.s.Clone().(*threadUnsafeSet) ret := &threadSafeSet{s: *unsafeClone} 
set.RUnlock() return ret } func (set *threadSafeSet) String() string { set.RLock() ret := set.s.String() set.RUnlock() return ret } func (set *threadSafeSet) PowerSet() Set { set.RLock() ret := set.s.PowerSet() set.RUnlock() return ret } func (set *threadSafeSet) CartesianProduct(other Set) Set { o := other.(*threadSafeSet) set.RLock() o.RLock() unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet) ret := &threadSafeSet{s: *unsafeCartProduct} set.RUnlock() o.RUnlock() return ret } func (set *threadSafeSet) ToSlice() []interface{} { set.RLock() keys := make([]interface{}, 0, set.Cardinality()) for elem := range set.s { keys = append(keys, elem) } set.RUnlock() return keys } docker-1.10.3/vendor/src/github.com/deckarep/golang-set/threadunsafe.go000066400000000000000000000122411267010174400260060ustar00rootroot00000000000000/* Open Source Initiative OSI - The MIT License (MIT):Licensing The MIT License (MIT) Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package mapset import ( "fmt" "reflect" "strings" ) type threadUnsafeSet map[interface{}]struct{} type orderedPair struct { first interface{} second interface{} } func newThreadUnsafeSet() threadUnsafeSet { return make(threadUnsafeSet) } func (pair *orderedPair) Equal(other orderedPair) bool { if pair.first == other.first && pair.second == other.second { return true } return false } func (set *threadUnsafeSet) Add(i interface{}) bool { _, found := (*set)[i] (*set)[i] = struct{}{} return !found //False if it existed already } func (set *threadUnsafeSet) Contains(i ...interface{}) bool { for _, val := range i { if _, ok := (*set)[val]; !ok { return false } } return true } func (set *threadUnsafeSet) IsSubset(other Set) bool { _ = other.(*threadUnsafeSet) for elem := range *set { if !other.Contains(elem) { return false } } return true } func (set *threadUnsafeSet) IsSuperset(other Set) bool { return other.IsSubset(set) } func (set *threadUnsafeSet) Union(other Set) Set { o := other.(*threadUnsafeSet) unionedSet := newThreadUnsafeSet() for elem := range *set { unionedSet.Add(elem) } for elem := range *o { unionedSet.Add(elem) } return &unionedSet } func (set *threadUnsafeSet) Intersect(other Set) Set { o := other.(*threadUnsafeSet) intersection := newThreadUnsafeSet() // loop over smaller set if set.Cardinality() < other.Cardinality() { for elem := range *set { if other.Contains(elem) { intersection.Add(elem) } } } else { for elem := range *o { if set.Contains(elem) { intersection.Add(elem) } } } return &intersection } func (set *threadUnsafeSet) Difference(other Set) Set { _ = other.(*threadUnsafeSet) difference := newThreadUnsafeSet() for elem := range *set { if !other.Contains(elem) { difference.Add(elem) } } 
return &difference } func (set *threadUnsafeSet) SymmetricDifference(other Set) Set { _ = other.(*threadUnsafeSet) aDiff := set.Difference(other) bDiff := other.Difference(set) return aDiff.Union(bDiff) } func (set *threadUnsafeSet) Clear() { *set = newThreadUnsafeSet() } func (set *threadUnsafeSet) Remove(i interface{}) { delete(*set, i) } func (set *threadUnsafeSet) Cardinality() int { return len(*set) } func (set *threadUnsafeSet) Iter() <-chan interface{} { ch := make(chan interface{}) go func() { for elem := range *set { ch <- elem } close(ch) }() return ch } func (set *threadUnsafeSet) Equal(other Set) bool { _ = other.(*threadUnsafeSet) if set.Cardinality() != other.Cardinality() { return false } for elem := range *set { if !other.Contains(elem) { return false } } return true } func (set *threadUnsafeSet) Clone() Set { clonedSet := newThreadUnsafeSet() for elem := range *set { clonedSet.Add(elem) } return &clonedSet } func (set *threadUnsafeSet) String() string { items := make([]string, 0, len(*set)) for elem := range *set { items = append(items, fmt.Sprintf("%v", elem)) } return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) } func (pair orderedPair) String() string { return fmt.Sprintf("(%v, %v)", pair.first, pair.second) } func (set *threadUnsafeSet) PowerSet() Set { powSet := NewThreadUnsafeSet() nullset := newThreadUnsafeSet() powSet.Add(&nullset) for es := range *set { u := newThreadUnsafeSet() j := powSet.Iter() for er := range j { p := newThreadUnsafeSet() if reflect.TypeOf(er).Name() == "" { k := er.(*threadUnsafeSet) for ek := range *(k) { p.Add(ek) } } else { p.Add(er) } p.Add(es) u.Add(&p) } powSet = powSet.Union(&u) } return powSet } func (set *threadUnsafeSet) CartesianProduct(other Set) Set { o := other.(*threadUnsafeSet) cartProduct := NewThreadUnsafeSet() for i := range *set { for j := range *o { elem := orderedPair{first: i, second: j} cartProduct.Add(elem) } } return cartProduct } func (set *threadUnsafeSet) ToSlice() []interface{} { 
keys := make([]interface{}, 0, set.Cardinality()) for elem := range *set { keys = append(keys, elem) } return keys } docker-1.10.3/vendor/src/github.com/docker/000077500000000000000000000000001267010174400204375ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/000077500000000000000000000000001267010174400231565ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/.drone.yml000066400000000000000000000016261267010174400250730ustar00rootroot00000000000000image: dmp42/go:stable script: # To be spoofed back into the test image - go get github.com/modocache/gover - go get -t ./... # Go fmt - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" # Go lint - test -z "$(golint ./... | tee /dev/stderr)" # Go vet - go vet ./... # Go test - go test -v -race -cover ./... # Helper to concatenate reports - gover # Send to coverall - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} # Do we want these as well? # - go get code.google.com/p/go.tools/cmd/goimports # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" # http://labix.org/gocheck notify: email: recipients: - distribution@docker.com slack: team: docker channel: "#dt" username: mom token: {{SLACK_TOKEN}} on_success: true on_failure: true docker-1.10.3/vendor/src/github.com/docker/distribution/.gitignore000066400000000000000000000006611267010174400251510ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof # never checkin from the bin file (for now) bin/* # Test key files *.pem # Cover profiles *.out # Editor/IDE specific files. 
*.sublime-project *.sublime-workspace docker-1.10.3/vendor/src/github.com/docker/distribution/.mailmap000066400000000000000000000021471267010174400246030ustar00rootroot00000000000000Stephen J Day Stephen Day Stephen J Day Stephen Day Olivier Gambier Olivier Gambier Brian Bland Brian Bland Josh Hawn Josh Hawn Richard Scothern Richard Richard Scothern Richard Scothern Andrew Meredith Andrew Meredith harche harche Jessie Frazelle Sharif Nassar Sharif Nassar Sven Dowideit Sven Dowideit Vincent Giersch Vincent Giersch davidli davidli docker-1.10.3/vendor/src/github.com/docker/distribution/AUTHORS000066400000000000000000000073511267010174400242340ustar00rootroot00000000000000Aaron Lehmann Aaron Vinson Adam Enger Adrian Mouat Ahmet Alp Balkan Alex Chan Alex Elman amitshukla Amy Lindburg Andrew Meredith Andrey Kostov Andy Goldstein Anton Tiurin Antonio Mercado Arnaud Porterie Arthur Baars Avi Miller Ayose Cazorla BadZen Ben Firshman bin liu Brian Bland burnettk Chris Dillon Daisuke Fujita Darren Shepherd Dave Trombley Dave Tucker David Lawrence David Verhasselt David Xia davidli Dejan Golja Derek McGowan Diogo Mónica Donald Huang Doug Davis farmerworking Florentin Raud Frederick F. 
Kautz IV harche Henri Gomez Hu Keping Hua Wang Ian Babrou Jack Griffin Jason Freidman Jeff Nickoloff Jessie Frazelle Jianqing Wang Jon Poler Jonathan Boulle Jordan Liggitt Josh Hawn Julien Fernandez Kelsey Hightower Kenneth Lim Li Yi Louis Kottmann Luke Carpenter Mary Anthony Matt Bentley Matt Moore Matt Robenolt Michael Prokop Miquel Sabaté Morgan Bauer moxiegirl Nathan Sullivan nevermosby Nghia Tran Nuutti Kotivuori Oilbeater Olivier Gambier Olivier Jacques Patrick Devine Philip Misiowiec Richard Scothern Rusty Conover Sebastiaan van Stijn Sharif Nassar Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn Spencer Rinehart Stephen J Day Sungho Moon Sven Dowideit Sylvain Baubeau Ted Reed tgic Thomas Sjögren Tianon Gravi Tibor Vass Tonis Tiigi Troels Thomsen Vincent Batts Vincent Demeester Vincent Giersch W. Trevor King xg.song xiekeyang Yann ROBERT yuzou docker-1.10.3/vendor/src/github.com/docker/distribution/CONTRIBUTING.md000066400000000000000000000163001267010174400254070ustar00rootroot00000000000000# Contributing to the registry ## Before reporting an issue... ### If your problem is with... - automated builds - your account on the [Docker Hub](https://hub.docker.com/) - any other [Docker Hub](https://hub.docker.com/) issue Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) ### If you... - need help setting up your registry - can't figure out something - are not sure what's going on or what your problem is Then please do not open an issue here yet - you should first try one of the following support forums: - irc: #docker-distribution on freenode - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution ## Reporting an issue properly By following these simple rules you will get better and faster feedback on your issue. 
- search the bugtracker for an already reported issue ### If you found an issue that describes your problem: - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - please refrain from adding "same thing here" or "+1" comments - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - comment if you have some new, technical and relevant information to add to the case - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. ### If you have not found an existing issue that describes your problem: 1. create a new issue, with a succinct title that describes your issue: - bad title: "It doesn't work with my docker" - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" 2. copy the output of: - `docker version` - `docker info` - `docker exec registry -version` 3. copy the command line you used to launch your Registry 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) 5. reproduce your problem and get your docker daemon logs showing the error 6. if relevant, copy your registry logs that show the error 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry ## Contributing a patch for a known bug, or a small correction You should follow the basic GitHub workflow: 1. fork 2. commit a change 3. make sure the tests pass 4. PR Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). 
It's very simple: - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - sign your commits using `-s`: `git commit -s -m "My commit"` Some simple rules to ensure quick merge: - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - if you need to amend your PR following comments, please squash instead of adding more commits ## Contributing new features You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. Then you should submit your implementation, clearly linking to the issue (and possible proposal). Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. It's mandatory to: - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - address maintainers' comments and modify your submission accordingly - write tests for any new code Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. Have a look at a great, succesful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443) ## Coding Style Unless explicitly stated, we follow all coding guidelines from the Go community. 
While some of these standards may seem arbitrary, they somehow seem to result in a solid, consistent codebase. It is possible that the code base does not currently comply with these guidelines. We are not looking for a massive PR that fixes this, since that goes against the spirit of the guidelines. All new contributions should make a best effort to clean up and make the code base better than they left it. Obviously, apply your best judgement. Remember, the goal here is to make the code base easier for humans to navigate and understand. Always keep that in mind when nudging others to comply. The rules: 1. All code should be formatted with `gofmt -s`. 2. All code should pass the default levels of [`golint`](https://github.com/golang/lint). 3. All code should follow the guidelines covered in [Effective Go](http://golang.org/doc/effective_go.html) and [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments). 4. Comment the code. Tell us the why, the history and the context. 5. Document _all_ declarations and methods, even private ones. Declare expectations, caveats and anything else that may be important. If a type gets exported, having the comments already there will ensure it's ready. 6. Variable name length should be proportional to its context and no longer. `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. In practice, short methods will have short variable names and globals will have longer names. 7. No underscores in package names. If you need a compound name, step back, and re-examine why you need a compound name. If you still think you need a compound name, lose the underscore. 8. No utils or helpers packages. If a function is not general enough to warrant its own package, it has not been written generally enough to be a part of a util package. Just leave it unexported and well-documented. 9. All tests should run with `go test` and outside tooling should not be required. 
No, we don't need another unit testing framework. Assertion packages are acceptable if they provide _real_ incremental value. 10. Even though we call these "rules" above, they are actually just guidelines. Since you've read all the rules, you now know that. If you are having trouble getting into the mood of idiomatic Go, we recommend reading through [Effective Go](http://golang.org/doc/effective_go.html). The [Go Blog](http://blog.golang.org/) is also a great resource. Drinking the kool-aid is a lot easier than going thirsty. docker-1.10.3/vendor/src/github.com/docker/distribution/Dockerfile000066400000000000000000000010631267010174400251500ustar00rootroot00000000000000FROM golang:1.5.3 RUN apt-get update && \ apt-get install -y librados-dev apache2-utils && \ rm -rf /var/lib/apt/lists/* ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH ENV DOCKER_BUILDTAGS include_rados include_oss include_gcs WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml RUN make PREFIX=/go clean binaries VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] CMD ["/etc/docker/registry/config.yml"] docker-1.10.3/vendor/src/github.com/docker/distribution/LICENSE000066400000000000000000000260751267010174400241750ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-1.10.3/vendor/src/github.com/docker/distribution/MAINTAINERS000066400000000000000000000025261267010174400246600ustar00rootroot00000000000000# Distribution maintainers file # # This file describes who runs the docker/distribution project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # # This file is compiled into the MAINTAINERS file in docker/opensource. # [Org] [Org."Core maintainers"] people = [ "aaronlehmann", "dmcgowan", "dmp42", "richardscothern", "shykes", "stevvooe", ] [people] # A reference list of all people associated with the project. # All other sections should refer to people by their canonical key # in the people section. 
# ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.aaronlehmann] Name = "Aaron Lehmann" Email = "aaron.lehmann@docker.com" GitHub = "aaronlehmann" [people.dmcgowan] Name = "Derek McGowan" Email = "derek@mcgstyle.net" GitHub = "dmcgowan" [people.dmp42] Name = "Olivier Gambier" Email = "olivier@docker.com" GitHub = "dmp42" [people.richardscothern] Name = "Richard Scothern" Email = "richard.scothern@gmail.com" GitHub = "richardscothern" [people.shykes] Name = "Solomon Hykes" Email = "solomon@docker.com" GitHub = "shykes" [people.stevvooe] Name = "Stephen Day" Email = "stephen.day@docker.com" GitHub = "stevvooe" docker-1.10.3/vendor/src/github.com/docker/distribution/Makefile000066400000000000000000000043551267010174400246250ustar00rootroot00000000000000# Set an output prefix, which is the local directory if not specified PREFIX?=$(shell pwd) # Used to populate version variable in main package. VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) # Allow turning off function inlining and variable registerization ifeq (${DISABLE_OPTIMIZATION},true) GO_GCFLAGS=-gcflags "-N -l" VERSION:="$(VERSION)-noopt" endif GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" .PHONY: clean all fmt vet lint build test binaries .DEFAULT: default all: AUTHORS clean fmt vet fmt lint build test binaries AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ # This only needs to be generated by hand when cutting full releases. version/version.go: ./version/version.sh > $@ ${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go') @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry ${PREFIX}/bin/digest: version/version.go $(shell find . -type f -name '*.go') @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest ${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . 
-type f -name '*.go') @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template ./bin/registry-api-descriptor-template $< > $@ # Depends on binaries because vet will silently fail if it can't load compiled # imports vet: binaries @echo "+ $@" @go vet ./... fmt: @echo "+ $@" @test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \ echo "+ please format Go code with 'gofmt -s'" lint: @echo "+ $@" @test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" build: @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ./... test: @echo "+ $@" @go test -test.short -tags "${DOCKER_BUILDTAGS}" ./... test-full: @echo "+ $@" @go test ./... binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template @echo "+ $@" clean: @echo "+ $@" @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template" docker-1.10.3/vendor/src/github.com/docker/distribution/README.md000066400000000000000000000125571267010174400244470ustar00rootroot00000000000000# Distribution The Docker toolset to pack, ship, store, and deliver content. This repository's main product is the Docker Registry 2.0 implementation for storing and distributing Docker images. It supersedes the [docker/docker-registry](https://github.com/docker/docker-registry) project with a new API design, focused around security and performance. 
[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) [![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) This repository contains the following components: |**Component** |Description | |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | | **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | | **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | ### How does this integrate with Docker engine? This project should provide an implementation to a V2 API for use in the [Docker core project](https://github.com/docker/docker). The API should be embeddable and simplify the process of securely pulling and pushing content from `docker` daemons. ### What are the long term goals of the Distribution project? The _Distribution_ project has the further long term goal of providing a secure tool chain for distributing content. The specifications, APIs and tools should be as useful with Docker as they are without. 
Our goal is to design a professional grade and extensible content distribution system that allow users to: * Enjoy an efficient, secured and reliable way to store, manage, package and exchange content * Hack/roll their own on top of healthy open-source components * Implement their own home made solution through good specs, and solid extensions mechanism. ## More about Registry 2.0 The new registry implementation provides the following benefits: - faster push and pull - new, more efficient implementation - simplified deployment - pluggable storage backend - webhook notifications For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). ### Who needs to deploy a registry? By default, Docker users pull images from Docker's public registry instance. [Installing Docker](https://docs.docker.com/engine/installation/) gives users this ability. Users can also push images to a repository on Docker's public registry, if they have a [Docker Hub](https://hub.docker.com/) account. For some users and even companies, this default behavior is sufficient. For others, it is not. For example, users with their own software products may want to maintain a registry for private, company images. Also, you may wish to deploy your own image repository for images used to test or in continuous integration. For these use cases and others, [deploying your own registry instance](docs/deploying.md) may be the better choice. ### Migration to Registry 2.0 For those who have previously deployed their own registry based on the Registry 1.0 implementation and wish to deploy a Registry 2.0 while retaining images, data migration is required. A tool to assist with migration efforts has been created. For more information see [docker/migrator] (https://github.com/docker/migrator). ## Contribute Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute issues, fixes, and patches to this project. 
If you are contributing code, see the instructions for [building a development environment](docs/building.md). ## Support If any issues are encountered while using the _Distribution_ project, several avenues are available for support:
IRC #docker-distribution on FreeNode
Issue Tracker github.com/docker/distribution/issues
Google Groups https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
Mailing List docker@dockerproject.org
## License This project is distributed under [Apache License, Version 2.0](LICENSE.md). docker-1.10.3/vendor/src/github.com/docker/distribution/ROADMAP.md000066400000000000000000000322011267010174400245610ustar00rootroot00000000000000# Roadmap The Distribution Project consists of several components, some of which are still being defined. This document defines the high-level goals of the project, identifies the current components, and defines the release- relationship to the Docker Platform. * [Distribution Goals](#distribution-goals) * [Distribution Components](#distribution-components) * [Project Planning](#project-planning): release-relationship to the Docker Platform. This road map is a living document, providing an overview of the goals and considerations made in respect of the future of the project. ## Distribution Goals - Replace the existing [docker registry](github.com/docker/docker-registry) implementation as the primary implementation. - Replace the existing push and pull code in the docker engine with the distribution package. - Define a strong data model for distributing docker images - Provide a flexible distribution tool kit for use in the docker platform - Unlock new distribution models ## Distribution Components Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming features and bugfixes for a component will be added to the relevant milestone. If a feature or bugfix is not part of a milestone, it is currently unscheduled for implementation. * [Registry](#registry) * [Distribution Package](#distribution-package) *** ### Registry The new Docker registry is the main portion of the distribution repository. Registry 2.0 is the first release of the next-generation registry. This was primarily focused on implementing the [new registry API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), with a focus on security and performance. 
Following from the Distribution project goals above, we have a set of goals for registry v2 that we would like to follow in the design. New features should be compared against these goals. #### Data Storage and Distribution First The registry's first goal is to provide a reliable, consistent storage location for Docker images. The registry should only provide the minimal amount of indexing required to fetch image data and no more. This means we should be selective in new features and API additions, including those that may require expensive, ever growing indexes. Requests should be servable in "constant time". #### Content Addressability All data objects used in the registry API should be content addressable. Content identifiers should be secure and verifiable. This provides a secure, reliable base from which to build more advanced content distribution systems. #### Content Agnostic In the past, changes to the image format would require large changes in Docker and the Registry. By decoupling the distribution and image format, we can allow the formats to progress without having to coordinate between the two. This means that we should be focused on decoupling Docker from the registry just as much as decoupling the registry from Docker. Such an approach will allow us to unlock new distribution models that haven't been possible before. We can take this further by saying that the new registry should be content agnostic. The registry provides a model of names, tags, manifests and content addresses and that model can be used to work with content. #### Simplicity The new registry should be closer to a microservice component than its predecessor. This means it should have a narrower API and a low number of service dependencies. It should be easy to deploy. This means that other solutions should be explored before changing the API or adding extra dependencies. If functionality is required, can it be added as an extension or companion service. 
#### Extensibility The registry should provide extension points to add functionality. By keeping the scope narrow, but providing the ability to add functionality. Features like search, indexing, synchronization and registry explorers fall into this category. No such feature should be added unless we've found it impossible to do through an extension. #### Active Feature Discussions The following are feature discussions that are currently active. If you don't see your favorite, unimplemented feature, feel free to contact us via IRC or the mailing list and we can talk about adding it. The goal here is to make sure that new features go through a rigid design process before landing in the registry. ##### Proxying to other Registries A _pull-through caching_ mode exists for the registry, but is restricted from within the docker client to only mirror the official Docker Hub. This functionality can be expanded when image provenance has been specified and implemented in the distribution project. ##### Metadata storage Metadata for the registry is currently stored with the manifest and layer data on the storage backend. While this is a big win for simplicity and reliably maintaining state, it comes with the cost of consistency and high latency. The mutable registry metadata operations should be abstracted behind an API which will allow ACID compliant storage systems to handle metadata. ##### Peer to Peer transfer Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit ##### Indexing, Search and Discovery The original registry provided some implementation of search for use with private registries. Support has been elided from V2 since we'd like to both decouple search functionality from the registry. The makes the registry simpler to deploy, especially in use cases where search is not needed, and let's us decouple the image format from the registry. 
There are explorations into using the catalog API and notification system to build external indexes. The current line of thought is that we will define a common search API to index and query docker images. Such a system could be run as a companion to a registry or set of registries to power discovery. The main issue with search and discovery is that there are so many ways to accomplish it. There are two aspects to this project. The first is deciding on how it will be done, including an API definition that can work with changing data formats. The second is the process of integrating with `docker search`. We expect that someone attempts to address the problem with the existing tools and propose it as a standard search API or uses it to inform a standardization process. Once this has been explored, we integrate with the docker client. Please see the following for more detail: - https://github.com/docker/distribution/issues/206 ##### Deletes > __NOTE:__ Deletes are a much asked for feature. Before requesting this feature or participating in discussion, we ask that you read this section in full and understand the problems behind deletes. While, at first glance, implementing deleting seems simple, there are a number mitigating factors that make many solutions not ideal or even pathological in the context of a registry. The following paragraph discuss the background and approaches that could be applied to a arrive at a solution. The goal of deletes in any system is to remove unused or unneeded data. Only data requested for deletion should be removed and no other data. Removing unintended data is worse than _not_ removing data that was requested for removal but ideally, both are supported. Generally, according to this rule, we err on holding data longer than needed, ensuring that it is only removed when we can be certain that it can be removed. With the current behavior, we opt to hold onto the data forever, ensuring that data cannot be incorrectly removed. 
To understand the problems with implementing deletes, one must understand the data model. All registry data is stored in a filesystem layout, implemented on a "storage driver", effectively a _virtual file system_ (VFS). The storage system must assume that this VFS layer will be eventually consistent and has poor read- after-write consistency, since this is the lower common denominator among the storage drivers. This is mitigated by writing values in reverse- dependent order, but makes wider transactional operations unsafe. Layered on the VFS model is a content-addressable _directed, acyclic graph_ (DAG) made up of blobs. Manifests reference layers. Tags reference manifests. Since the same data can be referenced by multiple manifests, we only store data once, even if it is in different repositories. Thus, we have a set of blobs, referenced by tags and manifests. If we want to delete a blob we need to be certain that it is no longer referenced by another manifest or tag. When we delete a manifest, we also can try to delete the referenced blobs. Deciding whether or not a blob has an active reference is the crux of the problem. Conceptually, deleting a manifest and its resources is quite simple. Just find all the manifests, enumerate the referenced blobs and delete the blobs not in that set. An astute observer will recognize this as a garbage collection problem. As with garbage collection in programming languages, this is very simple when one always has a consistent view. When one adds parallelism and an inconsistent view of data, it becomes very challenging. A simple example can demonstrate this. Let's say we are deleting a manifest _A_ in one process. We scan the manifest and decide that all the blobs are ready for deletion. Concurrently, we have another process accepting a new manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ is accepted and all the blobs are considered present, so the operation proceeds. 
The original process then deletes the referenced blobs, assuming they were unreferenced. The manifest _B_, which we thought had all of its data present, can no longer be served by the registry, since the dependent data has been deleted. Deleting data from the registry safely requires some way to coordinate this operation. The following approaches are being considered: - _Reference Counting_ - Maintain a count of references to each blob. This is challenging for a number of reasons: 1. maintaining a consistent consensus of reference counts across a set of Registries and 2. Building the initial list of reference counts for an existing registry. These challenges can be met with a consensus protocol like Paxos or Raft in the first case and a necessary but simple scan in the second.. - _Lock the World GC_ - Halt all writes to the data store. Walk the data store and find all blob references. Delete all unreferenced blobs. This approach is very simple but requires disabling writes for a period of time while the service reads all data. This is slow and expensive but very accurate and effective. - _Generational GC_ - Do something similar to above but instead of blocking writes, writes are sent to another storage backend while reads are broadcast to the new and old backends. GC is then performed on the read-only portion. Because writes land in the new backend, the data in the read-only section can be safely deleted. The main drawbacks of this approach are complexity and coordination. - _Centralized Oracle_ - Using a centralized, transactional database, we can know exactly which data is referenced at any given time. This avoids coordination problem by managing this data in a single location. We trade off metadata scalability for simplicity and performance. This is a very good option for most registry deployments. This would create a bottleneck for registry metadata. However, metadata is generally not the main bottleneck when serving images. 
Please let us know if other solutions exist that we have yet to enumerate. Note that for any approach, implementation is a massive consideration. For example, a mark-sweep based solution may seem simple but the amount of work in coordination offset the extra work it might take to build a _Centralized Oracle_. We'll accept proposals for any solution but please coordinate with us before dropping code. At this time, we have traded off simplicity and ease of deployment for disk space. Simplicity and ease of deployment tend to reduce developer involvement, which is currently the most expensive resource in software engineering. Taking on any solution for deletes will greatly effect these factors, trading off very cheap disk space for a complex deployment and operational story. Please see the following issues for more detail: - https://github.com/docker/distribution/issues/422 - https://github.com/docker/distribution/issues/461 - https://github.com/docker/distribution/issues/462 ### Distribution Package At its core, the Distribution Project is a set of Go packages that make up Distribution Components. At this time, most of these packages make up the Registry implementation. The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. For feature additions, please see the Registry section. In the future, we may break out a separate Roadmap for distribution-specific features that apply to more than just the registry. *** ### Project Planning An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. 
docker-1.10.3/vendor/src/github.com/docker/distribution/blobs.go000066400000000000000000000211451267010174400246110ustar00rootroot00000000000000package distribution import ( "errors" "fmt" "io" "net/http" "time" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" ) var ( // ErrBlobExists returned when blob already exists ErrBlobExists = errors.New("blob exists") // ErrBlobDigestUnsupported when blob digest is an unsupported version. ErrBlobDigestUnsupported = errors.New("unsupported blob digest") // ErrBlobUnknown when blob is not found. ErrBlobUnknown = errors.New("unknown blob") // ErrBlobUploadUnknown returned when upload is not found. ErrBlobUploadUnknown = errors.New("blob upload unknown") // ErrBlobInvalidLength returned when the blob has an expected length on // commit, meaning mismatched with the descriptor or an invalid value. ErrBlobInvalidLength = errors.New("blob invalid length") ) // ErrBlobInvalidDigest returned when digest check fails. type ErrBlobInvalidDigest struct { Digest digest.Digest Reason error } func (err ErrBlobInvalidDigest) Error() string { return fmt.Sprintf("invalid digest for referenced layer: %v, %v", err.Digest, err.Reason) } // ErrBlobMounted returned when a blob is mounted from another repository // instead of initiating an upload session. type ErrBlobMounted struct { From reference.Canonical Descriptor Descriptor } func (err ErrBlobMounted) Error() string { return fmt.Sprintf("blob mounted from: %v to: %v", err.From, err.Descriptor) } // Descriptor describes targeted content. Used in conjunction with a blob // store, a descriptor can be used to fetch, store and target any kind of // blob. The struct also describes the wire protocol format. Fields should // only be added but never changed. type Descriptor struct { // MediaType describe the type of the content. All text based formats are // encoded as utf-8. 
MediaType string `json:"mediaType,omitempty"` // Size in bytes of content. Size int64 `json:"size,omitempty"` // Digest uniquely identifies the content. A byte stream can be verified // against against this digest. Digest digest.Digest `json:"digest,omitempty"` // NOTE: Before adding a field here, please ensure that all // other options have been exhausted. Much of the type relationships // depend on the simplicity of this type. } // Descriptor returns the descriptor, to make it satisfy the Describable // interface. Note that implementations of Describable are generally objects // which can be described, not simply descriptors; this exception is in place // to make it more convenient to pass actual descriptors to functions that // expect Describable objects. func (d Descriptor) Descriptor() Descriptor { return d } // BlobStatter makes blob descriptors available by digest. The service may // provide a descriptor of a different digest if the provided digest is not // canonical. type BlobStatter interface { // Stat provides metadata about a blob identified by the digest. If the // blob is unknown to the describer, ErrBlobUnknown will be returned. Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) } // BlobDeleter enables deleting blobs from storage. type BlobDeleter interface { Delete(ctx context.Context, dgst digest.Digest) error } // BlobDescriptorService manages metadata about a blob by digest. Most // implementations will not expose such an interface explicitly. Such mappings // should be maintained by interacting with the BlobIngester. Hence, this is // left off of BlobService and BlobStore. type BlobDescriptorService interface { BlobStatter // SetDescriptor assigns the descriptor to the digest. The provided digest and // the digest in the descriptor must map to identical content but they may // differ on their algorithm. 
The descriptor must have the canonical // digest of the content and the digest algorithm must match the // annotators canonical algorithm. // // Such a facility can be used to map blobs between digest domains, with // the restriction that the algorithm of the descriptor must match the // canonical algorithm (ie sha256) of the annotator. SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error // Clear enables descriptors to be unlinked Clear(ctx context.Context, dgst digest.Digest) error } // ReadSeekCloser is the primary reader type for blob data, combining // io.ReadSeeker with io.Closer. type ReadSeekCloser interface { io.ReadSeeker io.Closer } // BlobProvider describes operations for getting blob data. type BlobProvider interface { // Get returns the entire blob identified by digest along with the descriptor. Get(ctx context.Context, dgst digest.Digest) ([]byte, error) // Open provides a ReadSeekCloser to the blob identified by the provided // descriptor. If the blob is not known to the service, an error will be // returned. Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) } // BlobServer can serve blobs via http. type BlobServer interface { // ServeBlob attempts to serve the blob, identifed by dgst, via http. The // service may decide to redirect the client elsewhere or serve the data // directly. // // This handler only issues successful responses, such as 2xx or 3xx, // meaning it serves data or issues a redirect. If the blob is not // available, an error will be returned and the caller may still issue a // response. // // The implementation may serve the same blob from a different digest // domain. The appropriate headers will be set for the blob, unless they // have already been set by the caller. ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error } // BlobIngester ingests blob data. 
type BlobIngester interface { // Put inserts the content p into the blob service, returning a descriptor // or an error. Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) // Create allocates a new blob writer to add a blob to this service. The // returned handle can be written to and later resumed using an opaque // identifier. With this approach, one can Close and Resume a BlobWriter // multiple times until the BlobWriter is committed or cancelled. Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) // Resume attempts to resume a write to a blob, identified by an id. Resume(ctx context.Context, id string) (BlobWriter, error) } // BlobCreateOption is a general extensible function argument for blob creation // methods. A BlobIngester may choose to honor any or none of the given // BlobCreateOptions, which can be specific to the implementation of the // BlobIngester receiving them. // TODO (brianbland): unify this with ManifestServiceOption in the future type BlobCreateOption interface { Apply(interface{}) error } // BlobWriter provides a handle for inserting data into a blob store. // Instances should be obtained from BlobWriteService.Writer and // BlobWriteService.Resume. If supported by the store, a writer can be // recovered with the id. type BlobWriter interface { io.WriteSeeker io.ReaderFrom io.Closer // ID returns the identifier for this writer. The ID can be used with the // Blob service to later resume the write. ID() string // StartedAt returns the time this blob write was started. StartedAt() time.Time // Commit completes the blob writer process. The content is verified // against the provided provisional descriptor, which may result in an // error. Depending on the implementation, written data may be validated // against the provisional descriptor fields. If MediaType is not present, // the implementation may reject the commit or assign "application/octet- // stream" to the blob. 
The returned descriptor may have a different // digest depending on the blob store, referred to as the canonical // descriptor. Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) // Cancel ends the blob write without storing any data and frees any // associated resources. Any data written thus far will be lost. Cancel // implementations should allow multiple calls even after a commit that // result in a no-op. This allows use of Cancel in a defer statement, // increasing the assurance that it is correctly called. Cancel(ctx context.Context) error // Get a reader to the blob being written by this BlobWriter Reader() (io.ReadCloser, error) } // BlobService combines the operations to access, read and write blobs. This // can be used to describe remote blob services. type BlobService interface { BlobStatter BlobProvider BlobIngester } // BlobStore represent the entire suite of blob related operations. Such an // implementation can access, read, write, delete and serve blobs. type BlobStore interface { BlobService BlobServer BlobDeleter } docker-1.10.3/vendor/src/github.com/docker/distribution/circle.yml000066400000000000000000000056311267010174400251470ustar00rootroot00000000000000# Pony-up! 
machine: pre: # Install gvm - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) # Install ceph to test rados driver & create pool - sudo -i ~/distribution/contrib/ceph/ci-setup.sh - ceph osd pool create docker-distribution 1 # Install codecov for coverage - pip install --user codecov post: # go - gvm install go1.5.3 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME # Trick circle brainflat "no absolute path" behavior BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR DOCKER_BUILDTAGS: "include_rados include_oss include_gcs" # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" # Ceph config RADOS_POOL: "docker-distribution" hosts: # Not used yet fancy: 127.0.0.1 dependencies: pre: # Copy the code to the gopath of all go versions - > gvm use stable && mkdir -p "$(dirname $BASE_STABLE)" && cp -R "$CHECKOUT" "$BASE_STABLE" override: # Install dependencies for every copied clone/go version - gvm use stable && go get github.com/tools/godep: pwd: $BASE_STABLE post: # For the stable go version, additionally install linting tools - > gvm use stable && go get github.com/axw/gocov/gocov github.com/golang/lint/golint test: pre: # Output the go versions we are going to test # - gvm use old && go version - gvm use stable && go version # First thing: build everything. This will catch compile errors, and it's # also necessary for go vet to work properly (see #807). - gvm use stable && godep go install ./...: pwd: $BASE_STABLE # FMT - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": pwd: $BASE_STABLE # VET - gvm use stable && go vet ./...: pwd: $BASE_STABLE # LINT - gvm use stable && test -z "$(golint ./... 
| grep -v Godeps/_workspace/src/ | tee /dev/stderr)": pwd: $BASE_STABLE override: # Test stable, and report - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': timeout: 600 pwd: $BASE_STABLE post: # Report to codecov - bash <(curl -s https://codecov.io/bash): pwd: $BASE_STABLE ## Notes # Disabled the -race detector due to massive memory usage. # Do we want these as well? # - go get code.google.com/p/go.tools/cmd/goimports # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" # http://labix.org/gocheck docker-1.10.3/vendor/src/github.com/docker/distribution/context/000077500000000000000000000000001267010174400246425ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/context/context.go000066400000000000000000000043521267010174400266610ustar00rootroot00000000000000package context import ( "sync" "github.com/docker/distribution/uuid" "golang.org/x/net/context" ) // Context is a copy of Context from the golang.org/x/net/context package. type Context interface { context.Context } // instanceContext is a context that provides only an instance id. It is // provided as the main background context. type instanceContext struct { Context id string // id of context, logged as "instance.id" once sync.Once // once protect generation of the id } func (ic *instanceContext) Value(key interface{}) interface{} { if key == "instance.id" { ic.once.Do(func() { // We want to lazy initialize the UUID such that we don't // call a random generator from the package initialization // code. 
For various reasons random could not be available // https://github.com/docker/distribution/issues/782 ic.id = uuid.Generate().String() }) return ic.id } return ic.Context.Value(key) } var background = &instanceContext{ Context: context.Background(), } // Background returns a non-nil, empty Context. The background context // provides a single key, "instance.id" that is globally unique to the // process. func Background() Context { return background } // WithValue returns a copy of parent in which the value associated with key is // val. Use context Values only for request-scoped data that transits processes // and APIs, not for passing optional parameters to functions. func WithValue(parent Context, key, val interface{}) Context { return context.WithValue(parent, key, val) } // stringMapContext is a simple context implementation that checks a map for a // key, falling back to a parent if not present. type stringMapContext struct { context.Context m map[string]interface{} } // WithValues returns a context that proxies lookups through a map. Only // supports string keys. func WithValues(ctx context.Context, m map[string]interface{}) context.Context { mo := make(map[string]interface{}, len(m)) // make our own copy. for k, v := range m { mo[k] = v } return stringMapContext{ Context: ctx, m: mo, } } func (smc stringMapContext) Value(key interface{}) interface{} { if ks, ok := key.(string); ok { if v, ok := smc.m[ks]; ok { return v } } return smc.Context.Value(key) } docker-1.10.3/vendor/src/github.com/docker/distribution/context/doc.go000066400000000000000000000075611267010174400257470ustar00rootroot00000000000000// Package context provides several utilities for working with // golang.org/x/net/context in http requests. Primarily, the focus is on // logging relevent request information but this package is not limited to // that purpose. 
// // The easiest way to get started is to get the background context: // // ctx := context.Background() // // The returned context should be passed around your application and be the // root of all other context instances. If the application has a version, this // line should be called before anything else: // // ctx := context.WithVersion(context.Background(), version) // // The above will store the version in the context and will be available to // the logger. // // Logging // // The most useful aspect of this package is GetLogger. This function takes // any context.Context interface and returns the current logger from the // context. Canonical usage looks like this: // // GetLogger(ctx).Infof("something interesting happened") // // GetLogger also takes optional key arguments. The keys will be looked up in // the context and reported with the logger. The following example would // return a logger that prints the version with each log message: // // ctx := context.Context(context.Background(), "version", version) // GetLogger(ctx, "version").Infof("this log message has a version field") // // The above would print out a log message like this: // // INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m // // When used with WithLogger, we gain the ability to decorate the context with // loggers that have information from disparate parts of the call stack. // Following from the version example, we can build a new context with the // configured logger such that we always print the version field: // // ctx = WithLogger(ctx, GetLogger(ctx, "version")) // // Since the logger has been pushed to the context, we can now get the version // field for free with our log messages. Future calls to GetLogger on the new // context will have the version field: // // GetLogger(ctx).Infof("this log message has a version field") // // This becomes more powerful when we start stacking loggers. 
Let's say we // have the version logger from above but also want a request id. Using the // context above, in our request scoped function, we place another logger in // the context: // // ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context // ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) // // When GetLogger is called on the new context, "http.request.id" will be // included as a logger field, along with the original "version" field: // // INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m // // Note that this only affects the new context, the previous context, with the // version field, can be used independently. Put another way, the new logger, // added to the request context, is unique to that context and can have // request scoped varaibles. // // HTTP Requests // // This package also contains several methods for working with http requests. // The concepts are very similar to those described above. We simply place the // request in the context using WithRequest. This makes the request variables // available. GetRequestLogger can then be called to get request specific // variables in a log line: // // ctx = WithRequest(ctx, req) // GetRequestLogger(ctx).Infof("request variables") // // Like above, if we want to include the request data in all log messages in // the context, we push the logger to a new context and use that one: // // ctx = WithLogger(ctx, GetRequestLogger(ctx)) // // The concept is fairly powerful and ensures that calls throughout the stack // can be traced in log messages. Using the fields like "http.request.id", one // can analyze call flow for a particular request with a simple grep of the // logs. 
package context docker-1.10.3/vendor/src/github.com/docker/distribution/context/http.go000066400000000000000000000216651267010174400261620ustar00rootroot00000000000000package context import ( "errors" "net" "net/http" "strings" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/uuid" "github.com/gorilla/mux" ) // Common errors used with this package. var ( ErrNoRequestContext = errors.New("no http request in context") ErrNoResponseWriterContext = errors.New("no http response in context") ) func parseIP(ipStr string) net.IP { ip := net.ParseIP(ipStr) if ip == nil { log.Warnf("invalid remote IP address: %q", ipStr) } return ip } // RemoteAddr extracts the remote address of the request, taking into // account proxy headers. func RemoteAddr(r *http.Request) string { if prior := r.Header.Get("X-Forwarded-For"); prior != "" { proxies := strings.Split(prior, ",") if len(proxies) > 0 { remoteAddr := strings.Trim(proxies[0], " ") if parseIP(remoteAddr) != nil { return remoteAddr } } } // X-Real-Ip is less supported, but worth checking in the // absence of X-Forwarded-For if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { if parseIP(realIP) != nil { return realIP } } return r.RemoteAddr } // RemoteIP extracts the remote IP of the request, taking into // account proxy headers. func RemoteIP(r *http.Request) string { addr := RemoteAddr(r) // Try parsing it as "IP:port" if ip, _, err := net.SplitHostPort(addr); err == nil { return ip } return addr } // WithRequest places the request on the context. The context of the request // is assigned a unique id, available at "http.request.id". The request itself // is available at "http.request". Other common attributes are available under // the prefix "http.request.". If a request is already present on the context, // this method will panic. func WithRequest(ctx Context, r *http.Request) Context { if ctx.Value("http.request") != nil { // NOTE(stevvooe): This needs to be considered a programming error. 
It // is unlikely that we'd want to have more than one request in // context. panic("only one request per context") } return &httpRequestContext{ Context: ctx, startedAt: time.Now(), id: uuid.Generate().String(), r: r, } } // GetRequest returns the http request in the given context. Returns // ErrNoRequestContext if the context does not have an http request associated // with it. func GetRequest(ctx Context) (*http.Request, error) { if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { return r, nil } return nil, ErrNoRequestContext } // GetRequestID attempts to resolve the current request id, if possible. An // error is return if it is not available on the context. func GetRequestID(ctx Context) string { return GetStringValue(ctx, "http.request.id") } // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { irw := instrumentedResponseWriter{ ResponseWriter: w, Context: ctx, } if closeNotifier, ok := w.(http.CloseNotifier); ok { irwCN := &instrumentedResponseWriterCN{ instrumentedResponseWriter: irw, CloseNotifier: closeNotifier, } return irwCN, irwCN } return &irw, &irw } // GetResponseWriter returns the http.ResponseWriter from the provided // context. If not present, ErrNoResponseWriterContext is returned. The // returned instance provides instrumentation in the context. func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { v := ctx.Value("http.response") rw, ok := v.(http.ResponseWriter) if !ok || rw == nil { return nil, ErrNoResponseWriterContext } return rw, nil } // getVarsFromRequest let's us change request vars implementation for testing // and maybe future changes. var getVarsFromRequest = mux.Vars // WithVars extracts gorilla/mux vars and makes them available on the returned // context. Variables are available at keys with the prefix "vars.". 
For // example, if looking for the variable "name", it can be accessed as // "vars.name". Implementations that are accessing values need not know that // the underlying context is implemented with gorilla/mux vars. func WithVars(ctx Context, r *http.Request) Context { return &muxVarsContext{ Context: ctx, vars: getVarsFromRequest(r), } } // GetRequestLogger returns a logger that contains fields from the request in // the current context. If the request is not available in the context, no // fields will display. Request loggers can safely be pushed onto the context. func GetRequestLogger(ctx Context) Logger { return GetLogger(ctx, "http.request.id", "http.request.method", "http.request.host", "http.request.uri", "http.request.referer", "http.request.useragent", "http.request.remoteaddr", "http.request.contenttype") } // GetResponseLogger reads the current response stats and builds a logger. // Because the values are read at call time, pushing a logger returned from // this function on the context will lead to missing or invalid data. Only // call this at the end of a request, after the response has been written. func GetResponseLogger(ctx Context) Logger { l := getLogrusLogger(ctx, "http.response.written", "http.response.status", "http.response.contenttype") duration := Since(ctx, "http.request.startedat") if duration > 0 { l = l.WithField("http.response.duration", duration.String()) } return l } // httpRequestContext makes information about a request available to context. type httpRequestContext struct { Context startedAt time.Time id string r *http.Request } // Value returns a keyed element of the request for use in the context. To get // the request itself, query "request". For other components, access them as // "request.". 
For example, r.RequestURI func (ctx *httpRequestContext) Value(key interface{}) interface{} { if keyStr, ok := key.(string); ok { if keyStr == "http.request" { return ctx.r } if !strings.HasPrefix(keyStr, "http.request.") { goto fallback } parts := strings.Split(keyStr, ".") if len(parts) != 3 { goto fallback } switch parts[2] { case "uri": return ctx.r.RequestURI case "remoteaddr": return RemoteAddr(ctx.r) case "method": return ctx.r.Method case "host": return ctx.r.Host case "referer": referer := ctx.r.Referer() if referer != "" { return referer } case "useragent": return ctx.r.UserAgent() case "id": return ctx.id case "startedat": return ctx.startedAt case "contenttype": ct := ctx.r.Header.Get("Content-Type") if ct != "" { return ct } } } fallback: return ctx.Context.Value(key) } type muxVarsContext struct { Context vars map[string]string } func (ctx *muxVarsContext) Value(key interface{}) interface{} { if keyStr, ok := key.(string); ok { if keyStr == "vars" { return ctx.vars } if strings.HasPrefix(keyStr, "vars.") { keyStr = strings.TrimPrefix(keyStr, "vars.") } if v, ok := ctx.vars[keyStr]; ok { return v } } return ctx.Context.Value(key) } // instrumentedResponseWriterCN provides response writer information in a // context. It implements http.CloseNotifier so that users can detect // early disconnects. type instrumentedResponseWriterCN struct { instrumentedResponseWriter http.CloseNotifier } // instrumentedResponseWriter provides response writer information in a // context. This variant is only used in the case where CloseNotifier is not // implemented by the parent ResponseWriter. type instrumentedResponseWriter struct { http.ResponseWriter Context mu sync.Mutex status int written int64 } func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { n, err = irw.ResponseWriter.Write(p) irw.mu.Lock() irw.written += int64(n) // Guess the likely status if not set. 
if irw.status == 0 { irw.status = http.StatusOK } irw.mu.Unlock() return } func (irw *instrumentedResponseWriter) WriteHeader(status int) { irw.ResponseWriter.WriteHeader(status) irw.mu.Lock() irw.status = status irw.mu.Unlock() } func (irw *instrumentedResponseWriter) Flush() { if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { flusher.Flush() } } func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { if keyStr, ok := key.(string); ok { if keyStr == "http.response" { return irw } if !strings.HasPrefix(keyStr, "http.response.") { goto fallback } parts := strings.Split(keyStr, ".") if len(parts) != 3 { goto fallback } irw.mu.Lock() defer irw.mu.Unlock() switch parts[2] { case "written": return irw.written case "status": return irw.status case "contenttype": contentType := irw.Header().Get("Content-Type") if contentType != "" { return contentType } } } fallback: return irw.Context.Value(key) } func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { if keyStr, ok := key.(string); ok { if keyStr == "http.response" { return irw } } return irw.instrumentedResponseWriter.Value(key) } docker-1.10.3/vendor/src/github.com/docker/distribution/context/logger.go000066400000000000000000000067351267010174400264630ustar00rootroot00000000000000package context import ( "fmt" "github.com/Sirupsen/logrus" "runtime" ) // Logger provides a leveled-logging interface. 
type Logger interface { // standard logger methods Print(args ...interface{}) Printf(format string, args ...interface{}) Println(args ...interface{}) Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) Fatalln(args ...interface{}) Panic(args ...interface{}) Panicf(format string, args ...interface{}) Panicln(args ...interface{}) // Leveled methods, from logrus Debug(args ...interface{}) Debugf(format string, args ...interface{}) Debugln(args ...interface{}) Error(args ...interface{}) Errorf(format string, args ...interface{}) Errorln(args ...interface{}) Info(args ...interface{}) Infof(format string, args ...interface{}) Infoln(args ...interface{}) Warn(args ...interface{}) Warnf(format string, args ...interface{}) Warnln(args ...interface{}) } // WithLogger creates a new context with provided logger. func WithLogger(ctx Context, logger Logger) Context { return WithValue(ctx, "logger", logger) } // GetLoggerWithField returns a logger instance with the specified field key // and value without affecting the context. Extra specified keys will be // resolved from the context. func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) } // GetLoggerWithFields returns a logger instance with the specified fields // without affecting the context. Extra specified keys will be resolved from // the context. func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { // must convert from interface{} -> interface{} to string -> interface{} for logrus. lfields := make(logrus.Fields, len(fields)) for key, value := range fields { lfields[fmt.Sprint(key)] = value } return getLogrusLogger(ctx, keys...).WithFields(lfields) } // GetLogger returns the logger from the current context, if present. If one // or more keys are provided, they will be resolved on the context and // included in the logger. 
While context.Value takes an interface, any key // argument passed to GetLogger will be passed to fmt.Sprint when expanded as // a logging key field. If context keys are integer constants, for example, // its recommended that a String method is implemented. func GetLogger(ctx Context, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...) } // GetLogrusLogger returns the logrus logger for the context. If one more keys // are provided, they will be resolved on the context and included in the // logger. Only use this function if specific logrus functionality is // required. func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { var logger *logrus.Entry // Get a logger, if it is present. loggerInterface := ctx.Value("logger") if loggerInterface != nil { if lgr, ok := loggerInterface.(*logrus.Entry); ok { logger = lgr } } if logger == nil { fields := logrus.Fields{} // Fill in the instance id, if we have it. instanceID := ctx.Value("instance.id") if instanceID != nil { fields["instance.id"] = instanceID } fields["go.version"] = runtime.Version() // If no logger is found, just return the standard logger. logger = logrus.StandardLogger().WithFields(fields) } fields := logrus.Fields{} for _, key := range keys { v := ctx.Value(key) if v != nil { fields[fmt.Sprint(key)] = v } } return logger.WithFields(fields) } docker-1.10.3/vendor/src/github.com/docker/distribution/context/trace.go000066400000000000000000000054521267010174400262750ustar00rootroot00000000000000package context import ( "runtime" "time" "github.com/docker/distribution/uuid" ) // WithTrace allocates a traced timing span in a new context. This allows a // caller to track the time between calling WithTrace and the returned done // function. When the done function is called, a log message is emitted with a // "trace.duration" field, corresponding to the elapased time and a // "trace.func" field, corresponding to the function that called WithTrace. 
// // The logging keys "trace.id" and "trace.parent.id" are provided to implement // dapper-like tracing. This function should be complemented with a WithSpan // method that could be used for tracing distributed RPC calls. // // The main benefit of this function is to post-process log messages or // intercept them in a hook to provide timing data. Trace ids and parent ids // can also be linked to provide call tracing, if so required. // // Here is an example of the usage: // // func timedOperation(ctx Context) { // ctx, done := WithTrace(ctx) // defer done("this will be the log message") // // ... function body ... // } // // If the function ran for roughly 1s, such a usage would emit a log message // as follows: // // INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... // // Notice that the function name is automatically resolved, along with the // package and a trace id is emitted that can be linked with parent ids. func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { if ctx == nil { ctx = Background() } pc, file, line, _ := runtime.Caller(1) f := runtime.FuncForPC(pc) ctx = &traced{ Context: ctx, id: uuid.Generate().String(), start: time.Now(), parent: GetStringValue(ctx, "trace.id"), fnname: f.Name(), file: file, line: line, } return ctx, func(format string, a ...interface{}) { GetLogger(ctx, "trace.duration", "trace.id", "trace.parent.id", "trace.func", "trace.file", "trace.line"). Debugf(format, a...) } } // traced represents a context that is traced for function call timing. It // also provides fast lookup for the various attributes that are available on // the trace. 
type traced struct { Context id string parent string start time.Time fnname string file string line int } func (ts *traced) Value(key interface{}) interface{} { switch key { case "trace.start": return ts.start case "trace.duration": return time.Since(ts.start) case "trace.id": return ts.id case "trace.parent.id": if ts.parent == "" { return nil // must return nil to signal no parent. } return ts.parent case "trace.func": return ts.fnname case "trace.file": return ts.file case "trace.line": return ts.line } return ts.Context.Value(key) } docker-1.10.3/vendor/src/github.com/docker/distribution/context/util.go000066400000000000000000000014321267010174400261460ustar00rootroot00000000000000package context import ( "time" ) // Since looks up key, which should be a time.Time, and returns the duration // since that time. If the key is not found, the value returned will be zero. // This is helpful when inferring metrics related to context execution times. func Since(ctx Context, key interface{}) time.Duration { startedAtI := ctx.Value(key) if startedAtI != nil { if startedAt, ok := startedAtI.(time.Time); ok { return time.Since(startedAt) } } return 0 } // GetStringValue returns a string value from the context. The empty string // will be returned if not found. func GetStringValue(ctx Context, key interface{}) (value string) { stringi := ctx.Value(key) if stringi != nil { if valuev, ok := stringi.(string); ok { value = valuev } } return value } docker-1.10.3/vendor/src/github.com/docker/distribution/context/version.go000066400000000000000000000011121267010174400266510ustar00rootroot00000000000000package context // WithVersion stores the application version in the context. The new context // gets a logger to ensure log messages are marked with the application // version. 
func WithVersion(ctx Context, version string) Context { ctx = WithValue(ctx, "version", version) // push a new logger onto the stack return WithLogger(ctx, GetLogger(ctx, "version")) } // GetVersion returns the application version from the context. An empty // string may returned if the version was not set on the context. func GetVersion(ctx Context) string { return GetStringValue(ctx, "version") } docker-1.10.3/vendor/src/github.com/docker/distribution/coverpkg.sh000077500000000000000000000006241267010174400253370ustar00rootroot00000000000000#!/usr/bin/env bash # Given a subpackage and the containing package, figures out which packages # need to be passed to `go test -coverpkg`: this includes all of the # subpackage's dependencies within the containing package, as well as the # subpackage itself. DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2})" echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' docker-1.10.3/vendor/src/github.com/docker/distribution/digest/000077500000000000000000000000001267010174400244355ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/digest/digest.go000066400000000000000000000074561267010174400262570ustar00rootroot00000000000000package digest import ( "fmt" "hash" "io" "regexp" "strings" ) const ( // DigestSha256EmptyTar is the canonical sha256 digest of empty data DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) // Digest allows simple protection of hex formatted digest strings, prefixed // by their algorithm. Strings of type Digest have some guarantee of being in // the correct format and it provides quick access to the components of a // digest string. // // The following is an example of the contents of Digest types: // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // // This allows to abstract the digest behind this type and work only in those // terms. 
type Digest string // NewDigest returns a Digest from alg and a hash.Hash object. func NewDigest(alg Algorithm, h hash.Hash) Digest { return NewDigestFromBytes(alg, h.Sum(nil)) } // NewDigestFromBytes returns a new digest from the byte contents of p. // Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) // functions. This is also useful for rebuilding digests from binary // serializations. func NewDigestFromBytes(alg Algorithm, p []byte) Digest { return Digest(fmt.Sprintf("%s:%x", alg, p)) } // NewDigestFromHex returns a Digest from alg and a the hex encoded digest. func NewDigestFromHex(alg, hex string) Digest { return Digest(fmt.Sprintf("%s:%s", alg, hex)) } // DigestRegexp matches valid digest types. var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) // DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) var ( // ErrDigestInvalidFormat returned when digest format invalid. ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") // ErrDigestInvalidLength returned when digest has invalid length. ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") // ErrDigestUnsupported returned when the digest algorithm is unsupported. ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") ) // ParseDigest parses s and returns the validated digest object. An error will // be returned if the format is invalid. func ParseDigest(s string) (Digest, error) { d := Digest(s) return d, d.Validate() } // FromReader returns the most valid digest for the underlying content using // the canonical digest algorithm. func FromReader(rd io.Reader) (Digest, error) { return Canonical.FromReader(rd) } // FromBytes digests the input and returns a Digest. 
func FromBytes(p []byte) Digest { return Canonical.FromBytes(p) } // Validate checks that the contents of d is a valid digest, returning an // error if not. func (d Digest) Validate() error { s := string(d) if !DigestRegexpAnchored.MatchString(s) { return ErrDigestInvalidFormat } i := strings.Index(s, ":") if i < 0 { return ErrDigestInvalidFormat } // case: "sha256:" with no hex. if i+1 == len(s) { return ErrDigestInvalidFormat } switch algorithm := Algorithm(s[:i]); algorithm { case SHA256, SHA384, SHA512: if algorithm.Size()*2 != len(s[i+1:]) { return ErrDigestInvalidLength } break default: return ErrDigestUnsupported } return nil } // Algorithm returns the algorithm portion of the digest. This will panic if // the underlying digest is not in a valid format. func (d Digest) Algorithm() Algorithm { return Algorithm(d[:d.sepIndex()]) } // Hex returns the hex digest portion of the digest. This will panic if the // underlying digest is not in a valid format. func (d Digest) Hex() string { return string(d[d.sepIndex()+1:]) } func (d Digest) String() string { return string(d) } func (d Digest) sepIndex() int { i := strings.Index(string(d), ":") if i < 0 { panic("could not find ':' in digest: " + d) } return i } docker-1.10.3/vendor/src/github.com/docker/distribution/digest/digester.go000066400000000000000000000104311267010174400265710ustar00rootroot00000000000000package digest import ( "crypto" "fmt" "hash" "io" ) // Algorithm identifies and implementation of a digester by an identifier. // Note the that this defines both the hash algorithm used and the string // encoding. type Algorithm string // supported digest types const ( SHA256 Algorithm = "sha256" // sha256 with hex encoding SHA384 Algorithm = "sha384" // sha384 with hex encoding SHA512 Algorithm = "sha512" // sha512 with hex encoding // Canonical is the primary digest algorithm used with the distribution // project. Other digests may be used but this one is the primary storage // digest. 
Canonical = SHA256 ) var ( // TODO(stevvooe): Follow the pattern of the standard crypto package for // registration of digests. Effectively, we are a registerable set and // common symbol access. // algorithms maps values to hash.Hash implementations. Other algorithms // may be available but they cannot be calculated by the digest package. algorithms = map[Algorithm]crypto.Hash{ SHA256: crypto.SHA256, SHA384: crypto.SHA384, SHA512: crypto.SHA512, } ) // Available returns true if the digest type is available for use. If this // returns false, New and Hash will return nil. func (a Algorithm) Available() bool { h, ok := algorithms[a] if !ok { return false } // check availability of the hash, as well return h.Available() } func (a Algorithm) String() string { return string(a) } // Size returns number of bytes returned by the hash. func (a Algorithm) Size() int { h, ok := algorithms[a] if !ok { return 0 } return h.Size() } // Set implemented to allow use of Algorithm as a command line flag. func (a *Algorithm) Set(value string) error { if value == "" { *a = Canonical } else { // just do a type conversion, support is queried with Available. *a = Algorithm(value) } return nil } // New returns a new digester for the specified algorithm. If the algorithm // does not have a digester implementation, nil will be returned. This can be // checked by calling Available before calling New. func (a Algorithm) New() Digester { return &digester{ alg: a, hash: a.Hash(), } } // Hash returns a new hash as used by the algorithm. If not available, the // method will panic. Check Algorithm.Available() before calling. func (a Algorithm) Hash() hash.Hash { if !a.Available() { // NOTE(stevvooe): A missing hash is usually a programming error that // must be resolved at compile time. We don't import in the digest // package to allow users to choose their hash implementation (such as // when using stevvooe/resumable or a hardware accelerated package). 
// // Applications that may want to resolve the hash at runtime should // call Algorithm.Available before call Algorithm.Hash(). panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) } return algorithms[a].New() } // FromReader returns the digest of the reader using the algorithm. func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { digester := a.New() if _, err := io.Copy(digester.Hash(), rd); err != nil { return "", err } return digester.Digest(), nil } // FromBytes digests the input and returns a Digest. func (a Algorithm) FromBytes(p []byte) Digest { digester := a.New() if _, err := digester.Hash().Write(p); err != nil { // Writes to a Hash should never fail. None of the existing // hash implementations in the stdlib or hashes vendored // here can return errors from Write. Having a panic in this // condition instead of having FromBytes return an error value // avoids unnecessary error handling paths in all callers. panic("write to hash function returned error: " + err.Error()) } return digester.Digest() } // TODO(stevvooe): Allow resolution of verifiers using the digest type and // this registration system. // Digester calculates the digest of written data. Writes should go directly // to the return value of Hash, while calling Digest will return the current // value of the digest. type Digester interface { Hash() hash.Hash // provides direct access to underlying hash instance. Digest() Digest } // digester provides a simple digester definition that embeds a hasher. type digester struct { alg Algorithm hash hash.Hash } func (d *digester) Hash() hash.Hash { return d.hash } func (d *digester) Digest() Digest { return NewDigest(d.alg, d.hash) } docker-1.10.3/vendor/src/github.com/docker/distribution/digest/doc.go000066400000000000000000000031401267010174400255270ustar00rootroot00000000000000// Package digest provides a generalized type to opaquely represent message // digests and their operations within the registry. 
The Digest type is // designed to serve as a flexible identifier in a content-addressable system. // More importantly, it provides tools and wrappers to work with // hash.Hash-based digests with little effort. // // Basics // // The format of a digest is simply a string with two parts, dubbed the // "algorithm" and the "digest", separated by a colon: // // : // // An example of a sha256 digest representation follows: // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // // In this case, the string "sha256" is the algorithm and the hex bytes are // the "digest". // // Because the Digest type is simply a string, once a valid Digest is // obtained, comparisons are cheap, quick and simple to express with the // standard equality operator. // // Verification // // The main benefit of using the Digest type is simple verification against a // given digest. The Verifier interface, modeled after the stdlib hash.Hash // interface, provides a common write sink for digest verification. After // writing is complete, calling the Verifier.Verified method will indicate // whether or not the stream of bytes matches the target digest. // // Missing Features // // In addition to the above, we intend to add the following features to this // package: // // 1. A Digester type that supports write sink digest calculation. // // 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. // package digest docker-1.10.3/vendor/src/github.com/docker/distribution/digest/set.go000066400000000000000000000147221267010174400255650ustar00rootroot00000000000000package digest import ( "errors" "sort" "strings" "sync" ) var ( // ErrDigestNotFound is used when a matching digest // could not be found in a set. ErrDigestNotFound = errors.New("digest not found") // ErrDigestAmbiguous is used when multiple digests // are found in a set. None of the matching digests // should be considered valid matches. 
ErrDigestAmbiguous = errors.New("ambiguous digest string") ) // Set is used to hold a unique set of digests which // may be easily referenced by easily referenced by a string // representation of the digest as well as short representation. // The uniqueness of the short representation is based on other // digests in the set. If digests are ommited from this set, // collisions in a larger set may not be detected, therefore it // is important to always do short representation lookups on // the complete set of digests. To mitigate collisions, an // appropriately long short code should be used. type Set struct { mutex sync.RWMutex entries digestEntries } // NewSet creates an empty set of digests // which may have digests added. func NewSet() *Set { return &Set{ entries: digestEntries{}, } } // checkShortMatch checks whether two digests match as either whole // values or short values. This function does not test equality, // rather whether the second value could match against the first // value. func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { if len(hex) == len(shortHex) { if hex != shortHex { return false } if len(shortAlg) > 0 && string(alg) != shortAlg { return false } } else if !strings.HasPrefix(hex, shortHex) { return false } else if len(shortAlg) > 0 && string(alg) != shortAlg { return false } return true } // Lookup looks for a digest matching the given string representation. // If no digests could be found ErrDigestNotFound will be returned // with an empty digest value. If multiple matches are found // ErrDigestAmbiguous will be returned with an empty digest value. 
func (dst *Set) Lookup(d string) (Digest, error) { dst.mutex.RLock() defer dst.mutex.RUnlock() if len(dst.entries) == 0 { return "", ErrDigestNotFound } var ( searchFunc func(int) bool alg Algorithm hex string ) dgst, err := ParseDigest(d) if err == ErrDigestInvalidFormat { hex = d searchFunc = func(i int) bool { return dst.entries[i].val >= d } } else { hex = dgst.Hex() alg = dgst.Algorithm() searchFunc = func(i int) bool { if dst.entries[i].val == hex { return dst.entries[i].alg >= alg } return dst.entries[i].val >= hex } } idx := sort.Search(len(dst.entries), searchFunc) if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { return "", ErrDigestNotFound } if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { return dst.entries[idx].digest, nil } if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { return "", ErrDigestAmbiguous } return dst.entries[idx].digest, nil } // Add adds the given digest to the set. An error will be returned // if the given digest is invalid. If the digest already exists in the // set, this operation will be a no-op. func (dst *Set) Add(d Digest) error { if err := d.Validate(); err != nil { return err } dst.mutex.Lock() defer dst.mutex.Unlock() entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} searchFunc := func(i int) bool { if dst.entries[i].val == entry.val { return dst.entries[i].alg >= entry.alg } return dst.entries[i].val >= entry.val } idx := sort.Search(len(dst.entries), searchFunc) if idx == len(dst.entries) { dst.entries = append(dst.entries, entry) return nil } else if dst.entries[idx].digest == d { return nil } entries := append(dst.entries, nil) copy(entries[idx+1:], entries[idx:len(entries)-1]) entries[idx] = entry dst.entries = entries return nil } // Remove removes the given digest from the set. An err will be // returned if the given digest is invalid. 
If the digest does // not exist in the set, this operation will be a no-op. func (dst *Set) Remove(d Digest) error { if err := d.Validate(); err != nil { return err } dst.mutex.Lock() defer dst.mutex.Unlock() entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} searchFunc := func(i int) bool { if dst.entries[i].val == entry.val { return dst.entries[i].alg >= entry.alg } return dst.entries[i].val >= entry.val } idx := sort.Search(len(dst.entries), searchFunc) // Not found if idx is after or value at idx is not digest if idx == len(dst.entries) || dst.entries[idx].digest != d { return nil } entries := dst.entries copy(entries[idx:], entries[idx+1:]) entries = entries[:len(entries)-1] dst.entries = entries return nil } // All returns all the digests in the set func (dst *Set) All() []Digest { dst.mutex.RLock() defer dst.mutex.RUnlock() retValues := make([]Digest, len(dst.entries)) for i := range dst.entries { retValues[i] = dst.entries[i].digest } return retValues } // ShortCodeTable returns a map of Digest to unique short codes. The // length represents the minimum value, the maximum length may be the // entire value of digest if uniqueness cannot be achieved without the // full value. This function will attempt to make short codes as short // as possible to be unique. 
func ShortCodeTable(dst *Set, length int) map[Digest]string { dst.mutex.RLock() defer dst.mutex.RUnlock() m := make(map[Digest]string, len(dst.entries)) l := length resetIdx := 0 for i := 0; i < len(dst.entries); i++ { var short string extended := true for extended { extended = false if len(dst.entries[i].val) <= l { short = dst.entries[i].digest.String() } else { short = dst.entries[i].val[:l] for j := i + 1; j < len(dst.entries); j++ { if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { if j > resetIdx { resetIdx = j } extended = true } else { break } } if extended { l++ } } } m[dst.entries[i].digest] = short if i >= resetIdx { l = length } } return m } type digestEntry struct { alg Algorithm val string digest Digest } type digestEntries []*digestEntry func (d digestEntries) Len() int { return len(d) } func (d digestEntries) Less(i, j int) bool { if d[i].val != d[j].val { return d[i].val < d[j].val } return d[i].alg < d[j].alg } func (d digestEntries) Swap(i, j int) { d[i], d[j] = d[j], d[i] } docker-1.10.3/vendor/src/github.com/docker/distribution/digest/verifiers.go000066400000000000000000000017661267010174400267740ustar00rootroot00000000000000package digest import ( "hash" "io" ) // Verifier presents a general verification interface to be used with message // digests and other byte stream verifications. Users instantiate a Verifier // from one of the various methods, write the data under test to it then check // the result with the Verified method. type Verifier interface { io.Writer // Verified will return true if the content written to Verifier matches // the digest. Verified() bool } // NewDigestVerifier returns a verifier that compares the written bytes // against a passed in digest. 
func NewDigestVerifier(d Digest) (Verifier, error) { if err := d.Validate(); err != nil { return nil, err } return hashVerifier{ hash: d.Algorithm().Hash(), digest: d, }, nil } type hashVerifier struct { digest Digest hash hash.Hash } func (hv hashVerifier) Write(p []byte) (n int, err error) { return hv.hash.Write(p) } func (hv hashVerifier) Verified() bool { return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) } docker-1.10.3/vendor/src/github.com/docker/distribution/doc.go000066400000000000000000000004661267010174400242600ustar00rootroot00000000000000// Package distribution will define the interfaces for the components of // docker distribution. The goal is to allow users to reliably package, ship // and store content related to docker images. // // This is currently a work in progress. More details are available in the // README.md. package distribution docker-1.10.3/vendor/src/github.com/docker/distribution/errors.go000066400000000000000000000061271267010174400250270ustar00rootroot00000000000000package distribution import ( "errors" "fmt" "strings" "github.com/docker/distribution/digest" ) // ErrManifestNotModified is returned when a conditional manifest GetByTag // returns nil due to the client indicating it has the latest version var ErrManifestNotModified = errors.New("manifest not modified") // ErrUnsupported is returned when an unimplemented or unsupported action is // performed var ErrUnsupported = errors.New("operation unsupported") // ErrTagUnknown is returned if the given tag is not known by the tag service type ErrTagUnknown struct { Tag string } func (err ErrTagUnknown) Error() string { return fmt.Sprintf("unknown tag=%s", err.Tag) } // ErrRepositoryUnknown is returned if the named repository is not known by // the registry. 
type ErrRepositoryUnknown struct { Name string } func (err ErrRepositoryUnknown) Error() string { return fmt.Sprintf("unknown repository name=%s", err.Name) } // ErrRepositoryNameInvalid should be used to denote an invalid repository // name. Reason may set, indicating the cause of invalidity. type ErrRepositoryNameInvalid struct { Name string Reason error } func (err ErrRepositoryNameInvalid) Error() string { return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) } // ErrManifestUnknown is returned if the manifest is not known by the // registry. type ErrManifestUnknown struct { Name string Tag string } func (err ErrManifestUnknown) Error() string { return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) } // ErrManifestUnknownRevision is returned when a manifest cannot be found by // revision within a repository. type ErrManifestUnknownRevision struct { Name string Revision digest.Digest } func (err ErrManifestUnknownRevision) Error() string { return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) } // ErrManifestUnverified is returned when the registry is unable to verify // the manifest. type ErrManifestUnverified struct{} func (ErrManifestUnverified) Error() string { return fmt.Sprintf("unverified manifest") } // ErrManifestVerification provides a type to collect errors encountered // during manifest verification. Currently, it accepts errors of all types, // but it may be narrowed to those involving manifest verification. type ErrManifestVerification []error func (errs ErrManifestVerification) Error() string { var parts []string for _, err := range errs { parts = append(parts, err.Error()) } return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) } // ErrManifestBlobUnknown returned when a referenced blob cannot be found. 
type ErrManifestBlobUnknown struct { Digest digest.Digest } func (err ErrManifestBlobUnknown) Error() string { return fmt.Sprintf("unknown blob %v on manifest", err.Digest) } // ErrManifestNameInvalid should be used to denote an invalid manifest // name. Reason may set, indicating the cause of invalidity. type ErrManifestNameInvalid struct { Name string Reason error } func (err ErrManifestNameInvalid) Error() string { return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/000077500000000000000000000000001267010174400247645ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/doc.go000066400000000000000000000000211267010174400260510ustar00rootroot00000000000000package manifest docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/manifestlist/000077500000000000000000000000001267010174400274665ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go000066400000000000000000000105731267010174400325250ustar00rootroot00000000000000package manifestlist import ( "encoding/json" "errors" "fmt" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" ) // MediaTypeManifestList specifies the mediaType for manifest lists. const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. 
var SchemaVersion = manifest.Versioned{ SchemaVersion: 2, MediaType: MediaTypeManifestList, } func init() { manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { m := new(DeserializedManifestList) err := m.UnmarshalJSON(b) if err != nil { return nil, distribution.Descriptor{}, err } dgst := digest.FromBytes(b) return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err } err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } // PlatformSpec specifies a platform where a particular image manifest is // applicable. type PlatformSpec struct { // Architecture field specifies the CPU architecture, for example // `amd64` or `ppc64`. Architecture string `json:"architecture"` // OS specifies the operating system, for example `linux` or `windows`. OS string `json:"os"` // Variant is an optional field specifying a variant of the CPU, for // example `ppc64le` to specify a little-endian version of a PowerPC CPU. Variant string `json:"variant,omitempty"` // Features is an optional field specifuing an array of strings, each // listing a required CPU feature (for example `sse4` or `aes`). Features []string `json:"features,omitempty"` } // A ManifestDescriptor references a platform-specific manifest. type ManifestDescriptor struct { distribution.Descriptor // Platform specifies which platform the manifest pointed to by the // descriptor runs on. Platform PlatformSpec `json:"platform"` } // ManifestList references manifests for various platforms. type ManifestList struct { manifest.Versioned // Config references the image configuration as a blob. Manifests []ManifestDescriptor `json:"manifests"` } // References returnes the distribution descriptors for the referenced image // manifests. 
func (m ManifestList) References() []distribution.Descriptor { dependencies := make([]distribution.Descriptor, len(m.Manifests)) for i := range m.Manifests { dependencies[i] = m.Manifests[i].Descriptor } return dependencies } // DeserializedManifestList wraps ManifestList with a copy of the original // JSON. type DeserializedManifestList struct { ManifestList // canonical is the canonical byte representation of the Manifest. canonical []byte } // FromDescriptors takes a slice of descriptors, and returns a // DeserializedManifestList which contains the resulting manifest list // and its JSON representation. func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { m := ManifestList{ Versioned: SchemaVersion, } m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) copy(m.Manifests, descriptors) deserialized := DeserializedManifestList{ ManifestList: m, } var err error deserialized.canonical, err = json.MarshalIndent(&m, "", " ") return &deserialized, err } // UnmarshalJSON populates a new ManifestList struct from JSON data. func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { m.canonical = make([]byte, len(b), len(b)) // store manifest list in canonical copy(m.canonical, b) // Unmarshal canonical JSON into ManifestList object var manifestList ManifestList if err := json.Unmarshal(m.canonical, &manifestList); err != nil { return err } m.ManifestList = manifestList return nil } // MarshalJSON returns the contents of canonical. If canonical is empty, // marshals the inner contents. func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { if len(m.canonical) > 0 { return m.canonical, nil } return nil, errors.New("JSON representation not initialized in DeserializedManifestList") } // Payload returns the raw content of the manifest list. The contents can be // used to calculate the content identifier. 
func (m DeserializedManifestList) Payload() (string, []byte, error) { return m.MediaType, m.canonical, nil } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema1/000077500000000000000000000000001267010174400263055ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go000066400000000000000000000202711267010174400316110ustar00rootroot00000000000000package schema1 import ( "crypto/sha512" "encoding/json" "errors" "fmt" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/libtrust" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" ) type diffID digest.Digest // gzippedEmptyTar is a gzip-compressed version of an empty tar file // (1024 NULL bytes) var gzippedEmptyTar = []byte{ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, } // digestSHA256GzippedEmptyTar is the canonical sha256 digest of // gzippedEmptyTar const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") // configManifestBuilder is a type for constructing manifests from an image // configuration and generic descriptors. type configManifestBuilder struct { // bs is a BlobService used to create empty layer tars in the // blob store if necessary. bs distribution.BlobService // pk is the libtrust private key used to sign the final manifest. pk libtrust.PrivateKey // configJSON is configuration supplied when the ManifestBuilder was // created. configJSON []byte // name is the name provided to NewConfigManifestBuilder name string // tag is the tag provided to NewConfigManifestBuilder tag string // descriptors is the set of descriptors referencing the layers. descriptors []distribution.Descriptor // emptyTarDigest is set to a valid digest if an empty tar has been // put in the blob store; otherwise it is empty. 
emptyTarDigest digest.Digest } // NewConfigManifestBuilder is used to build new manifests for the current // schema version from an image configuration and a set of descriptors. // It takes a BlobService so that it can add an empty tar to the blob store // if the resulting manifest needs empty layers. func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, name, tag string, configJSON []byte) distribution.ManifestBuilder { return &configManifestBuilder{ bs: bs, pk: pk, configJSON: configJSON, name: name, tag: tag, } } // Build produces a final manifest from the given references func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { type imageRootFS struct { Type string `json:"type"` DiffIDs []diffID `json:"diff_ids,omitempty"` BaseLayer string `json:"base_layer,omitempty"` } type imageHistory struct { Created time.Time `json:"created"` Author string `json:"author,omitempty"` CreatedBy string `json:"created_by,omitempty"` Comment string `json:"comment,omitempty"` EmptyLayer bool `json:"empty_layer,omitempty"` } type imageConfig struct { RootFS *imageRootFS `json:"rootfs,omitempty"` History []imageHistory `json:"history,omitempty"` Architecture string `json:"architecture,omitempty"` } var img imageConfig if err := json.Unmarshal(mb.configJSON, &img); err != nil { return nil, err } if len(img.History) == 0 { return nil, errors.New("empty history when trying to create schema1 manifest") } if len(img.RootFS.DiffIDs) != len(mb.descriptors) { return nil, errors.New("number of descriptors and number of layers in rootfs must match") } // Generate IDs for each layer // For non-top-level layers, create fake V1Compatibility strings that // fit the format and don't collide with anything else, but don't // result in runnable images on their own. 
type v1Compatibility struct { ID string `json:"id"` Parent string `json:"parent,omitempty"` Comment string `json:"comment,omitempty"` Created time.Time `json:"created"` ContainerConfig struct { Cmd []string } `json:"container_config,omitempty"` ThrowAway bool `json:"throwaway,omitempty"` } fsLayerList := make([]FSLayer, len(img.History)) history := make([]History, len(img.History)) parent := "" layerCounter := 0 for i, h := range img.History[:len(img.History)-1] { var blobsum digest.Digest if h.EmptyLayer { if blobsum, err = mb.emptyTar(ctx); err != nil { return nil, err } } else { if len(img.RootFS.DiffIDs) <= layerCounter { return nil, errors.New("too many non-empty layers in History section") } blobsum = mb.descriptors[layerCounter].Digest layerCounter++ } v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() if i == 0 && img.RootFS.BaseLayer != "" { // windows-only baselayer setup baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) parent = fmt.Sprintf("%x", baseID[:32]) } v1Compatibility := v1Compatibility{ ID: v1ID, Parent: parent, Comment: h.Comment, Created: h.Created, } v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} if h.EmptyLayer { v1Compatibility.ThrowAway = true } jsonBytes, err := json.Marshal(&v1Compatibility) if err != nil { return nil, err } reversedIndex := len(img.History) - i - 1 history[reversedIndex].V1Compatibility = string(jsonBytes) fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} parent = v1ID } latestHistory := img.History[len(img.History)-1] var blobsum digest.Digest if latestHistory.EmptyLayer { if blobsum, err = mb.emptyTar(ctx); err != nil { return nil, err } } else { if len(img.RootFS.DiffIDs) <= layerCounter { return nil, errors.New("too many non-empty layers in History section") } blobsum = mb.descriptors[layerCounter].Digest } fsLayerList[0] = FSLayer{BlobSum: blobsum} dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) // Top-level 
v1compatibility string should be a modified version of the // image config. transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) if err != nil { return nil, err } history[0].V1Compatibility = string(transformedConfig) mfst := Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: mb.name, Tag: mb.tag, Architecture: img.Architecture, FSLayers: fsLayerList, History: history, } return Sign(&mfst, mb.pk) } // emptyTar pushes a compressed empty tar to the blob store if one doesn't // already exist, and returns its blobsum. func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { if mb.emptyTarDigest != "" { // Already put an empty tar return mb.emptyTarDigest, nil } descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) switch err { case nil: mb.emptyTarDigest = descriptor.Digest return descriptor.Digest, nil case distribution.ErrBlobUnknown: // nop default: return "", err } // Add gzipped empty tar to the blob store descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) if err != nil { return "", err } mb.emptyTarDigest = descriptor.Digest return descriptor.Digest, nil } // AppendReference adds a reference to the current ManifestBuilder func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { // todo: verification here? mb.descriptors = append(mb.descriptors, d.Descriptor()) return nil } // References returns the current references added to this builder func (mb *configManifestBuilder) References() []distribution.Descriptor { return mb.descriptors } // MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { // Top-level v1compatibility string should be a modified version of the // image config. 
var configAsMap map[string]*json.RawMessage if err := json.Unmarshal(configJSON, &configAsMap); err != nil { return nil, err } // Delete fields that didn't exist in old manifest delete(configAsMap, "rootfs") delete(configAsMap, "history") configAsMap["id"] = rawJSON(v1ID) if parentV1ID != "" { configAsMap["parent"] = rawJSON(parentV1ID) } if throwaway { configAsMap["throwaway"] = rawJSON(true) } return json.Marshal(configAsMap) } func rawJSON(value interface{}) *json.RawMessage { jsonval, err := json.Marshal(value) if err != nil { return nil } return (*json.RawMessage)(&jsonval) } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go000066400000000000000000000127551267010174400304540ustar00rootroot00000000000000package schema1 import ( "encoding/json" "fmt" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" ) const ( // MediaTypeManifest specifies the mediaType for the current version. Note // that for schema version 1, the the media is optionally "application/json". MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" // MediaTypeManifestLayer specifies the media type for manifest layers MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" ) var ( // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. 
SchemaVersion = manifest.Versioned{ SchemaVersion: 1, } ) func init() { schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { sm := new(SignedManifest) err := sm.UnmarshalJSON(b) if err != nil { return nil, distribution.Descriptor{}, err } desc := distribution.Descriptor{ Digest: digest.FromBytes(sm.Canonical), Size: int64(len(sm.Canonical)), MediaType: MediaTypeSignedManifest, } return sm, desc, err } err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } err = distribution.RegisterManifestSchema("", schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } err = distribution.RegisterManifestSchema("application/json", schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } // FSLayer is a container struct for BlobSums defined in an image manifest type FSLayer struct { // BlobSum is the tarsum of the referenced filesystem image layer BlobSum digest.Digest `json:"blobSum"` } // History stores unstructured v1 compatibility information type History struct { // V1Compatibility is the raw v1 compatibility information V1Compatibility string `json:"v1Compatibility"` } // Manifest provides the base accessible fields for working with V2 image // format in the registry. 
type Manifest struct { manifest.Versioned // Name is the name of the image's repository Name string `json:"name"` // Tag is the tag of the image specified by this manifest Tag string `json:"tag"` // Architecture is the host architecture on which this image is intended to // run Architecture string `json:"architecture"` // FSLayers is a list of filesystem layer blobSums contained in this image FSLayers []FSLayer `json:"fsLayers"` // History is a list of unstructured historical data for v1 compatibility History []History `json:"history"` } // SignedManifest provides an envelope for a signed image manifest, including // the format sensitive raw bytes. type SignedManifest struct { Manifest // Canonical is the canonical byte representation of the ImageManifest, // without any attached signatures. The manifest byte // representation cannot change or it will have to be re-signed. Canonical []byte `json:"-"` // all contains the byte representation of the Manifest including signatures // and is retuend by Payload() all []byte } // UnmarshalJSON populates a new SignedManifest struct from JSON data. func (sm *SignedManifest) UnmarshalJSON(b []byte) error { sm.all = make([]byte, len(b), len(b)) // store manifest and signatures in all copy(sm.all, b) jsig, err := libtrust.ParsePrettySignature(b, "signatures") if err != nil { return err } // Resolve the payload in the manifest. 
bytes, err := jsig.Payload() if err != nil { return err } // sm.Canonical stores the canonical manifest JSON sm.Canonical = make([]byte, len(bytes), len(bytes)) copy(sm.Canonical, bytes) // Unmarshal canonical JSON into Manifest object var manifest Manifest if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { return err } sm.Manifest = manifest return nil } // References returnes the descriptors of this manifests references func (sm SignedManifest) References() []distribution.Descriptor { dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) for i, fsLayer := range sm.FSLayers { dependencies[i] = distribution.Descriptor{ MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", Digest: fsLayer.BlobSum, } } return dependencies } // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner // contents. Applications requiring a marshaled signed manifest should simply // use Raw directly, since the the content produced by json.Marshal will be // compacted and will fail signature checks. func (sm *SignedManifest) MarshalJSON() ([]byte, error) { if len(sm.all) > 0 { return sm.all, nil } // If the raw data is not available, just dump the inner content. return json.Marshal(&sm.Manifest) } // Payload returns the signed content of the signed manifest. func (sm SignedManifest) Payload() (string, []byte, error) { return MediaTypeSignedManifest, sm.all, nil } // Signatures returns the signatures as provided by // (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws // signatures. func (sm *SignedManifest) Signatures() ([][]byte, error) { jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") if err != nil { return nil, err } // Resolve the payload in the manifest. 
return jsig.Signatures() } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go000066400000000000000000000051531267010174400323040ustar00rootroot00000000000000package schema1 import ( "fmt" "errors" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" ) // referenceManifestBuilder is a type for constructing manifests from schema1 // dependencies. type referenceManifestBuilder struct { Manifest pk libtrust.PrivateKey } // NewReferenceManifestBuilder is used to build new manifests for the current // schema version using schema1 dependencies. func NewReferenceManifestBuilder(pk libtrust.PrivateKey, name, tag, architecture string) distribution.ManifestBuilder { return &referenceManifestBuilder{ Manifest: Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: name, Tag: tag, Architecture: architecture, }, pk: pk, } } func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { m := mb.Manifest if len(m.FSLayers) == 0 { return nil, errors.New("cannot build manifest with zero layers or history") } m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) m.History = make([]History, len(mb.Manifest.History)) copy(m.FSLayers, mb.Manifest.FSLayers) copy(m.History, mb.Manifest.History) return Sign(&m, mb.pk) } // AppendReference adds a reference to the current ManifestBuilder func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { r, ok := d.(Reference) if !ok { return fmt.Errorf("Unable to add non-reference type to v1 builder") } // Entries need to be prepended mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) 
return nil } // References returns the current references added to this builder func (mb *referenceManifestBuilder) References() []distribution.Descriptor { refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) for i := range mb.Manifest.FSLayers { layerDigest := mb.Manifest.FSLayers[i].BlobSum history := mb.Manifest.History[i] ref := Reference{layerDigest, 0, history} refs[i] = ref.Descriptor() } return refs } // Reference describes a manifest v2, schema version 1 dependency. // An FSLayer associated with a history entry. type Reference struct { Digest digest.Digest Size int64 // if we know it, set it for the descriptor. History History } // Descriptor describes a reference func (r Reference) Descriptor() distribution.Descriptor { return distribution.Descriptor{ MediaType: MediaTypeManifestLayer, Digest: r.Digest, Size: r.Size, } } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go000066400000000000000000000026451267010174400276030ustar00rootroot00000000000000package schema1 import ( "crypto/x509" "encoding/json" "github.com/docker/libtrust" ) // Sign signs the manifest with the provided private key, returning a // SignedManifest. This typically won't be used within the registry, except // for testing. func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { p, err := json.MarshalIndent(m, "", " ") if err != nil { return nil, err } js, err := libtrust.NewJSONSignature(p) if err != nil { return nil, err } if err := js.Sign(pk); err != nil { return nil, err } pretty, err := js.PrettySignature("signatures") if err != nil { return nil, err } return &SignedManifest{ Manifest: *m, all: pretty, Canonical: p, }, nil } // SignWithChain signs the manifest with the given private key and x509 chain. // The public key of the first element in the chain must be the public key // corresponding with the sign key. 
func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { p, err := json.MarshalIndent(m, "", " ") if err != nil { return nil, err } js, err := libtrust.NewJSONSignature(p) if err != nil { return nil, err } if err := js.SignWithChain(key, chain); err != nil { return nil, err } pretty, err := js.PrettySignature("signatures") if err != nil { return nil, err } return &SignedManifest{ Manifest: *m, all: pretty, Canonical: p, }, nil } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go000066400000000000000000000015541267010174400301450ustar00rootroot00000000000000package schema1 import ( "crypto/x509" "github.com/Sirupsen/logrus" "github.com/docker/libtrust" ) // Verify verifies the signature of the signed manifest returning the public // keys used during signing. func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { js, err := libtrust.ParsePrettySignature(sm.all, "signatures") if err != nil { logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") return nil, err } return js.Verify() } // VerifyChains verifies the signature of the signed manifest against the // certificate pool returning the list of verified chains. Signatures without // an x509 chain are not checked. 
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { js, err := libtrust.ParsePrettySignature(sm.all, "signatures") if err != nil { return nil, err } return js.VerifyChains(ca) } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema2/000077500000000000000000000000001267010174400263065ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go000066400000000000000000000036231267010174400302670ustar00rootroot00000000000000package schema2 import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) // builder is a type for constructing manifests. type builder struct { // bs is a BlobService used to publish the configuration blob. bs distribution.BlobService // configJSON references configJSON []byte // layers is a list of layer descriptors that gets built by successive // calls to AppendReference. layers []distribution.Descriptor } // NewManifestBuilder is used to build new manifests for the current schema // version. It takes a BlobService so it can publish the configuration blob // as part of the Build process. func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { mb := &builder{ bs: bs, configJSON: make([]byte, len(configJSON)), } copy(mb.configJSON, configJSON) return mb } // Build produces a final manifest from the given references. 
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ Versioned: SchemaVersion, Layers: make([]distribution.Descriptor, len(mb.layers)), } copy(m.Layers, mb.layers) configDigest := digest.FromBytes(mb.configJSON) var err error m.Config, err = mb.bs.Stat(ctx, configDigest) switch err { case nil: return FromStruct(m) case distribution.ErrBlobUnknown: // nop default: return nil, err } // Add config to the blob store m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) if err != nil { return nil, err } return FromStruct(m) } // AppendReference adds a reference to the current ManifestBuilder. func (mb *builder) AppendReference(d distribution.Describable) error { mb.layers = append(mb.layers, d.Descriptor()) return nil } // References returns the current references added to this builder. func (mb *builder) References() []distribution.Descriptor { return mb.layers } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go000066400000000000000000000070171267010174400304500ustar00rootroot00000000000000package schema2 import ( "encoding/json" "errors" "fmt" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" ) const ( // MediaTypeManifest specifies the mediaType for the current version. MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" // MediaTypeConfig specifies the mediaType for the image configuration. MediaTypeConfig = "application/vnd.docker.container.image.v1+json" // MediaTypeLayer is the mediaType used for layers referenced by the // manifest. MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" ) var ( // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. 
SchemaVersion = manifest.Versioned{ SchemaVersion: 2, MediaType: MediaTypeManifest, } ) func init() { schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { m := new(DeserializedManifest) err := m.UnmarshalJSON(b) if err != nil { return nil, distribution.Descriptor{}, err } dgst := digest.FromBytes(b) return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err } err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } // Manifest defines a schema2 manifest. type Manifest struct { manifest.Versioned // Config references the image configuration as a blob. Config distribution.Descriptor `json:"config"` // Layers lists descriptors for the layers referenced by the // configuration. Layers []distribution.Descriptor `json:"layers"` } // References returnes the descriptors of this manifests references. func (m Manifest) References() []distribution.Descriptor { return m.Layers } // Target returns the target of this signed manifest. func (m Manifest) Target() distribution.Descriptor { return m.Config } // DeserializedManifest wraps Manifest with a copy of the original JSON. // It satisfies the distribution.Manifest interface. type DeserializedManifest struct { Manifest // canonical is the canonical byte representation of the Manifest. canonical []byte } // FromStruct takes a Manifest structure, marshals it to JSON, and returns a // DeserializedManifest which contains the manifest and its JSON representation. func FromStruct(m Manifest) (*DeserializedManifest, error) { var deserialized DeserializedManifest deserialized.Manifest = m var err error deserialized.canonical, err = json.MarshalIndent(&m, "", " ") return &deserialized, err } // UnmarshalJSON populates a new Manifest struct from JSON data. 
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { m.canonical = make([]byte, len(b), len(b)) // store manifest in canonical copy(m.canonical, b) // Unmarshal canonical JSON into Manifest object var manifest Manifest if err := json.Unmarshal(m.canonical, &manifest); err != nil { return err } m.Manifest = manifest return nil } // MarshalJSON returns the contents of canonical. If canonical is empty, // marshals the inner contents. func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { if len(m.canonical) > 0 { return m.canonical, nil } return nil, errors.New("JSON representation not initialized in DeserializedManifest") } // Payload returns the raw content of the manifest. The contents can be used to // calculate the content identifier. func (m DeserializedManifest) Payload() (string, []byte, error) { return m.MediaType, m.canonical, nil } docker-1.10.3/vendor/src/github.com/docker/distribution/manifest/versioned.go000066400000000000000000000006561267010174400273200ustar00rootroot00000000000000package manifest // Versioned provides a struct with the manifest schemaVersion and . Incoming // content with unknown schema version can be decoded against this struct to // check the version. type Versioned struct { // SchemaVersion is the image manifest schema that this image follows SchemaVersion int `json:"schemaVersion"` // MediaType is the media type of this schema. MediaType string `json:"mediaType,omitempty"` } docker-1.10.3/vendor/src/github.com/docker/distribution/manifests.go000066400000000000000000000100441267010174400254750ustar00rootroot00000000000000package distribution import ( "fmt" "mime" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) // Manifest represents a registry object specifying a set of // references and an optional target type Manifest interface { // References returns a list of objects which make up this manifest. // The references are strictly ordered from base to head. 
A reference // is anything which can be represented by a distribution.Descriptor References() []Descriptor // Payload provides the serialized format of the manifest, in addition to // the mediatype. Payload() (mediatype string, payload []byte, err error) } // ManifestBuilder creates a manifest allowing one to include dependencies. // Instances can be obtained from a version-specific manifest package. Manifest // specific data is passed into the function which creates the builder. type ManifestBuilder interface { // Build creates the manifest from his builder. Build(ctx context.Context) (Manifest, error) // References returns a list of objects which have been added to this // builder. The dependencies are returned in the order they were added, // which should be from base to head. References() []Descriptor // AppendReference includes the given object in the manifest after any // existing dependencies. If the add fails, such as when adding an // unsupported dependency, an error may be returned. AppendReference(dependency Describable) error } // ManifestService describes operations on image manifests. type ManifestService interface { // Exists returns true if the manifest exists. Exists(ctx context.Context, dgst digest.Digest) (bool, error) // Get retrieves the manifest specified by the given digest Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) // Put creates or updates the given manifest returning the manifest digest Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) // Delete removes the manifest specified by the given digest. Deleting // a manifest that doesn't exist will return ErrManifestNotFound Delete(ctx context.Context, dgst digest.Digest) error // Enumerate fills 'manifests' with the manifests in this service up // to the size of 'manifests' and returns 'n' for the number of entries // which were filled. 
'last' contains an offset in the manifest set // and can be used to resume iteration. //Enumerate(ctx context.Context, manifests []Manifest, last Manifest) (n int, err error) } // Describable is an interface for descriptors type Describable interface { Descriptor() Descriptor } // ManifestMediaTypes returns the supported media types for manifests. func ManifestMediaTypes() (mediaTypes []string) { for t := range mappings { if t != "" { mediaTypes = append(mediaTypes, t) } } return } // UnmarshalFunc implements manifest unmarshalling a given MediaType type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) var mappings = make(map[string]UnmarshalFunc, 0) // UnmarshalManifest looks up manifest unmarshall functions based on // MediaType func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { // Need to look up by the actual media type, not the raw contents of // the header. Strip semicolons and anything following them. var mediatype string if ctHeader != "" { var err error mediatype, _, err = mime.ParseMediaType(ctHeader) if err != nil { return nil, Descriptor{}, err } } unmarshalFunc, ok := mappings[mediatype] if !ok { unmarshalFunc, ok = mappings[""] if !ok { return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) } } return unmarshalFunc(p) } // RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
This // should be called from specific func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { if _, ok := mappings[mediatype]; ok { return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) } mappings[mediatype] = u return nil } docker-1.10.3/vendor/src/github.com/docker/distribution/reference/000077500000000000000000000000001267010174400251145ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/reference/reference.go000066400000000000000000000205731267010174400274100ustar00rootroot00000000000000// Package reference provides a general type to represent any way of referencing images within the registry. // Its main purpose is to abstract tags and digests (content-addressable hash). // // Grammar // // reference := repository [ ":" tag ] [ "@" digest ] // name := [hostname '/'] component ['/' component]* // hostname := hostcomponent ['.' hostcomponent]* [':' port-number] // hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex // digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value package reference import ( "errors" "fmt" "github.com/docker/distribution/digest" ) const ( // NameTotalLengthMax is the maximum total number of characters in a repository name. NameTotalLengthMax = 255 ) var ( // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. ErrReferenceInvalidFormat = errors.New("invalid reference format") // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. 
ErrTagInvalidFormat = errors.New("invalid tag format") // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. ErrDigestInvalidFormat = errors.New("invalid digest format") // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) ) // Reference is an opaque object reference identifier that may include // modifiers such as a hostname, name, tag, and digest. type Reference interface { // String returns the full reference String() string } // Field provides a wrapper type for resolving correct reference types when // working with encoding. type Field struct { reference Reference } // AsField wraps a reference in a Field for encoding. func AsField(reference Reference) Field { return Field{reference} } // Reference unwraps the reference type from the field to // return the Reference object. This object should be // of the appropriate type to further check for different // reference types. func (f Field) Reference() Reference { return f.reference } // MarshalText serializes the field to byte text which // is the string of the reference. func (f Field) MarshalText() (p []byte, err error) { return []byte(f.reference.String()), nil } // UnmarshalText parses text bytes by invoking the // reference parser to ensure the appropriately // typed reference object is wrapped by field. func (f *Field) UnmarshalText(p []byte) error { r, err := Parse(string(p)) if err != nil { return err } f.reference = r return nil } // Named is an object with a full name type Named interface { Reference Name() string } // Tagged is an object which has a tag type Tagged interface { Reference Tag() string } // NamedTagged is an object including a name and tag. 
type NamedTagged interface { Named Tag() string } // Digested is an object which has a digest // in which it can be referenced by type Digested interface { Reference Digest() digest.Digest } // Canonical reference is an object with a fully unique // name including a name with hostname and digest type Canonical interface { Named Digest() digest.Digest } // SplitHostname splits a named reference into a // hostname and name string. If no valid hostname is // found, the hostname is empty and the full value // is returned as name func SplitHostname(named Named) (string, string) { name := named.Name() match := anchoredNameRegexp.FindStringSubmatch(name) if match == nil || len(match) != 3 { return "", name } return match[1], match[2] } // Parse parses s and returns a syntactically valid Reference. // If an error was encountered it is returned, along with a nil Reference. // NOTE: Parse will not handle short digests. func Parse(s string) (Reference, error) { matches := ReferenceRegexp.FindStringSubmatch(s) if matches == nil { if s == "" { return nil, ErrNameEmpty } // TODO(dmcgowan): Provide more specific and helpful error return nil, ErrReferenceInvalidFormat } if len(matches[1]) > NameTotalLengthMax { return nil, ErrNameTooLong } ref := reference{ name: matches[1], tag: matches[2], } if matches[3] != "" { var err error ref.digest, err = digest.ParseDigest(matches[3]) if err != nil { return nil, err } } r := getBestReferenceType(ref) if r == nil { return nil, ErrNameEmpty } return r, nil } // ParseNamed parses s and returns a syntactically valid reference implementing // the Named interface. The reference must have a name, otherwise an error is // returned. // If an error was encountered it is returned, along with a nil Reference. // NOTE: ParseNamed will not handle short digests. 
func ParseNamed(s string) (Named, error) { ref, err := Parse(s) if err != nil { return nil, err } named, isNamed := ref.(Named) if !isNamed { return nil, fmt.Errorf("reference %s has no name", ref.String()) } return named, nil } // WithName returns a named object representing the given string. If the input // is invalid ErrReferenceInvalidFormat will be returned. func WithName(name string) (Named, error) { if len(name) > NameTotalLengthMax { return nil, ErrNameTooLong } if !anchoredNameRegexp.MatchString(name) { return nil, ErrReferenceInvalidFormat } return repository(name), nil } // WithTag combines the name from "name" and the tag from "tag" to form a // reference incorporating both the name and the tag. func WithTag(name Named, tag string) (NamedTagged, error) { if !anchoredTagRegexp.MatchString(tag) { return nil, ErrTagInvalidFormat } return taggedReference{ name: name.Name(), tag: tag, }, nil } // WithDigest combines the name from "name" and the digest from "digest" to form // a reference incorporating both the name and the digest. 
func WithDigest(name Named, digest digest.Digest) (Canonical, error) { if !anchoredDigestRegexp.MatchString(digest.String()) { return nil, ErrDigestInvalidFormat } return canonicalReference{ name: name.Name(), digest: digest, }, nil } func getBestReferenceType(ref reference) Reference { if ref.name == "" { // Allow digest only references if ref.digest != "" { return digestReference(ref.digest) } return nil } if ref.tag == "" { if ref.digest != "" { return canonicalReference{ name: ref.name, digest: ref.digest, } } return repository(ref.name) } if ref.digest == "" { return taggedReference{ name: ref.name, tag: ref.tag, } } return ref } type reference struct { name string tag string digest digest.Digest } func (r reference) String() string { return r.name + ":" + r.tag + "@" + r.digest.String() } func (r reference) Name() string { return r.name } func (r reference) Tag() string { return r.tag } func (r reference) Digest() digest.Digest { return r.digest } type repository string func (r repository) String() string { return string(r) } func (r repository) Name() string { return string(r) } type digestReference digest.Digest func (d digestReference) String() string { return d.String() } func (d digestReference) Digest() digest.Digest { return digest.Digest(d) } type taggedReference struct { name string tag string } func (t taggedReference) String() string { return t.name + ":" + t.tag } func (t taggedReference) Name() string { return t.name } func (t taggedReference) Tag() string { return t.tag } type canonicalReference struct { name string digest digest.Digest } func (c canonicalReference) String() string { return c.name + "@" + c.digest.String() } func (c canonicalReference) Name() string { return c.name } func (c canonicalReference) Digest() digest.Digest { return c.digest } docker-1.10.3/vendor/src/github.com/docker/distribution/reference/regexp.go000066400000000000000000000105231267010174400267360ustar00rootroot00000000000000package reference import "regexp" var ( 
// alphaNumericRegexp defines the alpha numeric atom, typically a // component of names. This only allows lower case characters and digits. alphaNumericRegexp = match(`[a-z0-9]+`) // separatorRegexp defines the separators allowed to be embedded in name // components. This allow one period, one or two underscore and multiple // dashes. separatorRegexp = match(`(?:[._]|__|[-]*)`) // nameComponentRegexp restricts registry path component names to start // with at least one letter or number, with following parts able to be // separated by one period, one or two underscore and multiple dashes. nameComponentRegexp = expression( alphaNumericRegexp, optional(repeated(separatorRegexp, alphaNumericRegexp))) // hostnameComponentRegexp restricts the registry hostname component of a // repository name to start with a component as defined by hostnameRegexp // and followed by an optional port. hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) // hostnameRegexp defines the structure of potential hostname components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. hostnameRegexp = expression( hostnameComponentRegexp, optional(repeated(literal(`.`), hostnameComponentRegexp)), optional(literal(`:`), match(`[0-9]+`))) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. TagRegexp = match(`[\w][\w.-]{0,127}`) // anchoredTagRegexp matches valid tag names, anchored at the start and // end of the matched string. anchoredTagRegexp = anchored(TagRegexp) // DigestRegexp matches valid digests. DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) // anchoredDigestRegexp matches valid digests, anchored at the start and // end of the matched string. anchoredDigestRegexp = anchored(DigestRegexp) // NameRegexp is the format for the name component of references. 
The // regexp has capturing groups for the hostname and name part omitting // the seperating forward slash from either. NameRegexp = expression( optional(hostnameRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the // hostname and trailing components. anchoredNameRegexp = anchored( optional(capture(hostnameRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) // ReferenceRegexp is the full supported format of a reference. The regexp // is anchored and has capturing groups for name, tag, and digest // components. ReferenceRegexp = anchored(capture(NameRegexp), optional(literal(":"), capture(TagRegexp)), optional(literal("@"), capture(DigestRegexp))) ) // match compiles the string to a regular expression. var match = regexp.MustCompile // literal compiles s into a literal regular expression, escaping any regexp // reserved characters. func literal(s string) *regexp.Regexp { re := match(regexp.QuoteMeta(s)) if _, complete := re.LiteralPrefix(); !complete { panic("must be a literal") } return re } // expression defines a full expression, where each regular expression must // follow the previous. func expression(res ...*regexp.Regexp) *regexp.Regexp { var s string for _, re := range res { s += re.String() } return match(s) } // optional wraps the expression in a non-capturing group and makes the // production optional. func optional(res ...*regexp.Regexp) *regexp.Regexp { return match(group(expression(res...)).String() + `?`) } // repeated wraps the regexp in a non-capturing group to get one or more // matches. func repeated(res ...*regexp.Regexp) *regexp.Regexp { return match(group(expression(res...)).String() + `+`) } // group wraps the regexp in a non-capturing group. 
func group(res ...*regexp.Regexp) *regexp.Regexp { return match(`(?:` + expression(res...).String() + `)`) } // capture wraps the expression in a capturing group. func capture(res ...*regexp.Regexp) *regexp.Regexp { return match(`(` + expression(res...).String() + `)`) } // anchored anchors the regular expression by adding start and end delimiters. func anchored(res ...*regexp.Regexp) *regexp.Regexp { return match(`^` + expression(res...).String() + `$`) } docker-1.10.3/vendor/src/github.com/docker/distribution/registry.go000066400000000000000000000050601267010174400253560ustar00rootroot00000000000000package distribution import ( "github.com/docker/distribution/context" ) // Scope defines the set of items that match a namespace. type Scope interface { // Contains returns true if the name belongs to the namespace. Contains(name string) bool } type fullScope struct{} func (f fullScope) Contains(string) bool { return true } // GlobalScope represents the full namespace scope which contains // all other scopes. var GlobalScope = Scope(fullScope{}) // Namespace represents a collection of repositories, addressable by name. // Generally, a namespace is backed by a set of one or more services, // providing facilities such as registry access, trust, and indexing. type Namespace interface { // Scope describes the names that can be used with this Namespace. The // global namespace will have a scope that matches all names. The scope // effectively provides an identity for the namespace. Scope() Scope // Repository should return a reference to the named repository. The // registry may or may not have the repository but should always return a // reference. Repository(ctx context.Context, name string) (Repository, error) // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories // up to the size of 'repos' and returns the value 'n' for the number of entries // which were filled. 
'last' contains an offset in the catalog, and 'err' will be // set to io.EOF if there are no more entries to obtain. Repositories(ctx context.Context, repos []string, last string) (n int, err error) } // ManifestServiceOption is a function argument for Manifest Service methods type ManifestServiceOption interface { Apply(ManifestService) error } // Repository is a named collection of manifests and layers. type Repository interface { // Name returns the name of the repository. Name() string // Manifests returns a reference to this repository's manifest service. // with the supplied options applied. Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) // Blobs returns a reference to this repository's blob service. Blobs(ctx context.Context) BlobStore // TODO(stevvooe): The above BlobStore return can probably be relaxed to // be a BlobService for use with clients. This will allow such // implementations to avoid implementing ServeBlob. // Tags returns a reference to this repositories tag service Tags(ctx context.Context) TagService } // TODO(stevvooe): Must add close methods to all these. May want to change the // way instances are created to better reflect internal dependency // relationships. 
docker-1.10.3/vendor/src/github.com/docker/distribution/registry/000077500000000000000000000000001267010174400250265ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/000077500000000000000000000000001267010174400255775ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/errcode/000077500000000000000000000000001267010174400272225ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go000066400000000000000000000150411267010174400310660ustar00rootroot00000000000000package errcode import ( "encoding/json" "fmt" "strings" ) // ErrorCoder is the base interface for ErrorCode and Error allowing // users of each to just call ErrorCode to get the real ID of each type ErrorCoder interface { ErrorCode() ErrorCode } // ErrorCode represents the error type. The errors are serialized via strings // and the integer format may change and should *never* be exported. type ErrorCode int var _ error = ErrorCode(0) // ErrorCode just returns itself func (ec ErrorCode) ErrorCode() ErrorCode { return ec } // Error returns the ID/Value func (ec ErrorCode) Error() string { // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) } // Descriptor returns the descriptor for the error code. func (ec ErrorCode) Descriptor() ErrorDescriptor { d, ok := errorCodeToDescriptors[ec] if !ok { return ErrorCodeUnknown.Descriptor() } return d } // String returns the canonical identifier for this error code. func (ec ErrorCode) String() string { return ec.Descriptor().Value } // Message returned the human-readable error message for this error code. func (ec ErrorCode) Message() string { return ec.Descriptor().Message } // MarshalText encodes the receiver into UTF-8-encoded text and returns the // result. 
func (ec ErrorCode) MarshalText() (text []byte, err error) { return []byte(ec.String()), nil } // UnmarshalText decodes the form generated by MarshalText. func (ec *ErrorCode) UnmarshalText(text []byte) error { desc, ok := idToDescriptors[string(text)] if !ok { desc = ErrorCodeUnknown.Descriptor() } *ec = desc.Code return nil } // WithMessage creates a new Error struct based on the passed-in info and // overrides the Message property. func (ec ErrorCode) WithMessage(message string) Error { return Error{ Code: ec, Message: message, } } // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately func (ec ErrorCode) WithDetail(detail interface{}) Error { return Error{ Code: ec, Message: ec.Message(), }.WithDetail(detail) } // WithArgs creates a new Error struct and sets the Args slice func (ec ErrorCode) WithArgs(args ...interface{}) Error { return Error{ Code: ec, Message: ec.Message(), }.WithArgs(args...) } // Error provides a wrapper around ErrorCode with extra Details provided. type Error struct { Code ErrorCode `json:"code"` Message string `json:"message"` Detail interface{} `json:"detail,omitempty"` // TODO(duglin): See if we need an "args" property so we can do the // variable substitution right before showing the message to the user } var _ error = Error{} // ErrorCode returns the ID/Value of this Error func (e Error) ErrorCode() ErrorCode { return e.Code } // Error returns a human readable representation of the error. 
func (e Error) Error() string { return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) } // WithDetail will return a new Error, based on the current one, but with // some Detail info added func (e Error) WithDetail(detail interface{}) Error { return Error{ Code: e.Code, Message: e.Message, Detail: detail, } } // WithArgs uses the passed-in list of interface{} as the substitution // variables in the Error's Message string, but returns a new Error func (e Error) WithArgs(args ...interface{}) Error { return Error{ Code: e.Code, Message: fmt.Sprintf(e.Code.Message(), args...), Detail: e.Detail, } } // ErrorDescriptor provides relevant information about a given error code. type ErrorDescriptor struct { // Code is the error code that this descriptor describes. Code ErrorCode // Value provides a unique, string key, often captilized with // underscores, to identify the error code. This value is used as the // keyed value when serializing api errors. Value string // Message is a short, human readable decription of the error condition // included in API responses. Message string // Description provides a complete account of the errors purpose, suitable // for use in documentation. Description string // HTTPStatusCode provides the http status code that is associated with // this error condition. HTTPStatusCode int } // ParseErrorCode returns the value by the string error code. // `ErrorCodeUnknown` will be returned if the error is not known. func ParseErrorCode(value string) ErrorCode { ed, ok := idToDescriptors[value] if ok { return ed.Code } return ErrorCodeUnknown } // Errors provides the envelope for multiple errors and a few sugar methods // for use within the application. type Errors []error var _ error = Errors{} func (errs Errors) Error() string { switch len(errs) { case 0: return "" case 1: return errs[0].Error() default: msg := "errors:\n" for _, err := range errs { msg += err.Error() + "\n" } return msg } } // Len returns the current number of errors. 
func (errs Errors) Len() int { return len(errs) } // MarshalJSON converts slice of error, ErrorCode or Error into a // slice of Error - then serializes func (errs Errors) MarshalJSON() ([]byte, error) { var tmpErrs struct { Errors []Error `json:"errors,omitempty"` } for _, daErr := range errs { var err Error switch daErr.(type) { case ErrorCode: err = daErr.(ErrorCode).WithDetail(nil) case Error: err = daErr.(Error) default: err = ErrorCodeUnknown.WithDetail(daErr) } // If the Error struct was setup and they forgot to set the // Message field (meaning its "") then grab it from the ErrCode msg := err.Message if msg == "" { msg = err.Code.Message() } tmpErrs.Errors = append(tmpErrs.Errors, Error{ Code: err.Code, Message: msg, Detail: err.Detail, }) } return json.Marshal(tmpErrs) } // UnmarshalJSON deserializes []Error and then converts it into slice of // Error or ErrorCode func (errs *Errors) UnmarshalJSON(data []byte) error { var tmpErrs struct { Errors []Error } if err := json.Unmarshal(data, &tmpErrs); err != nil { return err } var newErrs Errors for _, daErr := range tmpErrs.Errors { // If Message is empty or exactly matches the Code's message string // then just use the Code, no need for a full Error struct if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { // Error's w/o details get converted to ErrorCode newErrs = append(newErrs, daErr.Code) } else { // Error's w/ details are untouched newErrs = append(newErrs, Error{ Code: daErr.Code, Message: daErr.Message, Detail: daErr.Detail, }) } } *errs = newErrs return nil } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go000066400000000000000000000017441267010174400311740ustar00rootroot00000000000000package errcode import ( "encoding/json" "net/http" ) // ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err // and sets the content-type header to 'application/json'. 
It will handle // ErrorCoder and Errors, and if necessary will create an envelope. func ServeJSON(w http.ResponseWriter, err error) error { w.Header().Set("Content-Type", "application/json; charset=utf-8") var sc int switch errs := err.(type) { case Errors: if len(errs) < 1 { break } if err, ok := errs[0].(ErrorCoder); ok { sc = err.ErrorCode().Descriptor().HTTPStatusCode } case ErrorCoder: sc = errs.ErrorCode().Descriptor().HTTPStatusCode err = Errors{err} // create an envelope. default: // We just have an unhandled error type, so just place in an envelope // and move along. err = Errors{err} } if sc == 0 { sc = http.StatusInternalServerError } w.WriteHeader(sc) if err := json.NewEncoder(w).Encode(err); err != nil { return err } return nil } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go000066400000000000000000000077101267010174400314020ustar00rootroot00000000000000package errcode import ( "fmt" "net/http" "sort" "sync" ) var ( errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} idToDescriptors = map[string]ErrorDescriptor{} groupToDescriptors = map[string][]ErrorDescriptor{} ) var ( // ErrorCodeUnknown is a generic error that can be used as a last // resort if there is no situation-specific error message that can be used ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ Value: "UNKNOWN", Message: "unknown error", Description: `Generic error returned when the error does not have an API classification.`, HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeUnsupported is returned when an operation is not supported. ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ Value: "UNSUPPORTED", Message: "The operation is unsupported.", Description: `The operation was unsupported due to a missing implementation or invalid set of parameters.`, HTTPStatusCode: http.StatusMethodNotAllowed, }) // ErrorCodeUnauthorized is returned if a request requires // authentication. 
ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ Value: "UNAUTHORIZED", Message: "authentication required", Description: `The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate.`, HTTPStatusCode: http.StatusUnauthorized, }) // ErrorCodeDenied is returned if a client does not have sufficient // permission to perform an action. ErrorCodeDenied = Register("errcode", ErrorDescriptor{ Value: "DENIED", Message: "requested access to the resource is denied", Description: `The access controller denied access for the operation on a resource.`, HTTPStatusCode: http.StatusForbidden, }) // ErrorCodeUnavailable provides a common error to report unavialability // of a service or endpoint. ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ Value: "UNAVAILABLE", Message: "service unavailable", Description: "Returned when a service is not available", HTTPStatusCode: http.StatusServiceUnavailable, }) ) var nextCode = 1000 var registerLock sync.Mutex // Register will make the passed-in error known to the environment and // return a new ErrorCode func Register(group string, descriptor ErrorDescriptor) ErrorCode { registerLock.Lock() defer registerLock.Unlock() descriptor.Code = ErrorCode(nextCode) if _, ok := idToDescriptors[descriptor.Value]; ok { panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) } if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) } groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) errorCodeToDescriptors[descriptor.Code] = descriptor idToDescriptors[descriptor.Value] = descriptor nextCode++ return descriptor.Code } type byValue []ErrorDescriptor func (a byValue) Len() int { return len(a) } func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byValue) Less(i, j int) bool { return a[i].Value < 
a[j].Value } // GetGroupNames returns the list of Error group names that are registered func GetGroupNames() []string { keys := []string{} for k := range groupToDescriptors { keys = append(keys, k) } sort.Strings(keys) return keys } // GetErrorCodeGroup returns the named group of error descriptors func GetErrorCodeGroup(name string) []ErrorDescriptor { desc := groupToDescriptors[name] sort.Sort(byValue(desc)) return desc } // GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are // registered, irrespective of what group they're in func GetErrorAllDescriptors() []ErrorDescriptor { result := []ErrorDescriptor{} for _, group := range GetGroupNames() { result = append(result, GetErrorCodeGroup(group)...) } sort.Sort(byValue(result)) return result } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/v2/000077500000000000000000000000001267010174400261265ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go000066400000000000000000001440701267010174400310240ustar00rootroot00000000000000package v2 import ( "net/http" "regexp" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" ) var ( nameParameterDescriptor = ParameterDescriptor{ Name: "name", Type: "string", Format: reference.NameRegexp.String(), Required: true, Description: `Name of the target repository.`, } referenceParameterDescriptor = ParameterDescriptor{ Name: "reference", Type: "string", Format: reference.TagRegexp.String(), Required: true, Description: `Tag or digest of the target manifest.`, } uuidParameterDescriptor = ParameterDescriptor{ Name: "uuid", Type: "opaque", Required: true, Description: "A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", } digestPathParameter = ParameterDescriptor{ Name: "digest", Type: "path", Required: true, Format: digest.DigestRegexp.String(), Description: `Digest of desired blob.`, } hostHeader = ParameterDescriptor{ Name: "Host", Type: "string", Description: "Standard HTTP Host Header. Should be set to the registry host.", Format: "", Examples: []string{"registry-1.docker.io"}, } authHeader = ParameterDescriptor{ Name: "Authorization", Type: "string", Description: "An RFC7235 compliant authorization header.", Format: " ", Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, } authChallengeHeader = ParameterDescriptor{ Name: "WWW-Authenticate", Type: "string", Description: "An RFC7235 compliant authentication challenge header.", Format: ` realm="", ..."`, Examples: []string{ `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, }, } contentLengthZeroHeader = ParameterDescriptor{ Name: "Content-Length", Description: "The `Content-Length` header must be zero and the body must be empty.", Type: "integer", Format: "0", } dockerUploadUUIDHeader = ParameterDescriptor{ Name: "Docker-Upload-UUID", Description: "Identifies the docker upload uuid for the current request.", Type: "uuid", Format: "", } digestHeader = ParameterDescriptor{ Name: "Docker-Content-Digest", Description: "Digest of the targeted content for the request.", Type: "digest", Format: "", } linkHeader = ParameterDescriptor{ Name: "Link", Type: "link", Description: "RFC5988 compliant rel='next' with URL to next result set, if available", Format: `<?n=&last=>; rel="next"`, } paginationParameters = []ParameterDescriptor{ { Name: "n", Type: "integer", Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", Format: "", Required: false, }, { Name: "last", Type: "string", Description: "Result set will include values lexically after last.", Format: "", Required: false, }, } unauthorizedResponseDescriptor = ResponseDescriptor{ Name: "Authentication Required", StatusCode: http.StatusUnauthorized, Description: "The client is not authenticated.", Headers: []ParameterDescriptor{ authChallengeHeader, { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnauthorized, }, } repositoryNotFoundResponseDescriptor = ResponseDescriptor{ Name: "No Such Repository Error", StatusCode: http.StatusNotFound, Description: "The repository is not known to the registry.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, }, } deniedResponseDescriptor = ResponseDescriptor{ Name: "Access Denied", StatusCode: http.StatusForbidden, Description: "The client does not have required access to the repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeDenied, }, } ) const ( manifestBody = `{ "name": , "tag": , "fsLayers": [ { "blobSum": "" }, ... ] ], "history": , "signature": }` errorsBody = `{ "errors:" [ { "code": , "message": "", "detail": ... }, ... 
] }` unauthorizedErrorsBody = `{ "errors:" [ { "code": "UNAUTHORIZED", "message": "access to the requested resource is not authorized", "detail": ... }, ... ] }` ) // APIDescriptor exports descriptions of the layout of the v2 registry API. var APIDescriptor = struct { // RouteDescriptors provides a list of the routes available in the API. RouteDescriptors []RouteDescriptor }{ RouteDescriptors: routeDescriptors, } // RouteDescriptor describes a route specified by name. type RouteDescriptor struct { // Name is the name of the route, as specified in RouteNameXXX exports. // These names a should be considered a unique reference for a route. If // the route is registered with gorilla, this is the name that will be // used. Name string // Path is a gorilla/mux-compatible regexp that can be used to match the // route. For any incoming method and path, only one route descriptor // should match. Path string // Entity should be a short, human-readalbe description of the object // targeted by the endpoint. Entity string // Description should provide an accurate overview of the functionality // provided by the route. Description string // Methods should describe the various HTTP methods that may be used on // this route, including request and response formats. Methods []MethodDescriptor } // MethodDescriptor provides a description of the requests that may be // conducted with the target method. type MethodDescriptor struct { // Method is an HTTP method, such as GET, PUT or POST. Method string // Description should provide an overview of the functionality provided by // the covered method, suitable for use in documentation. Use of markdown // here is encouraged. Description string // Requests is a slice of request descriptors enumerating how this // endpoint may be used. Requests []RequestDescriptor } // RequestDescriptor covers a particular set of headers and parameters that // can be carried out with the parent method. 
Its most helpful to have one // RequestDescriptor per API use case. type RequestDescriptor struct { // Name provides a short identifier for the request, usable as a title or // to provide quick context for the particalar request. Name string // Description should cover the requests purpose, covering any details for // this particular use case. Description string // Headers describes headers that must be used with the HTTP request. Headers []ParameterDescriptor // PathParameters enumerate the parameterized path components for the // given request, as defined in the route's regular expression. PathParameters []ParameterDescriptor // QueryParameters provides a list of query parameters for the given // request. QueryParameters []ParameterDescriptor // Body describes the format of the request body. Body BodyDescriptor // Successes enumerates the possible responses that are considered to be // the result of a successful request. Successes []ResponseDescriptor // Failures covers the possible failures from this particular request. Failures []ResponseDescriptor } // ResponseDescriptor describes the components of an API response. type ResponseDescriptor struct { // Name provides a short identifier for the response, usable as a title or // to provide quick context for the particalar response. Name string // Description should provide a brief overview of the role of the // response. Description string // StatusCode specifies the status recieved by this particular response. StatusCode int // Headers covers any headers that may be returned from the response. Headers []ParameterDescriptor // Fields describes any fields that may be present in the response. Fields []ParameterDescriptor // ErrorCodes enumerates the error codes that may be returned along with // the response. ErrorCodes []errcode.ErrorCode // Body describes the body of the response, if any. Body BodyDescriptor } // BodyDescriptor describes a request body and its expected content type. 
For // the most part, it should be example json or some placeholder for body // data in documentation. type BodyDescriptor struct { ContentType string Format string } // ParameterDescriptor describes the format of a request parameter, which may // be a header, path parameter or query parameter. type ParameterDescriptor struct { // Name is the name of the parameter, either of the path component or // query parameter. Name string // Type specifies the type of the parameter, such as string, integer, etc. Type string // Description provides a human-readable description of the parameter. Description string // Required means the field is required when set. Required bool // Format is a specifying the string format accepted by this parameter. Format string // Regexp is a compiled regular expression that can be used to validate // the contents of the parameter. Regexp *regexp.Regexp // Examples provides multiple examples for the values that might be valid // for this parameter. Examples []string } var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBase, Path: "/v2/", Entity: "Base", Description: `Base V2 API route. 
Typically, this can be used for lightweight version checks and to validate registry authentication.`, Methods: []MethodDescriptor{ { Method: "GET", Description: "Check that the endpoint implements Docker Registry API V2.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, Successes: []ResponseDescriptor{ { Description: "The API implements V2 protocol and is accessible.", StatusCode: http.StatusOK, }, }, Failures: []ResponseDescriptor{ { Description: "The registry does not implement the V2 API.", StatusCode: http.StatusNotFound, }, unauthorizedResponseDescriptor, }, }, }, }, }, }, { Name: RouteNameTags, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", Entity: "Tags", Description: "Retrieve information about tags.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Fetch the tags under the repository identified by `name`.", Requests: []RequestDescriptor{ { Name: "Tags", Description: "Return all tags for the repository", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Description: "A list of tags for the named repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "name": , "tags": [ , ... 
] }`, }, }, }, Failures: []ResponseDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Tags Paginated", Description: "Return a portion of the tags for the specified repository.", PathParameters: []ParameterDescriptor{nameParameterDescriptor}, QueryParameters: paginationParameters, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Description: "A list of tags for the named repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, linkHeader, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "name": , "tags": [ , ... ], }`, }, }, }, Failures: []ResponseDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, }, }, { Name: RouteNameManifest, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", Description: "Create, update, delete and retrieve manifests.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { Description: "The manifest identified by `name` and `reference`. 
The contents can be used to identify and resolve resources required to run the specified image.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ digestHeader, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: manifestBody, }, }, }, Failures: []ResponseDescriptor{ { Description: "The name or reference was invalid.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "PUT", Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: manifestBody, }, Successes: []ResponseDescriptor{ { Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Description: "The canonical location url of the uploaded manifest.", Format: "", }, contentLengthZeroHeader, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Manifest", Description: "The received manifest was invalid in some way, as described by the error codes. 
The client should resolve the issue and retry the request.", StatusCode: http.StatusBadRequest, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, ErrorCodeManifestInvalid, ErrorCodeManifestUnverified, ErrorCodeBlobUnknown, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, { Name: "Missing Layer(s)", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "errors:" [{ "code": "BLOB_UNKNOWN", "message": "blob unknown to registry", "detail": { "digest": "" } }, ... ] }`, }, }, { Name: "Not allowed", Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, }, }, }, }, { Method: "DELETE", Description: "Delete the manifest identified by `name` and `reference`. 
Note that a manifest can _only_ be deleted by `digest`.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusAccepted, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Reference", Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, { Name: "Unknown Manifest", Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeManifestUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Name: "Not allowed", Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, }, }, }, }, }, }, { Name: RouteNameBlob, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve the blob from the registry identified by `digest`. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Name: "Fetch Blob", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "The length of the requested blob content.", Format: "", }, digestHeader, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, }, { Description: "The blob identified by `digest` is available at the provided location.", StatusCode: http.StatusTemporaryRedirect, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Description: "The location where the layer should be accessible.", Format: "", }, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Fetch Blob Part", Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. 
If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Range", Type: "string", Description: "HTTP Range header specifying blob chunk.", Format: "bytes=-", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", StatusCode: http.StatusPartialContent, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "The length of the requested blob chunk.", Format: "", }, { Name: "Content-Range", Type: "byte range", Description: "Content range of blob chunk.", Format: "bytes -/", }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, }, }, Failures: []ResponseDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "DELETE", Description: "Delete the blob identified by `name` and `digest`", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusAccepted, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "0", Format: "0", }, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, }, { Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", StatusCode: http.StatusMethodNotAllowed, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, // TODO(stevvooe): We may want to add a PUT request here to // kickoff an upload of a blob, integrated with the blob upload // API. }, }, { Name: RouteNameBlobUpload, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", Entity: "Initiate Blob Upload", Description: "Initiate a blob upload. 
This endpoint can be used to create resumable uploads or monolithic uploads.", Methods: []MethodDescriptor{ { Method: "POST", Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", Requests: []RequestDescriptor{ { Name: "Initiate Monolithic Blob Upload", Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Length", Type: "integer", Format: "", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "digest", Type: "query", Format: "", Regexp: digest.DigestRegexp, Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, }, }, Body: BodyDescriptor{ ContentType: "application/octect-stream", Format: "", }, Successes: []ResponseDescriptor{ { Description: "The blob has been created in the registry and is available at the provided location.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Name: "Not allowed", Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, 
deniedResponseDescriptor, }, }, { Name: "Initiate Resumable Blob Upload", Description: "Initiate a resumable blob upload with an empty request body.", Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, Successes: []ResponseDescriptor{ { Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", StatusCode: http.StatusAccepted, Headers: []ParameterDescriptor{ contentLengthZeroHeader, { Name: "Location", Type: "url", Format: "/v2//blobs/uploads/", Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Format: "0-0", Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.", }, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Mount Blob", Description: "Mount a blob identified by the `mount` parameter from another repository.", Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "mount", Type: "query", Format: "", Regexp: digest.DigestRegexp, Description: `Digest of blob to mount from the source repository.`, }, { Name: "from", Type: "query", Format: "", Regexp: reference.NameRegexp, Description: `Name of the source repository.`, }, }, Successes: []ResponseDescriptor{ { Description: "The blob 
has been mounted in the repository and is available at the provided location.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Name: "Not allowed", Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, }, }, { Name: RouteNameBlobUploadChunk, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", Entity: "Blob Upload", Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", Requests: []RequestDescriptor{ { Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Successes: []ResponseDescriptor{ { Name: "Upload Progress", Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Range", Type: "header", Format: "0-", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "PATCH", Description: "Upload a chunk of data for the specified upload.", Requests: []RequestDescriptor{ { Name: "Stream upload", Description: "Upload a stream of data to upload without completing the upload.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, Successes: []ResponseDescriptor{ { Name: "Data Accepted", Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "/v2//blobs/uploads/", Description: "The location of the upload. Clients should assume this changes after each request. 
Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Type: "header", Format: "0-", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Chunked upload", Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Range", Type: "header", Format: "-", Required: true, Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. 
Note that this is a non-standard use of the `Content-Range` header.", }, { Name: "Content-Length", Type: "integer", Format: "", Description: "Length of the chunk being uploaded, corresponding the length of the request body.", }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, Successes: []ResponseDescriptor{ { Name: "Chunk Accepted", Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "/v2//blobs/uploads/", Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Type: "header", Format: "0-", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. 
The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "PUT", Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", Requests: []RequestDescriptor{ { Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Length", Type: "integer", Format: "", Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "digest", Type: "string", Format: "", Regexp: digest.DigestRegexp, Required: true, Description: `Digest of uploaded blob.`, }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, Successes: []ResponseDescriptor{ { Name: "Upload Complete", Description: "The upload has been completed and accepted by the registry. 
The canonical location will be available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "", Description: "The canonical location of the blob for retrieval", }, { Name: "Content-Range", Type: "header", Format: "-", Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", }, contentLengthZeroHeader, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, errcode.ErrorCodeUnsupported, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "DELETE", Description: "Cancel outstanding upload processes, releasing associated resources. 
If this is not called, the unfinished uploads will eventually timeout.", Requests: []RequestDescriptor{ { Description: "Cancel the upload specified by `uuid`.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, Successes: []ResponseDescriptor{ { Name: "Upload Deleted", Description: "The upload has been successfully deleted.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ contentLengthZeroHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "An error was encountered processing the delete. The client may ignore this error.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, }, }, { Name: RouteNameCatalog, Path: "/v2/_catalog", Entity: "Catalog", Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. 
Applications can only determine if a repository is available but not if it is not available.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve a sorted, json list of repositories available in the registry.", Requests: []RequestDescriptor{ { Name: "Catalog Fetch Complete", Description: "Request an unabridged list of repositories available.", Successes: []ResponseDescriptor{ { Description: "Returns the unabridged list of repositories as a json response.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "repositories": [ , ... ] }`, }, }, }, }, { Name: "Catalog Fetch Paginated", Description: "Return the specified portion of repositories.", QueryParameters: paginationParameters, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "repositories": [ , ... ] "next": "?last=&n=" }`, }, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, linkHeader, }, }, }, }, }, }, }, }, } var routeDescriptorsMap map[string]RouteDescriptor func init() { routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) for _, descriptor := range routeDescriptors { routeDescriptorsMap[descriptor.Name] = descriptor } } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go000066400000000000000000000007351267010174400272270ustar00rootroot00000000000000// Package v2 describes routes, urls and the error codes used in the Docker // Registry JSON HTTP API V2. In addition to declarations, descriptors are // provided for routes and error codes that can be used for implementation and // automatically generating documentation. 
// // Definitions here are considered to be locked down for the V2 registry api. // Any changes must be considered carefully and should not proceed without a // change proposal in docker core. package v2 docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go000066400000000000000000000127561267010174400300040ustar00rootroot00000000000000package v2 import ( "net/http" "github.com/docker/distribution/registry/api/errcode" ) const errGroup = "registry.api.v2" var ( // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "DIGEST_INVALID", Message: "provided digest did not match uploaded content", Description: `When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeSizeInvalid is returned when uploading a blob if the provided ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "SIZE_INVALID", Message: "provided length did not match content length", Description: `When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeNameInvalid is returned when the name in the manifest does not // match the provided name. 
ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NAME_INVALID", Message: "invalid repository name", Description: `Invalid repository name encountered either during manifest validation or any API operation.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeTagInvalid is returned when the tag in the manifest does not // match the provided tag. ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "TAG_INVALID", Message: "manifest tag did not match URI", Description: `During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeNameUnknown when the repository name is not known. ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NAME_UNKNOWN", Message: "repository name not known to registry", Description: `This is returned if the name used during an operation is unknown to the registry.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeManifestUnknown returned when image manifest is unknown. ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNKNOWN", Message: "manifest unknown", Description: `This error is returned when the manifest, identified by name and tag is unknown to the repository.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeManifestInvalid returned when an image manifest is invalid, // typically during a PUT operation. This error encompasses all errors // encountered during manifest validation that aren't signature errors. ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_INVALID", Message: "manifest invalid", Description: `During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. 
The detail will contain information the failed validation.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeManifestUnverified is returned when the manifest fails // signature verfication. ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", Description: `During manifest upload, if the manifest fails signature verification, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeManifestBlobUnknown is returned when a manifest blob is // unknown to the registry. ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a manifest blob is unknown to the registry.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeBlobUnknown is returned when a blob is unknown to the // registry. This can happen when the manifest references a nonexistent // layer or the result is not found by a blob fetch. ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_UNKNOWN", Message: "blob upload unknown to registry", Description: `If a blob upload has been cancelled or was never started, this error code may be returned.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. 
ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_INVALID", Message: "blob upload invalid", Description: `The blob upload encountered an error and can no longer proceed.`, HTTPStatusCode: http.StatusNotFound, }) ) docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go000066400000000000000000000023621267010174400300010ustar00rootroot00000000000000package v2 import "github.com/gorilla/mux" // The following are definitions of the name under which all V2 routes are // registered. These symbols can be used to look up a route based on the name. const ( RouteNameBase = "base" RouteNameManifest = "manifest" RouteNameTags = "tags" RouteNameBlob = "blob" RouteNameBlobUpload = "blob-upload" RouteNameBlobUploadChunk = "blob-upload-chunk" RouteNameCatalog = "catalog" ) var allEndpoints = []string{ RouteNameManifest, RouteNameCatalog, RouteNameTags, RouteNameBlob, RouteNameBlobUpload, RouteNameBlobUploadChunk, } // Router builds a gorilla router with named routes for the various API // methods. This can be used directly by both server implementations and // clients. func Router() *mux.Router { return RouterWithPrefix("") } // RouterWithPrefix builds a gorilla router with a configured prefix // on all routes. func RouterWithPrefix(prefix string) *mux.Router { rootRouter := mux.NewRouter() router := rootRouter if prefix != "" { router = router.PathPrefix(prefix).Subrouter() } router.StrictSlash(true) for _, descriptor := range routeDescriptors { router.Path(descriptor.Path).Name(descriptor.Name) } return rootRouter } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go000066400000000000000000000141531267010174400274460ustar00rootroot00000000000000package v2 import ( "net/http" "net/url" "strings" "github.com/docker/distribution/digest" "github.com/gorilla/mux" ) // URLBuilder creates registry API urls from a single base endpoint. 
It can be // used to create urls for use in a registry client or server. // // All urls will be created from the given base, including the api version. // For example, if a root of "/foo/" is provided, urls generated will be fall // under "/foo/v2/...". Most application will only provide a schema, host and // port, such as "https://localhost:5000/". type URLBuilder struct { root *url.URL // url root (ie http://localhost/) router *mux.Router } // NewURLBuilder creates a URLBuilder with provided root url object. func NewURLBuilder(root *url.URL) *URLBuilder { return &URLBuilder{ root: root, router: Router(), } } // NewURLBuilderFromString workes identically to NewURLBuilder except it takes // a string argument for the root, returning an error if it is not a valid // url. func NewURLBuilderFromString(root string) (*URLBuilder, error) { u, err := url.Parse(root) if err != nil { return nil, err } return NewURLBuilder(u), nil } // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root url. func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { var scheme string forwardedProto := r.Header.Get("X-Forwarded-Proto") switch { case len(forwardedProto) > 0: scheme = forwardedProto case r.TLS != nil: scheme = "https" case len(r.URL.Scheme) > 0: scheme = r.URL.Scheme default: scheme = "http" } host := r.Host forwardedHost := r.Header.Get("X-Forwarded-Host") if len(forwardedHost) > 0 { // According to the Apache mod_proxy docs, X-Forwarded-Host can be a // comma-separated list of hosts, to which each proxy appends the // requested host. We want to grab the first from this comma-separated // list. hosts := strings.SplitN(forwardedHost, ",", 2) host = strings.TrimSpace(hosts[0]) } basePath := routeDescriptorsMap[RouteNameBase].Path requestPath := r.URL.Path index := strings.Index(requestPath, basePath) u := &url.URL{ Scheme: scheme, Host: host, } if index > 0 { // N.B. 
index+1 is important because we want to include the trailing / u.Path = requestPath[0 : index+1] } return NewURLBuilder(u) } // BuildBaseURL constructs a base url for the API, typically just "/v2/". func (ub *URLBuilder) BuildBaseURL() (string, error) { route := ub.cloneRoute(RouteNameBase) baseURL, err := route.URL() if err != nil { return "", err } return baseURL.String(), nil } // BuildCatalogURL constructs a url get a catalog of repositories func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameCatalog) catalogURL, err := route.URL() if err != nil { return "", err } return appendValuesURL(catalogURL, values...).String(), nil } // BuildTagsURL constructs a url to list the tags in the named repository. func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { route := ub.cloneRoute(RouteNameTags) tagsURL, err := route.URL("name", name) if err != nil { return "", err } return tagsURL.String(), nil } // BuildManifestURL constructs a url for the manifest identified by name and // reference. The argument reference may be either a tag or digest. func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { route := ub.cloneRoute(RouteNameManifest) manifestURL, err := route.URL("name", name, "reference", reference) if err != nil { return "", err } return manifestURL.String(), nil } // BuildBlobURL constructs the url for the blob identified by name and dgst. func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { route := ub.cloneRoute(RouteNameBlob) layerURL, err := route.URL("name", name, "digest", dgst.String()) if err != nil { return "", err } return layerURL.String(), nil } // BuildBlobUploadURL constructs a url to begin a blob upload in the // repository identified by name. 
func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) uploadURL, err := route.URL("name", name) if err != nil { return "", err } return appendValuesURL(uploadURL, values...).String(), nil } // BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, // including any url values. This should generally not be used by clients, as // this url is provided by server implementations during the blob upload // process. func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUploadChunk) uploadURL, err := route.URL("name", name, "uuid", uuid) if err != nil { return "", err } return appendValuesURL(uploadURL, values...).String(), nil } // clondedRoute returns a clone of the named route from the router. Routes // must be cloned to avoid modifying them during url generation. func (ub *URLBuilder) cloneRoute(name string) clonedRoute { route := new(mux.Route) root := new(url.URL) *route = *ub.router.GetRoute(name) // clone the route *root = *ub.root return clonedRoute{Route: route, root: root} } type clonedRoute struct { *mux.Route root *url.URL } func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { routeURL, err := cr.Route.URL(pairs...) if err != nil { return nil, err } if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { routeURL.Path = routeURL.Path[1:] } url := cr.root.ResolveReference(routeURL) url.Scheme = cr.root.Scheme return url, nil } // appendValuesURL appends the parameters to the url. func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { merged := u.Query() for _, v := range values { for k, vv := range v { merged[k] = append(merged[k], vv...) } } u.RawQuery = merged.Encode() return u } // appendValues appends the parameters to the url. Panics if the string is not // a url. 
func appendValues(u string, values ...url.Values) string { up, err := url.Parse(u) if err != nil { panic(err) // should never happen } return appendValuesURL(up, values...).String() } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/000077500000000000000000000000001267010174400263045ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/auth/000077500000000000000000000000001267010174400272455ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go000066400000000000000000000031701267010174400321130ustar00rootroot00000000000000package auth import ( "net/http" "strings" ) // APIVersion represents a version of an API including its // type and version number. type APIVersion struct { // Type refers to the name of a specific API specification // such as "registry" Type string // Version is the version of the API specification implemented, // This may omit the revision number and only include // the major and minor version, such as "2.0" Version string } // String returns the string formatted API Version func (v APIVersion) String() string { return v.Type + "/" + v.Version } // APIVersions gets the API versions out of an HTTP response using the provided // version header as the key for the HTTP header. func APIVersions(resp *http.Response, versionHeader string) []APIVersion { versions := []APIVersion{} if versionHeader != "" { for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { for _, version := range strings.Fields(supportedVersions) { versions = append(versions, ParseAPIVersion(version)) } } } return versions } // ParseAPIVersion parses an API version string into an APIVersion // Format (Expected, not enforced): // API version string = '/' // API type = [a-z][a-z0-9]* // API version = [0-9]+(\.[0-9]+)? 
// TODO(dmcgowan): Enforce format, add error condition, remove unknown type func ParseAPIVersion(versionStr string) APIVersion { idx := strings.IndexRune(versionStr, '/') if idx == -1 { return APIVersion{ Type: "unknown", Version: versionStr, } } return APIVersion{ Type: strings.ToLower(versionStr[:idx]), Version: versionStr[idx+1:], } } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go000066400000000000000000000126521267010174400324060ustar00rootroot00000000000000package auth import ( "fmt" "net/http" "net/url" "strings" ) // Challenge carries information from a WWW-Authenticate response header. // See RFC 2617. type Challenge struct { // Scheme is the auth-scheme according to RFC 2617 Scheme string // Parameters are the auth-params according to RFC 2617 Parameters map[string]string } // ChallengeManager manages the challenges for endpoints. // The challenges are pulled out of HTTP responses. Only // responses which expect challenges should be added to // the manager, since a non-unauthorized request will be // viewed as not requiring challenges. type ChallengeManager interface { // GetChallenges returns the challenges for the given // endpoint URL. GetChallenges(endpoint string) ([]Challenge, error) // AddResponse adds the response to the challenge // manager. The challenges will be parsed out of // the WWW-Authenicate headers and added to the // URL which was produced the response. If the // response was authorized, any challenges for the // endpoint will be cleared. AddResponse(resp *http.Response) error } // NewSimpleChallengeManager returns an instance of // ChallengeManger which only maps endpoints to challenges // based on the responses which have been added the // manager. The simple manager will make no attempt to // perform requests on the endpoints or cache the responses // to a backend. 
func NewSimpleChallengeManager() ChallengeManager { return simpleChallengeManager{} } type simpleChallengeManager map[string][]Challenge func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) { challenges := m[endpoint] return challenges, nil } func (m simpleChallengeManager) AddResponse(resp *http.Response) error { challenges := ResponseChallenges(resp) if resp.Request == nil { return fmt.Errorf("missing request reference") } urlCopy := url.URL{ Path: resp.Request.URL.Path, Host: resp.Request.URL.Host, Scheme: resp.Request.URL.Scheme, } m[urlCopy.String()] = challenges return nil } // Octet types from RFC 2616. type octetType byte var octetTypes [256]octetType const ( isToken octetType = 1 << iota isSpace ) func init() { // OCTET = // CHAR = // CTL = // CR = // LF = // SP = // HT = // <"> = // CRLF = CR LF // LWS = [CRLF] 1*( SP | HT ) // TEXT = // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT // token = 1* // qdtext = > for c := 0; c < 256; c++ { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { t |= isSpace } if isChar && !isCtl && !isSeparator { t |= isToken } octetTypes[c] = t } } // ResponseChallenges returns a list of authorization challenges // for the given http Response. Challenges are only checked if // the response status code was a 401. func ResponseChallenges(resp *http.Response) []Challenge { if resp.StatusCode == http.StatusUnauthorized { // Parse the WWW-Authenticate Header and store the challenges // on this endpoint object. 
return parseAuthHeader(resp.Header) } return nil } func parseAuthHeader(header http.Header) []Challenge { challenges := []Challenge{} for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) } } return challenges } func parseValueAndParams(header string) (value string, params map[string]string) { params = make(map[string]string) value, s := expectToken(header) if value == "" { return } value = strings.ToLower(value) s = "," + skipSpace(s) for strings.HasPrefix(s, ",") { var pkey string pkey, s = expectToken(skipSpace(s[1:])) if pkey == "" { return } if !strings.HasPrefix(s, "=") { return } var pvalue string pvalue, s = expectTokenOrQuoted(s[1:]) if pvalue == "" { return } pkey = strings.ToLower(pkey) params[pkey] = pvalue s = skipSpace(s) } return } func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isSpace == 0 { break } } return s[i:] } func expectToken(s string) (token, rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isToken == 0 { break } } return s[:i], s[i:] } func expectTokenOrQuoted(s string) (value string, rest string) { if !strings.HasPrefix(s, "\"") { return expectToken(s) } s = s[1:] for i := 0; i < len(s); i++ { switch s[i] { case '"': return s[:i], s[i+1:] case '\\': p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true for i = i + 1; i < len(s); i++ { b := s[i] switch { case escape: escape = false p[j] = b j++ case b == '\\': escape = true case b == '"': return string(p[:j]), s[i+1:] default: p[j] = b j++ } } return "", "" } } return "", "" } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/auth/session.go000066400000000000000000000210511267010174400312560ustar00rootroot00000000000000package auth import ( "encoding/json" "errors" "fmt" "net/http" "net/url" "strings" "sync" "time" "github.com/Sirupsen/logrus" 
"github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/transport" ) // AuthenticationHandler is an interface for authorizing a request from // params from a "WWW-Authenicate" header for a single scheme. type AuthenticationHandler interface { // Scheme returns the scheme as expected from the "WWW-Authenicate" header. Scheme() string // AuthorizeRequest adds the authorization header to a request (if needed) // using the parameters from "WWW-Authenticate" method. The parameters // values depend on the scheme. AuthorizeRequest(req *http.Request, params map[string]string) error } // CredentialStore is an interface for getting credentials for // a given URL type CredentialStore interface { // Basic returns basic auth for the given URL Basic(*url.URL) (string, string) } // NewAuthorizer creates an authorizer which can handle multiple authentication // schemes. The handlers are tried in order, the higher priority authentication // methods should be first. The challengeMap holds a list of challenges for // a given root API endpoint (for example "https://registry-1.docker.io/v2/"). 
func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { return &endpointAuthorizer{ challenges: manager, handlers: handlers, } } type endpointAuthorizer struct { challenges ChallengeManager handlers []AuthenticationHandler transport http.RoundTripper } func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { v2Root := strings.Index(req.URL.Path, "/v2/") if v2Root == -1 { return nil } ping := url.URL{ Host: req.URL.Host, Scheme: req.URL.Scheme, Path: req.URL.Path[:v2Root+4], } pingEndpoint := ping.String() challenges, err := ea.challenges.GetChallenges(pingEndpoint) if err != nil { return err } if len(challenges) > 0 { for _, handler := range ea.handlers { for _, challenge := range challenges { if challenge.Scheme != handler.Scheme() { continue } if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { return err } } } } return nil } // This is the minimum duration a token can last (in seconds). // A token must not live less than 60 seconds because older versions // of the Docker client didn't read their expiration from the token // response and assumed 60 seconds. So to remain compatible with // those implementations, a token must live at least this long. const minimumTokenLifetimeSeconds = 60 // Private interface for time used by this package to enable tests to provide their own implementation. type clock interface { Now() time.Time } type tokenHandler struct { header http.Header creds CredentialStore scope tokenScope transport http.RoundTripper clock clock tokenLock sync.Mutex tokenCache string tokenExpiration time.Time additionalScopes map[string]struct{} } // tokenScope represents the scope at which a token will be requested. // This represents a specific action on a registry resource. 
type tokenScope struct { Resource string Scope string Actions []string } func (ts tokenScope) String() string { return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) } // An implementation of clock for providing real time data. type realClock struct{} // Now implements clock func (realClock) Now() time.Time { return time.Now() } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { return newTokenHandler(transport, creds, realClock{}, scope, actions...) } // newTokenHandler exposes the option to provide a clock to manipulate time in unit testing. func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { return &tokenHandler{ transport: transport, creds: creds, clock: c, scope: tokenScope{ Resource: "repository", Scope: scope, Actions: actions, }, additionalScopes: map[string]struct{}{}, } } func (th *tokenHandler) client() *http.Client { return &http.Client{ Transport: th.transport, Timeout: 15 * time.Second, } } func (th *tokenHandler) Scheme() string { return "bearer" } func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { var additionalScopes []string if fromParam := req.URL.Query().Get("from"); fromParam != "" { additionalScopes = append(additionalScopes, tokenScope{ Resource: "repository", Scope: fromParam, Actions: []string{"pull"}, }.String()) } if err := th.refreshToken(params, additionalScopes...); err != nil { return err } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache)) return nil } func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error { th.tokenLock.Lock() defer th.tokenLock.Unlock() var addedScopes bool for _, scope := range additionalScopes { if _, 
ok := th.additionalScopes[scope]; !ok { th.additionalScopes[scope] = struct{}{} addedScopes = true } } now := th.clock.Now() if now.After(th.tokenExpiration) || addedScopes { tr, err := th.fetchToken(params) if err != nil { return err } th.tokenCache = tr.Token th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) } return nil } type tokenResponse struct { Token string `json:"token"` AccessToken string `json:"access_token"` ExpiresIn int `json:"expires_in"` IssuedAt time.Time `json:"issued_at"` } func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) realm, ok := params["realm"] if !ok { return nil, errors.New("no realm specified for token auth challenge") } // TODO(dmcgowan): Handle empty scheme realmURL, err := url.Parse(realm) if err != nil { return nil, fmt.Errorf("invalid token auth challenge realm: %s", err) } req, err := http.NewRequest("GET", realmURL.String(), nil) if err != nil { return nil, err } reqParams := req.URL.Query() service := params["service"] scope := th.scope.String() if service != "" { reqParams.Add("service", service) } for _, scopeField := range strings.Fields(scope) { reqParams.Add("scope", scopeField) } for scope := range th.additionalScopes { reqParams.Add("scope", scope) } if th.creds != nil { username, password := th.creds.Basic(realmURL) if username != "" && password != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) } } req.URL.RawQuery = reqParams.Encode() resp, err := th.client().Do(req) if err != nil { return nil, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { err := client.HandleErrorResponse(resp) return nil, err } decoder := json.NewDecoder(resp.Body) tr := new(tokenResponse) if err = decoder.Decode(tr); err != nil { return nil, fmt.Errorf("unable to decode token response: %s", err) } // `access_token` is 
equivalent to `token` and if both are specified // the choice is undefined. Canonicalize `access_token` by sticking // things in `token`. if tr.AccessToken != "" { tr.Token = tr.AccessToken } if tr.Token == "" { return nil, errors.New("authorization server did not include a token in the response") } if tr.ExpiresIn < minimumTokenLifetimeSeconds { logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds } if tr.IssuedAt.IsZero() { // issued_at is optional in the token response. tr.IssuedAt = th.clock.Now() } return tr, nil } type basicHandler struct { creds CredentialStore } // NewBasicHandler creaters a new authentiation handler which adds // basic authentication credentials to a request. func NewBasicHandler(creds CredentialStore) AuthenticationHandler { return &basicHandler{ creds: creds, } } func (*basicHandler) Scheme() string { return "basic" } func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { if bh.creds != nil { username, password := bh.creds.Basic(req.URL) if username != "" && password != "" { req.SetBasicAuth(username, password) return nil } } return errors.New("no basic auth credentials") } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go000066400000000000000000000077121267010174400311540ustar00rootroot00000000000000package client import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "os" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" ) type httpBlobUpload struct { statter distribution.BlobStatter client *http.Client uuid string startedAt time.Time location string // always the last value of the location header. 
offset int64 closed bool } func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { panic("Not implemented") } func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUploadUnknown } return HandleErrorResponse(resp) } func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) if err != nil { return 0, err } defer req.Body.Close() resp, err := hbu.client.Do(req) if err != nil { return 0, err } if !SuccessStatus(resp.StatusCode) { return 0, hbu.handleErrorResponse(resp) } hbu.uuid = resp.Header.Get("Docker-Upload-UUID") hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) if err != nil { return 0, err } rng := resp.Header.Get("Range") var start, end int64 if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { return 0, err } else if n != 2 || end < start { return 0, fmt.Errorf("bad range format: %s", rng) } return (end - start + 1), nil } func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) if err != nil { return 0, err } req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) req.Header.Set("Content-Type", "application/octet-stream") resp, err := hbu.client.Do(req) if err != nil { return 0, err } if !SuccessStatus(resp.StatusCode) { return 0, hbu.handleErrorResponse(resp) } hbu.uuid = resp.Header.Get("Docker-Upload-UUID") hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) if err != nil { return 0, err } rng := resp.Header.Get("Range") var start, end int if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { return 0, err } else if n != 2 || end < start { return 0, fmt.Errorf("bad range format: %s", rng) } return (end - start + 1), nil } 
func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { newOffset := hbu.offset switch whence { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: newOffset += int64(offset) case os.SEEK_SET: newOffset = int64(offset) } hbu.offset = newOffset return hbu.offset, nil } func (hbu *httpBlobUpload) ID() string { return hbu.uuid } func (hbu *httpBlobUpload) StartedAt() time.Time { return hbu.startedAt } func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { // TODO(dmcgowan): Check if already finished, if so just fetch req, err := http.NewRequest("PUT", hbu.location, nil) if err != nil { return distribution.Descriptor{}, err } values := req.URL.Query() values.Set("digest", desc.Digest.String()) req.URL.RawQuery = values.Encode() resp, err := hbu.client.Do(req) if err != nil { return distribution.Descriptor{}, err } defer resp.Body.Close() if !SuccessStatus(resp.StatusCode) { return distribution.Descriptor{}, hbu.handleErrorResponse(resp) } return hbu.statter.Stat(ctx, desc.Digest) } func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { req, err := http.NewRequest("DELETE", hbu.location, nil) if err != nil { return err } resp, err := hbu.client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { return nil } return hbu.handleErrorResponse(resp) } func (hbu *httpBlobUpload) Close() error { hbu.closed = true return nil } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/errors.go000066400000000000000000000046551267010174400301610ustar00rootroot00000000000000package client import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "github.com/docker/distribution/registry/api/errcode" ) // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. 
type UnexpectedHTTPStatusError struct { Status string } func (e *UnexpectedHTTPStatusError) Error() string { return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) } // UnexpectedHTTPResponseError is returned when an expected HTTP status code // is returned, but the content was unexpected and failed to be parsed. type UnexpectedHTTPResponseError struct { ParseErr error Response []byte } func (e *UnexpectedHTTPResponseError) Error() string { return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) } func parseHTTPErrorResponse(statusCode int, r io.Reader) error { var errors errcode.Errors body, err := ioutil.ReadAll(r) if err != nil { return err } // For backward compatibility, handle irregularly formatted // messages that contain a "details" field. var detailsErr struct { Details string `json:"details"` } err = json.Unmarshal(body, &detailsErr) if err == nil && detailsErr.Details != "" { if statusCode == http.StatusUnauthorized { return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) } return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) } if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ ParseErr: err, Response: body, } } return errors } // HandleErrorResponse returns error parsed from HTTP response for an // unsuccessful HTTP response code (in the range 400 - 499 inclusive). An // UnexpectedHTTPStatusError returned for response code outside of expected // range. 
func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } if resp.StatusCode >= 400 && resp.StatusCode < 500 { return parseHTTPErrorResponse(resp.StatusCode, resp.Body) } return &UnexpectedHTTPStatusError{Status: resp.Status} } // SuccessStatus returns true if the argument is a successful HTTP response // code (in the range 200 - 399 inclusive). func SuccessStatus(status int) bool { return status >= 200 && status <= 399 } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/repository.go000066400000000000000000000433341267010174400310610ustar00rootroot00000000000000package client import ( "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache/memory" ) // Registry provides an interface for calling Repositories, which returns a catalog of repositories. 
type Registry interface {
	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}

// NewRegistry creates a registry namespace which can be used to get a listing of repositories
func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
	ub, err := v2.NewURLBuilderFromString(baseURL)
	if err != nil {
		return nil, err
	}

	client := &http.Client{
		Transport: transport,
		Timeout:   1 * time.Minute,
	}

	// Fixed mis-encoded "®istry" (mojibake for "&registry").
	return &registry{
		client:  client,
		ub:      ub,
		context: ctx,
	}, nil
}

type registry struct {
	client  *http.Client
	ub      *v2.URLBuilder
	context context.Context
}

// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
// are no more entries
func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
	var numFilled int
	var returnErr error

	values := buildCatalogValues(len(entries), last)
	u, err := r.ub.BuildCatalogURL(values)
	if err != nil {
		return 0, err
	}

	resp, err := r.client.Get(u)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		var ctlg struct {
			Repositories []string `json:"repositories"`
		}
		decoder := json.NewDecoder(resp.Body)

		if err := decoder.Decode(&ctlg); err != nil {
			return 0, err
		}

		// copy never writes past len(entries), so a server that returns more
		// results than requested can no longer cause an index-out-of-range
		// panic (the old element-by-element loop was unbounded).
		numFilled = copy(entries, ctlg.Repositories)

		link := resp.Header.Get("Link")
		if link == "" {
			returnErr = io.EOF
		}
	} else {
		return 0, HandleErrorResponse(resp)
	}

	return numFilled, returnErr
}

// NewRepository creates a new Repository for the given repository name and base URL.
func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { if _, err := reference.ParseNamed(name); err != nil { return nil, err } ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { return nil, err } client := &http.Client{ Transport: transport, // TODO(dmcgowan): create cookie jar } return &repository{ client: client, ub: ub, name: name, context: ctx, }, nil } type repository struct { client *http.Client ub *v2.URLBuilder context context.Context name string } func (r *repository) Name() string { return r.name } func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { statter := &blobStatter{ name: r.Name(), ub: r.ub, client: r.client, } return &blobs{ name: r.Name(), ub: r.ub, client: r.client, statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), } } func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { // todo(richardscothern): options should be sent over the wire return &manifests{ name: r.Name(), ub: r.ub, client: r.client, etags: make(map[string]string), }, nil } func (r *repository) Tags(ctx context.Context) distribution.TagService { return &tags{ client: r.client, ub: r.ub, context: r.context, name: r.Name(), } } // tags implements remote tagging operations. 
type tags struct { client *http.Client ub *v2.URLBuilder context context.Context name string } // All returns all tags func (t *tags) All(ctx context.Context) ([]string, error) { var tags []string u, err := t.ub.BuildTagsURL(t.name) if err != nil { return tags, err } resp, err := t.client.Get(u) if err != nil { return tags, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { b, err := ioutil.ReadAll(resp.Body) if err != nil { return tags, err } tagsResponse := struct { Tags []string `json:"tags"` }{} if err := json.Unmarshal(b, &tagsResponse); err != nil { return tags, err } tags = tagsResponse.Tags return tags, nil } return tags, HandleErrorResponse(resp) } func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { desc := distribution.Descriptor{} headers := response.Header ctHeader := headers.Get("Content-Type") if ctHeader == "" { return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") } desc.MediaType = ctHeader digestHeader := headers.Get("Docker-Content-Digest") if digestHeader == "" { bytes, err := ioutil.ReadAll(response.Body) if err != nil { return distribution.Descriptor{}, err } _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) if err != nil { return distribution.Descriptor{}, err } return desc, nil } dgst, err := digest.ParseDigest(digestHeader) if err != nil { return distribution.Descriptor{}, err } desc.Digest = dgst lengthHeader := headers.Get("Content-Length") if lengthHeader == "" { return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") } length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { return distribution.Descriptor{}, err } desc.Size = length return desc, nil } // Get issues a HEAD request for a Manifest against its named endpoint in order // to construct a descriptor for the tag. If the registry doesn't support HEADing // a manifest, fallback to GET. 
func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { u, err := t.ub.BuildManifestURL(t.name, tag) if err != nil { return distribution.Descriptor{}, err } var attempts int resp, err := t.client.Head(u) check: if err != nil { return distribution.Descriptor{}, err } switch { case resp.StatusCode >= 200 && resp.StatusCode < 400: return descriptorFromResponse(resp) case resp.StatusCode == http.StatusMethodNotAllowed: resp, err = t.client.Get(u) attempts++ if attempts > 1 { return distribution.Descriptor{}, err } goto check default: return distribution.Descriptor{}, HandleErrorResponse(resp) } } func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { panic("not implemented") } func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { panic("not implemented") } func (t *tags) Untag(ctx context.Context, tag string) error { panic("not implemented") } type manifests struct { name string ub *v2.URLBuilder client *http.Client etags map[string]string } func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) if err != nil { return false, err } resp, err := ms.client.Head(u) if err != nil { return false, err } if SuccessStatus(resp.StatusCode) { return true, nil } else if resp.StatusCode == http.StatusNotFound { return false, nil } return false, HandleErrorResponse(resp) } // AddEtagToTag allows a client to supply an eTag to Get which will be // used for a conditional HTTP request. If the eTag matches, a nil manifest // and ErrManifestNotModified error will be returned. etag is automatically // quoted when added to this map. 
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
	return etagOption{tag, etag}
}

// etagOption records an etag for a tag; applied to a *manifests instance.
type etagOption struct{ tag, etag string }

// Apply stores the (quoted) etag on the client-side manifests service.
func (o etagOption) Apply(ms distribution.ManifestService) error {
	if ms, ok := ms.(*manifests); ok {
		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
		return nil
	}
	return fmt.Errorf("etag options is a client-only option")
}

// Get fetches a manifest by digest, or by tag when a WithTag option is
// supplied. A stored etag for the reference turns this into a conditional
// request: 304 yields ErrManifestNotModified.
func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
	var tag string
	for _, option := range options {
		// withTagOption is intercepted here rather than Apply'd: it selects
		// the URL reference for this single call.
		if opt, ok := option.(withTagOption); ok {
			tag = opt.tag
		} else {
			err := option.Apply(ms)
			if err != nil {
				return nil, err
			}
		}
	}

	// Prefer the tag reference when given; otherwise address by digest.
	var ref string
	if tag != "" {
		ref = tag
	} else {
		ref = dgst.String()
	}

	u, err := ms.ub.BuildManifestURL(ms.name, ref)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}

	// Advertise every manifest media type the client can unmarshal.
	for _, t := range distribution.ManifestMediaTypes() {
		req.Header.Add("Accept", t)
	}

	if _, ok := ms.etags[ref]; ok {
		req.Header.Set("If-None-Match", ms.etags[ref])
	}

	resp, err := ms.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		return nil, distribution.ErrManifestNotModified
	} else if SuccessStatus(resp.StatusCode) {
		// Unmarshal according to the media type the registry reported.
		mt := resp.Header.Get("Content-Type")
		body, err := ioutil.ReadAll(resp.Body)

		if err != nil {
			return nil, err
		}
		m, _, err := distribution.UnmarshalManifest(mt, body)
		if err != nil {
			return nil, err
		}
		return m, nil
	}
	return nil, HandleErrorResponse(resp)
}

// WithTag allows a tag to be passed into Put which enables the client
// to build a correct URL.
func WithTag(tag string) distribution.ManifestServiceOption {
	return withTagOption{tag}
}

// withTagOption carries a tag reference through the options slice.
type withTagOption struct{ tag string }

// Apply is a no-op on *manifests (the tag is read directly by Get/Put) and
// an error for any other ManifestService implementation.
func (o withTagOption) Apply(m distribution.ManifestService) error {
	if _, ok := m.(*manifests); ok {
		return nil
	}
	return fmt.Errorf("withTagOption is a client-only option")
}

// Put puts a manifest.
// A tag can be specified using an options parameter which uses some shared state to hold the
// tag name in order to build the correct upload URL. This state is written and read under a lock.
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
	var tag string

	for _, option := range options {
		// withTagOption selects the upload reference; other options are
		// applied to the service as usual.
		if opt, ok := option.(withTagOption); ok {
			tag = opt.tag
		} else {
			err := option.Apply(ms)
			if err != nil {
				return "", err
			}
		}
	}

	// NOTE: when no tag is supplied, the manifest URL is built with an empty
	// reference.
	manifestURL, err := ms.ub.BuildManifestURL(ms.name, tag)
	if err != nil {
		return "", err
	}

	mediaType, p, err := m.Payload()
	if err != nil {
		return "", err
	}

	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
	if err != nil {
		return "", err
	}

	putRequest.Header.Set("Content-Type", mediaType)

	resp, err := ms.client.Do(putRequest)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		// The canonical digest of the stored manifest is echoed back by the
		// registry.
		dgstHeader := resp.Header.Get("Docker-Content-Digest")
		dgst, err := digest.ParseDigest(dgstHeader)
		if err != nil {
			return "", err
		}

		return dgst, nil
	}

	return "", HandleErrorResponse(resp)
}

// Delete removes the manifest with the given digest from the registry.
func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
	u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
	if err != nil {
		return err
	}
	req, err := http.NewRequest("DELETE", u, nil)
	if err != nil {
		return err
	}

	resp, err := ms.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		return nil
	}
	return HandleErrorResponse(resp)
}

// todo(richardscothern): Restore interface and implementation with merge of #1050
/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
	panic("not supported")
}*/

type blobs struct {
	name   string
	ub     *v2.URLBuilder
	client *http.Client

	statter distribution.BlobDescriptorService
	distribution.BlobDeleter
}

// sanitizeLocation resolves a (possibly relative) Location header value
// against the request URL it came from, returning an absolute URL.
func sanitizeLocation(location, base string) (string, error) {
	baseURL, err := url.Parse(base)
	if err != nil {
		return "", err
	}

	locationURL, err := url.Parse(location)
	if err != nil {
		return "", err
	}

	return baseURL.ResolveReference(locationURL).String(), nil
}

// Stat delegates to the (cache-fronted) blob statter.
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	return bs.statter.Stat(ctx, dgst)

}

// Get reads the entire blob with the given digest into memory.
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
	reader, err := bs.Open(ctx, dgst)
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	return ioutil.ReadAll(reader)
}

// Open returns a seekable reader over the remote blob; 404 is mapped to
// distribution.ErrBlobUnknown by the error handler.
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return nil, err
	}

	return transport.NewHTTPReadSeeker(bs.client, blobURL,
		func(resp *http.Response) error {
			if resp.StatusCode == http.StatusNotFound {
				return distribution.ErrBlobUnknown
			}
			return HandleErrorResponse(resp)
		}), nil
}

// ServeBlob is not implemented on the client.
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	panic("not implemented")
}

// Put uploads p as a blob in a single shot, verifying the byte count and
// computing the canonical digest while streaming through the writer.
func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	writer, err := bs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	dgstr := digest.Canonical.New()
	// Tee through the digester so digest computation costs no extra pass.
	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if n < int64(len(p)) {
		return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
	}

	desc := distribution.Descriptor{
		MediaType: mediaType,
		Size:      int64(len(p)),
		Digest:    dgstr.Digest(),
	}

	return writer.Commit(ctx, desc)
}

// createOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type createOptions struct {
	Mount struct {
		ShouldMount bool
		From        reference.Canonical
	}
}

// optionFunc adapts a function to the BlobCreateOption interface.
type optionFunc func(interface{}) error

func (f optionFunc) Apply(v interface{}) error {
	return f(v)
}

// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
	return optionFunc(func(v interface{}) error {
		opts, ok := v.(*createOptions)
		if !ok {
			return fmt.Errorf("unexpected options type: %T", v)
		}

		opts.Mount.ShouldMount = true
		opts.Mount.From = ref

		return nil
	})
}

// Create starts a new blob upload session. With WithMountFrom, the registry
// may instead mount the blob cross-repo: a 201 means the mount succeeded and
// ErrBlobMounted is returned; a 202 means a normal upload session was opened.
func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
	var opts createOptions

	for _, option := range options {
		err := option.Apply(&opts)
		if err != nil {
			return nil, err
		}
	}

	var values []url.Values

	if opts.Mount.ShouldMount {
		values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
	}

	u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
	if err != nil {
		return nil, err
	}

	resp, err := bs.client.Post(u, "", nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusCreated:
		// Cross-repo mount succeeded; stat the blob to build the descriptor.
		// NOTE(review): this path assumes a mount was requested — confirm a
		// 201 cannot occur without WithMountFrom.
		desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
		if err != nil {
			return nil, err
		}
		return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
	case http.StatusAccepted:
		// TODO(dmcgowan): Check for invalid UUID
		uuid := resp.Header.Get("Docker-Upload-UUID")
		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
		if err != nil {
			return nil, err
		}

		return &httpBlobUpload{
			statter:   bs.statter,
			client:    bs.client,
			uuid:      uuid,
			startedAt: time.Now(),
			location:  location,
		}, nil
	default:
		return nil, HandleErrorResponse(resp)
	}
}

// Resume is not implemented on the client.
func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
	panic("not implemented")
}

// Delete clears the blob through the statter (which handles the remote
// DELETE and the cache).
func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
	return bs.statter.Clear(ctx, dgst)
}

type blobStatter struct {
	name   string
	ub     *v2.URLBuilder
	client *http.Client
}

// Stat issues a HEAD request for the blob and builds a descriptor from the
// response headers. 404 maps to distribution.ErrBlobUnknown.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	u, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	resp, err := bs.client.Head(u)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		lengthHeader := resp.Header.Get("Content-Length")
		if lengthHeader == "" {
			return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
		}

		length, err := strconv.ParseInt(lengthHeader, 10, 64)
		if err != nil {
			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
		}

		return distribution.Descriptor{
			MediaType: resp.Header.Get("Content-Type"),
			Size:      length,
			Digest:    dgst,
		}, nil
	} else if resp.StatusCode == http.StatusNotFound {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}
	return distribution.Descriptor{}, HandleErrorResponse(resp)
}

// buildCatalogValues encodes catalog pagination parameters ("n" and "last").
func buildCatalogValues(maxEntries int, last string) url.Values {
	values := url.Values{}

	if maxEntries > 0 {
		values.Add("n", strconv.Itoa(maxEntries))
	}

	if last != "" {
		values.Add("last", last)
	}

	return values
}

// Clear deletes the blob from the registry.
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
	if err != nil {
		return err
	}

	req, err := http.NewRequest("DELETE", blobURL, nil)
	if err != nil {
		return err
	}

	resp, err := bs.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		return nil
	}
	return HandleErrorResponse(resp)
}

// SetDescriptor is a no-op for the remote statter (nothing to cache here).
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	return nil
}
docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/transport/000077500000000000000000000000001267010174400303405ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go000066400000000000000000000106461267010174400331770ustar00rootroot00000000000000package transport

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
)

// ReadSeekCloser combines io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
	io.ReadSeeker
	io.Closer
}

// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
// request. When seeking and starting a read from a non-zero offset
// the a "Range" header will be added which sets the offset.
// TODO(dmcgowan): Move this into a separate utility package
func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
	return &httpReadSeeker{
		client:       client,
		url:          url,
		errorHandler: errorHandler,
	}
}

type httpReadSeeker struct {
	client *http.Client
	url    string

	// errorHandler creates an error from an unsuccessful HTTP response.
// This allows the error to be created with the HTTP response body // without leaking the body through a returned error. errorHandler func(*http.Response) error size int64 // rc is the remote read closer. rc io.ReadCloser // brd is a buffer for internal buffered io. brd *bufio.Reader // readerOffset tracks the offset as of the last read. readerOffset int64 // seekOffset allows Seek to override the offset. Seek changes // seekOffset instead of changing readOffset directly so that // connection resets can be delayed and possibly avoided if the // seek is undone (i.e. seeking to the end and then back to the // beginning). seekOffset int64 err error } func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { if hrs.err != nil { return 0, hrs.err } // If we seeked to a different position, we need to reset the // connection. This logic is here instead of Seek so that if // a seek is undone before the next read, the connection doesn't // need to be closed and reopened. A common example of this is // seeking to the end to determine the length, and then seeking // back to the original position. if hrs.readerOffset != hrs.seekOffset { hrs.reset() } hrs.readerOffset = hrs.seekOffset rd, err := hrs.reader() if err != nil { return 0, err } n, err = rd.Read(p) hrs.seekOffset += int64(n) hrs.readerOffset += int64(n) // Simulate io.EOF error if we reach filesize. 
if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size { err = io.EOF } return n, err } func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { if hrs.err != nil { return 0, hrs.err } _, err := hrs.reader() if err != nil { return 0, err } newOffset := hrs.seekOffset switch whence { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: if hrs.size < 0 { return 0, errors.New("content length not known") } newOffset = hrs.size + int64(offset) case os.SEEK_SET: newOffset = int64(offset) } if newOffset < 0 { err = errors.New("cannot seek to negative position") } else { hrs.seekOffset = newOffset } return hrs.seekOffset, err } func (hrs *httpReadSeeker) Close() error { if hrs.err != nil { return hrs.err } // close and release reader chain if hrs.rc != nil { hrs.rc.Close() } hrs.rc = nil hrs.brd = nil hrs.err = errors.New("httpLayer: closed") return nil } func (hrs *httpReadSeeker) reset() { if hrs.err != nil { return } if hrs.rc != nil { hrs.rc.Close() hrs.rc = nil } } func (hrs *httpReadSeeker) reader() (io.Reader, error) { if hrs.err != nil { return nil, hrs.err } if hrs.rc != nil { return hrs.brd, nil } req, err := http.NewRequest("GET", hrs.url, nil) if err != nil { return nil, err } if hrs.readerOffset > 0 { // TODO(stevvooe): Get this working correctly. // If we are at different offset, issue a range request from there. 
req.Header.Add("Range", "1-") // TODO: get context in here // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) } resp, err := hrs.client.Do(req) if err != nil { return nil, err } // Normally would use client.SuccessStatus, but that would be a cyclic // import if resp.StatusCode >= 200 && resp.StatusCode <= 399 { hrs.rc = resp.Body if resp.StatusCode == http.StatusOK { hrs.size = resp.ContentLength } else { hrs.size = -1 } } else { defer resp.Body.Close() if hrs.errorHandler != nil { return nil, hrs.errorHandler(resp) } return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } if hrs.brd == nil { hrs.brd = bufio.NewReader(hrs.rc) } else { hrs.brd.Reset(hrs.rc) } return hrs.brd, nil } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go000066400000000000000000000063571267010174400327360ustar00rootroot00000000000000package transport import ( "io" "net/http" "sync" ) // RequestModifier represents an object which will do an inplace // modification of an HTTP request. type RequestModifier interface { ModifyRequest(*http.Request) error } type headerModifier http.Header // NewHeaderRequestModifier returns a new RequestModifier which will // add the given headers to a request. func NewHeaderRequestModifier(header http.Header) RequestModifier { return headerModifier(header) } func (h headerModifier) ModifyRequest(req *http.Request) error { for k, s := range http.Header(h) { req.Header[k] = append(req.Header[k], s...) } return nil } // NewTransport creates a new transport which will apply modifiers to // the request on a RoundTrip call. 
func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
	return &transport{
		Modifiers: modifiers,
		Base:      base,
	}
}

// transport is an http.RoundTripper that makes HTTP requests after
// copying and modifying the request
type transport struct {
	Modifiers []RequestModifier
	Base      http.RoundTripper

	mu     sync.Mutex                      // guards modReq
	modReq map[*http.Request]*http.Request // original -> modified
}

// RoundTrip authorizes and authenticates the request with an
// access token. If no token exists or token is expired,
// tries to refresh/fetch a new token.
func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone before modifying: RoundTrippers must not mutate the caller's
	// request.
	req2 := cloneRequest(req)
	for _, modifier := range t.Modifiers {
		if err := modifier.ModifyRequest(req2); err != nil {
			return nil, err
		}
	}

	// Remember the original->modified mapping so CancelRequest can find the
	// in-flight modified request.
	t.setModReq(req, req2)
	res, err := t.base().RoundTrip(req2)
	if err != nil {
		t.setModReq(req, nil)
		return nil, err
	}
	// Drop the mapping once the body has been fully consumed.
	res.Body = &onEOFReader{
		rc: res.Body,
		fn: func() { t.setModReq(req, nil) },
	}
	return res, nil
}

// CancelRequest cancels an in-flight request by closing its connection.
func (t *transport) CancelRequest(req *http.Request) {
	type canceler interface {
		CancelRequest(*http.Request)
	}
	if cr, ok := t.base().(canceler); ok {
		t.mu.Lock()
		modReq := t.modReq[req]
		delete(t.modReq, req)
		t.mu.Unlock()
		cr.CancelRequest(modReq)
	}
}

// base returns the configured RoundTripper, falling back to the default.
func (t *transport) base() http.RoundTripper {
	if t.Base != nil {
		return t.Base
	}
	return http.DefaultTransport
}

// setModReq records (mod != nil) or removes (mod == nil) the mapping from an
// original request to its modified copy, under the mutex.
func (t *transport) setModReq(orig, mod *http.Request) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.modReq == nil {
		t.modReq = make(map[*http.Request]*http.Request)
	}
	if mod == nil {
		delete(t.modReq, orig)
	} else {
		t.modReq[orig] = mod
	}
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request { // shallow copy of the struct r2 := new(http.Request) *r2 = *r // deep copy of the Header r2.Header = make(http.Header, len(r.Header)) for k, s := range r.Header { r2.Header[k] = append([]string(nil), s...) } return r2 } type onEOFReader struct { rc io.ReadCloser fn func() } func (r *onEOFReader) Read(p []byte) (n int, err error) { n, err = r.rc.Read(p) if err == io.EOF { r.runFunc() } return } func (r *onEOFReader) Close() error { err := r.rc.Close() r.runFunc() return err } func (r *onEOFReader) runFunc() { if fn := r.fn; fn != nil { fn() r.fn = nil } } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/storage/000077500000000000000000000000001267010174400264725ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/storage/cache/000077500000000000000000000000001267010174400275355ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go000066400000000000000000000016141267010174400311310ustar00rootroot00000000000000// Package cache provides facilities to speed up access to the storage // backend. package cache import ( "fmt" "github.com/docker/distribution" ) // BlobDescriptorCacheProvider provides repository scoped // BlobDescriptorService cache instances and a global descriptor cache. type BlobDescriptorCacheProvider interface { distribution.BlobDescriptorService RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) } // ValidateDescriptor provides a helper function to ensure that caches have // common criteria for admitting descriptors. 
func ValidateDescriptor(desc distribution.Descriptor) error { if err := desc.Digest.Validate(); err != nil { return err } if desc.Size < 0 { return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) } if desc.MediaType == "" { return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) } return nil } cachedblobdescriptorstore.go000066400000000000000000000051241267010174400352310ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/storage/cachepackage cache import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution" ) // Metrics is used to hold metric counters // related to the number of times a cache was // hit or missed. type Metrics struct { Requests uint64 Hits uint64 Misses uint64 } // MetricsTracker represents a metric tracker // which simply counts the number of hits and misses. type MetricsTracker interface { Hit() Miss() Metrics() Metrics } type cachedBlobStatter struct { cache distribution.BlobDescriptorService backend distribution.BlobDescriptorService tracker MetricsTracker } // NewCachedBlobStatter creates a new statter which prefers a cache and // falls back to a backend. func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { return &cachedBlobStatter{ cache: cache, backend: backend, } } // NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and // falls back to a backend. Hits and misses will send to the tracker. 
func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { return &cachedBlobStatter{ cache: cache, backend: backend, tracker: tracker, } } func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { desc, err := cbds.cache.Stat(ctx, dgst) if err != nil { if err != distribution.ErrBlobUnknown { context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) } goto fallback } if cbds.tracker != nil { cbds.tracker.Hit() } return desc, nil fallback: if cbds.tracker != nil { cbds.tracker.Miss() } desc, err = cbds.backend.Stat(ctx, dgst) if err != nil { return desc, err } if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) } return desc, err } func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { err := cbds.cache.Clear(ctx, dgst) if err != nil { return err } err = cbds.backend.Clear(ctx, dgst) if err != nil { return err } return nil } func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) } return nil } docker-1.10.3/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/000077500000000000000000000000001267010174400310455ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go000066400000000000000000000117221267010174400327070ustar00rootroot00000000000000package memory import ( "sync" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" 
"github.com/docker/distribution/registry/storage/cache" ) type inMemoryBlobDescriptorCacheProvider struct { global *mapBlobDescriptorCache repositories map[string]*mapBlobDescriptorCache mu sync.RWMutex } // NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for // storing blob descriptor data. func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { return &inMemoryBlobDescriptorCacheProvider{ global: newMapBlobDescriptorCache(), repositories: make(map[string]*mapBlobDescriptorCache), } } func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { if _, err := reference.ParseNamed(repo); err != nil { return nil, err } imbdcp.mu.RLock() defer imbdcp.mu.RUnlock() return &repositoryScopedInMemoryBlobDescriptorCache{ repo: repo, parent: imbdcp, repository: imbdcp.repositories[repo], }, nil } func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { return imbdcp.global.Stat(ctx, dgst) } func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { return imbdcp.global.Clear(ctx, dgst) } func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { _, err := imbdcp.Stat(ctx, dgst) if err == distribution.ErrBlobUnknown { if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { // if the digests differ, set the other canonical mapping if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { return err } } // unknown, just set it return imbdcp.global.SetDescriptor(ctx, dgst, desc) } // we already know it, do nothing return err } // repositoryScopedInMemoryBlobDescriptorCache provides the request scoped // repository cache. Instances are not thread-safe but the delegated // operations are. 
type repositoryScopedInMemoryBlobDescriptorCache struct { repo string parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map repository *mapBlobDescriptorCache } func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if rsimbdcp.repository == nil { return distribution.Descriptor{}, distribution.ErrBlobUnknown } return rsimbdcp.repository.Stat(ctx, dgst) } func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { if rsimbdcp.repository == nil { return distribution.ErrBlobUnknown } return rsimbdcp.repository.Clear(ctx, dgst) } func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if rsimbdcp.repository == nil { // allocate map since we are setting it now. rsimbdcp.parent.mu.Lock() var ok bool // have to read back value since we may have allocated elsewhere. rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] if !ok { rsimbdcp.repository = newMapBlobDescriptorCache() rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository } rsimbdcp.parent.mu.Unlock() } if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { return err } return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) } // mapBlobDescriptorCache provides a simple map-based implementation of the // descriptor cache. 
type mapBlobDescriptorCache struct { descriptors map[digest.Digest]distribution.Descriptor mu sync.RWMutex } var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} func newMapBlobDescriptorCache() *mapBlobDescriptorCache { return &mapBlobDescriptorCache{ descriptors: make(map[digest.Digest]distribution.Descriptor), } } func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if err := dgst.Validate(); err != nil { return distribution.Descriptor{}, err } mbdc.mu.RLock() defer mbdc.mu.RUnlock() desc, ok := mbdc.descriptors[dgst] if !ok { return distribution.Descriptor{}, distribution.ErrBlobUnknown } return desc, nil } func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { mbdc.mu.Lock() defer mbdc.mu.Unlock() delete(mbdc.descriptors, dgst) return nil } func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := dgst.Validate(); err != nil { return err } if err := cache.ValidateDescriptor(desc); err != nil { return err } mbdc.mu.Lock() defer mbdc.mu.Unlock() mbdc.descriptors[dgst] = desc return nil } docker-1.10.3/vendor/src/github.com/docker/distribution/tags.go000066400000000000000000000017361267010174400244520ustar00rootroot00000000000000package distribution import ( "github.com/docker/distribution/context" ) // TagService provides access to information about tagged objects. type TagService interface { // Get retrieves the descriptor identified by the tag. Some // implementations may differentiate between "trusted" tags and // "untrusted" tags. If a tag is "untrusted", the mapping will be returned // as an ErrTagUntrusted error, with the target descriptor. Get(ctx context.Context, tag string) (Descriptor, error) // Tag associates the tag with the provided descriptor, updating the // current association, if needed. 
Tag(ctx context.Context, tag string, desc Descriptor) error // Untag removes the given tag association Untag(ctx context.Context, tag string) error // All returns the set of tags managed by this tag service All(ctx context.Context) ([]string, error) // Lookup returns the set of tags referencing the given digest. Lookup(ctx context.Context, digest Descriptor) ([]string, error) } docker-1.10.3/vendor/src/github.com/docker/distribution/uuid/000077500000000000000000000000001267010174400241245ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/distribution/uuid/uuid.go000066400000000000000000000060331267010174400254230ustar00rootroot00000000000000// Package uuid provides simple UUID generation. Only version 4 style UUIDs // can be generated. // // Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. package uuid import ( "crypto/rand" "fmt" "io" "os" "syscall" "time" ) const ( // Bits is the number of bits in a UUID Bits = 128 // Size is the number of bytes in a UUID Size = Bits / 8 format = "%08x-%04x-%04x-%04x-%012x" ) var ( // ErrUUIDInvalid indicates a parsed string is not a valid uuid. ErrUUIDInvalid = fmt.Errorf("invalid uuid") // Loggerf can be used to override the default logging destination. Such // log messages in this library should be logged at warning or higher. Loggerf = func(format string, args ...interface{}) {} ) // UUID represents a UUID value. UUIDs can be compared and set to other values // and accessed by byte. type UUID [Size]byte // Generate creates a new, version 4 uuid. func Generate() (u UUID) { const ( // ensures we backoff for less than 450ms total. Use the following to // select new value, in units of 10ms: // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 maxretries = 9 backoff = time.Millisecond * 10 ) var ( totalBackoff time.Duration count int retries int ) for { // This should never block but the read may fail. 
Because of this, // we just try to read the random number generator until we get // something. This is a very rare condition but may happen. b := time.Duration(retries) * backoff time.Sleep(b) totalBackoff += b n, err := io.ReadFull(rand.Reader, u[count:]) if err != nil { if retryOnError(err) && retries < maxretries { count += n retries++ Loggerf("error generating version 4 uuid, retrying: %v", err) continue } // Any other errors represent a system problem. What did someone // do to /dev/urandom? panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) } break } u[6] = (u[6] & 0x0f) | 0x40 // set version byte u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} return u } // Parse attempts to extract a uuid from the string or returns an error. func Parse(s string) (u UUID, err error) { if len(s) != 36 { return UUID{}, ErrUUIDInvalid } // create stack addresses for each section of the uuid. p := make([][]byte, 5) if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { return u, err } copy(u[0:4], p[0]) copy(u[4:6], p[1]) copy(u[6:8], p[2]) copy(u[8:10], p[3]) copy(u[10:16], p[4]) return } func (u UUID) String() string { return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) } // retryOnError tries to detect whether or not retrying would be fruitful. func retryOnError(err error) bool { switch err := err.(type) { case *os.PathError: return retryOnError(err.Err) // unpack the target error case syscall.Errno: if err == syscall.EPERM { // EPERM represents an entropy pool exhaustion, a condition under // which we backoff and retry. 
return true } } return false } docker-1.10.3/vendor/src/github.com/docker/engine-api/000077500000000000000000000000001267010174400224535ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/LICENSE000066400000000000000000000250151267010174400234630ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2015-2016 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-1.10.3/vendor/src/github.com/docker/engine-api/client/000077500000000000000000000000001267010174400237315ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/client/client.go000066400000000000000000000102411267010174400255340ustar00rootroot00000000000000package client import ( "crypto/tls" "fmt" "net" "net/http" "net/url" "os" "path/filepath" "strings" "time" ) // Client is the API client that performs all operations // against a docker server. type Client struct { // proto holds the client protocol i.e. unix. proto string // addr holds the client address. addr string // basePath holds the path to prepend to the requests basePath string // scheme holds the scheme of the client i.e. https. scheme string // tlsConfig holds the tls configuration to use in hijacked requests. tlsConfig *tls.Config // httpClient holds the client transport instance. Exported to keep the old code running. httpClient *http.Client // version of the server to talk to. version string // custom http headers configured by users customHTTPHeaders map[string]string } // NewEnvClient initializes a new API client based on environment variables. // Use DOCKER_HOST to set the url to the docker server. // Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. // Use DOCKER_CERT_PATH to load the tls certificates from. // Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. 
func NewEnvClient() (*Client, error) { var transport *http.Transport if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { tlsc := &tls.Config{} cert, err := tls.LoadX509KeyPair(filepath.Join(dockerCertPath, "cert.pem"), filepath.Join(dockerCertPath, "key.pem")) if err != nil { return nil, fmt.Errorf("Error loading x509 key pair: %s", err) } tlsc.Certificates = append(tlsc.Certificates, cert) tlsc.InsecureSkipVerify = os.Getenv("DOCKER_TLS_VERIFY") == "" transport = &http.Transport{ TLSClientConfig: tlsc, } } return NewClient(os.Getenv("DOCKER_HOST"), os.Getenv("DOCKER_API_VERSION"), transport, nil) } // NewClient initializes a new API client for the given host and API version. // It won't send any version information if the version number is empty. // It uses the transport to create a new http client. // It also initializes the custom http headers to add to each request. func NewClient(host string, version string, transport *http.Transport, httpHeaders map[string]string) (*Client, error) { var ( basePath string scheme = "http" protoAddrParts = strings.SplitN(host, "://", 2) proto, addr = protoAddrParts[0], protoAddrParts[1] ) if proto == "tcp" { parsed, err := url.Parse("tcp://" + addr) if err != nil { return nil, err } addr = parsed.Host basePath = parsed.Path } transport = configureTransport(transport, proto, addr) if transport.TLSClientConfig != nil { scheme = "https" } return &Client{ proto: proto, addr: addr, basePath: basePath, scheme: scheme, tlsConfig: transport.TLSClientConfig, httpClient: &http.Client{Transport: transport}, version: version, customHTTPHeaders: httpHeaders, }, nil } // getAPIPath returns the versioned request path to call the api. // It appends the query parameters to the path if they are not empty. 
func (cli *Client) getAPIPath(p string, query url.Values) string { var apiPath string if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) } else { apiPath = fmt.Sprintf("%s%s", cli.basePath, p) } if len(query) > 0 { apiPath += "?" + query.Encode() } return apiPath } // ClientVersion returns the version string associated with this // instance of the Client. Note that this value can be changed // via the DOCKER_API_VERSION env var. func (cli *Client) ClientVersion() string { return cli.version } func configureTransport(tr *http.Transport, proto, addr string) *http.Transport { if tr == nil { tr = &http.Transport{} } // Why 32? See https://github.com/docker/docker/pull/8035. timeout := 32 * time.Second if proto == "unix" { // No need for compression in local communications. tr.DisableCompression = true tr.Dial = func(_, _ string) (net.Conn, error) { return net.DialTimeout(proto, addr, timeout) } } else { tr.Proxy = http.ProxyFromEnvironment tr.Dial = (&net.Dialer{Timeout: timeout}).Dial } return tr } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_attach.go000066400000000000000000000016341267010174400275720ustar00rootroot00000000000000package client import ( "net/url" "github.com/docker/engine-api/types" ) // ContainerAttach attaches a connection to a container in the server. // It returns a types.HijackedConnection with the hijacked connection // and the a reader to get output. It's up to the called to close // the hijacked connection by calling types.HijackedResponse.Close. 
func (cli *Client) ContainerAttach(options types.ContainerAttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { query.Set("stream", "1") } if options.Stdin { query.Set("stdin", "1") } if options.Stdout { query.Set("stdout", "1") } if options.Stderr { query.Set("stderr", "1") } if options.DetachKeys != "" { query.Set("detachKeys", options.DetachKeys) } headers := map[string][]string{"Content-Type": {"text/plain"}} return cli.postHijacked("/containers/"+options.ContainerID+"/attach", query, nil, headers) } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_commit.go000066400000000000000000000016641267010174400276210ustar00rootroot00000000000000package client import ( "encoding/json" "net/url" "github.com/docker/engine-api/types" ) // ContainerCommit applies changes into a container and creates a new tagged image. func (cli *Client) ContainerCommit(options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { query := url.Values{} query.Set("container", options.ContainerID) query.Set("repo", options.RepositoryName) query.Set("tag", options.Tag) query.Set("comment", options.Comment) query.Set("author", options.Author) for _, change := range options.Changes { query.Add("changes", change) } if options.Pause != true { query.Set("pause", "0") } var response types.ContainerCommitResponse resp, err := cli.post("/commit", query, options.Config, nil) if err != nil { return response, err } defer ensureReaderClosed(resp) if err := json.NewDecoder(resp.body).Decode(&response); err != nil { return response, err } return response, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_create.go000066400000000000000000000030271267010174400275670ustar00rootroot00000000000000package client import ( "encoding/json" "net/url" "strings" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/network" ) type configWrapper struct { 
*container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig } // ContainerCreate creates a new container based in the given configuration. // It can be associated with a name, but it's not mandatory. func (cli *Client) ContainerCreate(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) { var response types.ContainerCreateResponse query := url.Values{} if containerName != "" { query.Set("name", containerName) } body := configWrapper{ Config: config, HostConfig: hostConfig, NetworkingConfig: networkingConfig, } serverResp, err := cli.post("/containers/create", query, body, nil) if err != nil { if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { return response, imageNotFoundError{config.Image} } return response, err } if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { return response, imageNotFoundError{config.Image} } if err != nil { return response, err } defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil { return response, err } return response, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_inspect.go000066400000000000000000000035711267010174400277750ustar00rootroot00000000000000package client import ( "bytes" "encoding/json" "io/ioutil" "net/http" "net/url" "github.com/docker/engine-api/types" ) // ContainerInspect returns the container information. 
func (cli *Client) ContainerInspect(containerID string) (types.ContainerJSON, error) { serverResp, err := cli.get("/containers/"+containerID+"/json", nil, nil) if err != nil { if serverResp.statusCode == http.StatusNotFound { return types.ContainerJSON{}, containerNotFoundError{containerID} } return types.ContainerJSON{}, err } defer ensureReaderClosed(serverResp) var response types.ContainerJSON err = json.NewDecoder(serverResp.body).Decode(&response) return response, err } // ContainerInspectWithRaw returns the container information and it's raw representation. func (cli *Client) ContainerInspectWithRaw(containerID string, getSize bool) (types.ContainerJSON, []byte, error) { query := url.Values{} if getSize { query.Set("size", "1") } serverResp, err := cli.get("/containers/"+containerID+"/json", query, nil) if err != nil { if serverResp.statusCode == http.StatusNotFound { return types.ContainerJSON{}, nil, containerNotFoundError{containerID} } return types.ContainerJSON{}, nil, err } defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { return types.ContainerJSON{}, nil, err } var response types.ContainerJSON rdr := bytes.NewReader(body) err = json.NewDecoder(rdr).Decode(&response) return response, body, err } func (cli *Client) containerInspectWithResponse(containerID string, query url.Values) (types.ContainerJSON, *serverResponse, error) { serverResp, err := cli.get("/containers/"+containerID+"/json", nil, nil) if err != nil { return types.ContainerJSON{}, serverResp, err } var response types.ContainerJSON err = json.NewDecoder(serverResp.body).Decode(&response) return response, serverResp, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_list.go000066400000000000000000000020371267010174400272770ustar00rootroot00000000000000package client import ( "encoding/json" "net/url" "strconv" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" ) // ContainerList returns 
the list of containers in the docker host. func (cli *Client) ContainerList(options types.ContainerListOptions) ([]types.Container, error) { query := url.Values{} if options.All { query.Set("all", "1") } if options.Limit != -1 { query.Set("limit", strconv.Itoa(options.Limit)) } if options.Since != "" { query.Set("since", options.Since) } if options.Before != "" { query.Set("before", options.Before) } if options.Size { query.Set("size", "1") } if options.Filter.Len() > 0 { filterJSON, err := filters.ToParam(options.Filter) if err != nil { return nil, err } query.Set("filters", filterJSON) } resp, err := cli.get("/containers/json", query, nil) if err != nil { return nil, err } defer ensureReaderClosed(resp) var containers []types.Container err = json.NewDecoder(resp.body).Decode(&containers) return containers, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_remove.go000066400000000000000000000010141267010174400276130ustar00rootroot00000000000000package client import ( "net/url" "github.com/docker/engine-api/types" ) // ContainerRemove kills and removes a container from the docker host. func (cli *Client) ContainerRemove(options types.ContainerRemoveOptions) error { query := url.Values{} if options.RemoveVolumes { query.Set("v", "1") } if options.RemoveLinks { query.Set("link", "1") } if options.Force { query.Set("force", "1") } resp, err := cli.delete("/containers/"+options.ContainerID, query, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_rename.go000066400000000000000000000005371267010174400275760ustar00rootroot00000000000000package client import "net/url" // ContainerRename changes the name of a given container. 
func (cli *Client) ContainerRename(containerID, newContainerName string) error { query := url.Values{} query.Set("name", newContainerName) resp, err := cli.post("/containers/"+containerID+"/rename", query, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_restart.go000066400000000000000000000007411267010174400300100ustar00rootroot00000000000000package client import ( "net/url" "strconv" ) // ContainerRestart stops and starts a container again. // It makes the daemon to wait for the container to be up again for // a specific amount of time, given the timeout. func (cli *Client) ContainerRestart(containerID string, timeout int) error { query := url.Values{} query.Set("t", strconv.Itoa(timeout)) resp, err := cli.post("/containers/"+containerID+"/restart", query, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_start.go000066400000000000000000000004161267010174400274600ustar00rootroot00000000000000package client // ContainerStart sends a request to the docker daemon to start a container. func (cli *Client) ContainerStart(containerID string) error { resp, err := cli.post("/containers/"+containerID+"/start", nil, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_stats.go000066400000000000000000000007571267010174400274710ustar00rootroot00000000000000package client import ( "io" "net/url" ) // ContainerStats returns near realtime stats for a given container. // It's up to the caller to close the io.ReadCloser returned. 
func (cli *Client) ContainerStats(containerID string, stream bool) (io.ReadCloser, error) { query := url.Values{} query.Set("stream", "0") if stream { query.Set("stream", "1") } resp, err := cli.get("/containers/"+containerID+"/stats", query, nil) if err != nil { return nil, err } return resp.body, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_stop.go000066400000000000000000000006761267010174400273200ustar00rootroot00000000000000package client import ( "net/url" "strconv" ) // ContainerStop stops a container without terminating the process. // The process is blocked until the container stops or the timeout expires. func (cli *Client) ContainerStop(containerID string, timeout int) error { query := url.Values{} query.Set("t", strconv.Itoa(timeout)) resp, err := cli.post("/containers/"+containerID+"/stop", query, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_top.go000066400000000000000000000012121267010174400271200ustar00rootroot00000000000000package client import ( "encoding/json" "net/url" "strings" "github.com/docker/engine-api/types" ) // ContainerTop shows process information from within a container. 
func (cli *Client) ContainerTop(containerID string, arguments []string) (types.ContainerProcessList, error) { var response types.ContainerProcessList query := url.Values{} if len(arguments) > 0 { query.Set("ps_args", strings.Join(arguments, " ")) } resp, err := cli.get("/containers/"+containerID+"/top", query, nil) if err != nil { return response, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&response) return response, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_unpause.go000066400000000000000000000004121267010174400277770ustar00rootroot00000000000000package client // ContainerUnpause resumes the process execution within a container func (cli *Client) ContainerUnpause(containerID string) error { resp, err := cli.post("/containers/"+containerID+"/unpause", nil, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/container_update.go000066400000000000000000000005411267010174400276040ustar00rootroot00000000000000package client import ( "github.com/docker/engine-api/types/container" ) // ContainerUpdate updates resources of a container func (cli *Client) ContainerUpdate(containerID string, updateConfig container.UpdateConfig) error { resp, err := cli.post("/containers/"+containerID+"/update", nil, updateConfig, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/copy.go000066400000000000000000000064121267010174400252350ustar00rootroot00000000000000package client import ( "encoding/base64" "encoding/json" "fmt" "io" "net/http" "net/url" "path/filepath" "strings" "github.com/docker/engine-api/types" ) // ContainerStatPath returns Stat information about a path inside the container filesystem. func (cli *Client) ContainerStatPath(containerID, path string) (types.ContainerPathStat, error) { query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
urlStr := fmt.Sprintf("/containers/%s/archive", containerID) response, err := cli.head(urlStr, query, nil) if err != nil { return types.ContainerPathStat{}, err } defer ensureReaderClosed(response) return getContainerPathStatFromHeader(response.header) } // CopyToContainer copies content into the container filesystem. func (cli *Client) CopyToContainer(options types.CopyToContainerOptions) error { query := url.Values{} query.Set("path", filepath.ToSlash(options.Path)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. if !options.AllowOverwriteDirWithFile { query.Set("noOverwriteDirNonDir", "true") } path := fmt.Sprintf("/containers/%s/archive", options.ContainerID) response, err := cli.putRaw(path, query, options.Content, nil) if err != nil { return err } defer ensureReaderClosed(response) if response.statusCode != http.StatusOK { return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) } return nil } // CopyFromContainer get the content from the container and return it as a Reader // to manipulate it in the host. It's up to the caller to close the reader. func (cli *Client) CopyFromContainer(containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. apiPath := fmt.Sprintf("/containers/%s/archive", containerID) response, err := cli.get(apiPath, query, nil) if err != nil { return nil, types.ContainerPathStat{}, err } if response.statusCode != http.StatusOK { return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) } // In order to get the copy behavior right, we need to know information // about both the source and the destination. The response headers include // stat info about the source that we can use in deciding exactly how to // copy it locally. 
Along with the stat info about the local destination, // we have everything we need to handle the multiple possibilities there // can be when copying a file/dir from one location to another file/dir. stat, err := getContainerPathStatFromHeader(response.header) if err != nil { return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) } return response.body, stat, err } func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { var stat types.ContainerPathStat encodedStat := header.Get("X-Docker-Container-Path-Stat") statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) err := json.NewDecoder(statDecoder).Decode(&stat) if err != nil { err = fmt.Errorf("unable to decode container path stat header: %s", err) } return stat, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/diff.go000066400000000000000000000011231267010174400251650ustar00rootroot00000000000000package client import ( "encoding/json" "net/url" "github.com/docker/engine-api/types" ) // ContainerDiff shows differences in a container filesystem since it was started. func (cli *Client) ContainerDiff(containerID string) ([]types.ContainerChange, error) { var changes []types.ContainerChange serverResp, err := cli.get("/containers/"+containerID+"/changes", url.Values{}, nil) if err != nil { return changes, err } defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&changes); err != nil { return changes, err } return changes, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/errors.go000066400000000000000000000054671267010174400256100ustar00rootroot00000000000000package client import ( "errors" "fmt" ) // ErrConnectionFailed is a error raised when the connection between the client and the server failed. var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. 
Is the docker daemon running on this host?") // imageNotFoundError implements an error returned when an image is not in the docker host. type imageNotFoundError struct { imageID string } // Error returns a string representation of an imageNotFoundError func (i imageNotFoundError) Error() string { return fmt.Sprintf("Error: No such image: %s", i.imageID) } // IsErrImageNotFound returns true if the error is caused // when an image is not found in the docker host. func IsErrImageNotFound(err error) bool { _, ok := err.(imageNotFoundError) return ok } // containerNotFoundError implements an error returned when a container is not in the docker host. type containerNotFoundError struct { containerID string } // Error returns a string representation of an containerNotFoundError func (e containerNotFoundError) Error() string { return fmt.Sprintf("Error: No such container: %s", e.containerID) } // IsErrContainerNotFound returns true if the error is caused // when a container is not found in the docker host. func IsErrContainerNotFound(err error) bool { _, ok := err.(containerNotFoundError) return ok } // networkNotFoundError implements an error returned when a network is not in the docker host. type networkNotFoundError struct { networkID string } // Error returns a string representation of an networkNotFoundError func (e networkNotFoundError) Error() string { return fmt.Sprintf("Error: No such network: %s", e.networkID) } // IsErrNetworkNotFound returns true if the error is caused // when a network is not found in the docker host. func IsErrNetworkNotFound(err error) bool { _, ok := err.(networkNotFoundError) return ok } // volumeNotFoundError implements an error returned when a volume is not in the docker host. 
type volumeNotFoundError struct { volumeID string } // Error returns a string representation of an networkNotFoundError func (e volumeNotFoundError) Error() string { return fmt.Sprintf("Error: No such volume: %s", e.volumeID) } // IsErrVolumeNotFound returns true if the error is caused // when a volume is not found in the docker host. func IsErrVolumeNotFound(err error) bool { _, ok := err.(networkNotFoundError) return ok } // unauthorizedError represents an authorization error in a remote registry. type unauthorizedError struct { cause error } // Error returns a string representation of an unauthorizedError func (u unauthorizedError) Error() string { return u.cause.Error() } // IsErrUnauthorized returns true if the error is caused // when an the remote registry authentication fails func IsErrUnauthorized(err error) bool { _, ok := err.(unauthorizedError) return ok } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/events.go000066400000000000000000000020001267010174400255540ustar00rootroot00000000000000package client import ( "io" "net/url" "time" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" timetypes "github.com/docker/engine-api/types/time" ) // Events returns a stream of events in the daemon in a ReadCloser. // It's up to the caller to close the stream. 
func (cli *Client) Events(options types.EventsOptions) (io.ReadCloser, error) { query := url.Values{} ref := time.Now() if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, ref) if err != nil { return nil, err } query.Set("since", ts) } if options.Until != "" { ts, err := timetypes.GetTimestamp(options.Until, ref) if err != nil { return nil, err } query.Set("until", ts) } if options.Filters.Len() > 0 { filterJSON, err := filters.ToParam(options.Filters) if err != nil { return nil, err } query.Set("filters", filterJSON) } serverResponse, err := cli.get("/events", query, nil) if err != nil { return nil, err } return serverResponse.body, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/exec.go000066400000000000000000000034241267010174400252070ustar00rootroot00000000000000package client import ( "encoding/json" "github.com/docker/engine-api/types" ) // ContainerExecCreate creates a new exec configuration to run an exec process. func (cli *Client) ContainerExecCreate(config types.ExecConfig) (types.ContainerExecCreateResponse, error) { var response types.ContainerExecCreateResponse resp, err := cli.post("/containers/"+config.Container+"/exec", nil, config, nil) if err != nil { return response, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&response) return response, err } // ContainerExecStart starts an exec process already create in the docker host. func (cli *Client) ContainerExecStart(execID string, config types.ExecStartCheck) error { resp, err := cli.post("/exec/"+execID+"/start", nil, config, nil) ensureReaderClosed(resp) return err } // ContainerExecAttach attaches a connection to an exec process in the server. // It returns a types.HijackedConnection with the hijacked connection // and the a reader to get output. It's up to the called to close // the hijacked connection by calling types.HijackedResponse.Close. 
func (cli *Client) ContainerExecAttach(execID string, config types.ExecConfig) (types.HijackedResponse, error) { headers := map[string][]string{"Content-Type": {"application/json"}} return cli.postHijacked("/exec/"+execID+"/start", nil, config, headers) } // ContainerExecInspect returns information about a specific exec process on the docker host. func (cli *Client) ContainerExecInspect(execID string) (types.ContainerExecInspect, error) { var response types.ContainerExecInspect resp, err := cli.get("/exec/"+execID+"/json", nil, nil) if err != nil { return response, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&response) return response, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/export.go000066400000000000000000000006531267010174400256050ustar00rootroot00000000000000package client import ( "io" "net/url" ) // ContainerExport retrieves the raw contents of a container // and returns them as a io.ReadCloser. It's up to the caller // to close the stream. func (cli *Client) ContainerExport(containerID string) (io.ReadCloser, error) { serverResp, err := cli.get("/containers/"+containerID+"/export", url.Values{}, nil) if err != nil { return nil, err } return serverResp.body, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/hijack.go000066400000000000000000000113531267010174400255140ustar00rootroot00000000000000package client import ( "crypto/tls" "errors" "fmt" "net" "net/http/httputil" "net/url" "strings" "time" "github.com/docker/engine-api/types" ) // tlsClientCon holds tls information and a dialed connection. type tlsClientCon struct { *tls.Conn rawConn net.Conn } func (c *tlsClientCon) CloseWrite() error { // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it // on its underlying connection. if conn, ok := c.rawConn.(types.CloseWriter); ok { return conn.CloseWrite() } return nil } // postHijacked sends a POST request and hijacks the connection. 
func (cli *Client) postHijacked(path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { bodyEncoded, err := encodeData(body) if err != nil { return types.HijackedResponse{}, err } req, err := cli.newRequest("POST", path, query, bodyEncoded, headers) if err != nil { return types.HijackedResponse{}, err } req.Host = cli.addr req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", "tcp") conn, err := dial(cli.proto, cli.addr, cli.tlsConfig) if err != nil { if strings.Contains(err.Error(), "connection refused") { return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") } return types.HijackedResponse{}, err } // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain // network setups may cause ECONNTIMEOUT, leaving the client in an unknown // state. Setting TCP KeepAlive on the socket connection will prohibit // ECONNTIMEOUT unless the socket connection truly is broken if tcpConn, ok := conn.(*net.TCPConn); ok { tcpConn.SetKeepAlive(true) tcpConn.SetKeepAlivePeriod(30 * time.Second) } clientconn := httputil.NewClientConn(conn, nil) defer clientconn.Close() // Server hijacks the connection, error 'connection closed' expected clientconn.Do(req) rwc, br := clientconn.Hijack() return types.HijackedResponse{Conn: rwc, Reader: br}, nil } func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { return tlsDialWithDialer(new(net.Dialer), network, addr, config) } // We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in // order to return our custom tlsClientCon struct which holds both the tls.Conn // object _and_ its underlying raw connection. The rationale for this is that // we need to be able to close the write end of the connection when attaching, // which tls.Conn does not provide. 
func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { // We want the Timeout and Deadline values from dialer to cover the // whole process: TCP connection and TLS handshake. This means that we // also need to start our own timers now. timeout := dialer.Timeout if !dialer.Deadline.IsZero() { deadlineTimeout := dialer.Deadline.Sub(time.Now()) if timeout == 0 || deadlineTimeout < timeout { timeout = deadlineTimeout } } var errChannel chan error if timeout != 0 { errChannel = make(chan error, 2) time.AfterFunc(timeout, func() { errChannel <- errors.New("") }) } rawConn, err := dialer.Dial(network, addr) if err != nil { return nil, err } // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain // network setups may cause ECONNTIMEOUT, leaving the client in an unknown // state. Setting TCP KeepAlive on the socket connection will prohibit // ECONNTIMEOUT unless the socket connection truly is broken if tcpConn, ok := rawConn.(*net.TCPConn); ok { tcpConn.SetKeepAlive(true) tcpConn.SetKeepAlivePeriod(30 * time.Second) } colonPos := strings.LastIndex(addr, ":") if colonPos == -1 { colonPos = len(addr) } hostname := addr[:colonPos] // If no ServerName is set, infer the ServerName // from the hostname we're connecting to. if config.ServerName == "" { // Make a copy to avoid polluting argument or default. c := *config c.ServerName = hostname config = &c } conn := tls.Client(rawConn, config) if timeout == 0 { err = conn.Handshake() } else { go func() { errChannel <- conn.Handshake() }() err = <-errChannel } if err != nil { rawConn.Close() return nil, err } // This is Docker difference with standard's crypto/tls package: returned a // wrapper which holds both the TLS and raw connections. 
return &tlsClientCon{conn, rawConn}, nil } func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { if tlsConfig != nil && proto != "unix" { // Notice this isn't Go standard's tls.Dial function return tlsDial(proto, addr, tlsConfig) } return net.Dial(proto, addr) } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/history.go000066400000000000000000000010561267010174400257630ustar00rootroot00000000000000package client import ( "encoding/json" "net/url" "github.com/docker/engine-api/types" ) // ImageHistory returns the changes in an image in history format. func (cli *Client) ImageHistory(imageID string) ([]types.ImageHistory, error) { var history []types.ImageHistory serverResp, err := cli.get("/images/"+imageID+"/history", url.Values{}, nil) if err != nil { return history, err } defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&history); err != nil { return history, err } return history, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_build.go000066400000000000000000000062661267010174400265330ustar00rootroot00000000000000package client import ( "encoding/base64" "encoding/json" "net/http" "net/url" "regexp" "strconv" "strings" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" ) var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) // ImageBuild sends request to the daemon to build images. // The Body in the response implement an io.ReadCloser and it's up to the caller to // close it. 
func (cli *Client) ImageBuild(options types.ImageBuildOptions) (types.ImageBuildResponse, error) { query, err := imageBuildOptionsToQuery(options) if err != nil { return types.ImageBuildResponse{}, err } headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(options.AuthConfigs) if err != nil { return types.ImageBuildResponse{}, err } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/tar") serverResp, err := cli.postRaw("/build", query, options.Context, headers) if err != nil { return types.ImageBuildResponse{}, err } osType := getDockerOS(serverResp.header.Get("Server")) return types.ImageBuildResponse{ Body: serverResp.body, OSType: osType, }, nil } func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { query := url.Values{ "t": options.Tags, } if options.SuppressOutput { query.Set("q", "1") } if options.RemoteContext != "" { query.Set("remote", options.RemoteContext) } if options.NoCache { query.Set("nocache", "1") } if options.Remove { query.Set("rm", "1") } else { query.Set("rm", "0") } if options.ForceRemove { query.Set("forcerm", "1") } if options.PullParent { query.Set("pull", "1") } if !container.IsolationLevel.IsDefault(options.IsolationLevel) { query.Set("isolation", string(options.IsolationLevel)) } query.Set("cpusetcpus", options.CPUSetCPUs) query.Set("cpusetmems", options.CPUSetMems) query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) query.Set("memory", strconv.FormatInt(options.Memory, 10)) query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) query.Set("cgroupparent", options.CgroupParent) query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) query.Set("dockerfile", options.Dockerfile) ulimitsJSON, err := json.Marshal(options.Ulimits) if err != nil { return query, err } 
query.Set("ulimits", string(ulimitsJSON)) buildArgsJSON, err := json.Marshal(options.BuildArgs) if err != nil { return query, err } query.Set("buildargs", string(buildArgsJSON)) return query, nil } func getDockerOS(serverHeader string) string { var osType string matches := headerRegexp.FindStringSubmatch(serverHeader) if len(matches) > 0 { osType = matches[1] } return osType } // convertKVStringsToMap converts ["key=value"] to {"key":"value"} func convertKVStringsToMap(values []string) map[string]string { result := make(map[string]string, len(values)) for _, value := range values { kv := strings.SplitN(value, "=", 2) if len(kv) == 1 { result[kv[0]] = "" } else { result[kv[0]] = kv[1] } } return result } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_create.go000066400000000000000000000013401267010174400266630ustar00rootroot00000000000000package client import ( "io" "net/url" "github.com/docker/engine-api/types" ) // ImageCreate creates a new image based in the parent options. // It returns the JSON content in the response body. func (cli *Client) ImageCreate(options types.ImageCreateOptions) (io.ReadCloser, error) { query := url.Values{} query.Set("fromImage", options.Parent) query.Set("tag", options.Tag) resp, err := cli.tryImageCreate(query, options.RegistryAuth) if err != nil { return nil, err } return resp.body, nil } func (cli *Client) tryImageCreate(query url.Values, registryAuth string) (*serverResponse, error) { headers := map[string][]string{"X-Registry-Auth": {registryAuth}} return cli.post("/images/create", query, nil, headers) } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_import.go000066400000000000000000000012511267010174400267330ustar00rootroot00000000000000package client import ( "io" "net/url" "github.com/docker/engine-api/types" ) // ImageImport creates a new image based in the source options. // It returns the JSON content in the response body. 
func (cli *Client) ImageImport(options types.ImageImportOptions) (io.ReadCloser, error) { query := url.Values{} query.Set("fromSrc", options.SourceName) query.Set("repo", options.RepositoryName) query.Set("tag", options.Tag) query.Set("message", options.Message) for _, change := range options.Changes { query.Add("changes", change) } resp, err := cli.postRaw("/images/create", query, options.Source, nil) if err != nil { return nil, err } return resp.body, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_inspect.go000066400000000000000000000016251267010174400270730ustar00rootroot00000000000000package client import ( "bytes" "encoding/json" "io/ioutil" "net/http" "net/url" "github.com/docker/engine-api/types" ) // ImageInspectWithRaw returns the image information and it's raw representation. func (cli *Client) ImageInspectWithRaw(imageID string, getSize bool) (types.ImageInspect, []byte, error) { query := url.Values{} if getSize { query.Set("size", "1") } serverResp, err := cli.get("/images/"+imageID+"/json", query, nil) if err != nil { if serverResp.statusCode == http.StatusNotFound { return types.ImageInspect{}, nil, imageNotFoundError{imageID} } return types.ImageInspect{}, nil, err } defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { return types.ImageInspect{}, nil, err } var response types.ImageInspect rdr := bytes.NewReader(body) err = json.NewDecoder(rdr).Decode(&response) return response, body, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_list.go000066400000000000000000000016201267010174400263740ustar00rootroot00000000000000package client import ( "encoding/json" "net/url" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" ) // ImageList returns a list of images in the docker host. 
func (cli *Client) ImageList(options types.ImageListOptions) ([]types.Image, error) { var images []types.Image query := url.Values{} if options.Filters.Len() > 0 { filterJSON, err := filters.ToParam(options.Filters) if err != nil { return images, err } query.Set("filters", filterJSON) } if options.MatchName != "" { // FIXME rename this parameter, to not be confused with the filters flag query.Set("filter", options.MatchName) } if options.All { query.Set("all", "1") } serverResp, err := cli.get("/images/json", query, nil) if err != nil { return images, err } defer ensureReaderClosed(serverResp) err = json.NewDecoder(serverResp.body).Decode(&images) return images, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_load.go000066400000000000000000000010711267010174400263400ustar00rootroot00000000000000package client import ( "io" "net/url" "github.com/docker/engine-api/types" ) // ImageLoad loads an image in the docker host from the client host. // It's up to the caller to close the io.ReadCloser returned by // this function. func (cli *Client) ImageLoad(input io.Reader) (types.ImageLoadResponse, error) { resp, err := cli.postRaw("/images/load", url.Values{}, input, nil) if err != nil { return types.ImageLoadResponse{}, err } return types.ImageLoadResponse{ Body: resp.body, JSON: resp.header.Get("Content-Type") == "application/json", }, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_pull.go000066400000000000000000000016531267010174400264030ustar00rootroot00000000000000package client import ( "io" "net/http" "net/url" "github.com/docker/engine-api/types" ) // ImagePull request the docker host to pull an image from a remote registry. // It executes the privileged function if the operation is unauthorized // and it tries one more time. // It's up to the caller to handle the io.ReadCloser and close it properly. 
func (cli *Client) ImagePull(options types.ImagePullOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) { query := url.Values{} query.Set("fromImage", options.ImageID) if options.Tag != "" { query.Set("tag", options.Tag) } resp, err := cli.tryImageCreate(query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized { newAuthHeader, privilegeErr := privilegeFunc() if privilegeErr != nil { return nil, privilegeErr } resp, err = cli.tryImageCreate(query, newAuthHeader) } if err != nil { return nil, err } return resp.body, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_push.go000066400000000000000000000021741267010174400264050ustar00rootroot00000000000000package client import ( "io" "net/http" "net/url" "github.com/docker/engine-api/types" ) // ImagePush request the docker host to push an image to a remote registry. // It executes the privileged function if the operation is unauthorized // and it tries one more time. // It's up to the caller to handle the io.ReadCloser and close it properly. 
func (cli *Client) ImagePush(options types.ImagePushOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) { query := url.Values{} query.Set("tag", options.Tag) resp, err := cli.tryImagePush(options.ImageID, query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized { newAuthHeader, privilegeErr := privilegeFunc() if privilegeErr != nil { return nil, privilegeErr } resp, err = cli.tryImagePush(options.ImageID, query, newAuthHeader) } if err != nil { return nil, err } return resp.body, nil } func (cli *Client) tryImagePush(imageID string, query url.Values, registryAuth string) (*serverResponse, error) { headers := map[string][]string{"X-Registry-Auth": {registryAuth}} return cli.post("/images/"+imageID+"/push", query, nil, headers) } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_remove.go000066400000000000000000000011451267010174400267200ustar00rootroot00000000000000package client import ( "encoding/json" "net/url" "github.com/docker/engine-api/types" ) // ImageRemove removes an image from the docker host. func (cli *Client) ImageRemove(options types.ImageRemoveOptions) ([]types.ImageDelete, error) { query := url.Values{} if options.Force { query.Set("force", "1") } if !options.PruneChildren { query.Set("noprune", "1") } resp, err := cli.delete("/images/"+options.ImageID, query, nil) if err != nil { return nil, err } defer ensureReaderClosed(resp) var dels []types.ImageDelete err = json.NewDecoder(resp.body).Decode(&dels) return dels, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_save.go000066400000000000000000000006531267010174400263640ustar00rootroot00000000000000package client import ( "io" "net/url" ) // ImageSave retrieves one or more images from the docker host as a io.ReadCloser. // It's up to the caller to store the images and close the stream. 
func (cli *Client) ImageSave(imageIDs []string) (io.ReadCloser, error) { query := url.Values{ "names": imageIDs, } resp, err := cli.get("/images/get", query, nil) if err != nil { return nil, err } return resp.body, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_search.go000066400000000000000000000022161267010174400266700ustar00rootroot00000000000000package client import ( "encoding/json" "net/http" "net/url" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/registry" ) // ImageSearch makes the docker host to search by a term in a remote registry. // The list of results is not sorted in any fashion. func (cli *Client) ImageSearch(options types.ImageSearchOptions, privilegeFunc RequestPrivilegeFunc) ([]registry.SearchResult, error) { var results []registry.SearchResult query := url.Values{} query.Set("term", options.Term) resp, err := cli.tryImageSearch(query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized { newAuthHeader, privilegeErr := privilegeFunc() if privilegeErr != nil { return results, privilegeErr } resp, err = cli.tryImageSearch(query, newAuthHeader) } if err != nil { return results, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&results) return results, err } func (cli *Client) tryImageSearch(query url.Values, registryAuth string) (*serverResponse, error) { headers := map[string][]string{"X-Registry-Auth": {registryAuth}} return cli.get("/images/search", query, headers) } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/image_tag.go000066400000000000000000000007041267010174400261760ustar00rootroot00000000000000package client import ( "net/url" "github.com/docker/engine-api/types" ) // ImageTag tags an image in the docker host func (cli *Client) ImageTag(options types.ImageTagOptions) error { query := url.Values{} query.Set("repo", options.RepositoryName) query.Set("tag", options.Tag) if options.Force { query.Set("force", "1") } resp, err := 
cli.post("/images/"+options.ImageID+"/tag", query, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/info.go000066400000000000000000000010061267010174400252100ustar00rootroot00000000000000package client import ( "encoding/json" "fmt" "net/url" "github.com/docker/engine-api/types" ) // Info returns information about the docker server. func (cli *Client) Info() (types.Info, error) { var info types.Info serverResp, err := cli.get("/info", url.Values{}, nil) if err != nil { return info, err } defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { return info, fmt.Errorf("Error reading remote info: %v", err) } return info, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/interface.go000066400000000000000000000106111267010174400262170ustar00rootroot00000000000000package client import ( "io" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/filters" "github.com/docker/engine-api/types/network" "github.com/docker/engine-api/types/registry" ) // APIClient is an interface that clients that talk with a docker server must implement. 
type APIClient interface { ClientVersion() string ContainerAttach(options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) ContainerCreate(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) ContainerDiff(containerID string) ([]types.ContainerChange, error) ContainerExecAttach(execID string, config types.ExecConfig) (types.HijackedResponse, error) ContainerExecCreate(config types.ExecConfig) (types.ContainerExecCreateResponse, error) ContainerExecInspect(execID string) (types.ContainerExecInspect, error) ContainerExecResize(options types.ResizeOptions) error ContainerExecStart(execID string, config types.ExecStartCheck) error ContainerExport(containerID string) (io.ReadCloser, error) ContainerInspect(containerID string) (types.ContainerJSON, error) ContainerInspectWithRaw(containerID string, getSize bool) (types.ContainerJSON, []byte, error) ContainerKill(containerID, signal string) error ContainerList(options types.ContainerListOptions) ([]types.Container, error) ContainerLogs(options types.ContainerLogsOptions) (io.ReadCloser, error) ContainerPause(containerID string) error ContainerRemove(options types.ContainerRemoveOptions) error ContainerRename(containerID, newContainerName string) error ContainerResize(options types.ResizeOptions) error ContainerRestart(containerID string, timeout int) error ContainerStatPath(containerID, path string) (types.ContainerPathStat, error) ContainerStats(containerID string, stream bool) (io.ReadCloser, error) ContainerStart(containerID string) error ContainerStop(containerID string, timeout int) error ContainerTop(containerID string, arguments []string) (types.ContainerProcessList, error) ContainerUnpause(containerID string) error ContainerUpdate(containerID string, updateConfig container.UpdateConfig) error 
ContainerWait(containerID string) (int, error) CopyFromContainer(containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(options types.CopyToContainerOptions) error Events(options types.EventsOptions) (io.ReadCloser, error) ImageBuild(options types.ImageBuildOptions) (types.ImageBuildResponse, error) ImageCreate(options types.ImageCreateOptions) (io.ReadCloser, error) ImageHistory(imageID string) ([]types.ImageHistory, error) ImageImport(options types.ImageImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(imageID string, getSize bool) (types.ImageInspect, []byte, error) ImageList(options types.ImageListOptions) ([]types.Image, error) ImageLoad(input io.Reader) (types.ImageLoadResponse, error) ImagePull(options types.ImagePullOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) ImagePush(options types.ImagePushOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) ImageRemove(options types.ImageRemoveOptions) ([]types.ImageDelete, error) ImageSearch(options types.ImageSearchOptions, privilegeFunc RequestPrivilegeFunc) ([]registry.SearchResult, error) ImageSave(imageIDs []string) (io.ReadCloser, error) ImageTag(options types.ImageTagOptions) error Info() (types.Info, error) NetworkConnect(networkID, containerID string, config *network.EndpointSettings) error NetworkCreate(options types.NetworkCreate) (types.NetworkCreateResponse, error) NetworkDisconnect(networkID, containerID string, force bool) error NetworkInspect(networkID string) (types.NetworkResource, error) NetworkList(options types.NetworkListOptions) ([]types.NetworkResource, error) NetworkRemove(networkID string) error RegistryLogin(auth types.AuthConfig) (types.AuthResponse, error) ServerVersion() (types.Version, error) VolumeCreate(options types.VolumeCreateRequest) (types.Volume, error) VolumeInspect(volumeID string) (types.Volume, error) VolumeList(filter filters.Args) (types.VolumesListResponse, error) VolumeRemove(volumeID 
string) error } // Ensure that Client always implements APIClient. var _ APIClient = &Client{} docker-1.10.3/vendor/src/github.com/docker/engine-api/client/kill.go000066400000000000000000000005721267010174400252170ustar00rootroot00000000000000package client import "net/url" // ContainerKill terminates the container process but does not remove the container from the docker host. func (cli *Client) ContainerKill(containerID, signal string) error { query := url.Values{} query.Set("signal", signal) resp, err := cli.post("/containers/"+containerID+"/kill", query, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/login.go000066400000000000000000000013071267010174400253710ustar00rootroot00000000000000package client import ( "encoding/json" "net/http" "net/url" "github.com/docker/engine-api/types" ) // RegistryLogin authenticates the docker server with a given docker registry. // It returns UnauthorizerError when the authentication fails. func (cli *Client) RegistryLogin(auth types.AuthConfig) (types.AuthResponse, error) { resp, err := cli.post("/auth", url.Values{}, auth, nil) if resp != nil && resp.statusCode == http.StatusUnauthorized { return types.AuthResponse{}, unauthorizedError{err} } if err != nil { return types.AuthResponse{}, err } defer ensureReaderClosed(resp) var response types.AuthResponse err = json.NewDecoder(resp.body).Decode(&response) return response, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/logs.go000066400000000000000000000016611267010174400252300ustar00rootroot00000000000000package client import ( "io" "net/url" "time" "github.com/docker/engine-api/types" timetypes "github.com/docker/engine-api/types/time" ) // ContainerLogs returns the logs generated by a container in an io.ReadCloser. // It's up to the caller to close the stream. 
func (cli *Client) ContainerLogs(options types.ContainerLogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") } if options.ShowStderr { query.Set("stderr", "1") } if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { return nil, err } query.Set("since", ts) } if options.Timestamps { query.Set("timestamps", "1") } if options.Follow { query.Set("follow", "1") } query.Set("tail", options.Tail) resp, err := cli.get("/containers/"+options.ContainerID+"/logs", query, nil) if err != nil { return nil, err } return resp.body, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/network.go000066400000000000000000000053541267010174400257600ustar00rootroot00000000000000package client import ( "encoding/json" "net/http" "net/url" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" "github.com/docker/engine-api/types/network" ) // NetworkCreate creates a new network in the docker host. func (cli *Client) NetworkCreate(options types.NetworkCreate) (types.NetworkCreateResponse, error) { var response types.NetworkCreateResponse serverResp, err := cli.post("/networks/create", nil, options, nil) if err != nil { return response, err } json.NewDecoder(serverResp.body).Decode(&response) ensureReaderClosed(serverResp) return response, err } // NetworkRemove removes an existent network from the docker host. func (cli *Client) NetworkRemove(networkID string) error { resp, err := cli.delete("/networks/"+networkID, nil, nil) ensureReaderClosed(resp) return err } // NetworkConnect connects a container to an existent network in the docker host. 
func (cli *Client) NetworkConnect(networkID, containerID string, config *network.EndpointSettings) error { nc := types.NetworkConnect{ Container: containerID, EndpointConfig: config, } resp, err := cli.post("/networks/"+networkID+"/connect", nil, nc, nil) ensureReaderClosed(resp) return err } // NetworkDisconnect disconnects a container from an existent network in the docker host. func (cli *Client) NetworkDisconnect(networkID, containerID string, force bool) error { nd := types.NetworkDisconnect{Container: containerID, Force: force} resp, err := cli.post("/networks/"+networkID+"/disconnect", nil, nd, nil) ensureReaderClosed(resp) return err } // NetworkList returns the list of networks configured in the docker host. func (cli *Client) NetworkList(options types.NetworkListOptions) ([]types.NetworkResource, error) { query := url.Values{} if options.Filters.Len() > 0 { filterJSON, err := filters.ToParam(options.Filters) if err != nil { return nil, err } query.Set("filters", filterJSON) } var networkResources []types.NetworkResource resp, err := cli.get("/networks", query, nil) if err != nil { return networkResources, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&networkResources) return networkResources, err } // NetworkInspect returns the information for a specific network configured in the docker host. 
func (cli *Client) NetworkInspect(networkID string) (types.NetworkResource, error) { var networkResource types.NetworkResource resp, err := cli.get("/networks/"+networkID, nil, nil) if err != nil { if resp.statusCode == http.StatusNotFound { return networkResource, networkNotFoundError{networkID} } return networkResource, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&networkResource) return networkResource, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/pause.go000066400000000000000000000004301267010174400253720ustar00rootroot00000000000000package client // ContainerPause pauses the main process of a given container without terminating it. func (cli *Client) ContainerPause(containerID string) error { resp, err := cli.post("/containers/"+containerID+"/pause", nil, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/privileged.go000066400000000000000000000005201267010174400264070ustar00rootroot00000000000000package client // RequestPrivilegeFunc is a function interface that // clients can supply to retry operations after // getting an authorization error. // This function returns the registry authentication // header value in base 64 format, or an error // if the privilege request fails. type RequestPrivilegeFunc func() (string, error) docker-1.10.3/vendor/src/github.com/docker/engine-api/client/request.go000066400000000000000000000127251267010174400257570ustar00rootroot00000000000000package client import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "strings" ) // serverResponse is a wrapper for http API responses. type serverResponse struct { body io.ReadCloser header http.Header statusCode int } // head sends an http request to the docker API using the method HEAD. 
func (cli *Client) head(path string, query url.Values, headers map[string][]string) (*serverResponse, error) { return cli.sendRequest("HEAD", path, query, nil, headers) } // get sends an http request to the docker API using the method GET. func (cli *Client) get(path string, query url.Values, headers map[string][]string) (*serverResponse, error) { return cli.sendRequest("GET", path, query, nil, headers) } // post sends an http request to the docker API using the method POST. func (cli *Client) post(path string, query url.Values, body interface{}, headers map[string][]string) (*serverResponse, error) { return cli.sendRequest("POST", path, query, body, headers) } // postRaw sends the raw input to the docker API using the method POST. func (cli *Client) postRaw(path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { return cli.sendClientRequest("POST", path, query, body, headers) } // put sends an http request to the docker API using the method PUT. func (cli *Client) put(path string, query url.Values, body interface{}, headers map[string][]string) (*serverResponse, error) { return cli.sendRequest("PUT", path, query, body, headers) } // putRaw sends the raw input to the docker API using the method PUT. func (cli *Client) putRaw(path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { return cli.sendClientRequest("PUT", path, query, body, headers) } // delete sends an http request to the docker API using the method DELETE. 
func (cli *Client) delete(path string, query url.Values, headers map[string][]string) (*serverResponse, error) { return cli.sendRequest("DELETE", path, query, nil, headers) } func (cli *Client) sendRequest(method, path string, query url.Values, body interface{}, headers map[string][]string) (*serverResponse, error) { params, err := encodeData(body) if err != nil { return nil, err } if body != nil { if headers == nil { headers = make(map[string][]string) } headers["Content-Type"] = []string{"application/json"} } return cli.sendClientRequest(method, path, query, params, headers) } func (cli *Client) sendClientRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { serverResp := &serverResponse{ body: nil, statusCode: -1, } expectedPayload := (method == "POST" || method == "PUT") if expectedPayload && body == nil { body = bytes.NewReader([]byte{}) } req, err := cli.newRequest(method, path, query, body, headers) req.URL.Host = cli.addr req.URL.Scheme = cli.scheme if expectedPayload && req.Header.Get("Content-Type") == "" { req.Header.Set("Content-Type", "text/plain") } resp, err := cli.httpClient.Do(req) if resp != nil { serverResp.statusCode = resp.StatusCode } if err != nil { if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { return serverResp, ErrConnectionFailed } if cli.scheme == "http" && strings.Contains(err.Error(), "malformed HTTP response") { return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) } if cli.scheme == "https" && strings.Contains(err.Error(), "remote error: bad certificate") { return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. 
Please check your TLS client certification settings: %v", err) } return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) } if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { body, err := ioutil.ReadAll(resp.Body) if err != nil { return serverResp, err } if len(body) == 0 { return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) } return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) } serverResp.body = resp.Body serverResp.header = resp.Header return serverResp, nil } func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) { apiPath := cli.getAPIPath(path, query) req, err := http.NewRequest(method, apiPath, body) if err != nil { return nil, err } // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { req.Header.Set(k, v) } if headers != nil { for k, v := range headers { req.Header[k] = v } } return req, nil } func encodeData(data interface{}) (*bytes.Buffer, error) { params := bytes.NewBuffer(nil) if data != nil { if err := json.NewEncoder(params).Encode(data); err != nil { return nil, err } } return params, nil } func ensureReaderClosed(response *serverResponse) { if response != nil && response.body != nil { response.body.Close() } } func isTimeout(err error) bool { type timeout interface { Timeout() bool } e := err switch urlErr := err.(type) { case *url.Error: e = urlErr.Err } t, ok := e.(timeout) return ok && t.Timeout() } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/resize.go000066400000000000000000000014671267010174400255710ustar00rootroot00000000000000package client import ( "net/url" "strconv" "github.com/docker/engine-api/types" ) // ContainerResize changes the 
size of the tty for a container. func (cli *Client) ContainerResize(options types.ResizeOptions) error { return cli.resize("/containers/"+options.ID, options.Height, options.Width) } // ContainerExecResize changes the size of the tty for an exec process running inside a container. func (cli *Client) ContainerExecResize(options types.ResizeOptions) error { return cli.resize("/exec/"+options.ID, options.Height, options.Width) } func (cli *Client) resize(basePath string, height, width int) error { query := url.Values{} query.Set("h", strconv.Itoa(height)) query.Set("w", strconv.Itoa(width)) resp, err := cli.post(basePath+"/resize", query, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/version.go000066400000000000000000000006741267010174400257540ustar00rootroot00000000000000package client import ( "encoding/json" "github.com/docker/engine-api/types" ) // ServerVersion returns information of the docker client and server host. func (cli *Client) ServerVersion() (types.Version, error) { resp, err := cli.get("/version", nil, nil) if err != nil { return types.Version{}, err } defer ensureReaderClosed(resp) var server types.Version err = json.NewDecoder(resp.body).Decode(&server) return server, err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/volume.go000066400000000000000000000033671267010174400256000ustar00rootroot00000000000000package client import ( "encoding/json" "net/http" "net/url" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/filters" ) // VolumeList returns the volumes configured in the docker host. 
func (cli *Client) VolumeList(filter filters.Args) (types.VolumesListResponse, error) { var volumes types.VolumesListResponse query := url.Values{} if filter.Len() > 0 { filterJSON, err := filters.ToParam(filter) if err != nil { return volumes, err } query.Set("filters", filterJSON) } resp, err := cli.get("/volumes", query, nil) if err != nil { return volumes, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&volumes) return volumes, err } // VolumeInspect returns the information about a specific volume in the docker host. func (cli *Client) VolumeInspect(volumeID string) (types.Volume, error) { var volume types.Volume resp, err := cli.get("/volumes/"+volumeID, nil, nil) if err != nil { if resp.statusCode == http.StatusNotFound { return volume, volumeNotFoundError{volumeID} } return volume, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&volume) return volume, err } // VolumeCreate creates a volume in the docker host. func (cli *Client) VolumeCreate(options types.VolumeCreateRequest) (types.Volume, error) { var volume types.Volume resp, err := cli.post("/volumes/create", nil, options, nil) if err != nil { return volume, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&volume) return volume, err } // VolumeRemove removes a volume from the docker host. func (cli *Client) VolumeRemove(volumeID string) error { resp, err := cli.delete("/volumes/"+volumeID, nil, nil) ensureReaderClosed(resp) return err } docker-1.10.3/vendor/src/github.com/docker/engine-api/client/wait.go000066400000000000000000000010761267010174400252300ustar00rootroot00000000000000package client import ( "encoding/json" "github.com/docker/engine-api/types" ) // ContainerWait pauses execution util a container is exits. // It returns the API status code as response of its readiness. 
func (cli *Client) ContainerWait(containerID string) (int, error) { resp, err := cli.post("/containers/"+containerID+"/wait", nil, nil, nil) if err != nil { return -1, err } defer ensureReaderClosed(resp) var res types.ContainerWaitResponse if err := json.NewDecoder(resp.body).Decode(&res); err != nil { return -1, err } return res.StatusCode, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/000077500000000000000000000000001267010174400236175ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/auth.go000066400000000000000000000006231267010174400251100ustar00rootroot00000000000000package types // AuthConfig contains authorization information for connecting to a Registry type AuthConfig struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` Auth string `json:"auth"` Email string `json:"email"` ServerAddress string `json:"serveraddress,omitempty"` RegistryToken string `json:"registrytoken,omitempty"` } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/blkiodev/000077500000000000000000000000001267010174400254165ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go000066400000000000000000000007161267010174400270510ustar00rootroot00000000000000package blkiodev import "fmt" // WeightDevice is a structure that hold device:weight pair type WeightDevice struct { Path string Weight uint16 } func (w *WeightDevice) String() string { return fmt.Sprintf("%s:%d", w.Path, w.Weight) } // ThrottleDevice is a structure that hold device:rate_per_second pair type ThrottleDevice struct { Path string Rate uint64 } func (t *ThrottleDevice) String() string { return fmt.Sprintf("%s:%d", t.Path, t.Rate) } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/client.go000066400000000000000000000140541267010174400254300ustar00rootroot00000000000000package types import ( "bufio" "io" "net" "github.com/docker/engine-api/types/container" 
"github.com/docker/engine-api/types/filters" "github.com/docker/go-units" ) // ContainerAttachOptions holds parameters to attach to a container. type ContainerAttachOptions struct { ContainerID string Stream bool Stdin bool Stdout bool Stderr bool DetachKeys string } // ContainerCommitOptions holds parameters to commit changes into a container. type ContainerCommitOptions struct { ContainerID string RepositoryName string Tag string Comment string Author string Changes []string Pause bool Config *container.Config } // ContainerExecInspect holds information returned by exec inspect. type ContainerExecInspect struct { ExecID string ContainerID string Running bool ExitCode int } // ContainerListOptions holds parameters to list containers with. type ContainerListOptions struct { Quiet bool Size bool All bool Latest bool Since string Before string Limit int Filter filters.Args } // ContainerLogsOptions holds parameters to filter logs with. type ContainerLogsOptions struct { ContainerID string ShowStdout bool ShowStderr bool Since string Timestamps bool Follow bool Tail string } // ContainerRemoveOptions holds parameters to remove containers. type ContainerRemoveOptions struct { ContainerID string RemoveVolumes bool RemoveLinks bool Force bool } // CopyToContainerOptions holds information // about files to copy into a container type CopyToContainerOptions struct { ContainerID string Path string Content io.Reader AllowOverwriteDirWithFile bool } // EventsOptions hold parameters to filter events with. type EventsOptions struct { Since string Until string Filters filters.Args } // NetworkListOptions holds parameters to filter the list of networks with. type NetworkListOptions struct { Filters filters.Args } // HijackedResponse holds connection information for a hijacked request. type HijackedResponse struct { Conn net.Conn Reader *bufio.Reader } // Close closes the hijacked connection and reader. 
func (h *HijackedResponse) Close() { h.Conn.Close() } // CloseWriter is an interface that implement structs // that close input streams to prevent from writing. type CloseWriter interface { CloseWrite() error } // CloseWrite closes a readWriter for writing. func (h *HijackedResponse) CloseWrite() error { if conn, ok := h.Conn.(CloseWriter); ok { return conn.CloseWrite() } return nil } // ImageBuildOptions holds the information // necessary to build images. type ImageBuildOptions struct { Tags []string SuppressOutput bool RemoteContext string NoCache bool Remove bool ForceRemove bool PullParent bool IsolationLevel container.IsolationLevel CPUSetCPUs string CPUSetMems string CPUShares int64 CPUQuota int64 CPUPeriod int64 Memory int64 MemorySwap int64 CgroupParent string ShmSize int64 Dockerfile string Ulimits []*units.Ulimit BuildArgs map[string]string AuthConfigs map[string]AuthConfig Context io.Reader } // ImageBuildResponse holds information // returned by a server after building // an image. type ImageBuildResponse struct { Body io.ReadCloser OSType string } // ImageCreateOptions holds information to create images. type ImageCreateOptions struct { Parent string // Parent is the name of the image to pull Tag string // Tag is the name to tag this image with RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry } // ImageImportOptions holds information to import images from the client host. 
type ImageImportOptions struct { Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) RepositoryName string // RepositoryName is the name of the repository to import this image into Message string // Message is the message to tag the image with Tag string // Tag is the name to tag this image with Changes []string // Changes are the raw changes to apply to this image } // ImageListOptions holds parameters to filter the list of images with. type ImageListOptions struct { MatchName string All bool Filters filters.Args } // ImageLoadResponse returns information to the client about a load process. type ImageLoadResponse struct { Body io.ReadCloser JSON bool } // ImagePullOptions holds information to pull images. type ImagePullOptions struct { ImageID string // ImageID is the name of the image to pull Tag string // Tag is the name of the tag to be pulled RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry } //ImagePushOptions holds information to push images. type ImagePushOptions ImagePullOptions // ImageRemoveOptions holds parameters to remove images. type ImageRemoveOptions struct { ImageID string Force bool PruneChildren bool } // ImageSearchOptions holds parameters to search images with. type ImageSearchOptions struct { Term string RegistryAuth string } // ImageTagOptions holds parameters to tag an image type ImageTagOptions struct { ImageID string RepositoryName string Tag string Force bool } // ResizeOptions holds parameters to resize a tty. // It can be used to resize container ttys and // exec process ttys too. 
type ResizeOptions struct { ID string Height int Width int } // VersionResponse holds version information for the client and the server type VersionResponse struct { Client *Version Server *Version } // ServerOK return true when the client could connect to the docker server // and parse the information received. It returns false otherwise. func (v VersionResponse) ServerOK() bool { return v.Server != nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/configs.go000066400000000000000000000035271267010174400256050ustar00rootroot00000000000000package types import ( "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/network" ) // configs holds structs used for internal communication between the // frontend (such as an http server) and the backend (such as the // docker daemon). // ContainerCreateConfig is the parameter set to ContainerCreate() type ContainerCreateConfig struct { Name string Config *container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig AdjustCPUShares bool } // ContainerRmConfig holds arguments for the container remove // operation. This struct is used to tell the backend what operations // to perform. type ContainerRmConfig struct { ForceRemove, RemoveVolume, RemoveLink bool } // ContainerCommitConfig contains build configs for commit operation, // and is used when making a commit with the current state of the container. type ContainerCommitConfig struct { Pause bool Repo string Tag string Author string Comment string // merge container config into commit config before commit MergeConfigs bool Config *container.Config } // ExecConfig is a small subset of the Config struct that hold the configuration // for the exec feature of docker. type ExecConfig struct { User string // User that will run the command Privileged bool // Is the container in privileged mode Tty bool // Attach standard streams to a tty. 
Container string // Name of the container (to execute in) AttachStdin bool // Attach the standard input, makes possible user interaction AttachStderr bool // Attach the standard output AttachStdout bool // Attach the standard error Detach bool // Execute in detach mode DetachKeys string // Escape keys for detach Cmd []string // Execution commands and args } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/container/000077500000000000000000000000001267010174400256015ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/container/config.go000066400000000000000000000050301267010174400273730ustar00rootroot00000000000000package container import ( "github.com/docker/engine-api/types/strslice" "github.com/docker/go-connections/nat" ) // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". // Non-portable information *should* appear in HostConfig. // All fields added to this struct must be marked `omitempty` to keep getting // predictable hashes from the old `v1Compatibility` configuration. type Config struct { Hostname string // Hostname Domainname string // Domainname User string // User that will run the command(s) inside the container AttachStdin bool // Attach the standard input, makes possible user interaction AttachStdout bool // Attach the standard output AttachStderr bool // Attach the standard error ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports PublishService string `json:",omitempty"` // Name of the network service exposed by the container Tty bool // Attach standard streams to a tty, including stdin if it is not closed. OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
Env []string // List of environment variable to set in the container Cmd *strslice.StrSlice // Command to run when starting the container ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) Image string // Name of the image as it was passed by the operator (eg. could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in the command will be launched Entrypoint *strslice.StrSlice // Entrypoint to run when starting the container NetworkDisabled bool `json:",omitempty"` // Is network disabled MacAddress string `json:",omitempty"` // Mac Address of the container OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile Labels map[string]string // List of labels set to this container StopSignal string `json:",omitempty"` // Signal to stop a container } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/container/host_config.go000066400000000000000000000213361267010174400304370ustar00rootroot00000000000000package container import ( "strings" "github.com/docker/engine-api/types/blkiodev" "github.com/docker/engine-api/types/strslice" "github.com/docker/go-connections/nat" "github.com/docker/go-units" ) // NetworkMode represents the container network stack. type NetworkMode string // IsolationLevel represents the isolation level of a container. The supported // values are platform specific type IsolationLevel string // IsDefault indicates the default isolation level of a container. On Linux this // is the native driver. On Windows, this is a Windows Server Container. func (i IsolationLevel) IsDefault() bool { return strings.ToLower(string(i)) == "default" || string(i) == "" } // IpcMode represents the container ipc stack. type IpcMode string // IsPrivate indicates whether the container uses it's private ipc stack. 
func (n IpcMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } // IsHost indicates whether the container uses the host's ipc stack. func (n IpcMode) IsHost() bool { return n == "host" } // IsContainer indicates whether the container uses a container's ipc stack. func (n IpcMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } // Valid indicates whether the ipc stack is valid. func (n IpcMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { case "", "host": case "container": if len(parts) != 2 || parts[1] == "" { return false } default: return false } return true } // Container returns the name of the container ipc stack is going to be used. func (n IpcMode) Container() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 { return parts[1] } return "" } // UTSMode represents the UTS namespace of the container. type UTSMode string // IsPrivate indicates whether the container uses it's private UTS namespace. func (n UTSMode) IsPrivate() bool { return !(n.IsHost()) } // IsHost indicates whether the container uses the host's UTS namespace. func (n UTSMode) IsHost() bool { return n == "host" } // Valid indicates whether the UTS namespace is valid. func (n UTSMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { case "", "host": default: return false } return true } // PidMode represents the pid stack of the container. type PidMode string // IsPrivate indicates whether the container uses it's private pid stack. func (n PidMode) IsPrivate() bool { return !(n.IsHost()) } // IsHost indicates whether the container uses the host's pid stack. func (n PidMode) IsHost() bool { return n == "host" } // Valid indicates whether the pid stack is valid. 
func (n PidMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { case "", "host": default: return false } return true } // DeviceMapping represents the device mapping between the host and the container. type DeviceMapping struct { PathOnHost string PathInContainer string CgroupPermissions string } // RestartPolicy represents the restart policies of the container. type RestartPolicy struct { Name string MaximumRetryCount int } // IsNone indicates whether the container has the "no" restart policy. // This means the container will not automatically restart when exiting. func (rp *RestartPolicy) IsNone() bool { return rp.Name == "no" } // IsAlways indicates whether the container has the "always" restart policy. // This means the container will automatically restart regardless of the exit status. func (rp *RestartPolicy) IsAlways() bool { return rp.Name == "always" } // IsOnFailure indicates whether the container has the "on-failure" restart policy. // This means the contain will automatically restart of exiting with a non-zero exit status. func (rp *RestartPolicy) IsOnFailure() bool { return rp.Name == "on-failure" } // IsUnlessStopped indicates whether the container has the // "unless-stopped" restart policy. This means the container will // automatically restart unless user has put it to stopped state. func (rp *RestartPolicy) IsUnlessStopped() bool { return rp.Name == "unless-stopped" } // LogConfig represents the logging configuration of the container. type LogConfig struct { Type string Config map[string]string } // Resources contains container's resources (cgroups config, ulimits...) type Resources struct { // Applicable to all platforms CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) // Applicable to UNIX platforms CgroupParent string // Parent cgroup. BlkioWeight uint16 // Block IO weight (relative weight vs. 
other containers) BlkioWeightDevice []*blkiodev.WeightDevice BlkioDeviceReadBps []*blkiodev.ThrottleDevice BlkioDeviceWriteBps []*blkiodev.ThrottleDevice BlkioDeviceReadIOps []*blkiodev.ThrottleDevice BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota CpusetCpus string // CpusetCpus 0-2, 0,1 CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container KernelMemory int64 // Kernel memory limit (in bytes) Memory int64 // Memory limit (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap MemorySwappiness *int64 // Tuning container memory swappiness behaviour OomKillDisable *bool // Whether to disable OOM Killer or not PidsLimit int64 // Setting pids limit for a container Ulimits []*units.Ulimit // List of ulimits to be set in the container } // UpdateConfig holds the mutable attributes of a Container. // Those attributes can be updated at runtime. type UpdateConfig struct { // Contains container's resources (cgroups, ulimits) Resources } // HostConfig the non-portable Config structure of a container. // Here, "non-portable" means "dependent of the host we are running on". // Portable information *should* appear in Config. 
type HostConfig struct { // Applicable to all platforms Binds []string // List of volume bindings for this container ContainerIDFile string // File (path) where the containerId is written LogConfig LogConfig // Configuration of the logs for this container NetworkMode NetworkMode // Network mode to use for the container PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host RestartPolicy RestartPolicy // Restart policy to be used for the container VolumeDriver string // Name of the volume driver used to mount volumes VolumesFrom []string // List of volumes to take from other container // Applicable to UNIX platforms CapAdd *strslice.StrSlice // List of kernel capabilities to add to the container CapDrop *strslice.StrSlice // List of kernel capabilities to remove from the container DNS []string `json:"Dns"` // List of DNS server to lookup DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container Links []string // List of links (in the name:alias form) OomScoreAdj int // Container preference for OOM-killing PidMode PidMode // PID namespace to use for the container Privileged bool // Is the container in privileged mode PublishAllPorts bool // Should docker publish all exposed port for the container ReadonlyRootfs bool // Is the container root filesystem in read-only SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. 
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container UTSMode UTSMode // UTS namespace to use for the container ShmSize int64 // Total shm memory usage // Applicable to Windows ConsoleSize [2]int // Initial console size Isolation IsolationLevel // Isolation level of the container (eg default, hyperv) // Contains container's resources (cgroups, ulimits) Resources } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go000066400000000000000000000042761267010174400313470ustar00rootroot00000000000000// +build !windows package container import "strings" // IsValid indicates is an isolation level is valid func (i IsolationLevel) IsValid() bool { return i.IsDefault() } // IsPrivate indicates whether container uses it's private network stack. func (n NetworkMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } // IsDefault indicates whether container uses the default network stack. func (n NetworkMode) IsDefault() bool { return n == "default" } // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsBridge() { return "bridge" } else if n.IsHost() { return "host" } else if n.IsContainer() { return "container" } else if n.IsNone() { return "none" } else if n.IsDefault() { return "default" } else if n.IsUserDefined() { return n.UserDefined() } return "" } // IsBridge indicates whether container uses the bridge network stack func (n NetworkMode) IsBridge() bool { return n == "bridge" } // IsHost indicates whether container uses the host network stack. func (n NetworkMode) IsHost() bool { return n == "host" } // IsContainer indicates whether container uses a container network stack. func (n NetworkMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } // IsNone indicates whether container isn't using a network stack. 
func (n NetworkMode) IsNone() bool { return n == "none" } // ConnectedContainer is the id of the container which network this container is connected to. func (n NetworkMode) ConnectedContainer() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 { return parts[1] } return "" } // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() } // IsPreDefinedNetwork indicates if a network is predefined by the daemon func IsPreDefinedNetwork(network string) bool { n := NetworkMode(network) return n.IsBridge() || n.IsHost() || n.IsNone() } //UserDefined indicates user-created network func (n NetworkMode) UserDefined() string { if n.IsUserDefined() { return string(n) } return "" } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go000066400000000000000000000044351267010174400320530ustar00rootroot00000000000000package container import ( "fmt" "strings" ) // IsDefault indicates whether container uses the default network stack. func (n NetworkMode) IsDefault() bool { return n == "default" } // IsNone indicates whether container isn't using a network stack. func (n NetworkMode) IsNone() bool { return n == "none" } // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsNone() } // IsHyperV indicates the use of a Hyper-V partition for isolation func (i IsolationLevel) IsHyperV() bool { return strings.ToLower(string(i)) == "hyperv" } // IsProcess indicates the use of process isolation func (i IsolationLevel) IsProcess() bool { return strings.ToLower(string(i)) == "process" } // IsValid indicates is an isolation level is valid func (i IsolationLevel) IsValid() bool { return i.IsDefault() || i.IsHyperV() || i.IsProcess() } // DefaultDaemonNetworkMode returns the default network stack the daemon should // use. 
func DefaultDaemonNetworkMode() NetworkMode { return NetworkMode("default") } // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsDefault() { return "default" } return "" } // IsPreDefinedNetwork indicates if a network is predefined by the daemon func IsPreDefinedNetwork(network string) bool { return false } // ValidateNetMode ensures that the various combinations of requested // network settings are valid. func ValidateNetMode(c *Config, hc *HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } parts := strings.Split(string(hc.NetworkMode), ":") switch mode := parts[0]; mode { case "default", "none": default: return fmt.Errorf("invalid --net: %s", hc.NetworkMode) } return nil } // ValidateIsolationLevel performs platform specific validation of the // isolation level in the hostconfig structure. Windows supports 'default' (or // blank), 'process', or 'hyperv'. func ValidateIsolationLevel(hc *HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } if !hc.Isolation.IsValid() { return fmt.Errorf("invalid --isolation: %q. 
Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) } return nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/events/000077500000000000000000000000001267010174400251235ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/events/events.go000066400000000000000000000021621267010174400267570ustar00rootroot00000000000000package events const ( // ContainerEventType is the event type that containers generate ContainerEventType = "container" // ImageEventType is the event type that images generate ImageEventType = "image" // VolumeEventType is the event type that volumes generate VolumeEventType = "volume" // NetworkEventType is the event type that networks generate NetworkEventType = "network" ) // Actor describes something that generates events, // like a container, or a network, or a volume. // It has a defined name and a set or attributes. // The container attributes are its labels, other actors // can generate these attributes from other properties. type Actor struct { ID string Attributes map[string]string } // Message represents the information an event contains type Message struct { // Deprecated information from JSONMessage. // With data only in container events. Status string `json:"status,omitempty"` ID string `json:"id,omitempty"` From string `json:"from,omitempty"` Type string Action string Actor Actor Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/filters/000077500000000000000000000000001267010174400252675ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/filters/parse.go000066400000000000000000000151301267010174400267300ustar00rootroot00000000000000// Package filters provides helper function to parse and handle command line // filter, used for example in docker ps or docker images commands. 
package filters import ( "encoding/json" "errors" "fmt" "regexp" "strings" ) // Args stores filter arguments as map key:{map key: bool}. // It contains a aggregation of the map of arguments (which are in the form // of -f 'key=value') based on the key, and store values for the same key // in an map with string keys and boolean values. // e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' // the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} type Args struct { fields map[string]map[string]bool } // NewArgs initializes a new Args struct. func NewArgs() Args { return Args{fields: map[string]map[string]bool{}} } // ParseFlag parses the argument to the filter flag. Like // // `docker ps -f 'created=today' -f 'image.name=ubuntu*'` // // If prev map is provided, then it is appended to, and returned. By default a new // map is created. func ParseFlag(arg string, prev Args) (Args, error) { filters := prev if len(arg) == 0 { return filters, nil } if !strings.Contains(arg, "=") { return filters, ErrBadFormat } f := strings.SplitN(arg, "=", 2) name := strings.ToLower(strings.TrimSpace(f[0])) value := strings.TrimSpace(f[1]) filters.Add(name, value) return filters, nil } // ErrBadFormat is an error returned in case of bad format for a filter. var ErrBadFormat = errors.New("bad format of filter (expected name=value)") // ToParam packs the Args into an string for easy transport from client to server. func ToParam(a Args) (string, error) { // this way we don't URL encode {}, just empty space if a.Len() == 0 { return "", nil } buf, err := json.Marshal(a.fields) if err != nil { return "", err } return string(buf), nil } // FromParam unpacks the filter Args. func FromParam(p string) (Args, error) { if len(p) == 0 { return NewArgs(), nil } r := strings.NewReader(p) d := json.NewDecoder(r) m := map[string]map[string]bool{} if err := d.Decode(&m); err != nil { r.Seek(0, 0) // Allow parsing old arguments in slice format. 
// Because other libraries might be sending them in this format. deprecated := map[string][]string{} if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { m = deprecatedArgs(deprecated) } else { return NewArgs(), err } } return Args{m}, nil } // Get returns the list of values associates with a field. // It returns a slice of strings to keep backwards compatibility with old code. func (filters Args) Get(field string) []string { values := filters.fields[field] if values == nil { return make([]string, 0) } slice := make([]string, 0, len(values)) for key := range values { slice = append(slice, key) } return slice } // Add adds a new value to a filter field. func (filters Args) Add(name, value string) { if _, ok := filters.fields[name]; ok { filters.fields[name][value] = true } else { filters.fields[name] = map[string]bool{value: true} } } // Del removes a value from a filter field. func (filters Args) Del(name, value string) { if _, ok := filters.fields[name]; ok { delete(filters.fields[name], value) } } // Len returns the number of fields in the arguments. func (filters Args) Len() int { return len(filters.fields) } // MatchKVList returns true if the values for the specified field matches the ones // from the sources. // e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, // field is 'label' and sources are {'label1': '1', 'label2': '2'} // it returns true. 
func (filters Args) MatchKVList(field string, sources map[string]string) bool { fieldValues := filters.fields[field] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } if sources == nil || len(sources) == 0 { return false } for name2match := range fieldValues { testKV := strings.SplitN(name2match, "=", 2) v, ok := sources[testKV[0]] if !ok { return false } if len(testKV) == 2 && testKV[1] != v { return false } } return true } // Match returns true if the values for the specified field matches the source string // e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, // field is 'image.name' and source is 'ubuntu' // it returns true. func (filters Args) Match(field, source string) bool { if filters.ExactMatch(field, source) { return true } fieldValues := filters.fields[field] for name2match := range fieldValues { match, err := regexp.MatchString(name2match, source) if err != nil { continue } if match { return true } } return false } // ExactMatch returns true if the source matches exactly one of the filters. func (filters Args) ExactMatch(field, source string) bool { fieldValues, ok := filters.fields[field] //do not filter if there is no filter set or cannot determine filter if !ok || len(fieldValues) == 0 { return true } // try to march full name value to avoid O(N) regular expression matching if fieldValues[source] { return true } return false } // FuzzyMatch returns true if the source matches exactly one of the filters, // or the source has one of the filters as a prefix. func (filters Args) FuzzyMatch(field, source string) bool { if filters.ExactMatch(field, source) { return true } fieldValues := filters.fields[field] for prefix := range fieldValues { if strings.HasPrefix(source, prefix) { return true } } return false } // Include returns true if the name of the field to filter is in the filters. 
func (filters Args) Include(field string) bool { _, ok := filters.fields[field] return ok } // Validate ensures that all the fields in the filter are valid. // It returns an error as soon as it finds an invalid field. func (filters Args) Validate(accepted map[string]bool) error { for name := range filters.fields { if !accepted[name] { return fmt.Errorf("Invalid filter '%s'", name) } } return nil } // WalkValues iterates over the list of filtered values for a field. // It stops the iteration if it finds an error and it returns that error. func (filters Args) WalkValues(field string, op func(value string) error) error { if _, ok := filters.fields[field]; !ok { return nil } for v := range filters.fields[field] { if err := op(v); err != nil { return err } } return nil } func deprecatedArgs(d map[string][]string) map[string]map[string]bool { m := map[string]map[string]bool{} for k, v := range d { values := map[string]bool{} for _, vv := range v { values[vv] = true } m[k] = values } return m } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/network/000077500000000000000000000000001267010174400253105ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/network/network.go000066400000000000000000000027671267010174400273440ustar00rootroot00000000000000package network // Address represents an IP address type Address struct { Addr string PrefixLen int } // IPAM represents IP Address Management type IPAM struct { Driver string Options map[string]string //Per network IPAM driver options Config []IPAMConfig } // IPAMConfig represents IPAM configurations type IPAMConfig struct { Subnet string `json:",omitempty"` IPRange string `json:",omitempty"` Gateway string `json:",omitempty"` AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` } // EndpointIPAMConfig represents IPAM configurations for the endpoint type EndpointIPAMConfig struct { IPv4Address string `json:",omitempty"` IPv6Address string `json:",omitempty"` } // 
EndpointSettings stores the network endpoint details type EndpointSettings struct { // Configurations IPAMConfig *EndpointIPAMConfig Links []string Aliases []string // Operational data NetworkID string EndpointID string Gateway string IPAddress string IPPrefixLen int IPv6Gateway string GlobalIPv6Address string GlobalIPv6PrefixLen int MacAddress string } // NetworkingConfig represents the container's networking configuration for each of its interfaces // Carries the networink configs specified in the `docker run` and `docker network connect` commands type NetworkingConfig struct { EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each conencting network } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/registry/000077500000000000000000000000001267010174400254675ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/registry/registry.go000066400000000000000000000061431267010174400276720ustar00rootroot00000000000000package registry import ( "encoding/json" "net" ) // ServiceConfig stores daemon registry services configuration. 
type ServiceConfig struct { InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` Mirrors []string } // NetIPNet is the net.IPNet type, which can be marshalled and // unmarshalled to JSON type NetIPNet net.IPNet // MarshalJSON returns the JSON representation of the IPNet func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { return json.Marshal((*net.IPNet)(ipnet).String()) } // UnmarshalJSON sets the IPNet from a byte array of JSON func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { var ipnetStr string if err = json.Unmarshal(b, &ipnetStr); err == nil { var cidr *net.IPNet if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { *ipnet = NetIPNet(*cidr) } } return } // IndexInfo contains information about a registry // // RepositoryInfo Examples: // { // "Index" : { // "Name" : "docker.io", // "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], // "Secure" : true, // "Official" : true, // }, // "RemoteName" : "library/debian", // "LocalName" : "debian", // "CanonicalName" : "docker.io/debian" // "Official" : true, // } // // { // "Index" : { // "Name" : "127.0.0.1:5000", // "Mirrors" : [], // "Secure" : false, // "Official" : false, // }, // "RemoteName" : "user/repo", // "LocalName" : "127.0.0.1:5000/user/repo", // "CanonicalName" : "127.0.0.1:5000/user/repo", // "Official" : false, // } type IndexInfo struct { // Name is the name of the registry, such as "docker.io" Name string // Mirrors is a list of mirrors, expressed as URIs Mirrors []string // Secure is set to false if the registry is part of the list of // insecure registries. Insecure registries accept HTTP and/or accept // HTTPS with certificates from unknown CAs. 
Secure bool // Official indicates whether this is an official registry Official bool } // SearchResult describes a search result returned from a registry type SearchResult struct { // StarCount indicates the number of stars this repository has StarCount int `json:"star_count"` // IsOfficial indicates whether the result is an official repository or not IsOfficial bool `json:"is_official"` // Name is the name of the repository Name string `json:"name"` // IsOfficial indicates whether the result is trusted IsTrusted bool `json:"is_trusted"` // IsAutomated indicates whether the result is automated IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository Description string `json:"description"` } // SearchResults lists a collection search results returned from a registry type SearchResults struct { // Query contains the query string that generated the search results Query string `json:"query"` // NumResults indicates the number of results the query returned NumResults int `json:"num_results"` // Results is a slice containing the actual results for the search Results []SearchResult `json:"results"` } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/seccomp.go000066400000000000000000000037741267010174400256120ustar00rootroot00000000000000package types // Seccomp represents the config for a seccomp profile for syscall restriction. 
type Seccomp struct { DefaultAction Action `json:"defaultAction"` Architectures []Arch `json:"architectures"` Syscalls []*Syscall `json:"syscalls"` } // Arch used for additional architectures type Arch string // Additional architectures permitted to be used for system calls // By default only the native architecture of the kernel is permitted const ( ArchX86 Arch = "SCMP_ARCH_X86" ArchX86_64 Arch = "SCMP_ARCH_X86_64" ArchX32 Arch = "SCMP_ARCH_X32" ArchARM Arch = "SCMP_ARCH_ARM" ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" ArchMIPS Arch = "SCMP_ARCH_MIPS" ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" ) // Action taken upon Seccomp rule match type Action string // Define actions for Seccomp rules const ( ActKill Action = "SCMP_ACT_KILL" ActTrap Action = "SCMP_ACT_TRAP" ActErrno Action = "SCMP_ACT_ERRNO" ActTrace Action = "SCMP_ACT_TRACE" ActAllow Action = "SCMP_ACT_ALLOW" ) // Operator used to match syscall arguments in Seccomp type Operator string // Define operators for syscall arguments in Seccomp const ( OpNotEqual Operator = "SCMP_CMP_NE" OpLessThan Operator = "SCMP_CMP_LT" OpLessEqual Operator = "SCMP_CMP_LE" OpEqualTo Operator = "SCMP_CMP_EQ" OpGreaterEqual Operator = "SCMP_CMP_GE" OpGreaterThan Operator = "SCMP_CMP_GT" OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" ) // Arg used for matching specific syscall arguments in Seccomp type Arg struct { Index uint `json:"index"` Value uint64 `json:"value"` ValueTwo uint64 `json:"valueTwo"` Op Operator `json:"op"` } // Syscall is used to match a syscall in Seccomp type Syscall struct { Name string `json:"name"` Action Action `json:"action"` Args []*Arg `json:"args"` } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/stats.go000066400000000000000000000077721267010174400253210ustar00rootroot00000000000000// Package types is used for API stability in the types 
and response to the // consumers of the API stats endpoint. package types import "time" // ThrottlingData stores CPU throttling stats of one running container type ThrottlingData struct { // Number of periods with throttling active Periods uint64 `json:"periods"` // Number of periods when the container hit its throttling limit. ThrottledPeriods uint64 `json:"throttled_periods"` // Aggregate time the container was throttled for in nanoseconds. ThrottledTime uint64 `json:"throttled_time"` } // CPUUsage stores All CPU stats aggregated since container inception. type CPUUsage struct { // Total CPU time consumed. // Units: nanoseconds. TotalUsage uint64 `json:"total_usage"` // Total CPU time consumed per core. // Units: nanoseconds. PercpuUsage []uint64 `json:"percpu_usage"` // Time spent by tasks of the cgroup in kernel mode. // Units: nanoseconds. UsageInKernelmode uint64 `json:"usage_in_kernelmode"` // Time spent by tasks of the cgroup in user mode. // Units: nanoseconds. UsageInUsermode uint64 `json:"usage_in_usermode"` } // CPUStats aggregates and wraps all CPU related info of container type CPUStats struct { CPUUsage CPUUsage `json:"cpu_usage"` SystemUsage uint64 `json:"system_cpu_usage"` ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } // MemoryStats aggregates All memory stats since container inception type MemoryStats struct { // current res_counter usage for memory Usage uint64 `json:"usage"` // maximum usage ever recorded. MaxUsage uint64 `json:"max_usage"` // TODO(vishh): Export these as stronger types. // all the stats exported via memory.stat. Stats map[string]uint64 `json:"stats"` // number of times memory usage hits limits. 
Failcnt uint64 `json:"failcnt"` Limit uint64 `json:"limit"` } // BlkioStatEntry is one small entity to store a piece of Blkio stats // TODO Windows: This can be factored out type BlkioStatEntry struct { Major uint64 `json:"major"` Minor uint64 `json:"minor"` Op string `json:"op"` Value uint64 `json:"value"` } // BlkioStats stores All IO service stats for data read and write // TODO Windows: This can be factored out type BlkioStats struct { // number of bytes transferred to and from the block device IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` } // NetworkStats aggregates All network stats of one container // TODO Windows: This will require refactoring type NetworkStats struct { RxBytes uint64 `json:"rx_bytes"` RxPackets uint64 `json:"rx_packets"` RxErrors uint64 `json:"rx_errors"` RxDropped uint64 `json:"rx_dropped"` TxBytes uint64 `json:"tx_bytes"` TxPackets uint64 `json:"tx_packets"` TxErrors uint64 `json:"tx_errors"` TxDropped uint64 `json:"tx_dropped"` } // PidsStats contains the stats of a container's pids type PidsStats struct { // Current is the number of pids in the cgroup Current uint64 `json:"current,omitempty"` } // Stats is Ultimate struct aggregating all types of stats of one container type Stats struct { Read time.Time `json:"read"` PreCPUStats CPUStats `json:"precpu_stats,omitempty"` CPUStats CPUStats `json:"cpu_stats,omitempty"` MemoryStats MemoryStats `json:"memory_stats,omitempty"` BlkioStats BlkioStats `json:"blkio_stats,omitempty"` PidsStats PidsStats 
`json:"pids_stats,omitempty"` } // StatsJSON is newly used Networks type StatsJSON struct { Stats // Networks request version >=1.21 Networks map[string]NetworkStats `json:"networks,omitempty"` } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/strslice/000077500000000000000000000000001267010174400254475ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/strslice/strslice.go000066400000000000000000000027431267010174400276340ustar00rootroot00000000000000package strslice import ( "encoding/json" "strings" ) // StrSlice represents a string or an array of strings. // We need to override the json decoder to accept both options. type StrSlice struct { parts []string } // MarshalJSON Marshals (or serializes) the StrSlice into the json format. // This method is needed to implement json.Marshaller. func (e *StrSlice) MarshalJSON() ([]byte, error) { if e == nil { return []byte{}, nil } return json.Marshal(e.Slice()) } // UnmarshalJSON decodes the byte slice whether it's a string or an array of strings. // This method is needed to implement json.Unmarshaler. func (e *StrSlice) UnmarshalJSON(b []byte) error { if len(b) == 0 { return nil } p := make([]string, 0, 1) if err := json.Unmarshal(b, &p); err != nil { var s string if err := json.Unmarshal(b, &s); err != nil { return err } p = append(p, s) } e.parts = p return nil } // Len returns the number of parts of the StrSlice. func (e *StrSlice) Len() int { if e == nil { return 0 } return len(e.parts) } // Slice gets the parts of the StrSlice as a Slice of string. func (e *StrSlice) Slice() []string { if e == nil { return nil } return e.parts } // ToString gets space separated string of all the parts. func (e *StrSlice) ToString() string { s := e.Slice() if s == nil { return "" } return strings.Join(s, " ") } // New creates an StrSlice based on the specified parts (as strings). 
func New(parts ...string) *StrSlice { return &StrSlice{parts} } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/time/000077500000000000000000000000001267010174400245555ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/time/timestamp.go000066400000000000000000000100301267010174400271010ustar00rootroot00000000000000package time import ( "fmt" "math" "strconv" "strings" "time" ) // These are additional predefined layouts for use in Time.Format and Time.Parse // with --since and --until parameters for `docker logs` and `docker events` const ( rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 ) // GetTimestamp tries to parse given string as golang duration, // then RFC3339 time and finally as a Unix timestamp. If // any of these were successful, it returns a Unix timestamp // as string otherwise returns the given value back. // In case of duration input, the returned timestamp is computed // as the given reference time minus the amount of the duration. 
func GetTimestamp(value string, reference time.Time) (string, error) { if d, err := time.ParseDuration(value); value != "0" && err == nil { return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil } var format string var parseInLocation bool // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) if strings.Contains(value, ".") { if parseInLocation { format = rFC3339NanoLocal } else { format = time.RFC3339Nano } } else if strings.Contains(value, "T") { // we want the number of colons in the T portion of the timestamp tcolons := strings.Count(value, ":") // if parseInLocation is off and we have a +/- zone offset (not Z) then // there will be an extra colon in the input for the tz offset subtract that // colon from the tcolons count if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { tcolons-- } if parseInLocation { switch tcolons { case 0: format = "2006-01-02T15" case 1: format = "2006-01-02T15:04" default: format = rFC3339Local } } else { switch tcolons { case 0: format = "2006-01-02T15Z07:00" case 1: format = "2006-01-02T15:04Z07:00" default: format = time.RFC3339 } } } else if parseInLocation { format = dateLocal } else { format = dateWithZone } var t time.Time var err error if parseInLocation { t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) } else { t, err = time.Parse(format, value) } if err != nil { // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp if strings.Contains(value, "-") { return "", err // was probably an RFC3339 like timestamp but the parser failed with an error } return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) } return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil } // ParseTimestamps returns seconds and nanoseconds 
from a timestamp that has the // format "%d.%09d", time.Unix(), int64(time.Nanosecond())) // if the incoming nanosecond portion is longer or shorter than 9 digits it is // converted to nanoseconds. The expectation is that the seconds and // seconds will be used to create a time variable. For example: // seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) // if err == nil since := time.Unix(seconds, nanoseconds) // returns seconds as def(aultSeconds) if value == "" func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { return def, 0, nil } sa := strings.SplitN(value, ".", 2) s, err := strconv.ParseInt(sa[0], 10, 64) if err != nil { return s, 0, err } if len(sa) != 2 { return s, 0, nil } n, err := strconv.ParseInt(sa[1], 10, 64) if err != nil { return s, n, err } // should already be in nanoseconds but just in case convert n to nanoseonds n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) return s, n, nil } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/types.go000066400000000000000000000272321267010174400253200ustar00rootroot00000000000000package types import ( "os" "time" "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/network" "github.com/docker/engine-api/types/registry" "github.com/docker/go-connections/nat" ) // ContainerCreateResponse contains the information returned to a client on the // creation of a new container. type ContainerCreateResponse struct { // ID is the ID of the created container. ID string `json:"Id"` // Warnings are any warnings encountered during the creation of the container. Warnings []string `json:"Warnings"` } // ContainerExecCreateResponse contains response of Remote API: // POST "/containers/{name:.*}/exec" type ContainerExecCreateResponse struct { // ID is the exec ID. 
ID string `json:"Id"` } // ContainerUpdateResponse contains response of Remote API: // POST /containers/{name:.*}/update type ContainerUpdateResponse struct { // Warnings are any warnings encountered during the updating of the container. Warnings []string `json:"Warnings"` } // AuthResponse contains response of Remote API: // POST "/auth" type AuthResponse struct { // Status is the authentication status Status string `json:"Status"` } // ContainerWaitResponse contains response of Remote API: // POST "/containers/"+containerID+"/wait" type ContainerWaitResponse struct { // StatusCode is the status code of the wait job StatusCode int `json:"StatusCode"` } // ContainerCommitResponse contains response of Remote API: // POST "/commit?container="+containerID type ContainerCommitResponse struct { ID string `json:"Id"` } // ContainerChange contains response of Remote API: // GET "/containers/{name:.*}/changes" type ContainerChange struct { Kind int Path string } // ImageHistory contains response of Remote API: // GET "/images/{name:.*}/history" type ImageHistory struct { ID string `json:"Id"` Created int64 CreatedBy string Tags []string Size int64 Comment string } // ImageDelete contains response of Remote API: // DELETE "/images/{name:.*}" type ImageDelete struct { Untagged string `json:",omitempty"` Deleted string `json:",omitempty"` } // Image contains response of Remote API: // GET "/images/json" type Image struct { ID string `json:"Id"` ParentID string `json:"ParentId"` RepoTags []string RepoDigests []string Created int64 Size int64 VirtualSize int64 Labels map[string]string } // GraphDriverData returns Image's graph driver config info // when calling inspect command type GraphDriverData struct { Name string Data map[string]string } // ImageInspect contains response of Remote API: // GET "/images/{name:.*}/json" type ImageInspect struct { ID string `json:"Id"` RepoTags []string RepoDigests []string Parent string Comment string Created string Container string 
ContainerConfig *container.Config DockerVersion string Author string Config *container.Config Architecture string Os string Size int64 VirtualSize int64 GraphDriver GraphDriverData } // Port stores open ports info of container // e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} type Port struct { IP string `json:",omitempty"` PrivatePort int PublicPort int `json:",omitempty"` Type string } // Container contains response of Remote API: // GET "/containers/json" type Container struct { ID string `json:"Id"` Names []string Image string ImageID string Command string Created int64 Ports []Port SizeRw int64 `json:",omitempty"` SizeRootFs int64 `json:",omitempty"` Labels map[string]string Status string HostConfig struct { NetworkMode string `json:",omitempty"` } NetworkSettings *SummaryNetworkSettings } // CopyConfig contains request body of Remote API: // POST "/containers/"+containerID+"/copy" type CopyConfig struct { Resource string } // ContainerPathStat is used to encode the header from // GET "/containers/{name:.*}/archive" // "Name" is the file or directory name. 
type ContainerPathStat struct { Name string `json:"name"` Size int64 `json:"size"` Mode os.FileMode `json:"mode"` Mtime time.Time `json:"mtime"` LinkTarget string `json:"linkTarget"` } // ContainerProcessList contains response of Remote API: // GET "/containers/{name:.*}/top" type ContainerProcessList struct { Processes [][]string Titles []string } // Version contains response of Remote API: // GET "/version" type Version struct { Version string APIVersion string `json:"ApiVersion"` GitCommit string GoVersion string Os string Arch string KernelVersion string `json:",omitempty"` Experimental bool `json:",omitempty"` BuildTime string `json:",omitempty"` } // Info contains response of Remote API: // GET "/info" type Info struct { ID string Containers int ContainersRunning int ContainersPaused int ContainersStopped int Images int Driver string DriverStatus [][2]string SystemStatus [][2]string Plugins PluginsInfo MemoryLimit bool SwapLimit bool CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool CPUSet bool IPv4Forwarding bool BridgeNfIptables bool BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` Debug bool NFd int OomKillDisable bool NGoroutines int SystemTime string ExecutionDriver string LoggingDriver string NEventsListener int KernelVersion string OperatingSystem string OSType string Architecture string IndexServerAddress string RegistryConfig *registry.ServiceConfig InitSha1 string InitPath string NCPU int MemTotal int64 DockerRootDir string HTTPProxy string `json:"HttpProxy"` HTTPSProxy string `json:"HttpsProxy"` NoProxy string Name string Labels []string ExperimentalBuild bool ServerVersion string ClusterStore string ClusterAdvertise string } // PluginsInfo is temp struct holds Plugins name // registered with docker daemon. 
It used by Info struct type PluginsInfo struct { // List of Volume plugins registered Volume []string // List of Network plugins registered Network []string // List of Authorization plugins registered Authorization []string } // ExecStartCheck is a temp struct used by execStart // Config fields is part of ExecConfig in runconfig package type ExecStartCheck struct { // ExecStart will first check if it's detached Detach bool // Check if there's a tty Tty bool } // ContainerState stores container's running state // it's part of ContainerJSONBase and will return by "inspect" command type ContainerState struct { Status string Running bool Paused bool Restarting bool OOMKilled bool Dead bool Pid int ExitCode int Error string StartedAt string FinishedAt string } // ContainerJSONBase contains response of Remote API: // GET "/containers/{name:.*}/json" type ContainerJSONBase struct { ID string `json:"Id"` Created string Path string Args []string State *ContainerState Image string ResolvConfPath string HostnamePath string HostsPath string LogPath string Name string RestartCount int Driver string MountLabel string ProcessLabel string AppArmorProfile string ExecIDs []string HostConfig *container.HostConfig GraphDriver GraphDriverData SizeRw *int64 `json:",omitempty"` SizeRootFs *int64 `json:",omitempty"` } // ContainerJSON is newly used struct along with MountPoint type ContainerJSON struct { *ContainerJSONBase Mounts []MountPoint Config *container.Config NetworkSettings *NetworkSettings } // NetworkSettings exposes the network settings in the api type NetworkSettings struct { NetworkSettingsBase DefaultNetworkSettings Networks map[string]*network.EndpointSettings } // SummaryNetworkSettings provides a summary of container's networks // in /containers/json type SummaryNetworkSettings struct { Networks map[string]*network.EndpointSettings } // NetworkSettingsBase holds basic information about networks type NetworkSettingsBase struct { Bridge string SandboxID string HairpinMode 
bool LinkLocalIPv6Address string LinkLocalIPv6PrefixLen int Ports nat.PortMap SandboxKey string SecondaryIPAddresses []network.Address SecondaryIPv6Addresses []network.Address } // DefaultNetworkSettings holds network information // during the 2 release deprecation period. // It will be removed in Docker 1.11. type DefaultNetworkSettings struct { EndpointID string Gateway string GlobalIPv6Address string GlobalIPv6PrefixLen int IPAddress string IPPrefixLen int IPv6Gateway string MacAddress string } // MountPoint represents a mount point configuration inside the container. type MountPoint struct { Name string `json:",omitempty"` Source string Destination string Driver string `json:",omitempty"` Mode string RW bool Propagation string } // Volume represents the configuration of a volume for the remote API type Volume struct { Name string // Name is the name of the volume Driver string // Driver is the Driver name used to create the volume Mountpoint string // Mountpoint is the location on disk of the volume } // VolumesListResponse contains the response for the remote API: // GET "/volumes" type VolumesListResponse struct { Volumes []*Volume // Volumes is the list of volumes being returned Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers } // VolumeCreateRequest contains the response for the remote API: // POST "/volumes/create" type VolumeCreateRequest struct { Name string // Name is the requested name of the volume Driver string // Driver is the name of the driver that should be used to create the volume DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. 
} // NetworkResource is the body of the "get network" http response message type NetworkResource struct { Name string ID string `json:"Id"` Scope string Driver string IPAM network.IPAM Containers map[string]EndpointResource Options map[string]string } // EndpointResource contains network resources allocated and used for a container in a network type EndpointResource struct { Name string EndpointID string MacAddress string IPv4Address string IPv6Address string } // NetworkCreate is the expected body of the "create network" http request message type NetworkCreate struct { Name string CheckDuplicate bool Driver string IPAM network.IPAM Internal bool Options map[string]string } // NetworkCreateResponse is the response message sent by the server for network create call type NetworkCreateResponse struct { ID string `json:"Id"` Warning string } // NetworkConnect represents the data to be used to connect a container to the network type NetworkConnect struct { Container string EndpointConfig *network.EndpointSettings `json:",omitempty"` } // NetworkDisconnect represents the data to be used to disconnect a container from the network type NetworkDisconnect struct { Container string Force bool } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/versions/000077500000000000000000000000001267010174400254675ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/versions/v1p19/000077500000000000000000000000001267010174400263475ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/versions/v1p19/types.go000066400000000000000000000017571267010174400300540ustar00rootroot00000000000000// Package v1p19 provides specific API types for the API version 1, patch 19. 
package v1p19 import ( "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/versions/v1p20" "github.com/docker/go-connections/nat" ) // ContainerJSON is a backcompatibility struct for APIs prior to 1.20. // Note this is not used by the Windows daemon. type ContainerJSON struct { *types.ContainerJSONBase Volumes map[string]string VolumesRW map[string]bool Config *ContainerConfig NetworkSettings *v1p20.NetworkSettings } // ContainerConfig is a backcompatibility struct for APIs prior to 1.20. type ContainerConfig struct { *container.Config MacAddress string NetworkDisabled bool ExposedPorts map[nat.Port]struct{} // backward compatibility, they now live in HostConfig VolumeDriver string Memory int64 MemorySwap int64 CPUShares int64 `json:"CpuShares"` CPUSet string `json:"Cpuset"` } docker-1.10.3/vendor/src/github.com/docker/engine-api/types/versions/v1p20/000077500000000000000000000000001267010174400263375ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go000066400000000000000000000021111267010174400300250ustar00rootroot00000000000000// Package v1p20 provides specific API types for the API version 1, patch 20. 
package v1p20 import ( "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" "github.com/docker/go-connections/nat" ) // ContainerJSON is a backcompatibility struct for the API 1.20 type ContainerJSON struct { *types.ContainerJSONBase Mounts []types.MountPoint Config *ContainerConfig NetworkSettings *NetworkSettings } // ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 type ContainerConfig struct { *container.Config MacAddress string NetworkDisabled bool ExposedPorts map[nat.Port]struct{} // backward compatibility, they now live in HostConfig VolumeDriver string } // StatsJSON is a backcompatibility struct used in Stats for API prior to 1.21 type StatsJSON struct { types.Stats Network types.NetworkStats `json:"network,omitempty"` } // NetworkSettings is a backward compatible struct for APIs prior to 1.21 type NetworkSettings struct { types.NetworkSettingsBase types.DefaultNetworkSettings } docker-1.10.3/vendor/src/github.com/docker/go-connections/000077500000000000000000000000001267010174400233645ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go-connections/LICENSE000066400000000000000000000250101267010174400243670ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2015 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
docker-1.10.3/vendor/src/github.com/docker/go-connections/nat/000077500000000000000000000000001267010174400241465ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go-connections/nat/nat.go000066400000000000000000000136411267010174400252640ustar00rootroot00000000000000// Package nat is a convenience package for manipulation of strings describing network ports. package nat import ( "fmt" "net" "strconv" "strings" ) const ( // portSpecTemplate is the expected format for port specifications portSpecTemplate = "ip:hostPort:containerPort" ) // PortBinding represents a binding between a Host IP address and a Host Port type PortBinding struct { // HostIP is the host IP Address HostIP string `json:"HostIp"` // HostPort is the host port number HostPort string } // PortMap is a collection of PortBinding indexed by Port type PortMap map[Port][]PortBinding // PortSet is a collection of structs indexed by Port type PortSet map[Port]struct{} // Port is a string containing port number and protocol in the format "80/tcp" type Port string // NewPort creates a new instance of a Port given a protocol and port number or port range func NewPort(proto, port string) (Port, error) { // Check for parsing issues on "port" now so we can avoid having // to check it later on. 
portStartInt, portEndInt, err := ParsePortRangeToInt(port) if err != nil { return "", err } if portStartInt == portEndInt { return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil } return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil } // ParsePort parses the port number string and returns an int func ParsePort(rawPort string) (int, error) { if len(rawPort) == 0 { return 0, nil } port, err := strconv.ParseUint(rawPort, 10, 16) if err != nil { return 0, err } return int(port), nil } // ParsePortRangeToInt parses the port range string and returns start/end ints func ParsePortRangeToInt(rawPort string) (int, int, error) { if len(rawPort) == 0 { return 0, 0, nil } start, end, err := ParsePortRange(rawPort) if err != nil { return 0, 0, err } return int(start), int(end), nil } // Proto returns the protocol of a Port func (p Port) Proto() string { proto, _ := SplitProtoPort(string(p)) return proto } // Port returns the port number of a Port func (p Port) Port() string { _, port := SplitProtoPort(string(p)) return port } // Int returns the port number of a Port as an int func (p Port) Int() int { portStr := p.Port() if len(portStr) == 0 { return 0 } // We don't need to check for an error because we're going to // assume that any error would have been found, and reported, in NewPort() port, _ := strconv.ParseUint(portStr, 10, 16) return int(port) } // Range returns the start/end port numbers of a Port range as ints func (p Port) Range() (int, int, error) { return ParsePortRangeToInt(p.Port()) } // SplitProtoPort splits a port in the format of proto/port func SplitProtoPort(rawPort string) (string, string) { parts := strings.Split(rawPort, "/") l := len(parts) if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { return "", "" } if l == 1 { return "tcp", rawPort } if len(parts[1]) == 0 { return "tcp", parts[0] } return parts[1], parts[0] } func validateProto(proto string) bool { for _, availableProto := range []string{"tcp", "udp"} { if 
availableProto == proto { return true } } return false } // ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses // these in to the internal types func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { var ( exposedPorts = make(map[Port]struct{}, len(ports)) bindings = make(map[Port][]PortBinding) ) for _, rawPort := range ports { proto := "tcp" if i := strings.LastIndex(rawPort, "/"); i != -1 { proto = rawPort[i+1:] rawPort = rawPort[:i] } if !strings.Contains(rawPort, ":") { rawPort = fmt.Sprintf("::%s", rawPort) } else if len(strings.Split(rawPort, ":")) == 2 { rawPort = fmt.Sprintf(":%s", rawPort) } parts, err := PartParser(portSpecTemplate, rawPort) if err != nil { return nil, nil, err } var ( containerPort = parts["containerPort"] rawIP = parts["ip"] hostPort = parts["hostPort"] ) if rawIP != "" && net.ParseIP(rawIP) == nil { return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) } if containerPort == "" { return nil, nil, fmt.Errorf("No port specified: %s", rawPort) } startPort, endPort, err := ParsePortRange(containerPort) if err != nil { return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) } var startHostPort, endHostPort uint64 = 0, 0 if len(hostPort) > 0 { startHostPort, endHostPort, err = ParsePortRange(hostPort) if err != nil { return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) } } if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { // Allow host port range iff containerPort is not a range. // In this case, use the host port range as the dynamic // host port range to allocate into. 
if endPort != startPort { return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) } } if !validateProto(strings.ToLower(proto)) { return nil, nil, fmt.Errorf("Invalid proto: %s", proto) } for i := uint64(0); i <= (endPort - startPort); i++ { containerPort = strconv.FormatUint(startPort+i, 10) if len(hostPort) > 0 { hostPort = strconv.FormatUint(startHostPort+i, 10) } // Set hostPort to a range only if there is a single container port // and a dynamic host port. if startPort == endPort && startHostPort != endHostPort { hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) } port, err := NewPort(strings.ToLower(proto), containerPort) if err != nil { return nil, nil, err } if _, exists := exposedPorts[port]; !exists { exposedPorts[port] = struct{}{} } binding := PortBinding{ HostIP: rawIP, HostPort: hostPort, } bslice, exists := bindings[port] if !exists { bslice = []PortBinding{} } bindings[port] = append(bslice, binding) } } return exposedPorts, bindings, nil } docker-1.10.3/vendor/src/github.com/docker/go-connections/nat/parse.go000066400000000000000000000026151267010174400256130ustar00rootroot00000000000000package nat import ( "fmt" "strconv" "strings" ) // PartParser parses and validates the specified string (data) using the specified template // e.g. ip:public:private -> 192.168.0.1:80:8000 func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( templateParts = strings.Split(template, ":") parts = strings.Split(data, ":") out = make(map[string]string, len(templateParts)) ) if len(parts) != len(templateParts) { return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) } for i, t := range templateParts { value := "" if len(parts) > i { value = parts[i] } out[t] = value } return out, nil } // ParsePortRange parses and validates the specified string as a port-range (8000-9000) func ParsePortRange(ports string) (uint64, uint64, error) { if ports == "" { return 0, 0, fmt.Errorf("Empty string specified for ports.") } if !strings.Contains(ports, "-") { start, err := strconv.ParseUint(ports, 10, 16) end := start return start, end, err } parts := strings.Split(ports, "-") start, err := strconv.ParseUint(parts[0], 10, 16) if err != nil { return 0, 0, err } end, err := strconv.ParseUint(parts[1], 10, 16) if err != nil { return 0, 0, err } if end < start { return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) } return start, end, nil } docker-1.10.3/vendor/src/github.com/docker/go-connections/nat/sort.go000066400000000000000000000041651267010174400254720ustar00rootroot00000000000000package nat import ( "sort" "strings" ) type portSorter struct { ports []Port by func(i, j Port) bool } func (s *portSorter) Len() int { return len(s.ports) } func (s *portSorter) Swap(i, j int) { s.ports[i], s.ports[j] = s.ports[j], s.ports[i] } func (s *portSorter) Less(i, j int) bool { ip := s.ports[i] jp := s.ports[j] return s.by(ip, jp) } // Sort sorts a list of ports using the provided predicate // This function should compare `i` and `j`, returning true if `i` is // considered to be less than `j` func Sort(ports []Port, predicate func(i, j Port) bool) { s := &portSorter{ports, predicate} sort.Sort(s) } type portMapEntry struct { port Port binding PortBinding } type portMapSorter []portMapEntry func (s portMapSorter) Len() int { return len(s) } func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // sort the port so that the order is: // 1. port with larger specified bindings // 2. larger port // 3. 
port with tcp protocol func (s portMapSorter) Less(i, j int) bool { pi, pj := s[i].port, s[j].port hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") } // SortPortMap sorts the list of ports and their respected mapping. The ports // will explicit HostPort will be placed first. func SortPortMap(ports []Port, bindings PortMap) { s := portMapSorter{} for _, p := range ports { if binding, ok := bindings[p]; ok { for _, b := range binding { s = append(s, portMapEntry{port: p, binding: b}) } bindings[p] = []PortBinding{} } else { s = append(s, portMapEntry{port: p}) } } sort.Sort(s) var ( i int pm = make(map[Port]struct{}) ) // reorder ports for _, entry := range s { if _, ok := pm[entry.port]; !ok { ports[i] = entry.port pm[entry.port] = struct{}{} i++ } // reorder bindings for this port if _, ok := bindings[entry.port]; ok { bindings[entry.port] = append(bindings[entry.port], entry.binding) } } } func toInt(s string) uint64 { i, _, err := ParsePortRange(s) if err != nil { i = 0 } return i } docker-1.10.3/vendor/src/github.com/docker/go-connections/sockets/000077500000000000000000000000001267010174400250375ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go-connections/sockets/README.md000066400000000000000000000000001267010174400263040ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go000066400000000000000000000024231267010174400275250ustar00rootroot00000000000000// Package sockets provides helper functions to create and configure Unix or TCP sockets. package sockets import ( "crypto/tls" "net" "net/http" "time" ) // NewTCPSocket creates a TCP socket listener with the specified address and // and the specified tls configuration. If TLSConfig is set, will encapsulate the // TCP listener inside a TLS one. 
func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { l, err := net.Listen("tcp", addr) if err != nil { return nil, err } if tlsConfig != nil { tlsConfig.NextProtos = []string{"http/1.1"} l = tls.NewListener(l, tlsConfig) } return l, nil } // ConfigureTCPTransport configures the specified Transport according to the // specified proto and addr. // If the proto is unix (using a unix socket to communicate) the compression // is disabled. func ConfigureTCPTransport(tr *http.Transport, proto, addr string) { // Why 32? See https://github.com/docker/docker/pull/8035. timeout := 32 * time.Second if proto == "unix" { // No need for compression in local communications. tr.DisableCompression = true tr.Dial = func(_, _ string) (net.Conn, error) { return net.DialTimeout(proto, addr, timeout) } } else { tr.Proxy = http.ProxyFromEnvironment tr.Dial = (&net.Dialer{Timeout: timeout}).Dial } } docker-1.10.3/vendor/src/github.com/docker/go-connections/sockets/unix_socket.go000066400000000000000000000033571267010174400277310ustar00rootroot00000000000000// +build linux freebsd package sockets import ( "fmt" "net" "os" "strconv" "syscall" "github.com/Sirupsen/logrus" "github.com/opencontainers/runc/libcontainer/user" ) // NewUnixSocket creates a unix socket with the specified path and group. 
func NewUnixSocket(path, group string) (net.Listener, error) { if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { return nil, err } mask := syscall.Umask(0777) defer syscall.Umask(mask) l, err := net.Listen("unix", path) if err != nil { return nil, err } if err := setSocketGroup(path, group); err != nil { l.Close() return nil, err } if err := os.Chmod(path, 0660); err != nil { l.Close() return nil, err } return l, nil } func setSocketGroup(path, group string) error { if group == "" { return nil } if err := changeGroup(path, group); err != nil { if group != "docker" { return err } logrus.Debugf("Warning: could not change group %s to docker: %v", path, err) } return nil } func changeGroup(path string, nameOrGid string) error { gid, err := lookupGidByName(nameOrGid) if err != nil { return err } logrus.Debugf("%s group found. gid: %d", nameOrGid, gid) return os.Chown(path, 0, gid) } func lookupGidByName(nameOrGid string) (int, error) { groupFile, err := user.GetGroupPath() if err != nil { return -1, err } groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid }) if err != nil { return -1, err } if groups != nil && len(groups) > 0 { return groups[0].Gid, nil } gid, err := strconv.Atoi(nameOrGid) if err == nil { logrus.Warnf("Could not find GID %d", gid) return gid, nil } return -1, fmt.Errorf("Group %s not found", nameOrGid) } docker-1.10.3/vendor/src/github.com/docker/go-connections/tlsconfig/000077500000000000000000000000001267010174400253545ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go-connections/tlsconfig/config.go000066400000000000000000000110031267010174400271430ustar00rootroot00000000000000// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
// // As a reminder from https://golang.org/pkg/crypto/tls/#Config: // A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. // A Config may be reused; the tls package will also not modify it. package tlsconfig import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "os" "github.com/Sirupsen/logrus" ) // Options represents the information needed to create client and server TLS configurations. type Options struct { CAFile string // If either CertFile or KeyFile is empty, Client() will not load them // preventing the client from authenticating to the server. // However, Server() requires them and will error out if they are empty. CertFile string KeyFile string // client-only option InsecureSkipVerify bool // server-only option ClientAuth tls.ClientAuthType } // Extra (server-side) accepted CBC cipher suites - will phase out in the future var acceptedCBCCiphers = []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA, } // Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) var clientCipherSuites = []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, } // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls // options struct but wants to use a commonly accepted set of TLS cipher suites, with // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) // ServerDefault is a secure-enough TLS configuration for the server TLS configuration. 
var ServerDefault = tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: DefaultServerAcceptedCiphers, } // ClientDefault is a secure-enough TLS configuration for the client TLS configuration. var ClientDefault = tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. func certPool(caFile string) (*x509.CertPool, error) { // If we should verify the server, we need to load a trusted ca certPool := x509.NewCertPool() pem, err := ioutil.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) } if !certPool.AppendCertsFromPEM(pem) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } s := certPool.Subjects() subjects := make([]string, len(s)) for i, subject := range s { subjects[i] = string(subject) } logrus.Debugf("Trusting certs with subjects: %v", subjects) return certPool, nil } // Client returns a TLS configuration meant to be used by a client. func Client(options Options) (*tls.Config, error) { tlsConfig := ClientDefault tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify if !options.InsecureSkipVerify { CAs, err := certPool(options.CAFile) if err != nil { return nil, err } tlsConfig.RootCAs = CAs } if options.CertFile != "" && options.KeyFile != "" { tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} } return &tlsConfig, nil } // Server returns a TLS configuration meant to be used by a server. 
func Server(options Options) (*tls.Config, error) { tlsConfig := ServerDefault tlsConfig.ClientAuth = options.ClientAuth tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { if os.IsNotExist(err) { return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} if options.ClientAuth >= tls.VerifyClientCertIfGiven { CAs, err := certPool(options.CAFile) if err != nil { return nil, err } tlsConfig.ClientCAs = CAs } return &tlsConfig, nil } docker-1.10.3/vendor/src/github.com/docker/go-units/000077500000000000000000000000001267010174400222045ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go-units/LICENSE000066400000000000000000000250101267010174400232070ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2015 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
docker-1.10.3/vendor/src/github.com/docker/go-units/README.md000066400000000000000000000007141267010174400234650ustar00rootroot00000000000000[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) # Introduction go-units is a library to transform human friendly measurements into machine friendly values. ## Usage See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. ## License go-units is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. docker-1.10.3/vendor/src/github.com/docker/go-units/circle.yml000066400000000000000000000003741267010174400241740ustar00rootroot00000000000000dependencies: post: # install golint - go get github.com/golang/lint/golint test: pre: # run analysis before tests - go vet ./... - test -z "$(golint ./... | tee /dev/stderr)" - test -z "$(gofmt -s -l . | tee /dev/stderr)" docker-1.10.3/vendor/src/github.com/docker/go-units/duration.go000066400000000000000000000020101267010174400243510ustar00rootroot00000000000000// Package units provides helper function to parse and print size and time units // in human-readable format. package units import ( "fmt" "time" ) // HumanDuration returns a human-readable approximation of a duration // (eg. "About a minute", "4 hours ago", etc.). 
func HumanDuration(d time.Duration) string { if seconds := int(d.Seconds()); seconds < 1 { return "Less than a second" } else if seconds < 60 { return fmt.Sprintf("%d seconds", seconds) } else if minutes := int(d.Minutes()); minutes == 1 { return "About a minute" } else if minutes < 60 { return fmt.Sprintf("%d minutes", minutes) } else if hours := int(d.Hours()); hours == 1 { return "About an hour" } else if hours < 48 { return fmt.Sprintf("%d hours", hours) } else if hours < 24*7*2 { return fmt.Sprintf("%d days", hours/24) } else if hours < 24*30*3 { return fmt.Sprintf("%d weeks", hours/24/7) } else if hours < 24*365*2 { return fmt.Sprintf("%d months", hours/24/30) } return fmt.Sprintf("%d years", int(d.Hours())/24/365) } docker-1.10.3/vendor/src/github.com/docker/go-units/size.go000066400000000000000000000046621267010174400235150ustar00rootroot00000000000000package units import ( "fmt" "regexp" "strconv" "strings" ) // See: http://en.wikipedia.org/wiki/Binary_prefix const ( // Decimal KB = 1000 MB = 1000 * KB GB = 1000 * MB TB = 1000 * GB PB = 1000 * TB // Binary KiB = 1024 MiB = 1024 * KiB GiB = 1024 * MiB TiB = 1024 * GiB PiB = 1024 * TiB ) type unitMap map[string]int64 var ( decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) ) var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} // CustomSize returns a human-readable approximation of a size // using custom format. func CustomSize(format string, size float64, base float64, _map []string) string { i := 0 for size >= base { size = size / base i++ } return fmt.Sprintf(format, size, _map[i]) } // HumanSize returns a human-readable approximation of a size // capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). 
func HumanSize(size float64) string { return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) } // BytesSize returns a human-readable size in bytes, kibibytes, // mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). func BytesSize(size float64) string { return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) } // FromHumanSize returns an integer from a human-readable specification of a // size using SI standard (eg. "44kB", "17MB"). func FromHumanSize(size string) (int64, error) { return parseSize(size, decimalMap) } // RAMInBytes parses a human-readable string representing an amount of RAM // in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and // returns the number of bytes, or -1 if the string is unparseable. // Units are case-insensitive, and the 'b' suffix is optional. func RAMInBytes(size string) (int64, error) { return parseSize(size, binaryMap) } // Parses the human-readable size string into the amount it represents. func parseSize(sizeStr string, uMap unitMap) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) if len(matches) != 3 { return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } size, err := strconv.ParseInt(matches[1], 10, 0) if err != nil { return -1, err } unitPrefix := strings.ToLower(matches[2]) if mul, ok := uMap[unitPrefix]; ok { size *= mul } return size, nil } docker-1.10.3/vendor/src/github.com/docker/go-units/ulimit.go000066400000000000000000000055001267010174400240360ustar00rootroot00000000000000package units import ( "fmt" "strconv" "strings" ) // Ulimit is a human friendly version of Rlimit. type Ulimit struct { Name string Hard int64 Soft int64 } // Rlimit specifies the resource limits, such as max open files. type Rlimit struct { Type int `json:"type,omitempty"` Hard uint64 `json:"hard,omitempty"` Soft uint64 `json:"soft,omitempty"` } const ( // magic numbers for making the syscall // some of these are defined in the syscall package, but not all. 
// Also since Windows client doesn't get access to the syscall package, need to // define these here rlimitAs = 9 rlimitCore = 4 rlimitCPU = 0 rlimitData = 2 rlimitFsize = 1 rlimitLocks = 10 rlimitMemlock = 8 rlimitMsgqueue = 12 rlimitNice = 13 rlimitNofile = 7 rlimitNproc = 6 rlimitRss = 5 rlimitRtprio = 14 rlimitRttime = 15 rlimitSigpending = 11 rlimitStack = 3 ) var ulimitNameMapping = map[string]int{ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. "core": rlimitCore, "cpu": rlimitCPU, "data": rlimitData, "fsize": rlimitFsize, "locks": rlimitLocks, "memlock": rlimitMemlock, "msgqueue": rlimitMsgqueue, "nice": rlimitNice, "nofile": rlimitNofile, "nproc": rlimitNproc, "rss": rlimitRss, "rtprio": rlimitRtprio, "rttime": rlimitRttime, "sigpending": rlimitSigpending, "stack": rlimitStack, } // ParseUlimit parses and returns a Ulimit from the specified string. func ParseUlimit(val string) (*Ulimit, error) { parts := strings.SplitN(val, "=", 2) if len(parts) != 2 { return nil, fmt.Errorf("invalid ulimit argument: %s", val) } if _, exists := ulimitNameMapping[parts[0]]; !exists { return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) } limitVals := strings.SplitN(parts[1], ":", 2) if len(limitVals) > 2 { return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) } soft, err := strconv.ParseInt(limitVals[0], 10, 64) if err != nil { return nil, err } hard := soft // in case no hard was set if len(limitVals) == 2 { hard, err = strconv.ParseInt(limitVals[1], 10, 64) } if soft > hard { return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) } return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil } // GetRlimit returns the RLimit corresponding to Ulimit. 
func (u *Ulimit) GetRlimit() (*Rlimit, error) { t, exists := ulimitNameMapping[u.Name] if !exists { return nil, fmt.Errorf("invalid ulimit name %s", u.Name) } return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil } func (u *Ulimit) String() string { return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) } docker-1.10.3/vendor/src/github.com/docker/go/000077500000000000000000000000001267010174400210445ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go/LICENSE000066400000000000000000000027071267010174400220570ustar00rootroot00000000000000Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-1.10.3/vendor/src/github.com/docker/go/canonical/000077500000000000000000000000001267010174400227735ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go/canonical/json/000077500000000000000000000000001267010174400237445ustar00rootroot00000000000000docker-1.10.3/vendor/src/github.com/docker/go/canonical/json/decode.go000066400000000000000000000650231267010174400255240ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Represents JSON data structure using native Go types: booleans, floats, // strings, arrays, and maps. package json import ( "bytes" "encoding" "encoding/base64" "errors" "fmt" "reflect" "runtime" "strconv" "unicode" "unicode/utf16" "unicode/utf8" ) // Unmarshal parses the JSON-encoded data and stores the result // in the value pointed to by v. // // Unmarshal uses the inverse of the encodings that // Marshal uses, allocating maps, slices, and pointers as necessary, // with the following additional rules: // // To unmarshal JSON into a pointer, Unmarshal first handles the case of // the JSON being the JSON literal null. In that case, Unmarshal sets // the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into // the value pointed at by the pointer. If the pointer is nil, Unmarshal // allocates a new value for it to point to. 
// // To unmarshal JSON into a struct, Unmarshal matches incoming object // keys to the keys used by Marshal (either the struct field name or its tag), // preferring an exact match but also accepting a case-insensitive match. // // To unmarshal JSON into an interface value, // Unmarshal stores one of these in the interface value: // // bool, for JSON booleans // float64, for JSON numbers // string, for JSON strings // []interface{}, for JSON arrays // map[string]interface{}, for JSON objects // nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice to nil // and then appends each element to the slice. // // To unmarshal a JSON object into a map, Unmarshal replaces the map // with an empty map and then adds key-value pairs from the object to // the map. // // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshalling as best it can. // If no more serious errors are encountered, Unmarshal returns // an UnmarshalTypeError describing the earliest such error. // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean // ``not present,'' unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. // func Unmarshal(data []byte, v interface{}) error { // Check for well-formedness. // Avoids filling out half a data structure // before discovering a JSON syntax error. var d decodeState err := checkValid(data, &d.scan) if err != nil { return err } d.init(data) return d.unmarshal(v) } // Unmarshaler is the interface implemented by objects // that can unmarshal a JSON description of themselves. 
// The input can be assumed to be a valid encoding of // a JSON value. UnmarshalJSON must copy the JSON data // if it wishes to retain the data after returning. type Unmarshaler interface { UnmarshalJSON([]byte) error } // An UnmarshalTypeError describes a JSON value that was // not appropriate for a value of a specific Go type. type UnmarshalTypeError struct { Value string // description of JSON value - "bool", "array", "number -5" Type reflect.Type // type of Go value it could not be assigned to Offset int64 // error occurred after reading Offset bytes } func (e *UnmarshalTypeError) Error() string { return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() } // An UnmarshalFieldError describes a JSON object key that // led to an unexported (and therefore unwritable) struct field. // (No longer used; kept for compatibility.) type UnmarshalFieldError struct { Key string Type reflect.Type Field reflect.StructField } func (e *UnmarshalFieldError) Error() string { return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() } // An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. // (The argument to Unmarshal must be a non-nil pointer.) 
type InvalidUnmarshalError struct { Type reflect.Type } func (e *InvalidUnmarshalError) Error() string { if e.Type == nil { return "json: Unmarshal(nil)" } if e.Type.Kind() != reflect.Ptr { return "json: Unmarshal(non-pointer " + e.Type.String() + ")" } return "json: Unmarshal(nil " + e.Type.String() + ")" } func (d *decodeState) unmarshal(v interface{}) (err error) { defer func() { if r := recover(); r != nil { if _, ok := r.(runtime.Error); ok { panic(r) } err = r.(error) } }() rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr || rv.IsNil() { return &InvalidUnmarshalError{reflect.TypeOf(v)} } d.scan.reset() // We decode rv not rv.Elem because the Unmarshaler interface // test must be applied at the top level of the value. d.value(rv) return d.savedError } // A Number represents a JSON number literal. type Number string // String returns the literal text of the number. func (n Number) String() string { return string(n) } // Float64 returns the number as a float64. func (n Number) Float64() (float64, error) { return strconv.ParseFloat(string(n), 64) } // Int64 returns the number as an int64. func (n Number) Int64() (int64, error) { return strconv.ParseInt(string(n), 10, 64) } // decodeState represents the state while decoding a JSON value. type decodeState struct { data []byte off int // read offset in data scan scanner nextscan scanner // for calls to nextValue savedError error useNumber bool canonical bool } // errPhase is used for errors that should not happen unless // there is a bug in the JSON decoder or something is editing // the data slice while the decoder executes. var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") func (d *decodeState) init(data []byte) *decodeState { d.data = data d.off = 0 d.savedError = nil return d } // error aborts the decoding by panicking with err. func (d *decodeState) error(err error) { panic(err) } // saveError saves the first err it is called with, // for reporting at the end of the unmarshal. 
func (d *decodeState) saveError(err error) { if d.savedError == nil { d.savedError = err } } // next cuts off and returns the next full JSON value in d.data[d.off:]. // The next value is known to be an object or array, not a literal. func (d *decodeState) next() []byte { c := d.data[d.off] item, rest, err := nextValue(d.data[d.off:], &d.nextscan) if err != nil { d.error(err) } d.off = len(d.data) - len(rest) // Our scanner has seen the opening brace/bracket // and thinks we're still in the middle of the object. // invent a closing brace/bracket to get it out. if c == '{' { d.scan.step(&d.scan, '}') } else { d.scan.step(&d.scan, ']') } return item } // scanWhile processes bytes in d.data[d.off:] until it // receives a scan code not equal to op. // It updates d.off and returns the new scan code. func (d *decodeState) scanWhile(op int) int { var newOp int for { if d.off >= len(d.data) { newOp = d.scan.eof() d.off = len(d.data) + 1 // mark processed EOF with len+1 } else { c := int(d.data[d.off]) d.off++ newOp = d.scan.step(&d.scan, c) } if newOp != op { break } } return newOp } // value decodes a JSON value from d.data[d.off:] into the value. // it updates d.off to point past the decoded value. func (d *decodeState) value(v reflect.Value) { if !v.IsValid() { _, rest, err := nextValue(d.data[d.off:], &d.nextscan) if err != nil { d.error(err) } d.off = len(d.data) - len(rest) // d.scan thinks we're still at the beginning of the item. // Feed in an empty string - the shortest, simplest value - // so that it knows we got to the end of the value. if d.scan.redo { // rewind. 
d.scan.redo = false d.scan.step = stateBeginValue } d.scan.step(&d.scan, '"') d.scan.step(&d.scan, '"') n := len(d.scan.parseState) if n > 0 && d.scan.parseState[n-1] == parseObjectKey { // d.scan thinks we just read an object key; finish the object d.scan.step(&d.scan, ':') d.scan.step(&d.scan, '"') d.scan.step(&d.scan, '"') d.scan.step(&d.scan, '}') } return } switch op := d.scanWhile(scanSkipSpace); op { default: d.error(errPhase) case scanBeginArray: d.array(v) case scanBeginObject: d.object(v) case scanBeginLiteral: d.literal(v) } } type unquotedValue struct{} // valueQuoted is like value but decodes a // quoted string literal or literal null into an interface value. // If it finds anything other than a quoted string literal or null, // valueQuoted returns unquotedValue{}. func (d *decodeState) valueQuoted() interface{} { switch op := d.scanWhile(scanSkipSpace); op { default: d.error(errPhase) case scanBeginArray: d.array(reflect.Value{}) case scanBeginObject: d.object(reflect.Value{}) case scanBeginLiteral: switch v := d.literalInterface().(type) { case nil, string: return v } } return unquotedValue{} } // indirect walks down v allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. // if decodingNull is true, indirect stops at the last pointer so it can be set to nil. func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { // If v is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { v = v.Addr() } for { // Load value from interface, but only if the result will be // usefully addressable. 
if v.Kind() == reflect.Interface && !v.IsNil() { e := v.Elem() if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { v = e continue } } if v.Kind() != reflect.Ptr { break } if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { break } if v.IsNil() { v.Set(reflect.New(v.Type().Elem())) } if v.Type().NumMethod() > 0 { if u, ok := v.Interface().(Unmarshaler); ok { return u, nil, reflect.Value{} } if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { return nil, u, reflect.Value{} } } v = v.Elem() } return nil, nil, v } // array consumes an array from d.data[d.off-1:], decoding into the value v. // the first byte of the array ('[') has been read already. func (d *decodeState) array(v reflect.Value) { // Check for unmarshaler. u, ut, pv := d.indirect(v, false) if u != nil { d.off-- err := u.UnmarshalJSON(d.next()) if err != nil { d.error(err) } return } if ut != nil { d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) d.off-- d.next() return } v = pv // Check type of target. switch v.Kind() { case reflect.Interface: if v.NumMethod() == 0 { // Decoding into nil interface? Switch to non-reflect code. v.Set(reflect.ValueOf(d.arrayInterface())) return } // Otherwise it's invalid. fallthrough default: d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) d.off-- d.next() return case reflect.Array: case reflect.Slice: break } i := 0 for { // Look ahead for ] - can only happen on first iteration. op := d.scanWhile(scanSkipSpace) if op == scanEndArray { break } // Back up so d.value can have the byte we just read. d.off-- d.scan.undo(op) // Get element of array, growing if necessary. if v.Kind() == reflect.Slice { // Grow slice if necessary if i >= v.Cap() { newcap := v.Cap() + v.Cap()/2 if newcap < 4 { newcap = 4 } newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) reflect.Copy(newv, v) v.Set(newv) } if i >= v.Len() { v.SetLen(i + 1) } } if i < v.Len() { // Decode into element. 
d.value(v.Index(i)) } else { // Ran out of fixed array: skip. d.value(reflect.Value{}) } i++ // Next token must be , or ]. op = d.scanWhile(scanSkipSpace) if op == scanEndArray { break } if op != scanArrayValue { d.error(errPhase) } } if i < v.Len() { if v.Kind() == reflect.Array { // Array. Zero the rest. z := reflect.Zero(v.Type().Elem()) for ; i < v.Len(); i++ { v.Index(i).Set(z) } } else { v.SetLen(i) } } if i == 0 && v.Kind() == reflect.Slice { v.Set(reflect.MakeSlice(v.Type(), 0, 0)) } } var nullLiteral = []byte("null") // object consumes an object from d.data[d.off-1:], decoding into the value v. // the first byte ('{') of the object has been read already. func (d *decodeState) object(v reflect.Value) { // Check for unmarshaler. u, ut, pv := d.indirect(v, false) if u != nil { d.off-- err := u.UnmarshalJSON(d.next()) if err != nil { d.error(err) } return } if ut != nil { d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over { } in input return } v = pv // Decoding into nil interface? Switch to non-reflect code. if v.Kind() == reflect.Interface && v.NumMethod() == 0 { v.Set(reflect.ValueOf(d.objectInterface())) return } // Check type of target: struct or map[string]T switch v.Kind() { case reflect.Map: // map must have string kind t := v.Type() if t.Key().Kind() != reflect.String { d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over { } in input return } if v.IsNil() { v.Set(reflect.MakeMap(t)) } case reflect.Struct: default: d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) d.off-- d.next() // skip over { } in input return } var mapElem reflect.Value for { // Read opening " of string key or closing }. op := d.scanWhile(scanSkipSpace) if op == scanEndObject { // closing } - can only happen on first iteration. break } if op != scanBeginLiteral { d.error(errPhase) } // Read key. 
start := d.off - 1 op = d.scanWhile(scanContinue) item := d.data[start : d.off-1] key, ok := unquoteBytes(item) if !ok { d.error(errPhase) } // Figure out field corresponding to key. var subv reflect.Value destring := false // whether the value is wrapped in a string to be decoded first if v.Kind() == reflect.Map { elemType := v.Type().Elem() if !mapElem.IsValid() { mapElem = reflect.New(elemType).Elem() } else { mapElem.Set(reflect.Zero(elemType)) } subv = mapElem } else { var f *field fields := cachedTypeFields(v.Type(), false) for i := range fields { ff := &fields[i] if bytes.Equal(ff.nameBytes, key) { f = ff break } if f == nil && ff.equalFold(ff.nameBytes, key) { f = ff } } if f != nil { subv = v destring = f.quoted for _, i := range f.index { if subv.Kind() == reflect.Ptr { if subv.IsNil() { subv.Set(reflect.New(subv.Type().Elem())) } subv = subv.Elem() } subv = subv.Field(i) } } } // Read : before value. if op == scanSkipSpace { op = d.scanWhile(scanSkipSpace) } if op != scanObjectKey { d.error(errPhase) } // Read value. if destring { switch qv := d.valueQuoted().(type) { case nil: d.literalStore(nullLiteral, subv, false) case string: d.literalStore([]byte(qv), subv, true) default: d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) } } else { d.value(subv) } // Write value back to map; // if using struct, subv points into struct already. if v.Kind() == reflect.Map { kv := reflect.ValueOf(key).Convert(v.Type().Key()) v.SetMapIndex(kv, subv) } // Next token must be , or }. op = d.scanWhile(scanSkipSpace) if op == scanEndObject { break } if op != scanObjectValue { d.error(errPhase) } } } // literal consumes a literal from d.data[d.off-1:], decoding into the value v. // The first byte of the literal has been read already // (that's how the caller knows it's a literal). func (d *decodeState) literal(v reflect.Value) { // All bytes inside literal return scanContinue op code. 
start := d.off - 1 op := d.scanWhile(scanContinue) // Scan read one byte too far; back up. d.off-- d.scan.undo(op) d.literalStore(d.data[start:d.off], v, false) } // convertNumber converts the number literal s to a float64 or a Number // depending on the setting of d.useNumber. func (d *decodeState) convertNumber(s string) (interface{}, error) { if d.useNumber { return Number(s), nil } f, err := strconv.ParseFloat(s, 64) if err != nil { return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} } return f, nil } var numberType = reflect.TypeOf(Number("")) // literalStore decodes a literal stored in item into v. // // fromQuoted indicates whether this literal came from unwrapping a // string from the ",string" struct tag option. this is used only to // produce more helpful error messages. func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { // Check for unmarshaler. if len(item) == 0 { //Empty string given d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) return } wantptr := item[0] == 'n' // null u, ut, pv := d.indirect(v, wantptr) if u != nil { err := u.UnmarshalJSON(item) if err != nil { d.error(err) } return } if ut != nil { if item[0] != '"' { if fromQuoted { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) } return } s, ok := unquoteBytes(item) if !ok { if fromQuoted { d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.error(errPhase) } } err := ut.UnmarshalText(s) if err != nil { d.error(err) } return } v = pv switch c := item[0]; c { case 'n': // null switch v.Kind() { case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: v.Set(reflect.Zero(v.Type())) // otherwise, ignore null for primitives/string } case 't', 'f': // 
true, false value := c == 't' switch v.Kind() { default: if fromQuoted { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) } case reflect.Bool: v.SetBool(value) case reflect.Interface: if v.NumMethod() == 0 { v.Set(reflect.ValueOf(value)) } else { d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) } } case '"': // string s, ok := unquoteBytes(item) if !ok { if fromQuoted { d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.error(errPhase) } } switch v.Kind() { default: d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) case reflect.Slice: if v.Type().Elem().Kind() != reflect.Uint8 { d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) break } b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) n, err := base64.StdEncoding.Decode(b, s) if err != nil { d.saveError(err) break } v.Set(reflect.ValueOf(b[0:n])) case reflect.String: v.SetString(string(s)) case reflect.Interface: if v.NumMethod() == 0 { v.Set(reflect.ValueOf(string(s))) } else { d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) } } default: // number if c != '-' && (c < '0' || c > '9') { if fromQuoted { d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.error(errPhase) } } s := string(item) switch v.Kind() { default: if v.Kind() == reflect.String && v.Type() == numberType { v.SetString(s) break } if fromQuoted { d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) } else { d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) } case reflect.Interface: n, err := d.convertNumber(s) if err != nil { d.saveError(err) break } if v.NumMethod() != 0 { d.saveError(&UnmarshalTypeError{"number", v.Type(), 
int64(d.off)}) break } v.Set(reflect.ValueOf(n)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: n, err := strconv.ParseInt(s, 10, 64) if err != nil || v.OverflowInt(n) { d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) break } v.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: n, err := strconv.ParseUint(s, 10, 64) if err != nil || v.OverflowUint(n) { d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) break } v.SetUint(n) case reflect.Float32, reflect.Float64: n, err := strconv.ParseFloat(s, v.Type().Bits()) if err != nil || v.OverflowFloat(n) { d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) break } v.SetFloat(n) } } } // The xxxInterface routines build up a value to be stored // in an empty interface. They are not strictly necessary, // but they avoid the weight of reflection in this common case. // valueInterface is like value but returns interface{} func (d *decodeState) valueInterface() interface{} { switch d.scanWhile(scanSkipSpace) { default: d.error(errPhase) panic("unreachable") case scanBeginArray: return d.arrayInterface() case scanBeginObject: return d.objectInterface() case scanBeginLiteral: return d.literalInterface() } } // arrayInterface is like array but returns []interface{}. func (d *decodeState) arrayInterface() []interface{} { var v = make([]interface{}, 0) for { // Look ahead for ] - can only happen on first iteration. op := d.scanWhile(scanSkipSpace) if op == scanEndArray { break } // Back up so d.value can have the byte we just read. d.off-- d.scan.undo(op) v = append(v, d.valueInterface()) // Next token must be , or ]. op = d.scanWhile(scanSkipSpace) if op == scanEndArray { break } if op != scanArrayValue { d.error(errPhase) } } return v } // objectInterface is like object but returns map[string]interface{}. 
func (d *decodeState) objectInterface() map[string]interface{} { m := make(map[string]interface{}) for { // Read opening " of string key or closing }. op := d.scanWhile(scanSkipSpace) if op == scanEndObject { // closing } - can only happen on first iteration. break } if op != scanBeginLiteral { d.error(errPhase) } // Read string key. start := d.off - 1 op = d.scanWhile(scanContinue) item := d.data[start : d.off-1] key, ok := unquote(item) if !ok { d.error(errPhase) } // Read : before value. if op == scanSkipSpace { op = d.scanWhile(scanSkipSpace) } if op != scanObjectKey { d.error(errPhase) } // Read value. m[key] = d.valueInterface() // Next token must be , or }. op = d.scanWhile(scanSkipSpace) if op == scanEndObject { break } if op != scanObjectValue { d.error(errPhase) } } return m } // literalInterface is like literal but returns an interface value. func (d *decodeState) literalInterface() interface{} { // All bytes inside literal return scanContinue op code. start := d.off - 1 op := d.scanWhile(scanContinue) // Scan read one byte too far; back up. d.off-- d.scan.undo(op) item := d.data[start:d.off] switch c := item[0]; c { case 'n': // null return nil case 't', 'f': // true, false return c == 't' case '"': // string s, ok := unquote(item) if !ok { d.error(errPhase) } return s default: // number if c != '-' && (c < '0' || c > '9') { d.error(errPhase) } n, err := d.convertNumber(string(item)) if err != nil { d.saveError(err) } return n } } // getu4 decodes \uXXXX from the beginning of s, returning the hex value, // or it returns -1. func getu4(s []byte) rune { if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { return -1 } r, err := strconv.ParseUint(string(s[2:6]), 16, 64) if err != nil { return -1 } return rune(r) } // unquote converts a quoted JSON string literal s into an actual string t. // The rules are different than for Go, so cannot use strconv.Unquote. 
func unquote(s []byte) (t string, ok bool) { s, ok = unquoteBytes(s) t = string(s) return } func unquoteBytes(s []byte) (t []byte, ok bool) { if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { return } s = s[1 : len(s)-1] // Check for unusual characters. If there are none, // then no unquoting is needed, so return a slice of the // original bytes. r := 0 for r < len(s) { c := s[r] if c == '\\' || c == '"' || c < ' ' { break } if c < utf8.RuneSelf { r++ continue } rr, size := utf8.DecodeRune(s[r:]) if rr == utf8.RuneError && size == 1 { break } r += size } if r == len(s) { return s, true } b := make([]byte, len(s)+2*utf8.UTFMax) w := copy(b, s[0:r]) for r < len(s) { // Out of room? Can only happen if s is full of // malformed UTF-8 and we're replacing each // byte with RuneError. if w >= len(b)-2*utf8.UTFMax { nb := make([]byte, (len(b)+utf8.UTFMax)*2) copy(nb, b[0:w]) b = nb } switch c := s[r]; { case c == '\\': r++ if r >= len(s) { return } switch s[r] { default: return case '"', '\\', '/', '\'': b[w] = s[r] r++ w++ case 'b': b[w] = '\b' r++ w++ case 'f': b[w] = '\f' r++ w++ case 'n': b[w] = '\n' r++ w++ case 'r': b[w] = '\r' r++ w++ case 't': b[w] = '\t' r++ w++ case 'u': r-- rr := getu4(s[r:]) if rr < 0 { return } r += 6 if utf16.IsSurrogate(rr) { rr1 := getu4(s[r:]) if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { // A valid pair; consume. r += 6 w += utf8.EncodeRune(b[w:], dec) break } // Invalid surrogate; fall back to replacement rune. rr = unicode.ReplacementChar } w += utf8.EncodeRune(b[w:], rr) } // Quote, control characters are invalid. case c == '"', c < ' ': return // ASCII case c < utf8.RuneSelf: b[w] = c r++ w++ // Coerce to well-formed UTF-8. 
default: rr, size := utf8.DecodeRune(s[r:]) r += size w += utf8.EncodeRune(b[w:], rr) } } return b[0:w], true } docker-1.10.3/vendor/src/github.com/docker/go/canonical/json/encode.go000066400000000000000000000773051267010174400255440ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package json implements encoding and decoding of JSON objects as defined in // RFC 4627. The mapping between JSON objects and Go values is described // in the documentation for the Marshal and Unmarshal functions. // // See "JSON and Go" for an introduction to this package: // https://golang.org/doc/articles/json_and_go.html package json import ( "bytes" "encoding" "encoding/base64" "math" "reflect" "runtime" "sort" "strconv" "strings" "sync" "unicode" "unicode/utf8" ) // Marshal returns the JSON encoding of v. // // Marshal traverses the value v recursively. // If an encountered value implements the Marshaler interface // and is not a nil pointer, Marshal calls its MarshalJSON method // to produce JSON. The nil pointer exception is not strictly necessary // but mimics a similar, necessary exception in the behavior of // UnmarshalJSON. // // Otherwise, Marshal uses the following type-dependent default encodings: // // Boolean values encode as JSON booleans. // // Floating point, integer, and Number values encode as JSON numbers. // // String values encode as JSON strings coerced to valid UTF-8, // replacing invalid bytes with the Unicode replacement rune. // The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" // to keep some browsers from misinterpreting JSON output as HTML. // Ampersand "&" is also escaped to "\u0026" for the same reason. // // Array and slice values encode as JSON arrays, except that // []byte encodes as a base64-encoded string, and a nil slice // encodes as the null JSON object. 
// // Struct values encode as JSON objects. Each exported struct field // becomes a member of the object unless // - the field's tag is "-", or // - the field is empty and its tag specifies the "omitempty" option. // The empty values are false, 0, any // nil pointer or interface value, and any array, slice, map, or string of // length zero. The object's default key string is the struct field name // but can be specified in the struct field's tag value. The "json" key in // the struct field's tag value is the key name, followed by an optional comma // and options. Examples: // // // Field is ignored by this package. // Field int `json:"-"` // // // Field appears in JSON as key "myName". // Field int `json:"myName"` // // // Field appears in JSON as key "myName" and // // the field is omitted from the object if its value is empty, // // as defined above. // Field int `json:"myName,omitempty"` // // // Field appears in JSON as key "Field" (the default), but // // the field is skipped if empty. // // Note the leading comma. // Field int `json:",omitempty"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // // Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, dollar signs, percent signs, hyphens, // underscores and slashes. // // Anonymous struct fields are usually marshaled as if their inner exported fields // were fields in the outer struct, subject to the usual Go visibility rules amended // as described in the next paragraph. // An anonymous struct field with a name given in its JSON tag is treated as // having that name, rather than being anonymous. 
// An anonymous struct field of interface type is treated the same as having // that type as its name, rather than being anonymous. // // The Go visibility rules for struct fields are amended for JSON when // deciding which field to marshal or unmarshal. If there are // multiple fields at the same level, and that level is the least // nested (and would therefore be the nesting level selected by the // usual Go rules), the following extra rules apply: // // 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, // even if there are multiple untagged fields that would otherwise conflict. // 2) If there is exactly one field (tagged or not according to the first rule), that is selected. // 3) Otherwise there are multiple fields, and all are ignored; no error occurs. // // Handling of anonymous struct fields is new in Go 1.1. // Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of // an anonymous struct field in both current and earlier versions, give the field // a JSON tag of "-". // // Map values encode as JSON objects. // The map's key type must be string; the map keys are used as JSON object // keys, subject to the UTF-8 coercion described for string values above. // // Pointer values encode as the value pointed to. // A nil pointer encodes as the null JSON object. // // Interface values encode as the value contained in the interface. // A nil interface value encodes as the null JSON object. // // Channel, complex, and function values cannot be encoded in JSON. // Attempting to encode such a value causes Marshal to return // an UnsupportedTypeError. // // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an infinite recursion. // func Marshal(v interface{}) ([]byte, error) { return marshal(v, false) } // MarshalIndent is like Marshal but applies Indent to format the output. 
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { b, err := Marshal(v) if err != nil { return nil, err } var buf bytes.Buffer err = Indent(&buf, b, prefix, indent) if err != nil { return nil, err } return buf.Bytes(), nil } // MarshalCanonical is like Marshal but encodes into Canonical JSON. // Read more at: http://wiki.laptop.org/go/Canonical_JSON func MarshalCanonical(v interface{}) ([]byte, error) { return marshal(v, true) } func marshal(v interface{}, canonical bool) ([]byte, error) { e := &encodeState{canonical: canonical} err := e.marshal(v) if err != nil { return nil, err } return e.Bytes(), nil } // HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 // characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 // so that the JSON will be safe to embed inside HTML